From ee491da9e7b858a26a92084aeda4569c9f955401 Mon Sep 17 00:00:00 2001
From: Brandon Walker
Date: Wed, 17 Jul 2019 09:34:13 -0400
Subject: [PATCH] create skeleton controller using knative/pkg injection infrastructure

---
 .errcheck.txt | 1 +
 .golangci.yml | 9 +
 Gopkg.lock | 626 +-
 Gopkg.toml | 94 +-
 cmd/controller/kodata/LICENSE | 1 +
 cmd/controller/kodata/VENDOR-LICENSE | 1 +
 cmd/controller/main.go | 33 +
 config/config-logging.yaml | 50 +
 config/config-observability.yaml | 53 +
 config/controller-service.yaml | 29 +
 config/controller.yaml | 54 +
 hack/update-codegen.sh | 12 +-
 hack/update-deps.sh | 4 +-
 hack/verify-codegen.sh | 4 +-
 .../v1alpha1/zz_generated.deepcopy.go | 6 +-
 pkg/client/clientset/versioned/clientset.go | 8 +
 .../versioned/fake/clientset_generated.go | 12 +-
 .../typed/triggers/v1alpha1/eventlistener.go | 17 -
 .../v1alpha1/fake/fake_eventlistener.go | 2 +-
 .../v1alpha1/fake/fake_triggerbinding.go | 2 +-
 .../v1alpha1/fake/fake_triggertemplate.go | 2 +-
 .../typed/triggers/v1alpha1/triggerbinding.go | 17 -
 .../triggers/v1alpha1/triggers_client.go | 3 +-
 .../triggers/v1alpha1/triggertemplate.go | 17 -
 .../internalinterfaces/factory_interfaces.go | 2 -
 pkg/client/injection/client/client.go | 49 +
 pkg/client/injection/client/fake/fake.go | 54 +
 .../informers/triggers/factory/fake/fake.go | 41 +
 .../triggers/factory/triggersfactory.go | 52 +
 .../v1alpha1/eventlistener/eventlistener.go | 52 +
 .../v1alpha1/eventlistener/fake/fake.go | 40 +
 .../v1alpha1/triggerbinding/fake/fake.go | 40 +
 .../v1alpha1/triggerbinding/triggerbinding.go | 52 +
 .../v1alpha1/triggertemplate/fake/fake.go | 40 +
 .../triggertemplate/triggertemplate.go | 52 +
 pkg/reconciler/reconciler.go | 129 +
 .../v1alpha1/eventlistener/controller.go | 68 +
 .../v1alpha1/eventlistener/eventlistener.go | 73 +
 test/e2e-common.sh | 52 +
 config/triggers.yaml => test/e2e-tests.sh | 37 +-
 third_party/VENDOR-LICENSE | 7053 +++++++++++++
 .../utils => cloud.google.com/go}/LICENSE | 0
 .../go/compute/metadata/metadata.go | 513 +
 .../monitoring/apiv3/alert_policy_client.go | 285 +
 .../go/monitoring/apiv3/doc.go | 105 +
 .../go/monitoring/apiv3/group_client.go | 370 +
 .../go/monitoring/apiv3/metric_client.go | 469 +
 .../apiv3/notification_channel_client.go | 385 +
 .../go/monitoring/apiv3/path_funcs.go | 107 +
 .../monitoring/apiv3/uptime_check_client.go | 369 +
 vendor/cloud.google.com/go/trace/apiv2/doc.go | 105 +
 .../go/trace/apiv2/path_funcs.go | 43 +
 .../go/trace/apiv2/trace_client.go | 155 +
 .../exporter/stackdriver/AUTHORS | 1 +
 .../exporter/stackdriver/LICENSE | 202 +
 .../exporter/stackdriver/label.go | 33 +
 .../exporter/stackdriver/metrics.go | 547 +
 .../aws_identity_doc_utils.go | 53 +
 .../monitoredresource/gcp_metadata_config.go | 90 +
 .../monitoredresource/monitored_resources.go | 217 +
 .../exporter/stackdriver/sanitize.go | 50 +
 .../exporter/stackdriver/stackdriver.go | 346 +
 .../exporter/stackdriver/stats.go | 571 ++
 .../exporter/stackdriver/trace.go | 178 +
 .../exporter/stackdriver/trace_proto.go | 277 +
 vendor/github.com/aws/aws-sdk-go/LICENSE.txt | 202 +
 vendor/github.com/aws/aws-sdk-go/NOTICE.txt | 3 +
 .../aws/aws-sdk-go/aws/awserr/error.go | 164 +
 .../aws/aws-sdk-go/aws/awserr/types.go | 221 +
 .../aws/aws-sdk-go/aws/awsutil/copy.go | 108 +
 .../aws/aws-sdk-go/aws/awsutil/equal.go | 27 +
 .../aws/aws-sdk-go/aws/awsutil/path_value.go | 222 +
 .../aws/aws-sdk-go/aws/awsutil/prettify.go | 113 +
 .../aws-sdk-go/aws/awsutil/string_value.go | 88 +
 .../aws/aws-sdk-go/aws/client/client.go | 96 +
 .../aws-sdk-go/aws/client/default_retryer.go | 116 +
 .../aws/aws-sdk-go/aws/client/logger.go | 190 +
 .../aws/client/metadata/client_info.go | 13 +
 .../github.com/aws/aws-sdk-go/aws/config.go | 536 +
 .../aws/aws-sdk-go/aws/context_1_5.go | 37 +
 .../aws/aws-sdk-go/aws/context_1_9.go | 11 +
 .../aws-sdk-go/aws/context_background_1_5.go | 56 +
 .../aws-sdk-go/aws/context_background_1_7.go | 20 +
 .../aws/aws-sdk-go/aws/context_sleep.go | 24 +
 .../aws/aws-sdk-go/aws/convert_types.go | 387 +
 .../aws-sdk-go/aws/corehandlers/handlers.go | 228 +
 .../aws/corehandlers/param_validator.go | 17 +
 .../aws-sdk-go/aws/corehandlers/user_agent.go | 37 +
 .../aws/credentials/chain_provider.go | 100 +
 .../aws-sdk-go/aws/credentials/credentials.go | 293 +
 .../ec2rolecreds/ec2_role_provider.go | 180 +
 .../aws/credentials/endpointcreds/provider.go | 203 +
 .../aws/credentials/env_provider.go | 74 +
 .../aws/credentials/processcreds/provider.go | 425 +
 .../shared_credentials_provider.go | 150 +
 .../aws/credentials/static_provider.go | 55 +
 .../stscreds/assume_role_provider.go | 312 +
 .../github.com/aws/aws-sdk-go/aws/csm/doc.go | 69 +
 .../aws/aws-sdk-go/aws/csm/enable.go | 89 +
 .../aws/aws-sdk-go/aws/csm/metric.go | 109 +
 .../aws/aws-sdk-go/aws/csm/metric_chan.go | 54 +
 .../aws-sdk-go/aws/csm/metric_exception.go | 26 +
 .../aws/aws-sdk-go/aws/csm/reporter.go | 265 +
 .../aws/aws-sdk-go/aws/defaults/defaults.go | 207 +
 .../aws-sdk-go/aws/defaults/shared_config.go | 27 +
 vendor/github.com/aws/aws-sdk-go/aws/doc.go | 56 +
 .../aws/aws-sdk-go/aws/ec2metadata/api.go | 169 +
 .../aws/aws-sdk-go/aws/ec2metadata/service.go | 152 +
 .../aws/aws-sdk-go/aws/endpoints/decode.go | 188 +
 .../aws/aws-sdk-go/aws/endpoints/defaults.go | 4684 +++++++++
 .../aws/endpoints/dep_service_ids.go | 141 +
 .../aws/aws-sdk-go/aws/endpoints/doc.go | 66 +
 .../aws/aws-sdk-go/aws/endpoints/endpoints.go | 449 +
 .../aws/aws-sdk-go/aws/endpoints/v3model.go | 307 +
 .../aws/endpoints/v3model_codegen.go | 351 +
 .../github.com/aws/aws-sdk-go/aws/errors.go | 13 +
 .../aws/aws-sdk-go/aws/jsonvalue.go | 12 +
 .../github.com/aws/aws-sdk-go/aws/logger.go | 118 +
 .../aws/request/connection_reset_error.go | 18 +
 .../aws/aws-sdk-go/aws/request/handlers.go | 322 +
 .../aws-sdk-go/aws/request/http_request.go | 24 +
 .../aws-sdk-go/aws/request/offset_reader.go | 60 +
 .../aws/aws-sdk-go/aws/request/request.go | 688 ++
 .../aws/aws-sdk-go/aws/request/request_1_7.go | 39 +
 .../aws/aws-sdk-go/aws/request/request_1_8.go | 33 +
 .../aws-sdk-go/aws/request/request_context.go | 14 +
 .../aws/request/request_context_1_6.go | 14 +
 .../aws/request/request_pagination.go | 264 +
 .../aws/aws-sdk-go/aws/request/retryer.go | 163 +
 .../aws/request/timeout_read_closer.go | 94 +
 .../aws/aws-sdk-go/aws/request/validation.go | 286 +
 .../aws/aws-sdk-go/aws/request/waiter.go | 295 +
 .../aws/session/cabundle_transport.go | 26 +
 .../aws/session/cabundle_transport_1_5.go | 22 +
 .../aws/session/cabundle_transport_1_6.go | 23 +
 .../aws/aws-sdk-go/aws/session/credentials.go | 207 +
 .../aws/aws-sdk-go/aws/session/doc.go | 273 +
 .../aws/aws-sdk-go/aws/session/env_config.go | 241 +
 .../aws/aws-sdk-go/aws/session/session.go | 608 ++
 .../aws-sdk-go/aws/session/shared_config.go | 341 +
 .../aws-sdk-go/aws/signer/v4/header_rules.go | 82 +
 .../aws/aws-sdk-go/aws/signer/v4/options.go | 7 +
 .../aws/aws-sdk-go/aws/signer/v4/uri_path.go | 24 +
 .../aws/aws-sdk-go/aws/signer/v4/v4.go | 796 ++
 vendor/github.com/aws/aws-sdk-go/aws/types.go | 207 +
 vendor/github.com/aws/aws-sdk-go/aws/url.go | 12 +
 .../github.com/aws/aws-sdk-go/aws/url_1_7.go | 29 +
 .../github.com/aws/aws-sdk-go/aws/version.go | 8 +
 .../aws/aws-sdk-go/internal/ini/ast.go | 120 +
 .../aws-sdk-go/internal/ini/comma_token.go | 11 +
 .../aws-sdk-go/internal/ini/comment_token.go | 35 +
 .../aws/aws-sdk-go/internal/ini/doc.go | 29 +
 .../aws-sdk-go/internal/ini/empty_token.go | 4 +
 .../aws/aws-sdk-go/internal/ini/expression.go | 24 +
 .../aws/aws-sdk-go/internal/ini/fuzz.go | 17 +
 .../aws/aws-sdk-go/internal/ini/ini.go | 51 +
 .../aws/aws-sdk-go/internal/ini/ini_lexer.go | 165 +
 .../aws/aws-sdk-go/internal/ini/ini_parser.go | 349 +
 .../aws-sdk-go/internal/ini/literal_tokens.go | 324 +
 .../aws-sdk-go/internal/ini/newline_token.go | 30 +
 .../aws-sdk-go/internal/ini/number_helper.go | 152 +
 .../aws/aws-sdk-go/internal/ini/op_tokens.go | 39 +
 .../aws-sdk-go/internal/ini/parse_error.go | 43 +
 .../aws-sdk-go/internal/ini/parse_stack.go | 60 +
 .../aws/aws-sdk-go/internal/ini/sep_tokens.go | 41 +
 .../aws/aws-sdk-go/internal/ini/skipper.go | 45 +
 .../aws/aws-sdk-go/internal/ini/statement.go | 35 +
 .../aws/aws-sdk-go/internal/ini/value_util.go | 284 +
 .../aws/aws-sdk-go/internal/ini/visitor.go | 166 +
 .../aws/aws-sdk-go/internal/ini/walker.go | 25 +
 .../aws/aws-sdk-go/internal/ini/ws_token.go | 24 +
 .../aws/aws-sdk-go/internal/sdkio/io_go1.6.go | 10 +
 .../aws/aws-sdk-go/internal/sdkio/io_go1.7.go | 12 +
 .../internal/sdkrand/locked_source.go | 29 +
 .../aws/aws-sdk-go/internal/sdkuri/path.go | 23 +
 .../internal/shareddefaults/ecs_container.go | 12 +
 .../internal/shareddefaults/shared_config.go | 40 +
 .../aws/aws-sdk-go/private/protocol/host.go | 68 +
 .../private/protocol/host_prefix.go | 54 +
 .../private/protocol/idempotency.go | 75 +
 .../private/protocol/json/jsonutil/build.go | 296 +
 .../protocol/json/jsonutil/unmarshal.go | 250 +
 .../aws-sdk-go/private/protocol/jsonvalue.go | 76 +
 .../aws-sdk-go/private/protocol/payload.go | 81 +
 .../private/protocol/query/build.go | 36 +
 .../protocol/query/queryutil/queryutil.go | 246 +
 .../private/protocol/query/unmarshal.go | 39 +
 .../private/protocol/query/unmarshal_error.go | 69 +
 .../aws-sdk-go/private/protocol/rest/build.go | 310 +
 .../private/protocol/rest/payload.go | 45 +
 .../private/protocol/rest/unmarshal.go | 225 +
 .../aws-sdk-go/private/protocol/timestamp.go | 72 +
 .../aws-sdk-go/private/protocol/unmarshal.go | 21 +
 .../private/protocol/xml/xmlutil/build.go | 306 +
 .../private/protocol/xml/xmlutil/unmarshal.go | 291 +
 .../protocol/xml/xmlutil/xml_to_struct.go | 148 +
 .../aws/aws-sdk-go/service/sts/api.go | 2580 +++++
 .../aws/aws-sdk-go/service/sts/doc.go | 108 +
 .../aws/aws-sdk-go/service/sts/errors.go | 73 +
 .../aws/aws-sdk-go/service/sts/service.go | 95 +
 vendor/github.com/beorn7/perks/LICENSE | 20 +
 .../beorn7/perks/quantile/stream.go | 316 +
 .../opencensus-proto/AUTHORS | 1 +
 .../opencensus-proto/LICENSE | 202 +
 .../gen-go/agent/common/v1/common.pb.go | 356 +
 .../gen-go/metrics/v1/metrics.pb.go | 1370 +++
 .../gen-go/resource/v1/resource.pb.go | 99 +
 .../ghodss}/yaml/LICENSE | 0
 .../ghodss}/yaml/fields.go | 1 -
 .../ghodss}/yaml/yaml.go | 68 +-
 vendor/github.com/golang/glog/LICENSE | 191 +
 vendor/github.com/golang/glog/glog.go | 1180 +++
 vendor/github.com/golang/glog/glog_file.go | 124 +
 vendor/github.com/golang/groupcache/LICENSE | 191 +
 .../github.com/golang/groupcache/lru/lru.go | 133 +
 .../golang/protobuf/proto/decode.go | 1 +
 .../github.com/golang/protobuf/proto/equal.go | 3 +-
 .../golang/protobuf/proto/extensions.go | 78 +-
 .../github.com/golang/protobuf/proto/lib.go | 38 +-
 .../golang/protobuf/proto/message_set.go | 137 +-
 .../golang/protobuf/proto/pointer_reflect.go | 5 +-
 .../golang/protobuf/proto/pointer_unsafe.go | 15 +-
 .../golang/protobuf/proto/properties.go | 31 +-
 .../golang/protobuf/proto/table_marshal.go | 45 +-
 .../golang/protobuf/proto/table_unmarshal.go | 72 +-
 .../protoc-gen-go/descriptor/descriptor.pb.go | 2812 ++++++
 .../golang/protobuf/ptypes/any/any.pb.go | 45 +-
 .../golang/protobuf/ptypes/duration.go | 2 +-
 .../protobuf/ptypes/duration/duration.pb.go | 26 +-
 .../golang/protobuf/ptypes/empty/empty.pb.go | 79 +
 .../protobuf/ptypes/struct/struct.pb.go | 450 +
 .../golang/protobuf/ptypes/timestamp.go | 6 +-
 .../protobuf/ptypes/timestamp/timestamp.pb.go | 34 +-
 .../protobuf/ptypes/wrappers/wrappers.pb.go | 443 +
 vendor/github.com/google/btree/LICENSE | 202 +
 vendor/github.com/google/btree/btree.go | 890 ++
 vendor/github.com/google/btree/btree_mem.go | 76 +
 vendor/github.com/google/uuid/CONTRIBUTORS | 9 +
 vendor/github.com/google/uuid/LICENSE | 27 +
 vendor/github.com/google/uuid/dce.go | 80 +
 vendor/github.com/google/uuid/doc.go | 12 +
 vendor/github.com/google/uuid/hash.go | 53 +
 vendor/github.com/google/uuid/marshal.go | 37 +
 vendor/github.com/google/uuid/node.go | 90 +
 vendor/github.com/google/uuid/node_js.go | 12 +
 vendor/github.com/google/uuid/node_net.go | 33 +
 vendor/github.com/google/uuid/sql.go | 59 +
 vendor/github.com/google/uuid/time.go | 123 +
 vendor/github.com/google/uuid/util.go | 43 +
 vendor/github.com/google/uuid/uuid.go | 245 +
 vendor/github.com/google/uuid/version1.go | 44 +
 vendor/github.com/google/uuid/version4.go | 38 +
 vendor/github.com/googleapis/gax-go/LICENSE | 27 +
 .../googleapis/gax-go/v2/call_option.go | 161 +
 vendor/github.com/googleapis/gax-go/v2/gax.go | 39 +
 .../gax-go/v2/header.go} | 56 +-
 .../github.com/googleapis/gax-go/v2/invoke.go | 99 +
 .../gregjones/httpcache/LICENSE.txt | 7 +
 .../httpcache/diskcache/diskcache.go | 61 +
 .../gregjones/httpcache/httpcache.go | 551 +
 .../imdario/mergo}/LICENSE | 1 +
 vendor/github.com/imdario/mergo/doc.go | 44 +
 vendor/github.com/imdario/mergo/map.go | 175 +
 vendor/github.com/imdario/mergo/merge.go | 255 +
 vendor/github.com/imdario/mergo/mergo.go | 97 +
 .../imdario/mergo/testdata/license.yml | 4 +
 .../github.com/jmespath/go-jmespath/LICENSE | 13 +
 vendor/github.com/jmespath/go-jmespath/api.go | 49 +
 .../go-jmespath/astnodetype_string.go | 16 +
 .../jmespath/go-jmespath/functions.go | 842 ++
 .../jmespath/go-jmespath/interpreter.go | 418 +
 .../github.com/jmespath/go-jmespath/lexer.go | 420 +
 .../github.com/jmespath/go-jmespath/parser.go | 603 ++
 .../jmespath/go-jmespath/toktype_string.go | 16 +
 .../github.com/jmespath/go-jmespath/util.go | 185 +
 .../github.com/knative/caching/.gitattributes | 6 +
 .../knative/caching/.github/issue-template.md | 34 +
 .../caching/.github/pull-request-template.md | 6 +
 vendor/github.com/knative/caching/.gitignore | 1 +
 .../knative/caching/CONTRIBUTING.md | 5 +
 .../github.com/knative/caching/DEVELOPMENT.md | 68 +
 vendor/github.com/knative/caching/Gopkg.lock | 538 +
 vendor/github.com/knative/caching/Gopkg.toml | 48 +
 vendor/github.com/knative/caching/LICENSE | 201 +
 vendor/github.com/knative/caching/OWNERS | 6 +
 vendor/github.com/knative/caching/README.md | 14 +
 .../knative/caching/code-of-conduct.md | 76 +
 .../knative/caching/config/image.yaml | 34 +
 vendor/github.com/knative/caching/hack/OWNERS | 4 +
 .../hack/boilerplate/add-boilerplate.sh | 38 +
 .../hack/boilerplate/boilerplate.go.txt | 12 +-
 .../hack/boilerplate/boilerplate.sh.txt | 15 +
 .../knative/caching/hack/update-codegen.sh | 34 +
 .../knative/caching/hack/update-deps.sh | 29 +
 .../knative/caching/hack/verify-codegen.sh | 55 +
 .../caching/pkg/apis/caching/register.go | 27 +
 .../caching/pkg/apis/caching/v1alpha1/doc.go | 19 +
 .../apis/caching/v1alpha1/image_defaults.go | 23 +
 .../pkg/apis/caching/v1alpha1/image_types.go | 180 +
 .../apis/caching/v1alpha1/image_validation.go | 46 +
 .../pkg/apis/caching}/v1alpha1/register.go | 25 +-
 .../v1alpha1}/zz_generated.deepcopy.go | 123 +-
 .../client/clientset/versioned/clientset.go | 98 +
 .../pkg/client/clientset/versioned}/doc.go | 6 +-
 .../versioned/fake/clientset_generated.go | 26 +-
 .../client}/clientset/versioned/fake/doc.go | 2 +-
 .../clientset/versioned/fake/register.go | 6 +-
 .../client/clientset/versioned/scheme/doc.go | 20 +
 .../clientset/versioned/scheme/register.go | 6 +-
 .../typed/caching/v1alpha1/caching_client.go} | 39 +-
 .../versioned/typed/caching/v1alpha1}/doc.go | 4 +-
 .../typed/caching/v1alpha1}/fake/doc.go | 2 +-
 .../v1alpha1/fake/fake_caching_client.go} | 16 +-
 .../typed/caching/v1alpha1/fake/fake_image.go | 140 +
 .../caching}/v1alpha1/generated_expansion.go | 4 +-
 .../versioned/typed/caching/v1alpha1/image.go | 174 +
 .../externalversions/caching}/interface.go | 16 +-
 .../caching/v1alpha1/image.go | 89 +
 .../caching/v1alpha1}/interface.go | 16 +-
 .../informers/externalversions/factory.go | 14 +-
 .../informers/externalversions/generic.go | 12 +-
 .../internalinterfaces/factory_interfaces.go | 6 +-
 .../caching}/v1alpha1/expansion_generated.go | 12 +-
 .../client/listers/caching/v1alpha1/image.go | 94 +
 vendor/github.com/knative/caching/test/OWNERS | 7 +
 .../knative/caching/test/presubmit-tests.sh | 30 +
 vendor/github.com/knative/pkg/.gitattributes | 10 +
 .../knative/pkg/.github/issue-template.md | 32 +
 .../pkg/.github/pull-request-template.md | 6 +
 vendor/github.com/knative/pkg/.gitignore | 11 +
 vendor/github.com/knative/pkg/CONTRIBUTING.md | 5 +
 vendor/github.com/knative/pkg/DEVELOPMENT.md | 71 +
 vendor/github.com/knative/pkg/Gopkg.lock | 1203 +++
 vendor/github.com/knative/pkg/Gopkg.toml | 78 +
 vendor/github.com/knative/pkg/OWNERS | 4 +
 vendor/github.com/knative/pkg/OWNERS_ALIASES | 65 +
 vendor/github.com/knative/pkg/README.md | 13 +
 vendor/github.com/knative/pkg/apis/OWNERS | 4 +
 .../github.com/knative/pkg/apis/duck/OWNERS | 4 +
 .../apis/duck/v1alpha1/addressable_types.go | 113 +
 .../pkg/apis/duck/v1alpha1/condition_set.go | 388 +
 .../apis/duck/v1alpha1/conditions_types.go | 202 +
 .../knative/pkg/apis/duck/v1alpha1}/doc.go | 13 +-
 .../duck/v1alpha1/legacy_targetable_types.go | 95 +
 .../pkg/apis/duck/v1alpha1/register.go | 59 +
 .../duck/v1alpha1/retired_targetable_types.go | 99 +
 .../duck/v1alpha1/zz_generated.deepcopy.go | 441 +
 .../github.com/knative/pkg/apis/istio/OWNERS | 4 +
 .../apis/istio/authentication/register.go} | 10 +-
 .../istio/authentication/v1alpha1/doc.go} | 18 +-
 .../authentication/v1alpha1/policy_types.go | 345 +
 .../authentication}/v1alpha1/register.go | 32 +-
 .../v1alpha1}/zz_generated.deepcopy.go | 205 +-
 .../pkg/apis/istio/common/v1alpha1/string.go | 35 +
 .../knative/pkg/apis/istio/register.go | 21 +
 .../knative/pkg/apis/istio/v1alpha3/README.md | 17 +
 .../istio/v1alpha3/destinationrule_types.go | 547 +
 .../knative/pkg/apis/istio/v1alpha3}/doc.go | 14 +-
 .../pkg/apis/istio/v1alpha3/gateway_types.go | 336 +
 .../pkg/apis/istio/v1alpha3}/register.go | 30 +-
 .../istio/v1alpha3/virtualservice_types.go | 882 ++
 .../istio/v1alpha3/zz_generated.deepcopy.go | 1159 +++
 .../knative/pkg/apis/testing/conditions.go | 61 +
 .../knative/pkg/changeset/commit.go | 64 +
 .../github.com/knative/pkg/changeset/doc.go | 23 +
 .../knative/pkg/changeset/testdata/HEAD | 1 +
 .../pkg/changeset/testdata/noncommitted/HEAD | 1 +
 .../client/clientset/versioned/clientset.go | 120 +
 .../pkg/client/clientset/versioned/doc.go | 20 +
 .../versioned/fake/clientset_generated.go | 94 +
 .../client/clientset/versioned/fake/doc.go | 20 +
 .../clientset/versioned/fake/register.go | 58 +
 .../client/clientset/versioned/scheme/doc.go | 20 +
 .../clientset/versioned/scheme/register.go | 58 +
 .../v1alpha1/authentication_client.go} | 43 +-
 .../typed/authentication/v1alpha1}/doc.go | 4 +-
 .../typed/authentication/v1alpha1/fake/doc.go | 20 +
 .../fake/fake_authentication_client.go | 40 +
 .../v1alpha1/fake/fake_policy.go | 128 +
 .../v1alpha1}/generated_expansion.go | 6 +-
 .../typed/authentication/v1alpha1/policy.go | 157 +
 .../typed/istio/v1alpha3/destinationrule.go | 157 +
 .../versioned/typed/istio/v1alpha3}/doc.go | 4 +-
 .../typed/istio/v1alpha3/fake/doc.go | 20 +
 .../v1alpha3/fake/fake_destinationrule.go | 128 +
 .../typed/istio/v1alpha3/fake/fake_gateway.go | 128 +
 .../istio/v1alpha3/fake/fake_istio_client.go | 48 +
 .../v1alpha3/fake/fake_virtualservice.go | 128 +
 .../versioned/typed/istio/v1alpha3/gateway.go | 157 +
 .../istio/v1alpha3}/generated_expansion.go | 10 +-
 .../typed/istio/v1alpha3/istio_client.go | 100 +
 .../typed/istio/v1alpha3/virtualservice.go | 157 +
 .../authentication}/interface.go | 8 +-
 .../authentication}/v1alpha1/interface.go | 14 +-
 .../authentication/v1alpha1/policy.go} | 56 +-
 .../informers/externalversions/factory.go | 186 +
 .../informers/externalversions/generic.go | 71 +
 .../internalinterfaces/factory_interfaces.go | 38 +
 .../externalversions/istio}/interface.go | 18 +-
 .../istio/v1alpha3/destinationrule.go | 89 +
 .../istio/v1alpha3/gateway.go} | 50 +-
 .../istio/v1alpha3}/interface.go | 30 +-
 .../istio/v1alpha3/virtualservice.go} | 50 +-
 .../pkg/client/injection/client/client.go | 49 +
 .../pkg/client/injection/client/fake/fake.go | 54 +
 .../factory/authenticationfactory.go | 52 +
 .../authentication/factory/fake/fake.go | 41 +
 .../v1alpha1/policy/fake/fake.go | 40 +
 .../authentication/v1alpha1/policy/policy.go | 52 +
 .../informers/istio/factory/fake/fake.go | 41 +
 .../informers/istio/factory/istiofactory.go | 52 +
 .../destinationrule/destinationrule.go | 52 +
 .../v1alpha3/destinationrule/fake/fake.go | 40 +
 .../istio/v1alpha3/gateway/fake/fake.go | 40 +
 .../istio/v1alpha3/gateway/gateway.go | 52 +
 .../v1alpha3/virtualservice/fake/fake.go | 40 +
 .../v1alpha3/virtualservice/virtualservice.go | 52 +
 .../v1alpha1}/expansion_generated.go | 16 +-
 .../listers/authentication/v1alpha1/policy.go | 94 +
 .../listers/istio/v1alpha3/destinationrule.go | 94 +
 .../istio/v1alpha3/expansion_generated.go | 43 +
 .../client/listers/istio/v1alpha3/gateway.go | 94 +
 .../listers/istio/v1alpha3/virtualservice.go | 94 +
 .../github.com/knative/pkg/cloudevents/OWNERS | 4 +
 .../knative/pkg/cloudevents/README.md | 151 +
 .../knative/pkg/cloudevents/builder.go | 135 +
 .../knative/pkg/cloudevents/client.go | 81 +
 .../github.com/knative/pkg/cloudevents/doc.go | 22 +
 .../pkg/cloudevents/encoding_binary.go | 125 +
 .../pkg/cloudevents/encoding_structured.go | 143 +
 .../knative/pkg/cloudevents/event.go | 212 +
 .../knative/pkg/cloudevents/event_v01.go | 236 +
 .../knative/pkg/cloudevents/event_v02.go | 261 +
 .../knative/pkg/cloudevents/handler.go | 401 +
 .../github.com/knative/pkg/code-of-conduct.md | 75 +
 .../codegen/cmd/injection-gen/args/args.go | 61 +
 .../cmd/injection-gen/generators/client.go | 102 +
 .../cmd/injection-gen/generators/factory.go | 106 +
 .../injection-gen/generators/fakeclient.go | 109 +
 .../injection-gen/generators/fakefactory.go | 97 +
 .../injection-gen/generators/fakeinformer.go | 114 +
 .../cmd/injection-gen/generators/informer.go | 123 +
 .../injection-gen/generators/namesystems.go | 71 +
 .../cmd/injection-gen/generators/packages.go | 360 +
 .../pkg/codegen/cmd/injection-gen/main.go | 59 +
 .../github.com/knative/pkg/configmap/OWNERS | 4 +
 .../pkg/configmap/testing/configmap.go | 97 +
 .../github.com/knative/pkg/controller/OWNERS | 4 +
 .../knative/pkg/controller/controller.go | 474 +
 .../knative/pkg/controller/helper.go | 67 +
 .../knative/pkg/controller/stats_reporter.go | 148 +
 .../controller/testing/fake_stats_reporter.go | 65 +
 vendor/github.com/knative/pkg/hack/OWNERS | 10 +
 .../pkg/hack/boilerplate/boilerplate.go.txt | 15 +
 .../knative/pkg/hack/generate-knative.sh | 78 +
 .../knative/pkg/hack/update-codegen.sh | 55 +
 .../knative/pkg/hack/update-deps.sh | 29 +
 .../knative/pkg/hack/verify-codegen.sh | 74 +
 .../github.com/knative/pkg/injection/OWNERS | 5 +
 .../knative/pkg/injection/README.md | 218 +
 .../knative/pkg/injection/clients.go | 42 +
 .../injection/clients/apiextclient/apiext.go | 49 +
 .../clients/apiextclient/fake/fake.go | 53 +
 .../clients/dynamicclient/dynamicclient.go | 49 +
 .../clients/dynamicclient/fake/fake.go | 53 +
 .../injection/clients/kubeclient/fake/fake.go | 53 +
 .../clients/kubeclient/kubeclient.go | 49 +
 .../github.com/knative/pkg/injection/doc.go | 68 +
 .../knative/pkg/injection/factories.go | 40 +
 .../knative/pkg/injection/informers.go | 68 +
 .../apiextensionsv1beta1/crd/crd.go | 52 +
 .../apiextensionsv1beta1/crd/fake/fake.go | 38 +
 .../apiextinformers/factory/factory.go | 53 +
 .../apiextinformers/factory/fake/fake.go | 40 +
 .../appsv1/deployment/deployment.go | 52 +
 .../appsv1/deployment/fake/fake.go | 38 +
 .../autoscalingv1/hpa/fake/fake.go | 38 +
 .../kubeinformers/autoscalingv1/hpa/hpa.go | 52 +
 .../autoscalingv2beta1/hpa/fake/fake.go | 38 +
 .../autoscalingv2beta1/hpa/hpa.go | 52 +
 .../kubeinformers/batchv1/job/fake/fake.go | 38 +
 .../kubeinformers/batchv1/job/job.go | 52 +
 .../corev1/configmap/configmap.go | 52 +
 .../corev1/configmap/fake/fake.go | 38 +
 .../corev1/endpoints/endpoints.go | 52 +
 .../corev1/endpoints/fake/fake.go | 38 +
 .../corev1/namespace/fake/fake.go | 38 +
 .../corev1/namespace/namespace.go | 52 +
 .../kubeinformers/corev1/pod/fake/fake.go | 38 +
 .../informers/kubeinformers/corev1/pod/pod.go | 52 +
 .../kubeinformers/corev1/secret/fake/fake.go | 38 +
 .../kubeinformers/corev1/secret/secret.go | 52 +
 .../kubeinformers/corev1/service/fake/fake.go | 38 +
 .../kubeinformers/corev1/service/service.go | 52 +
 .../corev1/serviceaccount/fake/fake.go | 38 +
 .../corev1/serviceaccount/service.go | 52 +
 .../kubeinformers/factory/factory.go | 52 +
 .../kubeinformers/factory/fake/fake.go | 40 +
 .../rbacv1/rolebinding/fake/fake.go | 38 +
 .../rbacv1/rolebinding/rolebinding.go | 52 +
 .../knative/pkg/injection/interface.go | 84 +
 .../knative/pkg/injection/sharedmain/main.go | 113 +
 vendor/github.com/knative/pkg/kmeta/OWNERS | 4 +
 .../github.com/knative/pkg/kmeta/accessor.go | 94 +
 vendor/github.com/knative/pkg/kmeta/doc.go | 19 +
 vendor/github.com/knative/pkg/kmeta/labels.go | 114 +
 .../knative/pkg/kmeta/owner_references.go | 38 +
 vendor/github.com/knative/pkg/logging/OWNERS | 4 +
 .../github.com/knative/pkg/logging/config.go | 198 +
 .../github.com/knative/pkg/logging/logger.go | 57 +
 .../knative/pkg/logging/logkey/constants.go | 65 +
 .../knative/pkg/logging/testing/util.go | 68 +
 .../pkg/logging/zz_generated.deepcopy.go | 48 +
 vendor/github.com/knative/pkg/metrics/OWNERS | 4 +
 .../github.com/knative/pkg/metrics/config.go | 293 +
 .../knative/pkg/metrics}/doc.go | 15 +-
 .../knative/pkg/metrics/exporter.go | 106 +
 .../knative/pkg/metrics/gcp_metadata.go | 40 +
 .../pkg/metrics/metricskey/constants.go | 79 +
 .../pkg/metrics/monitored_resources.go | 53 +
 .../pkg/metrics/prometheus_exporter.go | 74 +
 .../github.com/knative/pkg/metrics/record.go | 56 +
 .../pkg/metrics/stackdriver_exporter.go | 141 +
 .../knative/pkg/metrics/testing/config.go | 27 +
 vendor/github.com/knative/pkg/ptr/doc.go | 18 +
 vendor/github.com/knative/pkg/ptr/ptr.go | 41 +
 .../knative/pkg/reconciler/testing/actions.go | 76 +
 .../knative/pkg/reconciler/testing/clock.go | 29 +
 .../pkg/reconciler/testing/context.go} | 28 +-
 .../knative/pkg/reconciler/testing/events.go | 44 +
 .../testing/generate_name_reactor.go | 86 +
 .../knative/pkg/reconciler/testing/hooks.go | 183 +
 .../pkg/reconciler/testing/reactions.go | 66 +
 .../knative/pkg/reconciler/testing/sorter.go | 93 +
 .../knative/pkg/reconciler/testing/stats.go | 40 +
 .../knative/pkg/reconciler/testing/table.go | 365 +
 .../pkg/reconciler/testing/tracker.go} | 28 +-
 .../knative/pkg/reconciler/testing/util.go | 85 +
 .../github.com/knative/pkg/signals/signal.go | 83 +
 .../knative/pkg/signals/signal_posix.go | 26 +
 .../knative/pkg/signals/signal_windows.go} | 10 +-
 .../knative/pkg/system/clock.go} | 21 +-
 vendor/github.com/knative/pkg/system/names.go | 52 +
 .../knative/pkg/system/testing/names.go | 27 +
 vendor/github.com/knative/pkg/test/OWNERS | 10 +
 vendor/github.com/knative/pkg/test/README.md | 259 +
 vendor/github.com/knative/pkg/test/cleanup.go | 40 +
 vendor/github.com/knative/pkg/test/clients.go | 114 +
 vendor/github.com/knative/pkg/test/crd.go | 95 +
 .../github.com/knative/pkg/test/e2e_flags.go | 82 +
 .../knative/pkg/test/helpers/data.go | 77 +
 .../knative/pkg/test/ingress/ingress.go | 72 +
 .../knative/pkg/test/kube_checks.go | 132 +
 .../knative/pkg/test/logging/logging.go | 146 +
 .../knative/pkg/test/monitoring/doc.go | 32 +
 .../knative/pkg/test/monitoring/monitoring.go | 85 +
 .../knative/pkg/test/presubmit-tests.sh | 31 +
 vendor/github.com/knative/pkg/test/request.go | 154 +
 .../knative/pkg/test/spoof/error_checks.go | 52 +
 .../knative/pkg/test/spoof/spoof.go | 241 +
 .../github.com/knative/pkg/test/zipkin/doc.go | 41 +
 .../knative/pkg/test/zipkin/util.go | 145 +
 vendor/github.com/knative/pkg/testing/doc.go | 19 +
 .../pkg/testing/inner_default_resource.go | 157 +
 .../knative/pkg/testing/register.go | 42 +
 .../knative/pkg/testing/resource.go | 190 +
 .../pkg/testing/zz_generated.deepcopy.go | 285 +
 .../knative/pkg/tracing/config}/doc.go | 10 +-
 .../knative/pkg/tracing/config/tracing.go | 95 +
 .../tracing/config/zz_generated.deepcopy.go} | 39 +-
 vendor/github.com/knative/pkg/tracing/http.go | 28 +
 .../knative/pkg/tracing/opencensus.go | 136 +
 .../github.com/knative/pkg/tracing/zipkin.go | 36 +
 vendor/github.com/knative/pkg/tracker/doc.go | 21 +
 .../github.com/knative/pkg/tracker/enqueue.go | 169 +
 .../knative/pkg/tracker/interface.go | 33 +
 .../github.com/knative/pkg/version/version.go | 55 +
 vendor/github.com/knative/pkg/webhook/OWNERS | 4 +
 .../github.com/knative/pkg/webhook/certs.go | 165 +
 .../github.com/knative/pkg/webhook/webhook.go | 619 ++
 .../knative/pkg/websocket/connection.go | 325 +
 .../knative/pkg/websocket/hijack.go | 32 +
 .../knative/test-infra/scripts/README.md | 293 -
 .../knative/test-infra/scripts/e2e-tests.sh | 473 -
 .../knative/test-infra/scripts/library.sh | 525 -
 .../scripts/markdown-link-check-config.rc | 5 -
 .../scripts/markdown-lint-config.rc | 5 -
 .../test-infra/scripts/presubmit-tests.sh | 381 -
 .../knative/test-infra/scripts/release.sh | 558 -
 .../test-infra/tools/dep-collector/README.md | 91 -
 .../golang_protobuf_extensions/LICENSE | 201 +
 .../golang_protobuf_extensions/NOTICE | 1 +
 .../pbutil/decode.go | 75 +
 .../golang_protobuf_extensions/pbutil/doc.go | 16 +
 .../pbutil/encode.go | 46 +
 vendor/github.com/petar/GoLLRB/AUTHORS | 4 +
 vendor/github.com/petar/GoLLRB/LICENSE | 27 +
 vendor/github.com/petar/GoLLRB/llrb/avgvar.go | 39 +
 .../github.com/petar/GoLLRB/llrb/iterator.go | 95 +
 .../petar/GoLLRB/llrb/llrb-stats.go | 46 +
 vendor/github.com/petar/GoLLRB/llrb/llrb.go | 456 +
 vendor/github.com/petar/GoLLRB/llrb/util.go | 17 +
 vendor/github.com/peterbourgon/diskv/LICENSE | 19 +
 .../peterbourgon/diskv/compression.go | 64 +
 vendor/github.com/peterbourgon/diskv/diskv.go | 726 ++
 vendor/github.com/peterbourgon/diskv/index.go | 115 +
 .../prometheus/client_golang/LICENSE | 201 +
 .../prometheus/client_golang/NOTICE | 23 +
 .../client_golang/prometheus/build_info.go | 29 +
 .../prometheus/build_info_pre_1.12.go | 22 +
 .../client_golang/prometheus/collector.go | 120 +
 .../client_golang/prometheus/counter.go | 277 +
 .../client_golang/prometheus/desc.go | 184 +
 .../client_golang/prometheus/doc.go | 200 +
 .../prometheus/expvar_collector.go | 119 +
 .../client_golang/prometheus/fnv.go | 42 +
 .../client_golang/prometheus/gauge.go | 286 +
 .../client_golang/prometheus/go_collector.go | 396 +
 .../client_golang/prometheus/histogram.go | 586 ++
 .../prometheus/internal/metric.go | 85 +
 .../client_golang/prometheus/labels.go | 87 +
 .../client_golang/prometheus/metric.go | 174 +
 .../client_golang/prometheus/observer.go | 52 +
 .../prometheus/process_collector.go | 151 +
 .../prometheus/process_collector_other.go | 65 +
 .../prometheus/process_collector_windows.go | 112 +
 .../prometheus/promhttp/delegator.go | 357 +
 .../client_golang/prometheus/promhttp/http.go | 349 +
 .../prometheus/promhttp/instrument_client.go | 219 +
 .../prometheus/promhttp/instrument_server.go | 447 +
 .../client_golang/prometheus/registry.go | 945 ++
 .../client_golang/prometheus/summary.go | 736 ++
 .../client_golang/prometheus/timer.go | 54 +
 .../client_golang/prometheus/untyped.go | 42 +
 .../client_golang/prometheus/value.go | 162 +
 .../client_golang/prometheus/vec.go | 472 +
 .../client_golang/prometheus/wrap.go | 200 +
 .../prometheus/client_model/LICENSE | 201 +
 .../github.com/prometheus/client_model/NOTICE | 5 +
 .../prometheus/client_model/go/metrics.pb.go | 629 ++
 .../prometheus/client_model/ruby/LICENSE | 201 +
 vendor/github.com/prometheus/common/LICENSE | 201 +
 vendor/github.com/prometheus/common/NOTICE | 5 +
 .../prometheus/common/expfmt/decode.go | 429 +
 .../prometheus/common/expfmt/encode.go | 88 +
 .../prometheus/common/expfmt/expfmt.go | 38 +
 .../prometheus/common/expfmt/fuzz.go | 36 +
 .../prometheus/common/expfmt/text_create.go | 468 +
 .../prometheus/common/expfmt/text_parse.go | 757 ++
 .../bitbucket.org/ww/goautoneg/autoneg.go | 162 +
 .../prometheus/common/model/alert.go | 136 +
 .../prometheus/common/model/fingerprinting.go | 105 +
 .../github.com/prometheus/common/model/fnv.go | 42 +
 .../prometheus/common/model/labels.go | 210 +
 .../prometheus/common/model/labelset.go | 169 +
 .../prometheus/common/model/metric.go | 102 +
 .../prometheus/common/model/model.go | 16 +
 .../prometheus/common/model/signature.go | 144 +
 .../prometheus/common/model/silence.go | 106 +
 .../prometheus/common/model/time.go | 270 +
 .../prometheus/common/model/value.go | 416 +
 vendor/github.com/prometheus/procfs/LICENSE | 201 +
 vendor/github.com/prometheus/procfs/NOTICE | 7 +
 .../github.com/prometheus/procfs/buddyinfo.go | 85 +
 vendor/github.com/prometheus/procfs/doc.go | 45 +
 vendor/github.com/prometheus/procfs/fs.go | 43 +
 .../prometheus/procfs/internal/fs/fs.go | 55 +
 vendor/github.com/prometheus/procfs/ipvs.go | 239 +
 vendor/github.com/prometheus/procfs/mdstat.go | 194 +
 .../github.com/prometheus/procfs/mountinfo.go | 178 +
 .../prometheus/procfs/mountstats.go | 621 ++
 .../github.com/prometheus/procfs/net_dev.go | 206 +
 .../github.com/prometheus/procfs/net_unix.go | 275 +
 vendor/github.com/prometheus/procfs/proc.go | 281 +
 .../prometheus/procfs/proc_environ.go | 43 +
 .../github.com/prometheus/procfs/proc_io.go | 65 +
 .../prometheus/procfs/proc_limits.go | 157 +
 .../github.com/prometheus/procfs/proc_ns.go | 68 +
 .../github.com/prometheus/procfs/proc_psi.go | 101 +
 .../github.com/prometheus/procfs/proc_stat.go | 198 +
 .../prometheus/procfs/proc_status.go | 162 +
 vendor/github.com/prometheus/procfs/stat.go | 244 +
 vendor/github.com/prometheus/procfs/xfrm.go | 187 +
 .../pipeline/pkg/apis/config/default.go | 2 +
 .../pkg/apis/config/zz_generated.deepcopy.go | 2 +-
 .../pipeline/v1alpha1/pipeline_validation.go | 2 +-
 .../pipeline/v1alpha1/pipelinerun_types.go | 5 +
 .../v1alpha1/pipelinerun_validation.go | 4 +-
 .../pkg/apis/pipeline/v1alpha1/pod.go | 62 +
 .../apis/pipeline/v1alpha1/taskrun_types.go | 5 +
 .../pipeline/v1alpha1/taskrun_validation.go | 7 +
 .../v1alpha1/zz_generated.deepcopy.go | 257 +-
 .../client}/clientset/versioned/clientset.go | 28 +-
 .../pkg/client}/clientset/versioned/doc.go | 2 +-
 .../client}/clientset/versioned/scheme/doc.go | 2 +-
 .../clientset/versioned/scheme/register.go | 56 +
 .../typed/pipeline/v1alpha1/clustertask.go | 147 +
 .../versioned/typed/pipeline}/v1alpha1/doc.go | 2 +-
 .../pipeline/v1alpha1/generated_expansion.go | 31 +
 .../typed/pipeline/v1alpha1/pipeline.go | 174 +
 .../pipeline/v1alpha1/pipeline_client.go | 115 +
 .../pipeline/v1alpha1/pipelineresource.go | 174 +
 .../typed/pipeline/v1alpha1/pipelinerun.go | 174 +
 .../versioned/typed/pipeline/v1alpha1/task.go | 157 +
 .../typed/pipeline/v1alpha1/taskrun.go | 174 +
 .../pkg/client/injection/client/client.go | 49 +
 vendor/go.opencensus.io/AUTHORS | 1 +
 vendor/go.opencensus.io/LICENSE | 202 +
 .../exporter/prometheus/prometheus.go | 295 +
 vendor/go.opencensus.io/internal/internal.go | 37 +
 vendor/go.opencensus.io/internal/sanitize.go | 50 +
 .../internal/tagencoding/tagencoding.go | 75 +
 .../internal/traceinternals.go | 53 +
 .../go.opencensus.io/metric/metricdata/doc.go | 19 +
 .../metric/metricdata/exemplar.go | 33 +
 .../metric/metricdata/label.go | 28 +
 .../metric/metricdata/metric.go | 46 +
 .../metric/metricdata/point.go | 193 +
 .../metric/metricdata/type_string.go | 16 +
 .../metric/metricdata/unit.go | 27 +
 .../metric/metricproducer/manager.go | 78 +
 .../metric/metricproducer/producer.go | 28 +
 vendor/go.opencensus.io/opencensus.go | 21 +
 .../go.opencensus.io/plugin/ocgrpc/client.go | 56 +
 .../plugin/ocgrpc/client_metrics.go | 107 +
 .../plugin/ocgrpc/client_stats_handler.go | 49 +
 vendor/go.opencensus.io/plugin/ocgrpc/doc.go | 19 +
 .../go.opencensus.io/plugin/ocgrpc/server.go | 80 +
 .../plugin/ocgrpc/server_metrics.go | 97 +
 .../plugin/ocgrpc/server_stats_handler.go | 63 +
 .../plugin/ocgrpc/stats_common.go | 208 +
 .../plugin/ocgrpc/trace_common.go | 107 +
 .../go.opencensus.io/plugin/ochttp/client.go | 117 +
 .../plugin/ochttp/client_stats.go | 143 +
 vendor/go.opencensus.io/plugin/ochttp/doc.go | 19 +
 .../plugin/ochttp/propagation/b3/b3.go | 123 +
 .../go.opencensus.io/plugin/ochttp/route.go | 61 +
 .../go.opencensus.io/plugin/ochttp/server.go | 440 +
 .../ochttp/span_annotating_client_trace.go | 169 +
 .../go.opencensus.io/plugin/ochttp/stats.go | 292 +
 .../go.opencensus.io/plugin/ochttp/trace.go | 239 +
 .../plugin/ochttp/wrapped_body.go | 44 +
 vendor/go.opencensus.io/resource/resource.go | 164 +
 vendor/go.opencensus.io/stats/doc.go | 69 +
 .../go.opencensus.io/stats/internal/record.go | 25 +
 vendor/go.opencensus.io/stats/measure.go | 109 +
 .../go.opencensus.io/stats/measure_float64.go | 55 +
 .../go.opencensus.io/stats/measure_int64.go | 55 +
 vendor/go.opencensus.io/stats/record.go | 69 +
 vendor/go.opencensus.io/stats/units.go | 25 +
 .../stats/view/aggregation.go | 120 +
 .../stats/view/aggregation_data.go | 293 +
 .../go.opencensus.io/stats/view/collector.go | 86 +
 vendor/go.opencensus.io/stats/view/doc.go | 47 +
 vendor/go.opencensus.io/stats/view/export.go | 58 +
 vendor/go.opencensus.io/stats/view/view.go | 221 +
 .../stats/view/view_to_metric.go | 131 +
 vendor/go.opencensus.io/stats/view/worker.go | 279 +
 .../stats/view/worker_commands.go | 182 +
 vendor/go.opencensus.io/tag/context.go | 43 +
 vendor/go.opencensus.io/tag/doc.go | 26 +
 vendor/go.opencensus.io/tag/key.go | 35 +
 vendor/go.opencensus.io/tag/map.go | 197 +
 vendor/go.opencensus.io/tag/map_codec.go | 237 +
 vendor/go.opencensus.io/tag/profile_19.go | 31 +
 vendor/go.opencensus.io/tag/profile_not19.go | 23 +
 vendor/go.opencensus.io/tag/validate.go | 56 +
 vendor/go.opencensus.io/trace/basetypes.go | 119 +
 vendor/go.opencensus.io/trace/config.go | 86 +
 vendor/go.opencensus.io/trace/doc.go | 53 +
 vendor/go.opencensus.io/trace/evictedqueue.go | 38 +
 vendor/go.opencensus.io/trace/export.go | 97 +
 .../trace/internal/internal.go | 22 +
 vendor/go.opencensus.io/trace/lrumap.go | 37 +
 .../trace/propagation/propagation.go | 108 +
 vendor/go.opencensus.io/trace/sampling.go | 75 +
 vendor/go.opencensus.io/trace/spanbucket.go | 130 +
 vendor/go.opencensus.io/trace/spanstore.go | 306 +
 vendor/go.opencensus.io/trace/status_codes.go | 37 +
 vendor/go.opencensus.io/trace/trace.go | 598 ++
 vendor/go.opencensus.io/trace/trace_go11.go | 32 +
 .../go.opencensus.io/trace/trace_nongo11.go | 25 +
 .../trace/tracestate/tracestate.go | 147 +
 vendor/go.uber.org/atomic/LICENSE.txt | 19 +
 vendor/go.uber.org/atomic/atomic.go | 351 +
 vendor/go.uber.org/atomic/error.go | 55 +
 vendor/go.uber.org/atomic/string.go | 49 +
 vendor/go.uber.org/multierr/LICENSE.txt | 19 +
 vendor/go.uber.org/multierr/error.go | 401 +
 vendor/go.uber.org/zap/LICENSE.txt | 19 +
 vendor/go.uber.org/zap/array.go | 320 +
 vendor/go.uber.org/zap/buffer/buffer.go | 115 +
 vendor/go.uber.org/zap/buffer/pool.go | 49 +
 vendor/go.uber.org/zap/config.go | 243 +
 vendor/go.uber.org/zap/doc.go | 113 +
 vendor/go.uber.org/zap/encoder.go | 75 +
 vendor/go.uber.org/zap/error.go | 80 +
 vendor/go.uber.org/zap/field.go | 310 +
 vendor/go.uber.org/zap/flag.go | 39 +
 vendor/go.uber.org/zap/global.go | 169 +
 vendor/go.uber.org/zap/http_handler.go | 81 +
 .../zap/internal/bufferpool/bufferpool.go | 31 +
 .../go.uber.org/zap/internal/color/color.go | 44 +
 vendor/go.uber.org/zap/internal/exit/exit.go | 64 +
 vendor/go.uber.org/zap/level.go | 132 +
 vendor/go.uber.org/zap/logger.go | 305 +
 vendor/go.uber.org/zap/options.go | 109 +
 vendor/go.uber.org/zap/sink.go | 161 +
 vendor/go.uber.org/zap/stacktrace.go | 126 +
 vendor/go.uber.org/zap/sugar.go | 304 +
 vendor/go.uber.org/zap/time.go | 27 +
 vendor/go.uber.org/zap/writer.go | 99 +
 .../zap/zapcore/console_encoder.go | 147 +
 vendor/go.uber.org/zap/zapcore/core.go | 113 +
 vendor/go.uber.org/zap/zapcore/doc.go | 24 +
 vendor/go.uber.org/zap/zapcore/encoder.go | 348 +
 vendor/go.uber.org/zap/zapcore/entry.go | 257 +
 vendor/go.uber.org/zap/zapcore/error.go | 120 +
 vendor/go.uber.org/zap/zapcore/field.go | 201 +
 vendor/go.uber.org/zap/zapcore/hook.go | 68 +
 .../go.uber.org/zap/zapcore/json_encoder.go | 502 +
 vendor/go.uber.org/zap/zapcore/level.go | 175 +
 .../go.uber.org/zap/zapcore/level_strings.go | 46 +
 vendor/go.uber.org/zap/zapcore/marshaler.go | 53 +
 .../go.uber.org/zap/zapcore/memory_encoder.go | 179 +
 vendor/go.uber.org/zap/zapcore/sampler.go | 134 +
 vendor/go.uber.org/zap/zapcore/tee.go | 81 +
 .../go.uber.org/zap/zapcore/write_syncer.go | 123 +
 .../x/net/internal/timeseries/timeseries.go | 525 +
 vendor/golang.org/x/net/trace/events.go | 532 +
 vendor/golang.org/x/net/trace/histogram.go | 365 +
 vendor/golang.org/x/net/trace/trace.go | 1130 +++
 .../golang.org/x/oauth2/google/appengine.go | 38 +
 .../x/oauth2/google/appengine_gen1.go | 77 +
 .../x/oauth2/google/appengine_gen2_flex.go | 27 +
 vendor/golang.org/x/oauth2/google/default.go | 154 +
 vendor/golang.org/x/oauth2/google/doc.go | 40 +
 vendor/golang.org/x/oauth2/google/google.go | 209 +
 vendor/golang.org/x/oauth2/google/jwt.go | 74 +
 vendor/golang.org/x/oauth2/google/sdk.go | 201 +
 vendor/golang.org/x/oauth2/jws/jws.go | 182 +
 vendor/golang.org/x/oauth2/jwt/jwt.go | 185 +
 vendor/golang.org/x/sync/AUTHORS | 3 +
 vendor/golang.org/x/sync/CONTRIBUTORS | 3 +
 vendor/golang.org/x/sync/LICENSE | 27 +
 .../golang => golang.org/x/sync}/PATENTS | 0
 .../golang.org/x/sync/semaphore/semaphore.go | 127 +
 .../x/sys/unix/zerrors_linux_386.go | 34 +
 .../x/sys/unix/zerrors_linux_amd64.go | 34 +
 .../x/sys/unix/zerrors_linux_arm.go | 34 +
 .../x/sys/unix/zerrors_linux_arm64.go | 34 +
 .../x/sys/unix/zerrors_linux_mips.go | 34 +
 .../x/sys/unix/zerrors_linux_mips64.go | 34 +
 .../x/sys/unix/zerrors_linux_mips64le.go | 34 +
 .../x/sys/unix/zerrors_linux_mipsle.go | 34 +
 .../x/sys/unix/zerrors_linux_ppc64.go | 34 +
 .../x/sys/unix/zerrors_linux_ppc64le.go | 34 +
 .../x/sys/unix/zerrors_linux_riscv64.go | 34 +
 .../x/sys/unix/zerrors_linux_s390x.go | 34 +
 .../x/sys/unix/zerrors_linux_sparc64.go | 34 +
 .../x/sys/unix/zsysnum_linux_386.go | 6 +
 .../x/sys/unix/zsysnum_linux_amd64.go | 6 +
 .../x/sys/unix/zsysnum_linux_arm.go | 6 +
 .../x/sys/unix/zsysnum_linux_arm64.go | 6 +
 .../x/sys/unix/zsysnum_linux_mips.go | 6 +
 .../x/sys/unix/zsysnum_linux_mips64.go | 6 +
 .../x/sys/unix/zsysnum_linux_mips64le.go | 6 +
 .../x/sys/unix/zsysnum_linux_mipsle.go | 6 +
 .../x/sys/unix/zsysnum_linux_ppc64.go | 6 +
 .../x/sys/unix/zsysnum_linux_ppc64le.go | 6 +
 .../x/sys/unix/zsysnum_linux_riscv64.go | 6 +
 .../x/sys/unix/zsysnum_linux_s390x.go | 6 +
 .../x/sys/unix/zsysnum_linux_sparc64.go | 6 +
 .../golang.org/x/sys/unix/ztypes_linux_386.go | 37 +
 .../x/sys/unix/ztypes_linux_amd64.go | 38 +
 .../golang.org/x/sys/unix/ztypes_linux_arm.go | 37 +
 .../x/sys/unix/ztypes_linux_arm64.go | 38 +
 .../x/sys/unix/ztypes_linux_mips.go | 37 +
 .../x/sys/unix/ztypes_linux_mips64.go | 38 +
 .../x/sys/unix/ztypes_linux_mips64le.go | 38 +
 .../x/sys/unix/ztypes_linux_mipsle.go | 37 +
 .../x/sys/unix/ztypes_linux_ppc64.go | 38 +
 .../x/sys/unix/ztypes_linux_ppc64le.go | 38 +
 .../x/sys/unix/ztypes_linux_riscv64.go | 38 +
 .../x/sys/unix/ztypes_linux_s390x.go | 38 +
 .../x/sys/unix/ztypes_linux_sparc64.go | 38 +
 .../x/tools/go/packages/external.go | 29 +-
 .../golang.org/x/tools/go/packages/golist.go | 12 +-
 .../x/tools/go/packages/packages.go | 17 +
 .../x/tools/internal/imports/fix.go | 16 +-
 .../x/tools/internal/imports/mod.go | 66 +-
 .../v1/gonum/.travis/deps.d/linux/01-deps.sh | 1 -
 .../v1/gonum/.travis/deps.d/osx/nothing.sh | 1 -
 .../gonum/.travis/deps.d/windows/nothing.sh | 1 -
 .../.travis/run.d/linux/01-check-copyright.sh | 1 -
 .../.travis/run.d/linux/02-check-imports.sh | 1 -
 .../run.d/linux/03-check-formatting.sh | 1 -
 .../v1/gonum/.travis/run.d/linux/04-test.sh | 1 -
 .../.travis/run.d/linux/05-test-coverage.sh | 1 -
 .../.travis/run.d/linux/06-check-generate.sh | 1 -
 .../v1/gonum/.travis/run.d/osx/01-test.sh | 1 -
 vendor/gonum.org/v1/gonum/AUTHORS | 92 -
 vendor/gonum.org/v1/gonum/CONTRIBUTORS | 94 -
 vendor/gonum.org/v1/gonum/LICENSE | 23 -
 vendor/gonum.org/v1/gonum/blas/blas.go | 283 -
 .../gonum.org/v1/gonum/blas/blas64/blas64.go | 469 -
 vendor/gonum.org/v1/gonum/blas/blas64/conv.go | 277 -
 .../v1/gonum/blas/blas64/conv_symmetric.go | 153 -
 vendor/gonum.org/v1/gonum/blas/blas64/doc.go | 6 -
 .../v1/gonum/blas/cblas128/cblas128.go | 508 -
 .../gonum.org/v1/gonum/blas/cblas128/conv.go | 279 -
 .../v1/gonum/blas/cblas128/conv_hermitian.go | 155 -
 .../v1/gonum/blas/cblas128/conv_symmetric.go | 155 -
 .../gonum.org/v1/gonum/blas/cblas128/doc.go | 6 -
 vendor/gonum.org/v1/gonum/blas/doc.go | 108 -
 vendor/gonum.org/v1/gonum/blas/gonum/dgemm.go | 314 -
 vendor/gonum.org/v1/gonum/blas/gonum/doc.go | 88 -
 .../gonum.org/v1/gonum/blas/gonum/errors.go | 35 -
 vendor/gonum.org/v1/gonum/blas/gonum/gemv.go | 190 -
 vendor/gonum.org/v1/gonum/blas/gonum/gonum.go | 58 -
 .../v1/gonum/blas/gonum/level1cmplx128.go | 445 -
 .../v1/gonum/blas/gonum/level1cmplx64.go | 467 -
 .../v1/gonum/blas/gonum/level1float32.go | 644 --
 .../gonum/blas/gonum/level1float32_dsdot.go | 53 -
 .../v1/gonum/blas/gonum/level1float32_sdot.go | 53 -
 .../gonum/blas/gonum/level1float32_sdsdot.go | 53 -
 .../v1/gonum/blas/gonum/level1float64.go | 620 --
 .../v1/gonum/blas/gonum/level1float64_ddot.go | 49 -
 .../v1/gonum/blas/gonum/level2cmplx128.go | 2906 ------
 .../v1/gonum/blas/gonum/level2cmplx64.go | 2942 ------
 .../v1/gonum/blas/gonum/level2float32.go | 2296 -----
 .../v1/gonum/blas/gonum/level2float64.go | 2264 -----
 .../v1/gonum/blas/gonum/level3cmplx128.go | 1715 ----
 .../v1/gonum/blas/gonum/level3cmplx64.go | 1735 ----
 .../v1/gonum/blas/gonum/level3float32.go | 876 --
 .../v1/gonum/blas/gonum/level3float64.go | 864 --
 vendor/gonum.org/v1/gonum/blas/gonum/sgemm.go | 318 -
 vendor/gonum.org/v1/gonum/floats/doc.go | 11 -
 vendor/gonum.org/v1/gonum/floats/floats.go | 933 --
 vendor/gonum.org/v1/gonum/graph/doc.go | 9 -
 .../formats/cytoscapejs/testdata/LICENSE | 21 -
 .../formats/sigmajs/testdata/LICENSE.txt | 12 -
 vendor/gonum.org/v1/gonum/graph/graph.go | 282 -
 .../v1/gonum/graph/internal/linear/doc.go | 6 -
 .../v1/gonum/graph/internal/linear/linear.go | 73 -
 .../v1/gonum/graph/internal/ordered/doc.go | 6 -
 .../v1/gonum/graph/internal/ordered/sort.go | 93 -
 .../v1/gonum/graph/internal/set/doc.go | 6 -
 .../v1/gonum/graph/internal/set/same.go | 36 -
 .../graph/internal/set/same_appengine.go | 36 -
 .../v1/gonum/graph/internal/set/set.go | 228 -
 .../v1/gonum/graph/internal/uid/uid.go | 54 -
 .../gonum.org/v1/gonum/graph/iterator/doc.go | 9 -
 .../v1/gonum/graph/iterator/edges.go | 131 -
 .../v1/gonum/graph/iterator/lines.go | 131 -
 .../v1/gonum/graph/iterator/nodes.go | 125 -
 vendor/gonum.org/v1/gonum/graph/multigraph.go | 198 -
 .../gonum.org/v1/gonum/graph/nodes_edges.go | 300 -
 .../graph/simple/dense_directed_matrix.go | 301 -
 .../graph/simple/dense_undirected_matrix.go | 268 -
 .../v1/gonum/graph/simple/directed.go | 235 -
 vendor/gonum.org/v1/gonum/graph/simple/doc.go | 9 -
 .../gonum.org/v1/gonum/graph/simple/simple.go | 72 -
 .../v1/gonum/graph/simple/undirected.go | 216 -
 .../gonum/graph/simple/weighted_directed.go | 279 -
 .../gonum/graph/simple/weighted_undirected.go | 273 -
 .../v1/gonum/graph/topo/bron_kerbosch.go | 250 -
 .../v1/gonum/graph/topo/clique_graph.go | 111 -
 vendor/gonum.org/v1/gonum/graph/topo/doc.go | 6 -
 .../v1/gonum/graph/topo/johnson_cycles.go | 285 -
 .../v1/gonum/graph/topo/non_tomita_choice.go | 9 -
 .../v1/gonum/graph/topo/paton_cycles.go | 83 -
 .../gonum.org/v1/gonum/graph/topo/tarjan.go | 199 -
 .../v1/gonum/graph/topo/tomita_choice.go | 9 -
 vendor/gonum.org/v1/gonum/graph/topo/topo.go | 68 -
 .../gonum.org/v1/gonum/graph/traverse/doc.go | 6 -
 .../v1/gonum/graph/traverse/traverse.go | 231 -
 vendor/gonum.org/v1/gonum/graph/undirect.go | 270 -
 .../gonum/internal/asm/c128/axpyinc_amd64.s | 134 -
 .../gonum/internal/asm/c128/axpyincto_amd64.s | 141 -
 .../internal/asm/c128/axpyunitary_amd64.s | 122 -
 .../internal/asm/c128/axpyunitaryto_amd64.s | 123 -
 .../v1/gonum/internal/asm/c128/doc.go | 6 -
 .../gonum/internal/asm/c128/dotcinc_amd64.s | 153 -
 .../internal/asm/c128/dotcunitary_amd64.s | 143 -
 .../gonum/internal/asm/c128/dotuinc_amd64.s | 141 -
 .../internal/asm/c128/dotuunitary_amd64.s | 130 -
 .../gonum/internal/asm/c128/dscalinc_amd64.s | 69 -
 .../internal/asm/c128/dscalunitary_amd64.s | 66 -
 .../v1/gonum/internal/asm/c128/scal.go | 31 -
 .../internal/asm/c128/scalUnitary_amd64.s | 116 -
 .../gonum/internal/asm/c128/scalinc_amd64.s | 121 -
 .../v1/gonum/internal/asm/c128/stubs_amd64.go | 96 -
 .../v1/gonum/internal/asm/c128/stubs_noasm.go | 163 -
 .../v1/gonum/internal/asm/c64/axpyinc_amd64.s | 151 -
 .../gonum/internal/asm/c64/axpyincto_amd64.s | 156 -
 .../internal/asm/c64/axpyunitary_amd64.s | 160 -
 .../internal/asm/c64/axpyunitaryto_amd64.s | 157 -
 .../v1/gonum/internal/asm/c64/conj.go | 7 -
 .../v1/gonum/internal/asm/c64/doc.go | 6 -
 .../v1/gonum/internal/asm/c64/dotcinc_amd64.s | 160 -
 .../internal/asm/c64/dotcunitary_amd64.s | 208 -
 .../v1/gonum/internal/asm/c64/dotuinc_amd64.s | 148 -
 .../internal/asm/c64/dotuunitary_amd64.s | 197 -
 .../v1/gonum/internal/asm/c64/scal.go | 79 -
 .../v1/gonum/internal/asm/c64/stubs_amd64.go | 68 -
 .../v1/gonum/internal/asm/c64/stubs_noasm.go | 113 -
 .../v1/gonum/internal/asm/f32/axpyinc_amd64.s | 73 -
 .../gonum/internal/asm/f32/axpyincto_amd64.s | 78 -
 .../internal/asm/f32/axpyunitary_amd64.s | 97 -
 .../internal/asm/f32/axpyunitaryto_amd64.s | 98 -
 .../v1/gonum/internal/asm/f32/ddotinc_amd64.s | 91 -
 .../internal/asm/f32/ddotunitary_amd64.s | 110 -
 .../v1/gonum/internal/asm/f32/doc.go | 6 -
 .../v1/gonum/internal/asm/f32/dotinc_amd64.s | 85 -
 .../gonum/internal/asm/f32/dotunitary_amd64.s | 106 -
 .../v1/gonum/internal/asm/f32/ge_amd64.go | 15 -
 .../v1/gonum/internal/asm/f32/ge_amd64.s | 757 --
 .../v1/gonum/internal/asm/f32/ge_noasm.go | 36 -
 .../v1/gonum/internal/asm/f32/scal.go | 55 -
 .../v1/gonum/internal/asm/f32/stubs_amd64.go | 68 -
 .../v1/gonum/internal/asm/f32/stubs_noasm.go | 113 -
 .../v1/gonum/internal/asm/f64/abssum_amd64.s | 82 -
 .../gonum/internal/asm/f64/abssuminc_amd64.s | 90 -
 .../v1/gonum/internal/asm/f64/add_amd64.s | 66 -
 .../gonum/internal/asm/f64/addconst_amd64.s | 53 -
 .../v1/gonum/internal/asm/f64/axpy.go | 57 -
 .../v1/gonum/internal/asm/f64/axpyinc_amd64.s | 142 -
 .../gonum/internal/asm/f64/axpyincto_amd64.s | 148 -
 .../internal/asm/f64/axpyunitary_amd64.s | 134 -
 .../internal/asm/f64/axpyunitaryto_amd64.s | 140 -
 .../v1/gonum/internal/asm/f64/cumprod_amd64.s | 71 -
 .../v1/gonum/internal/asm/f64/cumsum_amd64.s | 64 -
 .../v1/gonum/internal/asm/f64/div_amd64.s | 67 -
 .../v1/gonum/internal/asm/f64/divto_amd64.s | 73 -
 .../v1/gonum/internal/asm/f64/doc.go | 6 -
 .../v1/gonum/internal/asm/f64/dot.go | 35 -
 .../v1/gonum/internal/asm/f64/dot_amd64.s | 145 -
 .../v1/gonum/internal/asm/f64/ge_amd64.go | 22 -
 .../v1/gonum/internal/asm/f64/ge_noasm.go | 118 -
 .../v1/gonum/internal/asm/f64/gemvN_amd64.s | 685 --
 .../v1/gonum/internal/asm/f64/gemvT_amd64.s | 745 --
 .../v1/gonum/internal/asm/f64/ger_amd64.s | 591 --
 .../v1/gonum/internal/asm/f64/l1norm_amd64.s | 58 -
 .../gonum/internal/asm/f64/linfnorm_amd64.s | 57 -
 .../v1/gonum/internal/asm/f64/scal.go | 57 -
 .../v1/gonum/internal/asm/f64/scalinc_amd64.s | 113 -
 .../gonum/internal/asm/f64/scalincto_amd64.s | 122 -
 .../internal/asm/f64/scalunitary_amd64.s | 112 -
 .../internal/asm/f64/scalunitaryto_amd64.s | 113 -
 .../v1/gonum/internal/asm/f64/stubs_amd64.go | 172 -
 .../v1/gonum/internal/asm/f64/stubs_noasm.go | 170 -
 .../v1/gonum/internal/asm/f64/sum_amd64.s | 100 -
 .../v1/gonum/internal/cmplx64/abs.go | 14 -
 .../v1/gonum/internal/cmplx64/conj.go | 12 -
 .../v1/gonum/internal/cmplx64/doc.go | 7 -
 .../v1/gonum/internal/cmplx64/isinf.go | 25 -
 .../v1/gonum/internal/cmplx64/isnan.go | 29 -
 .../v1/gonum/internal/cmplx64/sqrt.go | 108 -
 .../gonum.org/v1/gonum/internal/math32/doc.go | 7 -
 .../v1/gonum/internal/math32/math.go | 111 -
 .../v1/gonum/internal/math32/signbit.go | 16 -
 .../v1/gonum/internal/math32/sqrt.go | 25 -
 .../v1/gonum/internal/math32/sqrt_amd64.go | 20 -
 .../v1/gonum/internal/math32/sqrt_amd64.s | 20 -
 vendor/gonum.org/v1/gonum/lapack/doc.go | 6 -
 .../gonum.org/v1/gonum/lapack/gonum/dbdsqr.go | 505 -
 .../gonum.org/v1/gonum/lapack/gonum/dgebak.go | 89 -
 .../gonum.org/v1/gonum/lapack/gonum/dgebal.go | 239 -
 .../gonum.org/v1/gonum/lapack/gonum/dgebd2.go | 86 -
 .../gonum.org/v1/gonum/lapack/gonum/dgebrd.go | 161 -
 .../gonum.org/v1/gonum/lapack/gonum/dgecon.go | 92 -
 .../gonum.org/v1/gonum/lapack/gonum/dgeev.go | 279 -
 .../gonum.org/v1/gonum/lapack/gonum/dgehd2.go | 97 -
 .../gonum.org/v1/gonum/lapack/gonum/dgehrd.go | 194 -
 .../gonum.org/v1/gonum/lapack/gonum/dgelq2.go | 65 -
 .../gonum.org/v1/gonum/lapack/gonum/dgelqf.go | 97 -
 .../gonum.org/v1/gonum/lapack/gonum/dgels.go | 219 -
 .../gonum.org/v1/gonum/lapack/gonum/dgeql2.go | 61 -
 .../gonum.org/v1/gonum/lapack/gonum/dgeqp3.go | 186 -
 .../gonum.org/v1/gonum/lapack/gonum/dgeqr2.go | 76 -
 .../gonum.org/v1/gonum/lapack/gonum/dgeqrf.go | 108 -
 .../gonum.org/v1/gonum/lapack/gonum/dgerq2.go | 68 -
 .../gonum.org/v1/gonum/lapack/gonum/dgerqf.go | 129 -
 .../gonum.org/v1/gonum/lapack/gonum/dgesvd.go | 1374 ---
 .../gonum.org/v1/gonum/lapack/gonum/dgetf2.go | 84 -
 .../gonum.org/v1/gonum/lapack/gonum/dgetrf.go | 85 -
 .../gonum.org/v1/gonum/lapack/gonum/dgetri.go | 116 -
 .../gonum.org/v1/gonum/lapack/gonum/dgetrs.go | 72 -
 .../v1/gonum/lapack/gonum/dggsvd3.go | 242 -
 .../v1/gonum/lapack/gonum/dggsvp3.go | 281 -
 .../gonum.org/v1/gonum/lapack/gonum/dhseqr.go | 252 -
 .../gonum.org/v1/gonum/lapack/gonum/dlabrd.go | 173 -
 .../gonum.org/v1/gonum/lapack/gonum/dlacn2.go | 134 -
 .../gonum.org/v1/gonum/lapack/gonum/dlacpy.go | 59 -
 .../gonum.org/v1/gonum/lapack/gonum/dlae2.go | 49 -
 .../gonum.org/v1/gonum/lapack/gonum/dlaev2.go | 82 -
 .../gonum.org/v1/gonum/lapack/gonum/dlaexc.go | 269 -
 .../gonum.org/v1/gonum/lapack/gonum/dlags2.go | 182 -
 .../gonum.org/v1/gonum/lapack/gonum/dlahqr.go | 431 -
 .../gonum.org/v1/gonum/lapack/gonum/dlahr2.go | 195 -
 .../gonum.org/v1/gonum/lapack/gonum/dlaln2.go | 405 -
 .../gonum.org/v1/gonum/lapack/gonum/dlange.go | 86 -
 .../gonum.org/v1/gonum/lapack/gonum/dlanst.go | 75 -
 .../gonum.org/v1/gonum/lapack/gonum/dlansy.go | 132 -
 .../gonum.org/v1/gonum/lapack/gonum/dlantr.go | 260 -
 .../gonum.org/v1/gonum/lapack/gonum/dlanv2.go | 132 -
 .../gonum.org/v1/gonum/lapack/gonum/dlapll.go | 55 -
 .../gonum.org/v1/gonum/lapack/gonum/dlapmt.go | 89 -
 .../gonum.org/v1/gonum/lapack/gonum/dlapy2.go | 14 -
 .../gonum.org/v1/gonum/lapack/gonum/dlaqp2.go | 127 -
 .../gonum.org/v1/gonum/lapack/gonum/dlaqps.go | 244 -
 .../v1/gonum/lapack/gonum/dlaqr04.go | 478 -
 .../gonum.org/v1/gonum/lapack/gonum/dlaqr1.go | 59 -
 .../v1/gonum/lapack/gonum/dlaqr23.go | 415 -
 .../gonum.org/v1/gonum/lapack/gonum/dlaqr5.go | 644 --
 .../gonum.org/v1/gonum/lapack/gonum/dlarf.go | 101 -
 .../gonum.org/v1/gonum/lapack/gonum/dlarfb.go | 449 -
 .../gonum.org/v1/gonum/lapack/gonum/dlarfg.go | 71 -
 .../gonum.org/v1/gonum/lapack/gonum/dlarft.go | 166 -
 .../gonum.org/v1/gonum/lapack/gonum/dlarfx.go | 550 -
 .../gonum.org/v1/gonum/lapack/gonum/dlartg.go | 80 -
 .../gonum.org/v1/gonum/lapack/gonum/dlas2.go | 43 -
 .../gonum.org/v1/gonum/lapack/gonum/dlascl.go | 111 -
 .../gonum.org/v1/gonum/lapack/gonum/dlaset.go | 57 -
 .../gonum.org/v1/gonum/lapack/gonum/dlasq1.go | 100 -
 .../gonum.org/v1/gonum/lapack/gonum/dlasq2.go | 369 -
 .../gonum.org/v1/gonum/lapack/gonum/dlasq3.go | 172 -
 .../gonum.org/v1/gonum/lapack/gonum/dlasq4.go | 249 -
 .../gonum.org/v1/gonum/lapack/gonum/dlasq5.go | 140 -
 .../gonum.org/v1/gonum/lapack/gonum/dlasq6.go | 118 -
 .../gonum.org/v1/gonum/lapack/gonum/dlasr.go | 279 -
 .../gonum.org/v1/gonum/lapack/gonum/dlasrt.go | 36 -
 .../gonum.org/v1/gonum/lapack/gonum/dlassq.go | 41 -
 .../gonum.org/v1/gonum/lapack/gonum/dlasv2.go | 115 -
 .../gonum.org/v1/gonum/lapack/gonum/dlaswp.go | 52 -
 .../gonum.org/v1/gonum/lapack/gonum/dlasy2.go | 290 -
 .../gonum.org/v1/gonum/lapack/gonum/dlatrd.go | 165 -
 .../gonum.org/v1/gonum/lapack/gonum/dlatrs.go | 359 -
 .../gonum.org/v1/gonum/lapack/gonum/dlauu2.go | 64 -
 .../gonum.org/v1/gonum/lapack/gonum/dlauum.go | 81 -
 vendor/gonum.org/v1/gonum/lapack/gonum/doc.go | 28 -
 .../gonum.org/v1/gonum/lapack/gonum/dorg2l.go | 76 -
 .../gonum.org/v1/gonum/lapack/gonum/dorg2r.go | 75 -
 .../gonum.org/v1/gonum/lapack/gonum/dorgbr.go | 138 -
 .../gonum.org/v1/gonum/lapack/gonum/dorghr.go | 101 -
 .../gonum.org/v1/gonum/lapack/gonum/dorgl2.go | 71 -
 .../gonum.org/v1/gonum/lapack/gonum/dorglq.go | 123 -
 .../gonum.org/v1/gonum/lapack/gonum/dorgql.go | 136 -
 .../gonum.org/v1/gonum/lapack/gonum/dorgqr.go | 134 -
 .../gonum.org/v1/gonum/lapack/gonum/dorgtr.go | 104 -
 .../gonum.org/v1/gonum/lapack/gonum/dorm2r.go | 101 -
 .../gonum.org/v1/gonum/lapack/gonum/dormbr.go | 178 -
 .../gonum.org/v1/gonum/lapack/gonum/dormhr.go | 129 -
 .../gonum.org/v1/gonum/lapack/gonum/dorml2.go | 102 -
 .../gonum.org/v1/gonum/lapack/gonum/dormlq.go | 174 -
 .../gonum.org/v1/gonum/lapack/gonum/dormqr.go | 177 -
 .../gonum.org/v1/gonum/lapack/gonum/dormr2.go | 103 -
 .../gonum.org/v1/gonum/lapack/gonum/dpbtf2.go | 112 -
 .../gonum.org/v1/gonum/lapack/gonum/dpbtrf.go | 214 -
 .../gonum.org/v1/gonum/lapack/gonum/dpbtrs.go | 68 -
 .../gonum.org/v1/gonum/lapack/gonum/dpocon.go | 90 -
 .../gonum.org/v1/gonum/lapack/gonum/dpotf2.go | 82 -
 .../gonum.org/v1/gonum/lapack/gonum/dpotrf.go | 81 -
 .../gonum.org/v1/gonum/lapack/gonum/dpotri.go | 44 -
 .../gonum.org/v1/gonum/lapack/gonum/dpotrs.go | 62 -
 .../gonum.org/v1/gonum/lapack/gonum/drscl.go | 63 -
 .../gonum.org/v1/gonum/lapack/gonum/dsteqr.go | 376 -
 .../gonum.org/v1/gonum/lapack/gonum/dsterf.go | 285 -
 .../gonum.org/v1/gonum/lapack/gonum/dsyev.go | 130 -
 .../gonum.org/v1/gonum/lapack/gonum/dsytd2.go | 136 -
 .../gonum.org/v1/gonum/lapack/gonum/dsytrd.go | 172 -
 .../gonum.org/v1/gonum/lapack/gonum/dtgsja.go | 373 -
 .../gonum.org/v1/gonum/lapack/gonum/dtrcon.go | 90 -
 .../v1/gonum/lapack/gonum/dtrevc3.go | 885 --
 .../gonum.org/v1/gonum/lapack/gonum/dtrexc.go | 230 -
 .../gonum.org/v1/gonum/lapack/gonum/dtrti2.go | 69 -
 .../gonum.org/v1/gonum/lapack/gonum/dtrtri.go | 72 -
 .../gonum.org/v1/gonum/lapack/gonum/dtrtrs.go | 55 -
 .../gonum.org/v1/gonum/lapack/gonum/errors.go | 174 -
 .../gonum.org/v1/gonum/lapack/gonum/iladlc.go | 45 -
 .../gonum.org/v1/gonum/lapack/gonum/iladlr.go | 41 -
 .../gonum.org/v1/gonum/lapack/gonum/ilaenv.go | 387 -
 .../gonum.org/v1/gonum/lapack/gonum/iparmq.go | 115 -
 .../gonum.org/v1/gonum/lapack/gonum/lapack.go | 55 -
 vendor/gonum.org/v1/gonum/lapack/lapack.go | 213 -
 .../gonum.org/v1/gonum/lapack/lapack64/doc.go | 20 -
 .../v1/gonum/lapack/lapack64/lapack64.go | 581 --
 vendor/gonum.org/v1/gonum/mat/band.go | 277 -
 vendor/gonum.org/v1/gonum/mat/cdense.go | 168 -
 vendor/gonum.org/v1/gonum/mat/cholesky.go | 673 --
 vendor/gonum.org/v1/gonum/mat/cmatrix.go | 210 -
 vendor/gonum.org/v1/gonum/mat/consts.go | 15 -
 vendor/gonum.org/v1/gonum/mat/dense.go | 558 -
 .../v1/gonum/mat/dense_arithmetic.go | 880 --
 vendor/gonum.org/v1/gonum/mat/diagonal.go | 322 -
 vendor/gonum.org/v1/gonum/mat/doc.go | 169 -
 vendor/gonum.org/v1/gonum/mat/eigen.go | 350 -
 vendor/gonum.org/v1/gonum/mat/errors.go | 149 -
 vendor/gonum.org/v1/gonum/mat/format.go | 238 -
 vendor/gonum.org/v1/gonum/mat/gsvd.go | 415 -
 vendor/gonum.org/v1/gonum/mat/hogsvd.go | 233 -
 .../v1/gonum/mat/index_bound_checks.go | 348 -
 .../v1/gonum/mat/index_no_bound_checks.go | 359 -
 vendor/gonum.org/v1/gonum/mat/inner.go | 124 -
 vendor/gonum.org/v1/gonum/mat/io.go | 492 -
 vendor/gonum.org/v1/gonum/mat/lq.go | 262 -
 vendor/gonum.org/v1/gonum/mat/lu.go | 422 -
 vendor/gonum.org/v1/gonum/mat/matrix.go | 985 --
 vendor/gonum.org/v1/gonum/mat/offset.go | 20 -
 .../v1/gonum/mat/offset_appengine.go | 24 -
 vendor/gonum.org/v1/gonum/mat/pool.go | 236 -
 vendor/gonum.org/v1/gonum/mat/product.go | 193 -
 vendor/gonum.org/v1/gonum/mat/qr.go | 260 -
 vendor/gonum.org/v1/gonum/mat/shadow.go | 249 -
 vendor/gonum.org/v1/gonum/mat/solve.go | 140 -
 vendor/gonum.org/v1/gonum/mat/svd.go | 247 -
 vendor/gonum.org/v1/gonum/mat/symband.go | 231 -
 vendor/gonum.org/v1/gonum/mat/symmetric.go | 605 --
 vendor/gonum.org/v1/gonum/mat/triangular.go | 684 --
 vendor/gonum.org/v1/gonum/mat/triband.go | 367 -
 vendor/gonum.org/v1/gonum/mat/vector.go | 745 --
 vendor/google.golang.org/api/AUTHORS | 10 +
 vendor/google.golang.org/api/CONTRIBUTORS | 55 +
 vendor/google.golang.org/api/LICENSE | 27 +
 .../googleapi/internal/uritemplates/LICENSE | 18 +
 .../api/googleapi/transport/apikey.go | 38 +
 .../google.golang.org/api/internal/creds.go | 102 +
 vendor/google.golang.org/api/internal/pool.go | 61 +
 .../api/internal/settings.go | 96 +
 .../api/iterator/iterator.go | 231 +
 .../api/option/credentials_go19.go | 33 +
 .../api/option/credentials_notgo19.go | 32 +
 vendor/google.golang.org/api/option/option.go | 235 +
 .../api/support/bundler/bundler.go | 349 +
 .../google.golang.org/api/transport/dial.go | 46 +
 vendor/google.golang.org/api/transport/doc.go | 21 +
 .../google.golang.org/api/transport/go19.go | 35 +
 .../api/transport/grpc/dial.go | 209 +
 .../api/transport/grpc/dial_appengine.go | 41 +
 .../api/transport/grpc/dial_socketopt.go | 59 +
 .../api/transport/http/dial.go | 161 +
 .../api/transport/http/dial_appengine.go | 30 +
 .../http/internal/propagation/http.go | 96 +
 .../api/transport/not_go19.go | 35 +
 .../google.golang.org/appengine/appengine.go | 135 +
 .../appengine/appengine_vm.go | 20 +
 vendor/google.golang.org/appengine/errors.go | 46 +
 .../google.golang.org/appengine/identity.go | 142 +
 .../app_identity/app_identity_service.pb.go | 611 ++
 .../internal/modules/modules_service.pb.go | 786 ++
 .../internal/socket/socket_service.pb.go | 2822 ++++++
 .../google.golang.org/appengine/namespace.go | 25 +
 .../google.golang.org/appengine/socket/doc.go | 10 +
 .../appengine/socket/socket_classic.go | 290 +
 .../appengine/socket/socket_vm.go | 64 +
 vendor/google.golang.org/appengine/timeout.go | 20 +
 vendor/google.golang.org/genproto/LICENSE | 202 +
 .../api/annotations/annotations.pb.go | 54 +
 .../googleapis/api/annotations/client.pb.go | 76 +
 .../api/annotations/field_behavior.pb.go | 119 +
 .../googleapis/api/annotations/http.pb.go | 688 ++
 .../googleapis/api/annotations/resource.pb.go | 154 +
 .../api/distribution/distribution.pb.go | 714 ++
 .../genproto/googleapis/api/label/label.pb.go | 134 +
 .../googleapis/api/metric/metric.pb.go | 389 +
 .../api/monitoredres/monitored_resource.pb.go | 289 +
 .../devtools/cloudtrace/v2/trace.pb.go | 1391 +++
 .../devtools/cloudtrace/v2/tracing.pb.go | 227 +
 .../googleapis/monitoring/v3/alert.pb.go | 958 ++
 .../monitoring/v3/alert_service.pb.go | 667 ++
 .../googleapis/monitoring/v3/common.pb.go | 887 ++
 .../monitoring/v3/dropped_labels.pb.go | 101 +
 .../googleapis/monitoring/v3/group.pb.go | 156 +
 .../monitoring/v3/group_service.pb.go | 941 +
 .../googleapis/monitoring/v3/metric.pb.go | 232 +
 .../monitoring/v3/metric_service.pb.go | 1208 +++
 .../monitoring/v3/mutation_record.pb.go | 97 +
 .../monitoring/v3/notification.pb.go | 368 +
 .../monitoring/v3/notification_service.pb.go | 1308 +++
 .../monitoring/v3/span_context.pb.go | 96 +
 .../googleapis/monitoring/v3/uptime.pb.go | 969 ++
 .../monitoring/v3/uptime_service.pb.go | 786 +
 .../googleapis/rpc/status/status.pb.go | 156 +
 .../protobuf/field_mask/field_mask.pb.go | 280 +
 vendor/google.golang.org/grpc/AUTHORS | 1 +
 vendor/google.golang.org/grpc/LICENSE | 202 +
 vendor/google.golang.org/grpc/backoff.go | 38 +
 vendor/google.golang.org/grpc/balancer.go | 391 +
 .../grpc/balancer/balancer.go | 364 +
 .../grpc/balancer/base/balancer.go | 178 +
 .../grpc/balancer/base/base.go | 64 +
 .../grpclb/grpc_lb_v1/load_balancer.pb.go | 839 ++
 .../grpc/balancer/grpclb/grpclb.go | 485 +
 .../grpc/balancer/grpclb/grpclb_config.go | 66 +
 .../grpc/balancer/grpclb/grpclb_picker.go | 195 +
 .../balancer/grpclb/grpclb_remote_balancer.go | 353 +
 .../grpc/balancer/grpclb/grpclb_util.go | 209 +
 .../grpc/balancer/roundrobin/roundrobin.go | 83 +
 .../grpc/balancer_conn_wrappers.go | 318 +
 .../grpc/balancer_v1_wrapper.go | 334 +
 .../grpc_binarylog_v1/binarylog.pb.go | 900 ++
 vendor/google.golang.org/grpc/call.go | 74 +
 vendor/google.golang.org/grpc/clientconn.go | 1433 +++
 vendor/google.golang.org/grpc/codec.go | 50 +
 .../grpc/codes/code_string.go | 62 +
 vendor/google.golang.org/grpc/codes/codes.go | 198 +
 .../grpc/connectivity/connectivity.go | 73 +
 .../grpc/credentials/alts/alts.go | 330 +
 .../alts/internal/authinfo/authinfo.go | 87 +
 .../grpc/credentials/alts/internal/common.go | 69 +
 .../alts/internal/conn/aeadrekey.go | 131 +
 .../alts/internal/conn/aes128gcm.go | 105 +
 .../alts/internal/conn/aes128gcmrekey.go | 116 +
 .../credentials/alts/internal/conn/common.go | 70 +
 .../credentials/alts/internal/conn/counter.go | 62 +
 .../credentials/alts/internal/conn/record.go | 271 +
 .../credentials/alts/internal/conn/utils.go | 63 +
 .../alts/internal/handshaker/handshaker.go | 365 +
 .../internal/handshaker/service/service.go | 54 +
 .../internal/proto/grpc_gcp/altscontext.pb.go | 151 +
 .../internal/proto/grpc_gcp/handshaker.pb.go | 1196 +++
 .../grpc_gcp/transport_security_common.pb.go | 178 +
 .../grpc/credentials/alts/utils.go | 141 +
 .../grpc/credentials/credentials.go | 336 +
 .../grpc/credentials/google/google.go | 125 +
 .../grpc/credentials/internal/syscallconn.go | 61 +
 .../internal/syscallconn_appengine.go | 30 +
 .../grpc/credentials/oauth/oauth.go | 173 +
 .../grpc/credentials/tls13.go | 30 +
 vendor/google.golang.org/grpc/dialoptions.go | 558 +
 vendor/google.golang.org/grpc/doc.go | 24 +
 .../grpc/encoding/encoding.go | 118 +
 .../grpc/encoding/proto/proto.go | 110 +
 .../google.golang.org/grpc/grpclog/grpclog.go | 126 +
 .../google.golang.org/grpc/grpclog/logger.go | 85 +
 .../grpc/grpclog/loggerv2.go | 195 +
 vendor/google.golang.org/grpc/interceptor.go | 77 +
 .../grpc/internal/backoff/backoff.go | 78 +
 .../grpc/internal/balancerload/load.go | 46 +
 .../grpc/internal/binarylog/binarylog.go | 167 +
 .../internal/binarylog/binarylog_testutil.go | 42 +
 .../grpc/internal/binarylog/env_config.go | 210 +
 .../grpc/internal/binarylog/method_logger.go | 423 +
 .../grpc/internal/binarylog/sink.go | 162 +
 .../grpc/internal/binarylog/util.go | 41 +
 .../grpc/internal/channelz/funcs.go | 727 ++
 .../grpc/internal/channelz/types.go | 702 ++
 .../grpc/internal/channelz/types_linux.go | 53 +
 .../grpc/internal/channelz/types_nonlinux.go | 44 +
 .../grpc/internal/channelz/util_linux.go | 39 +
 .../grpc/internal/channelz/util_nonlinux.go | 26 +
 .../grpc/internal/envconfig/envconfig.go | 64 +
 .../grpc/internal/grpcrand/grpcrand.go | 56 +
 .../grpc/internal/grpcsync/event.go | 61 +
 .../grpc/internal/internal.go | 71 +
 .../grpc/internal/syscall/syscall_linux.go | 114 +
 .../grpc/internal/syscall/syscall_nonlinux.go | 73 +
 .../grpc/internal/transport/bdp_estimator.go | 141 +
 .../grpc/internal/transport/controlbuf.go | 852 ++
 .../grpc/internal/transport/defaults.go | 49 +
 .../grpc/internal/transport/flowcontrol.go | 218 +
 .../grpc/internal/transport/handler_server.go | 431 +
 .../grpc/internal/transport/http2_client.go | 1403 +++
 .../grpc/internal/transport/http2_server.go | 1219 +++
 .../grpc/internal/transport/http_util.go | 676 ++
 .../grpc/internal/transport/log.go | 44 +
 .../grpc/internal/transport/transport.go | 795 ++
 .../grpc/keepalive/keepalive.go | 85 +
 .../grpc/metadata/metadata.go | 209 +
 .../grpc/naming/dns_resolver.go | 293 +
 .../google.golang.org/grpc/naming/naming.go | 68 +
 vendor/google.golang.org/grpc/peer/peer.go | 51 +
 .../google.golang.org/grpc/picker_wrapper.go | 197 +
 vendor/google.golang.org/grpc/pickfirst.go | 110 +
 vendor/google.golang.org/grpc/preloader.go | 64 +
 vendor/google.golang.org/grpc/proxy.go | 152 +
 .../grpc/resolver/dns/dns_resolver.go | 457 +
 .../grpc/resolver/passthrough/passthrough.go | 57 +
.../grpc/resolver/resolver.go | 193 + .../grpc/resolver_conn_wrapper.go | 168 + vendor/google.golang.org/grpc/rpc_util.go | 863 ++ vendor/google.golang.org/grpc/server.go | 1524 +++ .../google.golang.org/grpc/service_config.go | 429 + .../grpc/serviceconfig/serviceconfig.go | 48 + .../google.golang.org/grpc/stats/handlers.go | 63 + vendor/google.golang.org/grpc/stats/stats.go | 300 + .../google.golang.org/grpc/status/status.go | 217 + vendor/google.golang.org/grpc/stream.go | 1511 +++ vendor/google.golang.org/grpc/tap/tap.go | 51 + vendor/google.golang.org/grpc/trace.go | 126 + vendor/google.golang.org/grpc/version.go | 22 + .../api/admissionregistration/v1alpha1/doc.go | 25 + .../v1alpha1}/generated.pb.go | 573 +- .../v1alpha1}/register.go | 16 +- .../admissionregistration/v1alpha1/types.go | 106 + .../v1alpha1/types_swagger_doc_generated.go | 71 + .../v1alpha1/zz_generated.deepcopy.go | 78 +- .../api/admissionregistration/v1beta1/doc.go | 5 +- .../v1beta1/generated.pb.go | 1867 +--- .../admissionregistration/v1beta1/types.go | 275 +- .../v1beta1/types_swagger_doc_generated.go | 59 +- .../v1beta1/zz_generated.deepcopy.go | 180 +- vendor/k8s.io/api/apps/v1/doc.go | 1 - vendor/k8s.io/api/apps/v1/generated.pb.go | 21 +- vendor/k8s.io/api/apps/v1/types.go | 11 +- .../apps/v1/types_swagger_doc_generated.go | 2 +- .../api/apps/v1/zz_generated.deepcopy.go | 10 +- vendor/k8s.io/api/apps/v1beta1/doc.go | 1 - .../k8s.io/api/apps/v1beta1/generated.pb.go | 293 +- vendor/k8s.io/api/apps/v1beta1/types.go | 11 +- .../v1beta1/types_swagger_doc_generated.go | 2 +- .../api/apps/v1beta1/zz_generated.deepcopy.go | 6 +- vendor/k8s.io/api/apps/v1beta2/doc.go | 1 - .../k8s.io/api/apps/v1beta2/generated.pb.go | 157 +- vendor/k8s.io/api/apps/v1beta2/types.go | 37 +- .../v1beta2/types_swagger_doc_generated.go | 30 +- .../api/apps/v1beta2/zz_generated.deepcopy.go | 10 +- .../v1alpha1/generated.pb.go | 1715 ---- .../api/auditregistration/v1alpha1/types.go | 198 - .../v1alpha1/types_swagger_doc_generated.go | 111 - .../v1alpha1/zz_generated.deepcopy.go | 229 - vendor/k8s.io/api/authentication/v1/doc.go | 2 - .../api/authentication/v1/generated.pb.go | 384 +- vendor/k8s.io/api/authentication/v1/types.go | 23 +- .../v1/types_swagger_doc_generated.go | 8 +- .../v1/zz_generated.deepcopy.go | 12 +- .../k8s.io/api/authentication/v1beta1/doc.go | 2 - .../authentication/v1beta1/generated.pb.go | 353 +- .../api/authentication/v1beta1/types.go | 18 - .../v1beta1/types_swagger_doc_generated.go | 6 +- .../v1beta1/zz_generated.deepcopy.go | 12 +- vendor/k8s.io/api/authorization/v1/doc.go | 2 - .../api/authorization/v1/generated.pb.go | 167 +- .../k8s.io/api/authorization/v1beta1/doc.go | 2 - .../api/authorization/v1beta1/generated.pb.go | 167 +- vendor/k8s.io/api/autoscaling/v1/doc.go | 1 - .../k8s.io/api/autoscaling/v1/generated.pb.go | 21 +- .../v1/types_swagger_doc_generated.go | 8 +- .../autoscaling/v1/zz_generated.deepcopy.go | 2 +- vendor/k8s.io/api/autoscaling/v2beta1/doc.go | 1 - .../api/autoscaling/v2beta1/generated.pb.go | 21 +- .../v2beta1/types_swagger_doc_generated.go | 8 +- .../v2beta1/zz_generated.deepcopy.go | 2 +- vendor/k8s.io/api/autoscaling/v2beta2/doc.go | 1 - .../api/autoscaling/v2beta2/generated.pb.go | 21 +- .../v2beta2/zz_generated.deepcopy.go | 2 +- vendor/k8s.io/api/batch/v1/doc.go | 1 - vendor/k8s.io/api/batch/v1/generated.pb.go | 21 +- .../api/batch/v1/zz_generated.deepcopy.go | 2 +- vendor/k8s.io/api/batch/v1beta1/doc.go | 1 - .../k8s.io/api/batch/v1beta1/generated.pb.go | 21 +- 
.../batch/v1beta1/zz_generated.deepcopy.go | 2 +- vendor/k8s.io/api/batch/v2alpha1/doc.go | 1 - .../k8s.io/api/batch/v2alpha1/generated.pb.go | 21 +- .../batch/v2alpha1/zz_generated.deepcopy.go | 2 +- vendor/k8s.io/api/certificates/v1beta1/doc.go | 2 - .../api/certificates/v1beta1/generated.pb.go | 167 +- .../v1beta1/zz_generated.deepcopy.go | 2 +- vendor/k8s.io/api/coordination/v1/types.go | 74 - .../v1/types_swagger_doc_generated.go | 63 - .../coordination/v1/zz_generated.deepcopy.go | 124 - vendor/k8s.io/api/coordination/v1beta1/doc.go | 2 - .../api/coordination/v1beta1/generated.pb.go | 21 +- .../v1beta1/zz_generated.deepcopy.go | 2 +- .../api/core/v1/annotation_key_constants.go | 25 - vendor/k8s.io/api/core/v1/doc.go | 1 - vendor/k8s.io/api/core/v1/generated.pb.go | 8979 +++++++---------- vendor/k8s.io/api/core/v1/types.go | 210 +- .../core/v1/types_swagger_doc_generated.go | 115 +- .../k8s.io/api/core/v1/well_known_labels.go | 36 - .../api/core/v1/zz_generated.deepcopy.go | 153 +- vendor/k8s.io/api/events/v1beta1/doc.go | 2 - .../k8s.io/api/events/v1beta1/generated.pb.go | 21 +- vendor/k8s.io/api/events/v1beta1/types.go | 1 - .../v1beta1/types_swagger_doc_generated.go | 2 +- .../events/v1beta1/zz_generated.deepcopy.go | 2 +- vendor/k8s.io/api/extensions/v1beta1/doc.go | 1 - .../api/extensions/v1beta1/generated.pb.go | 2220 ++-- vendor/k8s.io/api/extensions/v1beta1/types.go | 143 +- .../v1beta1/types_swagger_doc_generated.go | 99 +- .../v1beta1/zz_generated.deepcopy.go | 162 +- vendor/k8s.io/api/networking/v1/doc.go | 2 - .../k8s.io/api/networking/v1/generated.pb.go | 21 +- vendor/k8s.io/api/networking/v1/types.go | 6 +- .../v1/types_swagger_doc_generated.go | 6 +- .../networking/v1/zz_generated.deepcopy.go | 2 +- .../api/networking/v1beta1/generated.pb.go | 1953 ---- .../k8s.io/api/networking/v1beta1/register.go | 56 - vendor/k8s.io/api/networking/v1beta1/types.go | 192 - .../v1beta1/types_swagger_doc_generated.go | 127 - .../k8s.io/api/node/v1alpha1/generated.pb.go | 696 -- vendor/k8s.io/api/node/v1alpha1/types.go | 75 - .../v1alpha1/types_swagger_doc_generated.go | 59 - .../k8s.io/api/node/v1beta1/generated.pb.go | 564 -- vendor/k8s.io/api/node/v1beta1/types.go | 65 - .../v1beta1/types_swagger_doc_generated.go | 50 - .../api/node/v1beta1/zz_generated.deepcopy.go | 84 - vendor/k8s.io/api/policy/v1beta1/doc.go | 3 +- .../k8s.io/api/policy/v1beta1/generated.pb.go | 1074 +- vendor/k8s.io/api/policy/v1beta1/types.go | 94 +- .../v1beta1/types_swagger_doc_generated.go | 32 - .../policy/v1beta1/zz_generated.deepcopy.go | 82 +- vendor/k8s.io/api/rbac/v1/doc.go | 2 - vendor/k8s.io/api/rbac/v1/generated.pb.go | 21 +- vendor/k8s.io/api/rbac/v1/types.go | 2 - .../rbac/v1/types_swagger_doc_generated.go | 2 +- .../api/rbac/v1/zz_generated.deepcopy.go | 8 +- vendor/k8s.io/api/rbac/v1alpha1/doc.go | 2 - .../k8s.io/api/rbac/v1alpha1/generated.pb.go | 21 +- vendor/k8s.io/api/rbac/v1alpha1/types.go | 2 - .../v1alpha1/types_swagger_doc_generated.go | 2 +- .../rbac/v1alpha1/zz_generated.deepcopy.go | 8 +- vendor/k8s.io/api/rbac/v1beta1/doc.go | 2 - .../k8s.io/api/rbac/v1beta1/generated.pb.go | 21 +- vendor/k8s.io/api/rbac/v1beta1/types.go | 2 - .../v1beta1/types_swagger_doc_generated.go | 2 +- .../api/rbac/v1beta1/zz_generated.deepcopy.go | 8 +- vendor/k8s.io/api/scheduling/v1/doc.go | 23 - .../k8s.io/api/scheduling/v1/generated.pb.go | 667 -- vendor/k8s.io/api/scheduling/v1/register.go | 55 - vendor/k8s.io/api/scheduling/v1/types.go | 74 - .../v1/types_swagger_doc_generated.go | 53 - 
.../scheduling/v1/zz_generated.deepcopy.go | 90 - vendor/k8s.io/api/scheduling/v1alpha1/doc.go | 2 - .../api/scheduling/v1alpha1/generated.pb.go | 125 +- .../k8s.io/api/scheduling/v1alpha1/types.go | 9 - .../v1alpha1/types_swagger_doc_generated.go | 11 +- .../v1alpha1/zz_generated.deepcopy.go | 8 +- vendor/k8s.io/api/scheduling/v1beta1/doc.go | 2 - .../api/scheduling/v1beta1/generated.pb.go | 125 +- vendor/k8s.io/api/scheduling/v1beta1/types.go | 13 +- .../v1beta1/types_swagger_doc_generated.go | 13 +- .../v1beta1/zz_generated.deepcopy.go | 8 +- vendor/k8s.io/api/settings/v1alpha1/doc.go | 2 - .../api/settings/v1alpha1/generated.pb.go | 21 +- vendor/k8s.io/api/settings/v1alpha1/types.go | 2 +- .../v1alpha1/types_swagger_doc_generated.go | 2 +- .../v1alpha1/zz_generated.deepcopy.go | 2 +- vendor/k8s.io/api/storage/v1/doc.go | 4 +- vendor/k8s.io/api/storage/v1/generated.pb.go | 1800 +--- vendor/k8s.io/api/storage/v1/register.go | 3 - vendor/k8s.io/api/storage/v1/types.go | 114 - .../storage/v1/types_swagger_doc_generated.go | 63 - .../api/storage/v1/zz_generated.deepcopy.go | 156 +- vendor/k8s.io/api/storage/v1alpha1/doc.go | 4 +- .../api/storage/v1alpha1/generated.pb.go | 316 +- vendor/k8s.io/api/storage/v1alpha1/types.go | 14 +- .../v1alpha1/types_swagger_doc_generated.go | 2 +- .../storage/v1alpha1/zz_generated.deepcopy.go | 8 +- vendor/k8s.io/api/storage/v1beta1/doc.go | 2 - .../api/storage/v1beta1/generated.pb.go | 1861 +--- vendor/k8s.io/api/storage/v1beta1/register.go | 6 - vendor/k8s.io/api/storage/v1beta1/types.go | 168 +- .../v1beta1/types_swagger_doc_generated.go | 74 +- .../storage/v1beta1/zz_generated.deepcopy.go | 199 +- .../apimachinery/pkg/api/errors/errors.go | 79 +- .../k8s.io/apimachinery/pkg/api/meta/help.go | 79 +- .../k8s.io/apimachinery/pkg/api/meta/meta.go | 31 +- .../pkg/api/resource/generated.pb.go | 3 +- .../apimachinery/pkg/api/resource/math.go | 4 +- .../apimachinery/pkg/api/resource/quantity.go | 8 +- .../pkg/api/validation/objectmeta.go | 59 +- .../apis/meta/internalversion/conversion.go | 2 + .../pkg/apis/meta/internalversion/doc.go | 2 +- .../pkg/apis/meta/internalversion/register.go | 12 +- .../pkg/apis/meta/internalversion/types.go | 12 +- .../zz_generated.conversion.go | 4 +- .../internalversion/zz_generated.deepcopy.go | 2 +- .../pkg/apis/meta/v1/conversion.go | 10 - .../apimachinery/pkg/apis/meta/v1/doc.go | 1 - .../apimachinery/pkg/apis/meta/v1/duration.go | 10 - .../pkg/apis/meta/v1/generated.pb.go | 2691 ++--- .../apimachinery/pkg/apis/meta/v1/helpers.go | 33 - .../apimachinery/pkg/apis/meta/v1/meta.go | 16 +- .../pkg/apis/meta/v1/micro_time.go | 31 +- .../apimachinery/pkg/apis/meta/v1/register.go | 19 - .../apimachinery/pkg/apis/meta/v1/time.go | 20 +- .../apimachinery/pkg/apis/meta/v1/types.go | 333 +- .../meta/v1/types_swagger_doc_generated.go | 187 +- .../pkg/apis/meta/v1/unstructured/helpers.go | 19 - .../apis/meta/v1/unstructured/unstructured.go | 73 +- .../meta/v1/unstructured/unstructured_list.go | 22 - .../pkg/apis/meta/v1/validation/validation.go | 69 +- .../pkg/apis/meta/v1/zz_generated.deepcopy.go | 265 +- .../pkg/apis/meta/v1beta1/deepcopy.go | 27 + .../apimachinery/pkg/apis/meta/v1beta1/doc.go | 3 +- .../pkg/apis/meta/v1beta1/generated.pb.go | 322 +- .../pkg/apis/meta/v1beta1/register.go | 12 +- .../pkg/apis/meta/v1beta1/types.go | 143 +- .../v1beta1/types_swagger_doc_generated.go | 70 +- .../meta/v1beta1/zz_generated.deepcopy.go | 138 +- .../pkg/conversion/queryparams/convert.go | 4 + .../k8s.io/apimachinery/pkg/labels/labels.go | 2 +- 
.../apimachinery/pkg/labels/selector.go | 14 +- .../apimachinery/pkg/runtime/converter.go | 12 +- .../k8s.io/apimachinery/pkg/runtime/error.go | 29 - .../apimachinery/pkg/runtime/generated.pb.go | 21 +- .../k8s.io/apimachinery/pkg/runtime/helper.go | 53 +- .../apimachinery/pkg/runtime/interfaces.go | 26 - .../k8s.io/apimachinery/pkg/runtime/mapper.go | 98 - .../pkg/runtime/schema/generated.pb.go | 3 +- .../pkg/runtime/schema/group_version.go | 10 +- .../pkg/runtime/serializer/codec_factory.go | 53 +- .../pkg/runtime/serializer/json/json.go | 97 +- .../runtime/serializer/protobuf/protobuf.go | 37 +- .../runtime/serializer/protobuf_extension.go | 48 + .../serializer/versioning/versioning.go | 102 +- .../k8s.io/apimachinery/pkg/runtime/types.go | 4 +- vendor/k8s.io/apimachinery/pkg/types/patch.go | 1 - .../k8s.io/apimachinery/pkg/util/diff/diff.go | 229 +- .../apimachinery/pkg/util/errors/errors.go | 38 +- .../pkg/util/intstr/generated.pb.go | 21 +- .../apimachinery/pkg/util/intstr/intstr.go | 4 +- .../apimachinery/pkg/util/mergepatch/util.go | 2 +- .../k8s.io/apimachinery/pkg/util/net/http.go | 17 +- .../apimachinery/pkg/util/net/interface.go | 38 +- .../k8s.io/apimachinery/pkg/util/rand/rand.go | 9 +- .../apimachinery/pkg/util/runtime/runtime.go | 37 +- .../apimachinery/pkg/util/sets/int32.go | 203 - .../pkg/util/validation/validation.go | 13 +- .../k8s.io/apimachinery/pkg/util/wait/wait.go | 161 +- .../apimachinery/pkg/util/yaml/decoder.go | 8 +- vendor/k8s.io/apimachinery/pkg/version/doc.go | 3 +- .../apimachinery/pkg/watch/streamwatcher.go | 33 +- vendor/k8s.io/apimachinery/pkg/watch/watch.go | 11 +- .../client-go/discovery/cached_discovery.go | 282 + .../client-go/discovery/discovery_client.go | 120 +- .../client-go/discovery/fake/discovery.go | 31 +- vendor/k8s.io/client-go/discovery/helper.go | 8 +- .../client-go/discovery/round_tripper.go | 62 + vendor/k8s.io/client-go/dynamic/interface.go | 2 +- vendor/k8s.io/client-go/dynamic/scheme.go | 4 - vendor/k8s.io/client-go/dynamic/simple.go | 49 +- .../admissionregistration/interface.go | 8 + .../v1alpha1/initializerconfiguration.go | 88 + .../v1alpha1/interface.go | 10 +- .../auditregistration/v1alpha1/auditsink.go | 88 - .../informers/coordination/interface.go | 8 - .../informers/extensions/v1beta1/interface.go | 7 - vendor/k8s.io/client-go/informers/factory.go | 12 - vendor/k8s.io/client-go/informers/generic.go | 45 +- .../internalinterfaces/factory_interfaces.go | 2 - .../informers/networking/interface.go | 8 - .../informers/node/v1alpha1/runtimeclass.go | 88 - .../informers/node/v1beta1/interface.go | 45 - .../informers/node/v1beta1/runtimeclass.go | 88 - .../informers/scheduling/interface.go | 8 - .../informers/scheduling/v1/interface.go | 45 - .../informers/scheduling/v1/priorityclass.go | 88 - .../informers/storage/v1/interface.go | 7 - .../informers/storage/v1/volumeattachment.go | 88 - .../informers/storage/v1beta1/csidriver.go | 88 - .../informers/storage/v1beta1/csinode.go | 88 - .../informers/storage/v1beta1/interface.go | 14 - .../k8s.io/client-go/kubernetes/clientset.go | 288 +- .../client-go/kubernetes/scheme/register.go | 16 +- .../v1alpha1/admissionregistration_client.go} | 35 +- .../v1alpha1/doc.go | 0 .../v1alpha1/generated_expansion.go | 2 +- .../v1alpha1/initializerconfiguration.go | 147 + .../v1beta1/admissionregistration_client.go | 3 +- .../v1beta1/mutatingwebhookconfiguration.go | 17 - .../v1beta1/validatingwebhookconfiguration.go | 17 - .../kubernetes/typed/apps/v1/apps_client.go | 3 +- 
.../typed/apps/v1/controllerrevision.go | 17 - .../kubernetes/typed/apps/v1/daemonset.go | 17 - .../kubernetes/typed/apps/v1/deployment.go | 49 - .../kubernetes/typed/apps/v1/replicaset.go | 49 - .../kubernetes/typed/apps/v1/statefulset.go | 49 - .../typed/apps/v1beta1/apps_client.go | 8 +- .../typed/apps/v1beta1/controllerrevision.go | 17 - .../typed/apps/v1beta1/deployment.go | 17 - .../typed/apps/v1beta1/generated_expansion.go | 2 + .../v1beta1/scale.go} | 29 +- .../typed/apps/v1beta1/statefulset.go | 17 - .../typed/apps/v1beta2/apps_client.go | 8 +- .../typed/apps/v1beta2/controllerrevision.go | 17 - .../typed/apps/v1beta2/daemonset.go | 17 - .../typed/apps/v1beta2/deployment.go | 17 - .../typed/apps/v1beta2/generated_expansion.go | 2 + .../typed/apps/v1beta2/replicaset.go | 17 - .../kubernetes/typed/apps/v1beta2/scale.go | 48 + .../typed/apps/v1beta2/statefulset.go | 17 - .../auditregistration/v1alpha1/auditsink.go | 164 - .../v1/authentication_client.go | 3 +- .../v1beta1/authentication_client.go | 3 +- .../authorization/v1/authorization_client.go | 3 +- .../v1beta1/authorization_client.go | 3 +- .../autoscaling/v1/autoscaling_client.go | 3 +- .../autoscaling/v1/horizontalpodautoscaler.go | 17 - .../autoscaling/v2beta1/autoscaling_client.go | 3 +- .../v2beta1/horizontalpodautoscaler.go | 17 - .../autoscaling/v2beta2/autoscaling_client.go | 3 +- .../v2beta2/horizontalpodautoscaler.go | 17 - .../kubernetes/typed/batch/v1/batch_client.go | 3 +- .../kubernetes/typed/batch/v1/job.go | 17 - .../typed/batch/v1beta1/batch_client.go | 3 +- .../kubernetes/typed/batch/v1beta1/cronjob.go | 17 - .../typed/batch/v2alpha1/batch_client.go | 3 +- .../typed/batch/v2alpha1/cronjob.go | 17 - .../v1beta1/certificates_client.go | 3 +- .../v1beta1/certificatesigningrequest.go | 17 - .../coordination/v1/coordination_client.go | 89 - .../kubernetes/typed/coordination/v1/lease.go | 174 - .../v1beta1/coordination_client.go | 3 +- .../typed/coordination/v1beta1/lease.go | 17 - .../typed/core/v1/componentstatus.go | 17 - .../kubernetes/typed/core/v1/configmap.go | 17 - .../kubernetes/typed/core/v1/core_client.go | 3 +- .../kubernetes/typed/core/v1/endpoints.go | 17 - .../kubernetes/typed/core/v1/event.go | 17 - .../kubernetes/typed/core/v1/limitrange.go | 17 - .../kubernetes/typed/core/v1/namespace.go | 12 - .../kubernetes/typed/core/v1/node.go | 17 - .../typed/core/v1/persistentvolume.go | 17 - .../typed/core/v1/persistentvolumeclaim.go | 17 - .../client-go/kubernetes/typed/core/v1/pod.go | 17 - .../kubernetes/typed/core/v1/podtemplate.go | 17 - .../typed/core/v1/replicationcontroller.go | 33 +- .../kubernetes/typed/core/v1/resourcequota.go | 17 - .../kubernetes/typed/core/v1/secret.go | 17 - .../kubernetes/typed/core/v1/service.go | 12 - .../typed/core/v1/serviceaccount.go | 17 - .../kubernetes/typed/events/v1beta1/event.go | 17 - .../typed/events/v1beta1/event_expansion.go | 98 - .../typed/events/v1beta1/events_client.go | 3 +- .../events/v1beta1/generated_expansion.go | 2 + .../typed/extensions/v1beta1/daemonset.go | 17 - .../typed/extensions/v1beta1/deployment.go | 17 - .../extensions/v1beta1/extensions_client.go | 13 +- .../extensions/v1beta1/generated_expansion.go | 2 - .../typed/extensions/v1beta1/ingress.go | 17 - .../typed/extensions/v1beta1/networkpolicy.go | 174 - .../extensions/v1beta1/podsecuritypolicy.go | 17 - .../typed/extensions/v1beta1/replicaset.go | 17 - .../typed/extensions/v1beta1/scale.go | 48 + .../extensions/v1beta1/scale_expansion.go | 65 + .../typed/networking/v1/networking_client.go 
| 3 +- .../typed/networking/v1/networkpolicy.go | 17 - .../typed/networking/v1beta1/ingress.go | 191 - .../networking/v1beta1/networking_client.go | 89 - .../typed/node/v1alpha1/runtimeclass.go | 164 - .../typed/node/v1beta1/runtimeclass.go | 164 - .../policy/v1beta1/poddisruptionbudget.go | 17 - .../typed/policy/v1beta1/podsecuritypolicy.go | 17 - .../typed/policy/v1beta1/policy_client.go | 3 +- .../kubernetes/typed/rbac/v1/clusterrole.go | 17 - .../typed/rbac/v1/clusterrolebinding.go | 17 - .../kubernetes/typed/rbac/v1/rbac_client.go | 3 +- .../kubernetes/typed/rbac/v1/role.go | 17 - .../kubernetes/typed/rbac/v1/rolebinding.go | 17 - .../typed/rbac/v1alpha1/clusterrole.go | 17 - .../typed/rbac/v1alpha1/clusterrolebinding.go | 17 - .../typed/rbac/v1alpha1/rbac_client.go | 3 +- .../kubernetes/typed/rbac/v1alpha1/role.go | 17 - .../typed/rbac/v1alpha1/rolebinding.go | 17 - .../typed/rbac/v1beta1/clusterrole.go | 17 - .../typed/rbac/v1beta1/clusterrolebinding.go | 17 - .../typed/rbac/v1beta1/rbac_client.go | 3 +- .../kubernetes/typed/rbac/v1beta1/role.go | 17 - .../typed/rbac/v1beta1/rolebinding.go | 17 - .../typed/scheduling/v1/priorityclass.go | 164 - .../typed/scheduling/v1/scheduling_client.go | 89 - .../scheduling/v1alpha1/priorityclass.go | 17 - .../scheduling/v1alpha1/scheduling_client.go | 3 +- .../typed/scheduling/v1beta1/priorityclass.go | 17 - .../scheduling/v1beta1/scheduling_client.go | 3 +- .../typed/settings/v1alpha1/podpreset.go | 17 - .../settings/v1alpha1/settings_client.go | 3 +- .../typed/storage/v1/generated_expansion.go | 2 - .../typed/storage/v1/storage_client.go | 8 +- .../typed/storage/v1/storageclass.go | 17 - .../typed/storage/v1/volumeattachment.go | 180 - .../typed/storage/v1alpha1/storage_client.go | 3 +- .../storage/v1alpha1/volumeattachment.go | 17 - .../typed/storage/v1beta1/csidriver.go | 164 - .../typed/storage/v1beta1/csinode.go | 164 - .../storage/v1beta1/generated_expansion.go | 4 - .../typed/storage/v1beta1/storage_client.go | 13 +- .../typed/storage/v1beta1/storageclass.go | 17 - .../typed/storage/v1beta1/volumeattachment.go | 17 - .../v1alpha1/expansion_generated.go | 6 +- .../v1alpha1/initializerconfiguration.go | 65 + .../apps/v1beta1/expansion_generated.go | 8 + .../client-go/listers/apps/v1beta1/scale.go | 94 + .../apps/v1beta2/expansion_generated.go | 8 + .../client-go/listers/apps/v1beta2/scale.go | 94 + .../auditregistration/v1alpha1/auditsink.go | 65 - .../listers/coordination/v1/lease.go | 94 - .../extensions/v1beta1/expansion_generated.go | 16 +- .../extensions/v1beta1/networkpolicy.go | 94 - .../listers/extensions/v1beta1/scale.go | 94 + .../listers/networking/v1beta1/ingress.go | 94 - .../listers/node/v1alpha1/runtimeclass.go | 65 - .../node/v1beta1/expansion_generated.go | 23 - .../listers/node/v1beta1/runtimeclass.go | 65 - .../v1beta1/poddisruptionbudget_expansion.go | 4 +- .../scheduling/v1/expansion_generated.go | 23 - .../listers/scheduling/v1/priorityclass.go | 65 - .../listers/storage/v1/expansion_generated.go | 4 - .../listers/storage/v1/volumeattachment.go | 65 - .../listers/storage/v1beta1/csidriver.go | 65 - .../listers/storage/v1beta1/csinode.go | 65 - .../storage/v1beta1/expansion_generated.go | 8 - .../pkg/apis/clientauthentication/doc.go | 1 - .../apis/clientauthentication/v1alpha1/doc.go | 1 - .../clientauthentication/v1alpha1/types.go | 2 +- .../apis/clientauthentication/v1beta1/doc.go | 1 - vendor/k8s.io/client-go/pkg/version/doc.go | 3 +- .../plugin/pkg/client/auth/exec/exec.go | 19 +- 
vendor/k8s.io/client-go/rest/config.go | 103 +- vendor/k8s.io/client-go/rest/plugin.go | 4 +- vendor/k8s.io/client-go/rest/request.go | 49 +- vendor/k8s.io/client-go/rest/transport.go | 24 +- vendor/k8s.io/client-go/rest/urlbackoff.go | 8 +- vendor/k8s.io/client-go/rest/watch/decoder.go | 2 +- vendor/k8s.io/client-go/testing/actions.go | 24 +- vendor/k8s.io/client-go/testing/fake.go | 15 +- vendor/k8s.io/client-go/testing/fixture.go | 51 +- .../k8s.io/client-go/tools/auth/clientauth.go | 125 + .../client-go/tools/cache/controller.go | 72 +- .../client-go/tools/cache/delta_fifo.go | 36 +- .../client-go/tools/cache/expiration_cache.go | 60 +- .../tools/cache/expiration_cache_fakes.go | 2 +- .../tools/cache/fake_custom_store.go | 2 +- vendor/k8s.io/client-go/tools/cache/fifo.go | 2 +- vendor/k8s.io/client-go/tools/cache/heap.go | 2 +- .../k8s.io/client-go/tools/cache/listers.go | 24 +- .../k8s.io/client-go/tools/cache/listwatch.go | 14 +- .../client-go/tools/cache/mutation_cache.go | 4 +- .../tools/cache/mutation_detector.go | 4 +- .../k8s.io/client-go/tools/cache/reflector.go | 137 +- .../tools/cache/reflector_metrics.go | 17 + .../client-go/tools/cache/shared_informer.go | 105 +- vendor/k8s.io/client-go/tools/cache/store.go | 2 +- .../tools/cache/thread_safe_store.go | 21 +- .../client-go/tools/clientcmd/api/doc.go | 1 - .../tools/clientcmd/api/latest/latest.go | 61 + .../client-go/tools/clientcmd/api/types.go | 44 - .../tools/clientcmd/api/v1/conversion.go | 244 + .../tools/clientcmd/api}/v1/doc.go | 4 +- .../tools/clientcmd/api}/v1/register.go | 31 +- .../client-go/tools/clientcmd/api/v1/types.go | 203 + .../clientcmd/api/v1/zz_generated.deepcopy.go | 348 + .../client-go/tools/clientcmd/auth_loaders.go | 111 + .../tools/clientcmd/client_config.go | 569 ++ .../client-go/tools/clientcmd/config.go | 490 + .../k8s.io/client-go/tools/clientcmd/doc.go | 37 + .../k8s.io/client-go/tools/clientcmd/flag.go | 49 + .../client-go/tools/clientcmd/helpers.go | 35 + .../client-go/tools/clientcmd/loader.go | 633 ++ .../tools/clientcmd/merged_client_builder.go | 168 + .../client-go/tools/clientcmd/overrides.go | 247 + .../client-go/tools/clientcmd/validation.go | 298 + vendor/k8s.io/client-go/tools/pager/pager.go | 114 - vendor/k8s.io/client-go/tools/record/doc.go | 18 + vendor/k8s.io/client-go/tools/record/event.go | 322 + .../client-go/tools/record/events_cache.go | 462 + vendor/k8s.io/client-go/tools/record/fake.go | 58 + vendor/k8s.io/client-go/transport/config.go | 13 +- .../client-go/transport/round_trippers.go | 38 +- .../client-go/transport/token_source.go | 13 +- .../k8s.io/client-go/transport/transport.go | 58 - .../util}/buffer/ring_growing.go | 0 vendor/k8s.io/client-go/util/cert/cert.go | 106 +- vendor/k8s.io/client-go/util/cert/io.go | 95 + vendor/k8s.io/client-go/util/cert/pem.go | 208 + .../client-go/util/flowcontrol/backoff.go | 4 +- .../k8s.io/client-go/util/homedir/homedir.go | 47 + .../util}/integer/integer.go | 6 - vendor/k8s.io/client-go/util/keyutil/key.go | 323 - .../util/workqueue/default_rate_limiters.go | 211 + .../util/workqueue/delaying_queue.go | 255 + vendor/k8s.io/client-go/util/workqueue/doc.go | 26 + .../client-go/util/workqueue/metrics.go | 195 + .../client-go/util/workqueue/parallelizer.go | 69 + .../k8s.io/client-go/util/workqueue/queue.go | 172 + .../util/workqueue/rate_limitting_queue.go | 69 + .../.github/PULL_REQUEST_TEMPLATE.md | 2 +- vendor/k8s.io/code-generator/CONTRIBUTING.md | 4 +- .../k8s.io/code-generator/Godeps/Godeps.json | 276 +- 
 vendor/k8s.io/code-generator/Godeps/OWNERS | 2 -
 vendor/k8s.io/code-generator/OWNERS | 2 -
 vendor/k8s.io/code-generator/README.md | 2 +-
 .../k8s.io/code-generator/SECURITY_CONTACTS | 6 +-
 .../MixedCase/apis/example/v1/types.go | 74 -
 .../typed/example/v1/clustertesttype.go | 210 -
 .../versioned/typed/example/v1/doc.go | 20 -
 .../typed/example/v1/example_client.go | 94 -
 .../example/v1/fake/fake_clustertesttype.go | 152 -
 .../typed/example/v1/fake/fake_testtype.go | 140 -
 .../versioned/typed/example/v1/testtype.go | 191 -
 .../example/v1/clustertesttype.go | 88 -
 .../externalversions/example/v1/interface.go | 52 -
 .../externalversions/example/v1/testtype.go | 89 -
 .../listers/example/v1/clustertesttype.go | 65 -
 .../MixedCase/listers/example/v1/testtype.go | 94 -
 .../_examples/apiserver/apis/example/doc.go | 1 -
 .../apiserver/apis/example/v1/doc.go | 1 -
 .../apis/example/v1/zz_generated.deepcopy.go | 2 +-
 .../apis/example/zz_generated.deepcopy.go | 2 +-
 .../_examples/apiserver/apis/example2/doc.go | 1 -
 .../apiserver/apis/example2/v1/doc.go | 1 -
 .../apis/example2/v1/zz_generated.deepcopy.go | 2 +-
 .../apis/example2/zz_generated.deepcopy.go | 2 +-
 .../fake/clientset_generated.go | 7 +-
 .../internalversion/fake/fake_testtype.go | 2 +-
 .../typed/example/internalversion/testtype.go | 17 -
 .../internalversion/fake/fake_testtype.go | 2 +-
 .../example2/internalversion/testtype.go | 17 -
 .../clientset/versioned/clientset.go | 16 +
 .../versioned/fake/clientset_generated.go | 17 +-
 .../typed/example/v1/example_client.go | 3 +-
 .../typed/example/v1/fake/fake_testtype.go | 2 +-
 .../versioned/typed/example/v1/testtype.go | 17 -
 .../typed/example2/v1/example2_client.go | 3 +-
 .../typed/example2/v1/fake/fake_testtype.go | 2 +-
 .../versioned/typed/example2/v1/testtype.go | 17 -
 .../internalinterfaces/factory_interfaces.go | 2 -
 .../internalinterfaces/factory_interfaces.go | 2 -
 .../_examples/crd/apis/example/v1/doc.go | 1 -
 .../apis/example/v1/zz_generated.deepcopy.go | 4 +-
 .../_examples/crd/apis/example2/v1/doc.go | 1 -
 .../apis/example2/v1/zz_generated.deepcopy.go | 2 +-
 .../crd/clientset/versioned/clientset.go | 16 +
 .../versioned/fake/clientset_generated.go | 17 +-
 .../typed/example/v1/clustertesttype.go | 17 -
 .../typed/example/v1/example_client.go | 3 +-
 .../example/v1/fake/fake_clustertesttype.go | 4 +-
 .../typed/example/v1/fake/fake_testtype.go | 2 +-
 .../versioned/typed/example/v1/testtype.go | 17 -
 .../typed/example2/v1/example2_client.go | 3 +-
 .../typed/example2/v1/fake/fake_testtype.go | 2 +-
 .../versioned/typed/example2/v1/testtype.go | 17 -
 .../internalinterfaces/factory_interfaces.go | 2 -
 .../code-generator/cmd/client-gen/OWNERS | 2 -
 .../code-generator/cmd/client-gen/README.md | 2 +-
 .../client-gen/generators/client_generator.go | 35 +-
 .../generators/fake/fake_client_generator.go | 4 +-
 .../fake/generator_fake_for_clientset.go | 17 +-
 .../fake/generator_fake_for_group.go | 2 +-
 .../fake/generator_fake_for_type.go | 6 +-
 .../generators/generator_for_clientset.go | 20 +-
 .../generators/generator_for_group.go | 3 +-
 .../generators/generator_for_type.go | 20 -
 .../generators/scheme/generator_for_scheme.go | 5 +-
 .../cmd/client-gen/generators/tags.go} | 23 +-
 .../code-generator/cmd/client-gen/main.go | 7 +-
 .../cmd/client-gen/types/helpers.go | 4 +-
 .../cmd/client-gen/types/types.go | 7 +-
 .../conversion-gen/generators/conversion.go | 66 +-
 .../code-generator/cmd/conversion-gen/main.go | 72 +-
 .../code-generator/cmd/deepcopy-gen/main.go | 9 +-
 .../code-generator/cmd/defaulter-gen/main.go | 9 +-
 .../code-generator/cmd/go-to-protobuf/OWNERS | 2 -
 .../cmd/go-to-protobuf/protobuf/cmd.go | 83 +-
 .../cmd/go-to-protobuf/protobuf/generator.go | 8 +-
 .../cmd/go-to-protobuf/protobuf/tags.go | 4 +-
 .../code-generator/cmd/import-boss/main.go | 17 +-
 .../cmd/informer-gen/generators/factory.go | 8 +-
 .../generators/factoryinterface.go | 6 +-
 .../cmd/informer-gen/generators/generic.go | 8 +-
 .../cmd/informer-gen/generators/informer.go | 4 +-
 .../cmd/informer-gen/generators/packages.go | 16 +-
 .../cmd/informer-gen}/generators/tags.go | 4 +-
 .../generators/versioninterface.go | 2 +-
 .../code-generator/cmd/informer-gen/main.go | 9 +-
 .../cmd/lister-gen/generators/lister.go | 10 +-
 .../cmd/lister-gen/generators/tags.go | 33 +
 .../code-generator/cmd/lister-gen/main.go | 9 +-
 .../code-generator/cmd/openapi-gen/README | 13 +
 .../cmd/openapi-gen/args/args.go | 53 +
 .../code-generator/cmd/openapi-gen/main.go | 61 +
 .../cmd/register-gen/generators/packages.go | 18 +-
 .../code-generator/cmd/register-gen/main.go | 9 +-
 .../k8s.io/code-generator/cmd/set-gen/main.go | 7 +-
 .../k8s.io/code-generator/generate-groups.sh | 30 +-
 .../generate-internal-groups.sh | 37 +-
 vendor/k8s.io/code-generator/go.mod | 21 -
 vendor/k8s.io/code-generator/go.sum | 29 -
 .../code-generator/hack/update-codegen.sh | 12 +-
 .../code-generator/hack/verify-codegen.sh | 7 +-
 .../code-generator/pkg/namer/tag-override.go | 58 -
 vendor/k8s.io/code-generator/tools.go | 34 -
 .../import-boss/generators/import_restrict.go | 290 -
 .../gengo/examples/set-gen/generators/sets.go | 360 -
 vendor/k8s.io/klog/klog.go | 95 +-
 vendor/k8s.io/klog/klog_file.go | 19 +-
 vendor/k8s.io/utils/trace/trace.go | 97 -
 vendor/sigs.k8s.io/yaml/yaml_go110.go | 14 -
 1991 files changed, 187136 insertions(+), 110876 deletions(-)
 create mode 100644 .errcheck.txt
 create mode 100644 .golangci.yml
 create mode 120000 cmd/controller/kodata/LICENSE
 create mode 120000 cmd/controller/kodata/VENDOR-LICENSE
 create mode 100644 cmd/controller/main.go
 create mode 100644 config/config-logging.yaml
 create mode 100644 config/config-observability.yaml
 create mode 100644 config/controller-service.yaml
 create mode 100644 config/controller.yaml
 create mode 100644 pkg/client/injection/client/client.go
 create mode 100644 pkg/client/injection/client/fake/fake.go
 create mode 100644 pkg/client/injection/informers/triggers/factory/fake/fake.go
 create mode 100644 pkg/client/injection/informers/triggers/factory/triggersfactory.go
 create mode 100644 pkg/client/injection/informers/triggers/v1alpha1/eventlistener/eventlistener.go
 create mode 100644 pkg/client/injection/informers/triggers/v1alpha1/eventlistener/fake/fake.go
 create mode 100644 pkg/client/injection/informers/triggers/v1alpha1/triggerbinding/fake/fake.go
 create mode 100644 pkg/client/injection/informers/triggers/v1alpha1/triggerbinding/triggerbinding.go
 create mode 100644 pkg/client/injection/informers/triggers/v1alpha1/triggertemplate/fake/fake.go
 create mode 100644 pkg/client/injection/informers/triggers/v1alpha1/triggertemplate/triggertemplate.go
 create mode 100644 pkg/reconciler/reconciler.go
 create mode 100644 pkg/reconciler/v1alpha1/eventlistener/controller.go
 create mode 100644 pkg/reconciler/v1alpha1/eventlistener/eventlistener.go
 create mode 100755 test/e2e-common.sh
 rename config/triggers.yaml => test/e2e-tests.sh (58%)
 mode change 100644 => 100755
 rename vendor/{k8s.io/utils => cloud.google.com/go}/LICENSE (100%)
 create mode 100644 vendor/cloud.google.com/go/compute/metadata/metadata.go
 create mode 100644
vendor/cloud.google.com/go/monitoring/apiv3/alert_policy_client.go create mode 100644 vendor/cloud.google.com/go/monitoring/apiv3/doc.go create mode 100644 vendor/cloud.google.com/go/monitoring/apiv3/group_client.go create mode 100644 vendor/cloud.google.com/go/monitoring/apiv3/metric_client.go create mode 100644 vendor/cloud.google.com/go/monitoring/apiv3/notification_channel_client.go create mode 100644 vendor/cloud.google.com/go/monitoring/apiv3/path_funcs.go create mode 100644 vendor/cloud.google.com/go/monitoring/apiv3/uptime_check_client.go create mode 100644 vendor/cloud.google.com/go/trace/apiv2/doc.go create mode 100644 vendor/cloud.google.com/go/trace/apiv2/path_funcs.go create mode 100644 vendor/cloud.google.com/go/trace/apiv2/trace_client.go create mode 100644 vendor/contrib.go.opencensus.io/exporter/stackdriver/AUTHORS create mode 100644 vendor/contrib.go.opencensus.io/exporter/stackdriver/LICENSE create mode 100644 vendor/contrib.go.opencensus.io/exporter/stackdriver/label.go create mode 100644 vendor/contrib.go.opencensus.io/exporter/stackdriver/metrics.go create mode 100644 vendor/contrib.go.opencensus.io/exporter/stackdriver/monitoredresource/aws_identity_doc_utils.go create mode 100644 vendor/contrib.go.opencensus.io/exporter/stackdriver/monitoredresource/gcp_metadata_config.go create mode 100644 vendor/contrib.go.opencensus.io/exporter/stackdriver/monitoredresource/monitored_resources.go create mode 100644 vendor/contrib.go.opencensus.io/exporter/stackdriver/sanitize.go create mode 100644 vendor/contrib.go.opencensus.io/exporter/stackdriver/stackdriver.go create mode 100644 vendor/contrib.go.opencensus.io/exporter/stackdriver/stats.go create mode 100644 vendor/contrib.go.opencensus.io/exporter/stackdriver/trace.go create mode 100644 vendor/contrib.go.opencensus.io/exporter/stackdriver/trace_proto.go create mode 100644 vendor/github.com/aws/aws-sdk-go/LICENSE.txt create mode 100644 vendor/github.com/aws/aws-sdk-go/NOTICE.txt create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/client/client.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/client/logger.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/config.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/context_1_5.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/context_1_9.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/context_background_1_5.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/context_background_1_7.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/context_sleep.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/convert_types.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go create mode 100644 
vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/csm/metric_exception.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/defaults/shared_config.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/doc.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/endpoints/dep_service_ids.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/errors.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/logger.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/request.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/validation.go create mode 100644 
vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_5.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_6.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/session/doc.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/session/session.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/signer/v4/options.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/types.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/url.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/version.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/ast.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/comma_token.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/comment_token.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/doc.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/empty_token.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/expression.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/fuzz.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/ini.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/ini_lexer.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/literal_tokens.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/newline_token.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/number_helper.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/op_tokens.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/parse_error.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/parse_stack.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/sep_tokens.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/skipper.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/statement.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/value_util.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/walker.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/ws_token.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.6.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.7.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/sdkrand/locked_source.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/sdkuri/path.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/ecs_container.go create mode 100644 
vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/host.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/host_prefix.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/jsonvalue.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/sts/api.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/sts/doc.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/sts/errors.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/sts/service.go create mode 100644 vendor/github.com/beorn7/perks/LICENSE create mode 100644 vendor/github.com/beorn7/perks/quantile/stream.go create mode 100644 vendor/github.com/census-instrumentation/opencensus-proto/AUTHORS create mode 100644 vendor/github.com/census-instrumentation/opencensus-proto/LICENSE create mode 100644 vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1/common.pb.go create mode 100644 vendor/github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1/metrics.pb.go create mode 100644 vendor/github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1/resource.pb.go rename vendor/{sigs.k8s.io => github.com/ghodss}/yaml/LICENSE (100%) rename vendor/{sigs.k8s.io => github.com/ghodss}/yaml/fields.go (99%) rename vendor/{sigs.k8s.io => github.com/ghodss}/yaml/yaml.go (77%) create mode 100644 vendor/github.com/golang/glog/LICENSE create mode 100644 vendor/github.com/golang/glog/glog.go create mode 100644 vendor/github.com/golang/glog/glog_file.go create mode 100644 vendor/github.com/golang/groupcache/LICENSE create mode 100644 vendor/github.com/golang/groupcache/lru/lru.go create mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go create mode 100644 vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go create mode 100644 vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go create mode 100644 vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go create mode 100644 
vendor/github.com/google/btree/LICENSE create mode 100644 vendor/github.com/google/btree/btree.go create mode 100644 vendor/github.com/google/btree/btree_mem.go create mode 100644 vendor/github.com/google/uuid/CONTRIBUTORS create mode 100644 vendor/github.com/google/uuid/LICENSE create mode 100644 vendor/github.com/google/uuid/dce.go create mode 100644 vendor/github.com/google/uuid/doc.go create mode 100644 vendor/github.com/google/uuid/hash.go create mode 100644 vendor/github.com/google/uuid/marshal.go create mode 100644 vendor/github.com/google/uuid/node.go create mode 100644 vendor/github.com/google/uuid/node_js.go create mode 100644 vendor/github.com/google/uuid/node_net.go create mode 100644 vendor/github.com/google/uuid/sql.go create mode 100644 vendor/github.com/google/uuid/time.go create mode 100644 vendor/github.com/google/uuid/util.go create mode 100644 vendor/github.com/google/uuid/uuid.go create mode 100644 vendor/github.com/google/uuid/version1.go create mode 100644 vendor/github.com/google/uuid/version4.go create mode 100644 vendor/github.com/googleapis/gax-go/LICENSE create mode 100644 vendor/github.com/googleapis/gax-go/v2/call_option.go create mode 100644 vendor/github.com/googleapis/gax-go/v2/gax.go rename vendor/github.com/{golang/protobuf/proto/deprecated.go => googleapis/gax-go/v2/header.go} (60%) create mode 100644 vendor/github.com/googleapis/gax-go/v2/invoke.go create mode 100644 vendor/github.com/gregjones/httpcache/LICENSE.txt create mode 100644 vendor/github.com/gregjones/httpcache/diskcache/diskcache.go create mode 100644 vendor/github.com/gregjones/httpcache/httpcache.go rename vendor/{k8s.io/utils/third_party/forked/golang => github.com/imdario/mergo}/LICENSE (96%) create mode 100644 vendor/github.com/imdario/mergo/doc.go create mode 100644 vendor/github.com/imdario/mergo/map.go create mode 100644 vendor/github.com/imdario/mergo/merge.go create mode 100644 vendor/github.com/imdario/mergo/mergo.go create mode 100644 vendor/github.com/imdario/mergo/testdata/license.yml create mode 100644 vendor/github.com/jmespath/go-jmespath/LICENSE create mode 100644 vendor/github.com/jmespath/go-jmespath/api.go create mode 100644 vendor/github.com/jmespath/go-jmespath/astnodetype_string.go create mode 100644 vendor/github.com/jmespath/go-jmespath/functions.go create mode 100644 vendor/github.com/jmespath/go-jmespath/interpreter.go create mode 100644 vendor/github.com/jmespath/go-jmespath/lexer.go create mode 100644 vendor/github.com/jmespath/go-jmespath/parser.go create mode 100644 vendor/github.com/jmespath/go-jmespath/toktype_string.go create mode 100644 vendor/github.com/jmespath/go-jmespath/util.go create mode 100644 vendor/github.com/knative/caching/.gitattributes create mode 100644 vendor/github.com/knative/caching/.github/issue-template.md create mode 100644 vendor/github.com/knative/caching/.github/pull-request-template.md create mode 100644 vendor/github.com/knative/caching/.gitignore create mode 100644 vendor/github.com/knative/caching/CONTRIBUTING.md create mode 100644 vendor/github.com/knative/caching/DEVELOPMENT.md create mode 100644 vendor/github.com/knative/caching/Gopkg.lock create mode 100644 vendor/github.com/knative/caching/Gopkg.toml create mode 100644 vendor/github.com/knative/caching/LICENSE create mode 100644 vendor/github.com/knative/caching/OWNERS create mode 100644 vendor/github.com/knative/caching/README.md create mode 100644 vendor/github.com/knative/caching/code-of-conduct.md create mode 100644 vendor/github.com/knative/caching/config/image.yaml 
create mode 100644 vendor/github.com/knative/caching/hack/OWNERS create mode 100755 vendor/github.com/knative/caching/hack/boilerplate/add-boilerplate.sh rename cmd/triggers/main.go => vendor/github.com/knative/caching/hack/boilerplate/boilerplate.go.txt (82%) create mode 100644 vendor/github.com/knative/caching/hack/boilerplate/boilerplate.sh.txt create mode 100755 vendor/github.com/knative/caching/hack/update-codegen.sh create mode 100755 vendor/github.com/knative/caching/hack/update-deps.sh create mode 100755 vendor/github.com/knative/caching/hack/verify-codegen.sh create mode 100644 vendor/github.com/knative/caching/pkg/apis/caching/register.go create mode 100644 vendor/github.com/knative/caching/pkg/apis/caching/v1alpha1/doc.go create mode 100644 vendor/github.com/knative/caching/pkg/apis/caching/v1alpha1/image_defaults.go create mode 100644 vendor/github.com/knative/caching/pkg/apis/caching/v1alpha1/image_types.go create mode 100644 vendor/github.com/knative/caching/pkg/apis/caching/v1alpha1/image_validation.go rename vendor/{k8s.io/api/node => github.com/knative/caching/pkg/apis/caching}/v1alpha1/register.go (68%) rename vendor/{k8s.io/code-generator/_examples/MixedCase/apis/example/v1 => github.com/knative/caching/pkg/apis/caching/v1alpha1}/zz_generated.deepcopy.go (52%) create mode 100644 vendor/github.com/knative/caching/pkg/client/clientset/versioned/clientset.go rename vendor/{k8s.io/client-go/kubernetes/typed/scheduling/v1 => github.com/knative/caching/pkg/client/clientset/versioned}/doc.go (83%) rename vendor/{k8s.io/code-generator/_examples/MixedCase => github.com/knative/caching/pkg/client}/clientset/versioned/fake/clientset_generated.go (74%) rename vendor/{k8s.io/code-generator/_examples/MixedCase => github.com/knative/caching/pkg/client}/clientset/versioned/fake/doc.go (94%) rename vendor/{k8s.io/code-generator/_examples/MixedCase => github.com/knative/caching/pkg/client}/clientset/versioned/fake/register.go (92%) create mode 100644 vendor/github.com/knative/caching/pkg/client/clientset/versioned/scheme/doc.go rename vendor/{k8s.io/code-generator/_examples/MixedCase => github.com/knative/caching/pkg/client}/clientset/versioned/scheme/register.go (92%) rename vendor/{k8s.io/client-go/kubernetes/typed/node/v1alpha1/node_client.go => github.com/knative/caching/pkg/client/clientset/versioned/typed/caching/v1alpha1/caching_client.go} (54%) rename vendor/{k8s.io/client-go/kubernetes/typed/networking/v1beta1 => github.com/knative/caching/pkg/client/clientset/versioned/typed/caching/v1alpha1}/doc.go (92%) rename vendor/{k8s.io/code-generator/_examples/MixedCase/clientset/versioned/typed/example/v1 => github.com/knative/caching/pkg/client/clientset/versioned/typed/caching/v1alpha1}/fake/doc.go (94%) rename vendor/{k8s.io/code-generator/_examples/MixedCase/clientset/versioned/typed/example/v1/fake/fake_example_client.go => github.com/knative/caching/pkg/client/clientset/versioned/typed/caching/v1alpha1/fake/fake_caching_client.go} (66%) create mode 100644 vendor/github.com/knative/caching/pkg/client/clientset/versioned/typed/caching/v1alpha1/fake/fake_image.go rename vendor/{k8s.io/client-go/kubernetes/typed/auditregistration => github.com/knative/caching/pkg/client/clientset/versioned/typed/caching}/v1alpha1/generated_expansion.go (89%) create mode 100644 vendor/github.com/knative/caching/pkg/client/clientset/versioned/typed/caching/v1alpha1/image.go rename vendor/{k8s.io/client-go/informers/node => 
github.com/knative/caching/pkg/client/informers/externalversions/caching}/interface.go (74%)
create mode 100644 vendor/github.com/knative/caching/pkg/client/informers/externalversions/caching/v1alpha1/image.go
rename vendor/{k8s.io/client-go/informers/coordination/v1 => github.com/knative/caching/pkg/client/informers/externalversions/caching/v1alpha1}/interface.go (76%)
rename vendor/{k8s.io/code-generator/_examples/MixedCase => github.com/knative/caching/pkg/client}/informers/externalversions/factory.go (92%)
rename vendor/{k8s.io/code-generator/_examples/MixedCase => github.com/knative/caching/pkg/client}/informers/externalversions/generic.go (79%)
rename vendor/{k8s.io/code-generator/_examples/MixedCase => github.com/knative/caching/pkg/client}/informers/externalversions/internalinterfaces/factory_interfaces.go (80%)
rename vendor/{k8s.io/client-go/listers/node => github.com/knative/caching/pkg/client/listers/caching}/v1alpha1/expansion_generated.go (67%)
create mode 100644 vendor/github.com/knative/caching/pkg/client/listers/caching/v1alpha1/image.go
create mode 100644 vendor/github.com/knative/caching/test/OWNERS
create mode 100755 vendor/github.com/knative/caching/test/presubmit-tests.sh
create mode 100644 vendor/github.com/knative/pkg/.gitattributes
create mode 100644 vendor/github.com/knative/pkg/.github/issue-template.md
create mode 100644 vendor/github.com/knative/pkg/.github/pull-request-template.md
create mode 100644 vendor/github.com/knative/pkg/.gitignore
create mode 100644 vendor/github.com/knative/pkg/CONTRIBUTING.md
create mode 100644 vendor/github.com/knative/pkg/DEVELOPMENT.md
create mode 100644 vendor/github.com/knative/pkg/Gopkg.lock
create mode 100644 vendor/github.com/knative/pkg/Gopkg.toml
create mode 100644 vendor/github.com/knative/pkg/OWNERS
create mode 100644 vendor/github.com/knative/pkg/OWNERS_ALIASES
create mode 100644 vendor/github.com/knative/pkg/README.md
create mode 100644 vendor/github.com/knative/pkg/apis/OWNERS
create mode 100644 vendor/github.com/knative/pkg/apis/duck/OWNERS
create mode 100644 vendor/github.com/knative/pkg/apis/duck/v1alpha1/addressable_types.go
create mode 100644 vendor/github.com/knative/pkg/apis/duck/v1alpha1/condition_set.go
create mode 100644 vendor/github.com/knative/pkg/apis/duck/v1alpha1/conditions_types.go
rename vendor/{k8s.io/api/networking/v1beta1 => github.com/knative/pkg/apis/duck/v1alpha1}/doc.go (68%)
create mode 100644 vendor/github.com/knative/pkg/apis/duck/v1alpha1/legacy_targetable_types.go
create mode 100644 vendor/github.com/knative/pkg/apis/duck/v1alpha1/register.go
create mode 100644 vendor/github.com/knative/pkg/apis/duck/v1alpha1/retired_targetable_types.go
create mode 100644 vendor/github.com/knative/pkg/apis/duck/v1alpha1/zz_generated.deepcopy.go
create mode 100644 vendor/github.com/knative/pkg/apis/istio/OWNERS
rename vendor/{k8s.io/client-go/kubernetes/typed/scheduling/v1/generated_expansion.go => github.com/knative/pkg/apis/istio/authentication/register.go} (80%)
rename vendor/github.com/knative/{test-infra/scripts/dummy.go => pkg/apis/istio/authentication/v1alpha1/doc.go} (63%)
create mode 100644 vendor/github.com/knative/pkg/apis/istio/authentication/v1alpha1/policy_types.go
rename vendor/{k8s.io/api/auditregistration => github.com/knative/pkg/apis/istio/authentication}/v1alpha1/register.go (60%)
rename vendor/{k8s.io/api/networking/v1beta1 => github.com/knative/pkg/apis/istio/authentication/v1alpha1}/zz_generated.deepcopy.go (56%)
create mode 100644
vendor/github.com/knative/pkg/apis/istio/common/v1alpha1/string.go
create mode 100644 vendor/github.com/knative/pkg/apis/istio/register.go
create mode 100644 vendor/github.com/knative/pkg/apis/istio/v1alpha3/README.md
create mode 100644 vendor/github.com/knative/pkg/apis/istio/v1alpha3/destinationrule_types.go
rename vendor/{k8s.io/api/coordination/v1 => github.com/knative/pkg/apis/istio/v1alpha3}/doc.go (68%)
create mode 100644 vendor/github.com/knative/pkg/apis/istio/v1alpha3/gateway_types.go
rename vendor/{k8s.io/api/node/v1beta1 => github.com/knative/pkg/apis/istio/v1alpha3}/register.go (67%)
create mode 100644 vendor/github.com/knative/pkg/apis/istio/v1alpha3/virtualservice_types.go
create mode 100644 vendor/github.com/knative/pkg/apis/istio/v1alpha3/zz_generated.deepcopy.go
create mode 100644 vendor/github.com/knative/pkg/apis/testing/conditions.go
create mode 100644 vendor/github.com/knative/pkg/changeset/commit.go
create mode 100644 vendor/github.com/knative/pkg/changeset/doc.go
create mode 100644 vendor/github.com/knative/pkg/changeset/testdata/HEAD
create mode 100644 vendor/github.com/knative/pkg/changeset/testdata/noncommitted/HEAD
create mode 100644 vendor/github.com/knative/pkg/client/clientset/versioned/clientset.go
create mode 100644 vendor/github.com/knative/pkg/client/clientset/versioned/doc.go
create mode 100644 vendor/github.com/knative/pkg/client/clientset/versioned/fake/clientset_generated.go
create mode 100644 vendor/github.com/knative/pkg/client/clientset/versioned/fake/doc.go
create mode 100644 vendor/github.com/knative/pkg/client/clientset/versioned/fake/register.go
create mode 100644 vendor/github.com/knative/pkg/client/clientset/versioned/scheme/doc.go
create mode 100644 vendor/github.com/knative/pkg/client/clientset/versioned/scheme/register.go
rename vendor/{k8s.io/client-go/kubernetes/typed/node/v1beta1/node_client.go => github.com/knative/pkg/client/clientset/versioned/typed/authentication/v1alpha1/authentication_client.go} (51%)
rename vendor/{k8s.io/client-go/kubernetes/typed/node/v1beta1 => github.com/knative/pkg/client/clientset/versioned/typed/authentication/v1alpha1}/doc.go (92%)
create mode 100644 vendor/github.com/knative/pkg/client/clientset/versioned/typed/authentication/v1alpha1/fake/doc.go
create mode 100644 vendor/github.com/knative/pkg/client/clientset/versioned/typed/authentication/v1alpha1/fake/fake_authentication_client.go
create mode 100644 vendor/github.com/knative/pkg/client/clientset/versioned/typed/authentication/v1alpha1/fake/fake_policy.go
rename vendor/{k8s.io/client-go/kubernetes/typed/networking/v1beta1 => github.com/knative/pkg/client/clientset/versioned/typed/authentication/v1alpha1}/generated_expansion.go (87%)
create mode 100644 vendor/github.com/knative/pkg/client/clientset/versioned/typed/authentication/v1alpha1/policy.go
create mode 100644 vendor/github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/destinationrule.go
rename vendor/{k8s.io/client-go/kubernetes/typed/coordination/v1 => github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3}/doc.go (92%)
create mode 100644 vendor/github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/fake/doc.go
create mode 100644 vendor/github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/fake/fake_destinationrule.go
create mode 100644 vendor/github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/fake/fake_gateway.go
create mode 100644
vendor/github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/fake/fake_istio_client.go
create mode 100644 vendor/github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/fake/fake_virtualservice.go
create mode 100644 vendor/github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/gateway.go
rename vendor/{k8s.io/code-generator/_examples/MixedCase/clientset/versioned/typed/example/v1 => github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3}/generated_expansion.go (77%)
create mode 100644 vendor/github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/istio_client.go
create mode 100644 vendor/github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/virtualservice.go
rename vendor/{k8s.io/client-go/informers/auditregistration => github.com/knative/pkg/client/informers/externalversions/authentication}/interface.go (84%)
rename vendor/{k8s.io/client-go/informers/auditregistration => github.com/knative/pkg/client/informers/externalversions/authentication}/v1alpha1/interface.go (74%)
rename vendor/{k8s.io/client-go/informers/coordination/v1/lease.go => github.com/knative/pkg/client/informers/externalversions/authentication/v1alpha1/policy.go} (51%)
create mode 100644 vendor/github.com/knative/pkg/client/informers/externalversions/factory.go
create mode 100644 vendor/github.com/knative/pkg/client/informers/externalversions/generic.go
create mode 100644 vendor/github.com/knative/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go
rename vendor/{k8s.io/code-generator/_examples/MixedCase/informers/externalversions/example => github.com/knative/pkg/client/informers/externalversions/istio}/interface.go (68%)
create mode 100644 vendor/github.com/knative/pkg/client/informers/externalversions/istio/v1alpha3/destinationrule.go
rename vendor/{k8s.io/client-go/informers/networking/v1beta1/ingress.go => github.com/knative/pkg/client/informers/externalversions/istio/v1alpha3/gateway.go} (52%)
rename vendor/{k8s.io/client-go/informers/networking/v1beta1 => github.com/knative/pkg/client/informers/externalversions/istio/v1alpha3}/interface.go (53%)
rename vendor/{k8s.io/client-go/informers/extensions/v1beta1/networkpolicy.go => github.com/knative/pkg/client/informers/externalversions/istio/v1alpha3/virtualservice.go} (53%)
create mode 100644 vendor/github.com/knative/pkg/client/injection/client/client.go
create mode 100644 vendor/github.com/knative/pkg/client/injection/client/fake/fake.go
create mode 100644 vendor/github.com/knative/pkg/client/injection/informers/authentication/factory/authenticationfactory.go
create mode 100644 vendor/github.com/knative/pkg/client/injection/informers/authentication/factory/fake/fake.go
create mode 100644 vendor/github.com/knative/pkg/client/injection/informers/authentication/v1alpha1/policy/fake/fake.go
create mode 100644 vendor/github.com/knative/pkg/client/injection/informers/authentication/v1alpha1/policy/policy.go
create mode 100644 vendor/github.com/knative/pkg/client/injection/informers/istio/factory/fake/fake.go
create mode 100644 vendor/github.com/knative/pkg/client/injection/informers/istio/factory/istiofactory.go
create mode 100644 vendor/github.com/knative/pkg/client/injection/informers/istio/v1alpha3/destinationrule/destinationrule.go
create mode 100644 vendor/github.com/knative/pkg/client/injection/informers/istio/v1alpha3/destinationrule/fake/fake.go
create mode 100644
vendor/github.com/knative/pkg/client/injection/informers/istio/v1alpha3/gateway/fake/fake.go
create mode 100644 vendor/github.com/knative/pkg/client/injection/informers/istio/v1alpha3/gateway/gateway.go
create mode 100644 vendor/github.com/knative/pkg/client/injection/informers/istio/v1alpha3/virtualservice/fake/fake.go
create mode 100644 vendor/github.com/knative/pkg/client/injection/informers/istio/v1alpha3/virtualservice/virtualservice.go
rename vendor/{k8s.io/client-go/listers/coordination/v1 => github.com/knative/pkg/client/listers/authentication/v1alpha1}/expansion_generated.go (64%)
create mode 100644 vendor/github.com/knative/pkg/client/listers/authentication/v1alpha1/policy.go
create mode 100644 vendor/github.com/knative/pkg/client/listers/istio/v1alpha3/destinationrule.go
create mode 100644 vendor/github.com/knative/pkg/client/listers/istio/v1alpha3/expansion_generated.go
create mode 100644 vendor/github.com/knative/pkg/client/listers/istio/v1alpha3/gateway.go
create mode 100644 vendor/github.com/knative/pkg/client/listers/istio/v1alpha3/virtualservice.go
create mode 100644 vendor/github.com/knative/pkg/cloudevents/OWNERS
create mode 100644 vendor/github.com/knative/pkg/cloudevents/README.md
create mode 100644 vendor/github.com/knative/pkg/cloudevents/builder.go
create mode 100644 vendor/github.com/knative/pkg/cloudevents/client.go
create mode 100644 vendor/github.com/knative/pkg/cloudevents/doc.go
create mode 100644 vendor/github.com/knative/pkg/cloudevents/encoding_binary.go
create mode 100644 vendor/github.com/knative/pkg/cloudevents/encoding_structured.go
create mode 100644 vendor/github.com/knative/pkg/cloudevents/event.go
create mode 100644 vendor/github.com/knative/pkg/cloudevents/event_v01.go
create mode 100644 vendor/github.com/knative/pkg/cloudevents/event_v02.go
create mode 100644 vendor/github.com/knative/pkg/cloudevents/handler.go
create mode 100644 vendor/github.com/knative/pkg/code-of-conduct.md
create mode 100644 vendor/github.com/knative/pkg/codegen/cmd/injection-gen/args/args.go
create mode 100644 vendor/github.com/knative/pkg/codegen/cmd/injection-gen/generators/client.go
create mode 100644 vendor/github.com/knative/pkg/codegen/cmd/injection-gen/generators/factory.go
create mode 100644 vendor/github.com/knative/pkg/codegen/cmd/injection-gen/generators/fakeclient.go
create mode 100644 vendor/github.com/knative/pkg/codegen/cmd/injection-gen/generators/fakefactory.go
create mode 100644 vendor/github.com/knative/pkg/codegen/cmd/injection-gen/generators/fakeinformer.go
create mode 100644 vendor/github.com/knative/pkg/codegen/cmd/injection-gen/generators/informer.go
create mode 100644 vendor/github.com/knative/pkg/codegen/cmd/injection-gen/generators/namesystems.go
create mode 100644 vendor/github.com/knative/pkg/codegen/cmd/injection-gen/generators/packages.go
create mode 100644 vendor/github.com/knative/pkg/codegen/cmd/injection-gen/main.go
create mode 100644 vendor/github.com/knative/pkg/configmap/OWNERS
create mode 100644 vendor/github.com/knative/pkg/configmap/testing/configmap.go
create mode 100644 vendor/github.com/knative/pkg/controller/OWNERS
create mode 100644 vendor/github.com/knative/pkg/controller/controller.go
create mode 100644 vendor/github.com/knative/pkg/controller/helper.go
create mode 100644 vendor/github.com/knative/pkg/controller/stats_reporter.go
create mode 100644 vendor/github.com/knative/pkg/controller/testing/fake_stats_reporter.go
create mode 100644 vendor/github.com/knative/pkg/hack/OWNERS
create mode 100644
vendor/github.com/knative/pkg/hack/boilerplate/boilerplate.go.txt
create mode 100755 vendor/github.com/knative/pkg/hack/generate-knative.sh
create mode 100755 vendor/github.com/knative/pkg/hack/update-codegen.sh
create mode 100755 vendor/github.com/knative/pkg/hack/update-deps.sh
create mode 100755 vendor/github.com/knative/pkg/hack/verify-codegen.sh
create mode 100644 vendor/github.com/knative/pkg/injection/OWNERS
create mode 100644 vendor/github.com/knative/pkg/injection/README.md
create mode 100644 vendor/github.com/knative/pkg/injection/clients.go
create mode 100644 vendor/github.com/knative/pkg/injection/clients/apiextclient/apiext.go
create mode 100644 vendor/github.com/knative/pkg/injection/clients/apiextclient/fake/fake.go
create mode 100644 vendor/github.com/knative/pkg/injection/clients/dynamicclient/dynamicclient.go
create mode 100644 vendor/github.com/knative/pkg/injection/clients/dynamicclient/fake/fake.go
create mode 100644 vendor/github.com/knative/pkg/injection/clients/kubeclient/fake/fake.go
create mode 100644 vendor/github.com/knative/pkg/injection/clients/kubeclient/kubeclient.go
create mode 100644 vendor/github.com/knative/pkg/injection/doc.go
create mode 100644 vendor/github.com/knative/pkg/injection/factories.go
create mode 100644 vendor/github.com/knative/pkg/injection/informers.go
create mode 100644 vendor/github.com/knative/pkg/injection/informers/apiextinformers/apiextensionsv1beta1/crd/crd.go
create mode 100644 vendor/github.com/knative/pkg/injection/informers/apiextinformers/apiextensionsv1beta1/crd/fake/fake.go
create mode 100644 vendor/github.com/knative/pkg/injection/informers/apiextinformers/factory/factory.go
create mode 100644 vendor/github.com/knative/pkg/injection/informers/apiextinformers/factory/fake/fake.go
create mode 100644 vendor/github.com/knative/pkg/injection/informers/kubeinformers/appsv1/deployment/deployment.go
create mode 100644 vendor/github.com/knative/pkg/injection/informers/kubeinformers/appsv1/deployment/fake/fake.go
create mode 100644 vendor/github.com/knative/pkg/injection/informers/kubeinformers/autoscalingv1/hpa/fake/fake.go
create mode 100644 vendor/github.com/knative/pkg/injection/informers/kubeinformers/autoscalingv1/hpa/hpa.go
create mode 100644 vendor/github.com/knative/pkg/injection/informers/kubeinformers/autoscalingv2beta1/hpa/fake/fake.go
create mode 100644 vendor/github.com/knative/pkg/injection/informers/kubeinformers/autoscalingv2beta1/hpa/hpa.go
create mode 100644 vendor/github.com/knative/pkg/injection/informers/kubeinformers/batchv1/job/fake/fake.go
create mode 100644 vendor/github.com/knative/pkg/injection/informers/kubeinformers/batchv1/job/job.go
create mode 100644 vendor/github.com/knative/pkg/injection/informers/kubeinformers/corev1/configmap/configmap.go
create mode 100644 vendor/github.com/knative/pkg/injection/informers/kubeinformers/corev1/configmap/fake/fake.go
create mode 100644 vendor/github.com/knative/pkg/injection/informers/kubeinformers/corev1/endpoints/endpoints.go
create mode 100644 vendor/github.com/knative/pkg/injection/informers/kubeinformers/corev1/endpoints/fake/fake.go
create mode 100644 vendor/github.com/knative/pkg/injection/informers/kubeinformers/corev1/namespace/fake/fake.go
create mode 100644 vendor/github.com/knative/pkg/injection/informers/kubeinformers/corev1/namespace/namespace.go
create mode 100644 vendor/github.com/knative/pkg/injection/informers/kubeinformers/corev1/pod/fake/fake.go
create mode 100644 vendor/github.com/knative/pkg/injection/informers/kubeinformers/corev1/pod/pod.go
create mode 100644 vendor/github.com/knative/pkg/injection/informers/kubeinformers/corev1/secret/fake/fake.go
create mode 100644 vendor/github.com/knative/pkg/injection/informers/kubeinformers/corev1/secret/secret.go
create mode 100644 vendor/github.com/knative/pkg/injection/informers/kubeinformers/corev1/service/fake/fake.go
create mode 100644 vendor/github.com/knative/pkg/injection/informers/kubeinformers/corev1/service/service.go
create mode 100644 vendor/github.com/knative/pkg/injection/informers/kubeinformers/corev1/serviceaccount/fake/fake.go
create mode 100644 vendor/github.com/knative/pkg/injection/informers/kubeinformers/corev1/serviceaccount/service.go
create mode 100644 vendor/github.com/knative/pkg/injection/informers/kubeinformers/factory/factory.go
create mode 100644 vendor/github.com/knative/pkg/injection/informers/kubeinformers/factory/fake/fake.go
create mode 100644 vendor/github.com/knative/pkg/injection/informers/kubeinformers/rbacv1/rolebinding/fake/fake.go
create mode 100644 vendor/github.com/knative/pkg/injection/informers/kubeinformers/rbacv1/rolebinding/rolebinding.go
create mode 100644 vendor/github.com/knative/pkg/injection/interface.go
create mode 100644 vendor/github.com/knative/pkg/injection/sharedmain/main.go
create mode 100644 vendor/github.com/knative/pkg/kmeta/OWNERS
create mode 100644 vendor/github.com/knative/pkg/kmeta/accessor.go
create mode 100644 vendor/github.com/knative/pkg/kmeta/doc.go
create mode 100644 vendor/github.com/knative/pkg/kmeta/labels.go
create mode 100644 vendor/github.com/knative/pkg/kmeta/owner_references.go
create mode 100644 vendor/github.com/knative/pkg/logging/OWNERS
create mode 100644 vendor/github.com/knative/pkg/logging/config.go
create mode 100644 vendor/github.com/knative/pkg/logging/logger.go
create mode 100644 vendor/github.com/knative/pkg/logging/logkey/constants.go
create mode 100644 vendor/github.com/knative/pkg/logging/testing/util.go
create mode 100644 vendor/github.com/knative/pkg/logging/zz_generated.deepcopy.go
create mode 100644 vendor/github.com/knative/pkg/metrics/OWNERS
create mode 100644 vendor/github.com/knative/pkg/metrics/config.go
rename vendor/{k8s.io/api/node/v1beta1 => github.com/knative/pkg/metrics}/doc.go (71%)
create mode 100644 vendor/github.com/knative/pkg/metrics/exporter.go
create mode 100644 vendor/github.com/knative/pkg/metrics/gcp_metadata.go
create mode 100644 vendor/github.com/knative/pkg/metrics/metricskey/constants.go
create mode 100644 vendor/github.com/knative/pkg/metrics/monitored_resources.go
create mode 100644 vendor/github.com/knative/pkg/metrics/prometheus_exporter.go
create mode 100644 vendor/github.com/knative/pkg/metrics/record.go
create mode 100644 vendor/github.com/knative/pkg/metrics/stackdriver_exporter.go
create mode 100644 vendor/github.com/knative/pkg/metrics/testing/config.go
create mode 100644 vendor/github.com/knative/pkg/ptr/doc.go
create mode 100644 vendor/github.com/knative/pkg/ptr/ptr.go
create mode 100644 vendor/github.com/knative/pkg/reconciler/testing/actions.go
create mode 100644 vendor/github.com/knative/pkg/reconciler/testing/clock.go
rename vendor/{k8s.io/code-generator/_examples/MixedCase/listers/example/v1/expansion_generated.go => github.com/knative/pkg/reconciler/testing/context.go} (50%)
create mode 100644 vendor/github.com/knative/pkg/reconciler/testing/events.go
create mode 100644 vendor/github.com/knative/pkg/reconciler/testing/generate_name_reactor.go
create mode 100644 vendor/github.com/knative/pkg/reconciler/testing/hooks.go
create mode 100644
vendor/github.com/knative/pkg/reconciler/testing/reactions.go
create mode 100644 vendor/github.com/knative/pkg/reconciler/testing/sorter.go
create mode 100644 vendor/github.com/knative/pkg/reconciler/testing/stats.go
create mode 100644 vendor/github.com/knative/pkg/reconciler/testing/table.go
rename vendor/{k8s.io/code-generator/_examples/MixedCase/apis/example/v1/zz_generated.defaults.go => github.com/knative/pkg/reconciler/testing/tracker.go} (56%)
create mode 100644 vendor/github.com/knative/pkg/reconciler/testing/util.go
create mode 100644 vendor/github.com/knative/pkg/signals/signal.go
create mode 100644 vendor/github.com/knative/pkg/signals/signal_posix.go
rename vendor/{k8s.io/client-go/kubernetes/typed/coordination/v1/generated_expansion.go => github.com/knative/pkg/signals/signal_windows.go} (81%)
rename vendor/{k8s.io/api/auditregistration/v1alpha1/doc.go => github.com/knative/pkg/system/clock.go} (69%)
create mode 100644 vendor/github.com/knative/pkg/system/names.go
create mode 100644 vendor/github.com/knative/pkg/system/testing/names.go
create mode 100644 vendor/github.com/knative/pkg/test/OWNERS
create mode 100644 vendor/github.com/knative/pkg/test/README.md
create mode 100644 vendor/github.com/knative/pkg/test/cleanup.go
create mode 100644 vendor/github.com/knative/pkg/test/clients.go
create mode 100644 vendor/github.com/knative/pkg/test/crd.go
create mode 100644 vendor/github.com/knative/pkg/test/e2e_flags.go
create mode 100644 vendor/github.com/knative/pkg/test/helpers/data.go
create mode 100644 vendor/github.com/knative/pkg/test/ingress/ingress.go
create mode 100644 vendor/github.com/knative/pkg/test/kube_checks.go
create mode 100644 vendor/github.com/knative/pkg/test/logging/logging.go
create mode 100644 vendor/github.com/knative/pkg/test/monitoring/doc.go
create mode 100644 vendor/github.com/knative/pkg/test/monitoring/monitoring.go
create mode 100755 vendor/github.com/knative/pkg/test/presubmit-tests.sh
create mode 100644 vendor/github.com/knative/pkg/test/request.go
create mode 100644 vendor/github.com/knative/pkg/test/spoof/error_checks.go
create mode 100644 vendor/github.com/knative/pkg/test/spoof/spoof.go
create mode 100644 vendor/github.com/knative/pkg/test/zipkin/doc.go
create mode 100644 vendor/github.com/knative/pkg/test/zipkin/util.go
create mode 100644 vendor/github.com/knative/pkg/testing/doc.go
create mode 100644 vendor/github.com/knative/pkg/testing/inner_default_resource.go
create mode 100644 vendor/github.com/knative/pkg/testing/register.go
create mode 100644 vendor/github.com/knative/pkg/testing/resource.go
create mode 100644 vendor/github.com/knative/pkg/testing/zz_generated.deepcopy.go
rename vendor/{k8s.io/api/node/v1alpha1 => github.com/knative/pkg/tracing/config}/doc.go (74%)
create mode 100644 vendor/github.com/knative/pkg/tracing/config/tracing.go
rename vendor/{k8s.io/apimachinery/pkg/apis/meta/v1/deepcopy.go => github.com/knative/pkg/tracing/config/zz_generated.deepcopy.go} (51%)
create mode 100644 vendor/github.com/knative/pkg/tracing/http.go
create mode 100644 vendor/github.com/knative/pkg/tracing/opencensus.go
create mode 100644 vendor/github.com/knative/pkg/tracing/zipkin.go
create mode 100644 vendor/github.com/knative/pkg/tracker/doc.go
create mode 100644 vendor/github.com/knative/pkg/tracker/enqueue.go
create mode 100644 vendor/github.com/knative/pkg/tracker/interface.go
create mode 100644 vendor/github.com/knative/pkg/version/version.go
create mode 100644 vendor/github.com/knative/pkg/webhook/OWNERS
create mode 100644
vendor/github.com/knative/pkg/webhook/certs.go
create mode 100644 vendor/github.com/knative/pkg/webhook/webhook.go
create mode 100644 vendor/github.com/knative/pkg/websocket/connection.go
create mode 100644 vendor/github.com/knative/pkg/websocket/hijack.go
delete mode 100644 vendor/github.com/knative/test-infra/scripts/README.md
delete mode 100755 vendor/github.com/knative/test-infra/scripts/e2e-tests.sh
delete mode 100755 vendor/github.com/knative/test-infra/scripts/library.sh
delete mode 100644 vendor/github.com/knative/test-infra/scripts/markdown-link-check-config.rc
delete mode 100644 vendor/github.com/knative/test-infra/scripts/markdown-lint-config.rc
delete mode 100755 vendor/github.com/knative/test-infra/scripts/presubmit-tests.sh
delete mode 100755 vendor/github.com/knative/test-infra/scripts/release.sh
delete mode 100644 vendor/github.com/knative/test-infra/tools/dep-collector/README.md
create mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE
create mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE
create mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go
create mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go
create mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go
create mode 100644 vendor/github.com/petar/GoLLRB/AUTHORS
create mode 100644 vendor/github.com/petar/GoLLRB/LICENSE
create mode 100644 vendor/github.com/petar/GoLLRB/llrb/avgvar.go
create mode 100644 vendor/github.com/petar/GoLLRB/llrb/iterator.go
create mode 100644 vendor/github.com/petar/GoLLRB/llrb/llrb-stats.go
create mode 100644 vendor/github.com/petar/GoLLRB/llrb/llrb.go
create mode 100644 vendor/github.com/petar/GoLLRB/llrb/util.go
create mode 100644 vendor/github.com/peterbourgon/diskv/LICENSE
create mode 100644 vendor/github.com/peterbourgon/diskv/compression.go
create mode 100644 vendor/github.com/peterbourgon/diskv/diskv.go
create mode 100644 vendor/github.com/peterbourgon/diskv/index.go
create mode 100644 vendor/github.com/prometheus/client_golang/LICENSE
create mode 100644 vendor/github.com/prometheus/client_golang/NOTICE
create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/build_info.go
create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/build_info_pre_1.12.go
create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/collector.go
create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/counter.go
create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/desc.go
create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/doc.go
create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go
create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/fnv.go
create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/gauge.go
create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/go_collector.go
create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/histogram.go
create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go
create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/labels.go
create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/metric.go
create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/observer.go
create mode 100644
vendor/github.com/prometheus/client_golang/prometheus/process_collector.go
create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go
create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go
create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go
create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go
create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go
create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go
create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/registry.go
create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/summary.go
create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/timer.go
create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/untyped.go
create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/value.go
create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/vec.go
create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/wrap.go
create mode 100644 vendor/github.com/prometheus/client_model/LICENSE
create mode 100644 vendor/github.com/prometheus/client_model/NOTICE
create mode 100644 vendor/github.com/prometheus/client_model/go/metrics.pb.go
create mode 100644 vendor/github.com/prometheus/client_model/ruby/LICENSE
create mode 100644 vendor/github.com/prometheus/common/LICENSE
create mode 100644 vendor/github.com/prometheus/common/NOTICE
create mode 100644 vendor/github.com/prometheus/common/expfmt/decode.go
create mode 100644 vendor/github.com/prometheus/common/expfmt/encode.go
create mode 100644 vendor/github.com/prometheus/common/expfmt/expfmt.go
create mode 100644 vendor/github.com/prometheus/common/expfmt/fuzz.go
create mode 100644 vendor/github.com/prometheus/common/expfmt/text_create.go
create mode 100644 vendor/github.com/prometheus/common/expfmt/text_parse.go
create mode 100644 vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go
create mode 100644 vendor/github.com/prometheus/common/model/alert.go
create mode 100644 vendor/github.com/prometheus/common/model/fingerprinting.go
create mode 100644 vendor/github.com/prometheus/common/model/fnv.go
create mode 100644 vendor/github.com/prometheus/common/model/labels.go
create mode 100644 vendor/github.com/prometheus/common/model/labelset.go
create mode 100644 vendor/github.com/prometheus/common/model/metric.go
create mode 100644 vendor/github.com/prometheus/common/model/model.go
create mode 100644 vendor/github.com/prometheus/common/model/signature.go
create mode 100644 vendor/github.com/prometheus/common/model/silence.go
create mode 100644 vendor/github.com/prometheus/common/model/time.go
create mode 100644 vendor/github.com/prometheus/common/model/value.go
create mode 100644 vendor/github.com/prometheus/procfs/LICENSE
create mode 100644 vendor/github.com/prometheus/procfs/NOTICE
create mode 100644 vendor/github.com/prometheus/procfs/buddyinfo.go
create mode 100644 vendor/github.com/prometheus/procfs/doc.go
create mode 100644 vendor/github.com/prometheus/procfs/fs.go
create mode 100644 vendor/github.com/prometheus/procfs/internal/fs/fs.go
create mode 100644 vendor/github.com/prometheus/procfs/ipvs.go
create mode 100644 vendor/github.com/prometheus/procfs/mdstat.go
create mode 100644
vendor/github.com/prometheus/procfs/mountinfo.go
create mode 100644 vendor/github.com/prometheus/procfs/mountstats.go
create mode 100644 vendor/github.com/prometheus/procfs/net_dev.go
create mode 100644 vendor/github.com/prometheus/procfs/net_unix.go
create mode 100644 vendor/github.com/prometheus/procfs/proc.go
create mode 100644 vendor/github.com/prometheus/procfs/proc_environ.go
create mode 100644 vendor/github.com/prometheus/procfs/proc_io.go
create mode 100644 vendor/github.com/prometheus/procfs/proc_limits.go
create mode 100644 vendor/github.com/prometheus/procfs/proc_ns.go
create mode 100644 vendor/github.com/prometheus/procfs/proc_psi.go
create mode 100644 vendor/github.com/prometheus/procfs/proc_stat.go
create mode 100644 vendor/github.com/prometheus/procfs/proc_status.go
create mode 100644 vendor/github.com/prometheus/procfs/stat.go
create mode 100644 vendor/github.com/prometheus/procfs/xfrm.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pod.go
rename vendor/{k8s.io/code-generator/_examples/MixedCase => github.com/tektoncd/pipeline/pkg/client}/clientset/versioned/clientset.go (70%)
rename vendor/{k8s.io/code-generator/_examples/MixedCase => github.com/tektoncd/pipeline/pkg/client}/clientset/versioned/doc.go (95%)
rename vendor/{k8s.io/code-generator/_examples/MixedCase => github.com/tektoncd/pipeline/pkg/client}/clientset/versioned/scheme/doc.go (95%)
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme/register.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/clustertask.go
rename vendor/{k8s.io/client-go/kubernetes/typed/node => github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline}/v1alpha1/doc.go (95%)
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/generated_expansion.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/pipeline.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/pipeline_client.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/pipelineresource.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/pipelinerun.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/task.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/taskrun.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/injection/client/client.go
create mode 100644 vendor/go.opencensus.io/AUTHORS
create mode 100644 vendor/go.opencensus.io/LICENSE
create mode 100644 vendor/go.opencensus.io/exporter/prometheus/prometheus.go
create mode 100644 vendor/go.opencensus.io/internal/internal.go
create mode 100644 vendor/go.opencensus.io/internal/sanitize.go
create mode 100644 vendor/go.opencensus.io/internal/tagencoding/tagencoding.go
create mode 100644 vendor/go.opencensus.io/internal/traceinternals.go
create mode 100644 vendor/go.opencensus.io/metric/metricdata/doc.go
create mode 100644 vendor/go.opencensus.io/metric/metricdata/exemplar.go
create mode 100644 vendor/go.opencensus.io/metric/metricdata/label.go
create mode 100644 vendor/go.opencensus.io/metric/metricdata/metric.go
create mode 100644
vendor/go.opencensus.io/metric/metricdata/point.go
create mode 100644 vendor/go.opencensus.io/metric/metricdata/type_string.go
create mode 100644 vendor/go.opencensus.io/metric/metricdata/unit.go
create mode 100644 vendor/go.opencensus.io/metric/metricproducer/manager.go
create mode 100644 vendor/go.opencensus.io/metric/metricproducer/producer.go
create mode 100644 vendor/go.opencensus.io/opencensus.go
create mode 100644 vendor/go.opencensus.io/plugin/ocgrpc/client.go
create mode 100644 vendor/go.opencensus.io/plugin/ocgrpc/client_metrics.go
create mode 100644 vendor/go.opencensus.io/plugin/ocgrpc/client_stats_handler.go
create mode 100644 vendor/go.opencensus.io/plugin/ocgrpc/doc.go
create mode 100644 vendor/go.opencensus.io/plugin/ocgrpc/server.go
create mode 100644 vendor/go.opencensus.io/plugin/ocgrpc/server_metrics.go
create mode 100644 vendor/go.opencensus.io/plugin/ocgrpc/server_stats_handler.go
create mode 100644 vendor/go.opencensus.io/plugin/ocgrpc/stats_common.go
create mode 100644 vendor/go.opencensus.io/plugin/ocgrpc/trace_common.go
create mode 100644 vendor/go.opencensus.io/plugin/ochttp/client.go
create mode 100644 vendor/go.opencensus.io/plugin/ochttp/client_stats.go
create mode 100644 vendor/go.opencensus.io/plugin/ochttp/doc.go
create mode 100644 vendor/go.opencensus.io/plugin/ochttp/propagation/b3/b3.go
create mode 100644 vendor/go.opencensus.io/plugin/ochttp/route.go
create mode 100644 vendor/go.opencensus.io/plugin/ochttp/server.go
create mode 100644 vendor/go.opencensus.io/plugin/ochttp/span_annotating_client_trace.go
create mode 100644 vendor/go.opencensus.io/plugin/ochttp/stats.go
create mode 100644 vendor/go.opencensus.io/plugin/ochttp/trace.go
create mode 100644 vendor/go.opencensus.io/plugin/ochttp/wrapped_body.go
create mode 100644 vendor/go.opencensus.io/resource/resource.go
create mode 100644 vendor/go.opencensus.io/stats/doc.go
create mode 100644 vendor/go.opencensus.io/stats/internal/record.go
create mode 100644 vendor/go.opencensus.io/stats/measure.go
create mode 100644 vendor/go.opencensus.io/stats/measure_float64.go
create mode 100644 vendor/go.opencensus.io/stats/measure_int64.go
create mode 100644 vendor/go.opencensus.io/stats/record.go
create mode 100644 vendor/go.opencensus.io/stats/units.go
create mode 100644 vendor/go.opencensus.io/stats/view/aggregation.go
create mode 100644 vendor/go.opencensus.io/stats/view/aggregation_data.go
create mode 100644 vendor/go.opencensus.io/stats/view/collector.go
create mode 100644 vendor/go.opencensus.io/stats/view/doc.go
create mode 100644 vendor/go.opencensus.io/stats/view/export.go
create mode 100644 vendor/go.opencensus.io/stats/view/view.go
create mode 100644 vendor/go.opencensus.io/stats/view/view_to_metric.go
create mode 100644 vendor/go.opencensus.io/stats/view/worker.go
create mode 100644 vendor/go.opencensus.io/stats/view/worker_commands.go
create mode 100644 vendor/go.opencensus.io/tag/context.go
create mode 100644 vendor/go.opencensus.io/tag/doc.go
create mode 100644 vendor/go.opencensus.io/tag/key.go
create mode 100644 vendor/go.opencensus.io/tag/map.go
create mode 100644 vendor/go.opencensus.io/tag/map_codec.go
create mode 100644 vendor/go.opencensus.io/tag/profile_19.go
create mode 100644 vendor/go.opencensus.io/tag/profile_not19.go
create mode 100644 vendor/go.opencensus.io/tag/validate.go
create mode 100644 vendor/go.opencensus.io/trace/basetypes.go
create mode 100644 vendor/go.opencensus.io/trace/config.go
create mode 100644 vendor/go.opencensus.io/trace/doc.go
create mode 100644
vendor/go.opencensus.io/trace/evictedqueue.go
create mode 100644 vendor/go.opencensus.io/trace/export.go
create mode 100644 vendor/go.opencensus.io/trace/internal/internal.go
create mode 100644 vendor/go.opencensus.io/trace/lrumap.go
create mode 100644 vendor/go.opencensus.io/trace/propagation/propagation.go
create mode 100644 vendor/go.opencensus.io/trace/sampling.go
create mode 100644 vendor/go.opencensus.io/trace/spanbucket.go
create mode 100644 vendor/go.opencensus.io/trace/spanstore.go
create mode 100644 vendor/go.opencensus.io/trace/status_codes.go
create mode 100644 vendor/go.opencensus.io/trace/trace.go
create mode 100644 vendor/go.opencensus.io/trace/trace_go11.go
create mode 100644 vendor/go.opencensus.io/trace/trace_nongo11.go
create mode 100644 vendor/go.opencensus.io/trace/tracestate/tracestate.go
create mode 100644 vendor/go.uber.org/atomic/LICENSE.txt
create mode 100644 vendor/go.uber.org/atomic/atomic.go
create mode 100644 vendor/go.uber.org/atomic/error.go
create mode 100644 vendor/go.uber.org/atomic/string.go
create mode 100644 vendor/go.uber.org/multierr/LICENSE.txt
create mode 100644 vendor/go.uber.org/multierr/error.go
create mode 100644 vendor/go.uber.org/zap/LICENSE.txt
create mode 100644 vendor/go.uber.org/zap/array.go
create mode 100644 vendor/go.uber.org/zap/buffer/buffer.go
create mode 100644 vendor/go.uber.org/zap/buffer/pool.go
create mode 100644 vendor/go.uber.org/zap/config.go
create mode 100644 vendor/go.uber.org/zap/doc.go
create mode 100644 vendor/go.uber.org/zap/encoder.go
create mode 100644 vendor/go.uber.org/zap/error.go
create mode 100644 vendor/go.uber.org/zap/field.go
create mode 100644 vendor/go.uber.org/zap/flag.go
create mode 100644 vendor/go.uber.org/zap/global.go
create mode 100644 vendor/go.uber.org/zap/http_handler.go
create mode 100644 vendor/go.uber.org/zap/internal/bufferpool/bufferpool.go
create mode 100644 vendor/go.uber.org/zap/internal/color/color.go
create mode 100644 vendor/go.uber.org/zap/internal/exit/exit.go
create mode 100644 vendor/go.uber.org/zap/level.go
create mode 100644 vendor/go.uber.org/zap/logger.go
create mode 100644 vendor/go.uber.org/zap/options.go
create mode 100644 vendor/go.uber.org/zap/sink.go
create mode 100644 vendor/go.uber.org/zap/stacktrace.go
create mode 100644 vendor/go.uber.org/zap/sugar.go
create mode 100644 vendor/go.uber.org/zap/time.go
create mode 100644 vendor/go.uber.org/zap/writer.go
create mode 100644 vendor/go.uber.org/zap/zapcore/console_encoder.go
create mode 100644 vendor/go.uber.org/zap/zapcore/core.go
create mode 100644 vendor/go.uber.org/zap/zapcore/doc.go
create mode 100644 vendor/go.uber.org/zap/zapcore/encoder.go
create mode 100644 vendor/go.uber.org/zap/zapcore/entry.go
create mode 100644 vendor/go.uber.org/zap/zapcore/error.go
create mode 100644 vendor/go.uber.org/zap/zapcore/field.go
create mode 100644 vendor/go.uber.org/zap/zapcore/hook.go
create mode 100644 vendor/go.uber.org/zap/zapcore/json_encoder.go
create mode 100644 vendor/go.uber.org/zap/zapcore/level.go
create mode 100644 vendor/go.uber.org/zap/zapcore/level_strings.go
create mode 100644 vendor/go.uber.org/zap/zapcore/marshaler.go
create mode 100644 vendor/go.uber.org/zap/zapcore/memory_encoder.go
create mode 100644 vendor/go.uber.org/zap/zapcore/sampler.go
create mode 100644 vendor/go.uber.org/zap/zapcore/tee.go
create mode 100644 vendor/go.uber.org/zap/zapcore/write_syncer.go
create mode 100644 vendor/golang.org/x/net/internal/timeseries/timeseries.go
create mode 100644 vendor/golang.org/x/net/trace/events.go
create mode
100644 vendor/golang.org/x/net/trace/histogram.go
create mode 100644 vendor/golang.org/x/net/trace/trace.go
create mode 100644 vendor/golang.org/x/oauth2/google/appengine.go
create mode 100644 vendor/golang.org/x/oauth2/google/appengine_gen1.go
create mode 100644 vendor/golang.org/x/oauth2/google/appengine_gen2_flex.go
create mode 100644 vendor/golang.org/x/oauth2/google/default.go
create mode 100644 vendor/golang.org/x/oauth2/google/doc.go
create mode 100644 vendor/golang.org/x/oauth2/google/google.go
create mode 100644 vendor/golang.org/x/oauth2/google/jwt.go
create mode 100644 vendor/golang.org/x/oauth2/google/sdk.go
create mode 100644 vendor/golang.org/x/oauth2/jws/jws.go
create mode 100644 vendor/golang.org/x/oauth2/jwt/jwt.go
create mode 100644 vendor/golang.org/x/sync/AUTHORS
create mode 100644 vendor/golang.org/x/sync/CONTRIBUTORS
create mode 100644 vendor/golang.org/x/sync/LICENSE
rename vendor/{k8s.io/utils/third_party/forked/golang => golang.org/x/sync}/PATENTS (100%)
create mode 100644 vendor/golang.org/x/sync/semaphore/semaphore.go
delete mode 120000 vendor/gonum.org/v1/gonum/.travis/deps.d/linux/01-deps.sh
delete mode 120000 vendor/gonum.org/v1/gonum/.travis/deps.d/osx/nothing.sh
delete mode 120000 vendor/gonum.org/v1/gonum/.travis/deps.d/windows/nothing.sh
delete mode 120000 vendor/gonum.org/v1/gonum/.travis/run.d/linux/01-check-copyright.sh
delete mode 120000 vendor/gonum.org/v1/gonum/.travis/run.d/linux/02-check-imports.sh
delete mode 120000 vendor/gonum.org/v1/gonum/.travis/run.d/linux/03-check-formatting.sh
delete mode 120000 vendor/gonum.org/v1/gonum/.travis/run.d/linux/04-test.sh
delete mode 120000 vendor/gonum.org/v1/gonum/.travis/run.d/linux/05-test-coverage.sh
delete mode 120000 vendor/gonum.org/v1/gonum/.travis/run.d/linux/06-check-generate.sh
delete mode 120000 vendor/gonum.org/v1/gonum/.travis/run.d/osx/01-test.sh
delete mode 100644 vendor/gonum.org/v1/gonum/AUTHORS
delete mode 100644 vendor/gonum.org/v1/gonum/CONTRIBUTORS
delete mode 100644 vendor/gonum.org/v1/gonum/LICENSE
delete mode 100644 vendor/gonum.org/v1/gonum/blas/blas.go
delete mode 100644 vendor/gonum.org/v1/gonum/blas/blas64/blas64.go
delete mode 100644 vendor/gonum.org/v1/gonum/blas/blas64/conv.go
delete mode 100644 vendor/gonum.org/v1/gonum/blas/blas64/conv_symmetric.go
delete mode 100644 vendor/gonum.org/v1/gonum/blas/blas64/doc.go
delete mode 100644 vendor/gonum.org/v1/gonum/blas/cblas128/cblas128.go
delete mode 100644 vendor/gonum.org/v1/gonum/blas/cblas128/conv.go
delete mode 100644 vendor/gonum.org/v1/gonum/blas/cblas128/conv_hermitian.go
delete mode 100644 vendor/gonum.org/v1/gonum/blas/cblas128/conv_symmetric.go
delete mode 100644 vendor/gonum.org/v1/gonum/blas/cblas128/doc.go
delete mode 100644 vendor/gonum.org/v1/gonum/blas/doc.go
delete mode 100644 vendor/gonum.org/v1/gonum/blas/gonum/dgemm.go
delete mode 100644 vendor/gonum.org/v1/gonum/blas/gonum/doc.go
delete mode 100644 vendor/gonum.org/v1/gonum/blas/gonum/errors.go
delete mode 100644 vendor/gonum.org/v1/gonum/blas/gonum/gemv.go
delete mode 100644 vendor/gonum.org/v1/gonum/blas/gonum/gonum.go
delete mode 100644 vendor/gonum.org/v1/gonum/blas/gonum/level1cmplx128.go
delete mode 100644 vendor/gonum.org/v1/gonum/blas/gonum/level1cmplx64.go
delete mode 100644 vendor/gonum.org/v1/gonum/blas/gonum/level1float32.go
delete mode 100644 vendor/gonum.org/v1/gonum/blas/gonum/level1float32_dsdot.go
delete mode 100644 vendor/gonum.org/v1/gonum/blas/gonum/level1float32_sdot.go
delete mode 100644
vendor/gonum.org/v1/gonum/blas/gonum/level1float32_sdsdot.go
delete mode 100644 vendor/gonum.org/v1/gonum/blas/gonum/level1float64.go
delete mode 100644 vendor/gonum.org/v1/gonum/blas/gonum/level1float64_ddot.go
delete mode 100644 vendor/gonum.org/v1/gonum/blas/gonum/level2cmplx128.go
delete mode 100644 vendor/gonum.org/v1/gonum/blas/gonum/level2cmplx64.go
delete mode 100644 vendor/gonum.org/v1/gonum/blas/gonum/level2float32.go
delete mode 100644 vendor/gonum.org/v1/gonum/blas/gonum/level2float64.go
delete mode 100644 vendor/gonum.org/v1/gonum/blas/gonum/level3cmplx128.go
delete mode 100644 vendor/gonum.org/v1/gonum/blas/gonum/level3cmplx64.go
delete mode 100644 vendor/gonum.org/v1/gonum/blas/gonum/level3float32.go
delete mode 100644 vendor/gonum.org/v1/gonum/blas/gonum/level3float64.go
delete mode 100644 vendor/gonum.org/v1/gonum/blas/gonum/sgemm.go
delete mode 100644 vendor/gonum.org/v1/gonum/floats/doc.go
delete mode 100644 vendor/gonum.org/v1/gonum/floats/floats.go
delete mode 100644 vendor/gonum.org/v1/gonum/graph/doc.go
delete mode 100644 vendor/gonum.org/v1/gonum/graph/formats/cytoscapejs/testdata/LICENSE
delete mode 100644 vendor/gonum.org/v1/gonum/graph/formats/sigmajs/testdata/LICENSE.txt
delete mode 100644 vendor/gonum.org/v1/gonum/graph/graph.go
delete mode 100644 vendor/gonum.org/v1/gonum/graph/internal/linear/doc.go
delete mode 100644 vendor/gonum.org/v1/gonum/graph/internal/linear/linear.go
delete mode 100644 vendor/gonum.org/v1/gonum/graph/internal/ordered/doc.go
delete mode 100644 vendor/gonum.org/v1/gonum/graph/internal/ordered/sort.go
delete mode 100644 vendor/gonum.org/v1/gonum/graph/internal/set/doc.go
delete mode 100644 vendor/gonum.org/v1/gonum/graph/internal/set/same.go
delete mode 100644 vendor/gonum.org/v1/gonum/graph/internal/set/same_appengine.go
delete mode 100644 vendor/gonum.org/v1/gonum/graph/internal/set/set.go
delete mode 100644 vendor/gonum.org/v1/gonum/graph/internal/uid/uid.go
delete mode 100644 vendor/gonum.org/v1/gonum/graph/iterator/doc.go
delete mode 100644 vendor/gonum.org/v1/gonum/graph/iterator/edges.go
delete mode 100644 vendor/gonum.org/v1/gonum/graph/iterator/lines.go
delete mode 100644 vendor/gonum.org/v1/gonum/graph/iterator/nodes.go
delete mode 100644 vendor/gonum.org/v1/gonum/graph/multigraph.go
delete mode 100644 vendor/gonum.org/v1/gonum/graph/nodes_edges.go
delete mode 100644 vendor/gonum.org/v1/gonum/graph/simple/dense_directed_matrix.go
delete mode 100644 vendor/gonum.org/v1/gonum/graph/simple/dense_undirected_matrix.go
delete mode 100644 vendor/gonum.org/v1/gonum/graph/simple/directed.go
delete mode 100644 vendor/gonum.org/v1/gonum/graph/simple/doc.go
delete mode 100644 vendor/gonum.org/v1/gonum/graph/simple/simple.go
delete mode 100644 vendor/gonum.org/v1/gonum/graph/simple/undirected.go
delete mode 100644 vendor/gonum.org/v1/gonum/graph/simple/weighted_directed.go
delete mode 100644 vendor/gonum.org/v1/gonum/graph/simple/weighted_undirected.go
delete mode 100644 vendor/gonum.org/v1/gonum/graph/topo/bron_kerbosch.go
delete mode 100644 vendor/gonum.org/v1/gonum/graph/topo/clique_graph.go
delete mode 100644 vendor/gonum.org/v1/gonum/graph/topo/doc.go
delete mode 100644 vendor/gonum.org/v1/gonum/graph/topo/johnson_cycles.go
delete mode 100644 vendor/gonum.org/v1/gonum/graph/topo/non_tomita_choice.go
delete mode 100644 vendor/gonum.org/v1/gonum/graph/topo/paton_cycles.go
delete mode 100644 vendor/gonum.org/v1/gonum/graph/topo/tarjan.go
delete mode 100644 vendor/gonum.org/v1/gonum/graph/topo/tomita_choice.go
delete mode 100644
vendor/gonum.org/v1/gonum/graph/topo/topo.go
delete mode 100644 vendor/gonum.org/v1/gonum/graph/traverse/doc.go
delete mode 100644 vendor/gonum.org/v1/gonum/graph/traverse/traverse.go
delete mode 100644 vendor/gonum.org/v1/gonum/graph/undirect.go
delete mode 100644 vendor/gonum.org/v1/gonum/internal/asm/c128/axpyinc_amd64.s
delete mode 100644 vendor/gonum.org/v1/gonum/internal/asm/c128/axpyincto_amd64.s
delete mode 100644 vendor/gonum.org/v1/gonum/internal/asm/c128/axpyunitary_amd64.s
delete mode 100644 vendor/gonum.org/v1/gonum/internal/asm/c128/axpyunitaryto_amd64.s
delete mode 100644 vendor/gonum.org/v1/gonum/internal/asm/c128/doc.go
delete mode 100644 vendor/gonum.org/v1/gonum/internal/asm/c128/dotcinc_amd64.s
delete mode 100644 vendor/gonum.org/v1/gonum/internal/asm/c128/dotcunitary_amd64.s
delete mode 100644 vendor/gonum.org/v1/gonum/internal/asm/c128/dotuinc_amd64.s
delete mode 100644 vendor/gonum.org/v1/gonum/internal/asm/c128/dotuunitary_amd64.s
delete mode 100644 vendor/gonum.org/v1/gonum/internal/asm/c128/dscalinc_amd64.s
delete mode 100644 vendor/gonum.org/v1/gonum/internal/asm/c128/dscalunitary_amd64.s
delete mode 100644 vendor/gonum.org/v1/gonum/internal/asm/c128/scal.go
delete mode 100644 vendor/gonum.org/v1/gonum/internal/asm/c128/scalUnitary_amd64.s
delete mode 100644 vendor/gonum.org/v1/gonum/internal/asm/c128/scalinc_amd64.s
delete mode 100644 vendor/gonum.org/v1/gonum/internal/asm/c128/stubs_amd64.go
delete mode 100644 vendor/gonum.org/v1/gonum/internal/asm/c128/stubs_noasm.go
delete mode 100644 vendor/gonum.org/v1/gonum/internal/asm/c64/axpyinc_amd64.s
delete mode 100644 vendor/gonum.org/v1/gonum/internal/asm/c64/axpyincto_amd64.s
delete mode 100644 vendor/gonum.org/v1/gonum/internal/asm/c64/axpyunitary_amd64.s
delete mode 100644 vendor/gonum.org/v1/gonum/internal/asm/c64/axpyunitaryto_amd64.s
delete mode 100644 vendor/gonum.org/v1/gonum/internal/asm/c64/conj.go
delete mode 100644 vendor/gonum.org/v1/gonum/internal/asm/c64/doc.go
delete mode 100644 vendor/gonum.org/v1/gonum/internal/asm/c64/dotcinc_amd64.s
delete mode 100644 vendor/gonum.org/v1/gonum/internal/asm/c64/dotcunitary_amd64.s
delete mode 100644 vendor/gonum.org/v1/gonum/internal/asm/c64/dotuinc_amd64.s
delete mode 100644 vendor/gonum.org/v1/gonum/internal/asm/c64/dotuunitary_amd64.s
delete mode 100644 vendor/gonum.org/v1/gonum/internal/asm/c64/scal.go
delete mode 100644 vendor/gonum.org/v1/gonum/internal/asm/c64/stubs_amd64.go
delete mode 100644 vendor/gonum.org/v1/gonum/internal/asm/c64/stubs_noasm.go
delete mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f32/axpyinc_amd64.s
delete mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f32/axpyincto_amd64.s
delete mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f32/axpyunitary_amd64.s
delete mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f32/axpyunitaryto_amd64.s
delete mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f32/ddotinc_amd64.s
delete mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f32/ddotunitary_amd64.s
delete mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f32/doc.go
delete mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f32/dotinc_amd64.s
delete mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f32/dotunitary_amd64.s
delete mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f32/ge_amd64.go
delete mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f32/ge_amd64.s
delete mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f32/ge_noasm.go
delete mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f32/scal.go
delete mode 100644
vendor/gonum.org/v1/gonum/internal/asm/f32/stubs_amd64.go
delete mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f32/stubs_noasm.go
delete mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/abssum_amd64.s
delete mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/abssuminc_amd64.s
delete mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/add_amd64.s
delete mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/addconst_amd64.s
delete mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/axpy.go
delete mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/axpyinc_amd64.s
delete mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/axpyincto_amd64.s
delete mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/axpyunitary_amd64.s
delete mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/axpyunitaryto_amd64.s
delete mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/cumprod_amd64.s
delete mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/cumsum_amd64.s
delete mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/div_amd64.s
delete mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/divto_amd64.s
delete mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/doc.go
delete mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/dot.go
delete mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/dot_amd64.s
delete mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/ge_amd64.go
delete mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/ge_noasm.go
delete mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/gemvN_amd64.s
delete mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/gemvT_amd64.s
delete mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/ger_amd64.s
delete mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/l1norm_amd64.s
delete mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/linfnorm_amd64.s
delete mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/scal.go
delete mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/scalinc_amd64.s
delete mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/scalincto_amd64.s
delete mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/scalunitary_amd64.s
delete mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/scalunitaryto_amd64.s
delete mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/stubs_amd64.go
delete mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/stubs_noasm.go
delete mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/sum_amd64.s
delete mode 100644 vendor/gonum.org/v1/gonum/internal/cmplx64/abs.go
delete mode 100644 vendor/gonum.org/v1/gonum/internal/cmplx64/conj.go
delete mode 100644 vendor/gonum.org/v1/gonum/internal/cmplx64/doc.go
delete mode 100644 vendor/gonum.org/v1/gonum/internal/cmplx64/isinf.go
delete mode 100644 vendor/gonum.org/v1/gonum/internal/cmplx64/isnan.go
delete mode 100644 vendor/gonum.org/v1/gonum/internal/cmplx64/sqrt.go
delete mode 100644 vendor/gonum.org/v1/gonum/internal/math32/doc.go
delete mode 100644 vendor/gonum.org/v1/gonum/internal/math32/math.go
delete mode 100644 vendor/gonum.org/v1/gonum/internal/math32/signbit.go
delete mode 100644 vendor/gonum.org/v1/gonum/internal/math32/sqrt.go
delete mode 100644 vendor/gonum.org/v1/gonum/internal/math32/sqrt_amd64.go
delete mode 100644 vendor/gonum.org/v1/gonum/internal/math32/sqrt_amd64.s
delete mode 100644 vendor/gonum.org/v1/gonum/lapack/doc.go
delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dbdsqr.go
delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dgebak.go
delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dgebal.go
delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dgebd2.go
delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dgebrd.go
delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dgecon.go
delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dgeev.go
delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dgehd2.go
delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dgehrd.go
delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dgelq2.go
delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dgelqf.go
delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dgels.go
delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dgeql2.go
delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dgeqp3.go
delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dgeqr2.go
delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dgeqrf.go
delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dgerq2.go
delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dgerqf.go
delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dgesvd.go
delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dgetf2.go
delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dgetrf.go
delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dgetri.go
delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dgetrs.go
delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dggsvd3.go
delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dggsvp3.go
delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dhseqr.go
delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlabrd.go
delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlacn2.go
delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlacpy.go
delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlae2.go
delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlaev2.go
delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlaexc.go
delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlags2.go
delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlahqr.go
delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlahr2.go
delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlaln2.go
delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlange.go
delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlanst.go
delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlansy.go
delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlantr.go
delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlanv2.go
delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlapll.go
delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlapmt.go
delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlapy2.go
delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlaqp2.go
delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlaqps.go
delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlaqr04.go
delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlaqr1.go
delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlaqr23.go
delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlaqr5.go
delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlarf.go
delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlarfb.go
delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlarfg.go
delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlarft.go
delete mode 100644
 delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlarfx.go
 delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlartg.go
 delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlas2.go
 delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlascl.go
 delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlaset.go
 delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlasq1.go
 delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlasq2.go
 delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlasq3.go
 delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlasq4.go
 delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlasq5.go
 delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlasq6.go
 delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlasr.go
 delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlasrt.go
 delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlassq.go
 delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlasv2.go
 delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlaswp.go
 delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlasy2.go
 delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlatrd.go
 delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlatrs.go
 delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlauu2.go
 delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlauum.go
 delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/doc.go
 delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dorg2l.go
 delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dorg2r.go
 delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dorgbr.go
 delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dorghr.go
 delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dorgl2.go
 delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dorglq.go
 delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dorgql.go
 delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dorgqr.go
 delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dorgtr.go
 delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dorm2r.go
 delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dormbr.go
 delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dormhr.go
 delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dorml2.go
 delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dormlq.go
 delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dormqr.go
 delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dormr2.go
 delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dpbtf2.go
 delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dpbtrf.go
 delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dpbtrs.go
 delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dpocon.go
 delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dpotf2.go
 delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dpotrf.go
 delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dpotri.go
 delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dpotrs.go
 delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/drscl.go
 delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dsteqr.go
 delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dsterf.go
 delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dsyev.go
 delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dsytd2.go
 delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dsytrd.go
 delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dtgsja.go
 delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dtrcon.go
 delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dtrevc3.go
 delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dtrexc.go
 delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dtrti2.go
 delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dtrtri.go
 delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dtrtrs.go
 delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/errors.go
 delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/iladlc.go
 delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/iladlr.go
 delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/ilaenv.go
 delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/iparmq.go
 delete mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/lapack.go
 delete mode 100644 vendor/gonum.org/v1/gonum/lapack/lapack.go
 delete mode 100644 vendor/gonum.org/v1/gonum/lapack/lapack64/doc.go
 delete mode 100644 vendor/gonum.org/v1/gonum/lapack/lapack64/lapack64.go
 delete mode 100644 vendor/gonum.org/v1/gonum/mat/band.go
 delete mode 100644 vendor/gonum.org/v1/gonum/mat/cdense.go
 delete mode 100644 vendor/gonum.org/v1/gonum/mat/cholesky.go
 delete mode 100644 vendor/gonum.org/v1/gonum/mat/cmatrix.go
 delete mode 100644 vendor/gonum.org/v1/gonum/mat/consts.go
 delete mode 100644 vendor/gonum.org/v1/gonum/mat/dense.go
 delete mode 100644 vendor/gonum.org/v1/gonum/mat/dense_arithmetic.go
 delete mode 100644 vendor/gonum.org/v1/gonum/mat/diagonal.go
 delete mode 100644 vendor/gonum.org/v1/gonum/mat/doc.go
 delete mode 100644 vendor/gonum.org/v1/gonum/mat/eigen.go
 delete mode 100644 vendor/gonum.org/v1/gonum/mat/errors.go
 delete mode 100644 vendor/gonum.org/v1/gonum/mat/format.go
 delete mode 100644 vendor/gonum.org/v1/gonum/mat/gsvd.go
 delete mode 100644 vendor/gonum.org/v1/gonum/mat/hogsvd.go
 delete mode 100644 vendor/gonum.org/v1/gonum/mat/index_bound_checks.go
 delete mode 100644 vendor/gonum.org/v1/gonum/mat/index_no_bound_checks.go
 delete mode 100644 vendor/gonum.org/v1/gonum/mat/inner.go
 delete mode 100644 vendor/gonum.org/v1/gonum/mat/io.go
 delete mode 100644 vendor/gonum.org/v1/gonum/mat/lq.go
 delete mode 100644 vendor/gonum.org/v1/gonum/mat/lu.go
 delete mode 100644 vendor/gonum.org/v1/gonum/mat/matrix.go
 delete mode 100644 vendor/gonum.org/v1/gonum/mat/offset.go
 delete mode 100644 vendor/gonum.org/v1/gonum/mat/offset_appengine.go
 delete mode 100644 vendor/gonum.org/v1/gonum/mat/pool.go
 delete mode 100644 vendor/gonum.org/v1/gonum/mat/product.go
 delete mode 100644 vendor/gonum.org/v1/gonum/mat/qr.go
 delete mode 100644 vendor/gonum.org/v1/gonum/mat/shadow.go
 delete mode 100644 vendor/gonum.org/v1/gonum/mat/solve.go
 delete mode 100644 vendor/gonum.org/v1/gonum/mat/svd.go
 delete mode 100644 vendor/gonum.org/v1/gonum/mat/symband.go
 delete mode 100644 vendor/gonum.org/v1/gonum/mat/symmetric.go
 delete mode 100644 vendor/gonum.org/v1/gonum/mat/triangular.go
 delete mode 100644 vendor/gonum.org/v1/gonum/mat/triband.go
 delete mode 100644 vendor/gonum.org/v1/gonum/mat/vector.go
 create mode 100644 vendor/google.golang.org/api/AUTHORS
 create mode 100644 vendor/google.golang.org/api/CONTRIBUTORS
 create mode 100644 vendor/google.golang.org/api/LICENSE
 create mode 100644 vendor/google.golang.org/api/googleapi/internal/uritemplates/LICENSE
 create mode 100644 vendor/google.golang.org/api/googleapi/transport/apikey.go
 create mode 100644 vendor/google.golang.org/api/internal/creds.go
 create mode 100644 vendor/google.golang.org/api/internal/pool.go
 create mode 100644 vendor/google.golang.org/api/internal/settings.go
 create mode 100644 vendor/google.golang.org/api/iterator/iterator.go
 create mode 100644 vendor/google.golang.org/api/option/credentials_go19.go
 create mode 100644 vendor/google.golang.org/api/option/credentials_notgo19.go
 create mode 100644 vendor/google.golang.org/api/option/option.go
 create mode 100644 vendor/google.golang.org/api/support/bundler/bundler.go
 create mode 100644 vendor/google.golang.org/api/transport/dial.go
 create mode 100644 vendor/google.golang.org/api/transport/doc.go
 create mode 100644 vendor/google.golang.org/api/transport/go19.go
 create mode 100644 vendor/google.golang.org/api/transport/grpc/dial.go
 create mode 100644 vendor/google.golang.org/api/transport/grpc/dial_appengine.go
 create mode 100644 vendor/google.golang.org/api/transport/grpc/dial_socketopt.go
 create mode 100644 vendor/google.golang.org/api/transport/http/dial.go
 create mode 100644 vendor/google.golang.org/api/transport/http/dial_appengine.go
 create mode 100644 vendor/google.golang.org/api/transport/http/internal/propagation/http.go
 create mode 100644 vendor/google.golang.org/api/transport/not_go19.go
 create mode 100644 vendor/google.golang.org/appengine/appengine.go
 create mode 100644 vendor/google.golang.org/appengine/appengine_vm.go
 create mode 100644 vendor/google.golang.org/appengine/errors.go
 create mode 100644 vendor/google.golang.org/appengine/identity.go
 create mode 100644 vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go
 create mode 100644 vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go
 create mode 100644 vendor/google.golang.org/appengine/internal/socket/socket_service.pb.go
 create mode 100644 vendor/google.golang.org/appengine/namespace.go
 create mode 100644 vendor/google.golang.org/appengine/socket/doc.go
 create mode 100644 vendor/google.golang.org/appengine/socket/socket_classic.go
 create mode 100644 vendor/google.golang.org/appengine/socket/socket_vm.go
 create mode 100644 vendor/google.golang.org/appengine/timeout.go
 create mode 100644 vendor/google.golang.org/genproto/LICENSE
 create mode 100644 vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go
 create mode 100644 vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go
 create mode 100644 vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go
 create mode 100644 vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go
 create mode 100644 vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go
 create mode 100644 vendor/google.golang.org/genproto/googleapis/api/distribution/distribution.pb.go
 create mode 100644 vendor/google.golang.org/genproto/googleapis/api/label/label.pb.go
 create mode 100644 vendor/google.golang.org/genproto/googleapis/api/metric/metric.pb.go
 create mode 100644 vendor/google.golang.org/genproto/googleapis/api/monitoredres/monitored_resource.pb.go
 create mode 100644 vendor/google.golang.org/genproto/googleapis/devtools/cloudtrace/v2/trace.pb.go
 create mode 100644 vendor/google.golang.org/genproto/googleapis/devtools/cloudtrace/v2/tracing.pb.go
 create mode 100644 vendor/google.golang.org/genproto/googleapis/monitoring/v3/alert.pb.go
 create mode 100644 vendor/google.golang.org/genproto/googleapis/monitoring/v3/alert_service.pb.go
 create mode 100644 vendor/google.golang.org/genproto/googleapis/monitoring/v3/common.pb.go
 create mode 100644 vendor/google.golang.org/genproto/googleapis/monitoring/v3/dropped_labels.pb.go
 create mode 100644 vendor/google.golang.org/genproto/googleapis/monitoring/v3/group.pb.go
 create mode 100644 vendor/google.golang.org/genproto/googleapis/monitoring/v3/group_service.pb.go
 create mode 100644 vendor/google.golang.org/genproto/googleapis/monitoring/v3/metric.pb.go
 create mode 100644 vendor/google.golang.org/genproto/googleapis/monitoring/v3/metric_service.pb.go
 create mode 100644 vendor/google.golang.org/genproto/googleapis/monitoring/v3/mutation_record.pb.go
 create mode 100644 vendor/google.golang.org/genproto/googleapis/monitoring/v3/notification.pb.go
 create mode 100644 vendor/google.golang.org/genproto/googleapis/monitoring/v3/notification_service.pb.go
 create mode 100644 vendor/google.golang.org/genproto/googleapis/monitoring/v3/span_context.pb.go
 create mode 100644 vendor/google.golang.org/genproto/googleapis/monitoring/v3/uptime.pb.go
 create mode 100644 vendor/google.golang.org/genproto/googleapis/monitoring/v3/uptime_service.pb.go
 create mode 100644 vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go
 create mode 100644 vendor/google.golang.org/genproto/protobuf/field_mask/field_mask.pb.go
 create mode 100644 vendor/google.golang.org/grpc/AUTHORS
 create mode 100644 vendor/google.golang.org/grpc/LICENSE
 create mode 100644 vendor/google.golang.org/grpc/backoff.go
 create mode 100644 vendor/google.golang.org/grpc/balancer.go
 create mode 100644 vendor/google.golang.org/grpc/balancer/balancer.go
 create mode 100644 vendor/google.golang.org/grpc/balancer/base/balancer.go
 create mode 100644 vendor/google.golang.org/grpc/balancer/base/base.go
 create mode 100644 vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go
 create mode 100644 vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go
 create mode 100644 vendor/google.golang.org/grpc/balancer/grpclb/grpclb_config.go
 create mode 100644 vendor/google.golang.org/grpc/balancer/grpclb/grpclb_picker.go
 create mode 100644 vendor/google.golang.org/grpc/balancer/grpclb/grpclb_remote_balancer.go
 create mode 100644 vendor/google.golang.org/grpc/balancer/grpclb/grpclb_util.go
 create mode 100644 vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go
 create mode 100644 vendor/google.golang.org/grpc/balancer_conn_wrappers.go
 create mode 100644 vendor/google.golang.org/grpc/balancer_v1_wrapper.go
 create mode 100644 vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go
 create mode 100644 vendor/google.golang.org/grpc/call.go
 create mode 100644 vendor/google.golang.org/grpc/clientconn.go
 create mode 100644 vendor/google.golang.org/grpc/codec.go
 create mode 100644 vendor/google.golang.org/grpc/codes/code_string.go
 create mode 100644 vendor/google.golang.org/grpc/codes/codes.go
 create mode 100644 vendor/google.golang.org/grpc/connectivity/connectivity.go
 create mode 100644 vendor/google.golang.org/grpc/credentials/alts/alts.go
 create mode 100644 vendor/google.golang.org/grpc/credentials/alts/internal/authinfo/authinfo.go
 create mode 100644 vendor/google.golang.org/grpc/credentials/alts/internal/common.go
 create mode 100644 vendor/google.golang.org/grpc/credentials/alts/internal/conn/aeadrekey.go
 create mode 100644 vendor/google.golang.org/grpc/credentials/alts/internal/conn/aes128gcm.go
 create mode 100644 vendor/google.golang.org/grpc/credentials/alts/internal/conn/aes128gcmrekey.go
 create mode 100644 vendor/google.golang.org/grpc/credentials/alts/internal/conn/common.go
 create mode 100644 vendor/google.golang.org/grpc/credentials/alts/internal/conn/counter.go
 create mode 100644 vendor/google.golang.org/grpc/credentials/alts/internal/conn/record.go
 create mode 100644 vendor/google.golang.org/grpc/credentials/alts/internal/conn/utils.go
 create mode 100644 vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/handshaker.go
 create mode 100644 vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/service/service.go
 create mode 100644 vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go
 create mode 100644 vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go
 create mode 100644 vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go
 create mode 100644 vendor/google.golang.org/grpc/credentials/alts/utils.go
 create mode 100644 vendor/google.golang.org/grpc/credentials/credentials.go
 create mode 100644 vendor/google.golang.org/grpc/credentials/google/google.go
 create mode 100644 vendor/google.golang.org/grpc/credentials/internal/syscallconn.go
 create mode 100644 vendor/google.golang.org/grpc/credentials/internal/syscallconn_appengine.go
 create mode 100644 vendor/google.golang.org/grpc/credentials/oauth/oauth.go
 create mode 100644 vendor/google.golang.org/grpc/credentials/tls13.go
 create mode 100644 vendor/google.golang.org/grpc/dialoptions.go
 create mode 100644 vendor/google.golang.org/grpc/doc.go
 create mode 100644 vendor/google.golang.org/grpc/encoding/encoding.go
 create mode 100644 vendor/google.golang.org/grpc/encoding/proto/proto.go
 create mode 100644 vendor/google.golang.org/grpc/grpclog/grpclog.go
 create mode 100644 vendor/google.golang.org/grpc/grpclog/logger.go
 create mode 100644 vendor/google.golang.org/grpc/grpclog/loggerv2.go
 create mode 100644 vendor/google.golang.org/grpc/interceptor.go
 create mode 100644 vendor/google.golang.org/grpc/internal/backoff/backoff.go
 create mode 100644 vendor/google.golang.org/grpc/internal/balancerload/load.go
 create mode 100644 vendor/google.golang.org/grpc/internal/binarylog/binarylog.go
 create mode 100644 vendor/google.golang.org/grpc/internal/binarylog/binarylog_testutil.go
 create mode 100644 vendor/google.golang.org/grpc/internal/binarylog/env_config.go
 create mode 100644 vendor/google.golang.org/grpc/internal/binarylog/method_logger.go
 create mode 100644 vendor/google.golang.org/grpc/internal/binarylog/sink.go
 create mode 100644 vendor/google.golang.org/grpc/internal/binarylog/util.go
 create mode 100644 vendor/google.golang.org/grpc/internal/channelz/funcs.go
 create mode 100644 vendor/google.golang.org/grpc/internal/channelz/types.go
 create mode 100644 vendor/google.golang.org/grpc/internal/channelz/types_linux.go
 create mode 100644 vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go
 create mode 100644 vendor/google.golang.org/grpc/internal/channelz/util_linux.go
 create mode 100644 vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go
 create mode 100644 vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
 create mode 100644 vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go
 create mode 100644 vendor/google.golang.org/grpc/internal/grpcsync/event.go
 create mode 100644 vendor/google.golang.org/grpc/internal/internal.go
 create mode 100644 vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go
 create mode 100644 vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go
 create mode 100644 vendor/google.golang.org/grpc/internal/transport/bdp_estimator.go
 create mode 100644 vendor/google.golang.org/grpc/internal/transport/controlbuf.go
 create mode 100644 vendor/google.golang.org/grpc/internal/transport/defaults.go
 create mode 100644 vendor/google.golang.org/grpc/internal/transport/flowcontrol.go
 create mode 100644 vendor/google.golang.org/grpc/internal/transport/handler_server.go
 create mode 100644 vendor/google.golang.org/grpc/internal/transport/http2_client.go
 create mode 100644 vendor/google.golang.org/grpc/internal/transport/http2_server.go
 create mode 100644 vendor/google.golang.org/grpc/internal/transport/http_util.go
 create mode 100644 vendor/google.golang.org/grpc/internal/transport/log.go
 create mode 100644 vendor/google.golang.org/grpc/internal/transport/transport.go
 create mode 100644 vendor/google.golang.org/grpc/keepalive/keepalive.go
 create mode 100644 vendor/google.golang.org/grpc/metadata/metadata.go
 create mode 100644 vendor/google.golang.org/grpc/naming/dns_resolver.go
 create mode 100644 vendor/google.golang.org/grpc/naming/naming.go
 create mode 100644 vendor/google.golang.org/grpc/peer/peer.go
 create mode 100644 vendor/google.golang.org/grpc/picker_wrapper.go
 create mode 100644 vendor/google.golang.org/grpc/pickfirst.go
 create mode 100644 vendor/google.golang.org/grpc/preloader.go
 create mode 100644 vendor/google.golang.org/grpc/proxy.go
 create mode 100644 vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go
 create mode 100644 vendor/google.golang.org/grpc/resolver/passthrough/passthrough.go
 create mode 100644 vendor/google.golang.org/grpc/resolver/resolver.go
 create mode 100644 vendor/google.golang.org/grpc/resolver_conn_wrapper.go
 create mode 100644 vendor/google.golang.org/grpc/rpc_util.go
 create mode 100644 vendor/google.golang.org/grpc/server.go
 create mode 100644 vendor/google.golang.org/grpc/service_config.go
 create mode 100644 vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go
 create mode 100644 vendor/google.golang.org/grpc/stats/handlers.go
 create mode 100644 vendor/google.golang.org/grpc/stats/stats.go
 create mode 100644 vendor/google.golang.org/grpc/status/status.go
 create mode 100644 vendor/google.golang.org/grpc/stream.go
 create mode 100644 vendor/google.golang.org/grpc/tap/tap.go
 create mode 100644 vendor/google.golang.org/grpc/trace.go
 create mode 100644 vendor/google.golang.org/grpc/version.go
 create mode 100644 vendor/k8s.io/api/admissionregistration/v1alpha1/doc.go
 rename vendor/k8s.io/api/{coordination/v1 => admissionregistration/v1alpha1}/generated.pb.go (50%)
 rename vendor/k8s.io/api/{coordination/v1 => admissionregistration/v1alpha1}/register.go (86%)
 create mode 100644 vendor/k8s.io/api/admissionregistration/v1alpha1/types.go
 create mode 100644 vendor/k8s.io/api/admissionregistration/v1alpha1/types_swagger_doc_generated.go
 rename vendor/k8s.io/api/{node => admissionregistration}/v1alpha1/zz_generated.deepcopy.go (51%)
 delete mode 100644 vendor/k8s.io/api/auditregistration/v1alpha1/generated.pb.go
 delete mode 100644 vendor/k8s.io/api/auditregistration/v1alpha1/types.go
 delete mode 100644 vendor/k8s.io/api/auditregistration/v1alpha1/types_swagger_doc_generated.go
 delete mode 100644 vendor/k8s.io/api/auditregistration/v1alpha1/zz_generated.deepcopy.go
 delete mode 100644 vendor/k8s.io/api/coordination/v1/types.go
 delete mode 100644 vendor/k8s.io/api/coordination/v1/types_swagger_doc_generated.go
 delete mode 100644 vendor/k8s.io/api/coordination/v1/zz_generated.deepcopy.go
 delete mode 100644 vendor/k8s.io/api/core/v1/well_known_labels.go
 delete mode 100644 vendor/k8s.io/api/networking/v1beta1/generated.pb.go
 delete mode 100644 vendor/k8s.io/api/networking/v1beta1/register.go
 delete mode 100644 vendor/k8s.io/api/networking/v1beta1/types.go
 delete mode 100644 vendor/k8s.io/api/networking/v1beta1/types_swagger_doc_generated.go
 delete mode 100644 vendor/k8s.io/api/node/v1alpha1/generated.pb.go
 delete mode 100644 vendor/k8s.io/api/node/v1alpha1/types.go
 delete mode 100644 vendor/k8s.io/api/node/v1alpha1/types_swagger_doc_generated.go
 delete mode 100644 vendor/k8s.io/api/node/v1beta1/generated.pb.go
 delete mode 100644 vendor/k8s.io/api/node/v1beta1/types.go
 delete mode 100644 vendor/k8s.io/api/node/v1beta1/types_swagger_doc_generated.go
 delete mode 100644 vendor/k8s.io/api/node/v1beta1/zz_generated.deepcopy.go
 delete mode 100644 vendor/k8s.io/api/scheduling/v1/doc.go
 delete mode 100644 vendor/k8s.io/api/scheduling/v1/generated.pb.go
 delete mode 100644 vendor/k8s.io/api/scheduling/v1/register.go
 delete mode 100644 vendor/k8s.io/api/scheduling/v1/types.go
 delete mode 100644 vendor/k8s.io/api/scheduling/v1/types_swagger_doc_generated.go
 delete mode 100644 vendor/k8s.io/api/scheduling/v1/zz_generated.deepcopy.go
 delete mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/mapper.go
 create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf_extension.go
 delete mode 100644 vendor/k8s.io/apimachinery/pkg/util/sets/int32.go
 create mode 100644 vendor/k8s.io/client-go/discovery/cached_discovery.go
 create mode 100644 vendor/k8s.io/client-go/discovery/round_tripper.go
 create mode 100644 vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/initializerconfiguration.go
 rename vendor/k8s.io/client-go/informers/{node => admissionregistration}/v1alpha1/interface.go (78%)
 delete mode 100644 vendor/k8s.io/client-go/informers/auditregistration/v1alpha1/auditsink.go
 delete mode 100644 vendor/k8s.io/client-go/informers/node/v1alpha1/runtimeclass.go
 delete mode 100644 vendor/k8s.io/client-go/informers/node/v1beta1/interface.go
 delete mode 100644 vendor/k8s.io/client-go/informers/node/v1beta1/runtimeclass.go
 delete mode 100644 vendor/k8s.io/client-go/informers/scheduling/v1/interface.go
 delete mode 100644 vendor/k8s.io/client-go/informers/scheduling/v1/priorityclass.go
 delete mode 100644 vendor/k8s.io/client-go/informers/storage/v1/volumeattachment.go
 delete mode 100644 vendor/k8s.io/client-go/informers/storage/v1beta1/csidriver.go
 delete mode 100644 vendor/k8s.io/client-go/informers/storage/v1beta1/csinode.go
 rename vendor/k8s.io/client-go/kubernetes/typed/{auditregistration/v1alpha1/auditregistration_client.go => admissionregistration/v1alpha1/admissionregistration_client.go} (54%)
 rename vendor/k8s.io/client-go/kubernetes/typed/{auditregistration => admissionregistration}/v1alpha1/doc.go (100%)
 rename vendor/k8s.io/client-go/kubernetes/typed/{node => admissionregistration}/v1alpha1/generated_expansion.go (92%)
 create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/initializerconfiguration.go
 rename vendor/k8s.io/client-go/kubernetes/typed/{node/v1beta1/generated_expansion.go => apps/v1beta1/scale.go} (51%)
 create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/scale.go
 delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/auditsink.go
 delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/coordination_client.go
 delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/lease.go
 delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event_expansion.go
 delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/networkpolicy.go
 create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/scale.go
 create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/scale_expansion.go
 delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ingress.go
 delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/networking_client.go
 delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/runtimeclass.go
 delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/runtimeclass.go
 delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/priorityclass.go
 delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/scheduling_client.go
 delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1/volumeattachment.go
 delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csidriver.go
 delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csinode.go
 rename vendor/k8s.io/client-go/listers/{auditregistration => admissionregistration}/v1alpha1/expansion_generated.go (78%)
 create mode 100644 vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/initializerconfiguration.go
 create mode 100644 vendor/k8s.io/client-go/listers/apps/v1beta1/scale.go
 create mode 100644 vendor/k8s.io/client-go/listers/apps/v1beta2/scale.go
 delete mode 100644 vendor/k8s.io/client-go/listers/auditregistration/v1alpha1/auditsink.go
 delete mode 100644 vendor/k8s.io/client-go/listers/coordination/v1/lease.go
 delete mode 100644 vendor/k8s.io/client-go/listers/extensions/v1beta1/networkpolicy.go
 create mode 100644 vendor/k8s.io/client-go/listers/extensions/v1beta1/scale.go
 delete mode 100644 vendor/k8s.io/client-go/listers/networking/v1beta1/ingress.go
 delete mode 100644 vendor/k8s.io/client-go/listers/node/v1alpha1/runtimeclass.go
 delete mode 100644 vendor/k8s.io/client-go/listers/node/v1beta1/expansion_generated.go
 delete mode 100644 vendor/k8s.io/client-go/listers/node/v1beta1/runtimeclass.go
 delete mode 100644 vendor/k8s.io/client-go/listers/scheduling/v1/expansion_generated.go
 delete mode 100644 vendor/k8s.io/client-go/listers/scheduling/v1/priorityclass.go
 delete mode 100644 vendor/k8s.io/client-go/listers/storage/v1/volumeattachment.go
 delete mode 100644 vendor/k8s.io/client-go/listers/storage/v1beta1/csidriver.go
 delete mode 100644 vendor/k8s.io/client-go/listers/storage/v1beta1/csinode.go
 create mode 100644 vendor/k8s.io/client-go/tools/auth/clientauth.go
 create mode 100644 vendor/k8s.io/client-go/tools/clientcmd/api/latest/latest.go
 create mode 100644 vendor/k8s.io/client-go/tools/clientcmd/api/v1/conversion.go
 rename vendor/k8s.io/{code-generator/_examples/MixedCase/apis/example => client-go/tools/clientcmd/api}/v1/doc.go (82%)
 rename vendor/k8s.io/{code-generator/_examples/MixedCase/apis/example => client-go/tools/clientcmd/api}/v1/register.go (66%)
 create mode 100644 vendor/k8s.io/client-go/tools/clientcmd/api/v1/types.go
 create mode 100644 vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.deepcopy.go
 create mode 100644 vendor/k8s.io/client-go/tools/clientcmd/auth_loaders.go
 create mode 100644 vendor/k8s.io/client-go/tools/clientcmd/client_config.go
 create mode 100644 vendor/k8s.io/client-go/tools/clientcmd/config.go
 create mode 100644 vendor/k8s.io/client-go/tools/clientcmd/doc.go
 create mode 100644 vendor/k8s.io/client-go/tools/clientcmd/flag.go
 create mode 100644 vendor/k8s.io/client-go/tools/clientcmd/helpers.go
 create mode 100644 vendor/k8s.io/client-go/tools/clientcmd/loader.go
 create mode 100644 vendor/k8s.io/client-go/tools/clientcmd/merged_client_builder.go
 create mode 100644 vendor/k8s.io/client-go/tools/clientcmd/overrides.go
 create mode 100644 vendor/k8s.io/client-go/tools/clientcmd/validation.go
 create mode 100644 vendor/k8s.io/client-go/tools/record/doc.go
 create mode 100644 vendor/k8s.io/client-go/tools/record/event.go
 create mode 100644 vendor/k8s.io/client-go/tools/record/events_cache.go
 create mode 100644 vendor/k8s.io/client-go/tools/record/fake.go
 rename vendor/k8s.io/{utils => client-go/util}/buffer/ring_growing.go (100%)
 create mode 100644 vendor/k8s.io/client-go/util/homedir/homedir.go
 rename vendor/k8s.io/{utils => client-go/util}/integer/integer.go (81%)
 delete mode 100644 vendor/k8s.io/client-go/util/keyutil/key.go
 create mode 100644 vendor/k8s.io/client-go/util/workqueue/default_rate_limiters.go
 create mode 100644 vendor/k8s.io/client-go/util/workqueue/delaying_queue.go
 create mode 100644 vendor/k8s.io/client-go/util/workqueue/doc.go
 create mode 100644 vendor/k8s.io/client-go/util/workqueue/metrics.go
 create mode 100644 vendor/k8s.io/client-go/util/workqueue/parallelizer.go
 create mode 100644 vendor/k8s.io/client-go/util/workqueue/queue.go
 create mode 100644 vendor/k8s.io/client-go/util/workqueue/rate_limitting_queue.go
 delete mode 100644 vendor/k8s.io/code-generator/_examples/MixedCase/apis/example/v1/types.go
 delete mode 100644 vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/typed/example/v1/clustertesttype.go
 delete mode 100644 vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/typed/example/v1/doc.go
 delete mode 100644 vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/typed/example/v1/example_client.go
 delete mode 100644 vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/typed/example/v1/fake/fake_clustertesttype.go
 delete mode 100644 vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/typed/example/v1/fake/fake_testtype.go
 delete mode 100644 vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/typed/example/v1/testtype.go
 delete mode 100644 vendor/k8s.io/code-generator/_examples/MixedCase/informers/externalversions/example/v1/clustertesttype.go
 delete mode 100644 vendor/k8s.io/code-generator/_examples/MixedCase/informers/externalversions/example/v1/interface.go
 delete mode 100644 vendor/k8s.io/code-generator/_examples/MixedCase/informers/externalversions/example/v1/testtype.go
 delete mode 100644 vendor/k8s.io/code-generator/_examples/MixedCase/listers/example/v1/clustertesttype.go
 delete mode 100644 vendor/k8s.io/code-generator/_examples/MixedCase/listers/example/v1/testtype.go
 rename vendor/k8s.io/{client-go/listers/networking/v1beta1/expansion_generated.go => code-generator/cmd/client-gen/generators/tags.go} (58%)
 rename vendor/k8s.io/{gengo/examples/set-gen => code-generator/cmd/informer-gen}/generators/tags.go (95%)
 create mode 100644 vendor/k8s.io/code-generator/cmd/lister-gen/generators/tags.go
 create mode 100644 vendor/k8s.io/code-generator/cmd/openapi-gen/README
 create mode 100644 vendor/k8s.io/code-generator/cmd/openapi-gen/args/args.go
 create mode 100644 vendor/k8s.io/code-generator/cmd/openapi-gen/main.go
 delete mode 100644 vendor/k8s.io/code-generator/go.mod
 delete mode 100644 vendor/k8s.io/code-generator/go.sum
 delete mode 100644 vendor/k8s.io/code-generator/pkg/namer/tag-override.go
 delete mode 100644 vendor/k8s.io/code-generator/tools.go
 delete mode 100644 vendor/k8s.io/gengo/examples/import-boss/generators/import_restrict.go
 delete mode 100644 vendor/k8s.io/gengo/examples/set-gen/generators/sets.go
 delete mode 100644 vendor/k8s.io/utils/trace/trace.go
 delete mode 100644 vendor/sigs.k8s.io/yaml/yaml_go110.go

diff --git a/.errcheck.txt b/.errcheck.txt
new file mode 100644
index 000000000..9b63fa73d
--- /dev/null
+++ b/.errcheck.txt
@@ -0,0 +1 @@
+(*github.com/tektoncd/pipeline/vendor/go.uber.org/zap.SugaredLogger).Sync
\ No newline at end of file
diff --git a/.golangci.yml b/.golangci.yml
new file mode 100644
index 000000000..7b05e715c
--- /dev/null
+++ b/.golangci.yml
@@ -0,0 +1,9 @@
+run:
+  build-tags:
+  - e2e
+  skip-dirs:
+  - vendor
+  - pkg/client/clientset/versioned/fake
+linters-settings:
+  errcheck:
+    exclude: .errcheck.txt
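
The two new lint files above work together: .golangci.yml runs the linters with the e2e build tag (so files guarded by that tag are analyzed too), skips vendored and generated fake-clientset code, and points errcheck at .errcheck.txt, whose single entry whitelists the Sync method on zap's SugaredLogger. A minimal sketch of the pattern that exclusion exists for, assuming only the stock zap API (illustrative code, not part of this patch):

package main

import "go.uber.org/zap"

func main() {
	logger, err := zap.NewProduction()
	if err != nil {
		panic(err)
	}
	sugar := logger.Sugar()
	// Sync flushes any buffered log entries on exit; its error is
	// conventionally ignored, which is exactly the unchecked call that
	// errcheck would report without the .errcheck.txt exclusion.
	defer sugar.Sync()
	sugar.Infow("controller starting")
}
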
"github.com/davecgh/go-spew" @@ -17,6 +98,14 @@ revision = "026c730a0dcc5d11f93f1cf1cc65b01247ea7b6f" version = "v4.5.0" +[[projects]] + digest = "1:81466b4218bf6adddac2572a30ac733a9255919bc2f470b4827a317bd4ee1756" + name = "github.com/ghodss/yaml" + packages = ["."] + pruneopts = "NUT" + revision = "0ca9ea5df5451ffdf184b4428c902747c2c11cd7" + version = "v1.0.0" + [[projects]] digest = "1:a1b2a5e38f79688ee8250942d5fa960525fceb1024c855c7bc76fa77b0f3cca2" name = "github.com/gogo/protobuf" @@ -29,18 +118,45 @@ version = "v1.2.1" [[projects]] - digest = "1:2d0636a8c490d2272dd725db26f74a537111b99b9dbdda0d8b98febe63702aa4" + branch = "master" + digest = "1:e2b86e41f3d669fc36b50d31d32d22c8ac656c75aa5ea89717ce7177e134ff2a" + name = "github.com/golang/glog" + packages = ["."] + pruneopts = "NUT" + revision = "23def4e6c14b4da8ac2ed8007337bc5eb5007998" + +[[projects]] + branch = "master" + digest = "1:b7cb6054d3dff43b38ad2e92492f220f57ae6087ee797dca298139776749ace8" + name = "github.com/golang/groupcache" + packages = ["lru"] + pruneopts = "NUT" + revision = "869f871628b6baa9cfbc11732cdf6546b17c1298" + +[[projects]] + digest = "1:4dacf728c83400b3e9d1d3025dd3c1e93e9a1b033726d1b193dc209f3fa9cb7a" name = "github.com/golang/protobuf" packages = [ "proto", + "protoc-gen-go/descriptor", "ptypes", "ptypes/any", "ptypes/duration", + "ptypes/empty", + "ptypes/struct", "ptypes/timestamp", + "ptypes/wrappers", ] pruneopts = "NUT" - revision = "b5d812f8a3706043e23a9cd5babf2e5423744d30" - version = "v1.3.1" + revision = "aa810b61a9c79d51363740d207bb46cf8e620ed5" + +[[projects]] + digest = "1:05f95ffdfcf651bdb0f05b40b69e7f5663047f8da75c72d58728acb59b5cc107" + name = "github.com/google/btree" + packages = ["."] + pruneopts = "NUT" + revision = "4030bb1f1f0c35b30ca7009e9ebd06849dd45306" + version = "v1.0.0" [[projects]] digest = "1:bf40199583e5143d1472fc34d10d6f4b69d97572142acf343b3e43136da40823" @@ -76,7 +192,23 @@ "stringclassifier/searchset/tokenizer", ] pruneopts = "NUT" - revision = "47b603fe1b8cf823b1227e0573cc620bf929a63b" + revision = "c3068f13fcc3961fd05f96f13c8250e350db4209" + +[[projects]] + digest = "1:ab3ec1fe3e39bac4b3ab63390767766622be35b7cab03f47f787f9ec60522a53" + name = "github.com/google/uuid" + packages = ["."] + pruneopts = "NUT" + revision = "0cd6bf5da1e1c83f8b45653022c74f71af0538a4" + version = "v1.1.1" + +[[projects]] + digest = "1:4b76f3e067eed897a45242383a2aa4d0a2fdbf73a8d00c03167dba80c43630b1" + name = "github.com/googleapis/gax-go" + packages = ["v2"] + pruneopts = "NUT" + revision = "bd5b16380fd03dc758d11cef74ba2e3bc8b0e8c2" + version = "v2.0.5" [[projects]] digest = "1:459a00967aaf06edff3228e128dd243d7c91b0fc11ad2f7ceaa98f094bf66796" @@ -90,6 +222,17 @@ revision = "e73c7ec21d36ddb0711cb36d1502d18363b5c2c9" version = "v0.3.0" +[[projects]] + branch = "master" + digest = "1:a86d65bc23eea505cd9139178e4d889733928fe165c7a008f41eaab039edf9df" + name = "github.com/gregjones/httpcache" + packages = [ + ".", + "diskcache", + ] + pruneopts = "NUT" + revision = "901d90724c7919163f472a9812253fb26761123d" + [[projects]] digest = "1:52094d0f8bdf831d1a2401e9b6fee5795fdc0b2a2d1f8bb1980834c289e79129" name = "github.com/hashicorp/golang-lru" @@ -101,6 +244,21 @@ revision = "7087cb70de9f7a8bc0a10c375cb0d2280a8edf9c" version = "v0.5.1" +[[projects]] + digest = "1:aaa38889f11896ee3644d77e17dc7764cc47f5f3d3b488268df2af2b52541c5f" + name = "github.com/imdario/mergo" + packages = ["."] + pruneopts = "NUT" + revision = "7c29201646fa3de8506f701213473dd407f19646" + version = "v0.3.7" + +[[projects]] + digest = 
"1:1f2aebae7e7c856562355ec0198d8ca2fa222fb05e5b1b66632a1fce39631885" + name = "github.com/jmespath/go-jmespath" + packages = ["."] + pruneopts = "NUT" + revision = "c2b33e84" + [[projects]] digest = "1:4e903242fe176238aaa469f59d7035f5abf2aa9acfefb8964ddd203651b574e9" name = "github.com/json-iterator/go" @@ -110,26 +268,51 @@ version = "v1.1.6" [[projects]] - digest = "1:dc0a39678daf0917baea389ba91ba38b3eb4c9d199f0808ac2c416c65aa6357d" + digest = "1:9a670ce0109361c513e7103c478dbbea261bb81d41d0f9c822707451bce139bb" + name = "github.com/knative/caching" + packages = [ + "pkg/apis/caching", + "pkg/apis/caching/v1alpha1", + "pkg/client/clientset/versioned", + "pkg/client/clientset/versioned/scheme", + "pkg/client/clientset/versioned/typed/caching/v1alpha1", + ] + pruneopts = "T" + revision = "3fc06fd3c9880a9ebb5c401f4b20cf6666cc7bc0" + +[[projects]] + digest = "1:33afd816eb96416e5d7c0a64c1c6412e277720faa03c90739ce1d1fb1764b8de" name = "github.com/knative/pkg" packages = [ "apis", "apis/duck", "apis/duck/v1beta1", + "changeset", + "codegen/cmd/injection-gen", + "codegen/cmd/injection-gen/args", + "codegen/cmd/injection-gen/generators", "configmap", + "controller", + "injection", + "injection/clients/kubeclient", + "injection/sharedmain", + "kmeta", "kmp", + "logging", + "logging/logkey", + "metrics", + "metrics/metricskey", + "signals", + "system", ] - pruneopts = "NUT" + pruneopts = "T" revision = "68737b1b4e03d9a888e89ee2a44714a56eefd539" [[projects]] - digest = "1:cdd3c19a395031148cd55f5aa770a77c0c38429cd109563f99e9567c11f7ef04" + digest = "1:dabd27a263906435d198a5fa9237ff2b14383a1e29964b7006e6dc975134f5e6" name = "github.com/knative/test-infra" - packages = [ - "scripts", - "tools/dep-collector", - ] - pruneopts = "UT" + packages = ["tools/dep-collector"] + pruneopts = "NUT" revision = "69af8af1d3fec861ada88efc72409b3467b0588d" [[projects]] @@ -140,6 +323,14 @@ pruneopts = "NUT" revision = "81af80346b1a01caae0cbc27fd3c1ba5b11e189f" +[[projects]] + digest = "1:5985ef4caf91ece5d54817c11ea25f182697534f8ae6521eadcd628c142ac4b6" + name = "github.com/matttproud/golang_protobuf_extensions" + packages = ["pbutil"] + pruneopts = "NUT" + revision = "c12348ce28de40eed0136aa2b644d0ee0650e56c" + version = "v1.0.1" + [[projects]] digest = "1:2f42fa12d6911c7b7659738758631bec870b7e9b4c6be5444f963cdcfccc191f" name = "github.com/modern-go/concurrent" @@ -156,6 +347,22 @@ revision = "4b7aa43c6742a2c18fdef89dd197aaae7dac7ccd" version = "1.0.1" +[[projects]] + branch = "master" + digest = "1:89da0f0574bc94cfd0ac8b59af67bf76cdd110d503df2721006b9f0492394333" + name = "github.com/petar/GoLLRB" + packages = ["llrb"] + pruneopts = "NUT" + revision = "33fb24c13b99c46c93183c291836c573ac382536" + +[[projects]] + digest = "1:e1b94bd98c62fc2f905621fc6ba8209b7004e4513a1dfecb12a3de56ec2bb519" + name = "github.com/peterbourgon/diskv" + packages = ["."] + pruneopts = "NUT" + revision = "0be1b92a6df0e4f5cb0a5d15fb7f643d0ad93ce6" + version = "v3.0.0" + [[projects]] digest = "1:14715f705ff5dfe0ffd6571d7d201dd8e921030f8070321a79380d8ca4ec1a24" name = "github.com/pkg/errors" @@ -164,6 +371,49 @@ revision = "ba968bfe8b2f7e042a574c888954fccecfa385b4" version = "v0.8.1" +[[projects]] + digest = "1:097cc61836050f45cbb712ae3bb45d66fba464c16b8fac09907fa3c1f753eff6" + name = "github.com/prometheus/client_golang" + packages = [ + "prometheus", + "prometheus/internal", + "prometheus/promhttp", + ] + pruneopts = "NUT" + revision = "4ab88e80c249ed361d3299e2930427d9ac43ef8d" + version = "v1.0.0" + +[[projects]] + branch = "master" + digest = 
"1:2d5cd61daa5565187e1d96bae64dbbc6080dacf741448e9629c64fd93203b0d4" + name = "github.com/prometheus/client_model" + packages = ["go"] + pruneopts = "NUT" + revision = "fd36f4220a901265f90734c3183c5f0c91daa0b8" + +[[projects]] + digest = "1:d03ca24670416dc8fccc78b05d6736ec655416ca7db0a028e8fb92cfdfe3b55e" + name = "github.com/prometheus/common" + packages = [ + "expfmt", + "internal/bitbucket.org/ww/goautoneg", + "model", + ] + pruneopts = "NUT" + revision = "31bed53e4047fd6c510e43a941f90cb31be0972a" + version = "v0.6.0" + +[[projects]] + digest = "1:19305fc369377c111c865a7a01e11c675c57c52a932353bbd4ea360bd5b72d99" + name = "github.com/prometheus/procfs" + packages = [ + ".", + "internal/fs", + ] + pruneopts = "NUT" + revision = "3f98efb27840a48a7a2898ec80be07674d19f9c8" + version = "v0.0.3" + [[projects]] digest = "1:d917313f309bda80d27274d53985bc65651f81a5b66b820749ac7f8ef061fd04" name = "github.com/sergi/go-diff" @@ -181,20 +431,23 @@ version = "v1.0.3" [[projects]] - digest = "1:afcde805885dad730ad538ab7871d0f88ef42cb45117f54fc6d74493192fbded" + digest = "1:837bb82199978b5b4379838232da434ba3a3a6058bccf7e444f8a926ce1a2937" name = "github.com/tektoncd/pipeline" packages = [ "pkg/apis/config", "pkg/apis/pipeline", "pkg/apis/pipeline/v1alpha1", + "pkg/client/clientset/versioned", + "pkg/client/clientset/versioned/scheme", + "pkg/client/clientset/versioned/typed/pipeline/v1alpha1", + "pkg/client/injection/client", "pkg/list", "pkg/merge", "pkg/names", "pkg/templating", ] pruneopts = "NUT" - revision = "2c9bbc54409f757da8b7f87e343420b8b7306320" - version = "v0.5.0" + revision = "0ee3b1491aa5b38d48b4547eeb4c87d11852b3a3" [[projects]] digest = "1:5558ceef0c045f9c8a0c08c6e0de89bb0895b1aad69d9722b3748f003564b8cb" @@ -203,6 +456,63 @@ pruneopts = "UT" revision = "a51e87c5261fdb718470c077c155e070aca690a8" +[[projects]] + digest = "1:69a97603fe8952de86ee1e74a065f7974ec7d7d1d2301d3f5da6d342156363f4" + name = "go.opencensus.io" + packages = [ + ".", + "exporter/prometheus", + "internal", + "internal/tagencoding", + "metric/metricdata", + "metric/metricproducer", + "plugin/ocgrpc", + "plugin/ochttp", + "plugin/ochttp/propagation/b3", + "resource", + "stats", + "stats/internal", + "stats/view", + "tag", + "trace", + "trace/internal", + "trace/propagation", + "trace/tracestate", + ] + pruneopts = "NUT" + revision = "75c0cca22312e51bfd4fafdbe9197ae399e18b38" + version = "v0.20.2" + +[[projects]] + digest = "1:cc9d86ec4e6e3bdf87e3a421273bfeed003cf8e21351c0302fe8b0eb7b10efe6" + name = "go.uber.org/atomic" + packages = ["."] + pruneopts = "NUT" + revision = "df976f2515e274675050de7b3f42545de80594fd" + version = "v1.4.0" + +[[projects]] + digest = "1:58ca93bdf81bac106ded02226b5395a0595d5346cdc4caa8d9c1f3a5f8f9976e" + name = "go.uber.org/multierr" + packages = ["."] + pruneopts = "NUT" + revision = "3c4937480c32f4c13a875a1829af76c98ca3d40a" + version = "v1.1.0" + +[[projects]] + digest = "1:85674ac609b704fd4e9f463553b6ffc3a3527a993ae0ba550eb56beaabdfe094" + name = "go.uber.org/zap" + packages = [ + ".", + "buffer", + "internal/bufferpool", + "internal/color", + "internal/exit", + "zapcore", + ] + pruneopts = "NUT" + revision = "67bc79d13d155c02fd008f721863ff8cc5f30659" + [[projects]] branch = "master" digest = "1:bbe51412d9915d64ffaa96b51d409e070665efc5194fcf145c4a27d4133107a4" @@ -213,7 +523,7 @@ [[projects]] branch = "master" - digest = "1:aecf1e72931b21a6cb28d8158926b2dc28f6162cbdfece2c988ce8c383684720" + digest = "1:5578b99717f08e6480d7e0480f758749c12f9cc5da19a33a863dc7307fd699fb" name = 
"golang.org/x/net" packages = [ "context", @@ -222,31 +532,44 @@ "http2", "http2/hpack", "idna", + "internal/timeseries", + "trace", ] pruneopts = "NUT" revision = "da137c7871d730100384dbcf36e6f8fa493aef5b" [[projects]] branch = "master" - digest = "1:f3a2e6d7423b8c19cdb2203cda9672900cc43012ea69f30ff6874dd453f44aec" + digest = "1:1519760444b90c560eb01373869bc66fd539e6fe1bf77af22047c43edc40ab35" name = "golang.org/x/oauth2" packages = [ ".", + "google", "internal", + "jws", + "jwt", ] pruneopts = "NUT" revision = "0f29369cfe4552d0e4bcddc57cc75f4d7e672a33" [[projects]] branch = "master" - digest = "1:04a5fe5e30285936ae29208ad5752b5ad761f2638f96a45daa5fe0eee34a3547" + digest = "1:382bb5a7fb4034db3b6a2d19e5a4a6bcf52f4750530603c01ca18a172fa3089b" + name = "golang.org/x/sync" + packages = ["semaphore"] + pruneopts = "NUT" + revision = "112230192c580c3556b8cee6403af37a4fc5f28c" + +[[projects]] + branch = "master" + digest = "1:5f0606c755c423ee9970d55028e0ee09df03e33297a39d6b83c2502dc9a2193f" name = "golang.org/x/sys" packages = [ "unix", "windows", ] pruneopts = "NUT" - revision = "04f50cda93cbb67f2afa353c52f342100e80e625" + revision = "fae7ac547cb717d141c433a2a173315e216b64c4" [[projects]] digest = "1:8d8faad6b12a3a4c819a3f9618cb6ee1fa1cfc33253abeeea8b55336721e3405" @@ -283,7 +606,7 @@ [[projects]] branch = "master" - digest = "1:7026eea9c10a8cfaa11a68d8e15259f09a1706dce5b9d736aa032991b5a2749d" + digest = "1:2fc17e91d2a0f618e5e39cd8f9cc5d59a1abcb0949459a3e8655b5562972d843" name = "golang.org/x/tools" packages = [ "go/ast/astutil", @@ -300,7 +623,7 @@ "internal/semver", ] pruneopts = "NUT" - revision = "2214986f1668163f6c7315bcfa4a6496af4c3829" + revision = "e98af230987614b97d20a34f8062bf3fa0fe2376" [[projects]] branch = "master" @@ -315,53 +638,113 @@ [[projects]] branch = "master" - digest = "1:843800a8f157ca2fe7e936153d911fc647573207f4e90b6886a656b85672ebab" - name = "gonum.org/v1/gonum" - packages = [ - "blas", - "blas/blas64", - "blas/cblas128", - "blas/gonum", - "floats", - "graph", - "graph/internal/linear", - "graph/internal/ordered", - "graph/internal/set", - "graph/internal/uid", - "graph/iterator", - "graph/simple", - "graph/topo", - "graph/traverse", - "internal/asm/c128", - "internal/asm/c64", - "internal/asm/f32", - "internal/asm/f64", - "internal/cmplx64", - "internal/math32", - "lapack", - "lapack/gonum", - "lapack/lapack64", - "mat", - ] - pruneopts = "NUT" - revision = "4340aa3071a0acb819e023bd5cd455c714a1f154" - -[[projects]] - digest = "1:372cd8eba449f9b6db06677d0e73fa193ec5b19aaee148f355503ab6127045ca" + digest = "1:9b9245bd124d95af7072487cd1e5861174b859ebc31cbe9fbab3b88456701485" + name = "google.golang.org/api" + packages = [ + "googleapi/transport", + "internal", + "iterator", + "option", + "support/bundler", + "transport", + "transport/grpc", + "transport/http", + "transport/http/internal/propagation", + ] + pruneopts = "NUT" + revision = "aa15faf3c8a1cffc77fc3dabe95703bb12c5f6a9" + +[[projects]] + digest = "1:a955e7c44c2be14b61aa2ddda744edfdfbc6817e993703a16e303c277ba84449" name = "google.golang.org/appengine" packages = [ + ".", "internal", + "internal/app_identity", "internal/base", "internal/datastore", "internal/log", + "internal/modules", "internal/remote_api", + "internal/socket", "internal/urlfetch", + "socket", "urlfetch", ] pruneopts = "NUT" revision = "b2f4a3cf3c67576a2ee09e1fe62656a5086ce880" version = "v1.6.1" +[[projects]] + digest = "1:f13a21652a31a08931795226c4506ed9fa3be54a53ac09556ffcb74c1a24bc52" + name = "google.golang.org/genproto" + packages = [ + 
"googleapis/api/annotations", + "googleapis/api/distribution", + "googleapis/api/label", + "googleapis/api/metric", + "googleapis/api/monitoredres", + "googleapis/devtools/cloudtrace/v2", + "googleapis/monitoring/v3", + "googleapis/rpc/status", + "protobuf/field_mask", + ] + pruneopts = "NUT" + revision = "fc2db5cae922db113989ef87b5b859cc8a179f59" + +[[projects]] + digest = "1:89cc3cf640fa24f7345509981e7ab088ee8d4d4f08cf3b5783508856b146b438" + name = "google.golang.org/grpc" + packages = [ + ".", + "balancer", + "balancer/base", + "balancer/grpclb", + "balancer/grpclb/grpc_lb_v1", + "balancer/roundrobin", + "binarylog/grpc_binarylog_v1", + "codes", + "connectivity", + "credentials", + "credentials/alts", + "credentials/alts/internal", + "credentials/alts/internal/authinfo", + "credentials/alts/internal/conn", + "credentials/alts/internal/handshaker", + "credentials/alts/internal/handshaker/service", + "credentials/alts/internal/proto/grpc_gcp", + "credentials/google", + "credentials/internal", + "credentials/oauth", + "encoding", + "encoding/proto", + "grpclog", + "internal", + "internal/backoff", + "internal/balancerload", + "internal/binarylog", + "internal/channelz", + "internal/envconfig", + "internal/grpcrand", + "internal/grpcsync", + "internal/syscall", + "internal/transport", + "keepalive", + "metadata", + "naming", + "peer", + "resolver", + "resolver/dns", + "resolver/passthrough", + "serviceconfig", + "stats", + "status", + "tap", + ] + pruneopts = "NUT" + revision = "1d89a3c832915b2314551c1d2a506874d62e53f7" + version = "v1.22.0" + [[projects]] digest = "1:2d1fbdc6777e5408cabeb02bf336305e724b925ff4546ded0fa8715a7267922a" name = "gopkg.in/inf.v0" @@ -379,14 +762,14 @@ version = "v2.2.2" [[projects]] - digest = "1:759855d98a898d7d6a091b89b9c85c03d6efd673c44e15561d716543f4e88f31" + digest = "1:4485f6050feae6844efd79bce3f5b35e5ed4a21dd79ef6a2dbbee263531cea09" name = "k8s.io/api" packages = [ + "admissionregistration/v1alpha1", "admissionregistration/v1beta1", "apps/v1", "apps/v1beta1", "apps/v1beta2", - "auditregistration/v1alpha1", "authentication/v1", "authentication/v1beta1", "authorization/v1", @@ -398,20 +781,15 @@ "batch/v1beta1", "batch/v2alpha1", "certificates/v1beta1", - "coordination/v1", "coordination/v1beta1", "core/v1", "events/v1beta1", "extensions/v1beta1", "networking/v1", - "networking/v1beta1", - "node/v1alpha1", - "node/v1beta1", "policy/v1beta1", "rbac/v1", "rbac/v1alpha1", "rbac/v1beta1", - "scheduling/v1", "scheduling/v1alpha1", "scheduling/v1beta1", "settings/v1alpha1", @@ -420,11 +798,11 @@ "storage/v1beta1", ] pruneopts = "NUT" - revision = "7cf5895f2711098d7d9527db0a4a49fb0dff7de2" - version = "kubernetes-1.15.0" + revision = "145d52631d00cbfe68490d19ae4f0f501fd31a95" + version = "kubernetes-1.12.6" [[projects]] - digest = "1:4e13eee8921f9694bf834623009617d528c0f655e6237d0839a4af0954826dc9" + digest = "1:6423241a07f1586d8a2e30ce39332b0e7d938f8612a3b3cc2befd8cd4fd4f34a" name = "k8s.io/apimachinery" packages = [ "pkg/api/equality", @@ -475,11 +853,11 @@ "third_party/forked/golang/reflect", ] pruneopts = "NUT" - revision = "1799e75a07195de9460b8ef7300883499f12127b" - version = "kubernetes-1.15.0" + revision = "01f179d85dbce0f2e0e4351a92394b38694b7cae" + version = "kubernetes-1.12.6" [[projects]] - digest = "1:dd1c8d0d968ddb1ca912d960432095bbfed27149d902f57dff17a61aac5c5339" + digest = "1:3ca7df0bbf551a81b152726b5e939a757c193d92999e8cc68059f565b1759151" name = "k8s.io/client-go" packages = [ "discovery", @@ -487,13 +865,12 @@ "dynamic", "informers", 
"informers/admissionregistration", + "informers/admissionregistration/v1alpha1", "informers/admissionregistration/v1beta1", "informers/apps", "informers/apps/v1", "informers/apps/v1beta1", "informers/apps/v1beta2", - "informers/auditregistration", - "informers/auditregistration/v1alpha1", "informers/autoscaling", "informers/autoscaling/v1", "informers/autoscaling/v2beta1", @@ -505,7 +882,6 @@ "informers/certificates", "informers/certificates/v1beta1", "informers/coordination", - "informers/coordination/v1", "informers/coordination/v1beta1", "informers/core", "informers/core/v1", @@ -516,10 +892,6 @@ "informers/internalinterfaces", "informers/networking", "informers/networking/v1", - "informers/networking/v1beta1", - "informers/node", - "informers/node/v1alpha1", - "informers/node/v1beta1", "informers/policy", "informers/policy/v1beta1", "informers/rbac", @@ -527,7 +899,6 @@ "informers/rbac/v1alpha1", "informers/rbac/v1beta1", "informers/scheduling", - "informers/scheduling/v1", "informers/scheduling/v1alpha1", "informers/scheduling/v1beta1", "informers/settings", @@ -538,11 +909,11 @@ "informers/storage/v1beta1", "kubernetes", "kubernetes/scheme", + "kubernetes/typed/admissionregistration/v1alpha1", "kubernetes/typed/admissionregistration/v1beta1", "kubernetes/typed/apps/v1", "kubernetes/typed/apps/v1beta1", "kubernetes/typed/apps/v1beta2", - "kubernetes/typed/auditregistration/v1alpha1", "kubernetes/typed/authentication/v1", "kubernetes/typed/authentication/v1beta1", "kubernetes/typed/authorization/v1", @@ -554,31 +925,26 @@ "kubernetes/typed/batch/v1beta1", "kubernetes/typed/batch/v2alpha1", "kubernetes/typed/certificates/v1beta1", - "kubernetes/typed/coordination/v1", "kubernetes/typed/coordination/v1beta1", "kubernetes/typed/core/v1", "kubernetes/typed/events/v1beta1", "kubernetes/typed/extensions/v1beta1", "kubernetes/typed/networking/v1", - "kubernetes/typed/networking/v1beta1", - "kubernetes/typed/node/v1alpha1", - "kubernetes/typed/node/v1beta1", "kubernetes/typed/policy/v1beta1", "kubernetes/typed/rbac/v1", "kubernetes/typed/rbac/v1alpha1", "kubernetes/typed/rbac/v1beta1", - "kubernetes/typed/scheduling/v1", "kubernetes/typed/scheduling/v1alpha1", "kubernetes/typed/scheduling/v1beta1", "kubernetes/typed/settings/v1alpha1", "kubernetes/typed/storage/v1", "kubernetes/typed/storage/v1alpha1", "kubernetes/typed/storage/v1beta1", + "listers/admissionregistration/v1alpha1", "listers/admissionregistration/v1beta1", "listers/apps/v1", "listers/apps/v1beta1", "listers/apps/v1beta2", - "listers/auditregistration/v1alpha1", "listers/autoscaling/v1", "listers/autoscaling/v2beta1", "listers/autoscaling/v2beta2", @@ -586,20 +952,15 @@ "listers/batch/v1beta1", "listers/batch/v2alpha1", "listers/certificates/v1beta1", - "listers/coordination/v1", "listers/coordination/v1beta1", "listers/core/v1", "listers/events/v1beta1", "listers/extensions/v1beta1", "listers/networking/v1", - "listers/networking/v1beta1", - "listers/node/v1alpha1", - "listers/node/v1beta1", "listers/policy/v1beta1", "listers/rbac/v1", "listers/rbac/v1alpha1", "listers/rbac/v1beta1", - "listers/scheduling/v1", "listers/scheduling/v1alpha1", "listers/scheduling/v1beta1", "listers/settings/v1alpha1", @@ -614,28 +975,34 @@ "rest", "rest/watch", "testing", + "tools/auth", "tools/cache", + "tools/clientcmd", "tools/clientcmd/api", + "tools/clientcmd/api/latest", + "tools/clientcmd/api/v1", "tools/metrics", "tools/pager", + "tools/record", "tools/reference", "transport", + "util/buffer", "util/cert", "util/connrotation", 
"util/flowcontrol", - "util/keyutil", + "util/homedir", + "util/integer", "util/retry", + "util/workqueue", ] pruneopts = "NUT" - revision = "78d2af792babf2dd937ba2e2a8d99c753a5eda89" - version = "kubernetes-1.15.0" + revision = "78295b709ec6fa5be12e35892477a326dea2b5d3" + version = "kubernetes-1.12.6" [[projects]] - branch = "master" - digest = "1:1b69a86bacaec337d5e52ac45fa9b12f100c55fc94b51c86f035acf520ddeac7" + digest = "1:26b81b5e76e3f84ea5140da4f74649576e470f79091d2ef8e0d1b5000bc636ca" name = "k8s.io/code-generator" packages = [ - ".", "cmd/client-gen", "cmd/client-gen/args", "cmd/client-gen/generators", @@ -644,43 +1011,29 @@ "cmd/client-gen/generators/util", "cmd/client-gen/path", "cmd/client-gen/types", - "cmd/conversion-gen", - "cmd/conversion-gen/args", - "cmd/conversion-gen/generators", "cmd/deepcopy-gen", "cmd/deepcopy-gen/args", "cmd/defaulter-gen", "cmd/defaulter-gen/args", - "cmd/go-to-protobuf", - "cmd/go-to-protobuf/protobuf", - "cmd/import-boss", "cmd/informer-gen", "cmd/informer-gen/args", "cmd/informer-gen/generators", "cmd/lister-gen", "cmd/lister-gen/args", "cmd/lister-gen/generators", - "cmd/register-gen", - "cmd/register-gen/args", - "cmd/register-gen/generators", - "cmd/set-gen", - "pkg/namer", "pkg/util", - "third_party/forked/golang/reflect", ] pruneopts = "T" - revision = "42c1e9a4dc7aa81b425ecccd00aaa2ee6cfaa943" + revision = "b1289fc74931d4b6b04bd1a259acfc88a2cb0a66" + version = "kubernetes-1.12.6" [[projects]] - branch = "master" - digest = "1:042764790129d530ed2db8ab2d593beb7ecd7dee51136122912101e55b601211" + digest = "1:39912eb5f8eaf46486faae0839586c27c93423e552f76875defa048f52c15c15" name = "k8s.io/gengo" packages = [ "args", "examples/deepcopy-gen/generators", "examples/defaulter-gen/generators", - "examples/import-boss/generators", - "examples/set-gen/generators", "examples/set-gen/sets", "generator", "namer", @@ -691,12 +1044,12 @@ revision = "e17681d19d3ac4837a019ece36c2a0ec31ffe985" [[projects]] - digest = "1:43099cc4ed575c40f80277c7ba7168df37d0c663bdc4f541325430bd175cce8a" + digest = "1:c263611800c3a97991dbcf9d3bc4de390f6224aaa8ca0a7226a9d734f65a416a" name = "k8s.io/klog" packages = ["."] pruneopts = "NUT" - revision = "d98d8acdac006fb39831f1b25640813fef9c314f" - version = "v0.3.3" + revision = "71442cd4037d612096940ceb0f3fec3f7fff66e0" + version = "v0.2.0" [[projects]] branch = "master" @@ -704,37 +1057,29 @@ name = "k8s.io/kube-openapi" packages = ["pkg/util/proto"] pruneopts = "NUT" - revision = "db7b694dc208eead64d38030265f702db593fcf2" - -[[projects]] - branch = "master" - digest = "1:03ce1a3e8094febc17dfaf3bfc7a445fb964e4fa96d3443822505dfc8567b648" - name = "k8s.io/utils" - packages = [ - "buffer", - "integer", - "trace", - ] - pruneopts = "NUT" - revision = "c55fbcfc754a5b2ec2fbae8fb9dcac36bdba6a12" - -[[projects]] - digest = "1:8730e0150dfb2b7e173890c8b9868e7a273082ef8e39f4940e3506a481cf895c" - name = "sigs.k8s.io/yaml" - packages = ["."] - pruneopts = "NUT" - revision = "fd68e9863619f6ec2fdd8625fe1f02e7c877e480" - version = "v1.1.0" + revision = "33be087ad058f99c78e067996202b60230737e49" [solve-meta] analyzer-name = "dep" analyzer-version = 1 input-imports = [ + "github.com/knative/caching/pkg/client/clientset/versioned", "github.com/knative/pkg/apis", - "github.com/knative/test-infra/scripts", + "github.com/knative/pkg/codegen/cmd/injection-gen", + "github.com/knative/pkg/configmap", + "github.com/knative/pkg/controller", + "github.com/knative/pkg/injection", + "github.com/knative/pkg/injection/clients/kubeclient", + 
"github.com/knative/pkg/injection/sharedmain", + "github.com/knative/pkg/logging", + "github.com/knative/pkg/logging/logkey", "github.com/knative/test-infra/tools/dep-collector", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1", + "github.com/tektoncd/pipeline/pkg/client/clientset/versioned", + "github.com/tektoncd/pipeline/pkg/client/injection/client", "github.com/tektoncd/plumbing/scripts", + "go.uber.org/zap", + "k8s.io/api/core/v1", "k8s.io/apimachinery/pkg/api/errors", "k8s.io/apimachinery/pkg/apis/meta/v1", "k8s.io/apimachinery/pkg/labels", @@ -746,22 +1091,19 @@ "k8s.io/apimachinery/pkg/watch", "k8s.io/client-go/discovery", "k8s.io/client-go/discovery/fake", + "k8s.io/client-go/kubernetes", + "k8s.io/client-go/kubernetes/scheme", + "k8s.io/client-go/kubernetes/typed/core/v1", "k8s.io/client-go/rest", "k8s.io/client-go/testing", "k8s.io/client-go/tools/cache", + "k8s.io/client-go/tools/record", "k8s.io/client-go/util/flowcontrol", - "k8s.io/code-generator", "k8s.io/code-generator/cmd/client-gen", "k8s.io/code-generator/cmd/deepcopy-gen", "k8s.io/code-generator/cmd/defaulter-gen", "k8s.io/code-generator/cmd/informer-gen", "k8s.io/code-generator/cmd/lister-gen", - "k8s.io/gengo/args", - "k8s.io/gengo/examples/deepcopy-gen/generators", - "k8s.io/gengo/examples/defaulter-gen/generators", - "k8s.io/gengo/generator", - "k8s.io/gengo/namer", - "k8s.io/gengo/types", ] solver-name = "gps-cdcl" solver-version = 1 diff --git a/Gopkg.toml b/Gopkg.toml index 130a644dd..083b2f8f2 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -2,36 +2,28 @@ # for detailed Gopkg.toml documentation. required = [ - "github.com/tektoncd/plumbing/scripts", - "github.com/knative/test-infra/scripts", - "github.com/knative/test-infra/tools/dep-collector", - "k8s.io/gengo/examples/defaulter-gen/generators", - "k8s.io/gengo/args", - "k8s.io/gengo/generator", - "k8s.io/gengo/namer", - "k8s.io/gengo/types", - "k8s.io/gengo/examples/deepcopy-gen/generators", - "k8s.io/code-generator", "k8s.io/code-generator/cmd/deepcopy-gen", "k8s.io/code-generator/cmd/defaulter-gen", "k8s.io/code-generator/cmd/client-gen", "k8s.io/code-generator/cmd/lister-gen", "k8s.io/code-generator/cmd/informer-gen", + "github.com/knative/pkg/codegen/cmd/injection-gen", + "github.com/tektoncd/plumbing/scripts", + "github.com/knative/test-infra/tools/dep-collector", ] [[constraint]] - name = "github.com/tektoncd/plumbing" - # HEAD as of 2019-06-24 - revision = "a51e87c5261fdb718470c077c155e070aca690a8" + name = "github.com/tektoncd/pipeline" + # HEAD as of 2019-07-15 + revision = "0ee3b1491aa5b38d48b4547eeb4c87d11852b3a3" -[[constraint]] - name = "github.com/knative/test-infra" - # HEAD as of 2019-06-25 - revision = "69af8af1d3fec861ada88efc72409b3467b0588d" +[[override]] + name = "k8s.io/klog" + version = "v0.2.0" -[[constraint]] - name = "github.com/tektoncd/pipeline" - version = "0.5.0" +[[override]] + name = "k8s.io/gengo" + revision = "e17681d19d3ac4837a019ece36c2a0ec31ffe985" [[override]] name = "gopkg.in/yaml.v2" @@ -39,35 +31,85 @@ required = [ [[override]] name = "k8s.io/api" - version = "kubernetes-1.15.0" + version = "kubernetes-1.12.6" [[override]] name = "k8s.io/apimachinery" - version = "kubernetes-1.15.0" + version = "kubernetes-1.12.6" + +[[override]] + name = "k8s.io/code-generator" + version = "kubernetes-1.12.6" [[override]] name = "k8s.io/client-go" - version = "kubernetes-1.15.0" + version = "kubernetes-1.12.6" + +[[override]] + name = "github.com/golang/protobuf" + # Lock the version of protobuf to keep things building. 
+ revision = "aa810b61a9c79d51363740d207bb46cf8e620ed5" + +[[constraint]] + name = "github.com/knative/caching" + # HEAD as of 2019-03-21 + revision = "3fc06fd3c9880a9ebb5c401f4b20cf6666cc7bc0" + +[[constraint]] + name = "github.com/tektoncd/plumbing" + # HEAD as of 2019-06-24 + revision = "a51e87c5261fdb718470c077c155e070aca690a8" + +[[constraint]] + name = "github.com/knative/test-infra" + # HEAD as of 2019-06-25 + revision = "69af8af1d3fec861ada88efc72409b3467b0588d" [[override]] name = "github.com/knative/pkg" # HEAD as of 2019-06-14 💖 revision = "68737b1b4e03d9a888e89ee2a44714a56eefd539" +[[override]] + name = "go.uber.org/zap" + revision = "67bc79d13d155c02fd008f721863ff8cc5f30659" + +[[override]] + name = "contrib.go.opencensus.io/exporter/stackdriver" + # HEAD as of 2019-02-11 + # Needed because this includes a fix to support Stackdriver built-in metrics + revision = "c06c82c832edca4eaf7b0241bd655560a1be0346" + +[[override]] + name = "google.golang.org/genproto" + # Lock the version of proto package to v2 to keep things building. + revision = "fc2db5cae922db113989ef87b5b859cc8a179f59" + +[[override]] + name = "github.com/census-instrumentation/opencensus-proto" + # Lock the version of proto package to v2 to keep things building. + version = "v0.1.0" + [prune] go-tests = true unused-packages = true non-go = true [[prune.project]] - name = "github.com/tektoncd/plumbing" + name = "k8s.io/code-generator" + unused-packages = false non-go = false [[prune.project]] - name = "github.com/knative/test-infra" + name = "github.com/knative/caching" + unused-packages = false non-go = false [[prune.project]] - name = "k8s.io/code-generator" + name = "github.com/tektoncd/plumbing" + non-go = false + + [[prune.project]] + name = "github.com/knative/pkg" unused-packages = false non-go = false diff --git a/cmd/controller/kodata/LICENSE b/cmd/controller/kodata/LICENSE new file mode 120000 index 000000000..5853aaea5 --- /dev/null +++ b/cmd/controller/kodata/LICENSE @@ -0,0 +1 @@ +../../../LICENSE \ No newline at end of file diff --git a/cmd/controller/kodata/VENDOR-LICENSE b/cmd/controller/kodata/VENDOR-LICENSE new file mode 120000 index 000000000..3cc897645 --- /dev/null +++ b/cmd/controller/kodata/VENDOR-LICENSE @@ -0,0 +1 @@ +../../../third_party/VENDOR-LICENSE \ No newline at end of file diff --git a/cmd/controller/main.go b/cmd/controller/main.go new file mode 100644 index 000000000..29dcb173b --- /dev/null +++ b/cmd/controller/main.go @@ -0,0 +1,33 @@ +/* +Copyright 2019 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+package main
+
+import (
+	"github.com/knative/pkg/injection/sharedmain"
+
+	"github.com/tektoncd/triggers/pkg/reconciler/v1alpha1/eventlistener"
+)
+
+const (
+	// ControllerLogKey is the name of the logger for the controller cmd
+	ControllerLogKey = "controller"
+)
+
+func main() {
+	sharedmain.Main(ControllerLogKey,
+		eventlistener.NewController,
+	)
+}
diff --git a/config/config-logging.yaml b/config/config-logging.yaml
new file mode 100644
index 000000000..ccaf721f1
--- /dev/null
+++ b/config/config-logging.yaml
@@ -0,0 +1,50 @@
+# Copyright 2019 Tekton Authors LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: config-logging-triggers
+  namespace: tekton-pipelines
+data:
+  # Common configuration for all Knative codebases
+  zap-logger-config: |
+    {
+      "level": "info",
+      "development": false,
+      "sampling": {
+        "initial": 100,
+        "thereafter": 100
+      },
+      "outputPaths": ["stdout"],
+      "errorOutputPaths": ["stderr"],
+      "encoding": "json",
+      "encoderConfig": {
+        "timeKey": "",
+        "levelKey": "level",
+        "nameKey": "logger",
+        "callerKey": "caller",
+        "messageKey": "msg",
+        "stacktraceKey": "stacktrace",
+        "lineEnding": "",
+        "levelEncoder": "",
+        "timeEncoder": "",
+        "durationEncoder": "",
+        "callerEncoder": ""
+      }
+    }
+
+  # Log level overrides
+  loglevel.controller: "info"
+  loglevel.webhook: "info"
diff --git a/config/config-observability.yaml b/config/config-observability.yaml
new file mode 100644
index 000000000..5d8d49064
--- /dev/null
+++ b/config/config-observability.yaml
@@ -0,0 +1,53 @@
+# Copyright 2019 The Tekton Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: config-observability
+  namespace: tekton-pipelines
+
+data:
+  _example: |
+    ################################
+    #                              #
+    #    EXAMPLE CONFIGURATION     #
+    #                              #
+    ################################
+
+    # This block is not actually functional configuration,
+    # but serves to illustrate the available configuration
+    # options and document them in a way that is accessible
+    # to users that `kubectl edit` this config map.
+    #
+    # These sample configuration options may be copied out of
+    # this example block and unindented to be in the data block
+    # to actually change the configuration.
+
+    # metrics.backend-destination field specifies the system metrics destination.
+    # It supports either prometheus (the default) or stackdriver.
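+    # (With the prometheus backend, the controller serves metrics on its
+    # metrics port; the tekton-triggers-controller Service added in this
+    # change exposes that port as 9090.)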
+ # Note: Using stackdriver will incur additional charges + metrics.backend-destination: prometheus + + # metrics.stackdriver-project-id field specifies the stackdriver project ID. This + # field is optional. When running on GCE, application default credentials will be + # used if this field is not provided. + metrics.stackdriver-project-id: "" + + # metrics.allow-stackdriver-custom-metrics indicates whether it is allowed to send metrics to + # Stackdriver using "global" resource type and custom metric type if the + # metrics are not supported by "knative_revision" resource type. Setting this + # flag to "true" could cause extra Stackdriver charge. + # If metrics.backend-destination is not Stackdriver, this is ignored. + metrics.allow-stackdriver-custom-metrics: "false" diff --git a/config/controller-service.yaml b/config/controller-service.yaml new file mode 100644 index 000000000..e3bd9c748 --- /dev/null +++ b/config/controller-service.yaml @@ -0,0 +1,29 @@ +# Copyright 2019 Tekton Authors LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: v1 +kind: Service +metadata: + labels: + app: tekton-triggers-controller + name: tekton-triggers-controller + namespace: tekton-pipelines +spec: + ports: + - name: metrics + port: 9090 + protocol: TCP + targetPort: 9090 + selector: + app: tekton-triggers-controller diff --git a/config/controller.yaml b/config/controller.yaml new file mode 100644 index 000000000..966779810 --- /dev/null +++ b/config/controller.yaml @@ -0,0 +1,54 @@ +# Copyright 2019 The Tekton Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
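+
+# Note: the apps/v1beta1 Deployment API is deprecated (removed in
+# Kubernetes 1.16); newer clusters would need apiVersion: apps/v1 together
+# with an explicit spec.selector matching the pod template labels.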
+ +apiVersion: apps/v1beta1 +kind: Deployment +metadata: + name: tekton-triggers-controller + namespace: tekton-pipelines +spec: + replicas: 1 + template: + metadata: + annotations: + cluster-autoscaler.kubernetes.io/safe-to-evict: "false" + labels: + app: tekton-triggers-controller + spec: + serviceAccountName: tekton-triggers-controller + containers: + - name: tekton-triggers-controller + image: github.com/tektoncd/triggers/cmd/controller + args: [ + "-logtostderr", + "-stderrthreshold", "INFO", + ] + volumeMounts: + - name: config-logging + mountPath: /etc/config-logging + env: + - name: SYSTEM_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: CONFIG_LOGGING_NAME + value: config-logging-triggers + - name: CONFIG_OBSERVABILITY_NAME + value: config-observability + - name: METRICS_DOMAIN + value: tekton.dev/triggers + volumes: + - name: config-logging + configMap: + name: config-logging-triggers \ No newline at end of file diff --git a/hack/update-codegen.sh b/hack/update-codegen.sh index f5adb46ac..8283d0a7c 100755 --- a/hack/update-codegen.sh +++ b/hack/update-codegen.sh @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# Copyright 2018 The Knative Authors +# Copyright 2019 The Tekton Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -18,10 +18,12 @@ set -o errexit set -o nounset set -o pipefail -source $(dirname $0)/../vendor/github.com/knative/test-infra/scripts/library.sh +source $(dirname $0)/../vendor/github.com/tektoncd/plumbing/scripts/library.sh CODEGEN_PKG=${CODEGEN_PKG:-$(cd ${REPO_ROOT_DIR}; ls -d -1 ./vendor/k8s.io/code-generator 2>/dev/null || echo ../../../k8s.io/code-generator)} +KNATIVE_CODEGEN_PKG=${KNATIVE_CODEGEN_PKG:-$(cd ${REPO_ROOT_DIR}; ls -d -1 ./vendor/github.com/knative/pkg 2>/dev/null || echo ../pkg)} + # generate the code with: # --output-base because this script should also be able to run inside the vendor dir of # k8s.io/kubernetes. The output-base is needed for the generators to output into the vendor dir @@ -31,5 +33,11 @@ ${CODEGEN_PKG}/generate-groups.sh "deepcopy,client,informer,lister" \ triggers:v1alpha1 \ --go-header-file ${REPO_ROOT_DIR}/hack/boilerplate/boilerplate.go.txt +# Knative Injection +${KNATIVE_CODEGEN_PKG}/hack/generate-knative.sh "injection" \ + github.com/tektoncd/triggers/pkg/client github.com/tektoncd/triggers/pkg/apis \ + "triggers:v1alpha1" \ + --go-header-file ${REPO_ROOT_DIR}/hack/boilerplate/boilerplate.go.txt + # Make sure our dependencies are up-to-date ${REPO_ROOT_DIR}/hack/update-deps.sh diff --git a/hack/update-deps.sh b/hack/update-deps.sh index 835857066..b7d6b1f88 100755 --- a/hack/update-deps.sh +++ b/hack/update-deps.sh @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# Copyright 2018 The Knative Authors +# Copyright 2019 The Tekton Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -18,7 +18,7 @@ set -o errexit set -o nounset set -o pipefail -source $(dirname $0)/../vendor/github.com/knative/test-infra/scripts/library.sh +source $(dirname $0)/../vendor/github.com/tektoncd/plumbing/scripts/library.sh cd ${REPO_ROOT_DIR} diff --git a/hack/verify-codegen.sh b/hack/verify-codegen.sh index 1d642f4c1..3f2499514 100755 --- a/hack/verify-codegen.sh +++ b/hack/verify-codegen.sh @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# Copyright 2018 The Knative Authors +# Copyright 2019 The Tekton Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -18,7 +18,7 @@ set -o errexit set -o nounset set -o pipefail -source $(dirname $0)/../vendor/github.com/knative/test-infra/scripts/library.sh +source $(dirname $0)/../vendor/github.com/tektoncd/plumbing/scripts/library.sh readonly TMP_DIFFROOT="$(mktemp -d ${REPO_ROOT_DIR}/tmpdiffroot.XXXXXX)" diff --git a/pkg/apis/triggers/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/triggers/v1alpha1/zz_generated.deepcopy.go index 5859ed7ea..1f95608b4 100644 --- a/pkg/apis/triggers/v1alpha1/zz_generated.deepcopy.go +++ b/pkg/apis/triggers/v1alpha1/zz_generated.deepcopy.go @@ -57,7 +57,7 @@ func (in *EventListener) DeepCopyObject() runtime.Object { func (in *EventListenerList) DeepCopyInto(out *EventListenerList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]EventListener, len(*in)) @@ -162,7 +162,7 @@ func (in *TriggerBinding) DeepCopyObject() runtime.Object { func (in *TriggerBindingList) DeepCopyInto(out *TriggerBindingList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]TriggerBinding, len(*in)) @@ -262,7 +262,7 @@ func (in *TriggerTemplate) DeepCopyObject() runtime.Object { func (in *TriggerTemplateList) DeepCopyInto(out *TriggerTemplateList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]TriggerTemplate, len(*in)) diff --git a/pkg/client/clientset/versioned/clientset.go b/pkg/client/clientset/versioned/clientset.go index 5f85d4f40..fcc8196e5 100644 --- a/pkg/client/clientset/versioned/clientset.go +++ b/pkg/client/clientset/versioned/clientset.go @@ -28,6 +28,8 @@ import ( type Interface interface { Discovery() discovery.DiscoveryInterface TriggersV1alpha1() triggersv1alpha1.TriggersV1alpha1Interface + // Deprecated: please explicitly pick a version if possible. + Triggers() triggersv1alpha1.TriggersV1alpha1Interface } // Clientset contains the clients for groups. Each group has exactly one @@ -42,6 +44,12 @@ func (c *Clientset) TriggersV1alpha1() triggersv1alpha1.TriggersV1alpha1Interfac return c.triggersV1alpha1 } +// Deprecated: Triggers retrieves the default version of TriggersClient. +// Please explicitly pick a version. 
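+//
+// A hypothetical call through the preferred versioned accessor
+// (namespace and name below are illustrative):
+//
+//	el, err := cs.TriggersV1alpha1().EventListeners("default").Get("my-listener", metav1.GetOptions{})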
+func (c *Clientset) Triggers() triggersv1alpha1.TriggersV1alpha1Interface { + return c.triggersV1alpha1 +} + // Discovery retrieves the DiscoveryClient func (c *Clientset) Discovery() discovery.DiscoveryInterface { if c == nil { diff --git a/pkg/client/clientset/versioned/fake/clientset_generated.go b/pkg/client/clientset/versioned/fake/clientset_generated.go index bca7d00a7..8c7666ea4 100644 --- a/pkg/client/clientset/versioned/fake/clientset_generated.go +++ b/pkg/client/clientset/versioned/fake/clientset_generated.go @@ -41,7 +41,7 @@ func NewSimpleClientset(objects ...runtime.Object) *Clientset { } } - cs := &Clientset{tracker: o} + cs := &Clientset{} cs.discovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake} cs.AddReactor("*", "*", testing.ObjectReaction(o)) cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { @@ -63,20 +63,20 @@ func NewSimpleClientset(objects ...runtime.Object) *Clientset { type Clientset struct { testing.Fake discovery *fakediscovery.FakeDiscovery - tracker testing.ObjectTracker } func (c *Clientset) Discovery() discovery.DiscoveryInterface { return c.discovery } -func (c *Clientset) Tracker() testing.ObjectTracker { - return c.tracker -} - var _ clientset.Interface = &Clientset{} // TriggersV1alpha1 retrieves the TriggersV1alpha1Client func (c *Clientset) TriggersV1alpha1() triggersv1alpha1.TriggersV1alpha1Interface { return &faketriggersv1alpha1.FakeTriggersV1alpha1{Fake: &c.Fake} } + +// Triggers retrieves the TriggersV1alpha1Client +func (c *Clientset) Triggers() triggersv1alpha1.TriggersV1alpha1Interface { + return &faketriggersv1alpha1.FakeTriggersV1alpha1{Fake: &c.Fake} +} diff --git a/pkg/client/clientset/versioned/typed/triggers/v1alpha1/eventlistener.go b/pkg/client/clientset/versioned/typed/triggers/v1alpha1/eventlistener.go index 535ed25d0..b59ea7374 100644 --- a/pkg/client/clientset/versioned/typed/triggers/v1alpha1/eventlistener.go +++ b/pkg/client/clientset/versioned/typed/triggers/v1alpha1/eventlistener.go @@ -19,8 +19,6 @@ limitations under the License. package v1alpha1 import ( - "time" - v1alpha1 "github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1" scheme "github.com/tektoncd/triggers/pkg/client/clientset/versioned/scheme" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -78,16 +76,11 @@ func (c *eventListeners) Get(name string, options v1.GetOptions) (result *v1alph // List takes label and field selectors, and returns the list of EventListeners that match those selectors. func (c *eventListeners) List(opts v1.ListOptions) (result *v1alpha1.EventListenerList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1alpha1.EventListenerList{} err = c.client.Get(). Namespace(c.ns). Resource("eventlisteners"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -95,16 +88,11 @@ func (c *eventListeners) List(opts v1.ListOptions) (result *v1alpha1.EventListen // Watch returns a watch.Interface that watches the requested eventListeners. func (c *eventListeners) Watch(opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("eventlisteners"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). 
Watch() } @@ -162,15 +150,10 @@ func (c *eventListeners) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *eventListeners) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Namespace(c.ns). Resource("eventlisteners"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/pkg/client/clientset/versioned/typed/triggers/v1alpha1/fake/fake_eventlistener.go b/pkg/client/clientset/versioned/typed/triggers/v1alpha1/fake/fake_eventlistener.go index d49cd2ca4..b010c644b 100644 --- a/pkg/client/clientset/versioned/typed/triggers/v1alpha1/fake/fake_eventlistener.go +++ b/pkg/client/clientset/versioned/typed/triggers/v1alpha1/fake/fake_eventlistener.go @@ -131,7 +131,7 @@ func (c *FakeEventListeners) DeleteCollection(options *v1.DeleteOptions, listOpt // Patch applies the patch and returns the patched eventListener. func (c *FakeEventListeners) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.EventListener, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(eventlistenersResource, c.ns, name, pt, data, subresources...), &v1alpha1.EventListener{}) + Invokes(testing.NewPatchSubresourceAction(eventlistenersResource, c.ns, name, data, subresources...), &v1alpha1.EventListener{}) if obj == nil { return nil, err diff --git a/pkg/client/clientset/versioned/typed/triggers/v1alpha1/fake/fake_triggerbinding.go b/pkg/client/clientset/versioned/typed/triggers/v1alpha1/fake/fake_triggerbinding.go index 58ea37fe7..986e26a4d 100644 --- a/pkg/client/clientset/versioned/typed/triggers/v1alpha1/fake/fake_triggerbinding.go +++ b/pkg/client/clientset/versioned/typed/triggers/v1alpha1/fake/fake_triggerbinding.go @@ -131,7 +131,7 @@ func (c *FakeTriggerBindings) DeleteCollection(options *v1.DeleteOptions, listOp // Patch applies the patch and returns the patched triggerBinding. func (c *FakeTriggerBindings) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.TriggerBinding, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(triggerbindingsResource, c.ns, name, pt, data, subresources...), &v1alpha1.TriggerBinding{}) + Invokes(testing.NewPatchSubresourceAction(triggerbindingsResource, c.ns, name, data, subresources...), &v1alpha1.TriggerBinding{}) if obj == nil { return nil, err diff --git a/pkg/client/clientset/versioned/typed/triggers/v1alpha1/fake/fake_triggertemplate.go b/pkg/client/clientset/versioned/typed/triggers/v1alpha1/fake/fake_triggertemplate.go index ce625b248..5b92146bd 100644 --- a/pkg/client/clientset/versioned/typed/triggers/v1alpha1/fake/fake_triggertemplate.go +++ b/pkg/client/clientset/versioned/typed/triggers/v1alpha1/fake/fake_triggertemplate.go @@ -131,7 +131,7 @@ func (c *FakeTriggerTemplates) DeleteCollection(options *v1.DeleteOptions, listO // Patch applies the patch and returns the patched triggerTemplate. func (c *FakeTriggerTemplates) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.TriggerTemplate, err error) { obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(triggertemplatesResource, c.ns, name, pt, data, subresources...), &v1alpha1.TriggerTemplate{}) + Invokes(testing.NewPatchSubresourceAction(triggertemplatesResource, c.ns, name, data, subresources...), &v1alpha1.TriggerTemplate{}) if obj == nil { return nil, err diff --git a/pkg/client/clientset/versioned/typed/triggers/v1alpha1/triggerbinding.go b/pkg/client/clientset/versioned/typed/triggers/v1alpha1/triggerbinding.go index 49fec7e01..7dd3e92c2 100644 --- a/pkg/client/clientset/versioned/typed/triggers/v1alpha1/triggerbinding.go +++ b/pkg/client/clientset/versioned/typed/triggers/v1alpha1/triggerbinding.go @@ -19,8 +19,6 @@ limitations under the License. package v1alpha1 import ( - "time" - v1alpha1 "github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1" scheme "github.com/tektoncd/triggers/pkg/client/clientset/versioned/scheme" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -78,16 +76,11 @@ func (c *triggerBindings) Get(name string, options v1.GetOptions) (result *v1alp // List takes label and field selectors, and returns the list of TriggerBindings that match those selectors. func (c *triggerBindings) List(opts v1.ListOptions) (result *v1alpha1.TriggerBindingList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1alpha1.TriggerBindingList{} err = c.client.Get(). Namespace(c.ns). Resource("triggerbindings"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -95,16 +88,11 @@ func (c *triggerBindings) List(opts v1.ListOptions) (result *v1alpha1.TriggerBin // Watch returns a watch.Interface that watches the requested triggerBindings. func (c *triggerBindings) Watch(opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("triggerbindings"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -162,15 +150,10 @@ func (c *triggerBindings) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *triggerBindings) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Namespace(c.ns). Resource("triggerbindings"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). 
Error() diff --git a/pkg/client/clientset/versioned/typed/triggers/v1alpha1/triggers_client.go b/pkg/client/clientset/versioned/typed/triggers/v1alpha1/triggers_client.go index cfe7eaeed..c3532ce72 100644 --- a/pkg/client/clientset/versioned/typed/triggers/v1alpha1/triggers_client.go +++ b/pkg/client/clientset/versioned/typed/triggers/v1alpha1/triggers_client.go @@ -21,6 +21,7 @@ package v1alpha1 import ( v1alpha1 "github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1" "github.com/tektoncd/triggers/pkg/client/clientset/versioned/scheme" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" rest "k8s.io/client-go/rest" ) @@ -80,7 +81,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1alpha1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/pkg/client/clientset/versioned/typed/triggers/v1alpha1/triggertemplate.go b/pkg/client/clientset/versioned/typed/triggers/v1alpha1/triggertemplate.go index b5ad84b6a..1e7ecb3a3 100644 --- a/pkg/client/clientset/versioned/typed/triggers/v1alpha1/triggertemplate.go +++ b/pkg/client/clientset/versioned/typed/triggers/v1alpha1/triggertemplate.go @@ -19,8 +19,6 @@ limitations under the License. package v1alpha1 import ( - "time" - v1alpha1 "github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1" scheme "github.com/tektoncd/triggers/pkg/client/clientset/versioned/scheme" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -78,16 +76,11 @@ func (c *triggerTemplates) Get(name string, options v1.GetOptions) (result *v1al // List takes label and field selectors, and returns the list of TriggerTemplates that match those selectors. func (c *triggerTemplates) List(opts v1.ListOptions) (result *v1alpha1.TriggerTemplateList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1alpha1.TriggerTemplateList{} err = c.client.Get(). Namespace(c.ns). Resource("triggertemplates"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -95,16 +88,11 @@ func (c *triggerTemplates) List(opts v1.ListOptions) (result *v1alpha1.TriggerTe // Watch returns a watch.Interface that watches the requested triggerTemplates. func (c *triggerTemplates) Watch(opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("triggertemplates"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -162,15 +150,10 @@ func (c *triggerTemplates) Delete(name string, options *v1.DeleteOptions) error // DeleteCollection deletes a collection of objects. func (c *triggerTemplates) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Namespace(c.ns). Resource("triggertemplates"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). 
Error() diff --git a/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go b/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go index 140fa03a2..a6ee3c2fc 100644 --- a/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go +++ b/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go @@ -27,7 +27,6 @@ import ( cache "k8s.io/client-go/tools/cache" ) -// NewInformerFunc takes versioned.Interface and time.Duration to return a SharedIndexInformer. type NewInformerFunc func(versioned.Interface, time.Duration) cache.SharedIndexInformer // SharedInformerFactory a small interface to allow for adding an informer without an import cycle @@ -36,5 +35,4 @@ type SharedInformerFactory interface { InformerFor(obj runtime.Object, newFunc NewInformerFunc) cache.SharedIndexInformer } -// TweakListOptionsFunc is a function that transforms a v1.ListOptions. type TweakListOptionsFunc func(*v1.ListOptions) diff --git a/pkg/client/injection/client/client.go b/pkg/client/injection/client/client.go new file mode 100644 index 000000000..49bcd9760 --- /dev/null +++ b/pkg/client/injection/client/client.go @@ -0,0 +1,49 @@ +/* +Copyright 2019 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. + +package client + +import ( + "context" + + injection "github.com/knative/pkg/injection" + logging "github.com/knative/pkg/logging" + versioned "github.com/tektoncd/triggers/pkg/client/clientset/versioned" + rest "k8s.io/client-go/rest" +) + +func init() { + injection.Default.RegisterClient(withClient) +} + +// Key is used as the key for associating information with a context.Context. +type Key struct{} + +func withClient(ctx context.Context, cfg *rest.Config) context.Context { + return context.WithValue(ctx, Key{}, versioned.NewForConfigOrDie(cfg)) +} + +// Get extracts the versioned.Interface client from the context. +func Get(ctx context.Context) versioned.Interface { + untyped := ctx.Value(Key{}) + if untyped == nil { + logging.FromContext(ctx).Fatalf( + "Unable to fetch %T from context.", (versioned.Interface)(nil)) + } + return untyped.(versioned.Interface) +} diff --git a/pkg/client/injection/client/fake/fake.go b/pkg/client/injection/client/fake/fake.go new file mode 100644 index 000000000..f00cdeaec --- /dev/null +++ b/pkg/client/injection/client/fake/fake.go @@ -0,0 +1,54 @@ +/* +Copyright 2019 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Code generated by injection-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + injection "github.com/knative/pkg/injection" + logging "github.com/knative/pkg/logging" + fake "github.com/tektoncd/triggers/pkg/client/clientset/versioned/fake" + client "github.com/tektoncd/triggers/pkg/client/injection/client" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/rest" +) + +func init() { + injection.Fake.RegisterClient(withClient) +} + +func withClient(ctx context.Context, cfg *rest.Config) context.Context { + ctx, _ = With(ctx) + return ctx +} + +func With(ctx context.Context, objects ...runtime.Object) (context.Context, *fake.Clientset) { + cs := fake.NewSimpleClientset(objects...) + return context.WithValue(ctx, client.Key{}, cs), cs +} + +// Get extracts the Kubernetes client from the context. +func Get(ctx context.Context) *fake.Clientset { + untyped := ctx.Value(client.Key{}) + if untyped == nil { + logging.FromContext(ctx).Fatalf( + "Unable to fetch %T from context.", (*fake.Clientset)(nil)) + } + return untyped.(*fake.Clientset) +} diff --git a/pkg/client/injection/informers/triggers/factory/fake/fake.go b/pkg/client/injection/informers/triggers/factory/fake/fake.go new file mode 100644 index 000000000..97dd8762c --- /dev/null +++ b/pkg/client/injection/informers/triggers/factory/fake/fake.go @@ -0,0 +1,41 @@ +/* +Copyright 2019 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + controller "github.com/knative/pkg/controller" + injection "github.com/knative/pkg/injection" + externalversions "github.com/tektoncd/triggers/pkg/client/informers/externalversions" + fake "github.com/tektoncd/triggers/pkg/client/injection/client/fake" + factory "github.com/tektoncd/triggers/pkg/client/injection/informers/triggers/factory" +) + +var Get = factory.Get + +func init() { + injection.Fake.RegisterInformerFactory(withInformerFactory) +} + +func withInformerFactory(ctx context.Context) context.Context { + c := fake.Get(ctx) + return context.WithValue(ctx, factory.Key{}, + externalversions.NewSharedInformerFactory(c, controller.GetResyncPeriod(ctx))) +} diff --git a/pkg/client/injection/informers/triggers/factory/triggersfactory.go b/pkg/client/injection/informers/triggers/factory/triggersfactory.go new file mode 100644 index 000000000..89bd6ec76 --- /dev/null +++ b/pkg/client/injection/informers/triggers/factory/triggersfactory.go @@ -0,0 +1,52 @@ +/* +Copyright 2019 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. + +package triggersfactory + +import ( + "context" + + controller "github.com/knative/pkg/controller" + injection "github.com/knative/pkg/injection" + logging "github.com/knative/pkg/logging" + externalversions "github.com/tektoncd/triggers/pkg/client/informers/externalversions" + client "github.com/tektoncd/triggers/pkg/client/injection/client" +) + +func init() { + injection.Default.RegisterInformerFactory(withInformerFactory) +} + +// Key is used as the key for associating information with a context.Context. +type Key struct{} + +func withInformerFactory(ctx context.Context) context.Context { + c := client.Get(ctx) + return context.WithValue(ctx, Key{}, + externalversions.NewSharedInformerFactory(c, controller.GetResyncPeriod(ctx))) +} + +// Get extracts the InformerFactory from the context. +func Get(ctx context.Context) externalversions.SharedInformerFactory { + untyped := ctx.Value(Key{}) + if untyped == nil { + logging.FromContext(ctx).Fatalf( + "Unable to fetch %T from context.", (externalversions.SharedInformerFactory)(nil)) + } + return untyped.(externalversions.SharedInformerFactory) +} diff --git a/pkg/client/injection/informers/triggers/v1alpha1/eventlistener/eventlistener.go b/pkg/client/injection/informers/triggers/v1alpha1/eventlistener/eventlistener.go new file mode 100644 index 000000000..0c6120088 --- /dev/null +++ b/pkg/client/injection/informers/triggers/v1alpha1/eventlistener/eventlistener.go @@ -0,0 +1,52 @@ +/* +Copyright 2019 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. + +package eventlistener + +import ( + "context" + + controller "github.com/knative/pkg/controller" + injection "github.com/knative/pkg/injection" + logging "github.com/knative/pkg/logging" + v1alpha1 "github.com/tektoncd/triggers/pkg/client/informers/externalversions/triggers/v1alpha1" + factory "github.com/tektoncd/triggers/pkg/client/injection/informers/triggers/factory" +) + +func init() { + injection.Default.RegisterInformer(withInformer) +} + +// Key is used for associating the Informer inside the context.Context. +type Key struct{} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := factory.Get(ctx) + inf := f.Triggers().V1alpha1().EventListeners() + return context.WithValue(ctx, Key{}, inf), inf.Informer() +} + +// Get extracts the typed informer from the context. 
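+//
+// A minimal usage sketch, mirroring how the EventListener controller in
+// this change consumes it:
+//
+//	inf := eventlistener.Get(ctx)
+//	lister := inf.Lister()
+//	inf.Informer().AddEventHandler( /* event handlers */ )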
+func Get(ctx context.Context) v1alpha1.EventListenerInformer { + untyped := ctx.Value(Key{}) + if untyped == nil { + logging.FromContext(ctx).Fatalf( + "Unable to fetch %T from context.", (v1alpha1.EventListenerInformer)(nil)) + } + return untyped.(v1alpha1.EventListenerInformer) +} diff --git a/pkg/client/injection/informers/triggers/v1alpha1/eventlistener/fake/fake.go b/pkg/client/injection/informers/triggers/v1alpha1/eventlistener/fake/fake.go new file mode 100644 index 000000000..68a435a18 --- /dev/null +++ b/pkg/client/injection/informers/triggers/v1alpha1/eventlistener/fake/fake.go @@ -0,0 +1,40 @@ +/* +Copyright 2019 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + controller "github.com/knative/pkg/controller" + injection "github.com/knative/pkg/injection" + fake "github.com/tektoncd/triggers/pkg/client/injection/informers/triggers/factory/fake" + eventlistener "github.com/tektoncd/triggers/pkg/client/injection/informers/triggers/v1alpha1/eventlistener" +) + +var Get = eventlistener.Get + +func init() { + injection.Fake.RegisterInformer(withInformer) +} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := fake.Get(ctx) + inf := f.Triggers().V1alpha1().EventListeners() + return context.WithValue(ctx, eventlistener.Key{}, inf), inf.Informer() +} diff --git a/pkg/client/injection/informers/triggers/v1alpha1/triggerbinding/fake/fake.go b/pkg/client/injection/informers/triggers/v1alpha1/triggerbinding/fake/fake.go new file mode 100644 index 000000000..a1e41e80c --- /dev/null +++ b/pkg/client/injection/informers/triggers/v1alpha1/triggerbinding/fake/fake.go @@ -0,0 +1,40 @@ +/* +Copyright 2019 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. 
+ +package fake + +import ( + "context" + + controller "github.com/knative/pkg/controller" + injection "github.com/knative/pkg/injection" + fake "github.com/tektoncd/triggers/pkg/client/injection/informers/triggers/factory/fake" + triggerbinding "github.com/tektoncd/triggers/pkg/client/injection/informers/triggers/v1alpha1/triggerbinding" +) + +var Get = triggerbinding.Get + +func init() { + injection.Fake.RegisterInformer(withInformer) +} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := fake.Get(ctx) + inf := f.Triggers().V1alpha1().TriggerBindings() + return context.WithValue(ctx, triggerbinding.Key{}, inf), inf.Informer() +} diff --git a/pkg/client/injection/informers/triggers/v1alpha1/triggerbinding/triggerbinding.go b/pkg/client/injection/informers/triggers/v1alpha1/triggerbinding/triggerbinding.go new file mode 100644 index 000000000..e12f1034a --- /dev/null +++ b/pkg/client/injection/informers/triggers/v1alpha1/triggerbinding/triggerbinding.go @@ -0,0 +1,52 @@ +/* +Copyright 2019 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. + +package triggerbinding + +import ( + "context" + + controller "github.com/knative/pkg/controller" + injection "github.com/knative/pkg/injection" + logging "github.com/knative/pkg/logging" + v1alpha1 "github.com/tektoncd/triggers/pkg/client/informers/externalversions/triggers/v1alpha1" + factory "github.com/tektoncd/triggers/pkg/client/injection/informers/triggers/factory" +) + +func init() { + injection.Default.RegisterInformer(withInformer) +} + +// Key is used for associating the Informer inside the context.Context. +type Key struct{} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := factory.Get(ctx) + inf := f.Triggers().V1alpha1().TriggerBindings() + return context.WithValue(ctx, Key{}, inf), inf.Informer() +} + +// Get extracts the typed informer from the context. +func Get(ctx context.Context) v1alpha1.TriggerBindingInformer { + untyped := ctx.Value(Key{}) + if untyped == nil { + logging.FromContext(ctx).Fatalf( + "Unable to fetch %T from context.", (v1alpha1.TriggerBindingInformer)(nil)) + } + return untyped.(v1alpha1.TriggerBindingInformer) +} diff --git a/pkg/client/injection/informers/triggers/v1alpha1/triggertemplate/fake/fake.go b/pkg/client/injection/informers/triggers/v1alpha1/triggertemplate/fake/fake.go new file mode 100644 index 000000000..37f14a343 --- /dev/null +++ b/pkg/client/injection/informers/triggers/v1alpha1/triggertemplate/fake/fake.go @@ -0,0 +1,40 @@ +/* +Copyright 2019 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + controller "github.com/knative/pkg/controller" + injection "github.com/knative/pkg/injection" + fake "github.com/tektoncd/triggers/pkg/client/injection/informers/triggers/factory/fake" + triggertemplate "github.com/tektoncd/triggers/pkg/client/injection/informers/triggers/v1alpha1/triggertemplate" +) + +var Get = triggertemplate.Get + +func init() { + injection.Fake.RegisterInformer(withInformer) +} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := fake.Get(ctx) + inf := f.Triggers().V1alpha1().TriggerTemplates() + return context.WithValue(ctx, triggertemplate.Key{}, inf), inf.Informer() +} diff --git a/pkg/client/injection/informers/triggers/v1alpha1/triggertemplate/triggertemplate.go b/pkg/client/injection/informers/triggers/v1alpha1/triggertemplate/triggertemplate.go new file mode 100644 index 000000000..c16d87774 --- /dev/null +++ b/pkg/client/injection/informers/triggers/v1alpha1/triggertemplate/triggertemplate.go @@ -0,0 +1,52 @@ +/* +Copyright 2019 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. + +package triggertemplate + +import ( + "context" + + controller "github.com/knative/pkg/controller" + injection "github.com/knative/pkg/injection" + logging "github.com/knative/pkg/logging" + v1alpha1 "github.com/tektoncd/triggers/pkg/client/informers/externalversions/triggers/v1alpha1" + factory "github.com/tektoncd/triggers/pkg/client/injection/informers/triggers/factory" +) + +func init() { + injection.Default.RegisterInformer(withInformer) +} + +// Key is used for associating the Informer inside the context.Context. +type Key struct{} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := factory.Get(ctx) + inf := f.Triggers().V1alpha1().TriggerTemplates() + return context.WithValue(ctx, Key{}, inf), inf.Informer() +} + +// Get extracts the typed informer from the context. 
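+//
+// In tests, the sibling fake package registers an informer under the same
+// Key via injection.Fake, so this Get also resolves against a fake-backed
+// context.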
+func Get(ctx context.Context) v1alpha1.TriggerTemplateInformer { + untyped := ctx.Value(Key{}) + if untyped == nil { + logging.FromContext(ctx).Fatalf( + "Unable to fetch %T from context.", (v1alpha1.TriggerTemplateInformer)(nil)) + } + return untyped.(v1alpha1.TriggerTemplateInformer) +} diff --git a/pkg/reconciler/reconciler.go b/pkg/reconciler/reconciler.go new file mode 100644 index 000000000..9fc7c9008 --- /dev/null +++ b/pkg/reconciler/reconciler.go @@ -0,0 +1,129 @@ +/* +Copyright 2019 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package reconciler + +import ( + "time" + + cachingclientset "github.com/knative/caching/pkg/client/clientset/versioned" + "github.com/knative/pkg/configmap" + "github.com/knative/pkg/logging/logkey" + pipelineclientset "github.com/tektoncd/pipeline/pkg/client/clientset/versioned" + triggersclientset "github.com/tektoncd/triggers/pkg/client/clientset/versioned" + triggersScheme "github.com/tektoncd/triggers/pkg/client/clientset/versioned/scheme" + "go.uber.org/zap" + corev1 "k8s.io/api/core/v1" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/kubernetes/scheme" + typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1" + "k8s.io/client-go/tools/record" +) + +// Options defines the common reconciler options. +// We define this to reduce the boilerplate argument list when +// creating our controllers. +type Options struct { + KubeClientSet kubernetes.Interface + PipelineClientSet pipelineclientset.Interface + TriggersClientSet triggersclientset.Interface + CachingClientSet cachingclientset.Interface + + ConfigMapWatcher configmap.Watcher + Logger *zap.SugaredLogger + Recorder record.EventRecorder + + ResyncPeriod time.Duration +} + +// GetTrackerLease returns a multiple of the resync period to use as the +// duration for tracker leases. This attempts to ensure that resyncs happen to +// refresh leases frequently enough that we don't miss updates to tracked +// objects. +func (o Options) GetTrackerLease() time.Duration { + return o.ResyncPeriod * 3 +} + +// Base implements the core controller logic, given a Reconciler. +type Base struct { + // KubeClientSet allows us to talk to the k8s for core APIs + KubeClientSet kubernetes.Interface + + // PipelineClientSet allows us to configure pipeline objects + PipelineClientSet pipelineclientset.Interface + + // TriggersClientSet allows us to configure triggers objects + TriggersClientSet triggersclientset.Interface + + // CachingClientSet allows us to instantiate Image objects + CachingClientSet cachingclientset.Interface + + // ConfigMapWatcher allows us to watch for ConfigMap changes. + ConfigMapWatcher configmap.Watcher + + // Recorder is an event recorder for recording Event resources to the + // Kubernetes API. + Recorder record.EventRecorder + + // Sugared logger is easier to use but is not as performant as the + // raw logger. In performance critical paths, call logger.Desugar() + // and use the returned raw logger instead. 
In addition to the
+	// performance benefits, raw logger also preserves type-safety at
+	// the expense of slightly greater verbosity.
+	Logger *zap.SugaredLogger
+}
+
+// NewBase instantiates a new instance of Base implementing
+// the common & boilerplate code between our reconcilers.
+func NewBase(opt Options, controllerAgentName string) *Base {
+	// Enrich the logs with controller name
+	logger := opt.Logger.Named(controllerAgentName).With(zap.String(logkey.ControllerType, controllerAgentName))
+
+	// Use the recorder provided in options if present. Otherwise, create a new one.
+	recorder := opt.Recorder
+
+	if recorder == nil {
+		// Create event broadcaster
+		logger.Debug("Creating event broadcaster")
+		eventBroadcaster := record.NewBroadcaster()
+		eventBroadcaster.StartLogging(logger.Named("event-broadcaster").Infof)
+		eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: opt.KubeClientSet.CoreV1().Events("")})
+
+		recorder = eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: controllerAgentName})
+	} else {
+		logger.Debug("Using recorder from option")
+	}
+
+	base := &Base{
+		KubeClientSet:     opt.KubeClientSet,
+		PipelineClientSet: opt.PipelineClientSet,
+		TriggersClientSet: opt.TriggersClientSet,
+		CachingClientSet:  opt.CachingClientSet,
+		ConfigMapWatcher:  opt.ConfigMapWatcher,
+		Recorder:          recorder,
+		Logger:            logger,
+	}
+
+	return base
+}
+
+func init() {
+	// Add triggers types to the default Kubernetes Scheme so Events can be
+	// logged for triggers types.
+	if err := triggersScheme.AddToScheme(scheme.Scheme); err != nil {
+		panic(err)
+	}
+}
diff --git a/pkg/reconciler/v1alpha1/eventlistener/controller.go b/pkg/reconciler/v1alpha1/eventlistener/controller.go
new file mode 100644
index 000000000..45481729d
--- /dev/null
+++ b/pkg/reconciler/v1alpha1/eventlistener/controller.go
@@ -0,0 +1,68 @@
+/*
+Copyright 2019 The Tekton Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package eventlistener + +import ( + "context" + "time" + + "github.com/knative/pkg/configmap" + "github.com/knative/pkg/controller" + "github.com/knative/pkg/injection/clients/kubeclient" + "github.com/knative/pkg/logging" + pipelineclient "github.com/tektoncd/pipeline/pkg/client/injection/client" + triggersclient "github.com/tektoncd/triggers/pkg/client/injection/client" + eventlistenerinformer "github.com/tektoncd/triggers/pkg/client/injection/informers/triggers/v1alpha1/eventlistener" + "github.com/tektoncd/triggers/pkg/reconciler" + "k8s.io/client-go/tools/cache" +) + +const ( + resyncPeriod = 10 * time.Hour +) + +func NewController(ctx context.Context, cmw configmap.Watcher) *controller.Impl { + logger := logging.FromContext(ctx) + kubeclientset := kubeclient.Get(ctx) + pipelineclientset := pipelineclient.Get(ctx) + triggersclientset := triggersclient.Get(ctx) + eventListenerInformer := eventlistenerinformer.Get(ctx) + + opt := reconciler.Options{ + KubeClientSet: kubeclientset, + PipelineClientSet: pipelineclientset, + TriggersClientSet: triggersclientset, + ConfigMapWatcher: cmw, + Logger: logger, + ResyncPeriod: resyncPeriod, + } + + c := &Reconciler{ + Base: reconciler.NewBase(opt, eventListenerAgentName), + eventListenerLister: eventListenerInformer.Lister(), + } + impl := controller.NewImpl(c, c.Logger, eventListenerControllerName) + + c.Logger.Info("Setting up event handlers") + eventListenerInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: impl.Enqueue, + UpdateFunc: controller.PassNew(impl.Enqueue), + DeleteFunc: impl.Enqueue, + }) + + return impl +} diff --git a/pkg/reconciler/v1alpha1/eventlistener/eventlistener.go b/pkg/reconciler/v1alpha1/eventlistener/eventlistener.go new file mode 100644 index 000000000..e548b8535 --- /dev/null +++ b/pkg/reconciler/v1alpha1/eventlistener/eventlistener.go @@ -0,0 +1,73 @@ +/* +Copyright 2019 The Knative Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package eventlistener + +import ( + "context" + + "github.com/knative/pkg/controller" + listers "github.com/tektoncd/triggers/pkg/client/listers/triggers/v1alpha1" + "github.com/tektoncd/triggers/pkg/reconciler" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/client-go/tools/cache" +) + +const ( + // eventListenerAgentName defines logging agent name for EventListener Controller + eventListenerAgentName = "eventlistener-controller" + // eventListenerControllerName defines name for EventListener Controller + eventListenerControllerName = "EventListener" +) + +// Reconciler implements controller.Reconciler for Configuration resources. +type Reconciler struct { + *reconciler.Base + + // listers index properties about resources + eventListenerLister listers.EventListenerLister +} + +// Check that our Reconciler implements controller.Reconciler +var _ controller.Reconciler = (*Reconciler)(nil) + +// Reconcile compares the actual state with the desired, and attempts to +// converge the two. 
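+// The key is a workqueue entry of the form "namespace/name", e.g.
+// "tekton-pipelines/my-listener" (illustrative); cache.SplitMetaNamespaceKey
+// below recovers the two parts.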
+func (c *Reconciler) Reconcile(ctx context.Context, key string) error {
+	c.Logger.Info("event-listener-reconcile")
+	// Convert the namespace/name string into a distinct namespace and name
+	namespace, name, err := cache.SplitMetaNamespaceKey(key)
+	if err != nil {
+		c.Logger.Errorf("invalid resource key: %s", key)
+		return nil
+	}
+
+	// Get the EventListener resource with this namespace/name
+	original, err := c.eventListenerLister.EventListeners(namespace).Get(name)
+	if errors.IsNotFound(err) {
+		// The resource no longer exists, in which case we stop processing.
+		c.Logger.Infof("event listener %q in work queue no longer exists", key)
+		return nil
+	} else if err != nil {
+		c.Logger.Errorf("Error retrieving EventListener %q: %s", name, err)
+		return err
+	}
+
+	// TODO: add reconcile logic
+	c.Logger.Infof("original: %v", original)
+
+	return nil
+}
diff --git a/test/e2e-common.sh b/test/e2e-common.sh
new file mode 100755
index 000000000..58368f395
--- /dev/null
+++ b/test/e2e-common.sh
@@ -0,0 +1,45 @@
+#!/usr/bin/env bash
+
+# Copyright 2019 The Tekton Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Helper functions for E2E tests.
+
+source $(dirname $0)/../vendor/github.com/tektoncd/plumbing/scripts/e2e-tests.sh
+
+function install_pipeline_crd() {
+  echo ">> Deploying Tekton Pipelines"
+  kubectl apply -f https://storage.googleapis.com/tekton-releases/latest/release.yaml || fail_test "Tekton Pipeline installation failed"
+
+  # Make sure that everything is cleaned up in the current namespace.
+  for res in pipelineresources tasks pipelines taskruns pipelineruns; do
+    kubectl delete --ignore-not-found=true ${res}.tekton.dev --all
+  done
+
+  # Wait for pods to be running in the namespaces we are deploying to
+  wait_until_pods_running tekton-pipelines || fail_test "Tekton Pipeline did not come up"
+}
+
+function install_triggers_crd() {
+  echo ">> Deploying Tekton Triggers"
+  ko apply -f config/ || fail_test "Tekton Triggers installation failed"
+
+  # Make sure that everything is cleaned up in the current namespace.
+  for res in eventlistener triggertemplate triggerbinding; do
+    kubectl delete --ignore-not-found=true ${res}.tekton.dev --all
+  done
+
+  # Wait for pods to be running in the namespaces we are deploying to
+  wait_until_pods_running tekton-pipelines || fail_test "Tekton Triggers did not come up"
+}
\ No newline at end of file
diff --git a/config/triggers.yaml b/test/e2e-tests.sh
old mode 100644
new mode 100755
similarity index 58%
rename from config/triggers.yaml
rename to test/e2e-tests.sh
index 46bf1b089..5663ad33d
--- a/config/triggers.yaml
+++ b/test/e2e-tests.sh
@@ -1,3 +1,5 @@
+#!/usr/bin/env bash
+
 # Copyright 2019 The Tekton Authors
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -11,19 +13,22 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-apiVersion: apps/v1beta1
-kind: Deployment
-metadata:
-  name: tekton-triggers-placeholder
-  namespace: tekton-pipelines
-spec:
-  replicas: 1
-  template:
-    metadata:
-      labels:
-        app: tekton-triggers-placeholder
-    spec:
-      serviceAccountName: tekton-pipelines-controller
-      containers:
-      - name: tekton-triggers-placeholder
-        image: github.com/tektoncd/triggers/cmd/triggers
+
+# This script calls out to scripts in tektoncd/plumbing to set up a cluster
+# and deploy Tekton Pipelines to it for running integration tests.
+
+source $(dirname $0)/e2e-common.sh
+
+# Script entry point.
+
+initialize "$@"
+
+header "Setting up environment"
+
+install_triggers_crd
+
+# Run the integration tests
+header "Running Go e2e tests"
+go_test_e2e -timeout=20m ./test || fail_test
+
+success
\ No newline at end of file
diff --git a/third_party/VENDOR-LICENSE b/third_party/VENDOR-LICENSE
index e69de29bb..c01443167 100644
--- a/third_party/VENDOR-LICENSE
+++ b/third_party/VENDOR-LICENSE
@@ -0,0 +1,7053 @@
+
+===========================================================
+Import: github.com/tektoncd/triggers/vendor/cloud.google.com/go
+
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + + +=========================================================== +Import: github.com/tektoncd/triggers/vendor/contrib.go.opencensus.io/exporter/stackdriver + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ + + +=========================================================== +Import: github.com/tektoncd/triggers/vendor/github.com/aws/aws-sdk-go + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + + +=========================================================== +Import: github.com/tektoncd/triggers/vendor/github.com/beorn7/perks + +Copyright (C) 2013 Blake Mizerany + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + + + +=========================================================== +Import: github.com/tektoncd/triggers/vendor/github.com/census-instrumentation/opencensus-proto + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + + +=========================================================== +Import: github.com/tektoncd/triggers/vendor/github.com/davecgh/go-spew + +ISC License + +Copyright (c) 2012-2016 Dave Collins + +Permission to use, copy, modify, and/or distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ + + +=========================================================== +Import: github.com/tektoncd/triggers/vendor/github.com/evanphx/json-patch + +Copyright (c) 2014, Evan Phoenix +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright notice + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. +* Neither the name of the Evan Phoenix nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + + +=========================================================== +Import: github.com/tektoncd/triggers/vendor/github.com/ghodss/yaml + +The MIT License (MIT) + +Copyright (c) 2014 Sam Ghods + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. 
nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + + +=========================================================== +Import: github.com/tektoncd/triggers/vendor/github.com/gogo/protobuf + +Copyright (c) 2013, The GoGo Authors. All rights reserved. + +Protocol Buffers for Go with Gadgets + +Go support for Protocol Buffers - Google's data interchange format + +Copyright 2010 The Go Authors. All rights reserved. +https://github.com/golang/protobuf + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + + + +=========================================================== +Import: github.com/tektoncd/triggers/vendor/github.com/golang/glog + +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. 
+For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. 
+ +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. + +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. 
+ +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. + +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. + +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ + + +=========================================================== +Import: github.com/tektoncd/triggers/vendor/github.com/golang/groupcache + +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. 
+ +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. + +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. 
+ +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. + +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. + +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. + +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + + +=========================================================== +Import: github.com/tektoncd/triggers/vendor/github.com/golang/protobuf + +Copyright 2010 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + + + +=========================================================== +Import: github.com/tektoncd/triggers/vendor/github.com/google/btree + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + + +=========================================================== +Import: github.com/tektoncd/triggers/vendor/github.com/google/go-cmp + +Copyright (c) 2017 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + + +=========================================================== +Import: github.com/tektoncd/triggers/vendor/github.com/google/gofuzz + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + + +=========================================================== +Import: github.com/tektoncd/triggers/vendor/github.com/google/uuid + +Copyright (c) 2009,2014 Google Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + + +=========================================================== +Import: github.com/tektoncd/triggers/vendor/github.com/googleapis/gax-go + +Copyright 2016, Google Inc. +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. 
nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + + +=========================================================== +Import: github.com/tektoncd/triggers/vendor/github.com/googleapis/gnostic + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + + + +=========================================================== +Import: github.com/tektoncd/triggers/vendor/github.com/gregjones/httpcache + +Copyright © 2012 Greg Jones (greg.jones@gmail.com) + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + + +=========================================================== +Import: github.com/tektoncd/triggers/vendor/github.com/hashicorp/golang-lru + +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. 
Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. 
You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. 
Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. 
Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. + + + +=========================================================== +Import: github.com/tektoncd/triggers/vendor/github.com/imdario/mergo + +Copyright (c) 2013 Dario Castañé. All rights reserved. +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + + +=========================================================== +Import: github.com/tektoncd/triggers/vendor/github.com/jmespath/go-jmespath + +Copyright 2015 James Saryerwinnie + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + + + +=========================================================== +Import: github.com/tektoncd/triggers/vendor/github.com/json-iterator/go + +MIT License + +Copyright (c) 2016 json-iterator + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + + +=========================================================== +Import: github.com/tektoncd/triggers/vendor/github.com/knative/caching + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + + +=========================================================== +Import: github.com/tektoncd/triggers/vendor/github.com/knative/pkg + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ + + +=========================================================== +Import: github.com/tektoncd/triggers/vendor/github.com/mattbaird/jsonpatch + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + + + +=========================================================== +Import: github.com/tektoncd/triggers/vendor/github.com/matttproud/golang_protobuf_extensions + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + + +=========================================================== +Import: github.com/tektoncd/triggers/vendor/github.com/modern-go/concurrent + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + + +=========================================================== +Import: github.com/tektoncd/triggers/vendor/github.com/modern-go/reflect2 + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ + + +=========================================================== +Import: github.com/tektoncd/triggers/vendor/github.com/peterbourgon/diskv + +Copyright (c) 2011-2012 Peter Bourgon + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + + + +=========================================================== +Import: github.com/tektoncd/triggers/vendor/github.com/pkg/errors + +Copyright (c) 2015, Dave Cheney +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + + +=========================================================== +Import: github.com/tektoncd/triggers/vendor/github.com/prometheus/client_golang + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ + + +=========================================================== +Import: github.com/tektoncd/triggers/vendor/github.com/prometheus/client_model + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + + +=========================================================== +Import: github.com/tektoncd/triggers/vendor/github.com/prometheus/common + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + + +=========================================================== +Import: github.com/tektoncd/triggers/vendor/github.com/prometheus/procfs + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + + +=========================================================== +Import: github.com/tektoncd/triggers/vendor/github.com/spf13/pflag + +Copyright (c) 2012 Alex Ogier. All rights reserved. +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + + +=========================================================== +Import: github.com/tektoncd/triggers/vendor/github.com/tektoncd/pipeline + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + + +=========================================================== +Import: github.com/tektoncd/triggers/vendor/go.opencensus.io + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + +=========================================================== +Import: github.com/tektoncd/triggers/vendor/go.uber.org/atomic + +Copyright (c) 2016 Uber Technologies, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + + + +=========================================================== +Import: github.com/tektoncd/triggers/vendor/go.uber.org/multierr + +Copyright (c) 2017 Uber Technologies, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + + + +=========================================================== +Import: github.com/tektoncd/triggers/vendor/go.uber.org/zap + +Copyright (c) 2016-2017 Uber Technologies, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + + + +=========================================================== +Import: github.com/tektoncd/triggers/vendor/golang.org/x/crypto + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. 
nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + + +=========================================================== +Import: github.com/tektoncd/triggers/vendor/golang.org/x/net + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + + +=========================================================== +Import: github.com/tektoncd/triggers/vendor/golang.org/x/oauth2 + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + + +=========================================================== +Import: github.com/tektoncd/triggers/vendor/golang.org/x/sync + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + + +=========================================================== +Import: github.com/tektoncd/triggers/vendor/golang.org/x/sys + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + + +=========================================================== +Import: github.com/tektoncd/triggers/vendor/golang.org/x/text + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + + +=========================================================== +Import: github.com/tektoncd/triggers/vendor/golang.org/x/time + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + + +=========================================================== +Import: github.com/tektoncd/triggers/vendor/golang.org/x/xerrors + +Copyright (c) 2019 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + + +=========================================================== +Import: github.com/tektoncd/triggers/vendor/google.golang.org/api + +Copyright (c) 2011 Google Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + + +=========================================================== +Import: github.com/tektoncd/triggers/vendor/google.golang.org/genproto + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + + +=========================================================== +Import: github.com/tektoncd/triggers/vendor/google.golang.org/grpc + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + + +=========================================================== +Import: github.com/tektoncd/triggers/vendor/gopkg.in/inf.v0 + +Copyright (c) 2012 Péter Surányi. Portions Copyright (c) 2009 The Go +Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + + +=========================================================== +Import: github.com/tektoncd/triggers/vendor/gopkg.in/yaml.v2 + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + + +=========================================================== +Import: github.com/tektoncd/triggers/vendor/k8s.io/api + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + + +=========================================================== +Import: github.com/tektoncd/triggers/vendor/k8s.io/apimachinery + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ + + +=========================================================== +Import: github.com/tektoncd/triggers/vendor/k8s.io/client-go + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + + +=========================================================== +Import: github.com/tektoncd/triggers/vendor/k8s.io/kube-openapi + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + diff --git a/vendor/k8s.io/utils/LICENSE b/vendor/cloud.google.com/go/LICENSE similarity index 100% rename from vendor/k8s.io/utils/LICENSE rename to vendor/cloud.google.com/go/LICENSE diff --git a/vendor/cloud.google.com/go/compute/metadata/metadata.go b/vendor/cloud.google.com/go/compute/metadata/metadata.go new file mode 100644 index 000000000..125b7033c --- /dev/null +++ b/vendor/cloud.google.com/go/compute/metadata/metadata.go @@ -0,0 +1,513 @@ +// Copyright 2014 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package metadata provides access to Google Compute Engine (GCE) +// metadata and API service accounts. +// +// This package is a wrapper around the GCE metadata service, +// as documented at https://developers.google.com/compute/docs/metadata. +package metadata // import "cloud.google.com/go/compute/metadata" + +import ( + "context" + "encoding/json" + "fmt" + "io/ioutil" + "net" + "net/http" + "net/url" + "os" + "runtime" + "strings" + "sync" + "time" +) + +const ( + // metadataIP is the documented metadata server IP address. + metadataIP = "169.254.169.254" + + // metadataHostEnv is the environment variable specifying the + // GCE metadata hostname. If empty, the default value of + // metadataIP ("169.254.169.254") is used instead. + // This variable name is not defined by any spec, as far as + // I know; it was made up for the Go package.
+ metadataHostEnv = "GCE_METADATA_HOST" + + userAgent = "gcloud-golang/0.1" +) + +type cachedValue struct { + k string + trim bool + mu sync.Mutex + v string +} + +var ( + projID = &cachedValue{k: "project/project-id", trim: true} + projNum = &cachedValue{k: "project/numeric-project-id", trim: true} + instID = &cachedValue{k: "instance/id", trim: true} +) + +var ( + defaultClient = &Client{hc: &http.Client{ + Transport: &http.Transport{ + Dial: (&net.Dialer{ + Timeout: 2 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial, + ResponseHeaderTimeout: 2 * time.Second, + }, + }} + subscribeClient = &Client{hc: &http.Client{ + Transport: &http.Transport{ + Dial: (&net.Dialer{ + Timeout: 2 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial, + }, + }} +) + +// NotDefinedError is returned when requested metadata is not defined. +// +// The underlying string is the suffix after "/computeMetadata/v1/". +// +// This error is not returned if the value is defined to be the empty +// string. +type NotDefinedError string + +func (suffix NotDefinedError) Error() string { + return fmt.Sprintf("metadata: GCE metadata %q not defined", string(suffix)) +} + +func (c *cachedValue) get(cl *Client) (v string, err error) { + defer c.mu.Unlock() + c.mu.Lock() + if c.v != "" { + return c.v, nil + } + if c.trim { + v, err = cl.getTrimmed(c.k) + } else { + v, err = cl.Get(c.k) + } + if err == nil { + c.v = v + } + return +} + +var ( + onGCEOnce sync.Once + onGCE bool +) + +// OnGCE reports whether this process is running on Google Compute Engine. +func OnGCE() bool { + onGCEOnce.Do(initOnGCE) + return onGCE +} + +func initOnGCE() { + onGCE = testOnGCE() +} + +func testOnGCE() bool { + // The user explicitly said they're on GCE, so trust them. + if os.Getenv(metadataHostEnv) != "" { + return true + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + resc := make(chan bool, 2) + + // Try two strategies in parallel. + // See https://github.com/googleapis/google-cloud-go/issues/194 + go func() { + req, _ := http.NewRequest("GET", "http://"+metadataIP, nil) + req.Header.Set("User-Agent", userAgent) + res, err := defaultClient.hc.Do(req.WithContext(ctx)) + if err != nil { + resc <- false + return + } + defer res.Body.Close() + resc <- res.Header.Get("Metadata-Flavor") == "Google" + }() + + go func() { + addrs, err := net.LookupHost("metadata.google.internal") + if err != nil || len(addrs) == 0 { + resc <- false + return + } + resc <- strsContains(addrs, metadataIP) + }() + + tryHarder := systemInfoSuggestsGCE() + if tryHarder { + res := <-resc + if res { + // The first strategy succeeded, so let's use it. + return true + } + // Wait for either the DNS or metadata server probe to + // contradict the other one and say we are running on + // GCE. Give it a lot of time to do so, since the system + // info already suggests we're running on a GCE BIOS. + timer := time.NewTimer(5 * time.Second) + defer timer.Stop() + select { + case res = <-resc: + return res + case <-timer.C: + // Too slow. Who knows what this system is. + return false + } + } + + // There's no hint from the system info that we're running on + // GCE, so use the first probe's result as truth, whether it's + // true or false. The goal here is to optimize for speed for + // users who are NOT running on GCE. We can't assume that + // either a DNS lookup or an HTTP request to a blackholed IP + // address is fast. 
Worst case this should return when the + // defaultClient's Transport.ResponseHeaderTimeout or + // Transport.Dial.Timeout fires (in two seconds). + return <-resc +} + +// systemInfoSuggestsGCE reports whether the local system (without +// doing network requests) suggests that we're running on GCE. If this +// returns true, testOnGCE tries a bit harder to reach its metadata +// server. +func systemInfoSuggestsGCE() bool { + if runtime.GOOS != "linux" { + // We don't have any non-Linux clues available, at least yet. + return false + } + slurp, _ := ioutil.ReadFile("/sys/class/dmi/id/product_name") + name := strings.TrimSpace(string(slurp)) + return name == "Google" || name == "Google Compute Engine" +} + +// Subscribe calls Client.Subscribe on a client designed for subscribing (one with no +// ResponseHeaderTimeout). +func Subscribe(suffix string, fn func(v string, ok bool) error) error { + return subscribeClient.Subscribe(suffix, fn) +} + +// Get calls Client.Get on the default client. +func Get(suffix string) (string, error) { return defaultClient.Get(suffix) } + +// ProjectID returns the current instance's project ID string. +func ProjectID() (string, error) { return defaultClient.ProjectID() } + +// NumericProjectID returns the current instance's numeric project ID. +func NumericProjectID() (string, error) { return defaultClient.NumericProjectID() } + +// InternalIP returns the instance's primary internal IP address. +func InternalIP() (string, error) { return defaultClient.InternalIP() } + +// ExternalIP returns the instance's primary external (public) IP address. +func ExternalIP() (string, error) { return defaultClient.ExternalIP() } + +// Hostname returns the instance's hostname. This will be of the form +// "<instanceID>.c.<projID>.internal". +func Hostname() (string, error) { return defaultClient.Hostname() } + +// InstanceTags returns the list of user-defined instance tags, +// assigned when initially creating a GCE instance. +func InstanceTags() ([]string, error) { return defaultClient.InstanceTags() } + +// InstanceID returns the current VM's numeric instance ID. +func InstanceID() (string, error) { return defaultClient.InstanceID() } + +// InstanceName returns the current VM's instance ID string. +func InstanceName() (string, error) { return defaultClient.InstanceName() } + +// Zone returns the current VM's zone, such as "us-central1-b". +func Zone() (string, error) { return defaultClient.Zone() } + +// InstanceAttributes calls Client.InstanceAttributes on the default client. +func InstanceAttributes() ([]string, error) { return defaultClient.InstanceAttributes() } + +// ProjectAttributes calls Client.ProjectAttributes on the default client. +func ProjectAttributes() ([]string, error) { return defaultClient.ProjectAttributes() } + +// InstanceAttributeValue calls Client.InstanceAttributeValue on the default client. +func InstanceAttributeValue(attr string) (string, error) { + return defaultClient.InstanceAttributeValue(attr) +} + +// ProjectAttributeValue calls Client.ProjectAttributeValue on the default client. +func ProjectAttributeValue(attr string) (string, error) { + return defaultClient.ProjectAttributeValue(attr) +} + +// Scopes calls Client.Scopes on the default client. +func Scopes(serviceAccount string) ([]string, error) { return defaultClient.Scopes(serviceAccount) } + +func strsContains(ss []string, s string) bool { + for _, v := range ss { + if v == s { + return true + } + } + return false +} + +// A Client provides metadata.
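As a usage illustration only (not part of the vendored file), a minimal sketch of the package-level helpers above, before the Client type they delegate to; it assumes the binary runs on a GCE VM:

    package main

    import (
        "log"

        "cloud.google.com/go/compute/metadata"
    )

    func main() {
        // OnGCE probes DNS and the metadata server once, caching the result.
        if !metadata.OnGCE() {
            log.Println("not running on GCE")
            return
        }
        proj, err := metadata.ProjectID() // cached after the first lookup
        if err != nil {
            log.Fatal(err)
        }
        zone, err := metadata.Zone()
        if err != nil {
            log.Fatal(err)
        }
        log.Printf("project=%s zone=%s", proj, zone)
    }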
+type Client struct { + hc *http.Client +} + +// NewClient returns a Client that can be used to fetch metadata. All HTTP requests +// will use the given http.Client instead of the default client. +func NewClient(c *http.Client) *Client { + return &Client{hc: c} +} + +// getETag returns a value from the metadata service as well as the associated ETag. +// This func is otherwise equivalent to Get. +func (c *Client) getETag(suffix string) (value, etag string, err error) { + // Using a fixed IP makes it very difficult to spoof the metadata service in + // a container, which is an important use-case for local testing of cloud + // deployments. To enable spoofing of the metadata service, the environment + // variable GCE_METADATA_HOST is first inspected to decide where metadata + // requests shall go. + host := os.Getenv(metadataHostEnv) + if host == "" { + // Using 169.254.169.254 instead of "metadata" here because Go + // binaries built with the "netgo" tag and without cgo won't + // know the search suffix for "metadata" is + // ".google.internal", and this IP address is documented as + // being stable anyway. + host = metadataIP + } + u := "http://" + host + "/computeMetadata/v1/" + suffix + req, _ := http.NewRequest("GET", u, nil) + req.Header.Set("Metadata-Flavor", "Google") + req.Header.Set("User-Agent", userAgent) + res, err := c.hc.Do(req) + if err != nil { + return "", "", err + } + defer res.Body.Close() + if res.StatusCode == http.StatusNotFound { + return "", "", NotDefinedError(suffix) + } + all, err := ioutil.ReadAll(res.Body) + if err != nil { + return "", "", err + } + if res.StatusCode != 200 { + return "", "", &Error{Code: res.StatusCode, Message: string(all)} + } + return string(all), res.Header.Get("Etag"), nil +} + +// Get returns a value from the metadata service. +// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/". +// +// If the GCE_METADATA_HOST environment variable is not defined, a default of +// 169.254.169.254 will be used instead. +// +// If the requested metadata is not defined, the returned error will +// be of type NotDefinedError. +func (c *Client) Get(suffix string) (string, error) { + val, _, err := c.getETag(suffix) + return val, err +} + +func (c *Client) getTrimmed(suffix string) (s string, err error) { + s, err = c.Get(suffix) + s = strings.TrimSpace(s) + return +} + +func (c *Client) lines(suffix string) ([]string, error) { + j, err := c.Get(suffix) + if err != nil { + return nil, err + } + s := strings.Split(strings.TrimSpace(j), "\n") + for i := range s { + s[i] = strings.TrimSpace(s[i]) + } + return s, nil +} + +// ProjectID returns the current instance's project ID string. +func (c *Client) ProjectID() (string, error) { return projID.get(c) } + +// NumericProjectID returns the current instance's numeric project ID. +func (c *Client) NumericProjectID() (string, error) { return projNum.get(c) } + +// InstanceID returns the current VM's numeric instance ID. +func (c *Client) InstanceID() (string, error) { return instID.get(c) } + +// InternalIP returns the instance's primary internal IP address. +func (c *Client) InternalIP() (string, error) { + return c.getTrimmed("instance/network-interfaces/0/ip") +} + +// ExternalIP returns the instance's primary external (public) IP address. +func (c *Client) ExternalIP() (string, error) { + return c.getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip") +} + +// Hostname returns the instance's hostname. This will be of the form +// "<instanceID>.c.<projID>.internal".
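For callers that need their own transport, a small sketch of the Client path through the NewClient constructor above (illustrative only; the one-second timeout is an arbitrary choice, not a value from this file):

    package main

    import (
        "log"
        "net/http"
        "time"

        "cloud.google.com/go/compute/metadata"
    )

    func main() {
        // All lookups below go through the supplied http.Client rather
        // than the package-level default client.
        c := metadata.NewClient(&http.Client{Timeout: 1 * time.Second})
        ip, err := c.InternalIP()
        if err != nil {
            log.Fatal(err) // a NotDefinedError if the path is undefined
        }
        log.Println("internal IP:", ip)
    }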
+func (c *Client) Hostname() (string, error) { + return c.getTrimmed("instance/hostname") +} + +// InstanceTags returns the list of user-defined instance tags, +// assigned when initially creating a GCE instance. +func (c *Client) InstanceTags() ([]string, error) { + var s []string + j, err := c.Get("instance/tags") + if err != nil { + return nil, err + } + if err := json.NewDecoder(strings.NewReader(j)).Decode(&s); err != nil { + return nil, err + } + return s, nil +} + +// InstanceName returns the current VM's instance ID string. +func (c *Client) InstanceName() (string, error) { + host, err := c.Hostname() + if err != nil { + return "", err + } + return strings.Split(host, ".")[0], nil +} + +// Zone returns the current VM's zone, such as "us-central1-b". +func (c *Client) Zone() (string, error) { + zone, err := c.getTrimmed("instance/zone") + // zone is of the form "projects/<projNum>/zones/<zoneName>". + if err != nil { + return "", err + } + return zone[strings.LastIndex(zone, "/")+1:], nil +} + +// InstanceAttributes returns the list of user-defined attributes, +// assigned when initially creating a GCE VM instance. The value of an +// attribute can be obtained with InstanceAttributeValue. +func (c *Client) InstanceAttributes() ([]string, error) { return c.lines("instance/attributes/") } + +// ProjectAttributes returns the list of user-defined attributes +// applying to the project as a whole, not just this VM. The value of +// an attribute can be obtained with ProjectAttributeValue. +func (c *Client) ProjectAttributes() ([]string, error) { return c.lines("project/attributes/") } + +// InstanceAttributeValue returns the value of the provided VM +// instance attribute. +// +// If the requested attribute is not defined, the returned error will +// be of type NotDefinedError. +// +// InstanceAttributeValue may return ("", nil) if the attribute was +// defined to be the empty string. +func (c *Client) InstanceAttributeValue(attr string) (string, error) { + return c.Get("instance/attributes/" + attr) +} + +// ProjectAttributeValue returns the value of the provided +// project attribute. +// +// If the requested attribute is not defined, the returned error will +// be of type NotDefinedError. +// +// ProjectAttributeValue may return ("", nil) if the attribute was +// defined to be the empty string. +func (c *Client) ProjectAttributeValue(attr string) (string, error) { + return c.Get("project/attributes/" + attr) +} + +// Scopes returns the service account scopes for the given account. +// The account may be empty or the string "default" to use the instance's +// main account. +func (c *Client) Scopes(serviceAccount string) ([]string, error) { + if serviceAccount == "" { + serviceAccount = "default" + } + return c.lines("instance/service-accounts/" + serviceAccount + "/scopes") +} + +// Subscribe subscribes to a value from the metadata service. +// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/". +// The suffix may contain query parameters. +// +// Subscribe calls fn with the latest metadata value indicated by the provided +// suffix. If the metadata value is deleted, fn is called with the empty string +// and ok false. Subscribe blocks until fn returns a non-nil error or the value +// is deleted. Subscribe returns the error value returned from the last call to +// fn, which may be nil when ok == false.
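A sketch of the Subscribe contract just described, ahead of its implementation below; the attribute key "my-config" is hypothetical:

    package main

    import (
        "log"

        "cloud.google.com/go/compute/metadata"
    )

    func main() {
        // Blocks until fn returns a non-nil error or the value is deleted.
        err := metadata.Subscribe("instance/attributes/my-config",
            func(v string, ok bool) error {
                if !ok {
                    return nil // deleted; Subscribe then returns nil
                }
                log.Printf("config changed: %q", v)
                return nil // a non-nil return ends the subscription
            })
        if err != nil {
            log.Fatal(err)
        }
    }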
+func (c *Client) Subscribe(suffix string, fn func(v string, ok bool) error) error { + const failedSubscribeSleep = time.Second * 5 + + // First check to see if the metadata value exists at all. + val, lastETag, err := c.getETag(suffix) + if err != nil { + return err + } + + if err := fn(val, true); err != nil { + return err + } + + ok := true + if strings.ContainsRune(suffix, '?') { + suffix += "&wait_for_change=true&last_etag=" + } else { + suffix += "?wait_for_change=true&last_etag=" + } + for { + val, etag, err := c.getETag(suffix + url.QueryEscape(lastETag)) + if err != nil { + if _, deleted := err.(NotDefinedError); !deleted { + time.Sleep(failedSubscribeSleep) + continue // Retry on other errors. + } + ok = false + } + lastETag = etag + + if err := fn(val, ok); err != nil || !ok { + return err + } + } +} + +// Error contains an error response from the server. +type Error struct { + // Code is the HTTP response status code. + Code int + // Message is the server response message. + Message string +} + +func (e *Error) Error() string { + return fmt.Sprintf("compute: Received %d `%s`", e.Code, e.Message) +} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/alert_policy_client.go b/vendor/cloud.google.com/go/monitoring/apiv3/alert_policy_client.go new file mode 100644 index 000000000..52bf07752 --- /dev/null +++ b/vendor/cloud.google.com/go/monitoring/apiv3/alert_policy_client.go @@ -0,0 +1,285 @@ +// Copyright 2019 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by gapic-generator. DO NOT EDIT. + +package monitoring + +import ( + "context" + "fmt" + "math" + "time" + + "github.com/golang/protobuf/proto" + gax "github.com/googleapis/gax-go/v2" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/transport" + monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// AlertPolicyCallOptions contains the retry settings for each method of AlertPolicyClient. 
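Since CallOptions is an exported field on the generated clients, a caller can swap the retry policy per method. A sketch under that assumption, ahead of the AlertPolicyCallOptions definition below (the backoff numbers here are arbitrary, not the generated defaults):

    package main

    import (
        "context"
        "log"
        "time"

        monitoring "cloud.google.com/go/monitoring/apiv3"
        gax "github.com/googleapis/gax-go/v2"
        "google.golang.org/grpc/codes"
    )

    func main() {
        ctx := context.Background()
        c, err := monitoring.NewAlertPolicyClient(ctx)
        if err != nil {
            log.Fatal(err)
        }
        defer c.Close()
        // Replace the generated retry policy for a single method; the
        // shape mirrors defaultAlertPolicyCallOptions in this file.
        c.CallOptions.GetAlertPolicy = []gax.CallOption{
            gax.WithRetry(func() gax.Retryer {
                return gax.OnCodes([]codes.Code{codes.Unavailable}, gax.Backoff{
                    Initial:    200 * time.Millisecond,
                    Max:        10 * time.Second,
                    Multiplier: 2,
                })
            }),
        }
    }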
+type AlertPolicyCallOptions struct { + ListAlertPolicies []gax.CallOption + GetAlertPolicy []gax.CallOption + CreateAlertPolicy []gax.CallOption + DeleteAlertPolicy []gax.CallOption + UpdateAlertPolicy []gax.CallOption +} + +func defaultAlertPolicyClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("monitoring.googleapis.com:443"), + option.WithScopes(DefaultAuthScopes()...), + } +} + +func defaultAlertPolicyCallOptions() *AlertPolicyCallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.3, + }) + }), + }, + } + return &AlertPolicyCallOptions{ + ListAlertPolicies: retry[[2]string{"default", "idempotent"}], + GetAlertPolicy: retry[[2]string{"default", "idempotent"}], + CreateAlertPolicy: retry[[2]string{"default", "non_idempotent"}], + DeleteAlertPolicy: retry[[2]string{"default", "idempotent"}], + UpdateAlertPolicy: retry[[2]string{"default", "non_idempotent"}], + } +} + +// AlertPolicyClient is a client for interacting with Stackdriver Monitoring API. +// +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +type AlertPolicyClient struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + alertPolicyClient monitoringpb.AlertPolicyServiceClient + + // The call options for this service. + CallOptions *AlertPolicyCallOptions + + // The x-goog-* metadata to be sent with each request. + xGoogMetadata metadata.MD +} + +// NewAlertPolicyClient creates a new alert policy service client. +// +// The AlertPolicyService API is used to manage (list, create, delete, +// edit) alert policies in Stackdriver Monitoring. An alerting policy is +// a description of the conditions under which some aspect of your +// system is considered to be "unhealthy" and the ways to notify +// people or services about this state. In addition to using this API, alert +// policies can also be managed through +// Stackdriver Monitoring (at https://cloud.google.com/monitoring/docs/), +// which can be reached by clicking the "Monitoring" tab in +// Cloud Console (at https://console.cloud.google.com/). +func NewAlertPolicyClient(ctx context.Context, opts ...option.ClientOption) (*AlertPolicyClient, error) { + conn, err := transport.DialGRPC(ctx, append(defaultAlertPolicyClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &AlertPolicyClient{ + conn: conn, + CallOptions: defaultAlertPolicyCallOptions(), + + alertPolicyClient: monitoringpb.NewAlertPolicyServiceClient(conn), + } + c.setGoogleClientInfo() + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *AlertPolicyClient) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *AlertPolicyClient) Close() error { + return c.conn.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *AlertPolicyClient) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", versionGo()}, keyval...) 
+ kv = append(kv, "gapic", versionClient, "gax", gax.Version, "grpc", grpc.Version) + c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) +} + +// ListAlertPolicies lists the existing alerting policies for the project. +func (c *AlertPolicyClient) ListAlertPolicies(ctx context.Context, req *monitoringpb.ListAlertPoliciesRequest, opts ...gax.CallOption) *AlertPolicyIterator { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName())) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.ListAlertPolicies[0:len(c.CallOptions.ListAlertPolicies):len(c.CallOptions.ListAlertPolicies)], opts...) + it := &AlertPolicyIterator{} + req = proto.Clone(req).(*monitoringpb.ListAlertPoliciesRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.AlertPolicy, string, error) { + var resp *monitoringpb.ListAlertPoliciesResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.alertPolicyClient.ListAlertPolicies(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + return resp.AlertPolicies, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.PageSize) + it.pageInfo.Token = req.PageToken + return it +} + +// GetAlertPolicy gets a single alerting policy. +func (c *AlertPolicyClient) GetAlertPolicy(ctx context.Context, req *monitoringpb.GetAlertPolicyRequest, opts ...gax.CallOption) (*monitoringpb.AlertPolicy, error) { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName())) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.GetAlertPolicy[0:len(c.CallOptions.GetAlertPolicy):len(c.CallOptions.GetAlertPolicy)], opts...) + var resp *monitoringpb.AlertPolicy + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.alertPolicyClient.GetAlertPolicy(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// CreateAlertPolicy creates a new alerting policy. +func (c *AlertPolicyClient) CreateAlertPolicy(ctx context.Context, req *monitoringpb.CreateAlertPolicyRequest, opts ...gax.CallOption) (*monitoringpb.AlertPolicy, error) { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName())) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.CreateAlertPolicy[0:len(c.CallOptions.CreateAlertPolicy):len(c.CallOptions.CreateAlertPolicy)], opts...) + var resp *monitoringpb.AlertPolicy + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.alertPolicyClient.CreateAlertPolicy(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// DeleteAlertPolicy deletes an alerting policy. 
+func (c *AlertPolicyClient) DeleteAlertPolicy(ctx context.Context, req *monitoringpb.DeleteAlertPolicyRequest, opts ...gax.CallOption) error { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName())) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.DeleteAlertPolicy[0:len(c.CallOptions.DeleteAlertPolicy):len(c.CallOptions.DeleteAlertPolicy)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.alertPolicyClient.DeleteAlertPolicy(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +// UpdateAlertPolicy updates an alerting policy. You can either replace the entire policy with +// a new one or replace only certain fields in the current alerting policy by +// specifying the fields to be updated via updateMask. Returns the +// updated alerting policy. +func (c *AlertPolicyClient) UpdateAlertPolicy(ctx context.Context, req *monitoringpb.UpdateAlertPolicyRequest, opts ...gax.CallOption) (*monitoringpb.AlertPolicy, error) { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "alert_policy.name", req.GetAlertPolicy().GetName())) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.UpdateAlertPolicy[0:len(c.CallOptions.UpdateAlertPolicy):len(c.CallOptions.UpdateAlertPolicy)], opts...) + var resp *monitoringpb.AlertPolicy + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.alertPolicyClient.UpdateAlertPolicy(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// AlertPolicyIterator manages a stream of *monitoringpb.AlertPolicy. +type AlertPolicyIterator struct { + items []*monitoringpb.AlertPolicy + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.AlertPolicy, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *AlertPolicyIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. 
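A sketch of draining an AlertPolicyIterator with the iterator.Done convention described above ("projects/my-project" is a hypothetical resource name):

    package main

    import (
        "context"
        "log"

        monitoring "cloud.google.com/go/monitoring/apiv3"
        "google.golang.org/api/iterator"
        monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3"
    )

    func main() {
        ctx := context.Background()
        c, err := monitoring.NewAlertPolicyClient(ctx)
        if err != nil {
            log.Fatal(err)
        }
        defer c.Close()
        it := c.ListAlertPolicies(ctx, &monitoringpb.ListAlertPoliciesRequest{
            Name: "projects/my-project", // hypothetical
        })
        for {
            p, err := it.Next()
            if err == iterator.Done {
                break
            }
            if err != nil {
                log.Fatal(err)
            }
            log.Println(p.GetName())
        }
    }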
+func (it *AlertPolicyIterator) Next() (*monitoringpb.AlertPolicy, error) { + var item *monitoringpb.AlertPolicy + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *AlertPolicyIterator) bufLen() int { + return len(it.items) +} + +func (it *AlertPolicyIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/doc.go b/vendor/cloud.google.com/go/monitoring/apiv3/doc.go new file mode 100644 index 000000000..c51fd2557 --- /dev/null +++ b/vendor/cloud.google.com/go/monitoring/apiv3/doc.go @@ -0,0 +1,105 @@ +// Copyright 2019 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by gapic-generator. DO NOT EDIT. + +// Package monitoring is an auto-generated package for the +// Stackdriver Monitoring API. +// +// NOTE: This package is in alpha. It is not stable, and is likely to change. +// +// Manages your Stackdriver Monitoring data and configurations. Most projects +// must be associated with a Stackdriver account, with a few exceptions as +// noted on the individual method pages. +// +// Use of Context +// +// The ctx passed to NewClient is used for authentication requests and +// for creating the underlying connection, but is not used for subsequent calls. +// Individual methods on the client use the ctx given to them. +// +// To close the open connection, use the Close() method. +// +// For information about setting deadlines, reusing contexts, and more +// please visit godoc.org/cloud.google.com/go. +package monitoring // import "cloud.google.com/go/monitoring/apiv3" + +import ( + "context" + "runtime" + "strings" + "unicode" + + "google.golang.org/grpc/metadata" +) + +func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context { + out, _ := metadata.FromOutgoingContext(ctx) + out = out.Copy() + for _, md := range mds { + for k, v := range md { + out[k] = append(out[k], v...) + } + } + return metadata.NewOutgoingContext(ctx, out) +} + +// DefaultAuthScopes reports the default set of authentication scopes to use with this package. +func DefaultAuthScopes() []string { + return []string{ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/monitoring", + "https://www.googleapis.com/auth/monitoring.read", + "https://www.googleapis.com/auth/monitoring.write", + } +} + +// versionGo returns the Go runtime version. The returned string +// has no whitespace, suitable for reporting in header. 
+func versionGo() string { + const develPrefix = "devel +" + + s := runtime.Version() + if strings.HasPrefix(s, develPrefix) { + s = s[len(develPrefix):] + if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 { + s = s[:p] + } + return s + } + + notSemverRune := func(r rune) bool { + return strings.IndexRune("0123456789.", r) < 0 + } + + if strings.HasPrefix(s, "go1") { + s = s[2:] + var prerelease string + if p := strings.IndexFunc(s, notSemverRune); p >= 0 { + s, prerelease = s[:p], s[p:] + } + if strings.HasSuffix(s, ".") { + s += "0" + } else if strings.Count(s, ".") < 2 { + s += ".0" + } + if prerelease != "" { + s += "-" + prerelease + } + return s + } + return "UNKNOWN" +} + +const versionClient = "20190626" diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/group_client.go b/vendor/cloud.google.com/go/monitoring/apiv3/group_client.go new file mode 100644 index 000000000..788d44cf5 --- /dev/null +++ b/vendor/cloud.google.com/go/monitoring/apiv3/group_client.go @@ -0,0 +1,370 @@ +// Copyright 2019 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by gapic-generator. DO NOT EDIT. + +package monitoring + +import ( + "context" + "fmt" + "math" + "time" + + "github.com/golang/protobuf/proto" + gax "github.com/googleapis/gax-go/v2" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/transport" + monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres" + monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// GroupCallOptions contains the retry settings for each method of GroupClient. +type GroupCallOptions struct { + ListGroups []gax.CallOption + GetGroup []gax.CallOption + CreateGroup []gax.CallOption + UpdateGroup []gax.CallOption + DeleteGroup []gax.CallOption + ListGroupMembers []gax.CallOption +} + +func defaultGroupClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("monitoring.googleapis.com:443"), + option.WithScopes(DefaultAuthScopes()...), + } +} + +func defaultGroupCallOptions() *GroupCallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.3, + }) + }), + }, + } + return &GroupCallOptions{ + ListGroups: retry[[2]string{"default", "idempotent"}], + GetGroup: retry[[2]string{"default", "idempotent"}], + CreateGroup: retry[[2]string{"default", "non_idempotent"}], + UpdateGroup: retry[[2]string{"default", "idempotent"}], + DeleteGroup: retry[[2]string{"default", "idempotent"}], + ListGroupMembers: retry[[2]string{"default", "idempotent"}], + } +} + +// GroupClient is a client for interacting with Stackdriver Monitoring API. 
+// +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +type GroupClient struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + groupClient monitoringpb.GroupServiceClient + + // The call options for this service. + CallOptions *GroupCallOptions + + // The x-goog-* metadata to be sent with each request. + xGoogMetadata metadata.MD +} + +// NewGroupClient creates a new group service client. +// +// The Group API lets you inspect and manage your +// groups (at #google.monitoring.v3.Group). +// +// A group is a named filter that is used to identify +// a collection of monitored resources. Groups are typically used to +// mirror the physical and/or logical topology of the environment. +// Because group membership is computed dynamically, monitored +// resources that are started in the future are automatically placed +// in matching groups. By using a group to name monitored resources in, +// for example, an alert policy, the target of that alert policy is +// updated automatically as monitored resources are added and removed +// from the infrastructure. +func NewGroupClient(ctx context.Context, opts ...option.ClientOption) (*GroupClient, error) { + conn, err := transport.DialGRPC(ctx, append(defaultGroupClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &GroupClient{ + conn: conn, + CallOptions: defaultGroupCallOptions(), + + groupClient: monitoringpb.NewGroupServiceClient(conn), + } + c.setGoogleClientInfo() + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *GroupClient) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *GroupClient) Close() error { + return c.conn.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *GroupClient) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", versionGo()}, keyval...) + kv = append(kv, "gapic", versionClient, "gax", gax.Version, "grpc", grpc.Version) + c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) +} + +// ListGroups lists the existing groups. +func (c *GroupClient) ListGroups(ctx context.Context, req *monitoringpb.ListGroupsRequest, opts ...gax.CallOption) *GroupIterator { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName())) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.ListGroups[0:len(c.CallOptions.ListGroups):len(c.CallOptions.ListGroups)], opts...) + it := &GroupIterator{} + req = proto.Clone(req).(*monitoringpb.ListGroupsRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.Group, string, error) { + var resp *monitoringpb.ListGroupsResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.groupClient.ListGroups(ctx, req, settings.GRPC...) + return err + }, opts...) 
+ if err != nil { + return nil, "", err + } + return resp.Group, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.PageSize) + it.pageInfo.Token = req.PageToken + return it +} + +// GetGroup gets a single group. +func (c *GroupClient) GetGroup(ctx context.Context, req *monitoringpb.GetGroupRequest, opts ...gax.CallOption) (*monitoringpb.Group, error) { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName())) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.GetGroup[0:len(c.CallOptions.GetGroup):len(c.CallOptions.GetGroup)], opts...) + var resp *monitoringpb.Group + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.groupClient.GetGroup(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// CreateGroup creates a new group. +func (c *GroupClient) CreateGroup(ctx context.Context, req *monitoringpb.CreateGroupRequest, opts ...gax.CallOption) (*monitoringpb.Group, error) { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName())) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.CreateGroup[0:len(c.CallOptions.CreateGroup):len(c.CallOptions.CreateGroup)], opts...) + var resp *monitoringpb.Group + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.groupClient.CreateGroup(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// UpdateGroup updates an existing group. +// You can change any group attributes except name. +func (c *GroupClient) UpdateGroup(ctx context.Context, req *monitoringpb.UpdateGroupRequest, opts ...gax.CallOption) (*monitoringpb.Group, error) { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "group.name", req.GetGroup().GetName())) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.UpdateGroup[0:len(c.CallOptions.UpdateGroup):len(c.CallOptions.UpdateGroup)], opts...) + var resp *monitoringpb.Group + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.groupClient.UpdateGroup(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// DeleteGroup deletes an existing group. +func (c *GroupClient) DeleteGroup(ctx context.Context, req *monitoringpb.DeleteGroupRequest, opts ...gax.CallOption) error { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName())) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.DeleteGroup[0:len(c.CallOptions.DeleteGroup):len(c.CallOptions.DeleteGroup)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.groupClient.DeleteGroup(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +// ListGroupMembers lists the monitored resources that are members of a group. 
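A sketch of paging through group members via the iterator returned below ("projects/my-project/groups/my-group" is a hypothetical group name):

    package main

    import (
        "context"
        "log"

        monitoring "cloud.google.com/go/monitoring/apiv3"
        "google.golang.org/api/iterator"
        monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3"
    )

    func main() {
        ctx := context.Background()
        c, err := monitoring.NewGroupClient(ctx)
        if err != nil {
            log.Fatal(err)
        }
        defer c.Close()
        it := c.ListGroupMembers(ctx, &monitoringpb.ListGroupMembersRequest{
            Name: "projects/my-project/groups/my-group", // hypothetical
        })
        for {
            r, err := it.Next() // r is a *monitoredrespb.MonitoredResource
            if err == iterator.Done {
                break
            }
            if err != nil {
                log.Fatal(err)
            }
            log.Println(r.GetType(), r.GetLabels())
        }
    }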
+func (c *GroupClient) ListGroupMembers(ctx context.Context, req *monitoringpb.ListGroupMembersRequest, opts ...gax.CallOption) *MonitoredResourceIterator { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName())) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.ListGroupMembers[0:len(c.CallOptions.ListGroupMembers):len(c.CallOptions.ListGroupMembers)], opts...) + it := &MonitoredResourceIterator{} + req = proto.Clone(req).(*monitoringpb.ListGroupMembersRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoredrespb.MonitoredResource, string, error) { + var resp *monitoringpb.ListGroupMembersResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.groupClient.ListGroupMembers(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + return resp.Members, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.PageSize) + it.pageInfo.Token = req.PageToken + return it +} + +// GroupIterator manages a stream of *monitoringpb.Group. +type GroupIterator struct { + items []*monitoringpb.Group + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.Group, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *GroupIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *GroupIterator) Next() (*monitoringpb.Group, error) { + var item *monitoringpb.Group + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *GroupIterator) bufLen() int { + return len(it.items) +} + +func (it *GroupIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// MonitoredResourceIterator manages a stream of *monitoredrespb.MonitoredResource. +type MonitoredResourceIterator struct { + items []*monitoredrespb.MonitoredResource + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. 
+ InternalFetch func(pageSize int, pageToken string) (results []*monitoredrespb.MonitoredResource, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *MonitoredResourceIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *MonitoredResourceIterator) Next() (*monitoredrespb.MonitoredResource, error) { + var item *monitoredrespb.MonitoredResource + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *MonitoredResourceIterator) bufLen() int { + return len(it.items) +} + +func (it *MonitoredResourceIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/metric_client.go b/vendor/cloud.google.com/go/monitoring/apiv3/metric_client.go new file mode 100644 index 000000000..8e634fdb6 --- /dev/null +++ b/vendor/cloud.google.com/go/monitoring/apiv3/metric_client.go @@ -0,0 +1,469 @@ +// Copyright 2019 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by gapic-generator. DO NOT EDIT. + +package monitoring + +import ( + "context" + "fmt" + "math" + "time" + + "github.com/golang/protobuf/proto" + gax "github.com/googleapis/gax-go/v2" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/transport" + metricpb "google.golang.org/genproto/googleapis/api/metric" + monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres" + monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// MetricCallOptions contains the retry settings for each method of MetricClient. 
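Before the MetricCallOptions definition below, a sketch of the metric client in use; GetMetricDescriptor's body falls outside this hunk, and both resource names are hypothetical:

    package main

    import (
        "context"
        "log"

        monitoring "cloud.google.com/go/monitoring/apiv3"
        monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3"
    )

    func main() {
        ctx := context.Background()
        c, err := monitoring.NewMetricClient(ctx)
        if err != nil {
            log.Fatal(err)
        }
        defer c.Close()
        md, err := c.GetMetricDescriptor(ctx, &monitoringpb.GetMetricDescriptorRequest{
            Name: "projects/my-project/metricDescriptors/" +
                "compute.googleapis.com/instance/cpu/utilization", // hypothetical
        })
        if err != nil {
            log.Fatal(err)
        }
        log.Println(md.GetType())
    }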
+type MetricCallOptions struct { + ListMonitoredResourceDescriptors []gax.CallOption + GetMonitoredResourceDescriptor []gax.CallOption + ListMetricDescriptors []gax.CallOption + GetMetricDescriptor []gax.CallOption + CreateMetricDescriptor []gax.CallOption + DeleteMetricDescriptor []gax.CallOption + ListTimeSeries []gax.CallOption + CreateTimeSeries []gax.CallOption +} + +func defaultMetricClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("monitoring.googleapis.com:443"), + option.WithScopes(DefaultAuthScopes()...), + } +} + +func defaultMetricCallOptions() *MetricCallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.3, + }) + }), + }, + } + return &MetricCallOptions{ + ListMonitoredResourceDescriptors: retry[[2]string{"default", "idempotent"}], + GetMonitoredResourceDescriptor: retry[[2]string{"default", "idempotent"}], + ListMetricDescriptors: retry[[2]string{"default", "idempotent"}], + GetMetricDescriptor: retry[[2]string{"default", "idempotent"}], + CreateMetricDescriptor: retry[[2]string{"default", "non_idempotent"}], + DeleteMetricDescriptor: retry[[2]string{"default", "idempotent"}], + ListTimeSeries: retry[[2]string{"default", "idempotent"}], + CreateTimeSeries: retry[[2]string{"default", "non_idempotent"}], + } +} + +// MetricClient is a client for interacting with Stackdriver Monitoring API. +// +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +type MetricClient struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + metricClient monitoringpb.MetricServiceClient + + // The call options for this service. + CallOptions *MetricCallOptions + + // The x-goog-* metadata to be sent with each request. + xGoogMetadata metadata.MD +} + +// NewMetricClient creates a new metric service client. +// +// Manages metric descriptors, monitored resource descriptors, and +// time series data. +func NewMetricClient(ctx context.Context, opts ...option.ClientOption) (*MetricClient, error) { + conn, err := transport.DialGRPC(ctx, append(defaultMetricClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &MetricClient{ + conn: conn, + CallOptions: defaultMetricCallOptions(), + + metricClient: monitoringpb.NewMetricServiceClient(conn), + } + c.setGoogleClientInfo() + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *MetricClient) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *MetricClient) Close() error { + return c.conn.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *MetricClient) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", versionGo()}, keyval...) + kv = append(kv, "gapic", versionClient, "gax", gax.Version, "grpc", grpc.Version) + c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) +} + +// ListMonitoredResourceDescriptors lists monitored resource descriptors that match a filter. 
This method does +// not require a Stackdriver account. +func (c *MetricClient) ListMonitoredResourceDescriptors(ctx context.Context, req *monitoringpb.ListMonitoredResourceDescriptorsRequest, opts ...gax.CallOption) *MonitoredResourceDescriptorIterator { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName())) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.ListMonitoredResourceDescriptors[0:len(c.CallOptions.ListMonitoredResourceDescriptors):len(c.CallOptions.ListMonitoredResourceDescriptors)], opts...) + it := &MonitoredResourceDescriptorIterator{} + req = proto.Clone(req).(*monitoringpb.ListMonitoredResourceDescriptorsRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoredrespb.MonitoredResourceDescriptor, string, error) { + var resp *monitoringpb.ListMonitoredResourceDescriptorsResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.metricClient.ListMonitoredResourceDescriptors(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + return resp.ResourceDescriptors, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.PageSize) + it.pageInfo.Token = req.PageToken + return it +} + +// GetMonitoredResourceDescriptor gets a single monitored resource descriptor. This method does not require a +// Stackdriver account. +func (c *MetricClient) GetMonitoredResourceDescriptor(ctx context.Context, req *monitoringpb.GetMonitoredResourceDescriptorRequest, opts ...gax.CallOption) (*monitoredrespb.MonitoredResourceDescriptor, error) { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName())) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.GetMonitoredResourceDescriptor[0:len(c.CallOptions.GetMonitoredResourceDescriptor):len(c.CallOptions.GetMonitoredResourceDescriptor)], opts...) + var resp *monitoredrespb.MonitoredResourceDescriptor + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.metricClient.GetMonitoredResourceDescriptor(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// ListMetricDescriptors lists metric descriptors that match a filter. This method does not require +// a Stackdriver account. +func (c *MetricClient) ListMetricDescriptors(ctx context.Context, req *monitoringpb.ListMetricDescriptorsRequest, opts ...gax.CallOption) *MetricDescriptorIterator { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName())) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.ListMetricDescriptors[0:len(c.CallOptions.ListMetricDescriptors):len(c.CallOptions.ListMetricDescriptors)], opts...) 
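+	// The full slice expression above (s[0:len(s):len(s)]) caps capacity at
+	// length, so appending the caller's opts copies into a fresh backing
+	// array instead of mutating the shared CallOptions defaults.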
+ it := &MetricDescriptorIterator{} + req = proto.Clone(req).(*monitoringpb.ListMetricDescriptorsRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*metricpb.MetricDescriptor, string, error) { + var resp *monitoringpb.ListMetricDescriptorsResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.metricClient.ListMetricDescriptors(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + return resp.MetricDescriptors, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.PageSize) + it.pageInfo.Token = req.PageToken + return it +} + +// GetMetricDescriptor gets a single metric descriptor. This method does not require a Stackdriver +// account. +func (c *MetricClient) GetMetricDescriptor(ctx context.Context, req *monitoringpb.GetMetricDescriptorRequest, opts ...gax.CallOption) (*metricpb.MetricDescriptor, error) { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName())) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.GetMetricDescriptor[0:len(c.CallOptions.GetMetricDescriptor):len(c.CallOptions.GetMetricDescriptor)], opts...) + var resp *metricpb.MetricDescriptor + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.metricClient.GetMetricDescriptor(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// CreateMetricDescriptor creates a new metric descriptor. +// User-created metric descriptors define +// custom metrics (at /monitoring/custom-metrics). +func (c *MetricClient) CreateMetricDescriptor(ctx context.Context, req *monitoringpb.CreateMetricDescriptorRequest, opts ...gax.CallOption) (*metricpb.MetricDescriptor, error) { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName())) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.CreateMetricDescriptor[0:len(c.CallOptions.CreateMetricDescriptor):len(c.CallOptions.CreateMetricDescriptor)], opts...) + var resp *metricpb.MetricDescriptor + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.metricClient.CreateMetricDescriptor(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// DeleteMetricDescriptor deletes a metric descriptor. Only user-created +// custom metrics (at /monitoring/custom-metrics) can be deleted. +func (c *MetricClient) DeleteMetricDescriptor(ctx context.Context, req *monitoringpb.DeleteMetricDescriptorRequest, opts ...gax.CallOption) error { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName())) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.DeleteMetricDescriptor[0:len(c.CallOptions.DeleteMetricDescriptor):len(c.CallOptions.DeleteMetricDescriptor)], opts...) 
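+	// gax.Invoke wraps the RPC with the merged call options, applying any
+	// configured retry/backoff before the final error is surfaced.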
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.metricClient.DeleteMetricDescriptor(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +// ListTimeSeries lists time series that match a filter. This method does not require a +// Stackdriver account. +func (c *MetricClient) ListTimeSeries(ctx context.Context, req *monitoringpb.ListTimeSeriesRequest, opts ...gax.CallOption) *TimeSeriesIterator { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName())) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.ListTimeSeries[0:len(c.CallOptions.ListTimeSeries):len(c.CallOptions.ListTimeSeries)], opts...) + it := &TimeSeriesIterator{} + req = proto.Clone(req).(*monitoringpb.ListTimeSeriesRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.TimeSeries, string, error) { + var resp *monitoringpb.ListTimeSeriesResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.metricClient.ListTimeSeries(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + return resp.TimeSeries, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.PageSize) + it.pageInfo.Token = req.PageToken + return it +} + +// CreateTimeSeries creates or adds data to one or more time series. +// The response is empty if all time series in the request were written. +// If any time series could not be written, a corresponding failure message is +// included in the error response. +func (c *MetricClient) CreateTimeSeries(ctx context.Context, req *monitoringpb.CreateTimeSeriesRequest, opts ...gax.CallOption) error { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName())) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.CreateTimeSeries[0:len(c.CallOptions.CreateTimeSeries):len(c.CallOptions.CreateTimeSeries)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.metricClient.CreateTimeSeries(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +// MetricDescriptorIterator manages a stream of *metricpb.MetricDescriptor. +type MetricDescriptorIterator struct { + items []*metricpb.MetricDescriptor + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*metricpb.MetricDescriptor, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. 
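+// Because the iterator satisfies iterator.Pageable, results can also be
+// fetched a page at a time; a hedged sketch, where it is the iterator
+// returned by ListMetricDescriptors and the page size is arbitrary:
+//
+//	pager := iterator.NewPager(it, 50, "")
+//	var page []*metricpb.MetricDescriptor
+//	nextToken, err := pager.NextPage(&page)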
+func (it *MetricDescriptorIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *MetricDescriptorIterator) Next() (*metricpb.MetricDescriptor, error) { + var item *metricpb.MetricDescriptor + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *MetricDescriptorIterator) bufLen() int { + return len(it.items) +} + +func (it *MetricDescriptorIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// MonitoredResourceDescriptorIterator manages a stream of *monitoredrespb.MonitoredResourceDescriptor. +type MonitoredResourceDescriptorIterator struct { + items []*monitoredrespb.MonitoredResourceDescriptor + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*monitoredrespb.MonitoredResourceDescriptor, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *MonitoredResourceDescriptorIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *MonitoredResourceDescriptorIterator) Next() (*monitoredrespb.MonitoredResourceDescriptor, error) { + var item *monitoredrespb.MonitoredResourceDescriptor + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *MonitoredResourceDescriptorIterator) bufLen() int { + return len(it.items) +} + +func (it *MonitoredResourceDescriptorIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// TimeSeriesIterator manages a stream of *monitoringpb.TimeSeries. +type TimeSeriesIterator struct { + items []*monitoringpb.TimeSeries + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.TimeSeries, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *TimeSeriesIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. 
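+// A typical consumption loop, sketched with placeholder client and req
+// values:
+//
+//	it := client.ListTimeSeries(ctx, req)
+//	for {
+//		ts, err := it.Next()
+//		if err == iterator.Done {
+//			break
+//		}
+//		if err != nil {
+//			return err // or handle the failure
+//		}
+//		_ = ts // use the TimeSeries
+//	}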
+func (it *TimeSeriesIterator) Next() (*monitoringpb.TimeSeries, error) { + var item *monitoringpb.TimeSeries + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *TimeSeriesIterator) bufLen() int { + return len(it.items) +} + +func (it *TimeSeriesIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/notification_channel_client.go b/vendor/cloud.google.com/go/monitoring/apiv3/notification_channel_client.go new file mode 100644 index 000000000..4bc112511 --- /dev/null +++ b/vendor/cloud.google.com/go/monitoring/apiv3/notification_channel_client.go @@ -0,0 +1,385 @@ +// Copyright 2019 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by gapic-generator. DO NOT EDIT. + +package monitoring + +import ( + "context" + "fmt" + "math" + "time" + + "github.com/golang/protobuf/proto" + gax "github.com/googleapis/gax-go/v2" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/transport" + monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// NotificationChannelCallOptions contains the retry settings for each method of NotificationChannelClient. 
+type NotificationChannelCallOptions struct { + ListNotificationChannelDescriptors []gax.CallOption + GetNotificationChannelDescriptor []gax.CallOption + ListNotificationChannels []gax.CallOption + GetNotificationChannel []gax.CallOption + CreateNotificationChannel []gax.CallOption + UpdateNotificationChannel []gax.CallOption + DeleteNotificationChannel []gax.CallOption +} + +func defaultNotificationChannelClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("monitoring.googleapis.com:443"), + option.WithScopes(DefaultAuthScopes()...), + } +} + +func defaultNotificationChannelCallOptions() *NotificationChannelCallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.3, + }) + }), + }, + } + return &NotificationChannelCallOptions{ + ListNotificationChannelDescriptors: retry[[2]string{"default", "idempotent"}], + GetNotificationChannelDescriptor: retry[[2]string{"default", "idempotent"}], + ListNotificationChannels: retry[[2]string{"default", "idempotent"}], + GetNotificationChannel: retry[[2]string{"default", "idempotent"}], + CreateNotificationChannel: retry[[2]string{"default", "non_idempotent"}], + UpdateNotificationChannel: retry[[2]string{"default", "non_idempotent"}], + DeleteNotificationChannel: retry[[2]string{"default", "idempotent"}], + } +} + +// NotificationChannelClient is a client for interacting with Stackdriver Monitoring API. +// +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +type NotificationChannelClient struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + notificationChannelClient monitoringpb.NotificationChannelServiceClient + + // The call options for this service. + CallOptions *NotificationChannelCallOptions + + // The x-goog-* metadata to be sent with each request. + xGoogMetadata metadata.MD +} + +// NewNotificationChannelClient creates a new notification channel service client. +// +// The Notification Channel API provides access to configuration that +// controls how messages related to incidents are sent. +func NewNotificationChannelClient(ctx context.Context, opts ...option.ClientOption) (*NotificationChannelClient, error) { + conn, err := transport.DialGRPC(ctx, append(defaultNotificationChannelClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &NotificationChannelClient{ + conn: conn, + CallOptions: defaultNotificationChannelCallOptions(), + + notificationChannelClient: monitoringpb.NewNotificationChannelServiceClient(conn), + } + c.setGoogleClientInfo() + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *NotificationChannelClient) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *NotificationChannelClient) Close() error { + return c.conn.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. 
+func (c *NotificationChannelClient) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", versionGo()}, keyval...) + kv = append(kv, "gapic", versionClient, "gax", gax.Version, "grpc", grpc.Version) + c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) +} + +// ListNotificationChannelDescriptors lists the descriptors for supported channel types. The use of descriptors +// makes it possible for new channel types to be dynamically added. +func (c *NotificationChannelClient) ListNotificationChannelDescriptors(ctx context.Context, req *monitoringpb.ListNotificationChannelDescriptorsRequest, opts ...gax.CallOption) *NotificationChannelDescriptorIterator { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName())) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.ListNotificationChannelDescriptors[0:len(c.CallOptions.ListNotificationChannelDescriptors):len(c.CallOptions.ListNotificationChannelDescriptors)], opts...) + it := &NotificationChannelDescriptorIterator{} + req = proto.Clone(req).(*monitoringpb.ListNotificationChannelDescriptorsRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.NotificationChannelDescriptor, string, error) { + var resp *monitoringpb.ListNotificationChannelDescriptorsResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.notificationChannelClient.ListNotificationChannelDescriptors(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + return resp.ChannelDescriptors, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.PageSize) + it.pageInfo.Token = req.PageToken + return it +} + +// GetNotificationChannelDescriptor gets a single channel descriptor. The descriptor indicates which fields +// are expected / permitted for a notification channel of the given type. +func (c *NotificationChannelClient) GetNotificationChannelDescriptor(ctx context.Context, req *monitoringpb.GetNotificationChannelDescriptorRequest, opts ...gax.CallOption) (*monitoringpb.NotificationChannelDescriptor, error) { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName())) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.GetNotificationChannelDescriptor[0:len(c.CallOptions.GetNotificationChannelDescriptor):len(c.CallOptions.GetNotificationChannelDescriptor)], opts...) + var resp *monitoringpb.NotificationChannelDescriptor + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.notificationChannelClient.GetNotificationChannelDescriptor(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// ListNotificationChannels lists the notification channels that have been created for the project. 
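+// The returned iterator is lazy: no RPC is issued until the first call to
+// its Next method (or to a pager built from its PageInfo).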
+func (c *NotificationChannelClient) ListNotificationChannels(ctx context.Context, req *monitoringpb.ListNotificationChannelsRequest, opts ...gax.CallOption) *NotificationChannelIterator { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName())) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.ListNotificationChannels[0:len(c.CallOptions.ListNotificationChannels):len(c.CallOptions.ListNotificationChannels)], opts...) + it := &NotificationChannelIterator{} + req = proto.Clone(req).(*monitoringpb.ListNotificationChannelsRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.NotificationChannel, string, error) { + var resp *monitoringpb.ListNotificationChannelsResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.notificationChannelClient.ListNotificationChannels(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + return resp.NotificationChannels, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.PageSize) + it.pageInfo.Token = req.PageToken + return it +} + +// GetNotificationChannel gets a single notification channel. The channel includes the relevant +// configuration details with which the channel was created. However, the +// response may truncate or omit passwords, API keys, or other private key +// matter and thus the response may not be 100% identical to the information +// that was supplied in the call to the create method. +func (c *NotificationChannelClient) GetNotificationChannel(ctx context.Context, req *monitoringpb.GetNotificationChannelRequest, opts ...gax.CallOption) (*monitoringpb.NotificationChannel, error) { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName())) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.GetNotificationChannel[0:len(c.CallOptions.GetNotificationChannel):len(c.CallOptions.GetNotificationChannel)], opts...) + var resp *monitoringpb.NotificationChannel + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.notificationChannelClient.GetNotificationChannel(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// CreateNotificationChannel creates a new notification channel, representing a single notification +// endpoint such as an email address, SMS number, or PagerDuty service. +func (c *NotificationChannelClient) CreateNotificationChannel(ctx context.Context, req *monitoringpb.CreateNotificationChannelRequest, opts ...gax.CallOption) (*monitoringpb.NotificationChannel, error) { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName())) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.CreateNotificationChannel[0:len(c.CallOptions.CreateNotificationChannel):len(c.CallOptions.CreateNotificationChannel)], opts...) 
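+	// CreateNotificationChannel sits in the non_idempotent bucket of
+	// defaultNotificationChannelCallOptions, so no default retryer applies;
+	// retries happen only if the caller passes one in opts.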
+ var resp *monitoringpb.NotificationChannel + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.notificationChannelClient.CreateNotificationChannel(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// UpdateNotificationChannel updates a notification channel. Fields not specified in the field mask +// remain unchanged. +func (c *NotificationChannelClient) UpdateNotificationChannel(ctx context.Context, req *monitoringpb.UpdateNotificationChannelRequest, opts ...gax.CallOption) (*monitoringpb.NotificationChannel, error) { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "notification_channel.name", req.GetNotificationChannel().GetName())) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.UpdateNotificationChannel[0:len(c.CallOptions.UpdateNotificationChannel):len(c.CallOptions.UpdateNotificationChannel)], opts...) + var resp *monitoringpb.NotificationChannel + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.notificationChannelClient.UpdateNotificationChannel(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// DeleteNotificationChannel deletes a notification channel. +func (c *NotificationChannelClient) DeleteNotificationChannel(ctx context.Context, req *monitoringpb.DeleteNotificationChannelRequest, opts ...gax.CallOption) error { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName())) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.DeleteNotificationChannel[0:len(c.CallOptions.DeleteNotificationChannel):len(c.CallOptions.DeleteNotificationChannel)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.notificationChannelClient.DeleteNotificationChannel(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +// NotificationChannelDescriptorIterator manages a stream of *monitoringpb.NotificationChannelDescriptor. +type NotificationChannelDescriptorIterator struct { + items []*monitoringpb.NotificationChannelDescriptor + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.NotificationChannelDescriptor, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *NotificationChannelDescriptorIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. 
+func (it *NotificationChannelDescriptorIterator) Next() (*monitoringpb.NotificationChannelDescriptor, error) { + var item *monitoringpb.NotificationChannelDescriptor + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *NotificationChannelDescriptorIterator) bufLen() int { + return len(it.items) +} + +func (it *NotificationChannelDescriptorIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// NotificationChannelIterator manages a stream of *monitoringpb.NotificationChannel. +type NotificationChannelIterator struct { + items []*monitoringpb.NotificationChannel + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.NotificationChannel, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *NotificationChannelIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *NotificationChannelIterator) Next() (*monitoringpb.NotificationChannel, error) { + var item *monitoringpb.NotificationChannel + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *NotificationChannelIterator) bufLen() int { + return len(it.items) +} + +func (it *NotificationChannelIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/path_funcs.go b/vendor/cloud.google.com/go/monitoring/apiv3/path_funcs.go new file mode 100644 index 000000000..b2b514ba5 --- /dev/null +++ b/vendor/cloud.google.com/go/monitoring/apiv3/path_funcs.go @@ -0,0 +1,107 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package monitoring + +// GroupProjectPath returns the path for the project resource. +// +// Deprecated: Use +// fmt.Sprintf("projects/%s", project) +// instead. +func GroupProjectPath(project string) string { + return "" + + "projects/" + + project + + "" +} + +// GroupGroupPath returns the path for the group resource. +// +// Deprecated: Use +// fmt.Sprintf("projects/%s/groups/%s", project, group) +// instead. +func GroupGroupPath(project, group string) string { + return "" + + "projects/" + + project + + "/groups/" + + group + + "" +} + +// MetricProjectPath returns the path for the project resource. 
+// +// Deprecated: Use +// fmt.Sprintf("projects/%s", project) +// instead. +func MetricProjectPath(project string) string { + return "" + + "projects/" + + project + + "" +} + +// MetricMetricDescriptorPath returns the path for the metric descriptor resource. +// +// Deprecated: Use +// fmt.Sprintf("projects/%s/metricDescriptors/%s", project, metricDescriptor) +// instead. +func MetricMetricDescriptorPath(project, metricDescriptor string) string { + return "" + + "projects/" + + project + + "/metricDescriptors/" + + metricDescriptor + + "" +} + +// MetricMonitoredResourceDescriptorPath returns the path for the monitored resource descriptor resource. +// +// Deprecated: Use +// fmt.Sprintf("projects/%s/monitoredResourceDescriptors/%s", project, monitoredResourceDescriptor) +// instead. +func MetricMonitoredResourceDescriptorPath(project, monitoredResourceDescriptor string) string { + return "" + + "projects/" + + project + + "/monitoredResourceDescriptors/" + + monitoredResourceDescriptor + + "" +} + +// UptimeCheckProjectPath returns the path for the project resource. +// +// Deprecated: Use +// fmt.Sprintf("projects/%s", project) +// instead. +func UptimeCheckProjectPath(project string) string { + return "" + + "projects/" + + project + + "" +} + +// UptimeCheckUptimeCheckConfigPath returns the path for the uptime check config resource. +// +// Deprecated: Use +// fmt.Sprintf("projects/%s/uptimeCheckConfigs/%s", project, uptimeCheckConfig) +// instead. +func UptimeCheckUptimeCheckConfigPath(project, uptimeCheckConfig string) string { + return "" + + "projects/" + + project + + "/uptimeCheckConfigs/" + + uptimeCheckConfig + + "" +} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/uptime_check_client.go b/vendor/cloud.google.com/go/monitoring/apiv3/uptime_check_client.go new file mode 100644 index 000000000..0ce588303 --- /dev/null +++ b/vendor/cloud.google.com/go/monitoring/apiv3/uptime_check_client.go @@ -0,0 +1,369 @@ +// Copyright 2019 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by gapic-generator. DO NOT EDIT. + +package monitoring + +import ( + "context" + "fmt" + "math" + "time" + + "github.com/golang/protobuf/proto" + gax "github.com/googleapis/gax-go/v2" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/transport" + monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// UptimeCheckCallOptions contains the retry settings for each method of UptimeCheckClient. 
+type UptimeCheckCallOptions struct { + ListUptimeCheckConfigs []gax.CallOption + GetUptimeCheckConfig []gax.CallOption + CreateUptimeCheckConfig []gax.CallOption + UpdateUptimeCheckConfig []gax.CallOption + DeleteUptimeCheckConfig []gax.CallOption + ListUptimeCheckIps []gax.CallOption +} + +func defaultUptimeCheckClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("monitoring.googleapis.com:443"), + option.WithScopes(DefaultAuthScopes()...), + } +} + +func defaultUptimeCheckCallOptions() *UptimeCheckCallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.3, + }) + }), + }, + } + return &UptimeCheckCallOptions{ + ListUptimeCheckConfigs: retry[[2]string{"default", "idempotent"}], + GetUptimeCheckConfig: retry[[2]string{"default", "idempotent"}], + CreateUptimeCheckConfig: retry[[2]string{"default", "non_idempotent"}], + UpdateUptimeCheckConfig: retry[[2]string{"default", "non_idempotent"}], + DeleteUptimeCheckConfig: retry[[2]string{"default", "idempotent"}], + ListUptimeCheckIps: retry[[2]string{"default", "idempotent"}], + } +} + +// UptimeCheckClient is a client for interacting with Stackdriver Monitoring API. +// +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +type UptimeCheckClient struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + uptimeCheckClient monitoringpb.UptimeCheckServiceClient + + // The call options for this service. + CallOptions *UptimeCheckCallOptions + + // The x-goog-* metadata to be sent with each request. + xGoogMetadata metadata.MD +} + +// NewUptimeCheckClient creates a new uptime check service client. +// +// The UptimeCheckService API is used to manage (list, create, delete, edit) +// uptime check configurations in the Stackdriver Monitoring product. An uptime +// check is a piece of configuration that determines which resources and +// services to monitor for availability. These configurations can also be +// configured interactively by navigating to the [Cloud Console] +// (http://console.cloud.google.com), selecting the appropriate project, +// clicking on "Monitoring" on the left-hand side to navigate to Stackdriver, +// and then clicking on "Uptime". +func NewUptimeCheckClient(ctx context.Context, opts ...option.ClientOption) (*UptimeCheckClient, error) { + conn, err := transport.DialGRPC(ctx, append(defaultUptimeCheckClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &UptimeCheckClient{ + conn: conn, + CallOptions: defaultUptimeCheckCallOptions(), + + uptimeCheckClient: monitoringpb.NewUptimeCheckServiceClient(conn), + } + c.setGoogleClientInfo() + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *UptimeCheckClient) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *UptimeCheckClient) Close() error { + return c.conn.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. 
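+// The header value is a space-separated list of key/version pairs, for
+// example "gl-go/1.12.7 gapic/20190626 gax/2.0.4 grpc/1.21.0" (the versions
+// shown here are illustrative).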
+func (c *UptimeCheckClient) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", versionGo()}, keyval...) + kv = append(kv, "gapic", versionClient, "gax", gax.Version, "grpc", grpc.Version) + c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) +} + +// ListUptimeCheckConfigs lists the existing valid uptime check configurations for the project, +// leaving out any invalid configurations. +func (c *UptimeCheckClient) ListUptimeCheckConfigs(ctx context.Context, req *monitoringpb.ListUptimeCheckConfigsRequest, opts ...gax.CallOption) *UptimeCheckConfigIterator { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", req.GetParent())) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.ListUptimeCheckConfigs[0:len(c.CallOptions.ListUptimeCheckConfigs):len(c.CallOptions.ListUptimeCheckConfigs)], opts...) + it := &UptimeCheckConfigIterator{} + req = proto.Clone(req).(*monitoringpb.ListUptimeCheckConfigsRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.UptimeCheckConfig, string, error) { + var resp *monitoringpb.ListUptimeCheckConfigsResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.uptimeCheckClient.ListUptimeCheckConfigs(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + return resp.UptimeCheckConfigs, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.PageSize) + it.pageInfo.Token = req.PageToken + return it +} + +// GetUptimeCheckConfig gets a single uptime check configuration. +func (c *UptimeCheckClient) GetUptimeCheckConfig(ctx context.Context, req *monitoringpb.GetUptimeCheckConfigRequest, opts ...gax.CallOption) (*monitoringpb.UptimeCheckConfig, error) { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName())) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.GetUptimeCheckConfig[0:len(c.CallOptions.GetUptimeCheckConfig):len(c.CallOptions.GetUptimeCheckConfig)], opts...) + var resp *monitoringpb.UptimeCheckConfig + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.uptimeCheckClient.GetUptimeCheckConfig(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// CreateUptimeCheckConfig creates a new uptime check configuration. +func (c *UptimeCheckClient) CreateUptimeCheckConfig(ctx context.Context, req *monitoringpb.CreateUptimeCheckConfigRequest, opts ...gax.CallOption) (*monitoringpb.UptimeCheckConfig, error) { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", req.GetParent())) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.CreateUptimeCheckConfig[0:len(c.CallOptions.CreateUptimeCheckConfig):len(c.CallOptions.CreateUptimeCheckConfig)], opts...) 
+ var resp *monitoringpb.UptimeCheckConfig + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.uptimeCheckClient.CreateUptimeCheckConfig(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// UpdateUptimeCheckConfig updates an uptime check configuration. You can either replace the entire +// configuration with a new one or replace only certain fields in the current +// configuration by specifying the fields to be updated via "updateMask". +// Returns the updated configuration. +func (c *UptimeCheckClient) UpdateUptimeCheckConfig(ctx context.Context, req *monitoringpb.UpdateUptimeCheckConfigRequest, opts ...gax.CallOption) (*monitoringpb.UptimeCheckConfig, error) { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "uptime_check_config.name", req.GetUptimeCheckConfig().GetName())) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.UpdateUptimeCheckConfig[0:len(c.CallOptions.UpdateUptimeCheckConfig):len(c.CallOptions.UpdateUptimeCheckConfig)], opts...) + var resp *monitoringpb.UptimeCheckConfig + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.uptimeCheckClient.UpdateUptimeCheckConfig(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// DeleteUptimeCheckConfig deletes an uptime check configuration. Note that this method will fail +// if the uptime check configuration is referenced by an alert policy or +// other dependent configs that would be rendered invalid by the deletion. +func (c *UptimeCheckClient) DeleteUptimeCheckConfig(ctx context.Context, req *monitoringpb.DeleteUptimeCheckConfigRequest, opts ...gax.CallOption) error { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName())) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.DeleteUptimeCheckConfig[0:len(c.CallOptions.DeleteUptimeCheckConfig):len(c.CallOptions.DeleteUptimeCheckConfig)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.uptimeCheckClient.DeleteUptimeCheckConfig(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +// ListUptimeCheckIps returns the list of IPs that checkers run from +func (c *UptimeCheckClient) ListUptimeCheckIps(ctx context.Context, req *monitoringpb.ListUptimeCheckIpsRequest, opts ...gax.CallOption) *UptimeCheckIpIterator { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ListUptimeCheckIps[0:len(c.CallOptions.ListUptimeCheckIps):len(c.CallOptions.ListUptimeCheckIps)], opts...) + it := &UptimeCheckIpIterator{} + req = proto.Clone(req).(*monitoringpb.ListUptimeCheckIpsRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.UptimeCheckIp, string, error) { + var resp *monitoringpb.ListUptimeCheckIpsResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.uptimeCheckClient.ListUptimeCheckIps(ctx, req, settings.GRPC...) + return err + }, opts...) 
+ if err != nil { + return nil, "", err + } + return resp.UptimeCheckIps, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.PageSize) + it.pageInfo.Token = req.PageToken + return it +} + +// UptimeCheckConfigIterator manages a stream of *monitoringpb.UptimeCheckConfig. +type UptimeCheckConfigIterator struct { + items []*monitoringpb.UptimeCheckConfig + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.UptimeCheckConfig, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *UptimeCheckConfigIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *UptimeCheckConfigIterator) Next() (*monitoringpb.UptimeCheckConfig, error) { + var item *monitoringpb.UptimeCheckConfig + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *UptimeCheckConfigIterator) bufLen() int { + return len(it.items) +} + +func (it *UptimeCheckConfigIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// UptimeCheckIpIterator manages a stream of *monitoringpb.UptimeCheckIp. +type UptimeCheckIpIterator struct { + items []*monitoringpb.UptimeCheckIp + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.UptimeCheckIp, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *UptimeCheckIpIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. 
+func (it *UptimeCheckIpIterator) Next() (*monitoringpb.UptimeCheckIp, error) {
+	var item *monitoringpb.UptimeCheckIp
+	if err := it.nextFunc(); err != nil {
+		return item, err
+	}
+	item = it.items[0]
+	it.items = it.items[1:]
+	return item, nil
+}
+
+func (it *UptimeCheckIpIterator) bufLen() int {
+	return len(it.items)
+}
+
+func (it *UptimeCheckIpIterator) takeBuf() interface{} {
+	b := it.items
+	it.items = nil
+	return b
+}
diff --git a/vendor/cloud.google.com/go/trace/apiv2/doc.go b/vendor/cloud.google.com/go/trace/apiv2/doc.go
new file mode 100644
index 000000000..fa76ed3fc
--- /dev/null
+++ b/vendor/cloud.google.com/go/trace/apiv2/doc.go
@@ -0,0 +1,105 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by gapic-generator. DO NOT EDIT.
+
+// Package trace is an auto-generated package for the
+// Stackdriver Trace API.
+//
+// NOTE: This package is in alpha. It is not stable, and is likely to change.
+//
+// Sends application trace data to Stackdriver Trace for viewing. Trace data
+// is collected for all App Engine applications by default. Trace data from
+// other applications can be provided using this API.
+//
+// Use of Context
+//
+// The ctx passed to NewClient is used for authentication requests and
+// for creating the underlying connection, but is not used for subsequent calls.
+// Individual methods on the client use the ctx given to them.
+//
+// To close the open connection, use the Close() method.
+//
+// For information about setting deadlines, reusing contexts, and more
+// please visit godoc.org/cloud.google.com/go.
+package trace // import "cloud.google.com/go/trace/apiv2"
+
+import (
+	"context"
+	"runtime"
+	"strings"
+	"unicode"
+
+	"google.golang.org/grpc/metadata"
+)
+
+func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context {
+	out, _ := metadata.FromOutgoingContext(ctx)
+	out = out.Copy()
+	for _, md := range mds {
+		for k, v := range md {
+			out[k] = append(out[k], v...)
+		}
+	}
+	return metadata.NewOutgoingContext(ctx, out)
+}
+
+// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
+func DefaultAuthScopes() []string {
+	return []string{
+		"https://www.googleapis.com/auth/cloud-platform",
+		"https://www.googleapis.com/auth/trace.append",
+	}
+}
+
+// versionGo returns the Go runtime version. The returned string
+// has no whitespace, suitable for reporting in header.
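+// For example, go1.12.7 is reported as "1.12.7" and go1.13beta1 as
+// "1.13.0-beta1"; devel builds report whatever follows the "devel +" prefix.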
+func versionGo() string { + const develPrefix = "devel +" + + s := runtime.Version() + if strings.HasPrefix(s, develPrefix) { + s = s[len(develPrefix):] + if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 { + s = s[:p] + } + return s + } + + notSemverRune := func(r rune) bool { + return strings.IndexRune("0123456789.", r) < 0 + } + + if strings.HasPrefix(s, "go1") { + s = s[2:] + var prerelease string + if p := strings.IndexFunc(s, notSemverRune); p >= 0 { + s, prerelease = s[:p], s[p:] + } + if strings.HasSuffix(s, ".") { + s += "0" + } else if strings.Count(s, ".") < 2 { + s += ".0" + } + if prerelease != "" { + s += "-" + prerelease + } + return s + } + return "UNKNOWN" +} + +const versionClient = "20190626" diff --git a/vendor/cloud.google.com/go/trace/apiv2/path_funcs.go b/vendor/cloud.google.com/go/trace/apiv2/path_funcs.go new file mode 100644 index 000000000..80b8d40b5 --- /dev/null +++ b/vendor/cloud.google.com/go/trace/apiv2/path_funcs.go @@ -0,0 +1,43 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace + +// ProjectPath returns the path for the project resource. +// +// Deprecated: Use +// fmt.Sprintf("projects/%s", project) +// instead. +func ProjectPath(project string) string { + return "" + + "projects/" + + project + + "" +} + +// SpanPath returns the path for the span resource. +// +// Deprecated: Use +// fmt.Sprintf("projects/%s/traces/%s/spans/%s", project, trace, span) +// instead. +func SpanPath(project, trace, span string) string { + return "" + + "projects/" + + project + + "/traces/" + + trace + + "/spans/" + + span + + "" +} diff --git a/vendor/cloud.google.com/go/trace/apiv2/trace_client.go b/vendor/cloud.google.com/go/trace/apiv2/trace_client.go new file mode 100644 index 000000000..a9948d240 --- /dev/null +++ b/vendor/cloud.google.com/go/trace/apiv2/trace_client.go @@ -0,0 +1,155 @@ +// Copyright 2019 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by gapic-generator. DO NOT EDIT. + +package trace + +import ( + "context" + "fmt" + "time" + + gax "github.com/googleapis/gax-go/v2" + "google.golang.org/api/option" + "google.golang.org/api/transport" + cloudtracepb "google.golang.org/genproto/googleapis/devtools/cloudtrace/v2" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// CallOptions contains the retry settings for each method of Client. 
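+// With the defaults below, CreateSpan retries on DeadlineExceeded and
+// Unavailable, while BatchWriteSpans falls in the non_idempotent bucket and
+// is not retried unless the caller supplies a retry option.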
+type CallOptions struct { + BatchWriteSpans []gax.CallOption + CreateSpan []gax.CallOption +} + +func defaultClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("cloudtrace.googleapis.com:443"), + option.WithScopes(DefaultAuthScopes()...), + } +} + +func defaultCallOptions() *CallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 1000 * time.Millisecond, + Multiplier: 1.2, + }) + }), + }, + } + return &CallOptions{ + BatchWriteSpans: retry[[2]string{"default", "non_idempotent"}], + CreateSpan: retry[[2]string{"default", "idempotent"}], + } +} + +// Client is a client for interacting with Stackdriver Trace API. +// +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +type Client struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + client cloudtracepb.TraceServiceClient + + // The call options for this service. + CallOptions *CallOptions + + // The x-goog-* metadata to be sent with each request. + xGoogMetadata metadata.MD +} + +// NewClient creates a new trace service client. +// +// This file describes an API for collecting and viewing traces and spans +// within a trace. A Trace is a collection of spans corresponding to a single +// operation or set of operations for an application. A span is an individual +// timed event which forms a node of the trace tree. A single trace may +// contain span(s) from multiple services. +func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) { + conn, err := transport.DialGRPC(ctx, append(defaultClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &Client{ + conn: conn, + CallOptions: defaultCallOptions(), + + client: cloudtracepb.NewTraceServiceClient(conn), + } + c.setGoogleClientInfo() + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *Client) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *Client) Close() error { + return c.conn.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *Client) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", versionGo()}, keyval...) + kv = append(kv, "gapic", versionClient, "gax", gax.Version, "grpc", grpc.Version) + c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) +} + +// BatchWriteSpans sends new spans to new or existing traces. You cannot update +// existing spans. +func (c *Client) BatchWriteSpans(ctx context.Context, req *cloudtracepb.BatchWriteSpansRequest, opts ...gax.CallOption) error { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName())) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.BatchWriteSpans[0:len(c.CallOptions.BatchWriteSpans):len(c.CallOptions.BatchWriteSpans)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.client.BatchWriteSpans(ctx, req, settings.GRPC...) 
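+		// gax.Invoke re-runs this closure according to the retry settings
+		// resolved from CallOptions, so the call above may execute several
+		// times before the final err is returned. A caller-side sketch
+		// (ctx, client and spans are assumed to exist):
+		//
+		//	req := &cloudtracepb.BatchWriteSpansRequest{
+		//		Name:  "projects/my-project",
+		//		Spans: spans,
+		//	}
+		//	if err := client.BatchWriteSpans(ctx, req); err != nil {
+		//		log.Fatal(err)
+		//	}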
+ return err + }, opts...) + return err +} + +// CreateSpan creates a new span. +func (c *Client) CreateSpan(ctx context.Context, req *cloudtracepb.Span, opts ...gax.CallOption) (*cloudtracepb.Span, error) { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName())) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.CreateSpan[0:len(c.CallOptions.CreateSpan):len(c.CallOptions.CreateSpan)], opts...) + var resp *cloudtracepb.Span + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.CreateSpan(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} diff --git a/vendor/contrib.go.opencensus.io/exporter/stackdriver/AUTHORS b/vendor/contrib.go.opencensus.io/exporter/stackdriver/AUTHORS new file mode 100644 index 000000000..e491a9e7f --- /dev/null +++ b/vendor/contrib.go.opencensus.io/exporter/stackdriver/AUTHORS @@ -0,0 +1 @@ +Google Inc. diff --git a/vendor/contrib.go.opencensus.io/exporter/stackdriver/LICENSE b/vendor/contrib.go.opencensus.io/exporter/stackdriver/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/contrib.go.opencensus.io/exporter/stackdriver/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/contrib.go.opencensus.io/exporter/stackdriver/label.go b/vendor/contrib.go.opencensus.io/exporter/stackdriver/label.go new file mode 100644 index 000000000..88835cc0f --- /dev/null +++ b/vendor/contrib.go.opencensus.io/exporter/stackdriver/label.go @@ -0,0 +1,33 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package stackdriver + +// Labels represents a set of Stackdriver Monitoring labels. 
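+//
+// A Labels value is typically populated with Set and handed to the exporter
+// through Options.DefaultMonitoringLabels (a sketch; the key, value and
+// description are placeholders):
+//
+//	var l stackdriver.Labels
+//	l.Set("service", "checkout", "name of the service")
+//	opts := stackdriver.Options{DefaultMonitoringLabels: &l}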
+type Labels struct { + m map[string]labelValue +} + +type labelValue struct { + val, desc string +} + +// Set stores a label with the given key, value and description, +// overwriting any previous values with the given key. +func (labels *Labels) Set(key, value, description string) { + if labels.m == nil { + labels.m = make(map[string]labelValue) + } + labels.m[key] = labelValue{value, description} +} diff --git a/vendor/contrib.go.opencensus.io/exporter/stackdriver/metrics.go b/vendor/contrib.go.opencensus.io/exporter/stackdriver/metrics.go new file mode 100644 index 000000000..9b3b7bf19 --- /dev/null +++ b/vendor/contrib.go.opencensus.io/exporter/stackdriver/metrics.go @@ -0,0 +1,547 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package stackdriver + +/* +The code in this file is responsible for converting OpenCensus Proto metrics +directly to Stackdriver Metrics. +*/ + +import ( + "context" + "errors" + "fmt" + "path" + + "github.com/golang/protobuf/ptypes/timestamp" + "go.opencensus.io/stats" + "go.opencensus.io/trace" + + "cloud.google.com/go/monitoring/apiv3" + distributionpb "google.golang.org/genproto/googleapis/api/distribution" + labelpb "google.golang.org/genproto/googleapis/api/label" + googlemetricpb "google.golang.org/genproto/googleapis/api/metric" + monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres" + monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3" + + commonpb "github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1" + metricspb "github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1" + resourcepb "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1" +) + +var errNilMetric = errors.New("expecting a non-nil metric") + +type metricPayload struct { + node *commonpb.Node + resource *resourcepb.Resource + metric *metricspb.Metric +} + +// ExportMetric exports OpenCensus Metrics to Stackdriver Monitoring. +func (se *statsExporter) ExportMetric(ctx context.Context, node *commonpb.Node, rsc *resourcepb.Resource, metric *metricspb.Metric) error { + if metric == nil { + return errNilMetric + } + + payload := &metricPayload{ + metric: metric, + resource: rsc, + node: node, + } + se.protoMetricsBundler.Add(payload, 1) + + return nil +} + +func (se *statsExporter) handleMetricsUpload(payloads []*metricPayload) error { + ctx, cancel := se.o.newContextWithTimeout() + defer cancel() + + ctx, span := trace.StartSpan( + ctx, + "contrib.go.opencensus.io/exporter/stackdriver.uploadMetrics", + trace.WithSampler(trace.NeverSample()), + ) + defer span.End() + + for _, payload := range payloads { + // Now create the metric descriptor remotely. 
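+		// createMetricDescriptor (defined later in this file) caches
+		// descriptors by metric name, so this loop issues at most one
+		// CreateMetricDescriptor RPC per distinct name. For a payload
+		// stream such as (a sketch)
+		//
+		//	"latency", "latency", "completed_rpcs"
+		//
+		// only two descriptor RPCs are sent.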
+		if err := se.createMetricDescriptor(ctx, payload.metric); err != nil {
+			span.SetStatus(trace.Status{Code: trace.StatusCodeUnknown, Message: err.Error()})
+			return err
+		}
+	}
+
+	var allTimeSeries []*monitoringpb.TimeSeries
+	for _, payload := range payloads {
+		tsl, err := se.protoMetricToTimeSeries(ctx, payload.node, payload.resource, payload.metric)
+		if err != nil {
+			span.SetStatus(trace.Status{Code: trace.StatusCodeUnknown, Message: err.Error()})
+			return err
+		}
+		allTimeSeries = append(allTimeSeries, tsl...)
+	}
+
+	// Now batch the timeseries up and then export.
+	for start, end := 0, 0; start < len(allTimeSeries); start = end {
+		end = start + maxTimeSeriesPerUpload
+		if end > len(allTimeSeries) {
+			end = len(allTimeSeries)
+		}
+		batch := allTimeSeries[start:end]
+		ctsreql := se.combineTimeSeriesToCreateTimeSeriesRequest(batch)
+		for _, ctsreq := range ctsreql {
+			if err := createTimeSeries(ctx, se.c, ctsreq); err != nil {
+				span.SetStatus(trace.Status{Code: trace.StatusCodeUnknown, Message: err.Error()})
+				// TODO(@odeke-em): Don't fail fast here, perhaps batch errors?
+				// return err
+			}
+		}
+	}
+
+	return nil
+}
+
+func (se *statsExporter) combineTimeSeriesToCreateTimeSeriesRequest(ts []*monitoringpb.TimeSeries) (ctsreql []*monitoringpb.CreateTimeSeriesRequest) {
+	if len(ts) == 0 {
+		return nil
+	}
+
+	// Since there are scenarios in which Metrics with the same Type
+	// can be bunched in the same TimeSeries, we have to ensure that
+	// we create a unique CreateTimeSeriesRequest with entirely unique Metrics
+	// per TimeSeries, lest we'll encounter:
+	//
+	//	err: rpc error: code = InvalidArgument desc = One or more TimeSeries could not be written:
+	//	Field timeSeries[2] had an invalid value: Duplicate TimeSeries encountered.
+	//	Only one point can be written per TimeSeries per request.: timeSeries[2]
+	//
+	// This scenario happens when we are using the OpenCensus Agent in which multiple metrics
+	// are streamed by various client applications.
+	// See https://github.com/census-ecosystem/opencensus-go-exporter-stackdriver/issues/73
+	uniqueTimeSeries := make([]*monitoringpb.TimeSeries, 0, len(ts))
+	nonUniqueTimeSeries := make([]*monitoringpb.TimeSeries, 0, len(ts))
+	seenMetrics := make(map[string]struct{})
+
+	for _, tti := range ts {
+		signature := tti.Metric.GetType()
+		if _, alreadySeen := seenMetrics[signature]; !alreadySeen {
+			uniqueTimeSeries = append(uniqueTimeSeries, tti)
+			seenMetrics[signature] = struct{}{}
+		} else {
+			nonUniqueTimeSeries = append(nonUniqueTimeSeries, tti)
+		}
+	}
+
+	// The unique TimeSeries can be bunched together in a single request,
+	// while each entry in nonUniqueTimeSeries needs its own
+	// CreateTimeSeriesRequest.
+	ctsreql = append(ctsreql, &monitoringpb.CreateTimeSeriesRequest{
+		Name:       monitoring.MetricProjectPath(se.o.ProjectID),
+		TimeSeries: uniqueTimeSeries,
+	})
+
+	// Now recursively also combine the non-unique TimeSeries
+	// that were singly added to nonUniqueTimeSeries.
+	// The reason is that we need optimal combinations because:
+	//	* "a/b/c"
+	//	* "a/b/c"
+	//	* "x/y/z"
+	//	* "a/b/c"
+	//	* "x/y/z"
+	//	* "p/y/z"
+	//	* "d/y/z"
+	//
+	// should produce:
+	//	CreateTimeSeries(uniqueTimeSeries)    :: ["a/b/c", "x/y/z", "p/y/z", "d/y/z"]
+	//	CreateTimeSeries(nonUniqueTimeSeries) :: ["a/b/c"]
+	//	CreateTimeSeries(nonUniqueTimeSeries) :: ["a/b/c", "x/y/z"]
+	nonUniqueRequests := se.combineTimeSeriesToCreateTimeSeriesRequest(nonUniqueTimeSeries)
+	ctsreql = append(ctsreql, nonUniqueRequests...)
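+	// Worked example of the recursion (a sketch): for the input listed in
+	// the comment above, the first pass emits one request carrying
+	// ["a/b/c", "x/y/z", "p/y/z", "d/y/z"], the recursion then emits
+	// ["a/b/c", "x/y/z"] and finally ["a/b/c"], i.e. three
+	// CreateTimeSeriesRequests in total.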
+ + return ctsreql +} + +// protoMetricToTimeSeries converts a metric into a Stackdriver Monitoring v3 API CreateTimeSeriesRequest +// but it doesn't invoke any remote API. +func (se *statsExporter) protoMetricToTimeSeries(ctx context.Context, node *commonpb.Node, rsc *resourcepb.Resource, metric *metricspb.Metric) ([]*monitoringpb.TimeSeries, error) { + if metric == nil { + return nil, errNilMetric + } + + var resource = rsc + if metric.Resource != nil { + resource = metric.Resource + } + + metricName, _, _, _ := metricProseFromProto(metric) + metricType, _ := se.metricTypeFromProto(metricName) + metricLabelKeys := metric.GetMetricDescriptor().GetLabelKeys() + metricKind, _ := protoMetricDescriptorTypeToMetricKind(metric) + + timeSeries := make([]*monitoringpb.TimeSeries, 0, len(metric.Timeseries)) + for _, protoTimeSeries := range metric.Timeseries { + sdPoints, err := se.protoTimeSeriesToMonitoringPoints(protoTimeSeries, metricKind) + if err != nil { + return nil, err + } + + // Each TimeSeries has labelValues which MUST be correlated + // with that from the MetricDescriptor + labels, err := labelsPerTimeSeries(se.defaultLabels, metricLabelKeys, protoTimeSeries.GetLabelValues()) + if err != nil { + // TODO: (@odeke-em) perhaps log this error from labels extraction, if non-nil. + continue + } + timeSeries = append(timeSeries, &monitoringpb.TimeSeries{ + Metric: &googlemetricpb.Metric{ + Type: metricType, + Labels: labels, + }, + Resource: protoResourceToMonitoredResource(resource), + Points: sdPoints, + }) + } + + return timeSeries, nil +} + +func labelsPerTimeSeries(defaults map[string]labelValue, labelKeys []*metricspb.LabelKey, labelValues []*metricspb.LabelValue) (map[string]string, error) { + labels := make(map[string]string) + // Fill in the defaults firstly, irrespective of if the labelKeys and labelValues are mismatched. + for key, label := range defaults { + labels[sanitize(key)] = label.val + } + + // Perform this sanity check now. + if len(labelKeys) != len(labelValues) { + return labels, fmt.Errorf("Length mismatch: len(labelKeys)=%d len(labelValues)=%d", len(labelKeys), len(labelValues)) + } + + for i, labelKey := range labelKeys { + labelValue := labelValues[i] + labels[sanitize(labelKey.GetKey())] = labelValue.GetValue() + } + + return labels, nil +} + +func (se *statsExporter) protoMetricDescriptorToCreateMetricDescriptorRequest(ctx context.Context, metric *metricspb.Metric) (*monitoringpb.CreateMetricDescriptorRequest, error) { + // Otherwise, we encountered a cache-miss and + // should create the metric descriptor remotely. + inMD, err := se.protoToMonitoringMetricDescriptor(metric) + if err != nil { + return nil, err + } + + cmrdesc := &monitoringpb.CreateMetricDescriptorRequest{ + Name: fmt.Sprintf("projects/%s", se.o.ProjectID), + MetricDescriptor: inMD, + } + + return cmrdesc, nil +} + +// createMetricDescriptor creates a metric descriptor from the OpenCensus proto metric +// and then creates it remotely using Stackdriver's API. +func (se *statsExporter) createMetricDescriptor(ctx context.Context, metric *metricspb.Metric) error { + se.protoMu.Lock() + defer se.protoMu.Unlock() + + name := metric.GetMetricDescriptor().GetName() + if _, created := se.protoMetricDescriptors[name]; created { + return nil + } + + // Otherwise, we encountered a cache-miss and + // should create the metric descriptor remotely. 
+ inMD, err := se.protoToMonitoringMetricDescriptor(metric) + if err != nil { + return err + } + + var md *googlemetricpb.MetricDescriptor + if builtinMetric(inMD.Type) { + gmrdesc := &monitoringpb.GetMetricDescriptorRequest{ + Name: inMD.Name, + } + md, err = getMetricDescriptor(ctx, se.c, gmrdesc) + } else { + + cmrdesc := &monitoringpb.CreateMetricDescriptorRequest{ + Name: fmt.Sprintf("projects/%s", se.o.ProjectID), + MetricDescriptor: inMD, + } + md, err = createMetricDescriptor(ctx, se.c, cmrdesc) + } + + if err == nil { + // Now record the metric as having been created. + se.protoMetricDescriptors[name] = md + } + + return err +} + +func (se *statsExporter) protoTimeSeriesToMonitoringPoints(ts *metricspb.TimeSeries, metricKind googlemetricpb.MetricDescriptor_MetricKind) (sptl []*monitoringpb.Point, err error) { + for _, pt := range ts.Points { + + // If we have a last value aggregation point i.e. MetricDescriptor_GAUGE + // StartTime should be nil. + startTime := ts.StartTimestamp + if metricKind == googlemetricpb.MetricDescriptor_GAUGE { + startTime = nil + } + + spt, err := fromProtoPoint(startTime, pt) + if err != nil { + return nil, err + } + sptl = append(sptl, spt) + } + return sptl, nil +} + +func (se *statsExporter) protoToMonitoringMetricDescriptor(metric *metricspb.Metric) (*googlemetricpb.MetricDescriptor, error) { + if metric == nil { + return nil, errNilMetric + } + + metricName, description, unit, _ := metricProseFromProto(metric) + metricType, _ := se.metricTypeFromProto(metricName) + displayName := se.displayName(metricName) + metricKind, valueType := protoMetricDescriptorTypeToMetricKind(metric) + + sdm := &googlemetricpb.MetricDescriptor{ + Name: fmt.Sprintf("projects/%s/metricDescriptors/%s", se.o.ProjectID, metricType), + DisplayName: displayName, + Description: description, + Unit: unit, + Type: metricType, + MetricKind: metricKind, + ValueType: valueType, + Labels: labelDescriptorsFromProto(se.defaultLabels, metric.GetMetricDescriptor().GetLabelKeys()), + } + + return sdm, nil +} + +func labelDescriptorsFromProto(defaults map[string]labelValue, protoLabelKeys []*metricspb.LabelKey) []*labelpb.LabelDescriptor { + labelDescriptors := make([]*labelpb.LabelDescriptor, 0, len(defaults)+len(protoLabelKeys)) + + // Fill in the defaults first. + for key, lbl := range defaults { + labelDescriptors = append(labelDescriptors, &labelpb.LabelDescriptor{ + Key: sanitize(key), + Description: lbl.desc, + ValueType: labelpb.LabelDescriptor_STRING, + }) + } + + // Now fill in those from the metric. + for _, protoKey := range protoLabelKeys { + labelDescriptors = append(labelDescriptors, &labelpb.LabelDescriptor{ + Key: sanitize(protoKey.GetKey()), + Description: protoKey.GetDescription(), + ValueType: labelpb.LabelDescriptor_STRING, // We only use string tags + }) + } + return labelDescriptors +} + +func metricProseFromProto(metric *metricspb.Metric) (name, description, unit string, ok bool) { + mname := metric.GetName() + if mname != "" { + name = mname + return + } + + md := metric.GetMetricDescriptor() + + name = md.GetName() + unit = md.GetUnit() + description = md.GetDescription() + + if md != nil && md.Type == metricspb.MetricDescriptor_CUMULATIVE_INT64 { + // If the aggregation type is count, which counts the number of recorded measurements, the unit must be "1", + // because this view does not apply to the recorded values. 
+ unit = stats.UnitDimensionless + } + + return +} + +func (se *statsExporter) metricTypeFromProto(name string) (string, bool) { + // TODO: (@odeke-em) support non-"custom.googleapis.com" metrics names. + name = path.Join("custom.googleapis.com", "opencensus", name) + return name, true +} + +func fromProtoPoint(startTime *timestamp.Timestamp, pt *metricspb.Point) (*monitoringpb.Point, error) { + if pt == nil { + return nil, nil + } + + mptv, err := protoToMetricPoint(pt.Value) + if err != nil { + return nil, err + } + + mpt := &monitoringpb.Point{ + Value: mptv, + Interval: &monitoringpb.TimeInterval{ + StartTime: startTime, + EndTime: pt.Timestamp, + }, + } + return mpt, nil +} + +func protoToMetricPoint(value interface{}) (*monitoringpb.TypedValue, error) { + if value == nil { + return nil, nil + } + + var err error + var tval *monitoringpb.TypedValue + switch v := value.(type) { + default: + // All the other types are not yet handled. + // TODO: (@odeke-em, @songy23) talk to the Stackdriver team to determine + // the use cases for: + // + // *TypedValue_BoolValue + // *TypedValue_StringValue + // + // and then file feature requests on OpenCensus-Specs and then OpenCensus-Proto, + // lest we shall error here. + // + // TODO: Add conversion from SummaryValue when + // https://github.com/census-ecosystem/opencensus-go-exporter-stackdriver/issues/66 + // has been figured out. + err = fmt.Errorf("protoToMetricPoint: unknown Data type: %T", value) + + case *metricspb.Point_Int64Value: + tval = &monitoringpb.TypedValue{ + Value: &monitoringpb.TypedValue_Int64Value{ + Int64Value: v.Int64Value, + }, + } + + case *metricspb.Point_DoubleValue: + tval = &monitoringpb.TypedValue{ + Value: &monitoringpb.TypedValue_DoubleValue{ + DoubleValue: v.DoubleValue, + }, + } + + case *metricspb.Point_DistributionValue: + dv := v.DistributionValue + var mv *monitoringpb.TypedValue_DistributionValue + if dv != nil { + var mean float64 + if dv.Count > 0 { + mean = float64(dv.Sum) / float64(dv.Count) + } + mv = &monitoringpb.TypedValue_DistributionValue{ + DistributionValue: &distributionpb.Distribution{ + Count: dv.Count, + Mean: mean, + SumOfSquaredDeviation: dv.SumOfSquaredDeviation, + BucketCounts: bucketCounts(dv.Buckets), + }, + } + + if bopts := dv.BucketOptions; bopts != nil && bopts.Type != nil { + bexp, ok := bopts.Type.(*metricspb.DistributionValue_BucketOptions_Explicit_) + if ok && bexp != nil && bexp.Explicit != nil { + mv.DistributionValue.BucketOptions = &distributionpb.Distribution_BucketOptions{ + Options: &distributionpb.Distribution_BucketOptions_ExplicitBuckets{ + ExplicitBuckets: &distributionpb.Distribution_BucketOptions_Explicit{ + Bounds: bexp.Explicit.Bounds[:], + }, + }, + } + } + } + } + tval = &monitoringpb.TypedValue{Value: mv} + } + + return tval, err +} + +func bucketCounts(buckets []*metricspb.DistributionValue_Bucket) []int64 { + bucketCounts := make([]int64, len(buckets)) + for i, bucket := range buckets { + if bucket != nil { + bucketCounts[i] = bucket.Count + } + } + return bucketCounts +} + +func protoMetricDescriptorTypeToMetricKind(m *metricspb.Metric) (googlemetricpb.MetricDescriptor_MetricKind, googlemetricpb.MetricDescriptor_ValueType) { + dt := m.GetMetricDescriptor() + if dt == nil { + return googlemetricpb.MetricDescriptor_METRIC_KIND_UNSPECIFIED, googlemetricpb.MetricDescriptor_VALUE_TYPE_UNSPECIFIED + } + + switch dt.Type { + case metricspb.MetricDescriptor_CUMULATIVE_INT64: + return googlemetricpb.MetricDescriptor_CUMULATIVE, googlemetricpb.MetricDescriptor_INT64 + 
+ case metricspb.MetricDescriptor_CUMULATIVE_DOUBLE: + return googlemetricpb.MetricDescriptor_CUMULATIVE, googlemetricpb.MetricDescriptor_DOUBLE + + case metricspb.MetricDescriptor_CUMULATIVE_DISTRIBUTION: + return googlemetricpb.MetricDescriptor_CUMULATIVE, googlemetricpb.MetricDescriptor_DISTRIBUTION + + case metricspb.MetricDescriptor_GAUGE_DOUBLE: + return googlemetricpb.MetricDescriptor_GAUGE, googlemetricpb.MetricDescriptor_DOUBLE + + case metricspb.MetricDescriptor_GAUGE_INT64: + return googlemetricpb.MetricDescriptor_GAUGE, googlemetricpb.MetricDescriptor_INT64 + + case metricspb.MetricDescriptor_GAUGE_DISTRIBUTION: + return googlemetricpb.MetricDescriptor_GAUGE, googlemetricpb.MetricDescriptor_DISTRIBUTION + + default: + return googlemetricpb.MetricDescriptor_METRIC_KIND_UNSPECIFIED, googlemetricpb.MetricDescriptor_VALUE_TYPE_UNSPECIFIED + } +} + +func protoResourceToMonitoredResource(rsp *resourcepb.Resource) *monitoredrespb.MonitoredResource { + if rsp == nil { + return &monitoredrespb.MonitoredResource{ + Type: "global", + } + } + typ := rsp.Type + if typ == "" { + typ = "global" + } + mrsp := &monitoredrespb.MonitoredResource{ + Type: typ, + } + if rsp.Labels != nil { + mrsp.Labels = make(map[string]string, len(rsp.Labels)) + for k, v := range rsp.Labels { + mrsp.Labels[k] = v + } + } + return mrsp +} diff --git a/vendor/contrib.go.opencensus.io/exporter/stackdriver/monitoredresource/aws_identity_doc_utils.go b/vendor/contrib.go.opencensus.io/exporter/stackdriver/monitoredresource/aws_identity_doc_utils.go new file mode 100644 index 000000000..d6a23a8cf --- /dev/null +++ b/vendor/contrib.go.opencensus.io/exporter/stackdriver/monitoredresource/aws_identity_doc_utils.go @@ -0,0 +1,53 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package monitoredresource + +import ( + "github.com/aws/aws-sdk-go/aws/ec2metadata" + "github.com/aws/aws-sdk-go/aws/session" +) + +// awsIdentityDocument is used to store parsed AWS Identity Document. +type awsIdentityDocument struct { + // accountID is the AWS account number for the VM. + accountID string + + // instanceID is the instance id of the instance. + instanceID string + + // Region is the AWS region for the VM. + region string +} + +// retrieveAWSIdentityDocument attempts to retrieve AWS Identity Document. +// If the environment is AWS EC2 Instance then a valid document is retrieved. +// Relevant attributes from the document are stored in awsIdentityDoc. +// This is only done once. 
+func retrieveAWSIdentityDocument() *awsIdentityDocument {
+	awsIdentityDoc := awsIdentityDocument{}
+	c := ec2metadata.New(session.New())
+	if !c.Available() {
+		return nil
+	}
+	ec2InstanceIdentifyDocument, err := c.GetInstanceIdentityDocument()
+	if err != nil {
+		return nil
+	}
+	awsIdentityDoc.region = ec2InstanceIdentifyDocument.Region
+	awsIdentityDoc.instanceID = ec2InstanceIdentifyDocument.InstanceID
+	awsIdentityDoc.accountID = ec2InstanceIdentifyDocument.AccountID
+
+	return &awsIdentityDoc
+}
diff --git a/vendor/contrib.go.opencensus.io/exporter/stackdriver/monitoredresource/gcp_metadata_config.go b/vendor/contrib.go.opencensus.io/exporter/stackdriver/monitoredresource/gcp_metadata_config.go
new file mode 100644
index 000000000..ceb754e51
--- /dev/null
+++ b/vendor/contrib.go.opencensus.io/exporter/stackdriver/monitoredresource/gcp_metadata_config.go
@@ -0,0 +1,90 @@
+// Copyright 2018, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.

+package monitoredresource
+
+import (
+	"log"
+	"os"
+	"strings"
+
+	"cloud.google.com/go/compute/metadata"
+)
+
+// gcpMetadata represents metadata retrieved from GCP (GKE and GCE) environments.
+type gcpMetadata struct {
+
+	// projectID is the identifier of the GCP project associated with this resource, such as "my-project".
+	projectID string
+
+	// instanceID is the numeric VM instance identifier assigned by Compute Engine.
+	instanceID string
+
+	// clusterName is the name for the cluster the container is running in.
+	clusterName string
+
+	// containerName is the name of the container.
+	containerName string
+
+	// namespaceID is the identifier for the cluster namespace the container is running in.
+	namespaceID string
+
+	// podID is the identifier for the pod the container is running in.
+	podID string
+
+	// zone is the Compute Engine zone in which the VM is running.
+	zone string
+}
+
+// retrieveGCPMetadata retrieves the value of each attribute from the metadata
+// server in GKE container and GCE instance environments.
+// Some attributes are retrieved from the system environment.
+// It is executed only once, guarded by detectOnce.
+func retrieveGCPMetadata() *gcpMetadata {
+	gcpMetadata := gcpMetadata{}
+	var err error
+	gcpMetadata.instanceID, err = metadata.InstanceID()
+	if err != nil {
+		// Not a GCP environment
+		return &gcpMetadata
+	}
+
+	gcpMetadata.projectID, err = metadata.ProjectID()
+	logError(err)
+
+	gcpMetadata.zone, err = metadata.Zone()
+	logError(err)
+
+	clusterName, err := metadata.InstanceAttributeValue("cluster-name")
+	logError(err)
+	gcpMetadata.clusterName = strings.TrimSpace(clusterName)
+
+	// The following attributes are derived from environment variables. They are configured
+	// via yaml file.
For details refer to:
+	// https://cloud.google.com/kubernetes-engine/docs/tutorials/custom-metrics-autoscaling#exporting_metrics_from_the_application
+	gcpMetadata.namespaceID = os.Getenv("NAMESPACE")
+	gcpMetadata.containerName = os.Getenv("CONTAINER_NAME")
+	gcpMetadata.podID = os.Getenv("HOSTNAME")
+
+	return &gcpMetadata
+}
+
+// logError logs the error only if it is present and is not a 'not defined' error.
+func logError(err error) {
+	if err != nil {
+		if !strings.Contains(err.Error(), "not defined") {
+			log.Printf("Error retrieving gcp metadata: %v", err)
+		}
+	}
+}
diff --git a/vendor/contrib.go.opencensus.io/exporter/stackdriver/monitoredresource/monitored_resources.go b/vendor/contrib.go.opencensus.io/exporter/stackdriver/monitoredresource/monitored_resources.go
new file mode 100644
index 000000000..c07e55ce0
--- /dev/null
+++ b/vendor/contrib.go.opencensus.io/exporter/stackdriver/monitoredresource/monitored_resources.go
@@ -0,0 +1,217 @@
+// Copyright 2018, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.

+package monitoredresource
+
+import (
+	"fmt"
+	"os"
+	"sync"
+)
+
+// Interface is implemented by every monitored resource type in this package;
+// a value satisfying it reports its Stackdriver resource type and labels.
+type Interface interface {
+
+	// MonitoredResource returns the resource type and resource labels.
+	MonitoredResource() (resType string, labels map[string]string)
+}
+
+// GKEContainer represents gke_container type monitored resource.
+// For definition refer to
+// https://cloud.google.com/monitoring/api/resources#tag_gke_container
+type GKEContainer struct {
+
+	// ProjectID is the identifier of the GCP project associated with this resource, such as "my-project".
+	ProjectID string
+
+	// InstanceID is the numeric VM instance identifier assigned by Compute Engine.
+	InstanceID string
+
+	// ClusterName is the name for the cluster the container is running in.
+	ClusterName string
+
+	// ContainerName is the name of the container.
+	ContainerName string
+
+	// NamespaceID is the identifier for the cluster namespace the container is running in.
+	NamespaceID string
+
+	// PodID is the identifier for the pod the container is running in.
+	PodID string
+
+	// Zone is the Compute Engine zone in which the VM is running.
+	Zone string
+}
+
+// MonitoredResource returns the resource type and resource labels for GKEContainer.
+func (gke *GKEContainer) MonitoredResource() (resType string, labels map[string]string) {
+	labels = map[string]string{
+		"project_id":     gke.ProjectID,
+		"instance_id":    gke.InstanceID,
+		"zone":           gke.Zone,
+		"cluster_name":   gke.ClusterName,
+		"container_name": gke.ContainerName,
+		"namespace_id":   gke.NamespaceID,
+		"pod_id":         gke.PodID,
+	}
+	return "gke_container", labels
+}
+
+// GCEInstance represents gce_instance type monitored resource.
+// For definition refer to
+// https://cloud.google.com/monitoring/api/resources#tag_gce_instance
+type GCEInstance struct {
+
+	// ProjectID is the identifier of the GCP project associated with this resource, such as "my-project".
+	ProjectID string
+
+	// InstanceID is the numeric VM instance identifier assigned by Compute Engine.
+	InstanceID string
+
+	// Zone is the Compute Engine zone in which the VM is running.
+	Zone string
+}
+
+// MonitoredResource returns the resource type and resource labels for GCEInstance.
+func (gce *GCEInstance) MonitoredResource() (resType string, labels map[string]string) {
+	labels = map[string]string{
+		"project_id":  gce.ProjectID,
+		"instance_id": gce.InstanceID,
+		"zone":        gce.Zone,
+	}
+	return "gce_instance", labels
+}
+
+// AWSEC2Instance represents aws_ec2_instance type monitored resource.
+// For definition refer to
+// https://cloud.google.com/monitoring/api/resources#tag_aws_ec2_instance
+type AWSEC2Instance struct {
+
+	// AWSAccount is the AWS account number for the VM.
+	AWSAccount string
+
+	// InstanceID is the instance id of the instance.
+	InstanceID string
+
+	// Region is the AWS region for the VM. The format of this field is "aws:{region}",
+	// where supported values for {region} are listed at
+	// http://docs.aws.amazon.com/general/latest/gr/rande.html.
+	Region string
+}
+
+// MonitoredResource returns the resource type and resource labels for AWSEC2Instance.
+func (aws *AWSEC2Instance) MonitoredResource() (resType string, labels map[string]string) {
+	labels = map[string]string{
+		"aws_account": aws.AWSAccount,
+		"instance_id": aws.InstanceID,
+		"region":      aws.Region,
+	}
+	return "aws_ec2_instance", labels
+}
+
+// Autodetect auto-detects monitored resources based on
+// the environment where the application is running.
+// It supports detection of the following resource types:
+//	1. gke_container
+//	2. gce_instance
+//	3. aws_ec2_instance
+//
+// It returns an Interface whose MonitoredResource method reports the
+// detected resource type and labels, or nil if no supported environment
+// is detected.
+// For resource definitions see https://cloud.google.com/monitoring/api/resources
+func Autodetect() Interface {
+	return func() Interface {
+		var autoDetected Interface
+		var awsIdentityDoc *awsIdentityDocument
+		var gcpMetadata *gcpMetadata
+		detectOnce.Do(func() {
+
+			// First attempt to retrieve the AWS identity document and the GCP
+			// metadata, then determine the resource type.
+			// In GCP and AWS environments both functions finish quickly; in any
+			// other environment (e.g. a local laptop) GCP detection takes about
+			// 2 seconds and AWS detection 5-6 seconds.
+			var wg sync.WaitGroup
+			wg.Add(2)
+
+			go func() {
+				defer wg.Done()
+				awsIdentityDoc = retrieveAWSIdentityDocument()
+			}()
+			go func() {
+				defer wg.Done()
+				gcpMetadata = retrieveGCPMetadata()
+			}()
+
+			wg.Wait()
+			autoDetected = detectResourceType(awsIdentityDoc, gcpMetadata)
+		})
+		return autoDetected
+	}()
+}
+
+// createAWSEC2InstanceMonitoredResource creates an aws_ec2_instance monitored resource.
+// awsIdentityDoc contains AWS EC2 specific attributes.
+func createAWSEC2InstanceMonitoredResource(awsIdentityDoc *awsIdentityDocument) *AWSEC2Instance {
+	awsInstance := AWSEC2Instance{
+		AWSAccount: awsIdentityDoc.accountID,
+		InstanceID: awsIdentityDoc.instanceID,
+		Region:     fmt.Sprintf("aws:%s", awsIdentityDoc.region),
+	}
+	return &awsInstance
+}
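+
+// A typical caller wires Autodetect into the exporter options (a sketch;
+// stackdriver.Options is defined in the parent package and error handling
+// is elided):
+//
+//	if mr := monitoredresource.Autodetect(); mr != nil {
+//		opts.MonitoredResource = mr
+//	}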
+
+// createGCEInstanceMonitoredResource creates a gce_instance monitored resource.
+// gcpMetadata contains GCP (GKE or GCE) specific attributes.
+func createGCEInstanceMonitoredResource(gcpMetadata *gcpMetadata) *GCEInstance {
+	gceInstance := GCEInstance{
+		ProjectID:  gcpMetadata.projectID,
+		InstanceID: gcpMetadata.instanceID,
+		Zone:       gcpMetadata.zone,
+	}
+	return &gceInstance
+}
+
+// createGKEContainerMonitoredResource creates a gke_container monitored resource.
+// gcpMetadata contains GCP (GKE or GCE) specific attributes.
+func createGKEContainerMonitoredResource(gcpMetadata *gcpMetadata) *GKEContainer {
+	gkeContainer := GKEContainer{
+		ProjectID:     gcpMetadata.projectID,
+		InstanceID:    gcpMetadata.instanceID,
+		Zone:          gcpMetadata.zone,
+		ContainerName: gcpMetadata.containerName,
+		ClusterName:   gcpMetadata.clusterName,
+		NamespaceID:   gcpMetadata.namespaceID,
+		PodID:         gcpMetadata.podID,
+	}
+	return &gkeContainer
+}
+
+// detectOnce makes sure the GCP and AWS metadata detection functions execute only once.
+var detectOnce sync.Once
+
+// detectResourceType determines the resource type.
+// awsIdentityDoc contains AWS EC2 attributes; it is nil outside an AWS EC2 environment.
+// gcpMetadata contains GCP (GKE or GCE) specific attributes.
+func detectResourceType(awsIdentityDoc *awsIdentityDocument, gcpMetadata *gcpMetadata) Interface {
+	if os.Getenv("KUBERNETES_SERVICE_HOST") != "" &&
+		gcpMetadata != nil && gcpMetadata.instanceID != "" {
+		return createGKEContainerMonitoredResource(gcpMetadata)
+	} else if gcpMetadata != nil && gcpMetadata.instanceID != "" {
+		return createGCEInstanceMonitoredResource(gcpMetadata)
+	} else if awsIdentityDoc != nil {
+		return createAWSEC2InstanceMonitoredResource(awsIdentityDoc)
+	}
+	return nil
+}
diff --git a/vendor/contrib.go.opencensus.io/exporter/stackdriver/sanitize.go b/vendor/contrib.go.opencensus.io/exporter/stackdriver/sanitize.go
new file mode 100644
index 000000000..184bb1d43
--- /dev/null
+++ b/vendor/contrib.go.opencensus.io/exporter/stackdriver/sanitize.go
@@ -0,0 +1,50 @@
+// Copyright 2017, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.

+package stackdriver
+
+import (
+	"strings"
+	"unicode"
+)
+
+const labelKeySizeLimit = 100
+
+// sanitize returns a string that is truncated to 100 characters if it's too
+// long, and replaces non-alphanumeric characters with underscores.
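+//
+// Examples of the resulting mapping (a sketch):
+//
+//	sanitize("request-count") == "request_count"
+//	sanitize("9lives")        == "key_9lives"
+//	sanitize("_private")      == "key_private"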
+func sanitize(s string) string {
+	if len(s) == 0 {
+		return s
+	}
+	if len(s) > labelKeySizeLimit {
+		s = s[:labelKeySizeLimit]
+	}
+	s = strings.Map(sanitizeRune, s)
+	if unicode.IsDigit(rune(s[0])) {
+		s = "key_" + s
+	}
+	if s[0] == '_' {
+		s = "key" + s
+	}
+	return s
+}
+
+// sanitizeRune converts anything that is not a letter or digit to an underscore.
+func sanitizeRune(r rune) rune {
+	if unicode.IsLetter(r) || unicode.IsDigit(r) {
+		return r
+	}
+	// Everything else turns into an underscore.
+	return '_'
+}
diff --git a/vendor/contrib.go.opencensus.io/exporter/stackdriver/stackdriver.go b/vendor/contrib.go.opencensus.io/exporter/stackdriver/stackdriver.go
new file mode 100644
index 000000000..595377368
--- /dev/null
+++ b/vendor/contrib.go.opencensus.io/exporter/stackdriver/stackdriver.go
@@ -0,0 +1,346 @@
+// Copyright 2018, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.

+// Package stackdriver contains the OpenCensus exporters for
+// Stackdriver Monitoring and Stackdriver Tracing.
+//
+// This exporter can be used to send metrics to Stackdriver Monitoring and traces
+// to Stackdriver Trace.
+//
+// The package uses Application Default Credentials to authenticate by default.
+// See: https://developers.google.com/identity/protocols/application-default-credentials
+//
+// Alternatively, pass the authentication options in both the MonitoringClientOptions
+// and the TraceClientOptions fields of Options.
+//
+// Stackdriver Monitoring
+//
+// This exporter supports exporting OpenCensus views to Stackdriver Monitoring.
+// Each registered view becomes a metric in Stackdriver Monitoring, with the
+// tags becoming labels.
+//
+// The aggregation function determines the metric kind: LastValue aggregations
+// generate Gauge metrics and all other aggregations generate Cumulative metrics.
+//
+// In order to be able to push your stats to Stackdriver Monitoring, you must:
+//
+//	1. Create a Cloud project: https://support.google.com/cloud/answer/6251787?hl=en
+//	2. Enable billing: https://support.google.com/cloud/answer/6288653#new-billing
+//	3. Enable the Stackdriver Monitoring API: https://console.cloud.google.com/apis/dashboard
+//
+// These steps enable the API but don't require that your app is hosted on Google Cloud Platform.
+//
+// Stackdriver Trace
+//
+// This exporter supports exporting Trace Spans to Stackdriver Trace. It also
+// supports the Google "Cloud Trace" propagation format header.
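+//
+// A minimal setup might look like this (a sketch; "my-project" is a
+// placeholder and error handling is elided):
+//
+//	exporter, err := stackdriver.NewExporter(stackdriver.Options{
+//		ProjectID: "my-project",
+//	})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	view.RegisterExporter(exporter)
+//	trace.RegisterExporter(exporter)
+//	defer exporter.Flush()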
+package stackdriver // import "contrib.go.opencensus.io/exporter/stackdriver" + +import ( + "context" + "errors" + "fmt" + "log" + "time" + + traceapi "cloud.google.com/go/trace/apiv2" + "contrib.go.opencensus.io/exporter/stackdriver/monitoredresource" + "go.opencensus.io/stats/view" + "go.opencensus.io/tag" + "go.opencensus.io/trace" + "golang.org/x/oauth2/google" + "google.golang.org/api/option" + monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres" + + commonpb "github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1" + metricspb "github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1" + resourcepb "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1" +) + +// Options contains options for configuring the exporter. +type Options struct { + // ProjectID is the identifier of the Stackdriver + // project the user is uploading the stats data to. + // If not set, this will default to your "Application Default Credentials". + // For details see: https://developers.google.com/accounts/docs/application-default-credentials + ProjectID string + + // OnError is the hook to be called when there is + // an error uploading the stats or tracing data. + // If no custom hook is set, errors are logged. + // Optional. + OnError func(err error) + + // MonitoringClientOptions are additional options to be passed + // to the underlying Stackdriver Monitoring API client. + // Optional. + MonitoringClientOptions []option.ClientOption + + // TraceClientOptions are additional options to be passed + // to the underlying Stackdriver Trace API client. + // Optional. + TraceClientOptions []option.ClientOption + + // BundleDelayThreshold determines the max amount of time + // the exporter can wait before uploading view data or trace spans to + // the backend. + // Optional. + BundleDelayThreshold time.Duration + + // BundleCountThreshold determines how many view data events or trace spans + // can be buffered before batch uploading them to the backend. + // Optional. + BundleCountThreshold int + + // TraceSpansBufferMaxBytes is the maximum size (in bytes) of spans that + // will be buffered in memory before being dropped. + // + // If unset, a default of 8MB will be used. + TraceSpansBufferMaxBytes int + + // Resource sets the MonitoredResource against which all views will be + // recorded by this exporter. + // + // All Stackdriver metrics created by this exporter are custom metrics, + // so only a limited number of MonitoredResource types are supported, see: + // https://cloud.google.com/monitoring/custom-metrics/creating-metrics#which-resource + // + // An important consideration when setting the Resource here is that + // Stackdriver Monitoring only allows a single writer per + // TimeSeries, see: https://cloud.google.com/monitoring/api/v3/metrics-details#intro-time-series + // A TimeSeries is uniquely defined by the metric type name + // (constructed from the view name and the MetricPrefix), the Resource field, + // and the set of label key/value pairs (in OpenCensus terminology: tag). + // + // If no custom Resource is set, a default MonitoredResource + // with type global and no resource labels will be used. If you explicitly + // set this field, you may also want to set custom DefaultMonitoringLabels. + // + // Deprecated: Use MonitoredResource instead. + Resource *monitoredrespb.MonitoredResource + + // MonitoredResource sets the MonitoredResource against which all views will be + // recorded by this exporter. 
+	//
+	// All Stackdriver metrics created by this exporter are custom metrics,
+	// so only a limited number of MonitoredResource types are supported, see:
+	// https://cloud.google.com/monitoring/custom-metrics/creating-metrics#which-resource
+	//
+	// An important consideration when setting the MonitoredResource here is that
+	// Stackdriver Monitoring only allows a single writer per
+	// TimeSeries, see: https://cloud.google.com/monitoring/api/v3/metrics-details#intro-time-series
+	// A TimeSeries is uniquely defined by the metric type name
+	// (constructed from the view name and the MetricPrefix), the MonitoredResource field,
+	// and the set of label key/value pairs (in OpenCensus terminology: tag).
+	//
+	// If no custom MonitoredResource is set AND if Resource is also not set then
+	// a default MonitoredResource with type global and no resource labels will be used.
+	// If you explicitly set this field, you may also want to set custom DefaultMonitoringLabels.
+	//
+	// This field replaces the Resource field. If this is set then it will override
+	// the Resource field.
+	// Optional, but encouraged.
+	MonitoredResource monitoredresource.Interface
+
+	// MetricPrefix overrides the prefix of Stackdriver metric display names.
+	// Optional. If unset, defaults to "OpenCensus/".
+	// Deprecated: Provide GetMetricDisplayName to change the display name of
+	// the metric.
+	// If GetMetricDisplayName is non-nil, this option is ignored.
+	MetricPrefix string
+
+	// GetMetricDisplayName allows customizing the display name for the metric
+	// associated with the given view. By default it will be:
+	//	MetricPrefix + view.Name
+	GetMetricDisplayName func(view *view.View) string
+
+	// GetMetricType allows customizing the metric type for the given view.
+	// By default, it will be:
+	//	"custom.googleapis.com/opencensus/" + view.Name
+	//
+	// See: https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors#MetricDescriptor
+	GetMetricType func(view *view.View) string
+
+	// DefaultTraceAttributes will be appended to every span that is exported to
+	// Stackdriver Trace.
+	DefaultTraceAttributes map[string]interface{}
+
+	// DefaultMonitoringLabels are labels added to every metric created by this
+	// exporter in Stackdriver Monitoring.
+	//
+	// If unset, this defaults to a single label with key "opencensus_task" and
+	// value "go-<pid>@<hostname>". This default ensures that the set of labels
+	// together with the default Resource (global) are unique to this
+	// process, as required by Stackdriver Monitoring.
+	//
+	// If you set DefaultMonitoringLabels, make sure that the Resource field
+	// together with these labels is unique to the
+	// current process. This is to ensure that there is only a single writer to
+	// each TimeSeries in Stackdriver.
+	//
+	// Set this to &Labels{} (a pointer to an empty Labels) to avoid getting the
+	// default "opencensus_task" label. You should only do this if you know that
+	// the Resource you set uniquely identifies this Go process.
+	DefaultMonitoringLabels *Labels
+
+	// Context allows you to provide a custom context for API calls.
+	//
+	// This context will be used several times: first, to create Stackdriver
+	// trace and metric clients, and then every time a new batch of traces or
+	// stats needs to be uploaded.
+	//
+	// Do not set a timeout on this context. Instead, set the Timeout option.
+	//
+	// If unset, context.Background() will be used.
+	Context context.Context
+
+	// Timeout for all API calls. If not set, defaults to 5 seconds.
+ Timeout time.Duration + + // GetMonitoredResource may be provided to supply the details of the + // monitored resource dynamically based on the tags associated with each + // data point. Most users will not need to set this, but should instead + // set the MonitoredResource field. + // + // GetMonitoredResource may add or remove tags by returning a new set of + // tags. It is safe for the function to mutate its argument and return it. + // + // See the documentation on the MonitoredResource field for guidance on the + // interaction between monitored resources and labels. + // + // The MonitoredResource field is ignored if this field is set to a non-nil + // value. + GetMonitoredResource func(*view.View, []tag.Tag) ([]tag.Tag, monitoredresource.Interface) +} + +const defaultTimeout = 5 * time.Second + +// Exporter is a stats and trace exporter that uploads data to Stackdriver. +// +// You can create a single Exporter and register it as both a trace exporter +// (to export to Stackdriver Trace) and a stats exporter (to integrate with +// Stackdriver Monitoring). +type Exporter struct { + traceExporter *traceExporter + statsExporter *statsExporter +} + +// NewExporter creates a new Exporter that implements both stats.Exporter and +// trace.Exporter. +func NewExporter(o Options) (*Exporter, error) { + if o.ProjectID == "" { + ctx := o.Context + if ctx == nil { + ctx = context.Background() + } + creds, err := google.FindDefaultCredentials(ctx, traceapi.DefaultAuthScopes()...) + if err != nil { + return nil, fmt.Errorf("stackdriver: %v", err) + } + if creds.ProjectID == "" { + return nil, errors.New("stackdriver: no project found with application default credentials") + } + o.ProjectID = creds.ProjectID + } + + if o.MonitoredResource != nil { + o.Resource = convertMonitoredResourceToPB(o.MonitoredResource) + } + + se, err := newStatsExporter(o) + if err != nil { + return nil, err + } + te, err := newTraceExporter(o) + if err != nil { + return nil, err + } + return &Exporter{ + statsExporter: se, + traceExporter: te, + }, nil +} + +// ExportView exports to the Stackdriver Monitoring if view data +// has one or more rows. +func (e *Exporter) ExportView(vd *view.Data) { + e.statsExporter.ExportView(vd) +} + +// ExportMetric exports OpenCensus Metrics to Stackdriver Monitoring. +func (e *Exporter) ExportMetric(ctx context.Context, node *commonpb.Node, rsc *resourcepb.Resource, metric *metricspb.Metric) error { + return e.statsExporter.ExportMetric(ctx, node, rsc, metric) +} + +// ExportSpan exports a SpanData to Stackdriver Trace. +func (e *Exporter) ExportSpan(sd *trace.SpanData) { + if len(e.traceExporter.o.DefaultTraceAttributes) > 0 { + sd = e.sdWithDefaultTraceAttributes(sd) + } + e.traceExporter.ExportSpan(sd) +} + +func (e *Exporter) sdWithDefaultTraceAttributes(sd *trace.SpanData) *trace.SpanData { + newSD := *sd + newSD.Attributes = make(map[string]interface{}) + for k, v := range e.traceExporter.o.DefaultTraceAttributes { + newSD.Attributes[k] = v + } + for k, v := range sd.Attributes { + newSD.Attributes[k] = v + } + return &newSD +} + +// Flush waits for exported data to be uploaded. +// +// This is useful if your program is ending and you do not +// want to lose recent stats or spans. 
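+//
+// A minimal usage sketch (the project ID is a placeholder; the registration
+// calls refer to go.opencensus.io/trace and go.opencensus.io/stats/view):
+//
+//	exporter, err := stackdriver.NewExporter(stackdriver.Options{ProjectID: "my-project-id"})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	trace.RegisterExporter(exporter)
+//	view.RegisterExporter(exporter)
+//	defer exporter.Flush()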
+func (e *Exporter) Flush() { + e.statsExporter.Flush() + e.traceExporter.Flush() +} + +func (o Options) handleError(err error) { + if o.OnError != nil { + o.OnError(err) + return + } + log.Printf("Failed to export to Stackdriver: %v", err) +} + +func (o Options) newContextWithTimeout() (context.Context, func()) { + ctx := o.Context + if ctx == nil { + ctx = context.Background() + } + timeout := o.Timeout + if timeout <= 0 { + timeout = defaultTimeout + } + return context.WithTimeout(ctx, timeout) +} + +// convertMonitoredResourceToPB converts MonitoredResource data in to +// protocol buffer. +func convertMonitoredResourceToPB(mr monitoredresource.Interface) *monitoredrespb.MonitoredResource { + mrpb := new(monitoredrespb.MonitoredResource) + var labels map[string]string + mrpb.Type, labels = mr.MonitoredResource() + mrpb.Labels = make(map[string]string) + for k, v := range labels { + mrpb.Labels[k] = v + } + return mrpb +} diff --git a/vendor/contrib.go.opencensus.io/exporter/stackdriver/stats.go b/vendor/contrib.go.opencensus.io/exporter/stackdriver/stats.go new file mode 100644 index 000000000..ca82ca71b --- /dev/null +++ b/vendor/contrib.go.opencensus.io/exporter/stackdriver/stats.go @@ -0,0 +1,571 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package stackdriver + +import ( + "context" + "errors" + "fmt" + "os" + "path" + "strconv" + "strings" + "sync" + "time" + + "go.opencensus.io" + "go.opencensus.io/stats" + "go.opencensus.io/stats/view" + "go.opencensus.io/tag" + "go.opencensus.io/trace" + + "cloud.google.com/go/monitoring/apiv3" + "github.com/golang/protobuf/ptypes/timestamp" + "google.golang.org/api/option" + "google.golang.org/api/support/bundler" + distributionpb "google.golang.org/genproto/googleapis/api/distribution" + labelpb "google.golang.org/genproto/googleapis/api/label" + "google.golang.org/genproto/googleapis/api/metric" + metricpb "google.golang.org/genproto/googleapis/api/metric" + monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres" + monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3" +) + +const ( + maxTimeSeriesPerUpload = 200 + opencensusTaskKey = "opencensus_task" + opencensusTaskDescription = "Opencensus task identifier" + defaultDisplayNamePrefix = "OpenCensus" + version = "0.10.0" +) + +var userAgent = fmt.Sprintf("opencensus-go %s; stackdriver-exporter %s", opencensus.Version(), version) + +// statsExporter exports stats to the Stackdriver Monitoring. 
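+//
+// View data is buffered through a bundler and uploaded in batches of at most
+// maxTimeSeriesPerUpload time series per CreateTimeSeries request.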
+type statsExporter struct {
+	o Options
+
+	viewDataBundler     *bundler.Bundler
+	protoMetricsBundler *bundler.Bundler
+
+	createdViewsMu sync.Mutex
+	createdViews   map[string]*metricpb.MetricDescriptor // Views already created remotely
+
+	protoMu                sync.Mutex
+	protoMetricDescriptors map[string]*metricpb.MetricDescriptor // Saves the metric descriptors that were already created remotely
+
+	c             *monitoring.MetricClient
+	defaultLabels map[string]labelValue
+}
+
+var (
+	errBlankProjectID = errors.New("expecting a non-blank ProjectID")
+)
+
+// newStatsExporter returns an exporter that uploads stats data to Stackdriver Monitoring.
+// Only one Stackdriver exporter should be created per ProjectID per process; any subsequent
+// invocations of NewExporter with the same ProjectID will return an error.
+func newStatsExporter(o Options) (*statsExporter, error) {
+	if strings.TrimSpace(o.ProjectID) == "" {
+		return nil, errBlankProjectID
+	}
+
+	opts := append(o.MonitoringClientOptions, option.WithUserAgent(userAgent))
+	ctx, cancel := o.newContextWithTimeout()
+	defer cancel()
+	client, err := monitoring.NewMetricClient(ctx, opts...)
+	if err != nil {
+		return nil, err
+	}
+	e := &statsExporter{
+		c:                      client,
+		o:                      o,
+		createdViews:           make(map[string]*metricpb.MetricDescriptor),
+		protoMetricDescriptors: make(map[string]*metricpb.MetricDescriptor),
+	}
+
+	if o.DefaultMonitoringLabels != nil {
+		e.defaultLabels = o.DefaultMonitoringLabels.m
+	} else {
+		e.defaultLabels = map[string]labelValue{
+			opencensusTaskKey: {val: getTaskValue(), desc: opencensusTaskDescription},
+		}
+	}
+
+	e.viewDataBundler = bundler.NewBundler((*view.Data)(nil), func(bundle interface{}) {
+		vds := bundle.([]*view.Data)
+		e.handleUpload(vds...)
+	})
+	e.protoMetricsBundler = bundler.NewBundler((*metricPayload)(nil), func(bundle interface{}) {
+		payloads := bundle.([]*metricPayload)
+		e.handleMetricsUpload(payloads)
+	})
+	if delayThreshold := e.o.BundleDelayThreshold; delayThreshold > 0 {
+		e.viewDataBundler.DelayThreshold = delayThreshold
+		e.protoMetricsBundler.DelayThreshold = delayThreshold
+	}
+	if countThreshold := e.o.BundleCountThreshold; countThreshold > 0 {
+		e.viewDataBundler.BundleCountThreshold = countThreshold
+		e.protoMetricsBundler.BundleCountThreshold = countThreshold
+	}
+	return e, nil
+}
+
+func (e *statsExporter) getMonitoredResource(v *view.View, tags []tag.Tag) ([]tag.Tag, *monitoredrespb.MonitoredResource) {
+	if get := e.o.GetMonitoredResource; get != nil {
+		newTags, mr := get(v, tags)
+		return newTags, convertMonitoredResourceToPB(mr)
+	} else {
+		resource := e.o.Resource
+		if resource == nil {
+			resource = &monitoredrespb.MonitoredResource{
+				Type: "global",
+			}
+		}
+		return tags, resource
+	}
+}
+
+// ExportView exports to the Stackdriver Monitoring if view data
+// has one or more rows.
+func (e *statsExporter) ExportView(vd *view.Data) {
+	if len(vd.Rows) == 0 {
+		return
+	}
+	err := e.viewDataBundler.Add(vd, 1)
+	switch err {
+	case nil:
+		return
+	case bundler.ErrOverflow:
+		e.o.handleError(errors.New("failed to upload: buffer full"))
+	default:
+		e.o.handleError(err)
+	}
+}
+
+// getTaskValue returns a task label value in the format of
+// "go-<pid>@<hostname>".
+func getTaskValue() string {
+	hostname, err := os.Hostname()
+	if err != nil {
+		hostname = "localhost"
+	}
+	return "go-" + strconv.Itoa(os.Getpid()) + "@" + hostname
+}
+
+// handleUpload handles uploading a slice
+// of Data, as well as error handling.
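+// Upload failures are reported through Options.OnError when it is set, and
+// are logged otherwise.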
+func (e *statsExporter) handleUpload(vds ...*view.Data) { + if err := e.uploadStats(vds); err != nil { + e.o.handleError(err) + } +} + +// Flush waits for exported view data and metrics to be uploaded. +// +// This is useful if your program is ending and you do not +// want to lose data that hasn't yet been exported. +func (e *statsExporter) Flush() { + e.viewDataBundler.Flush() + e.protoMetricsBundler.Flush() +} + +func (e *statsExporter) uploadStats(vds []*view.Data) error { + ctx, cancel := e.o.newContextWithTimeout() + defer cancel() + ctx, span := trace.StartSpan( + ctx, + "contrib.go.opencensus.io/exporter/stackdriver.uploadStats", + trace.WithSampler(trace.NeverSample()), + ) + defer span.End() + + for _, vd := range vds { + if err := e.createMeasure(ctx, vd.View); err != nil { + span.SetStatus(trace.Status{Code: 2, Message: err.Error()}) + return err + } + } + for _, req := range e.makeReq(vds, maxTimeSeriesPerUpload) { + if err := createTimeSeries(ctx, e.c, req); err != nil { + span.SetStatus(trace.Status{Code: 2, Message: err.Error()}) + // TODO(jbd): Don't fail fast here, batch errors? + return err + } + } + return nil +} + +func (se *statsExporter) makeReq(vds []*view.Data, limit int) []*monitoringpb.CreateTimeSeriesRequest { + var reqs []*monitoringpb.CreateTimeSeriesRequest + + var allTimeSeries []*monitoringpb.TimeSeries + for _, vd := range vds { + for _, row := range vd.Rows { + tags, resource := se.getMonitoredResource(vd.View, append([]tag.Tag(nil), row.Tags...)) + ts := &monitoringpb.TimeSeries{ + Metric: &metricpb.Metric{ + Type: se.metricType(vd.View), + Labels: newLabels(se.defaultLabels, tags), + }, + Resource: resource, + Points: []*monitoringpb.Point{newPoint(vd.View, row, vd.Start, vd.End)}, + } + allTimeSeries = append(allTimeSeries, ts) + } + } + + var timeSeries []*monitoringpb.TimeSeries + for _, ts := range allTimeSeries { + timeSeries = append(timeSeries, ts) + if len(timeSeries) == limit { + ctsreql := se.combineTimeSeriesToCreateTimeSeriesRequest(timeSeries) + reqs = append(reqs, ctsreql...) + timeSeries = timeSeries[:0] + } + } + + if len(timeSeries) > 0 { + ctsreql := se.combineTimeSeriesToCreateTimeSeriesRequest(timeSeries) + reqs = append(reqs, ctsreql...) + } + return reqs +} + +func (e *statsExporter) viewToMetricDescriptor(ctx context.Context, v *view.View) (*metricpb.MetricDescriptor, error) { + m := v.Measure + agg := v.Aggregation + viewName := v.Name + + metricType := e.metricType(v) + var valueType metricpb.MetricDescriptor_ValueType + unit := m.Unit() + // Default metric Kind + metricKind := metricpb.MetricDescriptor_CUMULATIVE + + switch agg.Type { + case view.AggTypeCount: + valueType = metricpb.MetricDescriptor_INT64 + // If the aggregation type is count, which counts the number of recorded measurements, the unit must be "1", + // because this view does not apply to the recorded values. 
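+		// (stats.UnitDimensionless is the UCUM dimensionless unit "1".)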
+ unit = stats.UnitDimensionless + case view.AggTypeSum: + switch m.(type) { + case *stats.Int64Measure: + valueType = metricpb.MetricDescriptor_INT64 + case *stats.Float64Measure: + valueType = metricpb.MetricDescriptor_DOUBLE + } + case view.AggTypeDistribution: + valueType = metricpb.MetricDescriptor_DISTRIBUTION + case view.AggTypeLastValue: + metricKind = metricpb.MetricDescriptor_GAUGE + switch m.(type) { + case *stats.Int64Measure: + valueType = metricpb.MetricDescriptor_INT64 + case *stats.Float64Measure: + valueType = metricpb.MetricDescriptor_DOUBLE + } + default: + return nil, fmt.Errorf("unsupported aggregation type: %s", agg.Type.String()) + } + + var displayName string + if e.o.GetMetricDisplayName == nil { + displayName = e.displayName(viewName) + } else { + displayName = e.o.GetMetricDisplayName(v) + } + + res := &metricpb.MetricDescriptor{ + Name: fmt.Sprintf("projects/%s/metricDescriptors/%s", e.o.ProjectID, metricType), + DisplayName: displayName, + Description: v.Description, + Unit: unit, + Type: metricType, + MetricKind: metricKind, + ValueType: valueType, + Labels: newLabelDescriptors(e.defaultLabels, v.TagKeys), + } + return res, nil +} + +func (e *statsExporter) viewToCreateMetricDescriptorRequest(ctx context.Context, v *view.View) (*monitoringpb.CreateMetricDescriptorRequest, error) { + inMD, err := e.viewToMetricDescriptor(ctx, v) + if err != nil { + return nil, err + } + + cmrdesc := &monitoringpb.CreateMetricDescriptorRequest{ + Name: fmt.Sprintf("projects/%s", e.o.ProjectID), + MetricDescriptor: inMD, + } + return cmrdesc, nil +} + +// createMeasure creates a MetricDescriptor for the given view data in Stackdriver Monitoring. +// An error will be returned if there is already a metric descriptor created with the same name +// but it has a different aggregation or keys. 
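+// Created descriptors are cached per view name, so each view triggers at most
+// one CreateMetricDescriptor call (or one GetMetricDescriptor call, for
+// built-in metrics) per process.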
+func (e *statsExporter) createMeasure(ctx context.Context, v *view.View) error { + e.createdViewsMu.Lock() + defer e.createdViewsMu.Unlock() + + viewName := v.Name + + if md, ok := e.createdViews[viewName]; ok { + // [TODO:rghetia] Temporary fix for https://github.com/census-ecosystem/opencensus-go-exporter-stackdriver/issues/76#issuecomment-459459091 + if builtinMetric(md.Type) { + return nil + } + return e.equalMeasureAggTagKeys(md, v.Measure, v.Aggregation, v.TagKeys) + } + + inMD, err := e.viewToMetricDescriptor(ctx, v) + if err != nil { + return err + } + + var dmd *metric.MetricDescriptor + if builtinMetric(inMD.Type) { + gmrdesc := &monitoringpb.GetMetricDescriptorRequest{ + Name: inMD.Name, + } + dmd, err = getMetricDescriptor(ctx, e.c, gmrdesc) + } else { + cmrdesc := &monitoringpb.CreateMetricDescriptorRequest{ + Name: fmt.Sprintf("projects/%s", e.o.ProjectID), + MetricDescriptor: inMD, + } + dmd, err = createMetricDescriptor(ctx, e.c, cmrdesc) + } + if err != nil { + return err + } + + // Now cache the metric descriptor + e.createdViews[viewName] = dmd + return err +} + +func (e *statsExporter) displayName(suffix string) string { + displayNamePrefix := defaultDisplayNamePrefix + if e.o.MetricPrefix != "" { + displayNamePrefix = e.o.MetricPrefix + } + return path.Join(displayNamePrefix, suffix) +} + +func newPoint(v *view.View, row *view.Row, start, end time.Time) *monitoringpb.Point { + switch v.Aggregation.Type { + case view.AggTypeLastValue: + return newGaugePoint(v, row, end) + default: + return newCumulativePoint(v, row, start, end) + } +} + +func newCumulativePoint(v *view.View, row *view.Row, start, end time.Time) *monitoringpb.Point { + return &monitoringpb.Point{ + Interval: &monitoringpb.TimeInterval{ + StartTime: ×tamp.Timestamp{ + Seconds: start.Unix(), + Nanos: int32(start.Nanosecond()), + }, + EndTime: ×tamp.Timestamp{ + Seconds: end.Unix(), + Nanos: int32(end.Nanosecond()), + }, + }, + Value: newTypedValue(v, row), + } +} + +func newGaugePoint(v *view.View, row *view.Row, end time.Time) *monitoringpb.Point { + gaugeTime := ×tamp.Timestamp{ + Seconds: end.Unix(), + Nanos: int32(end.Nanosecond()), + } + return &monitoringpb.Point{ + Interval: &monitoringpb.TimeInterval{ + EndTime: gaugeTime, + }, + Value: newTypedValue(v, row), + } +} + +func newTypedValue(vd *view.View, r *view.Row) *monitoringpb.TypedValue { + switch v := r.Data.(type) { + case *view.CountData: + return &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_Int64Value{ + Int64Value: v.Value, + }} + case *view.SumData: + switch vd.Measure.(type) { + case *stats.Int64Measure: + return &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_Int64Value{ + Int64Value: int64(v.Value), + }} + case *stats.Float64Measure: + return &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_DoubleValue{ + DoubleValue: v.Value, + }} + } + case *view.DistributionData: + return &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_DistributionValue{ + DistributionValue: &distributionpb.Distribution{ + Count: v.Count, + Mean: v.Mean, + SumOfSquaredDeviation: v.SumOfSquaredDev, + // TODO(songya): uncomment this once Stackdriver supports min/max. 
+ // Range: &distributionpb.Distribution_Range{ + // Min: v.Min, + // Max: v.Max, + // }, + BucketOptions: &distributionpb.Distribution_BucketOptions{ + Options: &distributionpb.Distribution_BucketOptions_ExplicitBuckets{ + ExplicitBuckets: &distributionpb.Distribution_BucketOptions_Explicit{ + Bounds: vd.Aggregation.Buckets, + }, + }, + }, + BucketCounts: v.CountPerBucket, + }, + }} + case *view.LastValueData: + switch vd.Measure.(type) { + case *stats.Int64Measure: + return &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_Int64Value{ + Int64Value: int64(v.Value), + }} + case *stats.Float64Measure: + return &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_DoubleValue{ + DoubleValue: v.Value, + }} + } + } + return nil +} + +func (e *statsExporter) metricType(v *view.View) string { + if formatter := e.o.GetMetricType; formatter != nil { + return formatter(v) + } else { + return path.Join("custom.googleapis.com", "opencensus", v.Name) + } +} + +func newLabels(defaults map[string]labelValue, tags []tag.Tag) map[string]string { + labels := make(map[string]string) + for k, lbl := range defaults { + labels[sanitize(k)] = lbl.val + } + for _, tag := range tags { + labels[sanitize(tag.Key.Name())] = tag.Value + } + return labels +} + +func newLabelDescriptors(defaults map[string]labelValue, keys []tag.Key) []*labelpb.LabelDescriptor { + labelDescriptors := make([]*labelpb.LabelDescriptor, 0, len(keys)+len(defaults)) + for key, lbl := range defaults { + labelDescriptors = append(labelDescriptors, &labelpb.LabelDescriptor{ + Key: sanitize(key), + Description: lbl.desc, + ValueType: labelpb.LabelDescriptor_STRING, + }) + } + for _, key := range keys { + labelDescriptors = append(labelDescriptors, &labelpb.LabelDescriptor{ + Key: sanitize(key.Name()), + ValueType: labelpb.LabelDescriptor_STRING, // We only use string tags + }) + } + return labelDescriptors +} + +func (e *statsExporter) equalMeasureAggTagKeys(md *metricpb.MetricDescriptor, m stats.Measure, agg *view.Aggregation, keys []tag.Key) error { + var aggTypeMatch bool + switch md.ValueType { + case metricpb.MetricDescriptor_INT64: + if _, ok := m.(*stats.Int64Measure); !(ok || agg.Type == view.AggTypeCount) { + return fmt.Errorf("stackdriver metric descriptor was not created as int64") + } + aggTypeMatch = agg.Type == view.AggTypeCount || agg.Type == view.AggTypeSum || agg.Type == view.AggTypeLastValue + case metricpb.MetricDescriptor_DOUBLE: + if _, ok := m.(*stats.Float64Measure); !ok { + return fmt.Errorf("stackdriver metric descriptor was not created as double") + } + aggTypeMatch = agg.Type == view.AggTypeSum || agg.Type == view.AggTypeLastValue + case metricpb.MetricDescriptor_DISTRIBUTION: + aggTypeMatch = agg.Type == view.AggTypeDistribution + } + + if !aggTypeMatch { + return fmt.Errorf("stackdriver metric descriptor was not created with aggregation type %T", agg.Type) + } + + labels := make(map[string]struct{}, len(keys)+len(e.defaultLabels)) + for _, k := range keys { + labels[sanitize(k.Name())] = struct{}{} + } + for k := range e.defaultLabels { + labels[sanitize(k)] = struct{}{} + } + + for _, k := range md.Labels { + if _, ok := labels[k.Key]; !ok { + return fmt.Errorf("stackdriver metric descriptor %q was not created with label %q", md.Type, k) + } + delete(labels, k.Key) + } + + if len(labels) > 0 { + extra := make([]string, 0, len(labels)) + for k := range labels { + extra = append(extra, k) + } + return fmt.Errorf("stackdriver metric descriptor %q contains unexpected labels: %s", md.Type, strings.Join(extra, 
", ")) + } + + return nil +} + +var createMetricDescriptor = func(ctx context.Context, c *monitoring.MetricClient, mdr *monitoringpb.CreateMetricDescriptorRequest) (*metric.MetricDescriptor, error) { + return c.CreateMetricDescriptor(ctx, mdr) +} + +var getMetricDescriptor = func(ctx context.Context, c *monitoring.MetricClient, mdr *monitoringpb.GetMetricDescriptorRequest) (*metric.MetricDescriptor, error) { + return c.GetMetricDescriptor(ctx, mdr) +} + +var createTimeSeries = func(ctx context.Context, c *monitoring.MetricClient, ts *monitoringpb.CreateTimeSeriesRequest) error { + return c.CreateTimeSeries(ctx, ts) +} + +var knownExternalMetricPrefixes = []string{ + "custom.googleapis.com/", + "external.googleapis.com/", +} + +// builtinMetric returns true if a MetricType is a heuristically known +// built-in Stackdriver metric +func builtinMetric(metricType string) bool { + for _, knownExternalMetric := range knownExternalMetricPrefixes { + if strings.HasPrefix(metricType, knownExternalMetric) { + return false + } + } + return true +} diff --git a/vendor/contrib.go.opencensus.io/exporter/stackdriver/trace.go b/vendor/contrib.go.opencensus.io/exporter/stackdriver/trace.go new file mode 100644 index 000000000..71e7f36d2 --- /dev/null +++ b/vendor/contrib.go.opencensus.io/exporter/stackdriver/trace.go @@ -0,0 +1,178 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package stackdriver + +import ( + "context" + "fmt" + "log" + "sync" + "time" + + tracingclient "cloud.google.com/go/trace/apiv2" + "github.com/golang/protobuf/proto" + "go.opencensus.io/trace" + "google.golang.org/api/support/bundler" + tracepb "google.golang.org/genproto/googleapis/devtools/cloudtrace/v2" +) + +// traceExporter is an implementation of trace.Exporter that uploads spans to +// Stackdriver. +// +type traceExporter struct { + o Options + projectID string + bundler *bundler.Bundler + // uploadFn defaults to uploadSpans; it can be replaced for tests. + uploadFn func(spans []*tracepb.Span) + overflowLogger + client *tracingclient.Client +} + +var _ trace.Exporter = (*traceExporter)(nil) + +func newTraceExporter(o Options) (*traceExporter, error) { + ctx := o.Context + if ctx == nil { + ctx = context.Background() + } + client, err := tracingclient.NewClient(ctx, o.TraceClientOptions...) 
+ if err != nil { + return nil, fmt.Errorf("stackdriver: couldn't initialize trace client: %v", err) + } + return newTraceExporterWithClient(o, client), nil +} + +const defaultBufferedByteLimit = 8 * 1024 * 1024 + +func newTraceExporterWithClient(o Options, c *tracingclient.Client) *traceExporter { + e := &traceExporter{ + projectID: o.ProjectID, + client: c, + o: o, + } + b := bundler.NewBundler((*tracepb.Span)(nil), func(bundle interface{}) { + e.uploadFn(bundle.([]*tracepb.Span)) + }) + if o.BundleDelayThreshold > 0 { + b.DelayThreshold = o.BundleDelayThreshold + } else { + b.DelayThreshold = 2 * time.Second + } + if o.BundleCountThreshold > 0 { + b.BundleCountThreshold = o.BundleCountThreshold + } else { + b.BundleCountThreshold = 50 + } + // The measured "bytes" are not really bytes, see exportReceiver. + b.BundleByteThreshold = b.BundleCountThreshold * 200 + b.BundleByteLimit = b.BundleCountThreshold * 1000 + if o.TraceSpansBufferMaxBytes > 0 { + b.BufferedByteLimit = o.TraceSpansBufferMaxBytes + } else { + b.BufferedByteLimit = defaultBufferedByteLimit + } + + e.bundler = b + e.uploadFn = e.uploadSpans + return e +} + +// ExportSpan exports a SpanData to Stackdriver Trace. +func (e *traceExporter) ExportSpan(s *trace.SpanData) { + protoSpan := protoFromSpanData(s, e.projectID, e.o.Resource) + protoSize := proto.Size(protoSpan) + err := e.bundler.Add(protoSpan, protoSize) + switch err { + case nil: + return + case bundler.ErrOversizedItem: + case bundler.ErrOverflow: + e.overflowLogger.log() + default: + e.o.handleError(err) + } +} + +// Flush waits for exported trace spans to be uploaded. +// +// This is useful if your program is ending and you do not want to lose recent +// spans. +func (e *traceExporter) Flush() { + e.bundler.Flush() +} + +// uploadSpans uploads a set of spans to Stackdriver. +func (e *traceExporter) uploadSpans(spans []*tracepb.Span) { + req := tracepb.BatchWriteSpansRequest{ + Name: "projects/" + e.projectID, + Spans: spans, + } + // Create a never-sampled span to prevent traces associated with exporter. + ctx, cancel := e.o.newContextWithTimeout() + defer cancel() + ctx, span := trace.StartSpan( + ctx, + "contrib.go.opencensus.io/exporter/stackdriver.uploadSpans", + trace.WithSampler(trace.NeverSample()), + ) + defer span.End() + span.AddAttributes(trace.Int64Attribute("num_spans", int64(len(spans)))) + + err := e.client.BatchWriteSpans(ctx, &req) + if err != nil { + span.SetStatus(trace.Status{Code: 2, Message: err.Error()}) + e.o.handleError(err) + } +} + +// overflowLogger ensures that at most one overflow error log message is +// written every 5 seconds. 
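+//
+// While logging is paused, additional overflow events are counted and
+// reported in a single summary message once the pause expires.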
+type overflowLogger struct { + mu sync.Mutex + pause bool + accum int +} + +func (o *overflowLogger) delay() { + o.pause = true + time.AfterFunc(5*time.Second, func() { + o.mu.Lock() + defer o.mu.Unlock() + switch { + case o.accum == 0: + o.pause = false + case o.accum == 1: + log.Println("OpenCensus Stackdriver exporter: failed to upload span: buffer full") + o.accum = 0 + o.delay() + default: + log.Printf("OpenCensus Stackdriver exporter: failed to upload %d spans: buffer full", o.accum) + o.accum = 0 + o.delay() + } + }) +} + +func (o *overflowLogger) log() { + o.mu.Lock() + defer o.mu.Unlock() + if !o.pause { + log.Println("OpenCensus Stackdriver exporter: failed to upload span: buffer full") + o.delay() + } else { + o.accum++ + } +} diff --git a/vendor/contrib.go.opencensus.io/exporter/stackdriver/trace_proto.go b/vendor/contrib.go.opencensus.io/exporter/stackdriver/trace_proto.go new file mode 100644 index 000000000..2d259cf3c --- /dev/null +++ b/vendor/contrib.go.opencensus.io/exporter/stackdriver/trace_proto.go @@ -0,0 +1,277 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package stackdriver + +import ( + "fmt" + "math" + "time" + "unicode/utf8" + + timestamppb "github.com/golang/protobuf/ptypes/timestamp" + wrapperspb "github.com/golang/protobuf/ptypes/wrappers" + "go.opencensus.io/plugin/ochttp" + "go.opencensus.io/trace" + monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres" + tracepb "google.golang.org/genproto/googleapis/devtools/cloudtrace/v2" + statuspb "google.golang.org/genproto/googleapis/rpc/status" +) + +const ( + maxAnnotationEventsPerSpan = 32 + maxMessageEventsPerSpan = 128 + maxAttributeStringValue = 256 + agentLabel = "g.co/agent" + + labelHTTPHost = `/http/host` + labelHTTPMethod = `/http/method` + labelHTTPStatusCode = `/http/status_code` + labelHTTPPath = `/http/path` + labelHTTPUserAgent = `/http/user_agent` +) + +// proto returns a protocol buffer representation of a SpanData. +func protoFromSpanData(s *trace.SpanData, projectID string, mr *monitoredrespb.MonitoredResource) *tracepb.Span { + if s == nil { + return nil + } + + traceIDString := s.SpanContext.TraceID.String() + spanIDString := s.SpanContext.SpanID.String() + + name := s.Name + switch s.SpanKind { + case trace.SpanKindClient: + name = "Sent." + name + case trace.SpanKindServer: + name = "Recv." 
+ name + } + + sp := &tracepb.Span{ + Name: "projects/" + projectID + "/traces/" + traceIDString + "/spans/" + spanIDString, + SpanId: spanIDString, + DisplayName: trunc(name, 128), + StartTime: timestampProto(s.StartTime), + EndTime: timestampProto(s.EndTime), + SameProcessAsParentSpan: &wrapperspb.BoolValue{Value: !s.HasRemoteParent}, + } + if p := s.ParentSpanID; p != (trace.SpanID{}) { + sp.ParentSpanId = p.String() + } + if s.Status.Code != 0 || s.Status.Message != "" { + sp.Status = &statuspb.Status{Code: s.Status.Code, Message: s.Status.Message} + } + + var annotations, droppedAnnotationsCount, messageEvents, droppedMessageEventsCount int + copyAttributes(&sp.Attributes, s.Attributes) + + // Copy MonitoredResources as span Attributes + sp.Attributes = copyMonitoredResourceAttributes(sp.Attributes, mr) + + as := s.Annotations + for i, a := range as { + if annotations >= maxAnnotationEventsPerSpan { + droppedAnnotationsCount = len(as) - i + break + } + annotation := &tracepb.Span_TimeEvent_Annotation{Description: trunc(a.Message, maxAttributeStringValue)} + copyAttributes(&annotation.Attributes, a.Attributes) + event := &tracepb.Span_TimeEvent{ + Time: timestampProto(a.Time), + Value: &tracepb.Span_TimeEvent_Annotation_{Annotation: annotation}, + } + annotations++ + if sp.TimeEvents == nil { + sp.TimeEvents = &tracepb.Span_TimeEvents{} + } + sp.TimeEvents.TimeEvent = append(sp.TimeEvents.TimeEvent, event) + } + + if sp.Attributes == nil { + sp.Attributes = &tracepb.Span_Attributes{ + AttributeMap: make(map[string]*tracepb.AttributeValue), + } + } + sp.Attributes.AttributeMap[agentLabel] = &tracepb.AttributeValue{ + Value: &tracepb.AttributeValue_StringValue{ + StringValue: trunc(userAgent, maxAttributeStringValue), + }, + } + + es := s.MessageEvents + for i, e := range es { + if messageEvents >= maxMessageEventsPerSpan { + droppedMessageEventsCount = len(es) - i + break + } + messageEvents++ + if sp.TimeEvents == nil { + sp.TimeEvents = &tracepb.Span_TimeEvents{} + } + sp.TimeEvents.TimeEvent = append(sp.TimeEvents.TimeEvent, &tracepb.Span_TimeEvent{ + Time: timestampProto(e.Time), + Value: &tracepb.Span_TimeEvent_MessageEvent_{ + MessageEvent: &tracepb.Span_TimeEvent_MessageEvent{ + Type: tracepb.Span_TimeEvent_MessageEvent_Type(e.EventType), + Id: e.MessageID, + UncompressedSizeBytes: e.UncompressedByteSize, + CompressedSizeBytes: e.CompressedByteSize, + }, + }, + }) + } + + if droppedAnnotationsCount != 0 || droppedMessageEventsCount != 0 { + if sp.TimeEvents == nil { + sp.TimeEvents = &tracepb.Span_TimeEvents{} + } + sp.TimeEvents.DroppedAnnotationsCount = clip32(droppedAnnotationsCount) + sp.TimeEvents.DroppedMessageEventsCount = clip32(droppedMessageEventsCount) + } + + if len(s.Links) > 0 { + sp.Links = &tracepb.Span_Links{} + sp.Links.Link = make([]*tracepb.Span_Link, 0, len(s.Links)) + for _, l := range s.Links { + link := &tracepb.Span_Link{ + TraceId: l.TraceID.String(), + SpanId: l.SpanID.String(), + Type: tracepb.Span_Link_Type(l.Type), + } + copyAttributes(&link.Attributes, l.Attributes) + sp.Links.Link = append(sp.Links.Link, link) + } + } + return sp +} + +// timestampProto creates a timestamp proto for a time.Time. +func timestampProto(t time.Time) *timestamppb.Timestamp { + return ×tamppb.Timestamp{ + Seconds: t.Unix(), + Nanos: int32(t.Nanosecond()), + } +} + +// copyMonitoredResourceAttributes copies proto monitoredResource to proto map field (Span_Attributes) +// it creates the map if it is nil. 
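+// Each resource label is stored under the attribute key
+// "g.co/r/<resource type>/<label key>".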
+func copyMonitoredResourceAttributes(out *tracepb.Span_Attributes, mr *monitoredrespb.MonitoredResource) *tracepb.Span_Attributes { + if mr == nil { + return out + } + if out == nil { + out = &tracepb.Span_Attributes{} + } + if out.AttributeMap == nil { + out.AttributeMap = make(map[string]*tracepb.AttributeValue) + } + for k, v := range mr.Labels { + av := attributeValue(v) + out.AttributeMap[fmt.Sprintf("g.co/r/%s/%s", mr.Type, k)] = av + } + return out +} + +// copyAttributes copies a map of attributes to a proto map field. +// It creates the map if it is nil. +func copyAttributes(out **tracepb.Span_Attributes, in map[string]interface{}) { + if len(in) == 0 { + return + } + if *out == nil { + *out = &tracepb.Span_Attributes{} + } + if (*out).AttributeMap == nil { + (*out).AttributeMap = make(map[string]*tracepb.AttributeValue) + } + var dropped int32 + for key, value := range in { + av := attributeValue(value) + if av == nil { + continue + } + switch key { + case ochttp.PathAttribute: + (*out).AttributeMap[labelHTTPPath] = av + case ochttp.HostAttribute: + (*out).AttributeMap[labelHTTPHost] = av + case ochttp.MethodAttribute: + (*out).AttributeMap[labelHTTPMethod] = av + case ochttp.UserAgentAttribute: + (*out).AttributeMap[labelHTTPUserAgent] = av + case ochttp.StatusCodeAttribute: + (*out).AttributeMap[labelHTTPStatusCode] = av + default: + if len(key) > 128 { + dropped++ + continue + } + (*out).AttributeMap[key] = av + } + } + (*out).DroppedAttributesCount = dropped +} + +func attributeValue(v interface{}) *tracepb.AttributeValue { + switch value := v.(type) { + case bool: + return &tracepb.AttributeValue{ + Value: &tracepb.AttributeValue_BoolValue{BoolValue: value}, + } + case int64: + return &tracepb.AttributeValue{ + Value: &tracepb.AttributeValue_IntValue{IntValue: value}, + } + case string: + return &tracepb.AttributeValue{ + Value: &tracepb.AttributeValue_StringValue{StringValue: trunc(value, maxAttributeStringValue)}, + } + } + return nil +} + +// trunc returns a TruncatableString truncated to the given limit. +func trunc(s string, limit int) *tracepb.TruncatableString { + if len(s) > limit { + b := []byte(s[:limit]) + for { + r, size := utf8.DecodeLastRune(b) + if r == utf8.RuneError && size == 1 { + b = b[:len(b)-1] + } else { + break + } + } + return &tracepb.TruncatableString{ + Value: string(b), + TruncatedByteCount: clip32(len(s) - len(b)), + } + } + return &tracepb.TruncatableString{ + Value: s, + TruncatedByteCount: 0, + } +} + +// clip32 clips an int to the range of an int32. +func clip32(x int) int32 { + if x < math.MinInt32 { + return math.MinInt32 + } + if x > math.MaxInt32 { + return math.MaxInt32 + } + return int32(x) +} diff --git a/vendor/github.com/aws/aws-sdk-go/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go/LICENSE.txt new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/aws/aws-sdk-go/NOTICE.txt b/vendor/github.com/aws/aws-sdk-go/NOTICE.txt
new file mode 100644
index 000000000..899129ecc
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/NOTICE.txt
@@ -0,0 +1,3 @@
+AWS SDK for Go
+Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+Copyright 2014-2015 Stripe, Inc.
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go b/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go
new file mode 100644
index 000000000..99849c0e1
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go
@@ -0,0 +1,164 @@
+// Package awserr represents API error interface accessors for the SDK.
+package awserr
+
+// An Error wraps lower level errors with code, message and an original error.
+// The underlying concrete error type may also satisfy other interfaces which
+// can be used to obtain more specific information about the error.
+//
+// Calling Error() or String() will always include the full information about
+// an error based on its underlying type.
+//
+// Example:
+//
+//	output, err := s3manage.Upload(svc, input, opts)
+//	if err != nil {
+//		if awsErr, ok := err.(awserr.Error); ok {
+//			// Get error details
+//			log.Println("Error:", awsErr.Code(), awsErr.Message())
+//
+//			// Prints out full error message, including original error if there was one.
+//			log.Println("Error:", awsErr.Error())
+//
+//			// Get original error
+//			if origErr := awsErr.OrigErr(); origErr != nil {
+//				// operate on original error.
+//			}
+//		} else {
+//			fmt.Println(err.Error())
+//		}
+//	}
+//
+type Error interface {
+	// Satisfy the generic error interface.
+	error
+
+	// Returns the short phrase depicting the classification of the error.
+	Code() string
+
+	// Returns the error details message.
+	Message() string
+
+	// Returns the original error if one was set. Nil is returned if not set.
+	OrigErr() error
+}
+
+// BatchError is a batch of errors which also wraps lower level errors with
+// code, message, and original errors. Calling Error() will include all errors
+// that occurred in the batch.
+//
+// Deprecated: Replaced with BatchedErrors. Only defined for backwards
+// compatibility.
+type BatchError interface {
+	// Satisfy the generic error interface.
+	error
+
+	// Returns the short phrase depicting the classification of the error.
+	Code() string
+
+	// Returns the error details message.
+	Message() string
+
+	// Returns the original error if one was set. Nil is returned if not set.
+	OrigErrs() []error
+}
+
+// BatchedErrors is a batch of errors which also wraps lower level errors with
+// code, message, and original errors. Calling Error() will include all errors
+// that occurred in the batch.
+//
+// Replaces BatchError
+type BatchedErrors interface {
+	// Satisfy the base Error interface.
+	Error
+
+	// Returns the original error if one was set. Nil is returned if not set.
+	OrigErrs() []error
+}
+
+// New returns an Error object described by the code, message, and origErr.
+//
+// If origErr satisfies the Error interface it will not be wrapped within a new
+// Error object and will instead be returned.
+func New(code, message string, origErr error) Error {
+	var errs []error
+	if origErr != nil {
+		errs = append(errs, origErr)
+	}
+	return newBaseError(code, message, errs)
+}
+
+// NewBatchError returns a BatchedErrors with a collection of errors as an
+// array of errors.
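+// If errs is empty, the returned error reports just the code and message.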
+func NewBatchError(code, message string, errs []error) BatchedErrors { + return newBaseError(code, message, errs) +} + +// A RequestFailure is an interface to extract request failure information from +// an Error such as the request ID of the failed request returned by a service. +// RequestFailures may not always have a requestID value if the request failed +// prior to reaching the service such as a connection error. +// +// Example: +// +// output, err := s3manage.Upload(svc, input, opts) +// if err != nil { +// if reqerr, ok := err.(RequestFailure); ok { +// log.Println("Request failed", reqerr.Code(), reqerr.Message(), reqerr.RequestID()) +// } else { +// log.Println("Error:", err.Error()) +// } +// } +// +// Combined with awserr.Error: +// +// output, err := s3manage.Upload(svc, input, opts) +// if err != nil { +// if awsErr, ok := err.(awserr.Error); ok { +// // Generic AWS Error with Code, Message, and original error (if any) +// fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr()) +// +// if reqErr, ok := err.(awserr.RequestFailure); ok { +// // A service error occurred +// fmt.Println(reqErr.StatusCode(), reqErr.RequestID()) +// } +// } else { +// fmt.Println(err.Error()) +// } +// } +// +type RequestFailure interface { + Error + + // The status code of the HTTP response. + StatusCode() int + + // The request ID returned by the service for a request failure. This will + // be empty if no request ID is available such as the request failed due + // to a connection error. + RequestID() string +} + +// NewRequestFailure returns a wrapped error with additional information for +// request status code, and service requestID. +// +// Should be used to wrap all request which involve service requests. Even if +// the request failed without a service response, but had an HTTP status code +// that may be meaningful. +func NewRequestFailure(err Error, statusCode int, reqID string) RequestFailure { + return newRequestError(err, statusCode, reqID) +} + +// UnmarshalError provides the interface for the SDK failing to unmarshal data. +type UnmarshalError interface { + awsError + Bytes() []byte +} + +// NewUnmarshalError returns an initialized UnmarshalError error wrapper adding +// the bytes that fail to unmarshal to the error. +func NewUnmarshalError(err error, msg string, bytes []byte) UnmarshalError { + return &unmarshalError{ + awsError: New("UnmarshalError", msg, err), + bytes: bytes, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go b/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go new file mode 100644 index 000000000..a2c5817c4 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go @@ -0,0 +1,221 @@ +package awserr + +import ( + "encoding/hex" + "fmt" +) + +// SprintError returns a string of the formatted error code. +// +// Both extra and origErr are optional. If they are included their lines +// will be added, but if they are not included their lines will be ignored. +func SprintError(code, message, extra string, origErr error) string { + msg := fmt.Sprintf("%s: %s", code, message) + if extra != "" { + msg = fmt.Sprintf("%s\n\t%s", msg, extra) + } + if origErr != nil { + msg = fmt.Sprintf("%s\ncaused by: %s", msg, origErr.Error()) + } + return msg +} + +// A baseError wraps the code and message which defines an error. It also +// can be used to wrap an original error object. +// +// Should be used as the root for errors satisfying the awserr.Error. Also +// for any error which does not fit into a specific error wrapper type. 
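+//
+// baseError is unexported; values are constructed through New and
+// NewBatchError rather than directly.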
+type baseError struct { + // Classification of error + code string + + // Detailed information about error + message string + + // Optional original error this error is based off of. Allows building + // chained errors. + errs []error +} + +// newBaseError returns an error object for the code, message, and errors. +// +// code is a short no whitespace phrase depicting the classification of +// the error that is being created. +// +// message is the free flow string containing detailed information about the +// error. +// +// origErrs is the error objects which will be nested under the new errors to +// be returned. +func newBaseError(code, message string, origErrs []error) *baseError { + b := &baseError{ + code: code, + message: message, + errs: origErrs, + } + + return b +} + +// Error returns the string representation of the error. +// +// See ErrorWithExtra for formatting. +// +// Satisfies the error interface. +func (b baseError) Error() string { + size := len(b.errs) + if size > 0 { + return SprintError(b.code, b.message, "", errorList(b.errs)) + } + + return SprintError(b.code, b.message, "", nil) +} + +// String returns the string representation of the error. +// Alias for Error to satisfy the stringer interface. +func (b baseError) String() string { + return b.Error() +} + +// Code returns the short phrase depicting the classification of the error. +func (b baseError) Code() string { + return b.code +} + +// Message returns the error details message. +func (b baseError) Message() string { + return b.message +} + +// OrigErr returns the original error if one was set. Nil is returned if no +// error was set. This only returns the first element in the list. If the full +// list is needed, use BatchedErrors. +func (b baseError) OrigErr() error { + switch len(b.errs) { + case 0: + return nil + case 1: + return b.errs[0] + default: + if err, ok := b.errs[0].(Error); ok { + return NewBatchError(err.Code(), err.Message(), b.errs[1:]) + } + return NewBatchError("BatchedErrors", + "multiple errors occurred", b.errs) + } +} + +// OrigErrs returns the original errors if one was set. An empty slice is +// returned if no error was set. +func (b baseError) OrigErrs() []error { + return b.errs +} + +// So that the Error interface type can be included as an anonymous field +// in the requestError struct and not conflict with the error.Error() method. +type awsError Error + +// A requestError wraps a request or service error. +// +// Composed of baseError for code, message, and original error. +type requestError struct { + awsError + statusCode int + requestID string + bytes []byte +} + +// newRequestError returns a wrapped error with additional information for +// request status code, and service requestID. +// +// Should be used to wrap all request which involve service requests. Even if +// the request failed without a service response, but had an HTTP status code +// that may be meaningful. +// +// Also wraps original errors via the baseError. +func newRequestError(err Error, statusCode int, requestID string) *requestError { + return &requestError{ + awsError: err, + statusCode: statusCode, + requestID: requestID, + } +} + +// Error returns the string representation of the error. +// Satisfies the error interface. +func (r requestError) Error() string { + extra := fmt.Sprintf("status code: %d, request id: %s", + r.statusCode, r.requestID) + return SprintError(r.Code(), r.Message(), extra, r.OrigErr()) +} + +// String returns the string representation of the error. 
+// Alias for Error to satisfy the stringer interface.
+func (r requestError) String() string {
+	return r.Error()
+}
+
+// StatusCode returns the wrapped status code for the error.
+func (r requestError) StatusCode() int {
+	return r.statusCode
+}
+
+// RequestID returns the wrapped request ID.
+func (r requestError) RequestID() string {
+	return r.requestID
+}
+
+// OrigErrs returns the original errors if one was set. An empty slice is
+// returned if no error was set.
+func (r requestError) OrigErrs() []error {
+	if b, ok := r.awsError.(BatchedErrors); ok {
+		return b.OrigErrs()
+	}
+	return []error{r.OrigErr()}
+}
+
+type unmarshalError struct {
+	awsError
+	bytes []byte
+}
+
+// Error returns the string representation of the error.
+// Satisfies the error interface.
+func (e unmarshalError) Error() string {
+	extra := hex.Dump(e.bytes)
+	return SprintError(e.Code(), e.Message(), extra, e.OrigErr())
+}
+
+// String returns the string representation of the error.
+// Alias for Error to satisfy the stringer interface.
+func (e unmarshalError) String() string {
+	return e.Error()
+}
+
+// Bytes returns the bytes that failed to unmarshal.
+func (e unmarshalError) Bytes() []byte {
+	return e.bytes
+}
+
+// An error list that satisfies the Go error interface.
+type errorList []error
+
+// Error returns the string representation of the error.
+//
+// Satisfies the error interface.
+func (e errorList) Error() string {
+	msg := ""
+	// An empty list produces an empty message.
+	if size := len(e); size > 0 {
+		for i := 0; i < size; i++ {
+			msg += e[i].Error()
+			// Join elements with a newline, but do not append a trailing
+			// '\n'; an extra trailing newline would break consumers (and
+			// unit tests) that compare messages exactly.
+			if i+1 < size {
+				msg += "\n"
+			}
+		}
+	}
+	return msg
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go
new file mode 100644
index 000000000..1a3d106d5
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go
@@ -0,0 +1,108 @@
+package awsutil
+
+import (
+	"io"
+	"reflect"
+	"time"
+)
+
+// Copy deeply copies a src structure to dst. Useful for copying request and
+// response structures.
+//
+// Can copy between structs of different type, but will only copy fields which
+// are assignable, and exist in both structs. Fields which are not assignable,
+// or do not exist in both structs, are ignored.
+func Copy(dst, src interface{}) {
+	dstval := reflect.ValueOf(dst)
+	if !dstval.IsValid() {
+		panic("Copy dst cannot be nil")
+	}
+
+	rcopy(dstval, reflect.ValueOf(src), true)
+}
+
+// CopyOf returns a copy of src while also allocating the memory for dst.
+// src must be a pointer type or this operation will fail.
+func CopyOf(src interface{}) (dst interface{}) {
+	dsti := reflect.New(reflect.TypeOf(src).Elem())
+	dst = dsti.Interface()
+	rcopy(dsti, reflect.ValueOf(src), true)
+	return
+}
+
+// rcopy performs a recursive copy of values from the source to destination.
+//
+// root is used to skip certain aspects of the copy which are not valid
+// for the root node of an object.
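+//
+// A small illustrative sketch of the exported entry points that drive rcopy
+// (the struct and field names here are hypothetical):
+//
+//     type widget struct{ Name *string }
+//     src := &widget{Name: aws.String("a")}
+//     dst := &widget{}
+//     awsutil.Copy(dst, src)                  // deep copy into dst
+//     clone := awsutil.CopyOf(src).(*widget)  // allocate and copy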
+func rcopy(dst, src reflect.Value, root bool) { + if !src.IsValid() { + return + } + + switch src.Kind() { + case reflect.Ptr: + if _, ok := src.Interface().(io.Reader); ok { + if dst.Kind() == reflect.Ptr && dst.Elem().CanSet() { + dst.Elem().Set(src) + } else if dst.CanSet() { + dst.Set(src) + } + } else { + e := src.Type().Elem() + if dst.CanSet() && !src.IsNil() { + if _, ok := src.Interface().(*time.Time); !ok { + dst.Set(reflect.New(e)) + } else { + tempValue := reflect.New(e) + tempValue.Elem().Set(src.Elem()) + // Sets time.Time's unexported values + dst.Set(tempValue) + } + } + if src.Elem().IsValid() { + // Keep the current root state since the depth hasn't changed + rcopy(dst.Elem(), src.Elem(), root) + } + } + case reflect.Struct: + t := dst.Type() + for i := 0; i < t.NumField(); i++ { + name := t.Field(i).Name + srcVal := src.FieldByName(name) + dstVal := dst.FieldByName(name) + if srcVal.IsValid() && dstVal.CanSet() { + rcopy(dstVal, srcVal, false) + } + } + case reflect.Slice: + if src.IsNil() { + break + } + + s := reflect.MakeSlice(src.Type(), src.Len(), src.Cap()) + dst.Set(s) + for i := 0; i < src.Len(); i++ { + rcopy(dst.Index(i), src.Index(i), false) + } + case reflect.Map: + if src.IsNil() { + break + } + + s := reflect.MakeMap(src.Type()) + dst.Set(s) + for _, k := range src.MapKeys() { + v := src.MapIndex(k) + v2 := reflect.New(v.Type()).Elem() + rcopy(v2, v, false) + dst.SetMapIndex(k, v2) + } + default: + // Assign the value if possible. If its not assignable, the value would + // need to be converted and the impact of that may be unexpected, or is + // not compatible with the dst type. + if src.Type().AssignableTo(dst.Type()) { + dst.Set(src) + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go new file mode 100644 index 000000000..142a7a01c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go @@ -0,0 +1,27 @@ +package awsutil + +import ( + "reflect" +) + +// DeepEqual returns if the two values are deeply equal like reflect.DeepEqual. +// In addition to this, this method will also dereference the input values if +// possible so the DeepEqual performed will not fail if one parameter is a +// pointer and the other is not. +// +// DeepEqual will not perform indirection of nested values of the input parameters. +func DeepEqual(a, b interface{}) bool { + ra := reflect.Indirect(reflect.ValueOf(a)) + rb := reflect.Indirect(reflect.ValueOf(b)) + + if raValid, rbValid := ra.IsValid(), rb.IsValid(); !raValid && !rbValid { + // If the elements are both nil, and of the same type they are equal + // If they are of different types they are not equal + return reflect.TypeOf(a) == reflect.TypeOf(b) + } else if raValid != rbValid { + // Both values must be valid to be equal + return false + } + + return reflect.DeepEqual(ra.Interface(), rb.Interface()) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go new file mode 100644 index 000000000..11c52c389 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go @@ -0,0 +1,222 @@ +package awsutil + +import ( + "reflect" + "regexp" + "strconv" + "strings" + + "github.com/jmespath/go-jmespath" +) + +var indexRe = regexp.MustCompile(`(.+)\[(-?\d+)?\]$`) + +// rValuesAtPath returns a slice of values found in value v. The values +// in v are explored recursively so all nested values are collected. 
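+//
+// The path grammar handled below is, informally: components separated by
+// ".", an optional trailing "[n]" that selects one slice index (negative n
+// counts from the end), "[]" that selects all indices, "*" that pulls all
+// struct fields, and "||" that separates ordered fallback paths tried until
+// one yields values. For example (hypothetical fields): "Records[0].Name",
+// "Records[].Id", "A.B||C.D".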
+func rValuesAtPath(v interface{}, path string, createPath, caseSensitive, nilTerm bool) []reflect.Value { + pathparts := strings.Split(path, "||") + if len(pathparts) > 1 { + for _, pathpart := range pathparts { + vals := rValuesAtPath(v, pathpart, createPath, caseSensitive, nilTerm) + if len(vals) > 0 { + return vals + } + } + return nil + } + + values := []reflect.Value{reflect.Indirect(reflect.ValueOf(v))} + components := strings.Split(path, ".") + for len(values) > 0 && len(components) > 0 { + var index *int64 + var indexStar bool + c := strings.TrimSpace(components[0]) + if c == "" { // no actual component, illegal syntax + return nil + } else if caseSensitive && c != "*" && strings.ToLower(c[0:1]) == c[0:1] { + // TODO normalize case for user + return nil // don't support unexported fields + } + + // parse this component + if m := indexRe.FindStringSubmatch(c); m != nil { + c = m[1] + if m[2] == "" { + index = nil + indexStar = true + } else { + i, _ := strconv.ParseInt(m[2], 10, 32) + index = &i + indexStar = false + } + } + + nextvals := []reflect.Value{} + for _, value := range values { + // pull component name out of struct member + if value.Kind() != reflect.Struct { + continue + } + + if c == "*" { // pull all members + for i := 0; i < value.NumField(); i++ { + if f := reflect.Indirect(value.Field(i)); f.IsValid() { + nextvals = append(nextvals, f) + } + } + continue + } + + value = value.FieldByNameFunc(func(name string) bool { + if c == name { + return true + } else if !caseSensitive && strings.ToLower(name) == strings.ToLower(c) { + return true + } + return false + }) + + if nilTerm && value.Kind() == reflect.Ptr && len(components[1:]) == 0 { + if !value.IsNil() { + value.Set(reflect.Zero(value.Type())) + } + return []reflect.Value{value} + } + + if createPath && value.Kind() == reflect.Ptr && value.IsNil() { + // TODO if the value is the terminus it should not be created + // if the value to be set to its position is nil. + value.Set(reflect.New(value.Type().Elem())) + value = value.Elem() + } else { + value = reflect.Indirect(value) + } + + if value.Kind() == reflect.Slice || value.Kind() == reflect.Map { + if !createPath && value.IsNil() { + value = reflect.ValueOf(nil) + } + } + + if value.IsValid() { + nextvals = append(nextvals, value) + } + } + values = nextvals + + if indexStar || index != nil { + nextvals = []reflect.Value{} + for _, valItem := range values { + value := reflect.Indirect(valItem) + if value.Kind() != reflect.Slice { + continue + } + + if indexStar { // grab all indices + for i := 0; i < value.Len(); i++ { + idx := reflect.Indirect(value.Index(i)) + if idx.IsValid() { + nextvals = append(nextvals, idx) + } + } + continue + } + + // pull out index + i := int(*index) + if i >= value.Len() { // check out of bounds + if createPath { + // TODO resize slice + } else { + continue + } + } else if i < 0 { // support negative indexing + i = value.Len() + i + } + value = reflect.Indirect(value.Index(i)) + + if value.Kind() == reflect.Slice || value.Kind() == reflect.Map { + if !createPath && value.IsNil() { + value = reflect.ValueOf(nil) + } + } + + if value.IsValid() { + nextvals = append(nextvals, value) + } + } + values = nextvals + } + + components = components[1:] + } + return values +} + +// ValuesAtPath returns a list of values at the case insensitive lexical +// path inside of a structure. 
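+//
+// An illustrative sketch (hypothetical types; traversal is delegated to the
+// jmespath library, so the exact semantics follow JMESPath):
+//
+//     type out struct{ Items []struct{ Name string } }
+//     var resp out // hypothetical service output
+//     vals, err := awsutil.ValuesAtPath(resp, "Items[0].Name")
+//     // on success, vals holds the matched value(s)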
+func ValuesAtPath(i interface{}, path string) ([]interface{}, error) { + result, err := jmespath.Search(path, i) + if err != nil { + return nil, err + } + + v := reflect.ValueOf(result) + if !v.IsValid() || (v.Kind() == reflect.Ptr && v.IsNil()) { + return nil, nil + } + if s, ok := result.([]interface{}); ok { + return s, err + } + if v.Kind() == reflect.Map && v.Len() == 0 { + return nil, nil + } + if v.Kind() == reflect.Slice { + out := make([]interface{}, v.Len()) + for i := 0; i < v.Len(); i++ { + out[i] = v.Index(i).Interface() + } + return out, nil + } + + return []interface{}{result}, nil +} + +// SetValueAtPath sets a value at the case insensitive lexical path inside +// of a structure. +func SetValueAtPath(i interface{}, path string, v interface{}) { + if rvals := rValuesAtPath(i, path, true, false, v == nil); rvals != nil { + for _, rval := range rvals { + if rval.Kind() == reflect.Ptr && rval.IsNil() { + continue + } + setValue(rval, v) + } + } +} + +func setValue(dstVal reflect.Value, src interface{}) { + if dstVal.Kind() == reflect.Ptr { + dstVal = reflect.Indirect(dstVal) + } + srcVal := reflect.ValueOf(src) + + if !srcVal.IsValid() { // src is literal nil + if dstVal.CanAddr() { + // Convert to pointer so that pointer's value can be nil'ed + // dstVal = dstVal.Addr() + } + dstVal.Set(reflect.Zero(dstVal.Type())) + + } else if srcVal.Kind() == reflect.Ptr { + if srcVal.IsNil() { + srcVal = reflect.Zero(dstVal.Type()) + } else { + srcVal = reflect.ValueOf(src).Elem() + } + dstVal.Set(srcVal) + } else { + dstVal.Set(srcVal) + } + +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go new file mode 100644 index 000000000..710eb432f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go @@ -0,0 +1,113 @@ +package awsutil + +import ( + "bytes" + "fmt" + "io" + "reflect" + "strings" +) + +// Prettify returns the string representation of a value. +func Prettify(i interface{}) string { + var buf bytes.Buffer + prettify(reflect.ValueOf(i), 0, &buf) + return buf.String() +} + +// prettify will recursively walk value v to build a textual +// representation of the value. 
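+//
+// A small sketch of the exported entry point (hypothetical type; output
+// shape abbreviated):
+//
+//     type widget struct{ Name *string }
+//     fmt.Println(awsutil.Prettify(widget{Name: aws.String("a")}))
+//     // {
+//     //   Name: "a"
+//     // }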
+func prettify(v reflect.Value, indent int, buf *bytes.Buffer) {
+	for v.Kind() == reflect.Ptr {
+		v = v.Elem()
+	}
+
+	switch v.Kind() {
+	case reflect.Struct:
+		strtype := v.Type().String()
+		if strtype == "time.Time" {
+			fmt.Fprintf(buf, "%s", v.Interface())
+			break
+		} else if strings.HasPrefix(strtype, "io.") {
+			buf.WriteString("<buffer>")
+			break
+		}
+
+		buf.WriteString("{\n")
+
+		names := []string{}
+		for i := 0; i < v.Type().NumField(); i++ {
+			name := v.Type().Field(i).Name
+			f := v.Field(i)
+			if name[0:1] == strings.ToLower(name[0:1]) {
+				continue // ignore unexported fields
+			}
+			if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice || f.Kind() == reflect.Map) && f.IsNil() {
+				continue // ignore unset fields
+			}
+			names = append(names, name)
+		}
+
+		for i, n := range names {
+			val := v.FieldByName(n)
+			buf.WriteString(strings.Repeat(" ", indent+2))
+			buf.WriteString(n + ": ")
+			prettify(val, indent+2, buf)
+
+			if i < len(names)-1 {
+				buf.WriteString(",\n")
+			}
+		}
+
+		buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
+	case reflect.Slice:
+		strtype := v.Type().String()
+		if strtype == "[]uint8" {
+			fmt.Fprintf(buf, "<binary> len %d", v.Len())
+			break
+		}
+
+		nl, id, id2 := "", "", ""
+		if v.Len() > 3 {
+			nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2)
+		}
+		buf.WriteString("[" + nl)
+		for i := 0; i < v.Len(); i++ {
+			buf.WriteString(id2)
+			prettify(v.Index(i), indent+2, buf)
+
+			if i < v.Len()-1 {
+				buf.WriteString("," + nl)
+			}
+		}
+
+		buf.WriteString(nl + id + "]")
+	case reflect.Map:
+		buf.WriteString("{\n")
+
+		for i, k := range v.MapKeys() {
+			buf.WriteString(strings.Repeat(" ", indent+2))
+			buf.WriteString(k.String() + ": ")
+			prettify(v.MapIndex(k), indent+2, buf)
+
+			if i < v.Len()-1 {
+				buf.WriteString(",\n")
+			}
+		}
+
+		buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
+	default:
+		if !v.IsValid() {
+			fmt.Fprint(buf, "<invalid value>")
+			return
+		}
+		format := "%v"
+		switch v.Interface().(type) {
+		case string:
+			format = "%q"
+		case io.ReadSeeker, io.Reader:
+			format = "buffer(%p)"
+		}
+		fmt.Fprintf(buf, format, v.Interface())
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go
new file mode 100644
index 000000000..645df2450
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go
@@ -0,0 +1,88 @@
+package awsutil
+
+import (
+	"bytes"
+	"fmt"
+	"reflect"
+	"strings"
+)
+
+// StringValue returns the string representation of a value.
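+//
+// StringValue walks values much like Prettify; the notable difference in
+// stringValue below is that struct fields tagged `sensitive:"true"` are
+// redacted as <sensitive>. A hypothetical sketch:
+//
+//     type login struct {
+//         User     *string
+//         Password *string `sensitive:"true"`
+//     }
+//     s := awsutil.StringValue(login{User: aws.String("u"), Password: aws.String("p")})
+//     // s renders Password as <sensitive>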
+func StringValue(i interface{}) string {
+	var buf bytes.Buffer
+	stringValue(reflect.ValueOf(i), 0, &buf)
+	return buf.String()
+}
+
+func stringValue(v reflect.Value, indent int, buf *bytes.Buffer) {
+	for v.Kind() == reflect.Ptr {
+		v = v.Elem()
+	}
+
+	switch v.Kind() {
+	case reflect.Struct:
+		buf.WriteString("{\n")
+
+		for i := 0; i < v.Type().NumField(); i++ {
+			ft := v.Type().Field(i)
+			fv := v.Field(i)
+
+			if ft.Name[0:1] == strings.ToLower(ft.Name[0:1]) {
+				continue // ignore unexported fields
+			}
+			if (fv.Kind() == reflect.Ptr || fv.Kind() == reflect.Slice) && fv.IsNil() {
+				continue // ignore unset fields
+			}
+
+			buf.WriteString(strings.Repeat(" ", indent+2))
+			buf.WriteString(ft.Name + ": ")
+
+			if tag := ft.Tag.Get("sensitive"); tag == "true" {
+				buf.WriteString("<sensitive>")
+			} else {
+				stringValue(fv, indent+2, buf)
+			}
+
+			buf.WriteString(",\n")
+		}
+
+		buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
+	case reflect.Slice:
+		nl, id, id2 := "", "", ""
+		if v.Len() > 3 {
+			nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2)
+		}
+		buf.WriteString("[" + nl)
+		for i := 0; i < v.Len(); i++ {
+			buf.WriteString(id2)
+			stringValue(v.Index(i), indent+2, buf)
+
+			if i < v.Len()-1 {
+				buf.WriteString("," + nl)
+			}
+		}
+
+		buf.WriteString(nl + id + "]")
+	case reflect.Map:
+		buf.WriteString("{\n")
+
+		for i, k := range v.MapKeys() {
+			buf.WriteString(strings.Repeat(" ", indent+2))
+			buf.WriteString(k.String() + ": ")
+			stringValue(v.MapIndex(k), indent+2, buf)
+
+			if i < v.Len()-1 {
+				buf.WriteString(",\n")
+			}
+		}
+
+		buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
+	default:
+		format := "%v"
+		switch v.Interface().(type) {
+		case string:
+			format = "%q"
+		}
+		fmt.Fprintf(buf, format, v.Interface())
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/client.go b/vendor/github.com/aws/aws-sdk-go/aws/client/client.go
new file mode 100644
index 000000000..709605384
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/client/client.go
@@ -0,0 +1,96 @@
+package client
+
+import (
+	"fmt"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/client/metadata"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+// A Config provides configuration to a service client instance.
+type Config struct {
+	Config        *aws.Config
+	Handlers      request.Handlers
+	Endpoint      string
+	SigningRegion string
+	SigningName   string
+
+	// States that the signing name did not come from a modeled source but
+	// was derived based on other data. Used by service client constructors
+	// to determine if the signing name can be overridden based on metadata
+	// the service has.
+	SigningNameDerived bool
+}
+
+// ConfigProvider provides a generic way for a service client to receive
+// the ClientConfig without circular dependencies.
+type ConfigProvider interface {
+	ClientConfig(serviceName string, cfgs ...*aws.Config) Config
+}
+
+// ConfigNoResolveEndpointProvider same as ConfigProvider except it will not
+// resolve the endpoint automatically. The service client's endpoint must be
+// provided via the aws.Config.Endpoint field.
+type ConfigNoResolveEndpointProvider interface {
+	ClientConfigNoResolveEndpoint(cfgs ...*aws.Config) Config
+}
+
+// A Client implements the base client request and response handling
+// used by all service clients.
+type Client struct {
+	request.Retryer
+	metadata.ClientInfo
+
+	Config   aws.Config
+	Handlers request.Handlers
+}
+
+// New will return a pointer to a new initialized service client.
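+//
+// Generated service clients are the intended callers; schematically (the
+// service name and ClientInfo values below are hypothetical, and p is any
+// client.ConfigProvider, e.g. a session):
+//
+//     cfg := p.ClientConfig("mysvc")
+//     c := New(*cfg.Config, metadata.ClientInfo{ServiceName: "mysvc"}, cfg.Handlers)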
+func New(cfg aws.Config, info metadata.ClientInfo, handlers request.Handlers, options ...func(*Client)) *Client {
+	svc := &Client{
+		Config:     cfg,
+		ClientInfo: info,
+		Handlers:   handlers.Copy(),
+	}
+
+	switch retryer, ok := cfg.Retryer.(request.Retryer); {
+	case ok:
+		svc.Retryer = retryer
+	case cfg.Retryer != nil && cfg.Logger != nil:
+		s := fmt.Sprintf("WARNING: %T does not implement request.Retryer; using DefaultRetryer instead", cfg.Retryer)
+		cfg.Logger.Log(s)
+		fallthrough
+	default:
+		maxRetries := aws.IntValue(cfg.MaxRetries)
+		if cfg.MaxRetries == nil || maxRetries == aws.UseServiceDefaultRetries {
+			maxRetries = 3
+		}
+		svc.Retryer = DefaultRetryer{NumMaxRetries: maxRetries}
+	}
+
+	svc.AddDebugHandlers()
+
+	for _, option := range options {
+		option(svc)
+	}
+
+	return svc
+}
+
+// NewRequest returns a new Request pointer for the service API
+// operation and parameters.
+func (c *Client) NewRequest(operation *request.Operation, params interface{}, data interface{}) *request.Request {
+	return request.New(c.Config, c.ClientInfo, c.Handlers, c.Retryer, operation, params, data)
+}
+
+// AddDebugHandlers injects debug logging handlers into the service to log request
+// debug information.
+func (c *Client) AddDebugHandlers() {
+	if !c.Config.LogLevel.AtLeast(aws.LogDebug) {
+		return
+	}
+
+	c.Handlers.Send.PushFrontNamed(LogHTTPRequestHandler)
+	c.Handlers.Send.PushBackNamed(LogHTTPResponseHandler)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go
new file mode 100644
index 000000000..a397b0d04
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go
@@ -0,0 +1,116 @@
+package client
+
+import (
+	"strconv"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/internal/sdkrand"
+)
+
+// DefaultRetryer implements basic retry logic using exponential backoff for
+// most services. If you want to implement custom retry logic, implement the
+// request.Retryer interface or create a structure type that composes this
+// struct and override the specific methods. For example, to override only
+// the MaxRetries method:
+//
+//     type retryer struct {
+//         client.DefaultRetryer
+//     }
+//
+//     // This implementation always has 100 max retries
+//     func (d retryer) MaxRetries() int { return 100 }
+type DefaultRetryer struct {
+	NumMaxRetries int
+}
+
+// MaxRetries returns the maximum number of retries the service will use to
+// make an individual API request.
+func (d DefaultRetryer) MaxRetries() int {
+	return d.NumMaxRetries
+}
+
+// RetryRules returns the delay duration before retrying this request again.
+func (d DefaultRetryer) RetryRules(r *request.Request) time.Duration {
+	// Set the upper limit of delay in retrying at ~five minutes
+	minTime := 30
+	throttle := d.shouldThrottle(r)
+	if throttle {
+		if delay, ok := getRetryDelay(r); ok {
+			return delay
+		}
+
+		minTime = 500
+	}
+
+	retryCount := r.RetryCount
+	if throttle && retryCount > 8 {
+		retryCount = 8
+	} else if retryCount > 13 {
+		retryCount = 13
+	}
+
+	delay := (1 << uint(retryCount)) * (sdkrand.SeededRand.Intn(minTime) + minTime)
+	return time.Duration(delay) * time.Millisecond
+}
+
+// ShouldRetry returns true if the request should be retried.
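+//
+// Note on the backoff math in RetryRules above (a descriptive sketch, not a
+// contract): for a throttled request past eight retries, minTime is 500 and
+// retryCount is capped at 8, so the delay is (1<<8)*(rand[0,500)+500)
+// milliseconds, roughly 2.1 to 4.3 minutes; the non-throttled cap of 13
+// bounds delays to roughly 4 to 8 minutes.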
+func (d DefaultRetryer) ShouldRetry(r *request.Request) bool { + // If one of the other handlers already set the retry state + // we don't want to override it based on the service's state + if r.Retryable != nil { + return *r.Retryable + } + + if r.HTTPResponse.StatusCode >= 500 && r.HTTPResponse.StatusCode != 501 { + return true + } + return r.IsErrorRetryable() || d.shouldThrottle(r) +} + +// ShouldThrottle returns true if the request should be throttled. +func (d DefaultRetryer) shouldThrottle(r *request.Request) bool { + switch r.HTTPResponse.StatusCode { + case 429: + case 502: + case 503: + case 504: + default: + return r.IsErrorThrottle() + } + + return true +} + +// This will look in the Retry-After header, RFC 7231, for how long +// it will wait before attempting another request +func getRetryDelay(r *request.Request) (time.Duration, bool) { + if !canUseRetryAfterHeader(r) { + return 0, false + } + + delayStr := r.HTTPResponse.Header.Get("Retry-After") + if len(delayStr) == 0 { + return 0, false + } + + delay, err := strconv.Atoi(delayStr) + if err != nil { + return 0, false + } + + return time.Duration(delay) * time.Second, true +} + +// Will look at the status code to see if the retry header pertains to +// the status code. +func canUseRetryAfterHeader(r *request.Request) bool { + switch r.HTTPResponse.StatusCode { + case 429: + case 503: + default: + return false + } + + return true +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go b/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go new file mode 100644 index 000000000..7b5e1276a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go @@ -0,0 +1,190 @@ +package client + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http/httputil" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" +) + +const logReqMsg = `DEBUG: Request %s/%s Details: +---[ REQUEST POST-SIGN ]----------------------------- +%s +-----------------------------------------------------` + +const logReqErrMsg = `DEBUG ERROR: Request %s/%s: +---[ REQUEST DUMP ERROR ]----------------------------- +%s +------------------------------------------------------` + +type logWriter struct { + // Logger is what we will use to log the payload of a response. + Logger aws.Logger + // buf stores the contents of what has been read + buf *bytes.Buffer +} + +func (logger *logWriter) Write(b []byte) (int, error) { + return logger.buf.Write(b) +} + +type teeReaderCloser struct { + // io.Reader will be a tee reader that is used during logging. + // This structure will read from a body and write the contents to a logger. + io.Reader + // Source is used just to close when we are done reading. + Source io.ReadCloser +} + +func (reader *teeReaderCloser) Close() error { + return reader.Source.Close() +} + +// LogHTTPRequestHandler is a SDK request handler to log the HTTP request sent +// to a service. Will include the HTTP request body if the LogLevel of the +// request matches LogDebugWithHTTPBody. 
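+//
+// Body logging is gated on the config's log level; a minimal sketch to
+// enable it for a session:
+//
+//     sess := session.Must(session.NewSession(
+//         aws.NewConfig().WithLogLevel(aws.LogDebugWithHTTPBody)))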
+var LogHTTPRequestHandler = request.NamedHandler{ + Name: "awssdk.client.LogRequest", + Fn: logRequest, +} + +func logRequest(r *request.Request) { + logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) + bodySeekable := aws.IsReaderSeekable(r.Body) + + b, err := httputil.DumpRequestOut(r.HTTPRequest, logBody) + if err != nil { + r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg, + r.ClientInfo.ServiceName, r.Operation.Name, err)) + return + } + + if logBody { + if !bodySeekable { + r.SetReaderBody(aws.ReadSeekCloser(r.HTTPRequest.Body)) + } + // Reset the request body because dumpRequest will re-wrap the r.HTTPRequest's + // Body as a NoOpCloser and will not be reset after read by the HTTP + // client reader. + r.ResetBody() + } + + r.Config.Logger.Log(fmt.Sprintf(logReqMsg, + r.ClientInfo.ServiceName, r.Operation.Name, string(b))) +} + +// LogHTTPRequestHeaderHandler is a SDK request handler to log the HTTP request sent +// to a service. Will only log the HTTP request's headers. The request payload +// will not be read. +var LogHTTPRequestHeaderHandler = request.NamedHandler{ + Name: "awssdk.client.LogRequestHeader", + Fn: logRequestHeader, +} + +func logRequestHeader(r *request.Request) { + b, err := httputil.DumpRequestOut(r.HTTPRequest, false) + if err != nil { + r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg, + r.ClientInfo.ServiceName, r.Operation.Name, err)) + return + } + + r.Config.Logger.Log(fmt.Sprintf(logReqMsg, + r.ClientInfo.ServiceName, r.Operation.Name, string(b))) +} + +const logRespMsg = `DEBUG: Response %s/%s Details: +---[ RESPONSE ]-------------------------------------- +%s +-----------------------------------------------------` + +const logRespErrMsg = `DEBUG ERROR: Response %s/%s: +---[ RESPONSE DUMP ERROR ]----------------------------- +%s +-----------------------------------------------------` + +// LogHTTPResponseHandler is a SDK request handler to log the HTTP response +// received from a service. Will include the HTTP response body if the LogLevel +// of the request matches LogDebugWithHTTPBody. 
+var LogHTTPResponseHandler = request.NamedHandler{ + Name: "awssdk.client.LogResponse", + Fn: logResponse, +} + +func logResponse(r *request.Request) { + lw := &logWriter{r.Config.Logger, bytes.NewBuffer(nil)} + + if r.HTTPResponse == nil { + lw.Logger.Log(fmt.Sprintf(logRespErrMsg, + r.ClientInfo.ServiceName, r.Operation.Name, "request's HTTPResponse is nil")) + return + } + + logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) + if logBody { + r.HTTPResponse.Body = &teeReaderCloser{ + Reader: io.TeeReader(r.HTTPResponse.Body, lw), + Source: r.HTTPResponse.Body, + } + } + + handlerFn := func(req *request.Request) { + b, err := httputil.DumpResponse(req.HTTPResponse, false) + if err != nil { + lw.Logger.Log(fmt.Sprintf(logRespErrMsg, + req.ClientInfo.ServiceName, req.Operation.Name, err)) + return + } + + lw.Logger.Log(fmt.Sprintf(logRespMsg, + req.ClientInfo.ServiceName, req.Operation.Name, string(b))) + + if logBody { + b, err := ioutil.ReadAll(lw.buf) + if err != nil { + lw.Logger.Log(fmt.Sprintf(logRespErrMsg, + req.ClientInfo.ServiceName, req.Operation.Name, err)) + return + } + + lw.Logger.Log(string(b)) + } + } + + const handlerName = "awsdk.client.LogResponse.ResponseBody" + + r.Handlers.Unmarshal.SetBackNamed(request.NamedHandler{ + Name: handlerName, Fn: handlerFn, + }) + r.Handlers.UnmarshalError.SetBackNamed(request.NamedHandler{ + Name: handlerName, Fn: handlerFn, + }) +} + +// LogHTTPResponseHeaderHandler is a SDK request handler to log the HTTP +// response received from a service. Will only log the HTTP response's headers. +// The response payload will not be read. +var LogHTTPResponseHeaderHandler = request.NamedHandler{ + Name: "awssdk.client.LogResponseHeader", + Fn: logResponseHeader, +} + +func logResponseHeader(r *request.Request) { + if r.Config.Logger == nil { + return + } + + b, err := httputil.DumpResponse(r.HTTPResponse, false) + if err != nil { + r.Config.Logger.Log(fmt.Sprintf(logRespErrMsg, + r.ClientInfo.ServiceName, r.Operation.Name, err)) + return + } + + r.Config.Logger.Log(fmt.Sprintf(logRespMsg, + r.ClientInfo.ServiceName, r.Operation.Name, string(b))) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go b/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go new file mode 100644 index 000000000..920e9fddf --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go @@ -0,0 +1,13 @@ +package metadata + +// ClientInfo wraps immutable data from the client.Client structure. +type ClientInfo struct { + ServiceName string + ServiceID string + APIVersion string + Endpoint string + SigningName string + SigningRegion string + JSONVersion string + TargetPrefix string +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/config.go b/vendor/github.com/aws/aws-sdk-go/aws/config.go new file mode 100644 index 000000000..10634d173 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/config.go @@ -0,0 +1,536 @@ +package aws + +import ( + "net/http" + "time" + + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/endpoints" +) + +// UseServiceDefaultRetries instructs the config to use the service's own +// default number of retries. This will be the default action if +// Config.MaxRetries is nil also. +const UseServiceDefaultRetries = -1 + +// RequestRetryer is an alias for a type that implements the request.Retryer +// interface. +type RequestRetryer interface{} + +// A Config provides service configuration for service clients. 
By default, +// all clients will use the defaults.DefaultConfig structure. +// +// // Create Session with MaxRetry configuration to be shared by multiple +// // service clients. +// sess := session.Must(session.NewSession(&aws.Config{ +// MaxRetries: aws.Int(3), +// })) +// +// // Create S3 service client with a specific Region. +// svc := s3.New(sess, &aws.Config{ +// Region: aws.String("us-west-2"), +// }) +type Config struct { + // Enables verbose error printing of all credential chain errors. + // Should be used when wanting to see all errors while attempting to + // retrieve credentials. + CredentialsChainVerboseErrors *bool + + // The credentials object to use when signing requests. Defaults to a + // chain of credential providers to search for credentials in environment + // variables, shared credential file, and EC2 Instance Roles. + Credentials *credentials.Credentials + + // An optional endpoint URL (hostname only or fully qualified URI) + // that overrides the default generated endpoint for a client. Set this + // to `""` to use the default generated endpoint. + // + // Note: You must still provide a `Region` value when specifying an + // endpoint for a client. + Endpoint *string + + // The resolver to use for looking up endpoints for AWS service clients + // to use based on region. + EndpointResolver endpoints.Resolver + + // EnforceShouldRetryCheck is used in the AfterRetryHandler to always call + // ShouldRetry regardless of whether or not if request.Retryable is set. + // This will utilize ShouldRetry method of custom retryers. If EnforceShouldRetryCheck + // is not set, then ShouldRetry will only be called if request.Retryable is nil. + // Proper handling of the request.Retryable field is important when setting this field. + EnforceShouldRetryCheck *bool + + // The region to send requests to. This parameter is required and must + // be configured globally or on a per-client basis unless otherwise + // noted. A full list of regions is found in the "Regions and Endpoints" + // document. + // + // See http://docs.aws.amazon.com/general/latest/gr/rande.html for AWS + // Regions and Endpoints. + Region *string + + // Set this to `true` to disable SSL when sending requests. Defaults + // to `false`. + DisableSSL *bool + + // The HTTP client to use when sending requests. Defaults to + // `http.DefaultClient`. + HTTPClient *http.Client + + // An integer value representing the logging level. The default log level + // is zero (LogOff), which represents no logging. To enable logging set + // to a LogLevel Value. + LogLevel *LogLevelType + + // The logger writer interface to write logging messages to. Defaults to + // standard out. + Logger Logger + + // The maximum number of times that a request will be retried for failures. + // Defaults to -1, which defers the max retry setting to the service + // specific configuration. + MaxRetries *int + + // Retryer guides how HTTP requests should be retried in case of + // recoverable failures. + // + // When nil or the value does not implement the request.Retryer interface, + // the client.DefaultRetryer will be used. + // + // When both Retryer and MaxRetries are non-nil, the former is used and + // the latter ignored. 
+	//
+	// To set the Retryer field in a type-safe manner and with chaining, use
+	// the request.WithRetryer helper function:
+	//
+	//     cfg := request.WithRetryer(aws.NewConfig(), myRetryer)
+	//
+	Retryer RequestRetryer
+
+	// Disables semantic parameter validation, which validates input for
+	// missing required fields and/or other semantic request input errors.
+	DisableParamValidation *bool
+
+	// Disables the computation of request and response checksums, e.g.,
+	// CRC32 checksums in Amazon DynamoDB.
+	DisableComputeChecksums *bool
+
+	// Set this to `true` to force the request to use path-style addressing,
+	// i.e., `http://s3.amazonaws.com/BUCKET/KEY`. By default, the S3 client
+	// will use virtual hosted bucket addressing when possible
+	// (`http://BUCKET.s3.amazonaws.com/KEY`).
+	//
+	// Note: This configuration option is specific to the Amazon S3 service.
+	//
+	// See http://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html
+	// for Amazon S3: Virtual Hosting of Buckets.
+	S3ForcePathStyle *bool
+
+	// Set this to `true` to disable the SDK adding the `Expect: 100-Continue`
+	// header to PUT requests over 2MB of content. 100-Continue instructs the
+	// HTTP client not to send the body until the service responds with a
+	// `continue` status. This is useful to prevent sending the request body
+	// until after the request is authenticated and validated.
+	//
+	// http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html
+	//
+	// 100-Continue is only enabled for Go 1.6 and above. See `http.Transport`'s
+	// `ExpectContinueTimeout` for information on adjusting the continue wait
+	// timeout. https://golang.org/pkg/net/http/#Transport
+	//
+	// You should use this flag to disable 100-Continue if you experience issues
+	// with proxies or third-party S3 compatible services.
+	S3Disable100Continue *bool
+
+	// Set this to `true` to enable the S3 Accelerate feature. All operations
+	// compatible with S3 Accelerate will use the accelerate endpoint for
+	// requests. Requests not compatible will fall back to normal S3 requests.
+	//
+	// The bucket must be enabled for accelerate before it can be used with the
+	// S3 client. If the bucket is not enabled for accelerate an error will be
+	// returned. The bucket name must also be DNS compatible to work with
+	// accelerate.
+	S3UseAccelerate *bool
+
+	// The S3DisableContentMD5Validation config option is temporarily disabled
+	// for S3 GetObject API calls (see issue #1837).
+	//
+	// Set this to `true` to disable the S3 service client from automatically
+	// adding the ContentMD5 to S3 Object Put and Upload API calls. This option
+	// will also disable the SDK from performing object ContentMD5 validation
+	// on GetObject API calls.
+	S3DisableContentMD5Validation *bool
+
+	// Set this to `true` to disable the EC2Metadata client from overriding the
+	// default http.Client's Timeout. This is helpful if you do not want the
+	// EC2Metadata client to create a new http.Client. This option is only
+	// meaningful if you're not already using a custom HTTP client with the
+	// SDK. Enabled by default.
+	//
+	// Must be set and provided to session.NewSession() in order to disable
+	// the EC2Metadata client from overriding the timeout for the default
+	// credentials chain.
+	//
+	// Example:
+	//     sess := session.Must(session.NewSession(aws.NewConfig()
+	//         .WithEC2MetadataDisableTimeoutOverride(true)))
+	//
+	//     svc := s3.New(sess)
+	//
+	EC2MetadataDisableTimeoutOverride *bool
+
+	// Instructs the endpoint to be generated for a service client to
+	// be the dual stack endpoint. The dual stack endpoint will support
+	// both IPv4 and IPv6 addressing.
+	//
+	// Setting this for a service which does not support dual stack will fail
+	// to make requests. It is not recommended to set this value on the session
+	// as it will apply to all service clients created with the session, even
+	// services which don't support dual stack endpoints.
+	//
+	// If the Endpoint config value is also provided the UseDualStack flag
+	// will be ignored.
+	//
+	// Example:
+	//
+	//     sess := session.Must(session.NewSession())
+	//
+	//     svc := s3.New(sess, &aws.Config{
+	//         UseDualStack: aws.Bool(true),
+	//     })
+	UseDualStack *bool
+
+	// SleepDelay is an override for the func the SDK will call when sleeping
+	// during the lifecycle of a request. Specifically this will be used for
+	// request delays. This value should only be used for testing. To adjust
+	// the delay of a request see the aws/client.DefaultRetryer and
+	// aws/request.Retryer.
+	//
+	// SleepDelay will prevent any Context from being used for canceling the
+	// retry delay of an API operation. It is recommended to not use SleepDelay
+	// at all and specify a Retryer instead.
+	SleepDelay func(time.Duration)
+
+	// DisableRestProtocolURICleaning will not clean the URL path when making
+	// rest protocol requests. Defaults to false. This would only be used for
+	// empty directory names in S3 requests.
+	//
+	// Example:
+	//     sess := session.Must(session.NewSession(&aws.Config{
+	//         DisableRestProtocolURICleaning: aws.Bool(true),
+	//     }))
+	//
+	//     svc := s3.New(sess)
+	//     out, err := svc.GetObject(&s3.GetObjectInput{
+	//         Bucket: aws.String("bucketname"),
+	//         Key:    aws.String("//foo//bar//moo"),
+	//     })
+	DisableRestProtocolURICleaning *bool
+
+	// EnableEndpointDiscovery will allow for endpoint discovery on operations
+	// that have the definition in its model. By default, endpoint discovery
+	// is off.
+	//
+	// Example:
+	//     sess := session.Must(session.NewSession(&aws.Config{
+	//         EnableEndpointDiscovery: aws.Bool(true),
+	//     }))
+	//
+	//     svc := s3.New(sess)
+	//     out, err := svc.GetObject(&s3.GetObjectInput{
+	//         Bucket: aws.String("bucketname"),
+	//         Key:    aws.String("/foo/bar/moo"),
+	//     })
+	EnableEndpointDiscovery *bool
+
+	// DisableEndpointHostPrefix will disable the SDK's behavior of prefixing
+	// request endpoint hosts with modeled information.
+	//
+	// Disabling this feature is useful when you want to use local endpoints
+	// for testing that do not support the modeled host prefix pattern.
+	DisableEndpointHostPrefix *bool
+}
+
+// NewConfig returns a new Config pointer that can be chained with builder
+// methods to set multiple configuration values inline without using pointers.
+//
+//     // Create Session with MaxRetry configuration to be shared by multiple
+//     // service clients.
+//     sess := session.Must(session.NewSession(aws.NewConfig().
+//         WithMaxRetries(3),
+//     ))
+//
+//     // Create S3 service client with a specific Region.
+//     svc := s3.New(sess, aws.NewConfig().
+//         WithRegion("us-west-2"),
+//     )
+func NewConfig() *Config {
+	return &Config{}
+}
+
+// WithCredentialsChainVerboseErrors sets the config's verbose credential
+// chain errors boolean and returns a Config pointer for chaining.
+func (c *Config) WithCredentialsChainVerboseErrors(verboseErrs bool) *Config { + c.CredentialsChainVerboseErrors = &verboseErrs + return c +} + +// WithCredentials sets a config Credentials value returning a Config pointer +// for chaining. +func (c *Config) WithCredentials(creds *credentials.Credentials) *Config { + c.Credentials = creds + return c +} + +// WithEndpoint sets a config Endpoint value returning a Config pointer for +// chaining. +func (c *Config) WithEndpoint(endpoint string) *Config { + c.Endpoint = &endpoint + return c +} + +// WithEndpointResolver sets a config EndpointResolver value returning a +// Config pointer for chaining. +func (c *Config) WithEndpointResolver(resolver endpoints.Resolver) *Config { + c.EndpointResolver = resolver + return c +} + +// WithRegion sets a config Region value returning a Config pointer for +// chaining. +func (c *Config) WithRegion(region string) *Config { + c.Region = ®ion + return c +} + +// WithDisableSSL sets a config DisableSSL value returning a Config pointer +// for chaining. +func (c *Config) WithDisableSSL(disable bool) *Config { + c.DisableSSL = &disable + return c +} + +// WithHTTPClient sets a config HTTPClient value returning a Config pointer +// for chaining. +func (c *Config) WithHTTPClient(client *http.Client) *Config { + c.HTTPClient = client + return c +} + +// WithMaxRetries sets a config MaxRetries value returning a Config pointer +// for chaining. +func (c *Config) WithMaxRetries(max int) *Config { + c.MaxRetries = &max + return c +} + +// WithDisableParamValidation sets a config DisableParamValidation value +// returning a Config pointer for chaining. +func (c *Config) WithDisableParamValidation(disable bool) *Config { + c.DisableParamValidation = &disable + return c +} + +// WithDisableComputeChecksums sets a config DisableComputeChecksums value +// returning a Config pointer for chaining. +func (c *Config) WithDisableComputeChecksums(disable bool) *Config { + c.DisableComputeChecksums = &disable + return c +} + +// WithLogLevel sets a config LogLevel value returning a Config pointer for +// chaining. +func (c *Config) WithLogLevel(level LogLevelType) *Config { + c.LogLevel = &level + return c +} + +// WithLogger sets a config Logger value returning a Config pointer for +// chaining. +func (c *Config) WithLogger(logger Logger) *Config { + c.Logger = logger + return c +} + +// WithS3ForcePathStyle sets a config S3ForcePathStyle value returning a Config +// pointer for chaining. +func (c *Config) WithS3ForcePathStyle(force bool) *Config { + c.S3ForcePathStyle = &force + return c +} + +// WithS3Disable100Continue sets a config S3Disable100Continue value returning +// a Config pointer for chaining. +func (c *Config) WithS3Disable100Continue(disable bool) *Config { + c.S3Disable100Continue = &disable + return c +} + +// WithS3UseAccelerate sets a config S3UseAccelerate value returning a Config +// pointer for chaining. +func (c *Config) WithS3UseAccelerate(enable bool) *Config { + c.S3UseAccelerate = &enable + return c + +} + +// WithS3DisableContentMD5Validation sets a config +// S3DisableContentMD5Validation value returning a Config pointer for chaining. +func (c *Config) WithS3DisableContentMD5Validation(enable bool) *Config { + c.S3DisableContentMD5Validation = &enable + return c + +} + +// WithUseDualStack sets a config UseDualStack value returning a Config +// pointer for chaining. 
+func (c *Config) WithUseDualStack(enable bool) *Config { + c.UseDualStack = &enable + return c +} + +// WithEC2MetadataDisableTimeoutOverride sets a config EC2MetadataDisableTimeoutOverride value +// returning a Config pointer for chaining. +func (c *Config) WithEC2MetadataDisableTimeoutOverride(enable bool) *Config { + c.EC2MetadataDisableTimeoutOverride = &enable + return c +} + +// WithSleepDelay overrides the function used to sleep while waiting for the +// next retry. Defaults to time.Sleep. +func (c *Config) WithSleepDelay(fn func(time.Duration)) *Config { + c.SleepDelay = fn + return c +} + +// WithEndpointDiscovery will set whether or not to use endpoint discovery. +func (c *Config) WithEndpointDiscovery(t bool) *Config { + c.EnableEndpointDiscovery = &t + return c +} + +// WithDisableEndpointHostPrefix will set whether or not to use modeled host prefix +// when making requests. +func (c *Config) WithDisableEndpointHostPrefix(t bool) *Config { + c.DisableEndpointHostPrefix = &t + return c +} + +// MergeIn merges the passed in configs into the existing config object. +func (c *Config) MergeIn(cfgs ...*Config) { + for _, other := range cfgs { + mergeInConfig(c, other) + } +} + +func mergeInConfig(dst *Config, other *Config) { + if other == nil { + return + } + + if other.CredentialsChainVerboseErrors != nil { + dst.CredentialsChainVerboseErrors = other.CredentialsChainVerboseErrors + } + + if other.Credentials != nil { + dst.Credentials = other.Credentials + } + + if other.Endpoint != nil { + dst.Endpoint = other.Endpoint + } + + if other.EndpointResolver != nil { + dst.EndpointResolver = other.EndpointResolver + } + + if other.Region != nil { + dst.Region = other.Region + } + + if other.DisableSSL != nil { + dst.DisableSSL = other.DisableSSL + } + + if other.HTTPClient != nil { + dst.HTTPClient = other.HTTPClient + } + + if other.LogLevel != nil { + dst.LogLevel = other.LogLevel + } + + if other.Logger != nil { + dst.Logger = other.Logger + } + + if other.MaxRetries != nil { + dst.MaxRetries = other.MaxRetries + } + + if other.Retryer != nil { + dst.Retryer = other.Retryer + } + + if other.DisableParamValidation != nil { + dst.DisableParamValidation = other.DisableParamValidation + } + + if other.DisableComputeChecksums != nil { + dst.DisableComputeChecksums = other.DisableComputeChecksums + } + + if other.S3ForcePathStyle != nil { + dst.S3ForcePathStyle = other.S3ForcePathStyle + } + + if other.S3Disable100Continue != nil { + dst.S3Disable100Continue = other.S3Disable100Continue + } + + if other.S3UseAccelerate != nil { + dst.S3UseAccelerate = other.S3UseAccelerate + } + + if other.S3DisableContentMD5Validation != nil { + dst.S3DisableContentMD5Validation = other.S3DisableContentMD5Validation + } + + if other.UseDualStack != nil { + dst.UseDualStack = other.UseDualStack + } + + if other.EC2MetadataDisableTimeoutOverride != nil { + dst.EC2MetadataDisableTimeoutOverride = other.EC2MetadataDisableTimeoutOverride + } + + if other.SleepDelay != nil { + dst.SleepDelay = other.SleepDelay + } + + if other.DisableRestProtocolURICleaning != nil { + dst.DisableRestProtocolURICleaning = other.DisableRestProtocolURICleaning + } + + if other.EnforceShouldRetryCheck != nil { + dst.EnforceShouldRetryCheck = other.EnforceShouldRetryCheck + } + + if other.EnableEndpointDiscovery != nil { + dst.EnableEndpointDiscovery = other.EnableEndpointDiscovery + } + + if other.DisableEndpointHostPrefix != nil { + dst.DisableEndpointHostPrefix = other.DisableEndpointHostPrefix + } +} + +// Copy will return a 
shallow copy of the Config object. If any additional +// configurations are provided they will be merged into the new config returned. +func (c *Config) Copy(cfgs ...*Config) *Config { + dst := &Config{} + dst.MergeIn(c) + + for _, cfg := range cfgs { + dst.MergeIn(cfg) + } + + return dst +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_1_5.go b/vendor/github.com/aws/aws-sdk-go/aws/context_1_5.go new file mode 100644 index 000000000..2866f9a7f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/context_1_5.go @@ -0,0 +1,37 @@ +// +build !go1.9 + +package aws + +import "time" + +// Context is an copy of the Go v1.7 stdlib's context.Context interface. +// It is represented as a SDK interface to enable you to use the "WithContext" +// API methods with Go v1.6 and a Context type such as golang.org/x/net/context. +// +// See https://golang.org/pkg/context on how to use contexts. +type Context interface { + // Deadline returns the time when work done on behalf of this context + // should be canceled. Deadline returns ok==false when no deadline is + // set. Successive calls to Deadline return the same results. + Deadline() (deadline time.Time, ok bool) + + // Done returns a channel that's closed when work done on behalf of this + // context should be canceled. Done may return nil if this context can + // never be canceled. Successive calls to Done return the same value. + Done() <-chan struct{} + + // Err returns a non-nil error value after Done is closed. Err returns + // Canceled if the context was canceled or DeadlineExceeded if the + // context's deadline passed. No other values for Err are defined. + // After Done is closed, successive calls to Err return the same value. + Err() error + + // Value returns the value associated with this context for key, or nil + // if no value is associated with key. Successive calls to Value with + // the same key returns the same result. + // + // Use context values only for request-scoped data that transits + // processes and API boundaries, not for passing optional parameters to + // functions. + Value(key interface{}) interface{} +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_1_9.go b/vendor/github.com/aws/aws-sdk-go/aws/context_1_9.go new file mode 100644 index 000000000..3718b26e1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/context_1_9.go @@ -0,0 +1,11 @@ +// +build go1.9 + +package aws + +import "context" + +// Context is an alias of the Go stdlib's context.Context interface. +// It can be used within the SDK's API operation "WithContext" methods. +// +// See https://golang.org/pkg/context on how to use contexts. +type Context = context.Context diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_5.go b/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_5.go new file mode 100644 index 000000000..66c5945db --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_5.go @@ -0,0 +1,56 @@ +// +build !go1.7 + +package aws + +import "time" + +// An emptyCtx is a copy of the Go 1.7 context.emptyCtx type. This is copied to +// provide a 1.6 and 1.5 safe version of context that is compatible with Go +// 1.7's Context. +// +// An emptyCtx is never canceled, has no values, and has no deadline. It is not +// struct{}, since vars of this type must have distinct addresses. 
+type emptyCtx int + +func (*emptyCtx) Deadline() (deadline time.Time, ok bool) { + return +} + +func (*emptyCtx) Done() <-chan struct{} { + return nil +} + +func (*emptyCtx) Err() error { + return nil +} + +func (*emptyCtx) Value(key interface{}) interface{} { + return nil +} + +func (e *emptyCtx) String() string { + switch e { + case backgroundCtx: + return "aws.BackgroundContext" + } + return "unknown empty Context" +} + +var ( + backgroundCtx = new(emptyCtx) +) + +// BackgroundContext returns a context that will never be canceled, has no +// values, and no deadline. This context is used by the SDK to provide +// backwards compatibility with non-context API operations and functionality. +// +// Go 1.6 and before: +// This context function is equivalent to context.Background in the Go stdlib. +// +// Go 1.7 and later: +// The context returned will be the value returned by context.Background() +// +// See https://golang.org/pkg/context for more information on Contexts. +func BackgroundContext() Context { + return backgroundCtx +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_7.go b/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_7.go new file mode 100644 index 000000000..9c29f29af --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_7.go @@ -0,0 +1,20 @@ +// +build go1.7 + +package aws + +import "context" + +// BackgroundContext returns a context that will never be canceled, has no +// values, and no deadline. This context is used by the SDK to provide +// backwards compatibility with non-context API operations and functionality. +// +// Go 1.6 and before: +// This context function is equivalent to context.Background in the Go stdlib. +// +// Go 1.7 and later: +// The context returned will be the value returned by context.Background() +// +// See https://golang.org/pkg/context for more information on Contexts. +func BackgroundContext() Context { + return context.Background() +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_sleep.go b/vendor/github.com/aws/aws-sdk-go/aws/context_sleep.go new file mode 100644 index 000000000..304fd1561 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/context_sleep.go @@ -0,0 +1,24 @@ +package aws + +import ( + "time" +) + +// SleepWithContext will wait for the timer duration to expire, or the context +// is canceled. Which ever happens first. If the context is canceled the Context's +// error will be returned. +// +// Expects Context to always return a non-nil error if the Done channel is closed. +func SleepWithContext(ctx Context, dur time.Duration) error { + t := time.NewTimer(dur) + defer t.Stop() + + select { + case <-t.C: + break + case <-ctx.Done(): + return ctx.Err() + } + + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go b/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go new file mode 100644 index 000000000..ff5d58e06 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go @@ -0,0 +1,387 @@ +package aws + +import "time" + +// String returns a pointer to the string value passed in. +func String(v string) *string { + return &v +} + +// StringValue returns the value of the string pointer passed in or +// "" if the pointer is nil. 
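+//
+// The String/StringValue pair round-trips values and is nil-safe, e.g.:
+//
+//     s := aws.StringValue(aws.String("hello")) // "hello"
+//     z := aws.StringValue(nil)                 // ""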
+func StringValue(v *string) string { + if v != nil { + return *v + } + return "" +} + +// StringSlice converts a slice of string values into a slice of +// string pointers +func StringSlice(src []string) []*string { + dst := make([]*string, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// StringValueSlice converts a slice of string pointers into a slice of +// string values +func StringValueSlice(src []*string) []string { + dst := make([]string, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// StringMap converts a string map of string values into a string +// map of string pointers +func StringMap(src map[string]string) map[string]*string { + dst := make(map[string]*string) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// StringValueMap converts a string map of string pointers into a string +// map of string values +func StringValueMap(src map[string]*string) map[string]string { + dst := make(map[string]string) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Bool returns a pointer to the bool value passed in. +func Bool(v bool) *bool { + return &v +} + +// BoolValue returns the value of the bool pointer passed in or +// false if the pointer is nil. +func BoolValue(v *bool) bool { + if v != nil { + return *v + } + return false +} + +// BoolSlice converts a slice of bool values into a slice of +// bool pointers +func BoolSlice(src []bool) []*bool { + dst := make([]*bool, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// BoolValueSlice converts a slice of bool pointers into a slice of +// bool values +func BoolValueSlice(src []*bool) []bool { + dst := make([]bool, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// BoolMap converts a string map of bool values into a string +// map of bool pointers +func BoolMap(src map[string]bool) map[string]*bool { + dst := make(map[string]*bool) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// BoolValueMap converts a string map of bool pointers into a string +// map of bool values +func BoolValueMap(src map[string]*bool) map[string]bool { + dst := make(map[string]bool) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Int returns a pointer to the int value passed in. +func Int(v int) *int { + return &v +} + +// IntValue returns the value of the int pointer passed in or +// 0 if the pointer is nil. 
+func IntValue(v *int) int { + if v != nil { + return *v + } + return 0 +} + +// IntSlice converts a slice of int values into a slice of +// int pointers +func IntSlice(src []int) []*int { + dst := make([]*int, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// IntValueSlice converts a slice of int pointers into a slice of +// int values +func IntValueSlice(src []*int) []int { + dst := make([]int, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// IntMap converts a string map of int values into a string +// map of int pointers +func IntMap(src map[string]int) map[string]*int { + dst := make(map[string]*int) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// IntValueMap converts a string map of int pointers into a string +// map of int values +func IntValueMap(src map[string]*int) map[string]int { + dst := make(map[string]int) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Int64 returns a pointer to the int64 value passed in. +func Int64(v int64) *int64 { + return &v +} + +// Int64Value returns the value of the int64 pointer passed in or +// 0 if the pointer is nil. +func Int64Value(v *int64) int64 { + if v != nil { + return *v + } + return 0 +} + +// Int64Slice converts a slice of int64 values into a slice of +// int64 pointers +func Int64Slice(src []int64) []*int64 { + dst := make([]*int64, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Int64ValueSlice converts a slice of int64 pointers into a slice of +// int64 values +func Int64ValueSlice(src []*int64) []int64 { + dst := make([]int64, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Int64Map converts a string map of int64 values into a string +// map of int64 pointers +func Int64Map(src map[string]int64) map[string]*int64 { + dst := make(map[string]*int64) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Int64ValueMap converts a string map of int64 pointers into a string +// map of int64 values +func Int64ValueMap(src map[string]*int64) map[string]int64 { + dst := make(map[string]int64) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Float64 returns a pointer to the float64 value passed in. +func Float64(v float64) *float64 { + return &v +} + +// Float64Value returns the value of the float64 pointer passed in or +// 0 if the pointer is nil. 
+func Float64Value(v *float64) float64 {
+	if v != nil {
+		return *v
+	}
+	return 0
+}
+
+// Float64Slice converts a slice of float64 values into a slice of
+// float64 pointers
+func Float64Slice(src []float64) []*float64 {
+	dst := make([]*float64, len(src))
+	for i := 0; i < len(src); i++ {
+		dst[i] = &(src[i])
+	}
+	return dst
+}
+
+// Float64ValueSlice converts a slice of float64 pointers into a slice of
+// float64 values
+func Float64ValueSlice(src []*float64) []float64 {
+	dst := make([]float64, len(src))
+	for i := 0; i < len(src); i++ {
+		if src[i] != nil {
+			dst[i] = *(src[i])
+		}
+	}
+	return dst
+}
+
+// Float64Map converts a string map of float64 values into a string
+// map of float64 pointers
+func Float64Map(src map[string]float64) map[string]*float64 {
+	dst := make(map[string]*float64)
+	for k, val := range src {
+		v := val
+		dst[k] = &v
+	}
+	return dst
+}
+
+// Float64ValueMap converts a string map of float64 pointers into a string
+// map of float64 values
+func Float64ValueMap(src map[string]*float64) map[string]float64 {
+	dst := make(map[string]float64)
+	for k, val := range src {
+		if val != nil {
+			dst[k] = *val
+		}
+	}
+	return dst
+}
+
+// Time returns a pointer to the time.Time value passed in.
+func Time(v time.Time) *time.Time {
+	return &v
+}
+
+// TimeValue returns the value of the time.Time pointer passed in or
+// time.Time{} if the pointer is nil.
+func TimeValue(v *time.Time) time.Time {
+	if v != nil {
+		return *v
+	}
+	return time.Time{}
+}
+
+// SecondsTimeValue converts an int64 pointer to a time.Time value
+// representing seconds since Epoch or time.Time{} if the pointer is nil.
+// Note that the value is divided by 1000 before conversion, so the input
+// is effectively treated as milliseconds and sub-second precision is lost.
+func SecondsTimeValue(v *int64) time.Time {
+	if v != nil {
+		return time.Unix((*v / 1000), 0)
+	}
+	return time.Time{}
+}
+
+// MillisecondsTimeValue converts an int64 pointer to a time.Time value
+// representing milliseconds since Epoch or time.Time{} if the pointer is nil.
+func MillisecondsTimeValue(v *int64) time.Time {
+	if v != nil {
+		return time.Unix(0, (*v * 1000000))
+	}
+	return time.Time{}
+}
+
+// TimeUnixMilli returns a Unix timestamp in milliseconds from "January 1, 1970 UTC".
+// The result is undefined if the Unix time cannot be represented by an int64.
+// In particular, the result of calling TimeUnixMilli on a zero Time is undefined.
+//
+// This utility is useful for service APIs such as CloudWatch Logs which require
+// their unix time values to be in milliseconds.
+//
+// See Go stdlib https://golang.org/pkg/time/#Time.UnixNano for more information.
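+//
+// Example (editorial sketch, values illustrative):
+//
+//	ms := aws.TimeUnixMilli(time.Unix(1, int64(200*time.Millisecond)))
+//	// ms == 1200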
+func TimeUnixMilli(t time.Time) int64 { + return t.UnixNano() / int64(time.Millisecond/time.Nanosecond) +} + +// TimeSlice converts a slice of time.Time values into a slice of +// time.Time pointers +func TimeSlice(src []time.Time) []*time.Time { + dst := make([]*time.Time, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// TimeValueSlice converts a slice of time.Time pointers into a slice of +// time.Time values +func TimeValueSlice(src []*time.Time) []time.Time { + dst := make([]time.Time, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// TimeMap converts a string map of time.Time values into a string +// map of time.Time pointers +func TimeMap(src map[string]time.Time) map[string]*time.Time { + dst := make(map[string]*time.Time) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// TimeValueMap converts a string map of time.Time pointers into a string +// map of time.Time values +func TimeValueMap(src map[string]*time.Time) map[string]time.Time { + dst := make(map[string]time.Time) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go new file mode 100644 index 000000000..f8853d78a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go @@ -0,0 +1,228 @@ +package corehandlers + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "regexp" + "strconv" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" +) + +// Interface for matching types which also have a Len method. +type lener interface { + Len() int +} + +// BuildContentLengthHandler builds the content length of a request based on the body, +// or will use the HTTPRequest.Header's "Content-Length" if defined. If unable +// to determine request body length and no "Content-Length" was specified it will panic. +// +// The Content-Length will only be added to the request if the length of the body +// is greater than 0. If the body is empty or the current `Content-Length` +// header is <= 0, the header will also be stripped. +var BuildContentLengthHandler = request.NamedHandler{Name: "core.BuildContentLengthHandler", Fn: func(r *request.Request) { + var length int64 + + if slength := r.HTTPRequest.Header.Get("Content-Length"); slength != "" { + length, _ = strconv.ParseInt(slength, 10, 64) + } else { + if r.Body != nil { + var err error + length, err = aws.SeekerLen(r.Body) + if err != nil { + r.Error = awserr.New(request.ErrCodeSerialization, "failed to get request body's length", err) + return + } + } + } + + if length > 0 { + r.HTTPRequest.ContentLength = length + r.HTTPRequest.Header.Set("Content-Length", fmt.Sprintf("%d", length)) + } else { + r.HTTPRequest.ContentLength = 0 + r.HTTPRequest.Header.Del("Content-Length") + } +}} + +var reStatusCode = regexp.MustCompile(`^(\d{3})`) + +// ValidateReqSigHandler is a request handler to ensure that the request's +// signature doesn't expire before it is sent. This can happen when a request +// is built and signed significantly before it is sent. Or significant delays +// occur when retrying requests that would cause the signature to expire. 
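+//
+// In the SDK's default handler chain this runs during the Send phase; a
+// hand-wired sketch (editorial, illustrative only; "handlers" is a
+// request.Handlers value):
+//
+//	handlers.Send.PushBackNamed(corehandlers.ValidateReqSigHandler)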
+var ValidateReqSigHandler = request.NamedHandler{ + Name: "core.ValidateReqSigHandler", + Fn: func(r *request.Request) { + // Unsigned requests are not signed + if r.Config.Credentials == credentials.AnonymousCredentials { + return + } + + signedTime := r.Time + if !r.LastSignedAt.IsZero() { + signedTime = r.LastSignedAt + } + + // 5 minutes to allow for some clock skew/delays in transmission. + // Would be improved with aws/aws-sdk-go#423 + if signedTime.Add(5 * time.Minute).After(time.Now()) { + return + } + + fmt.Println("request expired, resigning") + r.Sign() + }, +} + +// SendHandler is a request handler to send service request using HTTP client. +var SendHandler = request.NamedHandler{ + Name: "core.SendHandler", + Fn: func(r *request.Request) { + sender := sendFollowRedirects + if r.DisableFollowRedirects { + sender = sendWithoutFollowRedirects + } + + if request.NoBody == r.HTTPRequest.Body { + // Strip off the request body if the NoBody reader was used as a + // place holder for a request body. This prevents the SDK from + // making requests with a request body when it would be invalid + // to do so. + // + // Use a shallow copy of the http.Request to ensure the race condition + // of transport on Body will not trigger + reqOrig, reqCopy := r.HTTPRequest, *r.HTTPRequest + reqCopy.Body = nil + r.HTTPRequest = &reqCopy + defer func() { + r.HTTPRequest = reqOrig + }() + } + + var err error + r.HTTPResponse, err = sender(r) + if err != nil { + handleSendError(r, err) + } + }, +} + +func sendFollowRedirects(r *request.Request) (*http.Response, error) { + return r.Config.HTTPClient.Do(r.HTTPRequest) +} + +func sendWithoutFollowRedirects(r *request.Request) (*http.Response, error) { + transport := r.Config.HTTPClient.Transport + if transport == nil { + transport = http.DefaultTransport + } + + return transport.RoundTrip(r.HTTPRequest) +} + +func handleSendError(r *request.Request, err error) { + // Prevent leaking if an HTTPResponse was returned. Clean up + // the body. + if r.HTTPResponse != nil { + r.HTTPResponse.Body.Close() + } + // Capture the case where url.Error is returned for error processing + // response. e.g. 301 without location header comes back as string + // error and r.HTTPResponse is nil. Other URL redirect errors will + // comeback in a similar method. + if e, ok := err.(*url.Error); ok && e.Err != nil { + if s := reStatusCode.FindStringSubmatch(e.Err.Error()); s != nil { + code, _ := strconv.ParseInt(s[1], 10, 64) + r.HTTPResponse = &http.Response{ + StatusCode: int(code), + Status: http.StatusText(int(code)), + Body: ioutil.NopCloser(bytes.NewReader([]byte{})), + } + return + } + } + if r.HTTPResponse == nil { + // Add a dummy request response object to ensure the HTTPResponse + // value is consistent. + r.HTTPResponse = &http.Response{ + StatusCode: int(0), + Status: http.StatusText(int(0)), + Body: ioutil.NopCloser(bytes.NewReader([]byte{})), + } + } + // Catch all other request errors. + r.Error = awserr.New("RequestError", "send request failed", err) + r.Retryable = aws.Bool(true) // network errors are retryable + + // Override the error with a context canceled error, if that was canceled. + ctx := r.Context() + select { + case <-ctx.Done(): + r.Error = awserr.New(request.CanceledErrorCode, + "request context canceled", ctx.Err()) + r.Retryable = aws.Bool(false) + default: + } +} + +// ValidateResponseHandler is a request handler to validate service response. 
+var ValidateResponseHandler = request.NamedHandler{Name: "core.ValidateResponseHandler", Fn: func(r *request.Request) { + if r.HTTPResponse.StatusCode == 0 || r.HTTPResponse.StatusCode >= 300 { + // this may be replaced by an UnmarshalError handler + r.Error = awserr.New("UnknownError", "unknown error", nil) + } +}} + +// AfterRetryHandler performs final checks to determine if the request should +// be retried and how long to delay. +var AfterRetryHandler = request.NamedHandler{Name: "core.AfterRetryHandler", Fn: func(r *request.Request) { + // If one of the other handlers already set the retry state + // we don't want to override it based on the service's state + if r.Retryable == nil || aws.BoolValue(r.Config.EnforceShouldRetryCheck) { + r.Retryable = aws.Bool(r.ShouldRetry(r)) + } + + if r.WillRetry() { + r.RetryDelay = r.RetryRules(r) + + if sleepFn := r.Config.SleepDelay; sleepFn != nil { + // Support SleepDelay for backwards compatibility and testing + sleepFn(r.RetryDelay) + } else if err := aws.SleepWithContext(r.Context(), r.RetryDelay); err != nil { + r.Error = awserr.New(request.CanceledErrorCode, + "request context canceled", err) + r.Retryable = aws.Bool(false) + return + } + + // when the expired token exception occurs the credentials + // need to be expired locally so that the next request to + // get credentials will trigger a credentials refresh. + if r.IsErrorExpired() { + r.Config.Credentials.Expire() + } + + r.RetryCount++ + r.Error = nil + } +}} + +// ValidateEndpointHandler is a request handler to validate a request had the +// appropriate Region and Endpoint set. Will set r.Error if the endpoint or +// region is not valid. +var ValidateEndpointHandler = request.NamedHandler{Name: "core.ValidateEndpointHandler", Fn: func(r *request.Request) { + if r.ClientInfo.SigningRegion == "" && aws.StringValue(r.Config.Region) == "" { + r.Error = aws.ErrMissingRegion + } else if r.ClientInfo.Endpoint == "" { + r.Error = aws.ErrMissingEndpoint + } +}} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go new file mode 100644 index 000000000..7d50b1557 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go @@ -0,0 +1,17 @@ +package corehandlers + +import "github.com/aws/aws-sdk-go/aws/request" + +// ValidateParametersHandler is a request handler to validate the input parameters. +// Validating parameters only has meaning if done prior to the request being sent. +var ValidateParametersHandler = request.NamedHandler{Name: "core.ValidateParametersHandler", Fn: func(r *request.Request) { + if !r.ParamsFilled() { + return + } + + if v, ok := r.Params.(request.Validator); ok { + if err := v.Validate(); err != nil { + r.Error = err + } + } +}} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go new file mode 100644 index 000000000..ab69c7a6f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go @@ -0,0 +1,37 @@ +package corehandlers + +import ( + "os" + "runtime" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" +) + +// SDKVersionUserAgentHandler is a request handler for adding the SDK Version +// to the user agent. 
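+//
+// The resulting User-Agent value has roughly this shape (editorial note,
+// illustrative):
+//
+//	aws-sdk-go/<SDKVersion> (<go version>; <GOOS>; <GOARCH>)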
+var SDKVersionUserAgentHandler = request.NamedHandler{ + Name: "core.SDKVersionUserAgentHandler", + Fn: request.MakeAddToUserAgentHandler(aws.SDKName, aws.SDKVersion, + runtime.Version(), runtime.GOOS, runtime.GOARCH), +} + +const execEnvVar = `AWS_EXECUTION_ENV` +const execEnvUAKey = `exec-env` + +// AddHostExecEnvUserAgentHander is a request handler appending the SDK's +// execution environment to the user agent. +// +// If the environment variable AWS_EXECUTION_ENV is set, its value will be +// appended to the user agent string. +var AddHostExecEnvUserAgentHander = request.NamedHandler{ + Name: "core.AddHostExecEnvUserAgentHander", + Fn: func(r *request.Request) { + v := os.Getenv(execEnvVar) + if len(v) == 0 { + return + } + + request.AddToUserAgent(r, execEnvUAKey+"/"+v) + }, +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go new file mode 100644 index 000000000..3ad1e798d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go @@ -0,0 +1,100 @@ +package credentials + +import ( + "github.com/aws/aws-sdk-go/aws/awserr" +) + +var ( + // ErrNoValidProvidersFoundInChain Is returned when there are no valid + // providers in the ChainProvider. + // + // This has been deprecated. For verbose error messaging set + // aws.Config.CredentialsChainVerboseErrors to true. + ErrNoValidProvidersFoundInChain = awserr.New("NoCredentialProviders", + `no valid providers in chain. Deprecated. + For verbose messaging see aws.Config.CredentialsChainVerboseErrors`, + nil) +) + +// A ChainProvider will search for a provider which returns credentials +// and cache that provider until Retrieve is called again. +// +// The ChainProvider provides a way of chaining multiple providers together +// which will pick the first available using priority order of the Providers +// in the list. +// +// If none of the Providers retrieve valid credentials Value, ChainProvider's +// Retrieve() will return the error ErrNoValidProvidersFoundInChain. +// +// If a Provider is found which returns valid credentials Value ChainProvider +// will cache that Provider for all calls to IsExpired(), until Retrieve is +// called again. +// +// Example of ChainProvider to be used with an EnvProvider and EC2RoleProvider. +// In this example EnvProvider will first check if any credentials are available +// via the environment variables. If there are none ChainProvider will check +// the next Provider in the list, EC2RoleProvider in this case. If EC2RoleProvider +// does not return any credentials ChainProvider will return the error +// ErrNoValidProvidersFoundInChain +// +// creds := credentials.NewChainCredentials( +// []credentials.Provider{ +// &credentials.EnvProvider{}, +// &ec2rolecreds.EC2RoleProvider{ +// Client: ec2metadata.New(sess), +// }, +// }) +// +// // Usage of ChainCredentials with aws.Config +// svc := ec2.New(session.Must(session.NewSession(&aws.Config{ +// Credentials: creds, +// }))) +// +type ChainProvider struct { + Providers []Provider + curr Provider + VerboseErrors bool +} + +// NewChainCredentials returns a pointer to a new Credentials object +// wrapping a chain of providers. +func NewChainCredentials(providers []Provider) *Credentials { + return NewCredentials(&ChainProvider{ + Providers: append([]Provider{}, providers...), + }) +} + +// Retrieve returns the credentials value or error if no provider returned +// without error. 
+//
+// If a provider is found it will be cached and any calls to IsExpired()
+// will return the expired state of the cached provider.
+func (c *ChainProvider) Retrieve() (Value, error) {
+	var errs []error
+	for _, p := range c.Providers {
+		creds, err := p.Retrieve()
+		if err == nil {
+			c.curr = p
+			return creds, nil
+		}
+		errs = append(errs, err)
+	}
+	c.curr = nil
+
+	var err error
+	err = ErrNoValidProvidersFoundInChain
+	if c.VerboseErrors {
+		err = awserr.NewBatchError("NoCredentialProviders", "no valid providers in chain", errs)
+	}
+	return Value{}, err
+}
+
+// IsExpired will return the expired state of the currently cached provider
+// if there is one. If there is no current provider, true will be returned.
+func (c *ChainProvider) IsExpired() bool {
+	if c.curr != nil {
+		return c.curr.IsExpired()
+	}
+
+	return true
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go
new file mode 100644
index 000000000..83bbc311b
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go
@@ -0,0 +1,293 @@
+// Package credentials provides credential retrieval and management.
+//
+// The Credentials type is the primary method of getting access to and managing
+// credentials Values. Using dependency injection, retrieval of the credential
+// values is handled by an object which satisfies the Provider interface.
+//
+// By default Credentials.Get() will cache the successful result of a
+// Provider's Retrieve() until Provider.IsExpired() returns true, at which
+// point Credentials will call the Provider's Retrieve() to get a new credential Value.
+//
+// The Provider is responsible for determining when the credentials Value has expired.
+// It is also important to note that Credentials will always call Retrieve the
+// first time Credentials.Get() is called.
+//
+// Example of using the environment variable credentials.
+//
+//	creds := credentials.NewEnvCredentials()
+//
+//	// Retrieve the credentials value
+//	credValue, err := creds.Get()
+//	if err != nil {
+//	    // handle error
+//	}
+//
+// Example of forcing credentials to expire and be refreshed on the next Get().
+// This may be helpful to proactively expire credentials and refresh them sooner
+// than they would naturally expire on their own.
+//
+//	creds := credentials.NewCredentials(&ec2rolecreds.EC2RoleProvider{})
+//	creds.Expire()
+//	credsValue, err := creds.Get()
+//	// New credentials will be retrieved instead of from cache.
+//
+//
+// Custom Provider
+//
+// Each Provider built into this package also provides a helper method to generate
+// a Credentials pointer set up with the provider. To use a custom Provider just
+// create a type which satisfies the Provider interface and pass it to the
+// NewCredentials method.
+//
+//	type MyProvider struct{}
+//	func (m *MyProvider) Retrieve() (Value, error) {...}
+//	func (m *MyProvider) IsExpired() bool {...}
+//
+//	creds := credentials.NewCredentials(&MyProvider{})
+//	credValue, err := creds.Get()
+//
+package credentials
+
+import (
+	"fmt"
+	"sync"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+// AnonymousCredentials is an empty Credential object that can be used as
+// dummy placeholder credentials for requests that do not need to be signed.
+//
+// This Credentials can be used to configure a service to not sign requests
+// when making service API calls. For example, when accessing public
+// s3 buckets.
+//
+//	svc := s3.New(session.Must(session.NewSession(&aws.Config{
+//	  Credentials: credentials.AnonymousCredentials,
+//	})))
+//	// Access public S3 buckets.
+var AnonymousCredentials = NewStaticCredentials("", "", "")
+
+// A Value is the AWS credentials value for individual credential fields.
+type Value struct {
+	// AWS Access key ID
+	AccessKeyID string
+
+	// AWS Secret Access Key
+	SecretAccessKey string
+
+	// AWS Session Token
+	SessionToken string
+
+	// Provider used to get credentials
+	ProviderName string
+}
+
+// A Provider is the interface for any component which will provide credentials
+// Value. A provider is required to manage its own Expired state, and what
+// being expired means.
+//
+// The Provider should not need to implement its own mutexes, because
+// that will be managed by Credentials.
+type Provider interface {
+	// Retrieve returns nil if it successfully retrieved the value.
+	// An error is returned if the value was not obtainable, or was empty.
+	Retrieve() (Value, error)
+
+	// IsExpired returns if the credentials are no longer valid, and need
+	// to be retrieved.
+	IsExpired() bool
+}
+
+// An Expirer is an interface that Providers can implement to expose the expiration
+// time, if known. If the Provider cannot accurately provide this info,
+// it should not implement this interface.
+type Expirer interface {
+	// The time at which the credentials are no longer valid
+	ExpiresAt() time.Time
+}
+
+// An ErrorProvider is a stub credentials provider that always returns an error.
+// It is used by the SDK when constructing a known provider is not possible
+// due to an error.
+type ErrorProvider struct {
+	// The error to be returned from Retrieve
+	Err error
+
+	// The provider name to set on the returned Value
+	ProviderName string
+}
+
+// Retrieve will always return the error that the ErrorProvider was created with.
+func (p ErrorProvider) Retrieve() (Value, error) {
+	return Value{ProviderName: p.ProviderName}, p.Err
+}
+
+// IsExpired will always return not expired.
+func (p ErrorProvider) IsExpired() bool {
+	return false
+}
+
+// An Expiry provides shared expiration logic to be used by credentials
+// providers to implement expiry functionality.
+//
+// The best method to use this struct is as an anonymous field within the
+// provider's struct.
+//
+// Example:
+//	type EC2RoleProvider struct {
+//	    Expiry
+//	    ...
+//	}
type Expiry struct {
+	// The date/time when to expire on
+	expiration time.Time
+
+	// If set will be used by IsExpired to determine the current time.
+	// Defaults to time.Now if CurrentTime is not set. Available for testing
+	// to be able to mock out the current time.
+	CurrentTime func() time.Time
+}
+
+// SetExpiration sets the expiration IsExpired will check when called.
+//
+// If window is greater than 0 the expiration time will be reduced by the
+// window value.
+//
+// Using a window is helpful to trigger credentials to expire sooner than
+// the expiration time given to ensure no requests are made with expired
+// tokens.
+func (e *Expiry) SetExpiration(expiration time.Time, window time.Duration) {
+	e.expiration = expiration
+	if window > 0 {
+		e.expiration = e.expiration.Add(-window)
+	}
+}
+
+// IsExpired returns if the credentials are expired.
+func (e *Expiry) IsExpired() bool { + curTime := e.CurrentTime + if curTime == nil { + curTime = time.Now + } + return e.expiration.Before(curTime()) +} + +// ExpiresAt returns the expiration time of the credential +func (e *Expiry) ExpiresAt() time.Time { + return e.expiration +} + +// A Credentials provides concurrency safe retrieval of AWS credentials Value. +// Credentials will cache the credentials value until they expire. Once the value +// expires the next Get will attempt to retrieve valid credentials. +// +// Credentials is safe to use across multiple goroutines and will manage the +// synchronous state so the Providers do not need to implement their own +// synchronization. +// +// The first Credentials.Get() will always call Provider.Retrieve() to get the +// first instance of the credentials Value. All calls to Get() after that +// will return the cached credentials Value until IsExpired() returns true. +type Credentials struct { + creds Value + forceRefresh bool + + m sync.RWMutex + + provider Provider +} + +// NewCredentials returns a pointer to a new Credentials with the provider set. +func NewCredentials(provider Provider) *Credentials { + return &Credentials{ + provider: provider, + forceRefresh: true, + } +} + +// Get returns the credentials value, or error if the credentials Value failed +// to be retrieved. +// +// Will return the cached credentials Value if it has not expired. If the +// credentials Value has expired the Provider's Retrieve() will be called +// to refresh the credentials. +// +// If Credentials.Expire() was called the credentials Value will be force +// expired, and the next call to Get() will cause them to be refreshed. +func (c *Credentials) Get() (Value, error) { + // Check the cached credentials first with just the read lock. + c.m.RLock() + if !c.isExpired() { + creds := c.creds + c.m.RUnlock() + return creds, nil + } + c.m.RUnlock() + + // Credentials are expired need to retrieve the credentials taking the full + // lock. + c.m.Lock() + defer c.m.Unlock() + + if c.isExpired() { + creds, err := c.provider.Retrieve() + if err != nil { + return Value{}, err + } + c.creds = creds + c.forceRefresh = false + } + + return c.creds, nil +} + +// Expire expires the credentials and forces them to be retrieved on the +// next call to Get(). +// +// This will override the Provider's expired state, and force Credentials +// to call the Provider's Retrieve(). +func (c *Credentials) Expire() { + c.m.Lock() + defer c.m.Unlock() + + c.forceRefresh = true +} + +// IsExpired returns if the credentials are no longer valid, and need +// to be retrieved. +// +// If the Credentials were forced to be expired with Expire() this will +// reflect that override. +func (c *Credentials) IsExpired() bool { + c.m.RLock() + defer c.m.RUnlock() + + return c.isExpired() +} + +// isExpired helper method wrapping the definition of expired credentials. +func (c *Credentials) isExpired() bool { + return c.forceRefresh || c.provider.IsExpired() +} + +// ExpiresAt provides access to the functionality of the Expirer interface of +// the underlying Provider, if it supports that interface. Otherwise, it returns +// an error. 
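+//
+// Example (editorial sketch):
+//
+//	if t, err := creds.ExpiresAt(); err == nil {
+//	    fmt.Println("credentials expire at", t)
+//	}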
+func (c *Credentials) ExpiresAt() (time.Time, error) { + c.m.RLock() + defer c.m.RUnlock() + + expirer, ok := c.provider.(Expirer) + if !ok { + return time.Time{}, awserr.New("ProviderNotExpirer", + fmt.Sprintf("provider %s does not support ExpiresAt()", c.creds.ProviderName), + nil) + } + if c.forceRefresh { + // set expiration time to the distant past + return time.Time{}, nil + } + return expirer.ExpiresAt(), nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go new file mode 100644 index 000000000..43d4ed386 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go @@ -0,0 +1,180 @@ +package ec2rolecreds + +import ( + "bufio" + "encoding/json" + "fmt" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/ec2metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/sdkuri" +) + +// ProviderName provides a name of EC2Role provider +const ProviderName = "EC2RoleProvider" + +// A EC2RoleProvider retrieves credentials from the EC2 service, and keeps track if +// those credentials are expired. +// +// Example how to configure the EC2RoleProvider with custom http Client, Endpoint +// or ExpiryWindow +// +// p := &ec2rolecreds.EC2RoleProvider{ +// // Pass in a custom timeout to be used when requesting +// // IAM EC2 Role credentials. +// Client: ec2metadata.New(sess, aws.Config{ +// HTTPClient: &http.Client{Timeout: 10 * time.Second}, +// }), +// +// // Do not use early expiry of credentials. If a non zero value is +// // specified the credentials will be expired early +// ExpiryWindow: 0, +// } +type EC2RoleProvider struct { + credentials.Expiry + + // Required EC2Metadata client to use when connecting to EC2 metadata service. + Client *ec2metadata.EC2Metadata + + // ExpiryWindow will allow the credentials to trigger refreshing prior to + // the credentials actually expiring. This is beneficial so race conditions + // with expiring credentials do not cause request to fail unexpectedly + // due to ExpiredTokenException exceptions. + // + // So a ExpiryWindow of 10s would cause calls to IsExpired() to return true + // 10 seconds before the credentials are actually expired. + // + // If ExpiryWindow is 0 or less it will be ignored. + ExpiryWindow time.Duration +} + +// NewCredentials returns a pointer to a new Credentials object wrapping +// the EC2RoleProvider. Takes a ConfigProvider to create a EC2Metadata client. +// The ConfigProvider is satisfied by the session.Session type. +func NewCredentials(c client.ConfigProvider, options ...func(*EC2RoleProvider)) *credentials.Credentials { + p := &EC2RoleProvider{ + Client: ec2metadata.New(c), + } + + for _, option := range options { + option(p) + } + + return credentials.NewCredentials(p) +} + +// NewCredentialsWithClient returns a pointer to a new Credentials object wrapping +// the EC2RoleProvider. Takes a EC2Metadata client to use when connecting to EC2 +// metadata service. +func NewCredentialsWithClient(client *ec2metadata.EC2Metadata, options ...func(*EC2RoleProvider)) *credentials.Credentials { + p := &EC2RoleProvider{ + Client: client, + } + + for _, option := range options { + option(p) + } + + return credentials.NewCredentials(p) +} + +// Retrieve retrieves credentials from the EC2 service. 
+// An error will be returned if the request fails, or if the desired
+// credentials cannot be extracted.
+func (m *EC2RoleProvider) Retrieve() (credentials.Value, error) {
+	credsList, err := requestCredList(m.Client)
+	if err != nil {
+		return credentials.Value{ProviderName: ProviderName}, err
+	}
+
+	if len(credsList) == 0 {
+		return credentials.Value{ProviderName: ProviderName}, awserr.New("EmptyEC2RoleList", "empty EC2 Role list", nil)
+	}
+	credsName := credsList[0]
+
+	roleCreds, err := requestCred(m.Client, credsName)
+	if err != nil {
+		return credentials.Value{ProviderName: ProviderName}, err
+	}
+
+	m.SetExpiration(roleCreds.Expiration, m.ExpiryWindow)
+
+	return credentials.Value{
+		AccessKeyID:     roleCreds.AccessKeyID,
+		SecretAccessKey: roleCreds.SecretAccessKey,
+		SessionToken:    roleCreds.Token,
+		ProviderName:    ProviderName,
+	}, nil
+}
+
+// An ec2RoleCredRespBody provides the shape for unmarshaling credential
+// request responses.
+type ec2RoleCredRespBody struct {
+	// Success State
+	Expiration      time.Time
+	AccessKeyID     string
+	SecretAccessKey string
+	Token           string
+
+	// Error state
+	Code    string
+	Message string
+}
+
+const iamSecurityCredsPath = "iam/security-credentials/"
+
+// requestCredList requests a list of credentials from the EC2 service.
+// An error is returned if there are no credentials, or if making or
+// receiving the request fails.
+func requestCredList(client *ec2metadata.EC2Metadata) ([]string, error) {
+	resp, err := client.GetMetadata(iamSecurityCredsPath)
+	if err != nil {
+		return nil, awserr.New("EC2RoleRequestError", "no EC2 instance role found", err)
+	}
+
+	credsList := []string{}
+	s := bufio.NewScanner(strings.NewReader(resp))
+	for s.Scan() {
+		credsList = append(credsList, s.Text())
+	}
+
+	if err := s.Err(); err != nil {
+		return nil, awserr.New(request.ErrCodeSerialization,
+			"failed to read EC2 instance role from metadata service", err)
+	}
+
+	return credsList, nil
+}
+
+// requestCred requests the credentials for a specific role from the EC2 service.
+//
+// If the credentials cannot be found, or there is an error reading the response,
+// an error will be returned.
+func requestCred(client *ec2metadata.EC2Metadata, credsName string) (ec2RoleCredRespBody, error) {
+	resp, err := client.GetMetadata(sdkuri.PathJoin(iamSecurityCredsPath, credsName))
+	if err != nil {
+		return ec2RoleCredRespBody{},
+			awserr.New("EC2RoleRequestError",
+				fmt.Sprintf("failed to get %s EC2 instance role credentials", credsName),
+				err)
+	}
+
+	respCreds := ec2RoleCredRespBody{}
+	if err := json.NewDecoder(strings.NewReader(resp)).Decode(&respCreds); err != nil {
+		return ec2RoleCredRespBody{},
+			awserr.New(request.ErrCodeSerialization,
+				fmt.Sprintf("failed to decode %s EC2 instance role credentials", credsName),
+				err)
+	}
+
+	if respCreds.Code != "Success" {
+		// If an error code was returned something failed requesting the role.
+		return ec2RoleCredRespBody{}, awserr.New(respCreds.Code, respCreds.Message, nil)
+	}
+
+	return respCreds, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go
new file mode 100644
index 000000000..c2b2c5d65
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go
@@ -0,0 +1,203 @@
+// Package endpointcreds provides support for retrieving credentials from an
+// arbitrary HTTP endpoint.
+// +// The credentials endpoint Provider can receive both static and refreshable +// credentials that will expire. Credentials are static when an "Expiration" +// value is not provided in the endpoint's response. +// +// Static credentials will never expire once they have been retrieved. The format +// of the static credentials response: +// { +// "AccessKeyId" : "MUA...", +// "SecretAccessKey" : "/7PC5om....", +// } +// +// Refreshable credentials will expire within the "ExpiryWindow" of the Expiration +// value in the response. The format of the refreshable credentials response: +// { +// "AccessKeyId" : "MUA...", +// "SecretAccessKey" : "/7PC5om....", +// "Token" : "AQoDY....=", +// "Expiration" : "2016-02-25T06:03:31Z" +// } +// +// Errors should be returned in the following format and only returned with 400 +// or 500 HTTP status codes. +// { +// "code": "ErrorCode", +// "message": "Helpful error message." +// } +package endpointcreds + +import ( + "encoding/json" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil" +) + +// ProviderName is the name of the credentials provider. +const ProviderName = `CredentialsEndpointProvider` + +// Provider satisfies the credentials.Provider interface, and is a client to +// retrieve credentials from an arbitrary endpoint. +type Provider struct { + staticCreds bool + credentials.Expiry + + // Requires a AWS Client to make HTTP requests to the endpoint with. + // the Endpoint the request will be made to is provided by the aws.Config's + // Endpoint value. + Client *client.Client + + // ExpiryWindow will allow the credentials to trigger refreshing prior to + // the credentials actually expiring. This is beneficial so race conditions + // with expiring credentials do not cause request to fail unexpectedly + // due to ExpiredTokenException exceptions. + // + // So a ExpiryWindow of 10s would cause calls to IsExpired() to return true + // 10 seconds before the credentials are actually expired. + // + // If ExpiryWindow is 0 or less it will be ignored. + ExpiryWindow time.Duration + + // Optional authorization token value if set will be used as the value of + // the Authorization header of the endpoint credential request. + AuthorizationToken string +} + +// NewProviderClient returns a credentials Provider for retrieving AWS credentials +// from arbitrary endpoint. +func NewProviderClient(cfg aws.Config, handlers request.Handlers, endpoint string, options ...func(*Provider)) credentials.Provider { + p := &Provider{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "CredentialsEndpoint", + Endpoint: endpoint, + }, + handlers, + ), + } + + p.Client.Handlers.Unmarshal.PushBack(unmarshalHandler) + p.Client.Handlers.UnmarshalError.PushBack(unmarshalError) + p.Client.Handlers.Validate.Clear() + p.Client.Handlers.Validate.PushBack(validateEndpointHandler) + + for _, option := range options { + option(p) + } + + return p +} + +// NewCredentialsClient returns a Credentials wrapper for retrieving credentials +// from an arbitrary endpoint concurrently. 
The client will request the credentials from the endpoint when Retrieve is called.
+func NewCredentialsClient(cfg aws.Config, handlers request.Handlers, endpoint string, options ...func(*Provider)) *credentials.Credentials {
+	return credentials.NewCredentials(NewProviderClient(cfg, handlers, endpoint, options...))
+}
+
+// IsExpired returns true if the credentials retrieved are expired, or not yet
+// retrieved.
+func (p *Provider) IsExpired() bool {
+	if p.staticCreds {
+		return false
+	}
+	return p.Expiry.IsExpired()
+}
+
+// Retrieve will attempt to request the credentials from the endpoint the Provider
+// was configured for. An error will be returned if the retrieval fails.
+func (p *Provider) Retrieve() (credentials.Value, error) {
+	resp, err := p.getCredentials()
+	if err != nil {
+		return credentials.Value{ProviderName: ProviderName},
+			awserr.New("CredentialsEndpointError", "failed to load credentials", err)
+	}
+
+	if resp.Expiration != nil {
+		p.SetExpiration(*resp.Expiration, p.ExpiryWindow)
+	} else {
+		p.staticCreds = true
+	}
+
+	return credentials.Value{
+		AccessKeyID:     resp.AccessKeyID,
+		SecretAccessKey: resp.SecretAccessKey,
+		SessionToken:    resp.Token,
+		ProviderName:    ProviderName,
+	}, nil
+}
+
+type getCredentialsOutput struct {
+	Expiration      *time.Time
+	AccessKeyID     string
+	SecretAccessKey string
+	Token           string
+}
+
+type errorOutput struct {
+	Code    string `json:"code"`
+	Message string `json:"message"`
+}
+
+func (p *Provider) getCredentials() (*getCredentialsOutput, error) {
+	op := &request.Operation{
+		Name:       "GetCredentials",
+		HTTPMethod: "GET",
+	}
+
+	out := &getCredentialsOutput{}
+	req := p.Client.NewRequest(op, nil, out)
+	req.HTTPRequest.Header.Set("Accept", "application/json")
+	if authToken := p.AuthorizationToken; len(authToken) != 0 {
+		req.HTTPRequest.Header.Set("Authorization", authToken)
+	}
+
+	return out, req.Send()
+}
+
+func validateEndpointHandler(r *request.Request) {
+	if len(r.ClientInfo.Endpoint) == 0 {
+		r.Error = aws.ErrMissingEndpoint
+	}
+}
+
+func unmarshalHandler(r *request.Request) {
+	defer r.HTTPResponse.Body.Close()
+
+	out := r.Data.(*getCredentialsOutput)
+	if err := json.NewDecoder(r.HTTPResponse.Body).Decode(&out); err != nil {
+		r.Error = awserr.New(request.ErrCodeSerialization,
+			"failed to decode endpoint credentials",
+			err,
+		)
+	}
+}
+
+func unmarshalError(r *request.Request) {
+	defer r.HTTPResponse.Body.Close()
+
+	var errOut errorOutput
+	err := jsonutil.UnmarshalJSONError(&errOut, r.HTTPResponse.Body)
+	if err != nil {
+		r.Error = awserr.NewRequestFailure(
+			awserr.New(request.ErrCodeSerialization,
+				"failed to decode error message", err),
+			r.HTTPResponse.StatusCode,
+			r.RequestID,
+		)
+		return
+	}
+
+	// Response body format is not consistent between metadata endpoints.
+	// Grab the error message as a string and include that as the source error
+	r.Error = awserr.New(errOut.Code, errOut.Message, nil)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go
new file mode 100644
index 000000000..54c5cf733
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go
@@ -0,0 +1,74 @@
+package credentials
+
+import (
+	"os"
+
+	"github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+// EnvProviderName provides a name of Env provider
+const EnvProviderName = "EnvProvider"
+
+var (
+	// ErrAccessKeyIDNotFound is returned when the AWS Access Key ID can't be
+	// found in the process's environment.
+ ErrAccessKeyIDNotFound = awserr.New("EnvAccessKeyNotFound", "AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY not found in environment", nil) + + // ErrSecretAccessKeyNotFound is returned when the AWS Secret Access Key + // can't be found in the process's environment. + ErrSecretAccessKeyNotFound = awserr.New("EnvSecretNotFound", "AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY not found in environment", nil) +) + +// A EnvProvider retrieves credentials from the environment variables of the +// running process. Environment credentials never expire. +// +// Environment variables used: +// +// * Access Key ID: AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY +// +// * Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY +type EnvProvider struct { + retrieved bool +} + +// NewEnvCredentials returns a pointer to a new Credentials object +// wrapping the environment variable provider. +func NewEnvCredentials() *Credentials { + return NewCredentials(&EnvProvider{}) +} + +// Retrieve retrieves the keys from the environment. +func (e *EnvProvider) Retrieve() (Value, error) { + e.retrieved = false + + id := os.Getenv("AWS_ACCESS_KEY_ID") + if id == "" { + id = os.Getenv("AWS_ACCESS_KEY") + } + + secret := os.Getenv("AWS_SECRET_ACCESS_KEY") + if secret == "" { + secret = os.Getenv("AWS_SECRET_KEY") + } + + if id == "" { + return Value{ProviderName: EnvProviderName}, ErrAccessKeyIDNotFound + } + + if secret == "" { + return Value{ProviderName: EnvProviderName}, ErrSecretAccessKeyNotFound + } + + e.retrieved = true + return Value{ + AccessKeyID: id, + SecretAccessKey: secret, + SessionToken: os.Getenv("AWS_SESSION_TOKEN"), + ProviderName: EnvProviderName, + }, nil +} + +// IsExpired returns if the credentials have been retrieved. +func (e *EnvProvider) IsExpired() bool { + return !e.retrieved +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go new file mode 100644 index 000000000..1980c8c14 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go @@ -0,0 +1,425 @@ +/* +Package processcreds is a credential Provider to retrieve `credential_process` +credentials. + +WARNING: The following describes a method of sourcing credentials from an external +process. This can potentially be dangerous, so proceed with caution. Other +credential providers should be preferred if at all possible. If using this +option, you should make sure that the config file is as locked down as possible +using security best practices for your operating system. + +You can use credentials from a `credential_process` in a variety of ways. + +One way is to setup your shared config file, located in the default +location, with the `credential_process` key and the command you want to be +called. You also need to set the AWS_SDK_LOAD_CONFIG environment variable +(e.g., `export AWS_SDK_LOAD_CONFIG=1`) to use the shared config file. + + [default] + credential_process = /command/to/call + +Creating a new session will use the credential process to retrieve credentials. +NOTE: If there are credentials in the profile you are using, the credential +process will not be used. + + // Initialize a session to load credentials. + sess, _ := session.NewSession(&aws.Config{ + Region: aws.String("us-east-1")}, + ) + + // Create S3 service client to use the credentials. 
+	svc := s3.New(sess)
+
+Another way to use the `credential_process` method is by using
+`processcreds.NewCredentials()` and providing a command to be executed to
+retrieve credentials:
+
+	// Create credentials using the ProcessProvider.
+	creds := processcreds.NewCredentials("/path/to/command")
+
+	// Create service client value configured for credentials.
+	svc := s3.New(sess, &aws.Config{Credentials: creds})
+
+You can set a non-default timeout for the `credential_process` with another
+constructor, `processcreds.NewCredentialsTimeout()`, providing the timeout. To
+set a one minute timeout:
+
+	// Create credentials using the ProcessProvider.
+	creds := processcreds.NewCredentialsTimeout(
+		"/path/to/command",
+		time.Duration(1) * time.Minute)
+
+If you need more control, you can set any configurable options in the
+credentials using one or more option functions. For example, you can set a two
+minute timeout, a credential duration of 60 minutes, and a maximum stdout
+buffer size of 2k.
+
+	creds := processcreds.NewCredentials(
+		"/path/to/command",
+		func(opt *ProcessProvider) {
+			opt.Timeout = time.Duration(2) * time.Minute
+			opt.Duration = time.Duration(60) * time.Minute
+			opt.MaxBufSize = 2048
+		})
+
+You can also use your own `exec.Cmd`:
+
+	// Create an exec.Cmd
+	myCommand := exec.Command("/path/to/command")
+
+	// Create credentials using your exec.Cmd and custom timeout
+	creds := processcreds.NewCredentialsCommand(
+		myCommand,
+		func(opt *processcreds.ProcessProvider) {
+			opt.Timeout = time.Duration(1) * time.Second
+		})
+*/
+package processcreds
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"os/exec"
+	"runtime"
+	"strings"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/credentials"
+)
+
+const (
+	// ProviderName is the name this credentials provider will label any
+	// returned credentials Value with.
+	ProviderName = `ProcessProvider`
+
+	// ErrCodeProcessProviderParse error parsing process output
+	ErrCodeProcessProviderParse = "ProcessProviderParseError"
+
+	// ErrCodeProcessProviderVersion version error in output
+	ErrCodeProcessProviderVersion = "ProcessProviderVersionError"
+
+	// ErrCodeProcessProviderRequired required attribute missing in output
+	ErrCodeProcessProviderRequired = "ProcessProviderRequiredError"
+
+	// ErrCodeProcessProviderExecution execution of command failed
+	ErrCodeProcessProviderExecution = "ProcessProviderExecutionError"
+
+	// errMsgProcessProviderTimeout process took longer than allowed
+	errMsgProcessProviderTimeout = "credential process timed out"
+
+	// errMsgProcessProviderProcess process error
+	errMsgProcessProviderProcess = "error in credential_process"
+
+	// errMsgProcessProviderParse problem parsing output
+	errMsgProcessProviderParse = "parse failed of credential_process output"
+
+	// errMsgProcessProviderVersion version error in output
+	errMsgProcessProviderVersion = "wrong version in process output (not 1)"
+
+	// errMsgProcessProviderMissKey missing access key id in output
+	errMsgProcessProviderMissKey = "missing AccessKeyId in process output"
+
+	// errMsgProcessProviderMissSecret missing secret access key in output
+	errMsgProcessProviderMissSecret = "missing SecretAccessKey in process output"
+
+	// errMsgProcessProviderPrepareCmd prepare of command failed
+	errMsgProcessProviderPrepareCmd = "failed to prepare command"
+
+	// errMsgProcessProviderEmptyCmd command must not be empty
+	errMsgProcessProviderEmptyCmd = "command must not be empty"
+
+	// errMsgProcessProviderPipe failed to initialize pipe
+	errMsgProcessProviderPipe = "failed to initialize pipe"
+
+	// DefaultDuration is the default amount of time that the credentials
+	// will be valid for.
+	DefaultDuration = time.Duration(15) * time.Minute
+
+	// DefaultBufSize limits buffer size from growing to an enormous
+	// amount due to a faulty process.
+	DefaultBufSize = 1024
+
+	// DefaultTimeout default limit on time a process can run.
+	DefaultTimeout = time.Duration(1) * time.Minute
+)
+
+// ProcessProvider satisfies the credentials.Provider interface, and is a
+// client to retrieve credentials from a process.
+type ProcessProvider struct {
+	staticCreds bool
+	credentials.Expiry
+	originalCommand []string
+
+	// Expiry duration of the credentials. Defaults to 15 minutes if not set.
+	Duration time.Duration
+
+	// ExpiryWindow will allow the credentials to trigger refreshing prior to
+	// the credentials actually expiring. This is beneficial so race conditions
+	// with expiring credentials do not cause requests to fail unexpectedly
+	// due to ExpiredTokenException exceptions.
+	//
+	// So an ExpiryWindow of 10s would cause calls to IsExpired() to return true
+	// 10 seconds before the credentials are actually expired.
+	//
+	// If ExpiryWindow is 0 or less it will be ignored.
+	ExpiryWindow time.Duration
+
+	// The os command to execute; it should print JSON credential
+	// information to stdout.
+	command *exec.Cmd
+
+	// MaxBufSize limits memory usage from growing to an enormous
+	// amount due to a faulty process.
+	MaxBufSize int
+
+	// Timeout limits the time a process can run.
+	Timeout time.Duration
+}
+
+// NewCredentials returns a pointer to a new Credentials object wrapping the
+// ProcessProvider. The credentials will expire every 15 minutes by default.
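+//
+// The process itself is expected to emit JSON of the following shape on
+// stdout (editorial note based on credentialProcessResponse below; field
+// values illustrative):
+//
+//	{"Version": 1, "AccessKeyId": "AKID", "SecretAccessKey": "SECRET",
+//	 "SessionToken": "TOKEN", "Expiration": "2019-07-17T00:00:00Z"}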
+func NewCredentials(command string, options ...func(*ProcessProvider)) *credentials.Credentials { + p := &ProcessProvider{ + command: exec.Command(command), + Duration: DefaultDuration, + Timeout: DefaultTimeout, + MaxBufSize: DefaultBufSize, + } + + for _, option := range options { + option(p) + } + + return credentials.NewCredentials(p) +} + +// NewCredentialsTimeout returns a pointer to a new Credentials object with +// the specified command and timeout, and default duration and max buffer size. +func NewCredentialsTimeout(command string, timeout time.Duration) *credentials.Credentials { + p := NewCredentials(command, func(opt *ProcessProvider) { + opt.Timeout = timeout + }) + + return p +} + +// NewCredentialsCommand returns a pointer to a new Credentials object with +// the specified command, and default timeout, duration and max buffer size. +func NewCredentialsCommand(command *exec.Cmd, options ...func(*ProcessProvider)) *credentials.Credentials { + p := &ProcessProvider{ + command: command, + Duration: DefaultDuration, + Timeout: DefaultTimeout, + MaxBufSize: DefaultBufSize, + } + + for _, option := range options { + option(p) + } + + return credentials.NewCredentials(p) +} + +type credentialProcessResponse struct { + Version int + AccessKeyID string `json:"AccessKeyId"` + SecretAccessKey string + SessionToken string + Expiration *time.Time +} + +// Retrieve executes the 'credential_process' and returns the credentials. +func (p *ProcessProvider) Retrieve() (credentials.Value, error) { + out, err := p.executeCredentialProcess() + if err != nil { + return credentials.Value{ProviderName: ProviderName}, err + } + + // Serialize and validate response + resp := &credentialProcessResponse{} + if err = json.Unmarshal(out, resp); err != nil { + return credentials.Value{ProviderName: ProviderName}, awserr.New( + ErrCodeProcessProviderParse, + fmt.Sprintf("%s: %s", errMsgProcessProviderParse, string(out)), + err) + } + + if resp.Version != 1 { + return credentials.Value{ProviderName: ProviderName}, awserr.New( + ErrCodeProcessProviderVersion, + errMsgProcessProviderVersion, + nil) + } + + if len(resp.AccessKeyID) == 0 { + return credentials.Value{ProviderName: ProviderName}, awserr.New( + ErrCodeProcessProviderRequired, + errMsgProcessProviderMissKey, + nil) + } + + if len(resp.SecretAccessKey) == 0 { + return credentials.Value{ProviderName: ProviderName}, awserr.New( + ErrCodeProcessProviderRequired, + errMsgProcessProviderMissSecret, + nil) + } + + // Handle expiration + p.staticCreds = resp.Expiration == nil + if resp.Expiration != nil { + p.SetExpiration(*resp.Expiration, p.ExpiryWindow) + } + + return credentials.Value{ + ProviderName: ProviderName, + AccessKeyID: resp.AccessKeyID, + SecretAccessKey: resp.SecretAccessKey, + SessionToken: resp.SessionToken, + }, nil +} + +// IsExpired returns true if the credentials retrieved are expired, or not yet +// retrieved. +func (p *ProcessProvider) IsExpired() bool { + if p.staticCreds { + return false + } + return p.Expiry.IsExpired() +} + +// prepareCommand prepares the command to be executed. 
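+// On non-Windows hosts the configured command ends up wrapped roughly as
+//
+//	sh -c "<original command>"
+//
+// and on Windows as cmd.exe /C "<original command>" (editorial note based on
+// the platform switch below; paths illustrative).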
+func (p *ProcessProvider) prepareCommand() error { + + var cmdArgs []string + if runtime.GOOS == "windows" { + cmdArgs = []string{"cmd.exe", "/C"} + } else { + cmdArgs = []string{"sh", "-c"} + } + + if len(p.originalCommand) == 0 { + p.originalCommand = make([]string, len(p.command.Args)) + copy(p.originalCommand, p.command.Args) + + // check for empty command because it succeeds + if len(strings.TrimSpace(p.originalCommand[0])) < 1 { + return awserr.New( + ErrCodeProcessProviderExecution, + fmt.Sprintf( + "%s: %s", + errMsgProcessProviderPrepareCmd, + errMsgProcessProviderEmptyCmd), + nil) + } + } + + cmdArgs = append(cmdArgs, p.originalCommand...) + p.command = exec.Command(cmdArgs[0], cmdArgs[1:]...) + p.command.Env = os.Environ() + + return nil +} + +// executeCredentialProcess starts the credential process on the OS and +// returns the results or an error. +func (p *ProcessProvider) executeCredentialProcess() ([]byte, error) { + + if err := p.prepareCommand(); err != nil { + return nil, err + } + + // Setup the pipes + outReadPipe, outWritePipe, err := os.Pipe() + if err != nil { + return nil, awserr.New( + ErrCodeProcessProviderExecution, + errMsgProcessProviderPipe, + err) + } + + p.command.Stderr = os.Stderr // display stderr on console for MFA + p.command.Stdout = outWritePipe // get creds json on process's stdout + p.command.Stdin = os.Stdin // enable stdin for MFA + + output := bytes.NewBuffer(make([]byte, 0, p.MaxBufSize)) + + stdoutCh := make(chan error, 1) + go readInput( + io.LimitReader(outReadPipe, int64(p.MaxBufSize)), + output, + stdoutCh) + + execCh := make(chan error, 1) + go executeCommand(*p.command, execCh) + + finished := false + var errors []error + for !finished { + select { + case readError := <-stdoutCh: + errors = appendError(errors, readError) + finished = true + case execError := <-execCh: + err := outWritePipe.Close() + errors = appendError(errors, err) + errors = appendError(errors, execError) + if errors != nil { + return output.Bytes(), awserr.NewBatchError( + ErrCodeProcessProviderExecution, + errMsgProcessProviderProcess, + errors) + } + case <-time.After(p.Timeout): + finished = true + return output.Bytes(), awserr.NewBatchError( + ErrCodeProcessProviderExecution, + errMsgProcessProviderTimeout, + errors) // errors can be nil + } + } + + out := output.Bytes() + + if runtime.GOOS == "windows" { + // windows adds slashes to quotes + out = []byte(strings.Replace(string(out), `\"`, `"`, -1)) + } + + return out, nil +} + +// appendError conveniently checks for nil before appending slice +func appendError(errors []error, err error) []error { + if err != nil { + return append(errors, err) + } + return errors +} + +func executeCommand(cmd exec.Cmd, exec chan error) { + // Start the command + err := cmd.Start() + if err == nil { + err = cmd.Wait() + } + + exec <- err +} + +func readInput(r io.Reader, w io.Writer, read chan error) { + tee := io.TeeReader(r, w) + + _, err := ioutil.ReadAll(tee) + + if err == io.EOF { + err = nil + } + + read <- err // will only arrive here when write end of pipe is closed +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go new file mode 100644 index 000000000..e15514958 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go @@ -0,0 +1,150 @@ +package credentials + +import ( + "fmt" + "os" + + "github.com/aws/aws-sdk-go/aws/awserr" + 
"github.com/aws/aws-sdk-go/internal/ini" + "github.com/aws/aws-sdk-go/internal/shareddefaults" +) + +// SharedCredsProviderName provides a name of SharedCreds provider +const SharedCredsProviderName = "SharedCredentialsProvider" + +var ( + // ErrSharedCredentialsHomeNotFound is emitted when the user directory cannot be found. + ErrSharedCredentialsHomeNotFound = awserr.New("UserHomeNotFound", "user home directory not found.", nil) +) + +// A SharedCredentialsProvider retrieves credentials from the current user's home +// directory, and keeps track if those credentials are expired. +// +// Profile ini file example: $HOME/.aws/credentials +type SharedCredentialsProvider struct { + // Path to the shared credentials file. + // + // If empty will look for "AWS_SHARED_CREDENTIALS_FILE" env variable. If the + // env value is empty will default to current user's home directory. + // Linux/OSX: "$HOME/.aws/credentials" + // Windows: "%USERPROFILE%\.aws\credentials" + Filename string + + // AWS Profile to extract credentials from the shared credentials file. If empty + // will default to environment variable "AWS_PROFILE" or "default" if + // environment variable is also not set. + Profile string + + // retrieved states if the credentials have been successfully retrieved. + retrieved bool +} + +// NewSharedCredentials returns a pointer to a new Credentials object +// wrapping the Profile file provider. +func NewSharedCredentials(filename, profile string) *Credentials { + return NewCredentials(&SharedCredentialsProvider{ + Filename: filename, + Profile: profile, + }) +} + +// Retrieve reads and extracts the shared credentials from the current +// users home directory. +func (p *SharedCredentialsProvider) Retrieve() (Value, error) { + p.retrieved = false + + filename, err := p.filename() + if err != nil { + return Value{ProviderName: SharedCredsProviderName}, err + } + + creds, err := loadProfile(filename, p.profile()) + if err != nil { + return Value{ProviderName: SharedCredsProviderName}, err + } + + p.retrieved = true + return creds, nil +} + +// IsExpired returns if the shared credentials have expired. +func (p *SharedCredentialsProvider) IsExpired() bool { + return !p.retrieved +} + +// loadProfiles loads from the file pointed to by shared credentials filename for profile. +// The credentials retrieved from the profile will be returned or error. Error will be +// returned if it fails to read from the file, or the data is invalid. 
+func loadProfile(filename, profile string) (Value, error) {
+	config, err := ini.OpenFile(filename)
+	if err != nil {
+		return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to load shared credentials file", err)
+	}
+
+	iniProfile, ok := config.GetSection(profile)
+	if !ok {
+		return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to get profile", nil)
+	}
+
+	id := iniProfile.String("aws_access_key_id")
+	if len(id) == 0 {
+		return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsAccessKey",
+			fmt.Sprintf("shared credentials %s in %s did not contain aws_access_key_id", profile, filename),
+			nil)
+	}
+
+	secret := iniProfile.String("aws_secret_access_key")
+	if len(secret) == 0 {
+		return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsSecret",
+			fmt.Sprintf("shared credentials %s in %s did not contain aws_secret_access_key", profile, filename),
+			nil)
+	}
+
+	// Default to empty string if not found
+	token := iniProfile.String("aws_session_token")
+
+	return Value{
+		AccessKeyID:     id,
+		SecretAccessKey: secret,
+		SessionToken:    token,
+		ProviderName:    SharedCredsProviderName,
+	}, nil
+}
+
+// filename returns the filename to use to read AWS shared credentials.
+//
+// Will return an error if the user's home directory path cannot be found.
+func (p *SharedCredentialsProvider) filename() (string, error) {
+	if len(p.Filename) != 0 {
+		return p.Filename, nil
+	}
+
+	if p.Filename = os.Getenv("AWS_SHARED_CREDENTIALS_FILE"); len(p.Filename) != 0 {
+		return p.Filename, nil
+	}
+
+	if home := shareddefaults.UserHomeDir(); len(home) == 0 {
+		// Backwards compatibility of the home directory not found error being
+		// returned. This error is too verbose; a failure when opening the file
+		// would have been a better error to return.
+		return "", ErrSharedCredentialsHomeNotFound
+	}
+
+	p.Filename = shareddefaults.SharedCredentialsFilename()
+
+	return p.Filename, nil
+}
+
+// profile returns the AWS shared credentials profile. If empty will read
+// environment variable "AWS_PROFILE". If that is not set profile will
+// return "default".
+func (p *SharedCredentialsProvider) profile() string {
+	if p.Profile == "" {
+		p.Profile = os.Getenv("AWS_PROFILE")
+	}
+	if p.Profile == "" {
+		p.Profile = "default"
+	}
+
+	return p.Profile
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go
new file mode 100644
index 000000000..531139e39
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go
@@ -0,0 +1,55 @@
+package credentials
+
+import (
+	"github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+// StaticProviderName provides a name of Static provider
+const StaticProviderName = "StaticProvider"
+
+var (
+	// ErrStaticCredentialsEmpty is emitted when static credentials are empty.
+	ErrStaticCredentialsEmpty = awserr.New("EmptyStaticCreds", "static credentials are empty", nil)
+)
+
+// A StaticProvider is a set of credentials which are set programmatically,
+// and will never expire.
+type StaticProvider struct {
+	Value
+}
+
+// NewStaticCredentials returns a pointer to a new Credentials object
+// wrapping a static credentials value provider.
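+//
+// For example, wiring static credentials into a service client (sess and
+// s3 are assumed to exist; the key values are placeholders):
+//
+//	creds := credentials.NewStaticCredentials("AKID", "SECRET", "TOKEN")
+//	svc := s3.New(sess, &aws.Config{Credentials: creds})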
+func NewStaticCredentials(id, secret, token string) *Credentials {
+	return NewCredentials(&StaticProvider{Value: Value{
+		AccessKeyID:     id,
+		SecretAccessKey: secret,
+		SessionToken:    token,
+	}})
+}
+
+// NewStaticCredentialsFromCreds returns a pointer to a new Credentials object
+// wrapping the static credentials value provider. Same as NewStaticCredentials
+// but takes the creds Value instead of individual fields
+func NewStaticCredentialsFromCreds(creds Value) *Credentials {
+	return NewCredentials(&StaticProvider{Value: creds})
+}
+
+// Retrieve returns the credentials or error if the credentials are invalid.
+func (s *StaticProvider) Retrieve() (Value, error) {
+	if s.AccessKeyID == "" || s.SecretAccessKey == "" {
+		return Value{ProviderName: StaticProviderName}, ErrStaticCredentialsEmpty
+	}
+
+	if len(s.Value.ProviderName) == 0 {
+		s.Value.ProviderName = StaticProviderName
+	}
+	return s.Value, nil
+}
+
+// IsExpired returns if the credentials are expired.
+//
+// For StaticProvider, the credentials never expire.
+func (s *StaticProvider) IsExpired() bool {
+	return false
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go
new file mode 100644
index 000000000..2e528d130
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go
@@ -0,0 +1,312 @@
+/*
+Package stscreds are credential Providers to retrieve STS AWS credentials.
+
+STS provides multiple ways to retrieve credentials which can be used when making
+future AWS service API operation calls.
+
+The SDK will ensure that per instance of credentials.Credentials all requests
+to refresh the credentials will be synchronized. But, the SDK is unable to
+ensure synchronous usage of the AssumeRoleProvider if the value is shared
+between multiple Credentials, Sessions or service clients.
+
+Assume Role
+
+To assume an IAM role using STS with the SDK you can create a new Credentials
+with the SDK's stscreds package.
+
+	// Initial credentials loaded from SDK's default credential chain. Such as
+	// the environment, shared credentials (~/.aws/credentials), or EC2 Instance
+	// Role. These credentials will be used to make the STS Assume Role API.
+	sess := session.Must(session.NewSession())
+
+	// Create the credentials from AssumeRoleProvider to assume the role
+	// referenced by the "myRoleARN" ARN.
+	creds := stscreds.NewCredentials(sess, "myRoleArn")
+
+	// Create service client value configured for credentials
+	// from assumed role.
+	svc := s3.New(sess, &aws.Config{Credentials: creds})
+
+Assume Role with static MFA Token
+
+To assume an IAM role with a MFA token you can either specify a MFA token code
+directly or provide a function to prompt the user each time the credentials
+need to refresh the role's credentials. Specifying the TokenCode should be used
+for short lived operations that will not need to be refreshed, and when you do
+not want direct control over how the user provides their MFA token.
+
+With TokenCode the AssumeRoleProvider will not be able to refresh the role's
+credentials.
+
+	// Create the credentials from AssumeRoleProvider to assume the role
+	// referenced by the "myRoleARN" ARN using the MFA token code provided.
+	creds := stscreds.NewCredentials(sess, "myRoleArn", func(p *stscreds.AssumeRoleProvider) {
+		p.SerialNumber = aws.String("myTokenSerialNumber")
+		p.TokenCode = aws.String("00000000")
+	})
+
+	// Create service client value configured for credentials
+	// from assumed role.
+	svc := s3.New(sess, &aws.Config{Credentials: creds})
+
+Assume Role with MFA Token Provider
+
+To assume an IAM role with MFA for longer running tasks where the credentials
+may need to be refreshed, setting the TokenProvider field of AssumeRoleProvider
+will allow the credential provider to prompt for a new MFA token code when the
+role's credentials need to be refreshed.
+
+The StdinTokenProvider function is available to prompt on stdin to retrieve
+the MFA token code from the user. You can also implement custom prompts by
+satisfying the TokenProvider function signature.
+
+Using StdinTokenProvider with multiple AssumeRoleProviders, or Credentials will
+have undesirable results as the StdinTokenProvider will not be synchronized. A
+single Credentials with an AssumeRoleProvider can be shared safely.
+
+	// Create the credentials from AssumeRoleProvider to assume the role
+	// referenced by the "myRoleARN" ARN. Prompting for MFA token from stdin.
+	creds := stscreds.NewCredentials(sess, "myRoleArn", func(p *stscreds.AssumeRoleProvider) {
+		p.SerialNumber = aws.String("myTokenSerialNumber")
+		p.TokenProvider = stscreds.StdinTokenProvider
+	})
+
+	// Create service client value configured for credentials
+	// from assumed role.
+	svc := s3.New(sess, &aws.Config{Credentials: creds})
+
+*/
+package stscreds
+
+import (
+	"fmt"
+	"os"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/credentials"
+	"github.com/aws/aws-sdk-go/internal/sdkrand"
+	"github.com/aws/aws-sdk-go/service/sts"
+)
+
+// StdinTokenProvider will prompt on stderr and read from stdin for a string value.
+// An error is returned if reading from stdin fails.
+//
+// Use this function to read MFA tokens from stdin. The function makes no attempt
+// to make atomic prompts from stdin across multiple goroutines.
+//
+// Using StdinTokenProvider with multiple AssumeRoleProviders, or Credentials will
+// have undesirable results as the StdinTokenProvider will not be synchronized. A
+// single Credentials with an AssumeRoleProvider can be shared safely.
+//
+// Will wait forever until something is provided on stdin.
+func StdinTokenProvider() (string, error) {
+	var v string
+	fmt.Fprintf(os.Stderr, "Assume Role MFA token code: ")
+	_, err := fmt.Scanln(&v)
+
+	return v, err
+}
+
+// ProviderName provides a name of AssumeRole provider
+const ProviderName = "AssumeRoleProvider"
+
+// AssumeRoler represents the minimal subset of the STS client API used by this provider.
+type AssumeRoler interface {
+	AssumeRole(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error)
+}
+
+// DefaultDuration is the default amount of time the credentials
+// will be valid for, 15 minutes.
+var DefaultDuration = time.Duration(15) * time.Minute
+
+// AssumeRoleProvider retrieves temporary credentials from the STS service, and
+// keeps track of their expiration time.
+//
+// This credential provider will be used by the SDK's default credential chain
+// when shared configuration is enabled, and the shared config or shared credentials
+// file configure assume role. See Session docs for how to do this.
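+//
+// For illustration, a shared config profile that configures assume role might
+// look like the following (the ARN and profile names are placeholders):
+//
+//	[profile assumerole]
+//	role_arn = arn:aws:iam::123456789012:role/my-role
+//	source_profile = default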
+//
+// AssumeRoleProvider does not provide any synchronization and it is not safe
+// to share this value across multiple Credentials, Sessions, or service clients
+// without also sharing the same Credentials instance.
+type AssumeRoleProvider struct {
+	credentials.Expiry
+
+	// STS client to make assume role request with.
+	Client AssumeRoler
+
+	// Role to be assumed.
+	RoleARN string
+
+	// Session name, if you wish to reuse the credentials elsewhere.
+	RoleSessionName string
+
+	// Expiry duration of the STS credentials. Defaults to 15 minutes if not set.
+	Duration time.Duration
+
+	// Optional ExternalID to pass along, defaults to nil if not set.
+	ExternalID *string
+
+	// The policy plain text must be 2048 bytes or shorter. However, an internal
+	// conversion compresses it into a packed binary format with a separate limit.
+	// The PackedPolicySize response element indicates by percentage how close to
+	// the upper size limit the policy is, with 100% equaling the maximum allowed
+	// size.
+	Policy *string
+
+	// The identification number of the MFA device that is associated with the user
+	// who is making the AssumeRole call. Specify this value if the trust policy
+	// of the role being assumed includes a condition that requires MFA authentication.
+	// The value is either the serial number for a hardware device (such as GAHT12345678)
+	// or an Amazon Resource Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user).
+	SerialNumber *string
+
+	// The value provided by the MFA device, if the trust policy of the role being
+	// assumed requires MFA (that is, if the policy includes a condition that tests
+	// for MFA). If the role being assumed requires MFA and if the TokenCode value
+	// is missing or expired, the AssumeRole call returns an "access denied" error.
+	//
+	// If SerialNumber is set and neither TokenCode nor TokenProvider are also
+	// set an error will be returned.
+	TokenCode *string
+
+	// Async method of providing MFA token code for assuming an IAM role with MFA.
+	// The value returned by the function will be used as the TokenCode in the Retrieve
+	// call. See StdinTokenProvider for a provider that prompts and reads from stdin.
+	//
+	// This token provider will be called whenever the assumed role's
+	// credentials need to be refreshed when SerialNumber is also set and
+	// TokenCode is not set.
+	//
+	// If both TokenCode and TokenProvider are set, TokenProvider will be used and
+	// TokenCode is ignored.
+	TokenProvider func() (string, error)
+
+	// ExpiryWindow will allow the credentials to trigger refreshing prior to
+	// the credentials actually expiring. This is beneficial so race conditions
+	// with expiring credentials do not cause requests to fail unexpectedly
+	// due to ExpiredTokenException exceptions.
+	//
+	// So an ExpiryWindow of 10s would cause calls to IsExpired() to return true
+	// 10 seconds before the credentials are actually expired.
+	//
+	// If ExpiryWindow is 0 or less it will be ignored.
+	ExpiryWindow time.Duration
+
+	// MaxJitterFrac reduces the effective Duration of each credential requested
+	// by a random percentage between 0 and MaxJitterFrac. MaxJitterFrac must
+	// have a value between 0 and 1. Any other value may lead to unexpected behavior.
+	// With a MaxJitterFrac value of 0 (the default), no jitter will be used.
+	//
+	// For example, with a Duration of 30m and a MaxJitterFrac of 0.1, the
+	// AssumeRole call will be made with an arbitrary Duration between 27m and
+	// 30m.
+ // + // MaxJitterFrac should not be negative. + MaxJitterFrac float64 +} + +// NewCredentials returns a pointer to a new Credentials object wrapping the +// AssumeRoleProvider. The credentials will expire every 15 minutes and the +// role will be named after a nanosecond timestamp of this operation. +// +// Takes a Config provider to create the STS client. The ConfigProvider is +// satisfied by the session.Session type. +// +// It is safe to share the returned Credentials with multiple Sessions and +// service clients. All access to the credentials and refreshing them +// will be synchronized. +func NewCredentials(c client.ConfigProvider, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials { + p := &AssumeRoleProvider{ + Client: sts.New(c), + RoleARN: roleARN, + Duration: DefaultDuration, + } + + for _, option := range options { + option(p) + } + + return credentials.NewCredentials(p) +} + +// NewCredentialsWithClient returns a pointer to a new Credentials object wrapping the +// AssumeRoleProvider. The credentials will expire every 15 minutes and the +// role will be named after a nanosecond timestamp of this operation. +// +// Takes an AssumeRoler which can be satisfied by the STS client. +// +// It is safe to share the returned Credentials with multiple Sessions and +// service clients. All access to the credentials and refreshing them +// will be synchronized. +func NewCredentialsWithClient(svc AssumeRoler, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials { + p := &AssumeRoleProvider{ + Client: svc, + RoleARN: roleARN, + Duration: DefaultDuration, + } + + for _, option := range options { + option(p) + } + + return credentials.NewCredentials(p) +} + +// Retrieve generates a new set of temporary credentials using STS. +func (p *AssumeRoleProvider) Retrieve() (credentials.Value, error) { + // Apply defaults where parameters are not set. + if p.RoleSessionName == "" { + // Try to work out a role name that will hopefully end up unique. + p.RoleSessionName = fmt.Sprintf("%d", time.Now().UTC().UnixNano()) + } + if p.Duration == 0 { + // Expire as often as AWS permits. + p.Duration = DefaultDuration + } + jitter := time.Duration(sdkrand.SeededRand.Float64() * p.MaxJitterFrac * float64(p.Duration)) + input := &sts.AssumeRoleInput{ + DurationSeconds: aws.Int64(int64((p.Duration - jitter) / time.Second)), + RoleArn: aws.String(p.RoleARN), + RoleSessionName: aws.String(p.RoleSessionName), + ExternalId: p.ExternalID, + } + if p.Policy != nil { + input.Policy = p.Policy + } + if p.SerialNumber != nil { + if p.TokenCode != nil { + input.SerialNumber = p.SerialNumber + input.TokenCode = p.TokenCode + } else if p.TokenProvider != nil { + input.SerialNumber = p.SerialNumber + code, err := p.TokenProvider() + if err != nil { + return credentials.Value{ProviderName: ProviderName}, err + } + input.TokenCode = aws.String(code) + } else { + return credentials.Value{ProviderName: ProviderName}, + awserr.New("AssumeRoleTokenNotAvailable", + "assume role with MFA enabled, but neither TokenCode nor TokenProvider are set", nil) + } + } + + roleOutput, err := p.Client.AssumeRole(input) + if err != nil { + return credentials.Value{ProviderName: ProviderName}, err + } + + // We will proactively generate new credentials before they expire. 
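+	// For example, with an ExpiryWindow of 10s, the SetExpiration call below
+	// makes IsExpired() report true 10 seconds before the STS expiration time,
+	// so the credentials are refreshed before they actually lapse.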
+ p.SetExpiration(*roleOutput.Credentials.Expiration, p.ExpiryWindow) + + return credentials.Value{ + AccessKeyID: *roleOutput.Credentials.AccessKeyId, + SecretAccessKey: *roleOutput.Credentials.SecretAccessKey, + SessionToken: *roleOutput.Credentials.SessionToken, + ProviderName: ProviderName, + }, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go new file mode 100644 index 000000000..25a66d1dd --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go @@ -0,0 +1,69 @@ +// Package csm provides the Client Side Monitoring (CSM) client which enables +// sending metrics via UDP connection to the CSM agent. This package provides +// control options, and configuration for the CSM client. The client can be +// controlled manually, or automatically via the SDK's Session configuration. +// +// Enabling CSM client via SDK's Session configuration +// +// The CSM client can be enabled automatically via SDK's Session configuration. +// The SDK's session configuration enables the CSM client if the AWS_CSM_PORT +// environment variable is set to a non-empty value. +// +// The configuration options for the CSM client via the SDK's session +// configuration are: +// +// * AWS_CSM_PORT= +// The port number the CSM agent will receive metrics on. +// +// * AWS_CSM_HOST= +// The hostname, or IP address the CSM agent will receive metrics on. +// Without port number. +// +// Manually enabling the CSM client +// +// The CSM client can be started, paused, and resumed manually. The Start +// function will enable the CSM client to publish metrics to the CSM agent. It +// is safe to call Start concurrently, but if Start is called additional times +// with different ClientID or address it will panic. +// +// r, err := csm.Start("clientID", ":31000") +// if err != nil { +// panic(fmt.Errorf("failed starting CSM: %v", err)) +// } +// +// When controlling the CSM client manually, you must also inject its request +// handlers into the SDK's Session configuration for the SDK's API clients to +// publish metrics. +// +// sess, err := session.NewSession(&aws.Config{}) +// if err != nil { +// panic(fmt.Errorf("failed loading session: %v", err)) +// } +// +// // Add CSM client's metric publishing request handlers to the SDK's +// // Session Configuration. +// r.InjectHandlers(&sess.Handlers) +// +// Controlling CSM client +// +// Once the CSM client has been enabled the Get function will return a Reporter +// value that you can use to pause and resume the metrics published to the CSM +// agent. If Get function is called before the reporter is enabled with the +// Start function or via SDK's Session configuration nil will be returned. +// +// The Pause method can be called to stop the CSM client publishing metrics to +// the CSM agent. The Continue method will resume metric publishing. +// +// // Get the CSM client Reporter. 
+//	r := csm.Get()
+//
+//	// Will pause monitoring
+//	r.Pause()
+//	resp, err = client.GetObject(&s3.GetObjectInput{
+//		Bucket: aws.String("bucket"),
+//		Key:    aws.String("key"),
+//	})
+//
+//	// Resume monitoring
+//	r.Continue()
+package csm
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go
new file mode 100644
index 000000000..4b19e2800
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go
@@ -0,0 +1,89 @@
+package csm
+
+import (
+	"fmt"
+	"strings"
+	"sync"
+)
+
+var (
+	lock sync.Mutex
+)
+
+const (
+	// DefaultPort is used when no port is specified.
+	DefaultPort = "31000"
+
+	// DefaultHost is the host that will be used when none is specified.
+	DefaultHost = "127.0.0.1"
+)
+
+// AddressWithDefaults returns a CSM address built from the host and port
+// values. If the host or port is not set, default values will be used
+// instead. If host is "localhost" it will be replaced with "127.0.0.1".
+func AddressWithDefaults(host, port string) string {
+	if len(host) == 0 || strings.EqualFold(host, "localhost") {
+		host = DefaultHost
+	}
+
+	if len(port) == 0 {
+		port = DefaultPort
+	}
+
+	// Only an IPv6 host can contain a colon
+	if strings.Contains(host, ":") {
+		return "[" + host + "]:" + port
+	}
+
+	return host + ":" + port
+}
+
+// Start will start a long-running goroutine to capture
+// client side metrics. Calling Start multiple times will only
+// start the metric listener once and will panic if a different
+// client ID or port is passed in.
+//
+//	r, err := csm.Start("clientID", "127.0.0.1:31000")
+//	if err != nil {
+//		panic(fmt.Errorf("expected no error, but received %v", err))
+//	}
+//	sess := session.NewSession()
+//	r.InjectHandlers(sess.Handlers)
+//
+//	svc := s3.New(sess)
+//	out, err := svc.GetObject(&s3.GetObjectInput{
+//		Bucket: aws.String("bucket"),
+//		Key:    aws.String("key"),
+//	})
+func Start(clientID string, url string) (*Reporter, error) {
+	lock.Lock()
+	defer lock.Unlock()
+
+	if sender == nil {
+		sender = newReporter(clientID, url)
+	} else {
+		if sender.clientID != clientID {
+			panic(fmt.Errorf("inconsistent client IDs. %q was expected, but received %q", sender.clientID, clientID))
+		}
+
+		if sender.url != url {
+			panic(fmt.Errorf("inconsistent URLs. %q was expected, but received %q", sender.url, url))
+		}
+	}
+
+	if err := connect(url); err != nil {
+		sender = nil
+		return nil, err
+	}
+
+	return sender, nil
+}
+
+// Get will return a reporter if one exists; if one does not exist, nil will
+// be returned.
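+//
+// For example, pausing monitoring only when the client was actually enabled:
+//
+//	if r := csm.Get(); r != nil {
+//		r.Pause()
+//	}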
+func Get() *Reporter { + lock.Lock() + defer lock.Unlock() + + return sender +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go new file mode 100644 index 000000000..5bacc791a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go @@ -0,0 +1,109 @@ +package csm + +import ( + "strconv" + "time" + + "github.com/aws/aws-sdk-go/aws" +) + +type metricTime time.Time + +func (t metricTime) MarshalJSON() ([]byte, error) { + ns := time.Duration(time.Time(t).UnixNano()) + return []byte(strconv.FormatInt(int64(ns/time.Millisecond), 10)), nil +} + +type metric struct { + ClientID *string `json:"ClientId,omitempty"` + API *string `json:"Api,omitempty"` + Service *string `json:"Service,omitempty"` + Timestamp *metricTime `json:"Timestamp,omitempty"` + Type *string `json:"Type,omitempty"` + Version *int `json:"Version,omitempty"` + + AttemptCount *int `json:"AttemptCount,omitempty"` + Latency *int `json:"Latency,omitempty"` + + Fqdn *string `json:"Fqdn,omitempty"` + UserAgent *string `json:"UserAgent,omitempty"` + AttemptLatency *int `json:"AttemptLatency,omitempty"` + + SessionToken *string `json:"SessionToken,omitempty"` + Region *string `json:"Region,omitempty"` + AccessKey *string `json:"AccessKey,omitempty"` + HTTPStatusCode *int `json:"HttpStatusCode,omitempty"` + XAmzID2 *string `json:"XAmzId2,omitempty"` + XAmzRequestID *string `json:"XAmznRequestId,omitempty"` + + AWSException *string `json:"AwsException,omitempty"` + AWSExceptionMessage *string `json:"AwsExceptionMessage,omitempty"` + SDKException *string `json:"SdkException,omitempty"` + SDKExceptionMessage *string `json:"SdkExceptionMessage,omitempty"` + + FinalHTTPStatusCode *int `json:"FinalHttpStatusCode,omitempty"` + FinalAWSException *string `json:"FinalAwsException,omitempty"` + FinalAWSExceptionMessage *string `json:"FinalAwsExceptionMessage,omitempty"` + FinalSDKException *string `json:"FinalSdkException,omitempty"` + FinalSDKExceptionMessage *string `json:"FinalSdkExceptionMessage,omitempty"` + + DestinationIP *string `json:"DestinationIp,omitempty"` + ConnectionReused *int `json:"ConnectionReused,omitempty"` + + AcquireConnectionLatency *int `json:"AcquireConnectionLatency,omitempty"` + ConnectLatency *int `json:"ConnectLatency,omitempty"` + RequestLatency *int `json:"RequestLatency,omitempty"` + DNSLatency *int `json:"DnsLatency,omitempty"` + TCPLatency *int `json:"TcpLatency,omitempty"` + SSLLatency *int `json:"SslLatency,omitempty"` + + MaxRetriesExceeded *int `json:"MaxRetriesExceeded,omitempty"` +} + +func (m *metric) TruncateFields() { + m.ClientID = truncateString(m.ClientID, 255) + m.UserAgent = truncateString(m.UserAgent, 256) + + m.AWSException = truncateString(m.AWSException, 128) + m.AWSExceptionMessage = truncateString(m.AWSExceptionMessage, 512) + + m.SDKException = truncateString(m.SDKException, 128) + m.SDKExceptionMessage = truncateString(m.SDKExceptionMessage, 512) + + m.FinalAWSException = truncateString(m.FinalAWSException, 128) + m.FinalAWSExceptionMessage = truncateString(m.FinalAWSExceptionMessage, 512) + + m.FinalSDKException = truncateString(m.FinalSDKException, 128) + m.FinalSDKExceptionMessage = truncateString(m.FinalSDKExceptionMessage, 512) +} + +func truncateString(v *string, l int) *string { + if v != nil && len(*v) > l { + nv := (*v)[:l] + return &nv + } + + return v +} + +func (m *metric) SetException(e metricException) { + switch te := e.(type) { + case awsException: + m.AWSException = aws.String(te.exception) + 
m.AWSExceptionMessage = aws.String(te.message) + case sdkException: + m.SDKException = aws.String(te.exception) + m.SDKExceptionMessage = aws.String(te.message) + } +} + +func (m *metric) SetFinalException(e metricException) { + switch te := e.(type) { + case awsException: + m.FinalAWSException = aws.String(te.exception) + m.FinalAWSExceptionMessage = aws.String(te.message) + case sdkException: + m.FinalSDKException = aws.String(te.exception) + m.FinalSDKExceptionMessage = aws.String(te.message) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go new file mode 100644 index 000000000..514fc3739 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go @@ -0,0 +1,54 @@ +package csm + +import ( + "sync/atomic" +) + +const ( + runningEnum = iota + pausedEnum +) + +var ( + // MetricsChannelSize of metrics to hold in the channel + MetricsChannelSize = 100 +) + +type metricChan struct { + ch chan metric + paused int64 +} + +func newMetricChan(size int) metricChan { + return metricChan{ + ch: make(chan metric, size), + } +} + +func (ch *metricChan) Pause() { + atomic.StoreInt64(&ch.paused, pausedEnum) +} + +func (ch *metricChan) Continue() { + atomic.StoreInt64(&ch.paused, runningEnum) +} + +func (ch *metricChan) IsPaused() bool { + v := atomic.LoadInt64(&ch.paused) + return v == pausedEnum +} + +// Push will push metrics to the metric channel if the channel +// is not paused +func (ch *metricChan) Push(m metric) bool { + if ch.IsPaused() { + return false + } + + select { + case ch.ch <- m: + return true + default: + return false + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_exception.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_exception.go new file mode 100644 index 000000000..54a99280c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_exception.go @@ -0,0 +1,26 @@ +package csm + +type metricException interface { + Exception() string + Message() string +} + +type requestException struct { + exception string + message string +} + +func (e requestException) Exception() string { + return e.exception +} +func (e requestException) Message() string { + return e.message +} + +type awsException struct { + requestException +} + +type sdkException struct { + requestException +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go new file mode 100644 index 000000000..0d3684914 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go @@ -0,0 +1,265 @@ +package csm + +import ( + "encoding/json" + "net" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +// Reporter will gather metrics of API requests made and +// send those metrics to the CSM endpoint. 
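+//
+// A sketch of pausing metric publishing around requests that should not be
+// monitored, assuming the Reporter was enabled with Start:
+//
+//	r.Pause()
+//	// ... requests made here are not published ...
+//	r.Continue()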
+type Reporter struct { + clientID string + url string + conn net.Conn + metricsCh metricChan + done chan struct{} +} + +var ( + sender *Reporter +) + +func connect(url string) error { + const network = "udp" + if err := sender.connect(network, url); err != nil { + return err + } + + if sender.done == nil { + sender.done = make(chan struct{}) + go sender.start() + } + + return nil +} + +func newReporter(clientID, url string) *Reporter { + return &Reporter{ + clientID: clientID, + url: url, + metricsCh: newMetricChan(MetricsChannelSize), + } +} + +func (rep *Reporter) sendAPICallAttemptMetric(r *request.Request) { + if rep == nil { + return + } + + now := time.Now() + creds, _ := r.Config.Credentials.Get() + + m := metric{ + ClientID: aws.String(rep.clientID), + API: aws.String(r.Operation.Name), + Service: aws.String(r.ClientInfo.ServiceID), + Timestamp: (*metricTime)(&now), + UserAgent: aws.String(r.HTTPRequest.Header.Get("User-Agent")), + Region: r.Config.Region, + Type: aws.String("ApiCallAttempt"), + Version: aws.Int(1), + + XAmzRequestID: aws.String(r.RequestID), + + AttemptCount: aws.Int(r.RetryCount + 1), + AttemptLatency: aws.Int(int(now.Sub(r.AttemptTime).Nanoseconds() / int64(time.Millisecond))), + AccessKey: aws.String(creds.AccessKeyID), + } + + if r.HTTPResponse != nil { + m.HTTPStatusCode = aws.Int(r.HTTPResponse.StatusCode) + } + + if r.Error != nil { + if awserr, ok := r.Error.(awserr.Error); ok { + m.SetException(getMetricException(awserr)) + } + } + + m.TruncateFields() + rep.metricsCh.Push(m) +} + +func getMetricException(err awserr.Error) metricException { + msg := err.Error() + code := err.Code() + + switch code { + case "RequestError", + request.ErrCodeSerialization, + request.CanceledErrorCode: + return sdkException{ + requestException{exception: code, message: msg}, + } + default: + return awsException{ + requestException{exception: code, message: msg}, + } + } +} + +func (rep *Reporter) sendAPICallMetric(r *request.Request) { + if rep == nil { + return + } + + now := time.Now() + m := metric{ + ClientID: aws.String(rep.clientID), + API: aws.String(r.Operation.Name), + Service: aws.String(r.ClientInfo.ServiceID), + Timestamp: (*metricTime)(&now), + UserAgent: aws.String(r.HTTPRequest.Header.Get("User-Agent")), + Type: aws.String("ApiCall"), + AttemptCount: aws.Int(r.RetryCount + 1), + Region: r.Config.Region, + Latency: aws.Int(int(time.Now().Sub(r.Time) / time.Millisecond)), + XAmzRequestID: aws.String(r.RequestID), + MaxRetriesExceeded: aws.Int(boolIntValue(r.RetryCount >= r.MaxRetries())), + } + + if r.HTTPResponse != nil { + m.FinalHTTPStatusCode = aws.Int(r.HTTPResponse.StatusCode) + } + + if r.Error != nil { + if awserr, ok := r.Error.(awserr.Error); ok { + m.SetFinalException(getMetricException(awserr)) + } + } + + m.TruncateFields() + + // TODO: Probably want to figure something out for logging dropped + // metrics + rep.metricsCh.Push(m) +} + +func (rep *Reporter) connect(network, url string) error { + if rep.conn != nil { + rep.conn.Close() + } + + conn, err := net.Dial(network, url) + if err != nil { + return awserr.New("UDPError", "Could not connect", err) + } + + rep.conn = conn + + return nil +} + +func (rep *Reporter) close() { + if rep.done != nil { + close(rep.done) + } + + rep.metricsCh.Pause() +} + +func (rep *Reporter) start() { + defer func() { + rep.metricsCh.Pause() + }() + + for { + select { + case <-rep.done: + rep.done = nil + return + case m := <-rep.metricsCh.ch: + // TODO: What to do with this error? 
Probably should just log
+			b, err := json.Marshal(m)
+			if err != nil {
+				continue
+			}
+
+			rep.conn.Write(b)
+		}
+	}
+}
+
+// Pause will pause the metric channel preventing any new metrics from being
+// added. It is safe to call concurrently with other calls to Pause, but if
+// called concurrently with Continue can lead to unexpected state.
+func (rep *Reporter) Pause() {
+	lock.Lock()
+	defer lock.Unlock()
+
+	if rep == nil {
+		return
+	}
+
+	rep.close()
+}
+
+// Continue will reopen the metric channel and allow for monitoring to be
+// resumed. It is safe to call concurrently with other calls to Continue, but
+// if called concurrently with Pause can lead to unexpected state.
+func (rep *Reporter) Continue() {
+	lock.Lock()
+	defer lock.Unlock()
+	if rep == nil {
+		return
+	}
+
+	if !rep.metricsCh.IsPaused() {
+		return
+	}
+
+	rep.metricsCh.Continue()
+}
+
+// Client side metric handler names
+const (
+	APICallMetricHandlerName        = "awscsm.SendAPICallMetric"
+	APICallAttemptMetricHandlerName = "awscsm.SendAPICallAttemptMetric"
+)
+
+// InjectHandlers will enable client side metrics and inject the proper
+// handlers to handle how metrics are sent.
+//
+// InjectHandlers is NOT safe to call concurrently. Calling InjectHandlers
+// multiple times may lead to unexpected behavior (e.g. duplicate metrics).
+//
+//	// Start must be called in order to inject the correct handlers
+//	r, err := csm.Start("clientID", "127.0.0.1:8094")
+//	if err != nil {
+//		panic(fmt.Errorf("expected no error, but received %v", err))
+//	}
+//
+//	sess := session.NewSession()
+//	r.InjectHandlers(&sess.Handlers)
+//
+//	// create a new service client with our client side metric session
+//	svc := s3.New(sess)
func (rep *Reporter) InjectHandlers(handlers *request.Handlers) {
+	if rep == nil {
+		return
+	}
+
+	handlers.Complete.PushFrontNamed(request.NamedHandler{
+		Name: APICallMetricHandlerName,
+		Fn:   rep.sendAPICallMetric,
+	})
+
+	handlers.CompleteAttempt.PushFrontNamed(request.NamedHandler{
+		Name: APICallAttemptMetricHandlerName,
+		Fn:   rep.sendAPICallAttemptMetric,
+	})
+}
+
+// boolIntValue returns 1 for true and 0 for false.
+func boolIntValue(b bool) int {
+	if b {
+		return 1
+	}
+
+	return 0
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go
new file mode 100644
index 000000000..23bb639e0
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go
@@ -0,0 +1,207 @@
+// Package defaults is a collection of helpers to retrieve the SDK's default
+// configuration and handlers.
+//
+// Generally this package shouldn't be used directly; session.Session should
+// be used instead. This package is useful when you need to reset the defaults
+// of a session or service client to the SDK defaults before setting
+// additional parameters.
+package defaults
+
+import (
+	"fmt"
+	"net"
+	"net/http"
+	"net/url"
+	"os"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/corehandlers"
+	"github.com/aws/aws-sdk-go/aws/credentials"
+	"github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
+	"github.com/aws/aws-sdk-go/aws/credentials/endpointcreds"
+	"github.com/aws/aws-sdk-go/aws/ec2metadata"
+	"github.com/aws/aws-sdk-go/aws/endpoints"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/internal/shareddefaults"
+)
+
+// A Defaults provides a collection of default values for SDK clients.
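+//
+// For illustration, resetting a config to the SDK defaults before customizing
+// it might look like:
+//
+//	def := defaults.Get()
+//	cfg := def.Config.WithRegion("us-west-2")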
+type Defaults struct {
+	Config   *aws.Config
+	Handlers request.Handlers
+}
+
+// Get returns the SDK's default values with Config and handlers pre-configured.
+func Get() Defaults {
+	cfg := Config()
+	handlers := Handlers()
+	cfg.Credentials = CredChain(cfg, handlers)
+
+	return Defaults{
+		Config:   cfg,
+		Handlers: handlers,
+	}
+}
+
+// Config returns the default configuration without credentials.
+// To retrieve a config with credentials also included use
+// `defaults.Get().Config` instead.
+//
+// Generally you shouldn't need to use this method directly, but it is
+// available if you need to reset the configuration of an
+// existing service client or session.
+func Config() *aws.Config {
+	return aws.NewConfig().
+		WithCredentials(credentials.AnonymousCredentials).
+		WithRegion(os.Getenv("AWS_REGION")).
+		WithHTTPClient(http.DefaultClient).
+		WithMaxRetries(aws.UseServiceDefaultRetries).
+		WithLogger(aws.NewDefaultLogger()).
+		WithLogLevel(aws.LogOff).
+		WithEndpointResolver(endpoints.DefaultResolver())
+}
+
+// Handlers returns the default request handlers.
+//
+// Generally you shouldn't need to use this method directly, but it is
+// available if you need to reset the request handlers of an
+// existing service client or session.
+func Handlers() request.Handlers {
+	var handlers request.Handlers
+
+	handlers.Validate.PushBackNamed(corehandlers.ValidateEndpointHandler)
+	handlers.Validate.AfterEachFn = request.HandlerListStopOnError
+	handlers.Build.PushBackNamed(corehandlers.SDKVersionUserAgentHandler)
+	handlers.Build.PushBackNamed(corehandlers.AddHostExecEnvUserAgentHander)
+	handlers.Build.AfterEachFn = request.HandlerListStopOnError
+	handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler)
+	handlers.Send.PushBackNamed(corehandlers.ValidateReqSigHandler)
+	handlers.Send.PushBackNamed(corehandlers.SendHandler)
+	handlers.AfterRetry.PushBackNamed(corehandlers.AfterRetryHandler)
+	handlers.ValidateResponse.PushBackNamed(corehandlers.ValidateResponseHandler)
+
+	return handlers
+}
+
+// CredChain returns the default credential chain.
+//
+// Generally you shouldn't need to use this method directly, but it is
+// available if you need to reset the credentials of an
+// existing service client or session's Config.
+func CredChain(cfg *aws.Config, handlers request.Handlers) *credentials.Credentials {
+	return credentials.NewCredentials(&credentials.ChainProvider{
+		VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors),
+		Providers:     CredProviders(cfg, handlers),
+	})
+}
+
+// CredProviders returns the slice of providers used in
+// the default credential chain.
+//
+// For applications that need to use some other provider (for example use
+// different environment variables for legacy reasons) but still fall back
+// on the default chain of providers, this allows that default chain to be
+// automatically updated.
+func CredProviders(cfg *aws.Config, handlers request.Handlers) []credentials.Provider {
+	return []credentials.Provider{
+		&credentials.EnvProvider{},
+		&credentials.SharedCredentialsProvider{Filename: "", Profile: ""},
+		RemoteCredProvider(*cfg, handlers),
+	}
+}
+
+const (
+	httpProviderAuthorizationEnvVar = "AWS_CONTAINER_AUTHORIZATION_TOKEN"
+	httpProviderEnvVar              = "AWS_CONTAINER_CREDENTIALS_FULL_URI"
+)
+
+// RemoteCredProvider returns a credentials provider for the default remote
+// endpoints such as EC2 or ECS Roles.
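+//
+// A sketch of building a credentials chain around the remote provider,
+// reusing the default config and handlers:
+//
+//	def := defaults.Get()
+//	provider := defaults.RemoteCredProvider(*def.Config, def.Handlers)
+//	creds := credentials.NewCredentials(provider)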
+func RemoteCredProvider(cfg aws.Config, handlers request.Handlers) credentials.Provider { + if u := os.Getenv(httpProviderEnvVar); len(u) > 0 { + return localHTTPCredProvider(cfg, handlers, u) + } + + if uri := os.Getenv(shareddefaults.ECSCredsProviderEnvVar); len(uri) > 0 { + u := fmt.Sprintf("%s%s", shareddefaults.ECSContainerCredentialsURI, uri) + return httpCredProvider(cfg, handlers, u) + } + + return ec2RoleProvider(cfg, handlers) +} + +var lookupHostFn = net.LookupHost + +func isLoopbackHost(host string) (bool, error) { + ip := net.ParseIP(host) + if ip != nil { + return ip.IsLoopback(), nil + } + + // Host is not an ip, perform lookup + addrs, err := lookupHostFn(host) + if err != nil { + return false, err + } + for _, addr := range addrs { + if !net.ParseIP(addr).IsLoopback() { + return false, nil + } + } + + return true, nil +} + +func localHTTPCredProvider(cfg aws.Config, handlers request.Handlers, u string) credentials.Provider { + var errMsg string + + parsed, err := url.Parse(u) + if err != nil { + errMsg = fmt.Sprintf("invalid URL, %v", err) + } else { + host := aws.URLHostname(parsed) + if len(host) == 0 { + errMsg = "unable to parse host from local HTTP cred provider URL" + } else if isLoopback, loopbackErr := isLoopbackHost(host); loopbackErr != nil { + errMsg = fmt.Sprintf("failed to resolve host %q, %v", host, loopbackErr) + } else if !isLoopback { + errMsg = fmt.Sprintf("invalid endpoint host, %q, only loopback hosts are allowed.", host) + } + } + + if len(errMsg) > 0 { + if cfg.Logger != nil { + cfg.Logger.Log("Ignoring, HTTP credential provider", errMsg, err) + } + return credentials.ErrorProvider{ + Err: awserr.New("CredentialsEndpointError", errMsg, err), + ProviderName: endpointcreds.ProviderName, + } + } + + return httpCredProvider(cfg, handlers, u) +} + +func httpCredProvider(cfg aws.Config, handlers request.Handlers, u string) credentials.Provider { + return endpointcreds.NewProviderClient(cfg, handlers, u, + func(p *endpointcreds.Provider) { + p.ExpiryWindow = 5 * time.Minute + p.AuthorizationToken = os.Getenv(httpProviderAuthorizationEnvVar) + }, + ) +} + +func ec2RoleProvider(cfg aws.Config, handlers request.Handlers) credentials.Provider { + resolver := cfg.EndpointResolver + if resolver == nil { + resolver = endpoints.DefaultResolver() + } + + e, _ := resolver.EndpointFor(endpoints.Ec2metadataServiceID, "") + return &ec2rolecreds.EC2RoleProvider{ + Client: ec2metadata.NewClient(cfg, handlers, e.URL, e.SigningRegion), + ExpiryWindow: 5 * time.Minute, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/defaults/shared_config.go b/vendor/github.com/aws/aws-sdk-go/aws/defaults/shared_config.go new file mode 100644 index 000000000..ca0ee1dcc --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/defaults/shared_config.go @@ -0,0 +1,27 @@ +package defaults + +import ( + "github.com/aws/aws-sdk-go/internal/shareddefaults" +) + +// SharedCredentialsFilename returns the SDK's default file path +// for the shared credentials file. +// +// Builds the shared config file path based on the OS's platform. +// +// - Linux/Unix: $HOME/.aws/credentials +// - Windows: %USERPROFILE%\.aws\credentials +func SharedCredentialsFilename() string { + return shareddefaults.SharedCredentialsFilename() +} + +// SharedConfigFilename returns the SDK's default file path for +// the shared config file. +// +// Builds the shared config file path based on the OS's platform. 
+//
+// - Linux/Unix: $HOME/.aws/config
+// - Windows: %USERPROFILE%\.aws\config
+func SharedConfigFilename() string {
+	return shareddefaults.SharedConfigFilename()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/doc.go
new file mode 100644
index 000000000..4fcb61618
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/doc.go
@@ -0,0 +1,56 @@
+// Package aws provides the core SDK's utilities and shared types. Use this package's
+// utilities to simplify setting and reading API operations parameters.
+//
+// Value and Pointer Conversion Utilities
+//
+// This package includes a helper conversion utility for each scalar type the SDK's
+// APIs use. These utilities make getting a pointer of the scalar, and dereferencing
+// a pointer easier.
+//
+// Each conversion utility comes in two forms. Value to Pointer and Pointer to Value.
+// The Pointer to Value will safely dereference the pointer and return its value.
+// If the pointer was nil, the scalar's zero value will be returned.
+//
+// The Value to Pointer functions will be named after the scalar type. So to get a
+// *string from a string value use the "String" function. This makes it easy
+// to get a pointer of a literal string value, because getting the address of a
+// literal requires assigning the value to a variable first.
+//
+//	var strPtr *string
+//
+//	// Without the SDK's conversion functions
+//	str := "my string"
+//	strPtr = &str
+//
+//	// With the SDK's conversion functions
+//	strPtr = aws.String("my string")
+//
+//	// Convert *string to string value
+//	str = aws.StringValue(strPtr)
+//
+// In addition to scalars the aws package also includes conversion utilities for
+// map and slice for commonly used types in API parameters. The map and slice
+// conversion functions use a similar naming pattern as the scalar conversion
+// functions.
+//
+//	var strPtrs []*string
+//	var strs []string = []string{"Go", "Gophers", "Go"}
+//
+//	// Convert []string to []*string
+//	strPtrs = aws.StringSlice(strs)
+//
+//	// Convert []*string to []string
+//	strs = aws.StringValueSlice(strPtrs)
+//
+// SDK Default HTTP Client
+//
+// The SDK will use the http.DefaultClient if an HTTP client is not provided to
+// the SDK's Session, or service client constructor. This means that if the
+// http.DefaultClient is modified by other components of your application the
+// modifications will be picked up by the SDK as well.
+//
+// In some cases this might be intended, but it is a better practice to create
+// a custom HTTP Client to share explicitly through your application. You can
+// configure the SDK to use the custom HTTP Client by setting the HTTPClient
+// value of the SDK's Config type when creating a Session or service client.
+package aws
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go
new file mode 100644
index 000000000..2c8d5f56d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go
@@ -0,0 +1,169 @@
+package ec2metadata
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"strings"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/internal/sdkuri"
+)
+
+// GetMetadata uses the path provided to request information from the EC2
+// instance metadata service. The content will be returned as a string, or
+// an error if the request failed.
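+//
+// For example, reading the instance ID (assuming the code runs on an EC2
+// instance and a session sess is available):
+//
+//	svc := ec2metadata.New(sess)
+//	id, err := svc.GetMetadata("instance-id")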
+func (c *EC2Metadata) GetMetadata(p string) (string, error) { + op := &request.Operation{ + Name: "GetMetadata", + HTTPMethod: "GET", + HTTPPath: sdkuri.PathJoin("/meta-data", p), + } + + output := &metadataOutput{} + req := c.NewRequest(op, nil, output) + err := req.Send() + + return output.Content, err +} + +// GetUserData returns the userdata that was configured for the service. If +// there is no user-data setup for the EC2 instance a "NotFoundError" error +// code will be returned. +func (c *EC2Metadata) GetUserData() (string, error) { + op := &request.Operation{ + Name: "GetUserData", + HTTPMethod: "GET", + HTTPPath: "/user-data", + } + + output := &metadataOutput{} + req := c.NewRequest(op, nil, output) + req.Handlers.UnmarshalError.PushBack(func(r *request.Request) { + if r.HTTPResponse.StatusCode == http.StatusNotFound { + r.Error = awserr.New("NotFoundError", "user-data not found", r.Error) + } + }) + err := req.Send() + + return output.Content, err +} + +// GetDynamicData uses the path provided to request information from the EC2 +// instance metadata service for dynamic data. The content will be returned +// as a string, or error if the request failed. +func (c *EC2Metadata) GetDynamicData(p string) (string, error) { + op := &request.Operation{ + Name: "GetDynamicData", + HTTPMethod: "GET", + HTTPPath: sdkuri.PathJoin("/dynamic", p), + } + + output := &metadataOutput{} + req := c.NewRequest(op, nil, output) + err := req.Send() + + return output.Content, err +} + +// GetInstanceIdentityDocument retrieves an identity document describing an +// instance. Error is returned if the request fails or is unable to parse +// the response. +func (c *EC2Metadata) GetInstanceIdentityDocument() (EC2InstanceIdentityDocument, error) { + resp, err := c.GetDynamicData("instance-identity/document") + if err != nil { + return EC2InstanceIdentityDocument{}, + awserr.New("EC2MetadataRequestError", + "failed to get EC2 instance identity document", err) + } + + doc := EC2InstanceIdentityDocument{} + if err := json.NewDecoder(strings.NewReader(resp)).Decode(&doc); err != nil { + return EC2InstanceIdentityDocument{}, + awserr.New(request.ErrCodeSerialization, + "failed to decode EC2 instance identity document", err) + } + + return doc, nil +} + +// IAMInfo retrieves IAM info from the metadata API +func (c *EC2Metadata) IAMInfo() (EC2IAMInfo, error) { + resp, err := c.GetMetadata("iam/info") + if err != nil { + return EC2IAMInfo{}, + awserr.New("EC2MetadataRequestError", + "failed to get EC2 IAM info", err) + } + + info := EC2IAMInfo{} + if err := json.NewDecoder(strings.NewReader(resp)).Decode(&info); err != nil { + return EC2IAMInfo{}, + awserr.New(request.ErrCodeSerialization, + "failed to decode EC2 IAM info", err) + } + + if info.Code != "Success" { + errMsg := fmt.Sprintf("failed to get EC2 IAM Info (%s)", info.Code) + return EC2IAMInfo{}, + awserr.New("EC2MetadataError", errMsg, nil) + } + + return info, nil +} + +// Region returns the region the instance is running in. +func (c *EC2Metadata) Region() (string, error) { + resp, err := c.GetMetadata("placement/availability-zone") + if err != nil { + return "", err + } + + if len(resp) == 0 { + return "", awserr.New("EC2MetadataError", "invalid Region response", nil) + } + + // returns region without the suffix. Eg: us-west-2a becomes us-west-2 + return resp[:len(resp)-1], nil +} + +// Available returns if the application has access to the EC2 Metadata service. 
+// Can be used to determine if the application is running within an EC2 Instance and
+// the metadata service is available.
+func (c *EC2Metadata) Available() bool {
+	if _, err := c.GetMetadata("instance-id"); err != nil {
+		return false
+	}
+
+	return true
+}
+
+// An EC2IAMInfo provides the shape for unmarshaling
+// an IAM info from the metadata API
+type EC2IAMInfo struct {
+	Code               string
+	LastUpdated        time.Time
+	InstanceProfileArn string
+	InstanceProfileID  string
+}
+
+// An EC2InstanceIdentityDocument provides the shape for unmarshaling
+// an instance identity document
+type EC2InstanceIdentityDocument struct {
+	DevpayProductCodes []string  `json:"devpayProductCodes"`
+	AvailabilityZone   string    `json:"availabilityZone"`
+	PrivateIP          string    `json:"privateIp"`
+	Version            string    `json:"version"`
+	Region             string    `json:"region"`
+	InstanceID         string    `json:"instanceId"`
+	BillingProducts    []string  `json:"billingProducts"`
+	InstanceType       string    `json:"instanceType"`
+	AccountID          string    `json:"accountId"`
+	PendingTime        time.Time `json:"pendingTime"`
+	ImageID            string    `json:"imageId"`
+	KernelID           string    `json:"kernelId"`
+	RamdiskID          string    `json:"ramdiskId"`
+	Architecture       string    `json:"architecture"`
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go
new file mode 100644
index 000000000..f0c1d31e7
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go
@@ -0,0 +1,152 @@
+// Package ec2metadata provides the client for making API calls to the
+// EC2 Metadata service.
+//
+// This package's client can be disabled completely by setting the environment
+// variable "AWS_EC2_METADATA_DISABLED=true". Setting this environment variable
+// to true (case insensitive) instructs the SDK to disable the EC2 Metadata
+// client. The client cannot be used while the environment variable is set to
+// true.
+package ec2metadata
+
+import (
+	"bytes"
+	"errors"
+	"io"
+	"net/http"
+	"os"
+	"strings"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/client/metadata"
+	"github.com/aws/aws-sdk-go/aws/corehandlers"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+// ServiceName is the name of the service.
+const ServiceName = "ec2metadata"
+const disableServiceEnvVar = "AWS_EC2_METADATA_DISABLED"
+
+// An EC2Metadata is an EC2 Metadata service Client.
+type EC2Metadata struct {
+	*client.Client
+}
+
+// New creates a new instance of the EC2Metadata client with a session.
+// This client is safe to use across multiple goroutines.
+//
+// Example:
+//	// Create an EC2Metadata client from just a session.
+//	svc := ec2metadata.New(mySession)
+//
+//	// Create an EC2Metadata client with additional configuration
+//	svc := ec2metadata.New(mySession, aws.NewConfig().WithLogLevel(aws.LogDebugHTTPBody))
func New(p client.ConfigProvider, cfgs ...*aws.Config) *EC2Metadata {
+	c := p.ClientConfig(ServiceName, cfgs...)
+	return NewClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// NewClient returns a new EC2Metadata client. Should be used to create
+// a client when not using a session. Generally using just New with a session
+// is preferred.
+//
+// If no HTTP client is provided, or the provided client is the unmodified
+// stdlib default, the EC2Metadata client's HTTP timeout will be shortened.
+// To disable this override, set Config.EC2MetadataDisableTimeoutOverride to
+// true. The override is enabled by default.
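+//
+// For illustration (the endpoint shown is the conventional instance metadata
+// address, an assumption here):
+//
+//	svc := ec2metadata.NewClient(cfg, handlers,
+//		"http://169.254.169.254/latest", "")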
+func NewClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string, opts ...func(*client.Client)) *EC2Metadata {
+	if !aws.BoolValue(cfg.EC2MetadataDisableTimeoutOverride) && httpClientZero(cfg.HTTPClient) {
+		// If the http client is unmodified and this feature is not disabled
+		// set custom timeouts for EC2Metadata requests.
+		cfg.HTTPClient = &http.Client{
+			// use a shorter timeout than default because the metadata
+			// service is local if it is running, and to fail faster
+			// if not running on an ec2 instance.
+			Timeout: 5 * time.Second,
+		}
+	}
+
+	svc := &EC2Metadata{
+		Client: client.New(
+			cfg,
+			metadata.ClientInfo{
+				ServiceName: ServiceName,
+				ServiceID:   ServiceName,
+				Endpoint:    endpoint,
+				APIVersion:  "latest",
+			},
+			handlers,
+		),
+	}
+
+	svc.Handlers.Unmarshal.PushBack(unmarshalHandler)
+	svc.Handlers.UnmarshalError.PushBack(unmarshalError)
+	svc.Handlers.Validate.Clear()
+	svc.Handlers.Validate.PushBack(validateEndpointHandler)
+
+	// Disable the EC2 Metadata service if the environment variable is set.
+	// This short-circuits the service's functionality to always fail to send
+	// requests.
+	if strings.ToLower(os.Getenv(disableServiceEnvVar)) == "true" {
+		svc.Handlers.Send.SwapNamed(request.NamedHandler{
+			Name: corehandlers.SendHandler.Name,
+			Fn: func(r *request.Request) {
+				r.HTTPResponse = &http.Response{
+					Header: http.Header{},
+				}
+				r.Error = awserr.New(
+					request.CanceledErrorCode,
+					"EC2 IMDS access disabled via "+disableServiceEnvVar+" env var",
+					nil)
+			},
+		})
+	}
+
+	// Add additional options to the service config
+	for _, option := range opts {
+		option(svc.Client)
+	}
+
+	return svc
+}
+
+func httpClientZero(c *http.Client) bool {
+	return c == nil || (c.Transport == nil && c.CheckRedirect == nil && c.Jar == nil && c.Timeout == 0)
+}
+
+type metadataOutput struct {
+	Content string
+}
+
+func unmarshalHandler(r *request.Request) {
+	defer r.HTTPResponse.Body.Close()
+	b := &bytes.Buffer{}
+	if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil {
+		r.Error = awserr.New(request.ErrCodeSerialization, "unable to unmarshal EC2 metadata response", err)
+		return
+	}
+
+	if data, ok := r.Data.(*metadataOutput); ok {
+		data.Content = b.String()
+	}
+}
+
+func unmarshalError(r *request.Request) {
+	defer r.HTTPResponse.Body.Close()
+	b := &bytes.Buffer{}
+	if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil {
+		r.Error = awserr.New(request.ErrCodeSerialization, "unable to unmarshal EC2 metadata error response", err)
+		return
+	}
+
+	// Response body format is not consistent between metadata endpoints.
+	// Grab the error message as a string and include that as the source error
+	r.Error = awserr.New("EC2MetadataError", "failed to make EC2Metadata request", errors.New(b.String()))
+}
+
+func validateEndpointHandler(r *request.Request) {
+	if r.ClientInfo.Endpoint == "" {
+		r.Error = aws.ErrMissingEndpoint
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go
new file mode 100644
index 000000000..87b9ff3ff
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go
@@ -0,0 +1,188 @@
+package endpoints
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+
+	"github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+type modelDefinition map[string]json.RawMessage
+
+// A DecodeModelOptions provides the options for how the endpoints model
+// definition is decoded.
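+//
+// For example, decoding a model while skipping the SDK's customizations:
+//
+//	resolver, err := endpoints.DecodeModel(r,
+//		func(o *endpoints.DecodeModelOptions) {
+//			o.SkipCustomizations = true
+//		})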
+type DecodeModelOptions struct { + SkipCustomizations bool +} + +// Set combines all of the option functions together. +func (d *DecodeModelOptions) Set(optFns ...func(*DecodeModelOptions)) { + for _, fn := range optFns { + fn(d) + } +} + +// DecodeModel unmarshals a Regions and Endpoint model definition file into +// a endpoint Resolver. If the file format is not supported, or an error occurs +// when unmarshaling the model an error will be returned. +// +// Casting the return value of this func to a EnumPartitions will +// allow you to get a list of the partitions in the order the endpoints +// will be resolved in. +// +// resolver, err := endpoints.DecodeModel(reader) +// +// partitions := resolver.(endpoints.EnumPartitions).Partitions() +// for _, p := range partitions { +// // ... inspect partitions +// } +func DecodeModel(r io.Reader, optFns ...func(*DecodeModelOptions)) (Resolver, error) { + var opts DecodeModelOptions + opts.Set(optFns...) + + // Get the version of the partition file to determine what + // unmarshaling model to use. + modelDef := modelDefinition{} + if err := json.NewDecoder(r).Decode(&modelDef); err != nil { + return nil, newDecodeModelError("failed to decode endpoints model", err) + } + + var version string + if b, ok := modelDef["version"]; ok { + version = string(b) + } else { + return nil, newDecodeModelError("endpoints version not found in model", nil) + } + + if version == "3" { + return decodeV3Endpoints(modelDef, opts) + } + + return nil, newDecodeModelError( + fmt.Sprintf("endpoints version %s, not supported", version), nil) +} + +func decodeV3Endpoints(modelDef modelDefinition, opts DecodeModelOptions) (Resolver, error) { + b, ok := modelDef["partitions"] + if !ok { + return nil, newDecodeModelError("endpoints model missing partitions", nil) + } + + ps := partitions{} + if err := json.Unmarshal(b, &ps); err != nil { + return nil, newDecodeModelError("failed to decode endpoints model", err) + } + + if opts.SkipCustomizations { + return ps, nil + } + + // Customization + for i := 0; i < len(ps); i++ { + p := &ps[i] + custAddEC2Metadata(p) + custAddS3DualStack(p) + custRmIotDataService(p) + custFixAppAutoscalingChina(p) + custFixAppAutoscalingUsGov(p) + } + + return ps, nil +} + +func custAddS3DualStack(p *partition) { + if p.ID != "aws" { + return + } + + custAddDualstack(p, "s3") + custAddDualstack(p, "s3-control") +} + +func custAddDualstack(p *partition, svcName string) { + s, ok := p.Services[svcName] + if !ok { + return + } + + s.Defaults.HasDualStack = boxedTrue + s.Defaults.DualStackHostname = "{service}.dualstack.{region}.{dnsSuffix}" + + p.Services[svcName] = s +} + +func custAddEC2Metadata(p *partition) { + p.Services["ec2metadata"] = service{ + IsRegionalized: boxedFalse, + PartitionEndpoint: "aws-global", + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "169.254.169.254/latest", + Protocols: []string{"http"}, + }, + }, + } +} + +func custRmIotDataService(p *partition) { + delete(p.Services, "data.iot") +} + +func custFixAppAutoscalingChina(p *partition) { + if p.ID != "aws-cn" { + return + } + + const serviceName = "application-autoscaling" + s, ok := p.Services[serviceName] + if !ok { + return + } + + const expectHostname = `autoscaling.{region}.amazonaws.com` + if e, a := s.Defaults.Hostname, expectHostname; e != a { + fmt.Printf("custFixAppAutoscalingChina: ignoring customization, expected %s, got %s\n", e, a) + return + } + + s.Defaults.Hostname = expectHostname + ".cn" + p.Services[serviceName] = s +} + +func 
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
new file mode 100644
index 000000000..5e6346d83
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
@@ -0,0 +1,4684 @@
+// Code generated by aws/endpoints/v3model_codegen.go. DO NOT EDIT.
+
+package endpoints
+
+import (
+	"regexp"
+)
+
+// Partition identifiers
+const (
+	AwsPartitionID      = "aws"        // AWS Standard partition.
+	AwsCnPartitionID    = "aws-cn"     // AWS China partition.
+	AwsUsGovPartitionID = "aws-us-gov" // AWS GovCloud (US) partition.
+)
+
+// AWS Standard partition's regions.
+const (
+	ApEast1RegionID      = "ap-east-1"      // Asia Pacific (Hong Kong).
+	ApNortheast1RegionID = "ap-northeast-1" // Asia Pacific (Tokyo).
+	ApNortheast2RegionID = "ap-northeast-2" // Asia Pacific (Seoul).
+	ApSouth1RegionID     = "ap-south-1"     // Asia Pacific (Mumbai).
+	ApSoutheast1RegionID = "ap-southeast-1" // Asia Pacific (Singapore).
+	ApSoutheast2RegionID = "ap-southeast-2" // Asia Pacific (Sydney).
+	CaCentral1RegionID   = "ca-central-1"   // Canada (Central).
+	EuCentral1RegionID   = "eu-central-1"   // EU (Frankfurt).
+	EuNorth1RegionID     = "eu-north-1"     // EU (Stockholm).
+	EuWest1RegionID      = "eu-west-1"      // EU (Ireland).
+	EuWest2RegionID      = "eu-west-2"      // EU (London).
+	EuWest3RegionID      = "eu-west-3"      // EU (Paris).
+	SaEast1RegionID      = "sa-east-1"      // South America (Sao Paulo).
+	UsEast1RegionID      = "us-east-1"      // US East (N. Virginia).
+	UsEast2RegionID      = "us-east-2"      // US East (Ohio).
+	UsWest1RegionID      = "us-west-1"      // US West (N. California).
+	UsWest2RegionID      = "us-west-2"      // US West (Oregon).
+)
+
+// AWS China partition's regions.
+const (
+	CnNorth1RegionID     = "cn-north-1"     // China (Beijing).
+	CnNorthwest1RegionID = "cn-northwest-1" // China (Ningxia).
+)
+
+// AWS GovCloud (US) partition's regions.
+const (
+	UsGovEast1RegionID = "us-gov-east-1" // AWS GovCloud (US-East).
+	UsGovWest1RegionID = "us-gov-west-1" // AWS GovCloud (US).
+)
+
+// DefaultResolver returns an Endpoint resolver that will be able
+// to resolve endpoints for: AWS Standard, AWS China, and AWS GovCloud (US).
+//
+// Use DefaultPartitions() to get the list of the default partitions.
+func DefaultResolver() Resolver {
+	return defaultPartitions
+}
+
+// DefaultPartitions returns a list of the partitions the SDK is bundled
+// with. The available partitions are: AWS Standard, AWS China, and AWS GovCloud (US).
+//
+// partitions := endpoints.DefaultPartitions()
+// for _, p := range partitions {
+// ...
inspect partitions +// } +func DefaultPartitions() []Partition { + return defaultPartitions.Partitions() +} + +var defaultPartitions = partitions{ + awsPartition, + awscnPartition, + awsusgovPartition, +} + +// AwsPartition returns the Resolver for AWS Standard. +func AwsPartition() Partition { + return awsPartition.Partition() +} + +var awsPartition = partition{ + ID: "aws", + Name: "AWS Standard", + DNSSuffix: "amazonaws.com", + RegionRegex: regionRegex{ + Regexp: func() *regexp.Regexp { + reg, _ := regexp.Compile("^(us|eu|ap|sa|ca)\\-\\w+\\-\\d+$") + return reg + }(), + }, + Defaults: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + Regions: regions{ + "ap-east-1": region{ + Description: "Asia Pacific (Hong Kong)", + }, + "ap-northeast-1": region{ + Description: "Asia Pacific (Tokyo)", + }, + "ap-northeast-2": region{ + Description: "Asia Pacific (Seoul)", + }, + "ap-south-1": region{ + Description: "Asia Pacific (Mumbai)", + }, + "ap-southeast-1": region{ + Description: "Asia Pacific (Singapore)", + }, + "ap-southeast-2": region{ + Description: "Asia Pacific (Sydney)", + }, + "ca-central-1": region{ + Description: "Canada (Central)", + }, + "eu-central-1": region{ + Description: "EU (Frankfurt)", + }, + "eu-north-1": region{ + Description: "EU (Stockholm)", + }, + "eu-west-1": region{ + Description: "EU (Ireland)", + }, + "eu-west-2": region{ + Description: "EU (London)", + }, + "eu-west-3": region{ + Description: "EU (Paris)", + }, + "sa-east-1": region{ + Description: "South America (Sao Paulo)", + }, + "us-east-1": region{ + Description: "US East (N. Virginia)", + }, + "us-east-2": region{ + Description: "US East (Ohio)", + }, + "us-west-1": region{ + Description: "US West (N. 
California)", + }, + "us-west-2": region{ + Description: "US West (Oregon)", + }, + }, + Services: services{ + "a4b": service{ + + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "acm": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "acm-pca": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "api.ecr": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{ + Hostname: "api.ecr.ap-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + }, + "ap-northeast-1": endpoint{ + Hostname: "api.ecr.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + "ap-northeast-2": endpoint{ + Hostname: "api.ecr.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + "ap-south-1": endpoint{ + Hostname: "api.ecr.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + "ap-southeast-1": endpoint{ + Hostname: "api.ecr.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + "ap-southeast-2": endpoint{ + Hostname: "api.ecr.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "ca-central-1": endpoint{ + Hostname: "api.ecr.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "eu-central-1": endpoint{ + Hostname: "api.ecr.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "eu-north-1": endpoint{ + Hostname: "api.ecr.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, + "eu-west-1": endpoint{ + Hostname: "api.ecr.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "eu-west-2": endpoint{ + Hostname: "api.ecr.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + "eu-west-3": endpoint{ + Hostname: "api.ecr.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + "sa-east-1": endpoint{ + Hostname: "api.ecr.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + "us-east-1": endpoint{ + Hostname: "api.ecr.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{ + Hostname: "api.ecr.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{ + 
Hostname: "api.ecr.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{ + Hostname: "api.ecr.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "api.mediatailor": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "api.pricing": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "pricing", + }, + }, + Endpoints: endpoints{ + "ap-south-1": endpoint{}, + "us-east-1": endpoint{}, + }, + }, + "api.sagemaker": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "api-fips.sagemaker.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "api-fips.sagemaker.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "api-fips.sagemaker.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "api-fips.sagemaker.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "apigateway": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "application-autoscaling": service{ + Defaults: endpoint{ + Hostname: "autoscaling.{region}.amazonaws.com", + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "application-autoscaling", + }, + }, + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "appmesh": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "appstream2": service{ + 
Defaults: endpoint{ + Protocols: []string{"https"}, + CredentialScope: credentialScope{ + Service: "appstream", + }, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "appsync": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "athena": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "autoscaling": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "autoscaling-plans": service{ + Defaults: endpoint{ + Hostname: "autoscaling.{region}.amazonaws.com", + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "autoscaling-plans", + }, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "backup": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "batch": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "budgets": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + 
Hostname: "budgets.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "ce": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "ce.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "chime": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + Defaults: endpoint{ + SSLCommonName: "service.chime.aws.amazon.com", + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "service.chime.aws.amazon.com", + Protocols: []string{"https"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "cloud9": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "clouddirectory": service{ + + Endpoints: endpoints{ + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cloudformation": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cloudfront": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "cloudfront.amazonaws.com", + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "cloudhsm": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cloudhsmv2": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "cloudhsm", + }, + }, + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cloudsearch": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cloudtrail": service{ + + 
Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "codebuild": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "codebuild-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "codebuild-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "codebuild-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "codebuild-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "codecommit": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips": endpoint{ + Hostname: "codecommit-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "codedeploy": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "codedeploy-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "codedeploy-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "codedeploy-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "codedeploy-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "codepipeline": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + 
"ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "codestar": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cognito-identity": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cognito-idp": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cognito-sync": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "comprehend": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "comprehendmedical": service{ + + Endpoints: endpoints{ + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "config": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cur": service{ + + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "data.mediastore": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": 
endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "datapipeline": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "datasync": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "dax": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "devicefarm": service{ + + Endpoints: endpoints{ + "us-west-2": endpoint{}, + }, + }, + "directconnect": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "discovery": service{ + + Endpoints: endpoints{ + "us-west-2": endpoint{}, + }, + }, + "dms": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "docdb": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{ + Hostname: "rds.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + "ap-northeast-2": endpoint{ + Hostname: "rds.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + "ap-southeast-2": endpoint{ + Hostname: "rds.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "eu-central-1": endpoint{ + Hostname: "rds.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "eu-west-1": endpoint{ + Hostname: "rds.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "us-east-1": endpoint{ + Hostname: "rds.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{ + Hostname: "rds.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-2": endpoint{ + Hostname: "rds.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "ds": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": 
endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "dynamodb": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "ca-central-1-fips": endpoint{ + Hostname: "dynamodb-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "local": endpoint{ + Hostname: "localhost:8000", + Protocols: []string{"http"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "dynamodb-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "dynamodb-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "dynamodb-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "dynamodb-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "ec2": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "ec2metadata": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "169.254.169.254/latest", + Protocols: []string{"http"}, + }, + }, + }, + "ecs": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "elasticache": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": 
endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips": endpoint{ + Hostname: "elasticache-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "elasticbeanstalk": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "elasticfilesystem": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "elasticloadbalancing": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "elasticmapreduce": service{ + Defaults: endpoint{ + SSLCommonName: "{region}.{service}.{dnsSuffix}", + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{ + SSLCommonName: "{service}.{region}.{dnsSuffix}", + }, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{ + SSLCommonName: "{service}.{region}.{dnsSuffix}", + }, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "elastictranscoder": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "email": service{ + + Endpoints: endpoints{ + "ap-south-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "entitlement.marketplace": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "aws-marketplace", + }, + }, + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "es": 
service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips": endpoint{ + Hostname: "es-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "events": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "firehose": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "fms": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "fsx": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "gamelift": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "glacier": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + 
"glue": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "greengrass": service{ + IsRegionalized: boxedTrue, + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "groundstation": service{ + + Endpoints: endpoints{ + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "guardduty": service{ + IsRegionalized: boxedTrue, + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "health": service{ + + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "iam": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "iam.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "importexport": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "importexport.amazonaws.com", + SignatureVersions: []string{"v2", "v4"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + Service: "IngestionService", + }, + }, + }, + }, + "inspector": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "iot": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "execute-api", + }, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "iotanalytics": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, 
+ "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "iotthingsgraph": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "iotthingsgraph", + }, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "kafka": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "kinesis": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "kinesisanalytics": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "kinesisvideo": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "kms": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "lambda": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "license-manager": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "lightsail": service{ + + 
Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "logs": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "machinelearning": service{ + + Endpoints: endpoints{ + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + }, + }, + "marketplacecommerceanalytics": service{ + + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "mediaconnect": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "mediaconvert": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "medialive": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "mediapackage": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "mediastore": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "metering.marketplace": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "aws-marketplace", + }, + }, + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": 
endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "mgh": service{ + + Endpoints: endpoints{ + "us-west-2": endpoint{}, + }, + }, + "mobileanalytics": service{ + + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "models.lex": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "lex", + }, + }, + Endpoints: endpoints{ + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "monitoring": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "mq": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "mturk-requester": service{ + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "sandbox": endpoint{ + Hostname: "mturk-requester-sandbox.us-east-1.amazonaws.com", + }, + "us-east-1": endpoint{}, + }, + }, + "neptune": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{ + Hostname: "rds.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + "ap-northeast-2": endpoint{ + Hostname: "rds.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + "ap-south-1": endpoint{ + Hostname: "rds.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + "ap-southeast-1": endpoint{ + Hostname: "rds.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + "ap-southeast-2": endpoint{ + Hostname: "rds.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "eu-central-1": endpoint{ + Hostname: "rds.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "eu-north-1": endpoint{ + Hostname: "rds.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, + "eu-west-1": endpoint{ + Hostname: "rds.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "eu-west-2": endpoint{ + Hostname: "rds.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + "us-east-1": endpoint{ + Hostname: "rds.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{ + Hostname: 
"rds.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-2": endpoint{ + Hostname: "rds.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "opsworks": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "opsworks-cm": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "organizations": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "organizations.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "pinpoint": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "mobiletargeting", + }, + }, + Endpoints: endpoints{ + "ap-south-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "polly": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "projects.iot1click": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "ram": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "rds": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{ + SSLCommonName: "{service}.{dnsSuffix}", + }, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "redshift": service{ + + 
Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "rekognition": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "resource-groups": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "robomaker": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "route53": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "route53.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "route53domains": service{ + + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "route53resolver": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "runtime.lex": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "lex", + }, + }, + Endpoints: endpoints{ + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "runtime.sagemaker": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "runtime-fips.sagemaker.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "runtime-fips.sagemaker.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: 
"runtime-fips.sagemaker.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "runtime-fips.sagemaker.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "s3": service{ + PartitionEndpoint: "us-east-1", + IsRegionalized: boxedTrue, + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + + HasDualStack: boxedTrue, + DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", + }, + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{ + Hostname: "s3.ap-northeast-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{ + Hostname: "s3.ap-southeast-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "ap-southeast-2": endpoint{ + Hostname: "s3.ap-southeast-2.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{ + Hostname: "s3.eu-west-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "s3-external-1": endpoint{ + Hostname: "s3-external-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "sa-east-1": endpoint{ + Hostname: "s3.sa-east-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "us-east-1": endpoint{ + Hostname: "s3.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "us-east-2": endpoint{}, + "us-west-1": endpoint{ + Hostname: "s3.us-west-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "us-west-2": endpoint{ + Hostname: "s3.us-west-2.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + }, + }, + "s3-control": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + SignatureVersions: []string{"s3v4"}, + + HasDualStack: boxedTrue, + DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{ + Hostname: "s3-control.ap-northeast-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + "ap-northeast-2": endpoint{ + Hostname: "s3-control.ap-northeast-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + "ap-south-1": endpoint{ + Hostname: "s3-control.ap-south-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + "ap-southeast-1": endpoint{ + Hostname: "s3-control.ap-southeast-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + "ap-southeast-2": endpoint{ + Hostname: "s3-control.ap-southeast-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "ca-central-1": endpoint{ + Hostname: "s3-control.ca-central-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "eu-central-1": endpoint{ + Hostname: "s3-control.eu-central-1.amazonaws.com", + SignatureVersions: 
[]string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "eu-north-1": endpoint{ + Hostname: "s3-control.eu-north-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, + "eu-west-1": endpoint{ + Hostname: "s3-control.eu-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "eu-west-2": endpoint{ + Hostname: "s3-control.eu-west-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + "eu-west-3": endpoint{ + Hostname: "s3-control.eu-west-3.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + "sa-east-1": endpoint{ + Hostname: "s3-control.sa-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + "us-east-1": endpoint{ + Hostname: "s3-control.us-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-1-fips": endpoint{ + Hostname: "s3-control-fips.us-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{ + Hostname: "s3-control.us-east-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-east-2-fips": endpoint{ + Hostname: "s3-control-fips.us-east-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{ + Hostname: "s3-control.us-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-1-fips": endpoint{ + Hostname: "s3-control-fips.us-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{ + Hostname: "s3-control.us-west-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "us-west-2-fips": endpoint{ + Hostname: "s3-control-fips.us-west-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "sdb": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"v2"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{ + Hostname: "sdb.amazonaws.com", + }, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "secretsmanager": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "secretsmanager-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": 
endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "secretsmanager-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "secretsmanager-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "secretsmanager-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "securityhub": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "serverlessrepo": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{ + Protocols: []string{"https"}, + }, + "ap-northeast-2": endpoint{ + Protocols: []string{"https"}, + }, + "ap-south-1": endpoint{ + Protocols: []string{"https"}, + }, + "ap-southeast-1": endpoint{ + Protocols: []string{"https"}, + }, + "ap-southeast-2": endpoint{ + Protocols: []string{"https"}, + }, + "ca-central-1": endpoint{ + Protocols: []string{"https"}, + }, + "eu-central-1": endpoint{ + Protocols: []string{"https"}, + }, + "eu-north-1": endpoint{ + Protocols: []string{"https"}, + }, + "eu-west-1": endpoint{ + Protocols: []string{"https"}, + }, + "eu-west-2": endpoint{ + Protocols: []string{"https"}, + }, + "eu-west-3": endpoint{ + Protocols: []string{"https"}, + }, + "sa-east-1": endpoint{ + Protocols: []string{"https"}, + }, + "us-east-1": endpoint{ + Protocols: []string{"https"}, + }, + "us-east-2": endpoint{ + Protocols: []string{"https"}, + }, + "us-west-1": endpoint{ + Protocols: []string{"https"}, + }, + "us-west-2": endpoint{ + Protocols: []string{"https"}, + }, + }, + }, + "servicecatalog": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "servicecatalog-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "servicecatalog-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "servicecatalog-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "servicecatalog-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "servicediscovery": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + 
"ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "shield": service{ + IsRegionalized: boxedFalse, + Defaults: endpoint{ + SSLCommonName: "shield.us-east-1.amazonaws.com", + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "sms": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "snowball": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "sns": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "sqs": service{ + Defaults: endpoint{ + SSLCommonName: "{region}.queue.{dnsSuffix}", + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "sqs-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "sqs-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "sqs-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "sqs-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{ + SSLCommonName: "queue.{dnsSuffix}", + }, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "ssm": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": 
endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "states": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "storagegateway": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "streams.dynamodb": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "dynamodb", + }, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "ca-central-1-fips": endpoint{ + Hostname: "dynamodb-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "local": endpoint{ + Hostname: "localhost:8000", + Protocols: []string{"http"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "dynamodb-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "dynamodb-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "dynamodb-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "dynamodb-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "sts": service{ + PartitionEndpoint: "aws-global", + Defaults: endpoint{ + Hostname: "sts.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + Endpoints: endpoints{ + "ap-east-1": endpoint{ + Hostname: "sts.ap-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + }, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{ + Hostname: "sts.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + 
Region: "ap-northeast-2", + }, + }, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "aws-global": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "sts-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "sts-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "sts-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "sts-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "support": service{ + PartitionEndpoint: "aws-global", + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "support.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "swf": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "tagging": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "transfer": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "translate": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "translate-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "translate-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-2": endpoint{}, + 
"us-west-2-fips": endpoint{ + Hostname: "translate-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "waf": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "waf.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "waf-regional": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "workdocs": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "workmail": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "workspaces": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "xray": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + }, +} + +// AwsCnPartition returns the Resolver for AWS China. 
+func AwsCnPartition() Partition { + return awscnPartition.Partition() +} + +var awscnPartition = partition{ + ID: "aws-cn", + Name: "AWS China", + DNSSuffix: "amazonaws.com.cn", + RegionRegex: regionRegex{ + Regexp: func() *regexp.Regexp { + reg, _ := regexp.Compile("^cn\\-\\w+\\-\\d+$") + return reg + }(), + }, + Defaults: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + Regions: regions{ + "cn-north-1": region{ + Description: "China (Beijing)", + }, + "cn-northwest-1": region{ + Description: "China (Ningxia)", + }, + }, + Services: services{ + "api.ecr": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{ + Hostname: "api.ecr.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + "cn-northwest-1": endpoint{ + Hostname: "api.ecr.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "apigateway": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "application-autoscaling": service{ + Defaults: endpoint{ + Hostname: "autoscaling.{region}.amazonaws.com.cn", + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "application-autoscaling", + }, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "autoscaling": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "cloudformation": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "cloudfront": service{ + PartitionEndpoint: "aws-cn-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-cn-global": endpoint{ + Hostname: "cloudfront.cn-northwest-1.amazonaws.com.cn", + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "cloudtrail": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "codebuild": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "codedeploy": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "cognito-identity": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "config": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "directconnect": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "dms": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "ds": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "dynamodb": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "ec2": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "ec2metadata": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: 
"169.254.169.254/latest", + Protocols: []string{"http"}, + }, + }, + }, + "ecs": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "elasticache": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "elasticbeanstalk": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "elasticloadbalancing": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "elasticmapreduce": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "es": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "events": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "firehose": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "gamelift": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "glacier": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "greengrass": service{ + IsRegionalized: boxedTrue, + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "iam": service{ + PartitionEndpoint: "aws-cn-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-cn-global": endpoint{ + Hostname: "iam.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + }, + }, + "iot": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "execute-api", + }, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "kinesis": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "kms": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "lambda": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "license-manager": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "logs": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "mediaconvert": service{ + + Endpoints: endpoints{ + "cn-northwest-1": endpoint{ + Hostname: "subscribe.mediaconvert.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "monitoring": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "polly": service{ + + Endpoints: endpoints{ + "cn-northwest-1": endpoint{}, + }, + }, + "rds": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "redshift": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "s3": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + 
"cn-northwest-1": endpoint{}, + }, + }, + "s3-control": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + SignatureVersions: []string{"s3v4"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{ + Hostname: "s3-control.cn-north-1.amazonaws.com.cn", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + "cn-northwest-1": endpoint{ + Hostname: "s3-control.cn-northwest-1.amazonaws.com.cn", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "sms": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "snowball": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "sns": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "sqs": service{ + Defaults: endpoint{ + SSLCommonName: "{region}.queue.{dnsSuffix}", + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "ssm": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "states": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "storagegateway": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "streams.dynamodb": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "dynamodb", + }, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "sts": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "support": service{ + PartitionEndpoint: "aws-cn-global", + + Endpoints: endpoints{ + "aws-cn-global": endpoint{ + Hostname: "support.cn-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + }, + }, + "swf": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "tagging": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + }, +} + +// AwsUsGovPartition returns the Resolver for AWS GovCloud (US). 
+func AwsUsGovPartition() Partition { + return awsusgovPartition.Partition() +} + +var awsusgovPartition = partition{ + ID: "aws-us-gov", + Name: "AWS GovCloud (US)", + DNSSuffix: "amazonaws.com", + RegionRegex: regionRegex{ + Regexp: func() *regexp.Regexp { + reg, _ := regexp.Compile("^us\\-gov\\-\\w+\\-\\d+$") + return reg + }(), + }, + Defaults: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + Regions: regions{ + "us-gov-east-1": region{ + Description: "AWS GovCloud (US-East)", + }, + "us-gov-west-1": region{ + Description: "AWS GovCloud (US)", + }, + }, + Services: services{ + "acm": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "acm-pca": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "api.ecr": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "api.ecr.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "api.ecr.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "api.sagemaker": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "apigateway": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "application-autoscaling": service{ + Defaults: endpoint{ + Hostname: "autoscaling.{region}.amazonaws.com", + CredentialScope: credentialScope{ + Service: "application-autoscaling", + }, + }, + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "athena": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "autoscaling": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "clouddirectory": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "cloudformation": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "cloudhsm": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "cloudhsmv2": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "cloudhsm", + }, + }, + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "cloudtrail": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "codebuild": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "codecommit": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "codedeploy": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-east-1-fips": endpoint{ + Hostname: "codedeploy-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "codedeploy-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "comprehend": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + 
Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "config": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "datasync": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "directconnect": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "dms": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "ds": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "dynamodb": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-east-1-fips": endpoint{ + Hostname: "dynamodb.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "dynamodb.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "ec2": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "ec2metadata": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "169.254.169.254/latest", + Protocols: []string{"http"}, + }, + }, + }, + "ecs": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "elasticache": service{ + + Endpoints: endpoints{ + "fips": endpoint{ + Hostname: "elasticache-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "elasticbeanstalk": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "elasticfilesystem": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "elasticloadbalancing": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "elasticmapreduce": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{ + Protocols: []string{"https"}, + }, + }, + }, + "es": service{ + + Endpoints: endpoints{ + "fips": endpoint{ + Hostname: "es-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "events": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "firehose": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "glacier": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "glue": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "greengrass": service{ + IsRegionalized: boxedTrue, + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "guardduty": service{ + IsRegionalized: boxedTrue, + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "iam": service{ + PartitionEndpoint: 
"aws-us-gov-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-us-gov-global": endpoint{ + Hostname: "iam.us-gov.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "inspector": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "iot": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "execute-api", + }, + }, + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "kinesis": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "kms": service{ + + Endpoints: endpoints{ + "ProdFips": endpoint{ + Hostname: "kms-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "lambda": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "license-manager": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "logs": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "mediaconvert": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "metering.marketplace": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "aws-marketplace", + }, + }, + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "monitoring": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "organizations": service{ + PartitionEndpoint: "aws-us-gov-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-us-gov-global": endpoint{ + Hostname: "organizations.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "polly": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "ram": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "rds": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "redshift": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "rekognition": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "runtime.sagemaker": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "s3": service{ + Defaults: endpoint{ + SignatureVersions: []string{"s3", "s3v4"}, + }, + Endpoints: endpoints{ + "fips-us-gov-west-1": endpoint{ + Hostname: "s3-fips-us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{ + Hostname: "s3.us-gov-east-1.amazonaws.com", + Protocols: []string{"http", "https"}, + }, + "us-gov-west-1": endpoint{ + Hostname: "s3.us-gov-west-1.amazonaws.com", + Protocols: []string{"http", "https"}, + }, + }, + }, + "s3-control": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + SignatureVersions: []string{"s3v4"}, + }, + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "s3-control.us-gov-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-east-1-fips": endpoint{ + Hostname: "s3-control-fips.us-gov-east-1.amazonaws.com", + 
SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "s3-control.us-gov-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1-fips": endpoint{ + Hostname: "s3-control-fips.us-gov-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "secretsmanager": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "secretsmanager-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "serverlessrepo": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "us-gov-west-1": endpoint{ + Protocols: []string{"https"}, + }, + }, + }, + "sms": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "snowball": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "sns": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "sqs": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{ + SSLCommonName: "{region}.queue.{dnsSuffix}", + Protocols: []string{"http", "https"}, + }, + }, + }, + "ssm": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "states": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "storagegateway": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "streams.dynamodb": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "dynamodb", + }, + }, + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-east-1-fips": endpoint{ + Hostname: "dynamodb.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "dynamodb.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "sts": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "swf": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "tagging": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "translate": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "translate-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "waf-regional": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "workspaces": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + }, +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/dep_service_ids.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/dep_service_ids.go new file mode 100644 index 000000000..ca8fc828e --- /dev/null +++ 
b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/dep_service_ids.go @@ -0,0 +1,141 @@ +package endpoints + +// Service identifiers +// +// Deprecated: Use client package's EndpointsID value instead of these +// ServiceIDs. These IDs are not maintained, and are out of date. +const ( + A4bServiceID = "a4b" // A4b. + AcmServiceID = "acm" // Acm. + AcmPcaServiceID = "acm-pca" // AcmPca. + ApiMediatailorServiceID = "api.mediatailor" // ApiMediatailor. + ApiPricingServiceID = "api.pricing" // ApiPricing. + ApiSagemakerServiceID = "api.sagemaker" // ApiSagemaker. + ApigatewayServiceID = "apigateway" // Apigateway. + ApplicationAutoscalingServiceID = "application-autoscaling" // ApplicationAutoscaling. + Appstream2ServiceID = "appstream2" // Appstream2. + AppsyncServiceID = "appsync" // Appsync. + AthenaServiceID = "athena" // Athena. + AutoscalingServiceID = "autoscaling" // Autoscaling. + AutoscalingPlansServiceID = "autoscaling-plans" // AutoscalingPlans. + BatchServiceID = "batch" // Batch. + BudgetsServiceID = "budgets" // Budgets. + CeServiceID = "ce" // Ce. + ChimeServiceID = "chime" // Chime. + Cloud9ServiceID = "cloud9" // Cloud9. + ClouddirectoryServiceID = "clouddirectory" // Clouddirectory. + CloudformationServiceID = "cloudformation" // Cloudformation. + CloudfrontServiceID = "cloudfront" // Cloudfront. + CloudhsmServiceID = "cloudhsm" // Cloudhsm. + Cloudhsmv2ServiceID = "cloudhsmv2" // Cloudhsmv2. + CloudsearchServiceID = "cloudsearch" // Cloudsearch. + CloudtrailServiceID = "cloudtrail" // Cloudtrail. + CodebuildServiceID = "codebuild" // Codebuild. + CodecommitServiceID = "codecommit" // Codecommit. + CodedeployServiceID = "codedeploy" // Codedeploy. + CodepipelineServiceID = "codepipeline" // Codepipeline. + CodestarServiceID = "codestar" // Codestar. + CognitoIdentityServiceID = "cognito-identity" // CognitoIdentity. + CognitoIdpServiceID = "cognito-idp" // CognitoIdp. + CognitoSyncServiceID = "cognito-sync" // CognitoSync. + ComprehendServiceID = "comprehend" // Comprehend. + ConfigServiceID = "config" // Config. + CurServiceID = "cur" // Cur. + DatapipelineServiceID = "datapipeline" // Datapipeline. + DaxServiceID = "dax" // Dax. + DevicefarmServiceID = "devicefarm" // Devicefarm. + DirectconnectServiceID = "directconnect" // Directconnect. + DiscoveryServiceID = "discovery" // Discovery. + DmsServiceID = "dms" // Dms. + DsServiceID = "ds" // Ds. + DynamodbServiceID = "dynamodb" // Dynamodb. + Ec2ServiceID = "ec2" // Ec2. + Ec2metadataServiceID = "ec2metadata" // Ec2metadata. + EcrServiceID = "ecr" // Ecr. + EcsServiceID = "ecs" // Ecs. + ElasticacheServiceID = "elasticache" // Elasticache. + ElasticbeanstalkServiceID = "elasticbeanstalk" // Elasticbeanstalk. + ElasticfilesystemServiceID = "elasticfilesystem" // Elasticfilesystem. + ElasticloadbalancingServiceID = "elasticloadbalancing" // Elasticloadbalancing. + ElasticmapreduceServiceID = "elasticmapreduce" // Elasticmapreduce. + ElastictranscoderServiceID = "elastictranscoder" // Elastictranscoder. + EmailServiceID = "email" // Email. + EntitlementMarketplaceServiceID = "entitlement.marketplace" // EntitlementMarketplace. + EsServiceID = "es" // Es. + EventsServiceID = "events" // Events. + FirehoseServiceID = "firehose" // Firehose. + FmsServiceID = "fms" // Fms. + GameliftServiceID = "gamelift" // Gamelift. + GlacierServiceID = "glacier" // Glacier. + GlueServiceID = "glue" // Glue. + GreengrassServiceID = "greengrass" // Greengrass. + GuarddutyServiceID = "guardduty" // Guardduty. + HealthServiceID = "health" // Health. 
+ IamServiceID = "iam" // Iam. + ImportexportServiceID = "importexport" // Importexport. + InspectorServiceID = "inspector" // Inspector. + IotServiceID = "iot" // Iot. + IotanalyticsServiceID = "iotanalytics" // Iotanalytics. + KinesisServiceID = "kinesis" // Kinesis. + KinesisanalyticsServiceID = "kinesisanalytics" // Kinesisanalytics. + KinesisvideoServiceID = "kinesisvideo" // Kinesisvideo. + KmsServiceID = "kms" // Kms. + LambdaServiceID = "lambda" // Lambda. + LightsailServiceID = "lightsail" // Lightsail. + LogsServiceID = "logs" // Logs. + MachinelearningServiceID = "machinelearning" // Machinelearning. + MarketplacecommerceanalyticsServiceID = "marketplacecommerceanalytics" // Marketplacecommerceanalytics. + MediaconvertServiceID = "mediaconvert" // Mediaconvert. + MedialiveServiceID = "medialive" // Medialive. + MediapackageServiceID = "mediapackage" // Mediapackage. + MediastoreServiceID = "mediastore" // Mediastore. + MeteringMarketplaceServiceID = "metering.marketplace" // MeteringMarketplace. + MghServiceID = "mgh" // Mgh. + MobileanalyticsServiceID = "mobileanalytics" // Mobileanalytics. + ModelsLexServiceID = "models.lex" // ModelsLex. + MonitoringServiceID = "monitoring" // Monitoring. + MturkRequesterServiceID = "mturk-requester" // MturkRequester. + NeptuneServiceID = "neptune" // Neptune. + OpsworksServiceID = "opsworks" // Opsworks. + OpsworksCmServiceID = "opsworks-cm" // OpsworksCm. + OrganizationsServiceID = "organizations" // Organizations. + PinpointServiceID = "pinpoint" // Pinpoint. + PollyServiceID = "polly" // Polly. + RdsServiceID = "rds" // Rds. + RedshiftServiceID = "redshift" // Redshift. + RekognitionServiceID = "rekognition" // Rekognition. + ResourceGroupsServiceID = "resource-groups" // ResourceGroups. + Route53ServiceID = "route53" // Route53. + Route53domainsServiceID = "route53domains" // Route53domains. + RuntimeLexServiceID = "runtime.lex" // RuntimeLex. + RuntimeSagemakerServiceID = "runtime.sagemaker" // RuntimeSagemaker. + S3ServiceID = "s3" // S3. + S3ControlServiceID = "s3-control" // S3Control. + SagemakerServiceID = "api.sagemaker" // Sagemaker. + SdbServiceID = "sdb" // Sdb. + SecretsmanagerServiceID = "secretsmanager" // Secretsmanager. + ServerlessrepoServiceID = "serverlessrepo" // Serverlessrepo. + ServicecatalogServiceID = "servicecatalog" // Servicecatalog. + ServicediscoveryServiceID = "servicediscovery" // Servicediscovery. + ShieldServiceID = "shield" // Shield. + SmsServiceID = "sms" // Sms. + SnowballServiceID = "snowball" // Snowball. + SnsServiceID = "sns" // Sns. + SqsServiceID = "sqs" // Sqs. + SsmServiceID = "ssm" // Ssm. + StatesServiceID = "states" // States. + StoragegatewayServiceID = "storagegateway" // Storagegateway. + StreamsDynamodbServiceID = "streams.dynamodb" // StreamsDynamodb. + StsServiceID = "sts" // Sts. + SupportServiceID = "support" // Support. + SwfServiceID = "swf" // Swf. + TaggingServiceID = "tagging" // Tagging. + TransferServiceID = "transfer" // Transfer. + TranslateServiceID = "translate" // Translate. + WafServiceID = "waf" // Waf. + WafRegionalServiceID = "waf-regional" // WafRegional. + WorkdocsServiceID = "workdocs" // Workdocs. + WorkmailServiceID = "workmail" // Workmail. + WorkspacesServiceID = "workspaces" // Workspaces. + XrayServiceID = "xray" // Xray. 
+)
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go
new file mode 100644
index 000000000..84316b92c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go
@@ -0,0 +1,66 @@
+// Package endpoints provides the types and functionality for defining regions
+// and endpoints, as well as querying those definitions.
+//
+// The SDK's Regions and Endpoints metadata is code generated into the endpoints
+// package, and is accessible via the DefaultResolver function. This function
+// returns an endpoint Resolver that will search the metadata and build an
+// associated endpoint if one is found. The default resolver will search all
+// partitions known by the SDK, e.g. AWS Standard (aws), AWS China (aws-cn),
+// and AWS GovCloud (US) (aws-us-gov).
+//
+// Enumerating Regions and Endpoint Metadata
+//
+// Casting the Resolver returned by DefaultResolver to an EnumPartitions interface
+// will allow you to get access to the list of underlying Partitions with the
+// Partitions method. This is helpful if you want to limit the SDK's endpoint
+// resolving to a single partition, or enumerate regions, services, and endpoints
+// in the partition.
+//
+//     resolver := endpoints.DefaultResolver()
+//     partitions := resolver.(endpoints.EnumPartitions).Partitions()
+//
+//     for _, p := range partitions {
+//         fmt.Println("Regions for", p.ID())
+//         for id := range p.Regions() {
+//             fmt.Println("*", id)
+//         }
+//
+//         fmt.Println("Services for", p.ID())
+//         for id := range p.Services() {
+//             fmt.Println("*", id)
+//         }
+//     }
+//
+// Using Custom Endpoints
+//
+// The endpoints package also gives you the ability to use your own logic for
+// how endpoints are resolved. This is a great way to define a custom endpoint
+// for select services, without passing that logic down through your code.
+//
+// If a type implements the Resolver interface it can be used to resolve
+// endpoints. To use this with the SDK's Session and Config set the value
+// of the type to the EndpointResolver field of aws.Config when initializing
+// the session, or service client.
+//
+// In addition, the ResolverFunc is a wrapper for a func matching the signature
+// of Resolver.EndpointFor, converting it to a type that satisfies the
+// Resolver interface.
+//
+//     myCustomResolver := func(service, region string, optFns ...func(*endpoints.Options)) (endpoints.ResolvedEndpoint, error) {
+//         if service == endpoints.S3ServiceID {
+//             return endpoints.ResolvedEndpoint{
+//                 URL:           "s3.custom.endpoint.com",
+//                 SigningRegion: "custom-signing-region",
+//             }, nil
+//         }
+//
+//         return endpoints.DefaultResolver().EndpointFor(service, region, optFns...)
+//     }
+//
+//     sess := session.Must(session.NewSession(&aws.Config{
+//         Region:           aws.String("us-west-2"),
+//         EndpointResolver: endpoints.ResolverFunc(myCustomResolver),
+//     }))
+package endpoints
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go
new file mode 100644
index 000000000..f82babf6f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go
@@ -0,0 +1,449 @@
+package endpoints
+
+import (
+    "fmt"
+    "regexp"
+
+    "github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+// Options provide the configuration needed to direct how the
+// endpoints will be resolved.
+type Options struct {
+    // DisableSSL forces the endpoint to be resolved as HTTP
+    // instead of HTTPS if the service supports it.
+    DisableSSL bool
+
+    // Sets the resolver to resolve the endpoint as a dualstack endpoint
+    // for the service. If dualstack support for a service is not known and
+    // StrictMatching is not enabled, a dualstack endpoint for the service will
+    // be returned. This endpoint may not be valid. If StrictMatching is
+    // enabled, only services that are known to support dualstack will return
+    // dualstack endpoints.
+    UseDualStack bool
+
+    // Enables strict matching of services and regions when resolving endpoints.
+    // If the partition doesn't enumerate the exact service and region, an
+    // error will be returned. This option will prevent returning endpoints
+    // that look valid, but may not resolve to any real endpoint.
+    StrictMatching bool
+
+    // Enables resolving a service endpoint based on the region provided if the
+    // service does not exist. The service endpoint ID will be used as the service
+    // domain name prefix. By default the endpoint resolver requires the service
+    // to be known when resolving endpoints.
+    //
+    // If resolving an endpoint on the partition list, the provided region will
+    // be used to determine which partition's domain name pattern to combine the
+    // service endpoint ID with. If both the service and region are unknown when
+    // resolving the endpoint on the partition list, an UnknownEndpointError will
+    // be returned.
+    //
+    // If resolving an endpoint on a partition-specific resolver, that partition's
+    // domain name pattern will be used with the service endpoint ID. If both
+    // region and service do not exist when resolving an endpoint on a specific
+    // partition, the partition's domain pattern will be used to combine the
+    // endpoint and region together.
+    //
+    // This option is ignored if StrictMatching is enabled.
+    ResolveUnknownService bool
+}
+
+// Set combines all of the option functions together.
+func (o *Options) Set(optFns ...func(*Options)) {
+    for _, fn := range optFns {
+        fn(o)
+    }
+}
+
+// DisableSSLOption sets the DisableSSL option. Can be used as a functional
+// option when resolving endpoints.
+func DisableSSLOption(o *Options) {
+    o.DisableSSL = true
+}
+
+// UseDualStackOption sets the UseDualStack option. Can be used as a functional
+// option when resolving endpoints.
+func UseDualStackOption(o *Options) {
+    o.UseDualStack = true
+}
+
+// StrictMatchingOption sets the StrictMatching option. Can be used as a functional
+// option when resolving endpoints.
+func StrictMatchingOption(o *Options) {
+    o.StrictMatching = true
+}
+
+// ResolveUnknownServiceOption sets the ResolveUnknownService option. Can be used
+// as a functional option when resolving endpoints.
+func ResolveUnknownServiceOption(o *Options) {
+    o.ResolveUnknownService = true
+}
+
+// A Resolver provides the interface for functionality to resolve endpoints.
+// The built-in Partition and DefaultResolver return values satisfy this interface.
+type Resolver interface {
+    EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error)
+}
+
+// ResolverFunc is a helper utility that wraps a function so it satisfies the
+// Resolver interface. This is useful when you want to add additional endpoint
+// resolving logic, or stub out specific endpoints with custom values.
+type ResolverFunc func(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error)
+
+// EndpointFor wraps the ResolverFunc function to satisfy the Resolver interface.
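+//
+// For illustration, a minimal sketch of a stubbed resolver (the URL below is
+// an assumption, not a real endpoint):
+//
+//     custom := ResolverFunc(func(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) {
+//         return ResolvedEndpoint{URL: "https://localhost:8080", SigningRegion: region}, nil
+//     })
+//     ep, _ := custom.EndpointFor("s3", "us-west-2")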
+func (fn ResolverFunc) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) {
+    return fn(service, region, opts...)
+}
+
+var schemeRE = regexp.MustCompile("^([^:]+)://")
+
+// AddScheme adds the HTTP or HTTPS scheme to an endpoint URL if there is no
+// scheme. If disableSSL is true, HTTP will be used instead of the default HTTPS.
+//
+// If disableSSL is set, it will only set the URL's scheme if the URL does not
+// contain a scheme.
+func AddScheme(endpoint string, disableSSL bool) string {
+    if !schemeRE.MatchString(endpoint) {
+        scheme := "https"
+        if disableSSL {
+            scheme = "http"
+        }
+        endpoint = fmt.Sprintf("%s://%s", scheme, endpoint)
+    }
+
+    return endpoint
+}
+
+// EnumPartitions provides a way to retrieve the underlying partitions that
+// make up the SDK's default Resolver, or any resolver decoded from a model
+// file.
+//
+// Use this interface with DefaultResolver and DecodeModel to get the list of
+// Partitions.
+type EnumPartitions interface {
+    Partitions() []Partition
+}
+
+// RegionsForService returns a map of regions for the partition and service.
+// If either the partition or service does not exist, false will be returned
+// as the second return value.
+//
+// This example shows how to get the regions for DynamoDB in the AWS partition.
+//     rs, exists := endpoints.RegionsForService(endpoints.DefaultPartitions(), endpoints.AwsPartitionID, endpoints.DynamodbServiceID)
+//
+// This is equivalent to using the partition directly.
+//     rs := endpoints.AwsPartition().Services()[endpoints.DynamodbServiceID].Regions()
+func RegionsForService(ps []Partition, partitionID, serviceID string) (map[string]Region, bool) {
+    for _, p := range ps {
+        if p.ID() != partitionID {
+            continue
+        }
+        if _, ok := p.p.Services[serviceID]; !ok {
+            break
+        }
+
+        s := Service{
+            id: serviceID,
+            p:  p.p,
+        }
+        return s.Regions(), true
+    }
+
+    return map[string]Region{}, false
+}
+
+// PartitionForRegion returns the first partition which includes the region
+// passed in. This includes both known regions and regions which match
+// a pattern supported by the partition, which may include regions that are
+// not explicitly known by the partition. Use the Regions method of the
+// returned Partition if explicit support is needed.
+func PartitionForRegion(ps []Partition, regionID string) (Partition, bool) {
+    for _, p := range ps {
+        if _, ok := p.p.Regions[regionID]; ok || p.p.RegionRegex.MatchString(regionID) {
+            return p, true
+        }
+    }
+
+    return Partition{}, false
+}
+
+// A Partition provides the ability to enumerate the partition's regions
+// and services.
+type Partition struct {
+    id string
+    p  *partition
+}
+
+// ID returns the identifier of the partition.
+func (p Partition) ID() string { return p.id }
+
+// EndpointFor attempts to resolve the endpoint based on service and region.
+// See Options for information on configuring how the endpoint is resolved.
+//
+// If the service cannot be found in the metadata the UnknownServiceError
+// error will be returned. This validation will occur regardless of whether
+// StrictMatching is enabled. To enable resolving unknown services set the
+// "ResolveUnknownService" option to true. When StrictMatching is disabled
+// this option allows the partition resolver to resolve an endpoint based on
+// the service endpoint ID provided.
+//
+// When resolving endpoints, you can choose to enable StrictMatching. This will
+// require the provided service and region to be known by the partition.
+// If the endpoint cannot be strictly resolved an error will be returned. This
+// mode is useful to ensure the endpoint resolved is valid. Without
+// StrictMatching enabled the endpoint returned may look valid but may not work.
+// StrictMatching requires the SDK to be updated if you want to take advantage
+// of new regions and services expansions.
+//
+// Errors that can be returned.
+//   * UnknownServiceError
+//   * UnknownEndpointError
+func (p Partition) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) {
+    return p.p.EndpointFor(service, region, opts...)
+}
+
+// Regions returns a map of Regions indexed by their ID. This is useful for
+// enumerating over the regions in a partition.
+func (p Partition) Regions() map[string]Region {
+    rs := map[string]Region{}
+    for id, r := range p.p.Regions {
+        rs[id] = Region{
+            id:   id,
+            desc: r.Description,
+            p:    p.p,
+        }
+    }
+
+    return rs
+}
+
+// Services returns a map of Service indexed by their ID. This is useful for
+// enumerating over the services in a partition.
+func (p Partition) Services() map[string]Service {
+    ss := map[string]Service{}
+    for id := range p.p.Services {
+        ss[id] = Service{
+            id: id,
+            p:  p.p,
+        }
+    }
+
+    return ss
+}
+
+// A Region provides information about a region, and the ability to resolve an
+// endpoint from the context of a region, given a service.
+type Region struct {
+    id, desc string
+    p        *partition
+}
+
+// ID returns the region's identifier.
+func (r Region) ID() string { return r.id }
+
+// Description returns the region's description. The region description
+// is free text, it can be empty, and it may change between SDK releases.
+func (r Region) Description() string { return r.desc }
+
+// ResolveEndpoint resolves an endpoint from the context of the region given
+// a service. See Partition.EndpointFor for usage and errors that can be returned.
+func (r Region) ResolveEndpoint(service string, opts ...func(*Options)) (ResolvedEndpoint, error) {
+    return r.p.EndpointFor(service, r.id, opts...)
+}
+
+// Services returns a list of all services that are known to be in this region.
+func (r Region) Services() map[string]Service {
+    ss := map[string]Service{}
+    for id, s := range r.p.Services {
+        if _, ok := s.Endpoints[r.id]; ok {
+            ss[id] = Service{
+                id: id,
+                p:  r.p,
+            }
+        }
+    }
+
+    return ss
+}
+
+// A Service provides information about a service, and the ability to resolve an
+// endpoint from the context of a service, given a region.
+type Service struct {
+    id string
+    p  *partition
+}
+
+// ID returns the identifier for the service.
+func (s Service) ID() string { return s.id }
+
+// ResolveEndpoint resolves an endpoint from the context of a service given
+// a region. See Partition.EndpointFor for usage and errors that can be returned.
+func (s Service) ResolveEndpoint(region string, opts ...func(*Options)) (ResolvedEndpoint, error) {
+    return s.p.EndpointFor(s.id, region, opts...)
+}
+
+// Regions returns a map of Regions that the service is present in.
+//
+// A Region is the AWS region the service exists in, whereas an Endpoint is
+// a URL that can be resolved to an instance of a service.
+func (s Service) Regions() map[string]Region {
+    rs := map[string]Region{}
+    for id := range s.p.Services[s.id].Endpoints {
+        if r, ok := s.p.Regions[id]; ok {
+            rs[id] = Region{
+                id:   id,
+                desc: r.Description,
+                p:    s.p,
+            }
+        }
+    }
+
+    return rs
+}
+
+// Endpoints returns a map of Endpoints indexed by their ID for all known
+// endpoints for a service.
+//
+// A Region is the AWS region the service exists in, whereas an Endpoint is
+// a URL that can be resolved to an instance of a service.
+func (s Service) Endpoints() map[string]Endpoint {
+    es := map[string]Endpoint{}
+    for id := range s.p.Services[s.id].Endpoints {
+        es[id] = Endpoint{
+            id:        id,
+            serviceID: s.id,
+            p:         s.p,
+        }
+    }
+
+    return es
+}
+
+// An Endpoint provides information about endpoints, and provides the ability
+// to resolve that endpoint for the service, and the region the endpoint
+// represents.
+type Endpoint struct {
+    id        string
+    serviceID string
+    p         *partition
+}
+
+// ID returns the identifier for an endpoint.
+func (e Endpoint) ID() string { return e.id }
+
+// ServiceID returns the identifier the endpoint belongs to.
+func (e Endpoint) ServiceID() string { return e.serviceID }
+
+// ResolveEndpoint resolves an endpoint from the context of a service and
+// region the endpoint represents. See Partition.EndpointFor for usage and
+// errors that can be returned.
+func (e Endpoint) ResolveEndpoint(opts ...func(*Options)) (ResolvedEndpoint, error) {
+    return e.p.EndpointFor(e.serviceID, e.id, opts...)
+}
+
+// A ResolvedEndpoint is an endpoint that has been resolved based on a partition,
+// service, and region.
+type ResolvedEndpoint struct {
+    // The endpoint URL
+    URL string
+
+    // The region that should be used for signing requests.
+    SigningRegion string
+
+    // The service name that should be used for signing requests.
+    SigningName string
+
+    // States that the signing name for this endpoint was derived from metadata
+    // passed in, but was not explicitly modeled.
+    SigningNameDerived bool
+
+    // The signing method that should be used for signing requests.
+    SigningMethod string
+}
+
+// So that the Error interface type can be included as an anonymous field
+// in the requestError struct and not conflict with the error.Error() method.
+type awsError awserr.Error
+
+// An EndpointNotFoundError is returned when in StrictMatching mode, and the
+// endpoint for the service and region cannot be found in any of the partitions.
+type EndpointNotFoundError struct {
+    awsError
+    Partition string
+    Service   string
+    Region    string
+}
+
+// An UnknownServiceError is returned when the service does not resolve to an
+// endpoint. Includes a list of all known services for the partition. Returned
+// when a partition does not support the service.
+type UnknownServiceError struct {
+    awsError
+    Partition string
+    Service   string
+    Known     []string
+}
+
+// NewUnknownServiceError builds and returns UnknownServiceError.
+func NewUnknownServiceError(p, s string, known []string) UnknownServiceError {
+    return UnknownServiceError{
+        awsError: awserr.New("UnknownServiceError",
+            "could not resolve endpoint for unknown service", nil),
+        Partition: p,
+        Service:   s,
+        Known:     known,
+    }
+}
+
+// Error returns the string representation of the error.
+func (e UnknownServiceError) Error() string {
+    extra := fmt.Sprintf("partition: %q, service: %q",
+        e.Partition, e.Service)
+    if len(e.Known) > 0 {
+        extra += fmt.Sprintf(", known: %v", e.Known)
+    }
+    return awserr.SprintError(e.Code(), e.Message(), extra, e.OrigErr())
+}
+
+// String returns the string representation of the error.
+func (e UnknownServiceError) String() string {
+    return e.Error()
+}
+
+// An UnknownEndpointError is returned when in StrictMatching mode and the
+// service is valid, but the region does not resolve to an endpoint. Includes
+// a list of all known endpoints for the service.
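+//
+// A sketch of detecting this error with a type assertion, given a Partition p
+// (illustrative only):
+//
+//     _, err := p.EndpointFor("dynamodb", "no-such-region", StrictMatchingOption)
+//     if e, ok := err.(UnknownEndpointError); ok {
+//         fmt.Println("known endpoints:", e.Known)
+//     }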
+type UnknownEndpointError struct {
+    awsError
+    Partition string
+    Service   string
+    Region    string
+    Known     []string
+}
+
+// NewUnknownEndpointError builds and returns UnknownEndpointError.
+func NewUnknownEndpointError(p, s, r string, known []string) UnknownEndpointError {
+    return UnknownEndpointError{
+        awsError: awserr.New("UnknownEndpointError",
+            "could not resolve endpoint", nil),
+        Partition: p,
+        Service:   s,
+        Region:    r,
+        Known:     known,
+    }
+}
+
+// Error returns the string representation of the error.
+func (e UnknownEndpointError) Error() string {
+    extra := fmt.Sprintf("partition: %q, service: %q, region: %q",
+        e.Partition, e.Service, e.Region)
+    if len(e.Known) > 0 {
+        extra += fmt.Sprintf(", known: %v", e.Known)
+    }
+    return awserr.SprintError(e.Code(), e.Message(), extra, e.OrigErr())
+}
+
+// String returns the string representation of the error.
+func (e UnknownEndpointError) String() string {
+    return e.Error()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go
new file mode 100644
index 000000000..ff6f76db6
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go
@@ -0,0 +1,307 @@
+package endpoints
+
+import (
+    "fmt"
+    "regexp"
+    "strconv"
+    "strings"
+)
+
+type partitions []partition
+
+func (ps partitions) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) {
+    var opt Options
+    opt.Set(opts...)
+
+    for i := 0; i < len(ps); i++ {
+        if !ps[i].canResolveEndpoint(service, region, opt.StrictMatching) {
+            continue
+        }
+
+        return ps[i].EndpointFor(service, region, opts...)
+    }
+
+    // If loose matching, fall back to the first partition's format when
+    // resolving the endpoint.
+    if !opt.StrictMatching && len(ps) > 0 {
+        return ps[0].EndpointFor(service, region, opts...)
+    }
+
+    return ResolvedEndpoint{}, NewUnknownEndpointError("all partitions", service, region, []string{})
+}
+
+// Partitions satisfies the EnumPartitions interface and returns a list
+// of Partitions, one for each partition in the SDK's endpoints model.
+func (ps partitions) Partitions() []Partition {
+    parts := make([]Partition, 0, len(ps))
+    for i := 0; i < len(ps); i++ {
+        parts = append(parts, ps[i].Partition())
+    }
+
+    return parts
+}
+
+type partition struct {
+    ID          string      `json:"partition"`
+    Name        string      `json:"partitionName"`
+    DNSSuffix   string      `json:"dnsSuffix"`
+    RegionRegex regionRegex `json:"regionRegex"`
+    Defaults    endpoint    `json:"defaults"`
+    Regions     regions     `json:"regions"`
+    Services    services    `json:"services"`
+}
+
+func (p partition) Partition() Partition {
+    return Partition{
+        id: p.ID,
+        p:  &p,
+    }
+}
+
+func (p partition) canResolveEndpoint(service, region string, strictMatch bool) bool {
+    s, hasService := p.Services[service]
+    _, hasEndpoint := s.Endpoints[region]
+
+    if hasEndpoint && hasService {
+        return true
+    }
+
+    if strictMatch {
+        return false
+    }
+
+    return p.RegionRegex.MatchString(region)
+}
+
+func (p partition) EndpointFor(service, region string, opts ...func(*Options)) (resolved ResolvedEndpoint, err error) {
+    var opt Options
+    opt.Set(opts...)
+
+    s, hasService := p.Services[service]
+    if !(hasService || opt.ResolveUnknownService) {
+        // Only return an error if the resolver will not fall back to
+        // creating an endpoint based on the service endpoint ID passed in.
+ return resolved, NewUnknownServiceError(p.ID, service, serviceList(p.Services)) + } + + e, hasEndpoint := s.endpointForRegion(region) + if !hasEndpoint && opt.StrictMatching { + return resolved, NewUnknownEndpointError(p.ID, service, region, endpointList(s.Endpoints)) + } + + defs := []endpoint{p.Defaults, s.Defaults} + return e.resolve(service, region, p.DNSSuffix, defs, opt), nil +} + +func serviceList(ss services) []string { + list := make([]string, 0, len(ss)) + for k := range ss { + list = append(list, k) + } + return list +} +func endpointList(es endpoints) []string { + list := make([]string, 0, len(es)) + for k := range es { + list = append(list, k) + } + return list +} + +type regionRegex struct { + *regexp.Regexp +} + +func (rr *regionRegex) UnmarshalJSON(b []byte) (err error) { + // Strip leading and trailing quotes + regex, err := strconv.Unquote(string(b)) + if err != nil { + return fmt.Errorf("unable to strip quotes from regex, %v", err) + } + + rr.Regexp, err = regexp.Compile(regex) + if err != nil { + return fmt.Errorf("unable to unmarshal region regex, %v", err) + } + return nil +} + +type regions map[string]region + +type region struct { + Description string `json:"description"` +} + +type services map[string]service + +type service struct { + PartitionEndpoint string `json:"partitionEndpoint"` + IsRegionalized boxedBool `json:"isRegionalized,omitempty"` + Defaults endpoint `json:"defaults"` + Endpoints endpoints `json:"endpoints"` +} + +func (s *service) endpointForRegion(region string) (endpoint, bool) { + if s.IsRegionalized == boxedFalse { + return s.Endpoints[s.PartitionEndpoint], region == s.PartitionEndpoint + } + + if e, ok := s.Endpoints[region]; ok { + return e, true + } + + // Unable to find any matching endpoint, return + // blank that will be used for generic endpoint creation. + return endpoint{}, false +} + +type endpoints map[string]endpoint + +type endpoint struct { + Hostname string `json:"hostname"` + Protocols []string `json:"protocols"` + CredentialScope credentialScope `json:"credentialScope"` + + // Custom fields not modeled + HasDualStack boxedBool `json:"-"` + DualStackHostname string `json:"-"` + + // Signature Version not used + SignatureVersions []string `json:"signatureVersions"` + + // SSLCommonName not used. 
+    SSLCommonName string `json:"sslCommonName"`
+}
+
+const (
+    defaultProtocol = "https"
+    defaultSigner   = "v4"
+)
+
+var (
+    protocolPriority = []string{"https", "http"}
+    signerPriority   = []string{"v4", "v2"}
+)
+
+func getByPriority(s []string, p []string, def string) string {
+    if len(s) == 0 {
+        return def
+    }
+
+    for i := 0; i < len(p); i++ {
+        for j := 0; j < len(s); j++ {
+            if s[j] == p[i] {
+                return s[j]
+            }
+        }
+    }
+
+    return s[0]
+}
+
+func (e endpoint) resolve(service, region, dnsSuffix string, defs []endpoint, opts Options) ResolvedEndpoint {
+    var merged endpoint
+    for _, def := range defs {
+        merged.mergeIn(def)
+    }
+    merged.mergeIn(e)
+    e = merged
+
+    hostname := e.Hostname
+
+    // Offset the hostname for dualstack if enabled
+    if opts.UseDualStack && e.HasDualStack == boxedTrue {
+        hostname = e.DualStackHostname
+    }
+
+    u := strings.Replace(hostname, "{service}", service, 1)
+    u = strings.Replace(u, "{region}", region, 1)
+    u = strings.Replace(u, "{dnsSuffix}", dnsSuffix, 1)
+
+    scheme := getEndpointScheme(e.Protocols, opts.DisableSSL)
+    u = fmt.Sprintf("%s://%s", scheme, u)
+
+    signingRegion := e.CredentialScope.Region
+    if len(signingRegion) == 0 {
+        signingRegion = region
+    }
+
+    signingName := e.CredentialScope.Service
+    var signingNameDerived bool
+    if len(signingName) == 0 {
+        signingName = service
+        signingNameDerived = true
+    }
+
+    return ResolvedEndpoint{
+        URL:                u,
+        SigningRegion:      signingRegion,
+        SigningName:        signingName,
+        SigningNameDerived: signingNameDerived,
+        SigningMethod:      getByPriority(e.SignatureVersions, signerPriority, defaultSigner),
+    }
+}
+
+func getEndpointScheme(protocols []string, disableSSL bool) string {
+    if disableSSL {
+        return "http"
+    }
+
+    return getByPriority(protocols, protocolPriority, defaultProtocol)
+}
+
+func (e *endpoint) mergeIn(other endpoint) {
+    if len(other.Hostname) > 0 {
+        e.Hostname = other.Hostname
+    }
+    if len(other.Protocols) > 0 {
+        e.Protocols = other.Protocols
+    }
+    if len(other.SignatureVersions) > 0 {
+        e.SignatureVersions = other.SignatureVersions
+    }
+    if len(other.CredentialScope.Region) > 0 {
+        e.CredentialScope.Region = other.CredentialScope.Region
+    }
+    if len(other.CredentialScope.Service) > 0 {
+        e.CredentialScope.Service = other.CredentialScope.Service
+    }
+    if len(other.SSLCommonName) > 0 {
+        e.SSLCommonName = other.SSLCommonName
+    }
+    if other.HasDualStack != boxedBoolUnset {
+        e.HasDualStack = other.HasDualStack
+    }
+    if len(other.DualStackHostname) > 0 {
+        e.DualStackHostname = other.DualStackHostname
+    }
+}
+
+type credentialScope struct {
+    Region  string `json:"region"`
+    Service string `json:"service"`
+}
+
+type boxedBool int
+
+func (b *boxedBool) UnmarshalJSON(buf []byte) error {
+    v, err := strconv.ParseBool(string(buf))
+    if err != nil {
+        return err
+    }
+
+    if v {
+        *b = boxedTrue
+    } else {
+        *b = boxedFalse
+    }
+
+    return nil
+}
+
+const (
+    boxedBoolUnset boxedBool = iota
+    boxedFalse
+    boxedTrue
+)
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go
new file mode 100644
index 000000000..0fdfcc56e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go
@@ -0,0 +1,351 @@
+// +build codegen
+
+package endpoints
+
+import (
+    "fmt"
+    "io"
+    "reflect"
+    "strings"
+    "text/template"
+    "unicode"
+)
+
+// A CodeGenOptions provides the options for code generating the endpoints
+// into Go code from the endpoints model definition.
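+//
+// A hedged sketch of applying these options when generating code (the file
+// names are assumptions for illustration):
+//
+//     in, _ := os.Open("endpoints.json")
+//     out, _ := os.Create("defaults.go")
+//     err := CodeGenModel(in, out, func(o *CodeGenOptions) {
+//         o.DisableGenerateServiceIDs = true
+//     })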
+type CodeGenOptions struct {
+    // Options for how the model will be decoded.
+    DecodeModelOptions DecodeModelOptions
+
+    // Disables code generation of the service endpoint prefix IDs defined in
+    // the model.
+    DisableGenerateServiceIDs bool
+}
+
+// Set combines all of the option functions together.
+func (d *CodeGenOptions) Set(optFns ...func(*CodeGenOptions)) {
+    for _, fn := range optFns {
+        fn(d)
+    }
+}
+
+// CodeGenModel, given an endpoints model file, will decode it and attempt to
+// generate Go code from the model definition. An error will be returned if
+// the code cannot be generated or decoded.
+func CodeGenModel(modelFile io.Reader, outFile io.Writer, optFns ...func(*CodeGenOptions)) error {
+    var opts CodeGenOptions
+    opts.Set(optFns...)
+
+    resolver, err := DecodeModel(modelFile, func(d *DecodeModelOptions) {
+        *d = opts.DecodeModelOptions
+    })
+    if err != nil {
+        return err
+    }
+
+    v := struct {
+        Resolver
+        CodeGenOptions
+    }{
+        Resolver:       resolver,
+        CodeGenOptions: opts,
+    }
+
+    tmpl := template.Must(template.New("tmpl").Funcs(funcMap).Parse(v3Tmpl))
+    if err := tmpl.ExecuteTemplate(outFile, "defaults", v); err != nil {
+        return fmt.Errorf("failed to execute template, %v", err)
+    }
+
+    return nil
+}
+
+func toSymbol(v string) string {
+    out := []rune{}
+    for _, c := range strings.Title(v) {
+        if !(unicode.IsNumber(c) || unicode.IsLetter(c)) {
+            continue
+        }
+
+        out = append(out, c)
+    }
+
+    return string(out)
+}
+
+func quoteString(v string) string {
+    return fmt.Sprintf("%q", v)
+}
+
+func regionConstName(p, r string) string {
+    return toSymbol(p) + toSymbol(r)
+}
+
+func partitionGetter(id string) string {
+    return fmt.Sprintf("%sPartition", toSymbol(id))
+}
+
+func partitionVarName(id string) string {
+    return fmt.Sprintf("%sPartition", strings.ToLower(toSymbol(id)))
+}
+
+func listPartitionNames(ps partitions) string {
+    names := []string{}
+    switch len(ps) {
+    case 1:
+        return ps[0].Name
+    case 2:
+        return fmt.Sprintf("%s and %s", ps[0].Name, ps[1].Name)
+    default:
+        for i, p := range ps {
+            if i == len(ps)-1 {
+                names = append(names, "and "+p.Name)
+            } else {
+                names = append(names, p.Name)
+            }
+        }
+        return strings.Join(names, ", ")
+    }
+}
+
+func boxedBoolIfSet(msg string, v boxedBool) string {
+    switch v {
+    case boxedTrue:
+        return fmt.Sprintf(msg, "boxedTrue")
+    case boxedFalse:
+        return fmt.Sprintf(msg, "boxedFalse")
+    default:
+        return ""
+    }
+}
+
+func stringIfSet(msg, v string) string {
+    if len(v) == 0 {
+        return ""
+    }
+
+    return fmt.Sprintf(msg, v)
+}
+
+func stringSliceIfSet(msg string, vs []string) string {
+    if len(vs) == 0 {
+        return ""
+    }
+
+    names := []string{}
+    for _, v := range vs {
+        names = append(names, `"`+v+`"`)
+    }
+
+    return fmt.Sprintf(msg, strings.Join(names, ","))
+}
+
+func endpointIsSet(v endpoint) bool {
+    return !reflect.DeepEqual(v, endpoint{})
+}
+
+func serviceSet(ps partitions) map[string]struct{} {
+    set := map[string]struct{}{}
+    for _, p := range ps {
+        for id := range p.Services {
+            set[id] = struct{}{}
+        }
+    }
+
+    return set
+}
+
+var funcMap = template.FuncMap{
+    "ToSymbol":           toSymbol,
+    "QuoteString":        quoteString,
+    "RegionConst":        regionConstName,
+    "PartitionGetter":    partitionGetter,
+    "PartitionVarName":   partitionVarName,
+    "ListPartitionNames": listPartitionNames,
+    "BoxedBoolIfSet":     boxedBoolIfSet,
+    "StringIfSet":        stringIfSet,
+    "StringSliceIfSet":   stringSliceIfSet,
+    "EndpointIsSet":      endpointIsSet,
+    "ServicesSet":        serviceSet,
+}
+
+const v3Tmpl = `
+{{ define "defaults" -}}
+// Code generated by
aws/endpoints/v3model_codegen.go. DO NOT EDIT. + +package endpoints + +import ( + "regexp" +) + + {{ template "partition consts" $.Resolver }} + + {{ range $_, $partition := $.Resolver }} + {{ template "partition region consts" $partition }} + {{ end }} + + {{ if not $.DisableGenerateServiceIDs -}} + {{ template "service consts" $.Resolver }} + {{- end }} + + {{ template "endpoint resolvers" $.Resolver }} +{{- end }} + +{{ define "partition consts" }} + // Partition identifiers + const ( + {{ range $_, $p := . -}} + {{ ToSymbol $p.ID }}PartitionID = {{ QuoteString $p.ID }} // {{ $p.Name }} partition. + {{ end -}} + ) +{{- end }} + +{{ define "partition region consts" }} + // {{ .Name }} partition's regions. + const ( + {{ range $id, $region := .Regions -}} + {{ ToSymbol $id }}RegionID = {{ QuoteString $id }} // {{ $region.Description }}. + {{ end -}} + ) +{{- end }} + +{{ define "service consts" }} + // Service identifiers + const ( + {{ $serviceSet := ServicesSet . -}} + {{ range $id, $_ := $serviceSet -}} + {{ ToSymbol $id }}ServiceID = {{ QuoteString $id }} // {{ ToSymbol $id }}. + {{ end -}} + ) +{{- end }} + +{{ define "endpoint resolvers" }} + // DefaultResolver returns an Endpoint resolver that will be able + // to resolve endpoints for: {{ ListPartitionNames . }}. + // + // Use DefaultPartitions() to get the list of the default partitions. + func DefaultResolver() Resolver { + return defaultPartitions + } + + // DefaultPartitions returns a list of the partitions the SDK is bundled + // with. The available partitions are: {{ ListPartitionNames . }}. + // + // partitions := endpoints.DefaultPartitions + // for _, p := range partitions { + // // ... inspect partitions + // } + func DefaultPartitions() []Partition { + return defaultPartitions.Partitions() + } + + var defaultPartitions = partitions{ + {{ range $_, $partition := . -}} + {{ PartitionVarName $partition.ID }}, + {{ end }} + } + + {{ range $_, $partition := . -}} + {{ $name := PartitionGetter $partition.ID -}} + // {{ $name }} returns the Resolver for {{ $partition.Name }}. + func {{ $name }}() Partition { + return {{ PartitionVarName $partition.ID }}.Partition() + } + var {{ PartitionVarName $partition.ID }} = {{ template "gocode Partition" $partition }} + {{ end }} +{{ end }} + +{{ define "default partitions" }} + func DefaultPartitions() []Partition { + return []partition{ + {{ range $_, $partition := . -}} + // {{ ToSymbol $partition.ID}}Partition(), + {{ end }} + } + } +{{ end }} + +{{ define "gocode Partition" -}} +partition{ + {{ StringIfSet "ID: %q,\n" .ID -}} + {{ StringIfSet "Name: %q,\n" .Name -}} + {{ StringIfSet "DNSSuffix: %q,\n" .DNSSuffix -}} + RegionRegex: {{ template "gocode RegionRegex" .RegionRegex }}, + {{ if EndpointIsSet .Defaults -}} + Defaults: {{ template "gocode Endpoint" .Defaults }}, + {{- end }} + Regions: {{ template "gocode Regions" .Regions }}, + Services: {{ template "gocode Services" .Services }}, +} +{{- end }} + +{{ define "gocode RegionRegex" -}} +regionRegex{ + Regexp: func() *regexp.Regexp{ + reg, _ := regexp.Compile({{ QuoteString .Regexp.String }}) + return reg + }(), +} +{{- end }} + +{{ define "gocode Regions" -}} +regions{ + {{ range $id, $region := . -}} + "{{ $id }}": {{ template "gocode Region" $region }}, + {{ end -}} +} +{{- end }} + +{{ define "gocode Region" -}} +region{ + {{ StringIfSet "Description: %q,\n" .Description -}} +} +{{- end }} + +{{ define "gocode Services" -}} +services{ + {{ range $id, $service := . 
-}} + "{{ $id }}": {{ template "gocode Service" $service }}, + {{ end }} +} +{{- end }} + +{{ define "gocode Service" -}} +service{ + {{ StringIfSet "PartitionEndpoint: %q,\n" .PartitionEndpoint -}} + {{ BoxedBoolIfSet "IsRegionalized: %s,\n" .IsRegionalized -}} + {{ if EndpointIsSet .Defaults -}} + Defaults: {{ template "gocode Endpoint" .Defaults -}}, + {{- end }} + {{ if .Endpoints -}} + Endpoints: {{ template "gocode Endpoints" .Endpoints }}, + {{- end }} +} +{{- end }} + +{{ define "gocode Endpoints" -}} +endpoints{ + {{ range $id, $endpoint := . -}} + "{{ $id }}": {{ template "gocode Endpoint" $endpoint }}, + {{ end }} +} +{{- end }} + +{{ define "gocode Endpoint" -}} +endpoint{ + {{ StringIfSet "Hostname: %q,\n" .Hostname -}} + {{ StringIfSet "SSLCommonName: %q,\n" .SSLCommonName -}} + {{ StringSliceIfSet "Protocols: []string{%s},\n" .Protocols -}} + {{ StringSliceIfSet "SignatureVersions: []string{%s},\n" .SignatureVersions -}} + {{ if or .CredentialScope.Region .CredentialScope.Service -}} + CredentialScope: credentialScope{ + {{ StringIfSet "Region: %q,\n" .CredentialScope.Region -}} + {{ StringIfSet "Service: %q,\n" .CredentialScope.Service -}} + }, + {{- end }} + {{ BoxedBoolIfSet "HasDualStack: %s,\n" .HasDualStack -}} + {{ StringIfSet "DualStackHostname: %q,\n" .DualStackHostname -}} + +} +{{- end }} +` diff --git a/vendor/github.com/aws/aws-sdk-go/aws/errors.go b/vendor/github.com/aws/aws-sdk-go/aws/errors.go new file mode 100644 index 000000000..fa06f7a8f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/errors.go @@ -0,0 +1,13 @@ +package aws + +import "github.com/aws/aws-sdk-go/aws/awserr" + +var ( + // ErrMissingRegion is an error that is returned if region configuration is + // not found. + ErrMissingRegion = awserr.New("MissingRegion", "could not find region configuration", nil) + + // ErrMissingEndpoint is an error that is returned if an endpoint cannot be + // resolved for a service. + ErrMissingEndpoint = awserr.New("MissingEndpoint", "'Endpoint' configuration is required for this service", nil) +) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go b/vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go new file mode 100644 index 000000000..91a6f277a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go @@ -0,0 +1,12 @@ +package aws + +// JSONValue is a representation of a grab bag type that will be marshaled +// into a json string. This type can be used just like any other map. +// +// Example: +// +// values := aws.JSONValue{ +// "Foo": "Bar", +// } +// values["Baz"] = "Qux" +type JSONValue map[string]interface{} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/logger.go b/vendor/github.com/aws/aws-sdk-go/aws/logger.go new file mode 100644 index 000000000..6ed15b2ec --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/logger.go @@ -0,0 +1,118 @@ +package aws + +import ( + "log" + "os" +) + +// A LogLevelType defines the level logging should be performed at. Used to instruct +// the SDK which statements should be logged. +type LogLevelType uint + +// LogLevel returns the pointer to a LogLevel. Should be used to workaround +// not being able to take the address of a non-composite literal. +func LogLevel(l LogLevelType) *LogLevelType { + return &l +} + +// Value returns the LogLevel value or the default value LogOff if the LogLevel +// is nil. Safe to use on nil value LogLevelTypes. 
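+//
+// For example (an illustrative sketch, not part of the upstream SDK docs):
+//
+//     var lvl *aws.LogLevelType // a nil LogLevel
+//     lvl.Value()               // returns aws.LogOff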
+func (l *LogLevelType) Value() LogLevelType {
+    if l != nil {
+        return *l
+    }
+    return LogOff
+}
+
+// Matches returns true if the v LogLevel is enabled by this LogLevel. Should be
+// used with logging sub levels. Is safe to use on nil value LogLevelTypes. If
+// LogLevel is nil, will default to LogOff comparison.
+func (l *LogLevelType) Matches(v LogLevelType) bool {
+    c := l.Value()
+    return c&v == v
+}
+
+// AtLeast returns true if this LogLevel is at least high enough to satisfy v.
+// Is safe to use on nil value LogLevelTypes. If LogLevel is nil, will default
+// to LogOff comparison.
+func (l *LogLevelType) AtLeast(v LogLevelType) bool {
+    c := l.Value()
+    return c >= v
+}
+
+const (
+    // LogOff states that no logging should be performed by the SDK. This is the
+    // default state of the SDK, and should be used to disable all logging.
+    LogOff LogLevelType = iota * 0x1000
+
+    // LogDebug states that debug output should be logged by the SDK. This should
+    // be used to inspect requests made and responses received.
+    LogDebug
+)
+
+// Debug Logging Sub Levels
+const (
+    // LogDebugWithSigning states that the SDK should log request signing and
+    // presigning events. This should be used to log the signing details of
+    // requests for debugging. Will also enable LogDebug.
+    LogDebugWithSigning LogLevelType = LogDebug | (1 << iota)
+
+    // LogDebugWithHTTPBody states the SDK should log HTTP request and response
+    // HTTP bodies in addition to the headers and path. This should be used to
+    // see the body content of requests and responses made while using the SDK.
+    // Will also enable LogDebug.
+    LogDebugWithHTTPBody
+
+    // LogDebugWithRequestRetries states the SDK should log when service requests will
+    // be retried. This should be used when you want to log when service
+    // requests are being retried. Will also enable LogDebug.
+    LogDebugWithRequestRetries
+
+    // LogDebugWithRequestErrors states the SDK should log when service requests fail
+    // to build, send, validate, or unmarshal.
+    LogDebugWithRequestErrors
+
+    // LogDebugWithEventStreamBody states the SDK should log EventStream
+    // request and response bodies. This should be used to log the EventStream
+    // wire unmarshaled message content of requests and responses made while
+    // using the SDK. Will also enable LogDebug.
+    LogDebugWithEventStreamBody
+)
+
+// A Logger is a minimalistic interface for the SDK to log messages to. Should
+// be used to provide custom logging writers for the SDK to use.
+type Logger interface {
+    Log(...interface{})
+}
+
+// A LoggerFunc is a convenience type to wrap a function taking a variadic
+// list of arguments so the Logger interface can be used.
+//
+// Example:
+//     s3.New(sess, &aws.Config{Logger: aws.LoggerFunc(func(args ...interface{}) {
+//         fmt.Fprintln(os.Stdout, args...)
+//     })})
type LoggerFunc func(...interface{})
+
+// Log calls the wrapped function with the arguments provided.
+func (f LoggerFunc) Log(args ...interface{}) {
+    f(args...)
+}
+
+// NewDefaultLogger returns a Logger which will write log messages to stdout,
+// and uses the same formatting runes as the stdlib log.Logger.
+func NewDefaultLogger() Logger {
+    return &defaultLogger{
+        logger: log.New(os.Stdout, "", log.LstdFlags),
+    }
+}
+
+// A defaultLogger provides a minimalistic logger satisfying the Logger interface.
+type defaultLogger struct {
+    logger *log.Logger
+}
+
+// Log logs the parameters to the stdlib logger. See log.Println.
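+//
+// A small usage sketch (illustrative only):
+//
+//     logger := aws.NewDefaultLogger()
+//     logger.Log("request", "complete")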
+func (l defaultLogger) Log(args ...interface{}) {
+    l.logger.Println(args...)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go b/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go
new file mode 100644
index 000000000..d9b37f4d3
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go
@@ -0,0 +1,18 @@
+package request
+
+import (
+    "strings"
+)
+
+// isErrConnectionReset reports whether err looks like a connection reset or
+// broken pipe error. Errors from reads that were reset are deliberately
+// excluded and report false.
+func isErrConnectionReset(err error) bool {
+    if strings.Contains(err.Error(), "read: connection reset") {
+        return false
+    }
+
+    if strings.Contains(err.Error(), "connection reset") ||
+        strings.Contains(err.Error(), "broken pipe") {
+        return true
+    }
+
+    return false
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go
new file mode 100644
index 000000000..627ec722c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go
@@ -0,0 +1,322 @@
+package request
+
+import (
+    "fmt"
+    "strings"
+)
+
+// A Handlers provides a collection of request handlers for various
+// stages of handling requests.
+type Handlers struct {
+    Validate         HandlerList
+    Build            HandlerList
+    Sign             HandlerList
+    Send             HandlerList
+    ValidateResponse HandlerList
+    Unmarshal        HandlerList
+    UnmarshalStream  HandlerList
+    UnmarshalMeta    HandlerList
+    UnmarshalError   HandlerList
+    Retry            HandlerList
+    AfterRetry       HandlerList
+    CompleteAttempt  HandlerList
+    Complete         HandlerList
+}
+
+// Copy returns a copy of this handler's lists.
+func (h *Handlers) Copy() Handlers {
+    return Handlers{
+        Validate:         h.Validate.copy(),
+        Build:            h.Build.copy(),
+        Sign:             h.Sign.copy(),
+        Send:             h.Send.copy(),
+        ValidateResponse: h.ValidateResponse.copy(),
+        Unmarshal:        h.Unmarshal.copy(),
+        UnmarshalStream:  h.UnmarshalStream.copy(),
+        UnmarshalError:   h.UnmarshalError.copy(),
+        UnmarshalMeta:    h.UnmarshalMeta.copy(),
+        Retry:            h.Retry.copy(),
+        AfterRetry:       h.AfterRetry.copy(),
+        CompleteAttempt:  h.CompleteAttempt.copy(),
+        Complete:         h.Complete.copy(),
+    }
+}
+
+// Clear removes callback functions for all handlers.
+func (h *Handlers) Clear() {
+    h.Validate.Clear()
+    h.Build.Clear()
+    h.Send.Clear()
+    h.Sign.Clear()
+    h.Unmarshal.Clear()
+    h.UnmarshalStream.Clear()
+    h.UnmarshalMeta.Clear()
+    h.UnmarshalError.Clear()
+    h.ValidateResponse.Clear()
+    h.Retry.Clear()
+    h.AfterRetry.Clear()
+    h.CompleteAttempt.Clear()
+    h.Complete.Clear()
+}
+
+// IsEmpty returns if there are no handlers in any of the handler lists.
+func (h *Handlers) IsEmpty() bool {
+    if h.Validate.Len() != 0 {
+        return false
+    }
+    if h.Build.Len() != 0 {
+        return false
+    }
+    if h.Send.Len() != 0 {
+        return false
+    }
+    if h.Sign.Len() != 0 {
+        return false
+    }
+    if h.Unmarshal.Len() != 0 {
+        return false
+    }
+    if h.UnmarshalStream.Len() != 0 {
+        return false
+    }
+    if h.UnmarshalMeta.Len() != 0 {
+        return false
+    }
+    if h.UnmarshalError.Len() != 0 {
+        return false
+    }
+    if h.ValidateResponse.Len() != 0 {
+        return false
+    }
+    if h.Retry.Len() != 0 {
+        return false
+    }
+    if h.AfterRetry.Len() != 0 {
+        return false
+    }
+    if h.CompleteAttempt.Len() != 0 {
+        return false
+    }
+    if h.Complete.Len() != 0 {
+        return false
+    }
+
+    return true
+}
+
+// A HandlerListRunItem represents an entry in the HandlerList which
+// is being run.
+type HandlerListRunItem struct {
+    Index   int
+    Handler NamedHandler
+    Request *Request
+}
+
+// A HandlerList manages zero or more handlers in a list.
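+//
+// A minimal sketch of registering a handler (the handler name here is
+// hypothetical):
+//
+//     var l HandlerList
+//     l.PushBackNamed(NamedHandler{
+//         Name: "example.LogOperation",
+//         Fn:   func(r *Request) { fmt.Println("running", r.Operation.Name) },
+//     })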
+type HandlerList struct {
+    list []NamedHandler
+
+    // Called after each request handler in the list is called. If set
+    // and the func returns true the HandlerList will continue to iterate
+    // over the request handlers. If false is returned the HandlerList
+    // will stop iterating.
+    //
+    // Should be used if extra logic needs to be performed between each
+    // handler in the list. This can be used to terminate a list's iteration
+    // based on a condition, such as an error with HandlerListStopOnError,
+    // or for logging with HandlerListLogItem.
+    AfterEachFn func(item HandlerListRunItem) bool
+}
+
+// A NamedHandler is a struct that contains a name and function callback.
+type NamedHandler struct {
+    Name string
+    Fn   func(*Request)
+}
+
+// copy creates a copy of the handler list.
+func (l *HandlerList) copy() HandlerList {
+    n := HandlerList{
+        AfterEachFn: l.AfterEachFn,
+    }
+    if len(l.list) == 0 {
+        return n
+    }
+
+    n.list = append(make([]NamedHandler, 0, len(l.list)), l.list...)
+    return n
+}
+
+// Clear clears the handler list.
+func (l *HandlerList) Clear() {
+    l.list = l.list[0:0]
+}
+
+// Len returns the number of handlers in the list.
+func (l *HandlerList) Len() int {
+    return len(l.list)
+}
+
+// PushBack pushes handler f to the back of the handler list.
+func (l *HandlerList) PushBack(f func(*Request)) {
+    l.PushBackNamed(NamedHandler{"__anonymous", f})
+}
+
+// PushBackNamed pushes named handler f to the back of the handler list.
+func (l *HandlerList) PushBackNamed(n NamedHandler) {
+    if cap(l.list) == 0 {
+        l.list = make([]NamedHandler, 0, 5)
+    }
+    l.list = append(l.list, n)
+}
+
+// PushFront pushes handler f to the front of the handler list.
+func (l *HandlerList) PushFront(f func(*Request)) {
+    l.PushFrontNamed(NamedHandler{"__anonymous", f})
+}
+
+// PushFrontNamed pushes named handler f to the front of the handler list.
+func (l *HandlerList) PushFrontNamed(n NamedHandler) {
+    if cap(l.list) == len(l.list) {
+        // Allocating new list required
+        l.list = append([]NamedHandler{n}, l.list...)
+    } else {
+        // Enough room to prepend into list.
+        l.list = append(l.list, NamedHandler{})
+        copy(l.list[1:], l.list)
+        l.list[0] = n
+    }
+}
+
+// Remove removes a NamedHandler n.
+func (l *HandlerList) Remove(n NamedHandler) {
+    l.RemoveByName(n.Name)
+}
+
+// RemoveByName removes a NamedHandler by name.
+func (l *HandlerList) RemoveByName(name string) {
+    for i := 0; i < len(l.list); i++ {
+        m := l.list[i]
+        if m.Name == name {
+            // Shift the array to avoid creating a new one.
+            copy(l.list[i:], l.list[i+1:])
+            l.list[len(l.list)-1] = NamedHandler{}
+            l.list = l.list[:len(l.list)-1]
+
+            // Decrement i so the next length check is correct.
+            i--
+        }
+    }
+}
+
+// SwapNamed will swap out any existing handlers with the same name as the
+// passed in NamedHandler returning true if handlers were swapped. False is
+// returned otherwise.
+func (l *HandlerList) SwapNamed(n NamedHandler) (swapped bool) {
+    for i := 0; i < len(l.list); i++ {
+        if l.list[i].Name == n.Name {
+            l.list[i].Fn = n.Fn
+            swapped = true
+        }
+    }
+
+    return swapped
+}
+
+// Swap will swap out all handlers matching the name passed in. The replacement
+// handler will be swapped in for each match. True is returned if any handlers
+// were swapped.
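+//
+// For example, replacing every handler registered under a hypothetical name:
+//
+//     swapped := l.Swap("example.LogOperation", NamedHandler{
+//         Name: "example.Quiet",
+//         Fn:   func(r *Request) {},
+//     })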
+func (l *HandlerList) Swap(name string, replace NamedHandler) bool { + var swapped bool + + for i := 0; i < len(l.list); i++ { + if l.list[i].Name == name { + l.list[i] = replace + swapped = true + } + } + + return swapped +} + +// SetBackNamed will replace the named handler if it exists in the handler list. +// If the handler does not exist the handler will be added to the end of the list. +func (l *HandlerList) SetBackNamed(n NamedHandler) { + if !l.SwapNamed(n) { + l.PushBackNamed(n) + } +} + +// SetFrontNamed will replace the named handler if it exists in the handler list. +// If the handler does not exist the handler will be added to the beginning of +// the list. +func (l *HandlerList) SetFrontNamed(n NamedHandler) { + if !l.SwapNamed(n) { + l.PushFrontNamed(n) + } +} + +// Run executes all handlers in the list with a given request object. +func (l *HandlerList) Run(r *Request) { + for i, h := range l.list { + h.Fn(r) + item := HandlerListRunItem{ + Index: i, Handler: h, Request: r, + } + if l.AfterEachFn != nil && !l.AfterEachFn(item) { + return + } + } +} + +// HandlerListLogItem logs the request handler and the state of the +// request's Error value. Always returns true to continue iterating +// request handlers in a HandlerList. +func HandlerListLogItem(item HandlerListRunItem) bool { + if item.Request.Config.Logger == nil { + return true + } + item.Request.Config.Logger.Log("DEBUG: RequestHandler", + item.Index, item.Handler.Name, item.Request.Error) + + return true +} + +// HandlerListStopOnError returns false to stop the HandlerList iterating +// over request handlers if Request.Error is not nil. True otherwise +// to continue iterating. +func HandlerListStopOnError(item HandlerListRunItem) bool { + return item.Request.Error == nil +} + +// WithAppendUserAgent will add a string to the user agent prefixed with a +// single white space. +func WithAppendUserAgent(s string) Option { + return func(r *Request) { + r.Handlers.Build.PushBack(func(r2 *Request) { + AddToUserAgent(r, s) + }) + } +} + +// MakeAddToUserAgentHandler will add the name/version pair to the User-Agent request +// header. If the extra parameters are provided they will be added as metadata to the +// name/version pair resulting in the following format. +// "name/version (extra0; extra1; ...)" +// The user agent part will be concatenated with this current request's user agent string. +func MakeAddToUserAgentHandler(name, version string, extra ...string) func(*Request) { + ua := fmt.Sprintf("%s/%s", name, version) + if len(extra) > 0 { + ua += fmt.Sprintf(" (%s)", strings.Join(extra, "; ")) + } + return func(r *Request) { + AddToUserAgent(r, ua) + } +} + +// MakeAddToUserAgentFreeFormHandler adds the input to the User-Agent request header. +// The input string will be concatenated with the current request's user agent string. 
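+//
+// A sketch of wiring it into a request's Build handlers (the product token
+// below is an assumption for illustration):
+//
+//     r.Handlers.Build.PushBack(MakeAddToUserAgentFreeFormHandler("custom-app/1.0"))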
+func MakeAddToUserAgentFreeFormHandler(s string) func(*Request) { + return func(r *Request) { + AddToUserAgent(r, s) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go b/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go new file mode 100644 index 000000000..79f79602b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go @@ -0,0 +1,24 @@ +package request + +import ( + "io" + "net/http" + "net/url" +) + +func copyHTTPRequest(r *http.Request, body io.ReadCloser) *http.Request { + req := new(http.Request) + *req = *r + req.URL = &url.URL{} + *req.URL = *r.URL + req.Body = body + + req.Header = http.Header{} + for k, v := range r.Header { + for _, vv := range v { + req.Header.Add(k, vv) + } + } + + return req +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go b/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go new file mode 100644 index 000000000..b0c2ef4fe --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go @@ -0,0 +1,60 @@ +package request + +import ( + "io" + "sync" + + "github.com/aws/aws-sdk-go/internal/sdkio" +) + +// offsetReader is a thread-safe io.ReadCloser to prevent racing +// with retrying requests +type offsetReader struct { + buf io.ReadSeeker + lock sync.Mutex + closed bool +} + +func newOffsetReader(buf io.ReadSeeker, offset int64) *offsetReader { + reader := &offsetReader{} + buf.Seek(offset, sdkio.SeekStart) + + reader.buf = buf + return reader +} + +// Close will close the instance of the offset reader's access to +// the underlying io.ReadSeeker. +func (o *offsetReader) Close() error { + o.lock.Lock() + defer o.lock.Unlock() + o.closed = true + return nil +} + +// Read is a thread-safe read of the underlying io.ReadSeeker +func (o *offsetReader) Read(p []byte) (int, error) { + o.lock.Lock() + defer o.lock.Unlock() + + if o.closed { + return 0, io.EOF + } + + return o.buf.Read(p) +} + +// Seek is a thread-safe seeking operation. +func (o *offsetReader) Seek(offset int64, whence int) (int64, error) { + o.lock.Lock() + defer o.lock.Unlock() + + return o.buf.Seek(offset, whence) +} + +// CloseAndCopy will return a new offsetReader with a copy of the old buffer +// and close the old buffer. +func (o *offsetReader) CloseAndCopy(offset int64) *offsetReader { + o.Close() + return newOffsetReader(o.buf, offset) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go new file mode 100644 index 000000000..2f0c4a90e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go @@ -0,0 +1,688 @@ +package request + +import ( + "bytes" + "fmt" + "io" + "net" + "net/http" + "net/url" + "reflect" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/internal/sdkio" +) + +const ( + // ErrCodeSerialization is the serialization error code that is received + // during protocol unmarshaling. + ErrCodeSerialization = "SerializationError" + + // ErrCodeRead is an error that is returned during HTTP reads. + ErrCodeRead = "ReadError" + + // ErrCodeResponseTimeout is the connection timeout error that is received + // during body reads. 
+    ErrCodeResponseTimeout = "ResponseTimeout"
+
+    // ErrCodeInvalidPresignExpire is returned when the expire time provided to
+    // presign is invalid
+    ErrCodeInvalidPresignExpire = "InvalidPresignExpireError"
+
+    // CanceledErrorCode is the error code that will be returned by an
+    // API request that was canceled. Requests given an aws.Context may
+    // return this error when canceled.
+    CanceledErrorCode = "RequestCanceled"
+)
+
+// A Request is the service request to be made.
+type Request struct {
+    Config     aws.Config
+    ClientInfo metadata.ClientInfo
+    Handlers   Handlers
+
+    Retryer
+    AttemptTime            time.Time
+    Time                   time.Time
+    Operation              *Operation
+    HTTPRequest            *http.Request
+    HTTPResponse           *http.Response
+    Body                   io.ReadSeeker
+    BodyStart              int64 // offset from beginning of Body that the request body starts
+    Params                 interface{}
+    Error                  error
+    Data                   interface{}
+    RequestID              string
+    RetryCount             int
+    Retryable              *bool
+    RetryDelay             time.Duration
+    NotHoist               bool
+    SignedHeaderVals       http.Header
+    LastSignedAt           time.Time
+    DisableFollowRedirects bool
+
+    // A value greater than 0 instructs the request to be signed as a Presigned
+    // URL. You should not set this field directly. Instead use Request's
+    // Presign or PresignRequest methods.
+    ExpireTime time.Duration
+
+    context aws.Context
+
+    built bool
+
+    // Need to persist an intermediate body between the input Body and HTTP
+    // request body because the HTTP Client's transport can maintain a reference
+    // to the HTTP request's body after the client has returned. This value is
+    // safe to use concurrently and wraps the input Body for each HTTP request.
+    safeBody *offsetReader
+}
+
+// An Operation is the service API operation to be made.
+type Operation struct {
+    Name       string
+    HTTPMethod string
+    HTTPPath   string
+    *Paginator
+
+    BeforePresignFn func(r *Request) error
+}
+
+// New returns a new Request pointer for the service API
+// operation and parameters.
+//
+// Params is any value of input parameters to be the request payload.
+// Data is a pointer value to an object to which the request's response
+// payload will be deserialized.
+func New(cfg aws.Config, clientInfo metadata.ClientInfo, handlers Handlers,
+    retryer Retryer, operation *Operation, params interface{}, data interface{}) *Request {
+
+    method := operation.HTTPMethod
+    if method == "" {
+        method = "POST"
+    }
+
+    httpReq, _ := http.NewRequest(method, "", nil)
+
+    var err error
+    httpReq.URL, err = url.Parse(clientInfo.Endpoint + operation.HTTPPath)
+    if err != nil {
+        httpReq.URL = &url.URL{}
+        err = awserr.New("InvalidEndpointURL", "invalid endpoint uri", err)
+    }
+
+    SanitizeHostForHeader(httpReq)
+
+    r := &Request{
+        Config:     cfg,
+        ClientInfo: clientInfo,
+        Handlers:   handlers.Copy(),
+
+        Retryer:     retryer,
+        Time:        time.Now(),
+        ExpireTime:  0,
+        Operation:   operation,
+        HTTPRequest: httpReq,
+        Body:        nil,
+        Params:      params,
+        Error:       err,
+        Data:        data,
+    }
+    r.SetBufferBody([]byte{})
+
+    return r
+}
+
+// An Option is a functional option that can augment or modify a request when
+// using a WithContext API operation method.
+type Option func(*Request)
+
+// WithGetResponseHeader builds a request Option which will retrieve a single
+// header value from the HTTP Response. If there are multiple values for the
+// header key use WithGetResponseHeaders instead to access the http.Header
+// map directly. The passed in val pointer must be non-nil.
+//
+// This Option can be used multiple times with a single API operation.
+//
+//	var id2, versionID string
+//	svc.PutObjectWithContext(ctx, params,
+//		request.WithGetResponseHeader("x-amz-id-2", &id2),
+//		request.WithGetResponseHeader("x-amz-version-id", &versionID),
+//	)
+func WithGetResponseHeader(key string, val *string) Option {
+	return func(r *Request) {
+		r.Handlers.Complete.PushBack(func(req *Request) {
+			*val = req.HTTPResponse.Header.Get(key)
+		})
+	}
+}
+
+// WithGetResponseHeaders builds a request Option which will retrieve the
+// headers from the HTTP response and assign them to the passed in headers
+// variable. The passed in headers pointer must be non-nil.
+//
+//	var headers http.Header
+//	svc.PutObjectWithContext(ctx, params, request.WithGetResponseHeaders(&headers))
+func WithGetResponseHeaders(headers *http.Header) Option {
+	return func(r *Request) {
+		r.Handlers.Complete.PushBack(func(req *Request) {
+			*headers = req.HTTPResponse.Header
+		})
+	}
+}
+
+// WithLogLevel is a request option that will set the request to use a specific
+// log level when the request is made.
+//
+//	svc.PutObjectWithContext(ctx, params, request.WithLogLevel(aws.LogDebugWithHTTPBody))
+func WithLogLevel(l aws.LogLevelType) Option {
+	return func(r *Request) {
+		r.Config.LogLevel = aws.LogLevel(l)
+	}
+}
+
+// ApplyOptions will apply each option to the request, calling them in the order
+// they were provided.
+func (r *Request) ApplyOptions(opts ...Option) {
+	for _, opt := range opts {
+		opt(r)
+	}
+}
+
+// Context will always return a non-nil context. If the Request does not have a
+// context aws.BackgroundContext will be returned.
+func (r *Request) Context() aws.Context {
+	if r.context != nil {
+		return r.context
+	}
+	return aws.BackgroundContext()
+}
+
+// SetContext adds a Context to the current request that can be used to cancel
+// an in-flight request. The Context value must not be nil, or this method will
+// panic.
+//
+// Unlike http.Request.WithContext, SetContext does not return a copy of the
+// Request. It is not safe to use a single Request value for multiple
+// requests. A new Request should be created for each API operation request.
+//
+// Go 1.6 and below:
+// The http.Request's Cancel field will be set to the Done() value of
+// the context. This will overwrite the Cancel field's value.
+//
+// Go 1.7 and above:
+// The http.Request.WithContext will be used to set the context on the underlying
+// http.Request. This will create a shallow copy of the http.Request. The SDK
+// may create sub contexts in the future for nested requests such as retries.
+func (r *Request) SetContext(ctx aws.Context) {
+	if ctx == nil {
+		panic("context cannot be nil")
+	}
+	setRequestContext(r, ctx)
+}
+
+// WillRetry returns whether the request can be retried.
+func (r *Request) WillRetry() bool {
+	if !aws.IsReaderSeekable(r.Body) && r.HTTPRequest.Body != NoBody {
+		return false
+	}
+	return r.Error != nil && aws.BoolValue(r.Retryable) && r.RetryCount < r.MaxRetries()
+}
+
+func fmtAttemptCount(retryCount, maxRetries int) string {
+	return fmt.Sprintf("attempt %v/%v", retryCount, maxRetries)
+}
+
+// ParamsFilled returns whether the request's parameters have been populated
+// and the parameters are valid. False is returned if no parameters are
+// provided or they are invalid.
+func (r *Request) ParamsFilled() bool {
+	return r.Params != nil && reflect.ValueOf(r.Params).Elem().IsValid()
+}
+
+// DataFilled returns true if the request's data for response deserialization
+// target has been set and is valid. False is returned if data is not
+// set, or is invalid.
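+//
+// A minimal sketch of the usual generated-client pattern (svc, params, and
+// GetObjectRequest are assumed names, shown only for illustration):
+//
+//	req, out := svc.GetObjectRequest(params) // out is wired into req.Data
+//	if err := req.Send(); err == nil && req.DataFilled() {
+//		// out now holds the deserialized response payload
+//	}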
+func (r *Request) DataFilled() bool {
+	return r.Data != nil && reflect.ValueOf(r.Data).Elem().IsValid()
+}
+
+// SetBufferBody will set the request's body bytes that will be sent to
+// the service API.
+func (r *Request) SetBufferBody(buf []byte) {
+	r.SetReaderBody(bytes.NewReader(buf))
+}
+
+// SetStringBody sets the body of the request to be backed by a string.
+func (r *Request) SetStringBody(s string) {
+	r.SetReaderBody(strings.NewReader(s))
+}
+
+// SetReaderBody will set the request's body reader.
+func (r *Request) SetReaderBody(reader io.ReadSeeker) {
+	r.Body = reader
+	r.BodyStart, _ = reader.Seek(0, sdkio.SeekCurrent) // Get the Body's current offset.
+	r.ResetBody()
+}
+
+// Presign returns the request's signed URL. Error will be returned
+// if the signing fails. The expire parameter is only used for presigned Amazon
+// S3 API requests. All other AWS services will use a fixed expiration
+// time of 15 minutes.
+//
+// It is invalid to create a presigned URL with an expire duration of 0 or
+// less. An error is returned if the expire duration is 0 or less.
+func (r *Request) Presign(expire time.Duration) (string, error) {
+	r = r.copy()
+
+	// Presign requires all headers be hoisted. There is no way to retrieve
+	// the signed headers that were not hoisted, which would make the
+	// presigned URL useless.
+	r.NotHoist = false
+
+	u, _, err := getPresignedURL(r, expire)
+	return u, err
+}
+
+// PresignRequest behaves just like Presign, with the addition of returning a
+// set of headers that were signed. The expire parameter is only used for
+// presigned Amazon S3 API requests. All other AWS services will use a fixed
+// expiration time of 15 minutes.
+//
+// It is invalid to create a presigned URL with an expire duration of 0 or
+// less. An error is returned if the expire duration is 0 or less.
+//
+// Returns the URL string for the API operation with signature in the query string,
+// and the HTTP headers that were included in the signature. These headers must
+// be included in any HTTP request made with the presigned URL.
+//
+// To prevent hoisting any headers to the query string set NotHoist to true on
+// this Request value prior to calling PresignRequest.
+func (r *Request) PresignRequest(expire time.Duration) (string, http.Header, error) {
+	r = r.copy()
+	return getPresignedURL(r, expire)
+}
+
+// IsPresigned returns true if the request represents a presigned API URL.
+func (r *Request) IsPresigned() bool {
+	return r.ExpireTime != 0
+}
+
+func getPresignedURL(r *Request, expire time.Duration) (string, http.Header, error) {
+	if expire <= 0 {
+		return "", nil, awserr.New(
+			ErrCodeInvalidPresignExpire,
+			"presigned URL requires an expire duration greater than 0",
+			nil,
+		)
+	}
+
+	r.ExpireTime = expire
+
+	if r.Operation.BeforePresignFn != nil {
+		if err := r.Operation.BeforePresignFn(r); err != nil {
+			return "", nil, err
+		}
+	}
+
+	if err := r.Sign(); err != nil {
+		return "", nil, err
+	}
+
+	return r.HTTPRequest.URL.String(), r.SignedHeaderVals, nil
+}
+
+const (
+	willRetry   = "will retry"
+	notRetrying = "not retrying"
+	retryCount  = "retry %v/%v"
+)
+
+func debugLogReqError(r *Request, stage, retryStr string, err error) {
+	if !r.Config.LogLevel.Matches(aws.LogDebugWithRequestErrors) {
+		return
+	}
+
+	r.Config.Logger.Log(fmt.Sprintf("DEBUG: %s %s/%s failed, %s, error %v",
+		stage, r.ClientInfo.ServiceName, r.Operation.Name, retryStr, err))
+}
+
+// Build will build the request's object so it can be signed and sent
+// to the service.
Build will also validate all the request's parameters. +// Any additional build Handlers set on this request will be run +// in the order they were set. +// +// The request will only be built once. Multiple calls to build will have +// no effect. +// +// If any Validate or Build errors occur the build will stop and the error +// which occurred will be returned. +func (r *Request) Build() error { + if !r.built { + r.Handlers.Validate.Run(r) + if r.Error != nil { + debugLogReqError(r, "Validate Request", notRetrying, r.Error) + return r.Error + } + r.Handlers.Build.Run(r) + if r.Error != nil { + debugLogReqError(r, "Build Request", notRetrying, r.Error) + return r.Error + } + r.built = true + } + + return r.Error +} + +// Sign will sign the request, returning error if errors are encountered. +// +// Sign will build the request prior to signing. All Sign Handlers will +// be executed in the order they were set. +func (r *Request) Sign() error { + r.Build() + if r.Error != nil { + debugLogReqError(r, "Build Request", notRetrying, r.Error) + return r.Error + } + + r.Handlers.Sign.Run(r) + return r.Error +} + +func (r *Request) getNextRequestBody() (io.ReadCloser, error) { + if r.safeBody != nil { + r.safeBody.Close() + } + + r.safeBody = newOffsetReader(r.Body, r.BodyStart) + + // Go 1.8 tightened and clarified the rules code needs to use when building + // requests with the http package. Go 1.8 removed the automatic detection + // of if the Request.Body was empty, or actually had bytes in it. The SDK + // always sets the Request.Body even if it is empty and should not actually + // be sent. This is incorrect. + // + // Go 1.8 did add a http.NoBody value that the SDK can use to tell the http + // client that the request really should be sent without a body. The + // Request.Body cannot be set to nil, which is preferable, because the + // field is exported and could introduce nil pointer dereferences for users + // of the SDK if they used that field. + // + // Related golang/go#18257 + l, err := aws.SeekerLen(r.Body) + if err != nil { + return nil, awserr.New(ErrCodeSerialization, "failed to compute request body size", err) + } + + var body io.ReadCloser + if l == 0 { + body = NoBody + } else if l > 0 { + body = r.safeBody + } else { + // Hack to prevent sending bodies for methods where the body + // should be ignored by the server. Sending bodies on these + // methods without an associated ContentLength will cause the + // request to socket timeout because the server does not handle + // Transfer-Encoding: chunked bodies for these methods. + // + // This would only happen if a aws.ReaderSeekerCloser was used with + // a io.Reader that was not also an io.Seeker, or did not implement + // Len() method. + switch r.Operation.HTTPMethod { + case "GET", "HEAD", "DELETE": + body = NoBody + default: + body = r.safeBody + } + } + + return body, nil +} + +// GetBody will return an io.ReadSeeker of the Request's underlying +// input body with a concurrency safe wrapper. +func (r *Request) GetBody() io.ReadSeeker { + return r.safeBody +} + +// Send will send the request, returning error if errors are encountered. +// +// Send will sign the request prior to sending. All Send Handlers will +// be executed in the order they were set. +// +// Canceling a request is non-deterministic. If a request has been canceled, +// then the transport will choose, randomly, one of the state channels during +// reads or getting the connection. 
+// +// readLoop() and getConn(req *Request, cm connectMethod) +// https://github.com/golang/go/blob/master/src/net/http/transport.go +// +// Send will not close the request.Request's body. +func (r *Request) Send() error { + defer func() { + // Regardless of success or failure of the request trigger the Complete + // request handlers. + r.Handlers.Complete.Run(r) + }() + + if err := r.Error; err != nil { + return err + } + + for { + r.Error = nil + r.AttemptTime = time.Now() + + if err := r.Sign(); err != nil { + debugLogReqError(r, "Sign Request", notRetrying, err) + return err + } + + if err := r.sendRequest(); err == nil { + return nil + } else if !shouldRetryError(r.Error) { + return err + } else { + r.Handlers.Retry.Run(r) + r.Handlers.AfterRetry.Run(r) + + if r.Error != nil || !aws.BoolValue(r.Retryable) { + return r.Error + } + + r.prepareRetry() + continue + } + } +} + +func (r *Request) prepareRetry() { + if r.Config.LogLevel.Matches(aws.LogDebugWithRequestRetries) { + r.Config.Logger.Log(fmt.Sprintf("DEBUG: Retrying Request %s/%s, attempt %d", + r.ClientInfo.ServiceName, r.Operation.Name, r.RetryCount)) + } + + // The previous http.Request will have a reference to the r.Body + // and the HTTP Client's Transport may still be reading from + // the request's body even though the Client's Do returned. + r.HTTPRequest = copyHTTPRequest(r.HTTPRequest, nil) + r.ResetBody() + + // Closing response body to ensure that no response body is leaked + // between retry attempts. + if r.HTTPResponse != nil && r.HTTPResponse.Body != nil { + r.HTTPResponse.Body.Close() + } +} + +func (r *Request) sendRequest() (sendErr error) { + defer r.Handlers.CompleteAttempt.Run(r) + + r.Retryable = nil + r.Handlers.Send.Run(r) + if r.Error != nil { + debugLogReqError(r, "Send Request", + fmtAttemptCount(r.RetryCount, r.MaxRetries()), + r.Error) + return r.Error + } + + r.Handlers.UnmarshalMeta.Run(r) + r.Handlers.ValidateResponse.Run(r) + if r.Error != nil { + r.Handlers.UnmarshalError.Run(r) + debugLogReqError(r, "Validate Response", + fmtAttemptCount(r.RetryCount, r.MaxRetries()), + r.Error) + return r.Error + } + + r.Handlers.Unmarshal.Run(r) + if r.Error != nil { + debugLogReqError(r, "Unmarshal Response", + fmtAttemptCount(r.RetryCount, r.MaxRetries()), + r.Error) + return r.Error + } + + return nil +} + +// copy will copy a request which will allow for local manipulation of the +// request. +func (r *Request) copy() *Request { + req := &Request{} + *req = *r + req.Handlers = r.Handlers.Copy() + op := *r.Operation + req.Operation = &op + return req +} + +// AddToUserAgent adds the string to the end of the request's current user agent. +func AddToUserAgent(r *Request, s string) { + curUA := r.HTTPRequest.Header.Get("User-Agent") + if len(curUA) > 0 { + s = curUA + " " + s + } + r.HTTPRequest.Header.Set("User-Agent", s) +} + +type temporary interface { + Temporary() bool +} + +func shouldRetryError(origErr error) bool { + switch err := origErr.(type) { + case awserr.Error: + if err.Code() == CanceledErrorCode { + return false + } + return shouldRetryError(err.OrigErr()) + case *url.Error: + if strings.Contains(err.Error(), "connection refused") { + // Refused connections should be retried as the service may not yet + // be running on the port. Go TCP dial considers refused + // connections as not temporary. 
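+			// For example, a *url.Error wrapping a dial error such as
+			// "connect: connection refused", seen while the service is
+			// still starting up, falls into this branch.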
+ return true + } + // *url.Error only implements Temporary after golang 1.6 but since + // url.Error only wraps the error: + return shouldRetryError(err.Err) + case temporary: + if netErr, ok := err.(*net.OpError); ok && netErr.Op == "dial" { + return true + } + // If the error is temporary, we want to allow continuation of the + // retry process + return err.Temporary() || isErrConnectionReset(origErr) + case nil: + // `awserr.Error.OrigErr()` can be nil, meaning there was an error but + // because we don't know the cause, it is marked as retryable. See + // TestRequest4xxUnretryable for an example. + return true + default: + switch err.Error() { + case "net/http: request canceled", + "net/http: request canceled while waiting for connection": + // known 1.5 error case when an http request is cancelled + return false + } + // here we don't know the error; so we allow a retry. + return true + } +} + +// SanitizeHostForHeader removes default port from host and updates request.Host +func SanitizeHostForHeader(r *http.Request) { + host := getHost(r) + port := portOnly(host) + if port != "" && isDefaultPort(r.URL.Scheme, port) { + r.Host = stripPort(host) + } +} + +// Returns host from request +func getHost(r *http.Request) string { + if r.Host != "" { + return r.Host + } + + return r.URL.Host +} + +// Hostname returns u.Host, without any port number. +// +// If Host is an IPv6 literal with a port number, Hostname returns the +// IPv6 literal without the square brackets. IPv6 literals may include +// a zone identifier. +// +// Copied from the Go 1.8 standard library (net/url) +func stripPort(hostport string) string { + colon := strings.IndexByte(hostport, ':') + if colon == -1 { + return hostport + } + if i := strings.IndexByte(hostport, ']'); i != -1 { + return strings.TrimPrefix(hostport[:i], "[") + } + return hostport[:colon] +} + +// Port returns the port part of u.Host, without the leading colon. +// If u.Host doesn't contain a port, Port returns an empty string. +// +// Copied from the Go 1.8 standard library (net/url) +func portOnly(hostport string) string { + colon := strings.IndexByte(hostport, ':') + if colon == -1 { + return "" + } + if i := strings.Index(hostport, "]:"); i != -1 { + return hostport[i+len("]:"):] + } + if strings.Contains(hostport, "]") { + return "" + } + return hostport[colon+len(":"):] +} + +// Returns true if the specified URI is using the standard port +// (i.e. port 80 for HTTP URIs or 443 for HTTPS URIs) +func isDefaultPort(scheme, port string) bool { + if port == "" { + return true + } + + lowerCaseScheme := strings.ToLower(scheme) + if (lowerCaseScheme == "http" && port == "80") || (lowerCaseScheme == "https" && port == "443") { + return true + } + + return false +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go new file mode 100644 index 000000000..e36e468b7 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go @@ -0,0 +1,39 @@ +// +build !go1.8 + +package request + +import "io" + +// NoBody is an io.ReadCloser with no bytes. Read always returns EOF +// and Close always returns nil. It can be used in an outgoing client +// request to explicitly signal that a request has zero bytes. +// An alternative, however, is to simply set Request.Body to nil. 
+//
+// Copy of Go 1.8 NoBody type from net/http/http.go
+type noBody struct{}
+
+func (noBody) Read([]byte) (int, error)         { return 0, io.EOF }
+func (noBody) Close() error                     { return nil }
+func (noBody) WriteTo(io.Writer) (int64, error) { return 0, nil }
+
+// NoBody is an empty reader that will trigger the Go HTTP client to not include
+// any body in the HTTP request.
+var NoBody = noBody{}
+
+// ResetBody rewinds the request body back to its starting position, and
+// sets the HTTP Request body reference. When the body is read prior
+// to being sent in the HTTP request it will need to be rewound.
+//
+// ResetBody will automatically be called by the SDK's build handler, but if
+// the request is being used directly ResetBody must be called before the request
+// is Sent. SetStringBody, SetBufferBody, and SetReaderBody will automatically
+// call ResetBody.
+func (r *Request) ResetBody() {
+	body, err := r.getNextRequestBody()
+	if err != nil {
+		r.Error = err
+		return
+	}
+
+	r.HTTPRequest.Body = body
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go
new file mode 100644
index 000000000..7c6a8000f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go
@@ -0,0 +1,33 @@
+// +build go1.8
+
+package request
+
+import (
+	"net/http"
+)
+
+// NoBody is an http.NoBody reader instructing the Go HTTP client to not
+// include any body in the HTTP request.
+var NoBody = http.NoBody
+
+// ResetBody rewinds the request body back to its starting position, and
+// sets the HTTP Request body reference. When the body is read prior
+// to being sent in the HTTP request it will need to be rewound.
+//
+// ResetBody will automatically be called by the SDK's build handler, but if
+// the request is being used directly ResetBody must be called before the request
+// is Sent. SetStringBody, SetBufferBody, and SetReaderBody will automatically
+// call ResetBody.
+//
+// ResetBody will also set Go 1.8's http.Request.GetBody field to allow
+// retrying PUT/POST redirects.
+func (r *Request) ResetBody() {
+	body, err := r.getNextRequestBody()
+	if err != nil {
+		r.Error = err
+		return
+	}
+
+	r.HTTPRequest.Body = body
+	r.HTTPRequest.GetBody = r.getNextRequestBody
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go
new file mode 100644
index 000000000..a7365cd1e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go
@@ -0,0 +1,14 @@
+// +build go1.7
+
+package request
+
+import "github.com/aws/aws-sdk-go/aws"
+
+// setContext updates the Request to use the passed in context for cancellation.
+// Context will also be used for request retry delay.
+//
+// Creates shallow copy of the http.Request with the WithContext method.
+func setRequestContext(r *Request, ctx aws.Context) {
+	r.context = ctx
+	r.HTTPRequest = r.HTTPRequest.WithContext(ctx)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go
new file mode 100644
index 000000000..307fa0705
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go
@@ -0,0 +1,14 @@
+// +build !go1.7
+
+package request
+
+import "github.com/aws/aws-sdk-go/aws"
+
+// setContext updates the Request to use the passed in context for cancellation.
+// Context will also be used for request retry delay.
+// +// Creates shallow copy of the http.Request with the WithContext method. +func setRequestContext(r *Request, ctx aws.Context) { + r.context = ctx + r.HTTPRequest.Cancel = ctx.Done() +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go new file mode 100644 index 000000000..a633ed5ac --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go @@ -0,0 +1,264 @@ +package request + +import ( + "reflect" + "sync/atomic" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awsutil" +) + +// A Pagination provides paginating of SDK API operations which are paginatable. +// Generally you should not use this type directly, but use the "Pages" API +// operations method to automatically perform pagination for you. Such as, +// "S3.ListObjectsPages", and "S3.ListObjectsPagesWithContext" methods. +// +// Pagination differs from a Paginator type in that pagination is the type that +// does the pagination between API operations, and Paginator defines the +// configuration that will be used per page request. +// +// cont := true +// for p.Next() && cont { +// data := p.Page().(*s3.ListObjectsOutput) +// // process the page's data +// } +// return p.Err() +// +// See service client API operation Pages methods for examples how the SDK will +// use the Pagination type. +type Pagination struct { + // Function to return a Request value for each pagination request. + // Any configuration or handlers that need to be applied to the request + // prior to getting the next page should be done here before the request + // returned. + // + // NewRequest should always be built from the same API operations. It is + // undefined if different API operations are returned on subsequent calls. + NewRequest func() (*Request, error) + // EndPageOnSameToken, when enabled, will allow the paginator to stop on + // token that are the same as its previous tokens. + EndPageOnSameToken bool + + started bool + prevTokens []interface{} + nextTokens []interface{} + + err error + curPage interface{} +} + +// HasNextPage will return true if Pagination is able to determine that the API +// operation has additional pages. False will be returned if there are no more +// pages remaining. +// +// Will always return true if Next has not been called yet. +func (p *Pagination) HasNextPage() bool { + if !p.started { + return true + } + + hasNextPage := len(p.nextTokens) != 0 + if p.EndPageOnSameToken { + return hasNextPage && !awsutil.DeepEqual(p.nextTokens, p.prevTokens) + } + return hasNextPage +} + +// Err returns the error Pagination encountered when retrieving the next page. +func (p *Pagination) Err() error { + return p.err +} + +// Page returns the current page. Page should only be called after a successful +// call to Next. It is undefined what Page will return if Page is called after +// Next returns false. +func (p *Pagination) Page() interface{} { + return p.curPage +} + +// Next will attempt to retrieve the next page for the API operation. When a page +// is retrieved true will be returned. If the page cannot be retrieved, or there +// are no more pages false will be returned. +// +// Use the Page method to retrieve the current page data. The data will need +// to be cast to the API operation's output type. +// +// Use the Err method to determine if an error occurred if Page returns false. 
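+//
+// A minimal sketch (s3.ListObjectsOutput is assumed as the page type, for
+// illustration only):
+//
+//	for p.Next() {
+//		page := p.Page().(*s3.ListObjectsOutput)
+//		// process the page's data
+//	}
+//	if err := p.Err(); err != nil {
+//		// handle the pagination error
+//	}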
+func (p *Pagination) Next() bool { + if !p.HasNextPage() { + return false + } + + req, err := p.NewRequest() + if err != nil { + p.err = err + return false + } + + if p.started { + for i, intok := range req.Operation.InputTokens { + awsutil.SetValueAtPath(req.Params, intok, p.nextTokens[i]) + } + } + p.started = true + + err = req.Send() + if err != nil { + p.err = err + return false + } + + p.prevTokens = p.nextTokens + p.nextTokens = req.nextPageTokens() + p.curPage = req.Data + + return true +} + +// A Paginator is the configuration data that defines how an API operation +// should be paginated. This type is used by the API service models to define +// the generated pagination config for service APIs. +// +// The Pagination type is what provides iterating between pages of an API. It +// is only used to store the token metadata the SDK should use for performing +// pagination. +type Paginator struct { + InputTokens []string + OutputTokens []string + LimitToken string + TruncationToken string +} + +// nextPageTokens returns the tokens to use when asking for the next page of data. +func (r *Request) nextPageTokens() []interface{} { + if r.Operation.Paginator == nil { + return nil + } + if r.Operation.TruncationToken != "" { + tr, _ := awsutil.ValuesAtPath(r.Data, r.Operation.TruncationToken) + if len(tr) == 0 { + return nil + } + + switch v := tr[0].(type) { + case *bool: + if !aws.BoolValue(v) { + return nil + } + case bool: + if v == false { + return nil + } + } + } + + tokens := []interface{}{} + tokenAdded := false + for _, outToken := range r.Operation.OutputTokens { + vs, _ := awsutil.ValuesAtPath(r.Data, outToken) + if len(vs) == 0 { + tokens = append(tokens, nil) + continue + } + v := vs[0] + + switch tv := v.(type) { + case *string: + if len(aws.StringValue(tv)) == 0 { + tokens = append(tokens, nil) + continue + } + case string: + if len(tv) == 0 { + tokens = append(tokens, nil) + continue + } + } + + tokenAdded = true + tokens = append(tokens, v) + } + if !tokenAdded { + return nil + } + + return tokens +} + +// Ensure a deprecated item is only logged once instead of each time its used. +func logDeprecatedf(logger aws.Logger, flag *int32, msg string) { + if logger == nil { + return + } + if atomic.CompareAndSwapInt32(flag, 0, 1) { + logger.Log(msg) + } +} + +var ( + logDeprecatedHasNextPage int32 + logDeprecatedNextPage int32 + logDeprecatedEachPage int32 +) + +// HasNextPage returns true if this request has more pages of data available. +// +// Deprecated Use Pagination type for configurable pagination of API operations +func (r *Request) HasNextPage() bool { + logDeprecatedf(r.Config.Logger, &logDeprecatedHasNextPage, + "Request.HasNextPage deprecated. Use Pagination type for configurable pagination of API operations") + + return len(r.nextPageTokens()) > 0 +} + +// NextPage returns a new Request that can be executed to return the next +// page of result data. Call .Send() on this request to execute it. +// +// Deprecated Use Pagination type for configurable pagination of API operations +func (r *Request) NextPage() *Request { + logDeprecatedf(r.Config.Logger, &logDeprecatedNextPage, + "Request.NextPage deprecated. 
Use Pagination type for configurable pagination of API operations") + + tokens := r.nextPageTokens() + if len(tokens) == 0 { + return nil + } + + data := reflect.New(reflect.TypeOf(r.Data).Elem()).Interface() + nr := New(r.Config, r.ClientInfo, r.Handlers, r.Retryer, r.Operation, awsutil.CopyOf(r.Params), data) + for i, intok := range nr.Operation.InputTokens { + awsutil.SetValueAtPath(nr.Params, intok, tokens[i]) + } + return nr +} + +// EachPage iterates over each page of a paginated request object. The fn +// parameter should be a function with the following sample signature: +// +// func(page *T, lastPage bool) bool { +// return true // return false to stop iterating +// } +// +// Where "T" is the structure type matching the output structure of the given +// operation. For example, a request object generated by +// DynamoDB.ListTablesRequest() would expect to see dynamodb.ListTablesOutput +// as the structure "T". The lastPage value represents whether the page is +// the last page of data or not. The return value of this function should +// return true to keep iterating or false to stop. +// +// Deprecated Use Pagination type for configurable pagination of API operations +func (r *Request) EachPage(fn func(data interface{}, isLastPage bool) (shouldContinue bool)) error { + logDeprecatedf(r.Config.Logger, &logDeprecatedEachPage, + "Request.EachPage deprecated. Use Pagination type for configurable pagination of API operations") + + for page := r; page != nil; page = page.NextPage() { + if err := page.Send(); err != nil { + return err + } + if getNextPage := fn(page.Data, !page.HasNextPage()); !getNextPage { + return page.Error + } + } + + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go new file mode 100644 index 000000000..d0aa54c6d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go @@ -0,0 +1,163 @@ +package request + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" +) + +// Retryer is an interface to control retry logic for a given service. +// The default implementation used by most services is the client.DefaultRetryer +// structure, which contains basic retry logic using exponential backoff. +type Retryer interface { + RetryRules(*Request) time.Duration + ShouldRetry(*Request) bool + MaxRetries() int +} + +// WithRetryer sets a config Retryer value to the given Config returning it +// for chaining. +func WithRetryer(cfg *aws.Config, retryer Retryer) *aws.Config { + cfg.Retryer = retryer + return cfg +} + +// retryableCodes is a collection of service response codes which are retry-able +// without any further action. +var retryableCodes = map[string]struct{}{ + "RequestError": {}, + "RequestTimeout": {}, + ErrCodeResponseTimeout: {}, + "RequestTimeoutException": {}, // Glacier's flavor of RequestTimeout +} + +var throttleCodes = map[string]struct{}{ + "ProvisionedThroughputExceededException": {}, + "Throttling": {}, + "ThrottlingException": {}, + "RequestLimitExceeded": {}, + "RequestThrottled": {}, + "RequestThrottledException": {}, + "TooManyRequestsException": {}, // Lambda functions + "PriorRequestNotComplete": {}, // Route53 + "TransactionInProgressException": {}, +} + +// credsExpiredCodes is a collection of error codes which signify the credentials +// need to be refreshed. Expired tokens require refreshing of credentials, and +// resigning before the request can be retried. 
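+//
+// Calling code typically consults these tables through the exported helpers
+// rather than directly; a minimal sketch:
+//
+//	if request.IsErrorExpiredCreds(err) {
+//		// refresh credentials and re-sign before retrying
+//	}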
+var credsExpiredCodes = map[string]struct{}{ + "ExpiredToken": {}, + "ExpiredTokenException": {}, + "RequestExpired": {}, // EC2 Only +} + +func isCodeThrottle(code string) bool { + _, ok := throttleCodes[code] + return ok +} + +func isCodeRetryable(code string) bool { + if _, ok := retryableCodes[code]; ok { + return true + } + + return isCodeExpiredCreds(code) +} + +func isCodeExpiredCreds(code string) bool { + _, ok := credsExpiredCodes[code] + return ok +} + +var validParentCodes = map[string]struct{}{ + ErrCodeSerialization: {}, + ErrCodeRead: {}, +} + +type temporaryError interface { + Temporary() bool +} + +func isNestedErrorRetryable(parentErr awserr.Error) bool { + if parentErr == nil { + return false + } + + if _, ok := validParentCodes[parentErr.Code()]; !ok { + return false + } + + err := parentErr.OrigErr() + if err == nil { + return false + } + + if aerr, ok := err.(awserr.Error); ok { + return isCodeRetryable(aerr.Code()) + } + + if t, ok := err.(temporaryError); ok { + return t.Temporary() || isErrConnectionReset(err) + } + + return isErrConnectionReset(err) +} + +// IsErrorRetryable returns whether the error is retryable, based on its Code. +// Returns false if error is nil. +func IsErrorRetryable(err error) bool { + if err != nil { + if aerr, ok := err.(awserr.Error); ok { + return isCodeRetryable(aerr.Code()) || isNestedErrorRetryable(aerr) + } + } + return false +} + +// IsErrorThrottle returns whether the error is to be throttled based on its code. +// Returns false if error is nil. +func IsErrorThrottle(err error) bool { + if err != nil { + if aerr, ok := err.(awserr.Error); ok { + return isCodeThrottle(aerr.Code()) + } + } + return false +} + +// IsErrorExpiredCreds returns whether the error code is a credential expiry error. +// Returns false if error is nil. +func IsErrorExpiredCreds(err error) bool { + if err != nil { + if aerr, ok := err.(awserr.Error); ok { + return isCodeExpiredCreds(aerr.Code()) + } + } + return false +} + +// IsErrorRetryable returns whether the error is retryable, based on its Code. +// Returns false if the request has no Error set. +// +// Alias for the utility function IsErrorRetryable +func (r *Request) IsErrorRetryable() bool { + return IsErrorRetryable(r.Error) +} + +// IsErrorThrottle returns whether the error is to be throttled based on its code. +// Returns false if the request has no Error set +// +// Alias for the utility function IsErrorThrottle +func (r *Request) IsErrorThrottle() bool { + return IsErrorThrottle(r.Error) +} + +// IsErrorExpired returns whether the error code is a credential expiry error. +// Returns false if the request has no Error set. +// +// Alias for the utility function IsErrorExpiredCreds +func (r *Request) IsErrorExpired() bool { + return IsErrorExpiredCreds(r.Error) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go b/vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go new file mode 100644 index 000000000..09a44eb98 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go @@ -0,0 +1,94 @@ +package request + +import ( + "io" + "time" + + "github.com/aws/aws-sdk-go/aws/awserr" +) + +var timeoutErr = awserr.New( + ErrCodeResponseTimeout, + "read on body has reached the timeout limit", + nil, +) + +type readResult struct { + n int + err error +} + +// timeoutReadCloser will handle body reads that take too long. +// We will return a ErrReadTimeout error if a timeout occurs. 
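+// The timeout applies to each individual Read call rather than to the body as
+// a whole: every Read races a fresh timer against the underlying reader.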
+type timeoutReadCloser struct {
+	reader   io.ReadCloser
+	duration time.Duration
+}
+
+// Read will spin off a goroutine to call the reader's Read method. We will
+// select on the timer's channel or the read's channel. Whoever completes first
+// will be returned.
+func (r *timeoutReadCloser) Read(b []byte) (int, error) {
+	timer := time.NewTimer(r.duration)
+	c := make(chan readResult, 1)
+
+	go func() {
+		n, err := r.reader.Read(b)
+		timer.Stop()
+		c <- readResult{n: n, err: err}
+	}()
+
+	select {
+	case data := <-c:
+		return data.n, data.err
+	case <-timer.C:
+		return 0, timeoutErr
+	}
+}
+
+func (r *timeoutReadCloser) Close() error {
+	return r.reader.Close()
+}
+
+const (
+	// HandlerResponseTimeout is what we use to signify the name of the
+	// response timeout handler.
+	HandlerResponseTimeout = "ResponseTimeoutHandler"
+)
+
+// adaptToResponseTimeoutError is a handler that will replace the top level
+// error with its nested error, if the nested error is an
+// ErrCodeResponseTimeout error.
+func adaptToResponseTimeoutError(req *Request) {
+	if err, ok := req.Error.(awserr.Error); ok {
+		aerr, ok := err.OrigErr().(awserr.Error)
+		if ok && aerr.Code() == ErrCodeResponseTimeout {
+			req.Error = aerr
+		}
+	}
+}
+
+// WithResponseReadTimeout is a request option that will wrap the body in a timeout read closer.
+// This allows for per-read timeouts. If a read times out, an error with code
+// ErrCodeResponseTimeout is returned.
+//
+//	svc.PutObjectWithContext(ctx, params, request.WithResponseReadTimeout(30 * time.Second))
+func WithResponseReadTimeout(duration time.Duration) Option {
+	return func(r *Request) {
+
+		var timeoutHandler = NamedHandler{
+			HandlerResponseTimeout,
+			func(req *Request) {
+				req.HTTPResponse.Body = &timeoutReadCloser{
+					reader:   req.HTTPResponse.Body,
+					duration: duration,
+				}
+			}}
+
+		// remove the handler so we are not stomping over any new durations.
+		r.Handlers.Send.RemoveByName(HandlerResponseTimeout)
+		r.Handlers.Send.PushBackNamed(timeoutHandler)
+
+		r.Handlers.Unmarshal.PushBack(adaptToResponseTimeoutError)
+		r.Handlers.UnmarshalError.PushBack(adaptToResponseTimeoutError)
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go b/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go
new file mode 100644
index 000000000..8630683f3
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go
@@ -0,0 +1,286 @@
+package request
+
+import (
+	"bytes"
+	"fmt"
+
+	"github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+const (
+	// InvalidParameterErrCode is the error code for invalid parameter errors
+	InvalidParameterErrCode = "InvalidParameter"
+	// ParamRequiredErrCode is the error code for required parameter errors
+	ParamRequiredErrCode = "ParamRequiredError"
+	// ParamMinValueErrCode is the error code for fields with too low of a
+	// number value.
+	ParamMinValueErrCode = "ParamMinValueError"
+	// ParamMinLenErrCode is the error code for fields without enough elements.
+	ParamMinLenErrCode = "ParamMinLenError"
+	// ParamMaxLenErrCode is the error code for a value being too long.
+	ParamMaxLenErrCode = "ParamMaxLenError"
+
+	// ParamFormatErrCode is the error code for a field with invalid
+	// format or characters.
+	ParamFormatErrCode = "ParamFormatInvalidError"
+)
+
+// Validator provides a way for types to perform validation logic on their
+// input values that external code can use to determine if a type's values
+// are valid.
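+//
+// A minimal sketch of an implementation (PutWidgetInput is a hypothetical
+// input type, named here only for illustration):
+//
+//	func (i *PutWidgetInput) Validate() error {
+//		invalidParams := request.ErrInvalidParams{Context: "PutWidgetInput"}
+//		if i.Name == nil {
+//			invalidParams.Add(request.NewErrParamRequired("Name"))
+//		}
+//		if invalidParams.Len() > 0 {
+//			return invalidParams
+//		}
+//		return nil
+//	}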
+type Validator interface { + Validate() error +} + +// An ErrInvalidParams provides wrapping of invalid parameter errors found when +// validating API operation input parameters. +type ErrInvalidParams struct { + // Context is the base context of the invalid parameter group. + Context string + errs []ErrInvalidParam +} + +// Add adds a new invalid parameter error to the collection of invalid +// parameters. The context of the invalid parameter will be updated to reflect +// this collection. +func (e *ErrInvalidParams) Add(err ErrInvalidParam) { + err.SetContext(e.Context) + e.errs = append(e.errs, err) +} + +// AddNested adds the invalid parameter errors from another ErrInvalidParams +// value into this collection. The nested errors will have their nested context +// updated and base context to reflect the merging. +// +// Use for nested validations errors. +func (e *ErrInvalidParams) AddNested(nestedCtx string, nested ErrInvalidParams) { + for _, err := range nested.errs { + err.SetContext(e.Context) + err.AddNestedContext(nestedCtx) + e.errs = append(e.errs, err) + } +} + +// Len returns the number of invalid parameter errors +func (e ErrInvalidParams) Len() int { + return len(e.errs) +} + +// Code returns the code of the error +func (e ErrInvalidParams) Code() string { + return InvalidParameterErrCode +} + +// Message returns the message of the error +func (e ErrInvalidParams) Message() string { + return fmt.Sprintf("%d validation error(s) found.", len(e.errs)) +} + +// Error returns the string formatted form of the invalid parameters. +func (e ErrInvalidParams) Error() string { + w := &bytes.Buffer{} + fmt.Fprintf(w, "%s: %s\n", e.Code(), e.Message()) + + for _, err := range e.errs { + fmt.Fprintf(w, "- %s\n", err.Message()) + } + + return w.String() +} + +// OrigErr returns the invalid parameters as a awserr.BatchedErrors value +func (e ErrInvalidParams) OrigErr() error { + return awserr.NewBatchError( + InvalidParameterErrCode, e.Message(), e.OrigErrs()) +} + +// OrigErrs returns a slice of the invalid parameters +func (e ErrInvalidParams) OrigErrs() []error { + errs := make([]error, len(e.errs)) + for i := 0; i < len(errs); i++ { + errs[i] = e.errs[i] + } + + return errs +} + +// An ErrInvalidParam represents an invalid parameter error type. +type ErrInvalidParam interface { + awserr.Error + + // Field name the error occurred on. + Field() string + + // SetContext updates the context of the error. + SetContext(string) + + // AddNestedContext updates the error's context to include a nested level. + AddNestedContext(string) +} + +type errInvalidParam struct { + context string + nestedContext string + field string + code string + msg string +} + +// Code returns the error code for the type of invalid parameter. +func (e *errInvalidParam) Code() string { + return e.code +} + +// Message returns the reason the parameter was invalid, and its context. +func (e *errInvalidParam) Message() string { + return fmt.Sprintf("%s, %s.", e.msg, e.Field()) +} + +// Error returns the string version of the invalid parameter error. +func (e *errInvalidParam) Error() string { + return fmt.Sprintf("%s: %s", e.code, e.Message()) +} + +// OrigErr returns nil, Implemented for awserr.Error interface. +func (e *errInvalidParam) OrigErr() error { + return nil +} + +// Field Returns the field and context the error occurred. +func (e *errInvalidParam) Field() string { + field := e.context + if len(field) > 0 { + field += "." 
+	}
+	if len(e.nestedContext) > 0 {
+		field += fmt.Sprintf("%s.", e.nestedContext)
+	}
+	field += e.field
+
+	return field
+}
+
+// SetContext updates the base context of the error.
+func (e *errInvalidParam) SetContext(ctx string) {
+	e.context = ctx
+}
+
+// AddNestedContext prepends a context to the field's path.
+func (e *errInvalidParam) AddNestedContext(ctx string) {
+	if len(e.nestedContext) == 0 {
+		e.nestedContext = ctx
+	} else {
+		e.nestedContext = fmt.Sprintf("%s.%s", ctx, e.nestedContext)
+	}
+}
+
+// An ErrParamRequired represents a required parameter error.
+type ErrParamRequired struct {
+	errInvalidParam
+}
+
+// NewErrParamRequired creates a new required parameter error.
+func NewErrParamRequired(field string) *ErrParamRequired {
+	return &ErrParamRequired{
+		errInvalidParam{
+			code:  ParamRequiredErrCode,
+			field: field,
+			msg:   "missing required field",
+		},
+	}
+}
+
+// An ErrParamMinValue represents a minimum value parameter error.
+type ErrParamMinValue struct {
+	errInvalidParam
+	min float64
+}
+
+// NewErrParamMinValue creates a new minimum value parameter error.
+func NewErrParamMinValue(field string, min float64) *ErrParamMinValue {
+	return &ErrParamMinValue{
+		errInvalidParam: errInvalidParam{
+			code:  ParamMinValueErrCode,
+			field: field,
+			msg:   fmt.Sprintf("minimum field value of %v", min),
+		},
+		min: min,
+	}
+}
+
+// MinValue returns the field's required minimum value.
+//
+// float64 is returned for both int and float min values.
+func (e *ErrParamMinValue) MinValue() float64 {
+	return e.min
+}
+
+// An ErrParamMinLen represents a minimum length parameter error.
+type ErrParamMinLen struct {
+	errInvalidParam
+	min int
+}
+
+// NewErrParamMinLen creates a new minimum length parameter error.
+func NewErrParamMinLen(field string, min int) *ErrParamMinLen {
+	return &ErrParamMinLen{
+		errInvalidParam: errInvalidParam{
+			code:  ParamMinLenErrCode,
+			field: field,
+			msg:   fmt.Sprintf("minimum field size of %v", min),
+		},
+		min: min,
+	}
+}
+
+// MinLen returns the field's required minimum length.
+func (e *ErrParamMinLen) MinLen() int {
+	return e.min
+}
+
+// An ErrParamMaxLen represents a maximum length parameter error.
+type ErrParamMaxLen struct {
+	errInvalidParam
+	max int
+}
+
+// NewErrParamMaxLen creates a new maximum length parameter error.
+func NewErrParamMaxLen(field string, max int, value string) *ErrParamMaxLen {
+	return &ErrParamMaxLen{
+		errInvalidParam: errInvalidParam{
+			code:  ParamMaxLenErrCode,
+			field: field,
+			msg:   fmt.Sprintf("maximum size of %v, %v", max, value),
+		},
+		max: max,
+	}
+}
+
+// MaxLen returns the field's required maximum length.
+func (e *ErrParamMaxLen) MaxLen() int {
+	return e.max
+}
+
+// An ErrParamFormat represents an invalid format parameter error.
+type ErrParamFormat struct {
+	errInvalidParam
+	format string
+}
+
+// NewErrParamFormat creates a new invalid format parameter error.
+func NewErrParamFormat(field string, format, value string) *ErrParamFormat {
+	return &ErrParamFormat{
+		errInvalidParam: errInvalidParam{
+			code:  ParamFormatErrCode,
+			field: field,
+			msg:   fmt.Sprintf("format %v, %v", format, value),
+		},
+		format: format,
+	}
+}
+
+// Format returns the field's required format.
+func (e *ErrParamFormat) Format() string { + return e.format +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go b/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go new file mode 100644 index 000000000..4601f883c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go @@ -0,0 +1,295 @@ +package request + +import ( + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/awsutil" +) + +// WaiterResourceNotReadyErrorCode is the error code returned by a waiter when +// the waiter's max attempts have been exhausted. +const WaiterResourceNotReadyErrorCode = "ResourceNotReady" + +// A WaiterOption is a function that will update the Waiter value's fields to +// configure the waiter. +type WaiterOption func(*Waiter) + +// WithWaiterMaxAttempts returns the maximum number of times the waiter should +// attempt to check the resource for the target state. +func WithWaiterMaxAttempts(max int) WaiterOption { + return func(w *Waiter) { + w.MaxAttempts = max + } +} + +// WaiterDelay will return a delay the waiter should pause between attempts to +// check the resource state. The passed in attempt is the number of times the +// Waiter has checked the resource state. +// +// Attempt is the number of attempts the Waiter has made checking the resource +// state. +type WaiterDelay func(attempt int) time.Duration + +// ConstantWaiterDelay returns a WaiterDelay that will always return a constant +// delay the waiter should use between attempts. It ignores the number of +// attempts made. +func ConstantWaiterDelay(delay time.Duration) WaiterDelay { + return func(attempt int) time.Duration { + return delay + } +} + +// WithWaiterDelay will set the Waiter to use the WaiterDelay passed in. +func WithWaiterDelay(delayer WaiterDelay) WaiterOption { + return func(w *Waiter) { + w.Delay = delayer + } +} + +// WithWaiterLogger returns a waiter option to set the logger a waiter +// should use to log warnings and errors to. +func WithWaiterLogger(logger aws.Logger) WaiterOption { + return func(w *Waiter) { + w.Logger = logger + } +} + +// WithWaiterRequestOptions returns a waiter option setting the request +// options for each request the waiter makes. Appends to waiter's request +// options already set. +func WithWaiterRequestOptions(opts ...Option) WaiterOption { + return func(w *Waiter) { + w.RequestOptions = append(w.RequestOptions, opts...) + } +} + +// A Waiter provides the functionality to perform a blocking call which will +// wait for a resource state to be satisfied by a service. +// +// This type should not be used directly. The API operations provided in the +// service packages prefixed with "WaitUntil" should be used instead. +type Waiter struct { + Name string + Acceptors []WaiterAcceptor + Logger aws.Logger + + MaxAttempts int + Delay WaiterDelay + + RequestOptions []Option + NewRequest func([]Option) (*Request, error) + SleepWithContext func(aws.Context, time.Duration) error +} + +// ApplyOptions updates the waiter with the list of waiter options provided. +func (w *Waiter) ApplyOptions(opts ...WaiterOption) { + for _, fn := range opts { + fn(w) + } +} + +// WaiterState are states the waiter uses based on WaiterAcceptor definitions +// to identify if the resource state the waiter is waiting on has occurred. +type WaiterState int + +// String returns the string representation of the waiter state. 
+func (s WaiterState) String() string { + switch s { + case SuccessWaiterState: + return "success" + case FailureWaiterState: + return "failure" + case RetryWaiterState: + return "retry" + default: + return "unknown waiter state" + } +} + +// States the waiter acceptors will use to identify target resource states. +const ( + SuccessWaiterState WaiterState = iota // waiter successful + FailureWaiterState // waiter failed + RetryWaiterState // waiter needs to be retried +) + +// WaiterMatchMode is the mode that the waiter will use to match the WaiterAcceptor +// definition's Expected attribute. +type WaiterMatchMode int + +// Modes the waiter will use when inspecting API response to identify target +// resource states. +const ( + PathAllWaiterMatch WaiterMatchMode = iota // match on all paths + PathWaiterMatch // match on specific path + PathAnyWaiterMatch // match on any path + PathListWaiterMatch // match on list of paths + StatusWaiterMatch // match on status code + ErrorWaiterMatch // match on error +) + +// String returns the string representation of the waiter match mode. +func (m WaiterMatchMode) String() string { + switch m { + case PathAllWaiterMatch: + return "pathAll" + case PathWaiterMatch: + return "path" + case PathAnyWaiterMatch: + return "pathAny" + case PathListWaiterMatch: + return "pathList" + case StatusWaiterMatch: + return "status" + case ErrorWaiterMatch: + return "error" + default: + return "unknown waiter match mode" + } +} + +// WaitWithContext will make requests for the API operation using NewRequest to +// build API requests. The request's response will be compared against the +// Waiter's Acceptors to determine the successful state of the resource the +// waiter is inspecting. +// +// The passed in context must not be nil. If it is nil a panic will occur. The +// Context will be used to cancel the waiter's pending requests and retry delays. +// Use aws.BackgroundContext if no context is available. +// +// The waiter will continue until the target state defined by the Acceptors, +// or the max attempts expires. +// +// Will return the WaiterResourceNotReadyErrorCode error code if the waiter's +// retryer ShouldRetry returns false. This normally will happen when the max +// wait attempts expires. +func (w Waiter) WaitWithContext(ctx aws.Context) error { + + for attempt := 1; ; attempt++ { + req, err := w.NewRequest(w.RequestOptions) + if err != nil { + waiterLogf(w.Logger, "unable to create request %v", err) + return err + } + req.Handlers.Build.PushBack(MakeAddToUserAgentFreeFormHandler("Waiter")) + err = req.Send() + + // See if any of the acceptors match the request's response, or error + for _, a := range w.Acceptors { + if matched, matchErr := a.match(w.Name, w.Logger, req, err); matched { + return matchErr + } + } + + // The Waiter should only check the resource state MaxAttempts times + // This is here instead of in the for loop above to prevent delaying + // unnecessary when the waiter will not retry. 
+ if attempt == w.MaxAttempts { + break + } + + // Delay to wait before inspecting the resource again + delay := w.Delay(attempt) + if sleepFn := req.Config.SleepDelay; sleepFn != nil { + // Support SleepDelay for backwards compatibility and testing + sleepFn(delay) + } else { + sleepCtxFn := w.SleepWithContext + if sleepCtxFn == nil { + sleepCtxFn = aws.SleepWithContext + } + + if err := sleepCtxFn(ctx, delay); err != nil { + return awserr.New(CanceledErrorCode, "waiter context canceled", err) + } + } + } + + return awserr.New(WaiterResourceNotReadyErrorCode, "exceeded wait attempts", nil) +} + +// A WaiterAcceptor provides the information needed to wait for an API operation +// to complete. +type WaiterAcceptor struct { + State WaiterState + Matcher WaiterMatchMode + Argument string + Expected interface{} +} + +// match returns if the acceptor found a match with the passed in request +// or error. True is returned if the acceptor made a match, error is returned +// if there was an error attempting to perform the match. +func (a *WaiterAcceptor) match(name string, l aws.Logger, req *Request, err error) (bool, error) { + result := false + var vals []interface{} + + switch a.Matcher { + case PathAllWaiterMatch, PathWaiterMatch: + // Require all matches to be equal for result to match + vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument) + if len(vals) == 0 { + break + } + result = true + for _, val := range vals { + if !awsutil.DeepEqual(val, a.Expected) { + result = false + break + } + } + case PathAnyWaiterMatch: + // Only a single match needs to equal for the result to match + vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument) + for _, val := range vals { + if awsutil.DeepEqual(val, a.Expected) { + result = true + break + } + } + case PathListWaiterMatch: + // ignored matcher + case StatusWaiterMatch: + s := a.Expected.(int) + result = s == req.HTTPResponse.StatusCode + case ErrorWaiterMatch: + if aerr, ok := err.(awserr.Error); ok { + result = aerr.Code() == a.Expected.(string) + } + default: + waiterLogf(l, "WARNING: Waiter %s encountered unexpected matcher: %s", + name, a.Matcher) + } + + if !result { + // If there was no matching result found there is nothing more to do + // for this response, retry the request. + return false, nil + } + + switch a.State { + case SuccessWaiterState: + // waiter completed + return true, nil + case FailureWaiterState: + // Waiter failure state triggered + return true, awserr.New(WaiterResourceNotReadyErrorCode, + "failed waiting for successful resource state", err) + case RetryWaiterState: + // clear the error and retry the operation + return false, nil + default: + waiterLogf(l, "WARNING: Waiter %s encountered unexpected state: %s", + name, a.State) + return false, nil + } +} + +func waiterLogf(logger aws.Logger, msg string, args ...interface{}) { + if logger != nil { + logger.Log(fmt.Sprintf(msg, args...)) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport.go b/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport.go new file mode 100644 index 000000000..ea9ebb6f6 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport.go @@ -0,0 +1,26 @@ +// +build go1.7 + +package session + +import ( + "net" + "net/http" + "time" +) + +// Transport that should be used when a custom CA bundle is specified with the +// SDK. 
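+//
+// The settings below appear to mirror Go 1.7's http.DefaultTransport, so that
+// installing a custom CA bundle changes only the TLS configuration and not the
+// connection behavior.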
+func getCABundleTransport() *http.Transport { + return &http.Transport{ + Proxy: http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + DualStack: true, + }).DialContext, + MaxIdleConns: 100, + IdleConnTimeout: 90 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_5.go b/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_5.go new file mode 100644 index 000000000..fec39dfc1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_5.go @@ -0,0 +1,22 @@ +// +build !go1.6,go1.5 + +package session + +import ( + "net" + "net/http" + "time" +) + +// Transport that should be used when a custom CA bundle is specified with the +// SDK. +func getCABundleTransport() *http.Transport { + return &http.Transport{ + Proxy: http.ProxyFromEnvironment, + Dial: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial, + TLSHandshakeTimeout: 10 * time.Second, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_6.go b/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_6.go new file mode 100644 index 000000000..1c5a5391e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_6.go @@ -0,0 +1,23 @@ +// +build !go1.7,go1.6 + +package session + +import ( + "net" + "net/http" + "time" +) + +// Transport that should be used when a custom CA bundle is specified with the +// SDK. +func getCABundleTransport() *http.Transport { + return &http.Transport{ + Proxy: http.ProxyFromEnvironment, + Dial: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go b/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go new file mode 100644 index 000000000..9fd663681 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go @@ -0,0 +1,207 @@ +package session + +import ( + "fmt" + "os" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/credentials/processcreds" + "github.com/aws/aws-sdk-go/aws/credentials/stscreds" + "github.com/aws/aws-sdk-go/aws/defaults" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/shareddefaults" +) + +// valid credential source values +const ( + credSourceEc2Metadata = "Ec2InstanceMetadata" + credSourceEnvironment = "Environment" + credSourceECSContainer = "EcsContainer" +) + +func resolveCredentials(cfg *aws.Config, + envCfg envConfig, sharedCfg sharedConfig, + handlers request.Handlers, + sessOpts Options, +) (*credentials.Credentials, error) { + // Credentials from Assume Role with specific credentials source. + if envCfg.EnableSharedConfig && len(sharedCfg.AssumeRole.CredentialSource) > 0 { + return resolveCredsFromSource(cfg, envCfg, sharedCfg, handlers, sessOpts) + } + + // Credentials from environment variables + if len(envCfg.Creds.AccessKeyID) > 0 { + return credentials.NewStaticCredentialsFromCreds(envCfg.Creds), nil + } + + // Fallback to the "default" credential resolution chain. 
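+	// The profile chain checks, in order: assume-role via a source profile,
+	// static shared-config credentials, credential_process, an assume-role
+	// credential source, and finally the default remote providers.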
+	return resolveCredsFromProfile(cfg, envCfg, sharedCfg, handlers, sessOpts)
+}
+
+func resolveCredsFromProfile(cfg *aws.Config,
+	envCfg envConfig, sharedCfg sharedConfig,
+	handlers request.Handlers,
+	sessOpts Options,
+) (*credentials.Credentials, error) {
+
+	if envCfg.EnableSharedConfig && len(sharedCfg.AssumeRole.RoleARN) > 0 && sharedCfg.AssumeRoleSource != nil {
+		// Assume IAM role with credentials source from a different profile.
+		cred, err := resolveCredsFromProfile(cfg, envCfg, *sharedCfg.AssumeRoleSource, handlers, sessOpts)
+		if err != nil {
+			return nil, err
+		}
+
+		cfgCp := *cfg
+		cfgCp.Credentials = cred
+		return credsFromAssumeRole(cfgCp, handlers, sharedCfg, sessOpts)
+
+	} else if len(sharedCfg.Creds.AccessKeyID) > 0 {
+		// Static Credentials from Shared Config/Credentials file.
+		return credentials.NewStaticCredentialsFromCreds(
+			sharedCfg.Creds,
+		), nil
+
+	} else if len(sharedCfg.CredentialProcess) > 0 {
+		// Get credentials from the credential process.
+		cred := processcreds.NewCredentials(sharedCfg.CredentialProcess)
+		// If RoleARN is also provided, use the credentials obtained from the
+		// credential process to assume the role identified by RoleARN.
+		if len(sharedCfg.AssumeRole.RoleARN) > 0 {
+			cfgCp := *cfg
+			cfgCp.Credentials = cred
+			return credsFromAssumeRole(cfgCp, handlers, sharedCfg, sessOpts)
+		}
+		return cred, nil
+	} else if envCfg.EnableSharedConfig && len(sharedCfg.AssumeRole.CredentialSource) > 0 {
+		// Assume IAM Role with specific credential source.
+		return resolveCredsFromSource(cfg, envCfg, sharedCfg, handlers, sessOpts)
+	}
+
+	// Fall back to the default credentials provider. Include mock errors
+	// for the credential chain so the user can identify why credentials
+	// failed to be retrieved.
+	return credentials.NewCredentials(&credentials.ChainProvider{
+		VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors),
+		Providers: []credentials.Provider{
+			&credProviderError{
+				Err: awserr.New("EnvAccessKeyNotFound",
+					"failed to find credentials in the environment.", nil),
+			},
+			&credProviderError{
+				Err: awserr.New("SharedCredsLoad",
+					fmt.Sprintf("failed to load profile, %s.", envCfg.Profile), nil),
+			},
+			defaults.RemoteCredProvider(*cfg, handlers),
+		},
+	}), nil
+}
+
+func resolveCredsFromSource(cfg *aws.Config,
+	envCfg envConfig, sharedCfg sharedConfig,
+	handlers request.Handlers,
+	sessOpts Options,
+) (*credentials.Credentials, error) {
+	// If both credential_source and source_profile have been set, return an
+	// error as this is undefined behavior. Only one can be used at a time
+	// within a profile.
+	if len(sharedCfg.AssumeRole.SourceProfile) > 0 {
+		return nil, ErrSharedConfigSourceCollision
+	}
+
+	cfgCp := *cfg
+	switch sharedCfg.AssumeRole.CredentialSource {
+	case credSourceEc2Metadata:
+		p := defaults.RemoteCredProvider(cfgCp, handlers)
+		cfgCp.Credentials = credentials.NewCredentials(p)
+
+	case credSourceEnvironment:
+		cfgCp.Credentials = credentials.NewStaticCredentialsFromCreds(envCfg.Creds)
+
+	case credSourceECSContainer:
+		if len(os.Getenv(shareddefaults.ECSCredsProviderEnvVar)) == 0 {
+			return nil, ErrSharedConfigECSContainerEnvVarEmpty
+		}
+
+		p := defaults.RemoteCredProvider(cfgCp, handlers)
+		cfgCp.Credentials = credentials.NewCredentials(p)
+
+	default:
+		return nil, ErrSharedConfigInvalidCredSource
+	}
+
+	return credsFromAssumeRole(cfgCp, handlers, sharedCfg, sessOpts)
+}
+
+func credsFromAssumeRole(cfg aws.Config,
+	handlers request.Handlers,
+	sharedCfg sharedConfig,
+	sessOpts Options,
+) (*credentials.Credentials, error) {
+	if len(sharedCfg.AssumeRole.MFASerial) > 0 && sessOpts.AssumeRoleTokenProvider == nil {
+		// An AssumeRole token provider is required when doing Assume Role
+		// with MFA.
+		return nil, AssumeRoleTokenProviderNotSetError{}
+	}
+
+	return stscreds.NewCredentials(
+		&Session{
+			Config:   &cfg,
+			Handlers: handlers.Copy(),
+		},
+		sharedCfg.AssumeRole.RoleARN,
+		func(opt *stscreds.AssumeRoleProvider) {
+			opt.RoleSessionName = sharedCfg.AssumeRole.RoleSessionName
+			opt.Duration = sessOpts.AssumeRoleDuration
+
+			// Assume role with external ID
+			if len(sharedCfg.AssumeRole.ExternalID) > 0 {
+				opt.ExternalID = aws.String(sharedCfg.AssumeRole.ExternalID)
+			}
+
+			// Assume role with MFA
+			if len(sharedCfg.AssumeRole.MFASerial) > 0 {
+				opt.SerialNumber = aws.String(sharedCfg.AssumeRole.MFASerial)
+				opt.TokenProvider = sessOpts.AssumeRoleTokenProvider
+			}
+		},
+	), nil
+}
+
+// AssumeRoleTokenProviderNotSetError is an error returned when creating a
+// session and the shared config is configured to assume a role with MFA,
+// but the AssumeRoleTokenProvider session option is not set.
+type AssumeRoleTokenProviderNotSetError struct{}
+
+// Code is the short id of the error.
+func (e AssumeRoleTokenProviderNotSetError) Code() string {
+	return "AssumeRoleTokenProviderNotSetError"
+}
+
+// Message is the description of the error
+func (e AssumeRoleTokenProviderNotSetError) Message() string {
+	return "assume role with MFA enabled, but AssumeRoleTokenProvider session option not set."
+}
+
+// OrigErr is the underlying error that caused the failure.
+func (e AssumeRoleTokenProviderNotSetError) OrigErr() error {
+	return nil
+}
+
+// Error satisfies the error interface.
+func (e AssumeRoleTokenProviderNotSetError) Error() string {
+	return awserr.SprintError(e.Code(), e.Message(), "", nil)
+}
+
+type credProviderError struct {
+	Err error
+}
+
+var emptyCreds = credentials.Value{}
+
+func (c credProviderError) Retrieve() (credentials.Value, error) {
+	return credentials.Value{}, c.Err
+}
+func (c credProviderError) IsExpired() bool {
+	return true
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go
new file mode 100644
index 000000000..38a7b05a6
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go
@@ -0,0 +1,273 @@
+/*
+Package session provides configuration for the SDK's service clients.
+
+Sessions can be shared across all service clients that share the same base
+configuration. The Session is built from the SDK's default configuration and
+request handlers.
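+
+For example, a single Session can be created once and shared by several
+service clients (a minimal sketch; the s3 package import is illustrative):
+
+	sess := session.Must(session.NewSession())
+
+	// Both clients reuse the session's configuration and request handlers.
+	svc1 := s3.New(sess)
+	svc2 := s3.New(sess)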
+
+Sessions should be cached when possible, because creating a new Session will
+load all configuration values from the environment and config files each time
+the Session is created. Sharing the Session value across all of your service
+clients will ensure the configuration is loaded the fewest number of times
+possible.
+
+Concurrency
+
+Sessions are safe to use concurrently as long as the Session is not being
+modified. The SDK will not modify the Session once the Session has been created.
+Creating service clients concurrently from a shared Session is safe.
+
+Sessions from Shared Config
+
+Sessions can be created using the NewSession function, which will only load the
+additional config if the AWS_SDK_LOAD_CONFIG environment variable is set.
+Alternatively you can explicitly create a Session with shared config enabled.
+To do this you can use NewSessionWithOptions to configure how the Session will
+be created. Using NewSessionWithOptions with SharedConfigState set to
+SharedConfigEnable will create the session as if the AWS_SDK_LOAD_CONFIG
+environment variable was set.
+
+Creating Sessions
+
+When creating Sessions, optional aws.Config values can be passed in that will
+override the default, or loaded, config values the Session is being created
+with. This allows you to provide additional, or case-based, configuration
+as needed.
+
+By default NewSession will only load credentials from the shared credentials
+file (~/.aws/credentials). If the AWS_SDK_LOAD_CONFIG environment variable is
+set to a truthy value the Session will be created from the configuration
+values from the shared config (~/.aws/config) and shared credentials
+(~/.aws/credentials) files. See the section Sessions from Shared Config for
+more information.
+
+Create a Session with the default config and request handlers. With credentials,
+region, and profile loaded from the environment and shared config automatically.
+Requires the AWS_PROFILE to be set, or "default" is used.
+
+	// Create Session
+	sess := session.Must(session.NewSession())
+
+	// Create a Session with a custom region
+	sess := session.Must(session.NewSession(&aws.Config{
+		Region: aws.String("us-east-1"),
+	}))
+
+	// Create an S3 client instance from a session
+	sess := session.Must(session.NewSession())
+
+	svc := s3.New(sess)
+
+Create Session With Option Overrides
+
+In addition to NewSession, Sessions can be created using NewSessionWithOptions.
+This func allows you to control and override how the Session will be created
+through code instead of being driven by environment variables only.
+
+Use NewSessionWithOptions when you want to provide the config profile, or
+override the shared config state (AWS_SDK_LOAD_CONFIG).
+
+	// Equivalent to session.NewSession()
+	sess := session.Must(session.NewSessionWithOptions(session.Options{
+		// Options
+	}))
+
+	// Specify profile to load for the session's config
+	sess := session.Must(session.NewSessionWithOptions(session.Options{
+		Profile: "profile_name",
+	}))
+
+	// Specify profile for config and region for requests
+	sess := session.Must(session.NewSessionWithOptions(session.Options{
+		Config:  aws.Config{Region: aws.String("us-east-1")},
+		Profile: "profile_name",
+	}))
+
+	// Force enable Shared Config support
+	sess := session.Must(session.NewSessionWithOptions(session.Options{
+		SharedConfigState: session.SharedConfigEnable,
+	}))
+
+Adding Handlers
+
+You can add handlers to a session for processing HTTP requests. All service
+clients that use the session inherit the handlers.
+For example, the following handler logs every request and its payload made by
+a service client:
+
+	// Create a session, and add additional handlers for all service
+	// clients created with the Session to inherit. Adds logging handler.
+	sess := session.Must(session.NewSession())
+
+	sess.Handlers.Send.PushFront(func(r *request.Request) {
+		// Log every request made and its payload
+		logger.Printf("Request: %s/%s, Payload: %s",
+			r.ClientInfo.ServiceName, r.Operation, r.Params)
+	})
+
+Deprecated "New" function
+
+The New session function has been deprecated because it does not provide a good
+way to return errors that occur when loading the configuration files and values.
+Because of this, NewSession was created so errors can be retrieved when
+creating a session fails.
+
+Shared Config Fields
+
+By default the SDK will only load the shared credentials file's (~/.aws/credentials)
+credentials values, and all other config is provided by the environment variables,
+SDK defaults, and user provided aws.Config values.
+
+If the AWS_SDK_LOAD_CONFIG environment variable is set, or the SharedConfigEnable
+option is used to create the Session, the full shared config values will be
+loaded. This includes credentials, region, and support for assume role. In
+addition the Session will load its configuration from both the shared config
+file (~/.aws/config) and shared credentials file (~/.aws/credentials). Both
+files have the same format.
+
+If both config files are present the configuration from both files will be
+read. The Session will be created from configuration values from the shared
+credentials file (~/.aws/credentials) over those in the shared config file
+(~/.aws/config).
+
+Credentials are the values the SDK should use for authenticating requests with
+AWS Services. Credentials from a configuration file must include both
+aws_access_key_id and aws_secret_access_key, provided together in the same
+file, to be considered valid. The values will be ignored if not a complete
+group. aws_session_token is an optional field that can be provided if both of
+the other two fields are also provided.
+
+	aws_access_key_id = AKID
+	aws_secret_access_key = SECRET
+	aws_session_token = TOKEN
+
+Assume Role values allow you to configure the SDK to assume an IAM role using
+a set of credentials provided in a config file via the source_profile field.
+Both "role_arn" and "source_profile" are required. The SDK supports assuming
+a role with MFA token if the session option AssumeRoleTokenProvider
+is set.
+
+	role_arn = arn:aws:iam::<account_number>:role/<role_name>
+	source_profile = profile_with_creds
+	external_id = 1234
+	mfa_serial = <serial or mfa arn>
+	role_session_name = session_name
+
+Region is the region the SDK should use for looking up AWS service endpoints
+and signing requests.
+
+	region = us-east-1
+
+Assume Role with MFA token
+
+To create a session with support for assuming an IAM role with MFA, set the
+session option AssumeRoleTokenProvider to a function that will prompt for the
+MFA token code when the SDK assumes the role and refreshes the role's credentials.
+This allows you to configure the SDK via the shared config to assume a role
+with MFA tokens.
+
+In order for the SDK to assume a role with MFA, the SharedConfigState
+session option must be set to SharedConfigEnable, or the AWS_SDK_LOAD_CONFIG
+environment variable must be set.
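+
+For example (a sketch showing both options set together):
+
+	sess := session.Must(session.NewSessionWithOptions(session.Options{
+		SharedConfigState:       session.SharedConfigEnable,
+		AssumeRoleTokenProvider: stscreds.StdinTokenProvider,
+	}))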
+
+The shared configuration instructs the SDK to assume an IAM role with MFA
+when the mfa_serial configuration field is set in the shared config
+(~/.aws/config) or shared credentials (~/.aws/credentials) file.
+
+If mfa_serial is set in the configuration, but the AssumeRoleTokenProvider
+session option is not set, an error will be returned when creating the
+session.
+
+	sess := session.Must(session.NewSessionWithOptions(session.Options{
+		AssumeRoleTokenProvider: stscreds.StdinTokenProvider,
+	}))
+
+	// Create service client value configured for credentials
+	// from assumed role.
+	svc := s3.New(sess)
+
+To set up assume role outside of a session see the stscreds.AssumeRoleProvider
+documentation.
+
+Environment Variables
+
+When a Session is created several environment variables can be set to adjust
+how the SDK functions, and what configuration data it loads when creating
+Sessions. All environment values are optional, but some values like credentials
+require multiple of the values to be set or the partial values will be ignored.
+All environment variable values are strings unless otherwise noted.
+
+Environment configuration values. If set, both Access Key ID and Secret Access
+Key must be provided. A Session Token can optionally also be provided, but is
+not required.
+
+	# Access Key ID
+	AWS_ACCESS_KEY_ID=AKID
+	AWS_ACCESS_KEY=AKID # only read if AWS_ACCESS_KEY_ID is not set.
+
+	# Secret Access Key
+	AWS_SECRET_ACCESS_KEY=SECRET
+	AWS_SECRET_KEY=SECRET # only read if AWS_SECRET_ACCESS_KEY is not set.
+
+	# Session Token
+	AWS_SESSION_TOKEN=TOKEN
+
+The Region value will instruct the SDK where to make service API requests to.
+If it is not provided in the environment the region must be provided before a
+service client request is made.
+
+	AWS_REGION=us-east-1
+
+	# AWS_DEFAULT_REGION is only read if AWS_SDK_LOAD_CONFIG is also set,
+	# and AWS_REGION is not also set.
+	AWS_DEFAULT_REGION=us-east-1
+
+Profile name the SDK should use when loading shared config from the
+configuration files. If not provided "default" will be used as the profile name.
+
+	AWS_PROFILE=my_profile
+
+	# AWS_DEFAULT_PROFILE is only read if AWS_SDK_LOAD_CONFIG is also set,
+	# and AWS_PROFILE is not also set.
+	AWS_DEFAULT_PROFILE=my_profile
+
+SDK load config instructs the SDK to load the shared config in addition to
+shared credentials. This also expands the configuration loaded so the shared
+credentials will have parity with the shared config file. This also enables
+Region and Profile support for the AWS_DEFAULT_REGION and AWS_DEFAULT_PROFILE
+env values as well.
+
+	AWS_SDK_LOAD_CONFIG=1
+
+Shared credentials file path can be set to instruct the SDK to use an alternative
+file for the shared credentials. If not set the file will be loaded from
+$HOME/.aws/credentials on Linux/Unix based systems, and
+%USERPROFILE%\.aws\credentials on Windows.
+
+	AWS_SHARED_CREDENTIALS_FILE=$HOME/my_shared_credentials
+
+Shared config file path can be set to instruct the SDK to use an alternative
+file for the shared config. If not set the file will be loaded from
+$HOME/.aws/config on Linux/Unix based systems, and
+%USERPROFILE%\.aws\config on Windows.
+
+	AWS_CONFIG_FILE=$HOME/my_shared_config
+
+Path to a custom Credentials Authority (CA) bundle PEM file that the SDK
+will use instead of the default system's root CA bundle. Use this only
+if you want to replace the CA bundle the SDK uses for TLS requests.
+
+	AWS_CA_BUNDLE=$HOME/my_custom_ca_bundle
+
+Enabling this option will attempt to merge the Transport into the SDK's HTTP
+client. If the client's Transport is not a http.Transport an error will be
+returned. If the Transport's TLS config is set this option will cause the SDK
+to overwrite the Transport's TLS config's RootCAs value. If the CA bundle file
+contains multiple certificates all of them will be loaded.
+
+The Session option CustomCABundle is also available when creating sessions
+to also enable this feature. The CustomCABundle session option field has priority
+over the AWS_CA_BUNDLE environment variable, and will be used if both are set.
+
+Setting a custom HTTPClient in the aws.Config options will override this setting.
+To use this option with a custom HTTP client, the HTTP client needs to be provided
+when creating the session, not the service client.
+*/
+package session
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go
new file mode 100644
index 000000000..cdb42497e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go
@@ -0,0 +1,241 @@
+package session
+
+import (
+	"os"
+	"strconv"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/credentials"
+	"github.com/aws/aws-sdk-go/aws/defaults"
+)
+
+// EnvProviderName provides a name of the provider when config is loaded from environment.
+const EnvProviderName = "EnvConfigCredentials"
+
+// envConfig is a collection of environment values the SDK will read
+// setup config from. All environment values are optional. But some values
+// such as credentials require multiple values to be complete or the values
+// will be ignored.
+type envConfig struct {
+	// Environment configuration values. If set, both Access Key ID and Secret
+	// Access Key must be provided. A Session Token can optionally also be
+	// provided, but is not required.
+	//
+	//	# Access Key ID
+	//	AWS_ACCESS_KEY_ID=AKID
+	//	AWS_ACCESS_KEY=AKID # only read if AWS_ACCESS_KEY_ID is not set.
+	//
+	//	# Secret Access Key
+	//	AWS_SECRET_ACCESS_KEY=SECRET
+	//	AWS_SECRET_KEY=SECRET # only read if AWS_SECRET_ACCESS_KEY is not set.
+	//
+	//	# Session Token
+	//	AWS_SESSION_TOKEN=TOKEN
+	Creds credentials.Value
+
+	// The Region value will instruct the SDK where to make service API
+	// requests to. If it is not provided in the environment the region must
+	// be provided before a service client request is made.
+	//
+	//	AWS_REGION=us-east-1
+	//
+	//	# AWS_DEFAULT_REGION is only read if AWS_SDK_LOAD_CONFIG is also set,
+	//	# and AWS_REGION is not also set.
+	//	AWS_DEFAULT_REGION=us-east-1
+	Region string
+
+	// Profile name the SDK should use when loading shared configuration from
+	// the shared configuration files. If not provided "default" will be used
+	// as the profile name.
+	//
+	//	AWS_PROFILE=my_profile
+	//
+	//	# AWS_DEFAULT_PROFILE is only read if AWS_SDK_LOAD_CONFIG is also set,
+	//	# and AWS_PROFILE is not also set.
+	//	AWS_DEFAULT_PROFILE=my_profile
+	Profile string
+
+	// SDK load config instructs the SDK to load the shared config in addition to
+	// shared credentials. This also expands the configuration loaded from the shared
+	// credentials to have parity with the shared config file. This also enables
+	// Region and Profile support for the AWS_DEFAULT_REGION and AWS_DEFAULT_PROFILE
+	// env values as well.
+ // + // AWS_SDK_LOAD_CONFIG=1 + EnableSharedConfig bool + + // Shared credentials file path can be set to instruct the SDK to use an alternate + // file for the shared credentials. If not set the file will be loaded from + // $HOME/.aws/credentials on Linux/Unix based systems, and + // %USERPROFILE%\.aws\credentials on Windows. + // + // AWS_SHARED_CREDENTIALS_FILE=$HOME/my_shared_credentials + SharedCredentialsFile string + + // Shared config file path can be set to instruct the SDK to use an alternate + // file for the shared config. If not set the file will be loaded from + // $HOME/.aws/config on Linux/Unix based systems, and + // %USERPROFILE%\.aws\config on Windows. + // + // AWS_CONFIG_FILE=$HOME/my_shared_config + SharedConfigFile string + + // Sets the path to a custom Credentials Authority (CA) Bundle PEM file + // that the SDK will use instead of the system's root CA bundle. + // Only use this if you want to configure the SDK to use a custom set + // of CAs. + // + // Enabling this option will attempt to merge the Transport + // into the SDK's HTTP client. If the client's Transport is + // not a http.Transport an error will be returned. If the + // Transport's TLS config is set this option will cause the + // SDK to overwrite the Transport's TLS config's RootCAs value. + // + // Setting a custom HTTPClient in the aws.Config options will override this setting. + // To use this option and custom HTTP client, the HTTP client needs to be provided + // when creating the session. Not the service client. + // + // AWS_CA_BUNDLE=$HOME/my_custom_ca_bundle + CustomCABundle string + + csmEnabled string + CSMEnabled bool + CSMPort string + CSMClientID string + CSMHost string + + enableEndpointDiscovery string + // Enables endpoint discovery via environment variables. + // + // AWS_ENABLE_ENDPOINT_DISCOVERY=true + EnableEndpointDiscovery *bool +} + +var ( + csmEnabledEnvKey = []string{ + "AWS_CSM_ENABLED", + } + csmHostEnvKey = []string{ + "AWS_CSM_HOST", + } + csmPortEnvKey = []string{ + "AWS_CSM_PORT", + } + csmClientIDEnvKey = []string{ + "AWS_CSM_CLIENT_ID", + } + credAccessEnvKey = []string{ + "AWS_ACCESS_KEY_ID", + "AWS_ACCESS_KEY", + } + credSecretEnvKey = []string{ + "AWS_SECRET_ACCESS_KEY", + "AWS_SECRET_KEY", + } + credSessionEnvKey = []string{ + "AWS_SESSION_TOKEN", + } + + enableEndpointDiscoveryEnvKey = []string{ + "AWS_ENABLE_ENDPOINT_DISCOVERY", + } + + regionEnvKeys = []string{ + "AWS_REGION", + "AWS_DEFAULT_REGION", // Only read if AWS_SDK_LOAD_CONFIG is also set + } + profileEnvKeys = []string{ + "AWS_PROFILE", + "AWS_DEFAULT_PROFILE", // Only read if AWS_SDK_LOAD_CONFIG is also set + } + sharedCredsFileEnvKey = []string{ + "AWS_SHARED_CREDENTIALS_FILE", + } + sharedConfigFileEnvKey = []string{ + "AWS_CONFIG_FILE", + } +) + +// loadEnvConfig retrieves the SDK's environment configuration. +// See `envConfig` for the values that will be retrieved. +// +// If the environment variable `AWS_SDK_LOAD_CONFIG` is set to a truthy value +// the shared SDK config will be loaded in addition to the SDK's specific +// configuration values. +func loadEnvConfig() envConfig { + enableSharedConfig, _ := strconv.ParseBool(os.Getenv("AWS_SDK_LOAD_CONFIG")) + return envConfigLoad(enableSharedConfig) +} + +// loadEnvSharedConfig retrieves the SDK's environment configuration, and the +// SDK shared config. See `envConfig` for the values that will be retrieved. +// +// Loads the shared configuration in addition to the SDK's specific configuration. 
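+// NewSessionWithOptions uses this path when SharedConfigState is set to
+// SharedConfigEnable.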
+// This will load the same values as `loadEnvConfig` if the `AWS_SDK_LOAD_CONFIG` +// environment variable is set. +func loadSharedEnvConfig() envConfig { + return envConfigLoad(true) +} + +func envConfigLoad(enableSharedConfig bool) envConfig { + cfg := envConfig{} + + cfg.EnableSharedConfig = enableSharedConfig + + setFromEnvVal(&cfg.Creds.AccessKeyID, credAccessEnvKey) + setFromEnvVal(&cfg.Creds.SecretAccessKey, credSecretEnvKey) + setFromEnvVal(&cfg.Creds.SessionToken, credSessionEnvKey) + + // CSM environment variables + setFromEnvVal(&cfg.csmEnabled, csmEnabledEnvKey) + setFromEnvVal(&cfg.CSMHost, csmHostEnvKey) + setFromEnvVal(&cfg.CSMPort, csmPortEnvKey) + setFromEnvVal(&cfg.CSMClientID, csmClientIDEnvKey) + cfg.CSMEnabled = len(cfg.csmEnabled) > 0 + + // Require logical grouping of credentials + if len(cfg.Creds.AccessKeyID) == 0 || len(cfg.Creds.SecretAccessKey) == 0 { + cfg.Creds = credentials.Value{} + } else { + cfg.Creds.ProviderName = EnvProviderName + } + + regionKeys := regionEnvKeys + profileKeys := profileEnvKeys + if !cfg.EnableSharedConfig { + regionKeys = regionKeys[:1] + profileKeys = profileKeys[:1] + } + + setFromEnvVal(&cfg.Region, regionKeys) + setFromEnvVal(&cfg.Profile, profileKeys) + + // endpoint discovery is in reference to it being enabled. + setFromEnvVal(&cfg.enableEndpointDiscovery, enableEndpointDiscoveryEnvKey) + if len(cfg.enableEndpointDiscovery) > 0 { + cfg.EnableEndpointDiscovery = aws.Bool(cfg.enableEndpointDiscovery != "false") + } + + setFromEnvVal(&cfg.SharedCredentialsFile, sharedCredsFileEnvKey) + setFromEnvVal(&cfg.SharedConfigFile, sharedConfigFileEnvKey) + + if len(cfg.SharedCredentialsFile) == 0 { + cfg.SharedCredentialsFile = defaults.SharedCredentialsFilename() + } + if len(cfg.SharedConfigFile) == 0 { + cfg.SharedConfigFile = defaults.SharedConfigFilename() + } + + cfg.CustomCABundle = os.Getenv("AWS_CA_BUNDLE") + + return cfg +} + +func setFromEnvVal(dst *string, keys []string) { + for _, k := range keys { + if v := os.Getenv(k); len(v) > 0 { + *dst = v + break + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go new file mode 100644 index 000000000..5da98abe7 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go @@ -0,0 +1,608 @@ +package session + +import ( + "crypto/tls" + "crypto/x509" + "fmt" + "io" + "io/ioutil" + "net/http" + "os" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/corehandlers" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/csm" + "github.com/aws/aws-sdk-go/aws/defaults" + "github.com/aws/aws-sdk-go/aws/endpoints" + "github.com/aws/aws-sdk-go/aws/request" +) + +const ( + // ErrCodeSharedConfig represents an error that occurs in the shared + // configuration logic + ErrCodeSharedConfig = "SharedConfigErr" +) + +// ErrSharedConfigSourceCollision will be returned if a section contains both +// source_profile and credential_source +var ErrSharedConfigSourceCollision = awserr.New(ErrCodeSharedConfig, "only source profile or credential source can be specified, not both", nil) + +// ErrSharedConfigECSContainerEnvVarEmpty will be returned if the environment +// variables are empty and Environment was set as the credential source +var ErrSharedConfigECSContainerEnvVarEmpty = awserr.New(ErrCodeSharedConfig, "EcsContainer was specified as the credential_source, but 
'AWS_CONTAINER_CREDENTIALS_RELATIVE_URI' was not set", nil) + +// ErrSharedConfigInvalidCredSource will be returned if an invalid credential source was provided +var ErrSharedConfigInvalidCredSource = awserr.New(ErrCodeSharedConfig, "credential source values must be EcsContainer, Ec2InstanceMetadata, or Environment", nil) + +// A Session provides a central location to create service clients from and +// store configurations and request handlers for those services. +// +// Sessions are safe to create service clients concurrently, but it is not safe +// to mutate the Session concurrently. +// +// The Session satisfies the service client's client.ConfigProvider. +type Session struct { + Config *aws.Config + Handlers request.Handlers +} + +// New creates a new instance of the handlers merging in the provided configs +// on top of the SDK's default configurations. Once the Session is created it +// can be mutated to modify the Config or Handlers. The Session is safe to be +// read concurrently, but it should not be written to concurrently. +// +// If the AWS_SDK_LOAD_CONFIG environment is set to a truthy value, the New +// method could now encounter an error when loading the configuration. When +// The environment variable is set, and an error occurs, New will return a +// session that will fail all requests reporting the error that occurred while +// loading the session. Use NewSession to get the error when creating the +// session. +// +// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value +// the shared config file (~/.aws/config) will also be loaded, in addition to +// the shared credentials file (~/.aws/credentials). Values set in both the +// shared config, and shared credentials will be taken from the shared +// credentials file. +// +// Deprecated: Use NewSession functions to create sessions instead. NewSession +// has the same functionality as New except an error can be returned when the +// func is called instead of waiting to receive an error until a request is made. +func New(cfgs ...*aws.Config) *Session { + // load initial config from environment + envCfg := loadEnvConfig() + + if envCfg.EnableSharedConfig { + var cfg aws.Config + cfg.MergeIn(cfgs...) + s, err := NewSessionWithOptions(Options{ + Config: cfg, + SharedConfigState: SharedConfigEnable, + }) + if err != nil { + // Old session.New expected all errors to be discovered when + // a request is made, and would report the errors then. This + // needs to be replicated if an error occurs while creating + // the session. + msg := "failed to create session with AWS_SDK_LOAD_CONFIG enabled. " + + "Use session.NewSession to handle errors occurring during session creation." + + // Session creation failed, need to report the error and prevent + // any requests from succeeding. + s = &Session{Config: defaults.Config()} + s.Config.MergeIn(cfgs...) + s.Config.Logger.Log("ERROR:", msg, "Error:", err) + s.Handlers.Validate.PushBack(func(r *request.Request) { + r.Error = err + }) + } + + return s + } + + s := deprecatedNewSession(cfgs...) + if envCfg.CSMEnabled { + err := enableCSM(&s.Handlers, envCfg.CSMClientID, + envCfg.CSMHost, envCfg.CSMPort, s.Config.Logger) + if err != nil { + err = fmt.Errorf("failed to enable CSM, %v", err) + s.Config.Logger.Log("ERROR:", err.Error()) + s.Handlers.Validate.PushBack(func(r *request.Request) { + r.Error = err + }) + } + } + + return s +} + +// NewSession returns a new Session created from SDK defaults, config files, +// environment, and user provided config files. 
Once the Session is created
+// it can be mutated to modify the Config or Handlers. The Session is safe to
+// be read concurrently, but it should not be written to concurrently.
+//
+// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value
+// the shared config file (~/.aws/config) will also be loaded in addition to
+// the shared credentials file (~/.aws/credentials). Values set in both the
+// shared config, and shared credentials will be taken from the shared
+// credentials file. Enabling the Shared Config will also allow the Session
+// to be built with retrieving credentials with AssumeRole set in the config.
+//
+// See the NewSessionWithOptions func for information on how to override or
+// control through code how the Session will be created, such as specifying the
+// config profile, and controlling if shared config is enabled or not.
+func NewSession(cfgs ...*aws.Config) (*Session, error) {
+	opts := Options{}
+	opts.Config.MergeIn(cfgs...)
+
+	return NewSessionWithOptions(opts)
+}
+
+// SharedConfigState provides the ability to optionally override the state
+// of the session's creation based on the shared config being enabled or
+// disabled.
+type SharedConfigState int
+
+const (
+	// SharedConfigStateFromEnv does not override any state of the
+	// AWS_SDK_LOAD_CONFIG env var. It is the default value of the
+	// SharedConfigState type.
+	SharedConfigStateFromEnv SharedConfigState = iota
+
+	// SharedConfigDisable overrides the AWS_SDK_LOAD_CONFIG env var value
+	// and disables the shared config functionality.
+	SharedConfigDisable
+
+	// SharedConfigEnable overrides the AWS_SDK_LOAD_CONFIG env var value
+	// and enables the shared config functionality.
+	SharedConfigEnable
+)
+
+// Options provides the means to control how a Session is created and what
+// configuration values will be loaded.
+type Options struct {
+	// Provides config values for the SDK to use when creating service clients
+	// and making API requests to services. Any value set in this field
+	// will override the associated value provided by the SDK defaults,
+	// environment or config files where relevant.
+	//
+	// If not set, configuration values from SDK defaults, environment, and
+	// config will be used.
+	Config aws.Config
+
+	// Overrides the config profile the Session should be created from. If not
+	// set the value of the environment variable will be loaded (AWS_PROFILE,
+	// or AWS_DEFAULT_PROFILE if the Shared Config is enabled).
+	//
+	// If not set and environment variables are not set the "default"
+	// (DefaultSharedConfigProfile) will be used as the profile to load the
+	// session config from.
+	Profile string
+
+	// Instructs how the Session will be created based on the AWS_SDK_LOAD_CONFIG
+	// environment variable. By default a Session will be created using the
+	// value provided by the AWS_SDK_LOAD_CONFIG environment variable.
+	//
+	// Setting this value to SharedConfigEnable or SharedConfigDisable
+	// will allow you to override the AWS_SDK_LOAD_CONFIG environment variable
+	// and enable or disable the shared config functionality.
+	SharedConfigState SharedConfigState
+
+	// Ordered list of files the session will load configuration from.
+	// It overrides the AWS_SHARED_CREDENTIALS_FILE and AWS_CONFIG_FILE
+	// environment variables.
+	SharedConfigFiles []string
+
+	// When the SDK's shared config is configured to assume a role with MFA
+	// this option is required in order to provide the mechanism that will
+	// retrieve the MFA token. There is no default value for this field.
+	// If it is not set an error will be returned when creating the session.
+	//
+	// This token provider will be called whenever the assumed role's
+	// credentials need to be refreshed. Within the context of service clients
+	// all sharing the same session the SDK will ensure calls to the token
+	// provider are atomic. When sharing a token provider across multiple
+	// sessions additional synchronization logic is needed to ensure the
+	// token providers do not introduce race conditions. It is recommended to
+	// share the session where possible.
+	//
+	// stscreds.StdinTokenProvider is a basic implementation that will prompt
+	// from stdin for the MFA token code.
+	//
+	// This field is only used if the shared configuration is enabled, and
+	// the config enables assume role with MFA via the mfa_serial field.
+	AssumeRoleTokenProvider func() (string, error)
+
+	// When the SDK's shared config is configured to assume a role this option
+	// may be provided to set the expiry duration of the STS credentials.
+	// Defaults to 15 minutes if not set as documented in the
+	// stscreds.AssumeRoleProvider.
+	AssumeRoleDuration time.Duration
+
+	// Reader for a custom Credentials Authority (CA) bundle in PEM format that
+	// the SDK will use instead of the default system's root CA bundle. Use this
+	// only if you want to replace the CA bundle the SDK uses for TLS requests.
+	//
+	// Enabling this option will attempt to merge the Transport into the SDK's HTTP
+	// client. If the client's Transport is not a http.Transport an error will be
+	// returned. If the Transport's TLS config is set this option will cause the SDK
+	// to overwrite the Transport's TLS config's RootCAs value. If the CA
+	// bundle reader contains multiple certificates all of them will be loaded.
+	//
+	// The Session option CustomCABundle is also available when creating sessions
+	// to also enable this feature. The CustomCABundle session option field has
+	// priority over the AWS_CA_BUNDLE environment variable, and will be used if
+	// both are set.
+	CustomCABundle io.Reader
+
+	// The handlers that the session and all API clients will be created with.
+	// This must be a complete set of handlers. Use the defaults.Handlers()
+	// function to initialize this value before changing the handlers to be
+	// used by the SDK.
+	Handlers request.Handlers
+}
+
+// NewSessionWithOptions returns a new Session created from SDK defaults, config files,
+// environment, and user provided config files. This func uses the Options
+// values to configure how the Session is created.
+//
+// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value
+// the shared config file (~/.aws/config) will also be loaded in addition to
+// the shared credentials file (~/.aws/credentials). Values set in both the
+// shared config, and shared credentials will be taken from the shared
+// credentials file. Enabling the Shared Config will also allow the Session
+// to be built with retrieving credentials with AssumeRole set in the config.
+// +// // Equivalent to session.New +// sess := session.Must(session.NewSessionWithOptions(session.Options{})) +// +// // Specify profile to load for the session's config +// sess := session.Must(session.NewSessionWithOptions(session.Options{ +// Profile: "profile_name", +// })) +// +// // Specify profile for config and region for requests +// sess := session.Must(session.NewSessionWithOptions(session.Options{ +// Config: aws.Config{Region: aws.String("us-east-1")}, +// Profile: "profile_name", +// })) +// +// // Force enable Shared Config support +// sess := session.Must(session.NewSessionWithOptions(session.Options{ +// SharedConfigState: session.SharedConfigEnable, +// })) +func NewSessionWithOptions(opts Options) (*Session, error) { + var envCfg envConfig + if opts.SharedConfigState == SharedConfigEnable { + envCfg = loadSharedEnvConfig() + } else { + envCfg = loadEnvConfig() + } + + if len(opts.Profile) > 0 { + envCfg.Profile = opts.Profile + } + + switch opts.SharedConfigState { + case SharedConfigDisable: + envCfg.EnableSharedConfig = false + case SharedConfigEnable: + envCfg.EnableSharedConfig = true + } + + // Only use AWS_CA_BUNDLE if session option is not provided. + if len(envCfg.CustomCABundle) != 0 && opts.CustomCABundle == nil { + f, err := os.Open(envCfg.CustomCABundle) + if err != nil { + return nil, awserr.New("LoadCustomCABundleError", + "failed to open custom CA bundle PEM file", err) + } + defer f.Close() + opts.CustomCABundle = f + } + + return newSession(opts, envCfg, &opts.Config) +} + +// Must is a helper function to ensure the Session is valid and there was no +// error when calling a NewSession function. +// +// This helper is intended to be used in variable initialization to load the +// Session and configuration at startup. Such as: +// +// var sess = session.Must(session.NewSession()) +func Must(sess *Session, err error) *Session { + if err != nil { + panic(err) + } + + return sess +} + +func deprecatedNewSession(cfgs ...*aws.Config) *Session { + cfg := defaults.Config() + handlers := defaults.Handlers() + + // Apply the passed in configs so the configuration can be applied to the + // default credential chain + cfg.MergeIn(cfgs...) + if cfg.EndpointResolver == nil { + // An endpoint resolver is required for a session to be able to provide + // endpoints for service client configurations. + cfg.EndpointResolver = endpoints.DefaultResolver() + } + cfg.Credentials = defaults.CredChain(cfg, handlers) + + // Reapply any passed in configs to override credentials if set + cfg.MergeIn(cfgs...) + + s := &Session{ + Config: cfg, + Handlers: handlers, + } + + initHandlers(s) + return s +} + +func enableCSM(handlers *request.Handlers, + clientID, host, port string, + logger aws.Logger, +) error { + if logger != nil { + logger.Log("Enabling CSM") + } + + r, err := csm.Start(clientID, csm.AddressWithDefaults(host, port)) + if err != nil { + return err + } + r.InjectHandlers(handlers) + + return nil +} + +func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session, error) { + cfg := defaults.Config() + + handlers := opts.Handlers + if handlers.IsEmpty() { + handlers = defaults.Handlers() + } + + // Get a merged version of the user provided config to determine if + // credentials were. + userCfg := &aws.Config{} + userCfg.MergeIn(cfgs...) + + // Ordered config files will be loaded in with later files overwriting + // previous config file values. 
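+	// opts.SharedConfigFiles, when provided, replaces the env/default file
+	// paths entirely; otherwise the shared config file is only consulted
+	// when shared config support is enabled.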
+	var cfgFiles []string
+	if opts.SharedConfigFiles != nil {
+		cfgFiles = opts.SharedConfigFiles
+	} else {
+		cfgFiles = []string{envCfg.SharedConfigFile, envCfg.SharedCredentialsFile}
+		if !envCfg.EnableSharedConfig {
+			// The shared config file (~/.aws/config) is only loaded if instructed
+			// to load via the envConfig.EnableSharedConfig (AWS_SDK_LOAD_CONFIG).
+			cfgFiles = cfgFiles[1:]
+		}
+	}
+
+	// Load additional config from file(s)
+	sharedCfg, err := loadSharedConfig(envCfg.Profile, cfgFiles)
+	if err != nil {
+		return nil, err
+	}
+
+	if err := mergeConfigSrcs(cfg, userCfg, envCfg, sharedCfg, handlers, opts); err != nil {
+		return nil, err
+	}
+
+	s := &Session{
+		Config:   cfg,
+		Handlers: handlers,
+	}
+
+	initHandlers(s)
+	if envCfg.CSMEnabled {
+		err := enableCSM(&s.Handlers, envCfg.CSMClientID,
+			envCfg.CSMHost, envCfg.CSMPort, s.Config.Logger)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	// Setup HTTP client with custom cert bundle if enabled
+	if opts.CustomCABundle != nil {
+		if err := loadCustomCABundle(s, opts.CustomCABundle); err != nil {
+			return nil, err
+		}
+	}
+
+	return s, nil
+}
+
+func loadCustomCABundle(s *Session, bundle io.Reader) error {
+	var t *http.Transport
+	switch v := s.Config.HTTPClient.Transport.(type) {
+	case *http.Transport:
+		t = v
+	default:
+		if s.Config.HTTPClient.Transport != nil {
+			return awserr.New("LoadCustomCABundleError",
+				"unable to load custom CA bundle, HTTPClient's transport unsupported type", nil)
+		}
+	}
+	if t == nil {
+		// A nil transport implies `http.DefaultTransport` should be used. Since
+		// the SDK cannot modify, nor copy, the `DefaultTransport`, specifying
+		// equivalent values is the next closest behavior.
+		t = getCABundleTransport()
+	}
+
+	p, err := loadCertPool(bundle)
+	if err != nil {
+		return err
+	}
+	if t.TLSClientConfig == nil {
+		t.TLSClientConfig = &tls.Config{}
+	}
+	t.TLSClientConfig.RootCAs = p
+
+	s.Config.HTTPClient.Transport = t
+
+	return nil
+}
+
+func loadCertPool(r io.Reader) (*x509.CertPool, error) {
+	b, err := ioutil.ReadAll(r)
+	if err != nil {
+		return nil, awserr.New("LoadCustomCABundleError",
+			"failed to read custom CA bundle PEM file", err)
+	}
+
+	p := x509.NewCertPool()
+	if !p.AppendCertsFromPEM(b) {
+		return nil, awserr.New("LoadCustomCABundleError",
+			"failed to load custom CA bundle PEM file", err)
+	}
+
+	return p, nil
+}
+
+func mergeConfigSrcs(cfg, userCfg *aws.Config,
+	envCfg envConfig, sharedCfg sharedConfig,
+	handlers request.Handlers,
+	sessOpts Options,
+) error {
+	// Merge in user provided configuration
+	cfg.MergeIn(userCfg)
+
+	// Region if not already set by user
+	if len(aws.StringValue(cfg.Region)) == 0 {
+		if len(envCfg.Region) > 0 {
+			cfg.WithRegion(envCfg.Region)
+		} else if envCfg.EnableSharedConfig && len(sharedCfg.Region) > 0 {
+			cfg.WithRegion(sharedCfg.Region)
+		}
+	}
+
+	if cfg.EnableEndpointDiscovery == nil {
+		if envCfg.EnableEndpointDiscovery != nil {
+			cfg.WithEndpointDiscovery(*envCfg.EnableEndpointDiscovery)
+		} else if envCfg.EnableSharedConfig && sharedCfg.EnableEndpointDiscovery != nil {
+			cfg.WithEndpointDiscovery(*sharedCfg.EnableEndpointDiscovery)
+		}
+	}
+
+	// Configure credentials if not already set by the user when creating the
+	// Session.
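+	// defaults.Config() seeds Credentials with credentials.AnonymousCredentials,
+	// so this resolves real credentials only when the user supplied none.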
+	if cfg.Credentials == credentials.AnonymousCredentials && userCfg.Credentials == nil {
+		creds, err := resolveCredentials(cfg, envCfg, sharedCfg, handlers, sessOpts)
+		if err != nil {
+			return err
+		}
+		cfg.Credentials = creds
+	}
+
+	return nil
+}
+
+func initHandlers(s *Session) {
+	// Add the Validate parameter handler if it is not disabled.
+	s.Handlers.Validate.Remove(corehandlers.ValidateParametersHandler)
+	if !aws.BoolValue(s.Config.DisableParamValidation) {
+		s.Handlers.Validate.PushBackNamed(corehandlers.ValidateParametersHandler)
+	}
+}
+
+// Copy creates and returns a copy of the current Session, copying the config
+// and handlers. If any additional configs are provided they will be merged
+// on top of the Session's copied config.
+//
+//	// Create a copy of the current Session, configured for the us-west-2 region.
+//	sess.Copy(&aws.Config{Region: aws.String("us-west-2")})
+func (s *Session) Copy(cfgs ...*aws.Config) *Session {
+	newSession := &Session{
+		Config:   s.Config.Copy(cfgs...),
+		Handlers: s.Handlers.Copy(),
+	}
+
+	initHandlers(newSession)
+
+	return newSession
+}
+
+// ClientConfig satisfies the client.ConfigProvider interface and is used to
+// configure the service client instances. Passing the Session to the service
+// client's constructor (New) will use this method to configure the client.
+func (s *Session) ClientConfig(serviceName string, cfgs ...*aws.Config) client.Config {
+	// Backwards compatibility, the error will be eaten if user calls ClientConfig
+	// directly. All SDK services will use clientConfigWithErr.
+	cfg, _ := s.clientConfigWithErr(serviceName, cfgs...)
+
+	return cfg
+}
+
+func (s *Session) clientConfigWithErr(serviceName string, cfgs ...*aws.Config) (client.Config, error) {
+	s = s.Copy(cfgs...)
+
+	var resolved endpoints.ResolvedEndpoint
+	var err error
+
+	region := aws.StringValue(s.Config.Region)
+
+	if endpoint := aws.StringValue(s.Config.Endpoint); len(endpoint) != 0 {
+		resolved.URL = endpoints.AddScheme(endpoint, aws.BoolValue(s.Config.DisableSSL))
+		resolved.SigningRegion = region
+	} else {
+		resolved, err = s.Config.EndpointResolver.EndpointFor(
+			serviceName, region,
+			func(opt *endpoints.Options) {
+				opt.DisableSSL = aws.BoolValue(s.Config.DisableSSL)
+				opt.UseDualStack = aws.BoolValue(s.Config.UseDualStack)
+
+				// Support the condition where the service is modeled but its
+				// endpoint metadata is not available.
+				opt.ResolveUnknownService = true
+			},
+		)
+	}
+
+	return client.Config{
+		Config:             s.Config,
+		Handlers:           s.Handlers,
+		Endpoint:           resolved.URL,
+		SigningRegion:      resolved.SigningRegion,
+		SigningNameDerived: resolved.SigningNameDerived,
+		SigningName:        resolved.SigningName,
+	}, err
+}
+
+// ClientConfigNoResolveEndpoint is the same as ClientConfig with the exception
+// that the EndpointResolver will not be used to resolve the endpoint. The only
+// endpoint set must come from the aws.Config.Endpoint field.
+func (s *Session) ClientConfigNoResolveEndpoint(cfgs ...*aws.Config) client.Config {
+	s = s.Copy(cfgs...)
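+
+	// No endpoint resolver is consulted; only an endpoint set explicitly on
+	// aws.Config.Endpoint populates the resolved values below.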
+ + var resolved endpoints.ResolvedEndpoint + + region := aws.StringValue(s.Config.Region) + + if ep := aws.StringValue(s.Config.Endpoint); len(ep) > 0 { + resolved.URL = endpoints.AddScheme(ep, aws.BoolValue(s.Config.DisableSSL)) + resolved.SigningRegion = region + } + + return client.Config{ + Config: s.Config, + Handlers: s.Handlers, + Endpoint: resolved.URL, + SigningRegion: resolved.SigningRegion, + SigningNameDerived: resolved.SigningNameDerived, + SigningName: resolved.SigningName, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go new file mode 100644 index 000000000..324927f56 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go @@ -0,0 +1,341 @@ +package session + +import ( + "fmt" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/internal/ini" +) + +const ( + // Static Credentials group + accessKeyIDKey = `aws_access_key_id` // group required + secretAccessKey = `aws_secret_access_key` // group required + sessionTokenKey = `aws_session_token` // optional + + // Assume Role Credentials group + roleArnKey = `role_arn` // group required + sourceProfileKey = `source_profile` // group required (or credential_source) + credentialSourceKey = `credential_source` // group required (or source_profile) + externalIDKey = `external_id` // optional + mfaSerialKey = `mfa_serial` // optional + roleSessionNameKey = `role_session_name` // optional + + // Additional Config fields + regionKey = `region` + + // endpoint discovery group + enableEndpointDiscoveryKey = `endpoint_discovery_enabled` // optional + // External Credential Process + credentialProcessKey = `credential_process` + + // DefaultSharedConfigProfile is the default profile to be used when + // loading configuration from the config files if another profile name + // is not provided. + DefaultSharedConfigProfile = `default` +) + +type assumeRoleConfig struct { + RoleARN string + SourceProfile string + CredentialSource string + ExternalID string + MFASerial string + RoleSessionName string +} + +// sharedConfig represents the configuration fields of the SDK config files. +type sharedConfig struct { + // Credentials values from the config file. Both aws_access_key_id + // and aws_secret_access_key must be provided together in the same file + // to be considered valid. The values will be ignored if not a complete group. + // aws_session_token is an optional field that can be provided if both of the + // other two fields are also provided. + // + // aws_access_key_id + // aws_secret_access_key + // aws_session_token + Creds credentials.Value + + AssumeRole assumeRoleConfig + AssumeRoleSource *sharedConfig + + // An external process to request credentials + CredentialProcess string + + // Region is the region the SDK should use for looking up AWS service endpoints + // and signing requests. + // + // region + Region string + + // EnableEndpointDiscovery can be enabled in the shared config by setting + // endpoint_discovery_enabled to true + // + // endpoint_discovery_enabled = true + EnableEndpointDiscovery *bool +} + +type sharedConfigFile struct { + Filename string + IniData ini.Sections +} + +// loadSharedConfig retrieves the configuration from the list of files +// using the profile provided. The order the files are listed will determine +// precedence. Values in subsequent files will overwrite values defined in +// earlier files. 
+//
+// For example, given two files A and B, both defining credentials: if the
+// order of the files is A then B, B's credential values will be used instead
+// of A's.
+//
+// See sharedConfig.setFromFile for information how the config files
+// will be loaded.
+func loadSharedConfig(profile string, filenames []string) (sharedConfig, error) {
+	if len(profile) == 0 {
+		profile = DefaultSharedConfigProfile
+	}
+
+	files, err := loadSharedConfigIniFiles(filenames)
+	if err != nil {
+		return sharedConfig{}, err
+	}
+
+	cfg := sharedConfig{}
+	if err = cfg.setFromIniFiles(profile, files); err != nil {
+		return sharedConfig{}, err
+	}
+
+	if len(cfg.AssumeRole.SourceProfile) > 0 {
+		if err := cfg.setAssumeRoleSource(profile, files); err != nil {
+			return sharedConfig{}, err
+		}
+	}
+
+	return cfg, nil
+}
+
+func loadSharedConfigIniFiles(filenames []string) ([]sharedConfigFile, error) {
+	files := make([]sharedConfigFile, 0, len(filenames))
+
+	for _, filename := range filenames {
+		sections, err := ini.OpenFile(filename)
+		if aerr, ok := err.(awserr.Error); ok && aerr.Code() == ini.ErrCodeUnableToReadFile {
+			// Skip files which can't be opened and read for whatever reason
+			continue
+		} else if err != nil {
+			return nil, SharedConfigLoadError{Filename: filename, Err: err}
+		}
+
+		files = append(files, sharedConfigFile{
+			Filename: filename, IniData: sections,
+		})
+	}
+
+	return files, nil
+}
+
+func (cfg *sharedConfig) setAssumeRoleSource(origProfile string, files []sharedConfigFile) error {
+	var assumeRoleSrc sharedConfig
+
+	if len(cfg.AssumeRole.CredentialSource) > 0 {
+		// setAssumeRoleSource is only called when source_profile is found.
+		// If both source_profile and credential_source are set, then
+		// ErrSharedConfigSourceCollision will be returned
+		return ErrSharedConfigSourceCollision
+	}
+
+	// Multi-level assume role chains are not supported.
+	if cfg.AssumeRole.SourceProfile == origProfile {
+		assumeRoleSrc = *cfg
+		assumeRoleSrc.AssumeRole = assumeRoleConfig{}
+	} else {
+		err := assumeRoleSrc.setFromIniFiles(cfg.AssumeRole.SourceProfile, files)
+		if err != nil {
+			return err
+		}
+
+		// Chain if the profile depends on other profiles
+		if len(assumeRoleSrc.AssumeRole.SourceProfile) > 0 {
+			err := assumeRoleSrc.setAssumeRoleSource(cfg.AssumeRole.SourceProfile, files)
+			if err != nil {
+				return err
+			}
+		}
+	}
+
+	if cfg.AssumeRole.SourceProfile == origProfile || len(assumeRoleSrc.AssumeRole.SourceProfile) == 0 {
+		// Check that at least one of credential_source, static creds, or
+		// credential_process is set so credentials can be retained.
+		if len(assumeRoleSrc.AssumeRole.CredentialSource) == 0 && len(assumeRoleSrc.Creds.AccessKeyID) == 0 && len(assumeRoleSrc.CredentialProcess) == 0 {
+			return SharedConfigAssumeRoleError{RoleARN: cfg.AssumeRole.RoleARN}
+		}
+	}
+
+	cfg.AssumeRoleSource = &assumeRoleSrc
+
+	return nil
+}
+
+func (cfg *sharedConfig) setFromIniFiles(profile string, files []sharedConfigFile) error {
+	// Trim files from the list that don't exist.
+	for _, f := range files {
+		if err := cfg.setFromIniFile(profile, f); err != nil {
+			if _, ok := err.(SharedConfigProfileNotExistsError); ok {
+				// Ignore profiles that are missing
+				continue
+			}
+			return err
+		}
+	}
+
+	return nil
+}
+
+// setFromFile loads the configuration from the file using
+// the profile provided. A sharedConfig pointer type value is used so that
+// multiple config file loadings can be chained.
+//
+// Only loads complete logically grouped values, and will not set fields in cfg
+// for incomplete grouped values in the config, such as credentials.
+// For example, if a config file only includes aws_access_key_id but no
+// aws_secret_access_key, the aws_access_key_id will be ignored.
+func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile) error {
+	section, ok := file.IniData.GetSection(profile)
+	if !ok {
+		// Fall back to the alternate profile name: "profile <name>"
+		section, ok = file.IniData.GetSection(fmt.Sprintf("profile %s", profile))
+		if !ok {
+			return SharedConfigProfileNotExistsError{Profile: profile, Err: nil}
+		}
+	}
+
+	// Shared Credentials
+	akid := section.String(accessKeyIDKey)
+	secret := section.String(secretAccessKey)
+	if len(akid) > 0 && len(secret) > 0 {
+		cfg.Creds = credentials.Value{
+			AccessKeyID:     akid,
+			SecretAccessKey: secret,
+			SessionToken:    section.String(sessionTokenKey),
+			ProviderName:    fmt.Sprintf("SharedConfigCredentials: %s", file.Filename),
+		}
+	}
+
+	// Assume Role
+	roleArn := section.String(roleArnKey)
+	srcProfile := section.String(sourceProfileKey)
+	credentialSource := section.String(credentialSourceKey)
+	credentialProcess := section.String(credentialProcessKey)
+	// hasSource ensures the Assume Role config has at least one of
+	// source_profile, credential_source, or credential_process.
+	hasSource := len(srcProfile) > 0 || len(credentialSource) > 0 || len(credentialProcess) > 0
+	if len(roleArn) > 0 && hasSource {
+		cfg.AssumeRole = assumeRoleConfig{
+			RoleARN:          roleArn,
+			SourceProfile:    srcProfile,
+			CredentialSource: credentialSource,
+			ExternalID:       section.String(externalIDKey),
+			MFASerial:        section.String(mfaSerialKey),
+			RoleSessionName:  section.String(roleSessionNameKey),
+		}
+	}
+
+	// `credential_process`
+	if credProc := section.String(credentialProcessKey); len(credProc) > 0 {
+		cfg.CredentialProcess = credProc
+	}
+
+	// Region
+	if v := section.String(regionKey); len(v) > 0 {
+		cfg.Region = v
+	}
+
+	// Endpoint discovery
+	if section.Has(enableEndpointDiscoveryKey) {
+		v := section.Bool(enableEndpointDiscoveryKey)
+		cfg.EnableEndpointDiscovery = &v
+	}
+
+	return nil
+}
+
+// SharedConfigLoadError is an error for when the shared config file failed to load.
+type SharedConfigLoadError struct {
+	Filename string
+	Err      error
+}
+
+// Code is the short id of the error.
+func (e SharedConfigLoadError) Code() string {
+	return "SharedConfigLoadError"
+}
+
+// Message is the description of the error
+func (e SharedConfigLoadError) Message() string {
+	return fmt.Sprintf("failed to load config file, %s", e.Filename)
+}
+
+// OrigErr is the underlying error that caused the failure.
+func (e SharedConfigLoadError) OrigErr() error {
+	return e.Err
+}
+
+// Error satisfies the error interface.
+func (e SharedConfigLoadError) Error() string {
+	return awserr.SprintError(e.Code(), e.Message(), "", e.Err)
+}
+
+// SharedConfigProfileNotExistsError is an error for the shared config when
+// the profile was not found in the config file.
+type SharedConfigProfileNotExistsError struct {
+	Profile string
+	Err     error
+}
+
+// Code is the short id of the error.
+func (e SharedConfigProfileNotExistsError) Code() string {
+	return "SharedConfigProfileNotExistsError"
+}
+
+// Message is the description of the error
+func (e SharedConfigProfileNotExistsError) Message() string {
+	return fmt.Sprintf("failed to get profile, %s", e.Profile)
+}
+
+// OrigErr is the underlying error that caused the failure.
+func (e SharedConfigProfileNotExistsError) OrigErr() error {
+	return e.Err
+}
+
+// Error satisfies the error interface.
+func (e SharedConfigProfileNotExistsError) Error() string { + return awserr.SprintError(e.Code(), e.Message(), "", e.Err) +} + +// SharedConfigAssumeRoleError is an error for the shared config when the +// profile contains assume role information, but that information is invalid +// or not complete. +type SharedConfigAssumeRoleError struct { + RoleARN string +} + +// Code is the short id of the error. +func (e SharedConfigAssumeRoleError) Code() string { + return "SharedConfigAssumeRoleError" +} + +// Message is the description of the error +func (e SharedConfigAssumeRoleError) Message() string { + return fmt.Sprintf("failed to load assume role for %s, source profile has no shared credentials", + e.RoleARN) +} + +// OrigErr is the underlying error that caused the failure. +func (e SharedConfigAssumeRoleError) OrigErr() error { + return nil +} + +// Error satisfies the error interface. +func (e SharedConfigAssumeRoleError) Error() string { + return awserr.SprintError(e.Code(), e.Message(), "", nil) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go new file mode 100644 index 000000000..244c86da0 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go @@ -0,0 +1,82 @@ +package v4 + +import ( + "net/http" + "strings" +) + +// validator houses a set of rule needed for validation of a +// string value +type rules []rule + +// rule interface allows for more flexible rules and just simply +// checks whether or not a value adheres to that rule +type rule interface { + IsValid(value string) bool +} + +// IsValid will iterate through all rules and see if any rules +// apply to the value and supports nested rules +func (r rules) IsValid(value string) bool { + for _, rule := range r { + if rule.IsValid(value) { + return true + } + } + return false +} + +// mapRule generic rule for maps +type mapRule map[string]struct{} + +// IsValid for the map rule satisfies whether it exists in the map +func (m mapRule) IsValid(value string) bool { + _, ok := m[value] + return ok +} + +// whitelist is a generic rule for whitelisting +type whitelist struct { + rule +} + +// IsValid for whitelist checks if the value is within the whitelist +func (w whitelist) IsValid(value string) bool { + return w.rule.IsValid(value) +} + +// blacklist is a generic rule for blacklisting +type blacklist struct { + rule +} + +// IsValid for whitelist checks if the value is within the whitelist +func (b blacklist) IsValid(value string) bool { + return !b.rule.IsValid(value) +} + +type patterns []string + +// IsValid for patterns checks each pattern and returns if a match has +// been found +func (p patterns) IsValid(value string) bool { + for _, pattern := range p { + if strings.HasPrefix(http.CanonicalHeaderKey(value), pattern) { + return true + } + } + return false +} + +// inclusiveRules rules allow for rules to depend on one another +type inclusiveRules []rule + +// IsValid will return true if all rules are true +func (r inclusiveRules) IsValid(value string) bool { + for _, rule := range r { + if !rule.IsValid(value) { + return false + } + } + return true +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/options.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/options.go new file mode 100644 index 000000000..6aa2ed241 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/options.go @@ -0,0 +1,7 @@ +package v4 + +// WithUnsignedPayload will enable and set the UnsignedPayload field to 
+// true of the signer. +func WithUnsignedPayload(v4 *Signer) { + v4.UnsignedPayload = true +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go new file mode 100644 index 000000000..bd082e9d1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go @@ -0,0 +1,24 @@ +// +build go1.5 + +package v4 + +import ( + "net/url" + "strings" +) + +func getURIPath(u *url.URL) string { + var uri string + + if len(u.Opaque) > 0 { + uri = "/" + strings.Join(strings.Split(u.Opaque, "/")[3:], "/") + } else { + uri = u.EscapedPath() + } + + if len(uri) == 0 { + uri = "/" + } + + return uri +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go new file mode 100644 index 000000000..523db79f8 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go @@ -0,0 +1,796 @@ +// Package v4 implements signing for AWS V4 signer +// +// Provides request signing for request that need to be signed with +// AWS V4 Signatures. +// +// Standalone Signer +// +// Generally using the signer outside of the SDK should not require any additional +// logic when using Go v1.5 or higher. The signer does this by taking advantage +// of the URL.EscapedPath method. If your request URI requires additional escaping +// you many need to use the URL.Opaque to define what the raw URI should be sent +// to the service as. +// +// The signer will first check the URL.Opaque field, and use its value if set. +// The signer does require the URL.Opaque field to be set in the form of: +// +// "///" +// +// // e.g. +// "//example.com/some/path" +// +// The leading "//" and hostname are required or the URL.Opaque escaping will +// not work correctly. +// +// If URL.Opaque is not set the signer will fallback to the URL.EscapedPath() +// method and using the returned value. If you're using Go v1.4 you must set +// URL.Opaque if the URI path needs escaping. If URL.Opaque is not set with +// Go v1.5 the signer will fallback to URL.Path. +// +// AWS v4 signature validation requires that the canonical string's URI path +// element must be the URI escaped form of the HTTP request's path. +// http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html +// +// The Go HTTP client will perform escaping automatically on the request. Some +// of these escaping may cause signature validation errors because the HTTP +// request differs from the URI path or query that the signature was generated. +// https://golang.org/pkg/net/url/#URL.EscapedPath +// +// Because of this, it is recommended that when using the signer outside of the +// SDK that explicitly escaping the request prior to being signed is preferable, +// and will help prevent signature validation errors. This can be done by setting +// the URL.Opaque or URL.RawPath. The SDK will use URL.Opaque first and then +// call URL.EscapedPath() if Opaque is not set. +// +// If signing a request intended for HTTP2 server, and you're using Go 1.6.2 +// through 1.7.4 you should use the URL.RawPath as the pre-escaped form of the +// request URL. https://github.com/golang/go/issues/16847 points to a bug in +// Go pre 1.8 that fails to make HTTP2 requests using absolute URL in the HTTP +// message. URL.Opaque generally will force Go to make requests with absolute URL. +// URL.RawPath does not do this, but RawPath must be a valid escaping of Path +// or url.EscapedPath will ignore the RawPath escaping. 
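+//
+// A hypothetical sketch of pre-escaping a standalone request (host and
+// path are examples only, not from the SDK):
+//
+//	req, _ := http.NewRequest("GET", "https://example.com/some/key%2Fname", nil)
+//	// Pin the exact escaped form the signature must cover.
+//	req.URL.Opaque = "//example.com/some/key%2Fname"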
+// +// Test `TestStandaloneSign` provides a complete example of using the signer +// outside of the SDK and pre-escaping the URI path. +package v4 + +import ( + "crypto/hmac" + "crypto/sha256" + "encoding/hex" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "sort" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/sdkio" + "github.com/aws/aws-sdk-go/private/protocol/rest" +) + +const ( + authHeaderPrefix = "AWS4-HMAC-SHA256" + timeFormat = "20060102T150405Z" + shortTimeFormat = "20060102" + + // emptyStringSHA256 is a SHA256 of an empty string + emptyStringSHA256 = `e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855` +) + +var ignoredHeaders = rules{ + blacklist{ + mapRule{ + "Authorization": struct{}{}, + "User-Agent": struct{}{}, + "X-Amzn-Trace-Id": struct{}{}, + }, + }, +} + +// requiredSignedHeaders is a whitelist for build canonical headers. +var requiredSignedHeaders = rules{ + whitelist{ + mapRule{ + "Cache-Control": struct{}{}, + "Content-Disposition": struct{}{}, + "Content-Encoding": struct{}{}, + "Content-Language": struct{}{}, + "Content-Md5": struct{}{}, + "Content-Type": struct{}{}, + "Expires": struct{}{}, + "If-Match": struct{}{}, + "If-Modified-Since": struct{}{}, + "If-None-Match": struct{}{}, + "If-Unmodified-Since": struct{}{}, + "Range": struct{}{}, + "X-Amz-Acl": struct{}{}, + "X-Amz-Copy-Source": struct{}{}, + "X-Amz-Copy-Source-If-Match": struct{}{}, + "X-Amz-Copy-Source-If-Modified-Since": struct{}{}, + "X-Amz-Copy-Source-If-None-Match": struct{}{}, + "X-Amz-Copy-Source-If-Unmodified-Since": struct{}{}, + "X-Amz-Copy-Source-Range": struct{}{}, + "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{}, + "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": struct{}{}, + "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, + "X-Amz-Grant-Full-control": struct{}{}, + "X-Amz-Grant-Read": struct{}{}, + "X-Amz-Grant-Read-Acp": struct{}{}, + "X-Amz-Grant-Write": struct{}{}, + "X-Amz-Grant-Write-Acp": struct{}{}, + "X-Amz-Metadata-Directive": struct{}{}, + "X-Amz-Mfa": struct{}{}, + "X-Amz-Request-Payer": struct{}{}, + "X-Amz-Server-Side-Encryption": struct{}{}, + "X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": struct{}{}, + "X-Amz-Server-Side-Encryption-Customer-Algorithm": struct{}{}, + "X-Amz-Server-Side-Encryption-Customer-Key": struct{}{}, + "X-Amz-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, + "X-Amz-Storage-Class": struct{}{}, + "X-Amz-Tagging": struct{}{}, + "X-Amz-Website-Redirect-Location": struct{}{}, + "X-Amz-Content-Sha256": struct{}{}, + }, + }, + patterns{"X-Amz-Meta-"}, +} + +// allowedHoisting is a whitelist for build query headers. The boolean value +// represents whether or not it is a pattern. +var allowedQueryHoisting = inclusiveRules{ + blacklist{requiredSignedHeaders}, + patterns{"X-Amz-"}, +} + +// Signer applies AWS v4 signing to given request. Use this to sign requests +// that need to be signed with AWS V4 Signatures. +type Signer struct { + // The authentication credentials the request will be signed against. + // This value must be set to sign requests. + Credentials *credentials.Credentials + + // Sets the log level the signer should use when reporting information to + // the logger. If the logger is nil nothing will be logged. 
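+	// For example, setting Debug to aws.LogDebugWithSigning makes the
+	// signer log the canonical string and string to sign for each request.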
See + // aws.LogLevelType for more information on available logging levels + // + // By default nothing will be logged. + Debug aws.LogLevelType + + // The logger loging information will be written to. If there the logger + // is nil, nothing will be logged. + Logger aws.Logger + + // Disables the Signer's moving HTTP header key/value pairs from the HTTP + // request header to the request's query string. This is most commonly used + // with pre-signed requests preventing headers from being added to the + // request's query string. + DisableHeaderHoisting bool + + // Disables the automatic escaping of the URI path of the request for the + // siganture's canonical string's path. For services that do not need additional + // escaping then use this to disable the signer escaping the path. + // + // S3 is an example of a service that does not need additional escaping. + // + // http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html + DisableURIPathEscaping bool + + // Disables the automatical setting of the HTTP request's Body field with the + // io.ReadSeeker passed in to the signer. This is useful if you're using a + // custom wrapper around the body for the io.ReadSeeker and want to preserve + // the Body value on the Request.Body. + // + // This does run the risk of signing a request with a body that will not be + // sent in the request. Need to ensure that the underlying data of the Body + // values are the same. + DisableRequestBodyOverwrite bool + + // currentTimeFn returns the time value which represents the current time. + // This value should only be used for testing. If it is nil the default + // time.Now will be used. + currentTimeFn func() time.Time + + // UnsignedPayload will prevent signing of the payload. This will only + // work for services that have support for this. + UnsignedPayload bool +} + +// NewSigner returns a Signer pointer configured with the credentials and optional +// option values provided. If not options are provided the Signer will use its +// default configuration. +func NewSigner(credentials *credentials.Credentials, options ...func(*Signer)) *Signer { + v4 := &Signer{ + Credentials: credentials, + } + + for _, option := range options { + option(v4) + } + + return v4 +} + +type signingCtx struct { + ServiceName string + Region string + Request *http.Request + Body io.ReadSeeker + Query url.Values + Time time.Time + ExpireTime time.Duration + SignedHeaderVals http.Header + + DisableURIPathEscaping bool + + credValues credentials.Value + isPresign bool + formattedTime string + formattedShortTime string + unsignedPayload bool + + bodyDigest string + signedHeaders string + canonicalHeaders string + canonicalString string + credentialString string + stringToSign string + signature string + authorization string +} + +// Sign signs AWS v4 requests with the provided body, service name, region the +// request is made to, and time the request is signed at. The signTime allows +// you to specify that a request is signed for the future, and cannot be +// used until then. +// +// Returns a list of HTTP headers that were included in the signature or an +// error if signing the request failed. Generally for signed requests this value +// is not needed as the full request context will be captured by the http.Request +// value. It is included for reference though. +// +// Sign will set the request's Body to be the `body` parameter passed in. If +// the body is not already an io.ReadCloser, it will be wrapped within one. 
If +// a `nil` body parameter passed to Sign, the request's Body field will be +// also set to nil. Its important to note that this functionality will not +// change the request's ContentLength of the request. +// +// Sign differs from Presign in that it will sign the request using HTTP +// header values. This type of signing is intended for http.Request values that +// will not be shared, or are shared in a way the header values on the request +// will not be lost. +// +// The requests body is an io.ReadSeeker so the SHA256 of the body can be +// generated. To bypass the signer computing the hash you can set the +// "X-Amz-Content-Sha256" header with a precomputed value. The signer will +// only compute the hash if the request header value is empty. +func (v4 Signer) Sign(r *http.Request, body io.ReadSeeker, service, region string, signTime time.Time) (http.Header, error) { + return v4.signWithBody(r, body, service, region, 0, false, signTime) +} + +// Presign signs AWS v4 requests with the provided body, service name, region +// the request is made to, and time the request is signed at. The signTime +// allows you to specify that a request is signed for the future, and cannot +// be used until then. +// +// Returns a list of HTTP headers that were included in the signature or an +// error if signing the request failed. For presigned requests these headers +// and their values must be included on the HTTP request when it is made. This +// is helpful to know what header values need to be shared with the party the +// presigned request will be distributed to. +// +// Presign differs from Sign in that it will sign the request using query string +// instead of header values. This allows you to share the Presigned Request's +// URL with third parties, or distribute it throughout your system with minimal +// dependencies. +// +// Presign also takes an exp value which is the duration the +// signed request will be valid after the signing time. This is allows you to +// set when the request will expire. +// +// The requests body is an io.ReadSeeker so the SHA256 of the body can be +// generated. To bypass the signer computing the hash you can set the +// "X-Amz-Content-Sha256" header with a precomputed value. The signer will +// only compute the hash if the request header value is empty. +// +// Presigning a S3 request will not compute the body's SHA256 hash by default. +// This is done due to the general use case for S3 presigned URLs is to share +// PUT/GET capabilities. If you would like to include the body's SHA256 in the +// presigned request's signature you can set the "X-Amz-Content-Sha256" +// HTTP header and that will be included in the request's signature. 
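+//
+// A hypothetical usage sketch (credentials, bucket, and expiry are
+// illustrative only):
+//
+//	signer := NewSigner(credentials.NewStaticCredentials("AKID", "SECRET", ""))
+//	req, _ := http.NewRequest("GET", "https://examplebucket.s3.amazonaws.com/key", nil)
+//	_, err := signer.Presign(req, nil, "s3", "us-east-1", 15*time.Minute, time.Now())
+//	// On success req.URL now carries X-Amz-Signature and the other
+//	// X-Amz-* query values, and may be shared until it expires.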
+func (v4 Signer) Presign(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, signTime time.Time) (http.Header, error) { + return v4.signWithBody(r, body, service, region, exp, true, signTime) +} + +func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, isPresign bool, signTime time.Time) (http.Header, error) { + currentTimeFn := v4.currentTimeFn + if currentTimeFn == nil { + currentTimeFn = time.Now + } + + ctx := &signingCtx{ + Request: r, + Body: body, + Query: r.URL.Query(), + Time: signTime, + ExpireTime: exp, + isPresign: isPresign, + ServiceName: service, + Region: region, + DisableURIPathEscaping: v4.DisableURIPathEscaping, + unsignedPayload: v4.UnsignedPayload, + } + + for key := range ctx.Query { + sort.Strings(ctx.Query[key]) + } + + if ctx.isRequestSigned() { + ctx.Time = currentTimeFn() + ctx.handlePresignRemoval() + } + + var err error + ctx.credValues, err = v4.Credentials.Get() + if err != nil { + return http.Header{}, err + } + + ctx.sanitizeHostForHeader() + ctx.assignAmzQueryValues() + if err := ctx.build(v4.DisableHeaderHoisting); err != nil { + return nil, err + } + + // If the request is not presigned the body should be attached to it. This + // prevents the confusion of wanting to send a signed request without + // the body the request was signed for attached. + if !(v4.DisableRequestBodyOverwrite || ctx.isPresign) { + var reader io.ReadCloser + if body != nil { + var ok bool + if reader, ok = body.(io.ReadCloser); !ok { + reader = ioutil.NopCloser(body) + } + } + r.Body = reader + } + + if v4.Debug.Matches(aws.LogDebugWithSigning) { + v4.logSigningInfo(ctx) + } + + return ctx.SignedHeaderVals, nil +} + +func (ctx *signingCtx) sanitizeHostForHeader() { + request.SanitizeHostForHeader(ctx.Request) +} + +func (ctx *signingCtx) handlePresignRemoval() { + if !ctx.isPresign { + return + } + + // The credentials have expired for this request. The current signing + // is invalid, and needs to be request because the request will fail. + ctx.removePresign() + + // Update the request's query string to ensure the values stays in + // sync in the case retrieving the new credentials fails. + ctx.Request.URL.RawQuery = ctx.Query.Encode() +} + +func (ctx *signingCtx) assignAmzQueryValues() { + if ctx.isPresign { + ctx.Query.Set("X-Amz-Algorithm", authHeaderPrefix) + if ctx.credValues.SessionToken != "" { + ctx.Query.Set("X-Amz-Security-Token", ctx.credValues.SessionToken) + } else { + ctx.Query.Del("X-Amz-Security-Token") + } + + return + } + + if ctx.credValues.SessionToken != "" { + ctx.Request.Header.Set("X-Amz-Security-Token", ctx.credValues.SessionToken) + } +} + +// SignRequestHandler is a named request handler the SDK will use to sign +// service client request with using the V4 signature. +var SignRequestHandler = request.NamedHandler{ + Name: "v4.SignRequestHandler", Fn: SignSDKRequest, +} + +// SignSDKRequest signs an AWS request with the V4 signature. This +// request handler should only be used with the SDK's built in service client's +// API operation requests. +// +// This function should not be used on its on its own, but in conjunction with +// an AWS service client's API operation call. To sign a standalone request +// not created by a service client's API operation method use the "Sign" or +// "Presign" functions of the "Signer" type. +// +// If the credentials of the request's config are set to +// credentials.AnonymousCredentials the request will not be signed. 
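+//
+// For example (hypothetical), a client configured with
+//
+//	cfg := aws.NewConfig().WithCredentials(credentials.AnonymousCredentials)
+//
+// sends its requests through this handler unsigned.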
+func SignSDKRequest(req *request.Request) { + SignSDKRequestWithCurrentTime(req, time.Now) +} + +// BuildNamedHandler will build a generic handler for signing. +func BuildNamedHandler(name string, opts ...func(*Signer)) request.NamedHandler { + return request.NamedHandler{ + Name: name, + Fn: func(req *request.Request) { + SignSDKRequestWithCurrentTime(req, time.Now, opts...) + }, + } +} + +// SignSDKRequestWithCurrentTime will sign the SDK's request using the time +// function passed in. Behaves the same as SignSDKRequest with the exception +// the request is signed with the value returned by the current time function. +func SignSDKRequestWithCurrentTime(req *request.Request, curTimeFn func() time.Time, opts ...func(*Signer)) { + // If the request does not need to be signed ignore the signing of the + // request if the AnonymousCredentials object is used. + if req.Config.Credentials == credentials.AnonymousCredentials { + return + } + + region := req.ClientInfo.SigningRegion + if region == "" { + region = aws.StringValue(req.Config.Region) + } + + name := req.ClientInfo.SigningName + if name == "" { + name = req.ClientInfo.ServiceName + } + + v4 := NewSigner(req.Config.Credentials, func(v4 *Signer) { + v4.Debug = req.Config.LogLevel.Value() + v4.Logger = req.Config.Logger + v4.DisableHeaderHoisting = req.NotHoist + v4.currentTimeFn = curTimeFn + if name == "s3" { + // S3 service should not have any escaping applied + v4.DisableURIPathEscaping = true + } + // Prevents setting the HTTPRequest's Body. Since the Body could be + // wrapped in a custom io.Closer that we do not want to be stompped + // on top of by the signer. + v4.DisableRequestBodyOverwrite = true + }) + + for _, opt := range opts { + opt(v4) + } + + curTime := curTimeFn() + signedHeaders, err := v4.signWithBody(req.HTTPRequest, req.GetBody(), + name, region, req.ExpireTime, req.ExpireTime > 0, curTime, + ) + if err != nil { + req.Error = err + req.SignedHeaderVals = nil + return + } + + req.SignedHeaderVals = signedHeaders + req.LastSignedAt = curTime +} + +const logSignInfoMsg = `DEBUG: Request Signature: +---[ CANONICAL STRING ]----------------------------- +%s +---[ STRING TO SIGN ]-------------------------------- +%s%s +-----------------------------------------------------` +const logSignedURLMsg = ` +---[ SIGNED URL ]------------------------------------ +%s` + +func (v4 *Signer) logSigningInfo(ctx *signingCtx) { + signedURLMsg := "" + if ctx.isPresign { + signedURLMsg = fmt.Sprintf(logSignedURLMsg, ctx.Request.URL.String()) + } + msg := fmt.Sprintf(logSignInfoMsg, ctx.canonicalString, ctx.stringToSign, signedURLMsg) + v4.Logger.Log(msg) +} + +func (ctx *signingCtx) build(disableHeaderHoisting bool) error { + ctx.buildTime() // no depends + ctx.buildCredentialString() // no depends + + if err := ctx.buildBodyDigest(); err != nil { + return err + } + + unsignedHeaders := ctx.Request.Header + if ctx.isPresign { + if !disableHeaderHoisting { + urlValues := url.Values{} + urlValues, unsignedHeaders = buildQuery(allowedQueryHoisting, unsignedHeaders) // no depends + for k := range urlValues { + ctx.Query[k] = urlValues[k] + } + } + } + + ctx.buildCanonicalHeaders(ignoredHeaders, unsignedHeaders) + ctx.buildCanonicalString() // depends on canon headers / signed headers + ctx.buildStringToSign() // depends on canon string + ctx.buildSignature() // depends on string to sign + + if ctx.isPresign { + ctx.Request.URL.RawQuery += "&X-Amz-Signature=" + ctx.signature + } else { + parts := []string{ + authHeaderPrefix + " Credential=" 
+ ctx.credValues.AccessKeyID + "/" + ctx.credentialString, + "SignedHeaders=" + ctx.signedHeaders, + "Signature=" + ctx.signature, + } + ctx.Request.Header.Set("Authorization", strings.Join(parts, ", ")) + } + + return nil +} + +func (ctx *signingCtx) buildTime() { + ctx.formattedTime = ctx.Time.UTC().Format(timeFormat) + ctx.formattedShortTime = ctx.Time.UTC().Format(shortTimeFormat) + + if ctx.isPresign { + duration := int64(ctx.ExpireTime / time.Second) + ctx.Query.Set("X-Amz-Date", ctx.formattedTime) + ctx.Query.Set("X-Amz-Expires", strconv.FormatInt(duration, 10)) + } else { + ctx.Request.Header.Set("X-Amz-Date", ctx.formattedTime) + } +} + +func (ctx *signingCtx) buildCredentialString() { + ctx.credentialString = strings.Join([]string{ + ctx.formattedShortTime, + ctx.Region, + ctx.ServiceName, + "aws4_request", + }, "/") + + if ctx.isPresign { + ctx.Query.Set("X-Amz-Credential", ctx.credValues.AccessKeyID+"/"+ctx.credentialString) + } +} + +func buildQuery(r rule, header http.Header) (url.Values, http.Header) { + query := url.Values{} + unsignedHeaders := http.Header{} + for k, h := range header { + if r.IsValid(k) { + query[k] = h + } else { + unsignedHeaders[k] = h + } + } + + return query, unsignedHeaders +} +func (ctx *signingCtx) buildCanonicalHeaders(r rule, header http.Header) { + var headers []string + headers = append(headers, "host") + for k, v := range header { + canonicalKey := http.CanonicalHeaderKey(k) + if !r.IsValid(canonicalKey) { + continue // ignored header + } + if ctx.SignedHeaderVals == nil { + ctx.SignedHeaderVals = make(http.Header) + } + + lowerCaseKey := strings.ToLower(k) + if _, ok := ctx.SignedHeaderVals[lowerCaseKey]; ok { + // include additional values + ctx.SignedHeaderVals[lowerCaseKey] = append(ctx.SignedHeaderVals[lowerCaseKey], v...) 
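+			// The header key was already recorded under its lower-cased
+			// form, so merge the values rather than listing the key twice.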
+ continue + } + + headers = append(headers, lowerCaseKey) + ctx.SignedHeaderVals[lowerCaseKey] = v + } + sort.Strings(headers) + + ctx.signedHeaders = strings.Join(headers, ";") + + if ctx.isPresign { + ctx.Query.Set("X-Amz-SignedHeaders", ctx.signedHeaders) + } + + headerValues := make([]string, len(headers)) + for i, k := range headers { + if k == "host" { + if ctx.Request.Host != "" { + headerValues[i] = "host:" + ctx.Request.Host + } else { + headerValues[i] = "host:" + ctx.Request.URL.Host + } + } else { + headerValues[i] = k + ":" + + strings.Join(ctx.SignedHeaderVals[k], ",") + } + } + stripExcessSpaces(headerValues) + ctx.canonicalHeaders = strings.Join(headerValues, "\n") +} + +func (ctx *signingCtx) buildCanonicalString() { + ctx.Request.URL.RawQuery = strings.Replace(ctx.Query.Encode(), "+", "%20", -1) + + uri := getURIPath(ctx.Request.URL) + + if !ctx.DisableURIPathEscaping { + uri = rest.EscapePath(uri, false) + } + + ctx.canonicalString = strings.Join([]string{ + ctx.Request.Method, + uri, + ctx.Request.URL.RawQuery, + ctx.canonicalHeaders + "\n", + ctx.signedHeaders, + ctx.bodyDigest, + }, "\n") +} + +func (ctx *signingCtx) buildStringToSign() { + ctx.stringToSign = strings.Join([]string{ + authHeaderPrefix, + ctx.formattedTime, + ctx.credentialString, + hex.EncodeToString(makeSha256([]byte(ctx.canonicalString))), + }, "\n") +} + +func (ctx *signingCtx) buildSignature() { + secret := ctx.credValues.SecretAccessKey + date := makeHmac([]byte("AWS4"+secret), []byte(ctx.formattedShortTime)) + region := makeHmac(date, []byte(ctx.Region)) + service := makeHmac(region, []byte(ctx.ServiceName)) + credentials := makeHmac(service, []byte("aws4_request")) + signature := makeHmac(credentials, []byte(ctx.stringToSign)) + ctx.signature = hex.EncodeToString(signature) +} + +func (ctx *signingCtx) buildBodyDigest() error { + hash := ctx.Request.Header.Get("X-Amz-Content-Sha256") + if hash == "" { + includeSHA256Header := ctx.unsignedPayload || + ctx.ServiceName == "s3" || + ctx.ServiceName == "glacier" + + s3Presign := ctx.isPresign && ctx.ServiceName == "s3" + + if ctx.unsignedPayload || s3Presign { + hash = "UNSIGNED-PAYLOAD" + includeSHA256Header = !s3Presign + } else if ctx.Body == nil { + hash = emptyStringSHA256 + } else { + if !aws.IsReaderSeekable(ctx.Body) { + return fmt.Errorf("cannot use unseekable request body %T, for signed request with body", ctx.Body) + } + hash = hex.EncodeToString(makeSha256Reader(ctx.Body)) + } + + if includeSHA256Header { + ctx.Request.Header.Set("X-Amz-Content-Sha256", hash) + } + } + ctx.bodyDigest = hash + + return nil +} + +// isRequestSigned returns if the request is currently signed or presigned +func (ctx *signingCtx) isRequestSigned() bool { + if ctx.isPresign && ctx.Query.Get("X-Amz-Signature") != "" { + return true + } + if ctx.Request.Header.Get("Authorization") != "" { + return true + } + + return false +} + +// unsign removes signing flags for both signed and presigned requests. 
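+//
+// For reference, the signature these flags carry was derived by the HMAC
+// chain in buildSignature, sketched here in pseudocode:
+//
+//	kDate     = HMAC-SHA256("AWS4"+secret, yyyymmdd)
+//	kRegion   = HMAC-SHA256(kDate, region)
+//	kService  = HMAC-SHA256(kRegion, service)
+//	kSigning  = HMAC-SHA256(kService, "aws4_request")
+//	signature = hex(HMAC-SHA256(kSigning, stringToSign))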
+func (ctx *signingCtx) removePresign() { + ctx.Query.Del("X-Amz-Algorithm") + ctx.Query.Del("X-Amz-Signature") + ctx.Query.Del("X-Amz-Security-Token") + ctx.Query.Del("X-Amz-Date") + ctx.Query.Del("X-Amz-Expires") + ctx.Query.Del("X-Amz-Credential") + ctx.Query.Del("X-Amz-SignedHeaders") +} + +func makeHmac(key []byte, data []byte) []byte { + hash := hmac.New(sha256.New, key) + hash.Write(data) + return hash.Sum(nil) +} + +func makeSha256(data []byte) []byte { + hash := sha256.New() + hash.Write(data) + return hash.Sum(nil) +} + +func makeSha256Reader(reader io.ReadSeeker) []byte { + hash := sha256.New() + start, _ := reader.Seek(0, sdkio.SeekCurrent) + defer reader.Seek(start, sdkio.SeekStart) + + // Use CopyN to avoid allocating the 32KB buffer in io.Copy for bodies + // smaller than 32KB. Fall back to io.Copy if we fail to determine the size. + size, err := aws.SeekerLen(reader) + if err != nil { + io.Copy(hash, reader) + } else { + io.CopyN(hash, reader, size) + } + + return hash.Sum(nil) +} + +const doubleSpace = " " + +// stripExcessSpaces will rewrite the passed in slice's string values to not +// contain multiple side-by-side spaces. +func stripExcessSpaces(vals []string) { + var j, k, l, m, spaces int + for i, str := range vals { + // Trim trailing spaces + for j = len(str) - 1; j >= 0 && str[j] == ' '; j-- { + } + + // Trim leading spaces + for k = 0; k < j && str[k] == ' '; k++ { + } + str = str[k : j+1] + + // Strip multiple spaces. + j = strings.Index(str, doubleSpace) + if j < 0 { + vals[i] = str + continue + } + + buf := []byte(str) + for k, m, l = j, j, len(buf); k < l; k++ { + if buf[k] == ' ' { + if spaces == 0 { + // First space. + buf[m] = buf[k] + m++ + } + spaces++ + } else { + // End of multiple spaces. + spaces = 0 + buf[m] = buf[k] + m++ + } + } + + vals[i] = string(buf[:m]) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/types.go b/vendor/github.com/aws/aws-sdk-go/aws/types.go new file mode 100644 index 000000000..455091540 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/types.go @@ -0,0 +1,207 @@ +package aws + +import ( + "io" + "sync" + + "github.com/aws/aws-sdk-go/internal/sdkio" +) + +// ReadSeekCloser wraps a io.Reader returning a ReaderSeekerCloser. Allows the +// SDK to accept an io.Reader that is not also an io.Seeker for unsigned +// streaming payload API operations. +// +// A ReadSeekCloser wrapping an nonseekable io.Reader used in an API +// operation's input will prevent that operation being retried in the case of +// network errors, and cause operation requests to fail if the operation +// requires payload signing. +// +// Note: If using With S3 PutObject to stream an object upload The SDK's S3 +// Upload manager (s3manager.Uploader) provides support for streaming with the +// ability to retry network errors. +func ReadSeekCloser(r io.Reader) ReaderSeekerCloser { + return ReaderSeekerCloser{r} +} + +// ReaderSeekerCloser represents a reader that can also delegate io.Seeker and +// io.Closer interfaces to the underlying object if they are available. +type ReaderSeekerCloser struct { + r io.Reader +} + +// IsReaderSeekable returns if the underlying reader type can be seeked. A +// io.Reader might not actually be seekable if it is the ReaderSeekerCloser +// type. 
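+//
+// For example (hypothetical values):
+//
+//	IsReaderSeekable(strings.NewReader("abc"))                  // true: *strings.Reader can seek
+//	IsReaderSeekable(ReadSeekCloser(bufio.NewReader(os.Stdin))) // false: wrapped reader cannot seek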
+func IsReaderSeekable(r io.Reader) bool { + switch v := r.(type) { + case ReaderSeekerCloser: + return v.IsSeeker() + case *ReaderSeekerCloser: + return v.IsSeeker() + case io.ReadSeeker: + return true + default: + return false + } +} + +// Read reads from the reader up to size of p. The number of bytes read, and +// error if it occurred will be returned. +// +// If the reader is not an io.Reader zero bytes read, and nil error will be +// returned. +// +// Performs the same functionality as io.Reader Read +func (r ReaderSeekerCloser) Read(p []byte) (int, error) { + switch t := r.r.(type) { + case io.Reader: + return t.Read(p) + } + return 0, nil +} + +// Seek sets the offset for the next Read to offset, interpreted according to +// whence: 0 means relative to the origin of the file, 1 means relative to the +// current offset, and 2 means relative to the end. Seek returns the new offset +// and an error, if any. +// +// If the ReaderSeekerCloser is not an io.Seeker nothing will be done. +func (r ReaderSeekerCloser) Seek(offset int64, whence int) (int64, error) { + switch t := r.r.(type) { + case io.Seeker: + return t.Seek(offset, whence) + } + return int64(0), nil +} + +// IsSeeker returns if the underlying reader is also a seeker. +func (r ReaderSeekerCloser) IsSeeker() bool { + _, ok := r.r.(io.Seeker) + return ok +} + +// HasLen returns the length of the underlying reader if the value implements +// the Len() int method. +func (r ReaderSeekerCloser) HasLen() (int, bool) { + type lenner interface { + Len() int + } + + if lr, ok := r.r.(lenner); ok { + return lr.Len(), true + } + + return 0, false +} + +// GetLen returns the length of the bytes remaining in the underlying reader. +// Checks first for Len(), then io.Seeker to determine the size of the +// underlying reader. +// +// Will return -1 if the length cannot be determined. +func (r ReaderSeekerCloser) GetLen() (int64, error) { + if l, ok := r.HasLen(); ok { + return int64(l), nil + } + + if s, ok := r.r.(io.Seeker); ok { + return seekerLen(s) + } + + return -1, nil +} + +// SeekerLen attempts to get the number of bytes remaining at the seeker's +// current position. Returns the number of bytes remaining or error. +func SeekerLen(s io.Seeker) (int64, error) { + // Determine if the seeker is actually seekable. ReaderSeekerCloser + // hides the fact that a io.Readers might not actually be seekable. + switch v := s.(type) { + case ReaderSeekerCloser: + return v.GetLen() + case *ReaderSeekerCloser: + return v.GetLen() + } + + return seekerLen(s) +} + +func seekerLen(s io.Seeker) (int64, error) { + curOffset, err := s.Seek(0, sdkio.SeekCurrent) + if err != nil { + return 0, err + } + + endOffset, err := s.Seek(0, sdkio.SeekEnd) + if err != nil { + return 0, err + } + + _, err = s.Seek(curOffset, sdkio.SeekStart) + if err != nil { + return 0, err + } + + return endOffset - curOffset, nil +} + +// Close closes the ReaderSeekerCloser. +// +// If the ReaderSeekerCloser is not an io.Closer nothing will be done. +func (r ReaderSeekerCloser) Close() error { + switch t := r.r.(type) { + case io.Closer: + return t.Close() + } + return nil +} + +// A WriteAtBuffer provides a in memory buffer supporting the io.WriterAt interface +// Can be used with the s3manager.Downloader to download content to a buffer +// in memory. Safe to use concurrently. +type WriteAtBuffer struct { + buf []byte + m sync.Mutex + + // GrowthCoeff defines the growth rate of the internal buffer. 
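+	// For instance (hypothetical), a GrowthCoeff of 2 over-allocates to
+	// twice the required length, trading memory for fewer reallocations
+	// across sequential WriteAt calls.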
By + // default, the growth rate is 1, where expanding the internal + // buffer will allocate only enough capacity to fit the new expected + // length. + GrowthCoeff float64 +} + +// NewWriteAtBuffer creates a WriteAtBuffer with an internal buffer +// provided by buf. +func NewWriteAtBuffer(buf []byte) *WriteAtBuffer { + return &WriteAtBuffer{buf: buf} +} + +// WriteAt writes a slice of bytes to a buffer starting at the position provided +// The number of bytes written will be returned, or error. Can overwrite previous +// written slices if the write ats overlap. +func (b *WriteAtBuffer) WriteAt(p []byte, pos int64) (n int, err error) { + pLen := len(p) + expLen := pos + int64(pLen) + b.m.Lock() + defer b.m.Unlock() + if int64(len(b.buf)) < expLen { + if int64(cap(b.buf)) < expLen { + if b.GrowthCoeff < 1 { + b.GrowthCoeff = 1 + } + newBuf := make([]byte, expLen, int64(b.GrowthCoeff*float64(expLen))) + copy(newBuf, b.buf) + b.buf = newBuf + } + b.buf = b.buf[:expLen] + } + copy(b.buf[pos:], p) + return pLen, nil +} + +// Bytes returns a slice of bytes written to the buffer. +func (b *WriteAtBuffer) Bytes() []byte { + b.m.Lock() + defer b.m.Unlock() + return b.buf +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/url.go b/vendor/github.com/aws/aws-sdk-go/aws/url.go new file mode 100644 index 000000000..6192b2455 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/url.go @@ -0,0 +1,12 @@ +// +build go1.8 + +package aws + +import "net/url" + +// URLHostname will extract the Hostname without port from the URL value. +// +// Wrapper of net/url#URL.Hostname for backwards Go version compatibility. +func URLHostname(url *url.URL) string { + return url.Hostname() +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go b/vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go new file mode 100644 index 000000000..0210d2720 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go @@ -0,0 +1,29 @@ +// +build !go1.8 + +package aws + +import ( + "net/url" + "strings" +) + +// URLHostname will extract the Hostname without port from the URL value. +// +// Copy of Go 1.8's net/url#URL.Hostname functionality. +func URLHostname(url *url.URL) string { + return stripPort(url.Host) + +} + +// stripPort is copy of Go 1.8 url#URL.Hostname functionality. +// https://golang.org/src/net/url/url.go +func stripPort(hostport string) string { + colon := strings.IndexByte(hostport, ':') + if colon == -1 { + return hostport + } + if i := strings.IndexByte(hostport, ']'); i != -1 { + return strings.TrimPrefix(hostport[:i], "[") + } + return hostport[:colon] +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go new file mode 100644 index 000000000..55f246463 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -0,0 +1,8 @@ +// Package aws provides core functionality for making requests to AWS services. 
+package aws + +// SDKName is the name of this AWS SDK +const SDKName = "aws-sdk-go" + +// SDKVersion is the version of this SDK +const SDKVersion = "1.20.20" diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/ast.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/ast.go new file mode 100644 index 000000000..e83a99886 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/ast.go @@ -0,0 +1,120 @@ +package ini + +// ASTKind represents different states in the parse table +// and the type of AST that is being constructed +type ASTKind int + +// ASTKind* is used in the parse table to transition between +// the different states +const ( + ASTKindNone = ASTKind(iota) + ASTKindStart + ASTKindExpr + ASTKindEqualExpr + ASTKindStatement + ASTKindSkipStatement + ASTKindExprStatement + ASTKindSectionStatement + ASTKindNestedSectionStatement + ASTKindCompletedNestedSectionStatement + ASTKindCommentStatement + ASTKindCompletedSectionStatement +) + +func (k ASTKind) String() string { + switch k { + case ASTKindNone: + return "none" + case ASTKindStart: + return "start" + case ASTKindExpr: + return "expr" + case ASTKindStatement: + return "stmt" + case ASTKindSectionStatement: + return "section_stmt" + case ASTKindExprStatement: + return "expr_stmt" + case ASTKindCommentStatement: + return "comment" + case ASTKindNestedSectionStatement: + return "nested_section_stmt" + case ASTKindCompletedSectionStatement: + return "completed_stmt" + case ASTKindSkipStatement: + return "skip" + default: + return "" + } +} + +// AST interface allows us to determine what kind of node we +// are on and casting may not need to be necessary. +// +// The root is always the first node in Children +type AST struct { + Kind ASTKind + Root Token + RootToken bool + Children []AST +} + +func newAST(kind ASTKind, root AST, children ...AST) AST { + return AST{ + Kind: kind, + Children: append([]AST{root}, children...), + } +} + +func newASTWithRootToken(kind ASTKind, root Token, children ...AST) AST { + return AST{ + Kind: kind, + Root: root, + RootToken: true, + Children: children, + } +} + +// AppendChild will append to the list of children an AST has. +func (a *AST) AppendChild(child AST) { + a.Children = append(a.Children, child) +} + +// GetRoot will return the root AST which can be the first entry +// in the children list or a token. +func (a *AST) GetRoot() AST { + if a.RootToken { + return *a + } + + if len(a.Children) == 0 { + return AST{} + } + + return a.Children[0] +} + +// GetChildren will return the current AST's list of children +func (a *AST) GetChildren() []AST { + if len(a.Children) == 0 { + return []AST{} + } + + if a.RootToken { + return a.Children + } + + return a.Children[1:] +} + +// SetChildren will set and override all children of the AST. +func (a *AST) SetChildren(children []AST) { + if a.RootToken { + a.Children = children + } else { + a.Children = append(a.Children[:1], children...) + } +} + +// Start is used to indicate the starting state of the parse table. 
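+//
+// For a hypothetical input line such as "region = us-west-2", parsing
+// begins at Start and reduces to, roughly:
+//
+//	ExprStatement
+//	  EqualExpr (root token "=")
+//	    Expr "region"
+//	    Expr "us-west-2"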
+var Start = newAST(ASTKindStart, AST{}) diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/comma_token.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/comma_token.go new file mode 100644 index 000000000..0895d53cb --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/comma_token.go @@ -0,0 +1,11 @@ +package ini + +var commaRunes = []rune(",") + +func isComma(b rune) bool { + return b == ',' +} + +func newCommaToken() Token { + return newToken(TokenComma, commaRunes, NoneType) +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/comment_token.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/comment_token.go new file mode 100644 index 000000000..0b76999ba --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/comment_token.go @@ -0,0 +1,35 @@ +package ini + +// isComment will return whether or not the next byte(s) is a +// comment. +func isComment(b []rune) bool { + if len(b) == 0 { + return false + } + + switch b[0] { + case ';': + return true + case '#': + return true + } + + return false +} + +// newCommentToken will create a comment token and +// return how many bytes were read. +func newCommentToken(b []rune) (Token, int, error) { + i := 0 + for ; i < len(b); i++ { + if b[i] == '\n' { + break + } + + if len(b)-i > 2 && b[i] == '\r' && b[i+1] == '\n' { + break + } + } + + return newToken(TokenComment, b[:i], NoneType), i, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/doc.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/doc.go new file mode 100644 index 000000000..25ce0fe13 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/doc.go @@ -0,0 +1,29 @@ +// Package ini is an LL(1) parser for configuration files. +// +// Example: +// sections, err := ini.OpenFile("/path/to/file") +// if err != nil { +// panic(err) +// } +// +// profile := "foo" +// section, ok := sections.GetSection(profile) +// if !ok { +// fmt.Printf("section %q could not be found", profile) +// } +// +// Below is the BNF that describes this parser +// Grammar: +// stmt -> value stmt' +// stmt' -> epsilon | op stmt +// value -> number | string | boolean | quoted_string +// +// section -> [ section' +// section' -> value section_close +// section_close -> ] +// +// SkipState will skip (NL WS)+ +// +// comment -> # comment' | ; comment' +// comment' -> epsilon | value +package ini diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/empty_token.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/empty_token.go new file mode 100644 index 000000000..04345a54c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/empty_token.go @@ -0,0 +1,4 @@ +package ini + +// emptyToken is used to satisfy the Token interface +var emptyToken = newToken(TokenNone, []rune{}, NoneType) diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/expression.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/expression.go new file mode 100644 index 000000000..91ba2a59d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/expression.go @@ -0,0 +1,24 @@ +package ini + +// newExpression will return an expression AST. 
+// Expr represents an expression +// +// grammar: +// expr -> string | number +func newExpression(tok Token) AST { + return newASTWithRootToken(ASTKindExpr, tok) +} + +func newEqualExpr(left AST, tok Token) AST { + return newASTWithRootToken(ASTKindEqualExpr, tok, left) +} + +// EqualExprKey will return a LHS value in the equal expr +func EqualExprKey(ast AST) string { + children := ast.GetChildren() + if len(children) == 0 || ast.Kind != ASTKindEqualExpr { + return "" + } + + return string(children[0].Root.Raw()) +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/fuzz.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/fuzz.go new file mode 100644 index 000000000..8d462f77e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/fuzz.go @@ -0,0 +1,17 @@ +// +build gofuzz + +package ini + +import ( + "bytes" +) + +func Fuzz(data []byte) int { + b := bytes.NewReader(data) + + if _, err := Parse(b); err != nil { + return 0 + } + + return 1 +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/ini.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini.go new file mode 100644 index 000000000..3b0ca7afe --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini.go @@ -0,0 +1,51 @@ +package ini + +import ( + "io" + "os" + + "github.com/aws/aws-sdk-go/aws/awserr" +) + +// OpenFile takes a path to a given file, and will open and parse +// that file. +func OpenFile(path string) (Sections, error) { + f, err := os.Open(path) + if err != nil { + return Sections{}, awserr.New(ErrCodeUnableToReadFile, "unable to open file", err) + } + defer f.Close() + + return Parse(f) +} + +// Parse will parse the given file using the shared config +// visitor. +func Parse(f io.Reader) (Sections, error) { + tree, err := ParseAST(f) + if err != nil { + return Sections{}, err + } + + v := NewDefaultVisitor() + if err = Walk(tree, v); err != nil { + return Sections{}, err + } + + return v.Sections, nil +} + +// ParseBytes will parse the given bytes and return the parsed sections. +func ParseBytes(b []byte) (Sections, error) { + tree, err := ParseASTBytes(b) + if err != nil { + return Sections{}, err + } + + v := NewDefaultVisitor() + if err = Walk(tree, v); err != nil { + return Sections{}, err + } + + return v.Sections, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_lexer.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_lexer.go new file mode 100644 index 000000000..582c024ad --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_lexer.go @@ -0,0 +1,165 @@ +package ini + +import ( + "bytes" + "io" + "io/ioutil" + + "github.com/aws/aws-sdk-go/aws/awserr" +) + +const ( + // ErrCodeUnableToReadFile is used when a file is failed to be + // opened or read from. + ErrCodeUnableToReadFile = "FailedRead" +) + +// TokenType represents the various different tokens types +type TokenType int + +func (t TokenType) String() string { + switch t { + case TokenNone: + return "none" + case TokenLit: + return "literal" + case TokenSep: + return "sep" + case TokenOp: + return "op" + case TokenWS: + return "ws" + case TokenNL: + return "newline" + case TokenComment: + return "comment" + case TokenComma: + return "comma" + default: + return "" + } +} + +// TokenType enums +const ( + TokenNone = TokenType(iota) + TokenLit + TokenSep + TokenComma + TokenOp + TokenWS + TokenNL + TokenComment +) + +type iniLexer struct{} + +// Tokenize will return a list of tokens during lexical analysis of the +// io.Reader. 
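+//
+// For a hypothetical input such as "key = value\n" the stream is, roughly:
+// lit("key"), ws, op("="), ws, lit("value"), newline.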
+func (l *iniLexer) Tokenize(r io.Reader) ([]Token, error) { + b, err := ioutil.ReadAll(r) + if err != nil { + return nil, awserr.New(ErrCodeUnableToReadFile, "unable to read file", err) + } + + return l.tokenize(b) +} + +func (l *iniLexer) tokenize(b []byte) ([]Token, error) { + runes := bytes.Runes(b) + var err error + n := 0 + tokenAmount := countTokens(runes) + tokens := make([]Token, tokenAmount) + count := 0 + + for len(runes) > 0 && count < tokenAmount { + switch { + case isWhitespace(runes[0]): + tokens[count], n, err = newWSToken(runes) + case isComma(runes[0]): + tokens[count], n = newCommaToken(), 1 + case isComment(runes): + tokens[count], n, err = newCommentToken(runes) + case isNewline(runes): + tokens[count], n, err = newNewlineToken(runes) + case isSep(runes): + tokens[count], n, err = newSepToken(runes) + case isOp(runes): + tokens[count], n, err = newOpToken(runes) + default: + tokens[count], n, err = newLitToken(runes) + } + + if err != nil { + return nil, err + } + + count++ + + runes = runes[n:] + } + + return tokens[:count], nil +} + +func countTokens(runes []rune) int { + count, n := 0, 0 + var err error + + for len(runes) > 0 { + switch { + case isWhitespace(runes[0]): + _, n, err = newWSToken(runes) + case isComma(runes[0]): + _, n = newCommaToken(), 1 + case isComment(runes): + _, n, err = newCommentToken(runes) + case isNewline(runes): + _, n, err = newNewlineToken(runes) + case isSep(runes): + _, n, err = newSepToken(runes) + case isOp(runes): + _, n, err = newOpToken(runes) + default: + _, n, err = newLitToken(runes) + } + + if err != nil { + return 0 + } + + count++ + runes = runes[n:] + } + + return count + 1 +} + +// Token indicates a metadata about a given value. +type Token struct { + t TokenType + ValueType ValueType + base int + raw []rune +} + +var emptyValue = Value{} + +func newToken(t TokenType, raw []rune, v ValueType) Token { + return Token{ + t: t, + raw: raw, + ValueType: v, + } +} + +// Raw return the raw runes that were consumed +func (tok Token) Raw() []rune { + return tok.raw +} + +// Type returns the token type +func (tok Token) Type() TokenType { + return tok.t +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go new file mode 100644 index 000000000..e56dcee2f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go @@ -0,0 +1,349 @@ +package ini + +import ( + "fmt" + "io" +) + +// State enums for the parse table +const ( + InvalidState = iota + // stmt -> value stmt' + StatementState + // stmt' -> MarkComplete | op stmt + StatementPrimeState + // value -> number | string | boolean | quoted_string + ValueState + // section -> [ section' + OpenScopeState + // section' -> value section_close + SectionState + // section_close -> ] + CloseScopeState + // SkipState will skip (NL WS)+ + SkipState + // SkipTokenState will skip any token and push the previous + // state onto the stack. + SkipTokenState + // comment -> # comment' | ; comment' + // comment' -> MarkComplete | value + CommentState + // MarkComplete state will complete statements and move that + // to the completed AST list + MarkCompleteState + // TerminalState signifies that the tokens have been fully parsed + TerminalState +) + +// parseTable is a state machine to dictate the grammar above. 
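+//
+// Lookup is parseTable[currentASTKind][nextTokenType] -> next state. For
+// example, parseTable[ASTKindExpr][TokenOp] yields StatementPrimeState,
+// which is how a bare literal followed by "=" becomes an assignment.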
+var parseTable = map[ASTKind]map[TokenType]int{ + ASTKindStart: map[TokenType]int{ + TokenLit: StatementState, + TokenSep: OpenScopeState, + TokenWS: SkipTokenState, + TokenNL: SkipTokenState, + TokenComment: CommentState, + TokenNone: TerminalState, + }, + ASTKindCommentStatement: map[TokenType]int{ + TokenLit: StatementState, + TokenSep: OpenScopeState, + TokenWS: SkipTokenState, + TokenNL: SkipTokenState, + TokenComment: CommentState, + TokenNone: MarkCompleteState, + }, + ASTKindExpr: map[TokenType]int{ + TokenOp: StatementPrimeState, + TokenLit: ValueState, + TokenSep: OpenScopeState, + TokenWS: ValueState, + TokenNL: SkipState, + TokenComment: CommentState, + TokenNone: MarkCompleteState, + }, + ASTKindEqualExpr: map[TokenType]int{ + TokenLit: ValueState, + TokenWS: SkipTokenState, + TokenNL: SkipState, + }, + ASTKindStatement: map[TokenType]int{ + TokenLit: SectionState, + TokenSep: CloseScopeState, + TokenWS: SkipTokenState, + TokenNL: SkipTokenState, + TokenComment: CommentState, + TokenNone: MarkCompleteState, + }, + ASTKindExprStatement: map[TokenType]int{ + TokenLit: ValueState, + TokenSep: OpenScopeState, + TokenOp: ValueState, + TokenWS: ValueState, + TokenNL: MarkCompleteState, + TokenComment: CommentState, + TokenNone: TerminalState, + TokenComma: SkipState, + }, + ASTKindSectionStatement: map[TokenType]int{ + TokenLit: SectionState, + TokenOp: SectionState, + TokenSep: CloseScopeState, + TokenWS: SectionState, + TokenNL: SkipTokenState, + }, + ASTKindCompletedSectionStatement: map[TokenType]int{ + TokenWS: SkipTokenState, + TokenNL: SkipTokenState, + TokenLit: StatementState, + TokenSep: OpenScopeState, + TokenComment: CommentState, + TokenNone: MarkCompleteState, + }, + ASTKindSkipStatement: map[TokenType]int{ + TokenLit: StatementState, + TokenSep: OpenScopeState, + TokenWS: SkipTokenState, + TokenNL: SkipTokenState, + TokenComment: CommentState, + TokenNone: TerminalState, + }, +} + +// ParseAST will parse input from an io.Reader using +// an LL(1) parser. +func ParseAST(r io.Reader) ([]AST, error) { + lexer := iniLexer{} + tokens, err := lexer.Tokenize(r) + if err != nil { + return []AST{}, err + } + + return parse(tokens) +} + +// ParseASTBytes will parse input from a byte slice using +// an LL(1) parser. +func ParseASTBytes(b []byte) ([]AST, error) { + lexer := iniLexer{} + tokens, err := lexer.tokenize(b) + if err != nil { + return []AST{}, err + } + + return parse(tokens) +} + +func parse(tokens []Token) ([]AST, error) { + start := Start + stack := newParseStack(3, len(tokens)) + + stack.Push(start) + s := newSkipper() + +loop: + for stack.Len() > 0 { + k := stack.Pop() + + var tok Token + if len(tokens) == 0 { + // this occurs when all the tokens have been processed + // but reduction of what's left on the stack needs to + // occur. + tok = emptyToken + } else { + tok = tokens[0] + } + + step := parseTable[k.Kind][tok.Type()] + if s.ShouldSkip(tok) { + // being in a skip state with no tokens will break out of + // the parse loop since there is nothing left to process. + if len(tokens) == 0 { + break loop + } + + step = SkipTokenState + } + + switch step { + case TerminalState: + // Finished parsing. Push what should be the last + // statement to the stack. If there is anything left + // on the stack, an error in parsing has occurred. + if k.Kind != ASTKindStart { + stack.MarkComplete(k) + } + break loop + case SkipTokenState: + // When skipping a token, the previous state was popped off the stack. 
+ // To maintain the correct state, the previous state will be pushed + // onto the stack. + stack.Push(k) + case StatementState: + if k.Kind != ASTKindStart { + stack.MarkComplete(k) + } + expr := newExpression(tok) + stack.Push(expr) + case StatementPrimeState: + if tok.Type() != TokenOp { + stack.MarkComplete(k) + continue + } + + if k.Kind != ASTKindExpr { + return nil, NewParseError( + fmt.Sprintf("invalid expression: expected Expr type, but found %T type", k), + ) + } + + k = trimSpaces(k) + expr := newEqualExpr(k, tok) + stack.Push(expr) + case ValueState: + // ValueState requires the previous state to either be an equal expression + // or an expression statement. + // + // This grammar occurs when the RHS is a number, word, or quoted string. + // equal_expr -> lit op equal_expr' + // equal_expr' -> number | string | quoted_string + // quoted_string -> " quoted_string' + // quoted_string' -> string quoted_string_end + // quoted_string_end -> " + // + // otherwise + // expr_stmt -> equal_expr (expr_stmt')* + // expr_stmt' -> ws S | op S | MarkComplete + // S -> equal_expr' expr_stmt' + switch k.Kind { + case ASTKindEqualExpr: + // assiging a value to some key + k.AppendChild(newExpression(tok)) + stack.Push(newExprStatement(k)) + case ASTKindExpr: + k.Root.raw = append(k.Root.raw, tok.Raw()...) + stack.Push(k) + case ASTKindExprStatement: + root := k.GetRoot() + children := root.GetChildren() + if len(children) == 0 { + return nil, NewParseError( + fmt.Sprintf("invalid expression: AST contains no children %s", k.Kind), + ) + } + + rhs := children[len(children)-1] + + if rhs.Root.ValueType != QuotedStringType { + rhs.Root.ValueType = StringType + rhs.Root.raw = append(rhs.Root.raw, tok.Raw()...) + + } + + children[len(children)-1] = rhs + k.SetChildren(children) + + stack.Push(k) + } + case OpenScopeState: + if !runeCompare(tok.Raw(), openBrace) { + return nil, NewParseError("expected '['") + } + + stmt := newStatement() + stack.Push(stmt) + case CloseScopeState: + if !runeCompare(tok.Raw(), closeBrace) { + return nil, NewParseError("expected ']'") + } + + k = trimSpaces(k) + stack.Push(newCompletedSectionStatement(k)) + case SectionState: + var stmt AST + + switch k.Kind { + case ASTKindStatement: + // If there are multiple literals inside of a scope declaration, + // then the current token's raw value will be appended to the Name. + // + // This handles cases like [ profile default ] + // + // k will represent a SectionStatement with the children representing + // the label of the section + stmt = newSectionStatement(tok) + case ASTKindSectionStatement: + k.Root.raw = append(k.Root.raw, tok.Raw()...) 
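+				// Subsequent tokens of a multi-word section name, e.g.
+				// [profile default], append to the existing label's raw runes.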
+ stmt = k + default: + return nil, NewParseError( + fmt.Sprintf("invalid statement: expected statement: %v", k.Kind), + ) + } + + stack.Push(stmt) + case MarkCompleteState: + if k.Kind != ASTKindStart { + stack.MarkComplete(k) + } + + if stack.Len() == 0 { + stack.Push(start) + } + case SkipState: + stack.Push(newSkipStatement(k)) + s.Skip() + case CommentState: + if k.Kind == ASTKindStart { + stack.Push(k) + } else { + stack.MarkComplete(k) + } + + stmt := newCommentStatement(tok) + stack.Push(stmt) + default: + return nil, NewParseError( + fmt.Sprintf("invalid state with ASTKind %v and TokenType %v", + k, tok.Type())) + } + + if len(tokens) > 0 { + tokens = tokens[1:] + } + } + + // this occurs when a statement has not been completed + if stack.top > 1 { + return nil, NewParseError(fmt.Sprintf("incomplete ini expression")) + } + + // returns a sublist which excludes the start symbol + return stack.List(), nil +} + +// trimSpaces will trim spaces on the left and right hand side of +// the literal. +func trimSpaces(k AST) AST { + // trim left hand side of spaces + for i := 0; i < len(k.Root.raw); i++ { + if !isWhitespace(k.Root.raw[i]) { + break + } + + k.Root.raw = k.Root.raw[1:] + i-- + } + + // trim right hand side of spaces + for i := len(k.Root.raw) - 1; i >= 0; i-- { + if !isWhitespace(k.Root.raw[i]) { + break + } + + k.Root.raw = k.Root.raw[:len(k.Root.raw)-1] + } + + return k +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/literal_tokens.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/literal_tokens.go new file mode 100644 index 000000000..24df543d3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/literal_tokens.go @@ -0,0 +1,324 @@ +package ini + +import ( + "fmt" + "strconv" + "strings" +) + +var ( + runesTrue = []rune("true") + runesFalse = []rune("false") +) + +var literalValues = [][]rune{ + runesTrue, + runesFalse, +} + +func isBoolValue(b []rune) bool { + for _, lv := range literalValues { + if isLitValue(lv, b) { + return true + } + } + return false +} + +func isLitValue(want, have []rune) bool { + if len(have) < len(want) { + return false + } + + for i := 0; i < len(want); i++ { + if want[i] != have[i] { + return false + } + } + + return true +} + +// isNumberValue will return whether not the leading characters in +// a byte slice is a number. A number is delimited by whitespace or +// the newline token. +// +// A number is defined to be in a binary, octal, decimal (int | float), hex format, +// or in scientific notation. 
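+//
+// Illustrative results (editor's sketch, not upstream documentation):
+//
+//	isNumberValue([]rune("123"))  // true: decimal integer
+//	isNumberValue([]rune("1e4"))  // true: scientific notation
+//	isNumberValue([]rune("0x1F")) // true: hex
+//	isNumberValue([]rune("abc"))  // false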
+func isNumberValue(b []rune) bool { + negativeIndex := 0 + helper := numberHelper{} + needDigit := false + + for i := 0; i < len(b); i++ { + negativeIndex++ + + switch b[i] { + case '-': + if helper.IsNegative() || negativeIndex != 1 { + return false + } + helper.Determine(b[i]) + needDigit = true + continue + case 'e', 'E': + if err := helper.Determine(b[i]); err != nil { + return false + } + negativeIndex = 0 + needDigit = true + continue + case 'b': + if helper.numberFormat == hex { + break + } + fallthrough + case 'o', 'x': + needDigit = true + if i == 0 { + return false + } + + fallthrough + case '.': + if err := helper.Determine(b[i]); err != nil { + return false + } + needDigit = true + continue + } + + if i > 0 && (isNewline(b[i:]) || isWhitespace(b[i])) { + return !needDigit + } + + if !helper.CorrectByte(b[i]) { + return false + } + needDigit = false + } + + return !needDigit +} + +func isValid(b []rune) (bool, int, error) { + if len(b) == 0 { + // TODO: should probably return an error + return false, 0, nil + } + + return isValidRune(b[0]), 1, nil +} + +func isValidRune(r rune) bool { + return r != ':' && r != '=' && r != '[' && r != ']' && r != ' ' && r != '\n' +} + +// ValueType is an enum that will signify what type +// the Value is +type ValueType int + +func (v ValueType) String() string { + switch v { + case NoneType: + return "NONE" + case DecimalType: + return "FLOAT" + case IntegerType: + return "INT" + case StringType: + return "STRING" + case BoolType: + return "BOOL" + } + + return "" +} + +// ValueType enums +const ( + NoneType = ValueType(iota) + DecimalType + IntegerType + StringType + QuotedStringType + BoolType +) + +// Value is a union container +type Value struct { + Type ValueType + raw []rune + + integer int64 + decimal float64 + boolean bool + str string +} + +func newValue(t ValueType, base int, raw []rune) (Value, error) { + v := Value{ + Type: t, + raw: raw, + } + var err error + + switch t { + case DecimalType: + v.decimal, err = strconv.ParseFloat(string(raw), 64) + case IntegerType: + if base != 10 { + raw = raw[2:] + } + + v.integer, err = strconv.ParseInt(string(raw), base, 64) + case StringType: + v.str = string(raw) + case QuotedStringType: + v.str = string(raw[1 : len(raw)-1]) + case BoolType: + v.boolean = runeCompare(v.raw, runesTrue) + } + + // issue 2253 + // + // if the value trying to be parsed is too large, then we will use + // the 'StringType' and raw value instead. + if nerr, ok := err.(*strconv.NumError); ok && nerr.Err == strconv.ErrRange { + v.Type = StringType + v.str = string(raw) + err = nil + } + + return v, err +} + +// Append will append values and change the type to a string +// type. +func (v *Value) Append(tok Token) { + r := tok.Raw() + if v.Type != QuotedStringType { + v.Type = StringType + r = tok.raw[1 : len(tok.raw)-1] + } + if tok.Type() != TokenLit { + v.raw = append(v.raw, tok.Raw()...) + } else { + v.raw = append(v.raw, r...) 
+ } +} + +func (v Value) String() string { + switch v.Type { + case DecimalType: + return fmt.Sprintf("decimal: %f", v.decimal) + case IntegerType: + return fmt.Sprintf("integer: %d", v.integer) + case StringType: + return fmt.Sprintf("string: %s", string(v.raw)) + case QuotedStringType: + return fmt.Sprintf("quoted string: %s", string(v.raw)) + case BoolType: + return fmt.Sprintf("bool: %t", v.boolean) + default: + return "union not set" + } +} + +func newLitToken(b []rune) (Token, int, error) { + n := 0 + var err error + + token := Token{} + if b[0] == '"' { + n, err = getStringValue(b) + if err != nil { + return token, n, err + } + + token = newToken(TokenLit, b[:n], QuotedStringType) + } else if isNumberValue(b) { + var base int + base, n, err = getNumericalValue(b) + if err != nil { + return token, 0, err + } + + value := b[:n] + vType := IntegerType + if contains(value, '.') || hasExponent(value) { + vType = DecimalType + } + token = newToken(TokenLit, value, vType) + token.base = base + } else if isBoolValue(b) { + n, err = getBoolValue(b) + + token = newToken(TokenLit, b[:n], BoolType) + } else { + n, err = getValue(b) + token = newToken(TokenLit, b[:n], StringType) + } + + return token, n, err +} + +// IntValue returns an integer value +func (v Value) IntValue() int64 { + return v.integer +} + +// FloatValue returns a float value +func (v Value) FloatValue() float64 { + return v.decimal +} + +// BoolValue returns a bool value +func (v Value) BoolValue() bool { + return v.boolean +} + +func isTrimmable(r rune) bool { + switch r { + case '\n', ' ': + return true + } + return false +} + +// StringValue returns the string value +func (v Value) StringValue() string { + switch v.Type { + case StringType: + return strings.TrimFunc(string(v.raw), isTrimmable) + case QuotedStringType: + // preserve all characters in the quotes + return string(removeEscapedCharacters(v.raw[1 : len(v.raw)-1])) + default: + return strings.TrimFunc(string(v.raw), isTrimmable) + } +} + +func contains(runes []rune, c rune) bool { + for i := 0; i < len(runes); i++ { + if runes[i] == c { + return true + } + } + + return false +} + +func runeCompare(v1 []rune, v2 []rune) bool { + if len(v1) != len(v2) { + return false + } + + for i := 0; i < len(v1); i++ { + if v1[i] != v2[i] { + return false + } + } + + return true +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/newline_token.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/newline_token.go new file mode 100644 index 000000000..e52ac399f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/newline_token.go @@ -0,0 +1,30 @@ +package ini + +func isNewline(b []rune) bool { + if len(b) == 0 { + return false + } + + if b[0] == '\n' { + return true + } + + if len(b) < 2 { + return false + } + + return b[0] == '\r' && b[1] == '\n' +} + +func newNewlineToken(b []rune) (Token, int, error) { + i := 1 + if b[0] == '\r' && isNewline(b[1:]) { + i++ + } + + if !isNewline([]rune(b[:i])) { + return emptyToken, 0, NewParseError("invalid new line token") + } + + return newToken(TokenNL, b[:i], NoneType), i, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/number_helper.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/number_helper.go new file mode 100644 index 000000000..a45c0bc56 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/number_helper.go @@ -0,0 +1,152 @@ +package ini + +import ( + "bytes" + "fmt" + "strconv" +) + +const ( + none = numberFormat(iota) + binary + octal + decimal + hex + exponent +) + +type 
numberFormat int + +// numberHelper is used to dictate what format a number is in +// and what to do for negative values. Since -1e-4 is a valid +// number, we cannot just simply check for duplicate negatives. +type numberHelper struct { + numberFormat numberFormat + + negative bool + negativeExponent bool +} + +func (b numberHelper) Exists() bool { + return b.numberFormat != none +} + +func (b numberHelper) IsNegative() bool { + return b.negative || b.negativeExponent +} + +func (b *numberHelper) Determine(c rune) error { + if b.Exists() { + return NewParseError(fmt.Sprintf("multiple number formats: 0%v", string(c))) + } + + switch c { + case 'b': + b.numberFormat = binary + case 'o': + b.numberFormat = octal + case 'x': + b.numberFormat = hex + case 'e', 'E': + b.numberFormat = exponent + case '-': + if b.numberFormat != exponent { + b.negative = true + } else { + b.negativeExponent = true + } + case '.': + b.numberFormat = decimal + default: + return NewParseError(fmt.Sprintf("invalid number character: %v", string(c))) + } + + return nil +} + +func (b numberHelper) CorrectByte(c rune) bool { + switch { + case b.numberFormat == binary: + if !isBinaryByte(c) { + return false + } + case b.numberFormat == octal: + if !isOctalByte(c) { + return false + } + case b.numberFormat == hex: + if !isHexByte(c) { + return false + } + case b.numberFormat == decimal: + if !isDigit(c) { + return false + } + case b.numberFormat == exponent: + if !isDigit(c) { + return false + } + case b.negativeExponent: + if !isDigit(c) { + return false + } + case b.negative: + if !isDigit(c) { + return false + } + default: + if !isDigit(c) { + return false + } + } + + return true +} + +func (b numberHelper) Base() int { + switch b.numberFormat { + case binary: + return 2 + case octal: + return 8 + case hex: + return 16 + default: + return 10 + } +} + +func (b numberHelper) String() string { + buf := bytes.Buffer{} + i := 0 + + switch b.numberFormat { + case binary: + i++ + buf.WriteString(strconv.Itoa(i) + ": binary format\n") + case octal: + i++ + buf.WriteString(strconv.Itoa(i) + ": octal format\n") + case hex: + i++ + buf.WriteString(strconv.Itoa(i) + ": hex format\n") + case exponent: + i++ + buf.WriteString(strconv.Itoa(i) + ": exponent format\n") + default: + i++ + buf.WriteString(strconv.Itoa(i) + ": integer format\n") + } + + if b.negative { + i++ + buf.WriteString(strconv.Itoa(i) + ": negative format\n") + } + + if b.negativeExponent { + i++ + buf.WriteString(strconv.Itoa(i) + ": negative exponent format\n") + } + + return buf.String() +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/op_tokens.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/op_tokens.go new file mode 100644 index 000000000..8a84c7cbe --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/op_tokens.go @@ -0,0 +1,39 @@ +package ini + +import ( + "fmt" +) + +var ( + equalOp = []rune("=") + equalColonOp = []rune(":") +) + +func isOp(b []rune) bool { + if len(b) == 0 { + return false + } + + switch b[0] { + case '=': + return true + case ':': + return true + default: + return false + } +} + +func newOpToken(b []rune) (Token, int, error) { + tok := Token{} + + switch b[0] { + case '=': + tok = newToken(TokenOp, equalOp, NoneType) + case ':': + tok = newToken(TokenOp, equalColonOp, NoneType) + default: + return tok, 0, NewParseError(fmt.Sprintf("unexpected op type, %v", b[0])) + } + return tok, 1, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_error.go 
b/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_error.go
new file mode 100644
index 000000000..457287019
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_error.go
@@ -0,0 +1,43 @@
+package ini
+
+import "fmt"
+
+const (
+	// ErrCodeParseError is returned when a parsing error
+	// has occurred.
+	ErrCodeParseError = "INIParseError"
+)
+
+// ParseError is an error which is returned during any part of
+// the parsing process.
+type ParseError struct {
+	msg string
+}
+
+// NewParseError will return a new ParseError where message
+// is the description of the error.
+func NewParseError(message string) *ParseError {
+	return &ParseError{
+		msg: message,
+	}
+}
+
+// Code returns ErrCodeParseError.
+func (err *ParseError) Code() string {
+	return ErrCodeParseError
+}
+
+// Message returns the error's message.
+func (err *ParseError) Message() string {
+	return err.msg
+}
+
+// OrigError returns nil, since a ParseError never wraps an
+// underlying error.
+func (err *ParseError) OrigError() error {
+	return nil
+}
+
+func (err *ParseError) Error() string {
+	return fmt.Sprintf("%s: %s", err.Code(), err.Message())
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_stack.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_stack.go
new file mode 100644
index 000000000..7f01cf7c7
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_stack.go
@@ -0,0 +1,60 @@
+package ini
+
+import (
+	"bytes"
+	"fmt"
+)
+
+// ParseStack holds the working stack of ASTs (container) together with
+// the list of ASTs that have been successfully parsed.
+type ParseStack struct {
+	top       int
+	container []AST
+	list      []AST
+	index     int
+}
+
+func newParseStack(sizeContainer, sizeList int) ParseStack {
+	return ParseStack{
+		container: make([]AST, sizeContainer),
+		list:      make([]AST, sizeList),
+	}
+}
+
+// Pop will return and truncate the last container element.
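+//
+// Illustrative pairing with Push (editor's sketch; values are hypothetical):
+//
+//	stack := newParseStack(3, 10)
+//	stack.Push(Start) // Start is the ASTKindStart sentinel used by parse
+//	k := stack.Pop()  // k.Kind == ASTKindStart; stack.Len() == 0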
+func (s *ParseStack) Pop() AST { + s.top-- + return s.container[s.top] +} + +// Push will add the new AST to the container +func (s *ParseStack) Push(ast AST) { + s.container[s.top] = ast + s.top++ +} + +// MarkComplete will append the AST to the list of completed statements +func (s *ParseStack) MarkComplete(ast AST) { + s.list[s.index] = ast + s.index++ +} + +// List will return the completed statements +func (s ParseStack) List() []AST { + return s.list[:s.index] +} + +// Len will return the length of the container +func (s *ParseStack) Len() int { + return s.top +} + +func (s ParseStack) String() string { + buf := bytes.Buffer{} + for i, node := range s.list { + buf.WriteString(fmt.Sprintf("%d: %v\n", i+1, node)) + } + + return buf.String() +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/sep_tokens.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/sep_tokens.go new file mode 100644 index 000000000..f82095ba2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/sep_tokens.go @@ -0,0 +1,41 @@ +package ini + +import ( + "fmt" +) + +var ( + emptyRunes = []rune{} +) + +func isSep(b []rune) bool { + if len(b) == 0 { + return false + } + + switch b[0] { + case '[', ']': + return true + default: + return false + } +} + +var ( + openBrace = []rune("[") + closeBrace = []rune("]") +) + +func newSepToken(b []rune) (Token, int, error) { + tok := Token{} + + switch b[0] { + case '[': + tok = newToken(TokenSep, openBrace, NoneType) + case ']': + tok = newToken(TokenSep, closeBrace, NoneType) + default: + return tok, 0, NewParseError(fmt.Sprintf("unexpected sep type, %v", b[0])) + } + return tok, 1, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/skipper.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/skipper.go new file mode 100644 index 000000000..6bb696447 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/skipper.go @@ -0,0 +1,45 @@ +package ini + +// skipper is used to skip certain blocks of an ini file. +// Currently skipper is used to skip nested blocks of ini +// files. See example below +// +// [ foo ] +// nested = ; this section will be skipped +// a=b +// c=d +// bar=baz ; this will be included +type skipper struct { + shouldSkip bool + TokenSet bool + prevTok Token +} + +func newSkipper() skipper { + return skipper{ + prevTok: emptyToken, + } +} + +func (s *skipper) ShouldSkip(tok Token) bool { + if s.shouldSkip && + s.prevTok.Type() == TokenNL && + tok.Type() != TokenWS { + + s.Continue() + return false + } + s.prevTok = tok + + return s.shouldSkip +} + +func (s *skipper) Skip() { + s.shouldSkip = true + s.prevTok = emptyToken +} + +func (s *skipper) Continue() { + s.shouldSkip = false + s.prevTok = emptyToken +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/statement.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/statement.go new file mode 100644 index 000000000..18f3fe893 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/statement.go @@ -0,0 +1,35 @@ +package ini + +// Statement is an empty AST mostly used for transitioning states. +func newStatement() AST { + return newAST(ASTKindStatement, AST{}) +} + +// SectionStatement represents a section AST +func newSectionStatement(tok Token) AST { + return newASTWithRootToken(ASTKindSectionStatement, tok) +} + +// ExprStatement represents a completed expression AST +func newExprStatement(ast AST) AST { + return newAST(ASTKindExprStatement, ast) +} + +// CommentStatement represents a comment in the ini definition. 
+// +// grammar: +// comment -> #comment' | ;comment' +// comment' -> epsilon | value +func newCommentStatement(tok Token) AST { + return newAST(ASTKindCommentStatement, newExpression(tok)) +} + +// CompletedSectionStatement represents a completed section +func newCompletedSectionStatement(ast AST) AST { + return newAST(ASTKindCompletedSectionStatement, ast) +} + +// SkipStatement is used to skip whole statements +func newSkipStatement(ast AST) AST { + return newAST(ASTKindSkipStatement, ast) +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/value_util.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/value_util.go new file mode 100644 index 000000000..305999d29 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/value_util.go @@ -0,0 +1,284 @@ +package ini + +import ( + "fmt" +) + +// getStringValue will return a quoted string and the amount +// of bytes read +// +// an error will be returned if the string is not properly formatted +func getStringValue(b []rune) (int, error) { + if b[0] != '"' { + return 0, NewParseError("strings must start with '\"'") + } + + endQuote := false + i := 1 + + for ; i < len(b) && !endQuote; i++ { + if escaped := isEscaped(b[:i], b[i]); b[i] == '"' && !escaped { + endQuote = true + break + } else if escaped { + /*c, err := getEscapedByte(b[i]) + if err != nil { + return 0, err + } + + b[i-1] = c + b = append(b[:i], b[i+1:]...) + i--*/ + + continue + } + } + + if !endQuote { + return 0, NewParseError("missing '\"' in string value") + } + + return i + 1, nil +} + +// getBoolValue will return a boolean and the amount +// of bytes read +// +// an error will be returned if the boolean is not of a correct +// value +func getBoolValue(b []rune) (int, error) { + if len(b) < 4 { + return 0, NewParseError("invalid boolean value") + } + + n := 0 + for _, lv := range literalValues { + if len(lv) > len(b) { + continue + } + + if isLitValue(lv, b) { + n = len(lv) + } + } + + if n == 0 { + return 0, NewParseError("invalid boolean value") + } + + return n, nil +} + +// getNumericalValue will return a numerical string, the amount +// of bytes read, and the base of the number +// +// an error will be returned if the number is not of a correct +// value +func getNumericalValue(b []rune) (int, int, error) { + if !isDigit(b[0]) { + return 0, 0, NewParseError("invalid digit value") + } + + i := 0 + helper := numberHelper{} + +loop: + for negativeIndex := 0; i < len(b); i++ { + negativeIndex++ + + if !isDigit(b[i]) { + switch b[i] { + case '-': + if helper.IsNegative() || negativeIndex != 1 { + return 0, 0, NewParseError("parse error '-'") + } + + n := getNegativeNumber(b[i:]) + i += (n - 1) + helper.Determine(b[i]) + continue + case '.': + if err := helper.Determine(b[i]); err != nil { + return 0, 0, err + } + case 'e', 'E': + if err := helper.Determine(b[i]); err != nil { + return 0, 0, err + } + + negativeIndex = 0 + case 'b': + if helper.numberFormat == hex { + break + } + fallthrough + case 'o', 'x': + if i == 0 && b[i] != '0' { + return 0, 0, NewParseError("incorrect base format, expected leading '0'") + } + + if i != 1 { + return 0, 0, NewParseError(fmt.Sprintf("incorrect base format found %s at %d index", string(b[i]), i)) + } + + if err := helper.Determine(b[i]); err != nil { + return 0, 0, err + } + default: + if isWhitespace(b[i]) { + break loop + } + + if isNewline(b[i:]) { + break loop + } + + if !(helper.numberFormat == hex && isHexByte(b[i])) { + if i+2 < len(b) && !isNewline(b[i:i+2]) { + return 0, 0, NewParseError("invalid numerical 
character") + } else if !isNewline([]rune{b[i]}) { + return 0, 0, NewParseError("invalid numerical character") + } + + break loop + } + } + } + } + + return helper.Base(), i, nil +} + +// isDigit will return whether or not something is an integer +func isDigit(b rune) bool { + return b >= '0' && b <= '9' +} + +func hasExponent(v []rune) bool { + return contains(v, 'e') || contains(v, 'E') +} + +func isBinaryByte(b rune) bool { + switch b { + case '0', '1': + return true + default: + return false + } +} + +func isOctalByte(b rune) bool { + switch b { + case '0', '1', '2', '3', '4', '5', '6', '7': + return true + default: + return false + } +} + +func isHexByte(b rune) bool { + if isDigit(b) { + return true + } + return (b >= 'A' && b <= 'F') || + (b >= 'a' && b <= 'f') +} + +func getValue(b []rune) (int, error) { + i := 0 + + for i < len(b) { + if isNewline(b[i:]) { + break + } + + if isOp(b[i:]) { + break + } + + valid, n, err := isValid(b[i:]) + if err != nil { + return 0, err + } + + if !valid { + break + } + + i += n + } + + return i, nil +} + +// getNegativeNumber will return a negative number from a +// byte slice. This will iterate through all characters until +// a non-digit has been found. +func getNegativeNumber(b []rune) int { + if b[0] != '-' { + return 0 + } + + i := 1 + for ; i < len(b); i++ { + if !isDigit(b[i]) { + return i + } + } + + return i +} + +// isEscaped will return whether or not the character is an escaped +// character. +func isEscaped(value []rune, b rune) bool { + if len(value) == 0 { + return false + } + + switch b { + case '\'': // single quote + case '"': // quote + case 'n': // newline + case 't': // tab + case '\\': // backslash + default: + return false + } + + return value[len(value)-1] == '\\' +} + +func getEscapedByte(b rune) (rune, error) { + switch b { + case '\'': // single quote + return '\'', nil + case '"': // quote + return '"', nil + case 'n': // newline + return '\n', nil + case 't': // table + return '\t', nil + case '\\': // backslash + return '\\', nil + default: + return b, NewParseError(fmt.Sprintf("invalid escaped character %c", b)) + } +} + +func removeEscapedCharacters(b []rune) []rune { + for i := 0; i < len(b); i++ { + if isEscaped(b[:i], b[i]) { + c, err := getEscapedByte(b[i]) + if err != nil { + return b + } + + b[i-1] = c + b = append(b[:i], b[i+1:]...) + i-- + } + } + + return b +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go new file mode 100644 index 000000000..94841c324 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go @@ -0,0 +1,166 @@ +package ini + +import ( + "fmt" + "sort" +) + +// Visitor is an interface used by walkers that will +// traverse an array of ASTs. +type Visitor interface { + VisitExpr(AST) error + VisitStatement(AST) error +} + +// DefaultVisitor is used to visit statements and expressions +// and ensure that they are both of the correct format. +// In addition, upon visiting this will build sections and populate +// the Sections field which can be used to retrieve profile +// configuration. +type DefaultVisitor struct { + scope string + Sections Sections +} + +// NewDefaultVisitor return a DefaultVisitor +func NewDefaultVisitor() *DefaultVisitor { + return &DefaultVisitor{ + Sections: Sections{ + container: map[string]Section{}, + }, + } +} + +// VisitExpr visits expressions... 
+func (v *DefaultVisitor) VisitExpr(expr AST) error { + t := v.Sections.container[v.scope] + if t.values == nil { + t.values = values{} + } + + switch expr.Kind { + case ASTKindExprStatement: + opExpr := expr.GetRoot() + switch opExpr.Kind { + case ASTKindEqualExpr: + children := opExpr.GetChildren() + if len(children) <= 1 { + return NewParseError("unexpected token type") + } + + rhs := children[1] + + if rhs.Root.Type() != TokenLit { + return NewParseError("unexpected token type") + } + + key := EqualExprKey(opExpr) + v, err := newValue(rhs.Root.ValueType, rhs.Root.base, rhs.Root.Raw()) + if err != nil { + return err + } + + t.values[key] = v + default: + return NewParseError(fmt.Sprintf("unsupported expression %v", expr)) + } + default: + return NewParseError(fmt.Sprintf("unsupported expression %v", expr)) + } + + v.Sections.container[v.scope] = t + return nil +} + +// VisitStatement visits statements... +func (v *DefaultVisitor) VisitStatement(stmt AST) error { + switch stmt.Kind { + case ASTKindCompletedSectionStatement: + child := stmt.GetRoot() + if child.Kind != ASTKindSectionStatement { + return NewParseError(fmt.Sprintf("unsupported child statement: %T", child)) + } + + name := string(child.Root.Raw()) + v.Sections.container[name] = Section{} + v.scope = name + default: + return NewParseError(fmt.Sprintf("unsupported statement: %s", stmt.Kind)) + } + + return nil +} + +// Sections is a map of Section structures that represent +// a configuration. +type Sections struct { + container map[string]Section +} + +// GetSection will return section p. If section p does not exist, +// false will be returned in the second parameter. +func (t Sections) GetSection(p string) (Section, bool) { + v, ok := t.container[p] + return v, ok +} + +// values represents a map of union values. +type values map[string]Value + +// List will return a list of all sections that were successfully +// parsed. +func (t Sections) List() []string { + keys := make([]string, len(t.container)) + i := 0 + for k := range t.container { + keys[i] = k + i++ + } + + sort.Strings(keys) + return keys +} + +// Section contains a name and values. This represent +// a sectioned entry in a configuration file. +type Section struct { + Name string + values values +} + +// Has will return whether or not an entry exists in a given section +func (t Section) Has(k string) bool { + _, ok := t.values[k] + return ok +} + +// ValueType will returned what type the union is set to. If +// k was not found, the NoneType will be returned. +func (t Section) ValueType(k string) (ValueType, bool) { + v, ok := t.values[k] + return v.Type, ok +} + +// Bool returns a bool value at k +func (t Section) Bool(k string) bool { + return t.values[k].BoolValue() +} + +// Int returns an integer value at k +func (t Section) Int(k string) int64 { + return t.values[k].IntValue() +} + +// Float64 returns a float value at k +func (t Section) Float64(k string) float64 { + return t.values[k].FloatValue() +} + +// String returns the string value at k +func (t Section) String(k string) string { + _, ok := t.values[k] + if !ok { + return "" + } + return t.values[k].StringValue() +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/walker.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/walker.go new file mode 100644 index 000000000..99915f7f7 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/walker.go @@ -0,0 +1,25 @@ +package ini + +// Walk will traverse the AST using the v, the Visitor. 
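+//
+// Illustrative end-to-end sketch (editor's note; the input is hypothetical):
+//
+//	tree, err := ParseAST(strings.NewReader("[default]\nregion = us-west-2\n"))
+//	if err == nil {
+//		v := NewDefaultVisitor()
+//		err = Walk(tree, v) // v.Sections now describes the "default" section
+//	}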
+func Walk(tree []AST, v Visitor) error { + for _, node := range tree { + switch node.Kind { + case ASTKindExpr, + ASTKindExprStatement: + + if err := v.VisitExpr(node); err != nil { + return err + } + case ASTKindStatement, + ASTKindCompletedSectionStatement, + ASTKindNestedSectionStatement, + ASTKindCompletedNestedSectionStatement: + + if err := v.VisitStatement(node); err != nil { + return err + } + } + } + + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/ws_token.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/ws_token.go new file mode 100644 index 000000000..7ffb4ae06 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/ws_token.go @@ -0,0 +1,24 @@ +package ini + +import ( + "unicode" +) + +// isWhitespace will return whether or not the character is +// a whitespace character. +// +// Whitespace is defined as a space or tab. +func isWhitespace(c rune) bool { + return unicode.IsSpace(c) && c != '\n' && c != '\r' +} + +func newWSToken(b []rune) (Token, int, error) { + i := 0 + for ; i < len(b); i++ { + if !isWhitespace(b[i]) { + break + } + } + + return newToken(TokenWS, b[:i], NoneType), i, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.6.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.6.go new file mode 100644 index 000000000..5aa9137e0 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.6.go @@ -0,0 +1,10 @@ +// +build !go1.7 + +package sdkio + +// Copy of Go 1.7 io package's Seeker constants. +const ( + SeekStart = 0 // seek relative to the origin of the file + SeekCurrent = 1 // seek relative to the current offset + SeekEnd = 2 // seek relative to the end +) diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.7.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.7.go new file mode 100644 index 000000000..e5f005613 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.7.go @@ -0,0 +1,12 @@ +// +build go1.7 + +package sdkio + +import "io" + +// Alias for Go 1.7 io package Seeker constants +const ( + SeekStart = io.SeekStart // seek relative to the origin of the file + SeekCurrent = io.SeekCurrent // seek relative to the current offset + SeekEnd = io.SeekEnd // seek relative to the end +) diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/locked_source.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/locked_source.go new file mode 100644 index 000000000..0c9802d87 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/locked_source.go @@ -0,0 +1,29 @@ +package sdkrand + +import ( + "math/rand" + "sync" + "time" +) + +// lockedSource is a thread-safe implementation of rand.Source +type lockedSource struct { + lk sync.Mutex + src rand.Source +} + +func (r *lockedSource) Int63() (n int64) { + r.lk.Lock() + n = r.src.Int63() + r.lk.Unlock() + return +} + +func (r *lockedSource) Seed(seed int64) { + r.lk.Lock() + r.src.Seed(seed) + r.lk.Unlock() +} + +// SeededRand is a new RNG using a thread safe implementation of rand.Source +var SeededRand = rand.New(&lockedSource{src: rand.NewSource(time.Now().UnixNano())}) diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkuri/path.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkuri/path.go new file mode 100644 index 000000000..38ea61afe --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkuri/path.go @@ -0,0 +1,23 @@ +package sdkuri + +import ( + "path" + "strings" +) + +// PathJoin will join the elements of the path delimited by the "/" +// 
character. Similar to path.Join with the exception the trailing "/" +// character is preserved if present. +func PathJoin(elems ...string) string { + if len(elems) == 0 { + return "" + } + + hasTrailing := strings.HasSuffix(elems[len(elems)-1], "/") + str := path.Join(elems...) + if hasTrailing && str != "/" { + str += "/" + } + + return str +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/ecs_container.go b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/ecs_container.go new file mode 100644 index 000000000..7da8a49ce --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/ecs_container.go @@ -0,0 +1,12 @@ +package shareddefaults + +const ( + // ECSCredsProviderEnvVar is an environmental variable key used to + // determine which path needs to be hit. + ECSCredsProviderEnvVar = "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI" +) + +// ECSContainerCredentialsURI is the endpoint to retrieve container +// credentials. This can be overridden to test to ensure the credential process +// is behaving correctly. +var ECSContainerCredentialsURI = "http://169.254.170.2" diff --git a/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go new file mode 100644 index 000000000..ebcbc2b40 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go @@ -0,0 +1,40 @@ +package shareddefaults + +import ( + "os" + "path/filepath" + "runtime" +) + +// SharedCredentialsFilename returns the SDK's default file path +// for the shared credentials file. +// +// Builds the shared config file path based on the OS's platform. +// +// - Linux/Unix: $HOME/.aws/credentials +// - Windows: %USERPROFILE%\.aws\credentials +func SharedCredentialsFilename() string { + return filepath.Join(UserHomeDir(), ".aws", "credentials") +} + +// SharedConfigFilename returns the SDK's default file path for +// the shared config file. +// +// Builds the shared config file path based on the OS's platform. +// +// - Linux/Unix: $HOME/.aws/config +// - Windows: %USERPROFILE%\.aws\config +func SharedConfigFilename() string { + return filepath.Join(UserHomeDir(), ".aws", "config") +} + +// UserHomeDir returns the home directory for the user the process is +// running under. +func UserHomeDir() string { + if runtime.GOOS == "windows" { // Windows + return os.Getenv("USERPROFILE") + } + + // *nix + return os.Getenv("HOME") +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/host.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/host.go new file mode 100644 index 000000000..d7d42db0a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/host.go @@ -0,0 +1,68 @@ +package protocol + +import ( + "strings" + + "github.com/aws/aws-sdk-go/aws/request" +) + +// ValidateEndpointHostHandler is a request handler that will validate the +// request endpoint's hosts is a valid RFC 3986 host. +var ValidateEndpointHostHandler = request.NamedHandler{ + Name: "awssdk.protocol.ValidateEndpointHostHandler", + Fn: func(r *request.Request) { + err := ValidateEndpointHost(r.Operation.Name, r.HTTPRequest.URL.Host) + if err != nil { + r.Error = err + } + }, +} + +// ValidateEndpointHost validates that the host string passed in is a valid RFC +// 3986 host. Returns error if the host is not valid. 
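+//
+// Illustrative results (editor's sketch):
+//
+//	ValidateEndpointHost("MyOp", "service.amazonaws.com") // nil
+//	ValidateEndpointHost("MyOp", "bad label!.example")    // ErrInvalidParams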
+func ValidateEndpointHost(opName, host string) error { + paramErrs := request.ErrInvalidParams{Context: opName} + labels := strings.Split(host, ".") + + for i, label := range labels { + if i == len(labels)-1 && len(label) == 0 { + // Allow trailing dot for FQDN hosts. + continue + } + + if !ValidHostLabel(label) { + paramErrs.Add(request.NewErrParamFormat( + "endpoint host label", "[a-zA-Z0-9-]{1,63}", label)) + } + } + + if len(host) > 255 { + paramErrs.Add(request.NewErrParamMaxLen( + "endpoint host", 255, host, + )) + } + + if paramErrs.Len() > 0 { + return paramErrs + } + return nil +} + +// ValidHostLabel returns if the label is a valid RFC 3986 host label. +func ValidHostLabel(label string) bool { + if l := len(label); l == 0 || l > 63 { + return false + } + for _, r := range label { + switch { + case r >= '0' && r <= '9': + case r >= 'A' && r <= 'Z': + case r >= 'a' && r <= 'z': + case r == '-': + default: + return false + } + } + + return true +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/host_prefix.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/host_prefix.go new file mode 100644 index 000000000..915b0fcaf --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/host_prefix.go @@ -0,0 +1,54 @@ +package protocol + +import ( + "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" +) + +// HostPrefixHandlerName is the handler name for the host prefix request +// handler. +const HostPrefixHandlerName = "awssdk.endpoint.HostPrefixHandler" + +// NewHostPrefixHandler constructs a build handler +func NewHostPrefixHandler(prefix string, labelsFn func() map[string]string) request.NamedHandler { + builder := HostPrefixBuilder{ + Prefix: prefix, + LabelsFn: labelsFn, + } + + return request.NamedHandler{ + Name: HostPrefixHandlerName, + Fn: builder.Build, + } +} + +// HostPrefixBuilder provides the request handler to expand and prepend +// the host prefix into the operation's request endpoint host. +type HostPrefixBuilder struct { + Prefix string + LabelsFn func() map[string]string +} + +// Build updates the passed in Request with the HostPrefix template expanded. +func (h HostPrefixBuilder) Build(r *request.Request) { + if aws.BoolValue(r.Config.DisableEndpointHostPrefix) { + return + } + + var labels map[string]string + if h.LabelsFn != nil { + labels = h.LabelsFn() + } + + prefix := h.Prefix + for name, value := range labels { + prefix = strings.Replace(prefix, "{"+name+"}", value, -1) + } + + r.HTTPRequest.URL.Host = prefix + r.HTTPRequest.URL.Host + if len(r.HTTPRequest.Host) > 0 { + r.HTTPRequest.Host = prefix + r.HTTPRequest.Host + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go new file mode 100644 index 000000000..53831dff9 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go @@ -0,0 +1,75 @@ +package protocol + +import ( + "crypto/rand" + "fmt" + "reflect" +) + +// RandReader is the random reader the protocol package will use to read +// random bytes from. This is exported for testing, and should not be used. +var RandReader = rand.Reader + +const idempotencyTokenFillTag = `idempotencyToken` + +// CanSetIdempotencyToken returns true if the struct field should be +// automatically populated with a Idempotency token. +// +// Only *string and string type fields that are tagged with idempotencyToken +// which are not already set can be auto filled. 
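+//
+// Hypothetical sketch (editor's note; the struct below is illustrative only):
+//
+//	type input struct {
+//		ClientToken *string `idempotencyToken:"true"`
+//	}
+//	in := input{}
+//	v := reflect.ValueOf(in).Field(0)
+//	f := reflect.TypeOf(in).Field(0)
+//	CanSetIdempotencyToken(v, f) // true: nil *string tagged for auto fill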
+func CanSetIdempotencyToken(v reflect.Value, f reflect.StructField) bool { + switch u := v.Interface().(type) { + // To auto fill an Idempotency token the field must be a string, + // tagged for auto fill, and have a zero value. + case *string: + return u == nil && len(f.Tag.Get(idempotencyTokenFillTag)) != 0 + case string: + return len(u) == 0 && len(f.Tag.Get(idempotencyTokenFillTag)) != 0 + } + + return false +} + +// GetIdempotencyToken returns a randomly generated idempotency token. +func GetIdempotencyToken() string { + b := make([]byte, 16) + RandReader.Read(b) + + return UUIDVersion4(b) +} + +// SetIdempotencyToken will set the value provided with a Idempotency Token. +// Given that the value can be set. Will panic if value is not setable. +func SetIdempotencyToken(v reflect.Value) { + if v.Kind() == reflect.Ptr { + if v.IsNil() && v.CanSet() { + v.Set(reflect.New(v.Type().Elem())) + } + v = v.Elem() + } + v = reflect.Indirect(v) + + if !v.CanSet() { + panic(fmt.Sprintf("unable to set idempotnecy token %v", v)) + } + + b := make([]byte, 16) + _, err := rand.Read(b) + if err != nil { + // TODO handle error + return + } + + v.Set(reflect.ValueOf(UUIDVersion4(b))) +} + +// UUIDVersion4 returns a Version 4 random UUID from the byte slice provided +func UUIDVersion4(u []byte) string { + // https://en.wikipedia.org/wiki/Universally_unique_identifier#Version_4_.28random.29 + // 13th character is "4" + u[6] = (u[6] | 0x40) & 0x4F + // 17th character is "8", "9", "a", or "b" + u[8] = (u[8] | 0x80) & 0xBF + + return fmt.Sprintf(`%X-%X-%X-%X-%X`, u[0:4], u[4:6], u[6:8], u[8:10], u[10:]) +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go new file mode 100644 index 000000000..864fb6704 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go @@ -0,0 +1,296 @@ +// Package jsonutil provides JSON serialization of AWS requests and responses. +package jsonutil + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "fmt" + "math" + "reflect" + "sort" + "strconv" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/private/protocol" +) + +var timeType = reflect.ValueOf(time.Time{}).Type() +var byteSliceType = reflect.ValueOf([]byte{}).Type() + +// BuildJSON builds a JSON string for a given object v. 
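+//
+// Illustrative sketch (editor's note; the shape below is hypothetical):
+//
+//	type msg struct {
+//		Name *string `locationName:"name" type:"string"`
+//	}
+//	n := "hi"
+//	b, _ := BuildJSON(&msg{Name: &n}) // b == []byte(`{"name":"hi"}`)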
+func BuildJSON(v interface{}) ([]byte, error) { + var buf bytes.Buffer + + err := buildAny(reflect.ValueOf(v), &buf, "") + return buf.Bytes(), err +} + +func buildAny(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { + origVal := value + value = reflect.Indirect(value) + if !value.IsValid() { + return nil + } + + vtype := value.Type() + + t := tag.Get("type") + if t == "" { + switch vtype.Kind() { + case reflect.Struct: + // also it can't be a time object + if value.Type() != timeType { + t = "structure" + } + case reflect.Slice: + // also it can't be a byte slice + if _, ok := value.Interface().([]byte); !ok { + t = "list" + } + case reflect.Map: + // cannot be a JSONValue map + if _, ok := value.Interface().(aws.JSONValue); !ok { + t = "map" + } + } + } + + switch t { + case "structure": + if field, ok := vtype.FieldByName("_"); ok { + tag = field.Tag + } + return buildStruct(value, buf, tag) + case "list": + return buildList(value, buf, tag) + case "map": + return buildMap(value, buf, tag) + default: + return buildScalar(origVal, buf, tag) + } +} + +func buildStruct(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { + if !value.IsValid() { + return nil + } + + // unwrap payloads + if payload := tag.Get("payload"); payload != "" { + field, _ := value.Type().FieldByName(payload) + tag = field.Tag + value = elemOf(value.FieldByName(payload)) + + if !value.IsValid() { + return nil + } + } + + buf.WriteByte('{') + + t := value.Type() + first := true + for i := 0; i < t.NumField(); i++ { + member := value.Field(i) + + // This allocates the most memory. + // Additionally, we cannot skip nil fields due to + // idempotency auto filling. + field := t.Field(i) + + if field.PkgPath != "" { + continue // ignore unexported fields + } + if field.Tag.Get("json") == "-" { + continue + } + if field.Tag.Get("location") != "" { + continue // ignore non-body elements + } + if field.Tag.Get("ignore") != "" { + continue + } + + if protocol.CanSetIdempotencyToken(member, field) { + token := protocol.GetIdempotencyToken() + member = reflect.ValueOf(&token) + } + + if (member.Kind() == reflect.Ptr || member.Kind() == reflect.Slice || member.Kind() == reflect.Map) && member.IsNil() { + continue // ignore unset fields + } + + if first { + first = false + } else { + buf.WriteByte(',') + } + + // figure out what this field is called + name := field.Name + if locName := field.Tag.Get("locationName"); locName != "" { + name = locName + } + + writeString(name, buf) + buf.WriteString(`:`) + + err := buildAny(member, buf, field.Tag) + if err != nil { + return err + } + + } + + buf.WriteString("}") + + return nil +} + +func buildList(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { + buf.WriteString("[") + + for i := 0; i < value.Len(); i++ { + buildAny(value.Index(i), buf, "") + + if i < value.Len()-1 { + buf.WriteString(",") + } + } + + buf.WriteString("]") + + return nil +} + +type sortedValues []reflect.Value + +func (sv sortedValues) Len() int { return len(sv) } +func (sv sortedValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] } +func (sv sortedValues) Less(i, j int) bool { return sv[i].String() < sv[j].String() } + +func buildMap(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { + buf.WriteString("{") + + sv := sortedValues(value.MapKeys()) + sort.Sort(sv) + + for i, k := range sv { + if i > 0 { + buf.WriteByte(',') + } + + writeString(k.String(), buf) + buf.WriteString(`:`) + + buildAny(value.MapIndex(k), buf, "") + } + + 
buf.WriteString("}") + + return nil +} + +func buildScalar(v reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { + // prevents allocation on the heap. + scratch := [64]byte{} + switch value := reflect.Indirect(v); value.Kind() { + case reflect.String: + writeString(value.String(), buf) + case reflect.Bool: + if value.Bool() { + buf.WriteString("true") + } else { + buf.WriteString("false") + } + case reflect.Int64: + buf.Write(strconv.AppendInt(scratch[:0], value.Int(), 10)) + case reflect.Float64: + f := value.Float() + if math.IsInf(f, 0) || math.IsNaN(f) { + return &json.UnsupportedValueError{Value: v, Str: strconv.FormatFloat(f, 'f', -1, 64)} + } + buf.Write(strconv.AppendFloat(scratch[:0], f, 'f', -1, 64)) + default: + switch converted := value.Interface().(type) { + case time.Time: + format := tag.Get("timestampFormat") + if len(format) == 0 { + format = protocol.UnixTimeFormatName + } + + ts := protocol.FormatTime(format, converted) + if format != protocol.UnixTimeFormatName { + ts = `"` + ts + `"` + } + + buf.WriteString(ts) + case []byte: + if !value.IsNil() { + buf.WriteByte('"') + if len(converted) < 1024 { + // for small buffers, using Encode directly is much faster. + dst := make([]byte, base64.StdEncoding.EncodedLen(len(converted))) + base64.StdEncoding.Encode(dst, converted) + buf.Write(dst) + } else { + // for large buffers, avoid unnecessary extra temporary + // buffer space. + enc := base64.NewEncoder(base64.StdEncoding, buf) + enc.Write(converted) + enc.Close() + } + buf.WriteByte('"') + } + case aws.JSONValue: + str, err := protocol.EncodeJSONValue(converted, protocol.QuotedEscape) + if err != nil { + return fmt.Errorf("unable to encode JSONValue, %v", err) + } + buf.WriteString(str) + default: + return fmt.Errorf("unsupported JSON value %v (%s)", value.Interface(), value.Type()) + } + } + return nil +} + +var hex = "0123456789abcdef" + +func writeString(s string, buf *bytes.Buffer) { + buf.WriteByte('"') + for i := 0; i < len(s); i++ { + if s[i] == '"' { + buf.WriteString(`\"`) + } else if s[i] == '\\' { + buf.WriteString(`\\`) + } else if s[i] == '\b' { + buf.WriteString(`\b`) + } else if s[i] == '\f' { + buf.WriteString(`\f`) + } else if s[i] == '\r' { + buf.WriteString(`\r`) + } else if s[i] == '\t' { + buf.WriteString(`\t`) + } else if s[i] == '\n' { + buf.WriteString(`\n`) + } else if s[i] < 32 { + buf.WriteString("\\u00") + buf.WriteByte(hex[s[i]>>4]) + buf.WriteByte(hex[s[i]&0xF]) + } else { + buf.WriteByte(s[i]) + } + } + buf.WriteByte('"') +} + +// Returns the reflection element of a value, if it is a pointer. +func elemOf(value reflect.Value) reflect.Value { + for value.Kind() == reflect.Ptr { + value = value.Elem() + } + return value +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go new file mode 100644 index 000000000..ea0da79a5 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go @@ -0,0 +1,250 @@ +package jsonutil + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "reflect" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/private/protocol" +) + +// UnmarshalJSONError unmarshal's the reader's JSON document into the passed in +// type. The value to unmarshal the json document into must be a pointer to the +// type. 
+func UnmarshalJSONError(v interface{}, stream io.Reader) error { + var errBuf bytes.Buffer + body := io.TeeReader(stream, &errBuf) + + err := json.NewDecoder(body).Decode(v) + if err != nil { + msg := "failed decoding error message" + if err == io.EOF { + msg = "error message missing" + err = nil + } + return awserr.NewUnmarshalError(err, msg, errBuf.Bytes()) + } + + return nil +} + +// UnmarshalJSON reads a stream and unmarshals the results in object v. +func UnmarshalJSON(v interface{}, stream io.Reader) error { + var out interface{} + + err := json.NewDecoder(stream).Decode(&out) + if err == io.EOF { + return nil + } else if err != nil { + return err + } + + return unmarshalAny(reflect.ValueOf(v), out, "") +} + +func unmarshalAny(value reflect.Value, data interface{}, tag reflect.StructTag) error { + vtype := value.Type() + if vtype.Kind() == reflect.Ptr { + vtype = vtype.Elem() // check kind of actual element type + } + + t := tag.Get("type") + if t == "" { + switch vtype.Kind() { + case reflect.Struct: + // also it can't be a time object + if _, ok := value.Interface().(*time.Time); !ok { + t = "structure" + } + case reflect.Slice: + // also it can't be a byte slice + if _, ok := value.Interface().([]byte); !ok { + t = "list" + } + case reflect.Map: + // cannot be a JSONValue map + if _, ok := value.Interface().(aws.JSONValue); !ok { + t = "map" + } + } + } + + switch t { + case "structure": + if field, ok := vtype.FieldByName("_"); ok { + tag = field.Tag + } + return unmarshalStruct(value, data, tag) + case "list": + return unmarshalList(value, data, tag) + case "map": + return unmarshalMap(value, data, tag) + default: + return unmarshalScalar(value, data, tag) + } +} + +func unmarshalStruct(value reflect.Value, data interface{}, tag reflect.StructTag) error { + if data == nil { + return nil + } + mapData, ok := data.(map[string]interface{}) + if !ok { + return fmt.Errorf("JSON value is not a structure (%#v)", data) + } + + t := value.Type() + if value.Kind() == reflect.Ptr { + if value.IsNil() { // create the structure if it's nil + s := reflect.New(value.Type().Elem()) + value.Set(s) + value = s + } + + value = value.Elem() + t = t.Elem() + } + + // unwrap any payloads + if payload := tag.Get("payload"); payload != "" { + field, _ := t.FieldByName(payload) + return unmarshalAny(value.FieldByName(payload), data, field.Tag) + } + + for i := 0; i < t.NumField(); i++ { + field := t.Field(i) + if field.PkgPath != "" { + continue // ignore unexported fields + } + + // figure out what this field is called + name := field.Name + if locName := field.Tag.Get("locationName"); locName != "" { + name = locName + } + + member := value.FieldByIndex(field.Index) + err := unmarshalAny(member, mapData[name], field.Tag) + if err != nil { + return err + } + } + return nil +} + +func unmarshalList(value reflect.Value, data interface{}, tag reflect.StructTag) error { + if data == nil { + return nil + } + listData, ok := data.([]interface{}) + if !ok { + return fmt.Errorf("JSON value is not a list (%#v)", data) + } + + if value.IsNil() { + l := len(listData) + value.Set(reflect.MakeSlice(value.Type(), l, l)) + } + + for i, c := range listData { + err := unmarshalAny(value.Index(i), c, "") + if err != nil { + return err + } + } + + return nil +} + +func unmarshalMap(value reflect.Value, data interface{}, tag reflect.StructTag) error { + if data == nil { + return nil + } + mapData, ok := data.(map[string]interface{}) + if !ok { + return fmt.Errorf("JSON value is not a map (%#v)", data) + } + + if 
value.IsNil() { + value.Set(reflect.MakeMap(value.Type())) + } + + for k, v := range mapData { + kvalue := reflect.ValueOf(k) + vvalue := reflect.New(value.Type().Elem()).Elem() + + unmarshalAny(vvalue, v, "") + value.SetMapIndex(kvalue, vvalue) + } + + return nil +} + +func unmarshalScalar(value reflect.Value, data interface{}, tag reflect.StructTag) error { + + switch d := data.(type) { + case nil: + return nil // nothing to do here + case string: + switch value.Interface().(type) { + case *string: + value.Set(reflect.ValueOf(&d)) + case []byte: + b, err := base64.StdEncoding.DecodeString(d) + if err != nil { + return err + } + value.Set(reflect.ValueOf(b)) + case *time.Time: + format := tag.Get("timestampFormat") + if len(format) == 0 { + format = protocol.ISO8601TimeFormatName + } + + t, err := protocol.ParseTime(format, d) + if err != nil { + return err + } + value.Set(reflect.ValueOf(&t)) + case aws.JSONValue: + // No need to use escaping as the value is a non-quoted string. + v, err := protocol.DecodeJSONValue(d, protocol.NoEscape) + if err != nil { + return err + } + value.Set(reflect.ValueOf(v)) + default: + return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type()) + } + case float64: + switch value.Interface().(type) { + case *int64: + di := int64(d) + value.Set(reflect.ValueOf(&di)) + case *float64: + value.Set(reflect.ValueOf(&d)) + case *time.Time: + // Time unmarshaled from a float64 can only be epoch seconds + t := time.Unix(int64(d), 0).UTC() + value.Set(reflect.ValueOf(&t)) + default: + return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type()) + } + case bool: + switch value.Interface().(type) { + case *bool: + value.Set(reflect.ValueOf(&d)) + default: + return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type()) + } + default: + return fmt.Errorf("unsupported JSON value (%v)", data) + } + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonvalue.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonvalue.go new file mode 100644 index 000000000..776d11018 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonvalue.go @@ -0,0 +1,76 @@ +package protocol + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "strconv" + + "github.com/aws/aws-sdk-go/aws" +) + +// EscapeMode is the mode that should be use for escaping a value +type EscapeMode uint + +// The modes for escaping a value before it is marshaled, and unmarshaled. +const ( + NoEscape EscapeMode = iota + Base64Escape + QuotedEscape +) + +// EncodeJSONValue marshals the value into a JSON string, and optionally base64 +// encodes the string before returning it. +// +// Will panic if the escape mode is unknown. +func EncodeJSONValue(v aws.JSONValue, escape EscapeMode) (string, error) { + b, err := json.Marshal(v) + if err != nil { + return "", err + } + + switch escape { + case NoEscape: + return string(b), nil + case Base64Escape: + return base64.StdEncoding.EncodeToString(b), nil + case QuotedEscape: + return strconv.Quote(string(b)), nil + } + + panic(fmt.Sprintf("EncodeJSONValue called with unknown EscapeMode, %v", escape)) +} + +// DecodeJSONValue will attempt to decode the string input as a JSONValue. +// Optionally decoding base64 the value first before JSON unmarshaling. +// +// Will panic if the escape mode is unknown. 
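+//
+// Illustrative round trip with EncodeJSONValue (editor's sketch):
+//
+//	s, _ := EncodeJSONValue(aws.JSONValue{"k": "v"}, Base64Escape)
+//	m, _ := DecodeJSONValue(s, Base64Escape) // m["k"] == "v"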
+func DecodeJSONValue(v string, escape EscapeMode) (aws.JSONValue, error) { + var b []byte + var err error + + switch escape { + case NoEscape: + b = []byte(v) + case Base64Escape: + b, err = base64.StdEncoding.DecodeString(v) + case QuotedEscape: + var u string + u, err = strconv.Unquote(v) + b = []byte(u) + default: + panic(fmt.Sprintf("DecodeJSONValue called with unknown EscapeMode, %v", escape)) + } + + if err != nil { + return nil, err + } + + m := aws.JSONValue{} + err = json.Unmarshal(b, &m) + if err != nil { + return nil, err + } + + return m, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go new file mode 100644 index 000000000..e21614a12 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go @@ -0,0 +1,81 @@ +package protocol + +import ( + "io" + "io/ioutil" + "net/http" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" +) + +// PayloadUnmarshaler provides the interface for unmarshaling a payload's +// reader into a SDK shape. +type PayloadUnmarshaler interface { + UnmarshalPayload(io.Reader, interface{}) error +} + +// HandlerPayloadUnmarshal implements the PayloadUnmarshaler from a +// HandlerList. This provides the support for unmarshaling a payload reader to +// a shape without needing a SDK request first. +type HandlerPayloadUnmarshal struct { + Unmarshalers request.HandlerList +} + +// UnmarshalPayload unmarshals the io.Reader payload into the SDK shape using +// the Unmarshalers HandlerList provided. Returns an error if unable +// unmarshaling fails. +func (h HandlerPayloadUnmarshal) UnmarshalPayload(r io.Reader, v interface{}) error { + req := &request.Request{ + HTTPRequest: &http.Request{}, + HTTPResponse: &http.Response{ + StatusCode: 200, + Header: http.Header{}, + Body: ioutil.NopCloser(r), + }, + Data: v, + } + + h.Unmarshalers.Run(req) + + return req.Error +} + +// PayloadMarshaler provides the interface for marshaling a SDK shape into and +// io.Writer. +type PayloadMarshaler interface { + MarshalPayload(io.Writer, interface{}) error +} + +// HandlerPayloadMarshal implements the PayloadMarshaler from a HandlerList. +// This provides support for marshaling a SDK shape into an io.Writer without +// needing a SDK request first. +type HandlerPayloadMarshal struct { + Marshalers request.HandlerList +} + +// MarshalPayload marshals the SDK shape into the io.Writer using the +// Marshalers HandlerList provided. Returns an error if unable if marshal +// fails. +func (h HandlerPayloadMarshal) MarshalPayload(w io.Writer, v interface{}) error { + req := request.New( + aws.Config{}, + metadata.ClientInfo{}, + request.Handlers{}, + nil, + &request.Operation{HTTPMethod: "GET"}, + v, + nil, + ) + + h.Marshalers.Run(req) + + if req.Error != nil { + return req.Error + } + + io.Copy(w, req.GetBody()) + + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go new file mode 100644 index 000000000..0cb99eb57 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go @@ -0,0 +1,36 @@ +// Package query provides serialization of AWS query requests, and responses. 
+package query + +//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/query.json build_test.go + +import ( + "net/url" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/query/queryutil" +) + +// BuildHandler is a named request handler for building query protocol requests +var BuildHandler = request.NamedHandler{Name: "awssdk.query.Build", Fn: Build} + +// Build builds a request for an AWS Query service. +func Build(r *request.Request) { + body := url.Values{ + "Action": {r.Operation.Name}, + "Version": {r.ClientInfo.APIVersion}, + } + if err := queryutil.Parse(body, r.Params, false); err != nil { + r.Error = awserr.New(request.ErrCodeSerialization, "failed encoding Query request", err) + return + } + + if !r.IsPresigned() { + r.HTTPRequest.Method = "POST" + r.HTTPRequest.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8") + r.SetBufferBody([]byte(body.Encode())) + } else { // This is a pre-signed request + r.HTTPRequest.Method = "GET" + r.HTTPRequest.URL.RawQuery = body.Encode() + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go new file mode 100644 index 000000000..75866d012 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go @@ -0,0 +1,246 @@ +package queryutil + +import ( + "encoding/base64" + "fmt" + "net/url" + "reflect" + "sort" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/private/protocol" +) + +// Parse parses an object i and fills a url.Values object. The isEC2 flag +// indicates if this is the EC2 Query sub-protocol. 
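+//
+// A minimal usage sketch (illustrative; the input shape is hypothetical):
+//
+//	name := "example"
+//	in := struct {
+//		Name *string `type:"string"`
+//	}{Name: &name}
+//	body := url.Values{}
+//	err := Parse(body, in, false) // body now encodes Name=example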
+func Parse(body url.Values, i interface{}, isEC2 bool) error { + q := queryParser{isEC2: isEC2} + return q.parseValue(body, reflect.ValueOf(i), "", "") +} + +func elemOf(value reflect.Value) reflect.Value { + for value.Kind() == reflect.Ptr { + value = value.Elem() + } + return value +} + +type queryParser struct { + isEC2 bool +} + +func (q *queryParser) parseValue(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error { + value = elemOf(value) + + // no need to handle zero values + if !value.IsValid() { + return nil + } + + t := tag.Get("type") + if t == "" { + switch value.Kind() { + case reflect.Struct: + t = "structure" + case reflect.Slice: + t = "list" + case reflect.Map: + t = "map" + } + } + + switch t { + case "structure": + return q.parseStruct(v, value, prefix) + case "list": + return q.parseList(v, value, prefix, tag) + case "map": + return q.parseMap(v, value, prefix, tag) + default: + return q.parseScalar(v, value, prefix, tag) + } +} + +func (q *queryParser) parseStruct(v url.Values, value reflect.Value, prefix string) error { + if !value.IsValid() { + return nil + } + + t := value.Type() + for i := 0; i < value.NumField(); i++ { + elemValue := elemOf(value.Field(i)) + field := t.Field(i) + + if field.PkgPath != "" { + continue // ignore unexported fields + } + if field.Tag.Get("ignore") != "" { + continue + } + + if protocol.CanSetIdempotencyToken(value.Field(i), field) { + token := protocol.GetIdempotencyToken() + elemValue = reflect.ValueOf(token) + } + + var name string + if q.isEC2 { + name = field.Tag.Get("queryName") + } + if name == "" { + if field.Tag.Get("flattened") != "" && field.Tag.Get("locationNameList") != "" { + name = field.Tag.Get("locationNameList") + } else if locName := field.Tag.Get("locationName"); locName != "" { + name = locName + } + if name != "" && q.isEC2 { + name = strings.ToUpper(name[0:1]) + name[1:] + } + } + if name == "" { + name = field.Name + } + + if prefix != "" { + name = prefix + "." + name + } + + if err := q.parseValue(v, elemValue, name, field.Tag); err != nil { + return err + } + } + return nil +} + +func (q *queryParser) parseList(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error { + // If it's empty, generate an empty value + if !value.IsNil() && value.Len() == 0 { + v.Set(prefix, "") + return nil + } + + if _, ok := value.Interface().([]byte); ok { + return q.parseScalar(v, value, prefix, tag) + } + + // check for unflattened list member + if !q.isEC2 && tag.Get("flattened") == "" { + if listName := tag.Get("locationNameList"); listName == "" { + prefix += ".member" + } else { + prefix += "." + listName + } + } + + for i := 0; i < value.Len(); i++ { + slicePrefix := prefix + if slicePrefix == "" { + slicePrefix = strconv.Itoa(i + 1) + } else { + slicePrefix = slicePrefix + "." + strconv.Itoa(i+1) + } + if err := q.parseValue(v, value.Index(i), slicePrefix, ""); err != nil { + return err + } + } + return nil +} + +func (q *queryParser) parseMap(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error { + // If it's empty, generate an empty value + if !value.IsNil() && value.Len() == 0 { + v.Set(prefix, "") + return nil + } + + // check for unflattened list member + if !q.isEC2 && tag.Get("flattened") == "" { + prefix += ".entry" + } + + // sort keys for improved serialization consistency. + // this is not strictly necessary for protocol support. 
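+	// For example (illustrative), a non-flattened map {"a": "1", "b": "2"}
+	// under the prefix "Attr" with the default key/value names serializes
+	// deterministically as:
+	//
+	//	Attr.entry.1.key=a&Attr.entry.1.value=1
+	//	Attr.entry.2.key=b&Attr.entry.2.value=2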
+ mapKeyValues := value.MapKeys() + mapKeys := map[string]reflect.Value{} + mapKeyNames := make([]string, len(mapKeyValues)) + for i, mapKey := range mapKeyValues { + name := mapKey.String() + mapKeys[name] = mapKey + mapKeyNames[i] = name + } + sort.Strings(mapKeyNames) + + for i, mapKeyName := range mapKeyNames { + mapKey := mapKeys[mapKeyName] + mapValue := value.MapIndex(mapKey) + + kname := tag.Get("locationNameKey") + if kname == "" { + kname = "key" + } + vname := tag.Get("locationNameValue") + if vname == "" { + vname = "value" + } + + // serialize key + var keyName string + if prefix == "" { + keyName = strconv.Itoa(i+1) + "." + kname + } else { + keyName = prefix + "." + strconv.Itoa(i+1) + "." + kname + } + + if err := q.parseValue(v, mapKey, keyName, ""); err != nil { + return err + } + + // serialize value + var valueName string + if prefix == "" { + valueName = strconv.Itoa(i+1) + "." + vname + } else { + valueName = prefix + "." + strconv.Itoa(i+1) + "." + vname + } + + if err := q.parseValue(v, mapValue, valueName, ""); err != nil { + return err + } + } + + return nil +} + +func (q *queryParser) parseScalar(v url.Values, r reflect.Value, name string, tag reflect.StructTag) error { + switch value := r.Interface().(type) { + case string: + v.Set(name, value) + case []byte: + if !r.IsNil() { + v.Set(name, base64.StdEncoding.EncodeToString(value)) + } + case bool: + v.Set(name, strconv.FormatBool(value)) + case int64: + v.Set(name, strconv.FormatInt(value, 10)) + case int: + v.Set(name, strconv.Itoa(value)) + case float64: + v.Set(name, strconv.FormatFloat(value, 'f', -1, 64)) + case float32: + v.Set(name, strconv.FormatFloat(float64(value), 'f', -1, 32)) + case time.Time: + const ISO8601UTC = "2006-01-02T15:04:05Z" + format := tag.Get("timestampFormat") + if len(format) == 0 { + format = protocol.ISO8601TimeFormatName + } + + v.Set(name, protocol.FormatTime(format, value)) + default: + return fmt.Errorf("unsupported value for param %s: %v (%s)", name, r.Interface(), r.Type().Name()) + } + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go new file mode 100644 index 000000000..f69c1efc9 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go @@ -0,0 +1,39 @@ +package query + +//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/query.json unmarshal_test.go + +import ( + "encoding/xml" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" +) + +// UnmarshalHandler is a named request handler for unmarshaling query protocol requests +var UnmarshalHandler = request.NamedHandler{Name: "awssdk.query.Unmarshal", Fn: Unmarshal} + +// UnmarshalMetaHandler is a named request handler for unmarshaling query protocol request metadata +var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalMeta", Fn: UnmarshalMeta} + +// Unmarshal unmarshals a response for an AWS Query service. 
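+//
+// The decoder below pulls the operation's fields out of the conventional Query
+// response envelope, which (illustratively) has the shape:
+//
+//	<OperationNameResponse>
+//	    <OperationNameResult>...output fields...</OperationNameResult>
+//	    <ResponseMetadata>...</ResponseMetadata>
+//	</OperationNameResponse>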
+func Unmarshal(r *request.Request) { + defer r.HTTPResponse.Body.Close() + if r.DataFilled() { + decoder := xml.NewDecoder(r.HTTPResponse.Body) + err := xmlutil.UnmarshalXML(r.Data, decoder, r.Operation.Name+"Result") + if err != nil { + r.Error = awserr.NewRequestFailure( + awserr.New(request.ErrCodeSerialization, "failed decoding Query response", err), + r.HTTPResponse.StatusCode, + r.RequestID, + ) + return + } + } +} + +// UnmarshalMeta unmarshals header response values for an AWS Query service. +func UnmarshalMeta(r *request.Request) { + r.RequestID = r.HTTPResponse.Header.Get("X-Amzn-Requestid") +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go new file mode 100644 index 000000000..831b0110c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go @@ -0,0 +1,69 @@ +package query + +import ( + "encoding/xml" + "fmt" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" +) + +// UnmarshalErrorHandler is a name request handler to unmarshal request errors +var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalError", Fn: UnmarshalError} + +type xmlErrorResponse struct { + Code string `xml:"Error>Code"` + Message string `xml:"Error>Message"` + RequestID string `xml:"RequestId"` +} + +type xmlResponseError struct { + xmlErrorResponse +} + +func (e *xmlResponseError) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + const svcUnavailableTagName = "ServiceUnavailableException" + const errorResponseTagName = "ErrorResponse" + + switch start.Name.Local { + case svcUnavailableTagName: + e.Code = svcUnavailableTagName + e.Message = "service is unavailable" + return d.Skip() + + case errorResponseTagName: + return d.DecodeElement(&e.xmlErrorResponse, &start) + + default: + return fmt.Errorf("unknown error response tag, %v", start) + } +} + +// UnmarshalError unmarshals an error response for an AWS Query service. +func UnmarshalError(r *request.Request) { + defer r.HTTPResponse.Body.Close() + + var respErr xmlResponseError + err := xmlutil.UnmarshalXMLError(&respErr, r.HTTPResponse.Body) + if err != nil { + r.Error = awserr.NewRequestFailure( + awserr.New(request.ErrCodeSerialization, + "failed to unmarshal error message", err), + r.HTTPResponse.StatusCode, + r.RequestID, + ) + return + } + + reqID := respErr.RequestID + if len(reqID) == 0 { + reqID = r.RequestID + } + + r.Error = awserr.NewRequestFailure( + awserr.New(respErr.Code, respErr.Message, nil), + r.HTTPResponse.StatusCode, + reqID, + ) +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go new file mode 100644 index 000000000..1301b149d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go @@ -0,0 +1,310 @@ +// Package rest provides RESTful serialization of AWS requests and responses. 
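+//
+// Serialization is driven by struct tags on the API shapes. A hypothetical
+// input shape (illustrative only, not a real API type):
+//
+//	type GetItemInput struct {
+//		Bucket *string `location:"uri" locationName:"Bucket"`
+//		Token  *string `location:"header" locationName:"x-token"`
+//		Limit  *int64  `location:"querystring" locationName:"limit"`
+//	}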
+package rest
+
+import (
+	"bytes"
+	"encoding/base64"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"path"
+	"reflect"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/private/protocol"
+)
+
+// Whether the byte value can be sent without escaping in AWS URLs
+var noEscape [256]bool
+
+var errValueNotSet = fmt.Errorf("value not set")
+
+var byteSliceType = reflect.TypeOf([]byte{})
+
+func init() {
+	for i := 0; i < len(noEscape); i++ {
+		// AWS expects every character except these to be escaped
+		noEscape[i] = (i >= 'A' && i <= 'Z') ||
+			(i >= 'a' && i <= 'z') ||
+			(i >= '0' && i <= '9') ||
+			i == '-' ||
+			i == '.' ||
+			i == '_' ||
+			i == '~'
+	}
+}
+
+// BuildHandler is a named request handler for building rest protocol requests
+var BuildHandler = request.NamedHandler{Name: "awssdk.rest.Build", Fn: Build}
+
+// Build builds the REST component of a service request.
+func Build(r *request.Request) {
+	if r.ParamsFilled() {
+		v := reflect.ValueOf(r.Params).Elem()
+		buildLocationElements(r, v, false)
+		buildBody(r, v)
+	}
+}
+
+// BuildAsGET builds the REST component of a service request with the ability to hoist
+// data from the body.
+func BuildAsGET(r *request.Request) {
+	if r.ParamsFilled() {
+		v := reflect.ValueOf(r.Params).Elem()
+		buildLocationElements(r, v, true)
+		buildBody(r, v)
+	}
+}
+
+func buildLocationElements(r *request.Request, v reflect.Value, buildGETQuery bool) {
+	query := r.HTTPRequest.URL.Query()
+
+	// Set up the raw path to match the base path pattern. This is needed
+	// so that when the path is mutated a custom escaped version can be
+	// stored in RawPath that will be used by the Go client.
+	r.HTTPRequest.URL.RawPath = r.HTTPRequest.URL.Path
+
+	for i := 0; i < v.NumField(); i++ {
+		m := v.Field(i)
+		if n := v.Type().Field(i).Name; n[0:1] == strings.ToLower(n[0:1]) {
+			continue
+		}
+
+		if m.IsValid() {
+			field := v.Type().Field(i)
+			name := field.Tag.Get("locationName")
+			if name == "" {
+				name = field.Name
+			}
+			if kind := m.Kind(); kind == reflect.Ptr {
+				m = m.Elem()
+			} else if kind == reflect.Interface {
+				if !m.Elem().IsValid() {
+					continue
+				}
+			}
+			if !m.IsValid() {
+				continue
+			}
+			if field.Tag.Get("ignore") != "" {
+				continue
+			}
+
+			// Support the ability to customize values to be marshaled as a
+			// blob even though they were modeled as a string. Required for S3
+			// API operations like SSECustomerKey, which is modeled as string
+			// but required to be base64 encoded in the request.
+ if field.Tag.Get("marshal-as") == "blob" { + m = m.Convert(byteSliceType) + } + + var err error + switch field.Tag.Get("location") { + case "headers": // header maps + err = buildHeaderMap(&r.HTTPRequest.Header, m, field.Tag) + case "header": + err = buildHeader(&r.HTTPRequest.Header, m, name, field.Tag) + case "uri": + err = buildURI(r.HTTPRequest.URL, m, name, field.Tag) + case "querystring": + err = buildQueryString(query, m, name, field.Tag) + default: + if buildGETQuery { + err = buildQueryString(query, m, name, field.Tag) + } + } + r.Error = err + } + if r.Error != nil { + return + } + } + + r.HTTPRequest.URL.RawQuery = query.Encode() + if !aws.BoolValue(r.Config.DisableRestProtocolURICleaning) { + cleanPath(r.HTTPRequest.URL) + } +} + +func buildBody(r *request.Request, v reflect.Value) { + if field, ok := v.Type().FieldByName("_"); ok { + if payloadName := field.Tag.Get("payload"); payloadName != "" { + pfield, _ := v.Type().FieldByName(payloadName) + if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" { + payload := reflect.Indirect(v.FieldByName(payloadName)) + if payload.IsValid() && payload.Interface() != nil { + switch reader := payload.Interface().(type) { + case io.ReadSeeker: + r.SetReaderBody(reader) + case []byte: + r.SetBufferBody(reader) + case string: + r.SetStringBody(reader) + default: + r.Error = awserr.New(request.ErrCodeSerialization, + "failed to encode REST request", + fmt.Errorf("unknown payload type %s", payload.Type())) + } + } + } + } + } +} + +func buildHeader(header *http.Header, v reflect.Value, name string, tag reflect.StructTag) error { + str, err := convertType(v, tag) + if err == errValueNotSet { + return nil + } else if err != nil { + return awserr.New(request.ErrCodeSerialization, "failed to encode REST request", err) + } + + name = strings.TrimSpace(name) + str = strings.TrimSpace(str) + + header.Add(name, str) + + return nil +} + +func buildHeaderMap(header *http.Header, v reflect.Value, tag reflect.StructTag) error { + prefix := tag.Get("locationName") + for _, key := range v.MapKeys() { + str, err := convertType(v.MapIndex(key), tag) + if err == errValueNotSet { + continue + } else if err != nil { + return awserr.New(request.ErrCodeSerialization, "failed to encode REST request", err) + + } + keyStr := strings.TrimSpace(key.String()) + str = strings.TrimSpace(str) + + header.Add(prefix+keyStr, str) + } + return nil +} + +func buildURI(u *url.URL, v reflect.Value, name string, tag reflect.StructTag) error { + value, err := convertType(v, tag) + if err == errValueNotSet { + return nil + } else if err != nil { + return awserr.New(request.ErrCodeSerialization, "failed to encode REST request", err) + } + + u.Path = strings.Replace(u.Path, "{"+name+"}", value, -1) + u.Path = strings.Replace(u.Path, "{"+name+"+}", value, -1) + + u.RawPath = strings.Replace(u.RawPath, "{"+name+"}", EscapePath(value, true), -1) + u.RawPath = strings.Replace(u.RawPath, "{"+name+"+}", EscapePath(value, false), -1) + + return nil +} + +func buildQueryString(query url.Values, v reflect.Value, name string, tag reflect.StructTag) error { + switch value := v.Interface().(type) { + case []*string: + for _, item := range value { + query.Add(name, *item) + } + case map[string]*string: + for key, item := range value { + query.Add(key, *item) + } + case map[string][]*string: + for key, items := range value { + for _, item := range items { + query.Add(key, *item) + } + } + default: + str, err := convertType(v, tag) + if err == errValueNotSet { + return nil + } else 
if err != nil { + return awserr.New(request.ErrCodeSerialization, "failed to encode REST request", err) + } + query.Set(name, str) + } + + return nil +} + +func cleanPath(u *url.URL) { + hasSlash := strings.HasSuffix(u.Path, "/") + + // clean up path, removing duplicate `/` + u.Path = path.Clean(u.Path) + u.RawPath = path.Clean(u.RawPath) + + if hasSlash && !strings.HasSuffix(u.Path, "/") { + u.Path += "/" + u.RawPath += "/" + } +} + +// EscapePath escapes part of a URL path in Amazon style +func EscapePath(path string, encodeSep bool) string { + var buf bytes.Buffer + for i := 0; i < len(path); i++ { + c := path[i] + if noEscape[c] || (c == '/' && !encodeSep) { + buf.WriteByte(c) + } else { + fmt.Fprintf(&buf, "%%%02X", c) + } + } + return buf.String() +} + +func convertType(v reflect.Value, tag reflect.StructTag) (str string, err error) { + v = reflect.Indirect(v) + if !v.IsValid() { + return "", errValueNotSet + } + + switch value := v.Interface().(type) { + case string: + str = value + case []byte: + str = base64.StdEncoding.EncodeToString(value) + case bool: + str = strconv.FormatBool(value) + case int64: + str = strconv.FormatInt(value, 10) + case float64: + str = strconv.FormatFloat(value, 'f', -1, 64) + case time.Time: + format := tag.Get("timestampFormat") + if len(format) == 0 { + format = protocol.RFC822TimeFormatName + if tag.Get("location") == "querystring" { + format = protocol.ISO8601TimeFormatName + } + } + str = protocol.FormatTime(format, value) + case aws.JSONValue: + if len(value) == 0 { + return "", errValueNotSet + } + escaping := protocol.NoEscape + if tag.Get("location") == "header" { + escaping = protocol.Base64Escape + } + str, err = protocol.EncodeJSONValue(value, escaping) + if err != nil { + return "", fmt.Errorf("unable to encode JSONValue, %v", err) + } + default: + err := fmt.Errorf("unsupported value for param %v (%s)", v.Interface(), v.Type()) + return "", err + } + return str, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go new file mode 100644 index 000000000..4366de2e1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go @@ -0,0 +1,45 @@ +package rest + +import "reflect" + +// PayloadMember returns the payload field member of i if there is one, or nil. +func PayloadMember(i interface{}) interface{} { + if i == nil { + return nil + } + + v := reflect.ValueOf(i).Elem() + if !v.IsValid() { + return nil + } + if field, ok := v.Type().FieldByName("_"); ok { + if payloadName := field.Tag.Get("payload"); payloadName != "" { + field, _ := v.Type().FieldByName(payloadName) + if field.Tag.Get("type") != "structure" { + return nil + } + + payload := v.FieldByName(payloadName) + if payload.IsValid() || (payload.Kind() == reflect.Ptr && !payload.IsNil()) { + return payload.Interface() + } + } + } + return nil +} + +// PayloadType returns the type of a payload field member of i if there is one, or "". 
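+//
+// For example (hypothetical shape, for illustration):
+//
+//	type output struct {
+//		_    struct{} `payload:"Body"`
+//		Body []byte   `type:"blob"`
+//	}
+//
+// PayloadType(&output{}) returns "blob".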
+func PayloadType(i interface{}) string { + v := reflect.Indirect(reflect.ValueOf(i)) + if !v.IsValid() { + return "" + } + if field, ok := v.Type().FieldByName("_"); ok { + if payloadName := field.Tag.Get("payload"); payloadName != "" { + if member, ok := v.Type().FieldByName(payloadName); ok { + return member.Tag.Get("type") + } + } + } + return "" +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go new file mode 100644 index 000000000..de021367d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go @@ -0,0 +1,225 @@ +package rest + +import ( + "bytes" + "encoding/base64" + "fmt" + "io" + "io/ioutil" + "net/http" + "reflect" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" +) + +// UnmarshalHandler is a named request handler for unmarshaling rest protocol requests +var UnmarshalHandler = request.NamedHandler{Name: "awssdk.rest.Unmarshal", Fn: Unmarshal} + +// UnmarshalMetaHandler is a named request handler for unmarshaling rest protocol request metadata +var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.rest.UnmarshalMeta", Fn: UnmarshalMeta} + +// Unmarshal unmarshals the REST component of a response in a REST service. +func Unmarshal(r *request.Request) { + if r.DataFilled() { + v := reflect.Indirect(reflect.ValueOf(r.Data)) + unmarshalBody(r, v) + } +} + +// UnmarshalMeta unmarshals the REST metadata of a response in a REST service +func UnmarshalMeta(r *request.Request) { + r.RequestID = r.HTTPResponse.Header.Get("X-Amzn-Requestid") + if r.RequestID == "" { + // Alternative version of request id in the header + r.RequestID = r.HTTPResponse.Header.Get("X-Amz-Request-Id") + } + if r.DataFilled() { + v := reflect.Indirect(reflect.ValueOf(r.Data)) + unmarshalLocationElements(r, v) + } +} + +func unmarshalBody(r *request.Request, v reflect.Value) { + if field, ok := v.Type().FieldByName("_"); ok { + if payloadName := field.Tag.Get("payload"); payloadName != "" { + pfield, _ := v.Type().FieldByName(payloadName) + if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" { + payload := v.FieldByName(payloadName) + if payload.IsValid() { + switch payload.Interface().(type) { + case []byte: + defer r.HTTPResponse.Body.Close() + b, err := ioutil.ReadAll(r.HTTPResponse.Body) + if err != nil { + r.Error = awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err) + } else { + payload.Set(reflect.ValueOf(b)) + } + case *string: + defer r.HTTPResponse.Body.Close() + b, err := ioutil.ReadAll(r.HTTPResponse.Body) + if err != nil { + r.Error = awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err) + } else { + str := string(b) + payload.Set(reflect.ValueOf(&str)) + } + default: + switch payload.Type().String() { + case "io.ReadCloser": + payload.Set(reflect.ValueOf(r.HTTPResponse.Body)) + case "io.ReadSeeker": + b, err := ioutil.ReadAll(r.HTTPResponse.Body) + if err != nil { + r.Error = awserr.New(request.ErrCodeSerialization, + "failed to read response body", err) + return + } + payload.Set(reflect.ValueOf(ioutil.NopCloser(bytes.NewReader(b)))) + default: + io.Copy(ioutil.Discard, r.HTTPResponse.Body) + defer r.HTTPResponse.Body.Close() + r.Error = awserr.New(request.ErrCodeSerialization, + "failed to decode REST response", + fmt.Errorf("unknown 
payload type %s", payload.Type())) + } + } + } + } + } + } +} + +func unmarshalLocationElements(r *request.Request, v reflect.Value) { + for i := 0; i < v.NumField(); i++ { + m, field := v.Field(i), v.Type().Field(i) + if n := field.Name; n[0:1] == strings.ToLower(n[0:1]) { + continue + } + + if m.IsValid() { + name := field.Tag.Get("locationName") + if name == "" { + name = field.Name + } + + switch field.Tag.Get("location") { + case "statusCode": + unmarshalStatusCode(m, r.HTTPResponse.StatusCode) + case "header": + err := unmarshalHeader(m, r.HTTPResponse.Header.Get(name), field.Tag) + if err != nil { + r.Error = awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err) + break + } + case "headers": + prefix := field.Tag.Get("locationName") + err := unmarshalHeaderMap(m, r.HTTPResponse.Header, prefix) + if err != nil { + r.Error = awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err) + break + } + } + } + if r.Error != nil { + return + } + } +} + +func unmarshalStatusCode(v reflect.Value, statusCode int) { + if !v.IsValid() { + return + } + + switch v.Interface().(type) { + case *int64: + s := int64(statusCode) + v.Set(reflect.ValueOf(&s)) + } +} + +func unmarshalHeaderMap(r reflect.Value, headers http.Header, prefix string) error { + switch r.Interface().(type) { + case map[string]*string: // we only support string map value types + out := map[string]*string{} + for k, v := range headers { + k = http.CanonicalHeaderKey(k) + if strings.HasPrefix(strings.ToLower(k), strings.ToLower(prefix)) { + out[k[len(prefix):]] = &v[0] + } + } + r.Set(reflect.ValueOf(out)) + } + return nil +} + +func unmarshalHeader(v reflect.Value, header string, tag reflect.StructTag) error { + isJSONValue := tag.Get("type") == "jsonvalue" + if isJSONValue { + if len(header) == 0 { + return nil + } + } else if !v.IsValid() || (header == "" && v.Elem().Kind() != reflect.String) { + return nil + } + + switch v.Interface().(type) { + case *string: + v.Set(reflect.ValueOf(&header)) + case []byte: + b, err := base64.StdEncoding.DecodeString(header) + if err != nil { + return err + } + v.Set(reflect.ValueOf(&b)) + case *bool: + b, err := strconv.ParseBool(header) + if err != nil { + return err + } + v.Set(reflect.ValueOf(&b)) + case *int64: + i, err := strconv.ParseInt(header, 10, 64) + if err != nil { + return err + } + v.Set(reflect.ValueOf(&i)) + case *float64: + f, err := strconv.ParseFloat(header, 64) + if err != nil { + return err + } + v.Set(reflect.ValueOf(&f)) + case *time.Time: + format := tag.Get("timestampFormat") + if len(format) == 0 { + format = protocol.RFC822TimeFormatName + } + t, err := protocol.ParseTime(format, header) + if err != nil { + return err + } + v.Set(reflect.ValueOf(&t)) + case aws.JSONValue: + escaping := protocol.NoEscape + if tag.Get("location") == "header" { + escaping = protocol.Base64Escape + } + m, err := protocol.DecodeJSONValue(header, escaping) + if err != nil { + return err + } + v.Set(reflect.ValueOf(m)) + default: + err := fmt.Errorf("Unsupported value for param %v (%s)", v.Interface(), v.Type()) + return err + } + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go new file mode 100644 index 000000000..b7ed6c6f8 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go @@ -0,0 +1,72 @@ +package protocol + +import ( + "strconv" + "time" +) + +// Names of time formats supported by the SDK +const ( + 
RFC822TimeFormatName  = "rfc822"
+	ISO8601TimeFormatName = "iso8601"
+	UnixTimeFormatName    = "unixTimestamp"
+)
+
+// Time formats supported by the SDK
+const (
+	// RFC 7231#section-7.1.1.1 timestamp format. e.g. Tue, 29 Apr 2014 18:30:38 GMT
+	RFC822TimeFormat = "Mon, 2 Jan 2006 15:04:05 GMT"
+
+	// RFC3339 a subset of the ISO8601 timestamp format. e.g. 2014-04-29T18:30:38Z
+	ISO8601TimeFormat = "2006-01-02T15:04:05Z"
+)
+
+// IsKnownTimestampFormat returns whether the timestamp format name
+// is known to the SDK's protocols.
+func IsKnownTimestampFormat(name string) bool {
+	switch name {
+	case RFC822TimeFormatName:
+		fallthrough
+	case ISO8601TimeFormatName:
+		fallthrough
+	case UnixTimeFormatName:
+		return true
+	default:
+		return false
+	}
+}
+
+// FormatTime returns a string value of the time.
+func FormatTime(name string, t time.Time) string {
+	t = t.UTC()
+
+	switch name {
+	case RFC822TimeFormatName:
+		return t.Format(RFC822TimeFormat)
+	case ISO8601TimeFormatName:
+		return t.Format(ISO8601TimeFormat)
+	case UnixTimeFormatName:
+		return strconv.FormatInt(t.Unix(), 10)
+	default:
+		panic("unknown timestamp format name, " + name)
+	}
+}
+
+// ParseTime attempts to parse the time in the given format, returning the
+// parsed time on success and an error otherwise.
+func ParseTime(formatName, value string) (time.Time, error) {
+	switch formatName {
+	case RFC822TimeFormatName:
+		return time.Parse(RFC822TimeFormat, value)
+	case ISO8601TimeFormatName:
+		return time.Parse(ISO8601TimeFormat, value)
+	case UnixTimeFormatName:
+		v, err := strconv.ParseFloat(value, 64)
+		if err != nil {
+			return time.Time{}, err
+		}
+		return time.Unix(int64(v), 0), nil
+	default:
+		panic("unknown timestamp format name, " + formatName)
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go
new file mode 100644
index 000000000..da1a68111
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go
@@ -0,0 +1,21 @@
+package protocol
+
+import (
+	"io"
+	"io/ioutil"
+
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+// UnmarshalDiscardBodyHandler is a named request handler to empty and close a response's body
+var UnmarshalDiscardBodyHandler = request.NamedHandler{Name: "awssdk.shared.UnmarshalDiscardBody", Fn: UnmarshalDiscardBody}
+
+// UnmarshalDiscardBody is a request handler to empty a response's body and close it.
+func UnmarshalDiscardBody(r *request.Request) {
+	if r.HTTPResponse == nil || r.HTTPResponse.Body == nil {
+		return
+	}
+
+	io.Copy(ioutil.Discard, r.HTTPResponse.Body)
+	r.HTTPResponse.Body.Close()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go
new file mode 100644
index 000000000..cf981fe95
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go
@@ -0,0 +1,306 @@
+// Package xmlutil provides XML serialization of AWS requests and responses.
+package xmlutil
+
+import (
+	"encoding/base64"
+	"encoding/xml"
+	"fmt"
+	"reflect"
+	"sort"
+	"strconv"
+	"time"
+
+	"github.com/aws/aws-sdk-go/private/protocol"
+)
+
+// BuildXML will serialize params into an xml.Encoder. Error will be returned
+// if the serialization of any of the params or nested values fails.
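+//
+// A minimal usage sketch (illustrative):
+//
+//	var buf bytes.Buffer
+//	enc := xml.NewEncoder(&buf)
+//	err := BuildXML(input, enc) // input is any tagged API shape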
+func BuildXML(params interface{}, e *xml.Encoder) error {
+	return buildXML(params, e, false)
+}
+
+func buildXML(params interface{}, e *xml.Encoder, sorted bool) error {
+	b := xmlBuilder{encoder: e, namespaces: map[string]string{}}
+	root := NewXMLElement(xml.Name{})
+	if err := b.buildValue(reflect.ValueOf(params), root, ""); err != nil {
+		return err
+	}
+	for _, c := range root.Children {
+		for _, v := range c {
+			return StructToXML(e, v, sorted)
+		}
+	}
+	return nil
+}
+
+// Returns the reflection element of a value, if it is a pointer.
+func elemOf(value reflect.Value) reflect.Value {
+	for value.Kind() == reflect.Ptr {
+		value = value.Elem()
+	}
+	return value
+}
+
+// An xmlBuilder serializes values from Go code to XML
+type xmlBuilder struct {
+	encoder    *xml.Encoder
+	namespaces map[string]string
+}
+
+// buildValue is a generic XMLNode builder for any type. It builds the value
+// according to its specific type: struct, list, map, or scalar.
+//
+// Also takes a "type" tag value to set what type a value should be converted
+// to as an XMLNode. If the type tag is not provided, reflection is used to
+// determine the value's type.
+func (b *xmlBuilder) buildValue(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
+	value = elemOf(value)
+	if !value.IsValid() { // no need to handle zero values
+		return nil
+	} else if tag.Get("location") != "" { // don't handle non-body location values
+		return nil
+	}
+
+	t := tag.Get("type")
+	if t == "" {
+		switch value.Kind() {
+		case reflect.Struct:
+			t = "structure"
+		case reflect.Slice:
+			t = "list"
+		case reflect.Map:
+			t = "map"
+		}
+	}
+
+	switch t {
+	case "structure":
+		if field, ok := value.Type().FieldByName("_"); ok {
+			tag = tag + reflect.StructTag(" ") + field.Tag
+		}
+		return b.buildStruct(value, current, tag)
+	case "list":
+		return b.buildList(value, current, tag)
+	case "map":
+		return b.buildMap(value, current, tag)
+	default:
+		return b.buildScalar(value, current, tag)
+	}
+}
+
+// buildStruct adds a struct and its fields to the current XMLNode. All fields and any nested
+// types are converted to XMLNodes also.
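+//
+// A hypothetical shape (illustrative only):
+//
+//	type Grantee struct {
+//		_    struct{} `type:"structure"`
+//		Name *string  `locationName:"DisplayName" type:"string"`
+//	}
+//
+// is built as an element whose child is <DisplayName>...</DisplayName>.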
+func (b *xmlBuilder) buildStruct(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { + if !value.IsValid() { + return nil + } + + // unwrap payloads + if payload := tag.Get("payload"); payload != "" { + field, _ := value.Type().FieldByName(payload) + tag = field.Tag + value = elemOf(value.FieldByName(payload)) + + if !value.IsValid() { + return nil + } + } + + child := NewXMLElement(xml.Name{Local: tag.Get("locationName")}) + + // there is an xmlNamespace associated with this struct + if prefix, uri := tag.Get("xmlPrefix"), tag.Get("xmlURI"); uri != "" { + ns := xml.Attr{ + Name: xml.Name{Local: "xmlns"}, + Value: uri, + } + if prefix != "" { + b.namespaces[prefix] = uri // register the namespace + ns.Name.Local = "xmlns:" + prefix + } + + child.Attr = append(child.Attr, ns) + } + + var payloadFields, nonPayloadFields int + + t := value.Type() + for i := 0; i < value.NumField(); i++ { + member := elemOf(value.Field(i)) + field := t.Field(i) + + if field.PkgPath != "" { + continue // ignore unexported fields + } + if field.Tag.Get("ignore") != "" { + continue + } + + mTag := field.Tag + if mTag.Get("location") != "" { // skip non-body members + nonPayloadFields++ + continue + } + payloadFields++ + + if protocol.CanSetIdempotencyToken(value.Field(i), field) { + token := protocol.GetIdempotencyToken() + member = reflect.ValueOf(token) + } + + memberName := mTag.Get("locationName") + if memberName == "" { + memberName = field.Name + mTag = reflect.StructTag(string(mTag) + ` locationName:"` + memberName + `"`) + } + if err := b.buildValue(member, child, mTag); err != nil { + return err + } + } + + // Only case where the child shape is not added is if the shape only contains + // non-payload fields, e.g headers/query. + if !(payloadFields == 0 && nonPayloadFields > 0) { + current.AddChild(child) + } + + return nil +} + +// buildList adds the value's list items to the current XMLNode as children nodes. All +// nested values in the list are converted to XMLNodes also. +func (b *xmlBuilder) buildList(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { + if value.IsNil() { // don't build omitted lists + return nil + } + + // check for unflattened list member + flattened := tag.Get("flattened") != "" + + xname := xml.Name{Local: tag.Get("locationName")} + if flattened { + for i := 0; i < value.Len(); i++ { + child := NewXMLElement(xname) + current.AddChild(child) + if err := b.buildValue(value.Index(i), child, ""); err != nil { + return err + } + } + } else { + list := NewXMLElement(xname) + current.AddChild(list) + + for i := 0; i < value.Len(); i++ { + iname := tag.Get("locationNameList") + if iname == "" { + iname = "member" + } + + child := NewXMLElement(xml.Name{Local: iname}) + list.AddChild(child) + if err := b.buildValue(value.Index(i), child, ""); err != nil { + return err + } + } + } + + return nil +} + +// buildMap adds the value's key/value pairs to the current XMLNode as children nodes. All +// nested values in the map are converted to XMLNodes also. 
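+//
+// For example (illustrative), a non-flattened map {"colour": "red"} with the
+// default key/value names is built as:
+//
+//	<MapName><entry><key>colour</key><value>red</value></entry></MapName>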
+// +// Error will be returned if it is unable to build the map's values into XMLNodes +func (b *xmlBuilder) buildMap(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { + if value.IsNil() { // don't build omitted maps + return nil + } + + maproot := NewXMLElement(xml.Name{Local: tag.Get("locationName")}) + current.AddChild(maproot) + current = maproot + + kname, vname := "key", "value" + if n := tag.Get("locationNameKey"); n != "" { + kname = n + } + if n := tag.Get("locationNameValue"); n != "" { + vname = n + } + + // sorting is not required for compliance, but it makes testing easier + keys := make([]string, value.Len()) + for i, k := range value.MapKeys() { + keys[i] = k.String() + } + sort.Strings(keys) + + for _, k := range keys { + v := value.MapIndex(reflect.ValueOf(k)) + + mapcur := current + if tag.Get("flattened") == "" { // add "entry" tag to non-flat maps + child := NewXMLElement(xml.Name{Local: "entry"}) + mapcur.AddChild(child) + mapcur = child + } + + kchild := NewXMLElement(xml.Name{Local: kname}) + kchild.Text = k + vchild := NewXMLElement(xml.Name{Local: vname}) + mapcur.AddChild(kchild) + mapcur.AddChild(vchild) + + if err := b.buildValue(v, vchild, ""); err != nil { + return err + } + } + + return nil +} + +// buildScalar will convert the value into a string and append it as a attribute or child +// of the current XMLNode. +// +// The value will be added as an attribute if tag contains a "xmlAttribute" attribute value. +// +// Error will be returned if the value type is unsupported. +func (b *xmlBuilder) buildScalar(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { + var str string + switch converted := value.Interface().(type) { + case string: + str = converted + case []byte: + if !value.IsNil() { + str = base64.StdEncoding.EncodeToString(converted) + } + case bool: + str = strconv.FormatBool(converted) + case int64: + str = strconv.FormatInt(converted, 10) + case int: + str = strconv.Itoa(converted) + case float64: + str = strconv.FormatFloat(converted, 'f', -1, 64) + case float32: + str = strconv.FormatFloat(float64(converted), 'f', -1, 32) + case time.Time: + format := tag.Get("timestampFormat") + if len(format) == 0 { + format = protocol.ISO8601TimeFormatName + } + + str = protocol.FormatTime(format, converted) + default: + return fmt.Errorf("unsupported value for param %s: %v (%s)", + tag.Get("locationName"), value.Interface(), value.Type().Name()) + } + + xname := xml.Name{Local: tag.Get("locationName")} + if tag.Get("xmlAttribute") != "" { // put into current node's attribute list + attr := xml.Attr{Name: xname, Value: str} + current.Attr = append(current.Attr, attr) + } else { // regular text node + current.AddChild(&XMLNode{Name: xname, Text: str}) + } + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go new file mode 100644 index 000000000..7108d3800 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go @@ -0,0 +1,291 @@ +package xmlutil + +import ( + "bytes" + "encoding/base64" + "encoding/xml" + "fmt" + "io" + "reflect" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/private/protocol" +) + +// UnmarshalXMLError unmarshals the XML error from the stream into the value +// type specified. The value must be a pointer. 
If the message fails to
+// unmarshal, the message content will be included in the returned error as an
+// awserr.UnmarshalError.
+func UnmarshalXMLError(v interface{}, stream io.Reader) error {
+	var errBuf bytes.Buffer
+	body := io.TeeReader(stream, &errBuf)
+
+	err := xml.NewDecoder(body).Decode(v)
+	if err != nil && err != io.EOF {
+		return awserr.NewUnmarshalError(err,
+			"failed to unmarshal error message", errBuf.Bytes())
+	}
+
+	return nil
+}
+
+// UnmarshalXML deserializes an xml.Decoder into the container v. The container
+// needs to match the shape of the XML expected to be decoded; if the shape
+// doesn't match, unmarshaling will fail.
+func UnmarshalXML(v interface{}, d *xml.Decoder, wrapper string) error {
+	n, err := XMLToStruct(d, nil)
+	if err != nil {
+		return err
+	}
+	if n.Children != nil {
+		for _, root := range n.Children {
+			for _, c := range root {
+				if wrappedChild, ok := c.Children[wrapper]; ok {
+					c = wrappedChild[0] // pull out wrapped element
+				}
+
+				err = parse(reflect.ValueOf(v), c, "")
+				if err != nil {
+					if err == io.EOF {
+						return nil
+					}
+					return err
+				}
+			}
+		}
+		return nil
+	}
+	return nil
+}
+
+// parse deserializes any value from the XMLNode. The type tag is used to infer the type, or reflect
+// will be used to determine the type from r.
+func parse(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
+	rtype := r.Type()
+	if rtype.Kind() == reflect.Ptr {
+		rtype = rtype.Elem() // check kind of actual element type
+	}
+
+	t := tag.Get("type")
+	if t == "" {
+		switch rtype.Kind() {
+		case reflect.Struct:
+			// also it can't be a time object
+			if _, ok := r.Interface().(*time.Time); !ok {
+				t = "structure"
+			}
+		case reflect.Slice:
+			// also it can't be a byte slice
+			if _, ok := r.Interface().([]byte); !ok {
+				t = "list"
+			}
+		case reflect.Map:
+			t = "map"
+		}
+	}
+
+	switch t {
+	case "structure":
+		if field, ok := rtype.FieldByName("_"); ok {
+			tag = field.Tag
+		}
+		return parseStruct(r, node, tag)
+	case "list":
+		return parseList(r, node, tag)
+	case "map":
+		return parseMap(r, node, tag)
+	default:
+		return parseScalar(r, node, tag)
+	}
+}
+
+// parseStruct deserializes a structure and its fields from an XMLNode. Any nested
+// types in the structure will also be deserialized.
+func parseStruct(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
+	t := r.Type()
+	if r.Kind() == reflect.Ptr {
+		if r.IsNil() { // create the structure if it's nil
+			s := reflect.New(r.Type().Elem())
+			r.Set(s)
+			r = s
+		}
+
+		r = r.Elem()
+		t = t.Elem()
+	}
+
+	// unwrap any payloads
+	if payload := tag.Get("payload"); payload != "" {
+		field, _ := t.FieldByName(payload)
+		return parseStruct(r.FieldByName(payload), node, field.Tag)
+	}
+
+	for i := 0; i < t.NumField(); i++ {
+		field := t.Field(i)
+		if c := field.Name[0:1]; strings.ToLower(c) == c {
+			continue // ignore unexported fields
+		}
+
+		// figure out what this field is called
+		name := field.Name
+		if field.Tag.Get("flattened") != "" && field.Tag.Get("locationNameList") != "" {
+			name = field.Tag.Get("locationNameList")
+		} else if locName := field.Tag.Get("locationName"); locName != "" {
+			name = locName
+		}
+
+		// try to find the field by name in elements
+		elems := node.Children[name]
+
+		if elems == nil { // try to find the field in attributes
+			if val, ok := node.findElem(name); ok {
+				elems = []*XMLNode{{Text: val}}
+			}
+		}
+
+		member := r.FieldByName(field.Name)
+		for _, elem := range elems {
+			err := parse(member, elem, field.Tag)
+			if err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+// parseList deserializes a list of values from an XML node. Each list entry
+// will also be deserialized.
+func parseList(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
+	t := r.Type()
+
+	if tag.Get("flattened") == "" { // look at all item entries
+		mname := "member"
+		if name := tag.Get("locationNameList"); name != "" {
+			mname = name
+		}
+
+		if Children, ok := node.Children[mname]; ok {
+			if r.IsNil() {
+				r.Set(reflect.MakeSlice(t, len(Children), len(Children)))
+			}
+
+			for i, c := range Children {
+				err := parse(r.Index(i), c, "")
+				if err != nil {
+					return err
+				}
+			}
+		}
+	} else { // flattened list means this is a single element
+		if r.IsNil() {
+			r.Set(reflect.MakeSlice(t, 0, 0))
+		}
+
+		childR := reflect.Zero(t.Elem())
+		r.Set(reflect.Append(r, childR))
+		err := parse(r.Index(r.Len()-1), node, "")
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// parseMap deserializes a map from an XMLNode. The direct children of the XMLNode
+// will also be deserialized as map entries.
+func parseMap(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
+	if r.IsNil() {
+		r.Set(reflect.MakeMap(r.Type()))
+	}
+
+	if tag.Get("flattened") == "" { // look at all child entries
+		for _, entry := range node.Children["entry"] {
+			parseMapEntry(r, entry, tag)
+		}
+	} else { // this element is itself an entry
+		parseMapEntry(r, node, tag)
+	}
+
+	return nil
+}
+
+// parseMapEntry deserializes a map entry from an XML node.
+func parseMapEntry(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
+	kname, vname := "key", "value"
+	if n := tag.Get("locationNameKey"); n != "" {
+		kname = n
+	}
+	if n := tag.Get("locationNameValue"); n != "" {
+		vname = n
+	}
+
+	keys, ok := node.Children[kname]
+	values := node.Children[vname]
+	if ok {
+		for i, key := range keys {
+			keyR := reflect.ValueOf(key.Text)
+			value := values[i]
+			valueR := reflect.New(r.Type().Elem()).Elem()
+
+			parse(valueR, value, "")
+			r.SetMapIndex(keyR, valueR)
+		}
+	}
+	return nil
+}
+
+// parseScalar deserializes an XMLNode value into a concrete type based on the
+// interface type of r.
+//
+// Error is returned if the deserialization fails due to invalid type conversion,
+// or unsupported interface type.
+func parseScalar(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + switch r.Interface().(type) { + case *string: + r.Set(reflect.ValueOf(&node.Text)) + return nil + case []byte: + b, err := base64.StdEncoding.DecodeString(node.Text) + if err != nil { + return err + } + r.Set(reflect.ValueOf(b)) + case *bool: + v, err := strconv.ParseBool(node.Text) + if err != nil { + return err + } + r.Set(reflect.ValueOf(&v)) + case *int64: + v, err := strconv.ParseInt(node.Text, 10, 64) + if err != nil { + return err + } + r.Set(reflect.ValueOf(&v)) + case *float64: + v, err := strconv.ParseFloat(node.Text, 64) + if err != nil { + return err + } + r.Set(reflect.ValueOf(&v)) + case *time.Time: + format := tag.Get("timestampFormat") + if len(format) == 0 { + format = protocol.ISO8601TimeFormatName + } + + t, err := protocol.ParseTime(format, node.Text) + if err != nil { + return err + } + r.Set(reflect.ValueOf(&t)) + default: + return fmt.Errorf("unsupported value: %v (%s)", r.Interface(), r.Type()) + } + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go new file mode 100644 index 000000000..515ce1521 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go @@ -0,0 +1,148 @@ +package xmlutil + +import ( + "encoding/xml" + "fmt" + "io" + "sort" +) + +// A XMLNode contains the values to be encoded or decoded. +type XMLNode struct { + Name xml.Name `json:",omitempty"` + Children map[string][]*XMLNode `json:",omitempty"` + Text string `json:",omitempty"` + Attr []xml.Attr `json:",omitempty"` + + namespaces map[string]string + parent *XMLNode +} + +// NewXMLElement returns a pointer to a new XMLNode initialized to default values. +func NewXMLElement(name xml.Name) *XMLNode { + return &XMLNode{ + Name: name, + Children: map[string][]*XMLNode{}, + Attr: []xml.Attr{}, + } +} + +// AddChild adds child to the XMLNode. +func (n *XMLNode) AddChild(child *XMLNode) { + child.parent = n + if _, ok := n.Children[child.Name.Local]; !ok { + n.Children[child.Name.Local] = []*XMLNode{} + } + n.Children[child.Name.Local] = append(n.Children[child.Name.Local], child) +} + +// XMLToStruct converts a xml.Decoder stream to XMLNode with nested values. 
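+//
+// A minimal usage sketch (illustrative):
+//
+//	node, err := XMLToStruct(xml.NewDecoder(body), nil)
+//	// node.Children can then be walked, or parsed into a shape via parse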
+func XMLToStruct(d *xml.Decoder, s *xml.StartElement) (*XMLNode, error) { + out := &XMLNode{} + for { + tok, err := d.Token() + if err != nil { + if err == io.EOF { + break + } else { + return out, err + } + } + + if tok == nil { + break + } + + switch typed := tok.(type) { + case xml.CharData: + out.Text = string(typed.Copy()) + case xml.StartElement: + el := typed.Copy() + out.Attr = el.Attr + if out.Children == nil { + out.Children = map[string][]*XMLNode{} + } + + name := typed.Name.Local + slice := out.Children[name] + if slice == nil { + slice = []*XMLNode{} + } + node, e := XMLToStruct(d, &el) + out.findNamespaces() + if e != nil { + return out, e + } + node.Name = typed.Name + node.findNamespaces() + tempOut := *out + // Save into a temp variable, simply because out gets squashed during + // loop iterations + node.parent = &tempOut + slice = append(slice, node) + out.Children[name] = slice + case xml.EndElement: + if s != nil && s.Name.Local == typed.Name.Local { // matching end token + return out, nil + } + out = &XMLNode{} + } + } + return out, nil +} + +func (n *XMLNode) findNamespaces() { + ns := map[string]string{} + for _, a := range n.Attr { + if a.Name.Space == "xmlns" { + ns[a.Value] = a.Name.Local + } + } + + n.namespaces = ns +} + +func (n *XMLNode) findElem(name string) (string, bool) { + for node := n; node != nil; node = node.parent { + for _, a := range node.Attr { + namespace := a.Name.Space + if v, ok := node.namespaces[namespace]; ok { + namespace = v + } + if name == fmt.Sprintf("%s:%s", namespace, a.Name.Local) { + return a.Value, true + } + } + } + return "", false +} + +// StructToXML writes an XMLNode to a xml.Encoder as tokens. +func StructToXML(e *xml.Encoder, node *XMLNode, sorted bool) error { + e.EncodeToken(xml.StartElement{Name: node.Name, Attr: node.Attr}) + + if node.Text != "" { + e.EncodeToken(xml.CharData([]byte(node.Text))) + } else if sorted { + sortedNames := []string{} + for k := range node.Children { + sortedNames = append(sortedNames, k) + } + sort.Strings(sortedNames) + + for _, k := range sortedNames { + for _, v := range node.Children[k] { + StructToXML(e, v, sorted) + } + } + } else { + for _, c := range node.Children { + for _, v := range c { + StructToXML(e, v, sorted) + } + } + } + + e.EncodeToken(xml.EndElement{Name: node.Name}) + return e.Flush() +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go new file mode 100644 index 000000000..9e610591a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go @@ -0,0 +1,2580 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package sts + +import ( + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" +) + +const opAssumeRole = "AssumeRole" + +// AssumeRoleRequest generates a "aws/request.Request" representing the +// client's request for the AssumeRole operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See AssumeRole for more information on using the AssumeRole +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the AssumeRoleRequest method. +// req, resp := client.AssumeRoleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole +func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, output *AssumeRoleOutput) { + op := &request.Operation{ + Name: opAssumeRole, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AssumeRoleInput{} + } + + output = &AssumeRoleOutput{} + req = c.newRequest(op, input, output) + return +} + +// AssumeRole API operation for AWS Security Token Service. +// +// Returns a set of temporary security credentials that you can use to access +// AWS resources that you might not normally have access to. These temporary +// credentials consist of an access key ID, a secret access key, and a security +// token. Typically, you use AssumeRole within your account or for cross-account +// access. For a comparison of AssumeRole with other API operations that produce +// temporary credentials, see Requesting Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the AWS STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// in the IAM User Guide. +// +// You cannot use AWS account root user credentials to call AssumeRole. You +// must use credentials for an IAM user or an IAM role to call AssumeRole. +// +// For cross-account access, imagine that you own multiple accounts and need +// to access resources in each account. You could create long-term credentials +// in each account to access those resources. However, managing all those credentials +// and remembering which one can access which account can be time consuming. +// Instead, you can create one set of long-term credentials in one account. +// Then use temporary security credentials to access all the other accounts +// by assuming roles in those accounts. For more information about roles, see +// IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles.html) +// in the IAM User Guide. +// +// By default, the temporary security credentials created by AssumeRole last +// for one hour. However, you can use the optional DurationSeconds parameter +// to specify the duration of your session. You can provide a value from 900 +// seconds (15 minutes) up to the maximum session duration setting for the role. +// This setting can have a value from 1 hour to 12 hours. To learn how to view +// the maximum value for your role, see View the Maximum Session Duration Setting +// for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) +// in the IAM User Guide. The maximum session duration limit applies when you +// use the AssumeRole* API operations or the assume-role* CLI commands. However +// the limit does not apply when you use those operations to create a console +// URL. For more information, see Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) +// in the IAM User Guide. 
+//
+// The temporary security credentials created by AssumeRole can be used to make
+// API calls to any AWS service with the following exception: You cannot call
+// the AWS STS GetFederationToken or GetSessionToken API operations.
+//
+// (Optional) You can pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+// to this operation. You can pass a single JSON policy document to use as an
+// inline session policy. You can also specify up to 10 managed policies to
+// use as managed session policies. The plain text that you use for both inline
+// and managed session policies shouldn't exceed 2048 characters. Passing policies
+// to this operation returns new temporary credentials. The resulting session's
+// permissions are the intersection of the role's identity-based policy and
+// the session policies. You can use the role's temporary credentials in subsequent
+// AWS API calls to access resources in the account that owns the role. You
+// cannot use session policies to grant more permissions than those allowed
+// by the identity-based policy of the role that is being assumed. For more
+// information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+// in the IAM User Guide.
+//
+// To assume a role from a different account, your AWS account must be trusted
+// by the role. The trust relationship is defined in the role's trust policy
+// when the role is created. That trust policy states which accounts are allowed
+// to delegate that access to users in the account.
+//
+// A user who wants to access a role in a different account must also have permissions
+// that are delegated from the user account administrator. The administrator
+// must attach a policy that allows the user to call AssumeRole for the ARN
+// of the role in the other account. If the user is in the same account as the
+// role, then you can do either of the following:
+//
+// * Attach a policy to the user (identical to the previous user in a different
+// account).
+//
+// * Add the user as a principal directly in the role's trust policy.
+//
+// In this case, the trust policy acts as an IAM resource-based policy. Users
+// in the same account as the role do not need explicit permission to assume
+// the role. For more information about trust policies and resource-based policies,
+// see IAM Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html)
+// in the IAM User Guide.
+//
+// Using MFA with AssumeRole
+//
+// (Optional) You can include multi-factor authentication (MFA) information
+// when you call AssumeRole. This is useful for cross-account scenarios to ensure
+// that the user that assumes the role has been authenticated with an AWS MFA
+// device. In that scenario, the trust policy of the role being assumed includes
+// a condition that tests for MFA authentication. If the caller does not include
+// valid MFA information, the request to assume the role is denied. The condition
+// in a trust policy that tests for MFA authentication might look like the following
+// example.
+//
+// "Condition": {"Bool": {"aws:MultiFactorAuthPresent": true}}
+//
+// For more information, see Configuring MFA-Protected API Access (https://docs.aws.amazon.com/IAM/latest/UserGuide/MFAProtectedAPI.html)
+// in the IAM User Guide.
+//
+// To use MFA with AssumeRole, you pass values for the SerialNumber and TokenCode
+// parameters.
The SerialNumber value identifies the user's hardware or virtual
+// MFA device. The TokenCode is the time-based one-time password (TOTP) that
+// the MFA device produces.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Security Token Service's
+// API operation AssumeRole for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument"
+// The request was rejected because the policy document was malformed. The error
+// message describes the specific error.
+//
+// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge"
+// The request was rejected because the policy document was too large. The error
+// message describes how big the policy document is, in packed form, as a percentage
+// of what the API allows.
+//
+// * ErrCodeRegionDisabledException "RegionDisabledException"
+// STS is not activated in the requested region for the account that is being
+// asked to generate credentials. The account administrator must use the IAM
+// console to activate STS in that region. For more information, see Activating
+// and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+// in the IAM User Guide.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole
+func (c *STS) AssumeRole(input *AssumeRoleInput) (*AssumeRoleOutput, error) {
+ req, out := c.AssumeRoleRequest(input)
+ return out, req.Send()
+}
+
+// AssumeRoleWithContext is the same as AssumeRole with the addition of
+// the ability to pass a context and additional request options.
+//
+// See AssumeRole for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *STS) AssumeRoleWithContext(ctx aws.Context, input *AssumeRoleInput, opts ...request.Option) (*AssumeRoleOutput, error) {
+ req, out := c.AssumeRoleRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opAssumeRoleWithSAML = "AssumeRoleWithSAML"
+
+// AssumeRoleWithSAMLRequest generates a "aws/request.Request" representing the
+// client's request for the AssumeRoleWithSAML operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See AssumeRoleWithSAML for more information on using the AssumeRoleWithSAML
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the AssumeRoleWithSAMLRequest method.
+// req, resp := client.AssumeRoleWithSAMLRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML
+func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *request.Request, output *AssumeRoleWithSAMLOutput) {
+ op := &request.Operation{
+ Name: opAssumeRoleWithSAML,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &AssumeRoleWithSAMLInput{}
+ }
+
+ output = &AssumeRoleWithSAMLOutput{}
+ req = c.newRequest(op, input, output)
+ req.Config.Credentials = credentials.AnonymousCredentials
+ return
+}
+
+// AssumeRoleWithSAML API operation for AWS Security Token Service.
+//
+// Returns a set of temporary security credentials for users who have been authenticated
+// via a SAML authentication response. This operation provides a mechanism for
+// tying an enterprise identity store or directory to role-based AWS access
+// without user-specific credentials or configuration. For a comparison of AssumeRoleWithSAML
+// with the other API operations that produce temporary credentials, see Requesting
+// Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
+// and Comparing the AWS STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
+// in the IAM User Guide.
+//
+// The temporary security credentials returned by this operation consist of
+// an access key ID, a secret access key, and a security token. Applications
+// can use these temporary security credentials to sign calls to AWS services.
+//
+// By default, the temporary security credentials created by AssumeRoleWithSAML
+// last for one hour. However, you can use the optional DurationSeconds parameter
+// to specify the duration of your session. Your role session lasts for the
+// duration that you specify, or until the time specified in the SAML authentication
+// response's SessionNotOnOrAfter value, whichever is shorter. You can provide
+// a DurationSeconds value from 900 seconds (15 minutes) up to the maximum session
+// duration setting for the role. This setting can have a value from 1 hour
+// to 12 hours. To learn how to view the maximum value for your role, see View
+// the Maximum Session Duration Setting for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
+// in the IAM User Guide. The maximum session duration limit applies when you
+// use the AssumeRole* API operations or the assume-role* CLI commands. However,
+// the limit does not apply when you use those operations to create a console
+// URL. For more information, see Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html)
+// in the IAM User Guide.
+//
+// The temporary security credentials created by AssumeRoleWithSAML can be used
+// to make API calls to any AWS service with the following exception: you cannot
+// call the STS GetFederationToken or GetSessionToken API operations.
+//
+// (Optional) You can pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+// to this operation. You can pass a single JSON policy document to use as an
+// inline session policy. You can also specify up to 10 managed policies to
+// use as managed session policies.
The plain text that you use for both inline +// and managed session policies shouldn't exceed 2048 characters. Passing policies +// to this operation returns new temporary credentials. The resulting session's +// permissions are the intersection of the role's identity-based policy and +// the session policies. You can use the role's temporary credentials in subsequent +// AWS API calls to access resources in the account that owns the role. You +// cannot use session policies to grant more permissions than those allowed +// by the identity-based policy of the role that is being assumed. For more +// information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// in the IAM User Guide. +// +// Before your application can call AssumeRoleWithSAML, you must configure your +// SAML identity provider (IdP) to issue the claims required by AWS. Additionally, +// you must use AWS Identity and Access Management (IAM) to create a SAML provider +// entity in your AWS account that represents your identity provider. You must +// also create an IAM role that specifies this SAML provider in its trust policy. +// +// Calling AssumeRoleWithSAML does not require the use of AWS security credentials. +// The identity of the caller is validated by using keys in the metadata document +// that is uploaded for the SAML provider entity for your identity provider. +// +// Calling AssumeRoleWithSAML can result in an entry in your AWS CloudTrail +// logs. The entry includes the value in the NameID element of the SAML assertion. +// We recommend that you use a NameIDType that is not associated with any personally +// identifiable information (PII). For example, you could instead use the Persistent +// Identifier (urn:oasis:names:tc:SAML:2.0:nameid-format:persistent). +// +// For more information, see the following resources: +// +// * About SAML 2.0-based Federation (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html) +// in the IAM User Guide. +// +// * Creating SAML Identity Providers (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml.html) +// in the IAM User Guide. +// +// * Configuring a Relying Party and Claims (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml_relying-party.html) +// in the IAM User Guide. +// +// * Creating a Role for SAML 2.0 Federation (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-idp_saml.html) +// in the IAM User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Security Token Service's +// API operation AssumeRoleWithSAML for usage and error information. +// +// Returned Error Codes: +// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" +// The request was rejected because the policy document was malformed. The error +// message describes the specific error. +// +// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" +// The request was rejected because the policy document was too large. The error +// message describes how big the policy document is, in packed form, as a percentage +// of what the API allows. +// +// * ErrCodeIDPRejectedClaimException "IDPRejectedClaim" +// The identity provider (IdP) reported that authentication failed. This might +// be because the claim is invalid. 
+//
+// If this error is returned for the AssumeRoleWithWebIdentity operation, it
+// can also mean that the claim has expired or has been explicitly revoked.
+//
+// * ErrCodeInvalidIdentityTokenException "InvalidIdentityToken"
+// The web identity token that was passed could not be validated by AWS. Get
+// a new identity token from the identity provider and then retry the request.
+//
+// * ErrCodeExpiredTokenException "ExpiredTokenException"
+// The web identity token that was passed is expired or is not valid. Get a
+// new identity token from the identity provider and then retry the request.
+//
+// * ErrCodeRegionDisabledException "RegionDisabledException"
+// STS is not activated in the requested region for the account that is being
+// asked to generate credentials. The account administrator must use the IAM
+// console to activate STS in that region. For more information, see Activating
+// and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+// in the IAM User Guide.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML
+func (c *STS) AssumeRoleWithSAML(input *AssumeRoleWithSAMLInput) (*AssumeRoleWithSAMLOutput, error) {
+ req, out := c.AssumeRoleWithSAMLRequest(input)
+ return out, req.Send()
+}
+
+// AssumeRoleWithSAMLWithContext is the same as AssumeRoleWithSAML with the addition of
+// the ability to pass a context and additional request options.
+//
+// See AssumeRoleWithSAML for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *STS) AssumeRoleWithSAMLWithContext(ctx aws.Context, input *AssumeRoleWithSAMLInput, opts ...request.Option) (*AssumeRoleWithSAMLOutput, error) {
+ req, out := c.AssumeRoleWithSAMLRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opAssumeRoleWithWebIdentity = "AssumeRoleWithWebIdentity"
+
+// AssumeRoleWithWebIdentityRequest generates a "aws/request.Request" representing the
+// client's request for the AssumeRoleWithWebIdentity operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See AssumeRoleWithWebIdentity for more information on using the AssumeRoleWithWebIdentity
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the AssumeRoleWithWebIdentityRequest method.
+// req, resp := client.AssumeRoleWithWebIdentityRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity +func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityInput) (req *request.Request, output *AssumeRoleWithWebIdentityOutput) { + op := &request.Operation{ + Name: opAssumeRoleWithWebIdentity, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AssumeRoleWithWebIdentityInput{} + } + + output = &AssumeRoleWithWebIdentityOutput{} + req = c.newRequest(op, input, output) + req.Config.Credentials = credentials.AnonymousCredentials + return +} + +// AssumeRoleWithWebIdentity API operation for AWS Security Token Service. +// +// Returns a set of temporary security credentials for users who have been authenticated +// in a mobile or web application with a web identity provider. Example providers +// include Amazon Cognito, Login with Amazon, Facebook, Google, or any OpenID +// Connect-compatible identity provider. +// +// For mobile applications, we recommend that you use Amazon Cognito. You can +// use Amazon Cognito with the AWS SDK for iOS Developer Guide (http://aws.amazon.com/sdkforios/) +// and the AWS SDK for Android Developer Guide (http://aws.amazon.com/sdkforandroid/) +// to uniquely identify a user. You can also supply the user with a consistent +// identity throughout the lifetime of an application. +// +// To learn more about Amazon Cognito, see Amazon Cognito Overview (https://docs.aws.amazon.com/mobile/sdkforandroid/developerguide/cognito-auth.html#d0e840) +// in AWS SDK for Android Developer Guide and Amazon Cognito Overview (https://docs.aws.amazon.com/mobile/sdkforios/developerguide/cognito-auth.html#d0e664) +// in the AWS SDK for iOS Developer Guide. +// +// Calling AssumeRoleWithWebIdentity does not require the use of AWS security +// credentials. Therefore, you can distribute an application (for example, on +// mobile devices) that requests temporary security credentials without including +// long-term AWS credentials in the application. You also don't need to deploy +// server-based proxy services that use long-term AWS credentials. Instead, +// the identity of the caller is validated by using a token from the web identity +// provider. For a comparison of AssumeRoleWithWebIdentity with the other API +// operations that produce temporary credentials, see Requesting Temporary Security +// Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the AWS STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// in the IAM User Guide. +// +// The temporary security credentials returned by this API consist of an access +// key ID, a secret access key, and a security token. Applications can use these +// temporary security credentials to sign calls to AWS service API operations. +// +// By default, the temporary security credentials created by AssumeRoleWithWebIdentity +// last for one hour. However, you can use the optional DurationSeconds parameter +// to specify the duration of your session. You can provide a value from 900 +// seconds (15 minutes) up to the maximum session duration setting for the role. +// This setting can have a value from 1 hour to 12 hours. 
To learn how to view
+// the maximum value for your role, see View the Maximum Session Duration Setting
+// for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
+// in the IAM User Guide. The maximum session duration limit applies when you
+// use the AssumeRole* API operations or the assume-role* CLI commands. However,
+// the limit does not apply when you use those operations to create a console
+// URL. For more information, see Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html)
+// in the IAM User Guide.
+//
+// The temporary security credentials created by AssumeRoleWithWebIdentity can
+// be used to make API calls to any AWS service with the following exception:
+// you cannot call the STS GetFederationToken or GetSessionToken API operations.
+//
+// (Optional) You can pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+// to this operation. You can pass a single JSON policy document to use as an
+// inline session policy. You can also specify up to 10 managed policies to
+// use as managed session policies. The plain text that you use for both inline
+// and managed session policies shouldn't exceed 2048 characters. Passing policies
+// to this operation returns new temporary credentials. The resulting session's
+// permissions are the intersection of the role's identity-based policy and
+// the session policies. You can use the role's temporary credentials in subsequent
+// AWS API calls to access resources in the account that owns the role. You
+// cannot use session policies to grant more permissions than those allowed
+// by the identity-based policy of the role that is being assumed. For more
+// information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+// in the IAM User Guide.
+//
+// Before your application can call AssumeRoleWithWebIdentity, you must have
+// an identity token from a supported identity provider and create a role that
+// the application can assume. The role that your application assumes must trust
+// the identity provider that is associated with the identity token. In other
+// words, the identity provider must be specified in the role's trust policy.
+//
+// Calling AssumeRoleWithWebIdentity can result in an entry in your AWS CloudTrail
+// logs. The entry includes the Subject (http://openid.net/specs/openid-connect-core-1_0.html#Claims)
+// of the provided Web Identity Token. We recommend that you avoid using any
+// personally identifiable information (PII) in this field. For example, you
+// could instead use a GUID or a pairwise identifier, as suggested in the OIDC
+// specification (http://openid.net/specs/openid-connect-core-1_0.html#SubjectIDTypes).
+//
+// For more information about how to use web identity federation and the AssumeRoleWithWebIdentity
+// API, see the following resources:
+//
+// * Using Web Identity Federation API Operations for Mobile Apps (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc_manual.html)
+// and Federation Through a Web-based Identity Provider (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity).
+//
+// * Web Identity Federation Playground (https://web-identity-federation-playground.s3.amazonaws.com/index.html).
+// Walk through the process of authenticating through Login with Amazon, +// Facebook, or Google, getting temporary security credentials, and then +// using those credentials to make a request to AWS. +// +// * AWS SDK for iOS Developer Guide (http://aws.amazon.com/sdkforios/) and +// AWS SDK for Android Developer Guide (http://aws.amazon.com/sdkforandroid/). +// These toolkits contain sample apps that show how to invoke the identity +// providers, and then how to use the information from these providers to +// get and use temporary security credentials. +// +// * Web Identity Federation with Mobile Applications (http://aws.amazon.com/articles/web-identity-federation-with-mobile-applications). +// This article discusses web identity federation and shows an example of +// how to use web identity federation to get access to content in Amazon +// S3. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Security Token Service's +// API operation AssumeRoleWithWebIdentity for usage and error information. +// +// Returned Error Codes: +// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" +// The request was rejected because the policy document was malformed. The error +// message describes the specific error. +// +// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" +// The request was rejected because the policy document was too large. The error +// message describes how big the policy document is, in packed form, as a percentage +// of what the API allows. +// +// * ErrCodeIDPRejectedClaimException "IDPRejectedClaim" +// The identity provider (IdP) reported that authentication failed. This might +// be because the claim is invalid. +// +// If this error is returned for the AssumeRoleWithWebIdentity operation, it +// can also mean that the claim has expired or has been explicitly revoked. +// +// * ErrCodeIDPCommunicationErrorException "IDPCommunicationError" +// The request could not be fulfilled because the non-AWS identity provider +// (IDP) that was asked to verify the incoming identity token could not be reached. +// This is often a transient error caused by network conditions. Retry the request +// a limited number of times so that you don't exceed the request rate. If the +// error persists, the non-AWS identity provider might be down or not responding. +// +// * ErrCodeInvalidIdentityTokenException "InvalidIdentityToken" +// The web identity token that was passed could not be validated by AWS. Get +// a new identity token from the identity provider and then retry the request. +// +// * ErrCodeExpiredTokenException "ExpiredTokenException" +// The web identity token that was passed is expired or is not valid. Get a +// new identity token from the identity provider and then retry the request. +// +// * ErrCodeRegionDisabledException "RegionDisabledException" +// STS is not activated in the requested region for the account that is being +// asked to generate credentials. The account administrator must use the IAM +// console to activate STS in that region. For more information, see Activating +// and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. 
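+//
+// As an illustrative, non-authoritative sketch of handling these error codes
+// from a caller's perspective ("svc" and "input" are placeholders):
+//
+// out, err := svc.AssumeRoleWithWebIdentity(input)
+// if err != nil {
+// if aerr, ok := err.(awserr.Error); ok {
+// switch aerr.Code() {
+// case sts.ErrCodeExpiredTokenException, sts.ErrCodeInvalidIdentityTokenException:
+// // Obtain a fresh token from the identity provider and retry.
+// case sts.ErrCodeIDPCommunicationErrorException:
+// // Often transient; retry a limited number of times.
+// }
+// }
+// }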
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity
+func (c *STS) AssumeRoleWithWebIdentity(input *AssumeRoleWithWebIdentityInput) (*AssumeRoleWithWebIdentityOutput, error) {
+ req, out := c.AssumeRoleWithWebIdentityRequest(input)
+ return out, req.Send()
+}
+
+// AssumeRoleWithWebIdentityWithContext is the same as AssumeRoleWithWebIdentity with the addition of
+// the ability to pass a context and additional request options.
+//
+// See AssumeRoleWithWebIdentity for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *STS) AssumeRoleWithWebIdentityWithContext(ctx aws.Context, input *AssumeRoleWithWebIdentityInput, opts ...request.Option) (*AssumeRoleWithWebIdentityOutput, error) {
+ req, out := c.AssumeRoleWithWebIdentityRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opDecodeAuthorizationMessage = "DecodeAuthorizationMessage"
+
+// DecodeAuthorizationMessageRequest generates a "aws/request.Request" representing the
+// client's request for the DecodeAuthorizationMessage operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See DecodeAuthorizationMessage for more information on using the DecodeAuthorizationMessage
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the DecodeAuthorizationMessageRequest method.
+// req, resp := client.DecodeAuthorizationMessageRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessage
+func (c *STS) DecodeAuthorizationMessageRequest(input *DecodeAuthorizationMessageInput) (req *request.Request, output *DecodeAuthorizationMessageOutput) {
+ op := &request.Operation{
+ Name: opDecodeAuthorizationMessage,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &DecodeAuthorizationMessageInput{}
+ }
+
+ output = &DecodeAuthorizationMessageOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// DecodeAuthorizationMessage API operation for AWS Security Token Service.
+//
+// Decodes additional information about the authorization status of a request
+// from an encoded message returned in response to an AWS request.
+//
+// For example, if a user is not authorized to perform an operation that he
+// or she has requested, the request returns a Client.UnauthorizedOperation
+// response (an HTTP 403 response). Some AWS operations additionally return
+// an encoded message that can provide details about this authorization failure.
+//
+// Only certain AWS operations return an encoded authorization message. The
+// documentation for an individual operation indicates whether that operation
+// returns an encoded message in addition to returning an HTTP code.
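+//
+// For illustration only (assuming "encodedMsg" holds such an encoded message;
+// "svc" is a placeholder for a configured STS client):
+//
+// out, err := svc.DecodeAuthorizationMessage(&sts.DecodeAuthorizationMessageInput{
+// EncodedMessage: aws.String(encodedMsg),
+// })
+// if err == nil {
+// fmt.Println(aws.StringValue(out.DecodedMessage))
+// }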
+//
+// The message is encoded because the details of the authorization status can
+// constitute privileged information that the user who requested the operation
+// should not see. To decode an authorization status message, a user must be
+// granted permissions via an IAM policy to request the DecodeAuthorizationMessage
+// (sts:DecodeAuthorizationMessage) action.
+//
+// The decoded message includes the following type of information:
+//
+// * Whether the request was denied due to an explicit deny or due to the
+// absence of an explicit allow. For more information, see Determining Whether
+// a Request is Allowed or Denied (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_evaluation-logic.html#policy-eval-denyallow)
+// in the IAM User Guide.
+//
+// * The principal who made the request.
+//
+// * The requested action.
+//
+// * The requested resource.
+//
+// * The values of condition keys in the context of the user's request.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Security Token Service's
+// API operation DecodeAuthorizationMessage for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeInvalidAuthorizationMessageException "InvalidAuthorizationMessageException"
+// The error returned if the message passed to DecodeAuthorizationMessage was
+// invalid. This can happen if the token contains invalid characters, such as
+// linebreaks.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessage
+func (c *STS) DecodeAuthorizationMessage(input *DecodeAuthorizationMessageInput) (*DecodeAuthorizationMessageOutput, error) {
+ req, out := c.DecodeAuthorizationMessageRequest(input)
+ return out, req.Send()
+}
+
+// DecodeAuthorizationMessageWithContext is the same as DecodeAuthorizationMessage with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DecodeAuthorizationMessage for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *STS) DecodeAuthorizationMessageWithContext(ctx aws.Context, input *DecodeAuthorizationMessageInput, opts ...request.Option) (*DecodeAuthorizationMessageOutput, error) {
+ req, out := c.DecodeAuthorizationMessageRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opGetCallerIdentity = "GetCallerIdentity"
+
+// GetCallerIdentityRequest generates a "aws/request.Request" representing the
+// client's request for the GetCallerIdentity operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See GetCallerIdentity for more information on using the GetCallerIdentity
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
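+//
+// For instance, a custom build handler could be attached before sending (an
+// illustrative sketch; the header name is a placeholder):
+//
+// req, resp := client.GetCallerIdentityRequest(params)
+// req.Handlers.Build.PushBack(func(r *request.Request) {
+// r.HTTPRequest.Header.Set("X-Example-Header", "example")
+// })
+// err := req.Send()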
+//
+//
+// // Example sending a request using the GetCallerIdentityRequest method.
+// req, resp := client.GetCallerIdentityRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentity
+func (c *STS) GetCallerIdentityRequest(input *GetCallerIdentityInput) (req *request.Request, output *GetCallerIdentityOutput) {
+ op := &request.Operation{
+ Name: opGetCallerIdentity,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &GetCallerIdentityInput{}
+ }
+
+ output = &GetCallerIdentityOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetCallerIdentity API operation for AWS Security Token Service.
+//
+// Returns details about the IAM identity whose credentials are used to call
+// the API.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Security Token Service's
+// API operation GetCallerIdentity for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentity
+func (c *STS) GetCallerIdentity(input *GetCallerIdentityInput) (*GetCallerIdentityOutput, error) {
+ req, out := c.GetCallerIdentityRequest(input)
+ return out, req.Send()
+}
+
+// GetCallerIdentityWithContext is the same as GetCallerIdentity with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetCallerIdentity for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *STS) GetCallerIdentityWithContext(ctx aws.Context, input *GetCallerIdentityInput, opts ...request.Option) (*GetCallerIdentityOutput, error) {
+ req, out := c.GetCallerIdentityRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opGetFederationToken = "GetFederationToken"
+
+// GetFederationTokenRequest generates a "aws/request.Request" representing the
+// client's request for the GetFederationToken operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See GetFederationToken for more information on using the GetFederationToken
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the GetFederationTokenRequest method.
+// req, resp := client.GetFederationTokenRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken +func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *request.Request, output *GetFederationTokenOutput) { + op := &request.Operation{ + Name: opGetFederationToken, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetFederationTokenInput{} + } + + output = &GetFederationTokenOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetFederationToken API operation for AWS Security Token Service. +// +// Returns a set of temporary security credentials (consisting of an access +// key ID, a secret access key, and a security token) for a federated user. +// A typical use is in a proxy application that gets temporary security credentials +// on behalf of distributed applications inside a corporate network. You must +// call the GetFederationToken operation using the long-term security credentials +// of an IAM user. As a result, this call is appropriate in contexts where those +// credentials can be safely stored, usually in a server-based application. +// For a comparison of GetFederationToken with the other API operations that +// produce temporary credentials, see Requesting Temporary Security Credentials +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the AWS STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// in the IAM User Guide. +// +// You can create a mobile-based or browser-based app that can authenticate +// users using a web identity provider like Login with Amazon, Facebook, Google, +// or an OpenID Connect-compatible identity provider. In this case, we recommend +// that you use Amazon Cognito (http://aws.amazon.com/cognito/) or AssumeRoleWithWebIdentity. +// For more information, see Federation Through a Web-based Identity Provider +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity). +// +// You can also call GetFederationToken using the security credentials of an +// AWS account root user, but we do not recommend it. Instead, we recommend +// that you create an IAM user for the purpose of the proxy application. Then +// attach a policy to the IAM user that limits federated users to only the actions +// and resources that they need to access. For more information, see IAM Best +// Practices (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html) +// in the IAM User Guide. +// +// The temporary credentials are valid for the specified duration, from 900 +// seconds (15 minutes) up to a maximum of 129,600 seconds (36 hours). The default +// is 43,200 seconds (12 hours). Temporary credentials that are obtained by +// using AWS account root user credentials have a maximum duration of 3,600 +// seconds (1 hour). +// +// The temporary security credentials created by GetFederationToken can be used +// to make API calls to any AWS service with the following exceptions: +// +// * You cannot use these credentials to call any IAM API operations. +// +// * You cannot call any STS API operations except GetCallerIdentity. 
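+//
+// As a non-authoritative sketch, a proxy application might federate a user
+// like this ("svc", the user name, and "sessionPolicyJSON" are placeholders;
+// see the Permissions discussion below):
+//
+// out, err := svc.GetFederationToken(&sts.GetFederationTokenInput{
+// Name: aws.String("example-federated-user"),
+// Policy: aws.String(sessionPolicyJSON), // inline session policy
+// DurationSeconds: aws.Int64(43200),
+// })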
+// +// Permissions +// +// You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// to this operation. You can pass a single JSON policy document to use as an +// inline session policy. You can also specify up to 10 managed policies to +// use as managed session policies. The plain text that you use for both inline +// and managed session policies shouldn't exceed 2048 characters. +// +// Though the session policy parameters are optional, if you do not pass a policy, +// then the resulting federated user session has no permissions. The only exception +// is when the credentials are used to access a resource that has a resource-based +// policy that specifically references the federated user session in the Principal +// element of the policy. When you pass session policies, the session permissions +// are the intersection of the IAM user policies and the session policies that +// you pass. This gives you a way to further restrict the permissions for a +// federated user. You cannot use session policies to grant more permissions +// than those that are defined in the permissions policy of the IAM user. For +// more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// in the IAM User Guide. For information about using GetFederationToken to +// create temporary security credentials, see GetFederationToken—Federation +// Through a Custom Identity Broker (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getfederationtoken). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Security Token Service's +// API operation GetFederationToken for usage and error information. +// +// Returned Error Codes: +// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" +// The request was rejected because the policy document was malformed. The error +// message describes the specific error. +// +// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" +// The request was rejected because the policy document was too large. The error +// message describes how big the policy document is, in packed form, as a percentage +// of what the API allows. +// +// * ErrCodeRegionDisabledException "RegionDisabledException" +// STS is not activated in the requested region for the account that is being +// asked to generate credentials. The account administrator must use the IAM +// console to activate STS in that region. For more information, see Activating +// and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken +func (c *STS) GetFederationToken(input *GetFederationTokenInput) (*GetFederationTokenOutput, error) { + req, out := c.GetFederationTokenRequest(input) + return out, req.Send() +} + +// GetFederationTokenWithContext is the same as GetFederationToken with the addition of +// the ability to pass a context and additional request options. +// +// See GetFederationToken for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. 
If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *STS) GetFederationTokenWithContext(ctx aws.Context, input *GetFederationTokenInput, opts ...request.Option) (*GetFederationTokenOutput, error) {
+ req, out := c.GetFederationTokenRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opGetSessionToken = "GetSessionToken"
+
+// GetSessionTokenRequest generates a "aws/request.Request" representing the
+// client's request for the GetSessionToken operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See GetSessionToken for more information on using the GetSessionToken
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the GetSessionTokenRequest method.
+// req, resp := client.GetSessionTokenRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken
+func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request.Request, output *GetSessionTokenOutput) {
+ op := &request.Operation{
+ Name: opGetSessionToken,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &GetSessionTokenInput{}
+ }
+
+ output = &GetSessionTokenOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetSessionToken API operation for AWS Security Token Service.
+//
+// Returns a set of temporary credentials for an AWS account or IAM user. The
+// credentials consist of an access key ID, a secret access key, and a security
+// token. Typically, you use GetSessionToken if you want to use MFA to protect
+// programmatic calls to specific AWS API operations like Amazon EC2 StopInstances.
+// MFA-enabled IAM users would need to call GetSessionToken and submit an MFA
+// code that is associated with their MFA device. Using the temporary security
+// credentials that are returned from the call, IAM users can then make programmatic
+// calls to API operations that require MFA authentication. If you do not supply
+// a correct MFA code, then the API returns an access denied error. For a comparison
+// of GetSessionToken with the other API operations that produce temporary credentials,
+// see Requesting Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
+// and Comparing the AWS STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
+// in the IAM User Guide.
+//
+// The GetSessionToken operation must be called by using the long-term AWS security
+// credentials of the AWS account root user or an IAM user. Credentials that
+// are created by IAM users are valid for the duration that you specify. This
+// duration can range from 900 seconds (15 minutes) up to a maximum of 129,600
+// seconds (36 hours), with a default of 43,200 seconds (12 hours).
Credentials +// based on account credentials can range from 900 seconds (15 minutes) up to +// 3,600 seconds (1 hour), with a default of 1 hour. +// +// The temporary security credentials created by GetSessionToken can be used +// to make API calls to any AWS service with the following exceptions: +// +// * You cannot call any IAM API operations unless MFA authentication information +// is included in the request. +// +// * You cannot call any STS API except AssumeRole or GetCallerIdentity. +// +// We recommend that you do not call GetSessionToken with AWS account root user +// credentials. Instead, follow our best practices (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#create-iam-users) +// by creating one or more IAM users, giving them the necessary permissions, +// and using IAM users for everyday interaction with AWS. +// +// The credentials that are returned by GetSessionToken are based on permissions +// associated with the user whose credentials were used to call the operation. +// If GetSessionToken is called using AWS account root user credentials, the +// temporary credentials have root user permissions. Similarly, if GetSessionToken +// is called using the credentials of an IAM user, the temporary credentials +// have the same permissions as the IAM user. +// +// For more information about using GetSessionToken to create temporary credentials, +// go to Temporary Credentials for Users in Untrusted Environments (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getsessiontoken) +// in the IAM User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Security Token Service's +// API operation GetSessionToken for usage and error information. +// +// Returned Error Codes: +// * ErrCodeRegionDisabledException "RegionDisabledException" +// STS is not activated in the requested region for the account that is being +// asked to generate credentials. The account administrator must use the IAM +// console to activate STS in that region. For more information, see Activating +// and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken +func (c *STS) GetSessionToken(input *GetSessionTokenInput) (*GetSessionTokenOutput, error) { + req, out := c.GetSessionTokenRequest(input) + return out, req.Send() +} + +// GetSessionTokenWithContext is the same as GetSessionToken with the addition of +// the ability to pass a context and additional request options. +// +// See GetSessionToken for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *STS) GetSessionTokenWithContext(ctx aws.Context, input *GetSessionTokenInput, opts ...request.Option) (*GetSessionTokenOutput, error) { + req, out := c.GetSessionTokenRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +type AssumeRoleInput struct { + _ struct{} `type:"structure"` + + // The duration, in seconds, of the role session. The value can range from 900 + // seconds (15 minutes) up to the maximum session duration setting for the role. + // This setting can have a value from 1 hour to 12 hours. If you specify a value + // higher than this setting, the operation fails. For example, if you specify + // a session duration of 12 hours, but your administrator set the maximum session + // duration to 6 hours, your operation fails. To learn how to view the maximum + // value for your role, see View the Maximum Session Duration Setting for a + // Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) + // in the IAM User Guide. + // + // By default, the value is set to 3600 seconds. + // + // The DurationSeconds parameter is separate from the duration of a console + // session that you might request using the returned credentials. The request + // to the federation endpoint for a console sign-in token takes a SessionDuration + // parameter that specifies the maximum length of the console session. For more + // information, see Creating a URL that Enables Federated Users to Access the + // AWS Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) + // in the IAM User Guide. + DurationSeconds *int64 `min:"900" type:"integer"` + + // A unique identifier that might be required when you assume a role in another + // account. If the administrator of the account to which the role belongs provided + // you with an external ID, then provide that value in the ExternalId parameter. + // This value can be any string, such as a passphrase or account number. A cross-account + // role is usually set up to trust everyone in an account. Therefore, the administrator + // of the trusting account might send an external ID to the administrator of + // the trusted account. That way, only someone with the ID can assume the role, + // rather than everyone in the account. For more information about the external + // ID, see How to Use an External ID When Granting Access to Your AWS Resources + // to a Third Party (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html) + // in the IAM User Guide. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can + // also include underscores or any of the following characters: =,.@:/- + ExternalId *string `min:"2" type:"string"` + + // An IAM policy in JSON format that you want to use as an inline session policy. + // + // This parameter is optional. Passing policies to this operation returns new + // temporary credentials. The resulting session's permissions are the intersection + // of the role's identity-based policy and the session policies. You can use + // the role's temporary credentials in subsequent AWS API calls to access resources + // in the account that owns the role. You cannot use session policies to grant + // more permissions than those allowed by the identity-based policy of the role + // that is being assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. 
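 //
 // Purely as an illustrative placeholder (not a recommended policy), an inline
 // session policy might be supplied as:
 //
 // Policy: aws.String(`{"Version":"2012-10-17","Statement":[{"Effect":"Allow",
 // "Action":"s3:GetObject","Resource":"*"}]}`),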
+ // + // The plain text that you use for both inline and managed session policies + // shouldn't exceed 2048 characters. The JSON policy characters can be any ASCII + // character from the space character to the end of the valid character list + // (\u0020 through \u00FF). It can also include the tab (\u0009), linefeed (\u000A), + // and carriage return (\u000D) characters. + // + // The characters in this parameter count towards the 2048 character session + // policy guideline. However, an AWS conversion compresses the session policies + // into a packed binary format that has a separate limit. This is the enforced + // limit. The PackedPolicySize response element indicates by percentage how + // close the policy is to the upper size limit. + Policy *string `min:"1" type:"string"` + + // The Amazon Resource Names (ARNs) of the IAM managed policies that you want + // to use as managed session policies. The policies must exist in the same account + // as the role. + // + // This parameter is optional. You can provide up to 10 managed policy ARNs. + // However, the plain text that you use for both inline and managed session + // policies shouldn't exceed 2048 characters. For more information about ARNs, + // see Amazon Resource Names (ARNs) and AWS Service Namespaces (general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + // + // The characters in this parameter count towards the 2048 character session + // policy guideline. However, an AWS conversion compresses the session policies + // into a packed binary format that has a separate limit. This is the enforced + // limit. The PackedPolicySize response element indicates by percentage how + // close the policy is to the upper size limit. + // + // Passing policies to this operation returns new temporary credentials. The + // resulting session's permissions are the intersection of the role's identity-based + // policy and the session policies. You can use the role's temporary credentials + // in subsequent AWS API calls to access resources in the account that owns + // the role. You cannot use session policies to grant more permissions than + // those allowed by the identity-based policy of the role that is being assumed. + // For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + PolicyArns []*PolicyDescriptorType `type:"list"` + + // The Amazon Resource Name (ARN) of the role to assume. + // + // RoleArn is a required field + RoleArn *string `min:"20" type:"string" required:"true"` + + // An identifier for the assumed role session. + // + // Use the role session name to uniquely identify a session when the same role + // is assumed by different principals or for different reasons. In cross-account + // scenarios, the role session name is visible to, and can be logged by the + // account that owns the role. The role session name is also used in the ARN + // of the assumed role principal. This means that subsequent cross-account API + // requests that use the temporary security credentials will expose the role + // session name to the external account in their AWS CloudTrail logs. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. 
You can + // also include underscores or any of the following characters: =,.@- + // + // RoleSessionName is a required field + RoleSessionName *string `min:"2" type:"string" required:"true"` + + // The identification number of the MFA device that is associated with the user + // who is making the AssumeRole call. Specify this value if the trust policy + // of the role being assumed includes a condition that requires MFA authentication. + // The value is either the serial number for a hardware device (such as GAHT12345678) + // or an Amazon Resource Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user). + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can + // also include underscores or any of the following characters: =,.@- + SerialNumber *string `min:"9" type:"string"` + + // The value provided by the MFA device, if the trust policy of the role being + // assumed requires MFA (that is, if the policy includes a condition that tests + // for MFA). If the role being assumed requires MFA and if the TokenCode value + // is missing or expired, the AssumeRole call returns an "access denied" error. + // + // The format for this parameter, as described by its regex pattern, is a sequence + // of six numeric digits. + TokenCode *string `min:"6" type:"string"` +} + +// String returns the string representation +func (s AssumeRoleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssumeRoleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AssumeRoleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AssumeRoleInput"} + if s.DurationSeconds != nil && *s.DurationSeconds < 900 { + invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) + } + if s.ExternalId != nil && len(*s.ExternalId) < 2 { + invalidParams.Add(request.NewErrParamMinLen("ExternalId", 2)) + } + if s.Policy != nil && len(*s.Policy) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) + } + if s.RoleArn == nil { + invalidParams.Add(request.NewErrParamRequired("RoleArn")) + } + if s.RoleArn != nil && len(*s.RoleArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20)) + } + if s.RoleSessionName == nil { + invalidParams.Add(request.NewErrParamRequired("RoleSessionName")) + } + if s.RoleSessionName != nil && len(*s.RoleSessionName) < 2 { + invalidParams.Add(request.NewErrParamMinLen("RoleSessionName", 2)) + } + if s.SerialNumber != nil && len(*s.SerialNumber) < 9 { + invalidParams.Add(request.NewErrParamMinLen("SerialNumber", 9)) + } + if s.TokenCode != nil && len(*s.TokenCode) < 6 { + invalidParams.Add(request.NewErrParamMinLen("TokenCode", 6)) + } + if s.PolicyArns != nil { + for i, v := range s.PolicyArns { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PolicyArns", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDurationSeconds sets the DurationSeconds field's value. +func (s *AssumeRoleInput) SetDurationSeconds(v int64) *AssumeRoleInput { + s.DurationSeconds = &v + return s +} + +// SetExternalId sets the ExternalId field's value. 
+func (s *AssumeRoleInput) SetExternalId(v string) *AssumeRoleInput { + s.ExternalId = &v + return s +} + +// SetPolicy sets the Policy field's value. +func (s *AssumeRoleInput) SetPolicy(v string) *AssumeRoleInput { + s.Policy = &v + return s +} + +// SetPolicyArns sets the PolicyArns field's value. +func (s *AssumeRoleInput) SetPolicyArns(v []*PolicyDescriptorType) *AssumeRoleInput { + s.PolicyArns = v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *AssumeRoleInput) SetRoleArn(v string) *AssumeRoleInput { + s.RoleArn = &v + return s +} + +// SetRoleSessionName sets the RoleSessionName field's value. +func (s *AssumeRoleInput) SetRoleSessionName(v string) *AssumeRoleInput { + s.RoleSessionName = &v + return s +} + +// SetSerialNumber sets the SerialNumber field's value. +func (s *AssumeRoleInput) SetSerialNumber(v string) *AssumeRoleInput { + s.SerialNumber = &v + return s +} + +// SetTokenCode sets the TokenCode field's value. +func (s *AssumeRoleInput) SetTokenCode(v string) *AssumeRoleInput { + s.TokenCode = &v + return s +} + +// Contains the response to a successful AssumeRole request, including temporary +// AWS credentials that can be used to make AWS requests. +type AssumeRoleOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers + // that you can use to refer to the resulting temporary security credentials. + // For example, you can reference these credentials as a principal in a resource-based + // policy by using the ARN or assumed role ID. The ARN and ID include the RoleSessionName + // that you specified when you called AssumeRole. + AssumedRoleUser *AssumedRoleUser `type:"structure"` + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security (or session) token. + // + // The size of the security token that STS API operations return is not fixed. + // We strongly recommend that you make no assumptions about the maximum size. + Credentials *Credentials `type:"structure"` + + // A percentage value that indicates the size of the policy in packed form. + // The service rejects any policy with a packed size greater than 100 percent, + // which means the policy exceeded the allowed space. + PackedPolicySize *int64 `type:"integer"` +} + +// String returns the string representation +func (s AssumeRoleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssumeRoleOutput) GoString() string { + return s.String() +} + +// SetAssumedRoleUser sets the AssumedRoleUser field's value. +func (s *AssumeRoleOutput) SetAssumedRoleUser(v *AssumedRoleUser) *AssumeRoleOutput { + s.AssumedRoleUser = v + return s +} + +// SetCredentials sets the Credentials field's value. +func (s *AssumeRoleOutput) SetCredentials(v *Credentials) *AssumeRoleOutput { + s.Credentials = v + return s +} + +// SetPackedPolicySize sets the PackedPolicySize field's value. +func (s *AssumeRoleOutput) SetPackedPolicySize(v int64) *AssumeRoleOutput { + s.PackedPolicySize = &v + return s +} + +type AssumeRoleWithSAMLInput struct { + _ struct{} `type:"structure"` + + // The duration, in seconds, of the role session. Your role session lasts for + // the duration that you specify for the DurationSeconds parameter, or until + // the time specified in the SAML authentication response's SessionNotOnOrAfter + // value, whichever is shorter. 
You can provide a DurationSeconds value from + // 900 seconds (15 minutes) up to the maximum session duration setting for the + // role. This setting can have a value from 1 hour to 12 hours. If you specify + // a value higher than this setting, the operation fails. For example, if you + // specify a session duration of 12 hours, but your administrator set the maximum + // session duration to 6 hours, your operation fails. To learn how to view the + // maximum value for your role, see View the Maximum Session Duration Setting + // for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) + // in the IAM User Guide. + // + // By default, the value is set to 3600 seconds. + // + // The DurationSeconds parameter is separate from the duration of a console + // session that you might request using the returned credentials. The request + // to the federation endpoint for a console sign-in token takes a SessionDuration + // parameter that specifies the maximum length of the console session. For more + // information, see Creating a URL that Enables Federated Users to Access the + // AWS Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) + // in the IAM User Guide. + DurationSeconds *int64 `min:"900" type:"integer"` + + // An IAM policy in JSON format that you want to use as an inline session policy. + // + // This parameter is optional. Passing policies to this operation returns new + // temporary credentials. The resulting session's permissions are the intersection + // of the role's identity-based policy and the session policies. You can use + // the role's temporary credentials in subsequent AWS API calls to access resources + // in the account that owns the role. You cannot use session policies to grant + // more permissions than those allowed by the identity-based policy of the role + // that is being assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + // + // The plain text that you use for both inline and managed session policies + // shouldn't exceed 2048 characters. The JSON policy characters can be any ASCII + // character from the space character to the end of the valid character list + // (\u0020 through \u00FF). It can also include the tab (\u0009), linefeed (\u000A), + // and carriage return (\u000D) characters. + // + // The characters in this parameter count towards the 2048 character session + // policy guideline. However, an AWS conversion compresses the session policies + // into a packed binary format that has a separate limit. This is the enforced + // limit. The PackedPolicySize response element indicates by percentage how + // close the policy is to the upper size limit. + Policy *string `min:"1" type:"string"` + + // The Amazon Resource Names (ARNs) of the IAM managed policies that you want + // to use as managed session policies. The policies must exist in the same account + // as the role. + // + // This parameter is optional. You can provide up to 10 managed policy ARNs. + // However, the plain text that you use for both inline and managed session + // policies shouldn't exceed 2048 characters. For more information about ARNs, + // see Amazon Resource Names (ARNs) and AWS Service Namespaces (general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. 
+ // + // The characters in this parameter count towards the 2048 character session + // policy guideline. However, an AWS conversion compresses the session policies + // into a packed binary format that has a separate limit. This is the enforced + // limit. The PackedPolicySize response element indicates by percentage how + // close the policy is to the upper size limit. + // + // Passing policies to this operation returns new temporary credentials. The + // resulting session's permissions are the intersection of the role's identity-based + // policy and the session policies. You can use the role's temporary credentials + // in subsequent AWS API calls to access resources in the account that owns + // the role. You cannot use session policies to grant more permissions than + // those allowed by the identity-based policy of the role that is being assumed. + // For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + PolicyArns []*PolicyDescriptorType `type:"list"` + + // The Amazon Resource Name (ARN) of the SAML provider in IAM that describes + // the IdP. + // + // PrincipalArn is a required field + PrincipalArn *string `min:"20" type:"string" required:"true"` + + // The Amazon Resource Name (ARN) of the role that the caller is assuming. + // + // RoleArn is a required field + RoleArn *string `min:"20" type:"string" required:"true"` + + // The base-64 encoded SAML authentication response provided by the IdP. + // + // For more information, see Configuring a Relying Party and Adding Claims (https://docs.aws.amazon.com/IAM/latest/UserGuide/create-role-saml-IdP-tasks.html) + // in the IAM User Guide. + // + // SAMLAssertion is a required field + SAMLAssertion *string `min:"4" type:"string" required:"true"` +} + +// String returns the string representation +func (s AssumeRoleWithSAMLInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssumeRoleWithSAMLInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
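+//
+// As a hedged sketch (the ARNs are placeholders and samlAssertion is an
+// assumed base64-encoded response obtained from the IdP), Validate surfaces
+// every violated constraint before the request is sent:
+//
+//    input := &sts.AssumeRoleWithSAMLInput{
+//        PrincipalArn:  aws.String("arn:aws:iam::123456789012:saml-provider/ExampleIdP"),
+//        RoleArn:       aws.String("arn:aws:iam::123456789012:role/ExampleRole"),
+//        SAMLAssertion: aws.String(samlAssertion),
+//    }
+//    if err := input.Validate(); err != nil {
+//        // err is a request.ErrInvalidParams listing each invalid field
+//    }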
+func (s *AssumeRoleWithSAMLInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AssumeRoleWithSAMLInput"} + if s.DurationSeconds != nil && *s.DurationSeconds < 900 { + invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) + } + if s.Policy != nil && len(*s.Policy) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) + } + if s.PrincipalArn == nil { + invalidParams.Add(request.NewErrParamRequired("PrincipalArn")) + } + if s.PrincipalArn != nil && len(*s.PrincipalArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("PrincipalArn", 20)) + } + if s.RoleArn == nil { + invalidParams.Add(request.NewErrParamRequired("RoleArn")) + } + if s.RoleArn != nil && len(*s.RoleArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20)) + } + if s.SAMLAssertion == nil { + invalidParams.Add(request.NewErrParamRequired("SAMLAssertion")) + } + if s.SAMLAssertion != nil && len(*s.SAMLAssertion) < 4 { + invalidParams.Add(request.NewErrParamMinLen("SAMLAssertion", 4)) + } + if s.PolicyArns != nil { + for i, v := range s.PolicyArns { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PolicyArns", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDurationSeconds sets the DurationSeconds field's value. +func (s *AssumeRoleWithSAMLInput) SetDurationSeconds(v int64) *AssumeRoleWithSAMLInput { + s.DurationSeconds = &v + return s +} + +// SetPolicy sets the Policy field's value. +func (s *AssumeRoleWithSAMLInput) SetPolicy(v string) *AssumeRoleWithSAMLInput { + s.Policy = &v + return s +} + +// SetPolicyArns sets the PolicyArns field's value. +func (s *AssumeRoleWithSAMLInput) SetPolicyArns(v []*PolicyDescriptorType) *AssumeRoleWithSAMLInput { + s.PolicyArns = v + return s +} + +// SetPrincipalArn sets the PrincipalArn field's value. +func (s *AssumeRoleWithSAMLInput) SetPrincipalArn(v string) *AssumeRoleWithSAMLInput { + s.PrincipalArn = &v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *AssumeRoleWithSAMLInput) SetRoleArn(v string) *AssumeRoleWithSAMLInput { + s.RoleArn = &v + return s +} + +// SetSAMLAssertion sets the SAMLAssertion field's value. +func (s *AssumeRoleWithSAMLInput) SetSAMLAssertion(v string) *AssumeRoleWithSAMLInput { + s.SAMLAssertion = &v + return s +} + +// Contains the response to a successful AssumeRoleWithSAML request, including +// temporary AWS credentials that can be used to make AWS requests. +type AssumeRoleWithSAMLOutput struct { + _ struct{} `type:"structure"` + + // The identifiers for the temporary security credentials that the operation + // returns. + AssumedRoleUser *AssumedRoleUser `type:"structure"` + + // The value of the Recipient attribute of the SubjectConfirmationData element + // of the SAML assertion. + Audience *string `type:"string"` + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security (or session) token. + // + // The size of the security token that STS API operations return is not fixed. + // We strongly recommend that you make no assumptions about the maximum size. + Credentials *Credentials `type:"structure"` + + // The value of the Issuer element of the SAML assertion. 
+ Issuer *string `type:"string"` + + // A hash value based on the concatenation of the Issuer response value, the + // AWS account ID, and the friendly name (the last part of the ARN) of the SAML + // provider in IAM. The combination of NameQualifier and Subject can be used + // to uniquely identify a federated user. + // + // The following pseudocode shows how the hash value is calculated: + // + // BASE64 ( SHA1 ( "https://example.com/saml" + "123456789012" + "/MySAMLIdP" + // ) ) + NameQualifier *string `type:"string"` + + // A percentage value that indicates the size of the policy in packed form. + // The service rejects any policy with a packed size greater than 100 percent, + // which means the policy exceeded the allowed space. + PackedPolicySize *int64 `type:"integer"` + + // The value of the NameID element in the Subject element of the SAML assertion. + Subject *string `type:"string"` + + // The format of the name ID, as defined by the Format attribute in the NameID + // element of the SAML assertion. Typical examples of the format are transient + // or persistent. + // + // If the format includes the prefix urn:oasis:names:tc:SAML:2.0:nameid-format, + // that prefix is removed. For example, urn:oasis:names:tc:SAML:2.0:nameid-format:transient + // is returned as transient. If the format includes any other prefix, the format + // is returned with no modifications. + SubjectType *string `type:"string"` +} + +// String returns the string representation +func (s AssumeRoleWithSAMLOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssumeRoleWithSAMLOutput) GoString() string { + return s.String() +} + +// SetAssumedRoleUser sets the AssumedRoleUser field's value. +func (s *AssumeRoleWithSAMLOutput) SetAssumedRoleUser(v *AssumedRoleUser) *AssumeRoleWithSAMLOutput { + s.AssumedRoleUser = v + return s +} + +// SetAudience sets the Audience field's value. +func (s *AssumeRoleWithSAMLOutput) SetAudience(v string) *AssumeRoleWithSAMLOutput { + s.Audience = &v + return s +} + +// SetCredentials sets the Credentials field's value. +func (s *AssumeRoleWithSAMLOutput) SetCredentials(v *Credentials) *AssumeRoleWithSAMLOutput { + s.Credentials = v + return s +} + +// SetIssuer sets the Issuer field's value. +func (s *AssumeRoleWithSAMLOutput) SetIssuer(v string) *AssumeRoleWithSAMLOutput { + s.Issuer = &v + return s +} + +// SetNameQualifier sets the NameQualifier field's value. +func (s *AssumeRoleWithSAMLOutput) SetNameQualifier(v string) *AssumeRoleWithSAMLOutput { + s.NameQualifier = &v + return s +} + +// SetPackedPolicySize sets the PackedPolicySize field's value. +func (s *AssumeRoleWithSAMLOutput) SetPackedPolicySize(v int64) *AssumeRoleWithSAMLOutput { + s.PackedPolicySize = &v + return s +} + +// SetSubject sets the Subject field's value. +func (s *AssumeRoleWithSAMLOutput) SetSubject(v string) *AssumeRoleWithSAMLOutput { + s.Subject = &v + return s +} + +// SetSubjectType sets the SubjectType field's value. +func (s *AssumeRoleWithSAMLOutput) SetSubjectType(v string) *AssumeRoleWithSAMLOutput { + s.SubjectType = &v + return s +} + +type AssumeRoleWithWebIdentityInput struct { + _ struct{} `type:"structure"` + + // The duration, in seconds, of the role session. The value can range from 900 + // seconds (15 minutes) up to the maximum session duration setting for the role. + // This setting can have a value from 1 hour to 12 hours. If you specify a value + // higher than this setting, the operation fails. 
For example, if you specify + // a session duration of 12 hours, but your administrator set the maximum session + // duration to 6 hours, your operation fails. To learn how to view the maximum + // value for your role, see View the Maximum Session Duration Setting for a + // Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) + // in the IAM User Guide. + // + // By default, the value is set to 3600 seconds. + // + // The DurationSeconds parameter is separate from the duration of a console + // session that you might request using the returned credentials. The request + // to the federation endpoint for a console sign-in token takes a SessionDuration + // parameter that specifies the maximum length of the console session. For more + // information, see Creating a URL that Enables Federated Users to Access the + // AWS Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) + // in the IAM User Guide. + DurationSeconds *int64 `min:"900" type:"integer"` + + // An IAM policy in JSON format that you want to use as an inline session policy. + // + // This parameter is optional. Passing policies to this operation returns new + // temporary credentials. The resulting session's permissions are the intersection + // of the role's identity-based policy and the session policies. You can use + // the role's temporary credentials in subsequent AWS API calls to access resources + // in the account that owns the role. You cannot use session policies to grant + // more permissions than those allowed by the identity-based policy of the role + // that is being assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + // + // The plain text that you use for both inline and managed session policies + // shouldn't exceed 2048 characters. The JSON policy characters can be any ASCII + // character from the space character to the end of the valid character list + // (\u0020 through \u00FF). It can also include the tab (\u0009), linefeed (\u000A), + // and carriage return (\u000D) characters. + // + // The characters in this parameter count towards the 2048 character session + // policy guideline. However, an AWS conversion compresses the session policies + // into a packed binary format that has a separate limit. This is the enforced + // limit. The PackedPolicySize response element indicates by percentage how + // close the policy is to the upper size limit. + Policy *string `min:"1" type:"string"` + + // The Amazon Resource Names (ARNs) of the IAM managed policies that you want + // to use as managed session policies. The policies must exist in the same account + // as the role. + // + // This parameter is optional. You can provide up to 10 managed policy ARNs. + // However, the plain text that you use for both inline and managed session + // policies shouldn't exceed 2048 characters. For more information about ARNs, + // see Amazon Resource Names (ARNs) and AWS Service Namespaces (general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + // + // The characters in this parameter count towards the 2048 character session + // policy guideline. However, an AWS conversion compresses the session policies + // into a packed binary format that has a separate limit. This is the enforced + // limit. 
The PackedPolicySize response element indicates by percentage how + // close the policy is to the upper size limit. + // + // Passing policies to this operation returns new temporary credentials. The + // resulting session's permissions are the intersection of the role's identity-based + // policy and the session policies. You can use the role's temporary credentials + // in subsequent AWS API calls to access resources in the account that owns + // the role. You cannot use session policies to grant more permissions than + // those allowed by the identity-based policy of the role that is being assumed. + // For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + PolicyArns []*PolicyDescriptorType `type:"list"` + + // The fully qualified host component of the domain name of the identity provider. + // + // Specify this value only for OAuth 2.0 access tokens. Currently www.amazon.com + // and graph.facebook.com are the only supported identity providers for OAuth + // 2.0 access tokens. Do not include URL schemes and port numbers. + // + // Do not specify this value for OpenID Connect ID tokens. + ProviderId *string `min:"4" type:"string"` + + // The Amazon Resource Name (ARN) of the role that the caller is assuming. + // + // RoleArn is a required field + RoleArn *string `min:"20" type:"string" required:"true"` + + // An identifier for the assumed role session. Typically, you pass the name + // or identifier that is associated with the user who is using your application. + // That way, the temporary security credentials that your application will use + // are associated with that user. This session name is included as part of the + // ARN and assumed role ID in the AssumedRoleUser response element. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can + // also include underscores or any of the following characters: =,.@- + // + // RoleSessionName is a required field + RoleSessionName *string `min:"2" type:"string" required:"true"` + + // The OAuth 2.0 access token or OpenID Connect ID token that is provided by + // the identity provider. Your application must get this token by authenticating + // the user who is using your application with a web identity provider before + // the application makes an AssumeRoleWithWebIdentity call. + // + // WebIdentityToken is a required field + WebIdentityToken *string `min:"4" type:"string" required:"true"` +} + +// String returns the string representation +func (s AssumeRoleWithWebIdentityInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssumeRoleWithWebIdentityInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
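+//
+// For example (an illustrative sketch; svc denotes an assumed *sts.STS client
+// and idToken an OpenID Connect ID token obtained from the provider):
+//
+//    out, err := svc.AssumeRoleWithWebIdentity(&sts.AssumeRoleWithWebIdentityInput{
+//        RoleArn:          aws.String("arn:aws:iam::123456789012:role/WebAppRole"),
+//        RoleSessionName:  aws.String("app-user-42"),
+//        WebIdentityToken: aws.String(idToken),
+//    })
+//    if err == nil {
+//        _ = out.Credentials // temporary credentials for the assumed role
+//    }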
+func (s *AssumeRoleWithWebIdentityInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AssumeRoleWithWebIdentityInput"} + if s.DurationSeconds != nil && *s.DurationSeconds < 900 { + invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) + } + if s.Policy != nil && len(*s.Policy) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) + } + if s.ProviderId != nil && len(*s.ProviderId) < 4 { + invalidParams.Add(request.NewErrParamMinLen("ProviderId", 4)) + } + if s.RoleArn == nil { + invalidParams.Add(request.NewErrParamRequired("RoleArn")) + } + if s.RoleArn != nil && len(*s.RoleArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20)) + } + if s.RoleSessionName == nil { + invalidParams.Add(request.NewErrParamRequired("RoleSessionName")) + } + if s.RoleSessionName != nil && len(*s.RoleSessionName) < 2 { + invalidParams.Add(request.NewErrParamMinLen("RoleSessionName", 2)) + } + if s.WebIdentityToken == nil { + invalidParams.Add(request.NewErrParamRequired("WebIdentityToken")) + } + if s.WebIdentityToken != nil && len(*s.WebIdentityToken) < 4 { + invalidParams.Add(request.NewErrParamMinLen("WebIdentityToken", 4)) + } + if s.PolicyArns != nil { + for i, v := range s.PolicyArns { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PolicyArns", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDurationSeconds sets the DurationSeconds field's value. +func (s *AssumeRoleWithWebIdentityInput) SetDurationSeconds(v int64) *AssumeRoleWithWebIdentityInput { + s.DurationSeconds = &v + return s +} + +// SetPolicy sets the Policy field's value. +func (s *AssumeRoleWithWebIdentityInput) SetPolicy(v string) *AssumeRoleWithWebIdentityInput { + s.Policy = &v + return s +} + +// SetPolicyArns sets the PolicyArns field's value. +func (s *AssumeRoleWithWebIdentityInput) SetPolicyArns(v []*PolicyDescriptorType) *AssumeRoleWithWebIdentityInput { + s.PolicyArns = v + return s +} + +// SetProviderId sets the ProviderId field's value. +func (s *AssumeRoleWithWebIdentityInput) SetProviderId(v string) *AssumeRoleWithWebIdentityInput { + s.ProviderId = &v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *AssumeRoleWithWebIdentityInput) SetRoleArn(v string) *AssumeRoleWithWebIdentityInput { + s.RoleArn = &v + return s +} + +// SetRoleSessionName sets the RoleSessionName field's value. +func (s *AssumeRoleWithWebIdentityInput) SetRoleSessionName(v string) *AssumeRoleWithWebIdentityInput { + s.RoleSessionName = &v + return s +} + +// SetWebIdentityToken sets the WebIdentityToken field's value. +func (s *AssumeRoleWithWebIdentityInput) SetWebIdentityToken(v string) *AssumeRoleWithWebIdentityInput { + s.WebIdentityToken = &v + return s +} + +// Contains the response to a successful AssumeRoleWithWebIdentity request, +// including temporary AWS credentials that can be used to make AWS requests. +type AssumeRoleWithWebIdentityOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers + // that you can use to refer to the resulting temporary security credentials. + // For example, you can reference these credentials as a principal in a resource-based + // policy by using the ARN or assumed role ID. The ARN and ID include the RoleSessionName + // that you specified when you called AssumeRole. 
+ AssumedRoleUser *AssumedRoleUser `type:"structure"` + + // The intended audience (also known as client ID) of the web identity token. + // This is traditionally the client identifier issued to the application that + // requested the web identity token. + Audience *string `type:"string"` + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security token. + // + // The size of the security token that STS API operations return is not fixed. + // We strongly recommend that you make no assumptions about the maximum size. + Credentials *Credentials `type:"structure"` + + // A percentage value that indicates the size of the policy in packed form. + // The service rejects any policy with a packed size greater than 100 percent, + // which means the policy exceeded the allowed space. + PackedPolicySize *int64 `type:"integer"` + + // The issuing authority of the web identity token presented. For OpenID Connect + // ID tokens, this contains the value of the iss field. For OAuth 2.0 access + // tokens, this contains the value of the ProviderId parameter that was passed + // in the AssumeRoleWithWebIdentity request. + Provider *string `type:"string"` + + // The unique user identifier that is returned by the identity provider. This + // identifier is associated with the WebIdentityToken that was submitted with + // the AssumeRoleWithWebIdentity call. The identifier is typically unique to + // the user and the application that acquired the WebIdentityToken (pairwise + // identifier). For OpenID Connect ID tokens, this field contains the value + // returned by the identity provider as the token's sub (Subject) claim. + SubjectFromWebIdentityToken *string `min:"6" type:"string"` +} + +// String returns the string representation +func (s AssumeRoleWithWebIdentityOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssumeRoleWithWebIdentityOutput) GoString() string { + return s.String() +} + +// SetAssumedRoleUser sets the AssumedRoleUser field's value. +func (s *AssumeRoleWithWebIdentityOutput) SetAssumedRoleUser(v *AssumedRoleUser) *AssumeRoleWithWebIdentityOutput { + s.AssumedRoleUser = v + return s +} + +// SetAudience sets the Audience field's value. +func (s *AssumeRoleWithWebIdentityOutput) SetAudience(v string) *AssumeRoleWithWebIdentityOutput { + s.Audience = &v + return s +} + +// SetCredentials sets the Credentials field's value. +func (s *AssumeRoleWithWebIdentityOutput) SetCredentials(v *Credentials) *AssumeRoleWithWebIdentityOutput { + s.Credentials = v + return s +} + +// SetPackedPolicySize sets the PackedPolicySize field's value. +func (s *AssumeRoleWithWebIdentityOutput) SetPackedPolicySize(v int64) *AssumeRoleWithWebIdentityOutput { + s.PackedPolicySize = &v + return s +} + +// SetProvider sets the Provider field's value. +func (s *AssumeRoleWithWebIdentityOutput) SetProvider(v string) *AssumeRoleWithWebIdentityOutput { + s.Provider = &v + return s +} + +// SetSubjectFromWebIdentityToken sets the SubjectFromWebIdentityToken field's value. +func (s *AssumeRoleWithWebIdentityOutput) SetSubjectFromWebIdentityToken(v string) *AssumeRoleWithWebIdentityOutput { + s.SubjectFromWebIdentityToken = &v + return s +} + +// The identifiers for the temporary security credentials that the operation +// returns. +type AssumedRoleUser struct { + _ struct{} `type:"structure"` + + // The ARN of the temporary security credentials that are returned from the + // AssumeRole action. 
For more information about ARNs and how to use them in + // policies, see IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) + // in Using IAM. + // + // Arn is a required field + Arn *string `min:"20" type:"string" required:"true"` + + // A unique identifier that contains the role ID and the role session name of + // the role that is being assumed. The role ID is generated by AWS when the + // role is created. + // + // AssumedRoleId is a required field + AssumedRoleId *string `min:"2" type:"string" required:"true"` +} + +// String returns the string representation +func (s AssumedRoleUser) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssumedRoleUser) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *AssumedRoleUser) SetArn(v string) *AssumedRoleUser { + s.Arn = &v + return s +} + +// SetAssumedRoleId sets the AssumedRoleId field's value. +func (s *AssumedRoleUser) SetAssumedRoleId(v string) *AssumedRoleUser { + s.AssumedRoleId = &v + return s +} + +// AWS credentials for API authentication. +type Credentials struct { + _ struct{} `type:"structure"` + + // The access key ID that identifies the temporary security credentials. + // + // AccessKeyId is a required field + AccessKeyId *string `min:"16" type:"string" required:"true"` + + // The date on which the current credentials expire. + // + // Expiration is a required field + Expiration *time.Time `type:"timestamp" required:"true"` + + // The secret access key that can be used to sign requests. + // + // SecretAccessKey is a required field + SecretAccessKey *string `type:"string" required:"true"` + + // The token that users must pass to the service API to use the temporary credentials. + // + // SessionToken is a required field + SessionToken *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s Credentials) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Credentials) GoString() string { + return s.String() +} + +// SetAccessKeyId sets the AccessKeyId field's value. +func (s *Credentials) SetAccessKeyId(v string) *Credentials { + s.AccessKeyId = &v + return s +} + +// SetExpiration sets the Expiration field's value. +func (s *Credentials) SetExpiration(v time.Time) *Credentials { + s.Expiration = &v + return s +} + +// SetSecretAccessKey sets the SecretAccessKey field's value. +func (s *Credentials) SetSecretAccessKey(v string) *Credentials { + s.SecretAccessKey = &v + return s +} + +// SetSessionToken sets the SessionToken field's value. +func (s *Credentials) SetSessionToken(v string) *Credentials { + s.SessionToken = &v + return s +} + +type DecodeAuthorizationMessageInput struct { + _ struct{} `type:"structure"` + + // The encoded message that was returned with the response. + // + // EncodedMessage is a required field + EncodedMessage *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DecodeAuthorizationMessageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DecodeAuthorizationMessageInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
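+//
+// As a sketch (svc is an assumed *sts.STS client and encodedMsg the encoded
+// message taken from an access-denied error response):
+//
+//    out, err := svc.DecodeAuthorizationMessage(&sts.DecodeAuthorizationMessageInput{
+//        EncodedMessage: aws.String(encodedMsg),
+//    })
+//    if err == nil {
+//        fmt.Println(aws.StringValue(out.DecodedMessage)) // decoded XML detail
+//    }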
+func (s *DecodeAuthorizationMessageInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DecodeAuthorizationMessageInput"} + if s.EncodedMessage == nil { + invalidParams.Add(request.NewErrParamRequired("EncodedMessage")) + } + if s.EncodedMessage != nil && len(*s.EncodedMessage) < 1 { + invalidParams.Add(request.NewErrParamMinLen("EncodedMessage", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEncodedMessage sets the EncodedMessage field's value. +func (s *DecodeAuthorizationMessageInput) SetEncodedMessage(v string) *DecodeAuthorizationMessageInput { + s.EncodedMessage = &v + return s +} + +// A document that contains additional information about the authorization status +// of a request from an encoded message that is returned in response to an AWS +// request. +type DecodeAuthorizationMessageOutput struct { + _ struct{} `type:"structure"` + + // An XML document that contains the decoded message. + DecodedMessage *string `type:"string"` +} + +// String returns the string representation +func (s DecodeAuthorizationMessageOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DecodeAuthorizationMessageOutput) GoString() string { + return s.String() +} + +// SetDecodedMessage sets the DecodedMessage field's value. +func (s *DecodeAuthorizationMessageOutput) SetDecodedMessage(v string) *DecodeAuthorizationMessageOutput { + s.DecodedMessage = &v + return s +} + +// Identifiers for the federated user that is associated with the credentials. +type FederatedUser struct { + _ struct{} `type:"structure"` + + // The ARN that specifies the federated user that is associated with the credentials. + // For more information about ARNs and how to use them in policies, see IAM + // Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) + // in Using IAM. + // + // Arn is a required field + Arn *string `min:"20" type:"string" required:"true"` + + // The string that identifies the federated user associated with the credentials, + // similar to the unique ID of an IAM user. + // + // FederatedUserId is a required field + FederatedUserId *string `min:"2" type:"string" required:"true"` +} + +// String returns the string representation +func (s FederatedUser) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FederatedUser) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *FederatedUser) SetArn(v string) *FederatedUser { + s.Arn = &v + return s +} + +// SetFederatedUserId sets the FederatedUserId field's value. +func (s *FederatedUser) SetFederatedUserId(v string) *FederatedUser { + s.FederatedUserId = &v + return s +} + +type GetCallerIdentityInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s GetCallerIdentityInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetCallerIdentityInput) GoString() string { + return s.String() +} + +// Contains the response to a successful GetCallerIdentity request, including +// information about the entity making the request. +type GetCallerIdentityOutput struct { + _ struct{} `type:"structure"` + + // The AWS account ID number of the account that owns or contains the calling + // entity. + Account *string `type:"string"` + + // The AWS ARN associated with the calling entity. 
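+	//
+	// For example (a sketch; svc denotes an assumed *sts.STS client), the
+	// caller's ARN can be inspected with a "who am I" request:
+	//
+	//    out, err := svc.GetCallerIdentity(&sts.GetCallerIdentityInput{})
+	//    if err == nil {
+	//        fmt.Println(aws.StringValue(out.Arn))
+	//    }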
+ Arn *string `min:"20" type:"string"` + + // The unique identifier of the calling entity. The exact value depends on the + // type of entity that is making the call. The values returned are those listed + // in the aws:userid column in the Principal table (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_variables.html#principaltable) + // found on the Policy Variables reference page in the IAM User Guide. + UserId *string `type:"string"` +} + +// String returns the string representation +func (s GetCallerIdentityOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetCallerIdentityOutput) GoString() string { + return s.String() +} + +// SetAccount sets the Account field's value. +func (s *GetCallerIdentityOutput) SetAccount(v string) *GetCallerIdentityOutput { + s.Account = &v + return s +} + +// SetArn sets the Arn field's value. +func (s *GetCallerIdentityOutput) SetArn(v string) *GetCallerIdentityOutput { + s.Arn = &v + return s +} + +// SetUserId sets the UserId field's value. +func (s *GetCallerIdentityOutput) SetUserId(v string) *GetCallerIdentityOutput { + s.UserId = &v + return s +} + +type GetFederationTokenInput struct { + _ struct{} `type:"structure"` + + // The duration, in seconds, that the session should last. Acceptable durations + // for federation sessions range from 900 seconds (15 minutes) to 129,600 seconds + // (36 hours), with 43,200 seconds (12 hours) as the default. Sessions obtained + // using AWS account root user credentials are restricted to a maximum of 3,600 + // seconds (one hour). If the specified duration is longer than one hour, the + // session obtained by using root user credentials defaults to one hour. + DurationSeconds *int64 `min:"900" type:"integer"` + + // The name of the federated user. The name is used as an identifier for the + // temporary security credentials (such as Bob). For example, you can reference + // the federated user name in a resource-based policy, such as in an Amazon + // S3 bucket policy. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can + // also include underscores or any of the following characters: =,.@- + // + // Name is a required field + Name *string `min:"2" type:"string" required:"true"` + + // An IAM policy in JSON format that you want to use as an inline session policy. + // + // You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // to this operation. You can pass a single JSON policy document to use as an + // inline session policy. You can also specify up to 10 managed policies to + // use as managed session policies. + // + // This parameter is optional. However, if you do not pass any session policies, + // then the resulting federated user session has no permissions. The only exception + // is when the credentials are used to access a resource that has a resource-based + // policy that specifically references the federated user session in the Principal + // element of the policy. + // + // When you pass session policies, the session permissions are the intersection + // of the IAM user policies and the session policies that you pass. This gives + // you a way to further restrict the permissions for a federated user. 
You cannot + // use session policies to grant more permissions than those that are defined + // in the permissions policy of the IAM user. For more information, see Session + // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + // + // The plain text that you use for both inline and managed session policies + // shouldn't exceed 2048 characters. The JSON policy characters can be any ASCII + // character from the space character to the end of the valid character list + // (\u0020 through \u00FF). It can also include the tab (\u0009), linefeed (\u000A), + // and carriage return (\u000D) characters. + // + // The characters in this parameter count towards the 2048 character session + // policy guideline. However, an AWS conversion compresses the session policies + // into a packed binary format that has a separate limit. This is the enforced + // limit. The PackedPolicySize response element indicates by percentage how + // close the policy is to the upper size limit. + Policy *string `min:"1" type:"string"` + + // The Amazon Resource Names (ARNs) of the IAM managed policies that you want + // to use as a managed session policy. The policies must exist in the same account + // as the IAM user that is requesting federated access. + // + // You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // to this operation. You can pass a single JSON policy document to use as an + // inline session policy. You can also specify up to 10 managed policies to + // use as managed session policies. The plain text that you use for both inline + // and managed session policies shouldn't exceed 2048 characters. You can provide + // up to 10 managed policy ARNs. For more information about ARNs, see Amazon + // Resource Names (ARNs) and AWS Service Namespaces (general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + // + // This parameter is optional. However, if you do not pass any session policies, + // then the resulting federated user session has no permissions. The only exception + // is when the credentials are used to access a resource that has a resource-based + // policy that specifically references the federated user session in the Principal + // element of the policy. + // + // When you pass session policies, the session permissions are the intersection + // of the IAM user policies and the session policies that you pass. This gives + // you a way to further restrict the permissions for a federated user. You cannot + // use session policies to grant more permissions than those that are defined + // in the permissions policy of the IAM user. For more information, see Session + // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + // + // The characters in this parameter count towards the 2048 character session + // policy guideline. However, an AWS conversion compresses the session policies + // into a packed binary format that has a separate limit. This is the enforced + // limit. The PackedPolicySize response element indicates by percentage how + // close the policy is to the upper size limit. 
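+	//
+	// As an illustrative sketch (the managed policy ARN below is only an
+	// example), the list is built from PolicyDescriptorType values:
+	//
+	//    arns := []*sts.PolicyDescriptorType{
+	//        {Arn: aws.String("arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess")},
+	//    }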
+ PolicyArns []*PolicyDescriptorType `type:"list"` +} + +// String returns the string representation +func (s GetFederationTokenInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetFederationTokenInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetFederationTokenInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetFederationTokenInput"} + if s.DurationSeconds != nil && *s.DurationSeconds < 900 { + invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 2 { + invalidParams.Add(request.NewErrParamMinLen("Name", 2)) + } + if s.Policy != nil && len(*s.Policy) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) + } + if s.PolicyArns != nil { + for i, v := range s.PolicyArns { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PolicyArns", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDurationSeconds sets the DurationSeconds field's value. +func (s *GetFederationTokenInput) SetDurationSeconds(v int64) *GetFederationTokenInput { + s.DurationSeconds = &v + return s +} + +// SetName sets the Name field's value. +func (s *GetFederationTokenInput) SetName(v string) *GetFederationTokenInput { + s.Name = &v + return s +} + +// SetPolicy sets the Policy field's value. +func (s *GetFederationTokenInput) SetPolicy(v string) *GetFederationTokenInput { + s.Policy = &v + return s +} + +// SetPolicyArns sets the PolicyArns field's value. +func (s *GetFederationTokenInput) SetPolicyArns(v []*PolicyDescriptorType) *GetFederationTokenInput { + s.PolicyArns = v + return s +} + +// Contains the response to a successful GetFederationToken request, including +// temporary AWS credentials that can be used to make AWS requests. +type GetFederationTokenOutput struct { + _ struct{} `type:"structure"` + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security (or session) token. + // + // The size of the security token that STS API operations return is not fixed. + // We strongly recommend that you make no assumptions about the maximum size. + Credentials *Credentials `type:"structure"` + + // Identifiers for the federated user associated with the credentials (such + // as arn:aws:sts::123456789012:federated-user/Bob or 123456789012:Bob). You + // can use the federated user's ARN in your resource-based policies, such as + // an Amazon S3 bucket policy. + FederatedUser *FederatedUser `type:"structure"` + + // A percentage value indicating the size of the policy in packed form. The + // service rejects policies for which the packed size is greater than 100 percent + // of the allowed value. + PackedPolicySize *int64 `type:"integer"` +} + +// String returns the string representation +func (s GetFederationTokenOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetFederationTokenOutput) GoString() string { + return s.String() +} + +// SetCredentials sets the Credentials field's value. 
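+//
+// Callers typically read this field from a response rather than set it; as a
+// sketch (out is an assumed *sts.GetFederationTokenOutput):
+//
+//    creds := out.Credentials
+//    // creds.AccessKeyId, creds.SecretAccessKey, and creds.SessionToken hold
+//    // the temporary credentials; creds.Expiration is when they lapse.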
+func (s *GetFederationTokenOutput) SetCredentials(v *Credentials) *GetFederationTokenOutput { + s.Credentials = v + return s +} + +// SetFederatedUser sets the FederatedUser field's value. +func (s *GetFederationTokenOutput) SetFederatedUser(v *FederatedUser) *GetFederationTokenOutput { + s.FederatedUser = v + return s +} + +// SetPackedPolicySize sets the PackedPolicySize field's value. +func (s *GetFederationTokenOutput) SetPackedPolicySize(v int64) *GetFederationTokenOutput { + s.PackedPolicySize = &v + return s +} + +type GetSessionTokenInput struct { + _ struct{} `type:"structure"` + + // The duration, in seconds, that the credentials should remain valid. Acceptable + // durations for IAM user sessions range from 900 seconds (15 minutes) to 129,600 + // seconds (36 hours), with 43,200 seconds (12 hours) as the default. Sessions + // for AWS account owners are restricted to a maximum of 3,600 seconds (one + // hour). If the duration is longer than one hour, the session for AWS account + // owners defaults to one hour. + DurationSeconds *int64 `min:"900" type:"integer"` + + // The identification number of the MFA device that is associated with the IAM + // user who is making the GetSessionToken call. Specify this value if the IAM + // user has a policy that requires MFA authentication. The value is either the + // serial number for a hardware device (such as GAHT12345678) or an Amazon Resource + // Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user). + // You can find the device for an IAM user by going to the AWS Management Console + // and viewing the user's security credentials. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can + // also include underscores or any of the following characters: =,.@:/- + SerialNumber *string `min:"9" type:"string"` + + // The value provided by the MFA device, if MFA is required. If any policy requires + // the IAM user to submit an MFA code, specify this value. If MFA authentication + // is required, the user must provide a code when requesting a set of temporary + // security credentials. A user who fails to provide the code receives an "access + // denied" response when requesting resources that require MFA authentication. + // + // The format for this parameter, as described by its regex pattern, is a sequence + // of six numeric digits. + TokenCode *string `min:"6" type:"string"` +} + +// String returns the string representation +func (s GetSessionTokenInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetSessionTokenInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetSessionTokenInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetSessionTokenInput"} + if s.DurationSeconds != nil && *s.DurationSeconds < 900 { + invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) + } + if s.SerialNumber != nil && len(*s.SerialNumber) < 9 { + invalidParams.Add(request.NewErrParamMinLen("SerialNumber", 9)) + } + if s.TokenCode != nil && len(*s.TokenCode) < 6 { + invalidParams.Add(request.NewErrParamMinLen("TokenCode", 6)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDurationSeconds sets the DurationSeconds field's value. 
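+//
+// For example (a sketch; the MFA serial number and token code are
+// placeholders), an MFA-protected session token request can be composed with
+// the chained setters:
+//
+//    input := (&sts.GetSessionTokenInput{}).
+//        SetDurationSeconds(3600).
+//        SetSerialNumber("arn:aws:iam::123456789012:mfa/user").
+//        SetTokenCode("123456")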
+func (s *GetSessionTokenInput) SetDurationSeconds(v int64) *GetSessionTokenInput { + s.DurationSeconds = &v + return s +} + +// SetSerialNumber sets the SerialNumber field's value. +func (s *GetSessionTokenInput) SetSerialNumber(v string) *GetSessionTokenInput { + s.SerialNumber = &v + return s +} + +// SetTokenCode sets the TokenCode field's value. +func (s *GetSessionTokenInput) SetTokenCode(v string) *GetSessionTokenInput { + s.TokenCode = &v + return s +} + +// Contains the response to a successful GetSessionToken request, including +// temporary AWS credentials that can be used to make AWS requests. +type GetSessionTokenOutput struct { + _ struct{} `type:"structure"` + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security (or session) token. + // + // The size of the security token that STS API operations return is not fixed. + // We strongly recommend that you make no assumptions about the maximum size. + Credentials *Credentials `type:"structure"` +} + +// String returns the string representation +func (s GetSessionTokenOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetSessionTokenOutput) GoString() string { + return s.String() +} + +// SetCredentials sets the Credentials field's value. +func (s *GetSessionTokenOutput) SetCredentials(v *Credentials) *GetSessionTokenOutput { + s.Credentials = v + return s +} + +// A reference to the IAM managed policy that is passed as a session policy +// for a role session or a federated user session. +type PolicyDescriptorType struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the IAM managed policy to use as a session + // policy for the role. For more information about ARNs, see Amazon Resource + // Names (ARNs) and AWS Service Namespaces (general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + Arn *string `locationName:"arn" min:"20" type:"string"` +} + +// String returns the string representation +func (s PolicyDescriptorType) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PolicyDescriptorType) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PolicyDescriptorType) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PolicyDescriptorType"} + if s.Arn != nil && len(*s.Arn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("Arn", 20)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetArn sets the Arn field's value. +func (s *PolicyDescriptorType) SetArn(v string) *PolicyDescriptorType { + s.Arn = &v + return s +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go b/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go new file mode 100644 index 000000000..fcb720dca --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go @@ -0,0 +1,108 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +// Package sts provides the client and types for making API +// requests to AWS Security Token Service. +// +// The AWS Security Token Service (STS) is a web service that enables you to +// request temporary, limited-privilege credentials for AWS Identity and Access +// Management (IAM) users or for users that you authenticate (federated users). +// This guide provides descriptions of the STS API. 
For more detailed information +// about using this service, go to Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html). +// +// For information about setting up signatures and authorization through the +// API, go to Signing AWS API Requests (https://docs.aws.amazon.com/general/latest/gr/signing_aws_api_requests.html) +// in the AWS General Reference. For general information about the Query API, +// go to Making Query Requests (https://docs.aws.amazon.com/IAM/latest/UserGuide/IAM_UsingQueryAPI.html) +// in Using IAM. For information about using security tokens with other AWS +// products, go to AWS Services That Work with IAM (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_aws-services-that-work-with-iam.html) +// in the IAM User Guide. +// +// If you're new to AWS and need additional technical information about a specific +// AWS product, you can find the product's technical documentation at http://aws.amazon.com/documentation/ +// (http://aws.amazon.com/documentation/). +// +// Endpoints +// +// By default, AWS Security Token Service (STS) is available as a global service, +// and all AWS STS requests go to a single endpoint at https://sts.amazonaws.com. +// Global requests map to the US East (N. Virginia) region. AWS recommends using +// Regional AWS STS endpoints instead of the global endpoint to reduce latency, +// build in redundancy, and increase session token validity. For more information, +// see Managing AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. +// +// Most AWS Regions are enabled for operations in all AWS services by default. +// Those Regions are automatically activated for use with AWS STS. Some Regions, +// such as Asia Pacific (Hong Kong), must be manually enabled. To learn more +// about enabling and disabling AWS Regions, see Managing AWS Regions (https://docs.aws.amazon.com/general/latest/gr/rande-manage.html) +// in the AWS General Reference. When you enable these AWS Regions, they are +// automatically activated for use with AWS STS. You cannot activate the STS +// endpoint for a Region that is disabled. Tokens that are valid in all AWS +// Regions are longer than tokens that are valid in Regions that are enabled +// by default. Changing this setting might affect existing systems where you +// temporarily store tokens. For more information, see Managing Global Endpoint +// Session Tokens (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html#sts-regions-manage-tokens) +// in the IAM User Guide. +// +// After you activate a Region for use with AWS STS, you can direct AWS STS +// API calls to that Region. AWS STS recommends that you provide both the Region +// and endpoint when you make calls to a Regional endpoint. You can provide +// the Region alone for manually enabled Regions, such as Asia Pacific (Hong +// Kong). In this case, the calls are directed to the STS Regional endpoint. +// However, if you provide the Region alone for Regions enabled by default, +// the calls are directed to the global endpoint of https://sts.amazonaws.com. +// +// To view the list of AWS STS endpoints and whether they are active by default, +// see Writing Code to Use AWS STS Regions (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html#id_credentials_temp_enable-regions_writing_code) +// in the IAM User Guide. 
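+//
+// As a hedged illustration of the guidance above (the Region choice is an
+// assumption, not a recommendation), a client scoped to a Region can be
+// constructed by setting Region in the session configuration:
+//
+//    sess := session.Must(session.NewSession(&aws.Config{
+//        Region: aws.String("us-east-2"),
+//    }))
+//    svc := sts.New(sess) // endpoint resolution follows the SDK endpoints model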
+// +// Recording API requests +// +// STS supports AWS CloudTrail, which is a service that records AWS calls for +// your AWS account and delivers log files to an Amazon S3 bucket. By using +// information collected by CloudTrail, you can determine what requests were +// successfully made to STS, who made the request, when it was made, and so +// on. +// +// If you activate AWS STS endpoints in Regions other than the default global +// endpoint, then you must also turn on CloudTrail logging in those Regions. +// This is necessary to record any AWS STS API calls that are made in those +// Regions. For more information, see Turning On CloudTrail in Additional Regions +// (https://docs.aws.amazon.com/awscloudtrail/latest/userguide/aggregating_logs_regions_turn_on_ct.html) +// in the AWS CloudTrail User Guide. +// +// AWS Security Token Service (STS) is a global service with a single endpoint +// at https://sts.amazonaws.com. Calls to this endpoint are logged as calls +// to a global service. However, because this endpoint is physically located +// in the US East (N. Virginia) Region, your logs list us-east-1 as the event +// Region. CloudTrail does not write these logs to the US East (Ohio) Region +// unless you choose to include global service logs in that Region. CloudTrail +// writes calls to all Regional endpoints to their respective Regions. For example, +// calls to sts.us-east-2.amazonaws.com are published to the US East (Ohio) +// Region and calls to sts.eu-central-1.amazonaws.com are published to the EU +// (Frankfurt) Region. +// +// To learn more about CloudTrail, including how to turn it on and find your +// log files, see the AWS CloudTrail User Guide (https://docs.aws.amazon.com/awscloudtrail/latest/userguide/what_is_cloud_trail_top_level.html). +// +// See https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15 for more information on this service. +// +// See sts package documentation for more information. +// https://docs.aws.amazon.com/sdk-for-go/api/service/sts/ +// +// Using the Client +// +// To contact AWS Security Token Service with the SDK use the New function to create +// a new service client. With that client you can make API requests to the service. +// These clients are safe to use concurrently. +// +// See the SDK's documentation for more information on how to use the SDK. +// https://docs.aws.amazon.com/sdk-for-go/api/ +// +// See aws.Config documentation for more information on configuring SDK clients. +// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config +// +// See the AWS Security Token Service client STS for more +// information on creating client for this service. +// https://docs.aws.amazon.com/sdk-for-go/api/service/sts/#New +package sts diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go b/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go new file mode 100644 index 000000000..41ea09c35 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go @@ -0,0 +1,73 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package sts + +const ( + + // ErrCodeExpiredTokenException for service response error code + // "ExpiredTokenException". + // + // The web identity token that was passed is expired or is not valid. Get a + // new identity token from the identity provider and then retry the request. + ErrCodeExpiredTokenException = "ExpiredTokenException" + + // ErrCodeIDPCommunicationErrorException for service response error code + // "IDPCommunicationError". 
+ // + // The request could not be fulfilled because the non-AWS identity provider + // (IDP) that was asked to verify the incoming identity token could not be reached. + // This is often a transient error caused by network conditions. Retry the request + // a limited number of times so that you don't exceed the request rate. If the + // error persists, the non-AWS identity provider might be down or not responding. + ErrCodeIDPCommunicationErrorException = "IDPCommunicationError" + + // ErrCodeIDPRejectedClaimException for service response error code + // "IDPRejectedClaim". + // + // The identity provider (IdP) reported that authentication failed. This might + // be because the claim is invalid. + // + // If this error is returned for the AssumeRoleWithWebIdentity operation, it + // can also mean that the claim has expired or has been explicitly revoked. + ErrCodeIDPRejectedClaimException = "IDPRejectedClaim" + + // ErrCodeInvalidAuthorizationMessageException for service response error code + // "InvalidAuthorizationMessageException". + // + // The error returned if the message passed to DecodeAuthorizationMessage was + // invalid. This can happen if the token contains invalid characters, such as + // linebreaks. + ErrCodeInvalidAuthorizationMessageException = "InvalidAuthorizationMessageException" + + // ErrCodeInvalidIdentityTokenException for service response error code + // "InvalidIdentityToken". + // + // The web identity token that was passed could not be validated by AWS. Get + // a new identity token from the identity provider and then retry the request. + ErrCodeInvalidIdentityTokenException = "InvalidIdentityToken" + + // ErrCodeMalformedPolicyDocumentException for service response error code + // "MalformedPolicyDocument". + // + // The request was rejected because the policy document was malformed. The error + // message describes the specific error. + ErrCodeMalformedPolicyDocumentException = "MalformedPolicyDocument" + + // ErrCodePackedPolicyTooLargeException for service response error code + // "PackedPolicyTooLarge". + // + // The request was rejected because the policy document was too large. The error + // message describes how big the policy document is, in packed form, as a percentage + // of what the API allows. + ErrCodePackedPolicyTooLargeException = "PackedPolicyTooLarge" + + // ErrCodeRegionDisabledException for service response error code + // "RegionDisabledException". + // + // STS is not activated in the requested region for the account that is being + // asked to generate credentials. The account administrator must use the IAM + // console to activate STS in that region. For more information, see Activating + // and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) + // in the IAM User Guide. + ErrCodeRegionDisabledException = "RegionDisabledException" +) diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/service.go b/vendor/github.com/aws/aws-sdk-go/service/sts/service.go new file mode 100644 index 000000000..185c914d1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/service.go @@ -0,0 +1,95 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. 
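Before the service client definition below, note how the error codes defined in errors.go above are typically consumed: through the SDK's awserr.Error interface. A minimal sketch follows; the helper name and messages are illustrative, not part of the SDK:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/sts"
)

// describeSTSError is a hypothetical helper mapping a few of the error
// codes defined above to actionable messages.
func describeSTSError(err error) string {
	aerr, ok := err.(awserr.Error)
	if !ok {
		return err.Error() // not an AWS service error
	}
	switch aerr.Code() {
	case sts.ErrCodeExpiredTokenException:
		return "identity token expired: fetch a new token and retry"
	case sts.ErrCodeRegionDisabledException:
		return "STS is not activated in this region: activate it in the IAM console"
	default:
		return fmt.Sprintf("STS error %s: %s", aerr.Code(), aerr.Message())
	}
}
```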
+
+package sts
+
+import (
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/client/metadata"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/aws/signer/v4"
+	"github.com/aws/aws-sdk-go/private/protocol/query"
+)
+
+// STS provides the API operation methods for making requests to
+// AWS Security Token Service. See this package's package overview docs
+// for details on the service.
+//
+// STS methods are safe to use concurrently. It is not safe to
+// mutate any of the struct's properties though.
+type STS struct {
+	*client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// Service information constants
+const (
+	ServiceName = "sts"       // Name of service.
+	EndpointsID = ServiceName // ID to lookup a service endpoint with.
+	ServiceID   = "STS"       // ServiceID is a unique identifier of a specific service.
+)
+
+// New creates a new instance of the STS client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+//     // Create a STS client from just a session.
+//     svc := sts.New(mySession)
+//
+//     // Create a STS client with additional configuration
+//     svc := sts.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *STS {
+	c := p.ClientConfig(EndpointsID, cfgs...)
+	return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *STS {
+	svc := &STS{
+		Client: client.New(
+			cfg,
+			metadata.ClientInfo{
+				ServiceName:   ServiceName,
+				ServiceID:     ServiceID,
+				SigningName:   signingName,
+				SigningRegion: signingRegion,
+				Endpoint:      endpoint,
+				APIVersion:    "2011-06-15",
+			},
+			handlers,
+		),
+	}
+
+	// Handlers
+	svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
+	svc.Handlers.Build.PushBackNamed(query.BuildHandler)
+	svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler)
+	svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler)
+	svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler)
+
+	// Run custom client initialization if present
+	if initClient != nil {
+		initClient(svc.Client)
+	}
+
+	return svc
+}
+
+// newRequest creates a new request for a STS operation and runs any
+// custom request initialization.
+func (c *STS) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/vendor/github.com/beorn7/perks/LICENSE b/vendor/github.com/beorn7/perks/LICENSE new file mode 100644 index 000000000..339177be6 --- /dev/null +++ b/vendor/github.com/beorn7/perks/LICENSE @@ -0,0 +1,20 @@ +Copyright (C) 2013 Blake Mizerany + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/beorn7/perks/quantile/stream.go b/vendor/github.com/beorn7/perks/quantile/stream.go new file mode 100644 index 000000000..d7d14f8eb --- /dev/null +++ b/vendor/github.com/beorn7/perks/quantile/stream.go @@ -0,0 +1,316 @@ +// Package quantile computes approximate quantiles over an unbounded data +// stream within low memory and CPU bounds. +// +// A small amount of accuracy is traded to achieve the above properties. +// +// Multiple streams can be merged before calling Query to generate a single set +// of results. This is meaningful when the streams represent the same type of +// data. See Merge and Samples. +// +// For more detailed information about the algorithm used, see: +// +// Effective Computation of Biased Quantiles over Data Streams +// +// http://www.cs.rutgers.edu/~muthu/bquant.pdf +package quantile + +import ( + "math" + "sort" +) + +// Sample holds an observed value and meta information for compression. JSON +// tags have been added for convenience. +type Sample struct { + Value float64 `json:",string"` + Width float64 `json:",string"` + Delta float64 `json:",string"` +} + +// Samples represents a slice of samples. It implements sort.Interface. +type Samples []Sample + +func (a Samples) Len() int { return len(a) } +func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value } +func (a Samples) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +type invariant func(s *stream, r float64) float64 + +// NewLowBiased returns an initialized Stream for low-biased quantiles +// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but +// error guarantees can still be given even for the lower ranks of the data +// distribution. +// +// The provided epsilon is a relative error, i.e. the true quantile of a value +// returned by a query is guaranteed to be within (1±Epsilon)*Quantile. +// +// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error +// properties. 
+func NewLowBiased(epsilon float64) *Stream {
+	ƒ := func(s *stream, r float64) float64 {
+		return 2 * epsilon * r
+	}
+	return newStream(ƒ)
+}
+
+// NewHighBiased returns an initialized Stream for high-biased quantiles
+// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
+// error guarantees can still be given even for the higher ranks of the data
+// distribution.
+//
+// The provided epsilon is a relative error, i.e. the true quantile of a value
+// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile).
+//
+// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
+// properties.
+func NewHighBiased(epsilon float64) *Stream {
+	ƒ := func(s *stream, r float64) float64 {
+		return 2 * epsilon * (s.n - r)
+	}
+	return newStream(ƒ)
+}
+
+// NewTargeted returns an initialized Stream concerned with a particular set of
+// quantile values that are supplied a priori. Knowing these a priori reduces
+// space and computation time. The targets map maps the desired quantiles to
+// their absolute errors, i.e. the true quantile of a value returned by a query
+// is guaranteed to be within (Quantile±Epsilon).
+//
+// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties.
+func NewTargeted(targetMap map[float64]float64) *Stream {
+	// Convert map to slice to avoid slow iterations on a map.
+	// ƒ is called on the hot path, so converting the map to a slice
+	// beforehand results in significant CPU savings.
+	targets := targetMapToSlice(targetMap)
+
+	ƒ := func(s *stream, r float64) float64 {
+		var m = math.MaxFloat64
+		var f float64
+		for _, t := range targets {
+			if t.quantile*s.n <= r {
+				f = (2 * t.epsilon * r) / t.quantile
+			} else {
+				f = (2 * t.epsilon * (s.n - r)) / (1 - t.quantile)
+			}
+			if f < m {
+				m = f
+			}
+		}
+		return m
+	}
+	return newStream(ƒ)
+}
+
+type target struct {
+	quantile float64
+	epsilon  float64
+}
+
+func targetMapToSlice(targetMap map[float64]float64) []target {
+	targets := make([]target, 0, len(targetMap))
+
+	for quantile, epsilon := range targetMap {
+		t := target{
+			quantile: quantile,
+			epsilon:  epsilon,
+		}
+		targets = append(targets, t)
+	}
+
+	return targets
+}
+
+// Stream computes quantiles for a stream of float64s. It is not thread-safe by
+// design. Take care when using across multiple goroutines.
+type Stream struct {
+	*stream
+	b      Samples
+	sorted bool
+}
+
+func newStream(ƒ invariant) *Stream {
+	x := &stream{ƒ: ƒ}
+	return &Stream{x, make(Samples, 0, 500), true}
+}
+
+// Insert inserts v into the stream.
+func (s *Stream) Insert(v float64) {
+	s.insert(Sample{Value: v, Width: 1})
+}
+
+func (s *Stream) insert(sample Sample) {
+	s.b = append(s.b, sample)
+	s.sorted = false
+	if len(s.b) == cap(s.b) {
+		s.flush()
+	}
+}
+
+// Query returns the computed qth percentile value. If s was created with
+// NewTargeted, and q is not in the set of quantiles provided a priori, Query
+// will return an unspecified result.
+func (s *Stream) Query(q float64) float64 {
+	if !s.flushed() {
+		// Fast path when there hasn't been enough data for a flush;
+		// this also yields better accuracy for small sets of data.
+		l := len(s.b)
+		if l == 0 {
+			return 0
+		}
+		i := int(math.Ceil(float64(l) * q))
+		if i > 0 {
+			i -= 1
+		}
+		s.maybeSort()
+		return s.b[i].Value
+	}
+	s.flush()
+	return s.stream.query(q)
+}
+
+// Merge merges samples into the underlying stream's samples. This is handy when
+// merging multiple streams from separate threads, database shards, etc.
+// +// ATTENTION: This method is broken and does not yield correct results. The +// underlying algorithm is not capable of merging streams correctly. +func (s *Stream) Merge(samples Samples) { + sort.Sort(samples) + s.stream.merge(samples) +} + +// Reset reinitializes and clears the list reusing the samples buffer memory. +func (s *Stream) Reset() { + s.stream.reset() + s.b = s.b[:0] +} + +// Samples returns stream samples held by s. +func (s *Stream) Samples() Samples { + if !s.flushed() { + return s.b + } + s.flush() + return s.stream.samples() +} + +// Count returns the total number of samples observed in the stream +// since initialization. +func (s *Stream) Count() int { + return len(s.b) + s.stream.count() +} + +func (s *Stream) flush() { + s.maybeSort() + s.stream.merge(s.b) + s.b = s.b[:0] +} + +func (s *Stream) maybeSort() { + if !s.sorted { + s.sorted = true + sort.Sort(s.b) + } +} + +func (s *Stream) flushed() bool { + return len(s.stream.l) > 0 +} + +type stream struct { + n float64 + l []Sample + ƒ invariant +} + +func (s *stream) reset() { + s.l = s.l[:0] + s.n = 0 +} + +func (s *stream) insert(v float64) { + s.merge(Samples{{v, 1, 0}}) +} + +func (s *stream) merge(samples Samples) { + // TODO(beorn7): This tries to merge not only individual samples, but + // whole summaries. The paper doesn't mention merging summaries at + // all. Unittests show that the merging is inaccurate. Find out how to + // do merges properly. + var r float64 + i := 0 + for _, sample := range samples { + for ; i < len(s.l); i++ { + c := s.l[i] + if c.Value > sample.Value { + // Insert at position i. + s.l = append(s.l, Sample{}) + copy(s.l[i+1:], s.l[i:]) + s.l[i] = Sample{ + sample.Value, + sample.Width, + math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1), + // TODO(beorn7): How to calculate delta correctly? + } + i++ + goto inserted + } + r += c.Width + } + s.l = append(s.l, Sample{sample.Value, sample.Width, 0}) + i++ + inserted: + s.n += sample.Width + r += sample.Width + } + s.compress() +} + +func (s *stream) count() int { + return int(s.n) +} + +func (s *stream) query(q float64) float64 { + t := math.Ceil(q * s.n) + t += math.Ceil(s.ƒ(s, t) / 2) + p := s.l[0] + var r float64 + for _, c := range s.l[1:] { + r += p.Width + if r+c.Width+c.Delta > t { + return p.Value + } + p = c + } + return p.Value +} + +func (s *stream) compress() { + if len(s.l) < 2 { + return + } + x := s.l[len(s.l)-1] + xi := len(s.l) - 1 + r := s.n - 1 - x.Width + + for i := len(s.l) - 2; i >= 0; i-- { + c := s.l[i] + if c.Width+x.Width+x.Delta <= s.ƒ(s, r) { + x.Width += c.Width + s.l[xi] = x + // Remove element at i. + copy(s.l[i:], s.l[i+1:]) + s.l = s.l[:len(s.l)-1] + xi -= 1 + } else { + x = c + xi = i + } + r -= c.Width + } +} + +func (s *stream) samples() Samples { + samples := make(Samples, len(s.l)) + copy(samples, s.l) + return samples +} diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/AUTHORS b/vendor/github.com/census-instrumentation/opencensus-proto/AUTHORS new file mode 100644 index 000000000..e068e731e --- /dev/null +++ b/vendor/github.com/census-instrumentation/opencensus-proto/AUTHORS @@ -0,0 +1 @@ +Google Inc. 
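To make the quantile API above concrete, here is a minimal usage sketch of targeted quantile tracking; the chosen quantiles and error bounds are arbitrary example values:

```go
package main

import (
	"fmt"

	"github.com/beorn7/perks/quantile"
)

func main() {
	// Track the median, p90, and p99 with per-quantile absolute error
	// bounds, as NewTargeted's documentation describes.
	q := quantile.NewTargeted(map[float64]float64{
		0.50: 0.005,
		0.90: 0.001,
		0.99: 0.0001,
	})

	// Observe 10,000 synthetic samples.
	for i := 1; i <= 10000; i++ {
		q.Insert(float64(i))
	}

	fmt.Println("p50 ~", q.Query(0.50))
	fmt.Println("p99 ~", q.Query(0.99))
}
```

Per the ATTENTION note on Merge above, combining separate Streams is known to be inaccurate, so prefer feeding all observations of a series into a single Stream.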
\ No newline at end of file diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/LICENSE b/vendor/github.com/census-instrumentation/opencensus-proto/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/github.com/census-instrumentation/opencensus-proto/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1/common.pb.go b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1/common.pb.go new file mode 100644 index 000000000..2f12e428e --- /dev/null +++ b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1/common.pb.go @@ -0,0 +1,356 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: opencensus/proto/agent/common/v1/common.proto + +package v1 + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + timestamp "github.com/golang/protobuf/ptypes/timestamp" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type LibraryInfo_Language int32 + +const ( + LibraryInfo_LANGUAGE_UNSPECIFIED LibraryInfo_Language = 0 + LibraryInfo_CPP LibraryInfo_Language = 1 + LibraryInfo_C_SHARP LibraryInfo_Language = 2 + LibraryInfo_ERLANG LibraryInfo_Language = 3 + LibraryInfo_GO_LANG LibraryInfo_Language = 4 + LibraryInfo_JAVA LibraryInfo_Language = 5 + LibraryInfo_NODE_JS LibraryInfo_Language = 6 + LibraryInfo_PHP LibraryInfo_Language = 7 + LibraryInfo_PYTHON LibraryInfo_Language = 8 + LibraryInfo_RUBY LibraryInfo_Language = 9 +) + +var LibraryInfo_Language_name = map[int32]string{ + 0: "LANGUAGE_UNSPECIFIED", + 1: "CPP", + 2: "C_SHARP", + 3: "ERLANG", + 4: "GO_LANG", + 5: "JAVA", + 6: "NODE_JS", + 7: "PHP", + 8: "PYTHON", + 9: "RUBY", +} + +var LibraryInfo_Language_value = map[string]int32{ + "LANGUAGE_UNSPECIFIED": 0, + "CPP": 1, + "C_SHARP": 2, + "ERLANG": 3, + "GO_LANG": 4, + "JAVA": 5, + "NODE_JS": 6, + "PHP": 7, + "PYTHON": 8, + "RUBY": 9, +} + +func (x LibraryInfo_Language) String() string { + return proto.EnumName(LibraryInfo_Language_name, int32(x)) +} + +func (LibraryInfo_Language) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_126c72ed8a252c84, []int{2, 0} +} + +// Identifier metadata of the Node (Application instrumented with OpenCensus) +// that connects to OpenCensus Agent. +// In the future we plan to extend the identifier proto definition to support +// additional information (e.g cloud id, etc.) +type Node struct { + // Identifier that uniquely identifies a process within a VM/container. 
+ Identifier *ProcessIdentifier `protobuf:"bytes,1,opt,name=identifier,proto3" json:"identifier,omitempty"` + // Information on the OpenCensus Library that initiates the stream. + LibraryInfo *LibraryInfo `protobuf:"bytes,2,opt,name=library_info,json=libraryInfo,proto3" json:"library_info,omitempty"` + // Additional information on service. + ServiceInfo *ServiceInfo `protobuf:"bytes,3,opt,name=service_info,json=serviceInfo,proto3" json:"service_info,omitempty"` + // Additional attributes. + Attributes map[string]string `protobuf:"bytes,4,rep,name=attributes,proto3" json:"attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Node) Reset() { *m = Node{} } +func (m *Node) String() string { return proto.CompactTextString(m) } +func (*Node) ProtoMessage() {} +func (*Node) Descriptor() ([]byte, []int) { + return fileDescriptor_126c72ed8a252c84, []int{0} +} + +func (m *Node) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Node.Unmarshal(m, b) +} +func (m *Node) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Node.Marshal(b, m, deterministic) +} +func (m *Node) XXX_Merge(src proto.Message) { + xxx_messageInfo_Node.Merge(m, src) +} +func (m *Node) XXX_Size() int { + return xxx_messageInfo_Node.Size(m) +} +func (m *Node) XXX_DiscardUnknown() { + xxx_messageInfo_Node.DiscardUnknown(m) +} + +var xxx_messageInfo_Node proto.InternalMessageInfo + +func (m *Node) GetIdentifier() *ProcessIdentifier { + if m != nil { + return m.Identifier + } + return nil +} + +func (m *Node) GetLibraryInfo() *LibraryInfo { + if m != nil { + return m.LibraryInfo + } + return nil +} + +func (m *Node) GetServiceInfo() *ServiceInfo { + if m != nil { + return m.ServiceInfo + } + return nil +} + +func (m *Node) GetAttributes() map[string]string { + if m != nil { + return m.Attributes + } + return nil +} + +// Identifier that uniquely identifies a process within a VM/container. +type ProcessIdentifier struct { + // The host name. Usually refers to the machine/container name. + // For example: os.Hostname() in Go, socket.gethostname() in Python. + HostName string `protobuf:"bytes,1,opt,name=host_name,json=hostName,proto3" json:"host_name,omitempty"` + // Process id. + Pid uint32 `protobuf:"varint,2,opt,name=pid,proto3" json:"pid,omitempty"` + // Start time of this ProcessIdentifier. Represented in epoch time. 
+ StartTimestamp *timestamp.Timestamp `protobuf:"bytes,3,opt,name=start_timestamp,json=startTimestamp,proto3" json:"start_timestamp,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ProcessIdentifier) Reset() { *m = ProcessIdentifier{} } +func (m *ProcessIdentifier) String() string { return proto.CompactTextString(m) } +func (*ProcessIdentifier) ProtoMessage() {} +func (*ProcessIdentifier) Descriptor() ([]byte, []int) { + return fileDescriptor_126c72ed8a252c84, []int{1} +} + +func (m *ProcessIdentifier) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ProcessIdentifier.Unmarshal(m, b) +} +func (m *ProcessIdentifier) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ProcessIdentifier.Marshal(b, m, deterministic) +} +func (m *ProcessIdentifier) XXX_Merge(src proto.Message) { + xxx_messageInfo_ProcessIdentifier.Merge(m, src) +} +func (m *ProcessIdentifier) XXX_Size() int { + return xxx_messageInfo_ProcessIdentifier.Size(m) +} +func (m *ProcessIdentifier) XXX_DiscardUnknown() { + xxx_messageInfo_ProcessIdentifier.DiscardUnknown(m) +} + +var xxx_messageInfo_ProcessIdentifier proto.InternalMessageInfo + +func (m *ProcessIdentifier) GetHostName() string { + if m != nil { + return m.HostName + } + return "" +} + +func (m *ProcessIdentifier) GetPid() uint32 { + if m != nil { + return m.Pid + } + return 0 +} + +func (m *ProcessIdentifier) GetStartTimestamp() *timestamp.Timestamp { + if m != nil { + return m.StartTimestamp + } + return nil +} + +// Information on OpenCensus Library. +type LibraryInfo struct { + // Language of OpenCensus Library. + Language LibraryInfo_Language `protobuf:"varint,1,opt,name=language,proto3,enum=opencensus.proto.agent.common.v1.LibraryInfo_Language" json:"language,omitempty"` + // Version of Agent exporter of Library. + ExporterVersion string `protobuf:"bytes,2,opt,name=exporter_version,json=exporterVersion,proto3" json:"exporter_version,omitempty"` + // Version of OpenCensus Library. 
+ CoreLibraryVersion string `protobuf:"bytes,3,opt,name=core_library_version,json=coreLibraryVersion,proto3" json:"core_library_version,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LibraryInfo) Reset() { *m = LibraryInfo{} } +func (m *LibraryInfo) String() string { return proto.CompactTextString(m) } +func (*LibraryInfo) ProtoMessage() {} +func (*LibraryInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_126c72ed8a252c84, []int{2} +} + +func (m *LibraryInfo) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_LibraryInfo.Unmarshal(m, b) +} +func (m *LibraryInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_LibraryInfo.Marshal(b, m, deterministic) +} +func (m *LibraryInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_LibraryInfo.Merge(m, src) +} +func (m *LibraryInfo) XXX_Size() int { + return xxx_messageInfo_LibraryInfo.Size(m) +} +func (m *LibraryInfo) XXX_DiscardUnknown() { + xxx_messageInfo_LibraryInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_LibraryInfo proto.InternalMessageInfo + +func (m *LibraryInfo) GetLanguage() LibraryInfo_Language { + if m != nil { + return m.Language + } + return LibraryInfo_LANGUAGE_UNSPECIFIED +} + +func (m *LibraryInfo) GetExporterVersion() string { + if m != nil { + return m.ExporterVersion + } + return "" +} + +func (m *LibraryInfo) GetCoreLibraryVersion() string { + if m != nil { + return m.CoreLibraryVersion + } + return "" +} + +// Additional service information. +type ServiceInfo struct { + // Name of the service. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ServiceInfo) Reset() { *m = ServiceInfo{} } +func (m *ServiceInfo) String() string { return proto.CompactTextString(m) } +func (*ServiceInfo) ProtoMessage() {} +func (*ServiceInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_126c72ed8a252c84, []int{3} +} + +func (m *ServiceInfo) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ServiceInfo.Unmarshal(m, b) +} +func (m *ServiceInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ServiceInfo.Marshal(b, m, deterministic) +} +func (m *ServiceInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceInfo.Merge(m, src) +} +func (m *ServiceInfo) XXX_Size() int { + return xxx_messageInfo_ServiceInfo.Size(m) +} +func (m *ServiceInfo) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceInfo proto.InternalMessageInfo + +func (m *ServiceInfo) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func init() { + proto.RegisterEnum("opencensus.proto.agent.common.v1.LibraryInfo_Language", LibraryInfo_Language_name, LibraryInfo_Language_value) + proto.RegisterType((*Node)(nil), "opencensus.proto.agent.common.v1.Node") + proto.RegisterMapType((map[string]string)(nil), "opencensus.proto.agent.common.v1.Node.AttributesEntry") + proto.RegisterType((*ProcessIdentifier)(nil), "opencensus.proto.agent.common.v1.ProcessIdentifier") + proto.RegisterType((*LibraryInfo)(nil), "opencensus.proto.agent.common.v1.LibraryInfo") + proto.RegisterType((*ServiceInfo)(nil), "opencensus.proto.agent.common.v1.ServiceInfo") +} + +func init() { + proto.RegisterFile("opencensus/proto/agent/common/v1/common.proto", fileDescriptor_126c72ed8a252c84) +} + 
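As a usage illustration for the message types registered above, the following sketch builds the Node identifier an instrumented process would send to the OpenCensus agent. It is not part of the generated file; the host name, versions, and service name are placeholders:

```go
package main

import (
	"fmt"

	commonpb "github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1"
	"github.com/golang/protobuf/ptypes"
)

func main() {
	// Assemble the Node message: process identity, library info, and
	// service info, as described by the struct docs above.
	node := &commonpb.Node{
		Identifier: &commonpb.ProcessIdentifier{
			HostName:       "example-host",
			Pid:            1234,
			StartTimestamp: ptypes.TimestampNow(),
		},
		LibraryInfo: &commonpb.LibraryInfo{
			Language:           commonpb.LibraryInfo_GO_LANG,
			ExporterVersion:    "0.0.1",
			CoreLibraryVersion: "0.22.0",
		},
		ServiceInfo: &commonpb.ServiceInfo{Name: "demo-service"},
	}

	fmt.Println(node.GetServiceInfo().GetName(), node.GetLibraryInfo().GetLanguage())
}
```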
+var fileDescriptor_126c72ed8a252c84 = []byte{ + // 590 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x94, 0x4f, 0x4f, 0xdb, 0x3e, + 0x1c, 0xc6, 0x7f, 0x69, 0x0a, 0xb4, 0xdf, 0xfc, 0x06, 0x99, 0xc5, 0xa1, 0x62, 0x87, 0xb1, 0xee, + 0xc2, 0x0e, 0x4d, 0x06, 0x48, 0xd3, 0x34, 0x69, 0x87, 0x52, 0x3a, 0x28, 0x42, 0x25, 0x72, 0x01, + 0x89, 0x5d, 0xa2, 0xb4, 0xb8, 0xc1, 0x5a, 0x63, 0x57, 0xb6, 0x53, 0x8d, 0xd3, 0x8e, 0xd3, 0xde, + 0xc0, 0x5e, 0xd4, 0x5e, 0xd5, 0x64, 0x3b, 0x69, 0xa3, 0x71, 0x28, 0xb7, 0xef, 0x9f, 0xe7, 0xf9, + 0x38, 0x7a, 0x6c, 0x05, 0x3a, 0x7c, 0x4e, 0xd8, 0x84, 0x30, 0x99, 0xcb, 0x70, 0x2e, 0xb8, 0xe2, + 0x61, 0x92, 0x12, 0xa6, 0xc2, 0x09, 0xcf, 0x32, 0xce, 0xc2, 0xc5, 0x61, 0x51, 0x05, 0x66, 0x89, + 0xf6, 0x57, 0x72, 0x3b, 0x09, 0x8c, 0x3c, 0x28, 0x44, 0x8b, 0xc3, 0xbd, 0xd7, 0x29, 0xe7, 0xe9, + 0x8c, 0x58, 0xd8, 0x38, 0x9f, 0x86, 0x8a, 0x66, 0x44, 0xaa, 0x24, 0x9b, 0x5b, 0x43, 0xfb, 0xb7, + 0x0b, 0xf5, 0x21, 0xbf, 0x27, 0x68, 0x04, 0x40, 0xef, 0x09, 0x53, 0x74, 0x4a, 0x89, 0x68, 0x39, + 0xfb, 0xce, 0x81, 0x77, 0x74, 0x1c, 0xac, 0x3b, 0x20, 0x88, 0x04, 0x9f, 0x10, 0x29, 0x07, 0x4b, + 0x2b, 0xae, 0x60, 0x50, 0x04, 0xff, 0xcf, 0xe8, 0x58, 0x24, 0xe2, 0x31, 0xa6, 0x6c, 0xca, 0x5b, + 0x35, 0x83, 0xed, 0xac, 0xc7, 0x5e, 0x5a, 0xd7, 0x80, 0x4d, 0x39, 0xf6, 0x66, 0xab, 0x46, 0x13, + 0x25, 0x11, 0x0b, 0x3a, 0x21, 0x96, 0xe8, 0x3e, 0x97, 0x38, 0xb2, 0x2e, 0x4b, 0x94, 0xab, 0x06, + 0xdd, 0x02, 0x24, 0x4a, 0x09, 0x3a, 0xce, 0x15, 0x91, 0xad, 0xfa, 0xbe, 0x7b, 0xe0, 0x1d, 0x7d, + 0x58, 0xcf, 0xd3, 0xa1, 0x05, 0xdd, 0xa5, 0xb1, 0xcf, 0x94, 0x78, 0xc4, 0x15, 0xd2, 0xde, 0x67, + 0xd8, 0xf9, 0x67, 0x8d, 0x7c, 0x70, 0xbf, 0x91, 0x47, 0x13, 0x6e, 0x13, 0xeb, 0x12, 0xed, 0xc2, + 0xc6, 0x22, 0x99, 0xe5, 0xc4, 0x24, 0xd3, 0xc4, 0xb6, 0xf9, 0x54, 0xfb, 0xe8, 0xb4, 0x7f, 0x3a, + 0xf0, 0xf2, 0x49, 0xb8, 0xe8, 0x15, 0x34, 0x1f, 0xb8, 0x54, 0x31, 0x4b, 0x32, 0x52, 0x70, 0x1a, + 0x7a, 0x30, 0x4c, 0x32, 0xa2, 0xf1, 0x73, 0x7a, 0x6f, 0x50, 0x2f, 0xb0, 0x2e, 0x51, 0x0f, 0x76, + 0xa4, 0x4a, 0x84, 0x8a, 0x97, 0xd7, 0x5e, 0x04, 0xb6, 0x17, 0xd8, 0x87, 0x11, 0x94, 0x0f, 0x23, + 0xb8, 0x2e, 0x15, 0x78, 0xdb, 0x58, 0x96, 0x7d, 0xfb, 0x4f, 0x0d, 0xbc, 0xca, 0x7d, 0x20, 0x0c, + 0x8d, 0x59, 0xc2, 0xd2, 0x3c, 0x49, 0xed, 0x27, 0x6c, 0x3f, 0x27, 0xae, 0x0a, 0x20, 0xb8, 0x2c, + 0xdc, 0x78, 0xc9, 0x41, 0xef, 0xc0, 0x27, 0xdf, 0xe7, 0x5c, 0x28, 0x22, 0xe2, 0x05, 0x11, 0x92, + 0x72, 0x56, 0x44, 0xb2, 0x53, 0xce, 0x6f, 0xed, 0x18, 0xbd, 0x87, 0xdd, 0x09, 0x17, 0x24, 0x2e, + 0x1f, 0x56, 0x29, 0x77, 0x8d, 0x1c, 0xe9, 0x5d, 0x71, 0x58, 0xe1, 0x68, 0xff, 0x72, 0xa0, 0x51, + 0x9e, 0x89, 0x5a, 0xb0, 0x7b, 0xd9, 0x1d, 0x9e, 0xdd, 0x74, 0xcf, 0xfa, 0xf1, 0xcd, 0x70, 0x14, + 0xf5, 0x7b, 0x83, 0x2f, 0x83, 0xfe, 0xa9, 0xff, 0x1f, 0xda, 0x02, 0xb7, 0x17, 0x45, 0xbe, 0x83, + 0x3c, 0xd8, 0xea, 0xc5, 0xa3, 0xf3, 0x2e, 0x8e, 0xfc, 0x1a, 0x02, 0xd8, 0xec, 0x63, 0xed, 0xf0, + 0x5d, 0xbd, 0x38, 0xbb, 0x8a, 0x4d, 0x53, 0x47, 0x0d, 0xa8, 0x5f, 0x74, 0x6f, 0xbb, 0xfe, 0x86, + 0x1e, 0x0f, 0xaf, 0x4e, 0xfb, 0xf1, 0xc5, 0xc8, 0xdf, 0xd4, 0x94, 0xe8, 0x3c, 0xf2, 0xb7, 0xb4, + 0x31, 0xba, 0xbb, 0x3e, 0xbf, 0x1a, 0xfa, 0x0d, 0xad, 0xc5, 0x37, 0x27, 0x77, 0x7e, 0xb3, 0xfd, + 0x06, 0xbc, 0xca, 0x4b, 0x44, 0x08, 0xea, 0x95, 0xab, 0x34, 0xf5, 0xc9, 0x0f, 0x78, 0x4b, 0xf9, + 0xda, 0x44, 0x4f, 0xbc, 0x9e, 0x29, 0x23, 0xbd, 0x8c, 0x9c, 0xaf, 0x83, 0x94, 0xaa, 0x87, 0x7c, + 0xac, 0x05, 0xa1, 0xf5, 0x75, 0x28, 0x93, 0x4a, 0xe4, 0x19, 0x61, 0x2a, 0x51, 0x94, 0xb3, 0x70, + 0x85, 0xec, 0xd8, 0x9f, 
0x4b, 0x4a, 0x58, 0x27, 0x7d, 0xf2, 0x8f, 0x19, 0x6f, 0x9a, 0xed, 0xf1, + 0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x94, 0xe5, 0x77, 0x76, 0x8e, 0x04, 0x00, 0x00, +} diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1/metrics.pb.go b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1/metrics.pb.go new file mode 100644 index 000000000..6759ced88 --- /dev/null +++ b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1/metrics.pb.go @@ -0,0 +1,1370 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: opencensus/proto/metrics/v1/metrics.proto + +package v1 + +import ( + fmt "fmt" + v1 "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1" + proto "github.com/golang/protobuf/proto" + timestamp "github.com/golang/protobuf/ptypes/timestamp" + wrappers "github.com/golang/protobuf/ptypes/wrappers" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// The kind of metric. It describes how the data is reported. +// +// A gauge is an instantaneous measurement of a value. +// +// A cumulative measurement is a value accumulated over a time interval. In +// a time series, cumulative measurements should have the same start time, +// increasing values and increasing end times, until an event resets the +// cumulative value to zero and sets a new start time for the following +// points. +type MetricDescriptor_Type int32 + +const ( + // Do not use this default value. + MetricDescriptor_UNSPECIFIED MetricDescriptor_Type = 0 + // Integer gauge. The value can go both up and down. + MetricDescriptor_GAUGE_INT64 MetricDescriptor_Type = 1 + // Floating point gauge. The value can go both up and down. + MetricDescriptor_GAUGE_DOUBLE MetricDescriptor_Type = 2 + // Distribution gauge measurement. The count and sum can go both up and + // down. Recorded values are always >= 0. + // Used in scenarios like a snapshot of time the current items in a queue + // have spent there. + MetricDescriptor_GAUGE_DISTRIBUTION MetricDescriptor_Type = 3 + // Integer cumulative measurement. The value cannot decrease, if resets + // then the start_time should also be reset. + MetricDescriptor_CUMULATIVE_INT64 MetricDescriptor_Type = 4 + // Floating point cumulative measurement. The value cannot decrease, if + // resets then the start_time should also be reset. Recorded values are + // always >= 0. + MetricDescriptor_CUMULATIVE_DOUBLE MetricDescriptor_Type = 5 + // Distribution cumulative measurement. The count and sum cannot decrease, + // if resets then the start_time should also be reset. + MetricDescriptor_CUMULATIVE_DISTRIBUTION MetricDescriptor_Type = 6 + // Some frameworks implemented Histograms as a summary of observations + // (usually things like request durations and response sizes). While it + // also provides a total count of observations and a sum of all observed + // values, it calculates configurable percentiles over a sliding time + // window. This is not recommended, since it cannot be aggregated. 
+ MetricDescriptor_SUMMARY MetricDescriptor_Type = 7 +) + +var MetricDescriptor_Type_name = map[int32]string{ + 0: "UNSPECIFIED", + 1: "GAUGE_INT64", + 2: "GAUGE_DOUBLE", + 3: "GAUGE_DISTRIBUTION", + 4: "CUMULATIVE_INT64", + 5: "CUMULATIVE_DOUBLE", + 6: "CUMULATIVE_DISTRIBUTION", + 7: "SUMMARY", +} + +var MetricDescriptor_Type_value = map[string]int32{ + "UNSPECIFIED": 0, + "GAUGE_INT64": 1, + "GAUGE_DOUBLE": 2, + "GAUGE_DISTRIBUTION": 3, + "CUMULATIVE_INT64": 4, + "CUMULATIVE_DOUBLE": 5, + "CUMULATIVE_DISTRIBUTION": 6, + "SUMMARY": 7, +} + +func (x MetricDescriptor_Type) String() string { + return proto.EnumName(MetricDescriptor_Type_name, int32(x)) +} + +func (MetricDescriptor_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_0ee3deb72053811a, []int{1, 0} +} + +// Defines a Metric which has one or more timeseries. +type Metric struct { + // The descriptor of the Metric. This is an optimization for network wire + // size, from data-model perspective a Metric contains always a + // MetricDescriptor. + // + // Types that are valid to be assigned to Descriptor_: + // *Metric_MetricDescriptor + // *Metric_Name + Descriptor_ isMetric_Descriptor_ `protobuf_oneof:"descriptor"` + // One or more timeseries for a single metric, where each timeseries has + // one or more points. + Timeseries []*TimeSeries `protobuf:"bytes,3,rep,name=timeseries,proto3" json:"timeseries,omitempty"` + // The resource for the metric. If unset, it may be set to a default value + // provided for a sequence of messages in an RPC stream. + Resource *v1.Resource `protobuf:"bytes,4,opt,name=resource,proto3" json:"resource,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Metric) Reset() { *m = Metric{} } +func (m *Metric) String() string { return proto.CompactTextString(m) } +func (*Metric) ProtoMessage() {} +func (*Metric) Descriptor() ([]byte, []int) { + return fileDescriptor_0ee3deb72053811a, []int{0} +} + +func (m *Metric) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Metric.Unmarshal(m, b) +} +func (m *Metric) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Metric.Marshal(b, m, deterministic) +} +func (m *Metric) XXX_Merge(src proto.Message) { + xxx_messageInfo_Metric.Merge(m, src) +} +func (m *Metric) XXX_Size() int { + return xxx_messageInfo_Metric.Size(m) +} +func (m *Metric) XXX_DiscardUnknown() { + xxx_messageInfo_Metric.DiscardUnknown(m) +} + +var xxx_messageInfo_Metric proto.InternalMessageInfo + +type isMetric_Descriptor_ interface { + isMetric_Descriptor_() +} + +type Metric_MetricDescriptor struct { + MetricDescriptor *MetricDescriptor `protobuf:"bytes,1,opt,name=metric_descriptor,json=metricDescriptor,proto3,oneof"` +} + +type Metric_Name struct { + Name string `protobuf:"bytes,2,opt,name=name,proto3,oneof"` +} + +func (*Metric_MetricDescriptor) isMetric_Descriptor_() {} + +func (*Metric_Name) isMetric_Descriptor_() {} + +func (m *Metric) GetDescriptor_() isMetric_Descriptor_ { + if m != nil { + return m.Descriptor_ + } + return nil +} + +func (m *Metric) GetMetricDescriptor() *MetricDescriptor { + if x, ok := m.GetDescriptor_().(*Metric_MetricDescriptor); ok { + return x.MetricDescriptor + } + return nil +} + +func (m *Metric) GetName() string { + if x, ok := m.GetDescriptor_().(*Metric_Name); ok { + return x.Name + } + return "" +} + +func (m *Metric) GetTimeseries() []*TimeSeries { + if m != nil { + return m.Timeseries + } + return nil +} + +func (m 
*Metric) GetResource() *v1.Resource { + if m != nil { + return m.Resource + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*Metric) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Metric_OneofMarshaler, _Metric_OneofUnmarshaler, _Metric_OneofSizer, []interface{}{ + (*Metric_MetricDescriptor)(nil), + (*Metric_Name)(nil), + } +} + +func _Metric_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Metric) + // descriptor + switch x := m.Descriptor_.(type) { + case *Metric_MetricDescriptor: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.MetricDescriptor); err != nil { + return err + } + case *Metric_Name: + b.EncodeVarint(2<<3 | proto.WireBytes) + b.EncodeStringBytes(x.Name) + case nil: + default: + return fmt.Errorf("Metric.Descriptor_ has unexpected type %T", x) + } + return nil +} + +func _Metric_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Metric) + switch tag { + case 1: // descriptor.metric_descriptor + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(MetricDescriptor) + err := b.DecodeMessage(msg) + m.Descriptor_ = &Metric_MetricDescriptor{msg} + return true, err + case 2: // descriptor.name + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Descriptor_ = &Metric_Name{x} + return true, err + default: + return false, nil + } +} + +func _Metric_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Metric) + // descriptor + switch x := m.Descriptor_.(type) { + case *Metric_MetricDescriptor: + s := proto.Size(x.MetricDescriptor) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *Metric_Name: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(len(x.Name))) + n += len(x.Name) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Defines a metric type and its schema. +type MetricDescriptor struct { + // The metric type, including its DNS name prefix. It must be unique. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // A detailed description of the metric, which can be used in documentation. + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + // The unit in which the metric value is reported. Follows the format + // described by http://unitsofmeasure.org/ucum.html. + Unit string `protobuf:"bytes,3,opt,name=unit,proto3" json:"unit,omitempty"` + Type MetricDescriptor_Type `protobuf:"varint,4,opt,name=type,proto3,enum=opencensus.proto.metrics.v1.MetricDescriptor_Type" json:"type,omitempty"` + // The label keys associated with the metric descriptor. 
+ LabelKeys []*LabelKey `protobuf:"bytes,5,rep,name=label_keys,json=labelKeys,proto3" json:"label_keys,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MetricDescriptor) Reset() { *m = MetricDescriptor{} } +func (m *MetricDescriptor) String() string { return proto.CompactTextString(m) } +func (*MetricDescriptor) ProtoMessage() {} +func (*MetricDescriptor) Descriptor() ([]byte, []int) { + return fileDescriptor_0ee3deb72053811a, []int{1} +} + +func (m *MetricDescriptor) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MetricDescriptor.Unmarshal(m, b) +} +func (m *MetricDescriptor) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MetricDescriptor.Marshal(b, m, deterministic) +} +func (m *MetricDescriptor) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetricDescriptor.Merge(m, src) +} +func (m *MetricDescriptor) XXX_Size() int { + return xxx_messageInfo_MetricDescriptor.Size(m) +} +func (m *MetricDescriptor) XXX_DiscardUnknown() { + xxx_messageInfo_MetricDescriptor.DiscardUnknown(m) +} + +var xxx_messageInfo_MetricDescriptor proto.InternalMessageInfo + +func (m *MetricDescriptor) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *MetricDescriptor) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *MetricDescriptor) GetUnit() string { + if m != nil { + return m.Unit + } + return "" +} + +func (m *MetricDescriptor) GetType() MetricDescriptor_Type { + if m != nil { + return m.Type + } + return MetricDescriptor_UNSPECIFIED +} + +func (m *MetricDescriptor) GetLabelKeys() []*LabelKey { + if m != nil { + return m.LabelKeys + } + return nil +} + +// Defines a label key associated with a metric descriptor. +type LabelKey struct { + // The key for the label. + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + // A human-readable description of what this label key represents. + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LabelKey) Reset() { *m = LabelKey{} } +func (m *LabelKey) String() string { return proto.CompactTextString(m) } +func (*LabelKey) ProtoMessage() {} +func (*LabelKey) Descriptor() ([]byte, []int) { + return fileDescriptor_0ee3deb72053811a, []int{2} +} + +func (m *LabelKey) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_LabelKey.Unmarshal(m, b) +} +func (m *LabelKey) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_LabelKey.Marshal(b, m, deterministic) +} +func (m *LabelKey) XXX_Merge(src proto.Message) { + xxx_messageInfo_LabelKey.Merge(m, src) +} +func (m *LabelKey) XXX_Size() int { + return xxx_messageInfo_LabelKey.Size(m) +} +func (m *LabelKey) XXX_DiscardUnknown() { + xxx_messageInfo_LabelKey.DiscardUnknown(m) +} + +var xxx_messageInfo_LabelKey proto.InternalMessageInfo + +func (m *LabelKey) GetKey() string { + if m != nil { + return m.Key + } + return "" +} + +func (m *LabelKey) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +// A collection of data points that describes the time-varying values +// of a metric. +type TimeSeries struct { + // Must be present for cumulative metrics. The time when the cumulative value + // was reset to zero. Exclusive. 
The cumulative value is over the time interval + // (start_timestamp, timestamp]. If not specified, the backend can use the + // previous recorded value. + StartTimestamp *timestamp.Timestamp `protobuf:"bytes,1,opt,name=start_timestamp,json=startTimestamp,proto3" json:"start_timestamp,omitempty"` + // The set of label values that uniquely identify this timeseries. Applies to + // all points. The order of label values must match that of label keys in the + // metric descriptor. + LabelValues []*LabelValue `protobuf:"bytes,2,rep,name=label_values,json=labelValues,proto3" json:"label_values,omitempty"` + // The data points of this timeseries. Point.value type MUST match the + // MetricDescriptor.type. + Points []*Point `protobuf:"bytes,3,rep,name=points,proto3" json:"points,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TimeSeries) Reset() { *m = TimeSeries{} } +func (m *TimeSeries) String() string { return proto.CompactTextString(m) } +func (*TimeSeries) ProtoMessage() {} +func (*TimeSeries) Descriptor() ([]byte, []int) { + return fileDescriptor_0ee3deb72053811a, []int{3} +} + +func (m *TimeSeries) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TimeSeries.Unmarshal(m, b) +} +func (m *TimeSeries) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TimeSeries.Marshal(b, m, deterministic) +} +func (m *TimeSeries) XXX_Merge(src proto.Message) { + xxx_messageInfo_TimeSeries.Merge(m, src) +} +func (m *TimeSeries) XXX_Size() int { + return xxx_messageInfo_TimeSeries.Size(m) +} +func (m *TimeSeries) XXX_DiscardUnknown() { + xxx_messageInfo_TimeSeries.DiscardUnknown(m) +} + +var xxx_messageInfo_TimeSeries proto.InternalMessageInfo + +func (m *TimeSeries) GetStartTimestamp() *timestamp.Timestamp { + if m != nil { + return m.StartTimestamp + } + return nil +} + +func (m *TimeSeries) GetLabelValues() []*LabelValue { + if m != nil { + return m.LabelValues + } + return nil +} + +func (m *TimeSeries) GetPoints() []*Point { + if m != nil { + return m.Points + } + return nil +} + +type LabelValue struct { + // The value for the label. + Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` + // If false the value field is ignored and considered not set. + // This is used to differentiate a missing label from an empty string. 
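+	// For example, a LabelValue with HasValue=false represents an unset
+	// label, whereas Value="" with HasValue=true is an explicitly empty string.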
+ HasValue bool `protobuf:"varint,2,opt,name=has_value,json=hasValue,proto3" json:"has_value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LabelValue) Reset() { *m = LabelValue{} } +func (m *LabelValue) String() string { return proto.CompactTextString(m) } +func (*LabelValue) ProtoMessage() {} +func (*LabelValue) Descriptor() ([]byte, []int) { + return fileDescriptor_0ee3deb72053811a, []int{4} +} + +func (m *LabelValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_LabelValue.Unmarshal(m, b) +} +func (m *LabelValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_LabelValue.Marshal(b, m, deterministic) +} +func (m *LabelValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_LabelValue.Merge(m, src) +} +func (m *LabelValue) XXX_Size() int { + return xxx_messageInfo_LabelValue.Size(m) +} +func (m *LabelValue) XXX_DiscardUnknown() { + xxx_messageInfo_LabelValue.DiscardUnknown(m) +} + +var xxx_messageInfo_LabelValue proto.InternalMessageInfo + +func (m *LabelValue) GetValue() string { + if m != nil { + return m.Value + } + return "" +} + +func (m *LabelValue) GetHasValue() bool { + if m != nil { + return m.HasValue + } + return false +} + +// A timestamped measurement. +type Point struct { + // The moment when this point was recorded. Inclusive. + // If not specified, the timestamp will be decided by the backend. + Timestamp *timestamp.Timestamp `protobuf:"bytes,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + // The actual point value. + // + // Types that are valid to be assigned to Value: + // *Point_Int64Value + // *Point_DoubleValue + // *Point_DistributionValue + // *Point_SummaryValue + Value isPoint_Value `protobuf_oneof:"value"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Point) Reset() { *m = Point{} } +func (m *Point) String() string { return proto.CompactTextString(m) } +func (*Point) ProtoMessage() {} +func (*Point) Descriptor() ([]byte, []int) { + return fileDescriptor_0ee3deb72053811a, []int{5} +} + +func (m *Point) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Point.Unmarshal(m, b) +} +func (m *Point) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Point.Marshal(b, m, deterministic) +} +func (m *Point) XXX_Merge(src proto.Message) { + xxx_messageInfo_Point.Merge(m, src) +} +func (m *Point) XXX_Size() int { + return xxx_messageInfo_Point.Size(m) +} +func (m *Point) XXX_DiscardUnknown() { + xxx_messageInfo_Point.DiscardUnknown(m) +} + +var xxx_messageInfo_Point proto.InternalMessageInfo + +func (m *Point) GetTimestamp() *timestamp.Timestamp { + if m != nil { + return m.Timestamp + } + return nil +} + +type isPoint_Value interface { + isPoint_Value() +} + +type Point_Int64Value struct { + Int64Value int64 `protobuf:"varint,2,opt,name=int64_value,json=int64Value,proto3,oneof"` +} + +type Point_DoubleValue struct { + DoubleValue float64 `protobuf:"fixed64,3,opt,name=double_value,json=doubleValue,proto3,oneof"` +} + +type Point_DistributionValue struct { + DistributionValue *DistributionValue `protobuf:"bytes,4,opt,name=distribution_value,json=distributionValue,proto3,oneof"` +} + +type Point_SummaryValue struct { + SummaryValue *SummaryValue `protobuf:"bytes,5,opt,name=summary_value,json=summaryValue,proto3,oneof"` +} + +func (*Point_Int64Value) isPoint_Value() {} + +func (*Point_DoubleValue) 
isPoint_Value() {} + +func (*Point_DistributionValue) isPoint_Value() {} + +func (*Point_SummaryValue) isPoint_Value() {} + +func (m *Point) GetValue() isPoint_Value { + if m != nil { + return m.Value + } + return nil +} + +func (m *Point) GetInt64Value() int64 { + if x, ok := m.GetValue().(*Point_Int64Value); ok { + return x.Int64Value + } + return 0 +} + +func (m *Point) GetDoubleValue() float64 { + if x, ok := m.GetValue().(*Point_DoubleValue); ok { + return x.DoubleValue + } + return 0 +} + +func (m *Point) GetDistributionValue() *DistributionValue { + if x, ok := m.GetValue().(*Point_DistributionValue); ok { + return x.DistributionValue + } + return nil +} + +func (m *Point) GetSummaryValue() *SummaryValue { + if x, ok := m.GetValue().(*Point_SummaryValue); ok { + return x.SummaryValue + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*Point) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Point_OneofMarshaler, _Point_OneofUnmarshaler, _Point_OneofSizer, []interface{}{ + (*Point_Int64Value)(nil), + (*Point_DoubleValue)(nil), + (*Point_DistributionValue)(nil), + (*Point_SummaryValue)(nil), + } +} + +func _Point_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Point) + // value + switch x := m.Value.(type) { + case *Point_Int64Value: + b.EncodeVarint(2<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.Int64Value)) + case *Point_DoubleValue: + b.EncodeVarint(3<<3 | proto.WireFixed64) + b.EncodeFixed64(math.Float64bits(x.DoubleValue)) + case *Point_DistributionValue: + b.EncodeVarint(4<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.DistributionValue); err != nil { + return err + } + case *Point_SummaryValue: + b.EncodeVarint(5<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.SummaryValue); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("Point.Value has unexpected type %T", x) + } + return nil +} + +func _Point_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Point) + switch tag { + case 2: // value.int64_value + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Value = &Point_Int64Value{int64(x)} + return true, err + case 3: // value.double_value + if wire != proto.WireFixed64 { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeFixed64() + m.Value = &Point_DoubleValue{math.Float64frombits(x)} + return true, err + case 4: // value.distribution_value + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(DistributionValue) + err := b.DecodeMessage(msg) + m.Value = &Point_DistributionValue{msg} + return true, err + case 5: // value.summary_value + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(SummaryValue) + err := b.DecodeMessage(msg) + m.Value = &Point_SummaryValue{msg} + return true, err + default: + return false, nil + } +} + +func _Point_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Point) + // value + switch x := m.Value.(type) { + case *Point_Int64Value: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(x.Int64Value)) + case *Point_DoubleValue: + n += 1 // tag and wire + n += 8 + case *Point_DistributionValue: + s := proto.Size(x.DistributionValue) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case 
*Point_SummaryValue: + s := proto.Size(x.SummaryValue) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Distribution contains summary statistics for a population of values. It +// optionally contains a histogram representing the distribution of those +// values across a set of buckets. +type DistributionValue struct { + // The number of values in the population. Must be non-negative. This value + // must equal the sum of the values in bucket_counts if a histogram is + // provided. + Count int64 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"` + // The sum of the values in the population. If count is zero then this field + // must be zero. + Sum float64 `protobuf:"fixed64,2,opt,name=sum,proto3" json:"sum,omitempty"` + // The sum of squared deviations from the mean of the values in the + // population. For values x_i this is: + // + // Sum[i=1..n]((x_i - mean)^2) + // + // Knuth, "The Art of Computer Programming", Vol. 2, page 323, 3rd edition + // describes Welford's method for accumulating this sum in one pass. + // + // If count is zero then this field must be zero. + SumOfSquaredDeviation float64 `protobuf:"fixed64,3,opt,name=sum_of_squared_deviation,json=sumOfSquaredDeviation,proto3" json:"sum_of_squared_deviation,omitempty"` + // Don't change bucket boundaries within a TimeSeries if your backend doesn't + // support this. To save network bandwidth this field can be sent only the + // first time a metric is sent when using a streaming RPC. + BucketOptions *DistributionValue_BucketOptions `protobuf:"bytes,4,opt,name=bucket_options,json=bucketOptions,proto3" json:"bucket_options,omitempty"` + // If the distribution does not have a histogram, then omit this field. + // If there is a histogram, then the sum of the values in the Bucket counts + // must equal the value in the count field of the distribution. 
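+	// For example, with Count = 10, bucket counts such as {2, 5, 3} are
+	// consistent, since they sum to the distribution's count.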
+ Buckets []*DistributionValue_Bucket `protobuf:"bytes,5,rep,name=buckets,proto3" json:"buckets,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DistributionValue) Reset() { *m = DistributionValue{} } +func (m *DistributionValue) String() string { return proto.CompactTextString(m) } +func (*DistributionValue) ProtoMessage() {} +func (*DistributionValue) Descriptor() ([]byte, []int) { + return fileDescriptor_0ee3deb72053811a, []int{6} +} + +func (m *DistributionValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DistributionValue.Unmarshal(m, b) +} +func (m *DistributionValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DistributionValue.Marshal(b, m, deterministic) +} +func (m *DistributionValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_DistributionValue.Merge(m, src) +} +func (m *DistributionValue) XXX_Size() int { + return xxx_messageInfo_DistributionValue.Size(m) +} +func (m *DistributionValue) XXX_DiscardUnknown() { + xxx_messageInfo_DistributionValue.DiscardUnknown(m) +} + +var xxx_messageInfo_DistributionValue proto.InternalMessageInfo + +func (m *DistributionValue) GetCount() int64 { + if m != nil { + return m.Count + } + return 0 +} + +func (m *DistributionValue) GetSum() float64 { + if m != nil { + return m.Sum + } + return 0 +} + +func (m *DistributionValue) GetSumOfSquaredDeviation() float64 { + if m != nil { + return m.SumOfSquaredDeviation + } + return 0 +} + +func (m *DistributionValue) GetBucketOptions() *DistributionValue_BucketOptions { + if m != nil { + return m.BucketOptions + } + return nil +} + +func (m *DistributionValue) GetBuckets() []*DistributionValue_Bucket { + if m != nil { + return m.Buckets + } + return nil +} + +// A Distribution may optionally contain a histogram of the values in the +// population. The bucket boundaries for that histogram are described by +// BucketOptions. +// +// If bucket_options has no type, then there is no histogram associated with +// the Distribution. 
+type DistributionValue_BucketOptions struct { + // Types that are valid to be assigned to Type: + // *DistributionValue_BucketOptions_Explicit_ + Type isDistributionValue_BucketOptions_Type `protobuf_oneof:"type"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DistributionValue_BucketOptions) Reset() { *m = DistributionValue_BucketOptions{} } +func (m *DistributionValue_BucketOptions) String() string { return proto.CompactTextString(m) } +func (*DistributionValue_BucketOptions) ProtoMessage() {} +func (*DistributionValue_BucketOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_0ee3deb72053811a, []int{6, 0} +} + +func (m *DistributionValue_BucketOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DistributionValue_BucketOptions.Unmarshal(m, b) +} +func (m *DistributionValue_BucketOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DistributionValue_BucketOptions.Marshal(b, m, deterministic) +} +func (m *DistributionValue_BucketOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_DistributionValue_BucketOptions.Merge(m, src) +} +func (m *DistributionValue_BucketOptions) XXX_Size() int { + return xxx_messageInfo_DistributionValue_BucketOptions.Size(m) +} +func (m *DistributionValue_BucketOptions) XXX_DiscardUnknown() { + xxx_messageInfo_DistributionValue_BucketOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_DistributionValue_BucketOptions proto.InternalMessageInfo + +type isDistributionValue_BucketOptions_Type interface { + isDistributionValue_BucketOptions_Type() +} + +type DistributionValue_BucketOptions_Explicit_ struct { + Explicit *DistributionValue_BucketOptions_Explicit `protobuf:"bytes,1,opt,name=explicit,proto3,oneof"` +} + +func (*DistributionValue_BucketOptions_Explicit_) isDistributionValue_BucketOptions_Type() {} + +func (m *DistributionValue_BucketOptions) GetType() isDistributionValue_BucketOptions_Type { + if m != nil { + return m.Type + } + return nil +} + +func (m *DistributionValue_BucketOptions) GetExplicit() *DistributionValue_BucketOptions_Explicit { + if x, ok := m.GetType().(*DistributionValue_BucketOptions_Explicit_); ok { + return x.Explicit + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*DistributionValue_BucketOptions) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _DistributionValue_BucketOptions_OneofMarshaler, _DistributionValue_BucketOptions_OneofUnmarshaler, _DistributionValue_BucketOptions_OneofSizer, []interface{}{ + (*DistributionValue_BucketOptions_Explicit_)(nil), + } +} + +func _DistributionValue_BucketOptions_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*DistributionValue_BucketOptions) + // type + switch x := m.Type.(type) { + case *DistributionValue_BucketOptions_Explicit_: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Explicit); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("DistributionValue_BucketOptions.Type has unexpected type %T", x) + } + return nil +} + +func _DistributionValue_BucketOptions_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*DistributionValue_BucketOptions) + switch tag { + case 1: // type.explicit + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(DistributionValue_BucketOptions_Explicit) + err := b.DecodeMessage(msg) + m.Type = &DistributionValue_BucketOptions_Explicit_{msg} + return true, err + default: + return false, nil + } +} + +func _DistributionValue_BucketOptions_OneofSizer(msg proto.Message) (n int) { + m := msg.(*DistributionValue_BucketOptions) + // type + switch x := m.Type.(type) { + case *DistributionValue_BucketOptions_Explicit_: + s := proto.Size(x.Explicit) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Specifies a set of buckets with arbitrary upper-bounds. +// This defines size(bounds) + 1 (= N) buckets. The boundaries for bucket +// index i are: +// +// [0, bucket_bounds[i]) for i == 0 +// [bucket_bounds[i-1], bucket_bounds[i]) for 0 < i < N-1 +// [bucket_bounds[i-1], +infinity) for i == N-1 +type DistributionValue_BucketOptions_Explicit struct { + // The values must be strictly increasing and > 0. 
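+	// For instance, Bounds = []float64{1, 5, 10} defines the four buckets
+	// [0, 1), [1, 5), [5, 10), and [10, +infinity), per the scheme above.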
+ Bounds []float64 `protobuf:"fixed64,1,rep,packed,name=bounds,proto3" json:"bounds,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DistributionValue_BucketOptions_Explicit) Reset() { + *m = DistributionValue_BucketOptions_Explicit{} +} +func (m *DistributionValue_BucketOptions_Explicit) String() string { return proto.CompactTextString(m) } +func (*DistributionValue_BucketOptions_Explicit) ProtoMessage() {} +func (*DistributionValue_BucketOptions_Explicit) Descriptor() ([]byte, []int) { + return fileDescriptor_0ee3deb72053811a, []int{6, 0, 0} +} + +func (m *DistributionValue_BucketOptions_Explicit) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DistributionValue_BucketOptions_Explicit.Unmarshal(m, b) +} +func (m *DistributionValue_BucketOptions_Explicit) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DistributionValue_BucketOptions_Explicit.Marshal(b, m, deterministic) +} +func (m *DistributionValue_BucketOptions_Explicit) XXX_Merge(src proto.Message) { + xxx_messageInfo_DistributionValue_BucketOptions_Explicit.Merge(m, src) +} +func (m *DistributionValue_BucketOptions_Explicit) XXX_Size() int { + return xxx_messageInfo_DistributionValue_BucketOptions_Explicit.Size(m) +} +func (m *DistributionValue_BucketOptions_Explicit) XXX_DiscardUnknown() { + xxx_messageInfo_DistributionValue_BucketOptions_Explicit.DiscardUnknown(m) +} + +var xxx_messageInfo_DistributionValue_BucketOptions_Explicit proto.InternalMessageInfo + +func (m *DistributionValue_BucketOptions_Explicit) GetBounds() []float64 { + if m != nil { + return m.Bounds + } + return nil +} + +type DistributionValue_Bucket struct { + // The number of values in each bucket of the histogram, as described in + // bucket_bounds. + Count int64 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"` + // If the distribution does not have a histogram, then omit this field. 
+ Exemplar *DistributionValue_Exemplar `protobuf:"bytes,2,opt,name=exemplar,proto3" json:"exemplar,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DistributionValue_Bucket) Reset() { *m = DistributionValue_Bucket{} } +func (m *DistributionValue_Bucket) String() string { return proto.CompactTextString(m) } +func (*DistributionValue_Bucket) ProtoMessage() {} +func (*DistributionValue_Bucket) Descriptor() ([]byte, []int) { + return fileDescriptor_0ee3deb72053811a, []int{6, 1} +} + +func (m *DistributionValue_Bucket) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DistributionValue_Bucket.Unmarshal(m, b) +} +func (m *DistributionValue_Bucket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DistributionValue_Bucket.Marshal(b, m, deterministic) +} +func (m *DistributionValue_Bucket) XXX_Merge(src proto.Message) { + xxx_messageInfo_DistributionValue_Bucket.Merge(m, src) +} +func (m *DistributionValue_Bucket) XXX_Size() int { + return xxx_messageInfo_DistributionValue_Bucket.Size(m) +} +func (m *DistributionValue_Bucket) XXX_DiscardUnknown() { + xxx_messageInfo_DistributionValue_Bucket.DiscardUnknown(m) +} + +var xxx_messageInfo_DistributionValue_Bucket proto.InternalMessageInfo + +func (m *DistributionValue_Bucket) GetCount() int64 { + if m != nil { + return m.Count + } + return 0 +} + +func (m *DistributionValue_Bucket) GetExemplar() *DistributionValue_Exemplar { + if m != nil { + return m.Exemplar + } + return nil +} + +// Exemplars are example points that may be used to annotate aggregated +// Distribution values. They are metadata that gives information about a +// particular value added to a Distribution bucket. +type DistributionValue_Exemplar struct { + // Value of the exemplar point. It determines which bucket the exemplar + // belongs to. + Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` + // The observation (sampling) time of the above value. + Timestamp *timestamp.Timestamp `protobuf:"bytes,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + // Contextual information about the example value. 
+ Attachments map[string]string `protobuf:"bytes,3,rep,name=attachments,proto3" json:"attachments,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DistributionValue_Exemplar) Reset() { *m = DistributionValue_Exemplar{} } +func (m *DistributionValue_Exemplar) String() string { return proto.CompactTextString(m) } +func (*DistributionValue_Exemplar) ProtoMessage() {} +func (*DistributionValue_Exemplar) Descriptor() ([]byte, []int) { + return fileDescriptor_0ee3deb72053811a, []int{6, 2} +} + +func (m *DistributionValue_Exemplar) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DistributionValue_Exemplar.Unmarshal(m, b) +} +func (m *DistributionValue_Exemplar) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DistributionValue_Exemplar.Marshal(b, m, deterministic) +} +func (m *DistributionValue_Exemplar) XXX_Merge(src proto.Message) { + xxx_messageInfo_DistributionValue_Exemplar.Merge(m, src) +} +func (m *DistributionValue_Exemplar) XXX_Size() int { + return xxx_messageInfo_DistributionValue_Exemplar.Size(m) +} +func (m *DistributionValue_Exemplar) XXX_DiscardUnknown() { + xxx_messageInfo_DistributionValue_Exemplar.DiscardUnknown(m) +} + +var xxx_messageInfo_DistributionValue_Exemplar proto.InternalMessageInfo + +func (m *DistributionValue_Exemplar) GetValue() float64 { + if m != nil { + return m.Value + } + return 0 +} + +func (m *DistributionValue_Exemplar) GetTimestamp() *timestamp.Timestamp { + if m != nil { + return m.Timestamp + } + return nil +} + +func (m *DistributionValue_Exemplar) GetAttachments() map[string]string { + if m != nil { + return m.Attachments + } + return nil +} + +// The start_timestamp only applies to the count and sum in the SummaryValue. +type SummaryValue struct { + // The total number of recorded values since start_time. Optional since + // some systems don't expose this. + Count *wrappers.Int64Value `protobuf:"bytes,1,opt,name=count,proto3" json:"count,omitempty"` + // The total sum of recorded values since start_time. Optional since some + // systems don't expose this. If count is zero then this field must be zero. + // This field must be unset if the sum is not available. + Sum *wrappers.DoubleValue `protobuf:"bytes,2,opt,name=sum,proto3" json:"sum,omitempty"` + // Values calculated over an arbitrary time window. 
+ Snapshot *SummaryValue_Snapshot `protobuf:"bytes,3,opt,name=snapshot,proto3" json:"snapshot,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SummaryValue) Reset() { *m = SummaryValue{} } +func (m *SummaryValue) String() string { return proto.CompactTextString(m) } +func (*SummaryValue) ProtoMessage() {} +func (*SummaryValue) Descriptor() ([]byte, []int) { + return fileDescriptor_0ee3deb72053811a, []int{7} +} + +func (m *SummaryValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SummaryValue.Unmarshal(m, b) +} +func (m *SummaryValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SummaryValue.Marshal(b, m, deterministic) +} +func (m *SummaryValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_SummaryValue.Merge(m, src) +} +func (m *SummaryValue) XXX_Size() int { + return xxx_messageInfo_SummaryValue.Size(m) +} +func (m *SummaryValue) XXX_DiscardUnknown() { + xxx_messageInfo_SummaryValue.DiscardUnknown(m) +} + +var xxx_messageInfo_SummaryValue proto.InternalMessageInfo + +func (m *SummaryValue) GetCount() *wrappers.Int64Value { + if m != nil { + return m.Count + } + return nil +} + +func (m *SummaryValue) GetSum() *wrappers.DoubleValue { + if m != nil { + return m.Sum + } + return nil +} + +func (m *SummaryValue) GetSnapshot() *SummaryValue_Snapshot { + if m != nil { + return m.Snapshot + } + return nil +} + +// The values in this message can be reset at arbitrary unknown times, with +// the requirement that all of them are reset at the same time. +type SummaryValue_Snapshot struct { + // The number of values in the snapshot. Optional since some systems don't + // expose this. + Count *wrappers.Int64Value `protobuf:"bytes,1,opt,name=count,proto3" json:"count,omitempty"` + // The sum of values in the snapshot. Optional since some systems don't + // expose this. If count is zero then this field must be zero or not set + // (if not supported). + Sum *wrappers.DoubleValue `protobuf:"bytes,2,opt,name=sum,proto3" json:"sum,omitempty"` + // A list of values at different percentiles of the distribution calculated + // from the current snapshot. The percentiles must be strictly increasing. 
+ PercentileValues []*SummaryValue_Snapshot_ValueAtPercentile `protobuf:"bytes,3,rep,name=percentile_values,json=percentileValues,proto3" json:"percentile_values,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SummaryValue_Snapshot) Reset() { *m = SummaryValue_Snapshot{} } +func (m *SummaryValue_Snapshot) String() string { return proto.CompactTextString(m) } +func (*SummaryValue_Snapshot) ProtoMessage() {} +func (*SummaryValue_Snapshot) Descriptor() ([]byte, []int) { + return fileDescriptor_0ee3deb72053811a, []int{7, 0} +} + +func (m *SummaryValue_Snapshot) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SummaryValue_Snapshot.Unmarshal(m, b) +} +func (m *SummaryValue_Snapshot) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SummaryValue_Snapshot.Marshal(b, m, deterministic) +} +func (m *SummaryValue_Snapshot) XXX_Merge(src proto.Message) { + xxx_messageInfo_SummaryValue_Snapshot.Merge(m, src) +} +func (m *SummaryValue_Snapshot) XXX_Size() int { + return xxx_messageInfo_SummaryValue_Snapshot.Size(m) +} +func (m *SummaryValue_Snapshot) XXX_DiscardUnknown() { + xxx_messageInfo_SummaryValue_Snapshot.DiscardUnknown(m) +} + +var xxx_messageInfo_SummaryValue_Snapshot proto.InternalMessageInfo + +func (m *SummaryValue_Snapshot) GetCount() *wrappers.Int64Value { + if m != nil { + return m.Count + } + return nil +} + +func (m *SummaryValue_Snapshot) GetSum() *wrappers.DoubleValue { + if m != nil { + return m.Sum + } + return nil +} + +func (m *SummaryValue_Snapshot) GetPercentileValues() []*SummaryValue_Snapshot_ValueAtPercentile { + if m != nil { + return m.PercentileValues + } + return nil +} + +// Represents the value at a given percentile of a distribution. +type SummaryValue_Snapshot_ValueAtPercentile struct { + // The percentile of a distribution. Must be in the interval + // (0.0, 100.0]. + Percentile float64 `protobuf:"fixed64,1,opt,name=percentile,proto3" json:"percentile,omitempty"` + // The value at the given percentile of a distribution. 
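+	// For example, {Percentile: 99.0, Value: 0.25} records that the 99th
+	// percentile of the values in the current snapshot is 0.25.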
+ Value float64 `protobuf:"fixed64,2,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SummaryValue_Snapshot_ValueAtPercentile) Reset() { + *m = SummaryValue_Snapshot_ValueAtPercentile{} +} +func (m *SummaryValue_Snapshot_ValueAtPercentile) String() string { return proto.CompactTextString(m) } +func (*SummaryValue_Snapshot_ValueAtPercentile) ProtoMessage() {} +func (*SummaryValue_Snapshot_ValueAtPercentile) Descriptor() ([]byte, []int) { + return fileDescriptor_0ee3deb72053811a, []int{7, 0, 0} +} + +func (m *SummaryValue_Snapshot_ValueAtPercentile) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SummaryValue_Snapshot_ValueAtPercentile.Unmarshal(m, b) +} +func (m *SummaryValue_Snapshot_ValueAtPercentile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SummaryValue_Snapshot_ValueAtPercentile.Marshal(b, m, deterministic) +} +func (m *SummaryValue_Snapshot_ValueAtPercentile) XXX_Merge(src proto.Message) { + xxx_messageInfo_SummaryValue_Snapshot_ValueAtPercentile.Merge(m, src) +} +func (m *SummaryValue_Snapshot_ValueAtPercentile) XXX_Size() int { + return xxx_messageInfo_SummaryValue_Snapshot_ValueAtPercentile.Size(m) +} +func (m *SummaryValue_Snapshot_ValueAtPercentile) XXX_DiscardUnknown() { + xxx_messageInfo_SummaryValue_Snapshot_ValueAtPercentile.DiscardUnknown(m) +} + +var xxx_messageInfo_SummaryValue_Snapshot_ValueAtPercentile proto.InternalMessageInfo + +func (m *SummaryValue_Snapshot_ValueAtPercentile) GetPercentile() float64 { + if m != nil { + return m.Percentile + } + return 0 +} + +func (m *SummaryValue_Snapshot_ValueAtPercentile) GetValue() float64 { + if m != nil { + return m.Value + } + return 0 +} + +func init() { + proto.RegisterEnum("opencensus.proto.metrics.v1.MetricDescriptor_Type", MetricDescriptor_Type_name, MetricDescriptor_Type_value) + proto.RegisterType((*Metric)(nil), "opencensus.proto.metrics.v1.Metric") + proto.RegisterType((*MetricDescriptor)(nil), "opencensus.proto.metrics.v1.MetricDescriptor") + proto.RegisterType((*LabelKey)(nil), "opencensus.proto.metrics.v1.LabelKey") + proto.RegisterType((*TimeSeries)(nil), "opencensus.proto.metrics.v1.TimeSeries") + proto.RegisterType((*LabelValue)(nil), "opencensus.proto.metrics.v1.LabelValue") + proto.RegisterType((*Point)(nil), "opencensus.proto.metrics.v1.Point") + proto.RegisterType((*DistributionValue)(nil), "opencensus.proto.metrics.v1.DistributionValue") + proto.RegisterType((*DistributionValue_BucketOptions)(nil), "opencensus.proto.metrics.v1.DistributionValue.BucketOptions") + proto.RegisterType((*DistributionValue_BucketOptions_Explicit)(nil), "opencensus.proto.metrics.v1.DistributionValue.BucketOptions.Explicit") + proto.RegisterType((*DistributionValue_Bucket)(nil), "opencensus.proto.metrics.v1.DistributionValue.Bucket") + proto.RegisterType((*DistributionValue_Exemplar)(nil), "opencensus.proto.metrics.v1.DistributionValue.Exemplar") + proto.RegisterMapType((map[string]string)(nil), "opencensus.proto.metrics.v1.DistributionValue.Exemplar.AttachmentsEntry") + proto.RegisterType((*SummaryValue)(nil), "opencensus.proto.metrics.v1.SummaryValue") + proto.RegisterType((*SummaryValue_Snapshot)(nil), "opencensus.proto.metrics.v1.SummaryValue.Snapshot") + proto.RegisterType((*SummaryValue_Snapshot_ValueAtPercentile)(nil), "opencensus.proto.metrics.v1.SummaryValue.Snapshot.ValueAtPercentile") +} + +func init() { + 
proto.RegisterFile("opencensus/proto/metrics/v1/metrics.proto", fileDescriptor_0ee3deb72053811a) +} + +var fileDescriptor_0ee3deb72053811a = []byte{ + // 1114 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x56, 0xdd, 0x6e, 0x1b, 0xc5, + 0x17, 0xf7, 0xda, 0x89, 0xe3, 0x9c, 0x75, 0xd3, 0xf5, 0x28, 0xed, 0xdf, 0x72, 0xfe, 0x0a, 0x61, + 0x11, 0x90, 0x0a, 0x65, 0xad, 0x98, 0xd2, 0x56, 0x15, 0x2a, 0x8a, 0x63, 0x37, 0x36, 0xe4, 0xc3, + 0x1a, 0xdb, 0x95, 0x40, 0x48, 0xd6, 0x7a, 0x3d, 0x49, 0x96, 0x78, 0x3f, 0xba, 0x33, 0x6b, 0xf0, + 0x0b, 0xf0, 0x08, 0x70, 0xcb, 0x2d, 0xe2, 0x39, 0xb8, 0xe2, 0x09, 0x78, 0x0a, 0x5e, 0x01, 0xed, + 0xcc, 0xec, 0x47, 0x62, 0x70, 0x71, 0x91, 0xb8, 0x9b, 0x73, 0xe6, 0xfc, 0x7e, 0x73, 0xbe, 0x77, + 0xe1, 0x91, 0xe7, 0x13, 0xd7, 0x22, 0x2e, 0x0d, 0x69, 0xdd, 0x0f, 0x3c, 0xe6, 0xd5, 0x1d, 0xc2, + 0x02, 0xdb, 0xa2, 0xf5, 0xd9, 0x61, 0x7c, 0x34, 0xf8, 0x05, 0xda, 0x49, 0x4d, 0x85, 0xc6, 0x88, + 0xef, 0x67, 0x87, 0xb5, 0x77, 0xae, 0x3c, 0xef, 0x6a, 0x4a, 0x04, 0xc7, 0x38, 0xbc, 0xac, 0x33, + 0xdb, 0x21, 0x94, 0x99, 0x8e, 0x2f, 0x6c, 0x6b, 0xbb, 0x77, 0x0d, 0xbe, 0x0d, 0x4c, 0xdf, 0x27, + 0x81, 0xe4, 0xaa, 0x7d, 0xb4, 0xe0, 0x48, 0x40, 0xa8, 0x17, 0x06, 0x16, 0x89, 0x3c, 0x89, 0xcf, + 0xc2, 0x58, 0xff, 0x31, 0x0f, 0xc5, 0x33, 0xfe, 0x38, 0xfa, 0x1a, 0x2a, 0xc2, 0x8d, 0xd1, 0x84, + 0x50, 0x2b, 0xb0, 0x7d, 0xe6, 0x05, 0x55, 0x65, 0x4f, 0xd9, 0x57, 0x1b, 0x07, 0xc6, 0x12, 0x8f, + 0x0d, 0x81, 0x6f, 0x25, 0xa0, 0x4e, 0x0e, 0x6b, 0xce, 0x1d, 0x1d, 0xda, 0x86, 0x35, 0xd7, 0x74, + 0x48, 0x35, 0xbf, 0xa7, 0xec, 0x6f, 0x76, 0x72, 0x98, 0x4b, 0xe8, 0x04, 0x80, 0x87, 0x47, 0x02, + 0x9b, 0xd0, 0x6a, 0x61, 0xaf, 0xb0, 0xaf, 0x36, 0x3e, 0x5c, 0xfa, 0xd8, 0xc0, 0x76, 0x48, 0x9f, + 0x9b, 0xe3, 0x0c, 0x14, 0x35, 0xa1, 0x14, 0x47, 0x56, 0x5d, 0xe3, 0x3e, 0x7f, 0xb0, 0x48, 0x93, + 0xc4, 0x3e, 0x3b, 0x34, 0xb0, 0x3c, 0xe3, 0x04, 0xd7, 0x2c, 0x03, 0xa4, 0x91, 0xeb, 0x3f, 0x14, + 0x40, 0xbb, 0x1b, 0x19, 0x42, 0x32, 0x8a, 0x28, 0x2d, 0x9b, 0x32, 0x86, 0x3d, 0x50, 0x63, 0x98, + 0xed, 0xb9, 0x22, 0x40, 0x9c, 0x55, 0x45, 0xa8, 0xd0, 0xb5, 0x59, 0xb5, 0x20, 0x50, 0xd1, 0x19, + 0xbd, 0x84, 0x35, 0x36, 0xf7, 0x85, 0xb3, 0x5b, 0x8d, 0xc6, 0x4a, 0x09, 0x36, 0x06, 0x73, 0x9f, + 0x60, 0x8e, 0x47, 0x2d, 0x80, 0xa9, 0x39, 0x26, 0xd3, 0xd1, 0x0d, 0x99, 0xd3, 0xea, 0x3a, 0xcf, + 0xe0, 0xfb, 0x4b, 0xd9, 0x4e, 0x23, 0xf3, 0x2f, 0xc8, 0x1c, 0x6f, 0x4e, 0xe5, 0x89, 0xea, 0x3f, + 0x2b, 0xb0, 0x16, 0x91, 0xa2, 0xfb, 0xa0, 0x0e, 0xcf, 0xfb, 0xbd, 0xf6, 0x71, 0xf7, 0x65, 0xb7, + 0xdd, 0xd2, 0x72, 0x91, 0xe2, 0xe4, 0x68, 0x78, 0xd2, 0x1e, 0x75, 0xcf, 0x07, 0x4f, 0x1e, 0x6b, + 0x0a, 0xd2, 0xa0, 0x2c, 0x14, 0xad, 0x8b, 0x61, 0xf3, 0xb4, 0xad, 0xe5, 0xd1, 0x43, 0x40, 0x52, + 0xd3, 0xed, 0x0f, 0x70, 0xb7, 0x39, 0x1c, 0x74, 0x2f, 0xce, 0xb5, 0x02, 0xda, 0x06, 0xed, 0x78, + 0x78, 0x36, 0x3c, 0x3d, 0x1a, 0x74, 0x5f, 0xc5, 0xf8, 0x35, 0xf4, 0x00, 0x2a, 0x19, 0xad, 0x24, + 0x59, 0x47, 0x3b, 0xf0, 0xbf, 0xac, 0x3a, 0xcb, 0x54, 0x44, 0x2a, 0x6c, 0xf4, 0x87, 0x67, 0x67, + 0x47, 0xf8, 0x4b, 0x6d, 0x43, 0x7f, 0x01, 0xa5, 0x38, 0x04, 0xa4, 0x41, 0xe1, 0x86, 0xcc, 0x65, + 0x39, 0xa2, 0xe3, 0x9b, 0xab, 0xa1, 0xff, 0xae, 0x00, 0xa4, 0x5d, 0x84, 0x8e, 0xe1, 0x3e, 0x65, + 0x66, 0xc0, 0x46, 0xc9, 0x9c, 0xc9, 0xa6, 0xaf, 0x19, 0x62, 0xd0, 0x8c, 0x78, 0xd0, 0x78, 0xef, + 0x71, 0x0b, 0xbc, 0xc5, 0x21, 0x89, 0x8c, 0x3e, 0x87, 0xb2, 0xa8, 0xc2, 0xcc, 0x9c, 0x86, 0x84, + 0x56, 0xf3, 0xff, 0xa0, 0x93, 0x79, 0x10, 0xaf, 0x22, 0x7b, 0xac, 0x4e, 0x93, 0x33, 0x45, 0xcf, + 0xa1, 0xe8, 0x7b, 
0xb6, 0xcb, 0xe2, 0x79, 0xd0, 0x97, 0xb2, 0xf4, 0x22, 0x53, 0x2c, 0x11, 0xfa, + 0x67, 0x00, 0x29, 0x2d, 0xda, 0x86, 0x75, 0xee, 0x8f, 0xcc, 0x8f, 0x10, 0xd0, 0x0e, 0x6c, 0x5e, + 0x9b, 0x54, 0x78, 0xca, 0xf3, 0x53, 0xc2, 0xa5, 0x6b, 0x93, 0x72, 0x88, 0xfe, 0x6b, 0x1e, 0xd6, + 0x39, 0x25, 0x7a, 0x06, 0x9b, 0xab, 0x64, 0x24, 0x35, 0x46, 0xef, 0x82, 0x6a, 0xbb, 0xec, 0xc9, + 0xe3, 0xcc, 0x13, 0x85, 0x4e, 0x0e, 0x03, 0x57, 0x0a, 0xcf, 0xde, 0x83, 0xf2, 0xc4, 0x0b, 0xc7, + 0x53, 0x22, 0x6d, 0xa2, 0xc9, 0x50, 0x3a, 0x39, 0xac, 0x0a, 0xad, 0x30, 0x1a, 0x01, 0x9a, 0xd8, + 0x94, 0x05, 0xf6, 0x38, 0x8c, 0x0a, 0x27, 0x4d, 0xc5, 0x74, 0x1b, 0x4b, 0x93, 0xd2, 0xca, 0xc0, + 0x38, 0x57, 0x27, 0x87, 0x2b, 0x93, 0xbb, 0x4a, 0xd4, 0x83, 0x7b, 0x34, 0x74, 0x1c, 0x33, 0x98, + 0x4b, 0xee, 0x75, 0xce, 0xfd, 0x68, 0x29, 0x77, 0x5f, 0x20, 0x62, 0xda, 0x32, 0xcd, 0xc8, 0xcd, + 0x0d, 0x99, 0x71, 0xfd, 0xb7, 0x22, 0x54, 0x16, 0xbc, 0x88, 0x0a, 0x62, 0x79, 0xa1, 0xcb, 0x78, + 0x3e, 0x0b, 0x58, 0x08, 0x51, 0x13, 0xd3, 0xd0, 0xe1, 0x79, 0x52, 0x70, 0x74, 0x44, 0x4f, 0xa1, + 0x4a, 0x43, 0x67, 0xe4, 0x5d, 0x8e, 0xe8, 0xeb, 0xd0, 0x0c, 0xc8, 0x64, 0x34, 0x21, 0x33, 0xdb, + 0xe4, 0x1d, 0xcd, 0x53, 0x85, 0x1f, 0xd0, 0xd0, 0xb9, 0xb8, 0xec, 0x8b, 0xdb, 0x56, 0x7c, 0x89, + 0x2c, 0xd8, 0x1a, 0x87, 0xd6, 0x0d, 0x61, 0x23, 0x8f, 0x37, 0x3b, 0x95, 0xe9, 0xfa, 0x74, 0xb5, + 0x74, 0x19, 0x4d, 0x4e, 0x72, 0x21, 0x38, 0xf0, 0xbd, 0x71, 0x56, 0x44, 0x17, 0xb0, 0x21, 0x14, + 0xf1, 0xbe, 0xf9, 0xe4, 0xad, 0xd8, 0x71, 0xcc, 0x52, 0xfb, 0x49, 0x81, 0x7b, 0xb7, 0x5e, 0x44, + 0x16, 0x94, 0xc8, 0x77, 0xfe, 0xd4, 0xb6, 0x6c, 0x26, 0x7b, 0xaf, 0xfd, 0x6f, 0x22, 0x30, 0xda, + 0x92, 0xac, 0x93, 0xc3, 0x09, 0x71, 0x4d, 0x87, 0x52, 0xac, 0x47, 0x0f, 0xa1, 0x38, 0xf6, 0x42, + 0x77, 0x42, 0xab, 0xca, 0x5e, 0x61, 0x5f, 0xc1, 0x52, 0x6a, 0x16, 0xc5, 0x9a, 0xae, 0x51, 0x28, + 0x0a, 0xc6, 0xbf, 0xa9, 0x61, 0x3f, 0x72, 0x98, 0x38, 0xfe, 0xd4, 0x0c, 0x78, 0x21, 0xd5, 0xc6, + 0xd3, 0x15, 0x1d, 0x6e, 0x4b, 0x38, 0x4e, 0x88, 0x6a, 0xdf, 0xe7, 0x23, 0x0f, 0x85, 0x70, 0x7b, + 0x98, 0x95, 0x78, 0x98, 0x6f, 0x4d, 0x69, 0x7e, 0x95, 0x29, 0xfd, 0x06, 0x54, 0x93, 0x31, 0xd3, + 0xba, 0x76, 0x48, 0xba, 0x6b, 0x3a, 0x6f, 0xe9, 0xb4, 0x71, 0x94, 0x52, 0xb5, 0x5d, 0x16, 0xcc, + 0x71, 0x96, 0xbc, 0xf6, 0x02, 0xb4, 0xbb, 0x06, 0x7f, 0xb1, 0xba, 0x93, 0x08, 0xf3, 0x99, 0x75, + 0xf5, 0x3c, 0xff, 0x4c, 0xd1, 0xff, 0x28, 0x40, 0x39, 0x3b, 0x77, 0xe8, 0x30, 0x5b, 0x04, 0xb5, + 0xb1, 0xb3, 0x10, 0x72, 0x37, 0xd9, 0x35, 0x71, 0x85, 0x8c, 0x74, 0xca, 0xd4, 0xc6, 0xff, 0x17, + 0x00, 0xad, 0x74, 0xf1, 0x88, 0x19, 0x3c, 0x87, 0x12, 0x75, 0x4d, 0x9f, 0x5e, 0x7b, 0xe2, 0xc3, + 0xad, 0xbe, 0xe1, 0x23, 0x9d, 0xf5, 0xcf, 0xe8, 0x4b, 0x24, 0x4e, 0x38, 0x6a, 0xbf, 0xe4, 0xa1, + 0x14, 0xab, 0xff, 0x0b, 0xff, 0x5f, 0x43, 0xc5, 0x27, 0x81, 0x45, 0x5c, 0x66, 0xc7, 0x6b, 0x36, + 0xae, 0x72, 0x6b, 0xf5, 0x40, 0x0c, 0x2e, 0x1e, 0xb1, 0x5e, 0x42, 0x89, 0xb5, 0x94, 0x5e, 0x7c, + 0xb9, 0x6a, 0x5d, 0xa8, 0x2c, 0x98, 0xa1, 0x5d, 0x80, 0xd4, 0x50, 0x36, 0x6f, 0x46, 0x73, 0xbb, + 0xea, 0x71, 0x5f, 0x37, 0x67, 0xb0, 0x6b, 0x7b, 0xcb, 0xdc, 0x6c, 0x96, 0xc5, 0x5f, 0x11, 0xed, + 0x45, 0x17, 0x3d, 0xe5, 0xab, 0xd6, 0x95, 0xcd, 0xae, 0xc3, 0xb1, 0x61, 0x79, 0x4e, 0x5d, 0x60, + 0x0e, 0x6c, 0x97, 0xb2, 0x20, 0x8c, 0x7a, 0x8e, 0x6f, 0xc7, 0x7a, 0x4a, 0x77, 0x20, 0x7e, 0x8c, + 0xaf, 0x88, 0x7b, 0x70, 0x95, 0xfd, 0x51, 0x1f, 0x17, 0xf9, 0xc5, 0xc7, 0x7f, 0x06, 0x00, 0x00, + 0xff, 0xff, 0x24, 0xa6, 0x3d, 0x2b, 0xce, 0x0b, 0x00, 0x00, +} diff --git 
a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1/resource.pb.go b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1/resource.pb.go new file mode 100644 index 000000000..560dbd94a --- /dev/null +++ b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1/resource.pb.go @@ -0,0 +1,99 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: opencensus/proto/resource/v1/resource.proto + +package v1 + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Resource information. +type Resource struct { + // Type identifier for the resource. + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + // Set of labels that describe the resource. + Labels map[string]string `protobuf:"bytes,2,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Resource) Reset() { *m = Resource{} } +func (m *Resource) String() string { return proto.CompactTextString(m) } +func (*Resource) ProtoMessage() {} +func (*Resource) Descriptor() ([]byte, []int) { + return fileDescriptor_584700775a2fc762, []int{0} +} + +func (m *Resource) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Resource.Unmarshal(m, b) +} +func (m *Resource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Resource.Marshal(b, m, deterministic) +} +func (m *Resource) XXX_Merge(src proto.Message) { + xxx_messageInfo_Resource.Merge(m, src) +} +func (m *Resource) XXX_Size() int { + return xxx_messageInfo_Resource.Size(m) +} +func (m *Resource) XXX_DiscardUnknown() { + xxx_messageInfo_Resource.DiscardUnknown(m) +} + +var xxx_messageInfo_Resource proto.InternalMessageInfo + +func (m *Resource) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *Resource) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +func init() { + proto.RegisterType((*Resource)(nil), "opencensus.proto.resource.v1.Resource") + proto.RegisterMapType((map[string]string)(nil), "opencensus.proto.resource.v1.Resource.LabelsEntry") +} + +func init() { + proto.RegisterFile("opencensus/proto/resource/v1/resource.proto", fileDescriptor_584700775a2fc762) +} + +var fileDescriptor_584700775a2fc762 = []byte{ + // 234 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xd2, 0xce, 0x2f, 0x48, 0xcd, + 0x4b, 0x4e, 0xcd, 0x2b, 0x2e, 0x2d, 0xd6, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0xd7, 0x2f, 0x4a, 0x2d, + 0xce, 0x2f, 0x2d, 0x4a, 0x4e, 0xd5, 0x2f, 0x33, 0x84, 0xb3, 0xf5, 0xc0, 0x52, 0x42, 0x32, 0x08, + 0xc5, 0x10, 0x11, 0x3d, 0xb8, 0x82, 0x32, 0x43, 0xa5, 0xa5, 0x8c, 0x5c, 0x1c, 0x41, 0x50, 0xbe, + 0x90, 0x10, 0x17, 0x4b, 0x49, 0x65, 0x41, 0xaa, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x67, 0x10, 0x98, + 0x2d, 0xe4, 0xc5, 0xc5, 0x96, 0x93, 0x98, 0x94, 
0x9a, 0x53, 0x2c, 0xc1, 0xa4, 0xc0, 0xac, 0xc1, + 0x6d, 0x64, 0xa4, 0x87, 0xcf, 0x3c, 0x3d, 0x98, 0x59, 0x7a, 0x3e, 0x60, 0x4d, 0xae, 0x79, 0x25, + 0x45, 0x95, 0x41, 0x50, 0x13, 0xa4, 0x2c, 0xb9, 0xb8, 0x91, 0x84, 0x85, 0x04, 0xb8, 0x98, 0xb3, + 0x53, 0x2b, 0xa1, 0xb6, 0x81, 0x98, 0x42, 0x22, 0x5c, 0xac, 0x65, 0x89, 0x39, 0xa5, 0xa9, 0x12, + 0x4c, 0x60, 0x31, 0x08, 0xc7, 0x8a, 0xc9, 0x82, 0xd1, 0xa9, 0x92, 0x4b, 0x3e, 0x33, 0x1f, 0xaf, + 0xd5, 0x4e, 0xbc, 0x30, 0xbb, 0x03, 0x40, 0x52, 0x01, 0x8c, 0x51, 0xae, 0xe9, 0x99, 0x25, 0x19, + 0xa5, 0x49, 0x7a, 0xc9, 0xf9, 0xb9, 0xfa, 0x10, 0x5d, 0xba, 0x99, 0x79, 0xc5, 0x25, 0x45, 0xa5, + 0xb9, 0xa9, 0x79, 0x25, 0x89, 0x25, 0x99, 0xf9, 0x79, 0xfa, 0x08, 0x03, 0x75, 0x21, 0x01, 0x99, + 0x9e, 0x9a, 0xa7, 0x9b, 0x8e, 0x12, 0x9e, 0x49, 0x6c, 0x60, 0x19, 0x63, 0x40, 0x00, 0x00, 0x00, + 0xff, 0xff, 0x8e, 0x11, 0xaf, 0xda, 0x76, 0x01, 0x00, 0x00, +} diff --git a/vendor/sigs.k8s.io/yaml/LICENSE b/vendor/github.com/ghodss/yaml/LICENSE similarity index 100% rename from vendor/sigs.k8s.io/yaml/LICENSE rename to vendor/github.com/ghodss/yaml/LICENSE diff --git a/vendor/sigs.k8s.io/yaml/fields.go b/vendor/github.com/ghodss/yaml/fields.go similarity index 99% rename from vendor/sigs.k8s.io/yaml/fields.go rename to vendor/github.com/ghodss/yaml/fields.go index 235b7f2cf..586007402 100644 --- a/vendor/sigs.k8s.io/yaml/fields.go +++ b/vendor/github.com/ghodss/yaml/fields.go @@ -1,7 +1,6 @@ // Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. - package yaml import ( diff --git a/vendor/sigs.k8s.io/yaml/yaml.go b/vendor/github.com/ghodss/yaml/yaml.go similarity index 77% rename from vendor/sigs.k8s.io/yaml/yaml.go rename to vendor/github.com/ghodss/yaml/yaml.go index 024596112..4fb4054a8 100644 --- a/vendor/sigs.k8s.io/yaml/yaml.go +++ b/vendor/github.com/ghodss/yaml/yaml.go @@ -4,14 +4,13 @@ import ( "bytes" "encoding/json" "fmt" - "io" "reflect" "strconv" "gopkg.in/yaml.v2" ) -// Marshal marshals the object into JSON then converts JSON to YAML and returns the +// Marshals the object into JSON then converts JSON to YAML and returns the // YAML. func Marshal(o interface{}) ([]byte, error) { j, err := json.Marshal(o) @@ -27,35 +26,15 @@ func Marshal(o interface{}) ([]byte, error) { return y, nil } -// JSONOpt is a decoding option for decoding from JSON format. -type JSONOpt func(*json.Decoder) *json.Decoder - -// Unmarshal converts YAML to JSON then uses JSON to unmarshal into an object, -// optionally configuring the behavior of the JSON unmarshal. -func Unmarshal(y []byte, o interface{}, opts ...JSONOpt) error { - return yamlUnmarshal(y, o, false, opts...) -} - -// UnmarshalStrict strictly converts YAML to JSON then uses JSON to unmarshal -// into an object, optionally configuring the behavior of the JSON unmarshal. -func UnmarshalStrict(y []byte, o interface{}, opts ...JSONOpt) error { - return yamlUnmarshal(y, o, true, append(opts, DisallowUnknownFields)...) -} - -// yamlUnmarshal unmarshals the given YAML byte stream into the given interface, -// optionally performing the unmarshalling strictly -func yamlUnmarshal(y []byte, o interface{}, strict bool, opts ...JSONOpt) error { +// Converts YAML to JSON then uses JSON to unmarshal into an object. 
+func Unmarshal(y []byte, o interface{}) error { vo := reflect.ValueOf(o) - unmarshalFn := yaml.Unmarshal - if strict { - unmarshalFn = yaml.UnmarshalStrict - } - j, err := yamlToJSON(y, &vo, unmarshalFn) + j, err := yamlToJSON(y, &vo) if err != nil { return fmt.Errorf("error converting YAML to JSON: %v", err) } - err = jsonUnmarshal(bytes.NewReader(j), o, opts...) + err = json.Unmarshal(j, o) if err != nil { return fmt.Errorf("error unmarshaling JSON: %v", err) } @@ -63,22 +42,7 @@ func yamlUnmarshal(y []byte, o interface{}, strict bool, opts ...JSONOpt) error return nil } -// jsonUnmarshal unmarshals the JSON byte stream from the given reader into the -// object, optionally applying decoder options prior to decoding. We are not -// using json.Unmarshal directly as we want the chance to pass in non-default -// options. -func jsonUnmarshal(r io.Reader, o interface{}, opts ...JSONOpt) error { - d := json.NewDecoder(r) - for _, opt := range opts { - d = opt(d) - } - if err := d.Decode(&o); err != nil { - return fmt.Errorf("while decoding JSON: %v", err) - } - return nil -} - -// JSONToYAML Converts JSON to YAML. +// Convert JSON to YAML. func JSONToYAML(j []byte) ([]byte, error) { // Convert the JSON to an object. var jsonObj interface{} @@ -96,8 +60,8 @@ func JSONToYAML(j []byte) ([]byte, error) { return yaml.Marshal(jsonObj) } -// YAMLToJSON converts YAML to JSON. Since JSON is a subset of YAML, -// passing JSON through this method should be a no-op. +// Convert YAML to JSON. Since JSON is a subset of YAML, passing JSON through +// this method should be a no-op. // // Things YAML can do that are not supported by JSON: // * In YAML you can have binary and null keys in your maps. These are invalid @@ -106,22 +70,14 @@ func JSONToYAML(j []byte) ([]byte, error) { // use binary data with this library, encode the data as base64 as usual but do // not use the !!binary tag in your YAML. This will ensure the original base64 // encoded data makes it all the way through to the JSON. -// -// For strict decoding of YAML, use YAMLToJSONStrict. func YAMLToJSON(y []byte) ([]byte, error) { - return yamlToJSON(y, nil, yaml.Unmarshal) + return yamlToJSON(y, nil) } -// YAMLToJSONStrict is like YAMLToJSON but enables strict YAML decoding, -// returning an error on any duplicate field names. -func YAMLToJSONStrict(y []byte) ([]byte, error) { - return yamlToJSON(y, nil, yaml.UnmarshalStrict) -} - -func yamlToJSON(y []byte, jsonTarget *reflect.Value, yamlUnmarshal func([]byte, interface{}) error) ([]byte, error) { +func yamlToJSON(y []byte, jsonTarget *reflect.Value) ([]byte, error) { // Convert the YAML to an object. var yamlObj interface{} - err := yamlUnmarshal(y, &yamlObj) + err := yaml.Unmarshal(y, &yamlObj) if err != nil { return nil, err } @@ -316,4 +272,6 @@ func convertToJSONableObject(yamlObj interface{}, jsonTarget *reflect.Value) (in } return yamlObj, nil } + + return nil, nil } diff --git a/vendor/github.com/golang/glog/LICENSE b/vendor/github.com/golang/glog/LICENSE new file mode 100644 index 000000000..37ec93a14 --- /dev/null +++ b/vendor/github.com/golang/glog/LICENSE @@ -0,0 +1,191 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. 
+ +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. 
+ +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. + +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. 
+ +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. + +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. + +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
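The file below vendors github.com/golang/glog, whose package comment documents a flag-driven API: the Info/Warning/Error/Fatal families, V-style verbosity, and the requirement to call flag.Parse before logging and Flush before exiting. As a quick orientation, a minimal caller might look like the following sketch; it is illustrative only (not part of this patch) and assumes a standalone main package:

    package main

    import (
        "flag"

        "github.com/golang/glog"
    )

    func main() {
        // glog registers -logtostderr, -v, -vmodule, etc. on the standard
        // flag set, so flags must be parsed before the first log call.
        flag.Parse()
        // Output is buffered and flushed periodically; flush once more on
        // exit so the final records are not lost.
        defer glog.Flush()

        glog.Info("controller starting")

        // V(2) is enabled by -v=2 or a matching -vmodule pattern; the
        // guarded form avoids evaluating arguments when it is off.
        if glog.V(2) {
            glog.Infof("verbose detail: %d workers", 4)
        }
    }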
diff --git a/vendor/github.com/golang/glog/glog.go b/vendor/github.com/golang/glog/glog.go new file mode 100644 index 000000000..54bd7afdc --- /dev/null +++ b/vendor/github.com/golang/glog/glog.go @@ -0,0 +1,1180 @@ +// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/ +// +// Copyright 2013 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package glog implements logging analogous to the Google-internal C++ INFO/ERROR/V setup. +// It provides functions Info, Warning, Error, Fatal, plus formatting variants such as +// Infof. It also provides V-style logging controlled by the -v and -vmodule=file=2 flags. +// +// Basic examples: +// +// glog.Info("Prepare to repel boarders") +// +// glog.Fatalf("Initialization failed: %s", err) +// +// See the documentation for the V function for an explanation of these examples: +// +// if glog.V(2) { +// glog.Info("Starting transaction...") +// } +// +// glog.V(2).Infoln("Processed", nItems, "elements") +// +// Log output is buffered and written periodically using Flush. Programs +// should call Flush before exiting to guarantee all log output is written. +// +// By default, all log statements write to files in a temporary directory. +// This package provides several flags that modify this behavior. +// As a result, flag.Parse must be called before any logging is done. +// +// -logtostderr=false +// Logs are written to standard error instead of to files. +// -alsologtostderr=false +// Logs are written to standard error as well as to files. +// -stderrthreshold=ERROR +// Log events at or above this severity are logged to standard +// error as well as to files. +// -log_dir="" +// Log files will be written to this directory instead of the +// default temporary directory. +// +// Other flags provide aids to debugging. +// +// -log_backtrace_at="" +// When set to a file and line number holding a logging statement, +// such as +// -log_backtrace_at=gopherflakes.go:234 +// a stack trace will be written to the Info log whenever execution +// hits that statement. (Unlike with -vmodule, the ".go" must be +// present.) +// -v=0 +// Enable V-leveled logging at the specified level. +// -vmodule="" +// The syntax of the argument is a comma-separated list of pattern=N, +// where pattern is a literal file name (minus the ".go" suffix) or +// "glob" pattern and N is a V level. For instance, +// -vmodule=gopher*=3 +// sets the V level to 3 in all Go files whose names begin "gopher". +// +package glog + +import ( + "bufio" + "bytes" + "errors" + "flag" + "fmt" + "io" + stdLog "log" + "os" + "path/filepath" + "runtime" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" +) + +// severity identifies the sort of log: info, warning etc. It also implements +// the flag.Value interface. The -stderrthreshold flag is of type severity and +// should be modified only through the flag.Value interface. The values match +// the corresponding constants in C++. 
+type severity int32 // sync/atomic int32 + +// These constants identify the log levels in order of increasing severity. +// A message written to a high-severity log file is also written to each +// lower-severity log file. +const ( + infoLog severity = iota + warningLog + errorLog + fatalLog + numSeverity = 4 +) + +const severityChar = "IWEF" + +var severityName = []string{ + infoLog: "INFO", + warningLog: "WARNING", + errorLog: "ERROR", + fatalLog: "FATAL", +} + +// get returns the value of the severity. +func (s *severity) get() severity { + return severity(atomic.LoadInt32((*int32)(s))) +} + +// set sets the value of the severity. +func (s *severity) set(val severity) { + atomic.StoreInt32((*int32)(s), int32(val)) +} + +// String is part of the flag.Value interface. +func (s *severity) String() string { + return strconv.FormatInt(int64(*s), 10) +} + +// Get is part of the flag.Value interface. +func (s *severity) Get() interface{} { + return *s +} + +// Set is part of the flag.Value interface. +func (s *severity) Set(value string) error { + var threshold severity + // Is it a known name? + if v, ok := severityByName(value); ok { + threshold = v + } else { + v, err := strconv.Atoi(value) + if err != nil { + return err + } + threshold = severity(v) + } + logging.stderrThreshold.set(threshold) + return nil +} + +func severityByName(s string) (severity, bool) { + s = strings.ToUpper(s) + for i, name := range severityName { + if name == s { + return severity(i), true + } + } + return 0, false +} + +// OutputStats tracks the number of output lines and bytes written. +type OutputStats struct { + lines int64 + bytes int64 +} + +// Lines returns the number of lines written. +func (s *OutputStats) Lines() int64 { + return atomic.LoadInt64(&s.lines) +} + +// Bytes returns the number of bytes written. +func (s *OutputStats) Bytes() int64 { + return atomic.LoadInt64(&s.bytes) +} + +// Stats tracks the number of lines of output and number of bytes +// per severity level. Values must be read with atomic.LoadInt64. +var Stats struct { + Info, Warning, Error OutputStats +} + +var severityStats = [numSeverity]*OutputStats{ + infoLog: &Stats.Info, + warningLog: &Stats.Warning, + errorLog: &Stats.Error, +} + +// Level is exported because it appears in the arguments to V and is +// the type of the v flag, which can be set programmatically. +// It's a distinct type because we want to discriminate it from logType. +// Variables of type level are only changed under logging.mu. +// The -v flag is read only with atomic ops, so the state of the logging +// module is consistent. + +// Level is treated as a sync/atomic int32. + +// Level specifies a level of verbosity for V logs. *Level implements +// flag.Value; the -v flag is of type Level and should be modified +// only through the flag.Value interface. +type Level int32 + +// get returns the value of the Level. +func (l *Level) get() Level { + return Level(atomic.LoadInt32((*int32)(l))) +} + +// set sets the value of the Level. +func (l *Level) set(val Level) { + atomic.StoreInt32((*int32)(l), int32(val)) +} + +// String is part of the flag.Value interface. +func (l *Level) String() string { + return strconv.FormatInt(int64(*l), 10) +} + +// Get is part of the flag.Value interface. +func (l *Level) Get() interface{} { + return *l +} + +// Set is part of the flag.Value interface. 
+func (l *Level) Set(value string) error { + v, err := strconv.Atoi(value) + if err != nil { + return err + } + logging.mu.Lock() + defer logging.mu.Unlock() + logging.setVState(Level(v), logging.vmodule.filter, false) + return nil +} + +// moduleSpec represents the setting of the -vmodule flag. +type moduleSpec struct { + filter []modulePat +} + +// modulePat contains a filter for the -vmodule flag. +// It holds a verbosity level and a file pattern to match. +type modulePat struct { + pattern string + literal bool // The pattern is a literal string + level Level +} + +// match reports whether the file matches the pattern. It uses a string +// comparison if the pattern contains no metacharacters. +func (m *modulePat) match(file string) bool { + if m.literal { + return file == m.pattern + } + match, _ := filepath.Match(m.pattern, file) + return match +} + +func (m *moduleSpec) String() string { + // Lock because the type is not atomic. TODO: clean this up. + logging.mu.Lock() + defer logging.mu.Unlock() + var b bytes.Buffer + for i, f := range m.filter { + if i > 0 { + b.WriteRune(',') + } + fmt.Fprintf(&b, "%s=%d", f.pattern, f.level) + } + return b.String() +} + +// Get is part of the (Go 1.2) flag.Getter interface. It always returns nil for this flag type since the +// struct is not exported. +func (m *moduleSpec) Get() interface{} { + return nil +} + +var errVmoduleSyntax = errors.New("syntax error: expect comma-separated list of filename=N") + +// Syntax: -vmodule=recordio=2,file=1,gfs*=3 +func (m *moduleSpec) Set(value string) error { + var filter []modulePat + for _, pat := range strings.Split(value, ",") { + if len(pat) == 0 { + // Empty strings such as from a trailing comma can be ignored. + continue + } + patLev := strings.Split(pat, "=") + if len(patLev) != 2 || len(patLev[0]) == 0 || len(patLev[1]) == 0 { + return errVmoduleSyntax + } + pattern := patLev[0] + v, err := strconv.Atoi(patLev[1]) + if err != nil { + return errors.New("syntax error: expect comma-separated list of filename=N") + } + if v < 0 { + return errors.New("negative value for vmodule level") + } + if v == 0 { + continue // Ignore. It's harmless but no point in paying the overhead. + } + // TODO: check syntax of filter? + filter = append(filter, modulePat{pattern, isLiteral(pattern), Level(v)}) + } + logging.mu.Lock() + defer logging.mu.Unlock() + logging.setVState(logging.verbosity, filter, true) + return nil +} + +// isLiteral reports whether the pattern is a literal string, that is, has no metacharacters +// that require filepath.Match to be called to match the pattern. +func isLiteral(pattern string) bool { + return !strings.ContainsAny(pattern, `\*?[]`) +} + +// traceLocation represents the setting of the -log_backtrace_at flag. +type traceLocation struct { + file string + line int +} + +// isSet reports whether the trace location has been specified. +// logging.mu is held. +func (t *traceLocation) isSet() bool { + return t.line > 0 +} + +// match reports whether the specified file and line matches the trace location. +// The argument file name is the full path, not the basename specified in the flag. +// logging.mu is held. +func (t *traceLocation) match(file string, line int) bool { + if t.line != line { + return false + } + if i := strings.LastIndex(file, "/"); i >= 0 { + file = file[i+1:] + } + return t.file == file +} + +func (t *traceLocation) String() string { + // Lock because the type is not atomic. TODO: clean this up. 
+ logging.mu.Lock()
+ defer logging.mu.Unlock()
+ return fmt.Sprintf("%s:%d", t.file, t.line)
+}
+
+// Get is part of the (Go 1.2) flag.Getter interface. It always returns nil for this flag type since the
+// struct is not exported.
+func (t *traceLocation) Get() interface{} {
+ return nil
+}
+
+var errTraceSyntax = errors.New("syntax error: expect file.go:234")
+
+// Syntax: -log_backtrace_at=gopherflakes.go:234
+// Note that unlike vmodule the file extension is included here.
+func (t *traceLocation) Set(value string) error {
+ if value == "" {
+ // Unset.
+ t.line = 0
+ t.file = ""
+ return nil
+ }
+ fields := strings.Split(value, ":")
+ if len(fields) != 2 {
+ return errTraceSyntax
+ }
+ file, line := fields[0], fields[1]
+ if !strings.Contains(file, ".") {
+ return errTraceSyntax
+ }
+ v, err := strconv.Atoi(line)
+ if err != nil {
+ return errTraceSyntax
+ }
+ if v <= 0 {
+ return errors.New("negative or zero value for level")
+ }
+ logging.mu.Lock()
+ defer logging.mu.Unlock()
+ t.line = v
+ t.file = file
+ return nil
+}
+
+// flushSyncWriter is the interface satisfied by logging destinations.
+type flushSyncWriter interface {
+ Flush() error
+ Sync() error
+ io.Writer
+}
+
+func init() {
+ flag.BoolVar(&logging.toStderr, "logtostderr", false, "log to standard error instead of files")
+ flag.BoolVar(&logging.alsoToStderr, "alsologtostderr", false, "log to standard error as well as files")
+ flag.Var(&logging.verbosity, "v", "log level for V logs")
+ flag.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr")
+ flag.Var(&logging.vmodule, "vmodule", "comma-separated list of pattern=N settings for file-filtered logging")
+ flag.Var(&logging.traceLocation, "log_backtrace_at", "when logging hits line file:N, emit a stack trace")
+
+ // Default stderrThreshold is ERROR.
+ logging.stderrThreshold = errorLog
+
+ logging.setVState(0, nil, false)
+ go logging.flushDaemon()
+}
+
+// Flush flushes all pending log I/O.
+func Flush() {
+ logging.lockAndFlushAll()
+}
+
+// loggingT collects all the global state of the logging setup.
+type loggingT struct {
+ // Boolean flags. Not handled atomically because the flag.Value interface
+ // does not let us avoid the =true, and that shorthand is necessary for
+ // compatibility. TODO: does this matter enough to fix? Seems unlikely.
+ toStderr bool // The -logtostderr flag.
+ alsoToStderr bool // The -alsologtostderr flag.
+
+ // Level flag. Handled atomically.
+ stderrThreshold severity // The -stderrthreshold flag.
+
+ // freeList is a list of byte buffers, maintained under freeListMu.
+ freeList *buffer
+ // freeListMu maintains the free list. It is separate from the main mutex
+ // so buffers can be grabbed and printed to without holding the main lock,
+ // for better parallelization.
+ freeListMu sync.Mutex
+
+ // mu protects the remaining elements of this structure and is
+ // used to synchronize logging.
+ mu sync.Mutex
+ // file holds writer for each of the log types.
+ file [numSeverity]flushSyncWriter
+ // pcs is used in V to avoid an allocation when computing the caller's PC.
+ pcs [1]uintptr
+ // vmap is a cache of the V Level for each V() call site, identified by PC.
+ // It is wiped whenever the vmodule flag changes state.
+ vmap map[uintptr]Level
+ // filterLength stores the length of the vmodule filter chain. If greater
+ // than zero, it means vmodule is enabled. It may be read safely
+ // using atomic.LoadInt32, but is only modified under mu.
+ filterLength int32
+ // traceLocation is the state of the -log_backtrace_at flag.
+ traceLocation traceLocation
+ // These flags are modified only under lock, although verbosity may be fetched
+ // safely using atomic.LoadInt32.
+ vmodule moduleSpec // The state of the -vmodule flag.
+ verbosity Level // V logging level, the value of the -v flag.
+}
+
+// buffer holds a byte Buffer for reuse. The zero value is ready for use.
+type buffer struct {
+ bytes.Buffer
+ tmp [64]byte // temporary byte array for creating headers.
+ next *buffer
+}
+
+var logging loggingT
+
+// setVState sets a consistent state for V logging.
+// l.mu is held.
+func (l *loggingT) setVState(verbosity Level, filter []modulePat, setFilter bool) {
+ // Turn verbosity off so V will not fire while we are in transition.
+ logging.verbosity.set(0)
+ // Ditto for filter length.
+ atomic.StoreInt32(&logging.filterLength, 0)
+
+ // Set the new filters and wipe the pc->Level map if the filter has changed.
+ if setFilter {
+ logging.vmodule.filter = filter
+ logging.vmap = make(map[uintptr]Level)
+ }
+
+ // Things are consistent now, so enable filtering and verbosity.
+ // They are enabled in order opposite to that in V.
+ atomic.StoreInt32(&logging.filterLength, int32(len(filter)))
+ logging.verbosity.set(verbosity)
+}
+
+// getBuffer returns a new, ready-to-use buffer.
+func (l *loggingT) getBuffer() *buffer {
+ l.freeListMu.Lock()
+ b := l.freeList
+ if b != nil {
+ l.freeList = b.next
+ }
+ l.freeListMu.Unlock()
+ if b == nil {
+ b = new(buffer)
+ } else {
+ b.next = nil
+ b.Reset()
+ }
+ return b
+}
+
+// putBuffer returns a buffer to the free list.
+func (l *loggingT) putBuffer(b *buffer) {
+ if b.Len() >= 256 {
+ // Let big buffers die a natural death.
+ return
+ }
+ l.freeListMu.Lock()
+ b.next = l.freeList
+ l.freeList = b
+ l.freeListMu.Unlock()
+}
+
+var timeNow = time.Now // Stubbed out for testing.
+
+/*
+header formats a log header as defined by the C++ implementation.
+It returns a buffer containing the formatted header and the user's file and line number.
+The depth specifies how many stack frames above lives the source line to be identified in the log message.
+
+Log lines have this form:
+ Lmmdd hh:mm:ss.uuuuuu threadid file:line] msg...
+where the fields are defined as follows:
+ L A single character, representing the log level (eg 'I' for INFO)
+ mm The month (zero padded; ie May is '05')
+ dd The day (zero padded)
+ hh:mm:ss.uuuuuu Time in hours, minutes and fractional seconds
+ threadid The space-padded thread ID as returned by GetTID()
+ file The file name
+ line The line number
+ msg The user-supplied message
+*/
+func (l *loggingT) header(s severity, depth int) (*buffer, string, int) {
+ _, file, line, ok := runtime.Caller(3 + depth)
+ if !ok {
+ file = "???"
+ line = 1
+ } else {
+ slash := strings.LastIndex(file, "/")
+ if slash >= 0 {
+ file = file[slash+1:]
+ }
+ }
+ return l.formatHeader(s, file, line), file, line
+}
+
+// formatHeader formats a log header using the provided file name and line number.
+func (l *loggingT) formatHeader(s severity, file string, line int) *buffer {
+ now := timeNow()
+ if line < 0 {
+ line = 0 // not a real line number, but acceptable to someDigits
+ }
+ if s > fatalLog {
+ s = infoLog // for safety.
+ }
+ buf := l.getBuffer()
+
+ // Avoid Fprintf, for speed. The format is so simple that we can do it quickly by hand.
+ // It's worth about 3X. Fprintf is hard.
+ _, month, day := now.Date() + hour, minute, second := now.Clock() + // Lmmdd hh:mm:ss.uuuuuu threadid file:line] + buf.tmp[0] = severityChar[s] + buf.twoDigits(1, int(month)) + buf.twoDigits(3, day) + buf.tmp[5] = ' ' + buf.twoDigits(6, hour) + buf.tmp[8] = ':' + buf.twoDigits(9, minute) + buf.tmp[11] = ':' + buf.twoDigits(12, second) + buf.tmp[14] = '.' + buf.nDigits(6, 15, now.Nanosecond()/1000, '0') + buf.tmp[21] = ' ' + buf.nDigits(7, 22, pid, ' ') // TODO: should be TID + buf.tmp[29] = ' ' + buf.Write(buf.tmp[:30]) + buf.WriteString(file) + buf.tmp[0] = ':' + n := buf.someDigits(1, line) + buf.tmp[n+1] = ']' + buf.tmp[n+2] = ' ' + buf.Write(buf.tmp[:n+3]) + return buf +} + +// Some custom tiny helper functions to print the log header efficiently. + +const digits = "0123456789" + +// twoDigits formats a zero-prefixed two-digit integer at buf.tmp[i]. +func (buf *buffer) twoDigits(i, d int) { + buf.tmp[i+1] = digits[d%10] + d /= 10 + buf.tmp[i] = digits[d%10] +} + +// nDigits formats an n-digit integer at buf.tmp[i], +// padding with pad on the left. +// It assumes d >= 0. +func (buf *buffer) nDigits(n, i, d int, pad byte) { + j := n - 1 + for ; j >= 0 && d > 0; j-- { + buf.tmp[i+j] = digits[d%10] + d /= 10 + } + for ; j >= 0; j-- { + buf.tmp[i+j] = pad + } +} + +// someDigits formats a zero-prefixed variable-width integer at buf.tmp[i]. +func (buf *buffer) someDigits(i, d int) int { + // Print into the top, then copy down. We know there's space for at least + // a 10-digit number. + j := len(buf.tmp) + for { + j-- + buf.tmp[j] = digits[d%10] + d /= 10 + if d == 0 { + break + } + } + return copy(buf.tmp[i:], buf.tmp[j:]) +} + +func (l *loggingT) println(s severity, args ...interface{}) { + buf, file, line := l.header(s, 0) + fmt.Fprintln(buf, args...) + l.output(s, buf, file, line, false) +} + +func (l *loggingT) print(s severity, args ...interface{}) { + l.printDepth(s, 1, args...) +} + +func (l *loggingT) printDepth(s severity, depth int, args ...interface{}) { + buf, file, line := l.header(s, depth) + fmt.Fprint(buf, args...) + if buf.Bytes()[buf.Len()-1] != '\n' { + buf.WriteByte('\n') + } + l.output(s, buf, file, line, false) +} + +func (l *loggingT) printf(s severity, format string, args ...interface{}) { + buf, file, line := l.header(s, 0) + fmt.Fprintf(buf, format, args...) + if buf.Bytes()[buf.Len()-1] != '\n' { + buf.WriteByte('\n') + } + l.output(s, buf, file, line, false) +} + +// printWithFileLine behaves like print but uses the provided file and line number. If +// alsoLogToStderr is true, the log message always appears on standard error; it +// will also appear in the log file unless --logtostderr is set. +func (l *loggingT) printWithFileLine(s severity, file string, line int, alsoToStderr bool, args ...interface{}) { + buf := l.formatHeader(s, file, line) + fmt.Fprint(buf, args...) + if buf.Bytes()[buf.Len()-1] != '\n' { + buf.WriteByte('\n') + } + l.output(s, buf, file, line, alsoToStderr) +} + +// output writes the data to the log files and releases the buffer. 
+func (l *loggingT) output(s severity, buf *buffer, file string, line int, alsoToStderr bool) { + l.mu.Lock() + if l.traceLocation.isSet() { + if l.traceLocation.match(file, line) { + buf.Write(stacks(false)) + } + } + data := buf.Bytes() + if !flag.Parsed() { + os.Stderr.Write([]byte("ERROR: logging before flag.Parse: ")) + os.Stderr.Write(data) + } else if l.toStderr { + os.Stderr.Write(data) + } else { + if alsoToStderr || l.alsoToStderr || s >= l.stderrThreshold.get() { + os.Stderr.Write(data) + } + if l.file[s] == nil { + if err := l.createFiles(s); err != nil { + os.Stderr.Write(data) // Make sure the message appears somewhere. + l.exit(err) + } + } + switch s { + case fatalLog: + l.file[fatalLog].Write(data) + fallthrough + case errorLog: + l.file[errorLog].Write(data) + fallthrough + case warningLog: + l.file[warningLog].Write(data) + fallthrough + case infoLog: + l.file[infoLog].Write(data) + } + } + if s == fatalLog { + // If we got here via Exit rather than Fatal, print no stacks. + if atomic.LoadUint32(&fatalNoStacks) > 0 { + l.mu.Unlock() + timeoutFlush(10 * time.Second) + os.Exit(1) + } + // Dump all goroutine stacks before exiting. + // First, make sure we see the trace for the current goroutine on standard error. + // If -logtostderr has been specified, the loop below will do that anyway + // as the first stack in the full dump. + if !l.toStderr { + os.Stderr.Write(stacks(false)) + } + // Write the stack trace for all goroutines to the files. + trace := stacks(true) + logExitFunc = func(error) {} // If we get a write error, we'll still exit below. + for log := fatalLog; log >= infoLog; log-- { + if f := l.file[log]; f != nil { // Can be nil if -logtostderr is set. + f.Write(trace) + } + } + l.mu.Unlock() + timeoutFlush(10 * time.Second) + os.Exit(255) // C++ uses -1, which is silly because it's anded with 255 anyway. + } + l.putBuffer(buf) + l.mu.Unlock() + if stats := severityStats[s]; stats != nil { + atomic.AddInt64(&stats.lines, 1) + atomic.AddInt64(&stats.bytes, int64(len(data))) + } +} + +// timeoutFlush calls Flush and returns when it completes or after timeout +// elapses, whichever happens first. This is needed because the hooks invoked +// by Flush may deadlock when glog.Fatal is called from a hook that holds +// a lock. +func timeoutFlush(timeout time.Duration) { + done := make(chan bool, 1) + go func() { + Flush() // calls logging.lockAndFlushAll() + done <- true + }() + select { + case <-done: + case <-time.After(timeout): + fmt.Fprintln(os.Stderr, "glog: Flush took longer than", timeout) + } +} + +// stacks is a wrapper for runtime.Stack that attempts to recover the data for all goroutines. +func stacks(all bool) []byte { + // We don't know how big the traces are, so grow a few times if they don't fit. Start large, though. + n := 10000 + if all { + n = 100000 + } + var trace []byte + for i := 0; i < 5; i++ { + trace = make([]byte, n) + nbytes := runtime.Stack(trace, all) + if nbytes < len(trace) { + return trace[:nbytes] + } + n *= 2 + } + return trace +} + +// logExitFunc provides a simple mechanism to override the default behavior +// of exiting on error. Used in testing and to guarantee we reach a required exit +// for fatal logs. Instead, exit could be a function rather than a method but that +// would make its use clumsier. +var logExitFunc func(error) + +// exit is called if there is trouble creating or writing log files. +// It flushes the logs and exits the program; there's no point in hanging around. +// l.mu is held. 
+func (l *loggingT) exit(err error) { + fmt.Fprintf(os.Stderr, "log: exiting because of error: %s\n", err) + // If logExitFunc is set, we do that instead of exiting. + if logExitFunc != nil { + logExitFunc(err) + return + } + l.flushAll() + os.Exit(2) +} + +// syncBuffer joins a bufio.Writer to its underlying file, providing access to the +// file's Sync method and providing a wrapper for the Write method that provides log +// file rotation. There are conflicting methods, so the file cannot be embedded. +// l.mu is held for all its methods. +type syncBuffer struct { + logger *loggingT + *bufio.Writer + file *os.File + sev severity + nbytes uint64 // The number of bytes written to this file +} + +func (sb *syncBuffer) Sync() error { + return sb.file.Sync() +} + +func (sb *syncBuffer) Write(p []byte) (n int, err error) { + if sb.nbytes+uint64(len(p)) >= MaxSize { + if err := sb.rotateFile(time.Now()); err != nil { + sb.logger.exit(err) + } + } + n, err = sb.Writer.Write(p) + sb.nbytes += uint64(n) + if err != nil { + sb.logger.exit(err) + } + return +} + +// rotateFile closes the syncBuffer's file and starts a new one. +func (sb *syncBuffer) rotateFile(now time.Time) error { + if sb.file != nil { + sb.Flush() + sb.file.Close() + } + var err error + sb.file, _, err = create(severityName[sb.sev], now) + sb.nbytes = 0 + if err != nil { + return err + } + + sb.Writer = bufio.NewWriterSize(sb.file, bufferSize) + + // Write header. + var buf bytes.Buffer + fmt.Fprintf(&buf, "Log file created at: %s\n", now.Format("2006/01/02 15:04:05")) + fmt.Fprintf(&buf, "Running on machine: %s\n", host) + fmt.Fprintf(&buf, "Binary: Built with %s %s for %s/%s\n", runtime.Compiler, runtime.Version(), runtime.GOOS, runtime.GOARCH) + fmt.Fprintf(&buf, "Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg\n") + n, err := sb.file.Write(buf.Bytes()) + sb.nbytes += uint64(n) + return err +} + +// bufferSize sizes the buffer associated with each log file. It's large +// so that log records can accumulate without the logging thread blocking +// on disk I/O. The flushDaemon will block instead. +const bufferSize = 256 * 1024 + +// createFiles creates all the log files for severity from sev down to infoLog. +// l.mu is held. +func (l *loggingT) createFiles(sev severity) error { + now := time.Now() + // Files are created in decreasing severity order, so as soon as we find one + // has already been created, we can stop. + for s := sev; s >= infoLog && l.file[s] == nil; s-- { + sb := &syncBuffer{ + logger: l, + sev: s, + } + if err := sb.rotateFile(now); err != nil { + return err + } + l.file[s] = sb + } + return nil +} + +const flushInterval = 30 * time.Second + +// flushDaemon periodically flushes the log file buffers. +func (l *loggingT) flushDaemon() { + for _ = range time.NewTicker(flushInterval).C { + l.lockAndFlushAll() + } +} + +// lockAndFlushAll is like flushAll but locks l.mu first. +func (l *loggingT) lockAndFlushAll() { + l.mu.Lock() + l.flushAll() + l.mu.Unlock() +} + +// flushAll flushes all the logs and attempts to "sync" their data to disk. +// l.mu is held. +func (l *loggingT) flushAll() { + // Flush from fatal down, in case there's trouble flushing. + for s := fatalLog; s >= infoLog; s-- { + file := l.file[s] + if file != nil { + file.Flush() // ignore error + file.Sync() // ignore error + } + } +} + +// CopyStandardLogTo arranges for messages written to the Go "log" package's +// default logs to also appear in the Google logs for the named and lower +// severities. 
Subsequent changes to the standard log's default output location +// or format may break this behavior. +// +// Valid names are "INFO", "WARNING", "ERROR", and "FATAL". If the name is not +// recognized, CopyStandardLogTo panics. +func CopyStandardLogTo(name string) { + sev, ok := severityByName(name) + if !ok { + panic(fmt.Sprintf("log.CopyStandardLogTo(%q): unrecognized severity name", name)) + } + // Set a log format that captures the user's file and line: + // d.go:23: message + stdLog.SetFlags(stdLog.Lshortfile) + stdLog.SetOutput(logBridge(sev)) +} + +// logBridge provides the Write method that enables CopyStandardLogTo to connect +// Go's standard logs to the logs provided by this package. +type logBridge severity + +// Write parses the standard logging line and passes its components to the +// logger for severity(lb). +func (lb logBridge) Write(b []byte) (n int, err error) { + var ( + file = "???" + line = 1 + text string + ) + // Split "d.go:23: message" into "d.go", "23", and "message". + if parts := bytes.SplitN(b, []byte{':'}, 3); len(parts) != 3 || len(parts[0]) < 1 || len(parts[2]) < 1 { + text = fmt.Sprintf("bad log format: %s", b) + } else { + file = string(parts[0]) + text = string(parts[2][1:]) // skip leading space + line, err = strconv.Atoi(string(parts[1])) + if err != nil { + text = fmt.Sprintf("bad line number: %s", b) + line = 1 + } + } + // printWithFileLine with alsoToStderr=true, so standard log messages + // always appear on standard error. + logging.printWithFileLine(severity(lb), file, line, true, text) + return len(b), nil +} + +// setV computes and remembers the V level for a given PC +// when vmodule is enabled. +// File pattern matching takes the basename of the file, stripped +// of its .go suffix, and uses filepath.Match, which is a little more +// general than the *? matching used in C++. +// l.mu is held. +func (l *loggingT) setV(pc uintptr) Level { + fn := runtime.FuncForPC(pc) + file, _ := fn.FileLine(pc) + // The file is something like /a/b/c/d.go. We want just the d. + if strings.HasSuffix(file, ".go") { + file = file[:len(file)-3] + } + if slash := strings.LastIndex(file, "/"); slash >= 0 { + file = file[slash+1:] + } + for _, filter := range l.vmodule.filter { + if filter.match(file) { + l.vmap[pc] = filter.level + return filter.level + } + } + l.vmap[pc] = 0 + return 0 +} + +// Verbose is a boolean type that implements Infof (like Printf) etc. +// See the documentation of V for more information. +type Verbose bool + +// V reports whether verbosity at the call site is at least the requested level. +// The returned value is a boolean of type Verbose, which implements Info, Infoln +// and Infof. These methods will write to the Info log if called. +// Thus, one may write either +// if glog.V(2) { glog.Info("log this") } +// or +// glog.V(2).Info("log this") +// The second form is shorter but the first is cheaper if logging is off because it does +// not evaluate its arguments. +// +// Whether an individual call to V generates a log record depends on the setting of +// the -v and --vmodule flags; both are off by default. If the level in the call to +// V is at least the value of -v, or of -vmodule for the source file containing the +// call, the V call will log. +func V(level Level) Verbose { + // This function tries hard to be cheap unless there's work to do. + // The fast path is two atomic loads and compares. + + // Here is a cheap but safe test to see if V logging is enabled globally. 
+ if logging.verbosity.get() >= level {
+ return Verbose(true)
+ }
+
+ // It's off globally but vmodule may still be set.
+ // Here is another cheap but safe test to see if vmodule is enabled.
+ if atomic.LoadInt32(&logging.filterLength) > 0 {
+ // Now we need a proper lock to use the logging structure. The pcs field
+ // is shared so we must lock before accessing it. This is fairly expensive,
+ // but if V logging is enabled we're slow anyway.
+ logging.mu.Lock()
+ defer logging.mu.Unlock()
+ if runtime.Callers(2, logging.pcs[:]) == 0 {
+ return Verbose(false)
+ }
+ v, ok := logging.vmap[logging.pcs[0]]
+ if !ok {
+ v = logging.setV(logging.pcs[0])
+ }
+ return Verbose(v >= level)
+ }
+ return Verbose(false)
+}
+
+// Info is equivalent to the global Info function, guarded by the value of v.
+// See the documentation of V for usage.
+func (v Verbose) Info(args ...interface{}) {
+ if v {
+ logging.print(infoLog, args...)
+ }
+}
+
+// Infoln is equivalent to the global Infoln function, guarded by the value of v.
+// See the documentation of V for usage.
+func (v Verbose) Infoln(args ...interface{}) {
+ if v {
+ logging.println(infoLog, args...)
+ }
+}
+
+// Infof is equivalent to the global Infof function, guarded by the value of v.
+// See the documentation of V for usage.
+func (v Verbose) Infof(format string, args ...interface{}) {
+ if v {
+ logging.printf(infoLog, format, args...)
+ }
+}
+
+// Info logs to the INFO log.
+// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
+func Info(args ...interface{}) {
+ logging.print(infoLog, args...)
+}
+
+// InfoDepth acts as Info but uses depth to determine which call frame to log.
+// InfoDepth(0, "msg") is the same as Info("msg").
+func InfoDepth(depth int, args ...interface{}) {
+ logging.printDepth(infoLog, depth, args...)
+}
+
+// Infoln logs to the INFO log.
+// Arguments are handled in the manner of fmt.Println; a newline is appended if missing.
+func Infoln(args ...interface{}) {
+ logging.println(infoLog, args...)
+}
+
+// Infof logs to the INFO log.
+// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
+func Infof(format string, args ...interface{}) {
+ logging.printf(infoLog, format, args...)
+}
+
+// Warning logs to the WARNING and INFO logs.
+// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
+func Warning(args ...interface{}) {
+ logging.print(warningLog, args...)
+}
+
+// WarningDepth acts as Warning but uses depth to determine which call frame to log.
+// WarningDepth(0, "msg") is the same as Warning("msg").
+func WarningDepth(depth int, args ...interface{}) {
+ logging.printDepth(warningLog, depth, args...)
+}
+
+// Warningln logs to the WARNING and INFO logs.
+// Arguments are handled in the manner of fmt.Println; a newline is appended if missing.
+func Warningln(args ...interface{}) {
+ logging.println(warningLog, args...)
+}
+
+// Warningf logs to the WARNING and INFO logs.
+// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
+func Warningf(format string, args ...interface{}) {
+ logging.printf(warningLog, format, args...)
+}
+
+// Error logs to the ERROR, WARNING, and INFO logs.
+// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
+func Error(args ...interface{}) {
+ logging.print(errorLog, args...)
+}
+
+// ErrorDepth acts as Error but uses depth to determine which call frame to log.
+// ErrorDepth(0, "msg") is the same as Error("msg").
+func ErrorDepth(depth int, args ...interface{}) { + logging.printDepth(errorLog, depth, args...) +} + +// Errorln logs to the ERROR, WARNING, and INFO logs. +// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. +func Errorln(args ...interface{}) { + logging.println(errorLog, args...) +} + +// Errorf logs to the ERROR, WARNING, and INFO logs. +// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. +func Errorf(format string, args ...interface{}) { + logging.printf(errorLog, format, args...) +} + +// Fatal logs to the FATAL, ERROR, WARNING, and INFO logs, +// including a stack trace of all running goroutines, then calls os.Exit(255). +// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. +func Fatal(args ...interface{}) { + logging.print(fatalLog, args...) +} + +// FatalDepth acts as Fatal but uses depth to determine which call frame to log. +// FatalDepth(0, "msg") is the same as Fatal("msg"). +func FatalDepth(depth int, args ...interface{}) { + logging.printDepth(fatalLog, depth, args...) +} + +// Fatalln logs to the FATAL, ERROR, WARNING, and INFO logs, +// including a stack trace of all running goroutines, then calls os.Exit(255). +// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. +func Fatalln(args ...interface{}) { + logging.println(fatalLog, args...) +} + +// Fatalf logs to the FATAL, ERROR, WARNING, and INFO logs, +// including a stack trace of all running goroutines, then calls os.Exit(255). +// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. +func Fatalf(format string, args ...interface{}) { + logging.printf(fatalLog, format, args...) +} + +// fatalNoStacks is non-zero if we are to exit without dumping goroutine stacks. +// It allows Exit and relatives to use the Fatal logs. +var fatalNoStacks uint32 + +// Exit logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). +// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. +func Exit(args ...interface{}) { + atomic.StoreUint32(&fatalNoStacks, 1) + logging.print(fatalLog, args...) +} + +// ExitDepth acts as Exit but uses depth to determine which call frame to log. +// ExitDepth(0, "msg") is the same as Exit("msg"). +func ExitDepth(depth int, args ...interface{}) { + atomic.StoreUint32(&fatalNoStacks, 1) + logging.printDepth(fatalLog, depth, args...) +} + +// Exitln logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). +func Exitln(args ...interface{}) { + atomic.StoreUint32(&fatalNoStacks, 1) + logging.println(fatalLog, args...) +} + +// Exitf logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). +// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. +func Exitf(format string, args ...interface{}) { + atomic.StoreUint32(&fatalNoStacks, 1) + logging.printf(fatalLog, format, args...) +} diff --git a/vendor/github.com/golang/glog/glog_file.go b/vendor/github.com/golang/glog/glog_file.go new file mode 100644 index 000000000..65075d281 --- /dev/null +++ b/vendor/github.com/golang/glog/glog_file.go @@ -0,0 +1,124 @@ +// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/ +// +// Copyright 2013 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// File I/O for logs. + +package glog + +import ( + "errors" + "flag" + "fmt" + "os" + "os/user" + "path/filepath" + "strings" + "sync" + "time" +) + +// MaxSize is the maximum size of a log file in bytes. +var MaxSize uint64 = 1024 * 1024 * 1800 + +// logDirs lists the candidate directories for new log files. +var logDirs []string + +// If non-empty, overrides the choice of directory in which to write logs. +// See createLogDirs for the full list of possible destinations. +var logDir = flag.String("log_dir", "", "If non-empty, write log files in this directory") + +func createLogDirs() { + if *logDir != "" { + logDirs = append(logDirs, *logDir) + } + logDirs = append(logDirs, os.TempDir()) +} + +var ( + pid = os.Getpid() + program = filepath.Base(os.Args[0]) + host = "unknownhost" + userName = "unknownuser" +) + +func init() { + h, err := os.Hostname() + if err == nil { + host = shortHostname(h) + } + + current, err := user.Current() + if err == nil { + userName = current.Username + } + + // Sanitize userName since it may contain filepath separators on Windows. + userName = strings.Replace(userName, `\`, "_", -1) +} + +// shortHostname returns its argument, truncating at the first period. +// For instance, given "www.google.com" it returns "www". +func shortHostname(hostname string) string { + if i := strings.Index(hostname, "."); i >= 0 { + return hostname[:i] + } + return hostname +} + +// logName returns a new log file name containing tag, with start time t, and +// the name for the symlink for tag. +func logName(tag string, t time.Time) (name, link string) { + name = fmt.Sprintf("%s.%s.%s.log.%s.%04d%02d%02d-%02d%02d%02d.%d", + program, + host, + userName, + tag, + t.Year(), + t.Month(), + t.Day(), + t.Hour(), + t.Minute(), + t.Second(), + pid) + return name, program + "." + tag +} + +var onceLogDirs sync.Once + +// create creates a new log file and returns the file and its filename, which +// contains tag ("INFO", "FATAL", etc.) and t. If the file is created +// successfully, create also attempts to update the symlink for that tag, ignoring +// errors. +func create(tag string, t time.Time) (f *os.File, filename string, err error) { + onceLogDirs.Do(createLogDirs) + if len(logDirs) == 0 { + return nil, "", errors.New("log: no log dirs") + } + name, link := logName(tag, t) + var lastErr error + for _, dir := range logDirs { + fname := filepath.Join(dir, name) + f, err := os.Create(fname) + if err == nil { + symlink := filepath.Join(dir, link) + os.Remove(symlink) // ignore err + os.Symlink(name, symlink) // ignore err + return f, fname, nil + } + lastErr = err + } + return nil, "", fmt.Errorf("log: cannot create log: %v", lastErr) +} diff --git a/vendor/github.com/golang/groupcache/LICENSE b/vendor/github.com/golang/groupcache/LICENSE new file mode 100644 index 000000000..37ec93a14 --- /dev/null +++ b/vendor/github.com/golang/groupcache/LICENSE @@ -0,0 +1,191 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. 
+ +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. 
+ +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. + +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. 
+ +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. + +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. + +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
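The lru package vendored next implements the LRU cache that groupcache builds on. Its surface is small (New, Add, Get, Remove, RemoveOldest, Len, Clear, plus the OnEvicted callback) and, per its doc comment, the cache is not safe for concurrent access. A minimal usage sketch, illustrative only and not part of this patch:

    package main

    import (
        "fmt"

        "github.com/golang/groupcache/lru"
    )

    func main() {
        // New(2) caps the cache at two entries; adding a third evicts the
        // least recently used one. Callers needing concurrency must wrap
        // the cache in their own locking.
        c := lru.New(2)
        c.OnEvicted = func(key lru.Key, value interface{}) {
            fmt.Printf("evicted %v\n", key)
        }

        c.Add("a", 1)
        c.Add("b", 2)
        c.Add("c", 3) // evicts "a", the oldest entry

        if _, ok := c.Get("a"); !ok {
            fmt.Println("a is gone; cache holds", c.Len(), "entries")
        }
    }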
diff --git a/vendor/github.com/golang/groupcache/lru/lru.go b/vendor/github.com/golang/groupcache/lru/lru.go new file mode 100644 index 000000000..eac1c7664 --- /dev/null +++ b/vendor/github.com/golang/groupcache/lru/lru.go @@ -0,0 +1,133 @@ +/* +Copyright 2013 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package lru implements an LRU cache. +package lru + +import "container/list" + +// Cache is an LRU cache. It is not safe for concurrent access. +type Cache struct { + // MaxEntries is the maximum number of cache entries before + // an item is evicted. Zero means no limit. + MaxEntries int + + // OnEvicted optionally specifies a callback function to be + // executed when an entry is purged from the cache. + OnEvicted func(key Key, value interface{}) + + ll *list.List + cache map[interface{}]*list.Element +} + +// A Key may be any value that is comparable. See http://golang.org/ref/spec#Comparison_operators +type Key interface{} + +type entry struct { + key Key + value interface{} +} + +// New creates a new Cache. +// If maxEntries is zero, the cache has no limit and it's assumed +// that eviction is done by the caller. +func New(maxEntries int) *Cache { + return &Cache{ + MaxEntries: maxEntries, + ll: list.New(), + cache: make(map[interface{}]*list.Element), + } +} + +// Add adds a value to the cache. +func (c *Cache) Add(key Key, value interface{}) { + if c.cache == nil { + c.cache = make(map[interface{}]*list.Element) + c.ll = list.New() + } + if ee, ok := c.cache[key]; ok { + c.ll.MoveToFront(ee) + ee.Value.(*entry).value = value + return + } + ele := c.ll.PushFront(&entry{key, value}) + c.cache[key] = ele + if c.MaxEntries != 0 && c.ll.Len() > c.MaxEntries { + c.RemoveOldest() + } +} + +// Get looks up a key's value from the cache. +func (c *Cache) Get(key Key) (value interface{}, ok bool) { + if c.cache == nil { + return + } + if ele, hit := c.cache[key]; hit { + c.ll.MoveToFront(ele) + return ele.Value.(*entry).value, true + } + return +} + +// Remove removes the provided key from the cache. +func (c *Cache) Remove(key Key) { + if c.cache == nil { + return + } + if ele, hit := c.cache[key]; hit { + c.removeElement(ele) + } +} + +// RemoveOldest removes the oldest item from the cache. +func (c *Cache) RemoveOldest() { + if c.cache == nil { + return + } + ele := c.ll.Back() + if ele != nil { + c.removeElement(ele) + } +} + +func (c *Cache) removeElement(e *list.Element) { + c.ll.Remove(e) + kv := e.Value.(*entry) + delete(c.cache, kv.key) + if c.OnEvicted != nil { + c.OnEvicted(kv.key, kv.value) + } +} + +// Len returns the number of items in the cache. +func (c *Cache) Len() int { + if c.cache == nil { + return 0 + } + return c.ll.Len() +} + +// Clear purges all stored items from the cache. 
+func (c *Cache) Clear() { + if c.OnEvicted != nil { + for _, e := range c.cache { + kv := e.Value.(*entry) + c.OnEvicted(kv.key, kv.value) + } + } + c.ll = nil + c.cache = nil +} diff --git a/vendor/github.com/golang/protobuf/proto/decode.go b/vendor/github.com/golang/protobuf/proto/decode.go index 63b0f08be..d9aa3c42d 100644 --- a/vendor/github.com/golang/protobuf/proto/decode.go +++ b/vendor/github.com/golang/protobuf/proto/decode.go @@ -186,6 +186,7 @@ func (p *Buffer) DecodeVarint() (x uint64, err error) { if b&0x80 == 0 { goto done } + // x -= 0x80 << 63 // Always zero. return 0, errOverflow diff --git a/vendor/github.com/golang/protobuf/proto/equal.go b/vendor/github.com/golang/protobuf/proto/equal.go index f9b6e41b3..d4db5a1c1 100644 --- a/vendor/github.com/golang/protobuf/proto/equal.go +++ b/vendor/github.com/golang/protobuf/proto/equal.go @@ -246,8 +246,7 @@ func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool { return false } - m1 := extensionAsLegacyType(e1.value) - m2 := extensionAsLegacyType(e2.value) + m1, m2 := e1.value, e2.value if m1 == nil && m2 == nil { // Both have only encoded form. diff --git a/vendor/github.com/golang/protobuf/proto/extensions.go b/vendor/github.com/golang/protobuf/proto/extensions.go index fa88add30..816a3b9d6 100644 --- a/vendor/github.com/golang/protobuf/proto/extensions.go +++ b/vendor/github.com/golang/protobuf/proto/extensions.go @@ -185,25 +185,9 @@ type Extension struct { // extension will have only enc set. When such an extension is // accessed using GetExtension (or GetExtensions) desc and value // will be set. - desc *ExtensionDesc - - // value is a concrete value for the extension field. Let the type of - // desc.ExtensionType be the "API type" and the type of Extension.value - // be the "storage type". The API type and storage type are the same except: - // * For scalars (except []byte), the API type uses *T, - // while the storage type uses T. - // * For repeated fields, the API type uses []T, while the storage type - // uses *[]T. - // - // The reason for the divergence is so that the storage type more naturally - // matches what is expected of when retrieving the values through the - // protobuf reflection APIs. - // - // The value may only be populated if desc is also populated. + desc *ExtensionDesc value interface{} - - // enc is the raw bytes for the extension field. - enc []byte + enc []byte } // SetRawExtension is for testing only. @@ -350,7 +334,7 @@ func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) { // descriptors with the same field number. return nil, errors.New("proto: descriptor conflict") } - return extensionAsLegacyType(e.value), nil + return e.value, nil } if extension.ExtensionType == nil { @@ -365,11 +349,11 @@ func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) { // Remember the decoded version and drop the encoded version. // That way it is safe to mutate what we return. - e.value = extensionAsStorageType(v) + e.value = v e.desc = extension e.enc = nil emap[extension.Field] = e - return extensionAsLegacyType(e.value), nil + return e.value, nil } // defaultExtensionValue returns the default value for extension. @@ -504,7 +488,7 @@ func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error } typ := reflect.TypeOf(extension.ExtensionType) if typ != reflect.TypeOf(value) { - return fmt.Errorf("proto: bad extension value type. 
got: %T, want: %T", value, extension.ExtensionType) + return errors.New("proto: bad extension value type") } // nil extension values need to be caught early, because the // encoder can't distinguish an ErrNil due to a nil extension @@ -516,7 +500,7 @@ func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error } extmap := epb.extensionsWrite() - extmap[extension.Field] = Extension{desc: extension, value: extensionAsStorageType(value)} + extmap[extension.Field] = Extension{desc: extension, value: value} return nil } @@ -557,51 +541,3 @@ func RegisterExtension(desc *ExtensionDesc) { func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc { return extensionMaps[reflect.TypeOf(pb).Elem()] } - -// extensionAsLegacyType converts an value in the storage type as the API type. -// See Extension.value. -func extensionAsLegacyType(v interface{}) interface{} { - switch rv := reflect.ValueOf(v); rv.Kind() { - case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String: - // Represent primitive types as a pointer to the value. - rv2 := reflect.New(rv.Type()) - rv2.Elem().Set(rv) - v = rv2.Interface() - case reflect.Ptr: - // Represent slice types as the value itself. - switch rv.Type().Elem().Kind() { - case reflect.Slice: - if rv.IsNil() { - v = reflect.Zero(rv.Type().Elem()).Interface() - } else { - v = rv.Elem().Interface() - } - } - } - return v -} - -// extensionAsStorageType converts an value in the API type as the storage type. -// See Extension.value. -func extensionAsStorageType(v interface{}) interface{} { - switch rv := reflect.ValueOf(v); rv.Kind() { - case reflect.Ptr: - // Represent slice types as the value itself. - switch rv.Type().Elem().Kind() { - case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String: - if rv.IsNil() { - v = reflect.Zero(rv.Type().Elem()).Interface() - } else { - v = rv.Elem().Interface() - } - } - case reflect.Slice: - // Represent slice types as a pointer to the value. - if rv.Type().Elem().Kind() != reflect.Uint8 { - rv2 := reflect.New(rv.Type()) - rv2.Elem().Set(rv) - v = rv2.Interface() - } - } - return v -} diff --git a/vendor/github.com/golang/protobuf/proto/lib.go b/vendor/github.com/golang/protobuf/proto/lib.go index fdd328bb7..75565cc6d 100644 --- a/vendor/github.com/golang/protobuf/proto/lib.go +++ b/vendor/github.com/golang/protobuf/proto/lib.go @@ -341,6 +341,26 @@ type Message interface { ProtoMessage() } +// Stats records allocation details about the protocol buffer encoders +// and decoders. Useful for tuning the library itself. +type Stats struct { + Emalloc uint64 // mallocs in encode + Dmalloc uint64 // mallocs in decode + Encode uint64 // number of encodes + Decode uint64 // number of decodes + Chit uint64 // number of cache hits + Cmiss uint64 // number of cache misses + Size uint64 // number of sizes +} + +// Set to true to enable stats collection. +const collectStats = false + +var stats Stats + +// GetStats returns a copy of the global Stats structure. +func GetStats() Stats { return stats } + // A Buffer is a buffer manager for marshaling and unmarshaling // protocol buffers. It may be reused between invocations to // reduce memory usage. 
It is not necessary to use a Buffer; @@ -940,19 +960,13 @@ func isProto3Zero(v reflect.Value) bool { return false } -const ( - // ProtoPackageIsVersion3 is referenced from generated protocol buffer files - // to assert that that code is compatible with this version of the proto package. - ProtoPackageIsVersion3 = true - - // ProtoPackageIsVersion2 is referenced from generated protocol buffer files - // to assert that that code is compatible with this version of the proto package. - ProtoPackageIsVersion2 = true +// ProtoPackageIsVersion2 is referenced from generated protocol buffer files +// to assert that that code is compatible with this version of the proto package. +const ProtoPackageIsVersion2 = true - // ProtoPackageIsVersion1 is referenced from generated protocol buffer files - // to assert that that code is compatible with this version of the proto package. - ProtoPackageIsVersion1 = true -) +// ProtoPackageIsVersion1 is referenced from generated protocol buffer files +// to assert that that code is compatible with this version of the proto package. +const ProtoPackageIsVersion1 = true // InternalMessageInfo is a type used internally by generated .pb.go files. // This type is not intended to be used by non-generated code. diff --git a/vendor/github.com/golang/protobuf/proto/message_set.go b/vendor/github.com/golang/protobuf/proto/message_set.go index f48a75676..3b6ca41d5 100644 --- a/vendor/github.com/golang/protobuf/proto/message_set.go +++ b/vendor/github.com/golang/protobuf/proto/message_set.go @@ -36,7 +36,13 @@ package proto */ import ( + "bytes" + "encoding/json" "errors" + "fmt" + "reflect" + "sort" + "sync" ) // errNoMessageTypeID occurs when a protocol buffer does not have a message type ID. @@ -139,9 +145,46 @@ func skipVarint(buf []byte) []byte { return buf[i+1:] } -// unmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. +// MarshalMessageSet encodes the extension map represented by m in the message set wire format. +// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option. +func MarshalMessageSet(exts interface{}) ([]byte, error) { + return marshalMessageSet(exts, false) +} + +// marshaMessageSet implements above function, with the opt to turn on / off deterministic during Marshal. +func marshalMessageSet(exts interface{}, deterministic bool) ([]byte, error) { + switch exts := exts.(type) { + case *XXX_InternalExtensions: + var u marshalInfo + siz := u.sizeMessageSet(exts) + b := make([]byte, 0, siz) + return u.appendMessageSet(b, exts, deterministic) + + case map[int32]Extension: + // This is an old-style extension map. + // Wrap it in a new-style XXX_InternalExtensions. + ie := XXX_InternalExtensions{ + p: &struct { + mu sync.Mutex + extensionMap map[int32]Extension + }{ + extensionMap: exts, + }, + } + + var u marshalInfo + siz := u.sizeMessageSet(&ie) + b := make([]byte, 0, siz) + return u.appendMessageSet(b, &ie, deterministic) + + default: + return nil, errors.New("proto: not an extension map") + } +} + +// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. // It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option. 
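+// (For orientation: in the message set wire format each item is a group on
+// field number 1 carrying a type_id on field 2 and the serialized message
+// bytes on field 3, which is what the 3<<3|WireBytes and 1<<3|WireEndGroup
+// constants in table_marshal.go below correspond to.)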
-func unmarshalMessageSet(buf []byte, exts interface{}) error { +func UnmarshalMessageSet(buf []byte, exts interface{}) error { var m map[int32]Extension switch exts := exts.(type) { case *XXX_InternalExtensions: @@ -179,3 +222,93 @@ func unmarshalMessageSet(buf []byte, exts interface{}) error { } return nil } + +// MarshalMessageSetJSON encodes the extension map represented by m in JSON format. +// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option. +func MarshalMessageSetJSON(exts interface{}) ([]byte, error) { + var m map[int32]Extension + switch exts := exts.(type) { + case *XXX_InternalExtensions: + var mu sync.Locker + m, mu = exts.extensionsRead() + if m != nil { + // Keep the extensions map locked until we're done marshaling to prevent + // races between marshaling and unmarshaling the lazily-{en,de}coded + // values. + mu.Lock() + defer mu.Unlock() + } + case map[int32]Extension: + m = exts + default: + return nil, errors.New("proto: not an extension map") + } + var b bytes.Buffer + b.WriteByte('{') + + // Process the map in key order for deterministic output. + ids := make([]int32, 0, len(m)) + for id := range m { + ids = append(ids, id) + } + sort.Sort(int32Slice(ids)) // int32Slice defined in text.go + + for i, id := range ids { + ext := m[id] + msd, ok := messageSetMap[id] + if !ok { + // Unknown type; we can't render it, so skip it. + continue + } + + if i > 0 && b.Len() > 1 { + b.WriteByte(',') + } + + fmt.Fprintf(&b, `"[%s]":`, msd.name) + + x := ext.value + if x == nil { + x = reflect.New(msd.t.Elem()).Interface() + if err := Unmarshal(ext.enc, x.(Message)); err != nil { + return nil, err + } + } + d, err := json.Marshal(x) + if err != nil { + return nil, err + } + b.Write(d) + } + b.WriteByte('}') + return b.Bytes(), nil +} + +// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format. +// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option. +func UnmarshalMessageSetJSON(buf []byte, exts interface{}) error { + // Common-case fast path. + if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) { + return nil + } + + // This is fairly tricky, and it's not clear that it is needed. + return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented") +} + +// A global registry of types that can be used in a MessageSet. + +var messageSetMap = make(map[int32]messageSetDesc) + +type messageSetDesc struct { + t reflect.Type // pointer to struct + name string +} + +// RegisterMessageSetType is called from the generated code. +func RegisterMessageSetType(m Message, fieldNum int32, name string) { + messageSetMap[fieldNum] = messageSetDesc{ + t: reflect.TypeOf(m), + name: name, + } +} diff --git a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go index 94fa9194a..b6cad9083 100644 --- a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go +++ b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go @@ -79,13 +79,10 @@ func toPointer(i *Message) pointer { // toAddrPointer converts an interface to a pointer that points to // the interface data. 
-func toAddrPointer(i *interface{}, isptr, deref bool) pointer { +func toAddrPointer(i *interface{}, isptr bool) pointer { v := reflect.ValueOf(*i) u := reflect.New(v.Type()) u.Elem().Set(v) - if deref { - u = u.Elem() - } return pointer{v: u} } diff --git a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go index dbfffe071..d55a335d9 100644 --- a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go +++ b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go @@ -85,21 +85,16 @@ func toPointer(i *Message) pointer { // toAddrPointer converts an interface to a pointer that points to // the interface data. -func toAddrPointer(i *interface{}, isptr, deref bool) (p pointer) { +func toAddrPointer(i *interface{}, isptr bool) pointer { // Super-tricky - read or get the address of data word of interface value. if isptr { // The interface is of pointer type, thus it is a direct interface. // The data word is the pointer data itself. We take its address. - p = pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)} - } else { - // The interface is not of pointer type. The data word is the pointer - // to the data. - p = pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} + return pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)} } - if deref { - p.p = *(*unsafe.Pointer)(p.p) - } - return p + // The interface is not of pointer type. The data word is the pointer + // to the data. + return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} } // valToPointer converts v to a pointer. v must be of pointer type. diff --git a/vendor/github.com/golang/protobuf/proto/properties.go b/vendor/github.com/golang/protobuf/proto/properties.go index 79668ff5c..50b99b83a 100644 --- a/vendor/github.com/golang/protobuf/proto/properties.go +++ b/vendor/github.com/golang/protobuf/proto/properties.go @@ -334,6 +334,9 @@ func GetProperties(t reflect.Type) *StructProperties { sprop, ok := propertiesMap[t] propertiesMu.RUnlock() if ok { + if collectStats { + stats.Chit++ + } return sprop } @@ -343,20 +346,17 @@ func GetProperties(t reflect.Type) *StructProperties { return sprop } -type ( - oneofFuncsIface interface { - XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) - } - oneofWrappersIface interface { - XXX_OneofWrappers() []interface{} - } -) - // getPropertiesLocked requires that propertiesMu is held. func getPropertiesLocked(t reflect.Type) *StructProperties { if prop, ok := propertiesMap[t]; ok { + if collectStats { + stats.Chit++ + } return prop } + if collectStats { + stats.Cmiss++ + } prop := new(StructProperties) // in case of recursive protos, fill this in now. @@ -391,14 +391,13 @@ func getPropertiesLocked(t reflect.Type) *StructProperties { // Re-order prop.order. sort.Sort(prop) - var oots []interface{} - switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) { - case oneofFuncsIface: - _, _, _, oots = m.XXX_OneofFuncs() - case oneofWrappersIface: - oots = m.XXX_OneofWrappers() + type oneofMessage interface { + XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) } - if len(oots) > 0 { + if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok { + var oots []interface{} + _, _, _, oots = om.XXX_OneofFuncs() + // Interpret oneof metadata. 
prop.OneofTypes = make(map[string]*OneofProperties) for _, oot := range oots { diff --git a/vendor/github.com/golang/protobuf/proto/table_marshal.go b/vendor/github.com/golang/protobuf/proto/table_marshal.go index 5cb11fa95..b16794496 100644 --- a/vendor/github.com/golang/protobuf/proto/table_marshal.go +++ b/vendor/github.com/golang/protobuf/proto/table_marshal.go @@ -87,7 +87,6 @@ type marshalElemInfo struct { sizer sizer marshaler marshaler isptr bool // elem is pointer typed, thus interface of this type is a direct interface (extension only) - deref bool // dereference the pointer before operating on it; implies isptr } var ( @@ -321,11 +320,8 @@ func (u *marshalInfo) computeMarshalInfo() { // get oneof implementers var oneofImplementers []interface{} - switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) { - case oneofFuncsIface: + if m, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok { _, _, _, oneofImplementers = m.XXX_OneofFuncs() - case oneofWrappersIface: - oneofImplementers = m.XXX_OneofWrappers() } n := t.NumField() @@ -411,22 +407,13 @@ func (u *marshalInfo) getExtElemInfo(desc *ExtensionDesc) *marshalElemInfo { panic("tag is not an integer") } wt := wiretype(tags[0]) - if t.Kind() == reflect.Ptr && t.Elem().Kind() != reflect.Struct { - t = t.Elem() - } sizer, marshaler := typeMarshaler(t, tags, false, false) - var deref bool - if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { - t = reflect.PtrTo(t) - deref = true - } e = &marshalElemInfo{ wiretag: uint64(tag)<<3 | wt, tagsize: SizeVarint(uint64(tag) << 3), sizer: sizer, marshaler: marshaler, isptr: t.Kind() == reflect.Ptr, - deref: deref, } // update cache @@ -461,7 +448,7 @@ func (fi *marshalFieldInfo) computeMarshalFieldInfo(f *reflect.StructField) { func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofImplementers []interface{}) { fi.field = toField(f) - fi.wiretag = math.MaxInt32 // Use a large tag number, make oneofs sorted at the end. This tag will not appear on the wire. + fi.wiretag = 1<<31 - 1 // Use a large tag number, make oneofs sorted at the end. This tag will not appear on the wire. fi.isPointer = true fi.sizer, fi.marshaler = makeOneOfMarshaler(fi, f) fi.oneofElems = make(map[reflect.Type]*marshalElemInfo) @@ -489,6 +476,10 @@ func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofI } } +type oneofMessage interface { + XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) +} + // wiretype returns the wire encoding of the type. 
func wiretype(encoding string) uint64 { switch encoding { @@ -2319,8 +2310,8 @@ func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) { for _, k := range m.MapKeys() { ki := k.Interface() vi := m.MapIndex(k).Interface() - kaddr := toAddrPointer(&ki, false, false) // pointer to key - vaddr := toAddrPointer(&vi, valIsPtr, false) // pointer to value + kaddr := toAddrPointer(&ki, false) // pointer to key + vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) n += siz + SizeVarint(uint64(siz)) + tagsize } @@ -2338,8 +2329,8 @@ func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) { for _, k := range keys { ki := k.Interface() vi := m.MapIndex(k).Interface() - kaddr := toAddrPointer(&ki, false, false) // pointer to key - vaddr := toAddrPointer(&vi, valIsPtr, false) // pointer to value + kaddr := toAddrPointer(&ki, false) // pointer to key + vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value b = appendVarint(b, tag) siz := keySizer(kaddr, 1) + valCachedSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) b = appendVarint(b, uint64(siz)) @@ -2408,7 +2399,7 @@ func (u *marshalInfo) sizeExtensions(ext *XXX_InternalExtensions) int { // the last time this function was called. ei := u.getExtElemInfo(e.desc) v := e.value - p := toAddrPointer(&v, ei.isptr, ei.deref) + p := toAddrPointer(&v, ei.isptr) n += ei.sizer(p, ei.tagsize) } mu.Unlock() @@ -2443,7 +2434,7 @@ func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, de ei := u.getExtElemInfo(e.desc) v := e.value - p := toAddrPointer(&v, ei.isptr, ei.deref) + p := toAddrPointer(&v, ei.isptr) b, err = ei.marshaler(b, p, ei.wiretag, deterministic) if !nerr.Merge(err) { return b, err @@ -2474,7 +2465,7 @@ func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, de ei := u.getExtElemInfo(e.desc) v := e.value - p := toAddrPointer(&v, ei.isptr, ei.deref) + p := toAddrPointer(&v, ei.isptr) b, err = ei.marshaler(b, p, ei.wiretag, deterministic) if !nerr.Merge(err) { return b, err @@ -2519,7 +2510,7 @@ func (u *marshalInfo) sizeMessageSet(ext *XXX_InternalExtensions) int { ei := u.getExtElemInfo(e.desc) v := e.value - p := toAddrPointer(&v, ei.isptr, ei.deref) + p := toAddrPointer(&v, ei.isptr) n += ei.sizer(p, 1) // message, tag = 3 (size=1) } mu.Unlock() @@ -2562,7 +2553,7 @@ func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, de ei := u.getExtElemInfo(e.desc) v := e.value - p := toAddrPointer(&v, ei.isptr, ei.deref) + p := toAddrPointer(&v, ei.isptr) b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) if !nerr.Merge(err) { return b, err @@ -2600,7 +2591,7 @@ func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, de ei := u.getExtElemInfo(e.desc) v := e.value - p := toAddrPointer(&v, ei.isptr, ei.deref) + p := toAddrPointer(&v, ei.isptr) b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) b = append(b, 1<<3|WireEndGroup) if !nerr.Merge(err) { @@ -2630,7 +2621,7 @@ func (u *marshalInfo) sizeV1Extensions(m map[int32]Extension) int { ei := u.getExtElemInfo(e.desc) v := e.value - p := toAddrPointer(&v, ei.isptr, ei.deref) + p := toAddrPointer(&v, ei.isptr) n += ei.sizer(p, ei.tagsize) } return n @@ -2665,7 +2656,7 @@ func (u *marshalInfo) appendV1Extensions(b []byte, m map[int32]Extension, determ ei := u.getExtElemInfo(e.desc) v := e.value - p := toAddrPointer(&v, ei.isptr, ei.deref) + p := toAddrPointer(&v, 
ei.isptr) b, err = ei.marshaler(b, p, ei.wiretag, deterministic) if !nerr.Merge(err) { return b, err diff --git a/vendor/github.com/golang/protobuf/proto/table_unmarshal.go b/vendor/github.com/golang/protobuf/proto/table_unmarshal.go index acee2fc52..ebf1caa56 100644 --- a/vendor/github.com/golang/protobuf/proto/table_unmarshal.go +++ b/vendor/github.com/golang/protobuf/proto/table_unmarshal.go @@ -136,7 +136,7 @@ func (u *unmarshalInfo) unmarshal(m pointer, b []byte) error { u.computeUnmarshalInfo() } if u.isMessageSet { - return unmarshalMessageSet(b, m.offset(u.extensions).toExtensions()) + return UnmarshalMessageSet(b, m.offset(u.extensions).toExtensions()) } var reqMask uint64 // bitmask of required fields we've seen. var errLater error @@ -362,48 +362,46 @@ func (u *unmarshalInfo) computeUnmarshalInfo() { } // Find any types associated with oneof fields. - var oneofImplementers []interface{} - switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) { - case oneofFuncsIface: - _, _, _, oneofImplementers = m.XXX_OneofFuncs() - case oneofWrappersIface: - oneofImplementers = m.XXX_OneofWrappers() - } - for _, v := range oneofImplementers { - tptr := reflect.TypeOf(v) // *Msg_X - typ := tptr.Elem() // Msg_X - - f := typ.Field(0) // oneof implementers have one field - baseUnmarshal := fieldUnmarshaler(&f) - tags := strings.Split(f.Tag.Get("protobuf"), ",") - fieldNum, err := strconv.Atoi(tags[1]) - if err != nil { - panic("protobuf tag field not an integer: " + tags[1]) - } - var name string - for _, tag := range tags { - if strings.HasPrefix(tag, "name=") { - name = strings.TrimPrefix(tag, "name=") - break + // TODO: XXX_OneofFuncs returns more info than we need. Get rid of some of it? + fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("XXX_OneofFuncs") + if fn.IsValid() { + res := fn.Call(nil)[3] // last return value from XXX_OneofFuncs: []interface{} + for i := res.Len() - 1; i >= 0; i-- { + v := res.Index(i) // interface{} + tptr := reflect.ValueOf(v.Interface()).Type() // *Msg_X + typ := tptr.Elem() // Msg_X + + f := typ.Field(0) // oneof implementers have one field + baseUnmarshal := fieldUnmarshaler(&f) + tags := strings.Split(f.Tag.Get("protobuf"), ",") + fieldNum, err := strconv.Atoi(tags[1]) + if err != nil { + panic("protobuf tag field not an integer: " + tags[1]) + } + var name string + for _, tag := range tags { + if strings.HasPrefix(tag, "name=") { + name = strings.TrimPrefix(tag, "name=") + break + } } - } - // Find the oneof field that this struct implements. - // Might take O(n^2) to process all of the oneofs, but who cares. - for _, of := range oneofFields { - if tptr.Implements(of.ityp) { - // We have found the corresponding interface for this struct. - // That lets us know where this struct should be stored - // when we encounter it during unmarshaling. - unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal) - u.setTag(fieldNum, of.field, unmarshal, 0, name) + // Find the oneof field that this struct implements. + // Might take O(n^2) to process all of the oneofs, but who cares. + for _, of := range oneofFields { + if tptr.Implements(of.ityp) { + // We have found the corresponding interface for this struct. + // That lets us know where this struct should be stored + // when we encounter it during unmarshaling. + unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal) + u.setTag(fieldNum, of.field, unmarshal, 0, name) + } } } - } // Get extension ranges, if any. 
- fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray") + fn = reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray") if fn.IsValid() { if !u.extensions.IsValid() && !u.oldExtensions.IsValid() { panic("a message with extensions, but no extensions field in " + t.Name()) @@ -1950,7 +1948,7 @@ func encodeVarint(b []byte, x uint64) []byte { // If there is an error, it returns 0,0. func decodeVarint(b []byte) (uint64, int) { var x, y uint64 - if len(b) == 0 { + if len(b) <= 0 { goto bad } x = uint64(b[0]) diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go new file mode 100644 index 000000000..e855b1f5c --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go @@ -0,0 +1,2812 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/descriptor.proto + +package descriptor // import "github.com/golang/protobuf/protoc-gen-go/descriptor" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type FieldDescriptorProto_Type int32 + +const ( + // 0 is reserved for errors. + // Order is weird for historical reasons. + FieldDescriptorProto_TYPE_DOUBLE FieldDescriptorProto_Type = 1 + FieldDescriptorProto_TYPE_FLOAT FieldDescriptorProto_Type = 2 + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if + // negative values are likely. + FieldDescriptorProto_TYPE_INT64 FieldDescriptorProto_Type = 3 + FieldDescriptorProto_TYPE_UINT64 FieldDescriptorProto_Type = 4 + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if + // negative values are likely. + FieldDescriptorProto_TYPE_INT32 FieldDescriptorProto_Type = 5 + FieldDescriptorProto_TYPE_FIXED64 FieldDescriptorProto_Type = 6 + FieldDescriptorProto_TYPE_FIXED32 FieldDescriptorProto_Type = 7 + FieldDescriptorProto_TYPE_BOOL FieldDescriptorProto_Type = 8 + FieldDescriptorProto_TYPE_STRING FieldDescriptorProto_Type = 9 + // Tag-delimited aggregate. + // Group type is deprecated and not supported in proto3. However, Proto3 + // implementations should still be able to parse the group wire format and + // treat group fields as unknown fields. + FieldDescriptorProto_TYPE_GROUP FieldDescriptorProto_Type = 10 + FieldDescriptorProto_TYPE_MESSAGE FieldDescriptorProto_Type = 11 + // New in version 2. 
+ FieldDescriptorProto_TYPE_BYTES FieldDescriptorProto_Type = 12 + FieldDescriptorProto_TYPE_UINT32 FieldDescriptorProto_Type = 13 + FieldDescriptorProto_TYPE_ENUM FieldDescriptorProto_Type = 14 + FieldDescriptorProto_TYPE_SFIXED32 FieldDescriptorProto_Type = 15 + FieldDescriptorProto_TYPE_SFIXED64 FieldDescriptorProto_Type = 16 + FieldDescriptorProto_TYPE_SINT32 FieldDescriptorProto_Type = 17 + FieldDescriptorProto_TYPE_SINT64 FieldDescriptorProto_Type = 18 +) + +var FieldDescriptorProto_Type_name = map[int32]string{ + 1: "TYPE_DOUBLE", + 2: "TYPE_FLOAT", + 3: "TYPE_INT64", + 4: "TYPE_UINT64", + 5: "TYPE_INT32", + 6: "TYPE_FIXED64", + 7: "TYPE_FIXED32", + 8: "TYPE_BOOL", + 9: "TYPE_STRING", + 10: "TYPE_GROUP", + 11: "TYPE_MESSAGE", + 12: "TYPE_BYTES", + 13: "TYPE_UINT32", + 14: "TYPE_ENUM", + 15: "TYPE_SFIXED32", + 16: "TYPE_SFIXED64", + 17: "TYPE_SINT32", + 18: "TYPE_SINT64", +} +var FieldDescriptorProto_Type_value = map[string]int32{ + "TYPE_DOUBLE": 1, + "TYPE_FLOAT": 2, + "TYPE_INT64": 3, + "TYPE_UINT64": 4, + "TYPE_INT32": 5, + "TYPE_FIXED64": 6, + "TYPE_FIXED32": 7, + "TYPE_BOOL": 8, + "TYPE_STRING": 9, + "TYPE_GROUP": 10, + "TYPE_MESSAGE": 11, + "TYPE_BYTES": 12, + "TYPE_UINT32": 13, + "TYPE_ENUM": 14, + "TYPE_SFIXED32": 15, + "TYPE_SFIXED64": 16, + "TYPE_SINT32": 17, + "TYPE_SINT64": 18, +} + +func (x FieldDescriptorProto_Type) Enum() *FieldDescriptorProto_Type { + p := new(FieldDescriptorProto_Type) + *p = x + return p +} +func (x FieldDescriptorProto_Type) String() string { + return proto.EnumName(FieldDescriptorProto_Type_name, int32(x)) +} +func (x *FieldDescriptorProto_Type) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Type_value, data, "FieldDescriptorProto_Type") + if err != nil { + return err + } + *x = FieldDescriptorProto_Type(value) + return nil +} +func (FieldDescriptorProto_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{4, 0} +} + +type FieldDescriptorProto_Label int32 + +const ( + // 0 is reserved for errors + FieldDescriptorProto_LABEL_OPTIONAL FieldDescriptorProto_Label = 1 + FieldDescriptorProto_LABEL_REQUIRED FieldDescriptorProto_Label = 2 + FieldDescriptorProto_LABEL_REPEATED FieldDescriptorProto_Label = 3 +) + +var FieldDescriptorProto_Label_name = map[int32]string{ + 1: "LABEL_OPTIONAL", + 2: "LABEL_REQUIRED", + 3: "LABEL_REPEATED", +} +var FieldDescriptorProto_Label_value = map[string]int32{ + "LABEL_OPTIONAL": 1, + "LABEL_REQUIRED": 2, + "LABEL_REPEATED": 3, +} + +func (x FieldDescriptorProto_Label) Enum() *FieldDescriptorProto_Label { + p := new(FieldDescriptorProto_Label) + *p = x + return p +} +func (x FieldDescriptorProto_Label) String() string { + return proto.EnumName(FieldDescriptorProto_Label_name, int32(x)) +} +func (x *FieldDescriptorProto_Label) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Label_value, data, "FieldDescriptorProto_Label") + if err != nil { + return err + } + *x = FieldDescriptorProto_Label(value) + return nil +} +func (FieldDescriptorProto_Label) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{4, 1} +} + +// Generated classes can be optimized for speed or code size. +type FileOptions_OptimizeMode int32 + +const ( + FileOptions_SPEED FileOptions_OptimizeMode = 1 + // etc. 
+ FileOptions_CODE_SIZE FileOptions_OptimizeMode = 2 + FileOptions_LITE_RUNTIME FileOptions_OptimizeMode = 3 +) + +var FileOptions_OptimizeMode_name = map[int32]string{ + 1: "SPEED", + 2: "CODE_SIZE", + 3: "LITE_RUNTIME", +} +var FileOptions_OptimizeMode_value = map[string]int32{ + "SPEED": 1, + "CODE_SIZE": 2, + "LITE_RUNTIME": 3, +} + +func (x FileOptions_OptimizeMode) Enum() *FileOptions_OptimizeMode { + p := new(FileOptions_OptimizeMode) + *p = x + return p +} +func (x FileOptions_OptimizeMode) String() string { + return proto.EnumName(FileOptions_OptimizeMode_name, int32(x)) +} +func (x *FileOptions_OptimizeMode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FileOptions_OptimizeMode_value, data, "FileOptions_OptimizeMode") + if err != nil { + return err + } + *x = FileOptions_OptimizeMode(value) + return nil +} +func (FileOptions_OptimizeMode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{10, 0} +} + +type FieldOptions_CType int32 + +const ( + // Default mode. + FieldOptions_STRING FieldOptions_CType = 0 + FieldOptions_CORD FieldOptions_CType = 1 + FieldOptions_STRING_PIECE FieldOptions_CType = 2 +) + +var FieldOptions_CType_name = map[int32]string{ + 0: "STRING", + 1: "CORD", + 2: "STRING_PIECE", +} +var FieldOptions_CType_value = map[string]int32{ + "STRING": 0, + "CORD": 1, + "STRING_PIECE": 2, +} + +func (x FieldOptions_CType) Enum() *FieldOptions_CType { + p := new(FieldOptions_CType) + *p = x + return p +} +func (x FieldOptions_CType) String() string { + return proto.EnumName(FieldOptions_CType_name, int32(x)) +} +func (x *FieldOptions_CType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldOptions_CType_value, data, "FieldOptions_CType") + if err != nil { + return err + } + *x = FieldOptions_CType(value) + return nil +} +func (FieldOptions_CType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{12, 0} +} + +type FieldOptions_JSType int32 + +const ( + // Use the default type. + FieldOptions_JS_NORMAL FieldOptions_JSType = 0 + // Use JavaScript strings. + FieldOptions_JS_STRING FieldOptions_JSType = 1 + // Use JavaScript numbers. + FieldOptions_JS_NUMBER FieldOptions_JSType = 2 +) + +var FieldOptions_JSType_name = map[int32]string{ + 0: "JS_NORMAL", + 1: "JS_STRING", + 2: "JS_NUMBER", +} +var FieldOptions_JSType_value = map[string]int32{ + "JS_NORMAL": 0, + "JS_STRING": 1, + "JS_NUMBER": 2, +} + +func (x FieldOptions_JSType) Enum() *FieldOptions_JSType { + p := new(FieldOptions_JSType) + *p = x + return p +} +func (x FieldOptions_JSType) String() string { + return proto.EnumName(FieldOptions_JSType_name, int32(x)) +} +func (x *FieldOptions_JSType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldOptions_JSType_value, data, "FieldOptions_JSType") + if err != nil { + return err + } + *x = FieldOptions_JSType(value) + return nil +} +func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{12, 1} +} + +// Is this method side-effect-free (or safe in HTTP parlance), or idempotent, +// or neither? HTTP based RPC implementation may choose GET verb for safe +// methods, and PUT verb for idempotent methods instead of the default POST. 
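+// For example, a .proto file could mark a read-only method with:
+//   rpc GetThing(GetThingRequest) returns (Thing) {
+//     option idempotency_level = NO_SIDE_EFFECTS;
+//   }
+// (the method and message names here are illustrative).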
+type MethodOptions_IdempotencyLevel int32 + +const ( + MethodOptions_IDEMPOTENCY_UNKNOWN MethodOptions_IdempotencyLevel = 0 + MethodOptions_NO_SIDE_EFFECTS MethodOptions_IdempotencyLevel = 1 + MethodOptions_IDEMPOTENT MethodOptions_IdempotencyLevel = 2 +) + +var MethodOptions_IdempotencyLevel_name = map[int32]string{ + 0: "IDEMPOTENCY_UNKNOWN", + 1: "NO_SIDE_EFFECTS", + 2: "IDEMPOTENT", +} +var MethodOptions_IdempotencyLevel_value = map[string]int32{ + "IDEMPOTENCY_UNKNOWN": 0, + "NO_SIDE_EFFECTS": 1, + "IDEMPOTENT": 2, +} + +func (x MethodOptions_IdempotencyLevel) Enum() *MethodOptions_IdempotencyLevel { + p := new(MethodOptions_IdempotencyLevel) + *p = x + return p +} +func (x MethodOptions_IdempotencyLevel) String() string { + return proto.EnumName(MethodOptions_IdempotencyLevel_name, int32(x)) +} +func (x *MethodOptions_IdempotencyLevel) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(MethodOptions_IdempotencyLevel_value, data, "MethodOptions_IdempotencyLevel") + if err != nil { + return err + } + *x = MethodOptions_IdempotencyLevel(value) + return nil +} +func (MethodOptions_IdempotencyLevel) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{17, 0} +} + +// The protocol compiler can output a FileDescriptorSet containing the .proto +// files it parses. +type FileDescriptorSet struct { + File []*FileDescriptorProto `protobuf:"bytes,1,rep,name=file" json:"file,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FileDescriptorSet) Reset() { *m = FileDescriptorSet{} } +func (m *FileDescriptorSet) String() string { return proto.CompactTextString(m) } +func (*FileDescriptorSet) ProtoMessage() {} +func (*FileDescriptorSet) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{0} +} +func (m *FileDescriptorSet) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FileDescriptorSet.Unmarshal(m, b) +} +func (m *FileDescriptorSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FileDescriptorSet.Marshal(b, m, deterministic) +} +func (dst *FileDescriptorSet) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileDescriptorSet.Merge(dst, src) +} +func (m *FileDescriptorSet) XXX_Size() int { + return xxx_messageInfo_FileDescriptorSet.Size(m) +} +func (m *FileDescriptorSet) XXX_DiscardUnknown() { + xxx_messageInfo_FileDescriptorSet.DiscardUnknown(m) +} + +var xxx_messageInfo_FileDescriptorSet proto.InternalMessageInfo + +func (m *FileDescriptorSet) GetFile() []*FileDescriptorProto { + if m != nil { + return m.File + } + return nil +} + +// Describes a complete .proto file. +type FileDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Package *string `protobuf:"bytes,2,opt,name=package" json:"package,omitempty"` + // Names of files imported by this file. + Dependency []string `protobuf:"bytes,3,rep,name=dependency" json:"dependency,omitempty"` + // Indexes of the public imported files in the dependency list above. + PublicDependency []int32 `protobuf:"varint,10,rep,name=public_dependency,json=publicDependency" json:"public_dependency,omitempty"` + // Indexes of the weak imported files in the dependency list. + // For Google-internal migration only. Do not use. 
+ WeakDependency []int32 `protobuf:"varint,11,rep,name=weak_dependency,json=weakDependency" json:"weak_dependency,omitempty"` + // All top-level definitions in this file. + MessageType []*DescriptorProto `protobuf:"bytes,4,rep,name=message_type,json=messageType" json:"message_type,omitempty"` + EnumType []*EnumDescriptorProto `protobuf:"bytes,5,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"` + Service []*ServiceDescriptorProto `protobuf:"bytes,6,rep,name=service" json:"service,omitempty"` + Extension []*FieldDescriptorProto `protobuf:"bytes,7,rep,name=extension" json:"extension,omitempty"` + Options *FileOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` + // This field contains optional information about the original source code. + // You may safely remove this entire field without harming runtime + // functionality of the descriptors -- the information is needed only by + // development tools. + SourceCodeInfo *SourceCodeInfo `protobuf:"bytes,9,opt,name=source_code_info,json=sourceCodeInfo" json:"source_code_info,omitempty"` + // The syntax of the proto file. + // The supported values are "proto2" and "proto3". + Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FileDescriptorProto) Reset() { *m = FileDescriptorProto{} } +func (m *FileDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*FileDescriptorProto) ProtoMessage() {} +func (*FileDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{1} +} +func (m *FileDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FileDescriptorProto.Unmarshal(m, b) +} +func (m *FileDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FileDescriptorProto.Marshal(b, m, deterministic) +} +func (dst *FileDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileDescriptorProto.Merge(dst, src) +} +func (m *FileDescriptorProto) XXX_Size() int { + return xxx_messageInfo_FileDescriptorProto.Size(m) +} +func (m *FileDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_FileDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_FileDescriptorProto proto.InternalMessageInfo + +func (m *FileDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *FileDescriptorProto) GetPackage() string { + if m != nil && m.Package != nil { + return *m.Package + } + return "" +} + +func (m *FileDescriptorProto) GetDependency() []string { + if m != nil { + return m.Dependency + } + return nil +} + +func (m *FileDescriptorProto) GetPublicDependency() []int32 { + if m != nil { + return m.PublicDependency + } + return nil +} + +func (m *FileDescriptorProto) GetWeakDependency() []int32 { + if m != nil { + return m.WeakDependency + } + return nil +} + +func (m *FileDescriptorProto) GetMessageType() []*DescriptorProto { + if m != nil { + return m.MessageType + } + return nil +} + +func (m *FileDescriptorProto) GetEnumType() []*EnumDescriptorProto { + if m != nil { + return m.EnumType + } + return nil +} + +func (m *FileDescriptorProto) GetService() []*ServiceDescriptorProto { + if m != nil { + return m.Service + } + return nil +} + +func (m *FileDescriptorProto) GetExtension() []*FieldDescriptorProto { + if m != nil { + return m.Extension + } + return nil +} + +func (m 
*FileDescriptorProto) GetOptions() *FileOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *FileDescriptorProto) GetSourceCodeInfo() *SourceCodeInfo { + if m != nil { + return m.SourceCodeInfo + } + return nil +} + +func (m *FileDescriptorProto) GetSyntax() string { + if m != nil && m.Syntax != nil { + return *m.Syntax + } + return "" +} + +// Describes a message type. +type DescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Field []*FieldDescriptorProto `protobuf:"bytes,2,rep,name=field" json:"field,omitempty"` + Extension []*FieldDescriptorProto `protobuf:"bytes,6,rep,name=extension" json:"extension,omitempty"` + NestedType []*DescriptorProto `protobuf:"bytes,3,rep,name=nested_type,json=nestedType" json:"nested_type,omitempty"` + EnumType []*EnumDescriptorProto `protobuf:"bytes,4,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"` + ExtensionRange []*DescriptorProto_ExtensionRange `protobuf:"bytes,5,rep,name=extension_range,json=extensionRange" json:"extension_range,omitempty"` + OneofDecl []*OneofDescriptorProto `protobuf:"bytes,8,rep,name=oneof_decl,json=oneofDecl" json:"oneof_decl,omitempty"` + Options *MessageOptions `protobuf:"bytes,7,opt,name=options" json:"options,omitempty"` + ReservedRange []*DescriptorProto_ReservedRange `protobuf:"bytes,9,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` + // Reserved field names, which may not be used by fields in the same message. + // A given name may only be reserved once. + ReservedName []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DescriptorProto) Reset() { *m = DescriptorProto{} } +func (m *DescriptorProto) String() string { return proto.CompactTextString(m) } +func (*DescriptorProto) ProtoMessage() {} +func (*DescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{2} +} +func (m *DescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DescriptorProto.Unmarshal(m, b) +} +func (m *DescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DescriptorProto.Marshal(b, m, deterministic) +} +func (dst *DescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_DescriptorProto.Merge(dst, src) +} +func (m *DescriptorProto) XXX_Size() int { + return xxx_messageInfo_DescriptorProto.Size(m) +} +func (m *DescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_DescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_DescriptorProto proto.InternalMessageInfo + +func (m *DescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *DescriptorProto) GetField() []*FieldDescriptorProto { + if m != nil { + return m.Field + } + return nil +} + +func (m *DescriptorProto) GetExtension() []*FieldDescriptorProto { + if m != nil { + return m.Extension + } + return nil +} + +func (m *DescriptorProto) GetNestedType() []*DescriptorProto { + if m != nil { + return m.NestedType + } + return nil +} + +func (m *DescriptorProto) GetEnumType() []*EnumDescriptorProto { + if m != nil { + return m.EnumType + } + return nil +} + +func (m *DescriptorProto) GetExtensionRange() []*DescriptorProto_ExtensionRange { + if m != nil { + return m.ExtensionRange + } + return nil +} + +func (m *DescriptorProto) 
GetOneofDecl() []*OneofDescriptorProto { + if m != nil { + return m.OneofDecl + } + return nil +} + +func (m *DescriptorProto) GetOptions() *MessageOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *DescriptorProto) GetReservedRange() []*DescriptorProto_ReservedRange { + if m != nil { + return m.ReservedRange + } + return nil +} + +func (m *DescriptorProto) GetReservedName() []string { + if m != nil { + return m.ReservedName + } + return nil +} + +type DescriptorProto_ExtensionRange struct { + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` + Options *ExtensionRangeOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DescriptorProto_ExtensionRange) Reset() { *m = DescriptorProto_ExtensionRange{} } +func (m *DescriptorProto_ExtensionRange) String() string { return proto.CompactTextString(m) } +func (*DescriptorProto_ExtensionRange) ProtoMessage() {} +func (*DescriptorProto_ExtensionRange) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{2, 0} +} +func (m *DescriptorProto_ExtensionRange) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DescriptorProto_ExtensionRange.Unmarshal(m, b) +} +func (m *DescriptorProto_ExtensionRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DescriptorProto_ExtensionRange.Marshal(b, m, deterministic) +} +func (dst *DescriptorProto_ExtensionRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_DescriptorProto_ExtensionRange.Merge(dst, src) +} +func (m *DescriptorProto_ExtensionRange) XXX_Size() int { + return xxx_messageInfo_DescriptorProto_ExtensionRange.Size(m) +} +func (m *DescriptorProto_ExtensionRange) XXX_DiscardUnknown() { + xxx_messageInfo_DescriptorProto_ExtensionRange.DiscardUnknown(m) +} + +var xxx_messageInfo_DescriptorProto_ExtensionRange proto.InternalMessageInfo + +func (m *DescriptorProto_ExtensionRange) GetStart() int32 { + if m != nil && m.Start != nil { + return *m.Start + } + return 0 +} + +func (m *DescriptorProto_ExtensionRange) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +func (m *DescriptorProto_ExtensionRange) GetOptions() *ExtensionRangeOptions { + if m != nil { + return m.Options + } + return nil +} + +// Range of reserved tag numbers. Reserved tag numbers may not be used by +// fields or extension ranges in the same message. Reserved ranges may +// not overlap. 
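+// Such ranges originate from .proto declarations like
+//   reserved 2, 15, 9 to 11;
+// (the field numbers are illustrative).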
+type DescriptorProto_ReservedRange struct { + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DescriptorProto_ReservedRange) Reset() { *m = DescriptorProto_ReservedRange{} } +func (m *DescriptorProto_ReservedRange) String() string { return proto.CompactTextString(m) } +func (*DescriptorProto_ReservedRange) ProtoMessage() {} +func (*DescriptorProto_ReservedRange) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{2, 1} +} +func (m *DescriptorProto_ReservedRange) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DescriptorProto_ReservedRange.Unmarshal(m, b) +} +func (m *DescriptorProto_ReservedRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DescriptorProto_ReservedRange.Marshal(b, m, deterministic) +} +func (dst *DescriptorProto_ReservedRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_DescriptorProto_ReservedRange.Merge(dst, src) +} +func (m *DescriptorProto_ReservedRange) XXX_Size() int { + return xxx_messageInfo_DescriptorProto_ReservedRange.Size(m) +} +func (m *DescriptorProto_ReservedRange) XXX_DiscardUnknown() { + xxx_messageInfo_DescriptorProto_ReservedRange.DiscardUnknown(m) +} + +var xxx_messageInfo_DescriptorProto_ReservedRange proto.InternalMessageInfo + +func (m *DescriptorProto_ReservedRange) GetStart() int32 { + if m != nil && m.Start != nil { + return *m.Start + } + return 0 +} + +func (m *DescriptorProto_ReservedRange) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +type ExtensionRangeOptions struct { + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExtensionRangeOptions) Reset() { *m = ExtensionRangeOptions{} } +func (m *ExtensionRangeOptions) String() string { return proto.CompactTextString(m) } +func (*ExtensionRangeOptions) ProtoMessage() {} +func (*ExtensionRangeOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{3} +} + +var extRange_ExtensionRangeOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*ExtensionRangeOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_ExtensionRangeOptions +} +func (m *ExtensionRangeOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ExtensionRangeOptions.Unmarshal(m, b) +} +func (m *ExtensionRangeOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ExtensionRangeOptions.Marshal(b, m, deterministic) +} +func (dst *ExtensionRangeOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExtensionRangeOptions.Merge(dst, src) +} +func (m *ExtensionRangeOptions) XXX_Size() int { + return xxx_messageInfo_ExtensionRangeOptions.Size(m) +} +func (m *ExtensionRangeOptions) XXX_DiscardUnknown() { + xxx_messageInfo_ExtensionRangeOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_ExtensionRangeOptions proto.InternalMessageInfo + +func (m *ExtensionRangeOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +// Describes a field within a message. +type FieldDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Number *int32 `protobuf:"varint,3,opt,name=number" json:"number,omitempty"` + Label *FieldDescriptorProto_Label `protobuf:"varint,4,opt,name=label,enum=google.protobuf.FieldDescriptorProto_Label" json:"label,omitempty"` + // If type_name is set, this need not be set. If both this and type_name + // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP. + Type *FieldDescriptorProto_Type `protobuf:"varint,5,opt,name=type,enum=google.protobuf.FieldDescriptorProto_Type" json:"type,omitempty"` + // For message and enum types, this is the name of the type. If the name + // starts with a '.', it is fully-qualified. Otherwise, C++-like scoping + // rules are used to find the type (i.e. first the nested types within this + // message are searched, then within the parent, on up to the root + // namespace). + TypeName *string `protobuf:"bytes,6,opt,name=type_name,json=typeName" json:"type_name,omitempty"` + // For extensions, this is the name of the type being extended. It is + // resolved in the same manner as type_name. + Extendee *string `protobuf:"bytes,2,opt,name=extendee" json:"extendee,omitempty"` + // For numeric types, contains the original text representation of the value. + // For booleans, "true" or "false". + // For strings, contains the default text contents (not escaped in any way). + // For bytes, contains the C escaped value. All bytes >= 128 are escaped. + // TODO(kenton): Base-64 encode? + DefaultValue *string `protobuf:"bytes,7,opt,name=default_value,json=defaultValue" json:"default_value,omitempty"` + // If set, gives the index of a oneof in the containing type's oneof_decl + // list. 
This field is a member of that oneof. + OneofIndex *int32 `protobuf:"varint,9,opt,name=oneof_index,json=oneofIndex" json:"oneof_index,omitempty"` + // JSON name of this field. The value is set by protocol compiler. If the + // user has set a "json_name" option on this field, that option's value + // will be used. Otherwise, it's deduced from the field's name by converting + // it to camelCase. + JsonName *string `protobuf:"bytes,10,opt,name=json_name,json=jsonName" json:"json_name,omitempty"` + Options *FieldOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FieldDescriptorProto) Reset() { *m = FieldDescriptorProto{} } +func (m *FieldDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*FieldDescriptorProto) ProtoMessage() {} +func (*FieldDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{4} +} +func (m *FieldDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FieldDescriptorProto.Unmarshal(m, b) +} +func (m *FieldDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FieldDescriptorProto.Marshal(b, m, deterministic) +} +func (dst *FieldDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_FieldDescriptorProto.Merge(dst, src) +} +func (m *FieldDescriptorProto) XXX_Size() int { + return xxx_messageInfo_FieldDescriptorProto.Size(m) +} +func (m *FieldDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_FieldDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_FieldDescriptorProto proto.InternalMessageInfo + +func (m *FieldDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *FieldDescriptorProto) GetNumber() int32 { + if m != nil && m.Number != nil { + return *m.Number + } + return 0 +} + +func (m *FieldDescriptorProto) GetLabel() FieldDescriptorProto_Label { + if m != nil && m.Label != nil { + return *m.Label + } + return FieldDescriptorProto_LABEL_OPTIONAL +} + +func (m *FieldDescriptorProto) GetType() FieldDescriptorProto_Type { + if m != nil && m.Type != nil { + return *m.Type + } + return FieldDescriptorProto_TYPE_DOUBLE +} + +func (m *FieldDescriptorProto) GetTypeName() string { + if m != nil && m.TypeName != nil { + return *m.TypeName + } + return "" +} + +func (m *FieldDescriptorProto) GetExtendee() string { + if m != nil && m.Extendee != nil { + return *m.Extendee + } + return "" +} + +func (m *FieldDescriptorProto) GetDefaultValue() string { + if m != nil && m.DefaultValue != nil { + return *m.DefaultValue + } + return "" +} + +func (m *FieldDescriptorProto) GetOneofIndex() int32 { + if m != nil && m.OneofIndex != nil { + return *m.OneofIndex + } + return 0 +} + +func (m *FieldDescriptorProto) GetJsonName() string { + if m != nil && m.JsonName != nil { + return *m.JsonName + } + return "" +} + +func (m *FieldDescriptorProto) GetOptions() *FieldOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes a oneof. 
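+// For example, the declaration
+//   oneof result { string name = 4; int32 code = 9; }
+// (names and field numbers illustrative) yields one OneofDescriptorProto
+// whose Name is "result".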
+type OneofDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Options *OneofOptions `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *OneofDescriptorProto) Reset() { *m = OneofDescriptorProto{} } +func (m *OneofDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*OneofDescriptorProto) ProtoMessage() {} +func (*OneofDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{5} +} +func (m *OneofDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_OneofDescriptorProto.Unmarshal(m, b) +} +func (m *OneofDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_OneofDescriptorProto.Marshal(b, m, deterministic) +} +func (dst *OneofDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_OneofDescriptorProto.Merge(dst, src) +} +func (m *OneofDescriptorProto) XXX_Size() int { + return xxx_messageInfo_OneofDescriptorProto.Size(m) +} +func (m *OneofDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_OneofDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_OneofDescriptorProto proto.InternalMessageInfo + +func (m *OneofDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *OneofDescriptorProto) GetOptions() *OneofOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes an enum type. +type EnumDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Value []*EnumValueDescriptorProto `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"` + Options *EnumOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + // Range of reserved numeric values. Reserved numeric values may not be used + // by enum values in the same enum declaration. Reserved ranges may not + // overlap. + ReservedRange []*EnumDescriptorProto_EnumReservedRange `protobuf:"bytes,4,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` + // Reserved enum value names, which may not be reused. A given name may only + // be reserved once. 
+ ReservedName []string `protobuf:"bytes,5,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumDescriptorProto) Reset() { *m = EnumDescriptorProto{} } +func (m *EnumDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*EnumDescriptorProto) ProtoMessage() {} +func (*EnumDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{6} +} +func (m *EnumDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumDescriptorProto.Unmarshal(m, b) +} +func (m *EnumDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumDescriptorProto.Marshal(b, m, deterministic) +} +func (dst *EnumDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumDescriptorProto.Merge(dst, src) +} +func (m *EnumDescriptorProto) XXX_Size() int { + return xxx_messageInfo_EnumDescriptorProto.Size(m) +} +func (m *EnumDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_EnumDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumDescriptorProto proto.InternalMessageInfo + +func (m *EnumDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *EnumDescriptorProto) GetValue() []*EnumValueDescriptorProto { + if m != nil { + return m.Value + } + return nil +} + +func (m *EnumDescriptorProto) GetOptions() *EnumOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *EnumDescriptorProto) GetReservedRange() []*EnumDescriptorProto_EnumReservedRange { + if m != nil { + return m.ReservedRange + } + return nil +} + +func (m *EnumDescriptorProto) GetReservedName() []string { + if m != nil { + return m.ReservedName + } + return nil +} + +// Range of reserved numeric values. Reserved values may not be used by +// entries in the same enum. Reserved ranges may not overlap. +// +// Note that this is distinct from DescriptorProto.ReservedRange in that it +// is inclusive such that it can appropriately represent the entire int32 +// domain. 
+type EnumDescriptorProto_EnumReservedRange struct { + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumDescriptorProto_EnumReservedRange) Reset() { *m = EnumDescriptorProto_EnumReservedRange{} } +func (m *EnumDescriptorProto_EnumReservedRange) String() string { return proto.CompactTextString(m) } +func (*EnumDescriptorProto_EnumReservedRange) ProtoMessage() {} +func (*EnumDescriptorProto_EnumReservedRange) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{6, 0} +} +func (m *EnumDescriptorProto_EnumReservedRange) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Unmarshal(m, b) +} +func (m *EnumDescriptorProto_EnumReservedRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Marshal(b, m, deterministic) +} +func (dst *EnumDescriptorProto_EnumReservedRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Merge(dst, src) +} +func (m *EnumDescriptorProto_EnumReservedRange) XXX_Size() int { + return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Size(m) +} +func (m *EnumDescriptorProto_EnumReservedRange) XXX_DiscardUnknown() { + xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumDescriptorProto_EnumReservedRange proto.InternalMessageInfo + +func (m *EnumDescriptorProto_EnumReservedRange) GetStart() int32 { + if m != nil && m.Start != nil { + return *m.Start + } + return 0 +} + +func (m *EnumDescriptorProto_EnumReservedRange) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +// Describes a value within an enum. 
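To make the inclusive-range remark above concrete, a small sketch; the enum name Status, its values, and the reserved numbers are invented for illustration:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"github.com/golang/protobuf/protoc-gen-go/descriptor"
)

func main() {
	// Rough equivalent of:
	//   enum Status {
	//     STATUS_UNKNOWN = 0;
	//     reserved 5 to 9;
	//     reserved "STATUS_RETIRED";
	//   }
	enum := &descriptor.EnumDescriptorProto{
		Name: proto.String("Status"),
		Value: []*descriptor.EnumValueDescriptorProto{
			{Name: proto.String("STATUS_UNKNOWN"), Number: proto.Int32(0)},
		},
		// Unlike DescriptorProto.ReservedRange, End here is inclusive,
		// so Start=5, End=9 reserves exactly the values 5, 6, 7, 8 and 9.
		ReservedRange: []*descriptor.EnumDescriptorProto_EnumReservedRange{
			{Start: proto.Int32(5), End: proto.Int32(9)},
		},
		ReservedName: []string{"STATUS_RETIRED"},
	}

	r := enum.GetReservedRange()[0]
	fmt.Printf("reserved %d to %d (inclusive)\n", r.GetStart(), r.GetEnd())
}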
+type EnumValueDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Number *int32 `protobuf:"varint,2,opt,name=number" json:"number,omitempty"` + Options *EnumValueOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumValueDescriptorProto) Reset() { *m = EnumValueDescriptorProto{} } +func (m *EnumValueDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*EnumValueDescriptorProto) ProtoMessage() {} +func (*EnumValueDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{7} +} +func (m *EnumValueDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumValueDescriptorProto.Unmarshal(m, b) +} +func (m *EnumValueDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumValueDescriptorProto.Marshal(b, m, deterministic) +} +func (dst *EnumValueDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumValueDescriptorProto.Merge(dst, src) +} +func (m *EnumValueDescriptorProto) XXX_Size() int { + return xxx_messageInfo_EnumValueDescriptorProto.Size(m) +} +func (m *EnumValueDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_EnumValueDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumValueDescriptorProto proto.InternalMessageInfo + +func (m *EnumValueDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *EnumValueDescriptorProto) GetNumber() int32 { + if m != nil && m.Number != nil { + return *m.Number + } + return 0 +} + +func (m *EnumValueDescriptorProto) GetOptions() *EnumValueOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes a service. 
+type ServiceDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Method []*MethodDescriptorProto `protobuf:"bytes,2,rep,name=method" json:"method,omitempty"` + Options *ServiceOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ServiceDescriptorProto) Reset() { *m = ServiceDescriptorProto{} } +func (m *ServiceDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*ServiceDescriptorProto) ProtoMessage() {} +func (*ServiceDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{8} +} +func (m *ServiceDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ServiceDescriptorProto.Unmarshal(m, b) +} +func (m *ServiceDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ServiceDescriptorProto.Marshal(b, m, deterministic) +} +func (dst *ServiceDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceDescriptorProto.Merge(dst, src) +} +func (m *ServiceDescriptorProto) XXX_Size() int { + return xxx_messageInfo_ServiceDescriptorProto.Size(m) +} +func (m *ServiceDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceDescriptorProto proto.InternalMessageInfo + +func (m *ServiceDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *ServiceDescriptorProto) GetMethod() []*MethodDescriptorProto { + if m != nil { + return m.Method + } + return nil +} + +func (m *ServiceDescriptorProto) GetOptions() *ServiceOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes a method of a service. +type MethodDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Input and output type names. These are resolved in the same way as + // FieldDescriptorProto.type_name, but must refer to a message type. 
+ InputType *string `protobuf:"bytes,2,opt,name=input_type,json=inputType" json:"input_type,omitempty"` + OutputType *string `protobuf:"bytes,3,opt,name=output_type,json=outputType" json:"output_type,omitempty"` + Options *MethodOptions `protobuf:"bytes,4,opt,name=options" json:"options,omitempty"` + // Identifies if client streams multiple client messages + ClientStreaming *bool `protobuf:"varint,5,opt,name=client_streaming,json=clientStreaming,def=0" json:"client_streaming,omitempty"` + // Identifies if server streams multiple server messages + ServerStreaming *bool `protobuf:"varint,6,opt,name=server_streaming,json=serverStreaming,def=0" json:"server_streaming,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MethodDescriptorProto) Reset() { *m = MethodDescriptorProto{} } +func (m *MethodDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*MethodDescriptorProto) ProtoMessage() {} +func (*MethodDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{9} +} +func (m *MethodDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MethodDescriptorProto.Unmarshal(m, b) +} +func (m *MethodDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MethodDescriptorProto.Marshal(b, m, deterministic) +} +func (dst *MethodDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_MethodDescriptorProto.Merge(dst, src) +} +func (m *MethodDescriptorProto) XXX_Size() int { + return xxx_messageInfo_MethodDescriptorProto.Size(m) +} +func (m *MethodDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_MethodDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_MethodDescriptorProto proto.InternalMessageInfo + +const Default_MethodDescriptorProto_ClientStreaming bool = false +const Default_MethodDescriptorProto_ServerStreaming bool = false + +func (m *MethodDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *MethodDescriptorProto) GetInputType() string { + if m != nil && m.InputType != nil { + return *m.InputType + } + return "" +} + +func (m *MethodDescriptorProto) GetOutputType() string { + if m != nil && m.OutputType != nil { + return *m.OutputType + } + return "" +} + +func (m *MethodDescriptorProto) GetOptions() *MethodOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *MethodDescriptorProto) GetClientStreaming() bool { + if m != nil && m.ClientStreaming != nil { + return *m.ClientStreaming + } + return Default_MethodDescriptorProto_ClientStreaming +} + +func (m *MethodDescriptorProto) GetServerStreaming() bool { + if m != nil && m.ServerStreaming != nil { + return *m.ServerStreaming + } + return Default_MethodDescriptorProto_ServerStreaming +} + +type FileOptions struct { + // Sets the Java package where classes generated from this .proto will be + // placed. By default, the proto package is used, but this is often + // inappropriate because proto packages do not normally start with backwards + // domain names. + JavaPackage *string `protobuf:"bytes,1,opt,name=java_package,json=javaPackage" json:"java_package,omitempty"` + // If set, all the classes from the .proto file are wrapped in a single + // outer class with the given name. 
+ // This applies to both Proto1
+ // (equivalent to the old "--one_java_file" option) and Proto2 (where
+ // a .proto always translates to a single class, but you may want to
+ // explicitly choose the class name).
+ JavaOuterClassname *string `protobuf:"bytes,8,opt,name=java_outer_classname,json=javaOuterClassname" json:"java_outer_classname,omitempty"`
+ // If set true, then the Java code generator will generate a separate .java
+ // file for each top-level message, enum, and service defined in the .proto
+ // file. Thus, these types will *not* be nested inside the outer class
+ // named by java_outer_classname. However, the outer class will still be
+ // generated to contain the file's getDescriptor() method as well as any
+ // top-level extensions defined in the file.
+ JavaMultipleFiles *bool `protobuf:"varint,10,opt,name=java_multiple_files,json=javaMultipleFiles,def=0" json:"java_multiple_files,omitempty"`
+ // This option does nothing.
+ JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,json=javaGenerateEqualsAndHash" json:"java_generate_equals_and_hash,omitempty"` // Deprecated: Do not use.
+ // If set true, then the Java2 code generator will generate code that
+ // throws an exception whenever an attempt is made to assign a non-UTF-8
+ // byte sequence to a string field.
+ // Message reflection will do the same.
+ // However, an extension field still accepts non-UTF-8 byte sequences.
+ // This option has no effect when used with the lite runtime.
+ JavaStringCheckUtf8 *bool `protobuf:"varint,27,opt,name=java_string_check_utf8,json=javaStringCheckUtf8,def=0" json:"java_string_check_utf8,omitempty"`
+ OptimizeFor *FileOptions_OptimizeMode `protobuf:"varint,9,opt,name=optimize_for,json=optimizeFor,enum=google.protobuf.FileOptions_OptimizeMode,def=1" json:"optimize_for,omitempty"`
+ // Sets the Go package where structs generated from this .proto will be
+ // placed. If omitted, the Go package will be derived from the following:
+ // - The basename of the package import path, if provided.
+ // - Otherwise, the package statement in the .proto file, if present.
+ // - Otherwise, the basename of the .proto file, without extension.
+ GoPackage *string `protobuf:"bytes,11,opt,name=go_package,json=goPackage" json:"go_package,omitempty"`
+ // Should generic services be generated in each language? "Generic" services
+ // are not specific to any particular RPC system. They are generated by the
+ // main code generators in each language (without additional plugins).
+ // Generic services were the only kind of service generation supported by
+ // early versions of google.protobuf.
+ //
+ // Generic services are now considered deprecated in favor of using plugins
+ // that generate code specific to your particular RPC system. Therefore,
+ // these default to false. Old code which depends on generic services should
+ // explicitly set them to true.
+ CcGenericServices *bool `protobuf:"varint,16,opt,name=cc_generic_services,json=ccGenericServices,def=0" json:"cc_generic_services,omitempty"`
+ JavaGenericServices *bool `protobuf:"varint,17,opt,name=java_generic_services,json=javaGenericServices,def=0" json:"java_generic_services,omitempty"`
+ PyGenericServices *bool `protobuf:"varint,18,opt,name=py_generic_services,json=pyGenericServices,def=0" json:"py_generic_services,omitempty"`
+ PhpGenericServices *bool `protobuf:"varint,42,opt,name=php_generic_services,json=phpGenericServices,def=0" json:"php_generic_services,omitempty"`
+ // Is this file deprecated?
+ // Depending on the target platform, this can emit Deprecated annotations
+ // for everything in the file, or it will be completely ignored; in the very
+ // least, this is a formalization for deprecating files.
+ Deprecated *bool `protobuf:"varint,23,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+ // Enables the use of arenas for the proto messages in this file. This applies
+ // only to generated classes for C++.
+ CcEnableArenas *bool `protobuf:"varint,31,opt,name=cc_enable_arenas,json=ccEnableArenas,def=0" json:"cc_enable_arenas,omitempty"`
+ // Sets the objective c class prefix which is prepended to all objective c
+ // generated classes from this .proto. There is no default.
+ ObjcClassPrefix *string `protobuf:"bytes,36,opt,name=objc_class_prefix,json=objcClassPrefix" json:"objc_class_prefix,omitempty"`
+ // Namespace for generated classes; defaults to the package.
+ CsharpNamespace *string `protobuf:"bytes,37,opt,name=csharp_namespace,json=csharpNamespace" json:"csharp_namespace,omitempty"`
+ // By default Swift generators will take the proto package and CamelCase it
+ // replacing '.' with underscore and use that to prefix the types/symbols
+ // defined. When this option is provided, they will use this value instead
+ // to prefix the types/symbols defined.
+ SwiftPrefix *string `protobuf:"bytes,39,opt,name=swift_prefix,json=swiftPrefix" json:"swift_prefix,omitempty"`
+ // Sets the php class prefix which is prepended to all php generated classes
+ // from this .proto. Default is empty.
+ PhpClassPrefix *string `protobuf:"bytes,40,opt,name=php_class_prefix,json=phpClassPrefix" json:"php_class_prefix,omitempty"`
+ // Use this option to change the namespace of php generated classes. Default
+ // is empty. When this option is empty, the package name will be used for
+ // determining the namespace.
+ PhpNamespace *string `protobuf:"bytes,41,opt,name=php_namespace,json=phpNamespace" json:"php_namespace,omitempty"`
+ // The parser stores options it doesn't recognize here.
+ // See the documentation for the "Options" section above.
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FileOptions) Reset() { *m = FileOptions{} } +func (m *FileOptions) String() string { return proto.CompactTextString(m) } +func (*FileOptions) ProtoMessage() {} +func (*FileOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{10} +} + +var extRange_FileOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*FileOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_FileOptions +} +func (m *FileOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FileOptions.Unmarshal(m, b) +} +func (m *FileOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FileOptions.Marshal(b, m, deterministic) +} +func (dst *FileOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileOptions.Merge(dst, src) +} +func (m *FileOptions) XXX_Size() int { + return xxx_messageInfo_FileOptions.Size(m) +} +func (m *FileOptions) XXX_DiscardUnknown() { + xxx_messageInfo_FileOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_FileOptions proto.InternalMessageInfo + +const Default_FileOptions_JavaMultipleFiles bool = false +const Default_FileOptions_JavaStringCheckUtf8 bool = false +const Default_FileOptions_OptimizeFor FileOptions_OptimizeMode = FileOptions_SPEED +const Default_FileOptions_CcGenericServices bool = false +const Default_FileOptions_JavaGenericServices bool = false +const Default_FileOptions_PyGenericServices bool = false +const Default_FileOptions_PhpGenericServices bool = false +const Default_FileOptions_Deprecated bool = false +const Default_FileOptions_CcEnableArenas bool = false + +func (m *FileOptions) GetJavaPackage() string { + if m != nil && m.JavaPackage != nil { + return *m.JavaPackage + } + return "" +} + +func (m *FileOptions) GetJavaOuterClassname() string { + if m != nil && m.JavaOuterClassname != nil { + return *m.JavaOuterClassname + } + return "" +} + +func (m *FileOptions) GetJavaMultipleFiles() bool { + if m != nil && m.JavaMultipleFiles != nil { + return *m.JavaMultipleFiles + } + return Default_FileOptions_JavaMultipleFiles +} + +// Deprecated: Do not use. 
+func (m *FileOptions) GetJavaGenerateEqualsAndHash() bool { + if m != nil && m.JavaGenerateEqualsAndHash != nil { + return *m.JavaGenerateEqualsAndHash + } + return false +} + +func (m *FileOptions) GetJavaStringCheckUtf8() bool { + if m != nil && m.JavaStringCheckUtf8 != nil { + return *m.JavaStringCheckUtf8 + } + return Default_FileOptions_JavaStringCheckUtf8 +} + +func (m *FileOptions) GetOptimizeFor() FileOptions_OptimizeMode { + if m != nil && m.OptimizeFor != nil { + return *m.OptimizeFor + } + return Default_FileOptions_OptimizeFor +} + +func (m *FileOptions) GetGoPackage() string { + if m != nil && m.GoPackage != nil { + return *m.GoPackage + } + return "" +} + +func (m *FileOptions) GetCcGenericServices() bool { + if m != nil && m.CcGenericServices != nil { + return *m.CcGenericServices + } + return Default_FileOptions_CcGenericServices +} + +func (m *FileOptions) GetJavaGenericServices() bool { + if m != nil && m.JavaGenericServices != nil { + return *m.JavaGenericServices + } + return Default_FileOptions_JavaGenericServices +} + +func (m *FileOptions) GetPyGenericServices() bool { + if m != nil && m.PyGenericServices != nil { + return *m.PyGenericServices + } + return Default_FileOptions_PyGenericServices +} + +func (m *FileOptions) GetPhpGenericServices() bool { + if m != nil && m.PhpGenericServices != nil { + return *m.PhpGenericServices + } + return Default_FileOptions_PhpGenericServices +} + +func (m *FileOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_FileOptions_Deprecated +} + +func (m *FileOptions) GetCcEnableArenas() bool { + if m != nil && m.CcEnableArenas != nil { + return *m.CcEnableArenas + } + return Default_FileOptions_CcEnableArenas +} + +func (m *FileOptions) GetObjcClassPrefix() string { + if m != nil && m.ObjcClassPrefix != nil { + return *m.ObjcClassPrefix + } + return "" +} + +func (m *FileOptions) GetCsharpNamespace() string { + if m != nil && m.CsharpNamespace != nil { + return *m.CsharpNamespace + } + return "" +} + +func (m *FileOptions) GetSwiftPrefix() string { + if m != nil && m.SwiftPrefix != nil { + return *m.SwiftPrefix + } + return "" +} + +func (m *FileOptions) GetPhpClassPrefix() string { + if m != nil && m.PhpClassPrefix != nil { + return *m.PhpClassPrefix + } + return "" +} + +func (m *FileOptions) GetPhpNamespace() string { + if m != nil && m.PhpNamespace != nil { + return *m.PhpNamespace + } + return "" +} + +func (m *FileOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type MessageOptions struct { + // Set true to use the old proto1 MessageSet wire format for extensions. + // This is provided for backwards-compatibility with the MessageSet wire + // format. You should not use this for any other reason: It's less + // efficient, has fewer features, and is more complicated. + // + // The message must be defined exactly as follows: + // message Foo { + // option message_set_wire_format = true; + // extensions 4 to max; + // } + // Note that the message cannot have any defined fields; MessageSets only + // have extensions. + // + // All extensions of your type must be singular messages; e.g. they cannot + // be int32s, enums, or repeated messages. + // + // Because this is an option, the above two restrictions are not enforced by + // the protocol compiler. 
+ MessageSetWireFormat *bool `protobuf:"varint,1,opt,name=message_set_wire_format,json=messageSetWireFormat,def=0" json:"message_set_wire_format,omitempty"`
+ // Disables the generation of the standard "descriptor()" accessor, which can
+ // conflict with a field of the same name. This is meant to make migration
+ // from proto1 easier; new code should avoid fields named "descriptor".
+ NoStandardDescriptorAccessor *bool `protobuf:"varint,2,opt,name=no_standard_descriptor_accessor,json=noStandardDescriptorAccessor,def=0" json:"no_standard_descriptor_accessor,omitempty"`
+ // Is this message deprecated?
+ // Depending on the target platform, this can emit Deprecated annotations
+ // for the message, or it will be completely ignored; in the very least,
+ // this is a formalization for deprecating messages.
+ Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+ // Whether the message is an automatically generated map entry type for the
+ // maps field.
+ //
+ // For maps fields:
+ //     map<KeyType, ValueType> map_field = 1;
+ // The parsed descriptor looks like:
+ //     message MapFieldEntry {
+ //         option map_entry = true;
+ //         optional KeyType key = 1;
+ //         optional ValueType value = 2;
+ //     }
+ //     repeated MapFieldEntry map_field = 1;
+ //
+ // Implementations may choose not to generate the map_entry=true message, but
+ // use a native map in the target language to hold the keys and values.
+ // The reflection APIs in such implementations still need to work as
+ // if the field is a repeated message field.
+ //
+ // NOTE: Do not set the option in .proto files. Always use the maps syntax
+ // instead. The option should only be implicitly set by the proto compiler
+ // parser.
+ MapEntry *bool `protobuf:"varint,7,opt,name=map_entry,json=mapEntry" json:"map_entry,omitempty"`
+ // The parser stores options it doesn't recognize here. See above.
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MessageOptions) Reset() { *m = MessageOptions{} } +func (m *MessageOptions) String() string { return proto.CompactTextString(m) } +func (*MessageOptions) ProtoMessage() {} +func (*MessageOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{11} +} + +var extRange_MessageOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*MessageOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_MessageOptions +} +func (m *MessageOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MessageOptions.Unmarshal(m, b) +} +func (m *MessageOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MessageOptions.Marshal(b, m, deterministic) +} +func (dst *MessageOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_MessageOptions.Merge(dst, src) +} +func (m *MessageOptions) XXX_Size() int { + return xxx_messageInfo_MessageOptions.Size(m) +} +func (m *MessageOptions) XXX_DiscardUnknown() { + xxx_messageInfo_MessageOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_MessageOptions proto.InternalMessageInfo + +const Default_MessageOptions_MessageSetWireFormat bool = false +const Default_MessageOptions_NoStandardDescriptorAccessor bool = false +const Default_MessageOptions_Deprecated bool = false + +func (m *MessageOptions) GetMessageSetWireFormat() bool { + if m != nil && m.MessageSetWireFormat != nil { + return *m.MessageSetWireFormat + } + return Default_MessageOptions_MessageSetWireFormat +} + +func (m *MessageOptions) GetNoStandardDescriptorAccessor() bool { + if m != nil && m.NoStandardDescriptorAccessor != nil { + return *m.NoStandardDescriptorAccessor + } + return Default_MessageOptions_NoStandardDescriptorAccessor +} + +func (m *MessageOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_MessageOptions_Deprecated +} + +func (m *MessageOptions) GetMapEntry() bool { + if m != nil && m.MapEntry != nil { + return *m.MapEntry + } + return false +} + +func (m *MessageOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type FieldOptions struct { + // The ctype option instructs the C++ code generator to use a different + // representation of the field than it normally would. See the specific + // options below. This option is not yet implemented in the open source + // release -- sorry, we'll try to include it in a future version! + Ctype *FieldOptions_CType `protobuf:"varint,1,opt,name=ctype,enum=google.protobuf.FieldOptions_CType,def=0" json:"ctype,omitempty"` + // The packed option can be enabled for repeated primitive fields to enable + // a more efficient representation on the wire. Rather than repeatedly + // writing the tag and type for each element, the entire array is encoded as + // a single length-delimited blob. In proto3, only explicit setting it to + // false will avoid using packed encoding. + Packed *bool `protobuf:"varint,2,opt,name=packed" json:"packed,omitempty"` + // The jstype option determines the JavaScript type used for values of the + // field. 
The option is permitted only for 64 bit integral and fixed types + // (int64, uint64, sint64, fixed64, sfixed64). A field with jstype JS_STRING + // is represented as JavaScript string, which avoids loss of precision that + // can happen when a large value is converted to a floating point JavaScript. + // Specifying JS_NUMBER for the jstype causes the generated JavaScript code to + // use the JavaScript "number" type. The behavior of the default option + // JS_NORMAL is implementation dependent. + // + // This option is an enum to permit additional types to be added, e.g. + // goog.math.Integer. + Jstype *FieldOptions_JSType `protobuf:"varint,6,opt,name=jstype,enum=google.protobuf.FieldOptions_JSType,def=0" json:"jstype,omitempty"` + // Should this field be parsed lazily? Lazy applies only to message-type + // fields. It means that when the outer message is initially parsed, the + // inner message's contents will not be parsed but instead stored in encoded + // form. The inner message will actually be parsed when it is first accessed. + // + // This is only a hint. Implementations are free to choose whether to use + // eager or lazy parsing regardless of the value of this option. However, + // setting this option true suggests that the protocol author believes that + // using lazy parsing on this field is worth the additional bookkeeping + // overhead typically needed to implement it. + // + // This option does not affect the public interface of any generated code; + // all method signatures remain the same. Furthermore, thread-safety of the + // interface is not affected by this option; const methods remain safe to + // call from multiple threads concurrently, while non-const methods continue + // to require exclusive access. + // + // + // Note that implementations may choose not to check required fields within + // a lazy sub-message. That is, calling IsInitialized() on the outer message + // may return true even if the inner message has missing required fields. + // This is necessary because otherwise the inner message would have to be + // parsed in order to perform the check, defeating the purpose of lazy + // parsing. An implementation which chooses not to check required fields + // must be consistent about it. That is, for any particular sub-message, the + // implementation must either *always* check its required fields, or *never* + // check its required fields, regardless of whether or not the message has + // been parsed. + Lazy *bool `protobuf:"varint,5,opt,name=lazy,def=0" json:"lazy,omitempty"` + // Is this field deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for accessors, or it will be completely ignored; in the very least, this + // is a formalization for deprecating fields. + Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // For Google-internal migration only. Do not use. + Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FieldOptions) Reset() { *m = FieldOptions{} } +func (m *FieldOptions) String() string { return proto.CompactTextString(m) } +func (*FieldOptions) ProtoMessage() {} +func (*FieldOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{12} +} + +var extRange_FieldOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*FieldOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_FieldOptions +} +func (m *FieldOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FieldOptions.Unmarshal(m, b) +} +func (m *FieldOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FieldOptions.Marshal(b, m, deterministic) +} +func (dst *FieldOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_FieldOptions.Merge(dst, src) +} +func (m *FieldOptions) XXX_Size() int { + return xxx_messageInfo_FieldOptions.Size(m) +} +func (m *FieldOptions) XXX_DiscardUnknown() { + xxx_messageInfo_FieldOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_FieldOptions proto.InternalMessageInfo + +const Default_FieldOptions_Ctype FieldOptions_CType = FieldOptions_STRING +const Default_FieldOptions_Jstype FieldOptions_JSType = FieldOptions_JS_NORMAL +const Default_FieldOptions_Lazy bool = false +const Default_FieldOptions_Deprecated bool = false +const Default_FieldOptions_Weak bool = false + +func (m *FieldOptions) GetCtype() FieldOptions_CType { + if m != nil && m.Ctype != nil { + return *m.Ctype + } + return Default_FieldOptions_Ctype +} + +func (m *FieldOptions) GetPacked() bool { + if m != nil && m.Packed != nil { + return *m.Packed + } + return false +} + +func (m *FieldOptions) GetJstype() FieldOptions_JSType { + if m != nil && m.Jstype != nil { + return *m.Jstype + } + return Default_FieldOptions_Jstype +} + +func (m *FieldOptions) GetLazy() bool { + if m != nil && m.Lazy != nil { + return *m.Lazy + } + return Default_FieldOptions_Lazy +} + +func (m *FieldOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_FieldOptions_Deprecated +} + +func (m *FieldOptions) GetWeak() bool { + if m != nil && m.Weak != nil { + return *m.Weak + } + return Default_FieldOptions_Weak +} + +func (m *FieldOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type OneofOptions struct { + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *OneofOptions) Reset() { *m = OneofOptions{} } +func (m *OneofOptions) String() string { return proto.CompactTextString(m) } +func (*OneofOptions) ProtoMessage() {} +func (*OneofOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{13} +} + +var extRange_OneofOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*OneofOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_OneofOptions +} +func (m *OneofOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_OneofOptions.Unmarshal(m, b) +} +func (m *OneofOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_OneofOptions.Marshal(b, m, deterministic) +} +func (dst *OneofOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_OneofOptions.Merge(dst, src) +} +func (m *OneofOptions) XXX_Size() int { + return xxx_messageInfo_OneofOptions.Size(m) +} +func (m *OneofOptions) XXX_DiscardUnknown() { + xxx_messageInfo_OneofOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_OneofOptions proto.InternalMessageInfo + +func (m *OneofOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type EnumOptions struct { + // Set this option to true to allow mapping different tag names to the same + // value. + AllowAlias *bool `protobuf:"varint,2,opt,name=allow_alias,json=allowAlias" json:"allow_alias,omitempty"` + // Is this enum deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum, or it will be completely ignored; in the very least, this + // is a formalization for deprecating enums. + Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumOptions) Reset() { *m = EnumOptions{} } +func (m *EnumOptions) String() string { return proto.CompactTextString(m) } +func (*EnumOptions) ProtoMessage() {} +func (*EnumOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{14} +} + +var extRange_EnumOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*EnumOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_EnumOptions +} +func (m *EnumOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumOptions.Unmarshal(m, b) +} +func (m *EnumOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumOptions.Marshal(b, m, deterministic) +} +func (dst *EnumOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumOptions.Merge(dst, src) +} +func (m *EnumOptions) XXX_Size() int { + return xxx_messageInfo_EnumOptions.Size(m) +} +func (m *EnumOptions) XXX_DiscardUnknown() { + xxx_messageInfo_EnumOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumOptions proto.InternalMessageInfo + +const Default_EnumOptions_Deprecated bool = false + +func (m *EnumOptions) GetAllowAlias() bool { + if m != nil && m.AllowAlias != nil { + return *m.AllowAlias + } + return false +} + +func (m *EnumOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_EnumOptions_Deprecated +} + +func (m *EnumOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type EnumValueOptions struct { + // Is this enum value deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum value, or it will be completely ignored; in the very least, + // this is a formalization for deprecating enum values. + Deprecated *bool `protobuf:"varint,1,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumValueOptions) Reset() { *m = EnumValueOptions{} } +func (m *EnumValueOptions) String() string { return proto.CompactTextString(m) } +func (*EnumValueOptions) ProtoMessage() {} +func (*EnumValueOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{15} +} + +var extRange_EnumValueOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*EnumValueOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_EnumValueOptions +} +func (m *EnumValueOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumValueOptions.Unmarshal(m, b) +} +func (m *EnumValueOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumValueOptions.Marshal(b, m, deterministic) +} +func (dst *EnumValueOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumValueOptions.Merge(dst, src) +} +func (m *EnumValueOptions) XXX_Size() int { + return xxx_messageInfo_EnumValueOptions.Size(m) +} +func (m *EnumValueOptions) XXX_DiscardUnknown() { + xxx_messageInfo_EnumValueOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumValueOptions proto.InternalMessageInfo + +const Default_EnumValueOptions_Deprecated bool = false + +func (m *EnumValueOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_EnumValueOptions_Deprecated +} + +func (m *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type ServiceOptions struct { + // Is this service deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the service, or it will be completely ignored; in the very least, + // this is a formalization for deprecating services. + Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ServiceOptions) Reset() { *m = ServiceOptions{} } +func (m *ServiceOptions) String() string { return proto.CompactTextString(m) } +func (*ServiceOptions) ProtoMessage() {} +func (*ServiceOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{16} +} + +var extRange_ServiceOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*ServiceOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_ServiceOptions +} +func (m *ServiceOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ServiceOptions.Unmarshal(m, b) +} +func (m *ServiceOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ServiceOptions.Marshal(b, m, deterministic) +} +func (dst *ServiceOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceOptions.Merge(dst, src) +} +func (m *ServiceOptions) XXX_Size() int { + return xxx_messageInfo_ServiceOptions.Size(m) +} +func (m *ServiceOptions) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceOptions proto.InternalMessageInfo + +const Default_ServiceOptions_Deprecated bool = false + +func (m *ServiceOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_ServiceOptions_Deprecated +} + +func (m *ServiceOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type MethodOptions struct { + // Is this method deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the method, or it will be completely ignored; in the very least, + // this is a formalization for deprecating methods. + Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + IdempotencyLevel *MethodOptions_IdempotencyLevel `protobuf:"varint,34,opt,name=idempotency_level,json=idempotencyLevel,enum=google.protobuf.MethodOptions_IdempotencyLevel,def=0" json:"idempotency_level,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MethodOptions) Reset() { *m = MethodOptions{} } +func (m *MethodOptions) String() string { return proto.CompactTextString(m) } +func (*MethodOptions) ProtoMessage() {} +func (*MethodOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{17} +} + +var extRange_MethodOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*MethodOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_MethodOptions +} +func (m *MethodOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MethodOptions.Unmarshal(m, b) +} +func (m *MethodOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MethodOptions.Marshal(b, m, deterministic) +} +func (dst *MethodOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_MethodOptions.Merge(dst, src) +} +func (m *MethodOptions) XXX_Size() int { + return xxx_messageInfo_MethodOptions.Size(m) +} +func (m *MethodOptions) XXX_DiscardUnknown() { + xxx_messageInfo_MethodOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_MethodOptions proto.InternalMessageInfo + +const Default_MethodOptions_Deprecated bool = false +const Default_MethodOptions_IdempotencyLevel MethodOptions_IdempotencyLevel = MethodOptions_IDEMPOTENCY_UNKNOWN + +func (m *MethodOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_MethodOptions_Deprecated +} + +func (m *MethodOptions) GetIdempotencyLevel() MethodOptions_IdempotencyLevel { + if m != nil && m.IdempotencyLevel != nil { + return *m.IdempotencyLevel + } + return Default_MethodOptions_IdempotencyLevel +} + +func (m *MethodOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +// A message representing a option the parser does not recognize. This only +// appears in options protos created by the compiler::Parser class. +// DescriptorPool resolves these when building Descriptor objects. Therefore, +// options protos in descriptor objects (e.g. returned by Descriptor::options(), +// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions +// in them. +type UninterpretedOption struct { + Name []*UninterpretedOption_NamePart `protobuf:"bytes,2,rep,name=name" json:"name,omitempty"` + // The value of the uninterpreted option, in whatever type the tokenizer + // identified it as during parsing. Exactly one of these should be set. 
+ IdentifierValue *string `protobuf:"bytes,3,opt,name=identifier_value,json=identifierValue" json:"identifier_value,omitempty"` + PositiveIntValue *uint64 `protobuf:"varint,4,opt,name=positive_int_value,json=positiveIntValue" json:"positive_int_value,omitempty"` + NegativeIntValue *int64 `protobuf:"varint,5,opt,name=negative_int_value,json=negativeIntValue" json:"negative_int_value,omitempty"` + DoubleValue *float64 `protobuf:"fixed64,6,opt,name=double_value,json=doubleValue" json:"double_value,omitempty"` + StringValue []byte `protobuf:"bytes,7,opt,name=string_value,json=stringValue" json:"string_value,omitempty"` + AggregateValue *string `protobuf:"bytes,8,opt,name=aggregate_value,json=aggregateValue" json:"aggregate_value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UninterpretedOption) Reset() { *m = UninterpretedOption{} } +func (m *UninterpretedOption) String() string { return proto.CompactTextString(m) } +func (*UninterpretedOption) ProtoMessage() {} +func (*UninterpretedOption) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{18} +} +func (m *UninterpretedOption) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UninterpretedOption.Unmarshal(m, b) +} +func (m *UninterpretedOption) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UninterpretedOption.Marshal(b, m, deterministic) +} +func (dst *UninterpretedOption) XXX_Merge(src proto.Message) { + xxx_messageInfo_UninterpretedOption.Merge(dst, src) +} +func (m *UninterpretedOption) XXX_Size() int { + return xxx_messageInfo_UninterpretedOption.Size(m) +} +func (m *UninterpretedOption) XXX_DiscardUnknown() { + xxx_messageInfo_UninterpretedOption.DiscardUnknown(m) +} + +var xxx_messageInfo_UninterpretedOption proto.InternalMessageInfo + +func (m *UninterpretedOption) GetName() []*UninterpretedOption_NamePart { + if m != nil { + return m.Name + } + return nil +} + +func (m *UninterpretedOption) GetIdentifierValue() string { + if m != nil && m.IdentifierValue != nil { + return *m.IdentifierValue + } + return "" +} + +func (m *UninterpretedOption) GetPositiveIntValue() uint64 { + if m != nil && m.PositiveIntValue != nil { + return *m.PositiveIntValue + } + return 0 +} + +func (m *UninterpretedOption) GetNegativeIntValue() int64 { + if m != nil && m.NegativeIntValue != nil { + return *m.NegativeIntValue + } + return 0 +} + +func (m *UninterpretedOption) GetDoubleValue() float64 { + if m != nil && m.DoubleValue != nil { + return *m.DoubleValue + } + return 0 +} + +func (m *UninterpretedOption) GetStringValue() []byte { + if m != nil { + return m.StringValue + } + return nil +} + +func (m *UninterpretedOption) GetAggregateValue() string { + if m != nil && m.AggregateValue != nil { + return *m.AggregateValue + } + return "" +} + +// The name of the uninterpreted option. Each string represents a segment in +// a dot-separated name. is_extension is true iff a segment represents an +// extension (denoted with parentheses in options specs in .proto files). +// E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents +// "foo.(bar.baz).qux". 
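A short sketch of turning name parts back into the dotted form described above; formatOptionName is a hypothetical helper written for illustration, not part of this package:

package main

import (
	"fmt"
	"strings"

	"github.com/golang/protobuf/proto"
	"github.com/golang/protobuf/protoc-gen-go/descriptor"
)

// formatOptionName renders the name parts of an UninterpretedOption in
// the dotted form used in .proto files, wrapping extension segments in
// parentheses. (Illustrative helper; not part of the generated package.)
func formatOptionName(parts []*descriptor.UninterpretedOption_NamePart) string {
	segs := make([]string, 0, len(parts))
	for _, p := range parts {
		if p.GetIsExtension() {
			segs = append(segs, "("+p.GetNamePart()+")")
		} else {
			segs = append(segs, p.GetNamePart())
		}
	}
	return strings.Join(segs, ".")
}

func main() {
	name := []*descriptor.UninterpretedOption_NamePart{
		{NamePart: proto.String("foo"), IsExtension: proto.Bool(false)},
		{NamePart: proto.String("bar.baz"), IsExtension: proto.Bool(true)},
		{NamePart: proto.String("qux"), IsExtension: proto.Bool(false)},
	}
	fmt.Println(formatOptionName(name)) // foo.(bar.baz).qux
}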
+type UninterpretedOption_NamePart struct { + NamePart *string `protobuf:"bytes,1,req,name=name_part,json=namePart" json:"name_part,omitempty"` + IsExtension *bool `protobuf:"varint,2,req,name=is_extension,json=isExtension" json:"is_extension,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UninterpretedOption_NamePart) Reset() { *m = UninterpretedOption_NamePart{} } +func (m *UninterpretedOption_NamePart) String() string { return proto.CompactTextString(m) } +func (*UninterpretedOption_NamePart) ProtoMessage() {} +func (*UninterpretedOption_NamePart) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{18, 0} +} +func (m *UninterpretedOption_NamePart) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UninterpretedOption_NamePart.Unmarshal(m, b) +} +func (m *UninterpretedOption_NamePart) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UninterpretedOption_NamePart.Marshal(b, m, deterministic) +} +func (dst *UninterpretedOption_NamePart) XXX_Merge(src proto.Message) { + xxx_messageInfo_UninterpretedOption_NamePart.Merge(dst, src) +} +func (m *UninterpretedOption_NamePart) XXX_Size() int { + return xxx_messageInfo_UninterpretedOption_NamePart.Size(m) +} +func (m *UninterpretedOption_NamePart) XXX_DiscardUnknown() { + xxx_messageInfo_UninterpretedOption_NamePart.DiscardUnknown(m) +} + +var xxx_messageInfo_UninterpretedOption_NamePart proto.InternalMessageInfo + +func (m *UninterpretedOption_NamePart) GetNamePart() string { + if m != nil && m.NamePart != nil { + return *m.NamePart + } + return "" +} + +func (m *UninterpretedOption_NamePart) GetIsExtension() bool { + if m != nil && m.IsExtension != nil { + return *m.IsExtension + } + return false +} + +// Encapsulates information about the original source file from which a +// FileDescriptorProto was generated. +type SourceCodeInfo struct { + // A Location identifies a piece of source code in a .proto file which + // corresponds to a particular definition. This information is intended + // to be useful to IDEs, code indexers, documentation generators, and similar + // tools. + // + // For example, say we have a file like: + // message Foo { + // optional string foo = 1; + // } + // Let's look at just the field definition: + // optional string foo = 1; + // ^ ^^ ^^ ^ ^^^ + // a bc de f ghi + // We have the following locations: + // span path represents + // [a,i) [ 4, 0, 2, 0 ] The whole field definition. + // [a,b) [ 4, 0, 2, 0, 4 ] The label (optional). + // [c,d) [ 4, 0, 2, 0, 5 ] The type (string). + // [e,f) [ 4, 0, 2, 0, 1 ] The name (foo). + // [g,h) [ 4, 0, 2, 0, 3 ] The number (1). + // + // Notes: + // - A location may refer to a repeated field itself (i.e. not to any + // particular index within it). This is used whenever a set of elements are + // logically enclosed in a single code segment. For example, an entire + // extend block (possibly containing multiple extension definitions) will + // have an outer location whose path refers to the "extensions" repeated + // field without an index. + // - Multiple locations may have the same path. This happens when a single + // logical declaration is spread out across multiple places. The most + // obvious example is the "extend" block again -- there may be multiple + // extend blocks in the same scope, each of which will have the same path. + // - A location's span is not always a subset of its parent's span. 
For + // example, the "extendee" of an extension declaration appears at the + // beginning of the "extend" block and is shared by all extensions within + // the block. + // - Just because a location's span is a subset of some other location's span + // does not mean that it is a descendent. For example, a "group" defines + // both a type and a field in a single declaration. Thus, the locations + // corresponding to the type and field and their components will overlap. + // - Code which tries to interpret locations should probably be designed to + // ignore those that it doesn't understand, as more types of locations could + // be recorded in the future. + Location []*SourceCodeInfo_Location `protobuf:"bytes,1,rep,name=location" json:"location,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SourceCodeInfo) Reset() { *m = SourceCodeInfo{} } +func (m *SourceCodeInfo) String() string { return proto.CompactTextString(m) } +func (*SourceCodeInfo) ProtoMessage() {} +func (*SourceCodeInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{19} +} +func (m *SourceCodeInfo) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SourceCodeInfo.Unmarshal(m, b) +} +func (m *SourceCodeInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SourceCodeInfo.Marshal(b, m, deterministic) +} +func (dst *SourceCodeInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_SourceCodeInfo.Merge(dst, src) +} +func (m *SourceCodeInfo) XXX_Size() int { + return xxx_messageInfo_SourceCodeInfo.Size(m) +} +func (m *SourceCodeInfo) XXX_DiscardUnknown() { + xxx_messageInfo_SourceCodeInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_SourceCodeInfo proto.InternalMessageInfo + +func (m *SourceCodeInfo) GetLocation() []*SourceCodeInfo_Location { + if m != nil { + return m.Location + } + return nil +} + +type SourceCodeInfo_Location struct { + // Identifies which part of the FileDescriptorProto was defined at this + // location. + // + // Each element is a field number or an index. They form a path from + // the root FileDescriptorProto to the place where the definition. For + // example, this path: + // [ 4, 3, 2, 7, 1 ] + // refers to: + // file.message_type(3) // 4, 3 + // .field(7) // 2, 7 + // .name() // 1 + // This is because FileDescriptorProto.message_type has field number 4: + // repeated DescriptorProto message_type = 4; + // and DescriptorProto.field has field number 2: + // repeated FieldDescriptorProto field = 2; + // and FieldDescriptorProto.name has field number 1: + // optional string name = 1; + // + // Thus, the above path gives the location of a field name. If we removed + // the last element: + // [ 4, 3, 2, 7 ] + // this path refers to the whole field declaration (from the beginning + // of the label to the terminating semicolon). + Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` + // Always has exactly three or four elements: start line, start column, + // end line (optional, otherwise assumed same as start line), end column. + // These are packed into a single field for efficiency. Note that line + // and column numbers are zero-based -- typically you will want to add + // 1 to each before displaying to a user. 
+ Span []int32 `protobuf:"varint,2,rep,packed,name=span" json:"span,omitempty"` + // If this SourceCodeInfo represents a complete declaration, these are any + // comments appearing before and after the declaration which appear to be + // attached to the declaration. + // + // A series of line comments appearing on consecutive lines, with no other + // tokens appearing on those lines, will be treated as a single comment. + // + // leading_detached_comments will keep paragraphs of comments that appear + // before (but not connected to) the current element. Each paragraph, + // separated by empty lines, will be one comment element in the repeated + // field. + // + // Only the comment content is provided; comment markers (e.g. //) are + // stripped out. For block comments, leading whitespace and an asterisk + // will be stripped from the beginning of each line other than the first. + // Newlines are included in the output. + // + // Examples: + // + // optional int32 foo = 1; // Comment attached to foo. + // // Comment attached to bar. + // optional int32 bar = 2; + // + // optional string baz = 3; + // // Comment attached to baz. + // // Another line attached to baz. + // + // // Comment attached to qux. + // // + // // Another line attached to qux. + // optional double qux = 4; + // + // // Detached comment for corge. This is not leading or trailing comments + // // to qux or corge because there are blank lines separating it from + // // both. + // + // // Detached comment for corge paragraph 2. + // + // optional string corge = 5; + // /* Block comment attached + // * to corge. Leading asterisks + // * will be removed. */ + // /* Block comment attached to + // * grault. */ + // optional int32 grault = 6; + // + // // ignored detached comments. + LeadingComments *string `protobuf:"bytes,3,opt,name=leading_comments,json=leadingComments" json:"leading_comments,omitempty"` + TrailingComments *string `protobuf:"bytes,4,opt,name=trailing_comments,json=trailingComments" json:"trailing_comments,omitempty"` + LeadingDetachedComments []string `protobuf:"bytes,6,rep,name=leading_detached_comments,json=leadingDetachedComments" json:"leading_detached_comments,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SourceCodeInfo_Location) Reset() { *m = SourceCodeInfo_Location{} } +func (m *SourceCodeInfo_Location) String() string { return proto.CompactTextString(m) } +func (*SourceCodeInfo_Location) ProtoMessage() {} +func (*SourceCodeInfo_Location) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{19, 0} +} +func (m *SourceCodeInfo_Location) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SourceCodeInfo_Location.Unmarshal(m, b) +} +func (m *SourceCodeInfo_Location) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SourceCodeInfo_Location.Marshal(b, m, deterministic) +} +func (dst *SourceCodeInfo_Location) XXX_Merge(src proto.Message) { + xxx_messageInfo_SourceCodeInfo_Location.Merge(dst, src) +} +func (m *SourceCodeInfo_Location) XXX_Size() int { + return xxx_messageInfo_SourceCodeInfo_Location.Size(m) +} +func (m *SourceCodeInfo_Location) XXX_DiscardUnknown() { + xxx_messageInfo_SourceCodeInfo_Location.DiscardUnknown(m) +} + +var xxx_messageInfo_SourceCodeInfo_Location proto.InternalMessageInfo + +func (m *SourceCodeInfo_Location) GetPath() []int32 { + if m != nil { + return m.Path + } + return nil +} + +func (m 
*SourceCodeInfo_Location) GetSpan() []int32 { + if m != nil { + return m.Span + } + return nil +} + +func (m *SourceCodeInfo_Location) GetLeadingComments() string { + if m != nil && m.LeadingComments != nil { + return *m.LeadingComments + } + return "" +} + +func (m *SourceCodeInfo_Location) GetTrailingComments() string { + if m != nil && m.TrailingComments != nil { + return *m.TrailingComments + } + return "" +} + +func (m *SourceCodeInfo_Location) GetLeadingDetachedComments() []string { + if m != nil { + return m.LeadingDetachedComments + } + return nil +} + +// Describes the relationship between generated code and its original source +// file. A GeneratedCodeInfo message is associated with only one generated +// source file, but may contain references to different source .proto files. +type GeneratedCodeInfo struct { + // An Annotation connects some span of text in generated code to an element + // of its generating .proto file. + Annotation []*GeneratedCodeInfo_Annotation `protobuf:"bytes,1,rep,name=annotation" json:"annotation,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GeneratedCodeInfo) Reset() { *m = GeneratedCodeInfo{} } +func (m *GeneratedCodeInfo) String() string { return proto.CompactTextString(m) } +func (*GeneratedCodeInfo) ProtoMessage() {} +func (*GeneratedCodeInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{20} +} +func (m *GeneratedCodeInfo) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GeneratedCodeInfo.Unmarshal(m, b) +} +func (m *GeneratedCodeInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GeneratedCodeInfo.Marshal(b, m, deterministic) +} +func (dst *GeneratedCodeInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_GeneratedCodeInfo.Merge(dst, src) +} +func (m *GeneratedCodeInfo) XXX_Size() int { + return xxx_messageInfo_GeneratedCodeInfo.Size(m) +} +func (m *GeneratedCodeInfo) XXX_DiscardUnknown() { + xxx_messageInfo_GeneratedCodeInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_GeneratedCodeInfo proto.InternalMessageInfo + +func (m *GeneratedCodeInfo) GetAnnotation() []*GeneratedCodeInfo_Annotation { + if m != nil { + return m.Annotation + } + return nil +} + +type GeneratedCodeInfo_Annotation struct { + // Identifies the element in the original source .proto file. This field + // is formatted the same as SourceCodeInfo.Location.path. + Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` + // Identifies the filesystem path to the original source .proto. + SourceFile *string `protobuf:"bytes,2,opt,name=source_file,json=sourceFile" json:"source_file,omitempty"` + // Identifies the starting offset in bytes in the generated code + // that relates to the identified object. + Begin *int32 `protobuf:"varint,3,opt,name=begin" json:"begin,omitempty"` + // Identifies the ending offset in bytes in the generated code that + // relates to the identified offset. The end offset should be one past + // the last relevant byte (so the length of the text = end - begin). 
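+ // Illustrative example (editorial, not part of the upstream comment):
+ // if the generated text for this element is "foo" and it starts at
+ // byte offset 10, then begin == 10 and end == 13 (13 - 10 == len("foo")).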
+ End *int32 `protobuf:"varint,4,opt,name=end" json:"end,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GeneratedCodeInfo_Annotation) Reset() { *m = GeneratedCodeInfo_Annotation{} } +func (m *GeneratedCodeInfo_Annotation) String() string { return proto.CompactTextString(m) } +func (*GeneratedCodeInfo_Annotation) ProtoMessage() {} +func (*GeneratedCodeInfo_Annotation) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{20, 0} +} +func (m *GeneratedCodeInfo_Annotation) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GeneratedCodeInfo_Annotation.Unmarshal(m, b) +} +func (m *GeneratedCodeInfo_Annotation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GeneratedCodeInfo_Annotation.Marshal(b, m, deterministic) +} +func (dst *GeneratedCodeInfo_Annotation) XXX_Merge(src proto.Message) { + xxx_messageInfo_GeneratedCodeInfo_Annotation.Merge(dst, src) +} +func (m *GeneratedCodeInfo_Annotation) XXX_Size() int { + return xxx_messageInfo_GeneratedCodeInfo_Annotation.Size(m) +} +func (m *GeneratedCodeInfo_Annotation) XXX_DiscardUnknown() { + xxx_messageInfo_GeneratedCodeInfo_Annotation.DiscardUnknown(m) +} + +var xxx_messageInfo_GeneratedCodeInfo_Annotation proto.InternalMessageInfo + +func (m *GeneratedCodeInfo_Annotation) GetPath() []int32 { + if m != nil { + return m.Path + } + return nil +} + +func (m *GeneratedCodeInfo_Annotation) GetSourceFile() string { + if m != nil && m.SourceFile != nil { + return *m.SourceFile + } + return "" +} + +func (m *GeneratedCodeInfo_Annotation) GetBegin() int32 { + if m != nil && m.Begin != nil { + return *m.Begin + } + return 0 +} + +func (m *GeneratedCodeInfo_Annotation) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +func init() { + proto.RegisterType((*FileDescriptorSet)(nil), "google.protobuf.FileDescriptorSet") + proto.RegisterType((*FileDescriptorProto)(nil), "google.protobuf.FileDescriptorProto") + proto.RegisterType((*DescriptorProto)(nil), "google.protobuf.DescriptorProto") + proto.RegisterType((*DescriptorProto_ExtensionRange)(nil), "google.protobuf.DescriptorProto.ExtensionRange") + proto.RegisterType((*DescriptorProto_ReservedRange)(nil), "google.protobuf.DescriptorProto.ReservedRange") + proto.RegisterType((*ExtensionRangeOptions)(nil), "google.protobuf.ExtensionRangeOptions") + proto.RegisterType((*FieldDescriptorProto)(nil), "google.protobuf.FieldDescriptorProto") + proto.RegisterType((*OneofDescriptorProto)(nil), "google.protobuf.OneofDescriptorProto") + proto.RegisterType((*EnumDescriptorProto)(nil), "google.protobuf.EnumDescriptorProto") + proto.RegisterType((*EnumDescriptorProto_EnumReservedRange)(nil), "google.protobuf.EnumDescriptorProto.EnumReservedRange") + proto.RegisterType((*EnumValueDescriptorProto)(nil), "google.protobuf.EnumValueDescriptorProto") + proto.RegisterType((*ServiceDescriptorProto)(nil), "google.protobuf.ServiceDescriptorProto") + proto.RegisterType((*MethodDescriptorProto)(nil), "google.protobuf.MethodDescriptorProto") + proto.RegisterType((*FileOptions)(nil), "google.protobuf.FileOptions") + proto.RegisterType((*MessageOptions)(nil), "google.protobuf.MessageOptions") + proto.RegisterType((*FieldOptions)(nil), "google.protobuf.FieldOptions") + proto.RegisterType((*OneofOptions)(nil), "google.protobuf.OneofOptions") + proto.RegisterType((*EnumOptions)(nil), "google.protobuf.EnumOptions") + 
proto.RegisterType((*EnumValueOptions)(nil), "google.protobuf.EnumValueOptions") + proto.RegisterType((*ServiceOptions)(nil), "google.protobuf.ServiceOptions") + proto.RegisterType((*MethodOptions)(nil), "google.protobuf.MethodOptions") + proto.RegisterType((*UninterpretedOption)(nil), "google.protobuf.UninterpretedOption") + proto.RegisterType((*UninterpretedOption_NamePart)(nil), "google.protobuf.UninterpretedOption.NamePart") + proto.RegisterType((*SourceCodeInfo)(nil), "google.protobuf.SourceCodeInfo") + proto.RegisterType((*SourceCodeInfo_Location)(nil), "google.protobuf.SourceCodeInfo.Location") + proto.RegisterType((*GeneratedCodeInfo)(nil), "google.protobuf.GeneratedCodeInfo") + proto.RegisterType((*GeneratedCodeInfo_Annotation)(nil), "google.protobuf.GeneratedCodeInfo.Annotation") + proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Type", FieldDescriptorProto_Type_name, FieldDescriptorProto_Type_value) + proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Label", FieldDescriptorProto_Label_name, FieldDescriptorProto_Label_value) + proto.RegisterEnum("google.protobuf.FileOptions_OptimizeMode", FileOptions_OptimizeMode_name, FileOptions_OptimizeMode_value) + proto.RegisterEnum("google.protobuf.FieldOptions_CType", FieldOptions_CType_name, FieldOptions_CType_value) + proto.RegisterEnum("google.protobuf.FieldOptions_JSType", FieldOptions_JSType_name, FieldOptions_JSType_value) + proto.RegisterEnum("google.protobuf.MethodOptions_IdempotencyLevel", MethodOptions_IdempotencyLevel_name, MethodOptions_IdempotencyLevel_value) +} + +func init() { + proto.RegisterFile("google/protobuf/descriptor.proto", fileDescriptor_descriptor_4df4cb5f42392df6) +} + +var fileDescriptor_descriptor_4df4cb5f42392df6 = []byte{ + // 2555 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x59, 0xdd, 0x6e, 0x1b, 0xc7, + 0xf5, 0xcf, 0xf2, 0x4b, 0xe4, 0x21, 0x45, 0x8d, 0x46, 0x8a, 0xbd, 0x56, 0x3e, 0x2c, 0x33, 0x1f, + 0x96, 0x9d, 0x7f, 0xa8, 0xc0, 0xb1, 0x1d, 0x47, 0xfe, 0x23, 0x2d, 0x45, 0xae, 0x15, 0xaa, 0x12, + 0xc9, 0x2e, 0xa9, 0xe6, 0x03, 0x28, 0x16, 0xa3, 0xdd, 0x21, 0xb9, 0xf6, 0x72, 0x77, 0xb3, 0xbb, + 0xb4, 0xad, 0xa0, 0x17, 0x06, 0x7a, 0xd5, 0xab, 0xde, 0x16, 0x45, 0xd1, 0x8b, 0xde, 0x04, 0xe8, + 0x03, 0x14, 0xc8, 0x5d, 0x9f, 0xa0, 0x40, 0xde, 0xa0, 0x68, 0x0b, 0xb4, 0x8f, 0xd0, 0xcb, 0x62, + 0x66, 0x76, 0x97, 0xbb, 0x24, 0x15, 0x2b, 0x01, 0xe2, 0x5c, 0x91, 0xf3, 0x9b, 0xdf, 0x39, 0x73, + 0xe6, 0xcc, 0x99, 0x33, 0x67, 0x66, 0x61, 0x7b, 0xe4, 0x38, 0x23, 0x8b, 0xee, 0xba, 0x9e, 0x13, + 0x38, 0xa7, 0xd3, 0xe1, 0xae, 0x41, 0x7d, 0xdd, 0x33, 0xdd, 0xc0, 0xf1, 0xea, 0x1c, 0xc3, 0x6b, + 0x82, 0x51, 0x8f, 0x18, 0xb5, 0x63, 0x58, 0x7f, 0x60, 0x5a, 0xb4, 0x15, 0x13, 0xfb, 0x34, 0xc0, + 0xf7, 0x20, 0x37, 0x34, 0x2d, 0x2a, 0x4b, 0xdb, 0xd9, 0x9d, 0xf2, 0xad, 0x37, 0xeb, 0x73, 0x42, + 0xf5, 0xb4, 0x44, 0x8f, 0xc1, 0x2a, 0x97, 0xa8, 0xfd, 0x2b, 0x07, 0x1b, 0x4b, 0x7a, 0x31, 0x86, + 0x9c, 0x4d, 0x26, 0x4c, 0xa3, 0xb4, 0x53, 0x52, 0xf9, 0x7f, 0x2c, 0xc3, 0x8a, 0x4b, 0xf4, 0x47, + 0x64, 0x44, 0xe5, 0x0c, 0x87, 0xa3, 0x26, 0x7e, 0x1d, 0xc0, 0xa0, 0x2e, 0xb5, 0x0d, 0x6a, 0xeb, + 0x67, 0x72, 0x76, 0x3b, 0xbb, 0x53, 0x52, 0x13, 0x08, 0x7e, 0x07, 0xd6, 0xdd, 0xe9, 0xa9, 0x65, + 0xea, 0x5a, 0x82, 0x06, 0xdb, 0xd9, 0x9d, 0xbc, 0x8a, 0x44, 0x47, 0x6b, 0x46, 0xbe, 0x0e, 0x6b, + 0x4f, 0x28, 0x79, 0x94, 0xa4, 0x96, 0x39, 0xb5, 0xca, 0xe0, 0x04, 0xb1, 0x09, 0x95, 0x09, 0xf5, + 0x7d, 0x32, 0xa2, 0x5a, 0x70, 0xe6, 0x52, 0x39, 0xc7, 0x67, 0xbf, 0xbd, 0x30, 0xfb, 0xf9, 
0x99, + 0x97, 0x43, 0xa9, 0xc1, 0x99, 0x4b, 0x71, 0x03, 0x4a, 0xd4, 0x9e, 0x4e, 0x84, 0x86, 0xfc, 0x39, + 0xfe, 0x53, 0xec, 0xe9, 0x64, 0x5e, 0x4b, 0x91, 0x89, 0x85, 0x2a, 0x56, 0x7c, 0xea, 0x3d, 0x36, + 0x75, 0x2a, 0x17, 0xb8, 0x82, 0xeb, 0x0b, 0x0a, 0xfa, 0xa2, 0x7f, 0x5e, 0x47, 0x24, 0x87, 0x9b, + 0x50, 0xa2, 0x4f, 0x03, 0x6a, 0xfb, 0xa6, 0x63, 0xcb, 0x2b, 0x5c, 0xc9, 0x5b, 0x4b, 0x56, 0x91, + 0x5a, 0xc6, 0xbc, 0x8a, 0x99, 0x1c, 0xbe, 0x0b, 0x2b, 0x8e, 0x1b, 0x98, 0x8e, 0xed, 0xcb, 0xc5, + 0x6d, 0x69, 0xa7, 0x7c, 0xeb, 0xd5, 0xa5, 0x81, 0xd0, 0x15, 0x1c, 0x35, 0x22, 0xe3, 0x36, 0x20, + 0xdf, 0x99, 0x7a, 0x3a, 0xd5, 0x74, 0xc7, 0xa0, 0x9a, 0x69, 0x0f, 0x1d, 0xb9, 0xc4, 0x15, 0x5c, + 0x5d, 0x9c, 0x08, 0x27, 0x36, 0x1d, 0x83, 0xb6, 0xed, 0xa1, 0xa3, 0x56, 0xfd, 0x54, 0x1b, 0x5f, + 0x82, 0x82, 0x7f, 0x66, 0x07, 0xe4, 0xa9, 0x5c, 0xe1, 0x11, 0x12, 0xb6, 0x6a, 0x5f, 0x17, 0x60, + 0xed, 0x22, 0x21, 0x76, 0x1f, 0xf2, 0x43, 0x36, 0x4b, 0x39, 0xf3, 0x5d, 0x7c, 0x20, 0x64, 0xd2, + 0x4e, 0x2c, 0x7c, 0x4f, 0x27, 0x36, 0xa0, 0x6c, 0x53, 0x3f, 0xa0, 0x86, 0x88, 0x88, 0xec, 0x05, + 0x63, 0x0a, 0x84, 0xd0, 0x62, 0x48, 0xe5, 0xbe, 0x57, 0x48, 0x7d, 0x0a, 0x6b, 0xb1, 0x49, 0x9a, + 0x47, 0xec, 0x51, 0x14, 0x9b, 0xbb, 0xcf, 0xb3, 0xa4, 0xae, 0x44, 0x72, 0x2a, 0x13, 0x53, 0xab, + 0x34, 0xd5, 0xc6, 0x2d, 0x00, 0xc7, 0xa6, 0xce, 0x50, 0x33, 0xa8, 0x6e, 0xc9, 0xc5, 0x73, 0xbc, + 0xd4, 0x65, 0x94, 0x05, 0x2f, 0x39, 0x02, 0xd5, 0x2d, 0xfc, 0xe1, 0x2c, 0xd4, 0x56, 0xce, 0x89, + 0x94, 0x63, 0xb1, 0xc9, 0x16, 0xa2, 0xed, 0x04, 0xaa, 0x1e, 0x65, 0x71, 0x4f, 0x8d, 0x70, 0x66, + 0x25, 0x6e, 0x44, 0xfd, 0xb9, 0x33, 0x53, 0x43, 0x31, 0x31, 0xb1, 0x55, 0x2f, 0xd9, 0xc4, 0x6f, + 0x40, 0x0c, 0x68, 0x3c, 0xac, 0x80, 0x67, 0xa1, 0x4a, 0x04, 0x76, 0xc8, 0x84, 0x6e, 0x7d, 0x09, + 0xd5, 0xb4, 0x7b, 0xf0, 0x26, 0xe4, 0xfd, 0x80, 0x78, 0x01, 0x8f, 0xc2, 0xbc, 0x2a, 0x1a, 0x18, + 0x41, 0x96, 0xda, 0x06, 0xcf, 0x72, 0x79, 0x95, 0xfd, 0xc5, 0x3f, 0x9d, 0x4d, 0x38, 0xcb, 0x27, + 0xfc, 0xf6, 0xe2, 0x8a, 0xa6, 0x34, 0xcf, 0xcf, 0x7b, 0xeb, 0x03, 0x58, 0x4d, 0x4d, 0xe0, 0xa2, + 0x43, 0xd7, 0x7e, 0x05, 0x2f, 0x2f, 0x55, 0x8d, 0x3f, 0x85, 0xcd, 0xa9, 0x6d, 0xda, 0x01, 0xf5, + 0x5c, 0x8f, 0xb2, 0x88, 0x15, 0x43, 0xc9, 0xff, 0x5e, 0x39, 0x27, 0xe6, 0x4e, 0x92, 0x6c, 0xa1, + 0x45, 0xdd, 0x98, 0x2e, 0x82, 0x37, 0x4b, 0xc5, 0xff, 0xac, 0xa0, 0x67, 0xcf, 0x9e, 0x3d, 0xcb, + 0xd4, 0x7e, 0x57, 0x80, 0xcd, 0x65, 0x7b, 0x66, 0xe9, 0xf6, 0xbd, 0x04, 0x05, 0x7b, 0x3a, 0x39, + 0xa5, 0x1e, 0x77, 0x52, 0x5e, 0x0d, 0x5b, 0xb8, 0x01, 0x79, 0x8b, 0x9c, 0x52, 0x4b, 0xce, 0x6d, + 0x4b, 0x3b, 0xd5, 0x5b, 0xef, 0x5c, 0x68, 0x57, 0xd6, 0x8f, 0x98, 0x88, 0x2a, 0x24, 0xf1, 0x47, + 0x90, 0x0b, 0x53, 0x34, 0xd3, 0x70, 0xf3, 0x62, 0x1a, 0xd8, 0x5e, 0x52, 0xb9, 0x1c, 0x7e, 0x05, + 0x4a, 0xec, 0x57, 0xc4, 0x46, 0x81, 0xdb, 0x5c, 0x64, 0x00, 0x8b, 0x0b, 0xbc, 0x05, 0x45, 0xbe, + 0x4d, 0x0c, 0x1a, 0x1d, 0x6d, 0x71, 0x9b, 0x05, 0x96, 0x41, 0x87, 0x64, 0x6a, 0x05, 0xda, 0x63, + 0x62, 0x4d, 0x29, 0x0f, 0xf8, 0x92, 0x5a, 0x09, 0xc1, 0x5f, 0x30, 0x0c, 0x5f, 0x85, 0xb2, 0xd8, + 0x55, 0xa6, 0x6d, 0xd0, 0xa7, 0x3c, 0x7b, 0xe6, 0x55, 0xb1, 0xd1, 0xda, 0x0c, 0x61, 0xc3, 0x3f, + 0xf4, 0x1d, 0x3b, 0x0a, 0x4d, 0x3e, 0x04, 0x03, 0xf8, 0xf0, 0x1f, 0xcc, 0x27, 0xee, 0xd7, 0x96, + 0x4f, 0x6f, 0x3e, 0xa6, 0x6a, 0x7f, 0xc9, 0x40, 0x8e, 0xe7, 0x8b, 0x35, 0x28, 0x0f, 0x3e, 0xeb, + 0x29, 0x5a, 0xab, 0x7b, 0xb2, 0x7f, 0xa4, 0x20, 0x09, 0x57, 0x01, 0x38, 0xf0, 0xe0, 0xa8, 0xdb, + 0x18, 0xa0, 0x4c, 0xdc, 0x6e, 0x77, 0x06, 0x77, 0x6f, 0xa3, 0x6c, 0x2c, 0x70, 0x22, 0x80, 0x5c, + 0x92, 0xf0, 0xfe, 
0x2d, 0x94, 0xc7, 0x08, 0x2a, 0x42, 0x41, 0xfb, 0x53, 0xa5, 0x75, 0xf7, 0x36, + 0x2a, 0xa4, 0x91, 0xf7, 0x6f, 0xa1, 0x15, 0xbc, 0x0a, 0x25, 0x8e, 0xec, 0x77, 0xbb, 0x47, 0xa8, + 0x18, 0xeb, 0xec, 0x0f, 0xd4, 0x76, 0xe7, 0x00, 0x95, 0x62, 0x9d, 0x07, 0x6a, 0xf7, 0xa4, 0x87, + 0x20, 0xd6, 0x70, 0xac, 0xf4, 0xfb, 0x8d, 0x03, 0x05, 0x95, 0x63, 0xc6, 0xfe, 0x67, 0x03, 0xa5, + 0x8f, 0x2a, 0x29, 0xb3, 0xde, 0xbf, 0x85, 0x56, 0xe3, 0x21, 0x94, 0xce, 0xc9, 0x31, 0xaa, 0xe2, + 0x75, 0x58, 0x15, 0x43, 0x44, 0x46, 0xac, 0xcd, 0x41, 0x77, 0x6f, 0x23, 0x34, 0x33, 0x44, 0x68, + 0x59, 0x4f, 0x01, 0x77, 0x6f, 0x23, 0x5c, 0x6b, 0x42, 0x9e, 0x47, 0x17, 0xc6, 0x50, 0x3d, 0x6a, + 0xec, 0x2b, 0x47, 0x5a, 0xb7, 0x37, 0x68, 0x77, 0x3b, 0x8d, 0x23, 0x24, 0xcd, 0x30, 0x55, 0xf9, + 0xf9, 0x49, 0x5b, 0x55, 0x5a, 0x28, 0x93, 0xc4, 0x7a, 0x4a, 0x63, 0xa0, 0xb4, 0x50, 0xb6, 0xa6, + 0xc3, 0xe6, 0xb2, 0x3c, 0xb9, 0x74, 0x67, 0x24, 0x96, 0x38, 0x73, 0xce, 0x12, 0x73, 0x5d, 0x0b, + 0x4b, 0xfc, 0xcf, 0x0c, 0x6c, 0x2c, 0x39, 0x2b, 0x96, 0x0e, 0xf2, 0x13, 0xc8, 0x8b, 0x10, 0x15, + 0xa7, 0xe7, 0x8d, 0xa5, 0x87, 0x0e, 0x0f, 0xd8, 0x85, 0x13, 0x94, 0xcb, 0x25, 0x2b, 0x88, 0xec, + 0x39, 0x15, 0x04, 0x53, 0xb1, 0x90, 0xd3, 0x7f, 0xb9, 0x90, 0xd3, 0xc5, 0xb1, 0x77, 0xf7, 0x22, + 0xc7, 0x1e, 0xc7, 0xbe, 0x5b, 0x6e, 0xcf, 0x2f, 0xc9, 0xed, 0xf7, 0x61, 0x7d, 0x41, 0xd1, 0x85, + 0x73, 0xec, 0xaf, 0x25, 0x90, 0xcf, 0x73, 0xce, 0x73, 0x32, 0x5d, 0x26, 0x95, 0xe9, 0xee, 0xcf, + 0x7b, 0xf0, 0xda, 0xf9, 0x8b, 0xb0, 0xb0, 0xd6, 0x5f, 0x49, 0x70, 0x69, 0x79, 0xa5, 0xb8, 0xd4, + 0x86, 0x8f, 0xa0, 0x30, 0xa1, 0xc1, 0xd8, 0x89, 0xaa, 0xa5, 0xb7, 0x97, 0x9c, 0xc1, 0xac, 0x7b, + 0x7e, 0xb1, 0x43, 0xa9, 0xe4, 0x21, 0x9e, 0x3d, 0xaf, 0xdc, 0x13, 0xd6, 0x2c, 0x58, 0xfa, 0x9b, + 0x0c, 0xbc, 0xbc, 0x54, 0xf9, 0x52, 0x43, 0x5f, 0x03, 0x30, 0x6d, 0x77, 0x1a, 0x88, 0x8a, 0x48, + 0x24, 0xd8, 0x12, 0x47, 0x78, 0xf2, 0x62, 0xc9, 0x73, 0x1a, 0xc4, 0xfd, 0x59, 0xde, 0x0f, 0x02, + 0xe2, 0x84, 0x7b, 0x33, 0x43, 0x73, 0xdc, 0xd0, 0xd7, 0xcf, 0x99, 0xe9, 0x42, 0x60, 0xbe, 0x07, + 0x48, 0xb7, 0x4c, 0x6a, 0x07, 0x9a, 0x1f, 0x78, 0x94, 0x4c, 0x4c, 0x7b, 0xc4, 0x4f, 0x90, 0xe2, + 0x5e, 0x7e, 0x48, 0x2c, 0x9f, 0xaa, 0x6b, 0xa2, 0xbb, 0x1f, 0xf5, 0x32, 0x09, 0x1e, 0x40, 0x5e, + 0x42, 0xa2, 0x90, 0x92, 0x10, 0xdd, 0xb1, 0x44, 0xed, 0xeb, 0x22, 0x94, 0x13, 0x75, 0x35, 0xbe, + 0x06, 0x95, 0x87, 0xe4, 0x31, 0xd1, 0xa2, 0xbb, 0x92, 0xf0, 0x44, 0x99, 0x61, 0xbd, 0xf0, 0xbe, + 0xf4, 0x1e, 0x6c, 0x72, 0x8a, 0x33, 0x0d, 0xa8, 0xa7, 0xe9, 0x16, 0xf1, 0x7d, 0xee, 0xb4, 0x22, + 0xa7, 0x62, 0xd6, 0xd7, 0x65, 0x5d, 0xcd, 0xa8, 0x07, 0xdf, 0x81, 0x0d, 0x2e, 0x31, 0x99, 0x5a, + 0x81, 0xe9, 0x5a, 0x54, 0x63, 0xb7, 0x37, 0x9f, 0x9f, 0x24, 0xb1, 0x65, 0xeb, 0x8c, 0x71, 0x1c, + 0x12, 0x98, 0x45, 0x3e, 0x6e, 0xc1, 0x6b, 0x5c, 0x6c, 0x44, 0x6d, 0xea, 0x91, 0x80, 0x6a, 0xf4, + 0x8b, 0x29, 0xb1, 0x7c, 0x8d, 0xd8, 0x86, 0x36, 0x26, 0xfe, 0x58, 0xde, 0x64, 0x0a, 0xf6, 0x33, + 0xb2, 0xa4, 0x5e, 0x61, 0xc4, 0x83, 0x90, 0xa7, 0x70, 0x5a, 0xc3, 0x36, 0x3e, 0x26, 0xfe, 0x18, + 0xef, 0xc1, 0x25, 0xae, 0xc5, 0x0f, 0x3c, 0xd3, 0x1e, 0x69, 0xfa, 0x98, 0xea, 0x8f, 0xb4, 0x69, + 0x30, 0xbc, 0x27, 0xbf, 0x92, 0x1c, 0x9f, 0x5b, 0xd8, 0xe7, 0x9c, 0x26, 0xa3, 0x9c, 0x04, 0xc3, + 0x7b, 0xb8, 0x0f, 0x15, 0xb6, 0x18, 0x13, 0xf3, 0x4b, 0xaa, 0x0d, 0x1d, 0x8f, 0x1f, 0x8d, 0xd5, + 0x25, 0xa9, 0x29, 0xe1, 0xc1, 0x7a, 0x37, 0x14, 0x38, 0x76, 0x0c, 0xba, 0x97, 0xef, 0xf7, 0x14, + 0xa5, 0xa5, 0x96, 0x23, 0x2d, 0x0f, 0x1c, 0x8f, 0x05, 0xd4, 0xc8, 0x89, 0x1d, 0x5c, 0x16, 0x01, + 0x35, 0x72, 0x22, 0xf7, 0xde, 0x81, 0x0d, 
0x5d, 0x17, 0x73, 0x36, 0x75, 0x2d, 0xbc, 0x63, 0xf9, + 0x32, 0x4a, 0x39, 0x4b, 0xd7, 0x0f, 0x04, 0x21, 0x8c, 0x71, 0x1f, 0x7f, 0x08, 0x2f, 0xcf, 0x9c, + 0x95, 0x14, 0x5c, 0x5f, 0x98, 0xe5, 0xbc, 0xe8, 0x1d, 0xd8, 0x70, 0xcf, 0x16, 0x05, 0x71, 0x6a, + 0x44, 0xf7, 0x6c, 0x5e, 0xec, 0x03, 0xd8, 0x74, 0xc7, 0xee, 0xa2, 0xdc, 0xcd, 0xa4, 0x1c, 0x76, + 0xc7, 0xee, 0xbc, 0xe0, 0x5b, 0xfc, 0xc2, 0xed, 0x51, 0x9d, 0x04, 0xd4, 0x90, 0x2f, 0x27, 0xe9, + 0x89, 0x0e, 0xbc, 0x0b, 0x48, 0xd7, 0x35, 0x6a, 0x93, 0x53, 0x8b, 0x6a, 0xc4, 0xa3, 0x36, 0xf1, + 0xe5, 0xab, 0x49, 0x72, 0x55, 0xd7, 0x15, 0xde, 0xdb, 0xe0, 0x9d, 0xf8, 0x26, 0xac, 0x3b, 0xa7, + 0x0f, 0x75, 0x11, 0x92, 0x9a, 0xeb, 0xd1, 0xa1, 0xf9, 0x54, 0x7e, 0x93, 0xfb, 0x77, 0x8d, 0x75, + 0xf0, 0x80, 0xec, 0x71, 0x18, 0xdf, 0x00, 0xa4, 0xfb, 0x63, 0xe2, 0xb9, 0x3c, 0x27, 0xfb, 0x2e, + 0xd1, 0xa9, 0xfc, 0x96, 0xa0, 0x0a, 0xbc, 0x13, 0xc1, 0x6c, 0x4b, 0xf8, 0x4f, 0xcc, 0x61, 0x10, + 0x69, 0xbc, 0x2e, 0xb6, 0x04, 0xc7, 0x42, 0x6d, 0x3b, 0x80, 0x98, 0x2b, 0x52, 0x03, 0xef, 0x70, + 0x5a, 0xd5, 0x1d, 0xbb, 0xc9, 0x71, 0xdf, 0x80, 0x55, 0xc6, 0x9c, 0x0d, 0x7a, 0x43, 0x14, 0x64, + 0xee, 0x38, 0x31, 0xe2, 0x0f, 0x56, 0x1b, 0xd7, 0xf6, 0xa0, 0x92, 0x8c, 0x4f, 0x5c, 0x02, 0x11, + 0xa1, 0x48, 0x62, 0xc5, 0x4a, 0xb3, 0xdb, 0x62, 0x65, 0xc6, 0xe7, 0x0a, 0xca, 0xb0, 0x72, 0xe7, + 0xa8, 0x3d, 0x50, 0x34, 0xf5, 0xa4, 0x33, 0x68, 0x1f, 0x2b, 0x28, 0x9b, 0xa8, 0xab, 0x0f, 0x73, + 0xc5, 0xb7, 0xd1, 0xf5, 0xda, 0x37, 0x19, 0xa8, 0xa6, 0x2f, 0x4a, 0xf8, 0xff, 0xe1, 0x72, 0xf4, + 0xaa, 0xe1, 0xd3, 0x40, 0x7b, 0x62, 0x7a, 0x7c, 0xe3, 0x4c, 0x88, 0x38, 0xc4, 0xe2, 0xa5, 0xdb, + 0x0c, 0x59, 0x7d, 0x1a, 0x7c, 0x62, 0x7a, 0x6c, 0x5b, 0x4c, 0x48, 0x80, 0x8f, 0xe0, 0xaa, 0xed, + 0x68, 0x7e, 0x40, 0x6c, 0x83, 0x78, 0x86, 0x36, 0x7b, 0x4f, 0xd2, 0x88, 0xae, 0x53, 0xdf, 0x77, + 0xc4, 0x81, 0x15, 0x6b, 0x79, 0xd5, 0x76, 0xfa, 0x21, 0x79, 0x96, 0xc9, 0x1b, 0x21, 0x75, 0x2e, + 0xcc, 0xb2, 0xe7, 0x85, 0xd9, 0x2b, 0x50, 0x9a, 0x10, 0x57, 0xa3, 0x76, 0xe0, 0x9d, 0xf1, 0xf2, + 0xb8, 0xa8, 0x16, 0x27, 0xc4, 0x55, 0x58, 0xfb, 0x85, 0xdc, 0x52, 0x0e, 0x73, 0xc5, 0x22, 0x2a, + 0x1d, 0xe6, 0x8a, 0x25, 0x04, 0xb5, 0x7f, 0x64, 0xa1, 0x92, 0x2c, 0x97, 0xd9, 0xed, 0x43, 0xe7, + 0x27, 0x8b, 0xc4, 0x73, 0xcf, 0x1b, 0xdf, 0x5a, 0x5c, 0xd7, 0x9b, 0xec, 0xc8, 0xd9, 0x2b, 0x88, + 0x22, 0x56, 0x15, 0x92, 0xec, 0xb8, 0x67, 0xd9, 0x86, 0x8a, 0xa2, 0xa1, 0xa8, 0x86, 0x2d, 0x7c, + 0x00, 0x85, 0x87, 0x3e, 0xd7, 0x5d, 0xe0, 0xba, 0xdf, 0xfc, 0x76, 0xdd, 0x87, 0x7d, 0xae, 0xbc, + 0x74, 0xd8, 0xd7, 0x3a, 0x5d, 0xf5, 0xb8, 0x71, 0xa4, 0x86, 0xe2, 0xf8, 0x0a, 0xe4, 0x2c, 0xf2, + 0xe5, 0x59, 0xfa, 0x70, 0xe2, 0xd0, 0x45, 0x17, 0xe1, 0x0a, 0xe4, 0x9e, 0x50, 0xf2, 0x28, 0x7d, + 0x24, 0x70, 0xe8, 0x07, 0xdc, 0x0c, 0xbb, 0x90, 0xe7, 0xfe, 0xc2, 0x00, 0xa1, 0xc7, 0xd0, 0x4b, + 0xb8, 0x08, 0xb9, 0x66, 0x57, 0x65, 0x1b, 0x02, 0x41, 0x45, 0xa0, 0x5a, 0xaf, 0xad, 0x34, 0x15, + 0x94, 0xa9, 0xdd, 0x81, 0x82, 0x70, 0x02, 0xdb, 0x2c, 0xb1, 0x1b, 0xd0, 0x4b, 0x61, 0x33, 0xd4, + 0x21, 0x45, 0xbd, 0x27, 0xc7, 0xfb, 0x8a, 0x8a, 0x32, 0xe9, 0xa5, 0xce, 0xa1, 0x7c, 0xcd, 0x87, + 0x4a, 0xb2, 0x5e, 0x7e, 0x31, 0x77, 0xe1, 0xbf, 0x4a, 0x50, 0x4e, 0xd4, 0xbf, 0xac, 0x70, 0x21, + 0x96, 0xe5, 0x3c, 0xd1, 0x88, 0x65, 0x12, 0x3f, 0x0c, 0x0d, 0xe0, 0x50, 0x83, 0x21, 0x17, 0x5d, + 0xba, 0x17, 0xb4, 0x45, 0xf2, 0xa8, 0x50, 0xfb, 0xa3, 0x04, 0x68, 0xbe, 0x00, 0x9d, 0x33, 0x53, + 0xfa, 0x31, 0xcd, 0xac, 0xfd, 0x41, 0x82, 0x6a, 0xba, 0xea, 0x9c, 0x33, 0xef, 0xda, 0x8f, 0x6a, + 0xde, 0xdf, 0x33, 0xb0, 0x9a, 0xaa, 0x35, 0x2f, 0x6a, 0xdd, 0x17, 
0xb0, 0x6e, 0x1a, 0x74, 0xe2, + 0x3a, 0x01, 0xb5, 0xf5, 0x33, 0xcd, 0xa2, 0x8f, 0xa9, 0x25, 0xd7, 0x78, 0xd2, 0xd8, 0xfd, 0xf6, + 0x6a, 0xb6, 0xde, 0x9e, 0xc9, 0x1d, 0x31, 0xb1, 0xbd, 0x8d, 0x76, 0x4b, 0x39, 0xee, 0x75, 0x07, + 0x4a, 0xa7, 0xf9, 0x99, 0x76, 0xd2, 0xf9, 0x59, 0xa7, 0xfb, 0x49, 0x47, 0x45, 0xe6, 0x1c, 0xed, + 0x07, 0xdc, 0xf6, 0x3d, 0x40, 0xf3, 0x46, 0xe1, 0xcb, 0xb0, 0xcc, 0x2c, 0xf4, 0x12, 0xde, 0x80, + 0xb5, 0x4e, 0x57, 0xeb, 0xb7, 0x5b, 0x8a, 0xa6, 0x3c, 0x78, 0xa0, 0x34, 0x07, 0x7d, 0xf1, 0x3e, + 0x11, 0xb3, 0x07, 0xa9, 0x0d, 0x5e, 0xfb, 0x7d, 0x16, 0x36, 0x96, 0x58, 0x82, 0x1b, 0xe1, 0xcd, + 0x42, 0x5c, 0x76, 0xde, 0xbd, 0x88, 0xf5, 0x75, 0x56, 0x10, 0xf4, 0x88, 0x17, 0x84, 0x17, 0x91, + 0x1b, 0xc0, 0xbc, 0x64, 0x07, 0xe6, 0xd0, 0xa4, 0x5e, 0xf8, 0x9c, 0x23, 0xae, 0x1b, 0x6b, 0x33, + 0x5c, 0xbc, 0xe8, 0xfc, 0x1f, 0x60, 0xd7, 0xf1, 0xcd, 0xc0, 0x7c, 0x4c, 0x35, 0xd3, 0x8e, 0xde, + 0x7e, 0xd8, 0xf5, 0x23, 0xa7, 0xa2, 0xa8, 0xa7, 0x6d, 0x07, 0x31, 0xdb, 0xa6, 0x23, 0x32, 0xc7, + 0x66, 0xc9, 0x3c, 0xab, 0xa2, 0xa8, 0x27, 0x66, 0x5f, 0x83, 0x8a, 0xe1, 0x4c, 0x59, 0x4d, 0x26, + 0x78, 0xec, 0xec, 0x90, 0xd4, 0xb2, 0xc0, 0x62, 0x4a, 0x58, 0x6d, 0xcf, 0x1e, 0x9d, 0x2a, 0x6a, + 0x59, 0x60, 0x82, 0x72, 0x1d, 0xd6, 0xc8, 0x68, 0xe4, 0x31, 0xe5, 0x91, 0x22, 0x71, 0x7f, 0xa8, + 0xc6, 0x30, 0x27, 0x6e, 0x1d, 0x42, 0x31, 0xf2, 0x03, 0x3b, 0xaa, 0x99, 0x27, 0x34, 0x57, 0x5c, + 0x8a, 0x33, 0x3b, 0x25, 0xb5, 0x68, 0x47, 0x9d, 0xd7, 0xa0, 0x62, 0xfa, 0xda, 0xec, 0x0d, 0x3d, + 0xb3, 0x9d, 0xd9, 0x29, 0xaa, 0x65, 0xd3, 0x8f, 0xdf, 0x1f, 0x6b, 0x5f, 0x65, 0xa0, 0x9a, 0xfe, + 0x06, 0x80, 0x5b, 0x50, 0xb4, 0x1c, 0x9d, 0xf0, 0xd0, 0x12, 0x1f, 0xa0, 0x76, 0x9e, 0xf3, 0xd9, + 0xa0, 0x7e, 0x14, 0xf2, 0xd5, 0x58, 0x72, 0xeb, 0x6f, 0x12, 0x14, 0x23, 0x18, 0x5f, 0x82, 0x9c, + 0x4b, 0x82, 0x31, 0x57, 0x97, 0xdf, 0xcf, 0x20, 0x49, 0xe5, 0x6d, 0x86, 0xfb, 0x2e, 0xb1, 0x79, + 0x08, 0x84, 0x38, 0x6b, 0xb3, 0x75, 0xb5, 0x28, 0x31, 0xf8, 0xe5, 0xc4, 0x99, 0x4c, 0xa8, 0x1d, + 0xf8, 0xd1, 0xba, 0x86, 0x78, 0x33, 0x84, 0xf1, 0x3b, 0xb0, 0x1e, 0x78, 0xc4, 0xb4, 0x52, 0xdc, + 0x1c, 0xe7, 0xa2, 0xa8, 0x23, 0x26, 0xef, 0xc1, 0x95, 0x48, 0xaf, 0x41, 0x03, 0xa2, 0x8f, 0xa9, + 0x31, 0x13, 0x2a, 0xf0, 0x47, 0x88, 0xcb, 0x21, 0xa1, 0x15, 0xf6, 0x47, 0xb2, 0xb5, 0x6f, 0x24, + 0x58, 0x8f, 0xae, 0x53, 0x46, 0xec, 0xac, 0x63, 0x00, 0x62, 0xdb, 0x4e, 0x90, 0x74, 0xd7, 0x62, + 0x28, 0x2f, 0xc8, 0xd5, 0x1b, 0xb1, 0x90, 0x9a, 0x50, 0xb0, 0x35, 0x01, 0x98, 0xf5, 0x9c, 0xeb, + 0xb6, 0xab, 0x50, 0x0e, 0x3f, 0xf0, 0xf0, 0xaf, 0x84, 0xe2, 0x02, 0x0e, 0x02, 0x62, 0xf7, 0x2e, + 0xbc, 0x09, 0xf9, 0x53, 0x3a, 0x32, 0xed, 0xf0, 0xd9, 0x56, 0x34, 0xa2, 0x67, 0x92, 0x5c, 0xfc, + 0x4c, 0xb2, 0xff, 0x5b, 0x09, 0x36, 0x74, 0x67, 0x32, 0x6f, 0xef, 0x3e, 0x9a, 0x7b, 0x05, 0xf0, + 0x3f, 0x96, 0x3e, 0xff, 0x68, 0x64, 0x06, 0xe3, 0xe9, 0x69, 0x5d, 0x77, 0x26, 0xbb, 0x23, 0xc7, + 0x22, 0xf6, 0x68, 0xf6, 0x99, 0x93, 0xff, 0xd1, 0xdf, 0x1d, 0x51, 0xfb, 0xdd, 0x91, 0x93, 0xf8, + 0xe8, 0x79, 0x7f, 0xf6, 0xf7, 0xbf, 0x92, 0xf4, 0xa7, 0x4c, 0xf6, 0xa0, 0xb7, 0xff, 0xe7, 0xcc, + 0xd6, 0x81, 0x18, 0xae, 0x17, 0xb9, 0x47, 0xa5, 0x43, 0x8b, 0xea, 0x6c, 0xca, 0xff, 0x0b, 0x00, + 0x00, 0xff, 0xff, 0x1a, 0x28, 0x25, 0x79, 0x42, 0x1d, 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go index 78ee52334..e3c56d3ff 100644 --- a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go +++ b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go @@ -1,13 +1,11 @@ // Code generated 
by protoc-gen-go. DO NOT EDIT. // source: google/protobuf/any.proto -package any +package any // import "github.com/golang/protobuf/ptypes/any" -import ( - fmt "fmt" - proto "github.com/golang/protobuf/proto" - math "math" -) +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -18,7 +16,7 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package // `Any` contains an arbitrary serialized protocol buffer message along with a // URL that describes the type of the serialized message. @@ -101,18 +99,17 @@ const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package // } // type Any struct { - // A URL/resource name that uniquely identifies the type of the serialized - // protocol buffer message. The last segment of the URL's path must represent - // the fully qualified name of the type (as in - // `path/google.protobuf.Duration`). The name should be in a canonical form - // (e.g., leading "." is not accepted). + // A URL/resource name whose content describes the type of the + // serialized protocol buffer message. // - // In practice, teams usually precompile into the binary all types that they - // expect it to use in the context of Any. However, for URLs which use the - // scheme `http`, `https`, or no scheme, one can optionally set up a type - // server that maps type URLs to message definitions as follows: + // For URLs which use the scheme `http`, `https`, or no scheme, the + // following restrictions and interpretations apply: // // * If no scheme is provided, `https` is assumed. + // * The last segment of the URL's path must represent the fully + // qualified name of the type (as in `path/google.protobuf.Duration`). + // The name should be in a canonical form (e.g., leading "." is + // not accepted). // * An HTTP GET on the URL must yield a [google.protobuf.Type][] // value in binary format, or produce an error. // * Applications are allowed to cache lookup results based on the @@ -121,10 +118,6 @@ type Any struct { // on changes to types. (Use versioned type names to manage // breaking changes.) // - // Note: this functionality is not currently available in the official - // protobuf release, and it is not used for type URLs beginning with - // type.googleapis.com. - // // Schemes other than `http`, `https` (or the empty scheme) might be // used with implementation specific semantics. 
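// Example (editorial sketch, not part of the upstream comment): in Go, the
// helpers in github.com/golang/protobuf/ptypes pack and unpack an Any; the
// durpb alias below is assumed to name ".../ptypes/duration":
//
//   a, err := ptypes.MarshalAny(&durpb.Duration{Seconds: 5})
//   if err != nil {
//       return err
//   }
//   d := &durpb.Duration{}
//   if err := ptypes.UnmarshalAny(a, d); err != nil {
//       return err
//   }
//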
// @@ -140,19 +133,17 @@ func (m *Any) Reset() { *m = Any{} } func (m *Any) String() string { return proto.CompactTextString(m) } func (*Any) ProtoMessage() {} func (*Any) Descriptor() ([]byte, []int) { - return fileDescriptor_b53526c13ae22eb4, []int{0} + return fileDescriptor_any_744b9ca530f228db, []int{0} } - func (*Any) XXX_WellKnownType() string { return "Any" } - func (m *Any) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Any.Unmarshal(m, b) } func (m *Any) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Any.Marshal(b, m, deterministic) } -func (m *Any) XXX_Merge(src proto.Message) { - xxx_messageInfo_Any.Merge(m, src) +func (dst *Any) XXX_Merge(src proto.Message) { + xxx_messageInfo_Any.Merge(dst, src) } func (m *Any) XXX_Size() int { return xxx_messageInfo_Any.Size(m) @@ -181,9 +172,9 @@ func init() { proto.RegisterType((*Any)(nil), "google.protobuf.Any") } -func init() { proto.RegisterFile("google/protobuf/any.proto", fileDescriptor_b53526c13ae22eb4) } +func init() { proto.RegisterFile("google/protobuf/any.proto", fileDescriptor_any_744b9ca530f228db) } -var fileDescriptor_b53526c13ae22eb4 = []byte{ +var fileDescriptor_any_744b9ca530f228db = []byte{ // 185 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4c, 0xcf, 0xcf, 0x4f, 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcc, 0xab, 0xd4, diff --git a/vendor/github.com/golang/protobuf/ptypes/duration.go b/vendor/github.com/golang/protobuf/ptypes/duration.go index 26d1ca2fb..65cb0f8eb 100644 --- a/vendor/github.com/golang/protobuf/ptypes/duration.go +++ b/vendor/github.com/golang/protobuf/ptypes/duration.go @@ -82,7 +82,7 @@ func Duration(p *durpb.Duration) (time.Duration, error) { return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p) } if p.Nanos != 0 { - d += time.Duration(p.Nanos) * time.Nanosecond + d += time.Duration(p.Nanos) if (d < 0) != (p.Nanos < 0) { return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p) } diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go index 0d681ee21..a7beb2c41 100644 --- a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go +++ b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go @@ -1,13 +1,11 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // source: google/protobuf/duration.proto -package duration +package duration // import "github.com/golang/protobuf/ptypes/duration" -import ( - fmt "fmt" - proto "github.com/golang/protobuf/proto" - math "math" -) +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -18,7 +16,7 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. 
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package // A Duration represents a signed, fixed-length span of time represented // as a count of seconds and fractions of seconds at nanosecond @@ -101,19 +99,17 @@ func (m *Duration) Reset() { *m = Duration{} } func (m *Duration) String() string { return proto.CompactTextString(m) } func (*Duration) ProtoMessage() {} func (*Duration) Descriptor() ([]byte, []int) { - return fileDescriptor_23597b2ebd7ac6c5, []int{0} + return fileDescriptor_duration_e7d612259e3f0613, []int{0} } - func (*Duration) XXX_WellKnownType() string { return "Duration" } - func (m *Duration) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Duration.Unmarshal(m, b) } func (m *Duration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Duration.Marshal(b, m, deterministic) } -func (m *Duration) XXX_Merge(src proto.Message) { - xxx_messageInfo_Duration.Merge(m, src) +func (dst *Duration) XXX_Merge(src proto.Message) { + xxx_messageInfo_Duration.Merge(dst, src) } func (m *Duration) XXX_Size() int { return xxx_messageInfo_Duration.Size(m) @@ -142,9 +138,11 @@ func init() { proto.RegisterType((*Duration)(nil), "google.protobuf.Duration") } -func init() { proto.RegisterFile("google/protobuf/duration.proto", fileDescriptor_23597b2ebd7ac6c5) } +func init() { + proto.RegisterFile("google/protobuf/duration.proto", fileDescriptor_duration_e7d612259e3f0613) +} -var fileDescriptor_23597b2ebd7ac6c5 = []byte{ +var fileDescriptor_duration_e7d612259e3f0613 = []byte{ // 190 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f, 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0x29, 0x2d, 0x4a, diff --git a/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go b/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go new file mode 100644 index 000000000..a69b403ce --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go @@ -0,0 +1,79 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/empty.proto + +package empty // import "github.com/golang/protobuf/ptypes/empty" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// A generic empty message that you can re-use to avoid defining duplicated +// empty messages in your APIs. A typical example is to use it as the request +// or the response type of an API method. For instance: +// +// service Foo { +// rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); +// } +// +// The JSON representation for `Empty` is empty JSON object `{}`. 
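+// For example (editorial sketch, not part of the generated comment),
+// marshaling an Empty with github.com/golang/protobuf/jsonpb yields "{}":
+//
+//   m := jsonpb.Marshaler{}
+//   s, err := m.MarshalToString(&empty.Empty{}) // on success, s == "{}"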
+type Empty struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Empty) Reset() { *m = Empty{} } +func (m *Empty) String() string { return proto.CompactTextString(m) } +func (*Empty) ProtoMessage() {} +func (*Empty) Descriptor() ([]byte, []int) { + return fileDescriptor_empty_39e6d6db0632e5b2, []int{0} +} +func (*Empty) XXX_WellKnownType() string { return "Empty" } +func (m *Empty) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Empty.Unmarshal(m, b) +} +func (m *Empty) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Empty.Marshal(b, m, deterministic) +} +func (dst *Empty) XXX_Merge(src proto.Message) { + xxx_messageInfo_Empty.Merge(dst, src) +} +func (m *Empty) XXX_Size() int { + return xxx_messageInfo_Empty.Size(m) +} +func (m *Empty) XXX_DiscardUnknown() { + xxx_messageInfo_Empty.DiscardUnknown(m) +} + +var xxx_messageInfo_Empty proto.InternalMessageInfo + +func init() { + proto.RegisterType((*Empty)(nil), "google.protobuf.Empty") +} + +func init() { proto.RegisterFile("google/protobuf/empty.proto", fileDescriptor_empty_39e6d6db0632e5b2) } + +var fileDescriptor_empty_39e6d6db0632e5b2 = []byte{ + // 148 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4e, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcd, 0x2d, 0x28, + 0xa9, 0xd4, 0x03, 0x73, 0x85, 0xf8, 0x21, 0x92, 0x7a, 0x30, 0x49, 0x25, 0x76, 0x2e, 0x56, 0x57, + 0x90, 0xbc, 0x53, 0x19, 0x97, 0x70, 0x72, 0x7e, 0xae, 0x1e, 0x9a, 0xbc, 0x13, 0x17, 0x58, 0x36, + 0x00, 0xc4, 0x0d, 0x60, 0x8c, 0x52, 0x4f, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, + 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0x47, 0x58, 0x53, 0x50, 0x52, 0x59, 0x90, 0x5a, 0x0c, + 0xb1, 0xed, 0x07, 0x23, 0xe3, 0x22, 0x26, 0x66, 0xf7, 0x00, 0xa7, 0x55, 0x4c, 0x72, 0xee, 0x10, + 0x13, 0x03, 0xa0, 0xea, 0xf4, 0xc2, 0x53, 0x73, 0x72, 0xbc, 0xf3, 0xf2, 0xcb, 0xf3, 0x42, 0x40, + 0xea, 0x93, 0xd8, 0xc0, 0x06, 0x18, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0x64, 0xd4, 0xb3, 0xa6, + 0xb7, 0x00, 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go b/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go new file mode 100644 index 000000000..ee6382e14 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go @@ -0,0 +1,450 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/struct.proto + +package structpb // import "github.com/golang/protobuf/ptypes/struct" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// `NullValue` is a singleton enumeration to represent the null value for the +// `Value` type union. +// +// The JSON representation for `NullValue` is JSON `null`. +type NullValue int32 + +const ( + // Null value. 
+ NullValue_NULL_VALUE NullValue = 0
+)
+
+var NullValue_name = map[int32]string{
+ 0: "NULL_VALUE",
+}
+var NullValue_value = map[string]int32{
+ "NULL_VALUE": 0,
+}
+
+func (x NullValue) String() string {
+ return proto.EnumName(NullValue_name, int32(x))
+}
+func (NullValue) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_struct_3a5a94e0c7801b27, []int{0}
+}
+func (NullValue) XXX_WellKnownType() string { return "NullValue" }
+
+// `Struct` represents a structured data value, consisting of fields
+// which map to dynamically typed values. In some languages, `Struct`
+// might be supported by a native representation. For example, in
+// scripting languages like JS, a struct is represented as an
+// object. The details of that representation are described together
+// with the proto support for the language.
+//
+// The JSON representation for `Struct` is a JSON object.
+type Struct struct {
+ // Unordered map of dynamically typed values.
+ Fields map[string]*Value `protobuf:"bytes,1,rep,name=fields,proto3" json:"fields,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Struct) Reset() { *m = Struct{} }
+func (m *Struct) String() string { return proto.CompactTextString(m) }
+func (*Struct) ProtoMessage() {}
+func (*Struct) Descriptor() ([]byte, []int) {
+ return fileDescriptor_struct_3a5a94e0c7801b27, []int{0}
+}
+func (*Struct) XXX_WellKnownType() string { return "Struct" }
+func (m *Struct) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Struct.Unmarshal(m, b)
+}
+func (m *Struct) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Struct.Marshal(b, m, deterministic)
+}
+func (dst *Struct) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Struct.Merge(dst, src)
+}
+func (m *Struct) XXX_Size() int {
+ return xxx_messageInfo_Struct.Size(m)
+}
+func (m *Struct) XXX_DiscardUnknown() {
+ xxx_messageInfo_Struct.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Struct proto.InternalMessageInfo
+
+func (m *Struct) GetFields() map[string]*Value {
+ if m != nil {
+ return m.Fields
+ }
+ return nil
+}
+
+// `Value` represents a dynamically typed value which can be either
+// null, a number, a string, a boolean, a recursive struct value, or a
+// list of values. A producer of a value is expected to set one of
+// these variants; the absence of any variant indicates an error.
+//
+// The JSON representation for `Value` is a JSON value.
+type Value struct {
+ // The kind of value.
+ // + // Types that are valid to be assigned to Kind: + // *Value_NullValue + // *Value_NumberValue + // *Value_StringValue + // *Value_BoolValue + // *Value_StructValue + // *Value_ListValue + Kind isValue_Kind `protobuf_oneof:"kind"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Value) Reset() { *m = Value{} } +func (m *Value) String() string { return proto.CompactTextString(m) } +func (*Value) ProtoMessage() {} +func (*Value) Descriptor() ([]byte, []int) { + return fileDescriptor_struct_3a5a94e0c7801b27, []int{1} +} +func (*Value) XXX_WellKnownType() string { return "Value" } +func (m *Value) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Value.Unmarshal(m, b) +} +func (m *Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Value.Marshal(b, m, deterministic) +} +func (dst *Value) XXX_Merge(src proto.Message) { + xxx_messageInfo_Value.Merge(dst, src) +} +func (m *Value) XXX_Size() int { + return xxx_messageInfo_Value.Size(m) +} +func (m *Value) XXX_DiscardUnknown() { + xxx_messageInfo_Value.DiscardUnknown(m) +} + +var xxx_messageInfo_Value proto.InternalMessageInfo + +type isValue_Kind interface { + isValue_Kind() +} + +type Value_NullValue struct { + NullValue NullValue `protobuf:"varint,1,opt,name=null_value,json=nullValue,proto3,enum=google.protobuf.NullValue,oneof"` +} + +type Value_NumberValue struct { + NumberValue float64 `protobuf:"fixed64,2,opt,name=number_value,json=numberValue,proto3,oneof"` +} + +type Value_StringValue struct { + StringValue string `protobuf:"bytes,3,opt,name=string_value,json=stringValue,proto3,oneof"` +} + +type Value_BoolValue struct { + BoolValue bool `protobuf:"varint,4,opt,name=bool_value,json=boolValue,proto3,oneof"` +} + +type Value_StructValue struct { + StructValue *Struct `protobuf:"bytes,5,opt,name=struct_value,json=structValue,proto3,oneof"` +} + +type Value_ListValue struct { + ListValue *ListValue `protobuf:"bytes,6,opt,name=list_value,json=listValue,proto3,oneof"` +} + +func (*Value_NullValue) isValue_Kind() {} + +func (*Value_NumberValue) isValue_Kind() {} + +func (*Value_StringValue) isValue_Kind() {} + +func (*Value_BoolValue) isValue_Kind() {} + +func (*Value_StructValue) isValue_Kind() {} + +func (*Value_ListValue) isValue_Kind() {} + +func (m *Value) GetKind() isValue_Kind { + if m != nil { + return m.Kind + } + return nil +} + +func (m *Value) GetNullValue() NullValue { + if x, ok := m.GetKind().(*Value_NullValue); ok { + return x.NullValue + } + return NullValue_NULL_VALUE +} + +func (m *Value) GetNumberValue() float64 { + if x, ok := m.GetKind().(*Value_NumberValue); ok { + return x.NumberValue + } + return 0 +} + +func (m *Value) GetStringValue() string { + if x, ok := m.GetKind().(*Value_StringValue); ok { + return x.StringValue + } + return "" +} + +func (m *Value) GetBoolValue() bool { + if x, ok := m.GetKind().(*Value_BoolValue); ok { + return x.BoolValue + } + return false +} + +func (m *Value) GetStructValue() *Struct { + if x, ok := m.GetKind().(*Value_StructValue); ok { + return x.StructValue + } + return nil +} + +func (m *Value) GetListValue() *ListValue { + if x, ok := m.GetKind().(*Value_ListValue); ok { + return x.ListValue + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
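+// (Editorial example, not part of the generated file: a Value carrying the
+// string "hello" can be built directly through the oneof wrapper types:
+//
+//   v := &structpb.Value{Kind: &structpb.Value_StringValue{StringValue: "hello"}}
+//   s := v.GetStringValue() // s == "hello"
+//
+// The marshaler, unmarshaler, and sizer returned below let the proto
+// runtime handle whichever variant is set.)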
+func (*Value) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Value_OneofMarshaler, _Value_OneofUnmarshaler, _Value_OneofSizer, []interface{}{ + (*Value_NullValue)(nil), + (*Value_NumberValue)(nil), + (*Value_StringValue)(nil), + (*Value_BoolValue)(nil), + (*Value_StructValue)(nil), + (*Value_ListValue)(nil), + } +} + +func _Value_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Value) + // kind + switch x := m.Kind.(type) { + case *Value_NullValue: + b.EncodeVarint(1<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.NullValue)) + case *Value_NumberValue: + b.EncodeVarint(2<<3 | proto.WireFixed64) + b.EncodeFixed64(math.Float64bits(x.NumberValue)) + case *Value_StringValue: + b.EncodeVarint(3<<3 | proto.WireBytes) + b.EncodeStringBytes(x.StringValue) + case *Value_BoolValue: + t := uint64(0) + if x.BoolValue { + t = 1 + } + b.EncodeVarint(4<<3 | proto.WireVarint) + b.EncodeVarint(t) + case *Value_StructValue: + b.EncodeVarint(5<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.StructValue); err != nil { + return err + } + case *Value_ListValue: + b.EncodeVarint(6<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ListValue); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("Value.Kind has unexpected type %T", x) + } + return nil +} + +func _Value_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Value) + switch tag { + case 1: // kind.null_value + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Kind = &Value_NullValue{NullValue(x)} + return true, err + case 2: // kind.number_value + if wire != proto.WireFixed64 { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeFixed64() + m.Kind = &Value_NumberValue{math.Float64frombits(x)} + return true, err + case 3: // kind.string_value + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Kind = &Value_StringValue{x} + return true, err + case 4: // kind.bool_value + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Kind = &Value_BoolValue{x != 0} + return true, err + case 5: // kind.struct_value + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Struct) + err := b.DecodeMessage(msg) + m.Kind = &Value_StructValue{msg} + return true, err + case 6: // kind.list_value + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ListValue) + err := b.DecodeMessage(msg) + m.Kind = &Value_ListValue{msg} + return true, err + default: + return false, nil + } +} + +func _Value_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Value) + // kind + switch x := m.Kind.(type) { + case *Value_NullValue: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(x.NullValue)) + case *Value_NumberValue: + n += 1 // tag and wire + n += 8 + case *Value_StringValue: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(len(x.StringValue))) + n += len(x.StringValue) + case *Value_BoolValue: + n += 1 // tag and wire + n += 1 + case *Value_StructValue: + s := proto.Size(x.StructValue) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *Value_ListValue: + s := proto.Size(x.ListValue) + n += 1 // tag and wire + n += 
proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// `ListValue` is a wrapper around a repeated field of values. +// +// The JSON representation for `ListValue` is JSON array. +type ListValue struct { + // Repeated field of dynamically typed values. + Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListValue) Reset() { *m = ListValue{} } +func (m *ListValue) String() string { return proto.CompactTextString(m) } +func (*ListValue) ProtoMessage() {} +func (*ListValue) Descriptor() ([]byte, []int) { + return fileDescriptor_struct_3a5a94e0c7801b27, []int{2} +} +func (*ListValue) XXX_WellKnownType() string { return "ListValue" } +func (m *ListValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListValue.Unmarshal(m, b) +} +func (m *ListValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListValue.Marshal(b, m, deterministic) +} +func (dst *ListValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListValue.Merge(dst, src) +} +func (m *ListValue) XXX_Size() int { + return xxx_messageInfo_ListValue.Size(m) +} +func (m *ListValue) XXX_DiscardUnknown() { + xxx_messageInfo_ListValue.DiscardUnknown(m) +} + +var xxx_messageInfo_ListValue proto.InternalMessageInfo + +func (m *ListValue) GetValues() []*Value { + if m != nil { + return m.Values + } + return nil +} + +func init() { + proto.RegisterType((*Struct)(nil), "google.protobuf.Struct") + proto.RegisterMapType((map[string]*Value)(nil), "google.protobuf.Struct.FieldsEntry") + proto.RegisterType((*Value)(nil), "google.protobuf.Value") + proto.RegisterType((*ListValue)(nil), "google.protobuf.ListValue") + proto.RegisterEnum("google.protobuf.NullValue", NullValue_name, NullValue_value) +} + +func init() { + proto.RegisterFile("google/protobuf/struct.proto", fileDescriptor_struct_3a5a94e0c7801b27) +} + +var fileDescriptor_struct_3a5a94e0c7801b27 = []byte{ + // 417 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x92, 0x41, 0x8b, 0xd3, 0x40, + 0x14, 0xc7, 0x3b, 0xc9, 0x36, 0x98, 0x17, 0x59, 0x97, 0x11, 0xb4, 0xac, 0xa2, 0xa1, 0x7b, 0x09, + 0x22, 0x29, 0xd6, 0x8b, 0x18, 0x2f, 0x06, 0xd6, 0x5d, 0x30, 0x2c, 0x31, 0xba, 0x15, 0xbc, 0x94, + 0x26, 0x4d, 0x63, 0xe8, 0x74, 0x26, 0x24, 0x33, 0x4a, 0x8f, 0x7e, 0x0b, 0xcf, 0x1e, 0x3d, 0xfa, + 0xe9, 0x3c, 0xca, 0xcc, 0x24, 0xa9, 0xb4, 0xf4, 0x94, 0xbc, 0xf7, 0x7e, 0xef, 0x3f, 0xef, 0xff, + 0x66, 0xe0, 0x71, 0xc1, 0x58, 0x41, 0xf2, 0x49, 0x55, 0x33, 0xce, 0x52, 0xb1, 0x9a, 0x34, 0xbc, + 0x16, 0x19, 0xf7, 0x55, 0x8c, 0xef, 0xe9, 0xaa, 0xdf, 0x55, 0xc7, 0x3f, 0x11, 0x58, 0x1f, 0x15, + 0x81, 0x03, 0xb0, 0x56, 0x65, 0x4e, 0x96, 0xcd, 0x08, 0xb9, 0xa6, 0xe7, 0x4c, 0x2f, 0xfc, 0x3d, + 0xd8, 0xd7, 0xa0, 0xff, 0x4e, 0x51, 0x97, 0x94, 0xd7, 0xdb, 0xa4, 0x6d, 0x39, 0xff, 0x00, 0xce, + 0x7f, 0x69, 0x7c, 0x06, 0xe6, 0x3a, 0xdf, 0x8e, 0x90, 0x8b, 0x3c, 0x3b, 0x91, 0xbf, 0xf8, 0x39, + 0x0c, 0xbf, 0x2d, 0x88, 0xc8, 0x47, 0x86, 0x8b, 0x3c, 0x67, 0xfa, 0xe0, 0x40, 0x7c, 0x26, 0xab, + 0x89, 0x86, 0x5e, 0x1b, 0xaf, 0xd0, 0xf8, 0x8f, 0x01, 0x43, 0x95, 0xc4, 0x01, 0x00, 0x15, 0x84, + 0xcc, 0xb5, 0x80, 0x14, 0x3d, 0x9d, 0x9e, 0x1f, 0x08, 0xdc, 0x08, 0x42, 0x14, 0x7f, 0x3d, 0x48, + 0x6c, 0xda, 0x05, 0xf8, 0x02, 0xee, 0x52, 0xb1, 0x49, 0xf3, 0x7a, 0xbe, 0x3b, 0x1f, 0x5d, 0x0f, 
+ 0x12, 0x47, 0x67, 0x7b, 0xa8, 0xe1, 0x75, 0x49, 0x8b, 0x16, 0x32, 0xe5, 0xe0, 0x12, 0xd2, 0x59, + 0x0d, 0x3d, 0x05, 0x48, 0x19, 0xeb, 0xc6, 0x38, 0x71, 0x91, 0x77, 0x47, 0x1e, 0x25, 0x73, 0x1a, + 0x78, 0xa3, 0x54, 0x44, 0xc6, 0x5b, 0x64, 0xa8, 0xac, 0x3e, 0x3c, 0xb2, 0xc7, 0x56, 0x5e, 0x64, + 0xbc, 0x77, 0x49, 0xca, 0xa6, 0xeb, 0xb5, 0x54, 0xef, 0xa1, 0xcb, 0xa8, 0x6c, 0x78, 0xef, 0x92, + 0x74, 0x41, 0x68, 0xc1, 0xc9, 0xba, 0xa4, 0xcb, 0x71, 0x00, 0x76, 0x4f, 0x60, 0x1f, 0x2c, 0x25, + 0xd6, 0xdd, 0xe8, 0xb1, 0xa5, 0xb7, 0xd4, 0xb3, 0x47, 0x60, 0xf7, 0x4b, 0xc4, 0xa7, 0x00, 0x37, + 0xb7, 0x51, 0x34, 0x9f, 0xbd, 0x8d, 0x6e, 0x2f, 0xcf, 0x06, 0xe1, 0x0f, 0x04, 0xf7, 0x33, 0xb6, + 0xd9, 0x97, 0x08, 0x1d, 0xed, 0x26, 0x96, 0x71, 0x8c, 0xbe, 0xbc, 0x28, 0x4a, 0xfe, 0x55, 0xa4, + 0x7e, 0xc6, 0x36, 0x93, 0x82, 0x91, 0x05, 0x2d, 0x76, 0x4f, 0xb1, 0xe2, 0xdb, 0x2a, 0x6f, 0xda, + 0x17, 0x19, 0xe8, 0x4f, 0x95, 0xfe, 0x45, 0xe8, 0x97, 0x61, 0x5e, 0xc5, 0xe1, 0x6f, 0xe3, 0xc9, + 0x95, 0x16, 0x8f, 0xbb, 0xf9, 0x3e, 0xe7, 0x84, 0xbc, 0xa7, 0xec, 0x3b, 0xfd, 0x24, 0x3b, 0x53, + 0x4b, 0x49, 0xbd, 0xfc, 0x17, 0x00, 0x00, 0xff, 0xff, 0xe8, 0x1b, 0x59, 0xf8, 0xe5, 0x02, 0x00, + 0x00, +} diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp.go b/vendor/github.com/golang/protobuf/ptypes/timestamp.go index 8da0df01a..47f10dbc2 100644 --- a/vendor/github.com/golang/protobuf/ptypes/timestamp.go +++ b/vendor/github.com/golang/protobuf/ptypes/timestamp.go @@ -111,9 +111,11 @@ func TimestampNow() *tspb.Timestamp { // TimestampProto converts the time.Time to a google.protobuf.Timestamp proto. // It returns an error if the resulting Timestamp is invalid. func TimestampProto(t time.Time) (*tspb.Timestamp, error) { + seconds := t.Unix() + nanos := int32(t.Sub(time.Unix(seconds, 0))) ts := &tspb.Timestamp{ - Seconds: t.Unix(), - Nanos: int32(t.Nanosecond()), + Seconds: seconds, + Nanos: nanos, } if err := validateTimestamp(ts); err != nil { return nil, err diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go index 31cd846de..8e76ae976 100644 --- a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go +++ b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go @@ -1,13 +1,11 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // source: google/protobuf/timestamp.proto -package timestamp +package timestamp // import "github.com/golang/protobuf/ptypes/timestamp" -import ( - fmt "fmt" - proto "github.com/golang/protobuf/proto" - math "math" -) +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -18,7 +16,7 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package // A Timestamp represents a point in time independent of any time zone // or calendar, represented as seconds and fractions of seconds at @@ -83,9 +81,7 @@ const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package // {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional // seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution), // are optional. 
The "Z" suffix indicates the timezone ("UTC"); the timezone -// is required. A proto3 JSON serializer should always use UTC (as indicated by -// "Z") when printing the Timestamp type and a proto3 JSON parser should be -// able to accept both UTC and other timezones (as indicated by an offset). +// is required, though only UTC (as indicated by "Z") is presently supported. // // For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past // 01:30 UTC on January 15, 2017. @@ -96,8 +92,8 @@ const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package // to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) // with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one // can use the Joda Time's [`ISODateTimeFormat.dateTime()`]( -// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime-- -// ) to obtain a formatter capable of generating timestamps in this format. +// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime--) +// to obtain a formatter capable of generating timestamps in this format. // // type Timestamp struct { @@ -119,19 +115,17 @@ func (m *Timestamp) Reset() { *m = Timestamp{} } func (m *Timestamp) String() string { return proto.CompactTextString(m) } func (*Timestamp) ProtoMessage() {} func (*Timestamp) Descriptor() ([]byte, []int) { - return fileDescriptor_292007bbfe81227e, []int{0} + return fileDescriptor_timestamp_b826e8e5fba671a8, []int{0} } - func (*Timestamp) XXX_WellKnownType() string { return "Timestamp" } - func (m *Timestamp) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Timestamp.Unmarshal(m, b) } func (m *Timestamp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Timestamp.Marshal(b, m, deterministic) } -func (m *Timestamp) XXX_Merge(src proto.Message) { - xxx_messageInfo_Timestamp.Merge(m, src) +func (dst *Timestamp) XXX_Merge(src proto.Message) { + xxx_messageInfo_Timestamp.Merge(dst, src) } func (m *Timestamp) XXX_Size() int { return xxx_messageInfo_Timestamp.Size(m) @@ -160,9 +154,11 @@ func init() { proto.RegisterType((*Timestamp)(nil), "google.protobuf.Timestamp") } -func init() { proto.RegisterFile("google/protobuf/timestamp.proto", fileDescriptor_292007bbfe81227e) } +func init() { + proto.RegisterFile("google/protobuf/timestamp.proto", fileDescriptor_timestamp_b826e8e5fba671a8) +} -var fileDescriptor_292007bbfe81227e = []byte{ +var fileDescriptor_timestamp_b826e8e5fba671a8 = []byte{ // 191 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4f, 0xcf, 0xcf, 0x4f, 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0xc9, 0xcc, 0x4d, diff --git a/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go b/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go new file mode 100644 index 000000000..0f0fa837f --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go @@ -0,0 +1,443 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/wrappers.proto + +package wrappers // import "github.com/golang/protobuf/ptypes/wrappers" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Wrapper message for `double`. +// +// The JSON representation for `DoubleValue` is JSON number. +type DoubleValue struct { + // The double value. + Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DoubleValue) Reset() { *m = DoubleValue{} } +func (m *DoubleValue) String() string { return proto.CompactTextString(m) } +func (*DoubleValue) ProtoMessage() {} +func (*DoubleValue) Descriptor() ([]byte, []int) { + return fileDescriptor_wrappers_16c7c35c009f3253, []int{0} +} +func (*DoubleValue) XXX_WellKnownType() string { return "DoubleValue" } +func (m *DoubleValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DoubleValue.Unmarshal(m, b) +} +func (m *DoubleValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DoubleValue.Marshal(b, m, deterministic) +} +func (dst *DoubleValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_DoubleValue.Merge(dst, src) +} +func (m *DoubleValue) XXX_Size() int { + return xxx_messageInfo_DoubleValue.Size(m) +} +func (m *DoubleValue) XXX_DiscardUnknown() { + xxx_messageInfo_DoubleValue.DiscardUnknown(m) +} + +var xxx_messageInfo_DoubleValue proto.InternalMessageInfo + +func (m *DoubleValue) GetValue() float64 { + if m != nil { + return m.Value + } + return 0 +} + +// Wrapper message for `float`. +// +// The JSON representation for `FloatValue` is JSON number. +type FloatValue struct { + // The float value. + Value float32 `protobuf:"fixed32,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FloatValue) Reset() { *m = FloatValue{} } +func (m *FloatValue) String() string { return proto.CompactTextString(m) } +func (*FloatValue) ProtoMessage() {} +func (*FloatValue) Descriptor() ([]byte, []int) { + return fileDescriptor_wrappers_16c7c35c009f3253, []int{1} +} +func (*FloatValue) XXX_WellKnownType() string { return "FloatValue" } +func (m *FloatValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FloatValue.Unmarshal(m, b) +} +func (m *FloatValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FloatValue.Marshal(b, m, deterministic) +} +func (dst *FloatValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_FloatValue.Merge(dst, src) +} +func (m *FloatValue) XXX_Size() int { + return xxx_messageInfo_FloatValue.Size(m) +} +func (m *FloatValue) XXX_DiscardUnknown() { + xxx_messageInfo_FloatValue.DiscardUnknown(m) +} + +var xxx_messageInfo_FloatValue proto.InternalMessageInfo + +func (m *FloatValue) GetValue() float32 { + if m != nil { + return m.Value + } + return 0 +} + +// Wrapper message for `int64`. +// +// The JSON representation for `Int64Value` is JSON string. +type Int64Value struct { + // The int64 value. 
+ Value int64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Int64Value) Reset() { *m = Int64Value{} } +func (m *Int64Value) String() string { return proto.CompactTextString(m) } +func (*Int64Value) ProtoMessage() {} +func (*Int64Value) Descriptor() ([]byte, []int) { + return fileDescriptor_wrappers_16c7c35c009f3253, []int{2} +} +func (*Int64Value) XXX_WellKnownType() string { return "Int64Value" } +func (m *Int64Value) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Int64Value.Unmarshal(m, b) +} +func (m *Int64Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Int64Value.Marshal(b, m, deterministic) +} +func (dst *Int64Value) XXX_Merge(src proto.Message) { + xxx_messageInfo_Int64Value.Merge(dst, src) +} +func (m *Int64Value) XXX_Size() int { + return xxx_messageInfo_Int64Value.Size(m) +} +func (m *Int64Value) XXX_DiscardUnknown() { + xxx_messageInfo_Int64Value.DiscardUnknown(m) +} + +var xxx_messageInfo_Int64Value proto.InternalMessageInfo + +func (m *Int64Value) GetValue() int64 { + if m != nil { + return m.Value + } + return 0 +} + +// Wrapper message for `uint64`. +// +// The JSON representation for `UInt64Value` is JSON string. +type UInt64Value struct { + // The uint64 value. + Value uint64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UInt64Value) Reset() { *m = UInt64Value{} } +func (m *UInt64Value) String() string { return proto.CompactTextString(m) } +func (*UInt64Value) ProtoMessage() {} +func (*UInt64Value) Descriptor() ([]byte, []int) { + return fileDescriptor_wrappers_16c7c35c009f3253, []int{3} +} +func (*UInt64Value) XXX_WellKnownType() string { return "UInt64Value" } +func (m *UInt64Value) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UInt64Value.Unmarshal(m, b) +} +func (m *UInt64Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UInt64Value.Marshal(b, m, deterministic) +} +func (dst *UInt64Value) XXX_Merge(src proto.Message) { + xxx_messageInfo_UInt64Value.Merge(dst, src) +} +func (m *UInt64Value) XXX_Size() int { + return xxx_messageInfo_UInt64Value.Size(m) +} +func (m *UInt64Value) XXX_DiscardUnknown() { + xxx_messageInfo_UInt64Value.DiscardUnknown(m) +} + +var xxx_messageInfo_UInt64Value proto.InternalMessageInfo + +func (m *UInt64Value) GetValue() uint64 { + if m != nil { + return m.Value + } + return 0 +} + +// Wrapper message for `int32`. +// +// The JSON representation for `Int32Value` is JSON number. +type Int32Value struct { + // The int32 value. 
+ Value int32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Int32Value) Reset() { *m = Int32Value{} } +func (m *Int32Value) String() string { return proto.CompactTextString(m) } +func (*Int32Value) ProtoMessage() {} +func (*Int32Value) Descriptor() ([]byte, []int) { + return fileDescriptor_wrappers_16c7c35c009f3253, []int{4} +} +func (*Int32Value) XXX_WellKnownType() string { return "Int32Value" } +func (m *Int32Value) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Int32Value.Unmarshal(m, b) +} +func (m *Int32Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Int32Value.Marshal(b, m, deterministic) +} +func (dst *Int32Value) XXX_Merge(src proto.Message) { + xxx_messageInfo_Int32Value.Merge(dst, src) +} +func (m *Int32Value) XXX_Size() int { + return xxx_messageInfo_Int32Value.Size(m) +} +func (m *Int32Value) XXX_DiscardUnknown() { + xxx_messageInfo_Int32Value.DiscardUnknown(m) +} + +var xxx_messageInfo_Int32Value proto.InternalMessageInfo + +func (m *Int32Value) GetValue() int32 { + if m != nil { + return m.Value + } + return 0 +} + +// Wrapper message for `uint32`. +// +// The JSON representation for `UInt32Value` is JSON number. +type UInt32Value struct { + // The uint32 value. + Value uint32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UInt32Value) Reset() { *m = UInt32Value{} } +func (m *UInt32Value) String() string { return proto.CompactTextString(m) } +func (*UInt32Value) ProtoMessage() {} +func (*UInt32Value) Descriptor() ([]byte, []int) { + return fileDescriptor_wrappers_16c7c35c009f3253, []int{5} +} +func (*UInt32Value) XXX_WellKnownType() string { return "UInt32Value" } +func (m *UInt32Value) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UInt32Value.Unmarshal(m, b) +} +func (m *UInt32Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UInt32Value.Marshal(b, m, deterministic) +} +func (dst *UInt32Value) XXX_Merge(src proto.Message) { + xxx_messageInfo_UInt32Value.Merge(dst, src) +} +func (m *UInt32Value) XXX_Size() int { + return xxx_messageInfo_UInt32Value.Size(m) +} +func (m *UInt32Value) XXX_DiscardUnknown() { + xxx_messageInfo_UInt32Value.DiscardUnknown(m) +} + +var xxx_messageInfo_UInt32Value proto.InternalMessageInfo + +func (m *UInt32Value) GetValue() uint32 { + if m != nil { + return m.Value + } + return 0 +} + +// Wrapper message for `bool`. +// +// The JSON representation for `BoolValue` is JSON `true` and `false`. +type BoolValue struct { + // The bool value. 
+ Value bool `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BoolValue) Reset() { *m = BoolValue{} } +func (m *BoolValue) String() string { return proto.CompactTextString(m) } +func (*BoolValue) ProtoMessage() {} +func (*BoolValue) Descriptor() ([]byte, []int) { + return fileDescriptor_wrappers_16c7c35c009f3253, []int{6} +} +func (*BoolValue) XXX_WellKnownType() string { return "BoolValue" } +func (m *BoolValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BoolValue.Unmarshal(m, b) +} +func (m *BoolValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BoolValue.Marshal(b, m, deterministic) +} +func (dst *BoolValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_BoolValue.Merge(dst, src) +} +func (m *BoolValue) XXX_Size() int { + return xxx_messageInfo_BoolValue.Size(m) +} +func (m *BoolValue) XXX_DiscardUnknown() { + xxx_messageInfo_BoolValue.DiscardUnknown(m) +} + +var xxx_messageInfo_BoolValue proto.InternalMessageInfo + +func (m *BoolValue) GetValue() bool { + if m != nil { + return m.Value + } + return false +} + +// Wrapper message for `string`. +// +// The JSON representation for `StringValue` is JSON string. +type StringValue struct { + // The string value. + Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StringValue) Reset() { *m = StringValue{} } +func (m *StringValue) String() string { return proto.CompactTextString(m) } +func (*StringValue) ProtoMessage() {} +func (*StringValue) Descriptor() ([]byte, []int) { + return fileDescriptor_wrappers_16c7c35c009f3253, []int{7} +} +func (*StringValue) XXX_WellKnownType() string { return "StringValue" } +func (m *StringValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StringValue.Unmarshal(m, b) +} +func (m *StringValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StringValue.Marshal(b, m, deterministic) +} +func (dst *StringValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_StringValue.Merge(dst, src) +} +func (m *StringValue) XXX_Size() int { + return xxx_messageInfo_StringValue.Size(m) +} +func (m *StringValue) XXX_DiscardUnknown() { + xxx_messageInfo_StringValue.DiscardUnknown(m) +} + +var xxx_messageInfo_StringValue proto.InternalMessageInfo + +func (m *StringValue) GetValue() string { + if m != nil { + return m.Value + } + return "" +} + +// Wrapper message for `bytes`. +// +// The JSON representation for `BytesValue` is JSON string. +type BytesValue struct { + // The bytes value. 
+ Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BytesValue) Reset() { *m = BytesValue{} } +func (m *BytesValue) String() string { return proto.CompactTextString(m) } +func (*BytesValue) ProtoMessage() {} +func (*BytesValue) Descriptor() ([]byte, []int) { + return fileDescriptor_wrappers_16c7c35c009f3253, []int{8} +} +func (*BytesValue) XXX_WellKnownType() string { return "BytesValue" } +func (m *BytesValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BytesValue.Unmarshal(m, b) +} +func (m *BytesValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BytesValue.Marshal(b, m, deterministic) +} +func (dst *BytesValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_BytesValue.Merge(dst, src) +} +func (m *BytesValue) XXX_Size() int { + return xxx_messageInfo_BytesValue.Size(m) +} +func (m *BytesValue) XXX_DiscardUnknown() { + xxx_messageInfo_BytesValue.DiscardUnknown(m) +} + +var xxx_messageInfo_BytesValue proto.InternalMessageInfo + +func (m *BytesValue) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +func init() { + proto.RegisterType((*DoubleValue)(nil), "google.protobuf.DoubleValue") + proto.RegisterType((*FloatValue)(nil), "google.protobuf.FloatValue") + proto.RegisterType((*Int64Value)(nil), "google.protobuf.Int64Value") + proto.RegisterType((*UInt64Value)(nil), "google.protobuf.UInt64Value") + proto.RegisterType((*Int32Value)(nil), "google.protobuf.Int32Value") + proto.RegisterType((*UInt32Value)(nil), "google.protobuf.UInt32Value") + proto.RegisterType((*BoolValue)(nil), "google.protobuf.BoolValue") + proto.RegisterType((*StringValue)(nil), "google.protobuf.StringValue") + proto.RegisterType((*BytesValue)(nil), "google.protobuf.BytesValue") +} + +func init() { + proto.RegisterFile("google/protobuf/wrappers.proto", fileDescriptor_wrappers_16c7c35c009f3253) +} + +var fileDescriptor_wrappers_16c7c35c009f3253 = []byte{ + // 259 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x2f, 0x4a, 0x2c, + 0x28, 0x48, 0x2d, 0x2a, 0xd6, 0x03, 0x8b, 0x08, 0xf1, 0x43, 0xe4, 0xf5, 0x60, 0xf2, 0x4a, 0xca, + 0x5c, 0xdc, 0x2e, 0xf9, 0xa5, 0x49, 0x39, 0xa9, 0x61, 0x89, 0x39, 0xa5, 0xa9, 0x42, 0x22, 0x5c, + 0xac, 0x65, 0x20, 0x86, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x63, 0x10, 0x84, 0xa3, 0xa4, 0xc4, 0xc5, + 0xe5, 0x96, 0x93, 0x9f, 0x58, 0x82, 0x45, 0x0d, 0x13, 0x92, 0x1a, 0xcf, 0xbc, 0x12, 0x33, 0x13, + 0x2c, 0x6a, 0x98, 0x61, 0x6a, 0x94, 0xb9, 0xb8, 0x43, 0x71, 0x29, 0x62, 0x41, 0x35, 0xc8, 0xd8, + 0x08, 0x8b, 0x1a, 0x56, 0x34, 0x83, 0xb0, 0x2a, 0xe2, 0x85, 0x29, 0x52, 0xe4, 0xe2, 0x74, 0xca, + 0xcf, 0xcf, 0xc1, 0xa2, 0x84, 0x03, 0xc9, 0x9c, 0xe0, 0x92, 0xa2, 0xcc, 0xbc, 0x74, 0x2c, 0x8a, + 0x38, 0x91, 0x1c, 0xe4, 0x54, 0x59, 0x92, 0x5a, 0x8c, 0x45, 0x0d, 0x0f, 0x54, 0x8d, 0x53, 0x0d, + 0x97, 0x70, 0x72, 0x7e, 0xae, 0x1e, 0x5a, 0xe8, 0x3a, 0xf1, 0x86, 0x43, 0x83, 0x3f, 0x00, 0x24, + 0x12, 0xc0, 0x18, 0xa5, 0x95, 0x9e, 0x59, 0x92, 0x51, 0x9a, 0xa4, 0x97, 0x9c, 0x9f, 0xab, 0x9f, + 0x9e, 0x9f, 0x93, 0x98, 0x97, 0x8e, 0x88, 0xaa, 0x82, 0x92, 0xca, 0x82, 0xd4, 0x62, 0x78, 0x8c, + 0xfd, 0x60, 0x64, 0x5c, 0xc4, 0xc4, 0xec, 0x1e, 0xe0, 0xb4, 0x8a, 0x49, 0xce, 0x1d, 0x62, 0x6e, + 0x00, 0x54, 0xa9, 0x5e, 0x78, 0x6a, 0x4e, 
0x8e, 0x77, 0x5e, 0x7e, 0x79, 0x5e, 0x08, 0x48, 0x4b, + 0x12, 0x1b, 0xd8, 0x0c, 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0x19, 0x6c, 0xb9, 0xb8, 0xfe, + 0x01, 0x00, 0x00, +} diff --git a/vendor/github.com/google/btree/LICENSE b/vendor/github.com/google/btree/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/github.com/google/btree/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/google/btree/btree.go b/vendor/github.com/google/btree/btree.go new file mode 100644 index 000000000..6ff062f9b --- /dev/null +++ b/vendor/github.com/google/btree/btree.go @@ -0,0 +1,890 @@ +// Copyright 2014 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package btree implements in-memory B-Trees of arbitrary degree. +// +// btree implements an in-memory B-Tree for use as an ordered data structure. +// It is not meant for persistent storage solutions. +// +// It has a flatter structure than an equivalent red-black or other binary tree, +// which in some cases yields better memory usage and/or performance. +// See some discussion on the matter here: +// http://google-opensource.blogspot.com/2013/01/c-containers-that-save-memory-and-time.html +// Note, though, that this project is in no way related to the C++ B-Tree +// implementation written about there. +// +// Within this tree, each node contains a slice of items and a (possibly nil) +// slice of children. For basic numeric values or raw structs, this can cause +// efficiency differences when compared to equivalent C++ template code that +// stores values in arrays within the node: +// * Due to the overhead of storing values as interfaces (each +// value needs to be stored as the value itself, then 2 words for the +// interface pointing to that value and its type), resulting in higher +// memory use. +// * Since interfaces can point to values anywhere in memory, values are +// most likely not stored in contiguous blocks, resulting in a higher +// number of cache misses. +// These issues don't tend to matter, though, when working with strings or other +// heap-allocated structures, since C++-equivalent structures also must store +// pointers and also distribute their values across the heap. +// +// This implementation is designed to be a drop-in replacement to gollrb.LLRB +// trees, (http://github.com/petar/gollrb), an excellent and probably the most +// widely used ordered tree implementation in the Go ecosystem currently. +// Its functions, therefore, exactly mirror those of +// llrb.LLRB where possible. Unlike gollrb, though, we currently don't +// support storing multiple equivalent values. 
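As a rough orientation before the implementation, here is a minimal sketch using only the exported API defined in this file (New, ReplaceOrInsert, Get, Has, Delete, Len, and the Int convenience type); the degree argument controls node fan-out as described in the package comment above:

package main

import (
	"fmt"

	"github.com/google/btree"
)

func main() {
	tr := btree.New(2) // degree 2: a 2-3-4 tree (1-3 items per node)
	for i := 0; i < 10; i++ {
		tr.ReplaceOrInsert(btree.Int(i))
	}
	fmt.Println(tr.Len())                // 10
	fmt.Println(tr.Get(btree.Int(3)))    // 3
	fmt.Println(tr.Has(btree.Int(42)))   // false
	fmt.Println(tr.Delete(btree.Int(3))) // 3 (the item that was removed)
	fmt.Println(tr.Len())                // 9
}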
+package btree + +import ( + "fmt" + "io" + "sort" + "strings" + "sync" +) + +// Item represents a single object in the tree. +type Item interface { + // Less tests whether the current item is less than the given argument. + // + // This must provide a strict weak ordering. + // If !a.Less(b) && !b.Less(a), we treat this to mean a == b (i.e. we can only + // hold one of either a or b in the tree). + Less(than Item) bool +} + +const ( + DefaultFreeListSize = 32 +) + +var ( + nilItems = make(items, 16) + nilChildren = make(children, 16) +) + +// FreeList represents a free list of btree nodes. By default each +// BTree has its own FreeList, but multiple BTrees can share the same +// FreeList. +// Two Btrees using the same freelist are safe for concurrent write access. +type FreeList struct { + mu sync.Mutex + freelist []*node +} + +// NewFreeList creates a new free list. +// size is the maximum size of the returned free list. +func NewFreeList(size int) *FreeList { + return &FreeList{freelist: make([]*node, 0, size)} +} + +func (f *FreeList) newNode() (n *node) { + f.mu.Lock() + index := len(f.freelist) - 1 + if index < 0 { + f.mu.Unlock() + return new(node) + } + n = f.freelist[index] + f.freelist[index] = nil + f.freelist = f.freelist[:index] + f.mu.Unlock() + return +} + +// freeNode adds the given node to the list, returning true if it was added +// and false if it was discarded. +func (f *FreeList) freeNode(n *node) (out bool) { + f.mu.Lock() + if len(f.freelist) < cap(f.freelist) { + f.freelist = append(f.freelist, n) + out = true + } + f.mu.Unlock() + return +} + +// ItemIterator allows callers of Ascend* to iterate in-order over portions of +// the tree. When this function returns false, iteration will stop and the +// associated Ascend* function will immediately return. +type ItemIterator func(i Item) bool + +// New creates a new B-Tree with the given degree. +// +// New(2), for example, will create a 2-3-4 tree (each node contains 1-3 items +// and 2-4 children). +func New(degree int) *BTree { + return NewWithFreeList(degree, NewFreeList(DefaultFreeListSize)) +} + +// NewWithFreeList creates a new B-Tree that uses the given node free list. +func NewWithFreeList(degree int, f *FreeList) *BTree { + if degree <= 1 { + panic("bad degree") + } + return &BTree{ + degree: degree, + cow: ©OnWriteContext{freelist: f}, + } +} + +// items stores items in a node. +type items []Item + +// insertAt inserts a value into the given index, pushing all subsequent values +// forward. +func (s *items) insertAt(index int, item Item) { + *s = append(*s, nil) + if index < len(*s) { + copy((*s)[index+1:], (*s)[index:]) + } + (*s)[index] = item +} + +// removeAt removes a value at a given index, pulling all subsequent values +// back. +func (s *items) removeAt(index int) Item { + item := (*s)[index] + copy((*s)[index:], (*s)[index+1:]) + (*s)[len(*s)-1] = nil + *s = (*s)[:len(*s)-1] + return item +} + +// pop removes and returns the last element in the list. +func (s *items) pop() (out Item) { + index := len(*s) - 1 + out = (*s)[index] + (*s)[index] = nil + *s = (*s)[:index] + return +} + +// truncate truncates this instance at index so that it contains only the +// first index items. index must be less than or equal to length. +func (s *items) truncate(index int) { + var toClear items + *s, toClear = (*s)[:index], (*s)[index:] + for len(toClear) > 0 { + toClear = toClear[copy(toClear, nilItems):] + } +} + +// find returns the index where the given item should be inserted into this +// list. 
'found' is true if the item already exists in the list at the given +// index. +func (s items) find(item Item) (index int, found bool) { + i := sort.Search(len(s), func(i int) bool { + return item.Less(s[i]) + }) + if i > 0 && !s[i-1].Less(item) { + return i - 1, true + } + return i, false +} + +// children stores child nodes in a node. +type children []*node + +// insertAt inserts a value into the given index, pushing all subsequent values +// forward. +func (s *children) insertAt(index int, n *node) { + *s = append(*s, nil) + if index < len(*s) { + copy((*s)[index+1:], (*s)[index:]) + } + (*s)[index] = n +} + +// removeAt removes a value at a given index, pulling all subsequent values +// back. +func (s *children) removeAt(index int) *node { + n := (*s)[index] + copy((*s)[index:], (*s)[index+1:]) + (*s)[len(*s)-1] = nil + *s = (*s)[:len(*s)-1] + return n +} + +// pop removes and returns the last element in the list. +func (s *children) pop() (out *node) { + index := len(*s) - 1 + out = (*s)[index] + (*s)[index] = nil + *s = (*s)[:index] + return +} + +// truncate truncates this instance at index so that it contains only the +// first index children. index must be less than or equal to length. +func (s *children) truncate(index int) { + var toClear children + *s, toClear = (*s)[:index], (*s)[index:] + for len(toClear) > 0 { + toClear = toClear[copy(toClear, nilChildren):] + } +} + +// node is an internal node in a tree. +// +// It must at all times maintain the invariant that either +// * len(children) == 0, len(items) unconstrained +// * len(children) == len(items) + 1 +type node struct { + items items + children children + cow *copyOnWriteContext +} + +func (n *node) mutableFor(cow *copyOnWriteContext) *node { + if n.cow == cow { + return n + } + out := cow.newNode() + if cap(out.items) >= len(n.items) { + out.items = out.items[:len(n.items)] + } else { + out.items = make(items, len(n.items), cap(n.items)) + } + copy(out.items, n.items) + // Copy children + if cap(out.children) >= len(n.children) { + out.children = out.children[:len(n.children)] + } else { + out.children = make(children, len(n.children), cap(n.children)) + } + copy(out.children, n.children) + return out +} + +func (n *node) mutableChild(i int) *node { + c := n.children[i].mutableFor(n.cow) + n.children[i] = c + return c +} + +// split splits the given node at the given index. The current node shrinks, +// and this function returns the item that existed at that index and a new node +// containing all items/children after it. +func (n *node) split(i int) (Item, *node) { + item := n.items[i] + next := n.cow.newNode() + next.items = append(next.items, n.items[i+1:]...) + n.items.truncate(i) + if len(n.children) > 0 { + next.children = append(next.children, n.children[i+1:]...) + n.children.truncate(i + 1) + } + return item, next +} + +// maybeSplitChild checks if a child should be split, and if so splits it. +// Returns whether or not a split occurred. +func (n *node) maybeSplitChild(i, maxItems int) bool { + if len(n.children[i].items) < maxItems { + return false + } + first := n.mutableChild(i) + item, second := first.split(maxItems / 2) + n.items.insertAt(i, item) + n.children.insertAt(i+1, second) + return true +} + +// insert inserts an item into the subtree rooted at this node, making sure +// no nodes in the subtree exceed maxItems items. Should an equivalent item be +// be found/replaced by insert, it will be returned. 
+func (n *node) insert(item Item, maxItems int) Item { + i, found := n.items.find(item) + if found { + out := n.items[i] + n.items[i] = item + return out + } + if len(n.children) == 0 { + n.items.insertAt(i, item) + return nil + } + if n.maybeSplitChild(i, maxItems) { + inTree := n.items[i] + switch { + case item.Less(inTree): + // no change, we want first split node + case inTree.Less(item): + i++ // we want second split node + default: + out := n.items[i] + n.items[i] = item + return out + } + } + return n.mutableChild(i).insert(item, maxItems) +} + +// get finds the given key in the subtree and returns it. +func (n *node) get(key Item) Item { + i, found := n.items.find(key) + if found { + return n.items[i] + } else if len(n.children) > 0 { + return n.children[i].get(key) + } + return nil +} + +// min returns the first item in the subtree. +func min(n *node) Item { + if n == nil { + return nil + } + for len(n.children) > 0 { + n = n.children[0] + } + if len(n.items) == 0 { + return nil + } + return n.items[0] +} + +// max returns the last item in the subtree. +func max(n *node) Item { + if n == nil { + return nil + } + for len(n.children) > 0 { + n = n.children[len(n.children)-1] + } + if len(n.items) == 0 { + return nil + } + return n.items[len(n.items)-1] +} + +// toRemove details what item to remove in a node.remove call. +type toRemove int + +const ( + removeItem toRemove = iota // removes the given item + removeMin // removes smallest item in the subtree + removeMax // removes largest item in the subtree +) + +// remove removes an item from the subtree rooted at this node. +func (n *node) remove(item Item, minItems int, typ toRemove) Item { + var i int + var found bool + switch typ { + case removeMax: + if len(n.children) == 0 { + return n.items.pop() + } + i = len(n.items) + case removeMin: + if len(n.children) == 0 { + return n.items.removeAt(0) + } + i = 0 + case removeItem: + i, found = n.items.find(item) + if len(n.children) == 0 { + if found { + return n.items.removeAt(i) + } + return nil + } + default: + panic("invalid type") + } + // If we get to here, we have children. + if len(n.children[i].items) <= minItems { + return n.growChildAndRemove(i, item, minItems, typ) + } + child := n.mutableChild(i) + // Either we had enough items to begin with, or we've done some + // merging/stealing, because we've got enough now and we're ready to return + // stuff. + if found { + // The item exists at index 'i', and the child we've selected can give us a + // predecessor, since if we've gotten here it's got > minItems items in it. + out := n.items[i] + // We use our special-case 'remove' call with typ=maxItem to pull the + // predecessor of item i (the rightmost leaf of our immediate left child) + // and set it into where we pulled the item from. + n.items[i] = child.remove(nil, minItems, removeMax) + return out + } + // Final recursive call. Once we're here, we know that the item isn't in this + // node and that the child is big enough to remove from. + return child.remove(item, minItems, typ) +} + +// growChildAndRemove grows child 'i' to make sure it's possible to remove an +// item from it while keeping it at minItems, then calls remove to actually +// remove it. 
+// +// Most documentation says we have to do two sets of special casing: +// 1) item is in this node +// 2) item is in child +// In both cases, we need to handle the two subcases: +// A) node has enough values that it can spare one +// B) node doesn't have enough values +// For the latter, we have to check: +// a) left sibling has node to spare +// b) right sibling has node to spare +// c) we must merge +// To simplify our code here, we handle cases #1 and #2 the same: +// If a node doesn't have enough items, we make sure it does (using a,b,c). +// We then simply redo our remove call, and the second time (regardless of +// whether we're in case 1 or 2), we'll have enough items and can guarantee +// that we hit case A. +func (n *node) growChildAndRemove(i int, item Item, minItems int, typ toRemove) Item { + if i > 0 && len(n.children[i-1].items) > minItems { + // Steal from left child + child := n.mutableChild(i) + stealFrom := n.mutableChild(i - 1) + stolenItem := stealFrom.items.pop() + child.items.insertAt(0, n.items[i-1]) + n.items[i-1] = stolenItem + if len(stealFrom.children) > 0 { + child.children.insertAt(0, stealFrom.children.pop()) + } + } else if i < len(n.items) && len(n.children[i+1].items) > minItems { + // steal from right child + child := n.mutableChild(i) + stealFrom := n.mutableChild(i + 1) + stolenItem := stealFrom.items.removeAt(0) + child.items = append(child.items, n.items[i]) + n.items[i] = stolenItem + if len(stealFrom.children) > 0 { + child.children = append(child.children, stealFrom.children.removeAt(0)) + } + } else { + if i >= len(n.items) { + i-- + } + child := n.mutableChild(i) + // merge with right child + mergeItem := n.items.removeAt(i) + mergeChild := n.children.removeAt(i + 1) + child.items = append(child.items, mergeItem) + child.items = append(child.items, mergeChild.items...) + child.children = append(child.children, mergeChild.children...) + n.cow.freeNode(mergeChild) + } + return n.remove(item, minItems, typ) +} + +type direction int + +const ( + descend = direction(-1) + ascend = direction(+1) +) + +// iterate provides a simple method for iterating over elements in the tree. +// +// When ascending, the 'start' should be less than 'stop' and when descending, +// the 'start' should be greater than 'stop'. Setting 'includeStart' to true +// will force the iterator to include the first item when it equals 'start', +// thus creating a "greaterOrEqual" or "lessThanEqual" rather than just a +// "greaterThan" or "lessThan" queries. 
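The node-level iterate defined next is surfaced through the tree-level Ascend*/Descend* methods later in this file; a sketch of the half-open range semantics described above, assuming the same vendored import path:

package main

import (
	"fmt"

	"github.com/google/btree"
)

func main() {
	tr := btree.New(2)
	for i := 0; i < 10; i++ {
		tr.ReplaceOrInsert(btree.Int(i))
	}
	// AscendRange visits [greaterOrEqual, lessThan): 3 is included, 7 is not.
	tr.AscendRange(btree.Int(3), btree.Int(7), func(i btree.Item) bool {
		fmt.Print(i, " ") // 3 4 5 6
		return true       // return false to stop iteration early
	})
	fmt.Println()
}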
+func (n *node) iterate(dir direction, start, stop Item, includeStart bool, hit bool, iter ItemIterator) (bool, bool) { + var ok, found bool + var index int + switch dir { + case ascend: + if start != nil { + index, _ = n.items.find(start) + } + for i := index; i < len(n.items); i++ { + if len(n.children) > 0 { + if hit, ok = n.children[i].iterate(dir, start, stop, includeStart, hit, iter); !ok { + return hit, false + } + } + if !includeStart && !hit && start != nil && !start.Less(n.items[i]) { + hit = true + continue + } + hit = true + if stop != nil && !n.items[i].Less(stop) { + return hit, false + } + if !iter(n.items[i]) { + return hit, false + } + } + if len(n.children) > 0 { + if hit, ok = n.children[len(n.children)-1].iterate(dir, start, stop, includeStart, hit, iter); !ok { + return hit, false + } + } + case descend: + if start != nil { + index, found = n.items.find(start) + if !found { + index = index - 1 + } + } else { + index = len(n.items) - 1 + } + for i := index; i >= 0; i-- { + if start != nil && !n.items[i].Less(start) { + if !includeStart || hit || start.Less(n.items[i]) { + continue + } + } + if len(n.children) > 0 { + if hit, ok = n.children[i+1].iterate(dir, start, stop, includeStart, hit, iter); !ok { + return hit, false + } + } + if stop != nil && !stop.Less(n.items[i]) { + return hit, false // continue + } + hit = true + if !iter(n.items[i]) { + return hit, false + } + } + if len(n.children) > 0 { + if hit, ok = n.children[0].iterate(dir, start, stop, includeStart, hit, iter); !ok { + return hit, false + } + } + } + return hit, true +} + +// Used for testing/debugging purposes. +func (n *node) print(w io.Writer, level int) { + fmt.Fprintf(w, "%sNODE:%v\n", strings.Repeat(" ", level), n.items) + for _, c := range n.children { + c.print(w, level+1) + } +} + +// BTree is an implementation of a B-Tree. +// +// BTree stores Item instances in an ordered structure, allowing easy insertion, +// removal, and iteration. +// +// Write operations are not safe for concurrent mutation by multiple +// goroutines, but Read operations are. +type BTree struct { + degree int + length int + root *node + cow *copyOnWriteContext +} + +// copyOnWriteContext pointers determine node ownership... a tree with a write +// context equivalent to a node's write context is allowed to modify that node. +// A tree whose write context does not match a node's is not allowed to modify +// it, and must create a new, writable copy (IE: it's a Clone). +// +// When doing any write operation, we maintain the invariant that the current +// node's context is equal to the context of the tree that requested the write. +// We do this by, before we descend into any node, creating a copy with the +// correct context if the contexts don't match. +// +// Since the node we're currently visiting on any write has the requesting +// tree's context, that node is modifiable in place. Children of that node may +// not share context, but before we descend into them, we'll make a mutable +// copy. +type copyOnWriteContext struct { + freelist *FreeList +} + +// Clone clones the btree, lazily. Clone should not be called concurrently, +// but the original tree (t) and the new tree (t2) can be used concurrently +// once the Clone call completes. +// +// The internal tree structure of b is marked read-only and shared between t and +// t2. Writes to both t and t2 use copy-on-write logic, creating new nodes +// whenever one of b's original nodes would have been modified. Read operations +// should have no performance degredation. 
Write operations for both t and t2 +// will initially experience minor slow-downs caused by additional allocs and +// copies due to the aforementioned copy-on-write logic, but should converge to +// the original performance characteristics of the original tree. +func (t *BTree) Clone() (t2 *BTree) { + // Create two entirely new copy-on-write contexts. + // This operation effectively creates three trees: + // the original, shared nodes (old b.cow) + // the new b.cow nodes + // the new out.cow nodes + cow1, cow2 := *t.cow, *t.cow + out := *t + t.cow = &cow1 + out.cow = &cow2 + return &out +} + +// maxItems returns the max number of items to allow per node. +func (t *BTree) maxItems() int { + return t.degree*2 - 1 +} + +// minItems returns the min number of items to allow per node (ignored for the +// root node). +func (t *BTree) minItems() int { + return t.degree - 1 +} + +func (c *copyOnWriteContext) newNode() (n *node) { + n = c.freelist.newNode() + n.cow = c + return +} + +type freeType int + +const ( + ftFreelistFull freeType = iota // node was freed (available for GC, not stored in freelist) + ftStored // node was stored in the freelist for later use + ftNotOwned // node was ignored by COW, since it's owned by another one +) + +// freeNode frees a node within a given COW context, if it's owned by that +// context. It returns what happened to the node (see freeType const +// documentation). +func (c *copyOnWriteContext) freeNode(n *node) freeType { + if n.cow == c { + // clear to allow GC + n.items.truncate(0) + n.children.truncate(0) + n.cow = nil + if c.freelist.freeNode(n) { + return ftStored + } else { + return ftFreelistFull + } + } else { + return ftNotOwned + } +} + +// ReplaceOrInsert adds the given item to the tree. If an item in the tree +// already equals the given one, it is removed from the tree and returned. +// Otherwise, nil is returned. +// +// nil cannot be added to the tree (will panic). +func (t *BTree) ReplaceOrInsert(item Item) Item { + if item == nil { + panic("nil item being added to BTree") + } + if t.root == nil { + t.root = t.cow.newNode() + t.root.items = append(t.root.items, item) + t.length++ + return nil + } else { + t.root = t.root.mutableFor(t.cow) + if len(t.root.items) >= t.maxItems() { + item2, second := t.root.split(t.maxItems() / 2) + oldroot := t.root + t.root = t.cow.newNode() + t.root.items = append(t.root.items, item2) + t.root.children = append(t.root.children, oldroot, second) + } + } + out := t.root.insert(item, t.maxItems()) + if out == nil { + t.length++ + } + return out +} + +// Delete removes an item equal to the passed in item from the tree, returning +// it. If no such item exists, returns nil. +func (t *BTree) Delete(item Item) Item { + return t.deleteItem(item, removeItem) +} + +// DeleteMin removes the smallest item in the tree and returns it. +// If no such item exists, returns nil. +func (t *BTree) DeleteMin() Item { + return t.deleteItem(nil, removeMin) +} + +// DeleteMax removes the largest item in the tree and returns it. +// If no such item exists, returns nil. 
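To make the copy-on-write contract and the replace-and-return semantics described above concrete, a small sketch using only exported calls from this file:

package main

import (
	"fmt"

	"github.com/google/btree"
)

func main() {
	t := btree.New(2)
	fmt.Println(t.ReplaceOrInsert(btree.Int(1))) // <nil>: nothing displaced
	fmt.Println(t.ReplaceOrInsert(btree.Int(1))) // 1: the equal item it replaced

	t2 := t.Clone()                  // lazy clone; nodes are shared until written
	t2.ReplaceOrInsert(btree.Int(2)) // copies only the touched nodes; t is unchanged

	fmt.Println(t.Len(), t2.Len())                               // 1 2
	fmt.Println(t.Delete(btree.Int(2)), t2.Delete(btree.Int(2))) // <nil> 2
}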
+func (t *BTree) DeleteMax() Item { + return t.deleteItem(nil, removeMax) +} + +func (t *BTree) deleteItem(item Item, typ toRemove) Item { + if t.root == nil || len(t.root.items) == 0 { + return nil + } + t.root = t.root.mutableFor(t.cow) + out := t.root.remove(item, t.minItems(), typ) + if len(t.root.items) == 0 && len(t.root.children) > 0 { + oldroot := t.root + t.root = t.root.children[0] + t.cow.freeNode(oldroot) + } + if out != nil { + t.length-- + } + return out +} + +// AscendRange calls the iterator for every value in the tree within the range +// [greaterOrEqual, lessThan), until iterator returns false. +func (t *BTree) AscendRange(greaterOrEqual, lessThan Item, iterator ItemIterator) { + if t.root == nil { + return + } + t.root.iterate(ascend, greaterOrEqual, lessThan, true, false, iterator) +} + +// AscendLessThan calls the iterator for every value in the tree within the range +// [first, pivot), until iterator returns false. +func (t *BTree) AscendLessThan(pivot Item, iterator ItemIterator) { + if t.root == nil { + return + } + t.root.iterate(ascend, nil, pivot, false, false, iterator) +} + +// AscendGreaterOrEqual calls the iterator for every value in the tree within +// the range [pivot, last], until iterator returns false. +func (t *BTree) AscendGreaterOrEqual(pivot Item, iterator ItemIterator) { + if t.root == nil { + return + } + t.root.iterate(ascend, pivot, nil, true, false, iterator) +} + +// Ascend calls the iterator for every value in the tree within the range +// [first, last], until iterator returns false. +func (t *BTree) Ascend(iterator ItemIterator) { + if t.root == nil { + return + } + t.root.iterate(ascend, nil, nil, false, false, iterator) +} + +// DescendRange calls the iterator for every value in the tree within the range +// [lessOrEqual, greaterThan), until iterator returns false. +func (t *BTree) DescendRange(lessOrEqual, greaterThan Item, iterator ItemIterator) { + if t.root == nil { + return + } + t.root.iterate(descend, lessOrEqual, greaterThan, true, false, iterator) +} + +// DescendLessOrEqual calls the iterator for every value in the tree within the range +// [pivot, first], until iterator returns false. +func (t *BTree) DescendLessOrEqual(pivot Item, iterator ItemIterator) { + if t.root == nil { + return + } + t.root.iterate(descend, pivot, nil, true, false, iterator) +} + +// DescendGreaterThan calls the iterator for every value in the tree within +// the range (pivot, last], until iterator returns false. +func (t *BTree) DescendGreaterThan(pivot Item, iterator ItemIterator) { + if t.root == nil { + return + } + t.root.iterate(descend, nil, pivot, false, false, iterator) +} + +// Descend calls the iterator for every value in the tree within the range +// [last, first], until iterator returns false. +func (t *BTree) Descend(iterator ItemIterator) { + if t.root == nil { + return + } + t.root.iterate(descend, nil, nil, false, false, iterator) +} + +// Get looks for the key item in the tree, returning it. It returns nil if +// unable to find that item. +func (t *BTree) Get(key Item) Item { + if t.root == nil { + return nil + } + return t.root.get(key) +} + +// Min returns the smallest item in the tree, or nil if the tree is empty. +func (t *BTree) Min() Item { + return min(t.root) +} + +// Max returns the largest item in the tree, or nil if the tree is empty. +func (t *BTree) Max() Item { + return max(t.root) +} + +// Has returns true if the given key is in the tree. 
+func (t *BTree) Has(key Item) bool { + return t.Get(key) != nil +} + +// Len returns the number of items currently in the tree. +func (t *BTree) Len() int { + return t.length +} + +// Clear removes all items from the btree. If addNodesToFreelist is true, +// t's nodes are added to its freelist as part of this call, until the freelist +// is full. Otherwise, the root node is simply dereferenced and the subtree +// left to Go's normal GC processes. +// +// This can be much faster +// than calling Delete on all elements, because that requires finding/removing +// each element in the tree and updating the tree accordingly. It also is +// somewhat faster than creating a new tree to replace the old one, because +// nodes from the old tree are reclaimed into the freelist for use by the new +// one, instead of being lost to the garbage collector. +// +// This call takes: +// O(1): when addNodesToFreelist is false, this is a single operation. +// O(1): when the freelist is already full, it breaks out immediately +// O(freelist size): when the freelist is empty and the nodes are all owned +// by this tree, nodes are added to the freelist until full. +// O(tree size): when all nodes are owned by another tree, all nodes are +// iterated over looking for nodes to add to the freelist, and due to +// ownership, none are. +func (t *BTree) Clear(addNodesToFreelist bool) { + if t.root != nil && addNodesToFreelist { + t.root.reset(t.cow) + } + t.root, t.length = nil, 0 +} + +// reset returns a subtree to the freelist. It breaks out immediately if the +// freelist is full, since the only benefit of iterating is to fill that +// freelist up. Returns true if parent reset call should continue. +func (n *node) reset(c *copyOnWriteContext) bool { + for _, child := range n.children { + if !child.reset(c) { + return false + } + } + return c.freeNode(n) != ftFreelistFull +} + +// Int implements the Item interface for integers. +type Int int + +// Less returns true if int(a) < int(b). +func (a Int) Less(b Item) bool { + return a < b.(Int) +} diff --git a/vendor/github.com/google/btree/btree_mem.go b/vendor/github.com/google/btree/btree_mem.go new file mode 100644 index 000000000..cb95b7fa1 --- /dev/null +++ b/vendor/github.com/google/btree/btree_mem.go @@ -0,0 +1,76 @@ +// Copyright 2014 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build ignore + +// This binary compares memory usage between btree and gollrb. 
+package main + +import ( + "flag" + "fmt" + "math/rand" + "runtime" + "time" + + "github.com/google/btree" + "github.com/petar/GoLLRB/llrb" +) + +var ( + size = flag.Int("size", 1000000, "size of the tree to build") + degree = flag.Int("degree", 8, "degree of btree") + gollrb = flag.Bool("llrb", false, "use llrb instead of btree") +) + +func main() { + flag.Parse() + vals := rand.Perm(*size) + var t, v interface{} + v = vals + var stats runtime.MemStats + for i := 0; i < 10; i++ { + runtime.GC() + } + fmt.Println("-------- BEFORE ----------") + runtime.ReadMemStats(&stats) + fmt.Printf("%+v\n", stats) + start := time.Now() + if *gollrb { + tr := llrb.New() + for _, v := range vals { + tr.ReplaceOrInsert(llrb.Int(v)) + } + t = tr // keep it around + } else { + tr := btree.New(*degree) + for _, v := range vals { + tr.ReplaceOrInsert(btree.Int(v)) + } + t = tr // keep it around + } + fmt.Printf("%v inserts in %v\n", *size, time.Since(start)) + fmt.Println("-------- AFTER ----------") + runtime.ReadMemStats(&stats) + fmt.Printf("%+v\n", stats) + for i := 0; i < 10; i++ { + runtime.GC() + } + fmt.Println("-------- AFTER GC ----------") + runtime.ReadMemStats(&stats) + fmt.Printf("%+v\n", stats) + if t == v { + fmt.Println("to make sure vals and tree aren't GC'd") + } +} diff --git a/vendor/github.com/google/uuid/CONTRIBUTORS b/vendor/github.com/google/uuid/CONTRIBUTORS new file mode 100644 index 000000000..b4bb97f6b --- /dev/null +++ b/vendor/github.com/google/uuid/CONTRIBUTORS @@ -0,0 +1,9 @@ +Paul Borman +bmatsuo +shawnps +theory +jboverfelt +dsymonds +cd1 +wallclockbuilder +dansouza diff --git a/vendor/github.com/google/uuid/LICENSE b/vendor/github.com/google/uuid/LICENSE new file mode 100644 index 000000000..5dc68268d --- /dev/null +++ b/vendor/github.com/google/uuid/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009,2014 Google Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
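A minimal usage sketch of the btree package vendored above (illustrative only, not part of the vendored files; it relies on nothing beyond the exported API shown in btree.go — New, ReplaceOrInsert, Len, Min, Max, AscendRange, Clear, and the Int convenience type):

package main

import (
	"fmt"

	"github.com/google/btree"
)

func main() {
	tr := btree.New(8) // a degree-8 B-tree, as in the benchmark above
	for i := 0; i < 10; i++ {
		tr.ReplaceOrInsert(btree.Int(i))
	}
	fmt.Println(tr.Len(), tr.Min(), tr.Max()) // 10 0 9
	// Visit items in the half-open range [3, 7).
	tr.AscendRange(btree.Int(3), btree.Int(7), func(i btree.Item) bool {
		fmt.Println(i)
		return true // returning false would stop the iteration early
	})
	tr.Clear(true) // hand the nodes back to the freelist
}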
diff --git a/vendor/github.com/google/uuid/dce.go b/vendor/github.com/google/uuid/dce.go
new file mode 100644
index 000000000..fa820b9d3
--- /dev/null
+++ b/vendor/github.com/google/uuid/dce.go
@@ -0,0 +1,80 @@
+// Copyright 2016 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+	"encoding/binary"
+	"fmt"
+	"os"
+)
+
+// A Domain represents a Version 2 domain
+type Domain byte
+
+// Domain constants for DCE Security (Version 2) UUIDs.
+const (
+	Person = Domain(0)
+	Group  = Domain(1)
+	Org    = Domain(2)
+)
+
+// NewDCESecurity returns a DCE Security (Version 2) UUID.
+//
+// The domain should be one of Person, Group or Org.
+// On a POSIX system the id should be the user's UID for the Person
+// domain and the user's GID for the Group. The meaning of id for
+// the domain Org or on non-POSIX systems is site defined.
+//
+// For a given domain/id pair the same token may be returned for up to
+// 7 minutes and 10 seconds.
+func NewDCESecurity(domain Domain, id uint32) (UUID, error) {
+	uuid, err := NewUUID()
+	if err == nil {
+		uuid[6] = (uuid[6] & 0x0f) | 0x20 // Version 2
+		uuid[9] = byte(domain)
+		binary.BigEndian.PutUint32(uuid[0:], id)
+	}
+	return uuid, err
+}
+
+// NewDCEPerson returns a DCE Security (Version 2) UUID in the person
+// domain with the id returned by os.Getuid.
+//
+// NewDCESecurity(Person, uint32(os.Getuid()))
+func NewDCEPerson() (UUID, error) {
+	return NewDCESecurity(Person, uint32(os.Getuid()))
+}
+
+// NewDCEGroup returns a DCE Security (Version 2) UUID in the group
+// domain with the id returned by os.Getgid.
+//
+// NewDCESecurity(Group, uint32(os.Getgid()))
+func NewDCEGroup() (UUID, error) {
+	return NewDCESecurity(Group, uint32(os.Getgid()))
+}
+
+// Domain returns the domain for a Version 2 UUID. Domains are only defined
+// for Version 2 UUIDs.
+func (uuid UUID) Domain() Domain {
+	return Domain(uuid[9])
+}
+
+// ID returns the id for a Version 2 UUID. IDs are only defined for Version 2
+// UUIDs.
+func (uuid UUID) ID() uint32 {
+	return binary.BigEndian.Uint32(uuid[0:4])
+}
+
+func (d Domain) String() string {
+	switch d {
+	case Person:
+		return "Person"
+	case Group:
+		return "Group"
+	case Org:
+		return "Org"
+	}
+	return fmt.Sprintf("Domain%d", int(d))
+}
diff --git a/vendor/github.com/google/uuid/doc.go b/vendor/github.com/google/uuid/doc.go
new file mode 100644
index 000000000..5b8a4b9af
--- /dev/null
+++ b/vendor/github.com/google/uuid/doc.go
@@ -0,0 +1,12 @@
+// Copyright 2016 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package uuid generates and inspects UUIDs.
+//
+// UUIDs are based on RFC 4122 and DCE 1.1: Authentication and Security
+// Services.
+//
+// A UUID is a 16 byte (128 bit) array. UUIDs may be used as keys to
+// maps or compared directly.
+package uuid
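A short sketch of the DCE Security API from dce.go (illustrative only, not part of the vendored files; it assumes a POSIX system, and the printed id depends on the calling user):

package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	// Equivalent to uuid.NewDCESecurity(uuid.Person, uint32(os.Getuid())).
	u, err := uuid.NewDCEPerson()
	if err != nil {
		panic(err)
	}
	// Version 2 UUIDs embed the domain in byte 9 and the id in bytes 0-3.
	fmt.Println(u.Domain(), u.ID()) // e.g. "Person 1000" on a typical Linux box
}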
diff --git a/vendor/github.com/google/uuid/hash.go b/vendor/github.com/google/uuid/hash.go
new file mode 100644
index 000000000..b17461631
--- /dev/null
+++ b/vendor/github.com/google/uuid/hash.go
@@ -0,0 +1,53 @@
+// Copyright 2016 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+	"crypto/md5"
+	"crypto/sha1"
+	"hash"
+)
+
+// Well known namespace IDs and UUIDs
+var (
+	NameSpaceDNS  = Must(Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8"))
+	NameSpaceURL  = Must(Parse("6ba7b811-9dad-11d1-80b4-00c04fd430c8"))
+	NameSpaceOID  = Must(Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8"))
+	NameSpaceX500 = Must(Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8"))
+	Nil           UUID // empty UUID, all zeros
+)
+
+// NewHash returns a new UUID derived from the hash of space concatenated with
+// data generated by h. The hash should be at least 16 bytes in length. The
+// first 16 bytes of the hash are used to form the UUID. The version of the
+// UUID will be the lower 4 bits of version. NewHash is used to implement
+// NewMD5 and NewSHA1.
+func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID {
+	h.Reset()
+	h.Write(space[:])
+	h.Write(data)
+	s := h.Sum(nil)
+	var uuid UUID
+	copy(uuid[:], s)
+	uuid[6] = (uuid[6] & 0x0f) | uint8((version&0xf)<<4)
+	uuid[8] = (uuid[8] & 0x3f) | 0x80 // RFC 4122 variant
+	return uuid
+}
+
+// NewMD5 returns a new MD5 (Version 3) UUID based on the
+// supplied name space and data. It is the same as calling:
+//
+// NewHash(md5.New(), space, data, 3)
+func NewMD5(space UUID, data []byte) UUID {
+	return NewHash(md5.New(), space, data, 3)
+}
+
+// NewSHA1 returns a new SHA1 (Version 5) UUID based on the
+// supplied name space and data. It is the same as calling:
+//
+// NewHash(sha1.New(), space, data, 5)
+func NewSHA1(space UUID, data []byte) UUID {
+	return NewHash(sha1.New(), space, data, 5)
+}
diff --git a/vendor/github.com/google/uuid/marshal.go b/vendor/github.com/google/uuid/marshal.go
new file mode 100644
index 000000000..7f9e0c6c0
--- /dev/null
+++ b/vendor/github.com/google/uuid/marshal.go
@@ -0,0 +1,37 @@
+// Copyright 2016 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import "fmt"
+
+// MarshalText implements encoding.TextMarshaler.
+func (uuid UUID) MarshalText() ([]byte, error) {
+	var js [36]byte
+	encodeHex(js[:], uuid)
+	return js[:], nil
+}
+
+// UnmarshalText implements encoding.TextUnmarshaler.
+func (uuid *UUID) UnmarshalText(data []byte) error {
+	id, err := ParseBytes(data)
+	if err == nil {
+		*uuid = id
+	}
+	return err
+}
+
+// MarshalBinary implements encoding.BinaryMarshaler.
+func (uuid UUID) MarshalBinary() ([]byte, error) {
+	return uuid[:], nil
+}
+
+// UnmarshalBinary implements encoding.BinaryUnmarshaler.
+func (uuid *UUID) UnmarshalBinary(data []byte) error {
+	if len(data) != 16 {
+		return fmt.Errorf("invalid UUID (got %d bytes)", len(data))
+	}
+	copy(uuid[:], data)
+	return nil
+}
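A rough sketch of the name-based constructors and the text-marshaling round trip from hash.go and marshal.go (illustrative only, not part of the vendored files; the namespace/name pair is an arbitrary example):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/google/uuid"
)

func main() {
	// Name-based UUIDs are deterministic: the same namespace and name
	// always yield the same Version 5 UUID.
	u := uuid.NewSHA1(uuid.NameSpaceDNS, []byte("example.com"))

	// MarshalText/UnmarshalText let UUIDs round-trip through encoding/json
	// as their canonical string form.
	b, err := json.Marshal(u)
	if err != nil {
		panic(err)
	}
	var back uuid.UUID
	if err := json.Unmarshal(b, &back); err != nil {
		panic(err)
	}
	fmt.Println(back == u) // true
}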
diff --git a/vendor/github.com/google/uuid/node.go b/vendor/github.com/google/uuid/node.go
new file mode 100644
index 000000000..d651a2b06
--- /dev/null
+++ b/vendor/github.com/google/uuid/node.go
@@ -0,0 +1,90 @@
+// Copyright 2016 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+	"sync"
+)
+
+var (
+	nodeMu sync.Mutex
+	ifname string  // name of interface being used
+	nodeID [6]byte // hardware for version 1 UUIDs
+	zeroID [6]byte // nodeID with only 0's
+)
+
+// NodeInterface returns the name of the interface from which the NodeID was
+// derived. The interface "user" is returned if the NodeID was set by
+// SetNodeID.
+func NodeInterface() string {
+	defer nodeMu.Unlock()
+	nodeMu.Lock()
+	return ifname
+}
+
+// SetNodeInterface selects the hardware address to be used for Version 1 UUIDs.
+// If name is "" then the first usable interface found will be used or a random
+// Node ID will be generated. If a named interface cannot be found then false
+// is returned.
+//
+// SetNodeInterface never fails when name is "".
+func SetNodeInterface(name string) bool {
+	defer nodeMu.Unlock()
+	nodeMu.Lock()
+	return setNodeInterface(name)
+}
+
+func setNodeInterface(name string) bool {
+	iname, addr := getHardwareInterface(name) // null implementation for js
+	if iname != "" && addr != nil {
+		ifname = iname
+		copy(nodeID[:], addr)
+		return true
+	}
+
+	// We found no interfaces with a valid hardware address. If name
+	// does not specify a specific interface generate a random Node ID
+	// (section 4.1.6)
+	if name == "" {
+		ifname = "random"
+		randomBits(nodeID[:])
+		return true
+	}
+	return false
+}
+
+// NodeID returns a slice of a copy of the current Node ID, setting the Node ID
+// if not already set.
+func NodeID() []byte {
+	defer nodeMu.Unlock()
+	nodeMu.Lock()
+	if nodeID == zeroID {
+		setNodeInterface("")
+	}
+	nid := nodeID
+	return nid[:]
+}
+
+// SetNodeID sets the Node ID to be used for Version 1 UUIDs. The first 6 bytes
+// of id are used. If id is less than 6 bytes then false is returned and the
+// Node ID is not set.
+func SetNodeID(id []byte) bool {
+	if len(id) < 6 {
+		return false
+	}
+	defer nodeMu.Unlock()
+	nodeMu.Lock()
+	copy(nodeID[:], id)
+	ifname = "user"
+	return true
+}
+
+// NodeID returns the 6 byte node id encoded in uuid. It returns nil if uuid is
+// not valid. The NodeID is only well defined for version 1 and 2 UUIDs.
+func (uuid UUID) NodeID() []byte {
+	var node [6]byte
+	copy(node[:], uuid[10:])
+	return node[:]
+}
diff --git a/vendor/github.com/google/uuid/node_js.go b/vendor/github.com/google/uuid/node_js.go
new file mode 100644
index 000000000..24b78edc9
--- /dev/null
+++ b/vendor/github.com/google/uuid/node_js.go
@@ -0,0 +1,12 @@
+// Copyright 2017 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build js
+
+package uuid
+
+// getHardwareInterface returns nil values for the JS version of the code.
+// This removes the "net" dependency, because it is not used in the browser.
+// Using the "net" library inflates the size of the transpiled JS code by 673k bytes.
+func getHardwareInterface(name string) (string, []byte) { return "", nil }
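A small sketch of the node-ID hooks from node.go (illustrative only, not part of the vendored files; the 6-byte address is a made-up, locally administered example):

package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	// Pin the node ID rather than deriving it from a network interface;
	// only the first 6 bytes of the slice are used.
	if !uuid.SetNodeID([]byte{0x02, 0x00, 0x5e, 0x10, 0x00, 0x01}) {
		panic("node id must be at least 6 bytes")
	}
	fmt.Println(uuid.NodeInterface()) // "user", because SetNodeID was called
	fmt.Printf("% x\n", uuid.NodeID()) // 02 00 5e 10 00 01
}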
diff --git a/vendor/github.com/google/uuid/node_net.go b/vendor/github.com/google/uuid/node_net.go
new file mode 100644
index 000000000..0cbbcddbd
--- /dev/null
+++ b/vendor/github.com/google/uuid/node_net.go
@@ -0,0 +1,33 @@
+// Copyright 2017 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !js
+
+package uuid
+
+import "net"
+
+var interfaces []net.Interface // cached list of interfaces
+
+// getHardwareInterface returns the name and hardware address of interface name.
+// If name is "" then the name and hardware address of one of the system's
+// interfaces is returned. If no interfaces are found (name does not exist or
+// there are no interfaces) then "", nil is returned.
+//
+// Only addresses of at least 6 bytes are returned.
+func getHardwareInterface(name string) (string, []byte) {
+	if interfaces == nil {
+		var err error
+		interfaces, err = net.Interfaces()
+		if err != nil {
+			return "", nil
+		}
+	}
+	for _, ifs := range interfaces {
+		if len(ifs.HardwareAddr) >= 6 && (name == "" || name == ifs.Name) {
+			return ifs.Name, ifs.HardwareAddr
+		}
+	}
+	return "", nil
+}
diff --git a/vendor/github.com/google/uuid/sql.go b/vendor/github.com/google/uuid/sql.go
new file mode 100644
index 000000000..f326b54db
--- /dev/null
+++ b/vendor/github.com/google/uuid/sql.go
@@ -0,0 +1,59 @@
+// Copyright 2016 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+	"database/sql/driver"
+	"fmt"
+)
+
+// Scan implements sql.Scanner so UUIDs can be read from databases transparently.
+// Currently, database types that map to string and []byte are supported. Please
+// consult database-specific driver documentation for matching types.
+func (uuid *UUID) Scan(src interface{}) error {
+	switch src := src.(type) {
+	case nil:
+		return nil
+
+	case string:
+		// if an empty UUID comes from a table, we return a null UUID
+		if src == "" {
+			return nil
+		}
+
+		// see Parse for required string format
+		u, err := Parse(src)
+		if err != nil {
+			return fmt.Errorf("Scan: %v", err)
+		}
+
+		*uuid = u
+
+	case []byte:
+		// if an empty UUID comes from a table, we return a null UUID
+		if len(src) == 0 {
+			return nil
+		}
+
+		// assumes a simple slice of bytes if 16 bytes
+		// otherwise attempts to parse
+		if len(src) != 16 {
+			return uuid.Scan(string(src))
+		}
+		copy((*uuid)[:], src)
+
+	default:
+		return fmt.Errorf("Scan: unable to scan type %T into UUID", src)
+	}
+
+	return nil
+}
+
+// Value implements sql.Valuer so that UUIDs can be written to databases
+// transparently. Currently, UUIDs map to strings. Please consult
+// database-specific driver documentation for matching types.
+func (uuid UUID) Value() (driver.Value, error) {
+	return uuid.String(), nil
+}
diff --git a/vendor/github.com/google/uuid/time.go b/vendor/github.com/google/uuid/time.go
new file mode 100644
index 000000000..e6ef06cdc
--- /dev/null
+++ b/vendor/github.com/google/uuid/time.go
@@ -0,0 +1,123 @@
+// Copyright 2016 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+	"encoding/binary"
+	"sync"
+	"time"
+)
+
+// A Time represents a time as the number of 100's of nanoseconds since 15 Oct
+// 1582.
+type Time int64
+
+const (
+	lillian    = 2299160          // Julian day of 15 Oct 1582
+	unix       = 2440587          // Julian day of 1 Jan 1970
+	epoch      = unix - lillian   // Days between epochs
+	g1582      = epoch * 86400    // seconds between epochs
+	g1582ns100 = g1582 * 10000000 // 100s of nanoseconds between epochs
+)
+
+var (
+	timeMu   sync.Mutex
+	lasttime uint64 // last time we returned
+	clockSeq uint16 // clock sequence for this run
+
+	timeNow = time.Now // for testing
+)
+
+// UnixTime converts t to the number of seconds and nanoseconds using the Unix
+// epoch of 1 Jan 1970.
+func (t Time) UnixTime() (sec, nsec int64) {
+	sec = int64(t - g1582ns100)
+	nsec = (sec % 10000000) * 100
+	sec /= 10000000
+	return sec, nsec
+}
+
+// GetTime returns the current Time (100s of nanoseconds since 15 Oct 1582) and
+// clock sequence as well as adjusting the clock sequence as needed. An error
+// is returned if the current time cannot be determined.
+func GetTime() (Time, uint16, error) { + defer timeMu.Unlock() + timeMu.Lock() + return getTime() +} + +func getTime() (Time, uint16, error) { + t := timeNow() + + // If we don't have a clock sequence already, set one. + if clockSeq == 0 { + setClockSequence(-1) + } + now := uint64(t.UnixNano()/100) + g1582ns100 + + // If time has gone backwards with this clock sequence then we + // increment the clock sequence + if now <= lasttime { + clockSeq = ((clockSeq + 1) & 0x3fff) | 0x8000 + } + lasttime = now + return Time(now), clockSeq, nil +} + +// ClockSequence returns the current clock sequence, generating one if not +// already set. The clock sequence is only used for Version 1 UUIDs. +// +// The uuid package does not use global static storage for the clock sequence or +// the last time a UUID was generated. Unless SetClockSequence is used, a new +// random clock sequence is generated the first time a clock sequence is +// requested by ClockSequence, GetTime, or NewUUID. (section 4.2.1.1) +func ClockSequence() int { + defer timeMu.Unlock() + timeMu.Lock() + return clockSequence() +} + +func clockSequence() int { + if clockSeq == 0 { + setClockSequence(-1) + } + return int(clockSeq & 0x3fff) +} + +// SetClockSequence sets the clock sequence to the lower 14 bits of seq. Setting to +// -1 causes a new sequence to be generated. +func SetClockSequence(seq int) { + defer timeMu.Unlock() + timeMu.Lock() + setClockSequence(seq) +} + +func setClockSequence(seq int) { + if seq == -1 { + var b [2]byte + randomBits(b[:]) // clock sequence + seq = int(b[0])<<8 | int(b[1]) + } + oldSeq := clockSeq + clockSeq = uint16(seq&0x3fff) | 0x8000 // Set our variant + if oldSeq != clockSeq { + lasttime = 0 + } +} + +// Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in +// uuid. The time is only defined for version 1 and 2 UUIDs. +func (uuid UUID) Time() Time { + time := int64(binary.BigEndian.Uint32(uuid[0:4])) + time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32 + time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48 + return Time(time) +} + +// ClockSequence returns the clock sequence encoded in uuid. +// The clock sequence is only well defined for version 1 and 2 UUIDs. +func (uuid UUID) ClockSequence() int { + return int(binary.BigEndian.Uint16(uuid[8:10])) & 0x3fff +} diff --git a/vendor/github.com/google/uuid/util.go b/vendor/github.com/google/uuid/util.go new file mode 100644 index 000000000..5ea6c7378 --- /dev/null +++ b/vendor/github.com/google/uuid/util.go @@ -0,0 +1,43 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "io" +) + +// randomBits completely fills slice b with random data. +func randomBits(b []byte) { + if _, err := io.ReadFull(rander, b); err != nil { + panic(err.Error()) // rand should never fail + } +} + +// xvalues returns the value of a byte as a hexadecimal digit or 255. 
+var xvalues = [256]byte{ + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255, + 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, +} + +// xtob converts hex characters x1 and x2 into a byte. +func xtob(x1, x2 byte) (byte, bool) { + b1 := xvalues[x1] + b2 := xvalues[x2] + return (b1 << 4) | b2, b1 != 255 && b2 != 255 +} diff --git a/vendor/github.com/google/uuid/uuid.go b/vendor/github.com/google/uuid/uuid.go new file mode 100644 index 000000000..524404cc5 --- /dev/null +++ b/vendor/github.com/google/uuid/uuid.go @@ -0,0 +1,245 @@ +// Copyright 2018 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "bytes" + "crypto/rand" + "encoding/hex" + "errors" + "fmt" + "io" + "strings" +) + +// A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC +// 4122. +type UUID [16]byte + +// A Version represents a UUID's version. +type Version byte + +// A Variant represents a UUID's variant. +type Variant byte + +// Constants returned by Variant. +const ( + Invalid = Variant(iota) // Invalid UUID + RFC4122 // The variant specified in RFC4122 + Reserved // Reserved, NCS backward compatibility. + Microsoft // Reserved, Microsoft Corporation backward compatibility. + Future // Reserved for future definition. +) + +var rander = rand.Reader // random function + +// Parse decodes s into a UUID or returns an error. Both the standard UUID +// forms of xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and +// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded as well as the +// Microsoft encoding {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} and the raw hex +// encoding: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx. 
+func Parse(s string) (UUID, error) { + var uuid UUID + switch len(s) { + // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + case 36: + + // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + case 36 + 9: + if strings.ToLower(s[:9]) != "urn:uuid:" { + return uuid, fmt.Errorf("invalid urn prefix: %q", s[:9]) + } + s = s[9:] + + // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} + case 36 + 2: + s = s[1:] + + // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx + case 32: + var ok bool + for i := range uuid { + uuid[i], ok = xtob(s[i*2], s[i*2+1]) + if !ok { + return uuid, errors.New("invalid UUID format") + } + } + return uuid, nil + default: + return uuid, fmt.Errorf("invalid UUID length: %d", len(s)) + } + // s is now at least 36 bytes long + // it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' { + return uuid, errors.New("invalid UUID format") + } + for i, x := range [16]int{ + 0, 2, 4, 6, + 9, 11, + 14, 16, + 19, 21, + 24, 26, 28, 30, 32, 34} { + v, ok := xtob(s[x], s[x+1]) + if !ok { + return uuid, errors.New("invalid UUID format") + } + uuid[i] = v + } + return uuid, nil +} + +// ParseBytes is like Parse, except it parses a byte slice instead of a string. +func ParseBytes(b []byte) (UUID, error) { + var uuid UUID + switch len(b) { + case 36: // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + case 36 + 9: // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + if !bytes.Equal(bytes.ToLower(b[:9]), []byte("urn:uuid:")) { + return uuid, fmt.Errorf("invalid urn prefix: %q", b[:9]) + } + b = b[9:] + case 36 + 2: // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} + b = b[1:] + case 32: // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx + var ok bool + for i := 0; i < 32; i += 2 { + uuid[i/2], ok = xtob(b[i], b[i+1]) + if !ok { + return uuid, errors.New("invalid UUID format") + } + } + return uuid, nil + default: + return uuid, fmt.Errorf("invalid UUID length: %d", len(b)) + } + // s is now at least 36 bytes long + // it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + if b[8] != '-' || b[13] != '-' || b[18] != '-' || b[23] != '-' { + return uuid, errors.New("invalid UUID format") + } + for i, x := range [16]int{ + 0, 2, 4, 6, + 9, 11, + 14, 16, + 19, 21, + 24, 26, 28, 30, 32, 34} { + v, ok := xtob(b[x], b[x+1]) + if !ok { + return uuid, errors.New("invalid UUID format") + } + uuid[i] = v + } + return uuid, nil +} + +// MustParse is like Parse but panics if the string cannot be parsed. +// It simplifies safe initialization of global variables holding compiled UUIDs. +func MustParse(s string) UUID { + uuid, err := Parse(s) + if err != nil { + panic(`uuid: Parse(` + s + `): ` + err.Error()) + } + return uuid +} + +// FromBytes creates a new UUID from a byte slice. Returns an error if the slice +// does not have a length of 16. The bytes are copied from the slice. +func FromBytes(b []byte) (uuid UUID, err error) { + err = uuid.UnmarshalBinary(b) + return uuid, err +} + +// Must returns uuid if err is nil and panics otherwise. +func Must(uuid UUID, err error) UUID { + if err != nil { + panic(err) + } + return uuid +} + +// String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx +// , or "" if uuid is invalid. +func (uuid UUID) String() string { + var buf [36]byte + encodeHex(buf[:], uuid) + return string(buf[:]) +} + +// URN returns the RFC 2141 URN form of uuid, +// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx, or "" if uuid is invalid. 
+func (uuid UUID) URN() string {
+	var buf [36 + 9]byte
+	copy(buf[:], "urn:uuid:")
+	encodeHex(buf[9:], uuid)
+	return string(buf[:])
+}
+
+func encodeHex(dst []byte, uuid UUID) {
+	hex.Encode(dst, uuid[:4])
+	dst[8] = '-'
+	hex.Encode(dst[9:13], uuid[4:6])
+	dst[13] = '-'
+	hex.Encode(dst[14:18], uuid[6:8])
+	dst[18] = '-'
+	hex.Encode(dst[19:23], uuid[8:10])
+	dst[23] = '-'
+	hex.Encode(dst[24:], uuid[10:])
+}
+
+// Variant returns the variant encoded in uuid.
+func (uuid UUID) Variant() Variant {
+	switch {
+	case (uuid[8] & 0xc0) == 0x80:
+		return RFC4122
+	case (uuid[8] & 0xe0) == 0xc0:
+		return Microsoft
+	case (uuid[8] & 0xe0) == 0xe0:
+		return Future
+	default:
+		return Reserved
+	}
+}
+
+// Version returns the version of uuid.
+func (uuid UUID) Version() Version {
+	return Version(uuid[6] >> 4)
+}
+
+func (v Version) String() string {
+	if v > 15 {
+		return fmt.Sprintf("BAD_VERSION_%d", v)
+	}
+	return fmt.Sprintf("VERSION_%d", v)
+}
+
+func (v Variant) String() string {
+	switch v {
+	case RFC4122:
+		return "RFC4122"
+	case Reserved:
+		return "Reserved"
+	case Microsoft:
+		return "Microsoft"
+	case Future:
+		return "Future"
+	case Invalid:
+		return "Invalid"
+	}
+	return fmt.Sprintf("BadVariant%d", int(v))
+}
+
+// SetRand sets the random number generator to r, which implements io.Reader.
+// If r.Read returns an error when the package requests random data then
+// a panic will be issued.
+//
+// Calling SetRand with nil sets the random number generator to the default
+// generator.
+func SetRand(r io.Reader) {
+	if r == nil {
+		rander = rand.Reader
+		return
+	}
+	rander = r
+}
diff --git a/vendor/github.com/google/uuid/version1.go b/vendor/github.com/google/uuid/version1.go
new file mode 100644
index 000000000..199a1ac65
--- /dev/null
+++ b/vendor/github.com/google/uuid/version1.go
@@ -0,0 +1,44 @@
+// Copyright 2016 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+	"encoding/binary"
+)
+
+// NewUUID returns a Version 1 UUID based on the current NodeID and clock
+// sequence, and the current time. If the NodeID has not been set by SetNodeID
+// or SetNodeInterface then it will be set automatically. If the NodeID cannot
+// be set, NewUUID returns nil. If clock sequence has not been set by
+// SetClockSequence then it will be set automatically. If GetTime fails to
+// return the current time, NewUUID returns nil and an error.
+//
+// In most cases, New should be used.
+func NewUUID() (UUID, error) {
+	nodeMu.Lock()
+	if nodeID == zeroID {
+		setNodeInterface("")
+	}
+	nodeMu.Unlock()
+
+	var uuid UUID
+	now, seq, err := GetTime()
+	if err != nil {
+		return uuid, err
+	}
+
+	timeLow := uint32(now & 0xffffffff)
+	timeMid := uint16((now >> 32) & 0xffff)
+	timeHi := uint16((now >> 48) & 0x0fff)
+	timeHi |= 0x1000 // Version 1
+
+	binary.BigEndian.PutUint32(uuid[0:], timeLow)
+	binary.BigEndian.PutUint16(uuid[4:], timeMid)
+	binary.BigEndian.PutUint16(uuid[6:], timeHi)
+	binary.BigEndian.PutUint16(uuid[8:], seq)
+	copy(uuid[10:], nodeID[:])
+
+	return uuid, nil
+}
diff --git a/vendor/github.com/google/uuid/version4.go b/vendor/github.com/google/uuid/version4.go
new file mode 100644
index 000000000..84af91c9f
--- /dev/null
+++ b/vendor/github.com/google/uuid/version4.go
@@ -0,0 +1,38 @@
+// Copyright 2016 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import "io"
+
+// New creates a new random UUID or panics. New is equivalent to
+// the expression
+//
+// uuid.Must(uuid.NewRandom())
+func New() UUID {
+	return Must(NewRandom())
+}
+
+// NewRandom returns a Random (Version 4) UUID.
+//
+// The strength of the UUIDs is based on the strength of the crypto/rand
+// package.
+//
+// A note about uniqueness derived from the UUID Wikipedia entry:
+//
+// Randomly generated UUIDs have 122 random bits. One's annual risk of being
+// hit by a meteorite is estimated to be one chance in 17 billion, which
+// means the probability is about 0.00000000006 (6 × 10⁻¹¹),
+// equivalent to the odds of creating a few tens of trillions of UUIDs in a
+// year and having one duplicate.
+func NewRandom() (UUID, error) {
+	var uuid UUID
+	_, err := io.ReadFull(rander, uuid[:])
+	if err != nil {
+		return Nil, err
+	}
+	uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4
+	uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10
+	return uuid, nil
+}
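A brief sketch tying the uuid constructors and parsers above together (illustrative only, not part of the vendored files):

package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	u := uuid.New() // Version 4; panics only if crypto/rand fails
	fmt.Println(u.Version(), u.Variant()) // VERSION_4 RFC4122

	// Parse accepts the canonical, urn:uuid, braced, and raw-hex forms.
	parsed, err := uuid.Parse("urn:uuid:" + u.String())
	if err != nil {
		panic(err)
	}
	fmt.Println(parsed == u) // true
}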
diff --git a/vendor/github.com/googleapis/gax-go/LICENSE b/vendor/github.com/googleapis/gax-go/LICENSE
new file mode 100644
index 000000000..6d16b6578
--- /dev/null
+++ b/vendor/github.com/googleapis/gax-go/LICENSE
@@ -0,0 +1,27 @@
+Copyright 2016, Google Inc.
+All rights reserved.
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/googleapis/gax-go/v2/call_option.go b/vendor/github.com/googleapis/gax-go/v2/call_option.go
new file mode 100644
index 000000000..b1d53dd19
--- /dev/null
+++ b/vendor/github.com/googleapis/gax-go/v2/call_option.go
@@ -0,0 +1,161 @@
+// Copyright 2016, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package gax
+
+import (
+	"math/rand"
+	"time"
+
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+)
+
+// CallOption is an option used by Invoke to control behaviors of RPC calls.
+// CallOption works by modifying relevant fields of CallSettings.
+type CallOption interface {
+	// Resolve applies the option by modifying cs.
+	Resolve(cs *CallSettings)
+}
+
+// Retryer is used by Invoke to determine retry behavior.
+type Retryer interface {
+	// Retry reports whether a request should be retried and how long to pause before retrying
+	// if the previous attempt returned with err. Invoke never calls Retry with nil error.
+	Retry(err error) (pause time.Duration, shouldRetry bool)
+}
+
+type retryerOption func() Retryer
+
+func (o retryerOption) Resolve(s *CallSettings) {
+	s.Retry = o
+}
+
+// WithRetry sets CallSettings.Retry to fn.
+func WithRetry(fn func() Retryer) CallOption {
+	return retryerOption(fn)
+}
+
+// OnCodes returns a Retryer that retries if and only if
+// the previous attempt returns a GRPC error whose error code is stored in cc.
+// Pause times between retries are specified by bo.
+//
+// bo is only used for its parameters; each Retryer has its own copy.
+func OnCodes(cc []codes.Code, bo Backoff) Retryer {
+	return &boRetryer{
+		backoff: bo,
+		codes:   append([]codes.Code(nil), cc...),
+	}
+}
+
+type boRetryer struct {
+	backoff Backoff
+	codes   []codes.Code
+}
+
+func (r *boRetryer) Retry(err error) (time.Duration, bool) {
+	st, ok := status.FromError(err)
+	if !ok {
+		return 0, false
+	}
+	c := st.Code()
+	for _, rc := range r.codes {
+		if c == rc {
+			return r.backoff.Pause(), true
+		}
+	}
+	return 0, false
+}
+
+// Backoff implements exponential backoff.
+// The wait time between retries is a random value between 0 and the "retry envelope".
+// The envelope starts at Initial and increases by the factor of Multiplier every retry,
+// but is capped at Max.
+type Backoff struct {
+	// Initial is the initial value of the retry envelope, defaults to 1 second.
+	Initial time.Duration
+
+	// Max is the maximum value of the retry envelope, defaults to 30 seconds.
+	Max time.Duration
+
+	// Multiplier is the factor by which the retry envelope increases.
+	// It should be greater than 1 and defaults to 2.
+ Multiplier float64 + + // cur is the current retry envelope + cur time.Duration +} + +// Pause returns the next time.Duration that the caller should use to backoff. +func (bo *Backoff) Pause() time.Duration { + if bo.Initial == 0 { + bo.Initial = time.Second + } + if bo.cur == 0 { + bo.cur = bo.Initial + } + if bo.Max == 0 { + bo.Max = 30 * time.Second + } + if bo.Multiplier < 1 { + bo.Multiplier = 2 + } + // Select a duration between 1ns and the current max. It might seem + // counterintuitive to have so much jitter, but + // https://www.awsarchitectureblog.com/2015/03/backoff.html argues that + // that is the best strategy. + d := time.Duration(1 + rand.Int63n(int64(bo.cur))) + bo.cur = time.Duration(float64(bo.cur) * bo.Multiplier) + if bo.cur > bo.Max { + bo.cur = bo.Max + } + return d +} + +type grpcOpt []grpc.CallOption + +func (o grpcOpt) Resolve(s *CallSettings) { + s.GRPC = o +} + +// WithGRPCOptions allows passing gRPC call options during client creation. +func WithGRPCOptions(opt ...grpc.CallOption) CallOption { + return grpcOpt(append([]grpc.CallOption(nil), opt...)) +} + +// CallSettings allow fine-grained control over how calls are made. +type CallSettings struct { + // Retry returns a Retryer to be used to control retry logic of a method call. + // If Retry is nil or the returned Retryer is nil, the call will not be retried. + Retry func() Retryer + + // CallOptions to be forwarded to GRPC. + GRPC []grpc.CallOption +} diff --git a/vendor/github.com/googleapis/gax-go/v2/gax.go b/vendor/github.com/googleapis/gax-go/v2/gax.go new file mode 100644 index 000000000..3fd1b0b84 --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/v2/gax.go @@ -0,0 +1,39 @@ +// Copyright 2016, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Package gax contains a set of modules which aid the development of APIs +// for clients and servers based on gRPC and Google API conventions. +// +// Application code will rarely need to use this library directly. 
+// However, code generated automatically from API definition files can use it +// to simplify code generation and to provide more convenient and idiomatic API surfaces. +package gax + +// Version specifies the gax-go version being used. +const Version = "2.0.4" diff --git a/vendor/github.com/golang/protobuf/proto/deprecated.go b/vendor/github.com/googleapis/gax-go/v2/header.go similarity index 60% rename from vendor/github.com/golang/protobuf/proto/deprecated.go rename to vendor/github.com/googleapis/gax-go/v2/header.go index 35b882c09..139371a0b 100644 --- a/vendor/github.com/golang/protobuf/proto/deprecated.go +++ b/vendor/github.com/googleapis/gax-go/v2/header.go @@ -1,7 +1,5 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2018 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf +// Copyright 2018, Google Inc. +// All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are @@ -29,35 +27,27 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -package proto - -import "errors" - -// Deprecated: do not use. -type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 } +package gax -// Deprecated: do not use. -func GetStats() Stats { return Stats{} } +import "bytes" -// Deprecated: do not use. -func MarshalMessageSet(interface{}) ([]byte, error) { - return nil, errors.New("proto: not implemented") -} - -// Deprecated: do not use. -func UnmarshalMessageSet([]byte, interface{}) error { - return errors.New("proto: not implemented") -} - -// Deprecated: do not use. -func MarshalMessageSetJSON(interface{}) ([]byte, error) { - return nil, errors.New("proto: not implemented") -} - -// Deprecated: do not use. -func UnmarshalMessageSetJSON([]byte, interface{}) error { - return errors.New("proto: not implemented") +// XGoogHeader is for use by the Google Cloud Libraries only. +// +// XGoogHeader formats key-value pairs. +// The resulting string is suitable for x-goog-api-client header. +func XGoogHeader(keyval ...string) string { + if len(keyval) == 0 { + return "" + } + if len(keyval)%2 != 0 { + panic("gax.Header: odd argument count") + } + var buf bytes.Buffer + for i := 0; i < len(keyval); i += 2 { + buf.WriteByte(' ') + buf.WriteString(keyval[i]) + buf.WriteByte('/') + buf.WriteString(keyval[i+1]) + } + return buf.String()[1:] } - -// Deprecated: do not use. -func RegisterMessageSetType(Message, int32, string) {} diff --git a/vendor/github.com/googleapis/gax-go/v2/invoke.go b/vendor/github.com/googleapis/gax-go/v2/invoke.go new file mode 100644 index 000000000..fe31dd004 --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/v2/invoke.go @@ -0,0 +1,99 @@ +// Copyright 2016, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. 
nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package gax
+
+import (
+	"context"
+	"strings"
+	"time"
+)
+
+// APICall is a user defined call stub.
+type APICall func(context.Context, CallSettings) error
+
+// Invoke calls the given APICall,
+// performing retries as specified by opts, if any.
+func Invoke(ctx context.Context, call APICall, opts ...CallOption) error {
+	var settings CallSettings
+	for _, opt := range opts {
+		opt.Resolve(&settings)
+	}
+	return invoke(ctx, call, settings, Sleep)
+}
+
+// Sleep is similar to time.Sleep, but it can be interrupted by ctx.Done() closing.
+// If interrupted, Sleep returns ctx.Err().
+func Sleep(ctx context.Context, d time.Duration) error {
+	t := time.NewTimer(d)
+	select {
+	case <-ctx.Done():
+		t.Stop()
+		return ctx.Err()
+	case <-t.C:
+		return nil
+	}
+}
+
+type sleeper func(ctx context.Context, d time.Duration) error
+
+// invoke implements Invoke, taking an additional sleeper argument for testing.
+func invoke(ctx context.Context, call APICall, settings CallSettings, sp sleeper) error {
+	var retryer Retryer
+	for {
+		err := call(ctx, settings)
+		if err == nil {
+			return nil
+		}
+		if settings.Retry == nil {
+			return err
+		}
+		// Never retry permanent certificate errors. (e.g. if ca-certificates
+		// are not installed). We should only make very few, targeted
+		// exceptions: many (other) status=Unavailable should be retried, such
+		// as if there's a network hiccup, or the internet goes out for a
+		// minute. This is also why here we are doing string parsing instead of
+		// simply making Unavailable a non-retried code elsewhere.
+ if strings.Contains(err.Error(), "x509: certificate signed by unknown authority") { + return err + } + if retryer == nil { + if r := settings.Retry(); r != nil { + retryer = r + } else { + return err + } + } + if d, ok := retryer.Retry(err); !ok { + return err + } else if err = sp(ctx, d); err != nil { + return err + } + } +} diff --git a/vendor/github.com/gregjones/httpcache/LICENSE.txt b/vendor/github.com/gregjones/httpcache/LICENSE.txt new file mode 100644 index 000000000..81316beb0 --- /dev/null +++ b/vendor/github.com/gregjones/httpcache/LICENSE.txt @@ -0,0 +1,7 @@ +Copyright © 2012 Greg Jones (greg.jones@gmail.com) + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/gregjones/httpcache/diskcache/diskcache.go b/vendor/github.com/gregjones/httpcache/diskcache/diskcache.go new file mode 100644 index 000000000..42e3129d8 --- /dev/null +++ b/vendor/github.com/gregjones/httpcache/diskcache/diskcache.go @@ -0,0 +1,61 @@ +// Package diskcache provides an implementation of httpcache.Cache that uses the diskv package +// to supplement an in-memory map with persistent storage +// +package diskcache + +import ( + "bytes" + "crypto/md5" + "encoding/hex" + "github.com/peterbourgon/diskv" + "io" +) + +// Cache is an implementation of httpcache.Cache that supplements the in-memory map with persistent storage +type Cache struct { + d *diskv.Diskv +} + +// Get returns the response corresponding to key if present +func (c *Cache) Get(key string) (resp []byte, ok bool) { + key = keyToFilename(key) + resp, err := c.d.Read(key) + if err != nil { + return []byte{}, false + } + return resp, true +} + +// Set saves a response to the cache as key +func (c *Cache) Set(key string, resp []byte) { + key = keyToFilename(key) + c.d.WriteStream(key, bytes.NewReader(resp), true) +} + +// Delete removes the response with key from the cache +func (c *Cache) Delete(key string) { + key = keyToFilename(key) + c.d.Erase(key) +} + +func keyToFilename(key string) string { + h := md5.New() + io.WriteString(h, key) + return hex.EncodeToString(h.Sum(nil)) +} + +// New returns a new Cache that will store files in basePath +func New(basePath string) *Cache { + return &Cache{ + d: diskv.New(diskv.Options{ + BasePath: basePath, + CacheSizeMax: 100 * 1024 * 1024, // 100MB + }), + } +} + +// NewWithDiskv returns a new Cache using the provided Diskv as underlying +// storage. 
+func NewWithDiskv(d *diskv.Diskv) *Cache {
+	return &Cache{d}
+}
diff --git a/vendor/github.com/gregjones/httpcache/httpcache.go b/vendor/github.com/gregjones/httpcache/httpcache.go
new file mode 100644
index 000000000..b41a63d1f
--- /dev/null
+++ b/vendor/github.com/gregjones/httpcache/httpcache.go
@@ -0,0 +1,551 @@
+// Package httpcache provides an http.RoundTripper implementation that works as a
+// mostly RFC-compliant cache for http responses.
+//
+// It is only suitable for use as a 'private' cache (i.e. for a web-browser or an API-client
+// and not for a shared proxy).
+//
+package httpcache
+
+import (
+	"bufio"
+	"bytes"
+	"errors"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"net/http/httputil"
+	"strings"
+	"sync"
+	"time"
+)
+
+const (
+	stale = iota
+	fresh
+	transparent
+	// XFromCache is the header added to responses that are returned from the cache
+	XFromCache = "X-From-Cache"
+)
+
+// A Cache interface is used by the Transport to store and retrieve responses.
+type Cache interface {
+	// Get returns the []byte representation of a cached response and a bool
+	// set to true if the value isn't empty
+	Get(key string) (responseBytes []byte, ok bool)
+	// Set stores the []byte representation of a response against a key
+	Set(key string, responseBytes []byte)
+	// Delete removes the value associated with the key
+	Delete(key string)
+}
+
+// cacheKey returns the cache key for req.
+func cacheKey(req *http.Request) string {
+	if req.Method == http.MethodGet {
+		return req.URL.String()
+	} else {
+		return req.Method + " " + req.URL.String()
+	}
+}
+
+// CachedResponse returns the cached http.Response for req if present, and nil
+// otherwise.
+func CachedResponse(c Cache, req *http.Request) (resp *http.Response, err error) {
+	cachedVal, ok := c.Get(cacheKey(req))
+	if !ok {
+		return
+	}
+
+	b := bytes.NewBuffer(cachedVal)
+	return http.ReadResponse(bufio.NewReader(b), req)
+}
+
+// MemoryCache is an implementation of Cache that stores responses in an in-memory map.
+type MemoryCache struct { + mu sync.RWMutex + items map[string][]byte +} + +// Get returns the []byte representation of the response and true if present, false if not +func (c *MemoryCache) Get(key string) (resp []byte, ok bool) { + c.mu.RLock() + resp, ok = c.items[key] + c.mu.RUnlock() + return resp, ok +} + +// Set saves response resp to the cache with key +func (c *MemoryCache) Set(key string, resp []byte) { + c.mu.Lock() + c.items[key] = resp + c.mu.Unlock() +} + +// Delete removes key from the cache +func (c *MemoryCache) Delete(key string) { + c.mu.Lock() + delete(c.items, key) + c.mu.Unlock() +} + +// NewMemoryCache returns a new Cache that will store items in an in-memory map +func NewMemoryCache() *MemoryCache { + c := &MemoryCache{items: map[string][]byte{}} + return c +} + +// Transport is an implementation of http.RoundTripper that will return values from a cache +// where possible (avoiding a network request) and will additionally add validators (etag/if-modified-since) +// to repeated requests allowing servers to return 304 / Not Modified +type Transport struct { + // The RoundTripper interface actually used to make requests + // If nil, http.DefaultTransport is used + Transport http.RoundTripper + Cache Cache + // If true, responses returned from the cache will be given an extra header, X-From-Cache + MarkCachedResponses bool +} + +// NewTransport returns a new Transport with the +// provided Cache implementation and MarkCachedResponses set to true +func NewTransport(c Cache) *Transport { + return &Transport{Cache: c, MarkCachedResponses: true} +} + +// Client returns an *http.Client that caches responses. +func (t *Transport) Client() *http.Client { + return &http.Client{Transport: t} +} + +// varyMatches will return false unless all of the cached values for the headers listed in Vary +// match the new request +func varyMatches(cachedResp *http.Response, req *http.Request) bool { + for _, header := range headerAllCommaSepValues(cachedResp.Header, "vary") { + header = http.CanonicalHeaderKey(header) + if header != "" && req.Header.Get(header) != cachedResp.Header.Get("X-Varied-"+header) { + return false + } + } + return true +} + +// RoundTrip takes a Request and returns a Response +// +// If there is a fresh Response already in cache, then it will be returned without connecting to +// the server. +// +// If there is a stale Response, then any validators it contains will be set on the new request +// to give the server a chance to respond with NotModified. If this happens, then the cached Response +// will be returned. 
+func (t *Transport) RoundTrip(req *http.Request) (resp *http.Response, err error) { + cacheKey := cacheKey(req) + cacheable := (req.Method == "GET" || req.Method == "HEAD") && req.Header.Get("range") == "" + var cachedResp *http.Response + if cacheable { + cachedResp, err = CachedResponse(t.Cache, req) + } else { + // Need to invalidate an existing value + t.Cache.Delete(cacheKey) + } + + transport := t.Transport + if transport == nil { + transport = http.DefaultTransport + } + + if cacheable && cachedResp != nil && err == nil { + if t.MarkCachedResponses { + cachedResp.Header.Set(XFromCache, "1") + } + + if varyMatches(cachedResp, req) { + // Can only use cached value if the new request doesn't Vary significantly + freshness := getFreshness(cachedResp.Header, req.Header) + if freshness == fresh { + return cachedResp, nil + } + + if freshness == stale { + var req2 *http.Request + // Add validators if caller hasn't already done so + etag := cachedResp.Header.Get("etag") + if etag != "" && req.Header.Get("etag") == "" { + req2 = cloneRequest(req) + req2.Header.Set("if-none-match", etag) + } + lastModified := cachedResp.Header.Get("last-modified") + if lastModified != "" && req.Header.Get("last-modified") == "" { + if req2 == nil { + req2 = cloneRequest(req) + } + req2.Header.Set("if-modified-since", lastModified) + } + if req2 != nil { + req = req2 + } + } + } + + resp, err = transport.RoundTrip(req) + if err == nil && req.Method == "GET" && resp.StatusCode == http.StatusNotModified { + // Replace the 304 response with the one from cache, but update with some new headers + endToEndHeaders := getEndToEndHeaders(resp.Header) + for _, header := range endToEndHeaders { + cachedResp.Header[header] = resp.Header[header] + } + resp = cachedResp + } else if (err != nil || (cachedResp != nil && resp.StatusCode >= 500)) && + req.Method == "GET" && canStaleOnError(cachedResp.Header, req.Header) { + // In case of transport failure and stale-if-error activated, returns cached content + // when available + return cachedResp, nil + } else { + if err != nil || resp.StatusCode != http.StatusOK { + t.Cache.Delete(cacheKey) + } + if err != nil { + return nil, err + } + } + } else { + reqCacheControl := parseCacheControl(req.Header) + if _, ok := reqCacheControl["only-if-cached"]; ok { + resp = newGatewayTimeoutResponse(req) + } else { + resp, err = transport.RoundTrip(req) + if err != nil { + return nil, err + } + } + } + + if cacheable && canStore(parseCacheControl(req.Header), parseCacheControl(resp.Header)) { + for _, varyKey := range headerAllCommaSepValues(resp.Header, "vary") { + varyKey = http.CanonicalHeaderKey(varyKey) + fakeHeader := "X-Varied-" + varyKey + reqValue := req.Header.Get(varyKey) + if reqValue != "" { + resp.Header.Set(fakeHeader, reqValue) + } + } + switch req.Method { + case "GET": + // Delay caching until EOF is reached. + resp.Body = &cachingReadCloser{ + R: resp.Body, + OnEOF: func(r io.Reader) { + resp := *resp + resp.Body = ioutil.NopCloser(r) + respBytes, err := httputil.DumpResponse(&resp, true) + if err == nil { + t.Cache.Set(cacheKey, respBytes) + } + }, + } + default: + respBytes, err := httputil.DumpResponse(resp, true) + if err == nil { + t.Cache.Set(cacheKey, respBytes) + } + } + } else { + t.Cache.Delete(cacheKey) + } + return resp, nil +} + +// ErrNoDateHeader indicates that the HTTP headers contained no Date header. +var ErrNoDateHeader = errors.New("no Date header") + +// Date parses and returns the value of the Date header. 
+func Date(respHeaders http.Header) (date time.Time, err error) {
+	dateHeader := respHeaders.Get("date")
+	if dateHeader == "" {
+		err = ErrNoDateHeader
+		return
+	}
+
+	return time.Parse(time.RFC1123, dateHeader)
+}
+
+type realClock struct{}
+
+func (c *realClock) since(d time.Time) time.Duration {
+	return time.Since(d)
+}
+
+type timer interface {
+	since(d time.Time) time.Duration
+}
+
+var clock timer = &realClock{}
+
+// getFreshness will return one of fresh/stale/transparent based on the cache-control
+// values of the request and the response
+//
+// fresh indicates the response can be returned
+// stale indicates that the response needs validating before it is returned
+// transparent indicates the response should not be used to fulfil the request
+//
+// Because this is only a private cache, 'public' and 'private' in cache-control aren't
+// significant. Similarly, s-maxage isn't used.
+func getFreshness(respHeaders, reqHeaders http.Header) (freshness int) {
+	respCacheControl := parseCacheControl(respHeaders)
+	reqCacheControl := parseCacheControl(reqHeaders)
+	if _, ok := reqCacheControl["no-cache"]; ok {
+		return transparent
+	}
+	if _, ok := respCacheControl["no-cache"]; ok {
+		return stale
+	}
+	if _, ok := reqCacheControl["only-if-cached"]; ok {
+		return fresh
+	}
+
+	date, err := Date(respHeaders)
+	if err != nil {
+		return stale
+	}
+	currentAge := clock.since(date)
+
+	var lifetime time.Duration
+	var zeroDuration time.Duration
+
+	// If a response includes both an Expires header and a max-age directive,
+	// the max-age directive overrides the Expires header, even if the Expires header is more restrictive.
+	if maxAge, ok := respCacheControl["max-age"]; ok {
+		lifetime, err = time.ParseDuration(maxAge + "s")
+		if err != nil {
+			lifetime = zeroDuration
+		}
+	} else {
+		expiresHeader := respHeaders.Get("Expires")
+		if expiresHeader != "" {
+			expires, err := time.Parse(time.RFC1123, expiresHeader)
+			if err != nil {
+				lifetime = zeroDuration
+			} else {
+				lifetime = expires.Sub(date)
+			}
+		}
+	}
+
+	if maxAge, ok := reqCacheControl["max-age"]; ok {
+		// the client is willing to accept a response whose age is no greater than the specified time in seconds
+		lifetime, err = time.ParseDuration(maxAge + "s")
+		if err != nil {
+			lifetime = zeroDuration
+		}
+	}
+	if minfresh, ok := reqCacheControl["min-fresh"]; ok {
+		// the client wants a response that will still be fresh for at least the specified number of seconds.
+		minfreshDuration, err := time.ParseDuration(minfresh + "s")
+		if err == nil {
+			currentAge = time.Duration(currentAge + minfreshDuration)
+		}
+	}
+
+	if maxstale, ok := reqCacheControl["max-stale"]; ok {
+		// Indicates that the client is willing to accept a response that has exceeded its expiration time.
+		// If max-stale is assigned a value, then the client is willing to accept a response that has exceeded
+		// its expiration time by no more than the specified number of seconds.
+		// If no value is assigned to max-stale, then the client is willing to accept a stale response of any age.
+		//
+		// Responses served only because of a max-stale value are supposed to have a Warning header added to them,
+		// but that seems like a hassle, and is it actually useful? If so, then there needs to be a different
+		// return-value available here.
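+		//
+		// Worked example (sketch): with "Cache-Control: max-stale=120" on the
+		// request and a response whose lifetime ran out 60s ago, subtracting
+		// 120s from currentAge below makes lifetime > currentAge hold again,
+		// so the stale response is still reported as fresh.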
+ if maxstale == "" { + return fresh + } + maxstaleDuration, err := time.ParseDuration(maxstale + "s") + if err == nil { + currentAge = time.Duration(currentAge - maxstaleDuration) + } + } + + if lifetime > currentAge { + return fresh + } + + return stale +} + +// Returns true if either the request or the response includes the stale-if-error +// cache control extension: https://tools.ietf.org/html/rfc5861 +func canStaleOnError(respHeaders, reqHeaders http.Header) bool { + respCacheControl := parseCacheControl(respHeaders) + reqCacheControl := parseCacheControl(reqHeaders) + + var err error + lifetime := time.Duration(-1) + + if staleMaxAge, ok := respCacheControl["stale-if-error"]; ok { + if staleMaxAge != "" { + lifetime, err = time.ParseDuration(staleMaxAge + "s") + if err != nil { + return false + } + } else { + return true + } + } + if staleMaxAge, ok := reqCacheControl["stale-if-error"]; ok { + if staleMaxAge != "" { + lifetime, err = time.ParseDuration(staleMaxAge + "s") + if err != nil { + return false + } + } else { + return true + } + } + + if lifetime >= 0 { + date, err := Date(respHeaders) + if err != nil { + return false + } + currentAge := clock.since(date) + if lifetime > currentAge { + return true + } + } + + return false +} + +func getEndToEndHeaders(respHeaders http.Header) []string { + // These headers are always hop-by-hop + hopByHopHeaders := map[string]struct{}{ + "Connection": {}, + "Keep-Alive": {}, + "Proxy-Authenticate": {}, + "Proxy-Authorization": {}, + "Te": {}, + "Trailers": {}, + "Transfer-Encoding": {}, + "Upgrade": {}, + } + + for _, extra := range strings.Split(respHeaders.Get("connection"), ",") { + // any header listed in connection, if present, is also considered hop-by-hop + if strings.Trim(extra, " ") != "" { + hopByHopHeaders[http.CanonicalHeaderKey(extra)] = struct{}{} + } + } + endToEndHeaders := []string{} + for respHeader := range respHeaders { + if _, ok := hopByHopHeaders[respHeader]; !ok { + endToEndHeaders = append(endToEndHeaders, respHeader) + } + } + return endToEndHeaders +} + +func canStore(reqCacheControl, respCacheControl cacheControl) (canStore bool) { + if _, ok := respCacheControl["no-store"]; ok { + return false + } + if _, ok := reqCacheControl["no-store"]; ok { + return false + } + return true +} + +func newGatewayTimeoutResponse(req *http.Request) *http.Response { + var braw bytes.Buffer + braw.WriteString("HTTP/1.1 504 Gateway Timeout\r\n\r\n") + resp, err := http.ReadResponse(bufio.NewReader(&braw), req) + if err != nil { + panic(err) + } + return resp +} + +// cloneRequest returns a clone of the provided *http.Request. +// The clone is a shallow copy of the struct and its Header map. 
+// (This function copyright goauth2 authors: https://code.google.com/p/goauth2) +func cloneRequest(r *http.Request) *http.Request { + // shallow copy of the struct + r2 := new(http.Request) + *r2 = *r + // deep copy of the Header + r2.Header = make(http.Header) + for k, s := range r.Header { + r2.Header[k] = s + } + return r2 +} + +type cacheControl map[string]string + +func parseCacheControl(headers http.Header) cacheControl { + cc := cacheControl{} + ccHeader := headers.Get("Cache-Control") + for _, part := range strings.Split(ccHeader, ",") { + part = strings.Trim(part, " ") + if part == "" { + continue + } + if strings.ContainsRune(part, '=') { + keyval := strings.Split(part, "=") + cc[strings.Trim(keyval[0], " ")] = strings.Trim(keyval[1], ",") + } else { + cc[part] = "" + } + } + return cc +} + +// headerAllCommaSepValues returns all comma-separated values (each +// with whitespace trimmed) for header name in headers. According to +// Section 4.2 of the HTTP/1.1 spec +// (http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2), +// values from multiple occurrences of a header should be concatenated, if +// the header's value is a comma-separated list. +func headerAllCommaSepValues(headers http.Header, name string) []string { + var vals []string + for _, val := range headers[http.CanonicalHeaderKey(name)] { + fields := strings.Split(val, ",") + for i, f := range fields { + fields[i] = strings.TrimSpace(f) + } + vals = append(vals, fields...) + } + return vals +} + +// cachingReadCloser is a wrapper around ReadCloser R that calls OnEOF +// handler with a full copy of the content read from R when EOF is +// reached. +type cachingReadCloser struct { + // Underlying ReadCloser. + R io.ReadCloser + // OnEOF is called with a copy of the content of R when EOF is reached. + OnEOF func(io.Reader) + + buf bytes.Buffer // buf stores a copy of the content of R. +} + +// Read reads the next len(p) bytes from R or until R is drained. The +// return value n is the number of bytes read. If R has no data to +// return, err is io.EOF and OnEOF is called with a full copy of what +// has been read so far. +func (r *cachingReadCloser) Read(p []byte) (n int, err error) { + n, err = r.R.Read(p) + r.buf.Write(p[:n]) + if err == io.EOF { + r.OnEOF(bytes.NewReader(r.buf.Bytes())) + } + return n, err +} + +func (r *cachingReadCloser) Close() error { + return r.R.Close() +} + +// NewMemoryCacheTransport returns a new Transport using the in-memory cache implementation +func NewMemoryCacheTransport() *Transport { + c := NewMemoryCache() + t := NewTransport(c) + return t +} diff --git a/vendor/k8s.io/utils/third_party/forked/golang/LICENSE b/vendor/github.com/imdario/mergo/LICENSE similarity index 96% rename from vendor/k8s.io/utils/third_party/forked/golang/LICENSE rename to vendor/github.com/imdario/mergo/LICENSE index 744875676..686680298 100644 --- a/vendor/k8s.io/utils/third_party/forked/golang/LICENSE +++ b/vendor/github.com/imdario/mergo/LICENSE @@ -1,3 +1,4 @@ +Copyright (c) 2013 Dario Castañé. All rights reserved. Copyright (c) 2012 The Go Authors. All rights reserved. Redistribution and use in source and binary forms, with or without diff --git a/vendor/github.com/imdario/mergo/doc.go b/vendor/github.com/imdario/mergo/doc.go new file mode 100644 index 000000000..6e9aa7baf --- /dev/null +++ b/vendor/github.com/imdario/mergo/doc.go @@ -0,0 +1,44 @@ +// Copyright 2013 Dario Castañé. All rights reserved. +// Copyright 2009 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package mergo merges same-type structs and maps by setting default values in zero-value fields.
+
+Mergo won't merge unexported (private) fields, but it will recursively merge any exported ones. It also won't merge structs inside maps (because they are not addressable using Go reflection).
+
+Usage
+
+From my own work-in-progress project:
+
+	type networkConfig struct {
+		Protocol   string
+		Address    string
+		ServerType string `json:"server_type"`
+		Port       uint16
+	}
+
+	type FssnConfig struct {
+		Network networkConfig
+	}
+
+	var fssnDefault = FssnConfig{
+		networkConfig{
+			"tcp",
+			"127.0.0.1",
+			"http",
+			31560,
+		},
+	}
+
+	// Inside a function [...]
+
+	if err := mergo.Merge(&config, fssnDefault); err != nil {
+		log.Fatal(err)
+	}
+
+	// More code [...]
+
+*/
+package mergo
diff --git a/vendor/github.com/imdario/mergo/map.go b/vendor/github.com/imdario/mergo/map.go
new file mode 100644
index 000000000..3f5afa83a
--- /dev/null
+++ b/vendor/github.com/imdario/mergo/map.go
@@ -0,0 +1,175 @@
+// Copyright 2014 Dario Castañé. All rights reserved.
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Based on src/pkg/reflect/deepequal.go from official
+// golang's stdlib.
+
+package mergo
+
+import (
+	"fmt"
+	"reflect"
+	"unicode"
+	"unicode/utf8"
+)
+
+func changeInitialCase(s string, mapper func(rune) rune) string {
+	if s == "" {
+		return s
+	}
+	r, n := utf8.DecodeRuneInString(s)
+	return string(mapper(r)) + s[n:]
+}
+
+func isExported(field reflect.StructField) bool {
+	r, _ := utf8.DecodeRuneInString(field.Name)
+	return r >= 'A' && r <= 'Z'
+}
+
+// Traverses both values recursively, assigning src's field values to dst.
+// The map argument tracks comparisons that have already been seen, which allows
+// short circuiting on recursive types.
+func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) {
+	overwrite := config.Overwrite
+	if dst.CanAddr() {
+		addr := dst.UnsafeAddr()
+		h := 17 * addr
+		seen := visited[h]
+		typ := dst.Type()
+		for p := seen; p != nil; p = p.next {
+			if p.ptr == addr && p.typ == typ {
+				return nil
+			}
+		}
+		// Remember, remember...
+		visited[h] = &visit{addr, typ, seen}
+	}
+	zeroValue := reflect.Value{}
+	switch dst.Kind() {
+	case reflect.Map:
+		dstMap := dst.Interface().(map[string]interface{})
+		for i, n := 0, src.NumField(); i < n; i++ {
+			srcType := src.Type()
+			field := srcType.Field(i)
+			if !isExported(field) {
+				continue
+			}
+			fieldName := field.Name
+			fieldName = changeInitialCase(fieldName, unicode.ToLower)
+			if v, ok := dstMap[fieldName]; !ok || (isEmptyValue(reflect.ValueOf(v)) || overwrite) {
+				dstMap[fieldName] = src.Field(i).Interface()
+			}
+		}
+	case reflect.Ptr:
+		if dst.IsNil() {
+			v := reflect.New(dst.Type().Elem())
+			dst.Set(v)
+		}
+		dst = dst.Elem()
+		fallthrough
+	case reflect.Struct:
+		srcMap := src.Interface().(map[string]interface{})
+		for key := range srcMap {
+			config.overwriteWithEmptyValue = true
+			srcValue := srcMap[key]
+			fieldName := changeInitialCase(key, unicode.ToUpper)
+			dstElement := dst.FieldByName(fieldName)
+			if dstElement == zeroValue {
+				// We discard it because the field doesn't exist.
+				continue
+			}
+			srcElement := reflect.ValueOf(srcValue)
+			dstKind := dstElement.Kind()
+			srcKind := srcElement.Kind()
+			if srcKind == reflect.Ptr && dstKind != reflect.Ptr {
+				srcElement = srcElement.Elem()
+				srcKind = reflect.TypeOf(srcElement.Interface()).Kind()
+			} else if dstKind == reflect.Ptr {
+				// Can this work? I guess it can't.
+				if srcKind != reflect.Ptr && srcElement.CanAddr() {
+					srcPtr := srcElement.Addr()
+					srcElement = reflect.ValueOf(srcPtr)
+					srcKind = reflect.Ptr
+				}
+			}
+
+			if !srcElement.IsValid() {
+				continue
+			}
+			if srcKind == dstKind {
+				if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil {
+					return
+				}
+			} else if dstKind == reflect.Interface && dstElement.Kind() == reflect.Interface {
+				if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil {
+					return
+				}
+			} else if srcKind == reflect.Map {
+				if err = deepMap(dstElement, srcElement, visited, depth+1, config); err != nil {
+					return
+				}
+			} else {
+				return fmt.Errorf("type mismatch on %s field: found %v, expected %v", fieldName, srcKind, dstKind)
+			}
+		}
+	}
+	return
+}
+
+// Map sets fields' values in dst from src.
+// src can be a map with string keys or a struct. dst must be the opposite:
+// if src is a map, dst must be a valid pointer to struct. If src is a struct,
+// dst must be map[string]interface{}.
+// It won't merge unexported (private) fields and will recursively merge any
+// exported field.
+// If dst is a map, keys will be src fields' names in lower camel case.
+// A key in src that doesn't match a field in dst will be skipped. This
+// doesn't apply if dst is a map.
+// This is kept as a separate method from Merge because it is cleaner and it keeps
+// sane semantics: merging equal types, mapping different (restricted) types.
+func Map(dst, src interface{}, opts ...func(*Config)) error {
+	return _map(dst, src, opts...)
+}
+
+// MapWithOverwrite will do the same as Map except that non-empty dst attributes will be overridden by
+// non-empty src attribute values.
+// Deprecated: Use Map(…) with WithOverride
+func MapWithOverwrite(dst, src interface{}, opts ...func(*Config)) error {
+	return _map(dst, src, append(opts, WithOverride)...)
+}
+
+func _map(dst, src interface{}, opts ...func(*Config)) error {
+	var (
+		vDst, vSrc reflect.Value
+		err        error
+	)
+	config := &Config{}
+
+	for _, opt := range opts {
+		opt(config)
+	}
+
+	if vDst, vSrc, err = resolveValues(dst, src); err != nil {
+		return err
+	}
+	// To be friction-less, we redirect equal-type arguments
+	// to deepMerge. Only because arguments can be anything.
+	if vSrc.Kind() == vDst.Kind() {
+		return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config)
+	}
+	switch vSrc.Kind() {
+	case reflect.Struct:
+		if vDst.Kind() != reflect.Map {
+			return ErrExpectedMapAsDestination
+		}
+	case reflect.Map:
+		if vDst.Kind() != reflect.Struct {
+			return ErrExpectedStructAsDestination
+		}
+	default:
+		return ErrNotSupported
+	}
+	return deepMap(vDst, vSrc, make(map[uintptr]*visit), 0, config)
+}
diff --git a/vendor/github.com/imdario/mergo/merge.go b/vendor/github.com/imdario/mergo/merge.go
new file mode 100644
index 000000000..f8de6c543
--- /dev/null
+++ b/vendor/github.com/imdario/mergo/merge.go
@@ -0,0 +1,255 @@
+// Copyright 2013 Dario Castañé. All rights reserved.
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Based on src/pkg/reflect/deepequal.go from official
+// golang's stdlib.
+
+package mergo
+
+import (
+	"fmt"
+	"reflect"
+)
+
+func hasExportedField(dst reflect.Value) (exported bool) {
+	for i, n := 0, dst.NumField(); i < n; i++ {
+		field := dst.Type().Field(i)
+		if field.Anonymous && dst.Field(i).Kind() == reflect.Struct {
+			exported = exported || hasExportedField(dst.Field(i))
+		} else {
+			exported = exported || len(field.PkgPath) == 0
+		}
+	}
+	return
+}
+
+type Config struct {
+	Overwrite               bool
+	AppendSlice             bool
+	Transformers            Transformers
+	overwriteWithEmptyValue bool
+}
+
+type Transformers interface {
+	Transformer(reflect.Type) func(dst, src reflect.Value) error
+}
+
+// Traverses both values recursively, assigning src's field values to dst.
+// The map argument tracks comparisons that have already been seen, which allows
+// short circuiting on recursive types.
+func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) {
+	overwrite := config.Overwrite
+	overwriteWithEmptySrc := config.overwriteWithEmptyValue
+	config.overwriteWithEmptyValue = false
+
+	if !src.IsValid() {
+		return
+	}
+	if dst.CanAddr() {
+		addr := dst.UnsafeAddr()
+		h := 17 * addr
+		seen := visited[h]
+		typ := dst.Type()
+		for p := seen; p != nil; p = p.next {
+			if p.ptr == addr && p.typ == typ {
+				return nil
+			}
+		}
+		// Remember, remember...
+		visited[h] = &visit{addr, typ, seen}
+	}
+
+	if config.Transformers != nil && !isEmptyValue(dst) {
+		if fn := config.Transformers.Transformer(dst.Type()); fn != nil {
+			err = fn(dst, src)
+			return
+		}
+	}
+
+	switch dst.Kind() {
+	case reflect.Struct:
+		if hasExportedField(dst) {
+			for i, n := 0, dst.NumField(); i < n; i++ {
+				if err = deepMerge(dst.Field(i), src.Field(i), visited, depth+1, config); err != nil {
+					return
+				}
+			}
+		} else {
+			if dst.CanSet() && (!isEmptyValue(src) || overwriteWithEmptySrc) && (overwrite || isEmptyValue(dst)) {
+				dst.Set(src)
+			}
+		}
+	case reflect.Map:
+		if dst.IsNil() && !src.IsNil() {
+			dst.Set(reflect.MakeMap(dst.Type()))
+		}
+		for _, key := range src.MapKeys() {
+			srcElement := src.MapIndex(key)
+			if !srcElement.IsValid() {
+				continue
+			}
+			dstElement := dst.MapIndex(key)
+			switch srcElement.Kind() {
+			case reflect.Chan, reflect.Func, reflect.Map, reflect.Interface, reflect.Slice:
+				if srcElement.IsNil() {
+					continue
+				}
+				fallthrough
+			default:
+				if !srcElement.CanInterface() {
+					continue
+				}
+				switch reflect.TypeOf(srcElement.Interface()).Kind() {
+				case reflect.Struct:
+					fallthrough
+				case reflect.Ptr:
+					fallthrough
+				case reflect.Map:
+					srcMapElm := srcElement
+					dstMapElm := dstElement
+					if srcMapElm.CanInterface() {
+						srcMapElm = reflect.ValueOf(srcMapElm.Interface())
+						if dstMapElm.IsValid() {
+							dstMapElm = reflect.ValueOf(dstMapElm.Interface())
+						}
+					}
+					if err = deepMerge(dstMapElm, srcMapElm, visited, depth+1, config); err != nil {
+						return
+					}
+				case reflect.Slice:
+					srcSlice := reflect.ValueOf(srcElement.Interface())
+
+					var dstSlice reflect.Value
+					if !dstElement.IsValid() || dstElement.IsNil() {
+						dstSlice = reflect.MakeSlice(srcSlice.Type(), 0, srcSlice.Len())
+					} else {
+						dstSlice = reflect.ValueOf(dstElement.Interface())
+					}
+
+					if (!isEmptyValue(src) || overwriteWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice {
+						dstSlice = srcSlice
+					} else if config.AppendSlice {
+						if srcSlice.Type() != dstSlice.Type() {
+							return fmt.Errorf("cannot append two slices with different types (%s, %s)", srcSlice.Type(), dstSlice.Type())
+						}
+						dstSlice = reflect.AppendSlice(dstSlice, srcSlice)
+					}
+					dst.SetMapIndex(key, dstSlice)
+				}
+			}
+			if dstElement.IsValid() && !isEmptyValue(dstElement) && (reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Map || reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Slice) {
+				continue
+			}
+
+			if srcElement.IsValid() && (overwrite || (!dstElement.IsValid() || isEmptyValue(dstElement))) {
+				if dst.IsNil() {
+					dst.Set(reflect.MakeMap(dst.Type()))
+				}
+				dst.SetMapIndex(key, srcElement)
+			}
+		}
+	case reflect.Slice:
+		if !dst.CanSet() {
+			break
+		}
+		if (!isEmptyValue(src) || overwriteWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice {
+			dst.Set(src)
+		} else if config.AppendSlice {
+			if src.Type() != dst.Type() {
+				return fmt.Errorf("cannot append two slices with different types (%s, %s)", src.Type(), dst.Type())
+			}
+			dst.Set(reflect.AppendSlice(dst, src))
+		}
+	case reflect.Ptr:
+		fallthrough
+	case reflect.Interface:
+		if src.IsNil() {
+			break
+		}
+		if src.Kind() != reflect.Interface {
+			if dst.IsNil() || overwrite {
+				if dst.CanSet() && (overwrite || isEmptyValue(dst)) {
+					dst.Set(src)
+				}
+			} else if src.Kind() == reflect.Ptr {
+				if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil {
+					return
+				}
+			} else if dst.Elem().Type() == src.Type() {
+				if err = deepMerge(dst.Elem(), src, visited, depth+1, config); err != nil {
+					return
+				}
+			} else {
+				return ErrDifferentArgumentsTypes
+			}
+			break
+		}
+		if dst.IsNil() || overwrite {
+			if dst.CanSet() && (overwrite || isEmptyValue(dst)) {
+				dst.Set(src)
+			}
+		} else if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil {
+			return
+		}
+	default:
+		if dst.CanSet() && (!isEmptyValue(src) || overwriteWithEmptySrc) && (overwrite || isEmptyValue(dst)) {
+			dst.Set(src)
+		}
+	}
+	return
+}
+
+// Merge will fill any empty value-type attributes of the dst struct using the
+// corresponding src attributes, if those are not empty themselves. dst and src
+// must be valid same-type structs and dst must be a pointer to struct.
+// It won't merge unexported (private) fields and will recursively merge any exported field.
+func Merge(dst, src interface{}, opts ...func(*Config)) error {
+	return merge(dst, src, opts...)
+}
+
+// MergeWithOverwrite will do the same as Merge except that non-empty dst attributes will be overridden by
+// non-empty src attribute values.
+// Deprecated: use Merge(…) with WithOverride
+func MergeWithOverwrite(dst, src interface{}, opts ...func(*Config)) error {
+	return merge(dst, src, append(opts, WithOverride)...)
+}
+
+// WithTransformers adds transformers to merge, allowing you to customize how some types are merged.
+func WithTransformers(transformers Transformers) func(*Config) {
+	return func(config *Config) {
+		config.Transformers = transformers
+	}
+}
+
+// WithOverride will make merge override non-empty dst attributes with non-empty src attribute values.
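+//
+// Usage sketch (dst and src stand for any two same-type structs):
+//
+//	if err := mergo.Merge(&dst, src, mergo.WithOverride); err != nil {
+//		// handle error
+//	}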
+func WithOverride(config *Config) {
+	config.Overwrite = true
+}
+
+// WithAppendSlice will make merge append slices instead of overwriting them.
+func WithAppendSlice(config *Config) {
+	config.AppendSlice = true
+}
+
+func merge(dst, src interface{}, opts ...func(*Config)) error {
+	var (
+		vDst, vSrc reflect.Value
+		err        error
+	)
+
+	config := &Config{}
+
+	for _, opt := range opts {
+		opt(config)
+	}
+
+	if vDst, vSrc, err = resolveValues(dst, src); err != nil {
+		return err
+	}
+	if vDst.Type() != vSrc.Type() {
+		return ErrDifferentArgumentsTypes
+	}
+	return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config)
+}
diff --git a/vendor/github.com/imdario/mergo/mergo.go b/vendor/github.com/imdario/mergo/mergo.go
new file mode 100644
index 000000000..a82fea2fd
--- /dev/null
+++ b/vendor/github.com/imdario/mergo/mergo.go
@@ -0,0 +1,97 @@
+// Copyright 2013 Dario Castañé. All rights reserved.
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Based on src/pkg/reflect/deepequal.go from official
+// golang's stdlib.
+
+package mergo
+
+import (
+	"errors"
+	"reflect"
+)
+
+// Errors reported by Mergo when it finds invalid arguments.
+var (
+	ErrNilArguments                = errors.New("src and dst must not be nil")
+	ErrDifferentArgumentsTypes     = errors.New("src and dst must be of same type")
+	ErrNotSupported                = errors.New("only structs and maps are supported")
+	ErrExpectedMapAsDestination    = errors.New("dst was expected to be a map")
+	ErrExpectedStructAsDestination = errors.New("dst was expected to be a struct")
+)
+
+// During deepMerge, we must keep track of checks that are
+// in progress. The comparison algorithm assumes that all
+// checks in progress are true when it reencounters them.
+// Visited comparisons are stored in a map indexed by 17 * a1 + a2.
+type visit struct {
+	ptr  uintptr
+	typ  reflect.Type
+	next *visit
+}
+
+// From src/pkg/encoding/json/encode.go.
+func isEmptyValue(v reflect.Value) bool {
+	switch v.Kind() {
+	case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+		return v.Len() == 0
+	case reflect.Bool:
+		return !v.Bool()
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return v.Int() == 0
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return v.Uint() == 0
+	case reflect.Float32, reflect.Float64:
+		return v.Float() == 0
+	case reflect.Interface, reflect.Ptr:
+		if v.IsNil() {
+			return true
+		}
+		return isEmptyValue(v.Elem())
+	case reflect.Func:
+		return v.IsNil()
+	case reflect.Invalid:
+		return true
+	}
+	return false
+}
+
+func resolveValues(dst, src interface{}) (vDst, vSrc reflect.Value, err error) {
+	if dst == nil || src == nil {
+		err = ErrNilArguments
+		return
+	}
+	vDst = reflect.ValueOf(dst).Elem()
+	if vDst.Kind() != reflect.Struct && vDst.Kind() != reflect.Map {
+		err = ErrNotSupported
+		return
+	}
+	vSrc = reflect.ValueOf(src)
+	// We check if vSrc is a pointer to dereference it.
+	if vSrc.Kind() == reflect.Ptr {
+		vSrc = vSrc.Elem()
+	}
+	return
+}
+
+// Traverses both values recursively, assigning src's field values to dst.
+// The map argument tracks comparisons that have already been seen, which allows
+// short circuiting on recursive types.
+func deeper(dst, src reflect.Value, visited map[uintptr]*visit, depth int) (err error) {
+	if dst.CanAddr() {
+		addr := dst.UnsafeAddr()
+		h := 17 * addr
+		seen := visited[h]
+		typ := dst.Type()
+		for p := seen; p != nil; p = p.next {
+			if p.ptr == addr && p.typ == typ {
+				return nil
+			}
+		}
+		// Remember, remember...
+		visited[h] = &visit{addr, typ, seen}
+	}
+	return // TODO refactor
+}
diff --git a/vendor/github.com/imdario/mergo/testdata/license.yml b/vendor/github.com/imdario/mergo/testdata/license.yml
new file mode 100644
index 000000000..2f1ad0082
--- /dev/null
+++ b/vendor/github.com/imdario/mergo/testdata/license.yml
@@ -0,0 +1,4 @@
+import: ../../../../fossene/db/schema/thing.yml
+fields:
+  site: string
+  author: root
diff --git a/vendor/github.com/jmespath/go-jmespath/LICENSE b/vendor/github.com/jmespath/go-jmespath/LICENSE
new file mode 100644
index 000000000..b03310a91
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/LICENSE
@@ -0,0 +1,13 @@
+Copyright 2015 James Saryerwinnie
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/vendor/github.com/jmespath/go-jmespath/api.go b/vendor/github.com/jmespath/go-jmespath/api.go
new file mode 100644
index 000000000..8e26ffeec
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/api.go
@@ -0,0 +1,49 @@
+package jmespath
+
+import "strconv"
+
+// JMESPath is the representation of a compiled JMESPath query. A JMESPath is
+// safe for concurrent use by multiple goroutines.
+type JMESPath struct {
+	ast  ASTNode
+	intr *treeInterpreter
+}
+
+// Compile parses a JMESPath expression and returns, if successful, a JMESPath
+// object that can be used to match against data.
+func Compile(expression string) (*JMESPath, error) {
+	parser := NewParser()
+	ast, err := parser.Parse(expression)
+	if err != nil {
+		return nil, err
+	}
+	jmespath := &JMESPath{ast: ast, intr: newInterpreter()}
+	return jmespath, nil
+}
+
+// MustCompile is like Compile but panics if the expression cannot be parsed.
+// It simplifies safe initialization of global variables holding compiled
+// JMESPaths.
+func MustCompile(expression string) *JMESPath {
+	jmespath, err := Compile(expression)
+	if err != nil {
+		panic(`jmespath: Compile(` + strconv.Quote(expression) + `): ` + err.Error())
+	}
+	return jmespath
+}
+
+// Search evaluates a JMESPath expression against input data and returns the result.
+func (jp *JMESPath) Search(data interface{}) (interface{}, error) {
+	return jp.intr.Execute(jp.ast, data)
+}
+
+// Search evaluates a JMESPath expression against input data and returns the result.
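+//
+// One-off evaluation sketch (the JSON shape is illustrative):
+//
+//	var data interface{}
+//	_ = json.Unmarshal([]byte(`{"foo": {"bar": "baz"}}`), &data)
+//	result, _ := jmespath.Search("foo.bar", data) // result is "baz"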
+func Search(expression string, data interface{}) (interface{}, error) { + intr := newInterpreter() + parser := NewParser() + ast, err := parser.Parse(expression) + if err != nil { + return nil, err + } + return intr.Execute(ast, data) +} diff --git a/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go b/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go new file mode 100644 index 000000000..1cd2d239c --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go @@ -0,0 +1,16 @@ +// generated by stringer -type astNodeType; DO NOT EDIT + +package jmespath + +import "fmt" + +const _astNodeType_name = "ASTEmptyASTComparatorASTCurrentNodeASTExpRefASTFunctionExpressionASTFieldASTFilterProjectionASTFlattenASTIdentityASTIndexASTIndexExpressionASTKeyValPairASTLiteralASTMultiSelectHashASTMultiSelectListASTOrExpressionASTAndExpressionASTNotExpressionASTPipeASTProjectionASTSubexpressionASTSliceASTValueProjection" + +var _astNodeType_index = [...]uint16{0, 8, 21, 35, 44, 65, 73, 92, 102, 113, 121, 139, 152, 162, 180, 198, 213, 229, 245, 252, 265, 281, 289, 307} + +func (i astNodeType) String() string { + if i < 0 || i >= astNodeType(len(_astNodeType_index)-1) { + return fmt.Sprintf("astNodeType(%d)", i) + } + return _astNodeType_name[_astNodeType_index[i]:_astNodeType_index[i+1]] +} diff --git a/vendor/github.com/jmespath/go-jmespath/functions.go b/vendor/github.com/jmespath/go-jmespath/functions.go new file mode 100644 index 000000000..9b7cd89b4 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/functions.go @@ -0,0 +1,842 @@ +package jmespath + +import ( + "encoding/json" + "errors" + "fmt" + "math" + "reflect" + "sort" + "strconv" + "strings" + "unicode/utf8" +) + +type jpFunction func(arguments []interface{}) (interface{}, error) + +type jpType string + +const ( + jpUnknown jpType = "unknown" + jpNumber jpType = "number" + jpString jpType = "string" + jpArray jpType = "array" + jpObject jpType = "object" + jpArrayNumber jpType = "array[number]" + jpArrayString jpType = "array[string]" + jpExpref jpType = "expref" + jpAny jpType = "any" +) + +type functionEntry struct { + name string + arguments []argSpec + handler jpFunction + hasExpRef bool +} + +type argSpec struct { + types []jpType + variadic bool +} + +type byExprString struct { + intr *treeInterpreter + node ASTNode + items []interface{} + hasError bool +} + +func (a *byExprString) Len() int { + return len(a.items) +} +func (a *byExprString) Swap(i, j int) { + a.items[i], a.items[j] = a.items[j], a.items[i] +} +func (a *byExprString) Less(i, j int) bool { + first, err := a.intr.Execute(a.node, a.items[i]) + if err != nil { + a.hasError = true + // Return a dummy value. + return true + } + ith, ok := first.(string) + if !ok { + a.hasError = true + return true + } + second, err := a.intr.Execute(a.node, a.items[j]) + if err != nil { + a.hasError = true + // Return a dummy value. + return true + } + jth, ok := second.(string) + if !ok { + a.hasError = true + return true + } + return ith < jth +} + +type byExprFloat struct { + intr *treeInterpreter + node ASTNode + items []interface{} + hasError bool +} + +func (a *byExprFloat) Len() int { + return len(a.items) +} +func (a *byExprFloat) Swap(i, j int) { + a.items[i], a.items[j] = a.items[j], a.items[i] +} +func (a *byExprFloat) Less(i, j int) bool { + first, err := a.intr.Execute(a.node, a.items[i]) + if err != nil { + a.hasError = true + // Return a dummy value. 
+		return true
+	}
+	ith, ok := first.(float64)
+	if !ok {
+		a.hasError = true
+		return true
+	}
+	second, err := a.intr.Execute(a.node, a.items[j])
+	if err != nil {
+		a.hasError = true
+		// Return a dummy value.
+		return true
+	}
+	jth, ok := second.(float64)
+	if !ok {
+		a.hasError = true
+		return true
+	}
+	return ith < jth
+}
+
+type functionCaller struct {
+	functionTable map[string]functionEntry
+}
+
+func newFunctionCaller() *functionCaller {
+	caller := &functionCaller{}
+	caller.functionTable = map[string]functionEntry{
+		"length": {
+			name: "length",
+			arguments: []argSpec{
+				{types: []jpType{jpString, jpArray, jpObject}},
+			},
+			handler: jpfLength,
+		},
+		"starts_with": {
+			name: "starts_with",
+			arguments: []argSpec{
+				{types: []jpType{jpString}},
+				{types: []jpType{jpString}},
+			},
+			handler: jpfStartsWith,
+		},
+		"abs": {
+			name: "abs",
+			arguments: []argSpec{
+				{types: []jpType{jpNumber}},
+			},
+			handler: jpfAbs,
+		},
+		"avg": {
+			name: "avg",
+			arguments: []argSpec{
+				{types: []jpType{jpArrayNumber}},
+			},
+			handler: jpfAvg,
+		},
+		"ceil": {
+			name: "ceil",
+			arguments: []argSpec{
+				{types: []jpType{jpNumber}},
+			},
+			handler: jpfCeil,
+		},
+		"contains": {
+			name: "contains",
+			arguments: []argSpec{
+				{types: []jpType{jpArray, jpString}},
+				{types: []jpType{jpAny}},
+			},
+			handler: jpfContains,
+		},
+		"ends_with": {
+			name: "ends_with",
+			arguments: []argSpec{
+				{types: []jpType{jpString}},
+				{types: []jpType{jpString}},
+			},
+			handler: jpfEndsWith,
+		},
+		"floor": {
+			name: "floor",
+			arguments: []argSpec{
+				{types: []jpType{jpNumber}},
+			},
+			handler: jpfFloor,
+		},
+		"map": {
+			name: "map",
+			arguments: []argSpec{
+				{types: []jpType{jpExpref}},
+				{types: []jpType{jpArray}},
+			},
+			handler:   jpfMap,
+			hasExpRef: true,
+		},
+		"max": {
+			name: "max",
+			arguments: []argSpec{
+				{types: []jpType{jpArrayNumber, jpArrayString}},
+			},
+			handler: jpfMax,
+		},
+		"merge": {
+			name: "merge",
+			arguments: []argSpec{
+				{types: []jpType{jpObject}, variadic: true},
+			},
+			handler: jpfMerge,
+		},
+		"max_by": {
+			name: "max_by",
+			arguments: []argSpec{
+				{types: []jpType{jpArray}},
+				{types: []jpType{jpExpref}},
+			},
+			handler:   jpfMaxBy,
+			hasExpRef: true,
+		},
+		"sum": {
+			name: "sum",
+			arguments: []argSpec{
+				{types: []jpType{jpArrayNumber}},
+			},
+			handler: jpfSum,
+		},
+		"min": {
+			name: "min",
+			arguments: []argSpec{
+				{types: []jpType{jpArrayNumber, jpArrayString}},
+			},
+			handler: jpfMin,
+		},
+		"min_by": {
+			name: "min_by",
+			arguments: []argSpec{
+				{types: []jpType{jpArray}},
+				{types: []jpType{jpExpref}},
+			},
+			handler:   jpfMinBy,
+			hasExpRef: true,
+		},
+		"type": {
+			name: "type",
+			arguments: []argSpec{
+				{types: []jpType{jpAny}},
+			},
+			handler: jpfType,
+		},
+		"keys": {
+			name: "keys",
+			arguments: []argSpec{
+				{types: []jpType{jpObject}},
+			},
+			handler: jpfKeys,
+		},
+		"values": {
+			name: "values",
+			arguments: []argSpec{
+				{types: []jpType{jpObject}},
+			},
+			handler: jpfValues,
+		},
+		"sort": {
+			name: "sort",
+			arguments: []argSpec{
+				{types: []jpType{jpArrayString, jpArrayNumber}},
+			},
+			handler: jpfSort,
+		},
+		"sort_by": {
+			name: "sort_by",
+			arguments: []argSpec{
+				{types: []jpType{jpArray}},
+				{types: []jpType{jpExpref}},
+			},
+			handler:   jpfSortBy,
+			hasExpRef: true,
+		},
+		"join": {
+			name: "join",
+			arguments: []argSpec{
+				{types: []jpType{jpString}},
+				{types: []jpType{jpArrayString}},
+			},
+			handler: jpfJoin,
+		},
+		"reverse": {
+			name: "reverse",
+			arguments: []argSpec{
+				{types: []jpType{jpArray, jpString}},
+			},
+			handler: jpfReverse,
+		},
+		"to_array": {
name: "to_array", + arguments: []argSpec{ + {types: []jpType{jpAny}}, + }, + handler: jpfToArray, + }, + "to_string": { + name: "to_string", + arguments: []argSpec{ + {types: []jpType{jpAny}}, + }, + handler: jpfToString, + }, + "to_number": { + name: "to_number", + arguments: []argSpec{ + {types: []jpType{jpAny}}, + }, + handler: jpfToNumber, + }, + "not_null": { + name: "not_null", + arguments: []argSpec{ + {types: []jpType{jpAny}, variadic: true}, + }, + handler: jpfNotNull, + }, + } + return caller +} + +func (e *functionEntry) resolveArgs(arguments []interface{}) ([]interface{}, error) { + if len(e.arguments) == 0 { + return arguments, nil + } + if !e.arguments[len(e.arguments)-1].variadic { + if len(e.arguments) != len(arguments) { + return nil, errors.New("incorrect number of args") + } + for i, spec := range e.arguments { + userArg := arguments[i] + err := spec.typeCheck(userArg) + if err != nil { + return nil, err + } + } + return arguments, nil + } + if len(arguments) < len(e.arguments) { + return nil, errors.New("Invalid arity.") + } + return arguments, nil +} + +func (a *argSpec) typeCheck(arg interface{}) error { + for _, t := range a.types { + switch t { + case jpNumber: + if _, ok := arg.(float64); ok { + return nil + } + case jpString: + if _, ok := arg.(string); ok { + return nil + } + case jpArray: + if isSliceType(arg) { + return nil + } + case jpObject: + if _, ok := arg.(map[string]interface{}); ok { + return nil + } + case jpArrayNumber: + if _, ok := toArrayNum(arg); ok { + return nil + } + case jpArrayString: + if _, ok := toArrayStr(arg); ok { + return nil + } + case jpAny: + return nil + case jpExpref: + if _, ok := arg.(expRef); ok { + return nil + } + } + } + return fmt.Errorf("Invalid type for: %v, expected: %#v", arg, a.types) +} + +func (f *functionCaller) CallFunction(name string, arguments []interface{}, intr *treeInterpreter) (interface{}, error) { + entry, ok := f.functionTable[name] + if !ok { + return nil, errors.New("unknown function: " + name) + } + resolvedArgs, err := entry.resolveArgs(arguments) + if err != nil { + return nil, err + } + if entry.hasExpRef { + var extra []interface{} + extra = append(extra, intr) + resolvedArgs = append(extra, resolvedArgs...) + } + return entry.handler(resolvedArgs) +} + +func jpfAbs(arguments []interface{}) (interface{}, error) { + num := arguments[0].(float64) + return math.Abs(num), nil +} + +func jpfLength(arguments []interface{}) (interface{}, error) { + arg := arguments[0] + if c, ok := arg.(string); ok { + return float64(utf8.RuneCountInString(c)), nil + } else if isSliceType(arg) { + v := reflect.ValueOf(arg) + return float64(v.Len()), nil + } else if c, ok := arg.(map[string]interface{}); ok { + return float64(len(c)), nil + } + return nil, errors.New("could not compute length()") +} + +func jpfStartsWith(arguments []interface{}) (interface{}, error) { + search := arguments[0].(string) + prefix := arguments[1].(string) + return strings.HasPrefix(search, prefix), nil +} + +func jpfAvg(arguments []interface{}) (interface{}, error) { + // We've already type checked the value so we can safely use + // type assertions. 
+ args := arguments[0].([]interface{}) + length := float64(len(args)) + numerator := 0.0 + for _, n := range args { + numerator += n.(float64) + } + return numerator / length, nil +} +func jpfCeil(arguments []interface{}) (interface{}, error) { + val := arguments[0].(float64) + return math.Ceil(val), nil +} +func jpfContains(arguments []interface{}) (interface{}, error) { + search := arguments[0] + el := arguments[1] + if searchStr, ok := search.(string); ok { + if elStr, ok := el.(string); ok { + return strings.Index(searchStr, elStr) != -1, nil + } + return false, nil + } + // Otherwise this is a generic contains for []interface{} + general := search.([]interface{}) + for _, item := range general { + if item == el { + return true, nil + } + } + return false, nil +} +func jpfEndsWith(arguments []interface{}) (interface{}, error) { + search := arguments[0].(string) + suffix := arguments[1].(string) + return strings.HasSuffix(search, suffix), nil +} +func jpfFloor(arguments []interface{}) (interface{}, error) { + val := arguments[0].(float64) + return math.Floor(val), nil +} +func jpfMap(arguments []interface{}) (interface{}, error) { + intr := arguments[0].(*treeInterpreter) + exp := arguments[1].(expRef) + node := exp.ref + arr := arguments[2].([]interface{}) + mapped := make([]interface{}, 0, len(arr)) + for _, value := range arr { + current, err := intr.Execute(node, value) + if err != nil { + return nil, err + } + mapped = append(mapped, current) + } + return mapped, nil +} +func jpfMax(arguments []interface{}) (interface{}, error) { + if items, ok := toArrayNum(arguments[0]); ok { + if len(items) == 0 { + return nil, nil + } + if len(items) == 1 { + return items[0], nil + } + best := items[0] + for _, item := range items[1:] { + if item > best { + best = item + } + } + return best, nil + } + // Otherwise we're dealing with a max() of strings. 
+	items, _ := toArrayStr(arguments[0])
+	if len(items) == 0 {
+		return nil, nil
+	}
+	if len(items) == 1 {
+		return items[0], nil
+	}
+	best := items[0]
+	for _, item := range items[1:] {
+		if item > best {
+			best = item
+		}
+	}
+	return best, nil
+}
+func jpfMerge(arguments []interface{}) (interface{}, error) {
+	final := make(map[string]interface{})
+	for _, m := range arguments {
+		mapped := m.(map[string]interface{})
+		for key, value := range mapped {
+			final[key] = value
+		}
+	}
+	return final, nil
+}
+func jpfMaxBy(arguments []interface{}) (interface{}, error) {
+	intr := arguments[0].(*treeInterpreter)
+	arr := arguments[1].([]interface{})
+	exp := arguments[2].(expRef)
+	node := exp.ref
+	if len(arr) == 0 {
+		return nil, nil
+	} else if len(arr) == 1 {
+		return arr[0], nil
+	}
+	start, err := intr.Execute(node, arr[0])
+	if err != nil {
+		return nil, err
+	}
+	switch t := start.(type) {
+	case float64:
+		bestVal := t
+		bestItem := arr[0]
+		for _, item := range arr[1:] {
+			result, err := intr.Execute(node, item)
+			if err != nil {
+				return nil, err
+			}
+			current, ok := result.(float64)
+			if !ok {
+				return nil, errors.New("invalid type, must be number")
+			}
+			if current > bestVal {
+				bestVal = current
+				bestItem = item
+			}
+		}
+		return bestItem, nil
+	case string:
+		bestVal := t
+		bestItem := arr[0]
+		for _, item := range arr[1:] {
+			result, err := intr.Execute(node, item)
+			if err != nil {
+				return nil, err
+			}
+			current, ok := result.(string)
+			if !ok {
+				return nil, errors.New("invalid type, must be string")
+			}
+			if current > bestVal {
+				bestVal = current
+				bestItem = item
+			}
+		}
+		return bestItem, nil
+	default:
+		return nil, errors.New("invalid type, must be number or string")
+	}
+}
+func jpfSum(arguments []interface{}) (interface{}, error) {
+	items, _ := toArrayNum(arguments[0])
+	sum := 0.0
+	for _, item := range items {
+		sum += item
+	}
+	return sum, nil
+}
+
+func jpfMin(arguments []interface{}) (interface{}, error) {
+	if items, ok := toArrayNum(arguments[0]); ok {
+		if len(items) == 0 {
+			return nil, nil
+		}
+		if len(items) == 1 {
+			return items[0], nil
+		}
+		best := items[0]
+		for _, item := range items[1:] {
+			if item < best {
+				best = item
+			}
+		}
+		return best, nil
+	}
+	items, _ = toArrayStr(arguments[0])
+	if len(items) == 0 {
+		return nil, nil
+	}
+	if len(items) == 1 {
+		return items[0], nil
+	}
+	best := items[0]
+	for _, item := range items[1:] {
+		if item < best {
+			best = item
+		}
+	}
+	return best, nil
+}
+
+func jpfMinBy(arguments []interface{}) (interface{}, error) {
+	intr := arguments[0].(*treeInterpreter)
+	arr := arguments[1].([]interface{})
+	exp := arguments[2].(expRef)
+	node := exp.ref
+	if len(arr) == 0 {
+		return nil, nil
+	} else if len(arr) == 1 {
+		return arr[0], nil
+	}
+	start, err := intr.Execute(node, arr[0])
+	if err != nil {
+		return nil, err
+	}
+	if t, ok := start.(float64); ok {
+		bestVal := t
+		bestItem := arr[0]
+		for _, item := range arr[1:] {
+			result, err := intr.Execute(node, item)
+			if err != nil {
+				return nil, err
+			}
+			current, ok := result.(float64)
+			if !ok {
+				return nil, errors.New("invalid type, must be number")
+			}
+			if current < bestVal {
+				bestVal = current
+				bestItem = item
+			}
+		}
+		return bestItem, nil
+	} else if t, ok := start.(string); ok {
+		bestVal := t
+		bestItem := arr[0]
+		for _, item := range arr[1:] {
+			result, err := intr.Execute(node, item)
+			if err != nil {
+				return nil, err
+			}
+			current, ok := result.(string)
+			if !ok {
+				return nil, errors.New("invalid type, must be string")
+			}
+			if current < bestVal {
+				bestVal = current
+				bestItem = item
+			}
+		}
+		return bestItem, nil
+	} else {
+		return nil, errors.New("invalid type, must be number or string")
+	}
+}
+func jpfType(arguments []interface{}) (interface{}, error) {
+	arg := arguments[0]
+	if _, ok := arg.(float64); ok {
+		return "number", nil
+	}
+	if _, ok := arg.(string); ok {
+		return "string", nil
+	}
+	if _, ok := arg.([]interface{}); ok {
+		return "array", nil
+	}
+	if _, ok := arg.(map[string]interface{}); ok {
+		return "object", nil
+	}
+	if arg == nil {
+		return "null", nil
+	}
+	if arg == true || arg == false {
+		return "boolean", nil
+	}
+	return nil, errors.New("unknown type")
+}
+func jpfKeys(arguments []interface{}) (interface{}, error) {
+	arg := arguments[0].(map[string]interface{})
+	collected := make([]interface{}, 0, len(arg))
+	for key := range arg {
+		collected = append(collected, key)
+	}
+	return collected, nil
+}
+func jpfValues(arguments []interface{}) (interface{}, error) {
+	arg := arguments[0].(map[string]interface{})
+	collected := make([]interface{}, 0, len(arg))
+	for _, value := range arg {
+		collected = append(collected, value)
+	}
+	return collected, nil
+}
+func jpfSort(arguments []interface{}) (interface{}, error) {
+	if items, ok := toArrayNum(arguments[0]); ok {
+		d := sort.Float64Slice(items)
+		sort.Stable(d)
+		final := make([]interface{}, len(d))
+		for i, val := range d {
+			final[i] = val
+		}
+		return final, nil
+	}
+	// Otherwise we're dealing with sort()'ing strings.
+	items, _ := toArrayStr(arguments[0])
+	d := sort.StringSlice(items)
+	sort.Stable(d)
+	final := make([]interface{}, len(d))
+	for i, val := range d {
+		final[i] = val
+	}
+	return final, nil
+}
+func jpfSortBy(arguments []interface{}) (interface{}, error) {
+	intr := arguments[0].(*treeInterpreter)
+	arr := arguments[1].([]interface{})
+	exp := arguments[2].(expRef)
+	node := exp.ref
+	if len(arr) == 0 {
+		return arr, nil
+	} else if len(arr) == 1 {
+		return arr, nil
+	}
+	start, err := intr.Execute(node, arr[0])
+	if err != nil {
+		return nil, err
+	}
+	if _, ok := start.(float64); ok {
+		sortable := &byExprFloat{intr, node, arr, false}
+		sort.Stable(sortable)
+		if sortable.hasError {
+			return nil, errors.New("error in sort_by comparison")
+		}
+		return arr, nil
+	} else if _, ok := start.(string); ok {
+		sortable := &byExprString{intr, node, arr, false}
+		sort.Stable(sortable)
+		if sortable.hasError {
+			return nil, errors.New("error in sort_by comparison")
+		}
+		return arr, nil
+	} else {
+		return nil, errors.New("invalid type, must be number or string")
+	}
+}
+func jpfJoin(arguments []interface{}) (interface{}, error) {
+	sep := arguments[0].(string)
+	// We can't just do arguments[1].([]string), we have to
+	// manually convert each item to a string.
+ arrayStr := []string{} + for _, item := range arguments[1].([]interface{}) { + arrayStr = append(arrayStr, item.(string)) + } + return strings.Join(arrayStr, sep), nil +} +func jpfReverse(arguments []interface{}) (interface{}, error) { + if s, ok := arguments[0].(string); ok { + r := []rune(s) + for i, j := 0, len(r)-1; i < len(r)/2; i, j = i+1, j-1 { + r[i], r[j] = r[j], r[i] + } + return string(r), nil + } + items := arguments[0].([]interface{}) + length := len(items) + reversed := make([]interface{}, length) + for i, item := range items { + reversed[length-(i+1)] = item + } + return reversed, nil +} +func jpfToArray(arguments []interface{}) (interface{}, error) { + if _, ok := arguments[0].([]interface{}); ok { + return arguments[0], nil + } + return arguments[:1:1], nil +} +func jpfToString(arguments []interface{}) (interface{}, error) { + if v, ok := arguments[0].(string); ok { + return v, nil + } + result, err := json.Marshal(arguments[0]) + if err != nil { + return nil, err + } + return string(result), nil +} +func jpfToNumber(arguments []interface{}) (interface{}, error) { + arg := arguments[0] + if v, ok := arg.(float64); ok { + return v, nil + } + if v, ok := arg.(string); ok { + conv, err := strconv.ParseFloat(v, 64) + if err != nil { + return nil, nil + } + return conv, nil + } + if _, ok := arg.([]interface{}); ok { + return nil, nil + } + if _, ok := arg.(map[string]interface{}); ok { + return nil, nil + } + if arg == nil { + return nil, nil + } + if arg == true || arg == false { + return nil, nil + } + return nil, errors.New("unknown type") +} +func jpfNotNull(arguments []interface{}) (interface{}, error) { + for _, arg := range arguments { + if arg != nil { + return arg, nil + } + } + return nil, nil +} diff --git a/vendor/github.com/jmespath/go-jmespath/interpreter.go b/vendor/github.com/jmespath/go-jmespath/interpreter.go new file mode 100644 index 000000000..13c74604c --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/interpreter.go @@ -0,0 +1,418 @@ +package jmespath + +import ( + "errors" + "reflect" + "unicode" + "unicode/utf8" +) + +/* This is a tree based interpreter. It walks the AST and directly + interprets the AST to search through a JSON document. +*/ + +type treeInterpreter struct { + fCall *functionCaller +} + +func newInterpreter() *treeInterpreter { + interpreter := treeInterpreter{} + interpreter.fCall = newFunctionCaller() + return &interpreter +} + +type expRef struct { + ref ASTNode +} + +// Execute takes an ASTNode and input data and interprets the AST directly. +// It will produce the result of applying the JMESPath expression associated +// with the ASTNode to the input data "value". 
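+//
+// Sketch: with ast, _ := NewParser().Parse("foo"), calling
+// intr.Execute(ast, map[string]interface{}{"foo": 1}) takes the ASTField
+// case below and returns 1.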
+func (intr *treeInterpreter) Execute(node ASTNode, value interface{}) (interface{}, error) { + switch node.nodeType { + case ASTComparator: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + right, err := intr.Execute(node.children[1], value) + if err != nil { + return nil, err + } + switch node.value { + case tEQ: + return objsEqual(left, right), nil + case tNE: + return !objsEqual(left, right), nil + } + leftNum, ok := left.(float64) + if !ok { + return nil, nil + } + rightNum, ok := right.(float64) + if !ok { + return nil, nil + } + switch node.value { + case tGT: + return leftNum > rightNum, nil + case tGTE: + return leftNum >= rightNum, nil + case tLT: + return leftNum < rightNum, nil + case tLTE: + return leftNum <= rightNum, nil + } + case ASTExpRef: + return expRef{ref: node.children[0]}, nil + case ASTFunctionExpression: + resolvedArgs := []interface{}{} + for _, arg := range node.children { + current, err := intr.Execute(arg, value) + if err != nil { + return nil, err + } + resolvedArgs = append(resolvedArgs, current) + } + return intr.fCall.CallFunction(node.value.(string), resolvedArgs, intr) + case ASTField: + if m, ok := value.(map[string]interface{}); ok { + key := node.value.(string) + return m[key], nil + } + return intr.fieldFromStruct(node.value.(string), value) + case ASTFilterProjection: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, nil + } + sliceType, ok := left.([]interface{}) + if !ok { + if isSliceType(left) { + return intr.filterProjectionWithReflection(node, left) + } + return nil, nil + } + compareNode := node.children[2] + collected := []interface{}{} + for _, element := range sliceType { + result, err := intr.Execute(compareNode, element) + if err != nil { + return nil, err + } + if !isFalse(result) { + current, err := intr.Execute(node.children[1], element) + if err != nil { + return nil, err + } + if current != nil { + collected = append(collected, current) + } + } + } + return collected, nil + case ASTFlatten: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, nil + } + sliceType, ok := left.([]interface{}) + if !ok { + // If we can't type convert to []interface{}, there's + // a chance this could still work via reflection if we're + // dealing with user provided types. + if isSliceType(left) { + return intr.flattenWithReflection(left) + } + return nil, nil + } + flattened := []interface{}{} + for _, element := range sliceType { + if elementSlice, ok := element.([]interface{}); ok { + flattened = append(flattened, elementSlice...) + } else if isSliceType(element) { + reflectFlat := []interface{}{} + v := reflect.ValueOf(element) + for i := 0; i < v.Len(); i++ { + reflectFlat = append(reflectFlat, v.Index(i).Interface()) + } + flattened = append(flattened, reflectFlat...) + } else { + flattened = append(flattened, element) + } + } + return flattened, nil + case ASTIdentity, ASTCurrentNode: + return value, nil + case ASTIndex: + if sliceType, ok := value.([]interface{}); ok { + index := node.value.(int) + if index < 0 { + index += len(sliceType) + } + if index < len(sliceType) && index >= 0 { + return sliceType[index], nil + } + return nil, nil + } + // Otherwise try via reflection. 
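+		// (This path covers user-provided typed slices such as []int that
+		// don't type-assert to []interface{}.)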
+		rv := reflect.ValueOf(value)
+		if rv.Kind() == reflect.Slice {
+			index := node.value.(int)
+			if index < 0 {
+				index += rv.Len()
+			}
+			if index < rv.Len() && index >= 0 {
+				v := rv.Index(index)
+				return v.Interface(), nil
+			}
+		}
+		return nil, nil
+	case ASTKeyValPair:
+		return intr.Execute(node.children[0], value)
+	case ASTLiteral:
+		return node.value, nil
+	case ASTMultiSelectHash:
+		if value == nil {
+			return nil, nil
+		}
+		collected := make(map[string]interface{})
+		for _, child := range node.children {
+			current, err := intr.Execute(child, value)
+			if err != nil {
+				return nil, err
+			}
+			key := child.value.(string)
+			collected[key] = current
+		}
+		return collected, nil
+	case ASTMultiSelectList:
+		if value == nil {
+			return nil, nil
+		}
+		collected := []interface{}{}
+		for _, child := range node.children {
+			current, err := intr.Execute(child, value)
+			if err != nil {
+				return nil, err
+			}
+			collected = append(collected, current)
+		}
+		return collected, nil
+	case ASTOrExpression:
+		matched, err := intr.Execute(node.children[0], value)
+		if err != nil {
+			return nil, err
+		}
+		if isFalse(matched) {
+			matched, err = intr.Execute(node.children[1], value)
+			if err != nil {
+				return nil, err
+			}
+		}
+		return matched, nil
+	case ASTAndExpression:
+		matched, err := intr.Execute(node.children[0], value)
+		if err != nil {
+			return nil, err
+		}
+		if isFalse(matched) {
+			return matched, nil
+		}
+		return intr.Execute(node.children[1], value)
+	case ASTNotExpression:
+		matched, err := intr.Execute(node.children[0], value)
+		if err != nil {
+			return nil, err
+		}
+		if isFalse(matched) {
+			return true, nil
+		}
+		return false, nil
+	case ASTPipe:
+		result := value
+		var err error
+		for _, child := range node.children {
+			result, err = intr.Execute(child, result)
+			if err != nil {
+				return nil, err
+			}
+		}
+		return result, nil
+	case ASTProjection:
+		left, err := intr.Execute(node.children[0], value)
+		if err != nil {
+			return nil, err
+		}
+		sliceType, ok := left.([]interface{})
+		if !ok {
+			if isSliceType(left) {
+				return intr.projectWithReflection(node, left)
+			}
+			return nil, nil
+		}
+		collected := []interface{}{}
+		var current interface{}
+		for _, element := range sliceType {
+			current, err = intr.Execute(node.children[1], element)
+			if err != nil {
+				return nil, err
+			}
+			if current != nil {
+				collected = append(collected, current)
+			}
+		}
+		return collected, nil
+	case ASTSubexpression, ASTIndexExpression:
+		left, err := intr.Execute(node.children[0], value)
+		if err != nil {
+			return nil, err
+		}
+		return intr.Execute(node.children[1], left)
+	case ASTSlice:
+		sliceType, ok := value.([]interface{})
+		if !ok {
+			if isSliceType(value) {
+				return intr.sliceWithReflection(node, value)
+			}
+			return nil, nil
+		}
+		parts := node.value.([]*int)
+		sliceParams := make([]sliceParam, 3)
+		for i, part := range parts {
+			if part != nil {
+				sliceParams[i].Specified = true
+				sliceParams[i].N = *part
+			}
+		}
+		return slice(sliceType, sliceParams)
+	case ASTValueProjection:
+		left, err := intr.Execute(node.children[0], value)
+		if err != nil {
+			return nil, nil
+		}
+		mapType, ok := left.(map[string]interface{})
+		if !ok {
+			return nil, nil
+		}
+		// Allocate with zero length so append doesn't leave nil placeholders.
+		values := make([]interface{}, 0, len(mapType))
+		for _, value := range mapType {
+			values = append(values, value)
+		}
+		collected := []interface{}{}
+		for _, element := range values {
+			current, err := intr.Execute(node.children[1], element)
+			if err != nil {
+				return nil, err
+			}
+			if current != nil {
+				collected = append(collected, current)
+			}
+		}
+		return collected, nil
nil + } + return nil, errors.New("Unknown AST node: " + node.nodeType.String()) +} + +func (intr *treeInterpreter) fieldFromStruct(key string, value interface{}) (interface{}, error) { + rv := reflect.ValueOf(value) + first, n := utf8.DecodeRuneInString(key) + fieldName := string(unicode.ToUpper(first)) + key[n:] + if rv.Kind() == reflect.Struct { + v := rv.FieldByName(fieldName) + if !v.IsValid() { + return nil, nil + } + return v.Interface(), nil + } else if rv.Kind() == reflect.Ptr { + // Handle multiple levels of indirection? + if rv.IsNil() { + return nil, nil + } + rv = rv.Elem() + v := rv.FieldByName(fieldName) + if !v.IsValid() { + return nil, nil + } + return v.Interface(), nil + } + return nil, nil +} + +func (intr *treeInterpreter) flattenWithReflection(value interface{}) (interface{}, error) { + v := reflect.ValueOf(value) + flattened := []interface{}{} + for i := 0; i < v.Len(); i++ { + element := v.Index(i).Interface() + if reflect.TypeOf(element).Kind() == reflect.Slice { + // Then insert the contents of the element + // slice into the flattened slice, + // i.e flattened = append(flattened, mySlice...) + elementV := reflect.ValueOf(element) + for j := 0; j < elementV.Len(); j++ { + flattened = append( + flattened, elementV.Index(j).Interface()) + } + } else { + flattened = append(flattened, element) + } + } + return flattened, nil +} + +func (intr *treeInterpreter) sliceWithReflection(node ASTNode, value interface{}) (interface{}, error) { + v := reflect.ValueOf(value) + parts := node.value.([]*int) + sliceParams := make([]sliceParam, 3) + for i, part := range parts { + if part != nil { + sliceParams[i].Specified = true + sliceParams[i].N = *part + } + } + final := []interface{}{} + for i := 0; i < v.Len(); i++ { + element := v.Index(i).Interface() + final = append(final, element) + } + return slice(final, sliceParams) +} + +func (intr *treeInterpreter) filterProjectionWithReflection(node ASTNode, value interface{}) (interface{}, error) { + compareNode := node.children[2] + collected := []interface{}{} + v := reflect.ValueOf(value) + for i := 0; i < v.Len(); i++ { + element := v.Index(i).Interface() + result, err := intr.Execute(compareNode, element) + if err != nil { + return nil, err + } + if !isFalse(result) { + current, err := intr.Execute(node.children[1], element) + if err != nil { + return nil, err + } + if current != nil { + collected = append(collected, current) + } + } + } + return collected, nil +} + +func (intr *treeInterpreter) projectWithReflection(node ASTNode, value interface{}) (interface{}, error) { + collected := []interface{}{} + v := reflect.ValueOf(value) + for i := 0; i < v.Len(); i++ { + element := v.Index(i).Interface() + result, err := intr.Execute(node.children[1], element) + if err != nil { + return nil, err + } + if result != nil { + collected = append(collected, result) + } + } + return collected, nil +} diff --git a/vendor/github.com/jmespath/go-jmespath/lexer.go b/vendor/github.com/jmespath/go-jmespath/lexer.go new file mode 100644 index 000000000..817900c8f --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/lexer.go @@ -0,0 +1,420 @@ +package jmespath + +import ( + "bytes" + "encoding/json" + "fmt" + "strconv" + "strings" + "unicode/utf8" +) + +type token struct { + tokenType tokType + value string + position int + length int +} + +type tokType int + +const eof = -1 + +// Lexer contains information about the expression being tokenized. +type Lexer struct { + expression string // The expression provided by the user. 
+	currentPos int          // The current position in the string.
+	lastWidth  int          // The width of the current rune.
+	buf        bytes.Buffer // Internal buffer used for building up values.
+}
+
+// SyntaxError is the main error used whenever a lexing or parsing error occurs.
+type SyntaxError struct {
+	msg        string // Error message displayed to user
+	Expression string // Expression that generated a SyntaxError
+	Offset     int    // The location in the string where the error occurred
+}
+
+func (e SyntaxError) Error() string {
+	// In the future, it would be good to underline the specific
+	// location where the error occurred.
+	return "SyntaxError: " + e.msg
+}
+
+// HighlightLocation will show where the syntax error occurred.
+// It will place a "^" character on a line below the expression
+// at the point where the syntax error occurred.
+func (e SyntaxError) HighlightLocation() string {
+	return e.Expression + "\n" + strings.Repeat(" ", e.Offset) + "^"
+}
+
+//go:generate stringer -type=tokType
+const (
+	tUnknown tokType = iota
+	tStar
+	tDot
+	tFilter
+	tFlatten
+	tLparen
+	tRparen
+	tLbracket
+	tRbracket
+	tLbrace
+	tRbrace
+	tOr
+	tPipe
+	tNumber
+	tUnquotedIdentifier
+	tQuotedIdentifier
+	tComma
+	tColon
+	tLT
+	tLTE
+	tGT
+	tGTE
+	tEQ
+	tNE
+	tJSONLiteral
+	tStringLiteral
+	tCurrent
+	tExpref
+	tAnd
+	tNot
+	tEOF
+)
+
+var basicTokens = map[rune]tokType{
+	'.': tDot,
+	'*': tStar,
+	',': tComma,
+	':': tColon,
+	'{': tLbrace,
+	'}': tRbrace,
+	']': tRbracket, // tLbracket not included because it could be "[]"
+	'(': tLparen,
+	')': tRparen,
+	'@': tCurrent,
+}
+
+// Bit mask for [a-zA-Z_] shifted down 64 bits to fit in a single uint64.
+// When using this bitmask just be sure to shift the rune down 64 bits
+// before checking against identifierStartBits.
+const identifierStartBits uint64 = 576460745995190270
+
+// Bit mask for [a-zA-Z0-9_], 128 bits -> 2 uint64s.
+var identifierTrailingBits = [2]uint64{287948901175001088, 576460745995190270}
+
+var whiteSpace = map[rune]bool{
+	' ': true, '\t': true, '\n': true, '\r': true,
+}
+
+func (t token) String() string {
+	return fmt.Sprintf("Token{%+v, %s, %d, %d}",
+		t.tokenType, t.value, t.position, t.length)
+}
+
+// NewLexer creates a new JMESPath lexer.
+func NewLexer() *Lexer {
+	lexer := Lexer{}
+	return &lexer
+}
+
+func (lexer *Lexer) next() rune {
+	if lexer.currentPos >= len(lexer.expression) {
+		lexer.lastWidth = 0
+		return eof
+	}
+	r, w := utf8.DecodeRuneInString(lexer.expression[lexer.currentPos:])
+	lexer.lastWidth = w
+	lexer.currentPos += w
+	return r
+}
+
+func (lexer *Lexer) back() {
+	lexer.currentPos -= lexer.lastWidth
+}
+
+func (lexer *Lexer) peek() rune {
+	t := lexer.next()
+	lexer.back()
+	return t
+}
+
+// tokenize takes an expression and returns corresponding tokens.
+func (lexer *Lexer) tokenize(expression string) ([]token, error) {
+	var tokens []token
+	lexer.expression = expression
+	lexer.currentPos = 0
+	lexer.lastWidth = 0
+loop:
+	for {
+		r := lexer.next()
+		if identifierStartBits&(1<<(uint64(r)-64)) > 0 {
+			t := lexer.consumeUnquotedIdentifier()
+			tokens = append(tokens, t)
+		} else if val, ok := basicTokens[r]; ok {
+			// Basic single char token.
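+			// A note on the identifier fast path above: the mask test
+			// is a branch-free way of asking "is r in [a-zA-Z_]?".
+			// For example, 'a' is code point 97, 97-64 = 33, and bit 33
+			// of identifierStartBits is set, so consumeUnquotedIdentifier
+			// runs. For runes below 64 (like '.') the uint64 subtraction
+			// wraps, the shift count exceeds 63, 1<<n evaluates to 0,
+			// and the test fails, so lookup falls through to this map.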
+ t := token{ + tokenType: val, + value: string(r), + position: lexer.currentPos - lexer.lastWidth, + length: 1, + } + tokens = append(tokens, t) + } else if r == '-' || (r >= '0' && r <= '9') { + t := lexer.consumeNumber() + tokens = append(tokens, t) + } else if r == '[' { + t := lexer.consumeLBracket() + tokens = append(tokens, t) + } else if r == '"' { + t, err := lexer.consumeQuotedIdentifier() + if err != nil { + return tokens, err + } + tokens = append(tokens, t) + } else if r == '\'' { + t, err := lexer.consumeRawStringLiteral() + if err != nil { + return tokens, err + } + tokens = append(tokens, t) + } else if r == '`' { + t, err := lexer.consumeLiteral() + if err != nil { + return tokens, err + } + tokens = append(tokens, t) + } else if r == '|' { + t := lexer.matchOrElse(r, '|', tOr, tPipe) + tokens = append(tokens, t) + } else if r == '<' { + t := lexer.matchOrElse(r, '=', tLTE, tLT) + tokens = append(tokens, t) + } else if r == '>' { + t := lexer.matchOrElse(r, '=', tGTE, tGT) + tokens = append(tokens, t) + } else if r == '!' { + t := lexer.matchOrElse(r, '=', tNE, tNot) + tokens = append(tokens, t) + } else if r == '=' { + t := lexer.matchOrElse(r, '=', tEQ, tUnknown) + tokens = append(tokens, t) + } else if r == '&' { + t := lexer.matchOrElse(r, '&', tAnd, tExpref) + tokens = append(tokens, t) + } else if r == eof { + break loop + } else if _, ok := whiteSpace[r]; ok { + // Ignore whitespace + } else { + return tokens, lexer.syntaxError(fmt.Sprintf("Unknown char: %s", strconv.QuoteRuneToASCII(r))) + } + } + tokens = append(tokens, token{tEOF, "", len(lexer.expression), 0}) + return tokens, nil +} + +// Consume characters until the ending rune "r" is reached. +// If the end of the expression is reached before seeing the +// terminating rune "r", then an error is returned. +// If no error occurs then the matching substring is returned. +// The returned string will not include the ending rune. +func (lexer *Lexer) consumeUntil(end rune) (string, error) { + start := lexer.currentPos + current := lexer.next() + for current != end && current != eof { + if current == '\\' && lexer.peek() != eof { + lexer.next() + } + current = lexer.next() + } + if lexer.lastWidth == 0 { + // Then we hit an EOF so we never reached the closing + // delimiter. + return "", SyntaxError{ + msg: "Unclosed delimiter: " + string(end), + Expression: lexer.expression, + Offset: len(lexer.expression), + } + } + return lexer.expression[start : lexer.currentPos-lexer.lastWidth], nil +} + +func (lexer *Lexer) consumeLiteral() (token, error) { + start := lexer.currentPos + value, err := lexer.consumeUntil('`') + if err != nil { + return token{}, err + } + value = strings.Replace(value, "\\`", "`", -1) + return token{ + tokenType: tJSONLiteral, + value: value, + position: start, + length: len(value), + }, nil +} + +func (lexer *Lexer) consumeRawStringLiteral() (token, error) { + start := lexer.currentPos + currentIndex := start + current := lexer.next() + for current != '\'' && lexer.peek() != eof { + if current == '\\' && lexer.peek() == '\'' { + chunk := lexer.expression[currentIndex : lexer.currentPos-1] + lexer.buf.WriteString(chunk) + lexer.buf.WriteString("'") + lexer.next() + currentIndex = lexer.currentPos + } + current = lexer.next() + } + if lexer.lastWidth == 0 { + // Then we hit an EOF so we never reached the closing + // delimiter. 
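+	// (lexer.lastWidth is zeroed only when next() runs past the end
+	// of the input, so this check is effectively "did we hit EOF?".
+	// For example, tokenizing the unterminated literal 'abc stops
+	// here and reports the error at offset len(expression).)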
+		return token{}, SyntaxError{
+			msg:        "Unclosed delimiter: '",
+			Expression: lexer.expression,
+			Offset:     len(lexer.expression),
+		}
+	}
+	if currentIndex < lexer.currentPos {
+		lexer.buf.WriteString(lexer.expression[currentIndex : lexer.currentPos-1])
+	}
+	value := lexer.buf.String()
+	// Reset the buffer so it can be reused again.
+	lexer.buf.Reset()
+	return token{
+		tokenType: tStringLiteral,
+		value:     value,
+		position:  start,
+		length:    len(value),
+	}, nil
+}
+
+func (lexer *Lexer) syntaxError(msg string) SyntaxError {
+	return SyntaxError{
+		msg:        msg,
+		Expression: lexer.expression,
+		Offset:     lexer.currentPos - 1,
+	}
+}
+
+// Checks for a two char token, otherwise matches a single character
+// token. This is used whenever a two char token overlaps a single
+// char token, e.g. "||" -> tPipe, "|" -> tOr.
+func (lexer *Lexer) matchOrElse(first rune, second rune, matchedType tokType, singleCharType tokType) token {
+	start := lexer.currentPos - lexer.lastWidth
+	nextRune := lexer.next()
+	var t token
+	if nextRune == second {
+		t = token{
+			tokenType: matchedType,
+			value:     string(first) + string(second),
+			position:  start,
+			length:    2,
+		}
+	} else {
+		lexer.back()
+		t = token{
+			tokenType: singleCharType,
+			value:     string(first),
+			position:  start,
+			length:    1,
+		}
+	}
+	return t
+}
+
+func (lexer *Lexer) consumeLBracket() token {
+	// There are three options here:
+	// 1. A filter expression "[?"
+	// 2. A flatten operator "[]"
+	// 3. A bare lbracket "["
+	start := lexer.currentPos - lexer.lastWidth
+	nextRune := lexer.next()
+	var t token
+	if nextRune == '?' {
+		t = token{
+			tokenType: tFilter,
+			value:     "[?",
+			position:  start,
+			length:    2,
+		}
+	} else if nextRune == ']' {
+		t = token{
+			tokenType: tFlatten,
+			value:     "[]",
+			position:  start,
+			length:    2,
+		}
+	} else {
+		t = token{
+			tokenType: tLbracket,
+			value:     "[",
+			position:  start,
+			length:    1,
+		}
+		lexer.back()
+	}
+	return t
+}
+
+func (lexer *Lexer) consumeQuotedIdentifier() (token, error) {
+	start := lexer.currentPos
+	value, err := lexer.consumeUntil('"')
+	if err != nil {
+		return token{}, err
+	}
+	var decoded string
+	asJSON := []byte("\"" + value + "\"")
+	if err := json.Unmarshal(asJSON, &decoded); err != nil {
+		return token{}, err
+	}
+	return token{
+		tokenType: tQuotedIdentifier,
+		value:     decoded,
+		position:  start - 1,
+		length:    len(decoded),
+	}, nil
+}
+
+func (lexer *Lexer) consumeUnquotedIdentifier() token {
+	// Consume runes until we reach the end of an unquoted
+	// identifier.
+	start := lexer.currentPos - lexer.lastWidth
+	for {
+		r := lexer.next()
+		if r < 0 || r > 127 || identifierTrailingBits[uint64(r)/64]&(1<<(uint64(r)%64)) == 0 {
+			lexer.back()
+			break
+		}
+	}
+	value := lexer.expression[start:lexer.currentPos]
+	return token{
+		tokenType: tUnquotedIdentifier,
+		value:     value,
+		position:  start,
+		length:    lexer.currentPos - start,
+	}
+}
+
+func (lexer *Lexer) consumeNumber() token {
+	// Consume runes until we reach something that's not a number.
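+	// For example, if '-' was just consumed from the input "-12]",
+	// start backs up to include it, the loop consumes "12", back()
+	// returns the ']', and the token {tNumber, "-12", position, 3}
+	// is produced. Note that only an integer prefix is consumed;
+	// floating point numbers appear as JSON literals instead.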
+ start := lexer.currentPos - lexer.lastWidth + for { + r := lexer.next() + if r < '0' || r > '9' { + lexer.back() + break + } + } + value := lexer.expression[start:lexer.currentPos] + return token{ + tokenType: tNumber, + value: value, + position: start, + length: lexer.currentPos - start, + } +} diff --git a/vendor/github.com/jmespath/go-jmespath/parser.go b/vendor/github.com/jmespath/go-jmespath/parser.go new file mode 100644 index 000000000..1240a1755 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/parser.go @@ -0,0 +1,603 @@ +package jmespath + +import ( + "encoding/json" + "fmt" + "strconv" + "strings" +) + +type astNodeType int + +//go:generate stringer -type astNodeType +const ( + ASTEmpty astNodeType = iota + ASTComparator + ASTCurrentNode + ASTExpRef + ASTFunctionExpression + ASTField + ASTFilterProjection + ASTFlatten + ASTIdentity + ASTIndex + ASTIndexExpression + ASTKeyValPair + ASTLiteral + ASTMultiSelectHash + ASTMultiSelectList + ASTOrExpression + ASTAndExpression + ASTNotExpression + ASTPipe + ASTProjection + ASTSubexpression + ASTSlice + ASTValueProjection +) + +// ASTNode represents the abstract syntax tree of a JMESPath expression. +type ASTNode struct { + nodeType astNodeType + value interface{} + children []ASTNode +} + +func (node ASTNode) String() string { + return node.PrettyPrint(0) +} + +// PrettyPrint will pretty print the parsed AST. +// The AST is an implementation detail and this pretty print +// function is provided as a convenience method to help with +// debugging. You should not rely on its output as the internal +// structure of the AST may change at any time. +func (node ASTNode) PrettyPrint(indent int) string { + spaces := strings.Repeat(" ", indent) + output := fmt.Sprintf("%s%s {\n", spaces, node.nodeType) + nextIndent := indent + 2 + if node.value != nil { + if converted, ok := node.value.(fmt.Stringer); ok { + // Account for things like comparator nodes + // that are enums with a String() method. + output += fmt.Sprintf("%svalue: %s\n", strings.Repeat(" ", nextIndent), converted.String()) + } else { + output += fmt.Sprintf("%svalue: %#v\n", strings.Repeat(" ", nextIndent), node.value) + } + } + lastIndex := len(node.children) + if lastIndex > 0 { + output += fmt.Sprintf("%schildren: {\n", strings.Repeat(" ", nextIndent)) + childIndent := nextIndent + 2 + for _, elem := range node.children { + output += elem.PrettyPrint(childIndent) + } + } + output += fmt.Sprintf("%s}\n", spaces) + return output +} + +var bindingPowers = map[tokType]int{ + tEOF: 0, + tUnquotedIdentifier: 0, + tQuotedIdentifier: 0, + tRbracket: 0, + tRparen: 0, + tComma: 0, + tRbrace: 0, + tNumber: 0, + tCurrent: 0, + tExpref: 0, + tColon: 0, + tPipe: 1, + tOr: 2, + tAnd: 3, + tEQ: 5, + tLT: 5, + tLTE: 5, + tGT: 5, + tGTE: 5, + tNE: 5, + tFlatten: 9, + tStar: 20, + tFilter: 21, + tDot: 40, + tNot: 45, + tLbrace: 50, + tLbracket: 55, + tLparen: 60, +} + +// Parser holds state about the current expression being parsed. +type Parser struct { + expression string + tokens []token + index int +} + +// NewParser creates a new JMESPath parser. +func NewParser() *Parser { + p := Parser{} + return &p +} + +// Parse will compile a JMESPath expression. 
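+// A minimal usage sketch (using only the API defined in this file):
+//
+//	p := NewParser()
+//	ast, err := p.Parse("foo.bar[0]")
+//	if err != nil {
+//		// Syntax problems are reported as SyntaxError.
+//	}
+//	fmt.Println(ast) // PrettyPrint-ed form of the AST
+//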
+func (p *Parser) Parse(expression string) (ASTNode, error) {
+	lexer := NewLexer()
+	p.expression = expression
+	p.index = 0
+	tokens, err := lexer.tokenize(expression)
+	if err != nil {
+		return ASTNode{}, err
+	}
+	p.tokens = tokens
+	parsed, err := p.parseExpression(0)
+	if err != nil {
+		return ASTNode{}, err
+	}
+	if p.current() != tEOF {
+		return ASTNode{}, p.syntaxError(fmt.Sprintf(
+			"Unexpected token at the end of the expression: %s", p.current()))
+	}
+	return parsed, nil
+}
+
+func (p *Parser) parseExpression(bindingPower int) (ASTNode, error) {
+	var err error
+	leftToken := p.lookaheadToken(0)
+	p.advance()
+	leftNode, err := p.nud(leftToken)
+	if err != nil {
+		return ASTNode{}, err
+	}
+	currentToken := p.current()
+	for bindingPower < bindingPowers[currentToken] {
+		p.advance()
+		leftNode, err = p.led(currentToken, leftNode)
+		if err != nil {
+			return ASTNode{}, err
+		}
+		currentToken = p.current()
+	}
+	return leftNode, nil
+}
+
+func (p *Parser) parseIndexExpression() (ASTNode, error) {
+	if p.lookahead(0) == tColon || p.lookahead(1) == tColon {
+		return p.parseSliceExpression()
+	}
+	indexStr := p.lookaheadToken(0).value
+	parsedInt, err := strconv.Atoi(indexStr)
+	if err != nil {
+		return ASTNode{}, err
+	}
+	indexNode := ASTNode{nodeType: ASTIndex, value: parsedInt}
+	p.advance()
+	if err := p.match(tRbracket); err != nil {
+		return ASTNode{}, err
+	}
+	return indexNode, nil
+}
+
+func (p *Parser) parseSliceExpression() (ASTNode, error) {
+	parts := []*int{nil, nil, nil}
+	index := 0
+	current := p.current()
+	for current != tRbracket && index < 3 {
+		if current == tColon {
+			index++
+			p.advance()
+		} else if current == tNumber {
+			parsedInt, err := strconv.Atoi(p.lookaheadToken(0).value)
+			if err != nil {
+				return ASTNode{}, err
+			}
+			parts[index] = &parsedInt
+			p.advance()
+		} else {
+			return ASTNode{}, p.syntaxError(
+				"Expected tColon or tNumber, received: " + p.current().String())
+		}
+		current = p.current()
+	}
+	if err := p.match(tRbracket); err != nil {
+		return ASTNode{}, err
+	}
+	return ASTNode{
+		nodeType: ASTSlice,
+		value:    parts,
+	}, nil
+}
+
+func (p *Parser) match(tokenType tokType) error {
+	if p.current() == tokenType {
+		p.advance()
+		return nil
+	}
+	return p.syntaxError("Expected " + tokenType.String() + ", received: " + p.current().String())
+}
+
+func (p *Parser) led(tokenType tokType, node ASTNode) (ASTNode, error) {
+	switch tokenType {
+	case tDot:
+		if p.current() != tStar {
+			right, err := p.parseDotRHS(bindingPowers[tDot])
+			return ASTNode{
+				nodeType: ASTSubexpression,
+				children: []ASTNode{node, right},
+			}, err
+		}
+		p.advance()
+		right, err := p.parseProjectionRHS(bindingPowers[tDot])
+		return ASTNode{
+			nodeType: ASTValueProjection,
+			children: []ASTNode{node, right},
+		}, err
+	case tPipe:
+		right, err := p.parseExpression(bindingPowers[tPipe])
+		return ASTNode{nodeType: ASTPipe, children: []ASTNode{node, right}}, err
+	case tOr:
+		right, err := p.parseExpression(bindingPowers[tOr])
+		return ASTNode{nodeType: ASTOrExpression, children: []ASTNode{node, right}}, err
+	case tAnd:
+		right, err := p.parseExpression(bindingPowers[tAnd])
+		return ASTNode{nodeType: ASTAndExpression, children: []ASTNode{node, right}}, err
+	case tLparen:
+		name := node.value
+		var args []ASTNode
+		for p.current() != tRparen {
+			expression, err := p.parseExpression(0)
+			if err != nil {
+				return ASTNode{}, err
+			}
+			if p.current() == tComma {
+				if err := p.match(tComma); err != nil {
+					return ASTNode{}, err
+				}
+			}
+			args = append(args, expression)
+		}
+		
if err := p.match(tRparen); err != nil {
+			return ASTNode{}, err
+		}
+		return ASTNode{
+			nodeType: ASTFunctionExpression,
+			value:    name,
+			children: args,
+		}, nil
+	case tFilter:
+		return p.parseFilter(node)
+	case tFlatten:
+		left := ASTNode{nodeType: ASTFlatten, children: []ASTNode{node}}
+		right, err := p.parseProjectionRHS(bindingPowers[tFlatten])
+		return ASTNode{
+			nodeType: ASTProjection,
+			children: []ASTNode{left, right},
+		}, err
+	case tEQ, tNE, tGT, tGTE, tLT, tLTE:
+		right, err := p.parseExpression(bindingPowers[tokenType])
+		if err != nil {
+			return ASTNode{}, err
+		}
+		return ASTNode{
+			nodeType: ASTComparator,
+			value:    tokenType,
+			children: []ASTNode{node, right},
+		}, nil
+	case tLbracket:
+		tokenType := p.current()
+		var right ASTNode
+		var err error
+		if tokenType == tNumber || tokenType == tColon {
+			right, err = p.parseIndexExpression()
+			if err != nil {
+				return ASTNode{}, err
+			}
+			return p.projectIfSlice(node, right)
+		}
+		// Otherwise this is a projection.
+		if err := p.match(tStar); err != nil {
+			return ASTNode{}, err
+		}
+		if err := p.match(tRbracket); err != nil {
+			return ASTNode{}, err
+		}
+		right, err = p.parseProjectionRHS(bindingPowers[tStar])
+		if err != nil {
+			return ASTNode{}, err
+		}
+		return ASTNode{
+			nodeType: ASTProjection,
+			children: []ASTNode{node, right},
+		}, nil
+	}
+	return ASTNode{}, p.syntaxError("Unexpected token: " + tokenType.String())
+}
+
+func (p *Parser) nud(token token) (ASTNode, error) {
+	switch token.tokenType {
+	case tJSONLiteral:
+		var parsed interface{}
+		err := json.Unmarshal([]byte(token.value), &parsed)
+		if err != nil {
+			return ASTNode{}, err
+		}
+		return ASTNode{nodeType: ASTLiteral, value: parsed}, nil
+	case tStringLiteral:
+		return ASTNode{nodeType: ASTLiteral, value: token.value}, nil
+	case tUnquotedIdentifier:
+		return ASTNode{
+			nodeType: ASTField,
+			value:    token.value,
+		}, nil
+	case tQuotedIdentifier:
+		node := ASTNode{nodeType: ASTField, value: token.value}
+		if p.current() == tLparen {
+			return ASTNode{}, p.syntaxErrorToken("Can't have quoted identifier as function name.", token)
+		}
+		return node, nil
+	case tStar:
+		left := ASTNode{nodeType: ASTIdentity}
+		var right ASTNode
+		var err error
+		if p.current() == tRbracket {
+			right = ASTNode{nodeType: ASTIdentity}
+		} else {
+			right, err = p.parseProjectionRHS(bindingPowers[tStar])
+		}
+		return ASTNode{nodeType: ASTValueProjection, children: []ASTNode{left, right}}, err
+	case tFilter:
+		return p.parseFilter(ASTNode{nodeType: ASTIdentity})
+	case tLbrace:
+		return p.parseMultiSelectHash()
+	case tFlatten:
+		left := ASTNode{
+			nodeType: ASTFlatten,
+			children: []ASTNode{{nodeType: ASTIdentity}},
+		}
+		right, err := p.parseProjectionRHS(bindingPowers[tFlatten])
+		if err != nil {
+			return ASTNode{}, err
+		}
+		return ASTNode{nodeType: ASTProjection, children: []ASTNode{left, right}}, nil
+	case tLbracket:
+		tokenType := p.current()
+		if tokenType == tNumber || tokenType == tColon {
+			right, err := p.parseIndexExpression()
+			if err != nil {
+				return ASTNode{}, err
+			}
+			return p.projectIfSlice(ASTNode{nodeType: ASTIdentity}, right)
+		} else if tokenType == tStar && p.lookahead(1) == tRbracket {
+			p.advance()
+			p.advance()
+			right, err := p.parseProjectionRHS(bindingPowers[tStar])
+			if err != nil {
+				return ASTNode{}, err
+			}
+			return ASTNode{
+				nodeType: ASTProjection,
+				children: []ASTNode{{nodeType: ASTIdentity}, right},
+			}, nil
+		} else {
+			return p.parseMultiSelectList()
+		}
+	case tCurrent:
+		return ASTNode{nodeType:
ASTCurrentNode}, nil
+	case tExpref:
+		expression, err := p.parseExpression(bindingPowers[tExpref])
+		if err != nil {
+			return ASTNode{}, err
+		}
+		return ASTNode{nodeType: ASTExpRef, children: []ASTNode{expression}}, nil
+	case tNot:
+		expression, err := p.parseExpression(bindingPowers[tNot])
+		if err != nil {
+			return ASTNode{}, err
+		}
+		return ASTNode{nodeType: ASTNotExpression, children: []ASTNode{expression}}, nil
+	case tLparen:
+		expression, err := p.parseExpression(0)
+		if err != nil {
+			return ASTNode{}, err
+		}
+		if err := p.match(tRparen); err != nil {
+			return ASTNode{}, err
+		}
+		return expression, nil
+	case tEOF:
+		return ASTNode{}, p.syntaxErrorToken("Incomplete expression", token)
+	}
+
+	return ASTNode{}, p.syntaxErrorToken("Invalid token: "+token.tokenType.String(), token)
+}
+
+func (p *Parser) parseMultiSelectList() (ASTNode, error) {
+	var expressions []ASTNode
+	for {
+		expression, err := p.parseExpression(0)
+		if err != nil {
+			return ASTNode{}, err
+		}
+		expressions = append(expressions, expression)
+		if p.current() == tRbracket {
+			break
+		}
+		err = p.match(tComma)
+		if err != nil {
+			return ASTNode{}, err
+		}
+	}
+	err := p.match(tRbracket)
+	if err != nil {
+		return ASTNode{}, err
+	}
+	return ASTNode{
+		nodeType: ASTMultiSelectList,
+		children: expressions,
+	}, nil
+}
+
+func (p *Parser) parseMultiSelectHash() (ASTNode, error) {
+	var children []ASTNode
+	for {
+		keyToken := p.lookaheadToken(0)
+		if err := p.match(tUnquotedIdentifier); err != nil {
+			if err := p.match(tQuotedIdentifier); err != nil {
+				return ASTNode{}, p.syntaxError("Expected tQuotedIdentifier or tUnquotedIdentifier")
+			}
+		}
+		keyName := keyToken.value
+		err := p.match(tColon)
+		if err != nil {
+			return ASTNode{}, err
+		}
+		value, err := p.parseExpression(0)
+		if err != nil {
+			return ASTNode{}, err
+		}
+		node := ASTNode{
+			nodeType: ASTKeyValPair,
+			value:    keyName,
+			children: []ASTNode{value},
+		}
+		children = append(children, node)
+		if p.current() == tComma {
+			err := p.match(tComma)
+			if err != nil {
+				return ASTNode{}, err
+			}
+		} else if p.current() == tRbrace {
+			err := p.match(tRbrace)
+			if err != nil {
+				return ASTNode{}, err
+			}
+			break
+		}
+	}
+	return ASTNode{
+		nodeType: ASTMultiSelectHash,
+		children: children,
+	}, nil
+}
+
+func (p *Parser) projectIfSlice(left ASTNode, right ASTNode) (ASTNode, error) {
+	indexExpr := ASTNode{
+		nodeType: ASTIndexExpression,
+		children: []ASTNode{left, right},
+	}
+	if right.nodeType == ASTSlice {
+		right, err := p.parseProjectionRHS(bindingPowers[tStar])
+		return ASTNode{
+			nodeType: ASTProjection,
+			children: []ASTNode{indexExpr, right},
+		}, err
+	}
+	return indexExpr, nil
+}
+
+func (p *Parser) parseFilter(node ASTNode) (ASTNode, error) {
+	var right, condition ASTNode
+	var err error
+	condition, err = p.parseExpression(0)
+	if err != nil {
+		return ASTNode{}, err
+	}
+	if err := p.match(tRbracket); err != nil {
+		return ASTNode{}, err
+	}
+	if p.current() == tFlatten {
+		right = ASTNode{nodeType: ASTIdentity}
+	} else {
+		right, err = p.parseProjectionRHS(bindingPowers[tFilter])
+		if err != nil {
+			return ASTNode{}, err
+		}
+	}
+
+	return ASTNode{
+		nodeType: ASTFilterProjection,
+		children: []ASTNode{node, right, condition},
+	}, nil
+}
+
+func (p *Parser) parseDotRHS(bindingPower int) (ASTNode, error) {
+	lookahead := p.current()
+	if tokensOneOf([]tokType{tQuotedIdentifier, tUnquotedIdentifier, tStar}, lookahead) {
+		return p.parseExpression(bindingPower)
+	} else if lookahead == tLbracket {
+		if err := p.match(tLbracket); err != nil
{ + return ASTNode{}, err + } + return p.parseMultiSelectList() + } else if lookahead == tLbrace { + if err := p.match(tLbrace); err != nil { + return ASTNode{}, err + } + return p.parseMultiSelectHash() + } + return ASTNode{}, p.syntaxError("Expected identifier, lbracket, or lbrace") +} + +func (p *Parser) parseProjectionRHS(bindingPower int) (ASTNode, error) { + current := p.current() + if bindingPowers[current] < 10 { + return ASTNode{nodeType: ASTIdentity}, nil + } else if current == tLbracket { + return p.parseExpression(bindingPower) + } else if current == tFilter { + return p.parseExpression(bindingPower) + } else if current == tDot { + err := p.match(tDot) + if err != nil { + return ASTNode{}, err + } + return p.parseDotRHS(bindingPower) + } else { + return ASTNode{}, p.syntaxError("Error") + } +} + +func (p *Parser) lookahead(number int) tokType { + return p.lookaheadToken(number).tokenType +} + +func (p *Parser) current() tokType { + return p.lookahead(0) +} + +func (p *Parser) lookaheadToken(number int) token { + return p.tokens[p.index+number] +} + +func (p *Parser) advance() { + p.index++ +} + +func tokensOneOf(elements []tokType, token tokType) bool { + for _, elem := range elements { + if elem == token { + return true + } + } + return false +} + +func (p *Parser) syntaxError(msg string) SyntaxError { + return SyntaxError{ + msg: msg, + Expression: p.expression, + Offset: p.lookaheadToken(0).position, + } +} + +// Create a SyntaxError based on the provided token. +// This differs from syntaxError() which creates a SyntaxError +// based on the current lookahead token. +func (p *Parser) syntaxErrorToken(msg string, t token) SyntaxError { + return SyntaxError{ + msg: msg, + Expression: p.expression, + Offset: t.position, + } +} diff --git a/vendor/github.com/jmespath/go-jmespath/toktype_string.go b/vendor/github.com/jmespath/go-jmespath/toktype_string.go new file mode 100644 index 000000000..dae79cbdf --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/toktype_string.go @@ -0,0 +1,16 @@ +// generated by stringer -type=tokType; DO NOT EDIT + +package jmespath + +import "fmt" + +const _tokType_name = "tUnknowntStartDottFiltertFlattentLparentRparentLbrackettRbrackettLbracetRbracetOrtPipetNumbertUnquotedIdentifiertQuotedIdentifiertCommatColontLTtLTEtGTtGTEtEQtNEtJSONLiteraltStringLiteraltCurrenttExpreftAndtNottEOF" + +var _tokType_index = [...]uint8{0, 8, 13, 17, 24, 32, 39, 46, 55, 64, 71, 78, 81, 86, 93, 112, 129, 135, 141, 144, 148, 151, 155, 158, 161, 173, 187, 195, 202, 206, 210, 214} + +func (i tokType) String() string { + if i < 0 || i >= tokType(len(_tokType_index)-1) { + return fmt.Sprintf("tokType(%d)", i) + } + return _tokType_name[_tokType_index[i]:_tokType_index[i+1]] +} diff --git a/vendor/github.com/jmespath/go-jmespath/util.go b/vendor/github.com/jmespath/go-jmespath/util.go new file mode 100644 index 000000000..ddc1b7d7d --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/util.go @@ -0,0 +1,185 @@ +package jmespath + +import ( + "errors" + "reflect" +) + +// IsFalse determines if an object is false based on the JMESPath spec. +// JMESPath defines false values to be any of: +// - An empty string array, or hash. +// - The boolean value false. 
+// - nil +func isFalse(value interface{}) bool { + switch v := value.(type) { + case bool: + return !v + case []interface{}: + return len(v) == 0 + case map[string]interface{}: + return len(v) == 0 + case string: + return len(v) == 0 + case nil: + return true + } + // Try the reflection cases before returning false. + rv := reflect.ValueOf(value) + switch rv.Kind() { + case reflect.Struct: + // A struct type will never be false, even if + // all of its values are the zero type. + return false + case reflect.Slice, reflect.Map: + return rv.Len() == 0 + case reflect.Ptr: + if rv.IsNil() { + return true + } + // If it's a pointer type, we'll try to deref the pointer + // and evaluate the pointer value for isFalse. + element := rv.Elem() + return isFalse(element.Interface()) + } + return false +} + +// ObjsEqual is a generic object equality check. +// It will take two arbitrary objects and recursively determine +// if they are equal. +func objsEqual(left interface{}, right interface{}) bool { + return reflect.DeepEqual(left, right) +} + +// SliceParam refers to a single part of a slice. +// A slice consists of a start, a stop, and a step, similar to +// python slices. +type sliceParam struct { + N int + Specified bool +} + +// Slice supports [start:stop:step] style slicing that's supported in JMESPath. +func slice(slice []interface{}, parts []sliceParam) ([]interface{}, error) { + computed, err := computeSliceParams(len(slice), parts) + if err != nil { + return nil, err + } + start, stop, step := computed[0], computed[1], computed[2] + result := []interface{}{} + if step > 0 { + for i := start; i < stop; i += step { + result = append(result, slice[i]) + } + } else { + for i := start; i > stop; i += step { + result = append(result, slice[i]) + } + } + return result, nil +} + +func computeSliceParams(length int, parts []sliceParam) ([]int, error) { + var start, stop, step int + if !parts[2].Specified { + step = 1 + } else if parts[2].N == 0 { + return nil, errors.New("Invalid slice, step cannot be 0") + } else { + step = parts[2].N + } + var stepValueNegative bool + if step < 0 { + stepValueNegative = true + } else { + stepValueNegative = false + } + + if !parts[0].Specified { + if stepValueNegative { + start = length - 1 + } else { + start = 0 + } + } else { + start = capSlice(length, parts[0].N, step) + } + + if !parts[1].Specified { + if stepValueNegative { + stop = -1 + } else { + stop = length + } + } else { + stop = capSlice(length, parts[1].N, step) + } + return []int{start, stop, step}, nil +} + +func capSlice(length int, actual int, step int) int { + if actual < 0 { + actual += length + if actual < 0 { + if step < 0 { + actual = -1 + } else { + actual = 0 + } + } + } else if actual >= length { + if step < 0 { + actual = length - 1 + } else { + actual = length + } + } + return actual +} + +// ToArrayNum converts an empty interface type to a slice of float64. +// If any element in the array cannot be converted, then nil is returned +// along with a second value of false. +func toArrayNum(data interface{}) ([]float64, bool) { + // Is there a better way to do this with reflect? + if d, ok := data.([]interface{}); ok { + result := make([]float64, len(d)) + for i, el := range d { + item, ok := el.(float64) + if !ok { + return nil, false + } + result[i] = item + } + return result, true + } + return nil, false +} + +// ToArrayStr converts an empty interface type to a slice of strings. 
+// If any element in the array cannot be converted, then nil is returned +// along with a second value of false. If the input data could be entirely +// converted, then the converted data, along with a second value of true, +// will be returned. +func toArrayStr(data interface{}) ([]string, bool) { + // Is there a better way to do this with reflect? + if d, ok := data.([]interface{}); ok { + result := make([]string, len(d)) + for i, el := range d { + item, ok := el.(string) + if !ok { + return nil, false + } + result[i] = item + } + return result, true + } + return nil, false +} + +func isSliceType(v interface{}) bool { + if v == nil { + return false + } + return reflect.TypeOf(v).Kind() == reflect.Slice +} diff --git a/vendor/github.com/knative/caching/.gitattributes b/vendor/github.com/knative/caching/.gitattributes new file mode 100644 index 000000000..00900bb83 --- /dev/null +++ b/vendor/github.com/knative/caching/.gitattributes @@ -0,0 +1,6 @@ +# This file is documented at https://git-scm.com/docs/gitattributes. +# Linguist-specific attributes are documented at +# https://github.com/github/linguist. + +**/zz_generated.*.go linguist-generated=true +/pkg/client/** linguist-generated=true diff --git a/vendor/github.com/knative/caching/.github/issue-template.md b/vendor/github.com/knative/caching/.github/issue-template.md new file mode 100644 index 000000000..7be34fe6f --- /dev/null +++ b/vendor/github.com/knative/caching/.github/issue-template.md @@ -0,0 +1,34 @@ + + +## Expected Behavior + + +## Actual Behavior + + +## Steps to Reproduce the Problem + + 1. + 2. + 3. + +## Additional Info diff --git a/vendor/github.com/knative/caching/.github/pull-request-template.md b/vendor/github.com/knative/caching/.github/pull-request-template.md new file mode 100644 index 000000000..033ec443c --- /dev/null +++ b/vendor/github.com/knative/caching/.github/pull-request-template.md @@ -0,0 +1,6 @@ + diff --git a/vendor/github.com/knative/caching/.gitignore b/vendor/github.com/knative/caching/.gitignore new file mode 100644 index 000000000..9f11b755a --- /dev/null +++ b/vendor/github.com/knative/caching/.gitignore @@ -0,0 +1 @@ +.idea/ diff --git a/vendor/github.com/knative/caching/CONTRIBUTING.md b/vendor/github.com/knative/caching/CONTRIBUTING.md new file mode 100644 index 000000000..a6138bbc1 --- /dev/null +++ b/vendor/github.com/knative/caching/CONTRIBUTING.md @@ -0,0 +1,5 @@ +# Contribution guidelines + +So you want to hack on Knative `caching`? Yay! Please refer to Knative's overall +[contribution guidelines](https://github.com/knative/docs/blob/master/community/CONTRIBUTING.md) +to find out how you can help. diff --git a/vendor/github.com/knative/caching/DEVELOPMENT.md b/vendor/github.com/knative/caching/DEVELOPMENT.md new file mode 100644 index 000000000..57ef2b25f --- /dev/null +++ b/vendor/github.com/knative/caching/DEVELOPMENT.md @@ -0,0 +1,68 @@ +# Development + +This doc explains how to setup a development environment so you can get started +[contributing](https://github.com/knative/docs/blob/master/community/CONTRIBUTING.md) +to Knative `caching`. Also take a look at: + +* [The pull request workflow](https://github.com/knative/docs/blob/master/community/CONTRIBUTING.md#pull-requests) + +## Getting started + +1. Create [a GitHub account](https://github.com/join) +1. Setup [GitHub access via + SSH](https://help.github.com/articles/connecting-to-github-with-ssh/) +1. Install [requirements](#requirements) +1. Set up your [shell environment](#environment-setup) +1. 
[Create and checkout a repo fork](#checkout-your-fork) + +Before submitting a PR, see also [CONTRIBUTING.md](./CONTRIBUTING.md). + +### Requirements + +You must install these tools: + +1. [`go`](https://golang.org/doc/install): The language Knative `caching` is built in +1. [`git`](https://help.github.com/articles/set-up-git/): For source control +1. [`dep`](https://github.com/golang/dep): For managing external dependencies. + +### Environment setup + +To get started you'll need to set these environment variables (we recommend +adding them to your `.bashrc`): + +1. `GOPATH`: If you don't have one, simply pick a directory and add +`export GOPATH=...` +1. `$GOPATH/bin` on `PATH`: This is so that tooling installed via `go get` will +work properly. + +`.bashrc` example: + +```shell +export GOPATH="$HOME/go" +export PATH="${PATH}:${GOPATH}/bin" +``` + +### Checkout your fork + +The Go tools require that you clone the repository to the `src/github.com/knative/caching` directory +in your [`GOPATH`](https://github.com/golang/go/wiki/SettingGOPATH). + +To check out this repository: + +1. Create your own [fork of this + repo](https://help.github.com/articles/fork-a-repo/) +1. Clone it to your machine: + + ```shell + mkdir -p ${GOPATH}/src/github.com/knative + cd ${GOPATH}/src/github.com/knative + git clone git@github.com:${YOUR_GITHUB_USERNAME}/caching.git + cd caching + git remote add upstream git@github.com:knative/caching.git + git remote set-url --push upstream no_push + ``` + +_Adding the `upstream` remote sets you up nicely for regularly [syncing your +fork](https://help.github.com/articles/syncing-a-fork/)._ + +Once you reach this point you are ready to do a full build and deploy as described below. diff --git a/vendor/github.com/knative/caching/Gopkg.lock b/vendor/github.com/knative/caching/Gopkg.lock new file mode 100644 index 000000000..cfe7125c3 --- /dev/null +++ b/vendor/github.com/knative/caching/Gopkg.lock @@ -0,0 +1,538 @@ +# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. 
+ + +[[projects]] + digest = "1:ffe9824d294da03b391f44e1ae8281281b4afc1bdaa9588c9097785e3af10cec" + name = "github.com/davecgh/go-spew" + packages = ["spew"] + pruneopts = "NUT" + revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73" + version = "v1.1.1" + +[[projects]] + digest = "1:81466b4218bf6adddac2572a30ac733a9255919bc2f470b4827a317bd4ee1756" + name = "github.com/ghodss/yaml" + packages = ["."] + pruneopts = "NUT" + revision = "0ca9ea5df5451ffdf184b4428c902747c2c11cd7" + version = "v1.0.0" + +[[projects]] + digest = "1:abea725bcf0210887f5da19d804fffa1dd45a42a56bdf5f02322345e3fee4f0d" + name = "github.com/gogo/protobuf" + packages = [ + "proto", + "sortkeys", + ] + pruneopts = "NUT" + revision = "4cbf7e384e768b4e01799441fdf2a706a5635ae7" + version = "v1.2.0" + +[[projects]] + branch = "master" + digest = "1:e2b86e41f3d669fc36b50d31d32d22c8ac656c75aa5ea89717ce7177e134ff2a" + name = "github.com/golang/glog" + packages = ["."] + pruneopts = "NUT" + revision = "23def4e6c14b4da8ac2ed8007337bc5eb5007998" + +[[projects]] + digest = "1:63ccdfbd20f7ccd2399d0647a7d100b122f79c13bb83da9660b1598396fd9f62" + name = "github.com/golang/protobuf" + packages = [ + "proto", + "ptypes", + "ptypes/any", + "ptypes/duration", + "ptypes/timestamp", + ] + pruneopts = "NUT" + revision = "aa810b61a9c79d51363740d207bb46cf8e620ed5" + version = "v1.2.0" + +[[projects]] + branch = "master" + digest = "1:05f95ffdfcf651bdb0f05b40b69e7f5663047f8da75c72d58728acb59b5cc107" + name = "github.com/google/btree" + packages = ["."] + pruneopts = "NUT" + revision = "4030bb1f1f0c35b30ca7009e9ebd06849dd45306" + +[[projects]] + digest = "1:2e3c336fc7fde5c984d2841455a658a6d626450b1754a854b3b32e7a8f49a07a" + name = "github.com/google/go-cmp" + packages = [ + "cmp", + "cmp/internal/diff", + "cmp/internal/function", + "cmp/internal/value", + ] + pruneopts = "NUT" + revision = "3af367b6b30c263d47e8895973edcca9a49cf029" + version = "v0.2.0" + +[[projects]] + branch = "master" + digest = "1:52c5834e2bebac9030c97cc0798ac11c3aa8a39f098aeb419f142533da6cd3cc" + name = "github.com/google/gofuzz" + packages = ["."] + pruneopts = "NUT" + revision = "24818f796faf91cd76ec7bddd72458fbced7a6c1" + +[[projects]] + branch = "master" + digest = "1:b6b3bd1c08338cb397623d1b9dedde711eccc2d3408fe9017a495d815065d869" + name = "github.com/google/licenseclassifier" + packages = [ + ".", + "internal/sets", + "stringclassifier", + "stringclassifier/internal/pq", + "stringclassifier/searchset", + "stringclassifier/searchset/tokenizer", + ] + pruneopts = "NUT" + revision = "c2a262e3078ad90718f59866f1ec18601b2fee1b" + +[[projects]] + digest = "1:06a7dadb7b760767341ffb6c8d377238d68a1226f2b21b5d497d2e3f6ecf6b4e" + name = "github.com/googleapis/gnostic" + packages = [ + "OpenAPIv2", + "compiler", + "extensions", + ] + pruneopts = "NUT" + revision = "7c663266750e7d82587642f65e60bc4083f1f84e" + version = "v0.2.0" + +[[projects]] + branch = "master" + digest = "1:7fdf3223c7372d1ced0b98bf53457c5e89d89aecbad9a77ba9fcc6e01f9e5621" + name = "github.com/gregjones/httpcache" + packages = [ + ".", + "diskcache", + ] + pruneopts = "NUT" + revision = "9cad4c3443a7200dd6400aef47183728de563a38" + +[[projects]] + digest = "1:b42cde0e1f3c816dd57f57f7bbcf05ca40263ad96f168714c130c611fc0856a6" + name = "github.com/hashicorp/golang-lru" + packages = [ + ".", + "simplelru", + ] + pruneopts = "NUT" + revision = "20f1fb78b0740ba8c3cb143a61e86ba5c8669768" + version = "v0.5.0" + +[[projects]] + digest = "1:0243cffa4a3410f161ee613dfdd903a636d07e838a42d341da95d81f42cd1d41" + name = 
"github.com/json-iterator/go" + packages = ["."] + pruneopts = "NUT" + revision = "f2b4162afba35581b6d4a50d3b8f34e33c144682" + +[[projects]] + digest = "1:fa6e19b10f3088d6f290e32ba2f9735d2810dd8e6544028d4d0c842c162b83ef" + name = "github.com/knative/pkg" + packages = [ + "apis", + "kmeta", + ] + pruneopts = "NUT" + revision = "60fdcbcabd2faeb34328d8b2725dc76c59189453" + +[[projects]] + branch = "master" + digest = "1:da39b58557275d30a9340c2e1e13e16691461f9859d3230f59cceed411c04b49" + name = "github.com/knative/test-infra" + packages = [ + "scripts", + "tools/dep-collector", + ] + pruneopts = "UT" + revision = "89e4aae358be056ee70b595c20106a4a5c70fdc1" + +[[projects]] + digest = "1:2f42fa12d6911c7b7659738758631bec870b7e9b4c6be5444f963cdcfccc191f" + name = "github.com/modern-go/concurrent" + packages = ["."] + pruneopts = "NUT" + revision = "bacd9c7ef1dd9b15be4a9909b8ac7a4e313eec94" + version = "1.0.3" + +[[projects]] + digest = "1:c6aca19413b13dc59c220ad7430329e2ec454cc310bc6d8de2c7e2b93c18a0f6" + name = "github.com/modern-go/reflect2" + packages = ["."] + pruneopts = "NUT" + revision = "4b7aa43c6742a2c18fdef89dd197aaae7dac7ccd" + version = "1.0.1" + +[[projects]] + branch = "master" + digest = "1:3bf17a6e6eaa6ad24152148a631d18662f7212e21637c2699bff3369b7f00fa2" + name = "github.com/petar/GoLLRB" + packages = ["llrb"] + pruneopts = "NUT" + revision = "53be0d36a84c2a886ca057d34b6aa4468df9ccb4" + +[[projects]] + digest = "1:6c6d91dc326ed6778783cff869c49fb2f61303cdd2ebbcf90abe53505793f3b6" + name = "github.com/peterbourgon/diskv" + packages = ["."] + pruneopts = "NUT" + revision = "5f041e8faa004a95c88a202771f4cc3e991971e6" + version = "v2.0.1" + +[[projects]] + digest = "1:d917313f309bda80d27274d53985bc65651f81a5b66b820749ac7f8ef061fd04" + name = "github.com/sergi/go-diff" + packages = ["diffmatchpatch"] + pruneopts = "NUT" + revision = "1744e2970ca51c86172c8190fadad617561ed6e7" + version = "v1.0.0" + +[[projects]] + digest = "1:9d8420bbf131d1618bde6530af37c3799340d3762cc47210c1d9532a4c3a2779" + name = "github.com/spf13/pflag" + packages = ["."] + pruneopts = "NUT" + revision = "298182f68c66c05229eb03ac171abe6e309ee79a" + version = "v1.0.3" + +[[projects]] + branch = "master" + digest = "1:3f3a05ae0b95893d90b9b3b5afdb79a9b3d96e4e36e099d841ae602e4aca0da8" + name = "golang.org/x/crypto" + packages = ["ssh/terminal"] + pruneopts = "NUT" + revision = "182538f80094b6a8efaade63a8fd8e0d9d5843dd" + +[[projects]] + branch = "master" + digest = "1:1400b8e87c2c9bd486ea1a13155f59f8f02d385761206df05c0b7db007a53b2c" + name = "golang.org/x/net" + packages = [ + "context", + "context/ctxhttp", + "http/httpguts", + "http2", + "http2/hpack", + "idna", + ] + pruneopts = "NUT" + revision = "8a410e7b638dca158bf9e766925842f6651ff828" + +[[projects]] + branch = "master" + digest = "1:13d575ad6576f56693a410e77830da614005f87955e7cd2ac6afbf9b54afd651" + name = "golang.org/x/oauth2" + packages = [ + ".", + "internal", + ] + pruneopts = "NUT" + revision = "e64efc72b421e893cbf63f17ba2221e7d6d0b0f3" + +[[projects]] + branch = "master" + digest = "1:8270f14d85e8d36e852e018872ac923f7a2067648bfe8428d01be288818aa337" + name = "golang.org/x/sys" + packages = [ + "unix", + "windows", + ] + pruneopts = "NUT" + revision = "fa5fdf94c78965f1aa8423f0cc50b8b8d728b05a" + +[[projects]] + digest = "1:e7071ed636b5422cc51c0e3a6cebc229d6c9fffc528814b519a980641422d619" + name = "golang.org/x/text" + packages = [ + "collate", + "collate/build", + "internal/colltab", + "internal/gen", + "internal/tag", + "internal/triegen", + "internal/ucd", + 
"language", + "secure/bidirule", + "transform", + "unicode/bidi", + "unicode/cldr", + "unicode/norm", + "unicode/rangetable", + ] + pruneopts = "NUT" + revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0" + version = "v0.3.0" + +[[projects]] + branch = "master" + digest = "1:c9e7a4b4d47c0ed205d257648b0e5b0440880cb728506e318f8ac7cd36270bc4" + name = "golang.org/x/time" + packages = ["rate"] + pruneopts = "NUT" + revision = "fbb02b2291d28baffd63558aa44b4b56f178d650" + +[[projects]] + branch = "master" + digest = "1:a05bd2d296bc727082abcb63ff52615b4dcc6219d8b61e99fd83d605dc779a18" + name = "golang.org/x/tools" + packages = [ + "go/ast/astutil", + "imports", + "internal/fastwalk", + ] + pruneopts = "NUT" + revision = "7ca132754999accbaa5c1735eda29e7ce0f3bf03" + +[[projects]] + digest = "1:34c10243da5972105edd1b4b883e2bd918fbb3f73fbe14d6af6929e547173494" + name = "google.golang.org/appengine" + packages = [ + "internal", + "internal/base", + "internal/datastore", + "internal/log", + "internal/remote_api", + "internal/urlfetch", + "urlfetch", + ] + pruneopts = "NUT" + revision = "e9657d882bb81064595ca3b56cbe2546bbabf7b1" + version = "v1.4.0" + +[[projects]] + digest = "1:2d1fbdc6777e5408cabeb02bf336305e724b925ff4546ded0fa8715a7267922a" + name = "gopkg.in/inf.v0" + packages = ["."] + pruneopts = "NUT" + revision = "d2d2541c53f18d2a059457998ce2876cc8e67cbf" + version = "v0.9.1" + +[[projects]] + digest = "1:18108594151654e9e696b27b181b953f9a90b16bf14d253dd1b397b025a1487f" + name = "gopkg.in/yaml.v2" + packages = ["."] + pruneopts = "NUT" + revision = "51d6538a90f86fe93ac480b35f37b2be17fef232" + version = "v2.2.2" + +[[projects]] + digest = "1:4485f6050feae6844efd79bce3f5b35e5ed4a21dd79ef6a2dbbee263531cea09" + name = "k8s.io/api" + packages = [ + "admissionregistration/v1alpha1", + "admissionregistration/v1beta1", + "apps/v1", + "apps/v1beta1", + "apps/v1beta2", + "authentication/v1", + "authentication/v1beta1", + "authorization/v1", + "authorization/v1beta1", + "autoscaling/v1", + "autoscaling/v2beta1", + "autoscaling/v2beta2", + "batch/v1", + "batch/v1beta1", + "batch/v2alpha1", + "certificates/v1beta1", + "coordination/v1beta1", + "core/v1", + "events/v1beta1", + "extensions/v1beta1", + "networking/v1", + "policy/v1beta1", + "rbac/v1", + "rbac/v1alpha1", + "rbac/v1beta1", + "scheduling/v1alpha1", + "scheduling/v1beta1", + "settings/v1alpha1", + "storage/v1", + "storage/v1alpha1", + "storage/v1beta1", + ] + pruneopts = "NUT" + revision = "145d52631d00cbfe68490d19ae4f0f501fd31a95" + version = "kubernetes-1.12.6" + +[[projects]] + digest = "1:b172899fc4b03d2bda3c223706822b3cf801b2a63ce4740dda0fe8206db97017" + name = "k8s.io/apimachinery" + packages = [ + "pkg/api/equality", + "pkg/api/errors", + "pkg/api/meta", + "pkg/api/resource", + "pkg/apis/meta/internalversion", + "pkg/apis/meta/v1", + "pkg/apis/meta/v1/unstructured", + "pkg/apis/meta/v1beta1", + "pkg/conversion", + "pkg/conversion/queryparams", + "pkg/fields", + "pkg/labels", + "pkg/runtime", + "pkg/runtime/schema", + "pkg/runtime/serializer", + "pkg/runtime/serializer/json", + "pkg/runtime/serializer/protobuf", + "pkg/runtime/serializer/recognizer", + "pkg/runtime/serializer/streaming", + "pkg/runtime/serializer/versioning", + "pkg/selection", + "pkg/types", + "pkg/util/cache", + "pkg/util/clock", + "pkg/util/diff", + "pkg/util/errors", + "pkg/util/framer", + "pkg/util/intstr", + "pkg/util/json", + "pkg/util/mergepatch", + "pkg/util/naming", + "pkg/util/net", + "pkg/util/runtime", + "pkg/util/sets", + "pkg/util/sets/types", + 
"pkg/util/strategicpatch", + "pkg/util/validation", + "pkg/util/validation/field", + "pkg/util/wait", + "pkg/util/yaml", + "pkg/version", + "pkg/watch", + "third_party/forked/golang/json", + "third_party/forked/golang/reflect", + ] + pruneopts = "NUT" + revision = "01f179d85dbce0f2e0e4351a92394b38694b7cae" + version = "kubernetes-1.12.6" + +[[projects]] + digest = "1:2d8d57c8bd9075e441307d242f4a1a56510c19cbce082b434150ad9b94409c25" + name = "k8s.io/client-go" + packages = [ + "discovery", + "discovery/fake", + "kubernetes/scheme", + "pkg/apis/clientauthentication", + "pkg/apis/clientauthentication/v1alpha1", + "pkg/apis/clientauthentication/v1beta1", + "pkg/version", + "plugin/pkg/client/auth/exec", + "rest", + "rest/watch", + "testing", + "tools/cache", + "tools/clientcmd/api", + "tools/metrics", + "tools/pager", + "transport", + "util/buffer", + "util/cert", + "util/connrotation", + "util/flowcontrol", + "util/integer", + "util/retry", + ] + pruneopts = "NUT" + revision = "78295b709ec6fa5be12e35892477a326dea2b5d3" + version = "kubernetes-1.12.6" + +[[projects]] + digest = "1:26b81b5e76e3f84ea5140da4f74649576e470f79091d2ef8e0d1b5000bc636ca" + name = "k8s.io/code-generator" + packages = [ + "cmd/client-gen", + "cmd/client-gen/args", + "cmd/client-gen/generators", + "cmd/client-gen/generators/fake", + "cmd/client-gen/generators/scheme", + "cmd/client-gen/generators/util", + "cmd/client-gen/path", + "cmd/client-gen/types", + "cmd/deepcopy-gen", + "cmd/deepcopy-gen/args", + "cmd/defaulter-gen", + "cmd/defaulter-gen/args", + "cmd/informer-gen", + "cmd/informer-gen/args", + "cmd/informer-gen/generators", + "cmd/lister-gen", + "cmd/lister-gen/args", + "cmd/lister-gen/generators", + "pkg/util", + ] + pruneopts = "T" + revision = "b1289fc74931d4b6b04bd1a259acfc88a2cb0a66" + version = "kubernetes-1.12.6" + +[[projects]] + branch = "master" + digest = "1:5249c83f0fb9e277b2d28c19eca814feac7ef05dc762e4deaf0a2e4b1a7c5df3" + name = "k8s.io/gengo" + packages = [ + "args", + "examples/deepcopy-gen/generators", + "examples/defaulter-gen/generators", + "examples/set-gen/sets", + "generator", + "namer", + "parser", + "types", + ] + pruneopts = "NUT" + revision = "4242d8e6c5dba56827bb7bcf14ad11cda38f3991" + +[[projects]] + branch = "master" + digest = "1:a2c842a1e0aed96fd732b535514556323a6f5edfded3b63e5e0ab1bce188aa54" + name = "k8s.io/kube-openapi" + packages = ["pkg/util/proto"] + pruneopts = "NUT" + revision = "e3762e86a74c878ffed47484592986685639c2cd" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + input-imports = [ + "github.com/google/go-cmp/cmp", + "github.com/knative/pkg/apis", + "github.com/knative/pkg/kmeta", + "github.com/knative/test-infra/scripts", + "github.com/knative/test-infra/tools/dep-collector", + "k8s.io/api/core/v1", + "k8s.io/apimachinery/pkg/api/equality", + "k8s.io/apimachinery/pkg/api/errors", + "k8s.io/apimachinery/pkg/apis/meta/v1", + "k8s.io/apimachinery/pkg/labels", + "k8s.io/apimachinery/pkg/runtime", + "k8s.io/apimachinery/pkg/runtime/schema", + "k8s.io/apimachinery/pkg/runtime/serializer", + "k8s.io/apimachinery/pkg/types", + "k8s.io/apimachinery/pkg/util/runtime", + "k8s.io/apimachinery/pkg/util/sets/types", + "k8s.io/apimachinery/pkg/watch", + "k8s.io/client-go/discovery", + "k8s.io/client-go/discovery/fake", + "k8s.io/client-go/rest", + "k8s.io/client-go/testing", + "k8s.io/client-go/tools/cache", + "k8s.io/client-go/util/flowcontrol", + "k8s.io/code-generator/cmd/client-gen", + "k8s.io/code-generator/cmd/deepcopy-gen", + 
"k8s.io/code-generator/cmd/defaulter-gen", + "k8s.io/code-generator/cmd/informer-gen", + "k8s.io/code-generator/cmd/lister-gen", + ] + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/vendor/github.com/knative/caching/Gopkg.toml b/vendor/github.com/knative/caching/Gopkg.toml new file mode 100644 index 000000000..4378ba90b --- /dev/null +++ b/vendor/github.com/knative/caching/Gopkg.toml @@ -0,0 +1,48 @@ +# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md +# for detailed Gopkg.toml documentation. + +required = [ + "k8s.io/apimachinery/pkg/util/sets/types", + "k8s.io/code-generator/cmd/deepcopy-gen", + "k8s.io/code-generator/cmd/defaulter-gen", + "k8s.io/code-generator/cmd/client-gen", + "k8s.io/code-generator/cmd/lister-gen", + "k8s.io/code-generator/cmd/informer-gen", + "github.com/knative/test-infra/scripts", + "github.com/knative/test-infra/tools/dep-collector", +] + +[[override]] + name = "k8s.io/api" + version = "kubernetes-1.12.6" + +[[override]] + name = "k8s.io/apimachinery" + version = "kubernetes-1.12.6" + +[[override]] + name = "k8s.io/code-generator" + version = "kubernetes-1.12.6" + +[[override]] + name = "k8s.io/client-go" + version = "kubernetes-1.12.6" + +[[override]] + name = "github.com/knative/pkg" + # HEAD as of 2019-03-21 + revision = "60fdcbcabd2faeb34328d8b2725dc76c59189453" + +[prune] + go-tests = true + unused-packages = true + non-go = true + +[[prune.project]] + name = "k8s.io/code-generator" + unused-packages = false + non-go = false + +[[prune.project]] + name = "github.com/knative/test-infra" + non-go = false diff --git a/vendor/github.com/knative/caching/LICENSE b/vendor/github.com/knative/caching/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/vendor/github.com/knative/caching/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/knative/caching/OWNERS b/vendor/github.com/knative/caching/OWNERS new file mode 100644 index 000000000..e5d053381 --- /dev/null +++ b/vendor/github.com/knative/caching/OWNERS @@ -0,0 +1,6 @@ +# The OWNERS file is used by prow to automatically merge approved PRs. + +approvers: +- evankanderson +- mattmoor +- vaikas-google diff --git a/vendor/github.com/knative/caching/README.md b/vendor/github.com/knative/caching/README.md new file mode 100644 index 000000000..39439aa22 --- /dev/null +++ b/vendor/github.com/knative/caching/README.md @@ -0,0 +1,14 @@ +# Knative Caching Resources + +[![GoDoc](https://godoc.org/github.com/knative/caching?status.svg)](https://godoc.org/github.com/knative/caching) +[![Go Report Card](https://goreportcard.com/badge/knative/caching)](https://goreportcard.com/report/knative/caching) + +Knative `caching` defines resources that can be used to express a desire to +cache things. 
These are **just** API definitions for caching custom +resources; they do not include an implementation of this API. + +To learn more about Knative, please visit our +[Knative docs](https://github.com/knative/docs) repository. + +If you are interested in contributing, see [CONTRIBUTING.md](./CONTRIBUTING.md) +and [DEVELOPMENT.md](./DEVELOPMENT.md). diff --git a/vendor/github.com/knative/caching/code-of-conduct.md b/vendor/github.com/knative/caching/code-of-conduct.md new file mode 100644 index 000000000..d94fbdf18 --- /dev/null +++ b/vendor/github.com/knative/caching/code-of-conduct.md @@ -0,0 +1,76 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, gender identity and expression, level of experience, +education, socio-economic status, nationality, personal appearance, race, +religion, or sexual identity and orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or + advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior +may be reported by contacting the project team at +knative-code-of-conduct@googlegroups.com. All complaints will be reviewed +and investigated and will result in a response that is deemed +necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of +an incident. Further details of specific enforcement policies may be +posted separately.
+ +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html + +[homepage]: https://www.contributor-covenant.org + diff --git a/vendor/github.com/knative/caching/config/image.yaml b/vendor/github.com/knative/caching/config/image.yaml new file mode 100644 index 000000000..50d3e9b67 --- /dev/null +++ b/vendor/github.com/knative/caching/config/image.yaml @@ -0,0 +1,34 @@ +# Copyright 2018 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: images.caching.internal.knative.dev +spec: + group: caching.internal.knative.dev + version: v1alpha1 + names: + kind: Image + plural: images + singular: image + categories: + - all + - knative-internal + - caching + shortNames: + - img + scope: Namespaced + subresources: + status: {} diff --git a/vendor/github.com/knative/caching/hack/OWNERS b/vendor/github.com/knative/caching/hack/OWNERS new file mode 100644 index 000000000..d6d72cdeb --- /dev/null +++ b/vendor/github.com/knative/caching/hack/OWNERS @@ -0,0 +1,4 @@ +# The OWNERS file is used by prow to automatically merge approved PRs. + +approvers: +- adrcunha diff --git a/vendor/github.com/knative/caching/hack/boilerplate/add-boilerplate.sh b/vendor/github.com/knative/caching/hack/boilerplate/add-boilerplate.sh new file mode 100755 index 000000000..f642f2f73 --- /dev/null +++ b/vendor/github.com/knative/caching/hack/boilerplate/add-boilerplate.sh @@ -0,0 +1,38 @@ +#!/usr/bin/env bash + +# Copyright 2018 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +USAGE=$(cat <<EOF +Adds boilerplate.<ext>.txt to all .<ext> files missing it in a directory.
+ +Usage: (from repository root) + ./hack/boilerplate/add-boilerplate.sh <ext> <dir> + +Example: (from repository root) + ./hack/boilerplate/add-boilerplate.sh go cmd +EOF +) + +set -e + +if [[ -z $1 || -z $2 ]]; then + echo "${USAGE}" + exit 1 +fi + +grep -r -L -P "Copyright \d+ The Knative Authors" $2 \ + | grep -P "\.$1\$" \ + | xargs -I {} sh -c \ + "cat hack/boilerplate/boilerplate.$1.txt {} > /tmp/boilerplate && mv /tmp/boilerplate {}" diff --git a/cmd/triggers/main.go b/vendor/github.com/knative/caching/hack/boilerplate/boilerplate.go.txt similarity index 82% rename from cmd/triggers/main.go rename to vendor/github.com/knative/caching/hack/boilerplate/boilerplate.go.txt index c0d174a92..02c504e93 100644 --- a/cmd/triggers/main.go +++ b/vendor/github.com/knative/caching/hack/boilerplate/boilerplate.go.txt @@ -1,19 +1,15 @@ /* -Copyright 2019 The Tekton Authors +Copyright 2018 The Knative Authors + Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -package main - -import "fmt" - -func main() { - fmt.Println("Placeholder binary") -} diff --git a/vendor/github.com/knative/caching/hack/boilerplate/boilerplate.sh.txt b/vendor/github.com/knative/caching/hack/boilerplate/boilerplate.sh.txt new file mode 100644 index 000000000..e05a3636b --- /dev/null +++ b/vendor/github.com/knative/caching/hack/boilerplate/boilerplate.sh.txt @@ -0,0 +1,15 @@ +#!/usr/bin/env bash + +# Copyright 2018 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/vendor/github.com/knative/caching/hack/update-codegen.sh b/vendor/github.com/knative/caching/hack/update-codegen.sh new file mode 100755 index 000000000..165b2b800 --- /dev/null +++ b/vendor/github.com/knative/caching/hack/update-codegen.sh @@ -0,0 +1,34 @@ +#!/usr/bin/env bash + +# Copyright 2018 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -o errexit +set -o nounset +set -o pipefail + +REPO_ROOT=$(dirname ${BASH_SOURCE})/..
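+# CODEGEN_PKG honours a pre-set $CODEGEN_PKG, then prefers the vendored copy of +# k8s.io/code-generator, falling back to a sibling ../code-generator checkout.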
+CODEGEN_PKG=${CODEGEN_PKG:-$(cd ${REPO_ROOT}; ls -d -1 ./vendor/k8s.io/code-generator 2>/dev/null || echo ../code-generator)} + +# generate the code with: +# --output-base because this script should also be able to run inside the vendor dir of +# k8s.io/kubernetes. The output-base is needed for the generators to output into the vendor dir +# instead of the $GOPATH directly. For normal projects this can be dropped. +${CODEGEN_PKG}/generate-groups.sh "deepcopy,client,informer,lister" \ + github.com/knative/caching/pkg/client github.com/knative/caching/pkg/apis \ + "caching:v1alpha1" \ + --go-header-file ${REPO_ROOT}/hack/boilerplate/boilerplate.go.txt + +# Make sure our dependencies are up-to-date +${REPO_ROOT}/hack/update-deps.sh diff --git a/vendor/github.com/knative/caching/hack/update-deps.sh b/vendor/github.com/knative/caching/hack/update-deps.sh new file mode 100755 index 000000000..4d9479901 --- /dev/null +++ b/vendor/github.com/knative/caching/hack/update-deps.sh @@ -0,0 +1,29 @@ +#!/usr/bin/env bash + +# Copyright 2018 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +source $(dirname $0)/../vendor/github.com/knative/test-infra/scripts/library.sh + +set -o errexit +set -o nounset +set -o pipefail + +cd ${REPO_ROOT_DIR} + +# Ensure we have everything we need under vendor/ +dep ensure + +rm -rf $(find vendor/ -name 'OWNERS') +rm -rf $(find vendor/ -name '*_test.go') diff --git a/vendor/github.com/knative/caching/hack/verify-codegen.sh b/vendor/github.com/knative/caching/hack/verify-codegen.sh new file mode 100755 index 000000000..5e48a4e65 --- /dev/null +++ b/vendor/github.com/knative/caching/hack/verify-codegen.sh @@ -0,0 +1,55 @@ +#!/usr/bin/env bash + +# Copyright 2018 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
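+ +# The flow below: snapshot Gopkg.lock, pkg/ and vendor/ into a temporary +# directory, re-run update-codegen.sh, diff the regenerated trees against the +# snapshot, then restore the snapshot so the working tree is left untouched.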
+ +set -o errexit +set -o nounset +set -o pipefail + +readonly REPO_ROOT_DIR="$(git rev-parse --show-toplevel)" +readonly TMP_DIFFROOT="$(mktemp -d -p ${REPO_ROOT_DIR})" + +cleanup() { + rm -rf "${TMP_DIFFROOT}" +} + +trap "cleanup" EXIT SIGINT + +cleanup + +# Save working tree state +mkdir -p "${TMP_DIFFROOT}/pkg" +cp -aR "${REPO_ROOT_DIR}/Gopkg.lock" "${REPO_ROOT_DIR}/pkg" "${REPO_ROOT_DIR}/vendor" "${TMP_DIFFROOT}" + +# TODO(mattmoor): We should be able to rm -rf pkg/client/ and vendor/ + +"${REPO_ROOT_DIR}/hack/update-codegen.sh" +echo "Diffing ${REPO_ROOT_DIR} against freshly generated codegen" +ret=0 +diff -Naupr "${REPO_ROOT_DIR}/pkg" "${TMP_DIFFROOT}/pkg" || ret=1 +diff -Naupr --no-dereference "${REPO_ROOT_DIR}/vendor" "${TMP_DIFFROOT}/vendor" || ret=1 + +# Restore working tree state +rm -fr "${TMP_DIFFROOT}/config" +rm -fr "${REPO_ROOT_DIR}/Gopkg.lock" "${REPO_ROOT_DIR}/pkg" "${REPO_ROOT_DIR}/vendor" +cp -aR "${TMP_DIFFROOT}"/* "${REPO_ROOT_DIR}" + +if [[ $ret -eq 0 ]] +then + echo "${REPO_ROOT_DIR} up to date." +else + echo "ERROR: ${REPO_ROOT_DIR} is out of date. Please run ./hack/update-codegen.sh" + exit 1 +fi diff --git a/vendor/github.com/knative/caching/pkg/apis/caching/register.go b/vendor/github.com/knative/caching/pkg/apis/caching/register.go new file mode 100644 index 000000000..c938440ac --- /dev/null +++ b/vendor/github.com/knative/caching/pkg/apis/caching/register.go @@ -0,0 +1,27 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package caching + +const ( + GroupName = "caching.internal.knative.dev" + + // ImageClassAnnotationKey is the annotation for the explicit class of caching + // that a particular resource has opted into. For example, + // caching.knative.dev/image.class: foo + // This uses a different domain because unlike the resource, it is user-facing. + ImageClassAnnotationKey = "caching.knative.dev/image.class" +) diff --git a/vendor/github.com/knative/caching/pkg/apis/caching/v1alpha1/doc.go b/vendor/github.com/knative/caching/pkg/apis/caching/v1alpha1/doc.go new file mode 100644 index 000000000..a2dc6cb08 --- /dev/null +++ b/vendor/github.com/knative/caching/pkg/apis/caching/v1alpha1/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// +k8s:deepcopy-gen=package +// +groupName=caching.internal.knative.dev +package v1alpha1 diff --git a/vendor/github.com/knative/caching/pkg/apis/caching/v1alpha1/image_defaults.go b/vendor/github.com/knative/caching/pkg/apis/caching/v1alpha1/image_defaults.go new file mode 100644 index 000000000..5aa4471c4 --- /dev/null +++ b/vendor/github.com/knative/caching/pkg/apis/caching/v1alpha1/image_defaults.go @@ -0,0 +1,23 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import "context" + +func (r *Image) SetDefaults(ctx context.Context) { + // TODO(mattmoor): This +} diff --git a/vendor/github.com/knative/caching/pkg/apis/caching/v1alpha1/image_types.go b/vendor/github.com/knative/caching/pkg/apis/caching/v1alpha1/image_types.go new file mode 100644 index 000000000..edea28f29 --- /dev/null +++ b/vendor/github.com/knative/caching/pkg/apis/caching/v1alpha1/image_types.go @@ -0,0 +1,180 @@ +/* +Copyright 2018 The Knative Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "reflect" + "sort" + "time" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + "github.com/knative/pkg/apis" + "github.com/knative/pkg/kmeta" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Image is a Knative abstraction that encapsulates the interface by which Knative +// components express a desire to have a particular image cached. +type Image struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + + // Spec holds the desired state of the Image (from the client). + // +optional + Spec ImageSpec `json:"spec,omitempty"` + + // Status communicates the observed state of the Image (from the controller). + // +optional + Status ImageStatus `json:"status,omitempty"` +} + +// Check that Image can be validated and defaulted. +var _ apis.Validatable = (*Image)(nil) +var _ apis.Defaultable = (*Image)(nil) +var _ kmeta.OwnerRefable = (*Image)(nil) + +// ImageSpec holds the desired state of the Image (from the client). +type ImageSpec struct { + + // Image is the name of the container image url to cache across the cluster. + Image string `json:"image"` + + // ServiceAccountName is the name of the Kubernetes ServiceAccount as which the Pods + // will run this container. This is potentially used to authenticate the image pull + // if the service account has attached pull secrets. 
For more information: + // https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#add-imagepullsecrets-to-a-service-account + // +optional + ServiceAccountName string `json:"serviceAccountName,omitempty"` + + // ImagePullSecrets contains the names of the Kubernetes Secrets containing login + // information used by the Pods which will run this container. + // +optional + ImagePullSecrets []corev1.LocalObjectReference `json:"imagePullSecrets,omitempty"` +} + +// ImageConditionType is used to communicate the status of the reconciliation process. +type ImageConditionType string + +const ( + // ImageConditionReady is set when the revision is starting to materialize + // runtime resources, and becomes true when those resources are ready. + ImageConditionReady ImageConditionType = "Ready" +) + +// ImageCondition defines a readiness condition for a Image. +// See: https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#typical-status-properties +type ImageCondition struct { + Type ImageConditionType `json:"type" description:"type of Image condition"` + + Status corev1.ConditionStatus `json:"status" description:"status of the condition, one of True, False, Unknown"` + + // +optional + // We use VolatileTime in place of metav1.Time to exclude this from creating equality.Semantic + // differences (all other things held constant). + LastTransitionTime apis.VolatileTime `json:"lastTransitionTime,omitempty" description:"last time the condition transit from one status to another"` + + // +optional + Reason string `json:"reason,omitempty" description:"one-word CamelCase reason for the condition's last transition"` + + // +optional + Message string `json:"message,omitempty" description:"human-readable message indicating details about last transition"` +} + +// ImageStatus communicates the observed state of the Image (from the controller). +type ImageStatus struct { + // Conditions communicates information about ongoing/complete + // reconciliation processes that bring the "spec" inline with the observed + // state of the world. + // +optional + Conditions []ImageCondition `json:"conditions,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ImageList is a list of Image resources +type ImageList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + Items []Image `json:"items"` +} + +// IsReady looks at the conditions and if the Status has a condition +// ImageConditionReady returns true if ConditionStatus is True +func (rs *ImageStatus) IsReady() bool { + if c := rs.GetCondition(ImageConditionReady); c != nil { + return c.Status == corev1.ConditionTrue + } + return false +} + +func (rs *ImageStatus) GetCondition(t ImageConditionType) *ImageCondition { + for _, cond := range rs.Conditions { + if cond.Type == t { + return &cond + } + } + return nil +} + +func (rs *ImageStatus) SetCondition(new *ImageCondition) { + if new == nil { + return + } + + t := new.Type + var conditions []ImageCondition + for _, cond := range rs.Conditions { + if cond.Type != t { + conditions = append(conditions, cond) + } else { + // If we'd only update the LastTransitionTime, then return. 
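+ // Carrying the old timestamp over means a DeepEqual match below implies + // the incoming condition differs in nothing but its timestamp, so the + // existing condition keeps its original transition time.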
+ new.LastTransitionTime = cond.LastTransitionTime + if reflect.DeepEqual(new, &cond) { + return + } + } + } + new.LastTransitionTime = apis.VolatileTime{metav1.NewTime(time.Now())} + conditions = append(conditions, *new) + // Deterministically order the conditions + sort.Slice(conditions, func(i, j int) bool { return conditions[i].Type < conditions[j].Type }) + rs.Conditions = conditions +} + +func (rs *ImageStatus) InitializeConditions() { + for _, cond := range []ImageConditionType{ + ImageConditionReady, + } { + if rc := rs.GetCondition(cond); rc == nil { + rs.SetCondition(&ImageCondition{ + Type: cond, + Status: corev1.ConditionUnknown, + }) + } + } +} + +func (i *Image) GetGroupVersionKind() schema.GroupVersionKind { + return SchemeGroupVersion.WithKind("Image") +} diff --git a/vendor/github.com/knative/caching/pkg/apis/caching/v1alpha1/image_validation.go b/vendor/github.com/knative/caching/pkg/apis/caching/v1alpha1/image_validation.go new file mode 100644 index 000000000..33a505782 --- /dev/null +++ b/vendor/github.com/knative/caching/pkg/apis/caching/v1alpha1/image_validation.go @@ -0,0 +1,46 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "context" + "fmt" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/equality" + + "github.com/knative/pkg/apis" +) + +func (rt *Image) Validate(ctx context.Context) *apis.FieldError { + return rt.Spec.Validate(ctx).ViaField("spec") +} + +func (rs *ImageSpec) Validate(ctx context.Context) *apis.FieldError { + if rs.Image == "" { + return apis.ErrMissingField("image") + } + // TODO(mattmoor): Consider using go-containerregistry to validate + // the image reference. This is effectively the function we want. + // https://github.com/google/go-containerregistry/blob/2f3e3e1/pkg/name/ref.go#L41 + for index, ips := range rs.ImagePullSecrets { + if equality.Semantic.DeepEqual(ips, corev1.LocalObjectReference{}) { + return apis.ErrMissingField(fmt.Sprintf("imagePullSecrets[%d].name", index)) + } + } + return nil +} diff --git a/vendor/k8s.io/api/node/v1alpha1/register.go b/vendor/github.com/knative/caching/pkg/apis/caching/v1alpha1/register.go similarity index 68% rename from vendor/k8s.io/api/node/v1alpha1/register.go rename to vendor/github.com/knative/caching/pkg/apis/caching/v1alpha1/register.go index b6082142a..e5f710a7e 100644 --- a/vendor/k8s.io/api/node/v1alpha1/register.go +++ b/vendor/github.com/knative/caching/pkg/apis/caching/v1alpha1/register.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Kubernetes Authors. +Copyright 2018 The Knative Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -17,16 +17,20 @@ limitations under the License. 
package v1alpha1 import ( + "github.com/knative/caching/pkg/apis/caching" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" ) -// GroupName is the group name use in this package -const GroupName = "node.k8s.io" - // SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} +var SchemeGroupVersion = schema.GroupVersion{Group: caching.GroupName, Version: "v1alpha1"} + +// Kind takes an unqualified kind and returns back a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} // Resource takes an unqualified resource and returns a Group qualified GroupResource func Resource(resource string) schema.GroupResource { @@ -34,19 +38,16 @@ func Resource(resource string) schema.GroupResource { } var ( - // SchemeBuilder is the scheme builder with scheme init functions to run for this API package SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - // AddToScheme is a common registration function for mapping packaged scoped group & version keys to a scheme - AddToScheme = SchemeBuilder.AddToScheme + AddToScheme = SchemeBuilder.AddToScheme ) -// addKnownTypes adds the list of known types to api.Scheme. +// Adds the list of known types to Scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, - &RuntimeClass{}, - &RuntimeClassList{}, + &Image{}, + &ImageList{}, ) - metav1.AddToGroupVersion(scheme, SchemeGroupVersion) return nil } diff --git a/vendor/k8s.io/code-generator/_examples/MixedCase/apis/example/v1/zz_generated.deepcopy.go b/vendor/github.com/knative/caching/pkg/apis/caching/v1alpha1/zz_generated.deepcopy.go similarity index 52% rename from vendor/k8s.io/code-generator/_examples/MixedCase/apis/example/v1/zz_generated.deepcopy.go rename to vendor/github.com/knative/caching/pkg/apis/caching/v1alpha1/zz_generated.deepcopy.go index 9f55a39dd..ed87656c8 100644 --- a/vendor/k8s.io/code-generator/_examples/MixedCase/apis/example/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/knative/caching/pkg/apis/caching/v1alpha1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright The Kubernetes Authors. +Copyright 2018 The Knative Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -18,33 +18,35 @@ limitations under the License. // Code generated by deepcopy-gen. DO NOT EDIT. -package v1 +package v1alpha1 import ( + v1 "k8s.io/api/core/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClusterTestType) DeepCopyInto(out *ClusterTestType) { +func (in *Image) DeepCopyInto(out *Image) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.Status = in.Status + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterTestType. -func (in *ClusterTestType) DeepCopy() *ClusterTestType { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Image. 
+func (in *Image) DeepCopy() *Image { if in == nil { return nil } - out := new(ClusterTestType) + out := new(Image) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ClusterTestType) DeepCopyObject() runtime.Object { +func (in *Image) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } @@ -52,75 +54,49 @@ func (in *ClusterTestType) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClusterTestTypeList) DeepCopyInto(out *ClusterTestTypeList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ClusterTestType, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterTestTypeList. -func (in *ClusterTestTypeList) DeepCopy() *ClusterTestTypeList { - if in == nil { - return nil - } - out := new(ClusterTestTypeList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ClusterTestTypeList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClusterTestTypeStatus) DeepCopyInto(out *ClusterTestTypeStatus) { +func (in *ImageCondition) DeepCopyInto(out *ImageCondition) { *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterTestTypeStatus. -func (in *ClusterTestTypeStatus) DeepCopy() *ClusterTestTypeStatus { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageCondition. +func (in *ImageCondition) DeepCopy() *ImageCondition { if in == nil { return nil } - out := new(ClusterTestTypeStatus) + out := new(ImageCondition) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *TestType) DeepCopyInto(out *TestType) { +func (in *ImageList) DeepCopyInto(out *ImageList) { *out = *in out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.Status = in.Status + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Image, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TestType. -func (in *TestType) DeepCopy() *TestType { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageList. +func (in *ImageList) DeepCopy() *ImageList { if in == nil { return nil } - out := new(TestType) + out := new(ImageList) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
-func (in *TestType) DeepCopyObject() runtime.Object { +func (in *ImageList) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } @@ -128,50 +104,45 @@ func (in *TestType) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *TestTypeList) DeepCopyInto(out *TestTypeList) { +func (in *ImageSpec) DeepCopyInto(out *ImageSpec) { *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]TestType, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } + if in.ImagePullSecrets != nil { + in, out := &in.ImagePullSecrets, &out.ImagePullSecrets + *out = make([]v1.LocalObjectReference, len(*in)) + copy(*out, *in) } return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TestTypeList. -func (in *TestTypeList) DeepCopy() *TestTypeList { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageSpec. +func (in *ImageSpec) DeepCopy() *ImageSpec { if in == nil { return nil } - out := new(TestTypeList) + out := new(ImageSpec) in.DeepCopyInto(out) return out } -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *TestTypeList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *TestTypeStatus) DeepCopyInto(out *TestTypeStatus) { +func (in *ImageStatus) DeepCopyInto(out *ImageStatus) { *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]ImageCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TestTypeStatus. -func (in *TestTypeStatus) DeepCopy() *TestTypeStatus { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageStatus. +func (in *ImageStatus) DeepCopy() *ImageStatus { if in == nil { return nil } - out := new(TestTypeStatus) + out := new(ImageStatus) in.DeepCopyInto(out) return out } diff --git a/vendor/github.com/knative/caching/pkg/client/clientset/versioned/clientset.go b/vendor/github.com/knative/caching/pkg/client/clientset/versioned/clientset.go new file mode 100644 index 000000000..fd1a378ba --- /dev/null +++ b/vendor/github.com/knative/caching/pkg/client/clientset/versioned/clientset.go @@ -0,0 +1,98 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
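+ +// An illustrative usage sketch (hypothetical; not part of the generated +// output): build a rest.Config, construct the clientset defined below, and +// reach the typed caching client. +// +// cfg, err := rest.InClusterConfig() // or a kubeconfig-loaded config +// if err != nil { +// panic(err) +// } +// cs, err := versioned.NewForConfig(cfg) +// if err != nil { +// panic(err) +// } +// images := cs.CachingV1alpha1().Images("default") +// _ = images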
+ +package versioned + +import ( + cachingv1alpha1 "github.com/knative/caching/pkg/client/clientset/versioned/typed/caching/v1alpha1" + discovery "k8s.io/client-go/discovery" + rest "k8s.io/client-go/rest" + flowcontrol "k8s.io/client-go/util/flowcontrol" +) + +type Interface interface { + Discovery() discovery.DiscoveryInterface + CachingV1alpha1() cachingv1alpha1.CachingV1alpha1Interface + // Deprecated: please explicitly pick a version if possible. + Caching() cachingv1alpha1.CachingV1alpha1Interface +} + +// Clientset contains the clients for groups. Each group has exactly one +// version included in a Clientset. +type Clientset struct { + *discovery.DiscoveryClient + cachingV1alpha1 *cachingv1alpha1.CachingV1alpha1Client +} + +// CachingV1alpha1 retrieves the CachingV1alpha1Client +func (c *Clientset) CachingV1alpha1() cachingv1alpha1.CachingV1alpha1Interface { + return c.cachingV1alpha1 +} + +// Deprecated: Caching retrieves the default version of CachingClient. +// Please explicitly pick a version. +func (c *Clientset) Caching() cachingv1alpha1.CachingV1alpha1Interface { + return c.cachingV1alpha1 +} + +// Discovery retrieves the DiscoveryClient +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + if c == nil { + return nil + } + return c.DiscoveryClient +} + +// NewForConfig creates a new Clientset for the given config. +func NewForConfig(c *rest.Config) (*Clientset, error) { + configShallowCopy := *c + if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { + configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) + } + var cs Clientset + var err error + cs.cachingV1alpha1, err = cachingv1alpha1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + + cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + return &cs, nil +} + +// NewForConfigOrDie creates a new Clientset for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *Clientset { + var cs Clientset + cs.cachingV1alpha1 = cachingv1alpha1.NewForConfigOrDie(c) + + cs.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c) + return &cs +} + +// New creates a new Clientset for the given RESTClient. +func New(c rest.Interface) *Clientset { + var cs Clientset + cs.cachingV1alpha1 = cachingv1alpha1.New(c) + + cs.DiscoveryClient = discovery.NewDiscoveryClient(c) + return &cs +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/doc.go b/vendor/github.com/knative/caching/pkg/client/clientset/versioned/doc.go similarity index 83% rename from vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/doc.go rename to vendor/github.com/knative/caching/pkg/client/clientset/versioned/doc.go index 3af5d054f..3fe468584 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/doc.go +++ b/vendor/github.com/knative/caching/pkg/client/clientset/versioned/doc.go @@ -1,5 +1,5 @@ /* -Copyright The Kubernetes Authors. +Copyright 2018 The Knative Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -16,5 +16,5 @@ limitations under the License. // Code generated by client-gen. DO NOT EDIT. -// This package has the automatically generated typed clients. -package v1 +// This package has the automatically generated clientset. 
+package versioned diff --git a/vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/fake/clientset_generated.go b/vendor/github.com/knative/caching/pkg/client/clientset/versioned/fake/clientset_generated.go similarity index 74% rename from vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/fake/clientset_generated.go rename to vendor/github.com/knative/caching/pkg/client/clientset/versioned/fake/clientset_generated.go index d52d4bef4..e090858f6 100644 --- a/vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/fake/clientset_generated.go +++ b/vendor/github.com/knative/caching/pkg/client/clientset/versioned/fake/clientset_generated.go @@ -1,5 +1,5 @@ /* -Copyright The Kubernetes Authors. +Copyright 2018 The Knative Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -19,14 +19,14 @@ limitations under the License. package fake import ( + clientset "github.com/knative/caching/pkg/client/clientset/versioned" + cachingv1alpha1 "github.com/knative/caching/pkg/client/clientset/versioned/typed/caching/v1alpha1" + fakecachingv1alpha1 "github.com/knative/caching/pkg/client/clientset/versioned/typed/caching/v1alpha1/fake" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/watch" "k8s.io/client-go/discovery" fakediscovery "k8s.io/client-go/discovery/fake" "k8s.io/client-go/testing" - clientset "k8s.io/code-generator/_examples/MixedCase/clientset/versioned" - examplev1 "k8s.io/code-generator/_examples/MixedCase/clientset/versioned/typed/example/v1" - fakeexamplev1 "k8s.io/code-generator/_examples/MixedCase/clientset/versioned/typed/example/v1/fake" ) // NewSimpleClientset returns a clientset that will respond with the provided objects. 
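// A hypothetical test sketch (illustration only, not generated code): seed the // fake clientset with an Image, then read it back through the typed client; // the Get signature assumes the client-gen vintage vendored here. // // img := &cachingv1alpha1.Image{ObjectMeta: metav1.ObjectMeta{Name: "app", Namespace: "default"}} // cs := fake.NewSimpleClientset(img) // got, err := cs.CachingV1alpha1().Images("default").Get("app", metav1.GetOptions{}) // _, _ = got, err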
@@ -41,7 +41,7 @@ func NewSimpleClientset(objects ...runtime.Object) *Clientset { } } - cs := &Clientset{tracker: o} + cs := &Clientset{} cs.discovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake} cs.AddReactor("*", "*", testing.ObjectReaction(o)) cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { @@ -63,20 +63,20 @@ func NewSimpleClientset(objects ...runtime.Object) *Clientset { type Clientset struct { testing.Fake discovery *fakediscovery.FakeDiscovery - tracker testing.ObjectTracker } func (c *Clientset) Discovery() discovery.DiscoveryInterface { return c.discovery } -func (c *Clientset) Tracker() testing.ObjectTracker { - return c.tracker -} - var _ clientset.Interface = &Clientset{} -// ExampleV1 retrieves the ExampleV1Client -func (c *Clientset) ExampleV1() examplev1.ExampleV1Interface { - return &fakeexamplev1.FakeExampleV1{Fake: &c.Fake} +// CachingV1alpha1 retrieves the CachingV1alpha1Client +func (c *Clientset) CachingV1alpha1() cachingv1alpha1.CachingV1alpha1Interface { + return &fakecachingv1alpha1.FakeCachingV1alpha1{Fake: &c.Fake} +} + +// Caching retrieves the CachingV1alpha1Client +func (c *Clientset) Caching() cachingv1alpha1.CachingV1alpha1Interface { + return &fakecachingv1alpha1.FakeCachingV1alpha1{Fake: &c.Fake} } diff --git a/vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/fake/doc.go b/vendor/github.com/knative/caching/pkg/client/clientset/versioned/fake/doc.go similarity index 94% rename from vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/fake/doc.go rename to vendor/github.com/knative/caching/pkg/client/clientset/versioned/fake/doc.go index 9b99e7167..86f64bb7a 100644 --- a/vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/fake/doc.go +++ b/vendor/github.com/knative/caching/pkg/client/clientset/versioned/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright The Kubernetes Authors. +Copyright 2018 The Knative Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/fake/register.go b/vendor/github.com/knative/caching/pkg/client/clientset/versioned/fake/register.go similarity index 92% rename from vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/fake/register.go rename to vendor/github.com/knative/caching/pkg/client/clientset/versioned/fake/register.go index 7fe8bc6a5..69b5344e2 100644 --- a/vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/fake/register.go +++ b/vendor/github.com/knative/caching/pkg/client/clientset/versioned/fake/register.go @@ -1,5 +1,5 @@ /* -Copyright The Kubernetes Authors. +Copyright 2018 The Knative Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -19,19 +19,19 @@ limitations under the License. 
package fake import ( + cachingv1alpha1 "github.com/knative/caching/pkg/apis/caching/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" schema "k8s.io/apimachinery/pkg/runtime/schema" serializer "k8s.io/apimachinery/pkg/runtime/serializer" utilruntime "k8s.io/apimachinery/pkg/util/runtime" - examplev1 "k8s.io/code-generator/_examples/MixedCase/apis/example/v1" ) var scheme = runtime.NewScheme() var codecs = serializer.NewCodecFactory(scheme) var parameterCodec = runtime.NewParameterCodec(scheme) var localSchemeBuilder = runtime.SchemeBuilder{ - examplev1.AddToScheme, + cachingv1alpha1.AddToScheme, } // AddToScheme adds all types of this clientset into the given scheme. This allows composition diff --git a/vendor/github.com/knative/caching/pkg/client/clientset/versioned/scheme/doc.go b/vendor/github.com/knative/caching/pkg/client/clientset/versioned/scheme/doc.go new file mode 100644 index 000000000..60ea8ba90 --- /dev/null +++ b/vendor/github.com/knative/caching/pkg/client/clientset/versioned/scheme/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package contains the scheme of the automatically generated clientset. +package scheme diff --git a/vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/scheme/register.go b/vendor/github.com/knative/caching/pkg/client/clientset/versioned/scheme/register.go similarity index 92% rename from vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/scheme/register.go rename to vendor/github.com/knative/caching/pkg/client/clientset/versioned/scheme/register.go index 2cf84f85a..f77d0548d 100644 --- a/vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/scheme/register.go +++ b/vendor/github.com/knative/caching/pkg/client/clientset/versioned/scheme/register.go @@ -1,5 +1,5 @@ /* -Copyright The Kubernetes Authors. +Copyright 2018 The Knative Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -19,19 +19,19 @@ limitations under the License. package scheme import ( + cachingv1alpha1 "github.com/knative/caching/pkg/apis/caching/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" schema "k8s.io/apimachinery/pkg/runtime/schema" serializer "k8s.io/apimachinery/pkg/runtime/serializer" utilruntime "k8s.io/apimachinery/pkg/util/runtime" - examplev1 "k8s.io/code-generator/_examples/MixedCase/apis/example/v1" ) var Scheme = runtime.NewScheme() var Codecs = serializer.NewCodecFactory(Scheme) var ParameterCodec = runtime.NewParameterCodec(Scheme) var localSchemeBuilder = runtime.SchemeBuilder{ - examplev1.AddToScheme, + cachingv1alpha1.AddToScheme, } // AddToScheme adds all types of this clientset into the given scheme. 
This allows composition diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/node_client.go b/vendor/github.com/knative/caching/pkg/client/clientset/versioned/typed/caching/v1alpha1/caching_client.go similarity index 54% rename from vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/node_client.go rename to vendor/github.com/knative/caching/pkg/client/clientset/versioned/typed/caching/v1alpha1/caching_client.go index e7acc27e4..8167378f7 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/node_client.go +++ b/vendor/github.com/knative/caching/pkg/client/clientset/versioned/typed/caching/v1alpha1/caching_client.go @@ -1,5 +1,5 @@ /* -Copyright The Kubernetes Authors. +Copyright 2018 The Knative Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -19,27 +19,28 @@ limitations under the License. package v1alpha1 import ( - v1alpha1 "k8s.io/api/node/v1alpha1" - "k8s.io/client-go/kubernetes/scheme" + v1alpha1 "github.com/knative/caching/pkg/apis/caching/v1alpha1" + "github.com/knative/caching/pkg/client/clientset/versioned/scheme" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" rest "k8s.io/client-go/rest" ) -type NodeV1alpha1Interface interface { +type CachingV1alpha1Interface interface { RESTClient() rest.Interface - RuntimeClassesGetter + ImagesGetter } -// NodeV1alpha1Client is used to interact with features provided by the node.k8s.io group. -type NodeV1alpha1Client struct { +// CachingV1alpha1Client is used to interact with features provided by the caching.internal.knative.dev group. +type CachingV1alpha1Client struct { restClient rest.Interface } -func (c *NodeV1alpha1Client) RuntimeClasses() RuntimeClassInterface { - return newRuntimeClasses(c) +func (c *CachingV1alpha1Client) Images(namespace string) ImageInterface { + return newImages(c, namespace) } -// NewForConfig creates a new NodeV1alpha1Client for the given config. -func NewForConfig(c *rest.Config) (*NodeV1alpha1Client, error) { +// NewForConfig creates a new CachingV1alpha1Client for the given config. +func NewForConfig(c *rest.Config) (*CachingV1alpha1Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err @@ -48,12 +49,12 @@ func NewForConfig(c *rest.Config) (*NodeV1alpha1Client, error) { if err != nil { return nil, err } - return &NodeV1alpha1Client{client}, nil + return &CachingV1alpha1Client{client}, nil } -// NewForConfigOrDie creates a new NodeV1alpha1Client for the given config and +// NewForConfigOrDie creates a new CachingV1alpha1Client for the given config and // panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *NodeV1alpha1Client { +func NewForConfigOrDie(c *rest.Config) *CachingV1alpha1Client { client, err := NewForConfig(c) if err != nil { panic(err) @@ -61,16 +62,16 @@ func NewForConfigOrDie(c *rest.Config) *NodeV1alpha1Client { return client } -// New creates a new NodeV1alpha1Client for the given RESTClient. -func New(c rest.Interface) *NodeV1alpha1Client { - return &NodeV1alpha1Client{c} +// New creates a new CachingV1alpha1Client for the given RESTClient. 
+func New(c rest.Interface) *CachingV1alpha1Client { + return &CachingV1alpha1Client{c} } func setConfigDefaults(config *rest.Config) error { gv := v1alpha1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() @@ -81,7 +82,7 @@ func setConfigDefaults(config *rest.Config) error { // RESTClient returns a RESTClient that is used to communicate // with API server by this client implementation. -func (c *NodeV1alpha1Client) RESTClient() rest.Interface { +func (c *CachingV1alpha1Client) RESTClient() rest.Interface { if c == nil { return nil } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/doc.go b/vendor/github.com/knative/caching/pkg/client/clientset/versioned/typed/caching/v1alpha1/doc.go similarity index 92% rename from vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/doc.go rename to vendor/github.com/knative/caching/pkg/client/clientset/versioned/typed/caching/v1alpha1/doc.go index 771101956..75445c179 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/doc.go +++ b/vendor/github.com/knative/caching/pkg/client/clientset/versioned/typed/caching/v1alpha1/doc.go @@ -1,5 +1,5 @@ /* -Copyright The Kubernetes Authors. +Copyright 2018 The Knative Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -17,4 +17,4 @@ limitations under the License. // Code generated by client-gen. DO NOT EDIT. // This package has the automatically generated typed clients. -package v1beta1 +package v1alpha1 diff --git a/vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/typed/example/v1/fake/doc.go b/vendor/github.com/knative/caching/pkg/client/clientset/versioned/typed/caching/v1alpha1/fake/doc.go similarity index 94% rename from vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/typed/example/v1/fake/doc.go rename to vendor/github.com/knative/caching/pkg/client/clientset/versioned/typed/caching/v1alpha1/fake/doc.go index 16f443990..128aa183a 100644 --- a/vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/typed/example/v1/fake/doc.go +++ b/vendor/github.com/knative/caching/pkg/client/clientset/versioned/typed/caching/v1alpha1/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright The Kubernetes Authors. +Copyright 2018 The Knative Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
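[Editorial aside, not part of the patch: the typed client above follows the standard client-gen pattern, so a consumer builds it from a `rest.Config` via `NewForConfig` and reaches resources through `Images(namespace)` (the `ImageInterface` methods appear later in this patch). A minimal sketch under assumptions not in the diff — an in-cluster config and a `default` namespace; the `main` wrapper is illustrative only:]

```go
package main

import (
	"fmt"
	"log"

	// Generated typed client vendored by this patch.
	cachingv1alpha1 "github.com/knative/caching/pkg/client/clientset/versioned/typed/caching/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/rest"
)

func main() {
	// Assumption: running inside a cluster; any *rest.Config would do.
	cfg, err := rest.InClusterConfig()
	if err != nil {
		log.Fatal(err)
	}
	// NewForConfig copies the config and applies setConfigDefaults
	// (group/version, "/apis" path, negotiated serializer) before
	// building the underlying REST client.
	client, err := cachingv1alpha1.NewForConfig(cfg)
	if err != nil {
		log.Fatal(err)
	}
	// Images(namespace) returns the namespaced ImageInterface;
	// List issues GET /apis/caching.internal.knative.dev/v1alpha1/....
	images, err := client.Images("default").List(metav1.ListOptions{})
	if err != nil {
		log.Fatal(err)
	}
	for _, img := range images.Items {
		fmt.Println(img.Name)
	}
}
```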
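[Likewise, the fake clientset earlier in this diff (`NewSimpleClientset`, which wires an `ObjectReaction` over a seeded object tracker) together with the `FakeImages` reactions that follow is what unit tests would exercise; a hedged sketch, with the package, test name, and fixture purely illustrative:]

```go
package caching_test // illustrative package name

import (
	"testing"

	"github.com/knative/caching/pkg/apis/caching/v1alpha1"
	"github.com/knative/caching/pkg/client/clientset/versioned/fake"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func TestGetImageFromFake(t *testing.T) {
	// NewSimpleClientset seeds the tracker and registers reactors for
	// every verb, so no API server is involved.
	cs := fake.NewSimpleClientset(&v1alpha1.Image{
		ObjectMeta: metav1.ObjectMeta{Name: "busybox", Namespace: "default"},
	})

	// Served by FakeImages.Get via testing.NewGetAction.
	img, err := cs.CachingV1alpha1().Images("default").Get("busybox", metav1.GetOptions{})
	if err != nil {
		t.Fatal(err)
	}
	if img.Name != "busybox" {
		t.Errorf("got image %q, want %q", img.Name, "busybox")
	}
}
```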
diff --git a/vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/typed/example/v1/fake/fake_example_client.go b/vendor/github.com/knative/caching/pkg/client/clientset/versioned/typed/caching/v1alpha1/fake/fake_caching_client.go similarity index 66% rename from vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/typed/example/v1/fake/fake_example_client.go rename to vendor/github.com/knative/caching/pkg/client/clientset/versioned/typed/caching/v1alpha1/fake/fake_caching_client.go index 265930a0e..2c63550c8 100644 --- a/vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/typed/example/v1/fake/fake_example_client.go +++ b/vendor/github.com/knative/caching/pkg/client/clientset/versioned/typed/caching/v1alpha1/fake/fake_caching_client.go @@ -1,5 +1,5 @@ /* -Copyright The Kubernetes Authors. +Copyright 2018 The Knative Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -19,26 +19,22 @@ limitations under the License. package fake import ( + v1alpha1 "github.com/knative/caching/pkg/client/clientset/versioned/typed/caching/v1alpha1" rest "k8s.io/client-go/rest" testing "k8s.io/client-go/testing" - v1 "k8s.io/code-generator/_examples/MixedCase/clientset/versioned/typed/example/v1" ) -type FakeExampleV1 struct { +type FakeCachingV1alpha1 struct { *testing.Fake } -func (c *FakeExampleV1) ClusterTestTypes() v1.ClusterTestTypeInterface { - return &FakeClusterTestTypes{c} -} - -func (c *FakeExampleV1) TestTypes(namespace string) v1.TestTypeInterface { - return &FakeTestTypes{c, namespace} +func (c *FakeCachingV1alpha1) Images(namespace string) v1alpha1.ImageInterface { + return &FakeImages{c, namespace} } // RESTClient returns a RESTClient that is used to communicate // with API server by this client implementation. -func (c *FakeExampleV1) RESTClient() rest.Interface { +func (c *FakeCachingV1alpha1) RESTClient() rest.Interface { var ret *rest.RESTClient return ret } diff --git a/vendor/github.com/knative/caching/pkg/client/clientset/versioned/typed/caching/v1alpha1/fake/fake_image.go b/vendor/github.com/knative/caching/pkg/client/clientset/versioned/typed/caching/v1alpha1/fake/fake_image.go new file mode 100644 index 000000000..5046833de --- /dev/null +++ b/vendor/github.com/knative/caching/pkg/client/clientset/versioned/typed/caching/v1alpha1/fake/fake_image.go @@ -0,0 +1,140 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + v1alpha1 "github.com/knative/caching/pkg/apis/caching/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeImages implements ImageInterface +type FakeImages struct { + Fake *FakeCachingV1alpha1 + ns string +} + +var imagesResource = schema.GroupVersionResource{Group: "caching.internal.knative.dev", Version: "v1alpha1", Resource: "images"} + +var imagesKind = schema.GroupVersionKind{Group: "caching.internal.knative.dev", Version: "v1alpha1", Kind: "Image"} + +// Get takes name of the image, and returns the corresponding image object, and an error if there is any. +func (c *FakeImages) Get(name string, options v1.GetOptions) (result *v1alpha1.Image, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(imagesResource, c.ns, name), &v1alpha1.Image{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.Image), err +} + +// List takes label and field selectors, and returns the list of Images that match those selectors. +func (c *FakeImages) List(opts v1.ListOptions) (result *v1alpha1.ImageList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(imagesResource, imagesKind, c.ns, opts), &v1alpha1.ImageList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1alpha1.ImageList{ListMeta: obj.(*v1alpha1.ImageList).ListMeta} + for _, item := range obj.(*v1alpha1.ImageList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested images. +func (c *FakeImages) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(imagesResource, c.ns, opts)) + +} + +// Create takes the representation of a image and creates it. Returns the server's representation of the image, and an error, if there is any. +func (c *FakeImages) Create(image *v1alpha1.Image) (result *v1alpha1.Image, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(imagesResource, c.ns, image), &v1alpha1.Image{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.Image), err +} + +// Update takes the representation of a image and updates it. Returns the server's representation of the image, and an error, if there is any. +func (c *FakeImages) Update(image *v1alpha1.Image) (result *v1alpha1.Image, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(imagesResource, c.ns, image), &v1alpha1.Image{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.Image), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeImages) UpdateStatus(image *v1alpha1.Image) (*v1alpha1.Image, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(imagesResource, "status", c.ns, image), &v1alpha1.Image{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.Image), err +} + +// Delete takes name of the image and deletes it. Returns an error if one occurs. +func (c *FakeImages) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. 
+ Invokes(testing.NewDeleteAction(imagesResource, c.ns, name), &v1alpha1.Image{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeImages) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(imagesResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1alpha1.ImageList{}) + return err +} + +// Patch applies the patch and returns the patched image. +func (c *FakeImages) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.Image, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(imagesResource, c.ns, name, data, subresources...), &v1alpha1.Image{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.Image), err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/generated_expansion.go b/vendor/github.com/knative/caching/pkg/client/clientset/versioned/typed/caching/v1alpha1/generated_expansion.go similarity index 89% rename from vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/generated_expansion.go rename to vendor/github.com/knative/caching/pkg/client/clientset/versioned/typed/caching/v1alpha1/generated_expansion.go index f0f511726..5b25aca6e 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/generated_expansion.go +++ b/vendor/github.com/knative/caching/pkg/client/clientset/versioned/typed/caching/v1alpha1/generated_expansion.go @@ -1,5 +1,5 @@ /* -Copyright The Kubernetes Authors. +Copyright 2018 The Knative Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -18,4 +18,4 @@ limitations under the License. package v1alpha1 -type AuditSinkExpansion interface{} +type ImageExpansion interface{} diff --git a/vendor/github.com/knative/caching/pkg/client/clientset/versioned/typed/caching/v1alpha1/image.go b/vendor/github.com/knative/caching/pkg/client/clientset/versioned/typed/caching/v1alpha1/image.go new file mode 100644 index 000000000..da1af3212 --- /dev/null +++ b/vendor/github.com/knative/caching/pkg/client/clientset/versioned/typed/caching/v1alpha1/image.go @@ -0,0 +1,174 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1alpha1 "github.com/knative/caching/pkg/apis/caching/v1alpha1" + scheme "github.com/knative/caching/pkg/client/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// ImagesGetter has a method to return a ImageInterface. +// A group's client should implement this interface. +type ImagesGetter interface { + Images(namespace string) ImageInterface +} + +// ImageInterface has methods to work with Image resources. 
+type ImageInterface interface { + Create(*v1alpha1.Image) (*v1alpha1.Image, error) + Update(*v1alpha1.Image) (*v1alpha1.Image, error) + UpdateStatus(*v1alpha1.Image) (*v1alpha1.Image, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1alpha1.Image, error) + List(opts v1.ListOptions) (*v1alpha1.ImageList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.Image, err error) + ImageExpansion +} + +// images implements ImageInterface +type images struct { + client rest.Interface + ns string +} + +// newImages returns a Images +func newImages(c *CachingV1alpha1Client, namespace string) *images { + return &images{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the image, and returns the corresponding image object, and an error if there is any. +func (c *images) Get(name string, options v1.GetOptions) (result *v1alpha1.Image, err error) { + result = &v1alpha1.Image{} + err = c.client.Get(). + Namespace(c.ns). + Resource("images"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Images that match those selectors. +func (c *images) List(opts v1.ListOptions) (result *v1alpha1.ImageList, err error) { + result = &v1alpha1.ImageList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("images"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested images. +func (c *images) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("images"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Create takes the representation of a image and creates it. Returns the server's representation of the image, and an error, if there is any. +func (c *images) Create(image *v1alpha1.Image) (result *v1alpha1.Image, err error) { + result = &v1alpha1.Image{} + err = c.client.Post(). + Namespace(c.ns). + Resource("images"). + Body(image). + Do(). + Into(result) + return +} + +// Update takes the representation of a image and updates it. Returns the server's representation of the image, and an error, if there is any. +func (c *images) Update(image *v1alpha1.Image) (result *v1alpha1.Image, err error) { + result = &v1alpha1.Image{} + err = c.client.Put(). + Namespace(c.ns). + Resource("images"). + Name(image.Name). + Body(image). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *images) UpdateStatus(image *v1alpha1.Image) (result *v1alpha1.Image, err error) { + result = &v1alpha1.Image{} + err = c.client.Put(). + Namespace(c.ns). + Resource("images"). + Name(image.Name). + SubResource("status"). + Body(image). + Do(). + Into(result) + return +} + +// Delete takes name of the image and deletes it. Returns an error if one occurs. +func (c *images) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("images"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. 
+func (c *images) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("images"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched image. +func (c *images) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.Image, err error) { + result = &v1alpha1.Image{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("images"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/informers/node/interface.go b/vendor/github.com/knative/caching/pkg/client/informers/externalversions/caching/interface.go similarity index 74% rename from vendor/k8s.io/client-go/informers/node/interface.go rename to vendor/github.com/knative/caching/pkg/client/informers/externalversions/caching/interface.go index 977369379..f561ac17e 100644 --- a/vendor/k8s.io/client-go/informers/node/interface.go +++ b/vendor/github.com/knative/caching/pkg/client/informers/externalversions/caching/interface.go @@ -1,5 +1,5 @@ /* -Copyright The Kubernetes Authors. +Copyright 2018 The Knative Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -16,20 +16,17 @@ limitations under the License. // Code generated by informer-gen. DO NOT EDIT. -package node +package caching import ( - internalinterfaces "k8s.io/client-go/informers/internalinterfaces" - v1alpha1 "k8s.io/client-go/informers/node/v1alpha1" - v1beta1 "k8s.io/client-go/informers/node/v1beta1" + v1alpha1 "github.com/knative/caching/pkg/client/informers/externalversions/caching/v1alpha1" + internalinterfaces "github.com/knative/caching/pkg/client/informers/externalversions/internalinterfaces" ) // Interface provides access to each of this group's versions. type Interface interface { // V1alpha1 provides access to shared informers for resources in V1alpha1. V1alpha1() v1alpha1.Interface - // V1beta1 provides access to shared informers for resources in V1beta1. - V1beta1() v1beta1.Interface } type group struct { @@ -47,8 +44,3 @@ func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakList func (g *group) V1alpha1() v1alpha1.Interface { return v1alpha1.New(g.factory, g.namespace, g.tweakListOptions) } - -// V1beta1 returns a new v1beta1.Interface. -func (g *group) V1beta1() v1beta1.Interface { - return v1beta1.New(g.factory, g.namespace, g.tweakListOptions) -} diff --git a/vendor/github.com/knative/caching/pkg/client/informers/externalversions/caching/v1alpha1/image.go b/vendor/github.com/knative/caching/pkg/client/informers/externalversions/caching/v1alpha1/image.go new file mode 100644 index 000000000..95793c185 --- /dev/null +++ b/vendor/github.com/knative/caching/pkg/client/informers/externalversions/caching/v1alpha1/image.go @@ -0,0 +1,89 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + time "time" + + cachingv1alpha1 "github.com/knative/caching/pkg/apis/caching/v1alpha1" + versioned "github.com/knative/caching/pkg/client/clientset/versioned" + internalinterfaces "github.com/knative/caching/pkg/client/informers/externalversions/internalinterfaces" + v1alpha1 "github.com/knative/caching/pkg/client/listers/caching/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// ImageInformer provides access to a shared informer and lister for +// Images. +type ImageInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha1.ImageLister +} + +type imageInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewImageInformer constructs a new informer for Image type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewImageInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredImageInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredImageInformer constructs a new informer for Image type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredImageInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CachingV1alpha1().Images(namespace).List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CachingV1alpha1().Images(namespace).Watch(options) + }, + }, + &cachingv1alpha1.Image{}, + resyncPeriod, + indexers, + ) +} + +func (f *imageInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredImageInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *imageInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&cachingv1alpha1.Image{}, f.defaultInformer) +} + +func (f *imageInformer) Lister() v1alpha1.ImageLister { + return v1alpha1.NewImageLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/coordination/v1/interface.go b/vendor/github.com/knative/caching/pkg/client/informers/externalversions/caching/v1alpha1/interface.go similarity index 76% rename from vendor/k8s.io/client-go/informers/coordination/v1/interface.go rename to vendor/github.com/knative/caching/pkg/client/informers/externalversions/caching/v1alpha1/interface.go index 05c4acbef..9eda181ad 100644 --- a/vendor/k8s.io/client-go/informers/coordination/v1/interface.go +++ b/vendor/github.com/knative/caching/pkg/client/informers/externalversions/caching/v1alpha1/interface.go @@ -1,5 +1,5 @@ /* -Copyright The Kubernetes Authors. +Copyright 2018 The Knative Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -16,16 +16,16 @@ limitations under the License. // Code generated by informer-gen. DO NOT EDIT. -package v1 +package v1alpha1 import ( - internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + internalinterfaces "github.com/knative/caching/pkg/client/informers/externalversions/internalinterfaces" ) // Interface provides access to all the informers in this group version. type Interface interface { - // Leases returns a LeaseInformer. - Leases() LeaseInformer + // Images returns a ImageInformer. + Images() ImageInformer } type version struct { @@ -39,7 +39,7 @@ func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakList return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } -// Leases returns a LeaseInformer. -func (v *version) Leases() LeaseInformer { - return &leaseInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +// Images returns a ImageInformer. 
+func (v *version) Images() ImageInformer { + return &imageInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} } diff --git a/vendor/k8s.io/code-generator/_examples/MixedCase/informers/externalversions/factory.go b/vendor/github.com/knative/caching/pkg/client/informers/externalversions/factory.go similarity index 92% rename from vendor/k8s.io/code-generator/_examples/MixedCase/informers/externalversions/factory.go rename to vendor/github.com/knative/caching/pkg/client/informers/externalversions/factory.go index 5a2d8f748..b31e38dda 100644 --- a/vendor/k8s.io/code-generator/_examples/MixedCase/informers/externalversions/factory.go +++ b/vendor/github.com/knative/caching/pkg/client/informers/externalversions/factory.go @@ -1,5 +1,5 @@ /* -Copyright The Kubernetes Authors. +Copyright 2018 The Knative Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -23,13 +23,13 @@ import ( sync "sync" time "time" + versioned "github.com/knative/caching/pkg/client/clientset/versioned" + caching "github.com/knative/caching/pkg/client/informers/externalversions/caching" + internalinterfaces "github.com/knative/caching/pkg/client/informers/externalversions/internalinterfaces" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" schema "k8s.io/apimachinery/pkg/runtime/schema" cache "k8s.io/client-go/tools/cache" - versioned "k8s.io/code-generator/_examples/MixedCase/clientset/versioned" - example "k8s.io/code-generator/_examples/MixedCase/informers/externalversions/example" - internalinterfaces "k8s.io/code-generator/_examples/MixedCase/informers/externalversions/internalinterfaces" ) // SharedInformerOption defines the functional option type for SharedInformerFactory. @@ -172,9 +172,9 @@ type SharedInformerFactory interface { ForResource(resource schema.GroupVersionResource) (GenericInformer, error) WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool - Example() example.Interface + Caching() caching.Interface } -func (f *sharedInformerFactory) Example() example.Interface { - return example.New(f, f.namespace, f.tweakListOptions) +func (f *sharedInformerFactory) Caching() caching.Interface { + return caching.New(f, f.namespace, f.tweakListOptions) } diff --git a/vendor/k8s.io/code-generator/_examples/MixedCase/informers/externalversions/generic.go b/vendor/github.com/knative/caching/pkg/client/informers/externalversions/generic.go similarity index 79% rename from vendor/k8s.io/code-generator/_examples/MixedCase/informers/externalversions/generic.go rename to vendor/github.com/knative/caching/pkg/client/informers/externalversions/generic.go index e039c8edf..5b935ccde 100644 --- a/vendor/k8s.io/code-generator/_examples/MixedCase/informers/externalversions/generic.go +++ b/vendor/github.com/knative/caching/pkg/client/informers/externalversions/generic.go @@ -1,5 +1,5 @@ /* -Copyright The Kubernetes Authors. +Copyright 2018 The Knative Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -21,9 +21,9 @@ package externalversions import ( "fmt" + v1alpha1 "github.com/knative/caching/pkg/apis/caching/v1alpha1" schema "k8s.io/apimachinery/pkg/runtime/schema" cache "k8s.io/client-go/tools/cache" - v1 "k8s.io/code-generator/_examples/MixedCase/apis/example/v1" ) // GenericInformer is type of SharedIndexInformer which will locate and delegate to other @@ -52,11 +52,9 @@ func (f *genericInformer) Lister() cache.GenericLister { // TODO extend this to unknown resources with a client pool func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) { switch resource { - // Group=example.crd.code-generator.k8s.io, Version=v1 - case v1.SchemeGroupVersion.WithResource("clustertesttypes"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Example().V1().ClusterTestTypes().Informer()}, nil - case v1.SchemeGroupVersion.WithResource("testtypes"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Example().V1().TestTypes().Informer()}, nil + // Group=caching.internal.knative.dev, Version=v1alpha1 + case v1alpha1.SchemeGroupVersion.WithResource("images"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Caching().V1alpha1().Images().Informer()}, nil } diff --git a/vendor/k8s.io/code-generator/_examples/MixedCase/informers/externalversions/internalinterfaces/factory_interfaces.go b/vendor/github.com/knative/caching/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go similarity index 80% rename from vendor/k8s.io/code-generator/_examples/MixedCase/informers/externalversions/internalinterfaces/factory_interfaces.go rename to vendor/github.com/knative/caching/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go index 2ed31b44d..7681abb4a 100644 --- a/vendor/k8s.io/code-generator/_examples/MixedCase/informers/externalversions/internalinterfaces/factory_interfaces.go +++ b/vendor/github.com/knative/caching/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go @@ -1,5 +1,5 @@ /* -Copyright The Kubernetes Authors. +Copyright 2018 The Knative Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -21,13 +21,12 @@ package internalinterfaces import ( time "time" + versioned "github.com/knative/caching/pkg/client/clientset/versioned" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" cache "k8s.io/client-go/tools/cache" - versioned "k8s.io/code-generator/_examples/MixedCase/clientset/versioned" ) -// NewInformerFunc takes versioned.Interface and time.Duration to return a SharedIndexInformer. type NewInformerFunc func(versioned.Interface, time.Duration) cache.SharedIndexInformer // SharedInformerFactory a small interface to allow for adding an informer without an import cycle @@ -36,5 +35,4 @@ type SharedInformerFactory interface { InformerFor(obj runtime.Object, newFunc NewInformerFunc) cache.SharedIndexInformer } -// TweakListOptionsFunc is a function that transforms a v1.ListOptions. 
type TweakListOptionsFunc func(*v1.ListOptions) diff --git a/vendor/k8s.io/client-go/listers/node/v1alpha1/expansion_generated.go b/vendor/github.com/knative/caching/pkg/client/listers/caching/v1alpha1/expansion_generated.go similarity index 67% rename from vendor/k8s.io/client-go/listers/node/v1alpha1/expansion_generated.go rename to vendor/github.com/knative/caching/pkg/client/listers/caching/v1alpha1/expansion_generated.go index a65c208fa..78c5ac732 100644 --- a/vendor/k8s.io/client-go/listers/node/v1alpha1/expansion_generated.go +++ b/vendor/github.com/knative/caching/pkg/client/listers/caching/v1alpha1/expansion_generated.go @@ -1,5 +1,5 @@ /* -Copyright The Kubernetes Authors. +Copyright 2018 The Knative Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -18,6 +18,10 @@ limitations under the License. package v1alpha1 -// RuntimeClassListerExpansion allows custom methods to be added to -// RuntimeClassLister. -type RuntimeClassListerExpansion interface{} +// ImageListerExpansion allows custom methods to be added to +// ImageLister. +type ImageListerExpansion interface{} + +// ImageNamespaceListerExpansion allows custom methods to be added to +// ImageNamespaceLister. +type ImageNamespaceListerExpansion interface{} diff --git a/vendor/github.com/knative/caching/pkg/client/listers/caching/v1alpha1/image.go b/vendor/github.com/knative/caching/pkg/client/listers/caching/v1alpha1/image.go new file mode 100644 index 000000000..4ee740e43 --- /dev/null +++ b/vendor/github.com/knative/caching/pkg/client/listers/caching/v1alpha1/image.go @@ -0,0 +1,94 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1alpha1 "github.com/knative/caching/pkg/apis/caching/v1alpha1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ImageLister helps list Images. +type ImageLister interface { + // List lists all Images in the indexer. + List(selector labels.Selector) (ret []*v1alpha1.Image, err error) + // Images returns an object that can list and get Images. + Images(namespace string) ImageNamespaceLister + ImageListerExpansion +} + +// imageLister implements the ImageLister interface. +type imageLister struct { + indexer cache.Indexer +} + +// NewImageLister returns a new ImageLister. +func NewImageLister(indexer cache.Indexer) ImageLister { + return &imageLister{indexer: indexer} +} + +// List lists all Images in the indexer. +func (s *imageLister) List(selector labels.Selector) (ret []*v1alpha1.Image, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.Image)) + }) + return ret, err +} + +// Images returns an object that can list and get Images. 
+func (s *imageLister) Images(namespace string) ImageNamespaceLister { + return imageNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// ImageNamespaceLister helps list and get Images. +type ImageNamespaceLister interface { + // List lists all Images in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1alpha1.Image, err error) + // Get retrieves the Image from the indexer for a given namespace and name. + Get(name string) (*v1alpha1.Image, error) + ImageNamespaceListerExpansion +} + +// imageNamespaceLister implements the ImageNamespaceLister +// interface. +type imageNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all Images in the indexer for a given namespace. +func (s imageNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.Image, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.Image)) + }) + return ret, err +} + +// Get retrieves the Image from the indexer for a given namespace and name. +func (s imageNamespaceLister) Get(name string) (*v1alpha1.Image, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha1.Resource("image"), name) + } + return obj.(*v1alpha1.Image), nil +} diff --git a/vendor/github.com/knative/caching/test/OWNERS b/vendor/github.com/knative/caching/test/OWNERS new file mode 100644 index 000000000..ed29d4015 --- /dev/null +++ b/vendor/github.com/knative/caching/test/OWNERS @@ -0,0 +1,7 @@ +# The OWNERS file is used by prow to automatically merge approved PRs. + +approvers: +- adrcunha +- jessiezcc +- srinivashegde86 +- steuhs diff --git a/vendor/github.com/knative/caching/test/presubmit-tests.sh b/vendor/github.com/knative/caching/test/presubmit-tests.sh new file mode 100755 index 000000000..d5d891322 --- /dev/null +++ b/vendor/github.com/knative/caching/test/presubmit-tests.sh @@ -0,0 +1,30 @@ +#!/usr/bin/env bash + +# Copyright 2018 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This script runs the presubmit tests; it is started by prow for each PR. +# For convenience, it can also be executed manually. +# Running the script without parameters, or with the --all-tests +# flag, causes all tests to be executed, in the right order. +# Use the flags --build-tests, --unit-tests and --integration-tests +# to run a specific set of tests. + +source $(dirname $0)/../vendor/github.com/knative/test-infra/scripts/presubmit-tests.sh + +# TODO(mattmoor): integration tests + +# We use the default build, unit and integration test runners. + +main $@ diff --git a/vendor/github.com/knative/pkg/.gitattributes b/vendor/github.com/knative/pkg/.gitattributes new file mode 100644 index 000000000..95f18a118 --- /dev/null +++ b/vendor/github.com/knative/pkg/.gitattributes @@ -0,0 +1,10 @@ +# This file is documented at https://git-scm.com/docs/gitattributes. 
+# Linguist-specific attributes are documented at +# https://github.com/github/linguist. + +**/zz_generated.*.go linguist-generated=true +/client/** linguist-generated=true +/test/** coverage-excluded=true +/metrics/gcp_metadata.go coverage-excluded=true + +*.sh text eol=lf diff --git a/vendor/github.com/knative/pkg/.github/issue-template.md b/vendor/github.com/knative/pkg/.github/issue-template.md new file mode 100644 index 000000000..cb9b5e00a --- /dev/null +++ b/vendor/github.com/knative/pkg/.github/issue-template.md @@ -0,0 +1,32 @@ + + +## Expected Behavior + +## Actual Behavior + +## Steps to Reproduce the Problem + +1. +2. +3. + +## Additional Info diff --git a/vendor/github.com/knative/pkg/.github/pull-request-template.md b/vendor/github.com/knative/pkg/.github/pull-request-template.md new file mode 100644 index 000000000..033ec443c --- /dev/null +++ b/vendor/github.com/knative/pkg/.github/pull-request-template.md @@ -0,0 +1,6 @@ + diff --git a/vendor/github.com/knative/pkg/.gitignore b/vendor/github.com/knative/pkg/.gitignore new file mode 100644 index 000000000..85baa82ae --- /dev/null +++ b/vendor/github.com/knative/pkg/.gitignore @@ -0,0 +1,11 @@ +# Operating system temporary files +.DS_Store + +# Editor/IDE specific settings +.idea +.vscode/ +*.iml + +# Temporary output of build tools +bazel-* +*.out diff --git a/vendor/github.com/knative/pkg/CONTRIBUTING.md b/vendor/github.com/knative/pkg/CONTRIBUTING.md new file mode 100644 index 000000000..16eddb0ac --- /dev/null +++ b/vendor/github.com/knative/pkg/CONTRIBUTING.md @@ -0,0 +1,5 @@ +# Contribution guidelines + +So you want to hack on Knative `pkg`? Yay! Please refer to Knative's overall +[contribution guidelines](https://github.com/knative/docs/blob/master/community/CONTRIBUTING.md) +to find out how you can help. diff --git a/vendor/github.com/knative/pkg/DEVELOPMENT.md b/vendor/github.com/knative/pkg/DEVELOPMENT.md new file mode 100644 index 000000000..31565202e --- /dev/null +++ b/vendor/github.com/knative/pkg/DEVELOPMENT.md @@ -0,0 +1,71 @@ +# Development + +This doc explains how to set up a development environment so you can get started +[contributing](https://github.com/knative/docs/blob/master/community/CONTRIBUTING.md) +to Knative `pkg`. Also take a look at: + +- [The pull request workflow](https://github.com/knative/docs/blob/master/community/CONTRIBUTING.md#pull-requests) + +## Getting started + +1. Create [a GitHub account](https://github.com/join) +1. Set up + [GitHub access via SSH](https://help.github.com/articles/connecting-to-github-with-ssh/) +1. Install [requirements](#requirements) +1. Set up your [shell environment](#environment-setup) +1. [Create and checkout a repo fork](#checkout-your-fork) + +Before submitting a PR, see also [CONTRIBUTING.md](./CONTRIBUTING.md). + +### Requirements + +You must install these tools: + +1. [`go`](https://golang.org/doc/install): The language Knative `pkg` is built + in +1. [`git`](https://help.github.com/articles/set-up-git/): For source control +1. [`dep`](https://github.com/golang/dep): For managing external dependencies. + +### Environment setup + +To get started you'll need to set these environment variables (we recommend +adding them to your `.bashrc`): + +1. `GOPATH`: If you don't have one, simply pick a directory and add + `export GOPATH=...` +1. `$GOPATH/bin` on `PATH`: This is so that tooling installed via `go get` will + work properly.
+ +`.bashrc` example: + +```shell +export GOPATH="$HOME/go" +export PATH="${PATH}:${GOPATH}/bin" +``` + +### Checkout your fork + +The Go tools require that you clone the repository to the +`src/github.com/knative/pkg` directory in your +[`GOPATH`](https://github.com/golang/go/wiki/SettingGOPATH). + +To check out this repository: + +1. Create your own + [fork of this repo](https://help.github.com/articles/fork-a-repo/) +1. Clone it to your machine: + +```shell +mkdir -p ${GOPATH}/src/github.com/knative +cd ${GOPATH}/src/github.com/knative +git clone git@github.com:${YOUR_GITHUB_USERNAME}/pkg.git +cd pkg +git remote add upstream git@github.com:knative/pkg.git +git remote set-url --push upstream no_push +``` + +_Adding the `upstream` remote sets you up nicely for regularly +[syncing your fork](https://help.github.com/articles/syncing-a-fork/)._ + +Once you reach this point you are ready to do a full build and deploy as +described below. diff --git a/vendor/github.com/knative/pkg/Gopkg.lock b/vendor/github.com/knative/pkg/Gopkg.lock new file mode 100644 index 000000000..90cf61935 --- /dev/null +++ b/vendor/github.com/knative/pkg/Gopkg.lock @@ -0,0 +1,1203 @@ +# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. + + +[[projects]] + digest = "1:ef8da480a66d7e8e9819261c3526685601b573e0005e84b75e47548d82021a7d" + name = "cloud.google.com/go" + packages = [ + "compute/metadata", + "container/apiv1", + "internal/version", + "monitoring/apiv3", + "trace/apiv2", + ] + pruneopts = "NUT" + revision = "0ebda48a7f143b1cce9eb37a8c1106ac762a3430" + version = "v0.34.0" + +[[projects]] + digest = "1:43fbf05ea84c860a4e86b557d156b1e72511cd29375d3f71adb522362710aea7" + name = "contrib.go.opencensus.io/exporter/stackdriver" + packages = [ + ".", + "monitoredresource", + ] + pruneopts = "NUT" + revision = "ab5a58af316a529613aadf9f50eeed1b6f044b2f" + version = "v0.9.2" + +[[projects]] + branch = "master" + digest = "1:cef70b547ce62d12ea8e5dcb9905bccb57ea1bb253ee6809fd79a17c29ca3cd5" + name = "contrib.go.opencensus.io/resource" + packages = ["resourcekeys"] + pruneopts = "NUT" + revision = "21591786a5e0c21806209b266cc6dfdfa85b3cdb" + +[[projects]] + digest = "1:4a31397b1b81c6856aab6d2d963a727b4235af18adaaedc2cc51646ae812f683" + name = "github.com/aws/aws-sdk-go" + packages = [ + "aws", + "aws/awserr", + "aws/awsutil", + "aws/client", + "aws/client/metadata", + "aws/corehandlers", + "aws/credentials", + "aws/credentials/ec2rolecreds", + "aws/credentials/endpointcreds", + "aws/credentials/processcreds", + "aws/credentials/stscreds", + "aws/csm", + "aws/defaults", + "aws/ec2metadata", + "aws/endpoints", + "aws/request", + "aws/session", + "aws/signer/v4", + "internal/ini", + "internal/sdkio", + "internal/sdkrand", + "internal/sdkuri", + "internal/shareddefaults", + "private/protocol", + "private/protocol/query", + "private/protocol/query/queryutil", + "private/protocol/rest", + "private/protocol/xml/xmlutil", + "service/sts", + ] + pruneopts = "NUT" + revision = "3991042237b45cf58c9d5f34295942d5533c28c6" + version = "v1.16.11" + +[[projects]] + branch = "master" + digest = "1:707ebe952a8b3d00b343c01536c79c73771d100f63ec6babeaed5c79e2b8a8dd" + name = "github.com/beorn7/perks" + packages = ["quantile"] + pruneopts = "NUT" + revision = "3a771d992973f24aa725d07868b467d1ddfceafb" + +[[projects]] + digest = "1:fa965c1fd0f17153f608037e109e62104058bc1d08d44849867795fd306fa8b8" + name = "github.com/census-instrumentation/opencensus-proto" + packages = [ + "gen-go/agent/common/v1", + 
"gen-go/metrics/v1", + "gen-go/resource/v1", + ] + pruneopts = "NUT" + revision = "7f2434bc10da710debe5c4315ed6d4df454b4024" + version = "v0.1.0" + +[[projects]] + digest = "1:6b21090f60571b20b3ddc2c8e48547dffcf409498ed6002c2cada023725ed377" + name = "github.com/davecgh/go-spew" + packages = ["spew"] + pruneopts = "NUT" + revision = "782f4967f2dc4564575ca782fe2d04090b5faca8" + +[[projects]] + digest = "1:32598368f409bbee79deb9d43569fcd92b9fb27f39155f5e166b3371217f051f" + name = "github.com/evanphx/json-patch" + packages = ["."] + pruneopts = "NUT" + revision = "72bf35d0ff611848c1dc9df0f976c81192392fa5" + version = "v4.1.0" + +[[projects]] + digest = "1:abfe129dc92b16fbf0cc9d6336096a2823151756f62072a700eb10754141b38e" + name = "github.com/ghodss/yaml" + packages = ["."] + pruneopts = "NUT" + revision = "73d445a93680fa1a78ae23a5839bad48f32ba1ee" + +[[projects]] + digest = "1:373397317168dd5ac00efda13940668f1947fd641f572b9cf386a86a99c63ca9" + name = "github.com/gobuffalo/envy" + packages = ["."] + pruneopts = "NUT" + revision = "801d7253ade1f895f74596b9a96147ed2d3b087e" + version = "v1.6.11" + +[[projects]] + digest = "1:a6afc27b2a73a5506832f3c5a1c19a30772cb69e7bd1ced4639eb36a55db224f" + name = "github.com/gogo/protobuf" + packages = [ + "proto", + "sortkeys", + ] + pruneopts = "NUT" + revision = "c0656edd0d9eab7c66d1eb0c568f9039345796f7" + +[[projects]] + digest = "1:78b8040ece2ff622580def2708b9eb0b2857711b6744c475439bf337e9c677ea" + name = "github.com/golang/glog" + packages = ["."] + pruneopts = "NUT" + revision = "44145f04b68cf362d9c4df2182967c2275eaefed" + +[[projects]] + branch = "master" + digest = "1:b7cb6054d3dff43b38ad2e92492f220f57ae6087ee797dca298139776749ace8" + name = "github.com/golang/groupcache" + packages = ["lru"] + pruneopts = "NUT" + revision = "5b532d6fd5efaf7fa130d4e859a2fde0fc3a9e1b" + +[[projects]] + digest = "1:4dacf728c83400b3e9d1d3025dd3c1e93e9a1b033726d1b193dc209f3fa9cb7a" + name = "github.com/golang/protobuf" + packages = [ + "proto", + "protoc-gen-go/descriptor", + "ptypes", + "ptypes/any", + "ptypes/duration", + "ptypes/empty", + "ptypes/struct", + "ptypes/timestamp", + "ptypes/wrappers", + ] + pruneopts = "NUT" + revision = "aa810b61a9c79d51363740d207bb46cf8e620ed5" + version = "v1.2.0" + +[[projects]] + branch = "master" + digest = "1:245bd4eb633039cd66106a5d340ae826d87f4e36a8602fcc940e14176fd26ea7" + name = "github.com/google/btree" + packages = ["."] + pruneopts = "NUT" + revision = "e89373fe6b4a7413d7acd6da1725b83ef713e6e4" + +[[projects]] + digest = "1:010d46ea3c1e730897e53058d1013a963f3f987675dda87df64f891b945281db" + name = "github.com/google/go-cmp" + packages = [ + "cmp", + "cmp/cmpopts", + "cmp/internal/diff", + "cmp/internal/flags", + "cmp/internal/function", + "cmp/internal/value", + ] + pruneopts = "NUT" + revision = "6f77996f0c42f7b84e5a2b252227263f93432e9b" + +[[projects]] + digest = "1:f9425215dccf1c63f659ec781ca46bc81804341821d0cd8d2459c5b58f8bd067" + name = "github.com/google/gofuzz" + packages = ["."] + pruneopts = "NUT" + revision = "44d81051d367757e1c7c6a5a86423ece9afcf63c" + +[[projects]] + branch = "master" + digest = "1:0d5e3798bfa2642ac268341c96710b8def1f3cbc3bc803c421d90704d72107d8" + name = "github.com/google/licenseclassifier" + packages = [ + ".", + "internal/sets", + "stringclassifier", + "stringclassifier/internal/pq", + "stringclassifier/searchset", + "stringclassifier/searchset/tokenizer", + ] + pruneopts = "NUT" + revision = "e979a0b10eebe748549c702a25e997c556349da6" + +[[projects]] + digest = 
"1:ab3ec1fe3e39bac4b3ab63390767766622be35b7cab03f47f787f9ec60522a53" + name = "github.com/google/uuid" + packages = ["."] + pruneopts = "NUT" + revision = "0cd6bf5da1e1c83f8b45653022c74f71af0538a4" + version = "v1.1.1" + +[[projects]] + digest = "1:fd4d1f4c2d75aee3833ee7d8ef11fcf42ddec3c63d1819548288c3d868d6eb14" + name = "github.com/googleapis/gax-go" + packages = [ + ".", + "v2", + ] + pruneopts = "NUT" + revision = "c8a15bac9b9fe955bd9f900272f9a306465d28cf" + version = "v2.0.3" + +[[projects]] + digest = "1:27b4ab41ffdc76ad6db56db327a4db234a59588ef059fc3fd678ba0bc6b9094f" + name = "github.com/googleapis/gnostic" + packages = [ + "OpenAPIv2", + "compiler", + "extensions", + ] + pruneopts = "NUT" + revision = "0c5108395e2debce0d731cf0287ddf7242066aba" + +[[projects]] + digest = "1:4a0c072e44da763409da72d41492373a034baf2e6d849c76d239b4abdfbb6c49" + name = "github.com/gorilla/websocket" + packages = ["."] + pruneopts = "NUT" + revision = "66b9c49e59c6c48f0ffce28c2d8b8a5678502c6d" + version = "v1.4.0" + +[[projects]] + branch = "master" + digest = "1:7fdf3223c7372d1ced0b98bf53457c5e89d89aecbad9a77ba9fcc6e01f9e5621" + name = "github.com/gregjones/httpcache" + packages = [ + ".", + "diskcache", + ] + pruneopts = "NUT" + revision = "9cad4c3443a7200dd6400aef47183728de563a38" + +[[projects]] + digest = "1:475b179287e8afdcd352014b2c2500e67decdf63e66125e2129286873453e1cd" + name = "github.com/hashicorp/golang-lru" + packages = [ + ".", + "simplelru", + ] + pruneopts = "NUT" + revision = "a0d98a5f288019575c6d1f4bb1573fef2d1fcdc4" + +[[projects]] + digest = "1:9a52adf44086cead3b384e5d0dbf7a1c1cce65e67552ee3383a8561c42a18cd3" + name = "github.com/imdario/mergo" + packages = ["."] + pruneopts = "NUT" + revision = "9f23e2d6bd2a77f959b2bf6acdbefd708a83a4a4" + version = "v0.3.6" + +[[projects]] + digest = "1:1f2aebae7e7c856562355ec0198d8ca2fa222fb05e5b1b66632a1fce39631885" + name = "github.com/jmespath/go-jmespath" + packages = ["."] + pruneopts = "NUT" + revision = "c2b33e84" + +[[projects]] + digest = "1:da62aa6632d04e080b8a8b85a59ed9ed1550842a0099a55f3ae3a20d02a3745a" + name = "github.com/joho/godotenv" + packages = ["."] + pruneopts = "NUT" + revision = "23d116af351c84513e1946b527c88823e476be13" + version = "v1.3.0" + +[[projects]] + digest = "1:0243cffa4a3410f161ee613dfdd903a636d07e838a42d341da95d81f42cd1d41" + name = "github.com/json-iterator/go" + packages = ["."] + pruneopts = "NUT" + revision = "f2b4162afba35581b6d4a50d3b8f34e33c144682" + +[[projects]] + branch = "master" + digest = "1:1bfc083da5bbeb7abaac53c56890eb14eb11bac9ec985bfe338c4bbb0540c9ba" + name = "github.com/knative/test-infra" + packages = [ + "scripts", + "tools/dep-collector", + ] + pruneopts = "UT" + revision = "1576da30069624094cf01719452da944b3046826" + +[[projects]] + digest = "1:56dbf15e091bf7926cb33a57cb6bdfc658fc6d3498d2f76f10a97ce7856f1fde" + name = "github.com/markbates/inflect" + packages = ["."] + pruneopts = "NUT" + revision = "24b83195037b3bc61fcda2d28b7b0518bce293b6" + version = "v1.0.4" + +[[projects]] + branch = "master" + digest = "1:0e9bfc47ab9941ecc3344e580baca5deb4091177e84dd9773b48b38ec26b93d5" + name = "github.com/mattbaird/jsonpatch" + packages = ["."] + pruneopts = "NUT" + revision = "81af80346b1a01caae0cbc27fd3c1ba5b11e189f" + +[[projects]] + digest = "1:5985ef4caf91ece5d54817c11ea25f182697534f8ae6521eadcd628c142ac4b6" + name = "github.com/matttproud/golang_protobuf_extensions" + packages = ["pbutil"] + pruneopts = "NUT" + revision = "c12348ce28de40eed0136aa2b644d0ee0650e56c" + version = "v1.0.1" + 
+[[projects]] + digest = "1:2f42fa12d6911c7b7659738758631bec870b7e9b4c6be5444f963cdcfccc191f" + name = "github.com/modern-go/concurrent" + packages = ["."] + pruneopts = "NUT" + revision = "bacd9c7ef1dd9b15be4a9909b8ac7a4e313eec94" + version = "1.0.3" + +[[projects]] + digest = "1:c6aca19413b13dc59c220ad7430329e2ec454cc310bc6d8de2c7e2b93c18a0f6" + name = "github.com/modern-go/reflect2" + packages = ["."] + pruneopts = "NUT" + revision = "4b7aa43c6742a2c18fdef89dd197aaae7dac7ccd" + version = "1.0.1" + +[[projects]] + digest = "1:22d4043da943b919108e0d1b07983b8d29edeadfba9fb8f3213208d3e9798aae" + name = "github.com/openzipkin/zipkin-go" + packages = [ + ".", + "idgenerator", + "model", + "propagation", + "reporter", + "reporter/http", + "reporter/recorder", + ] + pruneopts = "NUT" + revision = "1b5162aa314e6ccfcf83777bfb5218988c9e8283" + version = "v0.1.6" + +[[projects]] + branch = "master" + digest = "1:3bf17a6e6eaa6ad24152148a631d18662f7212e21637c2699bff3369b7f00fa2" + name = "github.com/petar/GoLLRB" + packages = ["llrb"] + pruneopts = "NUT" + revision = "53be0d36a84c2a886ca057d34b6aa4468df9ccb4" + +[[projects]] + digest = "1:6c6d91dc326ed6778783cff869c49fb2f61303cdd2ebbcf90abe53505793f3b6" + name = "github.com/peterbourgon/diskv" + packages = ["."] + pruneopts = "NUT" + revision = "5f041e8faa004a95c88a202771f4cc3e991971e6" + version = "v2.0.1" + +[[projects]] + digest = "1:14715f705ff5dfe0ffd6571d7d201dd8e921030f8070321a79380d8ca4ec1a24" + name = "github.com/pkg/errors" + packages = ["."] + pruneopts = "NUT" + revision = "ba968bfe8b2f7e042a574c888954fccecfa385b4" + version = "v0.8.1" + +[[projects]] + digest = "1:7c7cfeecd2b7147bcfec48a4bf622b4879e26aec145a9e373ce51d0c23b16f6b" + name = "github.com/prometheus/client_golang" + packages = [ + "prometheus", + "prometheus/internal", + "prometheus/promhttp", + ] + pruneopts = "NUT" + revision = "505eaef017263e299324067d40ca2c48f6a2cf50" + version = "v0.9.2" + +[[projects]] + branch = "master" + digest = "1:2d5cd61daa5565187e1d96bae64dbbc6080dacf741448e9629c64fd93203b0d4" + name = "github.com/prometheus/client_model" + packages = ["go"] + pruneopts = "NUT" + revision = "5c3871d89910bfb32f5fcab2aa4b9ec68e65a99f" + +[[projects]] + branch = "master" + digest = "1:fad5a35eea6a1a33d6c8f949fbc146f24275ca809ece854248187683f52cc30b" + name = "github.com/prometheus/common" + packages = [ + "expfmt", + "internal/bitbucket.org/ww/goautoneg", + "model", + ] + pruneopts = "NUT" + revision = "c7de2306084e37d54b8be01f3541a8464345e9a5" + +[[projects]] + branch = "master" + digest = "1:102dea0c03a915acfc634b7c67f2662012b5483b56d9025e33f5188e112759b6" + name = "github.com/prometheus/procfs" + packages = [ + ".", + "internal/util", + "nfs", + "xfs", + ] + pruneopts = "NUT" + revision = "185b4288413d2a0dd0806f78c90dde719829e5ae" + +[[projects]] + digest = "1:e09ada96a5a41deda4748b1659cc8953961799e798aea557257b56baee4ecaf3" + name = "github.com/rogpeppe/go-internal" + packages = [ + "modfile", + "module", + "semver", + ] + pruneopts = "NUT" + revision = "4bbc89b6501cca7dd6b5557d78d70c8d2c6e8b97" + +[[projects]] + digest = "1:d917313f309bda80d27274d53985bc65651f81a5b66b820749ac7f8ef061fd04" + name = "github.com/sergi/go-diff" + packages = ["diffmatchpatch"] + pruneopts = "NUT" + revision = "1744e2970ca51c86172c8190fadad617561ed6e7" + version = "v1.0.0" + +[[projects]] + digest = "1:9d8420bbf131d1618bde6530af37c3799340d3762cc47210c1d9532a4c3a2779" + name = "github.com/spf13/pflag" + packages = ["."] + pruneopts = "NUT" + revision = 
"298182f68c66c05229eb03ac171abe6e309ee79a" + version = "v1.0.3" + +[[projects]] + digest = "1:b8baa7541ef444be218da02d3a7b607d33513263660e489d86d429afbffcdd86" + name = "go.opencensus.io" + packages = [ + ".", + "exporter/prometheus", + "exporter/zipkin", + "internal", + "internal/tagencoding", + "metric/metricdata", + "metric/metricproducer", + "plugin/ocgrpc", + "plugin/ochttp", + "plugin/ochttp/propagation/b3", + "resource", + "stats", + "stats/internal", + "stats/view", + "tag", + "trace", + "trace/internal", + "trace/propagation", + "trace/tracestate", + ] + pruneopts = "NUT" + revision = "75c0cca22312e51bfd4fafdbe9197ae399e18b38" + version = "v0.20.2" + +[[projects]] + digest = "1:22f696cee54865fb8e9ff91df7b633f6b8f22037a8015253c6b6a71ca82219c7" + name = "go.uber.org/atomic" + packages = ["."] + pruneopts = "NUT" + revision = "1ea20fb1cbb1cc08cbd0d913a96dead89aa18289" + version = "v1.3.2" + +[[projects]] + digest = "1:58ca93bdf81bac106ded02226b5395a0595d5346cdc4caa8d9c1f3a5f8f9976e" + name = "go.uber.org/multierr" + packages = ["."] + pruneopts = "NUT" + revision = "3c4937480c32f4c13a875a1829af76c98ca3d40a" + version = "v1.1.0" + +[[projects]] + digest = "1:5ab79d2a36037de1ab2908733a2cab0c08b12f6956e3e1eab07cd1b2abf7b903" + name = "go.uber.org/zap" + packages = [ + ".", + "buffer", + "internal/bufferpool", + "internal/color", + "internal/exit", + "internal/ztest", + "zapcore", + "zaptest", + ] + pruneopts = "NUT" + revision = "67bc79d13d155c02fd008f721863ff8cc5f30659" + +[[projects]] + digest = "1:624a05c7c6ed502bf77364cd3d54631383dafc169982fddd8ee77b53c3d9cccf" + name = "golang.org/x/crypto" + packages = ["ssh/terminal"] + pruneopts = "NUT" + revision = "81e90905daefcd6fd217b62423c0908922eadb30" + +[[projects]] + branch = "master" + digest = "1:3033eba8bb0c8f2c6720e68e4c14e55b577ae9debb5f5b7b8cc6f319d89edc82" + name = "golang.org/x/net" + packages = [ + "context", + "context/ctxhttp", + "http/httpguts", + "http2", + "http2/hpack", + "idna", + "internal/timeseries", + "trace", + ] + pruneopts = "NUT" + revision = "49bb7cea24b1df9410e1712aa6433dae904ff66a" + +[[projects]] + branch = "master" + digest = "1:dcb89c032286a9c3c5118a1496f8e0e237c1437f5356ac9602f6fdef560a5c21" + name = "golang.org/x/oauth2" + packages = [ + ".", + "google", + "internal", + "jws", + "jwt", + ] + pruneopts = "NUT" + revision = "c57b0facaced709681d9f90397429b9430a74754" + +[[projects]] + branch = "master" + digest = "1:c313aef534e493304f3666fbd24dca5932ebf776a82b7a40f961c9355794a1b1" + name = "golang.org/x/sync" + packages = [ + "errgroup", + "semaphore", + ] + pruneopts = "NUT" + revision = "1d60e4601c6fd243af51cc01ddf169918a5407ca" + +[[projects]] + branch = "master" + digest = "1:a801d3c417117b67a96353daad340b250619780b75c29b652ea13697c946553e" + name = "golang.org/x/sys" + packages = [ + "unix", + "windows", + ] + pruneopts = "NUT" + revision = "e072cadbbdc8dd3d3ffa82b8b4b9304c261d9311" + +[[projects]] + digest = "1:e7071ed636b5422cc51c0e3a6cebc229d6c9fffc528814b519a980641422d619" + name = "golang.org/x/text" + packages = [ + "collate", + "collate/build", + "internal/colltab", + "internal/gen", + "internal/tag", + "internal/triegen", + "internal/ucd", + "language", + "secure/bidirule", + "transform", + "unicode/bidi", + "unicode/cldr", + "unicode/norm", + "unicode/rangetable", + ] + pruneopts = "NUT" + revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0" + version = "v0.3.0" + +[[projects]] + digest = "1:d37b0ef2944431fe9e8ef35c6fffc8990d9e2ca300588df94a6890f3649ae365" + name = "golang.org/x/time" + 
packages = ["rate"] + pruneopts = "NUT" + revision = "f51c12702a4d776e4c1fa9b0fabab841babae631" + +[[projects]] + branch = "master" + digest = "1:e1c96c8c8ce0af57da9dccb008e540b3d13b55ea04b530fb4fceb81706082bdd" + name = "golang.org/x/tools" + packages = [ + "go/ast/astutil", + "imports", + "internal/fastwalk", + ] + pruneopts = "NUT" + revision = "bfb5194568d3c40db30de765edc44cae9fc94671" + +[[projects]] + branch = "master" + digest = "1:7689634b1a2940f3e725a37a7598b5462674a5b016b17d8ce22c8f71cacb0b34" + name = "google.golang.org/api" + packages = [ + "googleapi/transport", + "internal", + "iterator", + "option", + "support/bundler", + "transport", + "transport/grpc", + "transport/http", + "transport/http/internal/propagation", + ] + pruneopts = "NUT" + revision = "ce4acf611b3920b111e21272a15ddaea10c1fd2e" + +[[projects]] + digest = "1:898bf528e5c601c4a1111586f75ab9515467ebe7a41ae849d5a839720d4e2580" + name = "google.golang.org/appengine" + packages = [ + ".", + "internal", + "internal/app_identity", + "internal/base", + "internal/datastore", + "internal/log", + "internal/modules", + "internal/remote_api", + "internal/socket", + "internal/urlfetch", + "socket", + "urlfetch", + ] + pruneopts = "NUT" + revision = "e9657d882bb81064595ca3b56cbe2546bbabf7b1" + version = "v1.4.0" + +[[projects]] + branch = "master" + digest = "1:3689f4cc57cc55b4631efc4b778c47d2a888a7060b4108f42cf289a2033be5ba" + name = "google.golang.org/genproto" + packages = [ + "googleapis/api", + "googleapis/api/annotations", + "googleapis/api/distribution", + "googleapis/api/label", + "googleapis/api/metric", + "googleapis/api/monitoredres", + "googleapis/container/v1", + "googleapis/devtools/cloudtrace/v2", + "googleapis/monitoring/v3", + "googleapis/rpc/status", + "protobuf/field_mask", + ] + pruneopts = "NUT" + revision = "e7d98fc518a78c9f8b5ee77be7b0b317475d89e1" + +[[projects]] + digest = "1:40d377bfddee53c669db275071aa08b68d021941311580d902ab7c862d8741c1" + name = "google.golang.org/grpc" + packages = [ + ".", + "balancer", + "balancer/base", + "balancer/roundrobin", + "binarylog/grpc_binarylog_v1", + "codes", + "connectivity", + "credentials", + "credentials/internal", + "credentials/oauth", + "encoding", + "encoding/proto", + "grpclog", + "internal", + "internal/backoff", + "internal/binarylog", + "internal/channelz", + "internal/envconfig", + "internal/grpcrand", + "internal/grpcsync", + "internal/syscall", + "internal/transport", + "keepalive", + "metadata", + "naming", + "peer", + "resolver", + "resolver/dns", + "resolver/passthrough", + "stats", + "status", + "tap", + ] + pruneopts = "NUT" + revision = "df014850f6dee74ba2fc94874043a9f3f75fbfd8" + version = "v1.17.0" + +[[projects]] + digest = "1:2d1fbdc6777e5408cabeb02bf336305e724b925ff4546ded0fa8715a7267922a" + name = "gopkg.in/inf.v0" + packages = ["."] + pruneopts = "NUT" + revision = "d2d2541c53f18d2a059457998ce2876cc8e67cbf" + version = "v0.9.1" + +[[projects]] + digest = "1:18108594151654e9e696b27b181b953f9a90b16bf14d253dd1b397b025a1487f" + name = "gopkg.in/yaml.v2" + packages = ["."] + pruneopts = "NUT" + revision = "51d6538a90f86fe93ac480b35f37b2be17fef232" + version = "v2.2.2" + +[[projects]] + digest = "1:1d34342a53d8f8c625260d6c4c2e5c99442b1635bbf4208f426bd12aa210b870" + name = "k8s.io/api" + packages = [ + "admission/v1beta1", + "admissionregistration/v1alpha1", + "admissionregistration/v1beta1", + "apps/v1", + "apps/v1beta1", + "apps/v1beta2", + "authentication/v1", + "authentication/v1beta1", + "authorization/v1", + "authorization/v1beta1", + 
"autoscaling/v1", + "autoscaling/v2beta1", + "autoscaling/v2beta2", + "batch/v1", + "batch/v1beta1", + "batch/v2alpha1", + "certificates/v1beta1", + "coordination/v1beta1", + "core/v1", + "events/v1beta1", + "extensions/v1beta1", + "networking/v1", + "policy/v1beta1", + "rbac/v1", + "rbac/v1alpha1", + "rbac/v1beta1", + "scheduling/v1alpha1", + "scheduling/v1beta1", + "settings/v1alpha1", + "storage/v1", + "storage/v1alpha1", + "storage/v1beta1", + ] + pruneopts = "NUT" + revision = "145d52631d00cbfe68490d19ae4f0f501fd31a95" + version = "kubernetes-1.12.6" + +[[projects]] + digest = "1:6def8acd040e85b3becb1dd73de1f22a787fd9776e32aa3bbac9cf9727b53d0d" + name = "k8s.io/apiextensions-apiserver" + packages = [ + "pkg/apis/apiextensions", + "pkg/apis/apiextensions/v1beta1", + "pkg/client/clientset/clientset", + "pkg/client/clientset/clientset/fake", + "pkg/client/clientset/clientset/scheme", + "pkg/client/clientset/clientset/typed/apiextensions/v1beta1", + "pkg/client/clientset/clientset/typed/apiextensions/v1beta1/fake", + "pkg/client/informers/externalversions", + "pkg/client/informers/externalversions/apiextensions", + "pkg/client/informers/externalversions/apiextensions/v1beta1", + "pkg/client/informers/externalversions/internalinterfaces", + "pkg/client/listers/apiextensions/v1beta1", + ] + pruneopts = "NUT" + revision = "bd0469a053ff88529a61145790499fe78a09a49d" + version = "kubernetes-1.12.6" + +[[projects]] + digest = "1:119ae04ee44c5d179dcde1ee686f057cfe3fc54a7ee8484b920932a80309e88b" + name = "k8s.io/apimachinery" + packages = [ + "pkg/api/equality", + "pkg/api/errors", + "pkg/api/meta", + "pkg/api/resource", + "pkg/api/validation", + "pkg/apis/meta/internalversion", + "pkg/apis/meta/v1", + "pkg/apis/meta/v1/unstructured", + "pkg/apis/meta/v1/validation", + "pkg/apis/meta/v1beta1", + "pkg/conversion", + "pkg/conversion/queryparams", + "pkg/fields", + "pkg/labels", + "pkg/runtime", + "pkg/runtime/schema", + "pkg/runtime/serializer", + "pkg/runtime/serializer/json", + "pkg/runtime/serializer/protobuf", + "pkg/runtime/serializer/recognizer", + "pkg/runtime/serializer/streaming", + "pkg/runtime/serializer/versioning", + "pkg/selection", + "pkg/types", + "pkg/util/cache", + "pkg/util/clock", + "pkg/util/diff", + "pkg/util/errors", + "pkg/util/framer", + "pkg/util/intstr", + "pkg/util/json", + "pkg/util/mergepatch", + "pkg/util/naming", + "pkg/util/net", + "pkg/util/runtime", + "pkg/util/sets", + "pkg/util/sets/types", + "pkg/util/strategicpatch", + "pkg/util/validation", + "pkg/util/validation/field", + "pkg/util/wait", + "pkg/util/yaml", + "pkg/version", + "pkg/watch", + "third_party/forked/golang/json", + "third_party/forked/golang/reflect", + ] + pruneopts = "NUT" + revision = "01f179d85dbce0f2e0e4351a92394b38694b7cae" + version = "kubernetes-1.12.6" + +[[projects]] + digest = "1:b7dd0420e85cb2968ffb945f2810ea6c796dc2a08660618e2200c08c596f0624" + name = "k8s.io/client-go" + packages = [ + "discovery", + "discovery/fake", + "dynamic", + "dynamic/fake", + "informers", + "informers/admissionregistration", + "informers/admissionregistration/v1alpha1", + "informers/admissionregistration/v1beta1", + "informers/apps", + "informers/apps/v1", + "informers/apps/v1beta1", + "informers/apps/v1beta2", + "informers/autoscaling", + "informers/autoscaling/v1", + "informers/autoscaling/v2beta1", + "informers/autoscaling/v2beta2", + "informers/batch", + "informers/batch/v1", + "informers/batch/v1beta1", + "informers/batch/v2alpha1", + "informers/certificates", + "informers/certificates/v1beta1", + 
"informers/coordination", + "informers/coordination/v1beta1", + "informers/core", + "informers/core/v1", + "informers/events", + "informers/events/v1beta1", + "informers/extensions", + "informers/extensions/v1beta1", + "informers/internalinterfaces", + "informers/networking", + "informers/networking/v1", + "informers/policy", + "informers/policy/v1beta1", + "informers/rbac", + "informers/rbac/v1", + "informers/rbac/v1alpha1", + "informers/rbac/v1beta1", + "informers/scheduling", + "informers/scheduling/v1alpha1", + "informers/scheduling/v1beta1", + "informers/settings", + "informers/settings/v1alpha1", + "informers/storage", + "informers/storage/v1", + "informers/storage/v1alpha1", + "informers/storage/v1beta1", + "kubernetes", + "kubernetes/fake", + "kubernetes/scheme", + "kubernetes/typed/admissionregistration/v1alpha1", + "kubernetes/typed/admissionregistration/v1alpha1/fake", + "kubernetes/typed/admissionregistration/v1beta1", + "kubernetes/typed/admissionregistration/v1beta1/fake", + "kubernetes/typed/apps/v1", + "kubernetes/typed/apps/v1/fake", + "kubernetes/typed/apps/v1beta1", + "kubernetes/typed/apps/v1beta1/fake", + "kubernetes/typed/apps/v1beta2", + "kubernetes/typed/apps/v1beta2/fake", + "kubernetes/typed/authentication/v1", + "kubernetes/typed/authentication/v1/fake", + "kubernetes/typed/authentication/v1beta1", + "kubernetes/typed/authentication/v1beta1/fake", + "kubernetes/typed/authorization/v1", + "kubernetes/typed/authorization/v1/fake", + "kubernetes/typed/authorization/v1beta1", + "kubernetes/typed/authorization/v1beta1/fake", + "kubernetes/typed/autoscaling/v1", + "kubernetes/typed/autoscaling/v1/fake", + "kubernetes/typed/autoscaling/v2beta1", + "kubernetes/typed/autoscaling/v2beta1/fake", + "kubernetes/typed/autoscaling/v2beta2", + "kubernetes/typed/autoscaling/v2beta2/fake", + "kubernetes/typed/batch/v1", + "kubernetes/typed/batch/v1/fake", + "kubernetes/typed/batch/v1beta1", + "kubernetes/typed/batch/v1beta1/fake", + "kubernetes/typed/batch/v2alpha1", + "kubernetes/typed/batch/v2alpha1/fake", + "kubernetes/typed/certificates/v1beta1", + "kubernetes/typed/certificates/v1beta1/fake", + "kubernetes/typed/coordination/v1beta1", + "kubernetes/typed/coordination/v1beta1/fake", + "kubernetes/typed/core/v1", + "kubernetes/typed/core/v1/fake", + "kubernetes/typed/events/v1beta1", + "kubernetes/typed/events/v1beta1/fake", + "kubernetes/typed/extensions/v1beta1", + "kubernetes/typed/extensions/v1beta1/fake", + "kubernetes/typed/networking/v1", + "kubernetes/typed/networking/v1/fake", + "kubernetes/typed/policy/v1beta1", + "kubernetes/typed/policy/v1beta1/fake", + "kubernetes/typed/rbac/v1", + "kubernetes/typed/rbac/v1/fake", + "kubernetes/typed/rbac/v1alpha1", + "kubernetes/typed/rbac/v1alpha1/fake", + "kubernetes/typed/rbac/v1beta1", + "kubernetes/typed/rbac/v1beta1/fake", + "kubernetes/typed/scheduling/v1alpha1", + "kubernetes/typed/scheduling/v1alpha1/fake", + "kubernetes/typed/scheduling/v1beta1", + "kubernetes/typed/scheduling/v1beta1/fake", + "kubernetes/typed/settings/v1alpha1", + "kubernetes/typed/settings/v1alpha1/fake", + "kubernetes/typed/storage/v1", + "kubernetes/typed/storage/v1/fake", + "kubernetes/typed/storage/v1alpha1", + "kubernetes/typed/storage/v1alpha1/fake", + "kubernetes/typed/storage/v1beta1", + "kubernetes/typed/storage/v1beta1/fake", + "listers/admissionregistration/v1alpha1", + "listers/admissionregistration/v1beta1", + "listers/apps/v1", + "listers/apps/v1beta1", + "listers/apps/v1beta2", + "listers/autoscaling/v1", + 
"listers/autoscaling/v2beta1", + "listers/autoscaling/v2beta2", + "listers/batch/v1", + "listers/batch/v1beta1", + "listers/batch/v2alpha1", + "listers/certificates/v1beta1", + "listers/coordination/v1beta1", + "listers/core/v1", + "listers/events/v1beta1", + "listers/extensions/v1beta1", + "listers/networking/v1", + "listers/policy/v1beta1", + "listers/rbac/v1", + "listers/rbac/v1alpha1", + "listers/rbac/v1beta1", + "listers/scheduling/v1alpha1", + "listers/scheduling/v1beta1", + "listers/settings/v1alpha1", + "listers/storage/v1", + "listers/storage/v1alpha1", + "listers/storage/v1beta1", + "pkg/apis/clientauthentication", + "pkg/apis/clientauthentication/v1alpha1", + "pkg/apis/clientauthentication/v1beta1", + "pkg/version", + "plugin/pkg/client/auth/exec", + "rest", + "rest/watch", + "testing", + "tools/auth", + "tools/cache", + "tools/clientcmd", + "tools/clientcmd/api", + "tools/clientcmd/api/latest", + "tools/clientcmd/api/v1", + "tools/metrics", + "tools/pager", + "tools/record", + "tools/reference", + "transport", + "util/buffer", + "util/cert", + "util/connrotation", + "util/flowcontrol", + "util/homedir", + "util/integer", + "util/retry", + "util/workqueue", + ] + pruneopts = "NUT" + revision = "78295b709ec6fa5be12e35892477a326dea2b5d3" + version = "kubernetes-1.12.6" + +[[projects]] + digest = "1:26b81b5e76e3f84ea5140da4f74649576e470f79091d2ef8e0d1b5000bc636ca" + name = "k8s.io/code-generator" + packages = [ + "cmd/client-gen", + "cmd/client-gen/args", + "cmd/client-gen/generators", + "cmd/client-gen/generators/fake", + "cmd/client-gen/generators/scheme", + "cmd/client-gen/generators/util", + "cmd/client-gen/path", + "cmd/client-gen/types", + "cmd/deepcopy-gen", + "cmd/deepcopy-gen/args", + "cmd/defaulter-gen", + "cmd/defaulter-gen/args", + "cmd/informer-gen", + "cmd/informer-gen/args", + "cmd/informer-gen/generators", + "cmd/lister-gen", + "cmd/lister-gen/args", + "cmd/lister-gen/generators", + "pkg/util", + ] + pruneopts = "T" + revision = "b1289fc74931d4b6b04bd1a259acfc88a2cb0a66" + version = "kubernetes-1.12.6" + +[[projects]] + branch = "master" + digest = "1:39912eb5f8eaf46486faae0839586c27c93423e552f76875defa048f52c15c15" + name = "k8s.io/gengo" + packages = [ + "args", + "examples/deepcopy-gen/generators", + "examples/defaulter-gen/generators", + "examples/set-gen/sets", + "generator", + "namer", + "parser", + "types", + ] + pruneopts = "NUT" + revision = "e17681d19d3ac4837a019ece36c2a0ec31ffe985" + +[[projects]] + digest = "1:c263611800c3a97991dbcf9d3bc4de390f6224aaa8ca0a7226a9d734f65a416a" + name = "k8s.io/klog" + packages = ["."] + pruneopts = "NUT" + revision = "71442cd4037d612096940ceb0f3fec3f7fff66e0" + version = "v0.2.0" + +[[projects]] + branch = "master" + digest = "1:a2c842a1e0aed96fd732b535514556323a6f5edfded3b63e5e0ab1bce188aa54" + name = "k8s.io/kube-openapi" + packages = ["pkg/util/proto"] + pruneopts = "NUT" + revision = "e3762e86a74c878ffed47484592986685639c2cd" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + input-imports = [ + "cloud.google.com/go/compute/metadata", + "contrib.go.opencensus.io/exporter/stackdriver", + "contrib.go.opencensus.io/exporter/stackdriver/monitoredresource", + "github.com/davecgh/go-spew/spew", + "github.com/evanphx/json-patch", + "github.com/ghodss/yaml", + "github.com/golang/glog", + "github.com/google/go-cmp/cmp", + "github.com/google/go-cmp/cmp/cmpopts", + "github.com/google/uuid", + "github.com/gorilla/websocket", + "github.com/knative/test-infra/scripts", + 
"github.com/knative/test-infra/tools/dep-collector", + "github.com/markbates/inflect", + "github.com/mattbaird/jsonpatch", + "github.com/openzipkin/zipkin-go", + "github.com/openzipkin/zipkin-go/model", + "github.com/openzipkin/zipkin-go/reporter", + "github.com/openzipkin/zipkin-go/reporter/http", + "github.com/openzipkin/zipkin-go/reporter/recorder", + "github.com/pkg/errors", + "github.com/rogpeppe/go-internal/semver", + "github.com/spf13/pflag", + "go.opencensus.io/exporter/prometheus", + "go.opencensus.io/exporter/zipkin", + "go.opencensus.io/plugin/ochttp", + "go.opencensus.io/plugin/ochttp/propagation/b3", + "go.opencensus.io/stats", + "go.opencensus.io/stats/view", + "go.opencensus.io/tag", + "go.opencensus.io/trace", + "go.uber.org/zap", + "go.uber.org/zap/zapcore", + "go.uber.org/zap/zaptest", + "golang.org/x/sync/errgroup", + "k8s.io/api/admission/v1beta1", + "k8s.io/api/admissionregistration/v1beta1", + "k8s.io/api/apps/v1", + "k8s.io/api/authentication/v1", + "k8s.io/api/batch/v1", + "k8s.io/api/core/v1", + "k8s.io/api/rbac/v1", + "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset", + "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/fake", + "k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions", + "k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/apiextensions/v1beta1", + "k8s.io/apimachinery/pkg/api/equality", + "k8s.io/apimachinery/pkg/api/errors", + "k8s.io/apimachinery/pkg/api/meta", + "k8s.io/apimachinery/pkg/api/resource", + "k8s.io/apimachinery/pkg/api/validation", + "k8s.io/apimachinery/pkg/apis/meta/v1", + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured", + "k8s.io/apimachinery/pkg/labels", + "k8s.io/apimachinery/pkg/runtime", + "k8s.io/apimachinery/pkg/runtime/schema", + "k8s.io/apimachinery/pkg/runtime/serializer", + "k8s.io/apimachinery/pkg/selection", + "k8s.io/apimachinery/pkg/types", + "k8s.io/apimachinery/pkg/util/runtime", + "k8s.io/apimachinery/pkg/util/sets", + "k8s.io/apimachinery/pkg/util/sets/types", + "k8s.io/apimachinery/pkg/util/validation", + "k8s.io/apimachinery/pkg/util/wait", + "k8s.io/apimachinery/pkg/version", + "k8s.io/apimachinery/pkg/watch", + "k8s.io/client-go/discovery", + "k8s.io/client-go/discovery/fake", + "k8s.io/client-go/dynamic", + "k8s.io/client-go/dynamic/fake", + "k8s.io/client-go/informers", + "k8s.io/client-go/informers/apps/v1", + "k8s.io/client-go/informers/autoscaling/v1", + "k8s.io/client-go/informers/autoscaling/v2beta1", + "k8s.io/client-go/informers/batch/v1", + "k8s.io/client-go/informers/core/v1", + "k8s.io/client-go/informers/rbac/v1", + "k8s.io/client-go/kubernetes", + "k8s.io/client-go/kubernetes/fake", + "k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1", + "k8s.io/client-go/kubernetes/typed/core/v1", + "k8s.io/client-go/rest", + "k8s.io/client-go/testing", + "k8s.io/client-go/tools/cache", + "k8s.io/client-go/tools/clientcmd", + "k8s.io/client-go/tools/record", + "k8s.io/client-go/util/flowcontrol", + "k8s.io/client-go/util/workqueue", + "k8s.io/code-generator/cmd/client-gen", + "k8s.io/code-generator/cmd/client-gen/generators/util", + "k8s.io/code-generator/cmd/client-gen/types", + "k8s.io/code-generator/cmd/deepcopy-gen", + "k8s.io/code-generator/cmd/defaulter-gen", + "k8s.io/code-generator/cmd/informer-gen", + "k8s.io/code-generator/cmd/lister-gen", + "k8s.io/code-generator/pkg/util", + "k8s.io/gengo/args", + "k8s.io/gengo/generator", + "k8s.io/gengo/namer", + "k8s.io/gengo/types", + "k8s.io/klog", + ] + solver-name = "gps-cdcl" + 
solver-version = 1 diff --git a/vendor/github.com/knative/pkg/Gopkg.toml b/vendor/github.com/knative/pkg/Gopkg.toml new file mode 100644 index 000000000..4403eaaec --- /dev/null +++ b/vendor/github.com/knative/pkg/Gopkg.toml @@ -0,0 +1,78 @@ +# Refer to https://golang.github.io/dep/docs/Gopkg.toml.html +# for detailed Gopkg.toml documentation. + +required = [ + "k8s.io/apimachinery/pkg/util/sets/types", + "k8s.io/code-generator/cmd/deepcopy-gen", + "k8s.io/code-generator/cmd/defaulter-gen", + "k8s.io/code-generator/cmd/client-gen", + "k8s.io/code-generator/cmd/lister-gen", + "k8s.io/code-generator/cmd/informer-gen", + "github.com/evanphx/json-patch", + "github.com/knative/test-infra/scripts", + "github.com/knative/test-infra/tools/dep-collector", + "github.com/pkg/errors", +] + +[[constraint]] + name = "k8s.io/api" + version = "kubernetes-1.12.6" + +[[override]] + name = "k8s.io/apiextensions-apiserver" + version = "kubernetes-1.12.6" + +[[constraint]] + name = "k8s.io/apimachinery" + version = "kubernetes-1.12.6" + +[[constraint]] + name = "k8s.io/client-go" + version = "kubernetes-1.12.6" + +[[constraint]] + name = "k8s.io/code-generator" + version = "kubernetes-1.12.6" + +[[override]] + name = "github.com/json-iterator/go" + # This is the commit at which k8s depends on this in 1.11 + # It seems to be broken at HEAD. + revision = "f2b4162afba35581b6d4a50d3b8f34e33c144682" + +[[override]] + name = "go.uber.org/zap" + revision = "67bc79d13d155c02fd008f721863ff8cc5f30659" + +[[constraint]] + name = "github.com/google/go-cmp" + # HEAD as of 2019-04-09 + revision = "6f77996f0c42f7b84e5a2b252227263f93432e9b" + +[[override]] + name = "github.com/rogpeppe/go-internal" + # HEAD as of 2019-01-09 + # Needed because release 1.0.0 does not contain a LICENSE file + revision = "4bbc89b6501cca7dd6b5557d78d70c8d2c6e8b97" + +[[constraint]] + name = "contrib.go.opencensus.io/exporter/stackdriver" + version = "0.9.2" + +[[constraint]] + name = "github.com/knative/test-infra" + branch = "master" + +[prune] + go-tests = true + unused-packages = true + non-go = true + +[[prune.project]] + name = "k8s.io/code-generator" + unused-packages = false + non-go = false + +[[prune.project]] + name = "github.com/knative/test-infra" + non-go = false diff --git a/vendor/github.com/knative/pkg/OWNERS b/vendor/github.com/knative/pkg/OWNERS new file mode 100644 index 000000000..030df98f7 --- /dev/null +++ b/vendor/github.com/knative/pkg/OWNERS @@ -0,0 +1,4 @@ +# The OWNERS file is used by prow to automatically merge approved PRs. 
+ +approvers: +- pkg-approvers diff --git a/vendor/github.com/knative/pkg/OWNERS_ALIASES b/vendor/github.com/knative/pkg/OWNERS_ALIASES new file mode 100644 index 000000000..beee78a4f --- /dev/null +++ b/vendor/github.com/knative/pkg/OWNERS_ALIASES @@ -0,0 +1,65 @@ +aliases: + pkg-approvers: + - evankanderson + - mattmoor + - vaikas-google + + apis-approvers: + - mattmoor + - vaikas-google + - n3wscott + + apis-istio-approvers: + - tcnghia + + apis-duck-approvers: + - mattmoor + - vaikas-google + + cloudevents-approvers: + - n3wscott + - vaikas-google + + configmap-approvers: + - mattmoor + - mdemirhan + + controller-approvers: + - mattmoor + - grantr + - tcnghia + + kmeta-approvers: + - mattmoor + - jonjohnsonjr + + logging-approvers: + - mdemirhan + - n3wscott + - yanweiguo + + metrics-approvers: + - mdemirhan + - yanweiguo + + productivity-approvers: + - adrcunha + - chaodaiG + - srinivashegde86 + productivity-reviewers: + - adrcunha + - chaodaiG + - coryrc + - dushyanthsc + - ericKlawitter + - Fredy-Z + - nbarthwal + - srinivashegde86 + - steuhs + - yt3liu + + webhook-approvers: + - mattmoor + - grantr + - tcnghia + diff --git a/vendor/github.com/knative/pkg/README.md b/vendor/github.com/knative/pkg/README.md new file mode 100644 index 000000000..b0098ca9c --- /dev/null +++ b/vendor/github.com/knative/pkg/README.md @@ -0,0 +1,13 @@ +# Knative Common Packages + +[![GoDoc](https://godoc.org/github.com/knative/pkg?status.svg)](https://godoc.org/github.com/knative/pkg) +[![Go Report Card](https://goreportcard.com/badge/knative/pkg)](https://goreportcard.com/report/knative/pkg) + +Knative `pkg` provides a place for sharing common Knative packages across the +Knative repos. + +To learn more about Knative, please visit our +[Knative docs](https://github.com/knative/docs) repository. + +If you are interested in contributing, see [CONTRIBUTING.md](./CONTRIBUTING.md) +and [DEVELOPMENT.md](./DEVELOPMENT.md). diff --git a/vendor/github.com/knative/pkg/apis/OWNERS b/vendor/github.com/knative/pkg/apis/OWNERS new file mode 100644 index 000000000..a25420ebc --- /dev/null +++ b/vendor/github.com/knative/pkg/apis/OWNERS @@ -0,0 +1,4 @@ +# The OWNERS file is used by prow to automatically merge approved PRs. + +approvers: +- apis-approvers diff --git a/vendor/github.com/knative/pkg/apis/duck/OWNERS b/vendor/github.com/knative/pkg/apis/duck/OWNERS new file mode 100644 index 000000000..ad4d83c51 --- /dev/null +++ b/vendor/github.com/knative/pkg/apis/duck/OWNERS @@ -0,0 +1,4 @@ +# The OWNERS file is used by prow to automatically merge approved PRs. + +approvers: +- apis-duck-approvers diff --git a/vendor/github.com/knative/pkg/apis/duck/v1alpha1/addressable_types.go b/vendor/github.com/knative/pkg/apis/duck/v1alpha1/addressable_types.go new file mode 100644 index 000000000..75ab2f573 --- /dev/null +++ b/vendor/github.com/knative/pkg/apis/duck/v1alpha1/addressable_types.go @@ -0,0 +1,113 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + + "github.com/knative/pkg/apis" + "github.com/knative/pkg/apis/duck" + "github.com/knative/pkg/apis/duck/v1beta1" +) + +// Addressable provides a generic mechanism for a custom resource +// definition to indicate a destination for message delivery. + +// Addressable is the schema for the destination information. This is +// typically stored in the object's `status`, as this information may +// be generated by the controller. +type Addressable struct { + v1beta1.Addressable `json:",omitempty"` + + Hostname string `json:"hostname,omitempty"` +} + +// Addressable is an Implementable "duck type". +var _ duck.Implementable = (*Addressable)(nil) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// AddressableType is a skeleton type wrapping Addressable in the manner we expect +// resource writers defining compatible resources to embed it. We will +// typically use this type to deserialize Addressable ObjectReferences and +// access the Addressable data. This is not a real resource. +type AddressableType struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Status AddressStatus `json:"status"` +} + +// AddressStatus shows how we expect folks to embed Addressable in +// their Status field. +type AddressStatus struct { + Address *Addressable `json:"address,omitempty"` +} + +var ( + // Verify AddressableType resources meet duck contracts. + _ duck.Populatable = (*AddressableType)(nil) + _ apis.Listable = (*AddressableType)(nil) +) + +// GetFullType implements duck.Implementable +func (*Addressable) GetFullType() duck.Populatable { + return &AddressableType{} +} + +// Populate implements duck.Populatable +func (t *AddressableType) Populate() { + t.Status = AddressStatus{ + &Addressable{ + // Populate ALL fields + Addressable: v1beta1.Addressable{ + URL: &apis.URL{ + Scheme: "http", + Host: "foo.bar.svc.cluster.local", + }, + }, + Hostname: "this is not empty", + }, + } +} + +func (a Addressable) GetURL() apis.URL { + if a.URL != nil { + return *a.URL + } + return apis.URL{ + Scheme: "http", + Host: a.Hostname, + } +} + +// GetListType implements apis.Listable +func (*AddressableType) GetListType() runtime.Object { + return &AddressableTypeList{} +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// AddressableTypeList is a list of AddressableType resources +type AddressableTypeList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + Items []AddressableType `json:"items"` +} diff --git a/vendor/github.com/knative/pkg/apis/duck/v1alpha1/condition_set.go b/vendor/github.com/knative/pkg/apis/duck/v1alpha1/condition_set.go new file mode 100644 index 000000000..72d4bf605 --- /dev/null +++ b/vendor/github.com/knative/pkg/apis/duck/v1alpha1/condition_set.go @@ -0,0 +1,388 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
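An illustrative note on the Addressable duck type vendored above: GetURL prefers the structured v1beta1 URL and only falls back to synthesizing an http URL from the legacy Hostname field. A minimal sketch of that behavior (the hostnames are made up for illustration and are not part of this patch):

package main

import (
	"fmt"

	"github.com/knative/pkg/apis"
	duckv1alpha1 "github.com/knative/pkg/apis/duck/v1alpha1"
	"github.com/knative/pkg/apis/duck/v1beta1"
)

func main() {
	// Legacy producers may populate only Hostname; GetURL synthesizes http://<hostname>.
	legacy := duckv1alpha1.Addressable{Hostname: "svc.default.svc.cluster.local"}
	lu := legacy.GetURL()
	fmt.Println(lu.String()) // http://svc.default.svc.cluster.local

	// When the embedded v1beta1 URL is set, it takes precedence over Hostname.
	modern := duckv1alpha1.Addressable{
		Addressable: v1beta1.Addressable{
			URL: &apis.URL{Scheme: "https", Host: "foo.bar.svc.cluster.local"},
		},
		Hostname: "ignored.example.com",
	}
	mu := modern.GetURL()
	fmt.Println(mu.String()) // https://foo.bar.svc.cluster.local
}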
+*/ + +package v1alpha1 + +import ( + "reflect" + "sort" + "time" + + "fmt" + + "github.com/knative/pkg/apis" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// ConditionsAccessor is the interface for a Resource that implements the getter and +// setter for accessing a Condition collection. +// +k8s:deepcopy-gen=true +type ConditionsAccessor interface { + GetConditions() Conditions + SetConditions(Conditions) +} + +// ConditionSet is an abstract collection of the possible ConditionType values +// that a particular resource might expose. It also holds the "happy condition" +// for that resource, which we define to be one of Ready or Succeeded depending +// on whether it is a Living or Batch process respectively. +// +k8s:deepcopy-gen=false +type ConditionSet struct { + happy ConditionType + dependents []ConditionType +} + +// ConditionManager allows a resource to operate on its Conditions using higher +// order operations. +type ConditionManager interface { + // IsHappy looks at the happy condition and returns true if that condition is + // set to true. + IsHappy() bool + + // GetCondition finds and returns the Condition that matches the ConditionType + // previously set on Conditions. + GetCondition(t ConditionType) *Condition + + // SetCondition sets or updates the Condition on Conditions for Condition.Type. + // If there is an update, Conditions are stored back sorted. + SetCondition(new Condition) + + // MarkTrue sets the status of t to true, and then marks the happy condition to + // true if all dependents are true. + MarkTrue(t ConditionType) + + // MarkUnknown sets the status of t to Unknown and also sets the happy condition + // to Unknown if no other dependent condition is in an error state. + MarkUnknown(t ConditionType, reason, messageFormat string, messageA ...interface{}) + + // MarkFalse sets the status of t and the happy condition to False. + MarkFalse(t ConditionType, reason, messageFormat string, messageA ...interface{}) + + // InitializeConditions updates all Conditions in the ConditionSet to Unknown + // if not set. + InitializeConditions() + + // InitializeCondition updates a Condition to Unknown if not set. + InitializeCondition(t ConditionType) +} + +// NewLivingConditionSet returns a ConditionSet to hold the conditions for the +// living resource. ConditionReady is used as the happy condition. +// The set of condition types provided are those of the terminal subconditions. +func NewLivingConditionSet(d ...ConditionType) ConditionSet { + return newConditionSet(ConditionReady, d...) +} + +// NewBatchConditionSet returns a ConditionSet to hold the conditions for the +// batch resource. ConditionSucceeded is used as the happy condition. +// The set of condition types provided are those of the terminal subconditions. +func NewBatchConditionSet(d ...ConditionType) ConditionSet { + return newConditionSet(ConditionSucceeded, d...) +} + +// newConditionSet returns a ConditionSet to hold the conditions that are +// important for the caller. The first ConditionType is the overarching status +// that will be used to signal that the resource's status is Ready or Succeeded.
+func newConditionSet(happy ConditionType, dependents ...ConditionType) ConditionSet { + var deps []ConditionType + for _, d := range dependents { + // Skip duplicates + if d == happy || contains(deps, d) { + continue + } + deps = append(deps, d) + } + return ConditionSet{ + happy: happy, + dependents: deps, + } +} + +func contains(ct []ConditionType, t ConditionType) bool { + for _, c := range ct { + if c == t { + return true + } + } + return false +} + +// Check that conditionsImpl implements ConditionManager. +var _ ConditionManager = (*conditionsImpl)(nil) + +// conditionsImpl implements the helper methods for evaluating Conditions. +// +k8s:deepcopy-gen=false +type conditionsImpl struct { + ConditionSet + accessor ConditionsAccessor +} + +// Manage creates a ConditionManager from a accessor object using the original +// ConditionSet as a reference. Status must be or point to a struct. +func (r ConditionSet) Manage(status interface{}) ConditionManager { + + // First try to see if status implements ConditionsAccessor + ca, ok := status.(ConditionsAccessor) + if ok { + return conditionsImpl{ + accessor: ca, + ConditionSet: r, + } + } + + // Next see if we can use reflection to gain access to Conditions + ca = NewReflectedConditionsAccessor(status) + if ca != nil { + return conditionsImpl{ + accessor: ca, + ConditionSet: r, + } + } + + // We tried. This object is not understood by the condition manager. + //panic(fmt.Sprintf("Error converting %T into a ConditionsAccessor", status)) + // TODO: not sure which way. using panic above means passing nil status panics the system. + return conditionsImpl{ + ConditionSet: r, + } +} + +// IsHappy looks at the happy condition and returns true if that condition is +// set to true. +func (r conditionsImpl) IsHappy() bool { + if c := r.GetCondition(r.happy); c == nil || !c.IsTrue() { + return false + } + return true +} + +// GetCondition finds and returns the Condition that matches the ConditionType +// previously set on Conditions. +func (r conditionsImpl) GetCondition(t ConditionType) *Condition { + if r.accessor == nil { + return nil + } + + for _, c := range r.accessor.GetConditions() { + if c.Type == t { + return &c + } + } + return nil +} + +// SetCondition sets or updates the Condition on Conditions for Condition.Type. +// If there is an update, Conditions are stored back sorted. +func (r conditionsImpl) SetCondition(new Condition) { + if r.accessor == nil { + return + } + t := new.Type + var conditions Conditions + for _, c := range r.accessor.GetConditions() { + if c.Type != t { + conditions = append(conditions, c) + } else { + // If we'd only update the LastTransitionTime, then return. + new.LastTransitionTime = c.LastTransitionTime + if reflect.DeepEqual(&new, &c) { + return + } + } + } + new.LastTransitionTime = apis.VolatileTime{Inner: metav1.NewTime(time.Now())} + conditions = append(conditions, new) + // Sorted for convenience of the consumer, i.e. kubectl. 
+ sort.Slice(conditions, func(i, j int) bool { return conditions[i].Type < conditions[j].Type }) + r.accessor.SetConditions(conditions) +} + +func (r conditionsImpl) isTerminal(t ConditionType) bool { + for _, cond := range r.dependents { + if cond == t { + return true + } + } + + if t == r.happy { + return true + } + + return false +} + +func (r conditionsImpl) severity(t ConditionType) ConditionSeverity { + if r.isTerminal(t) { + return ConditionSeverityError + } + return ConditionSeverityInfo +} + +// MarkTrue sets the status of t to true, and then marks the happy condition to +// true if all other dependents are also true. +func (r conditionsImpl) MarkTrue(t ConditionType) { + // set the specified condition + r.SetCondition(Condition{ + Type: t, + Status: corev1.ConditionTrue, + Severity: r.severity(t), + }) + + // check the dependents. + for _, cond := range r.dependents { + c := r.GetCondition(cond) + // Failed or Unknown conditions trump true conditions + if !c.IsTrue() { + return + } + } + + // set the happy condition + r.SetCondition(Condition{ + Type: r.happy, + Status: corev1.ConditionTrue, + Severity: r.severity(r.happy), + }) +} + +// MarkUnknown sets the status of t to Unknown and also sets the happy condition +// to Unknown if no other dependent condition is in an error state. +func (r conditionsImpl) MarkUnknown(t ConditionType, reason, messageFormat string, messageA ...interface{}) { + // set the specified condition + r.SetCondition(Condition{ + Type: t, + Status: corev1.ConditionUnknown, + Reason: reason, + Message: fmt.Sprintf(messageFormat, messageA...), + Severity: r.severity(t), + }) + + // check the dependents. + isDependent := false + for _, cond := range r.dependents { + c := r.GetCondition(cond) + // Failed conditions trump Unknown conditions + if c.IsFalse() { + // Double check that the happy condition is also false. + happy := r.GetCondition(r.happy) + if !happy.IsFalse() { + r.MarkFalse(r.happy, reason, messageFormat, messageA...) + } + return + } + if cond == t { + isDependent = true + } + } + + if isDependent { + // set the happy condition, if it is one of our dependent subconditions. + r.SetCondition(Condition{ + Type: r.happy, + Status: corev1.ConditionUnknown, + Reason: reason, + Message: fmt.Sprintf(messageFormat, messageA...), + Severity: r.severity(r.happy), + }) + } +} + +// MarkFalse sets the status of t and the happy condition to False. +func (r conditionsImpl) MarkFalse(t ConditionType, reason, messageFormat string, messageA ...interface{}) { + types := []ConditionType{t} + for _, cond := range r.dependents { + if cond == t { + types = append(types, r.happy) + } + } + + for _, t := range types { + r.SetCondition(Condition{ + Type: t, + Status: corev1.ConditionFalse, + Reason: reason, + Message: fmt.Sprintf(messageFormat, messageA...), + Severity: r.severity(t), + }) + } +} + +// InitializeConditions updates all Conditions in the ConditionSet to Unknown +// if not set. +func (r conditionsImpl) InitializeConditions() { + for _, t := range r.dependents { + r.InitializeCondition(t) + } + r.InitializeCondition(r.happy) +} + +// InitializeCondition updates a Condition to Unknown if not set. +func (r conditionsImpl) InitializeCondition(t ConditionType) { + if c := r.GetCondition(t); c == nil { + r.SetCondition(Condition{ + Type: t, + Status: corev1.ConditionUnknown, + Severity: r.severity(t), + }) + } +} + +// NewReflectedConditionsAccessor uses reflection to return a ConditionsAccessor +// to access the field called "Conditions". 
+func NewReflectedConditionsAccessor(status interface{}) ConditionsAccessor { + statusValue := reflect.Indirect(reflect.ValueOf(status)) + + // If status is not a struct, don't even try to use it. + if statusValue.Kind() != reflect.Struct { + return nil + } + + conditionsField := statusValue.FieldByName("Conditions") + + if conditionsField.IsValid() && conditionsField.CanInterface() && conditionsField.CanSet() { + if _, ok := conditionsField.Interface().(Conditions); ok { + return &reflectedConditionsAccessor{ + conditions: conditionsField, + } + } + } + return nil +} + +// reflectedConditionsAccessor is an internal wrapper object to act as the +// ConditionsAccessor for status objects that do not implement ConditionsAccessor +// directly, but do expose the field using the "Conditions" field name. +type reflectedConditionsAccessor struct { + conditions reflect.Value +} + +// GetConditions uses reflection to return Conditions from the held status object. +func (r *reflectedConditionsAccessor) GetConditions() Conditions { + if r != nil && r.conditions.IsValid() && r.conditions.CanInterface() { + if conditions, ok := r.conditions.Interface().(Conditions); ok { + return conditions + } + } + return Conditions(nil) +} + +// SetConditions uses reflection to set Conditions on the held status object. +func (r *reflectedConditionsAccessor) SetConditions(conditions Conditions) { + if r != nil && r.conditions.IsValid() && r.conditions.CanSet() { + r.conditions.Set(reflect.ValueOf(conditions)) + } +} diff --git a/vendor/github.com/knative/pkg/apis/duck/v1alpha1/conditions_types.go b/vendor/github.com/knative/pkg/apis/duck/v1alpha1/conditions_types.go new file mode 100644 index 000000000..b82de3c4c --- /dev/null +++ b/vendor/github.com/knative/pkg/apis/duck/v1alpha1/conditions_types.go @@ -0,0 +1,202 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "time" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + + "github.com/knative/pkg/apis" + "github.com/knative/pkg/apis/duck" +) + +// Conditions is the schema for the conditions portion of the payload +type Conditions []Condition + +// ConditionType is a camel-cased condition type. +type ConditionType string + +const ( + // ConditionReady specifies that the resource is ready. + // For long-running resources. + ConditionReady ConditionType = "Ready" + // ConditionSucceeded specifies that the resource has finished. + // For resource which run to completion. + ConditionSucceeded ConditionType = "Succeeded" +) + +// ConditionSeverity expresses the severity of a Condition Type failing. +type ConditionSeverity string + +const ( + // ConditionSeverityError specifies that a failure of a condition type + // should be viewed as an error. As "Error" is the default for conditions + // we use the empty string (coupled with omitempty) to avoid confusion in + // the case where the condition is in state "True" (aka nothing is wrong). 
+ ConditionSeverityError ConditionSeverity = "" + // ConditionSeverityWarning specifies that a failure of a condition type + // should be viewed as a warning, but that things could still work. + ConditionSeverityWarning ConditionSeverity = "Warning" + // ConditionSeverityInfo specifies that a failure of a condition type + // should be viewed as purely informational, and that things could still work. + ConditionSeverityInfo ConditionSeverity = "Info" +) + +// Conditions defines a readiness condition for a Knative resource. +// See: https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#typical-status-properties +// +k8s:deepcopy-gen=true +type Condition struct { + // Type of condition. + // +required + Type ConditionType `json:"type" description:"type of status condition"` + + // Status of the condition, one of True, False, Unknown. + // +required + Status corev1.ConditionStatus `json:"status" description:"status of the condition, one of True, False, Unknown"` + + // Severity with which to treat failures of this type of condition. + // When this is not specified, it defaults to Error. + // +optional + Severity ConditionSeverity `json:"severity,omitempty" description:"how to interpret failures of this condition, one of Error, Warning, Info"` + + // LastTransitionTime is the last time the condition transitioned from one status to another. + // We use VolatileTime in place of metav1.Time to exclude this from creating equality.Semantic + // differences (all other things held constant). + // +optional + LastTransitionTime apis.VolatileTime `json:"lastTransitionTime,omitempty" description:"last time the condition transit from one status to another"` + + // The reason for the condition's last transition. + // +optional + Reason string `json:"reason,omitempty" description:"one-word CamelCase reason for the condition's last transition"` + + // A human readable message indicating details about the transition. + // +optional + Message string `json:"message,omitempty" description:"human-readable message indicating details about last transition"` +} + +// IsTrue is true if the condition is True +func (c *Condition) IsTrue() bool { + if c == nil { + return false + } + return c.Status == corev1.ConditionTrue +} + +// IsFalse is true if the condition is False +func (c *Condition) IsFalse() bool { + if c == nil { + return false + } + return c.Status == corev1.ConditionFalse +} + +// IsUnknown is true if the condition is Unknown +func (c *Condition) IsUnknown() bool { + if c == nil { + return true + } + return c.Status == corev1.ConditionUnknown +} + +// Conditions is an Implementable "duck type". +var _ duck.Implementable = (*Conditions)(nil) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// KResource is a skeleton type wrapping Conditions in the manner we expect +// resource writers defining compatible resources to embed it. We will +// typically use this type to deserialize Conditions ObjectReferences and +// access the Conditions data. This is not a real resource. +type KResource struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Status Status `json:"status"` +} + +// Status shows how we expect folks to embed Conditions in +// their Status field. +// WARNING: Adding fields to this struct will add them to all Knative resources. +type Status struct { + // ObservedGeneration is the 'Generation' of the Service that + // was last processed by the controller. 
+ // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty"` + + // Conditions the latest available observations of a resource's current state. + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + Conditions Conditions `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"` +} + +// TODO: KResourceStatus is added for backwards compatibility for <= 0.4.0 releases. Remove later. +// KResourceStatus [Deprecated] use Status directly. Will be deleted ~0.6.0 release. +type KResourceStatus Status + +// In order for Conditions to be Implementable, KResource must be Populatable. +var _ duck.Populatable = (*KResource)(nil) + +// Ensure KResource satisfies apis.Listable +var _ apis.Listable = (*KResource)(nil) + +// GetFullType implements duck.Implementable +func (*Conditions) GetFullType() duck.Populatable { + return &KResource{} +} + +// GetCondition fetches the condition of the specified type. +func (s *Status) GetCondition(t ConditionType) *Condition { + for _, cond := range s.Conditions { + if cond.Type == t { + return &cond + } + } + return nil +} + +// Populate implements duck.Populatable +func (t *KResource) Populate() { + t.Status.ObservedGeneration = 42 + t.Status.Conditions = Conditions{{ + // Populate ALL fields + Type: "Birthday", + Status: corev1.ConditionTrue, + LastTransitionTime: apis.VolatileTime{Inner: metav1.NewTime(time.Date(1984, 02, 28, 18, 52, 00, 00, time.UTC))}, + Reason: "Celebrate", + Message: "n3wScott, find your party hat :tada:", + }} +} + +// GetListType implements apis.Listable +func (*KResource) GetListType() runtime.Object { + return &KResourceList{} +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// KResourceList is a list of KResource resources +type KResourceList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + Items []KResource `json:"items"` +} diff --git a/vendor/k8s.io/api/networking/v1beta1/doc.go b/vendor/github.com/knative/pkg/apis/duck/v1alpha1/doc.go similarity index 68% rename from vendor/k8s.io/api/networking/v1beta1/doc.go rename to vendor/github.com/knative/pkg/apis/duck/v1alpha1/doc.go index 12d3d4ff0..3638eb7a3 100644 --- a/vendor/k8s.io/api/networking/v1beta1/doc.go +++ b/vendor/github.com/knative/pkg/apis/duck/v1alpha1/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Kubernetes Authors. +Copyright 2018 The Knative Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,9 +14,10 @@ See the License for the specific language governing permissions and limitations under the License. 
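Taken together, condition_set.go and conditions_types.go above give resource authors a small state machine over status conditions: embed the duck Status (anything exposing a Conditions field works, via NewReflectedConditionsAccessor), declare a ConditionSet naming the happy condition's dependents, and drive it through the ConditionManager. A minimal sketch, using a hypothetical FooStatus type that is not part of this patch:

package main

import (
	"fmt"

	duckv1alpha1 "github.com/knative/pkg/apis/duck/v1alpha1"
)

// FooStatus embeds the duck Status, so ConditionSet.Manage can reach its
// Conditions field (through ConditionsAccessor methods if present, else
// through the reflection-based accessor).
type FooStatus struct {
	duckv1alpha1.Status
}

// fooCondSet is a living ConditionSet: Ready is the happy condition and
// DeploymentReady is its only terminal dependent.
var fooCondSet = duckv1alpha1.NewLivingConditionSet("DeploymentReady")

func main() {
	s := &FooStatus{}
	mgr := fooCondSet.Manage(s)

	mgr.InitializeConditions()
	fmt.Println(mgr.IsHappy()) // false: every condition starts out Unknown

	mgr.MarkTrue("DeploymentReady")
	fmt.Println(mgr.IsHappy()) // true: all dependents True, so Ready flips to True
}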
*/ -// +k8s:deepcopy-gen=package -// +k8s:protobuf-gen=package -// +k8s:openapi-gen=true -// +groupName=networking.k8s.io +// API versions allow the API contract for a resource to be changed while keeping +// backward compatibility by supporting multiple concurrent versions +// of the same resource -package v1beta1 // import "k8s.io/api/networking/v1beta1" +// +k8s:deepcopy-gen=package +// +groupName=duck.knative.dev +package v1alpha1 diff --git a/vendor/github.com/knative/pkg/apis/duck/v1alpha1/legacy_targetable_types.go b/vendor/github.com/knative/pkg/apis/duck/v1alpha1/legacy_targetable_types.go new file mode 100644 index 000000000..5e4d6f2e3 --- /dev/null +++ b/vendor/github.com/knative/pkg/apis/duck/v1alpha1/legacy_targetable_types.go @@ -0,0 +1,95 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + + "github.com/knative/pkg/apis" + "github.com/knative/pkg/apis/duck" +) + +// LegacyTargetable left around until we migrate to Addressable in the +// dependent resources. Addressable has more structure in the way it +// defines the fields. LegacyTargetable only assumed a single string +// in the Status field and we're moving towards defining proper structs +// under Status rather than strings. +// This is to support existing resources until they migrate. +// +// Do not use this for anything new, use Addressable + +// LegacyTargetable is the old schema for the addressable portion +// of the payload +// +// For new resources use Addressable. +type LegacyTargetable struct { + DomainInternal string `json:"domainInternal,omitempty"` +} + +// LegacyTargetable is an Implementable "duck type". +var _ duck.Implementable = (*LegacyTargetable)(nil) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// LegacyTarget is a skeleton type wrapping LegacyTargetable in the manner we +// want to support unless they get migrated into supporting Legacy. +// We will typically use this type to deserialize LegacyTargetable +// ObjectReferences and access the LegacyTargetable data. This is not a +// real resource. +// ** Do not use this for any new resources ** +type LegacyTarget struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Status LegacyTargetable `json:"status"` +} + +// In order for LegacyTargetable to be Implementable, LegacyTarget must be Populatable.
+var _ duck.Populatable = (*LegacyTarget)(nil) + +// Ensure LegacyTarget satisfies apis.Listable +var _ apis.Listable = (*LegacyTarget)(nil) + +// GetFullType implements duck.Implementable +func (*LegacyTargetable) GetFullType() duck.Populatable { + return &LegacyTarget{} +} + +// Populate implements duck.Populatable +func (t *LegacyTarget) Populate() { + t.Status = LegacyTargetable{ + // Populate ALL fields + DomainInternal: "this is not empty", + } +} + +// GetListType implements apis.Listable +func (*LegacyTarget) GetListType() runtime.Object { + return &LegacyTargetList{} +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// LegacyTargetList is a list of LegacyTarget resources +type LegacyTargetList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + Items []LegacyTarget `json:"items"` +} diff --git a/vendor/github.com/knative/pkg/apis/duck/v1alpha1/register.go b/vendor/github.com/knative/pkg/apis/duck/v1alpha1/register.go new file mode 100644 index 000000000..4bb344f2a --- /dev/null +++ b/vendor/github.com/knative/pkg/apis/duck/v1alpha1/register.go @@ -0,0 +1,59 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "github.com/knative/pkg/apis/duck" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: duck.GroupName, Version: "v1alpha1"} + +// Kind takes an unqualified kind and returns back a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes( + SchemeGroupVersion, + &KResource{}, + (&KResource{}).GetListType(), + &AddressableType{}, + (&AddressableType{}).GetListType(), + &Target{}, + (&Target{}).GetListType(), + &LegacyTarget{}, + (&LegacyTarget{}).GetListType(), + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/github.com/knative/pkg/apis/duck/v1alpha1/retired_targetable_types.go b/vendor/github.com/knative/pkg/apis/duck/v1alpha1/retired_targetable_types.go new file mode 100644 index 000000000..0e91aef8a --- /dev/null +++ b/vendor/github.com/knative/pkg/apis/duck/v1alpha1/retired_targetable_types.go @@ -0,0 +1,99 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
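register.go above uses the standard scheme-builder pattern: addKnownTypes registers each duck kind and its List type under the duck.knative.dev/v1alpha1 group version. A minimal sketch of wiring that into a runtime.Scheme (purely illustrative):

package main

import (
	"fmt"

	duckv1alpha1 "github.com/knative/pkg/apis/duck/v1alpha1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	scheme := runtime.NewScheme()
	// AddToScheme runs addKnownTypes, registering KResource, AddressableType,
	// Target, and LegacyTarget (plus their list types) and the shared meta kinds.
	if err := duckv1alpha1.AddToScheme(scheme); err != nil {
		panic(err)
	}
	for gvk := range scheme.AllKnownTypes() {
		fmt.Println(gvk)
	}
}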
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + + "github.com/knative/pkg/apis" + "github.com/knative/pkg/apis/duck" +) + +// Targetable is an earlier version of the Callable interface. +// Callable is a higher-level interface which implements Addressable +// but further promises that the destination may synchronously return +// response messages in reply to a message. +// +// Targetable implementations should instead implement Addressable and +// include an `eventing.knative.dev/returns=any` annotation. + +// Targetable is retired; implement Addressable for now. +type Targetable struct { + DomainInternal string `json:"domainInternal,omitempty"` +} + +// Targetable is an Implementable "duck type". +var _ duck.Implementable = (*Targetable)(nil) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Target is a skeleton type wrapping Targetable in the manner we expect +// resource writers defining compatible resources to embed it. We will +// typically use this type to deserialize Targetable ObjectReferences and +// access the Targetable data. This is not a real resource. +type Target struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Status TargetStatus `json:"status"` +} + +// TargetStatus shows how we expect folks to embed Targetable in +// their Status field. +type TargetStatus struct { + Targetable *Targetable `json:"targetable,omitempty"` +} + +var ( + // In order for Targetable to be Implementable, Target must be Populatable. + _ duck.Populatable = (*Target)(nil) + + // Ensure Target satisfies apis.Listable + _ apis.Listable = (*Target)(nil) +) + +// GetFullType implements duck.Implementable +func (*Targetable) GetFullType() duck.Populatable { + return &Target{} +} + +// Populate implements duck.Populatable +func (t *Target) Populate() { + t.Status = TargetStatus{ + &Targetable{ + // Populate ALL fields + DomainInternal: "this is not empty", + }, + } +} + +// GetListType implements apis.Listable +func (*Target) GetListType() runtime.Object { + return &TargetList{} +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// TargetList is a list of Target resources +type TargetList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + Items []Target `json:"items"` +} diff --git a/vendor/github.com/knative/pkg/apis/duck/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/knative/pkg/apis/duck/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 000000000..a59e67ce3 --- /dev/null +++ b/vendor/github.com/knative/pkg/apis/duck/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,441 @@ +// +build !ignore_autogenerated + +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AddressStatus) DeepCopyInto(out *AddressStatus) { + *out = *in + if in.Address != nil { + in, out := &in.Address, &out.Address + *out = new(Addressable) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AddressStatus. +func (in *AddressStatus) DeepCopy() *AddressStatus { + if in == nil { + return nil + } + out := new(AddressStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Addressable) DeepCopyInto(out *Addressable) { + *out = *in + in.Addressable.DeepCopyInto(&out.Addressable) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Addressable. +func (in *Addressable) DeepCopy() *Addressable { + if in == nil { + return nil + } + out := new(Addressable) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AddressableType) DeepCopyInto(out *AddressableType) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AddressableType. +func (in *AddressableType) DeepCopy() *AddressableType { + if in == nil { + return nil + } + out := new(AddressableType) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AddressableType) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AddressableTypeList) DeepCopyInto(out *AddressableTypeList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]AddressableType, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AddressableTypeList. +func (in *AddressableTypeList) DeepCopy() *AddressableTypeList { + if in == nil { + return nil + } + out := new(AddressableTypeList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AddressableTypeList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Condition) DeepCopyInto(out *Condition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Condition. +func (in *Condition) DeepCopy() *Condition { + if in == nil { + return nil + } + out := new(Condition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in Conditions) DeepCopyInto(out *Conditions) { + { + in := &in + *out = make(Conditions, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Conditions. +func (in Conditions) DeepCopy() Conditions { + if in == nil { + return nil + } + out := new(Conditions) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KResource) DeepCopyInto(out *KResource) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KResource. +func (in *KResource) DeepCopy() *KResource { + if in == nil { + return nil + } + out := new(KResource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *KResource) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KResourceList) DeepCopyInto(out *KResourceList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]KResource, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KResourceList. +func (in *KResourceList) DeepCopy() *KResourceList { + if in == nil { + return nil + } + out := new(KResourceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *KResourceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KResourceStatus) DeepCopyInto(out *KResourceStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make(Conditions, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KResourceStatus. +func (in *KResourceStatus) DeepCopy() *KResourceStatus { + if in == nil { + return nil + } + out := new(KResourceStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LegacyTarget) DeepCopyInto(out *LegacyTarget) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LegacyTarget. +func (in *LegacyTarget) DeepCopy() *LegacyTarget { + if in == nil { + return nil + } + out := new(LegacyTarget) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LegacyTarget) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LegacyTargetList) DeepCopyInto(out *LegacyTargetList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]LegacyTarget, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LegacyTargetList. +func (in *LegacyTargetList) DeepCopy() *LegacyTargetList { + if in == nil { + return nil + } + out := new(LegacyTargetList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LegacyTargetList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LegacyTargetable) DeepCopyInto(out *LegacyTargetable) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LegacyTargetable. +func (in *LegacyTargetable) DeepCopy() *LegacyTargetable { + if in == nil { + return nil + } + out := new(LegacyTargetable) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Status) DeepCopyInto(out *Status) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make(Conditions, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Status. +func (in *Status) DeepCopy() *Status { + if in == nil { + return nil + } + out := new(Status) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Target) DeepCopyInto(out *Target) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Target. +func (in *Target) DeepCopy() *Target { + if in == nil { + return nil + } + out := new(Target) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *Target) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TargetList) DeepCopyInto(out *TargetList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Target, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetList. +func (in *TargetList) DeepCopy() *TargetList { + if in == nil { + return nil + } + out := new(TargetList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *TargetList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TargetStatus) DeepCopyInto(out *TargetStatus) { + *out = *in + if in.Targetable != nil { + in, out := &in.Targetable, &out.Targetable + *out = new(Targetable) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetStatus. +func (in *TargetStatus) DeepCopy() *TargetStatus { + if in == nil { + return nil + } + out := new(TargetStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Targetable) DeepCopyInto(out *Targetable) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Targetable. +func (in *Targetable) DeepCopy() *Targetable { + if in == nil { + return nil + } + out := new(Targetable) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/knative/pkg/apis/istio/OWNERS b/vendor/github.com/knative/pkg/apis/istio/OWNERS new file mode 100644 index 000000000..c09668f13 --- /dev/null +++ b/vendor/github.com/knative/pkg/apis/istio/OWNERS @@ -0,0 +1,4 @@ +# The OWNERS file is used by prow to automatically merge approved PRs. + +approvers: +- apis-istio-approvers diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/generated_expansion.go b/vendor/github.com/knative/pkg/apis/istio/authentication/register.go similarity index 80% rename from vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/generated_expansion.go rename to vendor/github.com/knative/pkg/apis/istio/authentication/register.go index cc321329b..f54c7742d 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/generated_expansion.go +++ b/vendor/github.com/knative/pkg/apis/istio/authentication/register.go @@ -1,5 +1,5 @@ /* -Copyright The Kubernetes Authors. +Copyright 2018 The Knative Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,8 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by client-gen. DO NOT EDIT. 
+package authentication
 
-package v1
-
-type PriorityClassExpansion interface{}
+const (
+	GroupName = "authentication.istio.io"
+)
diff --git a/vendor/github.com/knative/test-infra/scripts/dummy.go b/vendor/github.com/knative/pkg/apis/istio/authentication/v1alpha1/doc.go
similarity index 63%
rename from vendor/github.com/knative/test-infra/scripts/dummy.go
rename to vendor/github.com/knative/pkg/apis/istio/authentication/v1alpha1/doc.go
index e6cc380fd..07b17599c 100644
--- a/vendor/github.com/knative/test-infra/scripts/dummy.go
+++ b/vendor/github.com/knative/pkg/apis/istio/authentication/v1alpha1/doc.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at
 
-    https://www.apache.org/licenses/LICENSE-2.0
+    http://www.apache.org/licenses/LICENSE-2.0
 
 Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,13 +14,9 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-package scripts
-
-import (
-	"fmt"
-)
-
-func main() {
-	fmt.Println("This is a dummy go file so `go dep` can be used with knative/test-infra/scripts")
-	fmt.Println("This file can be safely removed if one day this directory contains real, useful go code")
-}
+// API versions allow the API contract for a resource to be changed while
+// keeping backward compatibility by supporting multiple concurrent versions
+// of the same resource.
+// +k8s:deepcopy-gen=package
+// +groupName=authentication.istio.io
+package v1alpha1
diff --git a/vendor/github.com/knative/pkg/apis/istio/authentication/v1alpha1/policy_types.go b/vendor/github.com/knative/pkg/apis/istio/authentication/v1alpha1/policy_types.go
new file mode 100644
index 000000000..882b1fc3f
--- /dev/null
+++ b/vendor/github.com/knative/pkg/apis/istio/authentication/v1alpha1/policy_types.go
@@ -0,0 +1,345 @@
+/*
+Copyright 2018 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+	"github.com/knative/pkg/apis/istio/common/v1alpha1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// Policy
+type Policy struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+	Spec              PolicySpec `json:"spec"`
+}
+
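+// Editor's note: the sketch below is illustrative and not part of the
+// upstream Istio documentation. Assuming the sigs.k8s.io/yaml helper and a
+// hypothetical `manifest` byte slice, a Policy manifest like the examples
+// further down can be decoded straight into this Go type:
+//
+//	var p Policy                                // target of the decode
+//	if err := yaml.Unmarshal(manifest, &p); err != nil {
+//		// handle a malformed manifest
+//	}
+//	_ = p.Spec.Peers                            // typed access to the spec
+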
+// Policy defines what authentication methods can be accepted on workload(s),
+// and if authenticated, which method/certificate will set the request principal
+// (i.e. the request.auth.principal attribute).
+//
+// Authentication policy is composed of 2-part authentication:
+// - peer: verify caller service credentials. This part will set source.user
+// (peer identity).
+// - origin: verify the origin credentials. This part will set request.auth.user
+// (origin identity), as well as other attributes like request.auth.presenter,
+// request.auth.audiences and raw claims. Note that the identity could be
+// end-user, service account, device etc.
+//
+// Last but not least, the principal binding rule defines which identity (peer
+// or origin) should be used as principal. By default, it uses peer.
+//
+// Examples:
+//
+// Policy to enable mTLS for all services in namespace frod
+//
+// apiVersion: authentication.istio.io/v1alpha1
+// kind: Policy
+// metadata:
+//   name: mTLS_enable
+//   namespace: frod
+// spec:
+//   peers:
+//   - mtls:
+//
+// Policy to disable mTLS for "productpage" service
+//
+// apiVersion: authentication.istio.io/v1alpha1
+// kind: Policy
+// metadata:
+//   name: mTLS_disable
+//   namespace: frod
+// spec:
+//   targets:
+//   - name: productpage
+//
+// Policy to require mTLS for peer authentication, and JWT for origin authentication
+// for productpage:9000. Principal is set from origin identity.
+//
+// apiVersion: authentication.istio.io/v1alpha1
+// kind: Policy
+// metadata:
+//   name: mTLS_enable
+//   namespace: frod
+// spec:
+//   targets:
+//   - name: productpage
+//     ports:
+//     - number: 9000
+//   peers:
+//   - mtls:
+//   origins:
+//   - jwt:
+//       issuer: "https://securetoken.google.com"
+//       audiences:
+//       - "productpage"
+//       jwksUri: "https://www.googleapis.com/oauth2/v1/certs"
+//       jwt_headers:
+//       - "x-goog-iap-jwt-assertion"
+//   principalBinding: USE_ORIGIN
+//
+// Policy to require mTLS for peer authentication, and JWT for origin authentication
+// for productpage:9000, but allow requests for which origin authentication failed.
+// Principal is set from origin identity.
+// Note: this example can be used for use cases when we want to allow requests from
+// certain peers, given it comes with an appropriate authorization policy to check
+// and reject requests accordingly.
+//
+// apiVersion: authentication.istio.io/v1alpha1
+// kind: Policy
+// metadata:
+//   name: mTLS_enable
+//   namespace: frod
+// spec:
+//   targets:
+//   - name: productpage
+//     ports:
+//     - number: 9000
+//   peers:
+//   - mtls:
+//   origins:
+//   - jwt:
+//       issuer: "https://securetoken.google.com"
+//       audiences:
+//       - "productpage"
+//       jwksUri: "https://www.googleapis.com/oauth2/v1/certs"
+//       jwt_headers:
+//       - "x-goog-iap-jwt-assertion"
+//   originIsOptional: true
+//   principalBinding: USE_ORIGIN
+type PolicySpec struct {
+	// List of rules to select destinations that the policy should be applied on.
+	// If empty, the policy will be used on all destinations in the same namespace.
+	Targets []TargetSelector `json:"targets,omitempty"`
+
+	// List of authentication methods that can be used for peer authentication.
+	// They will be evaluated in order; the first valid one will be used to
+	// set the peer identity (source.user) and other peer attributes. If none of
+	// these methods pass, and the peer_is_optional flag is false (see below), the
+	// request will be rejected with an authentication failed error (401).
+	// Leave the list empty if peer authentication is not required.
+	Peers []PeerAuthenticationMethod `json:"peers,omitempty"`
+
+	// Set this flag to true to accept the request (from the peer authentication
+	// perspective), even when none of the peer authentication methods defined
+	// above is satisfied. Typically, this is used to delay the rejection decision
+	// to the next layer (e.g. authorization).
+	// This flag is ignored if no authentication is defined for peer (the peers
+	// field is empty).
+	PeerIsOptional bool `json:"peerIsOptional,omitempty"`
+
+	// List of authentication methods that can be used for origin authentication.
+	// Similar to peers, these will be evaluated in order; the first valid one
+	// will be used to set the origin identity and attributes (i.e. request.auth.user,
+	// request.auth.issuer etc). If none of these methods pass, and origin_is_optional
+	// is false (see below), the request will be rejected with an authentication
+	// failed error (401).
+	// Leave the list empty if origin authentication is not required.
+	Origins []OriginAuthenticationMethod `json:"origins,omitempty"`
+
+	// Set this flag to true to accept the request (from the origin authentication
+	// perspective), even when none of the origin authentication methods defined
+	// above is satisfied. Typically, this is used to delay the rejection decision
+	// to the next layer (e.g. authorization).
+	// This flag is ignored if no authentication is defined for origin (the origins
+	// field is empty).
+	OriginIsOptional bool `json:"originIsOptional,omitempty"`
+
+	// Defines whether the peer or origin identity should be used as the principal.
+	// Default value is USE_PEER.
+	// If the peer (or origin) identity is not available, either because peer/origin
+	// authentication is not defined or has failed, the principal will be left unset.
+	// In other words, the binding rule does not affect the decision to accept or
+	// reject the request.
+	PrincipalBinding PrincipalBinding `json:"principalBinding,omitempty"`
+}
+
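+// Editor's note: an illustrative (non-upstream) sketch showing how the
+// mTLS-plus-JWT YAML example above maps onto PolicySpec in Go; field values
+// are taken verbatim from that example:
+//
+//	spec := PolicySpec{
+//		Targets: []TargetSelector{{
+//			Name:  "productpage",
+//			Ports: []PortSelector{{Number: 9000}},
+//		}},
+//		Peers: []PeerAuthenticationMethod{{Mtls: &MutualTLS{}}},
+//		Origins: []OriginAuthenticationMethod{{Jwt: &Jwt{
+//			Issuer:     "https://securetoken.google.com",
+//			Audiences:  []string{"productpage"},
+//			JwksURI:    "https://www.googleapis.com/oauth2/v1/certs",
+//			JwtHeaders: []string{"x-goog-iap-jwt-assertion"},
+//		}}},
+//		PrincipalBinding: PrincipalBindingUserOrigin,
+//	}
+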
+// TargetSelector defines a matching rule to a service/destination.
+type TargetSelector struct {
+	// REQUIRED. The name must be a short name from the service registry. The
+	// fully qualified domain name will be resolved in a platform specific manner.
+	Name string `json:"name"`
+
+	// Specifies the ports on the destination. Leave empty to match all ports
+	// that are exposed.
+	Ports []PortSelector `json:"ports,omitempty"`
+}
+
+// PortSelector specifies the name or number of a port to be used for
+// matching targets for an authentication policy. This is copied from the
+// networking API to avoid a dependency.
+type PortSelector struct {
+	// It is required to specify exactly one of the fields:
+	// Number or Name
+
+	// Valid port number
+	Number uint32 `json:"number,omitempty"`
+
+	// Port name
+	Name string `json:"name,omitempty"`
+}
+
+// PeerAuthenticationMethod defines one particular type of authentication, e.g.
+// mutual TLS, JWT etc. (no authentication is one type by itself) that can
+// be used for peer authentication.
+// The type can be programmatically determined by checking the type of the
+// "params" field.
+type PeerAuthenticationMethod struct {
+	// It is required to specify exactly one of the fields:
+	// Mtls or Jwt
+	// Set if mTLS is used.
+	Mtls *MutualTLS `json:"mtls,omitempty"`
+
+	// Set if JWT is used. This option is not yet available.
+	Jwt *Jwt `json:"jwt,omitempty"`
+}
+
+// Defines the acceptable connection TLS mode.
+type Mode string
+
+const (
+	// Client cert must be presented; connection is in TLS.
+	ModeStrict Mode = "STRICT"
+
+	// Connection can be either plaintext or TLS, and the client cert can be omitted.
+	ModePermissive Mode = "PERMISSIVE"
+)
+
+// TLS authentication params.
+type MutualTLS struct {
+
+	// WILL BE DEPRECATED, if set, will translate to `TLS_PERMISSIVE` mode.
+	// Set this flag to true to allow regular TLS (i.e. without a client x509
+	// certificate). If the request carries a client certificate, the identity
+	// will be extracted and used (set as the peer identity). Otherwise, the
+	// peer identity will be left unset.
+	// When the flag is false (default), the request must have a client certificate.
+	AllowTLS bool `json:"allowTls,omitempty"`
+
+	// Defines the mode of mTLS authentication.
+	Mode Mode `json:"mode,omitempty"`
+}
+
+// JSON Web Token (JWT) token format for authentication as defined by
+// https://tools.ietf.org/html/rfc7519. See [OAuth
+// 2.0](https://tools.ietf.org/html/rfc6749) and [OIDC
+// 1.0](http://openid.net/connect) for how this is used in the whole
+// authentication flow.
+//
+// Example:
+//
+//	issuer: https://example.com
+//	audiences:
+//	- bookstore_android.apps.googleusercontent.com
+//	- bookstore_web.apps.googleusercontent.com
+//	jwksUri: https://example.com/.well-known/jwks.json
+//
+type Jwt struct {
+	// Identifies the issuer that issued the JWT. See
+	// [issuer](https://tools.ietf.org/html/rfc7519#section-4.1.1).
+	// Usually a URL or an email address.
+	//
+	// Example: https://securetoken.google.com
+	// Example: 1234567-compute@developer.gserviceaccount.com
+	Issuer string `json:"issuer,omitempty"`
+
+	// The list of JWT
+	// [audiences](https://tools.ietf.org/html/rfc7519#section-4.1.3)
+	// that are allowed to access. A JWT containing any of these
+	// audiences will be accepted.
+	//
+	// The service name will be accepted if audiences is empty.
+	//
+	// Example:
+	//
+	// ```yaml
+	// audiences:
+	// - bookstore_android.apps.googleusercontent.com
+	// - bookstore_web.apps.googleusercontent.com
+	// ```
+	Audiences []string `json:"audiences,omitempty"`
+
+	// URL of the provider's public key set to validate the signature of the
+	// JWT. See [OpenID
+	// Discovery](https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderMetadata).
+	//
+	// Optional if the key set document can either (a) be retrieved from
+	// [OpenID
+	// Discovery](https://openid.net/specs/openid-connect-discovery-1_0.html) of
+	// the issuer or (b) be inferred from the email domain of the issuer (e.g. a
+	// Google service account).
+	//
+	// Example: https://www.googleapis.com/oauth2/v1/certs
+	JwksURI string `json:"jwksUri,omitempty"`
+
+	// The two fields below define where to extract the JWT from an HTTP request.
+	//
+	// If no explicit location is specified, the following default
+	// locations are tried in order:
+	//
+	// 1) The Authorization header using the Bearer schema,
+	// e.g. Authorization: Bearer <token>. (see
+	// [Authorization Request Header
+	// Field](https://tools.ietf.org/html/rfc6750#section-2.1))
+	//
+	// 2) The `access_token` query parameter (see
+	// [URI Query Parameter](https://tools.ietf.org/html/rfc6750#section-2.3))
+
+	// JWT is sent in a request header. `header` represents the
+	// header name.
+	//
+	// For example, if `header=x-goog-iap-jwt-assertion`, the header
+	// format will be x-goog-iap-jwt-assertion: <token>.
+	JwtHeaders []string `json:"jwtHeaders,omitempty"`
+
+	// JWT is sent in a query parameter. `query` represents the
+	// query parameter name.
+	//
+	// For example, `query=jwt_token`.
+	JwtParams []string `json:"jwtParams,omitempty"`
+
+	// URL paths that should be excluded from JWT validation. If the request path
+	// is matched, the JWT validation will be skipped and the request will proceed
+	// regardless. This is useful to keep a couple of URLs public for external
+	// health checks. Example: "/health_check", "/status/cpu_usage".
+	ExcludedPaths []v1alpha1.StringMatch `json:"excludedPaths,omitempty"`
+}
+
+// OriginAuthenticationMethod defines the authentication method/params for origin
+// authentication. Origin could be end-user, device, delegate service etc.
+// Currently, only JWT is supported for origin authentication.
+type OriginAuthenticationMethod struct {
+	// Jwt params for the method.
+	Jwt *Jwt `json:"jwt,omitempty"`
+}
+
+// Associates authentication with the request principal.
+type PrincipalBinding string
+
+const (
+	// Principal will be set to the identity from peer authentication.
+	PrincipalBindingUserPeer PrincipalBinding = "USE_PEER"
+	// Principal will be set to the identity from origin authentication.
+	PrincipalBindingUserOrigin PrincipalBinding = "USE_ORIGIN"
+)
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// PolicyList is a list of Policy resources
+type PolicyList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata"`
+	Items           []Policy `json:"items"`
+}
diff --git a/vendor/k8s.io/api/auditregistration/v1alpha1/register.go b/vendor/github.com/knative/pkg/apis/istio/authentication/v1alpha1/register.go
similarity index 60%
rename from vendor/k8s.io/api/auditregistration/v1alpha1/register.go
rename to vendor/github.com/knative/pkg/apis/istio/authentication/v1alpha1/register.go
index d6271608f..7809d1cd9 100644
--- a/vendor/k8s.io/api/auditregistration/v1alpha1/register.go
+++ b/vendor/github.com/knative/pkg/apis/istio/authentication/v1alpha1/register.go
@@ -1,11 +1,11 @@
 /*
-Copyright 2018 The Kubernetes Authors.
+Copyright 2018 The Knative Authors
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at
 
-    http://www.apache.org/licenses/LICENSE-2.0
+    http://www.apache.org/licenses/LICENSE-2.0
 
 Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
@@ -17,16 +17,19 @@ limitations under the License.
 package v1alpha1
 
 import (
+	"github.com/knative/pkg/apis/istio/authentication"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 )
 
-// GroupName is the group name use in this package
-const GroupName = "auditregistration.k8s.io"
-
 // SchemeGroupVersion is group version used to register these objects
-var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"}
+var SchemeGroupVersion = schema.GroupVersion{Group: authentication.GroupName, Version: "v1alpha1"}
+
+// Kind takes an unqualified kind and returns back a Group qualified GroupKind
+func Kind(kind string) schema.GroupKind {
+	return SchemeGroupVersion.WithKind(kind).GroupKind()
+}
 
 // Resource takes an unqualified resource and returns a Group qualified GroupResource
 func Resource(resource string) schema.GroupResource {
@@ -34,22 +37,15 @@ func Resource(resource string) schema.GroupResource {
 }
 
 var (
-	SchemeBuilder      runtime.SchemeBuilder
-	localSchemeBuilder = &SchemeBuilder
-	AddToScheme        = localSchemeBuilder.AddToScheme
+	SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
+	AddToScheme   = SchemeBuilder.AddToScheme
 )
 
-func init() {
-	// We only register manually written functions here. The registration of the
-	// generated functions takes place in the generated files. The separation
-	// makes the code compile even when the generated files are missing.
-	localSchemeBuilder.Register(addKnownTypes)
-}
-
+// Adds the list of known types to Scheme.
func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, - &AuditSink{}, - &AuditSinkList{}, + &Policy{}, + &PolicyList{}, ) metav1.AddToGroupVersion(scheme, SchemeGroupVersion) return nil diff --git a/vendor/k8s.io/api/networking/v1beta1/zz_generated.deepcopy.go b/vendor/github.com/knative/pkg/apis/istio/authentication/v1alpha1/zz_generated.deepcopy.go similarity index 56% rename from vendor/k8s.io/api/networking/v1beta1/zz_generated.deepcopy.go rename to vendor/github.com/knative/pkg/apis/istio/authentication/v1alpha1/zz_generated.deepcopy.go index 16ac936ae..20f900c9b 100644 --- a/vendor/k8s.io/api/networking/v1beta1/zz_generated.deepcopy.go +++ b/vendor/github.com/knative/pkg/apis/istio/authentication/v1alpha1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright The Kubernetes Authors. +Copyright 2019 The Knative Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -18,122 +18,133 @@ limitations under the License. // Code generated by deepcopy-gen. DO NOT EDIT. -package v1beta1 +package v1alpha1 import ( + commonv1alpha1 "github.com/knative/pkg/apis/istio/common/v1alpha1" runtime "k8s.io/apimachinery/pkg/runtime" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *HTTPIngressPath) DeepCopyInto(out *HTTPIngressPath) { +func (in *Jwt) DeepCopyInto(out *Jwt) { *out = *in - out.Backend = in.Backend + if in.Audiences != nil { + in, out := &in.Audiences, &out.Audiences + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.JwtHeaders != nil { + in, out := &in.JwtHeaders, &out.JwtHeaders + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.JwtParams != nil { + in, out := &in.JwtParams, &out.JwtParams + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ExcludedPaths != nil { + in, out := &in.ExcludedPaths, &out.ExcludedPaths + *out = make([]commonv1alpha1.StringMatch, len(*in)) + copy(*out, *in) + } return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPIngressPath. -func (in *HTTPIngressPath) DeepCopy() *HTTPIngressPath { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Jwt. +func (in *Jwt) DeepCopy() *Jwt { if in == nil { return nil } - out := new(HTTPIngressPath) + out := new(Jwt) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *HTTPIngressRuleValue) DeepCopyInto(out *HTTPIngressRuleValue) { +func (in *MutualTLS) DeepCopyInto(out *MutualTLS) { *out = *in - if in.Paths != nil { - in, out := &in.Paths, &out.Paths - *out = make([]HTTPIngressPath, len(*in)) - copy(*out, *in) - } return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPIngressRuleValue. -func (in *HTTPIngressRuleValue) DeepCopy() *HTTPIngressRuleValue { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutualTLS. +func (in *MutualTLS) DeepCopy() *MutualTLS { if in == nil { return nil } - out := new(HTTPIngressRuleValue) + out := new(MutualTLS) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *Ingress) DeepCopyInto(out *Ingress) { +func (in *OriginAuthenticationMethod) DeepCopyInto(out *OriginAuthenticationMethod) { *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) + if in.Jwt != nil { + in, out := &in.Jwt, &out.Jwt + *out = new(Jwt) + (*in).DeepCopyInto(*out) + } return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Ingress. -func (in *Ingress) DeepCopy() *Ingress { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OriginAuthenticationMethod. +func (in *OriginAuthenticationMethod) DeepCopy() *OriginAuthenticationMethod { if in == nil { return nil } - out := new(Ingress) + out := new(OriginAuthenticationMethod) in.DeepCopyInto(out) return out } -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Ingress) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *IngressBackend) DeepCopyInto(out *IngressBackend) { +func (in *PeerAuthenticationMethod) DeepCopyInto(out *PeerAuthenticationMethod) { *out = *in - out.ServicePort = in.ServicePort + if in.Mtls != nil { + in, out := &in.Mtls, &out.Mtls + *out = new(MutualTLS) + **out = **in + } + if in.Jwt != nil { + in, out := &in.Jwt, &out.Jwt + *out = new(Jwt) + (*in).DeepCopyInto(*out) + } return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressBackend. -func (in *IngressBackend) DeepCopy() *IngressBackend { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PeerAuthenticationMethod. +func (in *PeerAuthenticationMethod) DeepCopy() *PeerAuthenticationMethod { if in == nil { return nil } - out := new(IngressBackend) + out := new(PeerAuthenticationMethod) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *IngressList) DeepCopyInto(out *IngressList) { +func (in *Policy) DeepCopyInto(out *Policy) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Ingress, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressList. -func (in *IngressList) DeepCopy() *IngressList { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Policy. +func (in *Policy) DeepCopy() *Policy { if in == nil { return nil } - out := new(IngressList) + out := new(Policy) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *IngressList) DeepCopyObject() runtime.Object { +func (in *Policy) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } @@ -141,61 +152,58 @@ func (in *IngressList) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *IngressRule) DeepCopyInto(out *IngressRule) { +func (in *PolicyList) DeepCopyInto(out *PolicyList) { *out = *in - in.IngressRuleValue.DeepCopyInto(&out.IngressRuleValue) + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Policy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressRule. -func (in *IngressRule) DeepCopy() *IngressRule { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyList. +func (in *PolicyList) DeepCopy() *PolicyList { if in == nil { return nil } - out := new(IngressRule) + out := new(PolicyList) in.DeepCopyInto(out) return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *IngressRuleValue) DeepCopyInto(out *IngressRuleValue) { - *out = *in - if in.HTTP != nil { - in, out := &in.HTTP, &out.HTTP - *out = new(HTTPIngressRuleValue) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressRuleValue. -func (in *IngressRuleValue) DeepCopy() *IngressRuleValue { - if in == nil { - return nil +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PolicyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c } - out := new(IngressRuleValue) - in.DeepCopyInto(out) - return out + return nil } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *IngressSpec) DeepCopyInto(out *IngressSpec) { +func (in *PolicySpec) DeepCopyInto(out *PolicySpec) { *out = *in - if in.Backend != nil { - in, out := &in.Backend, &out.Backend - *out = new(IngressBackend) - **out = **in + if in.Targets != nil { + in, out := &in.Targets, &out.Targets + *out = make([]TargetSelector, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } } - if in.TLS != nil { - in, out := &in.TLS, &out.TLS - *out = make([]IngressTLS, len(*in)) + if in.Peers != nil { + in, out := &in.Peers, &out.Peers + *out = make([]PeerAuthenticationMethod, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } - if in.Rules != nil { - in, out := &in.Rules, &out.Rules - *out = make([]IngressRule, len(*in)) + if in.Origins != nil { + in, out := &in.Origins, &out.Origins + *out = make([]OriginAuthenticationMethod, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -203,50 +211,49 @@ func (in *IngressSpec) DeepCopyInto(out *IngressSpec) { return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressSpec. -func (in *IngressSpec) DeepCopy() *IngressSpec { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicySpec. +func (in *PolicySpec) DeepCopy() *PolicySpec { if in == nil { return nil } - out := new(IngressSpec) + out := new(PolicySpec) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *IngressStatus) DeepCopyInto(out *IngressStatus) {
+func (in *PortSelector) DeepCopyInto(out *PortSelector) {
 	*out = *in
-	in.LoadBalancer.DeepCopyInto(&out.LoadBalancer)
 	return
 }
 
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressStatus.
-func (in *IngressStatus) DeepCopy() *IngressStatus {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PortSelector.
+func (in *PortSelector) DeepCopy() *PortSelector {
 	if in == nil {
 		return nil
 	}
-	out := new(IngressStatus)
+	out := new(PortSelector)
 	in.DeepCopyInto(out)
 	return out
 }
 
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *IngressTLS) DeepCopyInto(out *IngressTLS) {
+func (in *TargetSelector) DeepCopyInto(out *TargetSelector) {
 	*out = *in
-	if in.Hosts != nil {
-		in, out := &in.Hosts, &out.Hosts
-		*out = make([]string, len(*in))
+	if in.Ports != nil {
+		in, out := &in.Ports, &out.Ports
+		*out = make([]PortSelector, len(*in))
 		copy(*out, *in)
 	}
 	return
 }
 
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressTLS.
-func (in *IngressTLS) DeepCopy() *IngressTLS {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetSelector.
+func (in *TargetSelector) DeepCopy() *TargetSelector {
 	if in == nil {
 		return nil
 	}
-	out := new(IngressTLS)
+	out := new(TargetSelector)
 	in.DeepCopyInto(out)
 	return out
 }
diff --git a/vendor/github.com/knative/pkg/apis/istio/common/v1alpha1/string.go b/vendor/github.com/knative/pkg/apis/istio/common/v1alpha1/string.go
new file mode 100644
index 000000000..c34c25053
--- /dev/null
+++ b/vendor/github.com/knative/pkg/apis/istio/common/v1alpha1/string.go
@@ -0,0 +1,35 @@
+/*
+Copyright 2018 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
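+// Editor's note: an illustrative (non-upstream) example of how StringMatch
+// appears in a manifest; exactly one matcher is set per entry, as in the
+// excludedPaths field of the authentication Jwt type:
+//
+//	excludedPaths:
+//	- exact: /health_check
+//	- prefix: /status/
+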
+// Describes how to match a given string in HTTP headers. Match is
+// case-sensitive.
+type StringMatch struct {
+	// Specify exactly one of the fields below.
+
+	// exact string match
+	Exact string `json:"exact,omitempty"`
+
+	// prefix-based match
+	Prefix string `json:"prefix,omitempty"`
+
+	// suffix-based match
+	Suffix string `json:"suffix,omitempty"`
+
+	// ECMAScript-style regex-based match
+	Regex string `json:"regex,omitempty"`
+}
diff --git a/vendor/github.com/knative/pkg/apis/istio/register.go b/vendor/github.com/knative/pkg/apis/istio/register.go
new file mode 100644
index 000000000..647eb38a0
--- /dev/null
+++ b/vendor/github.com/knative/pkg/apis/istio/register.go
@@ -0,0 +1,21 @@
+/*
+Copyright 2018 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package istio
+
+const (
+	GroupName = "networking.istio.io"
+)
diff --git a/vendor/github.com/knative/pkg/apis/istio/v1alpha3/README.md b/vendor/github.com/knative/pkg/apis/istio/v1alpha3/README.md
new file mode 100644
index 000000000..bc1fb65a5
--- /dev/null
+++ b/vendor/github.com/knative/pkg/apis/istio/v1alpha3/README.md
@@ -0,0 +1,17 @@
+# What are these files?
+
+These are Go structs for Istio CRDs. We translated them from the proto files in
+https://github.com/istio/api/tree/master/networking/v1alpha3 .
+
+# Why do we hand-translate from proto? i.e. Why can't we vendor these?
+
+Istio needs to run on many platforms and as a result they represent their
+objects internally as proto. On Kubernetes, their API takes in JSON objects and
+converts them to proto before processing.
+
+So they have nothing we can vendor, except for the Go files that are generated
+by the proto compiler, which are not compatible with the K8s API code-generator
+at all.
+
+We may be able to donate our translation so they can maintain it themselves. See
+https://github.com/istio/istio/issues/6084.
diff --git a/vendor/github.com/knative/pkg/apis/istio/v1alpha3/destinationrule_types.go b/vendor/github.com/knative/pkg/apis/istio/v1alpha3/destinationrule_types.go
new file mode 100644
index 000000000..54a43b354
--- /dev/null
+++ b/vendor/github.com/knative/pkg/apis/istio/v1alpha3/destinationrule_types.go
@@ -0,0 +1,547 @@
+/*
+Copyright 2018 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha3
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// DestinationRule
+type DestinationRule struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+	Spec              DestinationRuleSpec `json:"spec"`
+}
+
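+// Editor's note: an illustrative (non-upstream) sketch showing how the first
+// YAML example below maps onto the Go types defined in this file:
+//
+//	spec := DestinationRuleSpec{
+//		Host: "ratings.prod.svc.cluster.local",
+//		TrafficPolicy: &TrafficPolicy{
+//			LoadBalancer: &LoadBalancerSettings{Simple: SimpleLBLeastConn},
+//		},
+//	}
+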
+// DestinationRule defines policies that apply to traffic intended for a
+// service after routing has occurred. These rules specify configuration
+// for load balancing, connection pool size from the sidecar, and outlier
+// detection settings to detect and evict unhealthy hosts from the load
+// balancing pool. For example, a simple load balancing policy for the
+// ratings service would look as follows:
+//
+// apiVersion: networking.istio.io/v1alpha3
+// kind: DestinationRule
+// metadata:
+//   name: bookinfo-ratings
+// spec:
+//   host: ratings.prod.svc.cluster.local
+//   trafficPolicy:
+//     loadBalancer:
+//       simple: LEAST_CONN
+//
+// Version specific policies can be specified by defining a named
+// subset and overriding the settings specified at the service level. The
+// following rule uses a round robin load balancing policy for all traffic
+// going to a subset named testversion that is composed of endpoints (e.g.,
+// pods) with labels (version:v3).
+//
+// apiVersion: networking.istio.io/v1alpha3
+// kind: DestinationRule
+// metadata:
+//   name: bookinfo-ratings
+// spec:
+//   host: ratings.prod.svc.cluster.local
+//   trafficPolicy:
+//     loadBalancer:
+//       simple: LEAST_CONN
+//   subsets:
+//   - name: testversion
+//     labels:
+//       version: v3
+//     trafficPolicy:
+//       loadBalancer:
+//         simple: ROUND_ROBIN
+//
+// **Note:** Policies specified for subsets will not take effect until
+// a route rule explicitly sends traffic to this subset.
+//
+// Traffic policies can be customized to specific ports as well. The
+// following rule uses the least connection load balancing policy for all
+// traffic to port 80, while using a round robin load balancing setting for
+// traffic to port 9080.
+//
+// apiVersion: networking.istio.io/v1alpha3
+// kind: DestinationRule
+// metadata:
+//   name: bookinfo-ratings-port
+// spec:
+//   host: ratings.prod.svc.cluster.local
+//   trafficPolicy: # Apply to all ports
+//     portLevelSettings:
+//     - port:
+//         number: 80
+//       loadBalancer:
+//         simple: LEAST_CONN
+//     - port:
+//         number: 9080
+//       loadBalancer:
+//         simple: ROUND_ROBIN
+//
+type DestinationRuleSpec struct {
+	// REQUIRED. The name of a service from the service registry. Service
+	// names are looked up from the platform's service registry (e.g.,
+	// Kubernetes services, Consul services, etc.) and from the hosts
+	// declared by [ServiceEntries](#ServiceEntry). Rules defined for
+	// services that do not exist in the service registry will be ignored.
+	//
+	// *Note for Kubernetes users*: When short names are used (e.g. "reviews"
+	// instead of "reviews.default.svc.cluster.local"), Istio will interpret
+	// the short name based on the namespace of the rule, not the service. A
+	// rule in the "default" namespace containing a host "reviews" will be
+	// interpreted as "reviews.default.svc.cluster.local", irrespective of
+	// the actual namespace associated with the reviews service. _To avoid
+	// potential misconfigurations, it is recommended to always use fully
+	// qualified domain names over short names._
+	//
+	// Note that the host field applies to both HTTP and TCP services.
+	Host string `json:"host"`
+
+	// Traffic policies to apply (load balancing policy, connection pool
+	// sizes, outlier detection).
+	TrafficPolicy *TrafficPolicy `json:"trafficPolicy,omitempty"`
+
+	// One or more named sets that represent individual versions of a
+	// service. Traffic policies can be overridden at subset level.
+	Subsets []Subset `json:"subsets,omitempty"`
+}
+
+// Traffic policies to apply for a specific destination, across all
+// destination ports. See DestinationRule for examples.
+type TrafficPolicy struct {
+
+	// Settings controlling the load balancer algorithms.
+	LoadBalancer *LoadBalancerSettings `json:"loadBalancer,omitempty"`
+
+	// Settings controlling the volume of connections to an upstream service
+	ConnectionPool *ConnectionPoolSettings `json:"connectionPool,omitempty"`
+
+	// Settings controlling eviction of unhealthy hosts from the load balancing pool
+	OutlierDetection *OutlierDetection `json:"outlierDetection,omitempty"`
+
+	// TLS related settings for connections to the upstream service.
+	TLS *TLSSettings `json:"tls,omitempty"`
+
+	// Traffic policies specific to individual ports.
Note that port level + // settings will override the destination-level settings. Traffic + // settings specified at the destination-level will not be inherited when + // overridden by port-level settings, i.e. default values will be applied + // to fields omitted in port-level traffic policies. + PortLevelSettings []PortTrafficPolicy `json:"portLevelSettings,omitempty"` +} + +// Traffic policies that apply to specific ports of the service +type PortTrafficPolicy struct { + // Specifies the port name or number of a port on the destination service + // on which this policy is being applied. + // + // Names must comply with DNS label syntax (rfc1035) and therefore cannot + // collide with numbers. If there are multiple ports on a service with + // the same protocol the names should be of the form -. + Port PortSelector `json:"port"` + + // Settings controlling the load balancer algorithms. + LoadBalancer *LoadBalancerSettings `json:"loadBalancer,omitempty"` + + // Settings controlling the volume of connections to an upstream service + ConnectionPool *ConnectionPoolSettings `json:"connectionPool,omitempty"` + + // Settings controlling eviction of unhealthy hosts from the load balancing pool + OutlierDetection *OutlierDetection `json:"outlierDetection,omitempty"` + + // TLS related settings for connections to the upstream service. + TLS *TLSSettings `json:"tls,omitempty"` +} + +// A subset of endpoints of a service. Subsets can be used for scenarios +// like A/B testing, or routing to a specific version of a service. Refer +// to [VirtualService](#VirtualService) documentation for examples of using +// subsets in these scenarios. In addition, traffic policies defined at the +// service-level can be overridden at a subset-level. The following rule +// uses a round robin load balancing policy for all traffic going to a +// subset named testversion that is composed of endpoints (e.g., pods) with +// labels (version:v3). +// +// apiVersion: networking.istio.io/v1alpha3 +// kind: DestinationRule +// metadata: +// name: bookinfo-ratings +// spec: +// host: ratings.prod.svc.cluster.local +// trafficPolicy: +// loadBalancer: +// simple: LEAST_CONN +// subsets: +// - name: testversion +// labels: +// version: v3 +// trafficPolicy: +// loadBalancer: +// simple: ROUND_ROBIN +// +// **Note:** Policies specified for subsets will not take effect until +// a route rule explicitly sends traffic to this subset. +type Subset struct { + // REQUIRED. Name of the subset. The service name and the subset name can + // be used for traffic splitting in a route rule. + Name string `json:"name"` + + // REQUIRED. Labels apply a filter over the endpoints of a service in the + // service registry. See route rules for examples of usage. + Labels map[string]string `json:"labels"` + + // Traffic policies that apply to this subset. Subsets inherit the + // traffic policies specified at the DestinationRule level. Settings + // specified at the subset level will override the corresponding settings + // specified at the DestinationRule level. + TrafficPolicy *TrafficPolicy `json:"trafficPolicy,omitempty"` +} + +// Load balancing policies to apply for a specific destination. See Envoy's +// load balancing +// [documentation](https://www.envoyproxy.io/docs/envoy/latest/intro/arch_overview/load_balancing.html) +// for more details. +// +// For example, the following rule uses a round robin load balancing policy +// for all traffic going to the ratings service. 
+//
+// apiVersion: networking.istio.io/v1alpha3
+// kind: DestinationRule
+// metadata:
+//   name: bookinfo-ratings
+// spec:
+//   host: ratings.prod.svc.cluster.local
+//   trafficPolicy:
+//     loadBalancer:
+//       simple: ROUND_ROBIN
+//
+// The following example sets up sticky sessions for the ratings service,
+// using a consistent hashing-based load balancer with the user cookie as
+// the hash key.
+//
+// apiVersion: networking.istio.io/v1alpha3
+// kind: DestinationRule
+// metadata:
+//   name: bookinfo-ratings
+// spec:
+//   host: ratings.prod.svc.cluster.local
+//   trafficPolicy:
+//     loadBalancer:
+//       consistentHash:
+//         httpCookie:
+//           name: user
+//           ttl: 0s
+type LoadBalancerSettings struct {
+	// It is required to specify exactly one of the fields:
+	// Simple or ConsistentHash
+	Simple         SimpleLB          `json:"simple,omitempty"`
+	ConsistentHash *ConsistentHashLB `json:"consistentHash,omitempty"`
+}
+
+// Standard load balancing algorithms that require no tuning.
+type SimpleLB string
+
+const (
+	// Round Robin policy. Default.
+	SimpleLBRoundRobin SimpleLB = "ROUND_ROBIN"
+
+	// The least request load balancer uses an O(1) algorithm which selects
+	// two random healthy hosts and picks the host which has fewer active
+	// requests.
+	SimpleLBLeastConn SimpleLB = "LEAST_CONN"
+
+	// The random load balancer selects a random healthy host. The random
+	// load balancer generally performs better than round robin if no health
+	// checking policy is configured.
+	SimpleLBRandom SimpleLB = "RANDOM"
+
+	// This option will forward the connection to the original IP address
+	// requested by the caller without doing any form of load
+	// balancing. This option must be used with care. It is meant for
+	// advanced use cases. Refer to the Original Destination load balancer in
+	// Envoy for further details.
+	SimpleLBPassthrough SimpleLB = "PASSTHROUGH"
+)
+
+// Consistent Hash-based load balancing can be used to provide soft
+// session affinity based on HTTP headers, cookies or other
+// properties. This load balancing policy is applicable only for HTTP
+// connections. The affinity to a particular destination host will be
+// lost when one or more hosts are added/removed from the destination
+// service.
+type ConsistentHashLB struct {
+
+	// It is required to specify exactly one of the fields as the hash key:
+	// HTTPHeaderName, HTTPCookie, or UseSourceIP.
+	// Hash based on a specific HTTP header.
+	HTTPHeaderName string `json:"httpHeaderName,omitempty"`
+
+	// Hash based on an HTTP cookie.
+	HTTPCookie *HTTPCookie `json:"httpCookie,omitempty"`
+
+	// Hash based on the source IP address.
+	UseSourceIP bool `json:"useSourceIp,omitempty"`
+
+	// The minimum number of virtual nodes to use for the hash
+	// ring. Defaults to 1024. Larger ring sizes result in more granular
+	// load distributions. If the number of hosts in the load balancing
+	// pool is larger than the ring size, each host will be assigned a
+	// single virtual node.
+	MinimumRingSize uint64 `json:"minimumRingSize,omitempty"`
+}
+
+// Describes an HTTP cookie that will be used as the hash key for the
+// Consistent Hash load balancer. If the cookie is not present, it will
+// be generated.
+type HTTPCookie struct {
+	// REQUIRED. Name of the cookie.
+	Name string `json:"name"`
+
+	// Path to set for the cookie.
+	Path string `json:"path,omitempty"`
+
+	// REQUIRED. Lifetime of the cookie.
+	TTL string `json:"ttl"`
+}
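+// Editor's note: an illustrative (non-upstream) sketch showing how the
+// sticky-session YAML example above maps onto these Go types:
+//
+//	lb := &LoadBalancerSettings{
+//		ConsistentHash: &ConsistentHashLB{
+//			HTTPCookie: &HTTPCookie{Name: "user", TTL: "0s"},
+//		},
+//	}
+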
+// The settings apply to each individual host in the upstream service. See
+// Envoy's [circuit
+// breaker](https://www.envoyproxy.io/docs/envoy/latest/intro/arch_overview/circuit_breaking)
+// for more details. Connection pool settings can be applied at the TCP
+// level as well as at HTTP level.
+//
+// For example, the following rule sets a limit of 100 connections to a redis
+// service called myredissrv, with a connect timeout of 30ms.
+//
+//   apiVersion: networking.istio.io/v1alpha3
+//   kind: DestinationRule
+//   metadata:
+//     name: bookinfo-redis
+//   spec:
+//     host: myredissrv.prod.svc.cluster.local
+//     trafficPolicy:
+//       connectionPool:
+//         tcp:
+//           maxConnections: 100
+//           connectTimeout: 30ms
+type ConnectionPoolSettings struct {
+
+ // Settings common to both HTTP and TCP upstream connections.
+ TCP *TCPSettings `json:"tcp,omitempty"`
+
+ // HTTP connection pool settings.
+ HTTP *HTTPSettings `json:"http,omitempty"`
+}
+
+// Settings common to both HTTP and TCP upstream connections.
+type TCPSettings struct {
+ // Maximum number of HTTP1/TCP connections to a destination host.
+ MaxConnections int32 `json:"maxConnections,omitempty"`
+
+ // TCP connection timeout.
+ ConnectTimeout string `json:"connectTimeout,omitempty"`
+}
+
+// Settings applicable to HTTP1.1/HTTP2/GRPC connections.
+type HTTPSettings struct {
+ // Maximum number of pending HTTP requests to a destination. Default 1024.
+ HTTP1MaxPendingRequests int32 `json:"http1MaxPendingRequests,omitempty"`
+
+ // Maximum number of requests to a backend. Default 1024.
+ HTTP2MaxRequests int32 `json:"http2MaxRequests,omitempty"`
+
+ // Maximum number of requests per connection to a backend. Setting this
+ // parameter to 1 disables keep alive.
+ MaxRequestsPerConnection int32 `json:"maxRequestsPerConnection,omitempty"`
+
+ // Maximum number of retries that can be outstanding to all hosts in a
+ // cluster at a given time. Defaults to 3.
+ MaxRetries int32 `json:"maxRetries,omitempty"`
+}
+
+// A circuit breaker implementation that tracks the status of each
+// individual host in the upstream service. Applicable to both HTTP and
+// TCP services. For HTTP services, hosts that continually return 5xx
+// errors for API calls are ejected from the pool for a pre-defined period
+// of time. For TCP services, connection timeouts or connection
+// failures to a given host count as an error when measuring the
+// consecutive errors metric. See Envoy's [outlier
+// detection](https://www.envoyproxy.io/docs/envoy/latest/intro/arch_overview/outlier)
+// for more details.
+//
+// The following rule sets a connection pool size of 100 connections and
+// 1000 concurrent HTTP2 requests, with no more than 10 req/connection to
+// the "reviews" service. In addition, it configures upstream hosts to be
+// scanned every 5 mins, such that any host that fails 7 consecutive times
+// with a 5XX error code will be ejected for 15 minutes.
+//
+//   apiVersion: networking.istio.io/v1alpha3
+//   kind: DestinationRule
+//   metadata:
+//     name: reviews-cb-policy
+//   spec:
+//     host: reviews.prod.svc.cluster.local
+//     trafficPolicy:
+//       connectionPool:
+//         tcp:
+//           maxConnections: 100
+//         http:
+//           http2MaxRequests: 1000
+//           maxRequestsPerConnection: 10
+//       outlierDetection:
+//         consecutiveErrors: 7
+//         interval: 5m
+//         baseEjectionTime: 15m
+type OutlierDetection struct {
+ // Number of errors before a host is ejected from the connection
+ // pool. Defaults to 5. When the upstream host is accessed over HTTP, a
+ // 5xx return code qualifies as an error.
+ // When the upstream host is accessed over an opaque TCP connection,
+ // connect timeouts and connection error/failure events qualify as an
+ // error.
+ ConsecutiveErrors int32 `json:"consecutiveErrors,omitempty"`
+
+ // Time interval between ejection sweep analysis. format:
+ // 1h/1m/1s/1ms. MUST BE >=1ms. Default is 10s.
+ Interval string `json:"interval,omitempty"`
+
+ // Minimum ejection duration. A host will remain ejected for a period
+ // equal to the product of minimum ejection duration and the number of
+ // times the host has been ejected. This technique allows the system to
+ // automatically increase the ejection period for unhealthy upstream
+ // servers. format: 1h/1m/1s/1ms. MUST BE >=1ms. Default is 30s.
+ BaseEjectionTime string `json:"baseEjectionTime,omitempty"`
+
+ // Maximum % of hosts in the load balancing pool for the upstream
+ // service that can be ejected. Defaults to 10%.
+ MaxEjectionPercent int32 `json:"maxEjectionPercent,omitempty"`
+}
+
+// SSL/TLS related settings for upstream connections. See Envoy's [TLS
+// context](https://www.envoyproxy.io/docs/envoy/latest/api-v1/cluster_manager/cluster_ssl.html#config-cluster-manager-cluster-ssl)
+// for more details. These settings are common to both HTTP and TCP upstreams.
+//
+// For example, the following rule configures a client to use mutual TLS
+// for connections to an upstream database cluster.
+//
+//   apiVersion: networking.istio.io/v1alpha3
+//   kind: DestinationRule
+//   metadata:
+//     name: db-mtls
+//   spec:
+//     host: mydbserver.prod.svc.cluster.local
+//     trafficPolicy:
+//       tls:
+//         mode: MUTUAL
+//         clientCertificate: /etc/certs/myclientcert.pem
+//         privateKey: /etc/certs/client_private_key.pem
+//         caCertificates: /etc/certs/rootcacerts.pem
+//
+// The following rule configures a client to use TLS when talking to a
+// foreign service whose domain matches *.foo.com.
+//
+//   apiVersion: networking.istio.io/v1alpha3
+//   kind: DestinationRule
+//   metadata:
+//     name: tls-foo
+//   spec:
+//     host: "*.foo.com"
+//     trafficPolicy:
+//       tls:
+//         mode: SIMPLE
+//
+// The following rule configures a client to use Istio mutual TLS when talking
+// to the ratings service.
+//
+//   apiVersion: networking.istio.io/v1alpha3
+//   kind: DestinationRule
+//   metadata:
+//     name: ratings-istio-mtls
+//   spec:
+//     host: ratings.prod.svc.cluster.local
+//     trafficPolicy:
+//       tls:
+//         mode: ISTIO_MUTUAL
+type TLSSettings struct {
+
+ // REQUIRED: Indicates whether connections to this port should be secured
+ // using TLS. The value of this field determines how TLS is enforced.
+ Mode TLSmode `json:"mode"`
+
+ // REQUIRED if mode is `MUTUAL`. The path to the file holding the
+ // client-side TLS certificate to use.
+ // Should be empty if mode is `ISTIO_MUTUAL`.
+ ClientCertificate string `json:"clientCertificate,omitempty"`
+
+ // REQUIRED if mode is `MUTUAL`. The path to the file holding the
+ // client's private key.
+ // Should be empty if mode is `ISTIO_MUTUAL`.
+ PrivateKey string `json:"privateKey,omitempty"`
+
+ // OPTIONAL: The path to the file containing certificate authority
+ // certificates to use in verifying a presented server certificate. If
+ // omitted, the proxy will not verify the server's certificate.
+ // Should be empty if mode is `ISTIO_MUTUAL`.
+ CaCertificates string `json:"caCertificates,omitempty"`
+
+ // A list of alternate names to verify the subject identity in the
+ // certificate. If specified, the proxy will verify that the server
+ // certificate's subject alt name matches one of the specified values.
+ // Should be empty if mode is `ISTIO_MUTUAL`.
+ SubjectAltNames []string `json:"subjectAltNames,omitempty"`
+
+ // SNI string to present to the server during TLS handshake.
+ // Should be empty if mode is `ISTIO_MUTUAL`.
+ Sni string `json:"sni,omitempty"`
+}
+
+// TLS connection mode
+type TLSmode string
+
+const (
+ // Do not set up a TLS connection to the upstream endpoint.
+ TLSmodeDisable TLSmode = "DISABLE"
+
+ // Originate a TLS connection to the upstream endpoint.
+ TLSmodeSimple TLSmode = "SIMPLE"
+
+ // Secure connections to the upstream using mutual TLS by presenting
+ // client certificates for authentication.
+ TLSmodeMutual TLSmode = "MUTUAL"
+
+ // Secure connections to the upstream using mutual TLS by presenting
+ // client certificates for authentication.
+ // Compared to Mutual mode, this mode uses certificates generated
+ // automatically by Istio for mTLS authentication. When this mode is
+ // used, all other fields in `TLSSettings` should be empty.
+ TLSmodeIstioMutual TLSmode = "ISTIO_MUTUAL"
+)
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// DestinationRuleList is a list of DestinationRule resources
+type DestinationRuleList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata"`
+ Items []DestinationRule `json:"items"`
+}
diff --git a/vendor/k8s.io/api/coordination/v1/doc.go b/vendor/github.com/knative/pkg/apis/istio/v1alpha3/doc.go
similarity index 68%
rename from vendor/k8s.io/api/coordination/v1/doc.go
rename to vendor/github.com/knative/pkg/apis/istio/v1alpha3/doc.go
index fc2f4f2c6..47ec83dae 100644
--- a/vendor/k8s.io/api/coordination/v1/doc.go
+++ b/vendor/github.com/knative/pkg/apis/istio/v1alpha3/doc.go
@@ -1,5 +1,5 @@
 /*
-Copyright 2018 The Kubernetes Authors.
+Copyright 2018 The Knative Authors
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -14,10 +14,10 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-// +k8s:deepcopy-gen=package
-// +k8s:protobuf-gen=package
-// +k8s:openapi-gen=true
-
-// +groupName=coordination.k8s.io
+// API versions allow the API contract for a resource to be changed while
+// keeping backward compatibility by supporting multiple concurrent versions
+// of the same resource.
 
-package v1 // import "k8s.io/api/coordination/v1"
+// +k8s:deepcopy-gen=package
+// +groupName=networking.istio.io
+package v1alpha3
diff --git a/vendor/github.com/knative/pkg/apis/istio/v1alpha3/gateway_types.go b/vendor/github.com/knative/pkg/apis/istio/v1alpha3/gateway_types.go
new file mode 100644
index 000000000..460c3f6a6
--- /dev/null
+++ b/vendor/github.com/knative/pkg/apis/istio/v1alpha3/gateway_types.go
@@ -0,0 +1,336 @@
+/*
+Copyright 2018 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha3
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Gateway describes a load balancer operating at the edge of the mesh
+// receiving incoming or outgoing HTTP/TCP connections. The specification
+// describes a set of ports that should be exposed, the type of protocol to
+// use, SNI configuration for the load balancer, etc.
+//
+// For example, the following gateway spec sets up a proxy to act as a load
+// balancer exposing ports 80 and 9080 (HTTP), 443 (HTTPS), and 2379
+// (TCP) for ingress. The gateway will be applied to the proxy running on
+// a pod with labels "app: my-gateway-controller". While Istio will configure the
+// proxy to listen on these ports, it is the responsibility of the user to
+// ensure that external traffic to these ports is allowed into the mesh.
+//
+//   apiVersion: networking.istio.io/v1alpha3
+//   kind: Gateway
+//   metadata:
+//     name: my-gateway
+//   spec:
+//     selector:
+//       app: my-gateway-controller
+//     servers:
+//     - port:
+//         number: 80
+//         name: http
+//         protocol: HTTP
+//       hosts:
+//       - uk.bookinfo.com
+//       - eu.bookinfo.com
+//       tls:
+//         httpsRedirect: true # sends 302 redirect for http requests
+//     - port:
+//         number: 443
+//         name: https
+//         protocol: HTTPS
+//       hosts:
+//       - uk.bookinfo.com
+//       - eu.bookinfo.com
+//       tls:
+//         mode: SIMPLE # enables HTTPS on this port
+//         serverCertificate: /etc/certs/servercert.pem
+//         privateKey: /etc/certs/privatekey.pem
+//     - port:
+//         number: 9080
+//         name: http-wildcard
+//         protocol: HTTP
+//       # no hosts implies wildcard match
+//     - port:
+//         number: 2379 # to expose the internal service via external port 2379
+//         name: mongo
+//         protocol: MONGO
+//
+// The gateway specification above describes the L4-L6 properties of a load
+// balancer. A VirtualService can then be bound to a gateway to control
+// the forwarding of traffic arriving at a particular host or gateway port.
+//
+// For example, the following VirtualService splits traffic for
+// https://uk.bookinfo.com/reviews, https://eu.bookinfo.com/reviews,
+// http://uk.bookinfo.com:9080/reviews, http://eu.bookinfo.com:9080/reviews
+// into two versions (prod and qa) of an internal reviews service on port
+// 9080. In addition, requests containing the cookie user: dev-123 will be
+// sent to special port 7777 in the qa version. The same rule is also
+// applicable inside the mesh for requests to the reviews.prod
+// service. This rule is applicable across ports 443, 9080. Note that
+// http://uk.bookinfo.com gets redirected to https://uk.bookinfo.com
+// (i.e. 80 redirects to 443).
+//
+//   apiVersion: networking.istio.io/v1alpha3
+//   kind: VirtualService
+//   metadata:
+//     name: bookinfo-rule
+//   spec:
+//     hosts:
+//     - reviews.prod
+//     - uk.bookinfo.com
+//     - eu.bookinfo.com
+//     gateways:
+//     - my-gateway
+//     - mesh # applies to all the sidecars in the mesh
+//     http:
+//     - match:
+//       - headers:
+//           cookie:
+//             user: dev-123
+//       route:
+//       - destination:
+//           port:
+//             number: 7777
+//           name: reviews.qa
+//     - match:
+//         uri:
+//           prefix: /reviews/
+//       route:
+//       - destination:
+//           port:
+//             number: 9080 # can be omitted if it's the only port for reviews
+//           name: reviews.prod
+//         weight: 80
+//       - destination:
+//           name: reviews.qa
+//         weight: 20
+//
+// The following VirtualService forwards traffic arriving at (external) port
+// 2379 from the 172.17.16.0/24 subnet to an internal Mongo server on port 5555.
+// This rule is not applicable internally in the mesh as the gateway list omits
+// the reserved name "mesh".
+//
+//   apiVersion: networking.istio.io/v1alpha3
+//   kind: VirtualService
+//   metadata:
+//     name: bookinfo-mongo
+//   spec:
+//     hosts:
+//     - mongosvr # name of the Mongo service
+//     gateways:
+//     - my-gateway
+//     tcp:
+//     - match:
+//       - port:
+//           number: 2379
+//         sourceSubnet: "172.17.16.0/24"
+//       route:
+//       - destination:
+//           name: mongo.prod
+//
+type Gateway struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ Spec GatewaySpec `json:"spec"`
+}
+
+type GatewaySpec struct {
+ // REQUIRED: A list of server specifications.
+ Servers []Server `json:"servers"`
+
+ // One or more labels that indicate a specific set of pods/VMs
+ // on which this gateway configuration should be applied.
+ // If no selectors are provided, the gateway will be implemented by
+ // the default istio-ingress controller.
+ Selector map[string]string `json:"selector,omitempty"`
+}
+
+// Server describes the properties of the proxy on a given load balancer port.
+// For example,
+//
+//   apiVersion: networking.istio.io/v1alpha3
+//   kind: Gateway
+//   metadata:
+//     name: my-ingress
+//   spec:
+//     selector:
+//       app: my-ingress-controller
+//     servers:
+//     - port:
+//         number: 80
+//         name: http2
+//         protocol: HTTP2
+//
+// Another example:
+//
+//   apiVersion: networking.istio.io/v1alpha3
+//   kind: Gateway
+//   metadata:
+//     name: my-tcp-ingress
+//   spec:
+//     selector:
+//       app: my-tcp-ingress-controller
+//     servers:
+//     - port:
+//         number: 27018
+//         name: mongo
+//         protocol: MONGO
+//
+// The following is an example of TLS configuration for port 443:
+//
+//   apiVersion: networking.istio.io/v1alpha3
+//   kind: Gateway
+//   metadata:
+//     name: my-tls-ingress
+//   spec:
+//     selector:
+//       app: my-tls-ingress-controller
+//     servers:
+//     - port:
+//         number: 443
+//         name: https
+//         protocol: HTTPS
+//       tls:
+//         mode: SIMPLE
+//         serverCertificate: /etc/certs/server.pem
+//         privateKey: /etc/certs/privatekey.pem
+//
+type Server struct {
+ // REQUIRED: The Port on which the proxy should listen for incoming
+ // connections
+ Port Port `json:"port"`
+
+ // A list of hosts exposed by this gateway. While
+ // typically applicable to HTTP services, it can also be used for TCP
+ // services using TLS with SNI. Standard DNS wildcard prefix syntax
+ // is permitted.
+ //
+ // A VirtualService that is bound to a gateway must have a matching host
+ // in its default destination. Specifically, one of the VirtualService
+ // destination hosts must be a strict suffix of a gateway host, or
+ // a gateway host must be a suffix of one of the VirtualService hosts.
+ Hosts []string `json:"hosts,omitempty"`
+
+ // Set of TLS related options that govern the server's behavior. Use
+ // these options to control whether all HTTP requests should be redirected
+ // to HTTPS, and the TLS modes to use.
+ TLS *TLSOptions `json:"tls,omitempty"`
+}
+
+type TLSOptions struct {
+ // If set to true, the load balancer will send a 302 redirect for all
+ // http connections, asking the clients to use HTTPS.
+ HTTPSRedirect bool `json:"httpsRedirect"`
+
+ // Optional: Indicates whether connections to this port should be
+ // secured using TLS. The value of this field determines how TLS is
+ // enforced.
+ Mode TLSMode `json:"mode,omitempty"`
+
+ // REQUIRED if mode is "SIMPLE" or "MUTUAL". The path to the file
+ // holding the server-side TLS certificate to use.
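+ // (Gateway workloads capable of fetching credentials from a remote store
+ // use CredentialName, below, instead of this file path.)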
+ ServerCertificate string `json:"serverCertificate"` + + // REQUIRED if mode is "SIMPLE" or "MUTUAL". The path to the file + // holding the server's private key. + PrivateKey string `json:"privateKey"` + + // REQUIRED if mode is "MUTUAL". The path to a file containing + // certificate authority certificates to use in verifying a presented + // client side certificate. + CaCertificates string `json:"caCertificates"` + + // The credentialName stands for a unique identifier that can be used + // to identify the serverCertificate and the privateKey. The + // credentialName appended with suffix "-cacert" is used to identify + // the CaCertificates associated with this server. Gateway workloads + // capable of fetching credentials from a remote credential store such + // as Kubernetes secrets, will be configured to retrieve the + // serverCertificate and the privateKey using credentialName, instead + // of using the file system paths specified above. If using mutual TLS, + // gateway workload instances will retrieve the CaCertificates using + // credentialName-cacert. The semantics of the name are platform + // dependent. In Kubernetes, the default Istio supplied credential + // server expects the credentialName to match the name of the + // Kubernetes secret that holds the server certificate, the private + // key, and the CA certificate (if using mutual TLS). Set the + // `ISTIO_META_USER_SDS` metadata variable in the gateway's proxy to + // enable the dynamic credential fetching feature. + CredentialName string `json:"credentialName,omitempty"` + + // A list of alternate names to verify the subject identity in the + // certificate presented by the client. + SubjectAltNames []string `json:"subjectAltNames"` +} + +// TLS modes enforced by the proxy +type TLSMode string + +const ( + // If set to "PASSTHROUGH", the proxy will forward the connection + // to the upstream server selected based on the SNI string presented + // by the client. + TLSModePassThrough TLSMode = "PASSTHROUGH" + + // If set to "SIMPLE", the proxy will secure connections with + // standard TLS semantics. + TLSModeSimple TLSMode = "SIMPLE" + + // If set to "MUTUAL", the proxy will secure connections to the + // upstream using mutual TLS by presenting client certificates for + // authentication. + TLSModeMutual TLSMode = "MUTUAL" +) + +// Port describes the properties of a specific port of a service. +type Port struct { + // REQUIRED: A valid non-negative integer port number. + Number int `json:"number"` + + // REQUIRED: The protocol exposed on the port. + // MUST BE one of HTTP|HTTPS|GRPC|HTTP2|MONGO|TCP. + Protocol PortProtocol `json:"protocol"` + + // Label assigned to the port. 
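+ // For example "http", "https", or "mongo", as used in the Gateway
+ // examples above.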
+ Name string `json:"name,omitempty"` +} + +type PortProtocol string + +const ( + ProtocolHTTP PortProtocol = "HTTP" + ProtocolHTTPS PortProtocol = "HTTPS" + ProtocolGRPC PortProtocol = "GRPC" + ProtocolHTTP2 PortProtocol = "HTTP2" + ProtocolMongo PortProtocol = "Mongo" + ProtocolTCP PortProtocol = "TCP" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// GatewayList is a list of Gateway resources +type GatewayList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + Items []Gateway `json:"items"` +} diff --git a/vendor/k8s.io/api/node/v1beta1/register.go b/vendor/github.com/knative/pkg/apis/istio/v1alpha3/register.go similarity index 67% rename from vendor/k8s.io/api/node/v1beta1/register.go rename to vendor/github.com/knative/pkg/apis/istio/v1alpha3/register.go index 3c3b61ba4..c2089e5c2 100644 --- a/vendor/k8s.io/api/node/v1beta1/register.go +++ b/vendor/github.com/knative/pkg/apis/istio/v1alpha3/register.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Kubernetes Authors. +Copyright 2018 The Knative Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,19 +14,22 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1beta1 +package v1alpha3 import ( + "github.com/knative/pkg/apis/istio" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" ) -// GroupName is the group name use in this package -const GroupName = "node.k8s.io" - // SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} +var SchemeGroupVersion = schema.GroupVersion{Group: istio.GroupName, Version: "v1alpha3"} + +// Kind takes an unqualified kind and returns back a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} // Resource takes an unqualified resource and returns a Group qualified GroupResource func Resource(resource string) schema.GroupResource { @@ -34,19 +37,20 @@ func Resource(resource string) schema.GroupResource { } var ( - // SchemeBuilder is the scheme builder with scheme init functions to run for this API package SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - // AddToScheme is a common registration function for mapping packaged scoped group & version keys to a scheme - AddToScheme = SchemeBuilder.AddToScheme + AddToScheme = SchemeBuilder.AddToScheme ) -// addKnownTypes adds the list of known types to api.Scheme. +// Adds the list of known types to Scheme. 
func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, - &RuntimeClass{}, - &RuntimeClassList{}, + &VirtualService{}, + &Gateway{}, + &DestinationRule{}, + &VirtualServiceList{}, + &GatewayList{}, + &DestinationRuleList{}, ) - metav1.AddToGroupVersion(scheme, SchemeGroupVersion) return nil } diff --git a/vendor/github.com/knative/pkg/apis/istio/v1alpha3/virtualservice_types.go b/vendor/github.com/knative/pkg/apis/istio/v1alpha3/virtualservice_types.go new file mode 100644 index 000000000..8798cfa37 --- /dev/null +++ b/vendor/github.com/knative/pkg/apis/istio/v1alpha3/virtualservice_types.go @@ -0,0 +1,882 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha3 + +import ( + "github.com/knative/pkg/apis/istio/common/v1alpha1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// VirtualService +type VirtualService struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec VirtualServiceSpec `json:"spec"` +} + +// A VirtualService defines a set of traffic routing rules to apply when a host is +// addressed. Each routing rule defines matching criteria for traffic of a specific +// protocol. If the traffic is matched, then it is sent to a named destination service +// (or subset/version of it) defined in the registry. +// +// The source of traffic can also be matched in a routing rule. This allows routing +// to be customized for specific client contexts. +// +// The following example routes all HTTP traffic by default to +// pods of the reviews service with label "version: v1". In addition, +// HTTP requests containing /wpcatalog/, /consumercatalog/ url prefixes will +// be rewritten to /newcatalog and sent to pods with label "version: v2". The +// rules will be applied at the gateway named "bookinfo" as well as at all +// the sidecars in the mesh (indicated by the reserved gateway name +// "mesh"). +// +// apiVersion: networking.istio.io/v1alpha3 +// kind: VirtualService +// metadata: +// name: reviews-route +// spec: +// hosts: +// - reviews +// gateways: # if omitted, defaults to "mesh" +// - bookinfo +// - mesh +// http: +// - match: +// - uri: +// prefix: "/wpcatalog" +// - uri: +// prefix: "/consumercatalog" +// rewrite: +// uri: "/newcatalog" +// route: +// - destination: +// host: reviews +// subset: v2 +// - route: +// - destination: +// host: reviews +// subset: v1 +// +// A subset/version of a route destination is identified with a reference +// to a named service subset which must be declared in a corresponding +// DestinationRule. +// +// apiVersion: networking.istio.io/v1alpha3 +// kind: DestinationRule +// metadata: +// name: reviews-destination +// spec: +// host: reviews +// subsets: +// - name: v1 +// labels: +// version: v1 +// - name: v2 +// labels: +// version: v2 +// +// A host name can be defined by only one VirtualService. 
A single
+// VirtualService can be used to describe traffic properties for multiple
+// HTTP and TCP ports.
+type VirtualServiceSpec struct {
+ // REQUIRED. The destination address for traffic captured by this virtual
+ // service. Could be a DNS name with wildcard prefix or a CIDR
+ // prefix. Depending on the platform, short names can also be used
+ // instead of an FQDN (i.e., a name with no dots). In such a scenario,
+ // the FQDN of the host would be derived based on the underlying
+ // platform.
+ //
+ // For example on Kubernetes, when hosts contains a short name, Istio will
+ // interpret the short name based on the namespace of the rule. Thus, when a
+ // rule in the "default" namespace contains the name "reviews", Istio will
+ // set up routes to the "reviews.default.svc.cluster.local"
+ // service. However, if a different name such as "reviews.sales.svc.cluster.local"
+ // is used, it would be treated as a FQDN during virtual host matching.
+ // In Consul, a plain service name would be resolved to the FQDN
+ // "reviews.service.consul".
+ //
+ // Note that the hosts field applies to both HTTP and TCP
+ // services. Services inside the mesh, i.e., those found in the service
+ // registry, must always be referred to using their alphanumeric
+ // names. IP addresses or CIDR prefixes are allowed only for services
+ // defined via the Gateway.
+ Hosts []string `json:"hosts"`
+
+ // The names of gateways and sidecars that should apply these routes. A
+ // single VirtualService is used for sidecars inside the mesh as well
+ // as for one or more gateways. The selection condition imposed by this field
+ // can be overridden using the source field in the match conditions of HTTP/TCP
+ // routes. The reserved word "mesh" is used to imply all the sidecars in
+ // the mesh. When this field is omitted, the default gateway ("mesh")
+ // will be used, which would apply the rule to all sidecars in the
+ // mesh. If a list of gateway names is provided, the rules will apply
+ // only to the gateways. To apply the rules to both gateways and sidecars,
+ // specify "mesh" as one of the gateway names.
+ Gateways []string `json:"gateways,omitempty"`
+
+ // An ordered list of route rules for HTTP traffic.
+ // The first rule matching an incoming request is used.
+ HTTP []HTTPRoute `json:"http,omitempty"`
+
+ // An ordered list of route rules for TCP traffic.
+ // The first rule matching an incoming request is used.
+ TCP []TCPRoute `json:"tcp,omitempty"`
+
+ // An ordered list of route rules for unterminated TLS/HTTPS traffic.
+ // The first rule matching an incoming request is used.
+ TLS []TLSRoute `json:"tls,omitempty"`
+}
+
+// Describes match conditions and actions for routing HTTP/1.1, HTTP2, and
+// gRPC traffic. See VirtualService for usage examples.
+type HTTPRoute struct {
+ // Match conditions to be satisfied for the rule to be
+ // activated. All conditions inside a single match block have AND
+ // semantics, while the list of match blocks have OR semantics. The rule
+ // is matched if any one of the match blocks succeeds.
+ Match []HTTPMatchRequest `json:"match,omitempty"`
+
+ // An HTTP rule can either redirect or forward (default) traffic. The
+ // forwarding target can be one of several versions of a service (see
+ // glossary in beginning of document). Weights associated with the
+ // service version determine the proportion of traffic it receives.
+ Route []HTTPRouteDestination `json:"route,omitempty"`
+
+ // An HTTP rule can either redirect or forward (default) traffic. If
+ // traffic passthrough option is specified in the rule,
+ // route/redirect will be ignored.
+ // The redirect primitive can be used to send an HTTP 302 redirect to a
+ // different URI or Authority.
+ Redirect *HTTPRedirect `json:"redirect,omitempty"`
+
+ // Rewrite HTTP URIs and Authority headers. Rewrite cannot be used with
+ // Redirect primitive. Rewrite will be performed before forwarding.
+ Rewrite *HTTPRewrite `json:"rewrite,omitempty"`
+
+ // Indicates that an HTTP/1.1 client connection to this particular route
+ // should be allowed (and expected) to upgrade to a WebSocket connection.
+ // The default is false. Istio's reference sidecar implementation (Envoy)
+ // expects the first request to this route to contain the WebSocket
+ // upgrade headers. Otherwise, the request will be rejected. Note that
+ // Websocket allows secondary protocol negotiation which may then be
+ // subject to further routing rules based on the protocol selected.
+ WebsocketUpgrade bool `json:"websocketUpgrade,omitempty"`
+
+ // Timeout for HTTP requests.
+ Timeout string `json:"timeout,omitempty"`
+
+ // Retry policy for HTTP requests.
+ Retries *HTTPRetry `json:"retries,omitempty"`
+
+ // Fault injection policy to apply on HTTP traffic.
+ Fault *HTTPFaultInjection `json:"fault,omitempty"`
+
+ // Mirror HTTP traffic to another destination in addition to forwarding
+ // the requests to the intended destination. Mirrored traffic is on a
+ // best effort basis where the sidecar/gateway will not wait for the
+ // mirrored cluster to respond before returning the response from the
+ // original destination. Statistics will be generated for the mirrored
+ // destination.
+ Mirror *Destination `json:"mirror,omitempty"`
+
+ // Additional HTTP headers to add before forwarding a request to the
+ // destination service.
+ DeprecatedAppendHeaders map[string]string `json:"appendHeaders,omitempty"`
+
+ // Header manipulation rules
+ Headers *Headers `json:"headers,omitempty"`
+
+ // HTTP headers to remove before returning the response to the caller
+ RemoveResponseHeaders map[string]string `json:"removeResponseHeaders,omitempty"`
+
+ // Cross-Origin Resource Sharing policy
+ CorsPolicy *CorsPolicy `json:"corsPolicy,omitempty"`
+}
+
+// Headers describes header manipulation rules.
+type Headers struct {
+ // Header manipulation rules to apply before forwarding a request
+ // to the destination service
+ Request *HeaderOperations `json:"request,omitempty"`
+
+ // Header manipulation rules to apply before returning a response
+ // to the caller
+ Response *HeaderOperations `json:"response,omitempty"`
+}
+
+// HeaderOperations describes the header manipulations to apply
+type HeaderOperations struct {
+ // Overwrite the headers specified by key with the given values
+ Set map[string]string `json:"set,omitempty"`
+
+ // Append the given values to the headers specified by keys
+ // (will create a comma-separated list of values)
+ Add map[string]string `json:"add,omitempty"`
+
+ // Remove the specified headers
+ Remove []string `json:"remove,omitempty"`
+}
+
+// HTTPMatchRequest specifies a set of criteria to be met in order for the
+// rule to be applied to the HTTP request. For example, the following
+// restricts the rule to match only requests where the URL path
+// starts with /ratings/v2/ and the request contains a "cookie" with value
+// "user=jason".
+//
+//   apiVersion: networking.istio.io/v1alpha3
+//   kind: VirtualService
+//   metadata:
+//     name: ratings-route
+//   spec:
+//     hosts:
+//     - ratings
+//     http:
+//     - match:
+//       - headers:
+//           cookie:
+//             regex: "^(.*?;)?(user=jason)(;.*)?"
+// uri: +// prefix: "/ratings/v2/" +// route: +// - destination: +// host: ratings +// +// HTTPMatchRequest CANNOT be empty. +type HTTPMatchRequest struct { + // URI to match + // values are case-sensitive and formatted as follows: + // + // - `exact: "value"` for exact string match + // + // - `prefix: "value"` for prefix-based match + // + // - `regex: "value"` for ECMAscript style regex-based match + // + URI *v1alpha1.StringMatch `json:"uri,omitempty"` + + // URI Scheme + // values are case-sensitive and formatted as follows: + // + // - `exact: "value"` for exact string match + // + // - `prefix: "value"` for prefix-based match + // + // - `regex: "value"` for ECMAscript style regex-based match + // + Scheme *v1alpha1.StringMatch `json:"scheme,omitempty"` + + // HTTP Method + // values are case-sensitive and formatted as follows: + // + // - `exact: "value"` for exact string match + // + // - `prefix: "value"` for prefix-based match + // + // - `regex: "value"` for ECMAscript style regex-based match + // + Method *v1alpha1.StringMatch `json:"method,omitempty"` + + // HTTP Authority + // values are case-sensitive and formatted as follows: + // + // - `exact: "value"` for exact string match + // + // - `prefix: "value"` for prefix-based match + // + // - `regex: "value"` for ECMAscript style regex-based match + // + Authority *v1alpha1.StringMatch `json:"authority,omitempty"` + + // The header keys must be lowercase and use hyphen as the separator, + // e.g. _x-request-id_. + // + // Header values are case-sensitive and formatted as follows: + // + // - `exact: "value"` for exact string match + // + // - `prefix: "value"` for prefix-based match + // + // - `regex: "value"` for ECMAscript style regex-based match + // + // **Note:** The keys `uri`, `scheme`, `method`, and `authority` will be ignored. + Headers map[string]v1alpha1.StringMatch `json:"headers,omitempty"` + + // Specifies the ports on the host that is being addressed. Many services + // only expose a single port or label ports with the protocols they support, + // in these cases it is not required to explicitly select the port. + Port uint32 `json:"port,omitempty"` + + // One or more labels that constrain the applicability of a rule to + // workloads with the given labels. If the VirtualService has a list of + // gateways specified at the top, it should include the reserved gateway + // `mesh` in order for this field to be applicable. + SourceLabels map[string]string `json:"sourceLabels,omitempty"` + + // Names of gateways where the rule should be applied to. Gateway names + // at the top of the VirtualService (if any) are overridden. The gateway match is + // independent of sourceLabels. + Gateways []string `json:"gateways,omitempty"` +} + +type HTTPRouteDestination struct { + // REQUIRED. Destination uniquely identifies the instances of a service + // to which the request/connection should be forwarded to. + Destination Destination `json:"destination"` + + // REQUIRED. The proportion of traffic to be forwarded to the service + // version. (0-100). Sum of weights across destinations SHOULD BE == 100. + // If there is only destination in a rule, the weight value is assumed to + // be 100. + Weight int `json:"weight"` + + // Header manipulation rules + Headers *Headers `json:"headers,omitempty"` +} + +// Destination indicates the network addressable service to which the +// request/connection will be sent after processing a routing rule. 
The
+// destination.name should unambiguously refer to a service in the service
+// registry. It can be a short name or a fully qualified domain name from
+// the service registry, a resolvable DNS name, an IP address or a service
+// name from the service registry and a subset name. The order of inference
+// is as follows:
+//
+// 1. Service registry lookup. The entire name is looked up in the service
+// registry. If the lookup succeeds, the search terminates. The requests
+// will be routed to any instance of the service in the mesh. When the
+// service name consists of a single word, the FQDN will be constructed in
+// a platform specific manner. For example, in Kubernetes, the namespace
+// associated with the routing rule will be used to identify the service as
+// <servicename>.<namespace>. However, if the service name contains
+// multiple words separated by a dot (e.g., reviews.prod), the name in its
+// entirety would be looked up in the service registry.
+//
+// 2. Runtime DNS lookup by the proxy. If step 1 fails, and the name is not
+// an IP address, it will be considered as a DNS name that is not in the
+// service registry (e.g., wikipedia.org). The sidecar/gateway will resolve
+// the DNS and load balance requests appropriately. See Envoy's strict_dns
+// for details.
+//
+// The following example routes all traffic by default to pods of the
+// reviews service with label "version: v1" (i.e., subset v1), and some
+// to subset v2, in a Kubernetes environment.
+//
+//   apiVersion: networking.istio.io/v1alpha3
+//   kind: VirtualService
+//   metadata:
+//     name: reviews-route
+//   spec:
+//     hosts:
+//     - reviews # namespace is same as the client/caller's namespace
+//     http:
+//     - match:
+//       - uri:
+//           prefix: "/wpcatalog"
+//       - uri:
+//           prefix: "/consumercatalog"
+//       rewrite:
+//         uri: "/newcatalog"
+//       route:
+//       - destination:
+//           host: reviews
+//           subset: v2
+//     - route:
+//       - destination:
+//           host: reviews
+//           subset: v1
+//
+// And the associated DestinationRule
+//
+//   apiVersion: networking.istio.io/v1alpha3
+//   kind: DestinationRule
+//   metadata:
+//     name: reviews-destination
+//   spec:
+//     host: reviews
+//     subsets:
+//     - name: v1
+//       labels:
+//         version: v1
+//     - name: v2
+//       labels:
+//         version: v2
+//
+// The following VirtualService sets a timeout of 5s for all calls to
+// the productpage.prod service. Notice that there are no subsets defined in
+// this rule. Istio will fetch all instances of the productpage.prod service
+// from the service registry and populate the sidecar's load balancing
+// pool.
+//
+//   apiVersion: networking.istio.io/v1alpha3
+//   kind: VirtualService
+//   metadata:
+//     name: my-productpage-rule
+//   spec:
+//     hosts:
+//     - productpage.prod # in kubernetes, this applies only to prod namespace
+//     http:
+//     - timeout: 5s
+//       route:
+//       - destination:
+//           host: productpage.prod
+//
+// The following sets a timeout of 5s for all calls to the external
+// service wikipedia.org, as there is no internal service of that name.
+//
+//   apiVersion: networking.istio.io/v1alpha3
+//   kind: VirtualService
+//   metadata:
+//     name: my-wiki-rule
+//   spec:
+//     hosts:
+//     - wikipedia.org
+//     http:
+//     - timeout: 5s
+//       route:
+//       - destination:
+//           host: wikipedia.org
+//
+type Destination struct {
+ // REQUIRED. The name of a service from the service registry. Service
+ // names are looked up from the platform's service registry (e.g.,
+ // Kubernetes services, Consul services, etc.) and from the hosts
+ // declared by [ServiceEntry](#ServiceEntry). Traffic forwarded to
+ // destinations that are not found in either of the two, will be dropped.
+ //
+ // *Note for Kubernetes users*: When short names are used (e.g. "reviews"
+ // instead of "reviews.default.svc.cluster.local"), Istio will interpret
+ // the short name based on the namespace of the rule, not the service. A
+ // rule in the "default" namespace containing a host "reviews" will be
+ // interpreted as "reviews.default.svc.cluster.local", irrespective of
+ // the actual namespace associated with the reviews service. _To avoid
+ // potential misconfigurations, it is recommended to always use fully
+ // qualified domain names over short names._
+ Host string `json:"host"`
+
+ // The name of a subset within the service. Applicable only to services
+ // within the mesh. The subset must be defined in a corresponding
+ // DestinationRule.
+ Subset string `json:"subset,omitempty"`
+
+ // Specifies the port on the host that is being addressed. If a service
+ // exposes only a single port it is not required to explicitly select the
+ // port.
+ Port PortSelector `json:"port,omitempty"`
+}
+
+// PortSelector specifies the number of a port to be used for
+// matching or selection for final routing.
+type PortSelector struct {
+ // Choose one of the fields below.
+
+ // Valid port number
+ Number uint32 `json:"number,omitempty"`
+
+ // Valid port name
+ Name string `json:"name,omitempty"`
+}
+
+// Describes match conditions and actions for routing TCP traffic. The
+// following routing rule forwards traffic arriving at port 27017 for
+// mongo.prod.svc.cluster.local from the 172.17.16.* subnet to another Mongo
+// server on port 5555.
+//
+// ```yaml
+// apiVersion: networking.istio.io/v1alpha3
+// kind: VirtualService
+// metadata:
+//   name: bookinfo-mongo
+// spec:
+//   hosts:
+//   - mongo.prod.svc.cluster.local
+//   tcp:
+//   - match:
+//     - port: 27017
+//       sourceSubnet: "172.17.16.0/24"
+//     route:
+//     - destination:
+//         host: mongo.backup.svc.cluster.local
+//         port:
+//           number: 5555
+// ```
+type TCPRoute struct {
+ // Match conditions to be satisfied for the rule to be
+ // activated. All conditions inside a single match block have AND
+ // semantics, while the list of match blocks have OR semantics. The rule
+ // is matched if any one of the match blocks succeeds.
+ Match []L4MatchAttributes `json:"match"`
+
+ // The destinations to which the connection should be forwarded. Weights
+ // must add to 100%.
+ Route []HTTPRouteDestination `json:"route"`
+}
+
+// Describes match conditions and actions for routing unterminated TLS
+// traffic (TLS/HTTPS). The following routing rule forwards unterminated TLS
+// traffic arriving at port 443 of a gateway called mygateway to internal
+// services in the mesh based on the SNI value.
+//
+// ```yaml
+// apiVersion: networking.istio.io/v1alpha3
+// kind: VirtualService
+// metadata:
+//   name: bookinfo-sni
+// spec:
+//   hosts:
+//   - '*.bookinfo.com'
+//   gateways:
+//   - mygateway
+//   tls:
+//   - match:
+//     - port: 443
+//       sniHosts:
+//       - login.bookinfo.com
+//     route:
+//     - destination:
+//         host: login.prod.svc.cluster.local
+//   - match:
+//     - port: 443
+//       sniHosts:
+//       - reviews.bookinfo.com
+//     route:
+//     - destination:
+//         host: reviews.prod.svc.cluster.local
+// ```
+type TLSRoute struct {
+ // REQUIRED. Match conditions to be satisfied for the rule to be
+ // activated. All conditions inside a single match block have AND
+ // semantics, while the list of match blocks have OR semantics. The rule
+ // is matched if any one of the match blocks succeeds.
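+ // Unlike HTTPRoute.Match, this list must contain at least one entry.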
+ Match []TLSMatchAttributes `json:"match"`
+
+ // The destination to which the connection should be forwarded.
+ Route []HTTPRouteDestination `json:"route"`
+}
+
+// L4 connection match attributes. Note that L4 connection matching support
+// is incomplete.
+type L4MatchAttributes struct {
+ // IPv4 or IPv6 IP address of the destination with optional subnet. E.g.,
+ // a.b.c.d/xx form or just a.b.c.d.
+ DestinationSubnets []string `json:"destinationSubnets,omitempty"`
+
+ // Specifies the port on the host that is being addressed. Many services
+ // only expose a single port or label ports with the protocols they support;
+ // in these cases it is not required to explicitly select the port.
+ Port int `json:"port,omitempty"`
+
+ // One or more labels that constrain the applicability of a rule to
+ // workloads with the given labels. If the VirtualService has a list of
+ // gateways specified at the top, it should include the reserved gateway
+ // `mesh` in order for this field to be applicable.
+ SourceLabels map[string]string `json:"sourceLabels,omitempty"`
+
+ // Names of gateways to which the rule should be applied. Gateway names
+ // at the top of the VirtualService (if any) are overridden. The gateway match is
+ // independent of sourceLabels.
+ Gateways []string `json:"gateways,omitempty"`
+}
+
+// TLS connection match attributes.
+type TLSMatchAttributes struct {
+ // REQUIRED. SNI (server name indicator) to match on. Wildcard prefixes
+ // can be used in the SNI value, e.g., *.com will match foo.example.com
+ // as well as example.com. An SNI value must be a subset (i.e., fall
+ // within the domain) of the corresponding virtual service's hosts.
+ SniHosts []string `json:"sniHosts"`
+
+ // IPv4 or IPv6 IP addresses of the destination with optional subnet. E.g.,
+ // a.b.c.d/xx form or just a.b.c.d.
+ DestinationSubnets []string `json:"destinationSubnets,omitempty"`
+
+ // Specifies the port on the host that is being addressed. Many services
+ // only expose a single port or label ports with the protocols they support;
+ // in these cases it is not required to explicitly select the port.
+ Port int `json:"port,omitempty"`
+
+ // One or more labels that constrain the applicability of a rule to
+ // workloads with the given labels. If the VirtualService has a list of
+ // gateways specified at the top, it should include the reserved gateway
+ // `mesh` in order for this field to be applicable.
+ SourceLabels map[string]string `json:"sourceLabels,omitempty"`
+
+ // Names of gateways to which the rule should be applied. Gateway names
+ // at the top of the VirtualService (if any) are overridden. The gateway match is
+ // independent of sourceLabels.
+ Gateways []string `json:"gateways,omitempty"`
+}
+
+// HTTPRedirect can be used to send a 302 redirect response to the caller,
+// where the Authority/Host and the URI in the response can be swapped with
+// the specified values. For example, the following rule redirects
+// requests for the /v1/getProductRatings API on the ratings service to
+// /v1/bookRatings provided by the bookratings service.
+//
+//   apiVersion: networking.istio.io/v1alpha3
+//   kind: VirtualService
+//   metadata:
+//     name: ratings-route
+//   spec:
+//     hosts:
+//     - ratings
+//     http:
+//     - match:
+//       - uri:
+//           exact: /v1/getProductRatings
+//       redirect:
+//         uri: /v1/bookRatings
+//         authority: bookratings.default.svc.cluster.local
+//     ...
+//
+type HTTPRedirect struct {
+ // On a redirect, overwrite the Path portion of the URL with this value.
+ // Note that the entire path will be replaced, irrespective of the
+ // request URI being matched as an exact path or prefix.
+ URI string `json:"uri,omitempty"`
+
+ // On a redirect, overwrite the Authority/Host portion of the URL with
+ // this value.
+ Authority string `json:"authority,omitempty"`
+}
+
+// HTTPRewrite can be used to rewrite specific parts of a HTTP request
+// before forwarding the request to the destination. The rewrite primitive
+// can be used only with HTTPRouteDestinations. The following example
+// demonstrates how to rewrite the URL prefix for an API call (/ratings) to
+// the ratings service before making the actual API call.
+//
+//   apiVersion: networking.istio.io/v1alpha3
+//   kind: VirtualService
+//   metadata:
+//     name: ratings-route
+//   spec:
+//     hosts:
+//     - ratings
+//     http:
+//     - match:
+//       - uri:
+//           prefix: /ratings
+//       rewrite:
+//         uri: /v1/bookRatings
+//       route:
+//       - destination:
+//           host: ratings
+//           subset: v1
+//
+type HTTPRewrite struct {
+ // Rewrite the path (or the prefix) portion of the URI with this
+ // value. If the original URI was matched based on prefix, the value
+ // provided in this field will replace the corresponding matched prefix.
+ URI string `json:"uri,omitempty"`
+
+ // Rewrite the Authority/Host header with this value.
+ Authority string `json:"authority,omitempty"`
+}
+
+// Describes the retry policy to use when a HTTP request fails. For
+// example, the following rule sets the maximum number of retries to 3 when
+// calling the ratings:v1 service, with a 2s timeout per retry attempt.
+//
+//   apiVersion: networking.istio.io/v1alpha3
+//   kind: VirtualService
+//   metadata:
+//     name: ratings-route
+//   spec:
+//     hosts:
+//     - ratings
+//     http:
+//     - route:
+//       - destination:
+//           host: ratings
+//           subset: v1
+//       retries:
+//         attempts: 3
+//         perTryTimeout: 2s
+//
+type HTTPRetry struct {
+ // REQUIRED. Number of retries for a given request. The interval
+ // between retries will be determined automatically (25ms+). The actual
+ // number of retries attempted depends on the httpReqTimeout.
+ Attempts int `json:"attempts"`
+
+ // Timeout per retry attempt for a given request. format: 1h/1m/1s/1ms. MUST BE >=1ms.
+ PerTryTimeout string `json:"perTryTimeout"`
+}
+
+// Describes the Cross-Origin Resource Sharing (CORS) policy for a given
+// service. Refer to
+// https://developer.mozilla.org/en-US/docs/Web/HTTP/Access_control_CORS
+// for further details about cross origin resource sharing. For example,
+// the following rule restricts cross origin requests to those originating
+// from the example.com domain using HTTP POST/GET, and sets the
+// Access-Control-Allow-Credentials header to false. In addition, it only
+// exposes the X-Foo-bar header and sets an expiry period of 1 day.
+//
+//   apiVersion: networking.istio.io/v1alpha3
+//   kind: VirtualService
+//   metadata:
+//     name: ratings-route
+//   spec:
+//     hosts:
+//     - ratings
+//     http:
+//     - route:
+//       - destination:
+//           host: ratings
+//           subset: v1
+//       corsPolicy:
+//         allowOrigin:
+//         - example.com
+//         allowMethods:
+//         - POST
+//         - GET
+//         allowCredentials: false
+//         allowHeaders:
+//         - X-Foo-Bar
+//         maxAge: "1d"
+//
+type CorsPolicy struct {
+ // The list of origins that are allowed to perform CORS requests. The
+ // content will be serialized into the Access-Control-Allow-Origin
+ // header. Wildcard * will allow all origins.
+ AllowOrigin []string `json:"allowOrigin,omitempty"`
+
+ // List of HTTP methods allowed to access the resource.
+ // The content will be serialized into the Access-Control-Allow-Methods header.
+ AllowMethods []string `json:"allowMethods,omitempty"`
+
+ // List of HTTP headers that can be used when requesting the
+ // resource. Serialized into the Access-Control-Allow-Headers header.
+ AllowHeaders []string `json:"allowHeaders,omitempty"`
+
+ // A whitelist of HTTP headers that the browsers are allowed to
+ // access. Serialized into the Access-Control-Expose-Headers header.
+ ExposeHeaders []string `json:"exposeHeaders,omitempty"`
+
+ // Specifies how long the results of a preflight request can be
+ // cached. Translates to the Access-Control-Max-Age header.
+ MaxAge string `json:"maxAge,omitempty"`
+
+ // Indicates whether the caller is allowed to send the actual request
+ // (not the preflight) using credentials. Translates to the
+ // Access-Control-Allow-Credentials header.
+ AllowCredentials bool `json:"allowCredentials,omitempty"`
+}
+
+// HTTPFaultInjection can be used to specify one or more faults to inject
+// while forwarding HTTP requests to the destination specified in a route.
+// Fault specification is part of a VirtualService rule. Faults include
+// aborting the HTTP request from the downstream service, and/or delaying
+// proxying of requests. A fault rule MUST HAVE delay or abort or both.
+//
+// *Note:* Delay and abort faults are independent of one another, even if
+// both are specified simultaneously.
+type HTTPFaultInjection struct {
+ // Delay requests before forwarding, emulating various failures such as
+ // network issues, overloaded upstream service, etc.
+ Delay *InjectDelay `json:"delay,omitempty"`
+
+ // Abort HTTP request attempts and return error codes back to downstream
+ // service, giving the impression that the upstream service is faulty.
+ Abort *InjectAbort `json:"abort,omitempty"`
+}
+
+// Delay specification is used to inject latency into the request
+// forwarding path. The following example will introduce a 5 second delay
+// in 10% of the requests to the "v1" version of the "reviews"
+// service from all pods with label env: prod.
+//
+//   apiVersion: networking.istio.io/v1alpha3
+//   kind: VirtualService
+//   metadata:
+//     name: reviews-route
+//   spec:
+//     hosts:
+//     - reviews
+//     http:
+//     - match:
+//       - sourceLabels:
+//           env: prod
+//       route:
+//       - destination:
+//           host: reviews
+//           subset: v1
+//       fault:
+//         delay:
+//           percent: 10
+//           fixedDelay: 5s
+//
+// The _fixedDelay_ field is used to indicate the amount of delay in
+// seconds. An optional _percent_ field, a value between 0 and 100, can
+// be used to only delay a certain percentage of requests. If left
+// unspecified, all requests will be delayed.
+type InjectDelay struct {
+ // Percentage of requests on which the delay will be injected (0-100).
+ Percent int `json:"percent,omitempty"`
+
+ // REQUIRED. Add a fixed delay before forwarding the request. Format:
+ // 1h/1m/1s/1ms. MUST be >=1ms.
+ FixedDelay string `json:"fixedDelay"`
+
+ // (-- Add a delay (based on an exponential function) before forwarding
+ // the request. mean delay needed to derive the exponential delay
+ // values --)
+ ExponentialDelay string `json:"exponentialDelay,omitempty"`
+}
+
+// Abort specification is used to prematurely abort a request with a
+// pre-specified error code. The following example will return an HTTP
+// 400 error code for 10% of the requests to the "ratings" service "v1".
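+// The abort is injected by the proxy; aborted requests are returned to the
+// caller without being forwarded to the upstream service.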
+// +// apiVersion: networking.istio.io/v1alpha3 +// kind: VirtualService +// metadata: +// name: ratings-route +// spec: +// hosts: +// - ratings +// http: +// - route: +// - destination: +// host: ratings +// subset: v1 +// fault: +// abort: +// percent: 10 +// httpStatus: 400 +// +// The _httpStatus_ field is used to indicate the HTTP status code to +// return to the caller. The optional _percent_ field, a value between 0 +// and 100, is used to only abort a certain percentage of requests. If +// not specified, all requests are aborted. +type InjectAbort struct { + // Percentage of requests to be aborted with the error code provided (0-100). + Percent int `json:"percent,omitempty"` + + // REQUIRED. HTTP status code to use to abort the Http request. + HTTPStatus int `json:"httpStatus"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// VirtualServiceList is a list of VirtualService resources +type VirtualServiceList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + Items []VirtualService `json:"items"` +} diff --git a/vendor/github.com/knative/pkg/apis/istio/v1alpha3/zz_generated.deepcopy.go b/vendor/github.com/knative/pkg/apis/istio/v1alpha3/zz_generated.deepcopy.go new file mode 100644 index 000000000..67cf72128 --- /dev/null +++ b/vendor/github.com/knative/pkg/apis/istio/v1alpha3/zz_generated.deepcopy.go @@ -0,0 +1,1159 @@ +// +build !ignore_autogenerated + +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1alpha3 + +import ( + v1alpha1 "github.com/knative/pkg/apis/istio/common/v1alpha1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectionPoolSettings) DeepCopyInto(out *ConnectionPoolSettings) { + *out = *in + if in.TCP != nil { + in, out := &in.TCP, &out.TCP + *out = new(TCPSettings) + **out = **in + } + if in.HTTP != nil { + in, out := &in.HTTP, &out.HTTP + *out = new(HTTPSettings) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionPoolSettings. +func (in *ConnectionPoolSettings) DeepCopy() *ConnectionPoolSettings { + if in == nil { + return nil + } + out := new(ConnectionPoolSettings) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConsistentHashLB) DeepCopyInto(out *ConsistentHashLB) { + *out = *in + if in.HTTPCookie != nil { + in, out := &in.HTTPCookie, &out.HTTPCookie + *out = new(HTTPCookie) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsistentHashLB. 
+func (in *ConsistentHashLB) DeepCopy() *ConsistentHashLB { + if in == nil { + return nil + } + out := new(ConsistentHashLB) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CorsPolicy) DeepCopyInto(out *CorsPolicy) { + *out = *in + if in.AllowOrigin != nil { + in, out := &in.AllowOrigin, &out.AllowOrigin + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.AllowMethods != nil { + in, out := &in.AllowMethods, &out.AllowMethods + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.AllowHeaders != nil { + in, out := &in.AllowHeaders, &out.AllowHeaders + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ExposeHeaders != nil { + in, out := &in.ExposeHeaders, &out.ExposeHeaders + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CorsPolicy. +func (in *CorsPolicy) DeepCopy() *CorsPolicy { + if in == nil { + return nil + } + out := new(CorsPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Destination) DeepCopyInto(out *Destination) { + *out = *in + out.Port = in.Port + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Destination. +func (in *Destination) DeepCopy() *Destination { + if in == nil { + return nil + } + out := new(Destination) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DestinationRule) DeepCopyInto(out *DestinationRule) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DestinationRule. +func (in *DestinationRule) DeepCopy() *DestinationRule { + if in == nil { + return nil + } + out := new(DestinationRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DestinationRule) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DestinationRuleList) DeepCopyInto(out *DestinationRuleList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DestinationRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DestinationRuleList. +func (in *DestinationRuleList) DeepCopy() *DestinationRuleList { + if in == nil { + return nil + } + out := new(DestinationRuleList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DestinationRuleList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DestinationRuleSpec) DeepCopyInto(out *DestinationRuleSpec) { + *out = *in + if in.TrafficPolicy != nil { + in, out := &in.TrafficPolicy, &out.TrafficPolicy + *out = new(TrafficPolicy) + (*in).DeepCopyInto(*out) + } + if in.Subsets != nil { + in, out := &in.Subsets, &out.Subsets + *out = make([]Subset, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DestinationRuleSpec. +func (in *DestinationRuleSpec) DeepCopy() *DestinationRuleSpec { + if in == nil { + return nil + } + out := new(DestinationRuleSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Gateway) DeepCopyInto(out *Gateway) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Gateway. +func (in *Gateway) DeepCopy() *Gateway { + if in == nil { + return nil + } + out := new(Gateway) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Gateway) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GatewayList) DeepCopyInto(out *GatewayList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Gateway, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatewayList. +func (in *GatewayList) DeepCopy() *GatewayList { + if in == nil { + return nil + } + out := new(GatewayList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *GatewayList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GatewaySpec) DeepCopyInto(out *GatewaySpec) { + *out = *in + if in.Servers != nil { + in, out := &in.Servers, &out.Servers + *out = make([]Server, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatewaySpec. +func (in *GatewaySpec) DeepCopy() *GatewaySpec { + if in == nil { + return nil + } + out := new(GatewaySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPCookie) DeepCopyInto(out *HTTPCookie) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPCookie. 
+func (in *HTTPCookie) DeepCopy() *HTTPCookie { + if in == nil { + return nil + } + out := new(HTTPCookie) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPFaultInjection) DeepCopyInto(out *HTTPFaultInjection) { + *out = *in + if in.Delay != nil { + in, out := &in.Delay, &out.Delay + *out = new(InjectDelay) + **out = **in + } + if in.Abort != nil { + in, out := &in.Abort, &out.Abort + *out = new(InjectAbort) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPFaultInjection. +func (in *HTTPFaultInjection) DeepCopy() *HTTPFaultInjection { + if in == nil { + return nil + } + out := new(HTTPFaultInjection) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPMatchRequest) DeepCopyInto(out *HTTPMatchRequest) { + *out = *in + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(v1alpha1.StringMatch) + **out = **in + } + if in.Scheme != nil { + in, out := &in.Scheme, &out.Scheme + *out = new(v1alpha1.StringMatch) + **out = **in + } + if in.Method != nil { + in, out := &in.Method, &out.Method + *out = new(v1alpha1.StringMatch) + **out = **in + } + if in.Authority != nil { + in, out := &in.Authority, &out.Authority + *out = new(v1alpha1.StringMatch) + **out = **in + } + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make(map[string]v1alpha1.StringMatch, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.SourceLabels != nil { + in, out := &in.SourceLabels, &out.SourceLabels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Gateways != nil { + in, out := &in.Gateways, &out.Gateways + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPMatchRequest. +func (in *HTTPMatchRequest) DeepCopy() *HTTPMatchRequest { + if in == nil { + return nil + } + out := new(HTTPMatchRequest) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPRedirect) DeepCopyInto(out *HTTPRedirect) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRedirect. +func (in *HTTPRedirect) DeepCopy() *HTTPRedirect { + if in == nil { + return nil + } + out := new(HTTPRedirect) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPRetry) DeepCopyInto(out *HTTPRetry) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRetry. +func (in *HTTPRetry) DeepCopy() *HTTPRetry { + if in == nil { + return nil + } + out := new(HTTPRetry) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPRewrite) DeepCopyInto(out *HTTPRewrite) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRewrite. 
+func (in *HTTPRewrite) DeepCopy() *HTTPRewrite { + if in == nil { + return nil + } + out := new(HTTPRewrite) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPRoute) DeepCopyInto(out *HTTPRoute) { + *out = *in + if in.Match != nil { + in, out := &in.Match, &out.Match + *out = make([]HTTPMatchRequest, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Route != nil { + in, out := &in.Route, &out.Route + *out = make([]HTTPRouteDestination, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Redirect != nil { + in, out := &in.Redirect, &out.Redirect + *out = new(HTTPRedirect) + **out = **in + } + if in.Rewrite != nil { + in, out := &in.Rewrite, &out.Rewrite + *out = new(HTTPRewrite) + **out = **in + } + if in.Retries != nil { + in, out := &in.Retries, &out.Retries + *out = new(HTTPRetry) + **out = **in + } + if in.Fault != nil { + in, out := &in.Fault, &out.Fault + *out = new(HTTPFaultInjection) + (*in).DeepCopyInto(*out) + } + if in.Mirror != nil { + in, out := &in.Mirror, &out.Mirror + *out = new(Destination) + **out = **in + } + if in.DeprecatedAppendHeaders != nil { + in, out := &in.DeprecatedAppendHeaders, &out.DeprecatedAppendHeaders + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = new(Headers) + (*in).DeepCopyInto(*out) + } + if in.RemoveResponseHeaders != nil { + in, out := &in.RemoveResponseHeaders, &out.RemoveResponseHeaders + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.CorsPolicy != nil { + in, out := &in.CorsPolicy, &out.CorsPolicy + *out = new(CorsPolicy) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRoute. +func (in *HTTPRoute) DeepCopy() *HTTPRoute { + if in == nil { + return nil + } + out := new(HTTPRoute) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPRouteDestination) DeepCopyInto(out *HTTPRouteDestination) { + *out = *in + out.Destination = in.Destination + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = new(Headers) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRouteDestination. +func (in *HTTPRouteDestination) DeepCopy() *HTTPRouteDestination { + if in == nil { + return nil + } + out := new(HTTPRouteDestination) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPSettings) DeepCopyInto(out *HTTPSettings) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPSettings. +func (in *HTTPSettings) DeepCopy() *HTTPSettings { + if in == nil { + return nil + } + out := new(HTTPSettings) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HeaderOperations) DeepCopyInto(out *HeaderOperations) { + *out = *in + if in.Set != nil { + in, out := &in.Set, &out.Set + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Add != nil { + in, out := &in.Add, &out.Add + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Remove != nil { + in, out := &in.Remove, &out.Remove + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HeaderOperations. +func (in *HeaderOperations) DeepCopy() *HeaderOperations { + if in == nil { + return nil + } + out := new(HeaderOperations) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Headers) DeepCopyInto(out *Headers) { + *out = *in + if in.Request != nil { + in, out := &in.Request, &out.Request + *out = new(HeaderOperations) + (*in).DeepCopyInto(*out) + } + if in.Response != nil { + in, out := &in.Response, &out.Response + *out = new(HeaderOperations) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Headers. +func (in *Headers) DeepCopy() *Headers { + if in == nil { + return nil + } + out := new(Headers) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InjectAbort) DeepCopyInto(out *InjectAbort) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InjectAbort. +func (in *InjectAbort) DeepCopy() *InjectAbort { + if in == nil { + return nil + } + out := new(InjectAbort) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InjectDelay) DeepCopyInto(out *InjectDelay) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InjectDelay. +func (in *InjectDelay) DeepCopy() *InjectDelay { + if in == nil { + return nil + } + out := new(InjectDelay) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *L4MatchAttributes) DeepCopyInto(out *L4MatchAttributes) { + *out = *in + if in.DestinationSubnets != nil { + in, out := &in.DestinationSubnets, &out.DestinationSubnets + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.SourceLabels != nil { + in, out := &in.SourceLabels, &out.SourceLabels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Gateways != nil { + in, out := &in.Gateways, &out.Gateways + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new L4MatchAttributes. +func (in *L4MatchAttributes) DeepCopy() *L4MatchAttributes { + if in == nil { + return nil + } + out := new(L4MatchAttributes) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LoadBalancerSettings) DeepCopyInto(out *LoadBalancerSettings) { + *out = *in + if in.ConsistentHash != nil { + in, out := &in.ConsistentHash, &out.ConsistentHash + *out = new(ConsistentHashLB) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBalancerSettings. +func (in *LoadBalancerSettings) DeepCopy() *LoadBalancerSettings { + if in == nil { + return nil + } + out := new(LoadBalancerSettings) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OutlierDetection) DeepCopyInto(out *OutlierDetection) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutlierDetection. +func (in *OutlierDetection) DeepCopy() *OutlierDetection { + if in == nil { + return nil + } + out := new(OutlierDetection) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Port) DeepCopyInto(out *Port) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Port. +func (in *Port) DeepCopy() *Port { + if in == nil { + return nil + } + out := new(Port) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PortSelector) DeepCopyInto(out *PortSelector) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PortSelector. +func (in *PortSelector) DeepCopy() *PortSelector { + if in == nil { + return nil + } + out := new(PortSelector) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PortTrafficPolicy) DeepCopyInto(out *PortTrafficPolicy) { + *out = *in + out.Port = in.Port + if in.LoadBalancer != nil { + in, out := &in.LoadBalancer, &out.LoadBalancer + *out = new(LoadBalancerSettings) + (*in).DeepCopyInto(*out) + } + if in.ConnectionPool != nil { + in, out := &in.ConnectionPool, &out.ConnectionPool + *out = new(ConnectionPoolSettings) + (*in).DeepCopyInto(*out) + } + if in.OutlierDetection != nil { + in, out := &in.OutlierDetection, &out.OutlierDetection + *out = new(OutlierDetection) + **out = **in + } + if in.TLS != nil { + in, out := &in.TLS, &out.TLS + *out = new(TLSSettings) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PortTrafficPolicy. +func (in *PortTrafficPolicy) DeepCopy() *PortTrafficPolicy { + if in == nil { + return nil + } + out := new(PortTrafficPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Server) DeepCopyInto(out *Server) { + *out = *in + out.Port = in.Port + if in.Hosts != nil { + in, out := &in.Hosts, &out.Hosts + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.TLS != nil { + in, out := &in.TLS, &out.TLS + *out = new(TLSOptions) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Server. 
+func (in *Server) DeepCopy() *Server { + if in == nil { + return nil + } + out := new(Server) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Subset) DeepCopyInto(out *Subset) { + *out = *in + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.TrafficPolicy != nil { + in, out := &in.TrafficPolicy, &out.TrafficPolicy + *out = new(TrafficPolicy) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Subset. +func (in *Subset) DeepCopy() *Subset { + if in == nil { + return nil + } + out := new(Subset) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TCPRoute) DeepCopyInto(out *TCPRoute) { + *out = *in + if in.Match != nil { + in, out := &in.Match, &out.Match + *out = make([]L4MatchAttributes, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Route != nil { + in, out := &in.Route, &out.Route + *out = make([]HTTPRouteDestination, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPRoute. +func (in *TCPRoute) DeepCopy() *TCPRoute { + if in == nil { + return nil + } + out := new(TCPRoute) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TCPSettings) DeepCopyInto(out *TCPSettings) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPSettings. +func (in *TCPSettings) DeepCopy() *TCPSettings { + if in == nil { + return nil + } + out := new(TCPSettings) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TLSMatchAttributes) DeepCopyInto(out *TLSMatchAttributes) { + *out = *in + if in.SniHosts != nil { + in, out := &in.SniHosts, &out.SniHosts + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.DestinationSubnets != nil { + in, out := &in.DestinationSubnets, &out.DestinationSubnets + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.SourceLabels != nil { + in, out := &in.SourceLabels, &out.SourceLabels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Gateways != nil { + in, out := &in.Gateways, &out.Gateways + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSMatchAttributes. +func (in *TLSMatchAttributes) DeepCopy() *TLSMatchAttributes { + if in == nil { + return nil + } + out := new(TLSMatchAttributes) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TLSOptions) DeepCopyInto(out *TLSOptions) { + *out = *in + if in.SubjectAltNames != nil { + in, out := &in.SubjectAltNames, &out.SubjectAltNames + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSOptions. +func (in *TLSOptions) DeepCopy() *TLSOptions { + if in == nil { + return nil + } + out := new(TLSOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TLSRoute) DeepCopyInto(out *TLSRoute) { + *out = *in + if in.Match != nil { + in, out := &in.Match, &out.Match + *out = make([]TLSMatchAttributes, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Route != nil { + in, out := &in.Route, &out.Route + *out = make([]HTTPRouteDestination, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSRoute. +func (in *TLSRoute) DeepCopy() *TLSRoute { + if in == nil { + return nil + } + out := new(TLSRoute) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TLSSettings) DeepCopyInto(out *TLSSettings) { + *out = *in + if in.SubjectAltNames != nil { + in, out := &in.SubjectAltNames, &out.SubjectAltNames + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSSettings. +func (in *TLSSettings) DeepCopy() *TLSSettings { + if in == nil { + return nil + } + out := new(TLSSettings) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TrafficPolicy) DeepCopyInto(out *TrafficPolicy) { + *out = *in + if in.LoadBalancer != nil { + in, out := &in.LoadBalancer, &out.LoadBalancer + *out = new(LoadBalancerSettings) + (*in).DeepCopyInto(*out) + } + if in.ConnectionPool != nil { + in, out := &in.ConnectionPool, &out.ConnectionPool + *out = new(ConnectionPoolSettings) + (*in).DeepCopyInto(*out) + } + if in.OutlierDetection != nil { + in, out := &in.OutlierDetection, &out.OutlierDetection + *out = new(OutlierDetection) + **out = **in + } + if in.TLS != nil { + in, out := &in.TLS, &out.TLS + *out = new(TLSSettings) + (*in).DeepCopyInto(*out) + } + if in.PortLevelSettings != nil { + in, out := &in.PortLevelSettings, &out.PortLevelSettings + *out = make([]PortTrafficPolicy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrafficPolicy. +func (in *TrafficPolicy) DeepCopy() *TrafficPolicy { + if in == nil { + return nil + } + out := new(TrafficPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualService) DeepCopyInto(out *VirtualService) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualService. 
+func (in *VirtualService) DeepCopy() *VirtualService { + if in == nil { + return nil + } + out := new(VirtualService) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VirtualService) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualServiceList) DeepCopyInto(out *VirtualServiceList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]VirtualService, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualServiceList. +func (in *VirtualServiceList) DeepCopy() *VirtualServiceList { + if in == nil { + return nil + } + out := new(VirtualServiceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VirtualServiceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualServiceSpec) DeepCopyInto(out *VirtualServiceSpec) { + *out = *in + if in.Hosts != nil { + in, out := &in.Hosts, &out.Hosts + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Gateways != nil { + in, out := &in.Gateways, &out.Gateways + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.HTTP != nil { + in, out := &in.HTTP, &out.HTTP + *out = make([]HTTPRoute, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TCP != nil { + in, out := &in.TCP, &out.TCP + *out = make([]TCPRoute, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TLS != nil { + in, out := &in.TLS, &out.TLS + *out = make([]TLSRoute, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualServiceSpec. +func (in *VirtualServiceSpec) DeepCopy() *VirtualServiceSpec { + if in == nil { + return nil + } + out := new(VirtualServiceSpec) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/knative/pkg/apis/testing/conditions.go b/vendor/github.com/knative/pkg/apis/testing/conditions.go new file mode 100644 index 000000000..5aee8880c --- /dev/null +++ b/vendor/github.com/knative/pkg/apis/testing/conditions.go @@ -0,0 +1,61 @@ +/* +Copyright 2019 The Knative Authors + Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package testing
+
+import (
+    "fmt"
+    "testing"
+
+    "github.com/knative/pkg/apis"
+    duckv1b1 "github.com/knative/pkg/apis/duck/v1beta1"
+    corev1 "k8s.io/api/core/v1"
+)
+
+// CheckCondition checks if condition `c` on `s` has value `cs`.
+func CheckCondition(s *duckv1b1.Status, c apis.ConditionType, cs corev1.ConditionStatus) error {
+    cond := s.GetCondition(c)
+    if cond == nil {
+        return fmt.Errorf("condition %v is nil", c)
+    }
+    if cond.Status != cs {
+        return fmt.Errorf("condition(%v) = %v, wanted: %v", c, cond, cs)
+    }
+    return nil
+}
+
+// CheckConditionOngoing checks if the condition is in state `Unknown`.
+func CheckConditionOngoing(s *duckv1b1.Status, c apis.ConditionType, t *testing.T) {
+    t.Helper()
+    if err := CheckCondition(s, c, corev1.ConditionUnknown); err != nil {
+        t.Error(err)
+    }
+}
+
+// CheckConditionFailed checks if the condition is in state `False`.
+func CheckConditionFailed(s *duckv1b1.Status, c apis.ConditionType, t *testing.T) {
+    t.Helper()
+    if err := CheckCondition(s, c, corev1.ConditionFalse); err != nil {
+        t.Error(err)
+    }
+}
+
+// CheckConditionSucceeded checks if the condition is in state `True`.
+func CheckConditionSucceeded(s *duckv1b1.Status, c apis.ConditionType, t *testing.T) {
+    t.Helper()
+    if err := CheckCondition(s, c, corev1.ConditionTrue); err != nil {
+        t.Error(err)
+    }
+}
diff --git a/vendor/github.com/knative/pkg/changeset/commit.go b/vendor/github.com/knative/pkg/changeset/commit.go
new file mode 100644
index 000000000..77ed70932
--- /dev/null
+++ b/vendor/github.com/knative/pkg/changeset/commit.go
@@ -0,0 +1,64 @@
+/*
+Copyright 2018 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package changeset
+
+import (
+    "fmt"
+    "io/ioutil"
+    "os"
+    "path/filepath"
+    "regexp"
+    "strings"
+)
+
+const (
+    commitIDFile      = "HEAD"
+    koDataPathEnvName = "KO_DATA_PATH"
+)
+
+var (
+    commitIDRE = regexp.MustCompile(`^[a-f0-9]{40}$`)
+)
+
+// Get tries to fetch the first 7 digits of the GitHub commit ID from the
+// HEAD file in KO_DATA_PATH. If it fails, it returns the error it gets.
+func Get() (string, error) {
+    data, err := readFileFromKoData(commitIDFile)
+    if err != nil {
+        return "", err
+    }
+    commitID := strings.TrimSpace(string(data))
+    if !commitIDRE.MatchString(commitID) {
+        err := fmt.Errorf("%q is not a valid GitHub commit ID", commitID)
+        return "", err
+    }
+    return commitID[0:7], nil
+}
+
+// readFileFromKoData reads the file with the given name under KO_DATA_PATH
+// and returns its contents as bytes. The file is expected to be wrapped into
+// the container from /kodata by ko. If it fails, it returns the error it gets.
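+// For example, with KO_DATA_PATH set to "/var/run/ko" (an illustrative
+// path) and filename "HEAD", it reads /var/run/ko/HEAD.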
+func readFileFromKoData(filename string) ([]byte, error) {
+    koDataPath := os.Getenv(koDataPathEnvName)
+    if koDataPath == "" {
+        err := fmt.Errorf("%q does not exist or is empty", koDataPathEnvName)
+        return nil, err
+    }
+    fullFilename := filepath.Join(koDataPath, filename)
+    return ioutil.ReadFile(fullFilename)
+}
diff --git a/vendor/github.com/knative/pkg/changeset/doc.go b/vendor/github.com/knative/pkg/changeset/doc.go
new file mode 100644
index 000000000..c56f7ebfb
--- /dev/null
+++ b/vendor/github.com/knative/pkg/changeset/doc.go
@@ -0,0 +1,23 @@
+/*
+Copyright 2018 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package changeset provides Knative utilities for fetching the GitHub commit
+// ID from the kodata directory. It requires the GitHub HEAD file to be linked
+// into the Knative component source code via the following command:
+//   ln -s -r .git/HEAD ./cmd//kodata/
+// Then ko will build this file into $KO_DATA_PATH when building the container
+// for a Knative component.
+package changeset
diff --git a/vendor/github.com/knative/pkg/changeset/testdata/HEAD b/vendor/github.com/knative/pkg/changeset/testdata/HEAD
new file mode 100644
index 000000000..58b38ebae
--- /dev/null
+++ b/vendor/github.com/knative/pkg/changeset/testdata/HEAD
@@ -0,0 +1 @@
+a2d1bdfe929516d7da141aef68631a7ee6941b2d
diff --git a/vendor/github.com/knative/pkg/changeset/testdata/noncommitted/HEAD b/vendor/github.com/knative/pkg/changeset/testdata/noncommitted/HEAD
new file mode 100644
index 000000000..89fe2d356
--- /dev/null
+++ b/vendor/github.com/knative/pkg/changeset/testdata/noncommitted/HEAD
@@ -0,0 +1 @@
+ref: refs/heads/non_committed_branch
diff --git a/vendor/github.com/knative/pkg/client/clientset/versioned/clientset.go b/vendor/github.com/knative/pkg/client/clientset/versioned/clientset.go
new file mode 100644
index 000000000..5c00fdb6a
--- /dev/null
+++ b/vendor/github.com/knative/pkg/client/clientset/versioned/clientset.go
@@ -0,0 +1,120 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+ +package versioned + +import ( + authenticationv1alpha1 "github.com/knative/pkg/client/clientset/versioned/typed/authentication/v1alpha1" + networkingv1alpha3 "github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3" + discovery "k8s.io/client-go/discovery" + rest "k8s.io/client-go/rest" + flowcontrol "k8s.io/client-go/util/flowcontrol" +) + +type Interface interface { + Discovery() discovery.DiscoveryInterface + AuthenticationV1alpha1() authenticationv1alpha1.AuthenticationV1alpha1Interface + // Deprecated: please explicitly pick a version if possible. + Authentication() authenticationv1alpha1.AuthenticationV1alpha1Interface + NetworkingV1alpha3() networkingv1alpha3.NetworkingV1alpha3Interface + // Deprecated: please explicitly pick a version if possible. + Networking() networkingv1alpha3.NetworkingV1alpha3Interface +} + +// Clientset contains the clients for groups. Each group has exactly one +// version included in a Clientset. +type Clientset struct { + *discovery.DiscoveryClient + authenticationV1alpha1 *authenticationv1alpha1.AuthenticationV1alpha1Client + networkingV1alpha3 *networkingv1alpha3.NetworkingV1alpha3Client +} + +// AuthenticationV1alpha1 retrieves the AuthenticationV1alpha1Client +func (c *Clientset) AuthenticationV1alpha1() authenticationv1alpha1.AuthenticationV1alpha1Interface { + return c.authenticationV1alpha1 +} + +// Deprecated: Authentication retrieves the default version of AuthenticationClient. +// Please explicitly pick a version. +func (c *Clientset) Authentication() authenticationv1alpha1.AuthenticationV1alpha1Interface { + return c.authenticationV1alpha1 +} + +// NetworkingV1alpha3 retrieves the NetworkingV1alpha3Client +func (c *Clientset) NetworkingV1alpha3() networkingv1alpha3.NetworkingV1alpha3Interface { + return c.networkingV1alpha3 +} + +// Deprecated: Networking retrieves the default version of NetworkingClient. +// Please explicitly pick a version. +func (c *Clientset) Networking() networkingv1alpha3.NetworkingV1alpha3Interface { + return c.networkingV1alpha3 +} + +// Discovery retrieves the DiscoveryClient +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + if c == nil { + return nil + } + return c.DiscoveryClient +} + +// NewForConfig creates a new Clientset for the given config. +func NewForConfig(c *rest.Config) (*Clientset, error) { + configShallowCopy := *c + if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { + configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) + } + var cs Clientset + var err error + cs.authenticationV1alpha1, err = authenticationv1alpha1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + cs.networkingV1alpha3, err = networkingv1alpha3.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + + cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + return &cs, nil +} + +// NewForConfigOrDie creates a new Clientset for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *Clientset { + var cs Clientset + cs.authenticationV1alpha1 = authenticationv1alpha1.NewForConfigOrDie(c) + cs.networkingV1alpha3 = networkingv1alpha3.NewForConfigOrDie(c) + + cs.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c) + return &cs +} + +// New creates a new Clientset for the given RESTClient. 
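+// Clients are more commonly constructed from a *rest.Config via NewForConfig;
+// a minimal sketch, assuming an in-cluster controller and eliding error
+// handling (metav1 stands for k8s.io/apimachinery/pkg/apis/meta/v1):
+//
+//   cfg, _ := rest.InClusterConfig()
+//   cs, _ := versioned.NewForConfig(cfg)
+//   policies, _ := cs.AuthenticationV1alpha1().Policies("default").List(metav1.ListOptions{})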
+func New(c rest.Interface) *Clientset { + var cs Clientset + cs.authenticationV1alpha1 = authenticationv1alpha1.New(c) + cs.networkingV1alpha3 = networkingv1alpha3.New(c) + + cs.DiscoveryClient = discovery.NewDiscoveryClient(c) + return &cs +} diff --git a/vendor/github.com/knative/pkg/client/clientset/versioned/doc.go b/vendor/github.com/knative/pkg/client/clientset/versioned/doc.go new file mode 100644 index 000000000..1122e50bf --- /dev/null +++ b/vendor/github.com/knative/pkg/client/clientset/versioned/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated clientset. +package versioned diff --git a/vendor/github.com/knative/pkg/client/clientset/versioned/fake/clientset_generated.go b/vendor/github.com/knative/pkg/client/clientset/versioned/fake/clientset_generated.go new file mode 100644 index 000000000..df8348fcf --- /dev/null +++ b/vendor/github.com/knative/pkg/client/clientset/versioned/fake/clientset_generated.go @@ -0,0 +1,94 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + clientset "github.com/knative/pkg/client/clientset/versioned" + authenticationv1alpha1 "github.com/knative/pkg/client/clientset/versioned/typed/authentication/v1alpha1" + fakeauthenticationv1alpha1 "github.com/knative/pkg/client/clientset/versioned/typed/authentication/v1alpha1/fake" + networkingv1alpha3 "github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3" + fakenetworkingv1alpha3 "github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/fake" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/discovery" + fakediscovery "k8s.io/client-go/discovery/fake" + "k8s.io/client-go/testing" +) + +// NewSimpleClientset returns a clientset that will respond with the provided objects. +// It's backed by a very simple object tracker that processes creates, updates and deletions as-is, +// without applying any validations and/or defaults. It shouldn't be considered a replacement +// for a real clientset and is mostly useful in simple unit tests. 
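+//
+// A short usage sketch (the seeded object and names are illustrative;
+// metav1 stands for k8s.io/apimachinery/pkg/apis/meta/v1):
+//
+//   cs := NewSimpleClientset(existingPolicy)
+//   got, err := cs.AuthenticationV1alpha1().Policies("default").Get("example", metav1.GetOptions{})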
+func NewSimpleClientset(objects ...runtime.Object) *Clientset { + o := testing.NewObjectTracker(scheme, codecs.UniversalDecoder()) + for _, obj := range objects { + if err := o.Add(obj); err != nil { + panic(err) + } + } + + cs := &Clientset{} + cs.discovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake} + cs.AddReactor("*", "*", testing.ObjectReaction(o)) + cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { + gvr := action.GetResource() + ns := action.GetNamespace() + watch, err := o.Watch(gvr, ns) + if err != nil { + return false, nil, err + } + return true, watch, nil + }) + + return cs +} + +// Clientset implements clientset.Interface. Meant to be embedded into a +// struct to get a default implementation. This makes faking out just the method +// you want to test easier. +type Clientset struct { + testing.Fake + discovery *fakediscovery.FakeDiscovery +} + +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + return c.discovery +} + +var _ clientset.Interface = &Clientset{} + +// AuthenticationV1alpha1 retrieves the AuthenticationV1alpha1Client +func (c *Clientset) AuthenticationV1alpha1() authenticationv1alpha1.AuthenticationV1alpha1Interface { + return &fakeauthenticationv1alpha1.FakeAuthenticationV1alpha1{Fake: &c.Fake} +} + +// Authentication retrieves the AuthenticationV1alpha1Client +func (c *Clientset) Authentication() authenticationv1alpha1.AuthenticationV1alpha1Interface { + return &fakeauthenticationv1alpha1.FakeAuthenticationV1alpha1{Fake: &c.Fake} +} + +// NetworkingV1alpha3 retrieves the NetworkingV1alpha3Client +func (c *Clientset) NetworkingV1alpha3() networkingv1alpha3.NetworkingV1alpha3Interface { + return &fakenetworkingv1alpha3.FakeNetworkingV1alpha3{Fake: &c.Fake} +} + +// Networking retrieves the NetworkingV1alpha3Client +func (c *Clientset) Networking() networkingv1alpha3.NetworkingV1alpha3Interface { + return &fakenetworkingv1alpha3.FakeNetworkingV1alpha3{Fake: &c.Fake} +} diff --git a/vendor/github.com/knative/pkg/client/clientset/versioned/fake/doc.go b/vendor/github.com/knative/pkg/client/clientset/versioned/fake/doc.go new file mode 100644 index 000000000..87f3c3e0b --- /dev/null +++ b/vendor/github.com/knative/pkg/client/clientset/versioned/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated fake clientset. +package fake diff --git a/vendor/github.com/knative/pkg/client/clientset/versioned/fake/register.go b/vendor/github.com/knative/pkg/client/clientset/versioned/fake/register.go new file mode 100644 index 000000000..f00f2c9fe --- /dev/null +++ b/vendor/github.com/knative/pkg/client/clientset/versioned/fake/register.go @@ -0,0 +1,58 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + authenticationv1alpha1 "github.com/knative/pkg/apis/istio/authentication/v1alpha1" + networkingv1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +var scheme = runtime.NewScheme() +var codecs = serializer.NewCodecFactory(scheme) +var parameterCodec = runtime.NewParameterCodec(scheme) +var localSchemeBuilder = runtime.SchemeBuilder{ + authenticationv1alpha1.AddToScheme, + networkingv1alpha3.AddToScheme, +} + +// AddToScheme adds all types of this clientset into the given scheme. This allows composition +// of clientsets, like in: +// +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) +// +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// +// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types +// correctly. +var AddToScheme = localSchemeBuilder.AddToScheme + +func init() { + v1.AddToGroupVersion(scheme, schema.GroupVersion{Version: "v1"}) + utilruntime.Must(AddToScheme(scheme)) +} diff --git a/vendor/github.com/knative/pkg/client/clientset/versioned/scheme/doc.go b/vendor/github.com/knative/pkg/client/clientset/versioned/scheme/doc.go new file mode 100644 index 000000000..7d7653848 --- /dev/null +++ b/vendor/github.com/knative/pkg/client/clientset/versioned/scheme/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package contains the scheme of the automatically generated clientset. +package scheme diff --git a/vendor/github.com/knative/pkg/client/clientset/versioned/scheme/register.go b/vendor/github.com/knative/pkg/client/clientset/versioned/scheme/register.go new file mode 100644 index 000000000..cca6f2788 --- /dev/null +++ b/vendor/github.com/knative/pkg/client/clientset/versioned/scheme/register.go @@ -0,0 +1,58 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package scheme + +import ( + authenticationv1alpha1 "github.com/knative/pkg/apis/istio/authentication/v1alpha1" + networkingv1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +var Scheme = runtime.NewScheme() +var Codecs = serializer.NewCodecFactory(Scheme) +var ParameterCodec = runtime.NewParameterCodec(Scheme) +var localSchemeBuilder = runtime.SchemeBuilder{ + authenticationv1alpha1.AddToScheme, + networkingv1alpha3.AddToScheme, +} + +// AddToScheme adds all types of this clientset into the given scheme. This allows composition +// of clientsets, like in: +// +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) +// +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// +// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types +// correctly. +var AddToScheme = localSchemeBuilder.AddToScheme + +func init() { + v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"}) + utilruntime.Must(AddToScheme(Scheme)) +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/node_client.go b/vendor/github.com/knative/pkg/client/clientset/versioned/typed/authentication/v1alpha1/authentication_client.go similarity index 51% rename from vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/node_client.go rename to vendor/github.com/knative/pkg/client/clientset/versioned/typed/authentication/v1alpha1/authentication_client.go index b38d9acac..918f9cd78 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/node_client.go +++ b/vendor/github.com/knative/pkg/client/clientset/versioned/typed/authentication/v1alpha1/authentication_client.go @@ -1,5 +1,5 @@ /* -Copyright The Kubernetes Authors. +Copyright 2019 The Knative Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -16,30 +16,31 @@ limitations under the License. // Code generated by client-gen. DO NOT EDIT. -package v1beta1 +package v1alpha1 import ( - v1beta1 "k8s.io/api/node/v1beta1" - "k8s.io/client-go/kubernetes/scheme" + v1alpha1 "github.com/knative/pkg/apis/istio/authentication/v1alpha1" + "github.com/knative/pkg/client/clientset/versioned/scheme" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" rest "k8s.io/client-go/rest" ) -type NodeV1beta1Interface interface { +type AuthenticationV1alpha1Interface interface { RESTClient() rest.Interface - RuntimeClassesGetter + PoliciesGetter } -// NodeV1beta1Client is used to interact with features provided by the node.k8s.io group. 
-type NodeV1beta1Client struct { +// AuthenticationV1alpha1Client is used to interact with features provided by the authentication.istio.io group. +type AuthenticationV1alpha1Client struct { restClient rest.Interface } -func (c *NodeV1beta1Client) RuntimeClasses() RuntimeClassInterface { - return newRuntimeClasses(c) +func (c *AuthenticationV1alpha1Client) Policies(namespace string) PolicyInterface { + return newPolicies(c, namespace) } -// NewForConfig creates a new NodeV1beta1Client for the given config. -func NewForConfig(c *rest.Config) (*NodeV1beta1Client, error) { +// NewForConfig creates a new AuthenticationV1alpha1Client for the given config. +func NewForConfig(c *rest.Config) (*AuthenticationV1alpha1Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err @@ -48,12 +49,12 @@ func NewForConfig(c *rest.Config) (*NodeV1beta1Client, error) { if err != nil { return nil, err } - return &NodeV1beta1Client{client}, nil + return &AuthenticationV1alpha1Client{client}, nil } -// NewForConfigOrDie creates a new NodeV1beta1Client for the given config and +// NewForConfigOrDie creates a new AuthenticationV1alpha1Client for the given config and // panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *NodeV1beta1Client { +func NewForConfigOrDie(c *rest.Config) *AuthenticationV1alpha1Client { client, err := NewForConfig(c) if err != nil { panic(err) @@ -61,16 +62,16 @@ func NewForConfigOrDie(c *rest.Config) *NodeV1beta1Client { return client } -// New creates a new NodeV1beta1Client for the given RESTClient. -func New(c rest.Interface) *NodeV1beta1Client { - return &NodeV1beta1Client{c} +// New creates a new AuthenticationV1alpha1Client for the given RESTClient. +func New(c rest.Interface) *AuthenticationV1alpha1Client { + return &AuthenticationV1alpha1Client{c} } func setConfigDefaults(config *rest.Config) error { - gv := v1beta1.SchemeGroupVersion + gv := v1alpha1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() @@ -81,7 +82,7 @@ func setConfigDefaults(config *rest.Config) error { // RESTClient returns a RESTClient that is used to communicate // with API server by this client implementation. -func (c *NodeV1beta1Client) RESTClient() rest.Interface { +func (c *AuthenticationV1alpha1Client) RESTClient() rest.Interface { if c == nil { return nil } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/doc.go b/vendor/github.com/knative/pkg/client/clientset/versioned/typed/authentication/v1alpha1/doc.go similarity index 92% rename from vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/doc.go rename to vendor/github.com/knative/pkg/client/clientset/versioned/typed/authentication/v1alpha1/doc.go index 771101956..a1c6bb9fe 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/doc.go +++ b/vendor/github.com/knative/pkg/client/clientset/versioned/typed/authentication/v1alpha1/doc.go @@ -1,5 +1,5 @@ /* -Copyright The Kubernetes Authors. +Copyright 2019 The Knative Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -17,4 +17,4 @@ limitations under the License. // Code generated by client-gen. DO NOT EDIT. // This package has the automatically generated typed clients. 
-package v1beta1 +package v1alpha1 diff --git a/vendor/github.com/knative/pkg/client/clientset/versioned/typed/authentication/v1alpha1/fake/doc.go b/vendor/github.com/knative/pkg/client/clientset/versioned/typed/authentication/v1alpha1/fake/doc.go new file mode 100644 index 000000000..a00e5d7b2 --- /dev/null +++ b/vendor/github.com/knative/pkg/client/clientset/versioned/typed/authentication/v1alpha1/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// Package fake has the automatically generated clients. +package fake diff --git a/vendor/github.com/knative/pkg/client/clientset/versioned/typed/authentication/v1alpha1/fake/fake_authentication_client.go b/vendor/github.com/knative/pkg/client/clientset/versioned/typed/authentication/v1alpha1/fake/fake_authentication_client.go new file mode 100644 index 000000000..f947ca535 --- /dev/null +++ b/vendor/github.com/knative/pkg/client/clientset/versioned/typed/authentication/v1alpha1/fake/fake_authentication_client.go @@ -0,0 +1,40 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1alpha1 "github.com/knative/pkg/client/clientset/versioned/typed/authentication/v1alpha1" + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" +) + +type FakeAuthenticationV1alpha1 struct { + *testing.Fake +} + +func (c *FakeAuthenticationV1alpha1) Policies(namespace string) v1alpha1.PolicyInterface { + return &FakePolicies{c, namespace} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FakeAuthenticationV1alpha1) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/vendor/github.com/knative/pkg/client/clientset/versioned/typed/authentication/v1alpha1/fake/fake_policy.go b/vendor/github.com/knative/pkg/client/clientset/versioned/typed/authentication/v1alpha1/fake/fake_policy.go new file mode 100644 index 000000000..f5d54444e --- /dev/null +++ b/vendor/github.com/knative/pkg/client/clientset/versioned/typed/authentication/v1alpha1/fake/fake_policy.go @@ -0,0 +1,128 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1alpha1 "github.com/knative/pkg/apis/istio/authentication/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakePolicies implements PolicyInterface +type FakePolicies struct { + Fake *FakeAuthenticationV1alpha1 + ns string +} + +var policiesResource = schema.GroupVersionResource{Group: "authentication.istio.io", Version: "v1alpha1", Resource: "policies"} + +var policiesKind = schema.GroupVersionKind{Group: "authentication.istio.io", Version: "v1alpha1", Kind: "Policy"} + +// Get takes name of the policy, and returns the corresponding policy object, and an error if there is any. +func (c *FakePolicies) Get(name string, options v1.GetOptions) (result *v1alpha1.Policy, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(policiesResource, c.ns, name), &v1alpha1.Policy{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.Policy), err +} + +// List takes label and field selectors, and returns the list of Policies that match those selectors. +func (c *FakePolicies) List(opts v1.ListOptions) (result *v1alpha1.PolicyList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(policiesResource, policiesKind, c.ns, opts), &v1alpha1.PolicyList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1alpha1.PolicyList{ListMeta: obj.(*v1alpha1.PolicyList).ListMeta} + for _, item := range obj.(*v1alpha1.PolicyList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested policies. +func (c *FakePolicies) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(policiesResource, c.ns, opts)) + +} + +// Create takes the representation of a policy and creates it. Returns the server's representation of the policy, and an error, if there is any. +func (c *FakePolicies) Create(policy *v1alpha1.Policy) (result *v1alpha1.Policy, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(policiesResource, c.ns, policy), &v1alpha1.Policy{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.Policy), err +} + +// Update takes the representation of a policy and updates it. Returns the server's representation of the policy, and an error, if there is any. +func (c *FakePolicies) Update(policy *v1alpha1.Policy) (result *v1alpha1.Policy, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(policiesResource, c.ns, policy), &v1alpha1.Policy{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.Policy), err +} + +// Delete takes name of the policy and deletes it. Returns an error if one occurs. 
+func (c *FakePolicies) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(policiesResource, c.ns, name), &v1alpha1.Policy{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakePolicies) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(policiesResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1alpha1.PolicyList{}) + return err +} + +// Patch applies the patch and returns the patched policy. +func (c *FakePolicies) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.Policy, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(policiesResource, c.ns, name, data, subresources...), &v1alpha1.Policy{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.Policy), err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/generated_expansion.go b/vendor/github.com/knative/pkg/client/clientset/versioned/typed/authentication/v1alpha1/generated_expansion.go similarity index 87% rename from vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/generated_expansion.go rename to vendor/github.com/knative/pkg/client/clientset/versioned/typed/authentication/v1alpha1/generated_expansion.go index 1442649b3..25a1998a2 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/generated_expansion.go +++ b/vendor/github.com/knative/pkg/client/clientset/versioned/typed/authentication/v1alpha1/generated_expansion.go @@ -1,5 +1,5 @@ /* -Copyright The Kubernetes Authors. +Copyright 2019 The Knative Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -16,6 +16,6 @@ limitations under the License. // Code generated by client-gen. DO NOT EDIT. -package v1beta1 +package v1alpha1 -type IngressExpansion interface{} +type PolicyExpansion interface{} diff --git a/vendor/github.com/knative/pkg/client/clientset/versioned/typed/authentication/v1alpha1/policy.go b/vendor/github.com/knative/pkg/client/clientset/versioned/typed/authentication/v1alpha1/policy.go new file mode 100644 index 000000000..961aaf008 --- /dev/null +++ b/vendor/github.com/knative/pkg/client/clientset/versioned/typed/authentication/v1alpha1/policy.go @@ -0,0 +1,157 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1alpha1 "github.com/knative/pkg/apis/istio/authentication/v1alpha1" + scheme "github.com/knative/pkg/client/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// PoliciesGetter has a method to return a PolicyInterface. +// A group's client should implement this interface. 
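Aside (not part of the patch; the PoliciesGetter interface continues right below): the FakePolicies implementation above routes every verb through the embedded testing.Fake, backed by an in-memory object tracker, and its List filters by label selector client-side while ignoring field selectors (see ExtractFromListOptions). A test sketch, assuming the tree's top-level fake clientset package and the usual ObjectMeta on Policy:

package sample_test // hypothetical test package for this sketch

import (
	"testing"

	authv1alpha1 "github.com/knative/pkg/apis/istio/authentication/v1alpha1"
	fakeclientset "github.com/knative/pkg/client/clientset/versioned/fake"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func TestPolicies(t *testing.T) {
	// Seed the in-memory tracker with two Policies, one labeled.
	cs := fakeclientset.NewSimpleClientset(
		&authv1alpha1.Policy{ObjectMeta: metav1.ObjectMeta{
			Name: "a", Namespace: "default", Labels: map[string]string{"tier": "prod"},
		}},
		&authv1alpha1.Policy{ObjectMeta: metav1.ObjectMeta{Name: "b", Namespace: "default"}},
	)

	if _, err := cs.AuthenticationV1alpha1().Policies("default").Get("a", metav1.GetOptions{}); err != nil {
		t.Fatalf("Get() = %v", err)
	}

	// Label selectors are applied in memory; only "a" should match.
	list, err := cs.AuthenticationV1alpha1().Policies("default").List(metav1.ListOptions{LabelSelector: "tier=prod"})
	if err != nil {
		t.Fatalf("List() = %v", err)
	}
	if len(list.Items) != 1 || list.Items[0].Name != "a" {
		t.Errorf("selector not applied: got %d items", len(list.Items))
	}
}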
+type PoliciesGetter interface { + Policies(namespace string) PolicyInterface +} + +// PolicyInterface has methods to work with Policy resources. +type PolicyInterface interface { + Create(*v1alpha1.Policy) (*v1alpha1.Policy, error) + Update(*v1alpha1.Policy) (*v1alpha1.Policy, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1alpha1.Policy, error) + List(opts v1.ListOptions) (*v1alpha1.PolicyList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.Policy, err error) + PolicyExpansion +} + +// policies implements PolicyInterface +type policies struct { + client rest.Interface + ns string +} + +// newPolicies returns a Policies +func newPolicies(c *AuthenticationV1alpha1Client, namespace string) *policies { + return &policies{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the policy, and returns the corresponding policy object, and an error if there is any. +func (c *policies) Get(name string, options v1.GetOptions) (result *v1alpha1.Policy, err error) { + result = &v1alpha1.Policy{} + err = c.client.Get(). + Namespace(c.ns). + Resource("policies"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Policies that match those selectors. +func (c *policies) List(opts v1.ListOptions) (result *v1alpha1.PolicyList, err error) { + result = &v1alpha1.PolicyList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("policies"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested policies. +func (c *policies) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("policies"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Create takes the representation of a policy and creates it. Returns the server's representation of the policy, and an error, if there is any. +func (c *policies) Create(policy *v1alpha1.Policy) (result *v1alpha1.Policy, err error) { + result = &v1alpha1.Policy{} + err = c.client.Post(). + Namespace(c.ns). + Resource("policies"). + Body(policy). + Do(). + Into(result) + return +} + +// Update takes the representation of a policy and updates it. Returns the server's representation of the policy, and an error, if there is any. +func (c *policies) Update(policy *v1alpha1.Policy) (result *v1alpha1.Policy, err error) { + result = &v1alpha1.Policy{} + err = c.client.Put(). + Namespace(c.ns). + Resource("policies"). + Name(policy.Name). + Body(policy). + Do(). + Into(result) + return +} + +// Delete takes name of the policy and deletes it. Returns an error if one occurs. +func (c *policies) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("policies"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *policies) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("policies"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). 
+ Error() +} + +// Patch applies the patch and returns the patched policy. +func (c *policies) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.Policy, err error) { + result = &v1alpha1.Policy{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("policies"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/destinationrule.go b/vendor/github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/destinationrule.go new file mode 100644 index 000000000..242f213f3 --- /dev/null +++ b/vendor/github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/destinationrule.go @@ -0,0 +1,157 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha3 + +import ( + v1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3" + scheme "github.com/knative/pkg/client/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// DestinationRulesGetter has a method to return a DestinationRuleInterface. +// A group's client should implement this interface. +type DestinationRulesGetter interface { + DestinationRules(namespace string) DestinationRuleInterface +} + +// DestinationRuleInterface has methods to work with DestinationRule resources. +type DestinationRuleInterface interface { + Create(*v1alpha3.DestinationRule) (*v1alpha3.DestinationRule, error) + Update(*v1alpha3.DestinationRule) (*v1alpha3.DestinationRule, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1alpha3.DestinationRule, error) + List(opts v1.ListOptions) (*v1alpha3.DestinationRuleList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha3.DestinationRule, err error) + DestinationRuleExpansion +} + +// destinationRules implements DestinationRuleInterface +type destinationRules struct { + client rest.Interface + ns string +} + +// newDestinationRules returns a DestinationRules +func newDestinationRules(c *NetworkingV1alpha3Client, namespace string) *destinationRules { + return &destinationRules{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the destinationRule, and returns the corresponding destinationRule object, and an error if there is any. +func (c *destinationRules) Get(name string, options v1.GetOptions) (result *v1alpha3.DestinationRule, err error) { + result = &v1alpha3.DestinationRule{} + err = c.client.Get(). + Namespace(c.ns). + Resource("destinationrules"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). 
+ Into(result) + return +} + +// List takes label and field selectors, and returns the list of DestinationRules that match those selectors. +func (c *destinationRules) List(opts v1.ListOptions) (result *v1alpha3.DestinationRuleList, err error) { + result = &v1alpha3.DestinationRuleList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("destinationrules"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested destinationRules. +func (c *destinationRules) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("destinationrules"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Create takes the representation of a destinationRule and creates it. Returns the server's representation of the destinationRule, and an error, if there is any. +func (c *destinationRules) Create(destinationRule *v1alpha3.DestinationRule) (result *v1alpha3.DestinationRule, err error) { + result = &v1alpha3.DestinationRule{} + err = c.client.Post(). + Namespace(c.ns). + Resource("destinationrules"). + Body(destinationRule). + Do(). + Into(result) + return +} + +// Update takes the representation of a destinationRule and updates it. Returns the server's representation of the destinationRule, and an error, if there is any. +func (c *destinationRules) Update(destinationRule *v1alpha3.DestinationRule) (result *v1alpha3.DestinationRule, err error) { + result = &v1alpha3.DestinationRule{} + err = c.client.Put(). + Namespace(c.ns). + Resource("destinationrules"). + Name(destinationRule.Name). + Body(destinationRule). + Do(). + Into(result) + return +} + +// Delete takes name of the destinationRule and deletes it. Returns an error if one occurs. +func (c *destinationRules) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("destinationrules"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *destinationRules) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("destinationrules"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched destinationRule. +func (c *destinationRules) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha3.DestinationRule, err error) { + result = &v1alpha3.DestinationRule{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("destinationrules"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/doc.go b/vendor/github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/doc.go similarity index 92% rename from vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/doc.go rename to vendor/github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/doc.go index 3af5d054f..6046d1467 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/doc.go +++ b/vendor/github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/doc.go @@ -1,5 +1,5 @@ /* -Copyright The Kubernetes Authors. 
+Copyright 2019 The Knative Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -17,4 +17,4 @@ limitations under the License. // Code generated by client-gen. DO NOT EDIT. // This package has the automatically generated typed clients. -package v1 +package v1alpha3 diff --git a/vendor/github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/fake/doc.go b/vendor/github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/fake/doc.go new file mode 100644 index 000000000..a00e5d7b2 --- /dev/null +++ b/vendor/github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// Package fake has the automatically generated clients. +package fake diff --git a/vendor/github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/fake/fake_destinationrule.go b/vendor/github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/fake/fake_destinationrule.go new file mode 100644 index 000000000..e493908eb --- /dev/null +++ b/vendor/github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/fake/fake_destinationrule.go @@ -0,0 +1,128 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeDestinationRules implements DestinationRuleInterface +type FakeDestinationRules struct { + Fake *FakeNetworkingV1alpha3 + ns string +} + +var destinationrulesResource = schema.GroupVersionResource{Group: "networking.istio.io", Version: "v1alpha3", Resource: "destinationrules"} + +var destinationrulesKind = schema.GroupVersionKind{Group: "networking.istio.io", Version: "v1alpha3", Kind: "DestinationRule"} + +// Get takes name of the destinationRule, and returns the corresponding destinationRule object, and an error if there is any. +func (c *FakeDestinationRules) Get(name string, options v1.GetOptions) (result *v1alpha3.DestinationRule, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewGetAction(destinationrulesResource, c.ns, name), &v1alpha3.DestinationRule{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha3.DestinationRule), err +} + +// List takes label and field selectors, and returns the list of DestinationRules that match those selectors. +func (c *FakeDestinationRules) List(opts v1.ListOptions) (result *v1alpha3.DestinationRuleList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(destinationrulesResource, destinationrulesKind, c.ns, opts), &v1alpha3.DestinationRuleList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1alpha3.DestinationRuleList{ListMeta: obj.(*v1alpha3.DestinationRuleList).ListMeta} + for _, item := range obj.(*v1alpha3.DestinationRuleList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested destinationRules. +func (c *FakeDestinationRules) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(destinationrulesResource, c.ns, opts)) + +} + +// Create takes the representation of a destinationRule and creates it. Returns the server's representation of the destinationRule, and an error, if there is any. +func (c *FakeDestinationRules) Create(destinationRule *v1alpha3.DestinationRule) (result *v1alpha3.DestinationRule, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(destinationrulesResource, c.ns, destinationRule), &v1alpha3.DestinationRule{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha3.DestinationRule), err +} + +// Update takes the representation of a destinationRule and updates it. Returns the server's representation of the destinationRule, and an error, if there is any. +func (c *FakeDestinationRules) Update(destinationRule *v1alpha3.DestinationRule) (result *v1alpha3.DestinationRule, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(destinationrulesResource, c.ns, destinationRule), &v1alpha3.DestinationRule{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha3.DestinationRule), err +} + +// Delete takes name of the destinationRule and deletes it. Returns an error if one occurs. +func (c *FakeDestinationRules) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(destinationrulesResource, c.ns, name), &v1alpha3.DestinationRule{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeDestinationRules) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(destinationrulesResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1alpha3.DestinationRuleList{}) + return err +} + +// Patch applies the patch and returns the patched destinationRule. +func (c *FakeDestinationRules) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha3.DestinationRule, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewPatchSubresourceAction(destinationrulesResource, c.ns, name, data, subresources...), &v1alpha3.DestinationRule{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha3.DestinationRule), err +} diff --git a/vendor/github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/fake/fake_gateway.go b/vendor/github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/fake/fake_gateway.go new file mode 100644 index 000000000..249414948 --- /dev/null +++ b/vendor/github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/fake/fake_gateway.go @@ -0,0 +1,128 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeGateways implements GatewayInterface +type FakeGateways struct { + Fake *FakeNetworkingV1alpha3 + ns string +} + +var gatewaysResource = schema.GroupVersionResource{Group: "networking.istio.io", Version: "v1alpha3", Resource: "gateways"} + +var gatewaysKind = schema.GroupVersionKind{Group: "networking.istio.io", Version: "v1alpha3", Kind: "Gateway"} + +// Get takes name of the gateway, and returns the corresponding gateway object, and an error if there is any. +func (c *FakeGateways) Get(name string, options v1.GetOptions) (result *v1alpha3.Gateway, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(gatewaysResource, c.ns, name), &v1alpha3.Gateway{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha3.Gateway), err +} + +// List takes label and field selectors, and returns the list of Gateways that match those selectors. +func (c *FakeGateways) List(opts v1.ListOptions) (result *v1alpha3.GatewayList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(gatewaysResource, gatewaysKind, c.ns, opts), &v1alpha3.GatewayList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1alpha3.GatewayList{ListMeta: obj.(*v1alpha3.GatewayList).ListMeta} + for _, item := range obj.(*v1alpha3.GatewayList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested gateways. +func (c *FakeGateways) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(gatewaysResource, c.ns, opts)) + +} + +// Create takes the representation of a gateway and creates it. Returns the server's representation of the gateway, and an error, if there is any. 
+func (c *FakeGateways) Create(gateway *v1alpha3.Gateway) (result *v1alpha3.Gateway, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(gatewaysResource, c.ns, gateway), &v1alpha3.Gateway{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha3.Gateway), err +} + +// Update takes the representation of a gateway and updates it. Returns the server's representation of the gateway, and an error, if there is any. +func (c *FakeGateways) Update(gateway *v1alpha3.Gateway) (result *v1alpha3.Gateway, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(gatewaysResource, c.ns, gateway), &v1alpha3.Gateway{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha3.Gateway), err +} + +// Delete takes name of the gateway and deletes it. Returns an error if one occurs. +func (c *FakeGateways) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(gatewaysResource, c.ns, name), &v1alpha3.Gateway{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeGateways) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(gatewaysResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1alpha3.GatewayList{}) + return err +} + +// Patch applies the patch and returns the patched gateway. +func (c *FakeGateways) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha3.Gateway, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(gatewaysResource, c.ns, name, data, subresources...), &v1alpha3.Gateway{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha3.Gateway), err +} diff --git a/vendor/github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/fake/fake_istio_client.go b/vendor/github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/fake/fake_istio_client.go new file mode 100644 index 000000000..fddb1d475 --- /dev/null +++ b/vendor/github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/fake/fake_istio_client.go @@ -0,0 +1,48 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
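Aside (not part of the patch; the fake istio client's package clause continues below): fakes never touch the network, and RESTClient() on them deliberately returns a typed nil. Every call is instead recorded as a testing.Action that tests can assert on. A sketch under the same assumptions as the earlier test sketch:

package sample_test // hypothetical test package for this sketch

import (
	"testing"

	fakeclientset "github.com/knative/pkg/client/clientset/versioned/fake"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func TestGatewayDeleteIsRecorded(t *testing.T) {
	cs := fakeclientset.NewSimpleClientset()

	// Deleting a missing object returns a not-found error, but the
	// action is still recorded by the embedded testing.Fake.
	_ = cs.NetworkingV1alpha3().Gateways("default").Delete("gw", &metav1.DeleteOptions{})

	actions := cs.Actions()
	if len(actions) != 1 || actions[0].GetVerb() != "delete" {
		t.Fatalf("unexpected actions: %v", actions)
	}
	if actions[0].GetResource().Resource != "gateways" {
		t.Errorf("unexpected resource %q", actions[0].GetResource().Resource)
	}
}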
+ +package fake + +import ( + v1alpha3 "github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3" + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" +) + +type FakeNetworkingV1alpha3 struct { + *testing.Fake +} + +func (c *FakeNetworkingV1alpha3) DestinationRules(namespace string) v1alpha3.DestinationRuleInterface { + return &FakeDestinationRules{c, namespace} +} + +func (c *FakeNetworkingV1alpha3) Gateways(namespace string) v1alpha3.GatewayInterface { + return &FakeGateways{c, namespace} +} + +func (c *FakeNetworkingV1alpha3) VirtualServices(namespace string) v1alpha3.VirtualServiceInterface { + return &FakeVirtualServices{c, namespace} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FakeNetworkingV1alpha3) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/vendor/github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/fake/fake_virtualservice.go b/vendor/github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/fake/fake_virtualservice.go new file mode 100644 index 000000000..98b25259a --- /dev/null +++ b/vendor/github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/fake/fake_virtualservice.go @@ -0,0 +1,128 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeVirtualServices implements VirtualServiceInterface +type FakeVirtualServices struct { + Fake *FakeNetworkingV1alpha3 + ns string +} + +var virtualservicesResource = schema.GroupVersionResource{Group: "networking.istio.io", Version: "v1alpha3", Resource: "virtualservices"} + +var virtualservicesKind = schema.GroupVersionKind{Group: "networking.istio.io", Version: "v1alpha3", Kind: "VirtualService"} + +// Get takes name of the virtualService, and returns the corresponding virtualService object, and an error if there is any. +func (c *FakeVirtualServices) Get(name string, options v1.GetOptions) (result *v1alpha3.VirtualService, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(virtualservicesResource, c.ns, name), &v1alpha3.VirtualService{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha3.VirtualService), err +} + +// List takes label and field selectors, and returns the list of VirtualServices that match those selectors. +func (c *FakeVirtualServices) List(opts v1.ListOptions) (result *v1alpha3.VirtualServiceList, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewListAction(virtualservicesResource, virtualservicesKind, c.ns, opts), &v1alpha3.VirtualServiceList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1alpha3.VirtualServiceList{ListMeta: obj.(*v1alpha3.VirtualServiceList).ListMeta} + for _, item := range obj.(*v1alpha3.VirtualServiceList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested virtualServices. +func (c *FakeVirtualServices) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(virtualservicesResource, c.ns, opts)) + +} + +// Create takes the representation of a virtualService and creates it. Returns the server's representation of the virtualService, and an error, if there is any. +func (c *FakeVirtualServices) Create(virtualService *v1alpha3.VirtualService) (result *v1alpha3.VirtualService, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(virtualservicesResource, c.ns, virtualService), &v1alpha3.VirtualService{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha3.VirtualService), err +} + +// Update takes the representation of a virtualService and updates it. Returns the server's representation of the virtualService, and an error, if there is any. +func (c *FakeVirtualServices) Update(virtualService *v1alpha3.VirtualService) (result *v1alpha3.VirtualService, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(virtualservicesResource, c.ns, virtualService), &v1alpha3.VirtualService{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha3.VirtualService), err +} + +// Delete takes name of the virtualService and deletes it. Returns an error if one occurs. +func (c *FakeVirtualServices) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(virtualservicesResource, c.ns, name), &v1alpha3.VirtualService{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeVirtualServices) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(virtualservicesResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1alpha3.VirtualServiceList{}) + return err +} + +// Patch applies the patch and returns the patched virtualService. +func (c *FakeVirtualServices) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha3.VirtualService, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(virtualservicesResource, c.ns, name, data, subresources...), &v1alpha3.VirtualService{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha3.VirtualService), err +} diff --git a/vendor/github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/gateway.go b/vendor/github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/gateway.go new file mode 100644 index 000000000..151bfef4c --- /dev/null +++ b/vendor/github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/gateway.go @@ -0,0 +1,157 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha3 + +import ( + v1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3" + scheme "github.com/knative/pkg/client/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// GatewaysGetter has a method to return a GatewayInterface. +// A group's client should implement this interface. +type GatewaysGetter interface { + Gateways(namespace string) GatewayInterface +} + +// GatewayInterface has methods to work with Gateway resources. +type GatewayInterface interface { + Create(*v1alpha3.Gateway) (*v1alpha3.Gateway, error) + Update(*v1alpha3.Gateway) (*v1alpha3.Gateway, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1alpha3.Gateway, error) + List(opts v1.ListOptions) (*v1alpha3.GatewayList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha3.Gateway, err error) + GatewayExpansion +} + +// gateways implements GatewayInterface +type gateways struct { + client rest.Interface + ns string +} + +// newGateways returns a Gateways +func newGateways(c *NetworkingV1alpha3Client, namespace string) *gateways { + return &gateways{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the gateway, and returns the corresponding gateway object, and an error if there is any. +func (c *gateways) Get(name string, options v1.GetOptions) (result *v1alpha3.Gateway, err error) { + result = &v1alpha3.Gateway{} + err = c.client.Get(). + Namespace(c.ns). + Resource("gateways"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Gateways that match those selectors. +func (c *gateways) List(opts v1.ListOptions) (result *v1alpha3.GatewayList, err error) { + result = &v1alpha3.GatewayList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("gateways"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested gateways. +func (c *gateways) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("gateways"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Create takes the representation of a gateway and creates it. Returns the server's representation of the gateway, and an error, if there is any. +func (c *gateways) Create(gateway *v1alpha3.Gateway) (result *v1alpha3.Gateway, err error) { + result = &v1alpha3.Gateway{} + err = c.client.Post(). + Namespace(c.ns). + Resource("gateways"). + Body(gateway). + Do(). + Into(result) + return +} + +// Update takes the representation of a gateway and updates it. 
Returns the server's representation of the gateway, and an error, if there is any. +func (c *gateways) Update(gateway *v1alpha3.Gateway) (result *v1alpha3.Gateway, err error) { + result = &v1alpha3.Gateway{} + err = c.client.Put(). + Namespace(c.ns). + Resource("gateways"). + Name(gateway.Name). + Body(gateway). + Do(). + Into(result) + return +} + +// Delete takes name of the gateway and deletes it. Returns an error if one occurs. +func (c *gateways) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("gateways"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *gateways) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("gateways"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched gateway. +func (c *gateways) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha3.Gateway, err error) { + result = &v1alpha3.Gateway{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("gateways"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/typed/example/v1/generated_expansion.go b/vendor/github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/generated_expansion.go similarity index 77% rename from vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/typed/example/v1/generated_expansion.go rename to vendor/github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/generated_expansion.go index 3059734a9..05e765168 100644 --- a/vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/typed/example/v1/generated_expansion.go +++ b/vendor/github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/generated_expansion.go @@ -1,5 +1,5 @@ /* -Copyright The Kubernetes Authors. +Copyright 2019 The Knative Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -16,8 +16,10 @@ limitations under the License. // Code generated by client-gen. DO NOT EDIT. -package v1 +package v1alpha3 -type ClusterTestTypeExpansion interface{} +type DestinationRuleExpansion interface{} -type TestTypeExpansion interface{} +type GatewayExpansion interface{} + +type VirtualServiceExpansion interface{} diff --git a/vendor/github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/istio_client.go b/vendor/github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/istio_client.go new file mode 100644 index 000000000..33bda021c --- /dev/null +++ b/vendor/github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/istio_client.go @@ -0,0 +1,100 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha3 + +import ( + v1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3" + "github.com/knative/pkg/client/clientset/versioned/scheme" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + rest "k8s.io/client-go/rest" +) + +type NetworkingV1alpha3Interface interface { + RESTClient() rest.Interface + DestinationRulesGetter + GatewaysGetter + VirtualServicesGetter +} + +// NetworkingV1alpha3Client is used to interact with features provided by the networking.istio.io group. +type NetworkingV1alpha3Client struct { + restClient rest.Interface +} + +func (c *NetworkingV1alpha3Client) DestinationRules(namespace string) DestinationRuleInterface { + return newDestinationRules(c, namespace) +} + +func (c *NetworkingV1alpha3Client) Gateways(namespace string) GatewayInterface { + return newGateways(c, namespace) +} + +func (c *NetworkingV1alpha3Client) VirtualServices(namespace string) VirtualServiceInterface { + return newVirtualServices(c, namespace) +} + +// NewForConfig creates a new NetworkingV1alpha3Client for the given config. +func NewForConfig(c *rest.Config) (*NetworkingV1alpha3Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &NetworkingV1alpha3Client{client}, nil +} + +// NewForConfigOrDie creates a new NetworkingV1alpha3Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *NetworkingV1alpha3Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new NetworkingV1alpha3Client for the given RESTClient. +func New(c rest.Interface) *NetworkingV1alpha3Client { + return &NetworkingV1alpha3Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1alpha3.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *NetworkingV1alpha3Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/virtualservice.go b/vendor/github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/virtualservice.go new file mode 100644 index 000000000..11bbb6242 --- /dev/null +++ b/vendor/github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/virtualservice.go @@ -0,0 +1,157 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha3 + +import ( + v1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3" + scheme "github.com/knative/pkg/client/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// VirtualServicesGetter has a method to return a VirtualServiceInterface. +// A group's client should implement this interface. +type VirtualServicesGetter interface { + VirtualServices(namespace string) VirtualServiceInterface +} + +// VirtualServiceInterface has methods to work with VirtualService resources. +type VirtualServiceInterface interface { + Create(*v1alpha3.VirtualService) (*v1alpha3.VirtualService, error) + Update(*v1alpha3.VirtualService) (*v1alpha3.VirtualService, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1alpha3.VirtualService, error) + List(opts v1.ListOptions) (*v1alpha3.VirtualServiceList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha3.VirtualService, err error) + VirtualServiceExpansion +} + +// virtualServices implements VirtualServiceInterface +type virtualServices struct { + client rest.Interface + ns string +} + +// newVirtualServices returns a VirtualServices +func newVirtualServices(c *NetworkingV1alpha3Client, namespace string) *virtualServices { + return &virtualServices{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the virtualService, and returns the corresponding virtualService object, and an error if there is any. +func (c *virtualServices) Get(name string, options v1.GetOptions) (result *v1alpha3.VirtualService, err error) { + result = &v1alpha3.VirtualService{} + err = c.client.Get(). + Namespace(c.ns). + Resource("virtualservices"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of VirtualServices that match those selectors. +func (c *virtualServices) List(opts v1.ListOptions) (result *v1alpha3.VirtualServiceList, err error) { + result = &v1alpha3.VirtualServiceList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("virtualservices"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested virtualServices. +func (c *virtualServices) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("virtualservices"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Create takes the representation of a virtualService and creates it. Returns the server's representation of the virtualService, and an error, if there is any. +func (c *virtualServices) Create(virtualService *v1alpha3.VirtualService) (result *v1alpha3.VirtualService, err error) { + result = &v1alpha3.VirtualService{} + err = c.client.Post(). + Namespace(c.ns). + Resource("virtualservices"). + Body(virtualService). + Do(). + Into(result) + return +} + +// Update takes the representation of a virtualService and updates it. Returns the server's representation of the virtualService, and an error, if there is any. 
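Aside (not part of the patch; the Update implementation follows below): the generated Watch above flips opts.Watch to true and issues a long-lived GET, handing back a watch.Interface whose channel the caller drains. A consumption sketch, with an illustrative label selector and a hypothetical package name:

package sample // hypothetical package for this sketch

import (
	"fmt"

	istiov1alpha3 "github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// watchVirtualServices prints watch events until the stream closes.
func watchVirtualServices(client istiov1alpha3.NetworkingV1alpha3Interface) error {
	w, err := client.VirtualServices("default").Watch(metav1.ListOptions{
		LabelSelector: "app=demo", // illustrative selector
	})
	if err != nil {
		return err
	}
	defer w.Stop()
	for ev := range w.ResultChan() {
		fmt.Printf("%s: %T\n", ev.Type, ev.Object)
	}
	return nil
}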
+func (c *virtualServices) Update(virtualService *v1alpha3.VirtualService) (result *v1alpha3.VirtualService, err error) { + result = &v1alpha3.VirtualService{} + err = c.client.Put(). + Namespace(c.ns). + Resource("virtualservices"). + Name(virtualService.Name). + Body(virtualService). + Do(). + Into(result) + return +} + +// Delete takes name of the virtualService and deletes it. Returns an error if one occurs. +func (c *virtualServices) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("virtualservices"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *virtualServices) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("virtualservices"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched virtualService. +func (c *virtualServices) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha3.VirtualService, err error) { + result = &v1alpha3.VirtualService{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("virtualservices"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/informers/auditregistration/interface.go b/vendor/github.com/knative/pkg/client/informers/externalversions/authentication/interface.go similarity index 84% rename from vendor/k8s.io/client-go/informers/auditregistration/interface.go rename to vendor/github.com/knative/pkg/client/informers/externalversions/authentication/interface.go index 0f1682c47..2c0a8ce89 100644 --- a/vendor/k8s.io/client-go/informers/auditregistration/interface.go +++ b/vendor/github.com/knative/pkg/client/informers/externalversions/authentication/interface.go @@ -1,5 +1,5 @@ /* -Copyright The Kubernetes Authors. +Copyright 2019 The Knative Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -16,11 +16,11 @@ limitations under the License. // Code generated by informer-gen. DO NOT EDIT. -package auditregistration +package authentication import ( - v1alpha1 "k8s.io/client-go/informers/auditregistration/v1alpha1" - internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + v1alpha1 "github.com/knative/pkg/client/informers/externalversions/authentication/v1alpha1" + internalinterfaces "github.com/knative/pkg/client/informers/externalversions/internalinterfaces" ) // Interface provides access to each of this group's versions. diff --git a/vendor/k8s.io/client-go/informers/auditregistration/v1alpha1/interface.go b/vendor/github.com/knative/pkg/client/informers/externalversions/authentication/v1alpha1/interface.go similarity index 74% rename from vendor/k8s.io/client-go/informers/auditregistration/v1alpha1/interface.go rename to vendor/github.com/knative/pkg/client/informers/externalversions/authentication/v1alpha1/interface.go index 0a67ba821..a46940a96 100644 --- a/vendor/k8s.io/client-go/informers/auditregistration/v1alpha1/interface.go +++ b/vendor/github.com/knative/pkg/client/informers/externalversions/authentication/v1alpha1/interface.go @@ -1,5 +1,5 @@ /* -Copyright The Kubernetes Authors. 
+Copyright 2019 The Knative Authors
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -19,13 +19,13 @@ limitations under the License.
 package v1alpha1
 
 import (
-	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
+	internalinterfaces "github.com/knative/pkg/client/informers/externalversions/internalinterfaces"
 )
 
 // Interface provides access to all the informers in this group version.
 type Interface interface {
-	// AuditSinks returns a AuditSinkInformer.
-	AuditSinks() AuditSinkInformer
+	// Policies returns a PolicyInformer.
+	Policies() PolicyInformer
 }
 
 type version struct {
@@ -39,7 +39,7 @@ func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakList
 	return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
 }
 
-// AuditSinks returns a AuditSinkInformer.
-func (v *version) AuditSinks() AuditSinkInformer {
-	return &auditSinkInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
+// Policies returns a PolicyInformer.
+func (v *version) Policies() PolicyInformer {
+	return &policyInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
 }
diff --git a/vendor/k8s.io/client-go/informers/coordination/v1/lease.go b/vendor/github.com/knative/pkg/client/informers/externalversions/authentication/v1alpha1/policy.go
similarity index 51%
rename from vendor/k8s.io/client-go/informers/coordination/v1/lease.go
rename to vendor/github.com/knative/pkg/client/informers/externalversions/authentication/v1alpha1/policy.go
index b8a3de471..912860c94 100644
--- a/vendor/k8s.io/client-go/informers/coordination/v1/lease.go
+++ b/vendor/github.com/knative/pkg/client/informers/externalversions/authentication/v1alpha1/policy.go
@@ -1,5 +1,5 @@
 /*
-Copyright The Kubernetes Authors.
+Copyright 2019 The Knative Authors
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -16,74 +16,74 @@ limitations under the License.
 
 // Code generated by informer-gen. DO NOT EDIT.
 
-package v1
+package v1alpha1
 
 import (
 	time "time"
 
-	coordinationv1 "k8s.io/api/coordination/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	authenticationv1alpha1 "github.com/knative/pkg/apis/istio/authentication/v1alpha1"
+	versioned "github.com/knative/pkg/client/clientset/versioned"
+	internalinterfaces "github.com/knative/pkg/client/informers/externalversions/internalinterfaces"
+	v1alpha1 "github.com/knative/pkg/client/listers/authentication/v1alpha1"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	runtime "k8s.io/apimachinery/pkg/runtime"
 	watch "k8s.io/apimachinery/pkg/watch"
-	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
-	kubernetes "k8s.io/client-go/kubernetes"
-	v1 "k8s.io/client-go/listers/coordination/v1"
 	cache "k8s.io/client-go/tools/cache"
 )
 
-// LeaseInformer provides access to a shared informer and lister for
-// Leases.
-type LeaseInformer interface {
+// PolicyInformer provides access to a shared informer and lister for
+// Policies.
+type PolicyInformer interface {
 	Informer() cache.SharedIndexInformer
-	Lister() v1.LeaseLister
+	Lister() v1alpha1.PolicyLister
 }
 
-type leaseInformer struct {
+type policyInformer struct {
 	factory          internalinterfaces.SharedInformerFactory
 	tweakListOptions internalinterfaces.TweakListOptionsFunc
 	namespace        string
 }
 
-// NewLeaseInformer constructs a new informer for Lease type.
+// NewPolicyInformer constructs a new informer for Policy type.
 // Always prefer using an informer factory to get a shared informer instead of getting an independent
 // one. This reduces memory footprint and number of connections to the server.
-func NewLeaseInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
-	return NewFilteredLeaseInformer(client, namespace, resyncPeriod, indexers, nil)
+func NewPolicyInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
+	return NewFilteredPolicyInformer(client, namespace, resyncPeriod, indexers, nil)
 }
 
-// NewFilteredLeaseInformer constructs a new informer for Lease type.
+// NewFilteredPolicyInformer constructs a new informer for Policy type.
 // Always prefer using an informer factory to get a shared informer instead of getting an independent
 // one. This reduces memory footprint and number of connections to the server.
-func NewFilteredLeaseInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
+func NewFilteredPolicyInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
 	return cache.NewSharedIndexInformer(
 		&cache.ListWatch{
-			ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
+			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
 				if tweakListOptions != nil {
 					tweakListOptions(&options)
 				}
-				return client.CoordinationV1().Leases(namespace).List(options)
+				return client.AuthenticationV1alpha1().Policies(namespace).List(options)
 			},
-			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
+			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
 				if tweakListOptions != nil {
 					tweakListOptions(&options)
 				}
-				return client.CoordinationV1().Leases(namespace).Watch(options)
+				return client.AuthenticationV1alpha1().Policies(namespace).Watch(options)
 			},
 		},
-		&coordinationv1.Lease{},
+		&authenticationv1alpha1.Policy{},
 		resyncPeriod,
 		indexers,
 	)
 }
 
-func (f *leaseInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
-	return NewFilteredLeaseInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
+func (f *policyInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
+	return NewFilteredPolicyInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
 }
 
-func (f *leaseInformer) Informer() cache.SharedIndexInformer {
-	return f.factory.InformerFor(&coordinationv1.Lease{}, f.defaultInformer)
+func (f *policyInformer) Informer() cache.SharedIndexInformer {
+	return f.factory.InformerFor(&authenticationv1alpha1.Policy{}, f.defaultInformer)
 }
 
-func (f *leaseInformer) Lister() v1.LeaseLister {
-	return v1.NewLeaseLister(f.Informer().GetIndexer())
+func (f *policyInformer) Lister() v1alpha1.PolicyLister {
+	return v1alpha1.NewPolicyLister(f.Informer().GetIndexer())
 }
diff --git a/vendor/github.com/knative/pkg/client/informers/externalversions/factory.go b/vendor/github.com/knative/pkg/client/informers/externalversions/factory.go
new file mode 100644
index 000000000..a8e88a5b7
--- /dev/null
+++ b/vendor/github.com/knative/pkg/client/informers/externalversions/factory.go
@@ -0,0 +1,186 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by informer-gen. DO NOT EDIT.
+
+package externalversions
+
+import (
+	reflect "reflect"
+	sync "sync"
+	time "time"
+
+	versioned "github.com/knative/pkg/client/clientset/versioned"
+	authentication "github.com/knative/pkg/client/informers/externalversions/authentication"
+	internalinterfaces "github.com/knative/pkg/client/informers/externalversions/internalinterfaces"
+	istio "github.com/knative/pkg/client/informers/externalversions/istio"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	runtime "k8s.io/apimachinery/pkg/runtime"
+	schema "k8s.io/apimachinery/pkg/runtime/schema"
+	cache "k8s.io/client-go/tools/cache"
+)
+
+// SharedInformerOption defines the functional option type for SharedInformerFactory.
+type SharedInformerOption func(*sharedInformerFactory) *sharedInformerFactory
+
+type sharedInformerFactory struct {
+	client           versioned.Interface
+	namespace        string
+	tweakListOptions internalinterfaces.TweakListOptionsFunc
+	lock             sync.Mutex
+	defaultResync    time.Duration
+	customResync     map[reflect.Type]time.Duration
+
+	informers map[reflect.Type]cache.SharedIndexInformer
+	// startedInformers is used for tracking which informers have been started.
+	// This allows Start() to be called multiple times safely.
+	startedInformers map[reflect.Type]bool
+}
+
+// WithCustomResyncConfig sets a custom resync period for the specified informer types.
+func WithCustomResyncConfig(resyncConfig map[v1.Object]time.Duration) SharedInformerOption {
+	return func(factory *sharedInformerFactory) *sharedInformerFactory {
+		for k, v := range resyncConfig {
+			factory.customResync[reflect.TypeOf(k)] = v
+		}
+		return factory
+	}
+}
+
+// WithTweakListOptions sets a custom filter on all listers of the configured SharedInformerFactory.
+func WithTweakListOptions(tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerOption {
+	return func(factory *sharedInformerFactory) *sharedInformerFactory {
+		factory.tweakListOptions = tweakListOptions
+		return factory
+	}
+}
+
+// WithNamespace limits the SharedInformerFactory to the specified namespace.
+func WithNamespace(namespace string) SharedInformerOption {
+	return func(factory *sharedInformerFactory) *sharedInformerFactory {
+		factory.namespace = namespace
+		return factory
+	}
+}
+
+// NewSharedInformerFactory constructs a new instance of sharedInformerFactory for all namespaces.
+func NewSharedInformerFactory(client versioned.Interface, defaultResync time.Duration) SharedInformerFactory {
+	return NewSharedInformerFactoryWithOptions(client, defaultResync)
+}
+
+// NewFilteredSharedInformerFactory constructs a new instance of sharedInformerFactory.
+// Listers obtained via this SharedInformerFactory will be subject to the same filters
+// as specified here.
+// Deprecated: Please use NewSharedInformerFactoryWithOptions instead
+func NewFilteredSharedInformerFactory(client versioned.Interface, defaultResync time.Duration, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerFactory {
+	return NewSharedInformerFactoryWithOptions(client, defaultResync, WithNamespace(namespace), WithTweakListOptions(tweakListOptions))
+}
+
+// NewSharedInformerFactoryWithOptions constructs a new instance of a SharedInformerFactory with additional options.
+func NewSharedInformerFactoryWithOptions(client versioned.Interface, defaultResync time.Duration, options ...SharedInformerOption) SharedInformerFactory {
+	factory := &sharedInformerFactory{
+		client:           client,
+		namespace:        v1.NamespaceAll,
+		defaultResync:    defaultResync,
+		informers:        make(map[reflect.Type]cache.SharedIndexInformer),
+		startedInformers: make(map[reflect.Type]bool),
+		customResync:     make(map[reflect.Type]time.Duration),
+	}
+
+	// Apply all options
+	for _, opt := range options {
+		factory = opt(factory)
+	}
+
+	return factory
+}
+
+// Start initializes all requested informers.
+func (f *sharedInformerFactory) Start(stopCh <-chan struct{}) {
+	f.lock.Lock()
+	defer f.lock.Unlock()
+
+	for informerType, informer := range f.informers {
+		if !f.startedInformers[informerType] {
+			go informer.Run(stopCh)
+			f.startedInformers[informerType] = true
+		}
+	}
+}
+
+// WaitForCacheSync waits until the caches of all started informers have synced.
+func (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool {
+	informers := func() map[reflect.Type]cache.SharedIndexInformer {
+		f.lock.Lock()
+		defer f.lock.Unlock()
+
+		informers := map[reflect.Type]cache.SharedIndexInformer{}
+		for informerType, informer := range f.informers {
+			if f.startedInformers[informerType] {
+				informers[informerType] = informer
+			}
+		}
+		return informers
+	}()
+
+	res := map[reflect.Type]bool{}
+	for informType, informer := range informers {
+		res[informType] = cache.WaitForCacheSync(stopCh, informer.HasSynced)
+	}
+	return res
+}
+
+// InformerFor returns the SharedIndexInformer for obj using an internal
+// client.
+func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer {
+	f.lock.Lock()
+	defer f.lock.Unlock()
+
+	informerType := reflect.TypeOf(obj)
+	informer, exists := f.informers[informerType]
+	if exists {
+		return informer
+	}
+
+	resyncPeriod, exists := f.customResync[informerType]
+	if !exists {
+		resyncPeriod = f.defaultResync
+	}
+
+	informer = newFunc(f.client, resyncPeriod)
+	f.informers[informerType] = informer
+
+	return informer
+}
+
+// SharedInformerFactory provides shared informers for resources in all known
+// API group versions.
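+//
+// A hedged usage sketch (not generated text; the resync period and stop
+// channel are illustrative):
+//
+//	factory := NewSharedInformerFactory(client, 10*time.Hour)
+//	vsInformer := factory.Networking().V1alpha3().VirtualServices().Informer()
+//	factory.Start(stopCh)
+//	factory.WaitForCacheSync(stopCh)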
+type SharedInformerFactory interface {
+	internalinterfaces.SharedInformerFactory
+	ForResource(resource schema.GroupVersionResource) (GenericInformer, error)
+	WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool
+
+	Authentication() authentication.Interface
+	Networking() istio.Interface
+}
+
+func (f *sharedInformerFactory) Authentication() authentication.Interface {
+	return authentication.New(f, f.namespace, f.tweakListOptions)
+}
+
+func (f *sharedInformerFactory) Networking() istio.Interface {
+	return istio.New(f, f.namespace, f.tweakListOptions)
+}
diff --git a/vendor/github.com/knative/pkg/client/informers/externalversions/generic.go b/vendor/github.com/knative/pkg/client/informers/externalversions/generic.go
new file mode 100644
index 000000000..2a1524610
--- /dev/null
+++ b/vendor/github.com/knative/pkg/client/informers/externalversions/generic.go
@@ -0,0 +1,71 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by informer-gen. DO NOT EDIT.
+
+package externalversions
+
+import (
+	"fmt"
+
+	v1alpha1 "github.com/knative/pkg/apis/istio/authentication/v1alpha1"
+	v1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3"
+	schema "k8s.io/apimachinery/pkg/runtime/schema"
+	cache "k8s.io/client-go/tools/cache"
+)
+
+// GenericInformer is a type of SharedIndexInformer that locates and delegates to other
+// sharedInformers based on type.
+type GenericInformer interface {
+	Informer() cache.SharedIndexInformer
+	Lister() cache.GenericLister
+}
+
+type genericInformer struct {
+	informer cache.SharedIndexInformer
+	resource schema.GroupResource
+}
+
+// Informer returns the SharedIndexInformer.
+func (f *genericInformer) Informer() cache.SharedIndexInformer {
+	return f.informer
+}
+
+// Lister returns the GenericLister.
+func (f *genericInformer) Lister() cache.GenericLister {
+	return cache.NewGenericLister(f.Informer().GetIndexer(), f.resource)
+}
+
+// ForResource gives generic access to a shared informer of the matching type
+// TODO extend this to unknown resources with a client pool
+func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) {
+	switch resource {
+	// Group=authentication.istio.io, Version=v1alpha1
+	case v1alpha1.SchemeGroupVersion.WithResource("policies"):
+		return &genericInformer{resource: resource.GroupResource(), informer: f.Authentication().V1alpha1().Policies().Informer()}, nil
+
+	// Group=networking.istio.io, Version=v1alpha3
+	case v1alpha3.SchemeGroupVersion.WithResource("destinationrules"):
+		return &genericInformer{resource: resource.GroupResource(), informer: f.Networking().V1alpha3().DestinationRules().Informer()}, nil
+	case v1alpha3.SchemeGroupVersion.WithResource("gateways"):
+		return &genericInformer{resource: resource.GroupResource(), informer: f.Networking().V1alpha3().Gateways().Informer()}, nil
+	case v1alpha3.SchemeGroupVersion.WithResource("virtualservices"):
+		return &genericInformer{resource: resource.GroupResource(), informer: f.Networking().V1alpha3().VirtualServices().Informer()}, nil
+
+	}
+
+	return nil, fmt.Errorf("no informer found for %v", resource)
+}
diff --git a/vendor/github.com/knative/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go b/vendor/github.com/knative/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go
new file mode 100644
index 000000000..3cd3cb329
--- /dev/null
+++ b/vendor/github.com/knative/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go
@@ -0,0 +1,38 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by informer-gen. DO NOT EDIT.
+
+package internalinterfaces
+
+import (
+	time "time"
+
+	versioned "github.com/knative/pkg/client/clientset/versioned"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	runtime "k8s.io/apimachinery/pkg/runtime"
+	cache "k8s.io/client-go/tools/cache"
+)
+
+type NewInformerFunc func(versioned.Interface, time.Duration) cache.SharedIndexInformer
+
+// SharedInformerFactory is a small interface to allow for adding an informer without an import cycle
+type SharedInformerFactory interface {
+	Start(stopCh <-chan struct{})
+	InformerFor(obj runtime.Object, newFunc NewInformerFunc) cache.SharedIndexInformer
+}
+
+type TweakListOptionsFunc func(*v1.ListOptions)
diff --git a/vendor/k8s.io/code-generator/_examples/MixedCase/informers/externalversions/example/interface.go b/vendor/github.com/knative/pkg/client/informers/externalversions/istio/interface.go
similarity index 68%
rename from vendor/k8s.io/code-generator/_examples/MixedCase/informers/externalversions/example/interface.go
rename to vendor/github.com/knative/pkg/client/informers/externalversions/istio/interface.go
index a874fdc5d..d7db9d27a 100644
--- a/vendor/k8s.io/code-generator/_examples/MixedCase/informers/externalversions/example/interface.go
+++ b/vendor/github.com/knative/pkg/client/informers/externalversions/istio/interface.go
@@ -1,5 +1,5 @@
 /*
-Copyright The Kubernetes Authors.
+Copyright 2019 The Knative Authors
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -16,17 +16,17 @@ limitations under the License.
 
 // Code generated by informer-gen. DO NOT EDIT.
 
-package example
+package networking
 
 import (
-	v1 "k8s.io/code-generator/_examples/MixedCase/informers/externalversions/example/v1"
-	internalinterfaces "k8s.io/code-generator/_examples/MixedCase/informers/externalversions/internalinterfaces"
+	internalinterfaces "github.com/knative/pkg/client/informers/externalversions/internalinterfaces"
+	v1alpha3 "github.com/knative/pkg/client/informers/externalversions/istio/v1alpha3"
 )
 
 // Interface provides access to each of this group's versions.
 type Interface interface {
-	// V1 provides access to shared informers for resources in V1.
-	V1() v1.Interface
+	// V1alpha3 provides access to shared informers for resources in V1alpha3.
+	V1alpha3() v1alpha3.Interface
 }
 
 type group struct {
@@ -40,7 +40,7 @@ func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakList
 	return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
 }
 
-// V1 returns a new v1.Interface.
-func (g *group) V1() v1.Interface {
-	return v1.New(g.factory, g.namespace, g.tweakListOptions)
+// V1alpha3 returns a new v1alpha3.Interface.
+func (g *group) V1alpha3() v1alpha3.Interface {
+	return v1alpha3.New(g.factory, g.namespace, g.tweakListOptions)
 }
diff --git a/vendor/github.com/knative/pkg/client/informers/externalversions/istio/v1alpha3/destinationrule.go b/vendor/github.com/knative/pkg/client/informers/externalversions/istio/v1alpha3/destinationrule.go
new file mode 100644
index 000000000..d4a035b6e
--- /dev/null
+++ b/vendor/github.com/knative/pkg/client/informers/externalversions/istio/v1alpha3/destinationrule.go
@@ -0,0 +1,89 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by informer-gen. DO NOT EDIT.
+
+package v1alpha3
+
+import (
+	time "time"
+
+	istiov1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3"
+	versioned "github.com/knative/pkg/client/clientset/versioned"
+	internalinterfaces "github.com/knative/pkg/client/informers/externalversions/internalinterfaces"
+	v1alpha3 "github.com/knative/pkg/client/listers/istio/v1alpha3"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	runtime "k8s.io/apimachinery/pkg/runtime"
+	watch "k8s.io/apimachinery/pkg/watch"
+	cache "k8s.io/client-go/tools/cache"
+)
+
+// DestinationRuleInformer provides access to a shared informer and lister for
+// DestinationRules.
+type DestinationRuleInformer interface {
+	Informer() cache.SharedIndexInformer
+	Lister() v1alpha3.DestinationRuleLister
+}
+
+type destinationRuleInformer struct {
+	factory          internalinterfaces.SharedInformerFactory
+	tweakListOptions internalinterfaces.TweakListOptionsFunc
+	namespace        string
+}
+
+// NewDestinationRuleInformer constructs a new informer for DestinationRule type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewDestinationRuleInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
+	return NewFilteredDestinationRuleInformer(client, namespace, resyncPeriod, indexers, nil)
+}
+
+// NewFilteredDestinationRuleInformer constructs a new informer for DestinationRule type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
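+//
+// A hedged sketch of a filtered, standalone informer (not generated text;
+// the label selector, namespace, and resync period are illustrative):
+//
+//	tweak := func(opts *v1.ListOptions) { opts.LabelSelector = "app=example" }
+//	inf := NewFilteredDestinationRuleInformer(client, "default", 10*time.Hour,
+//		cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, tweak)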
+func NewFilteredDestinationRuleInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
+	return cache.NewSharedIndexInformer(
+		&cache.ListWatch{
+			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
+				if tweakListOptions != nil {
+					tweakListOptions(&options)
+				}
+				return client.NetworkingV1alpha3().DestinationRules(namespace).List(options)
+			},
+			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
+				if tweakListOptions != nil {
+					tweakListOptions(&options)
+				}
+				return client.NetworkingV1alpha3().DestinationRules(namespace).Watch(options)
+			},
+		},
+		&istiov1alpha3.DestinationRule{},
+		resyncPeriod,
+		indexers,
+	)
+}
+
+func (f *destinationRuleInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
+	return NewFilteredDestinationRuleInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
+}
+
+func (f *destinationRuleInformer) Informer() cache.SharedIndexInformer {
+	return f.factory.InformerFor(&istiov1alpha3.DestinationRule{}, f.defaultInformer)
+}
+
+func (f *destinationRuleInformer) Lister() v1alpha3.DestinationRuleLister {
+	return v1alpha3.NewDestinationRuleLister(f.Informer().GetIndexer())
+}
diff --git a/vendor/k8s.io/client-go/informers/networking/v1beta1/ingress.go b/vendor/github.com/knative/pkg/client/informers/externalversions/istio/v1alpha3/gateway.go
similarity index 52%
rename from vendor/k8s.io/client-go/informers/networking/v1beta1/ingress.go
rename to vendor/github.com/knative/pkg/client/informers/externalversions/istio/v1alpha3/gateway.go
index 8abd00e17..844704d99 100644
--- a/vendor/k8s.io/client-go/informers/networking/v1beta1/ingress.go
+++ b/vendor/github.com/knative/pkg/client/informers/externalversions/istio/v1alpha3/gateway.go
@@ -1,5 +1,5 @@
 /*
-Copyright The Kubernetes Authors.
+Copyright 2019 The Knative Authors
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -16,74 +16,74 @@ limitations under the License.
 
 // Code generated by informer-gen. DO NOT EDIT.
 
-package v1beta1
+package v1alpha3
 
 import (
 	time "time"
 
-	networkingv1beta1 "k8s.io/api/networking/v1beta1"
+	istiov1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3"
+	versioned "github.com/knative/pkg/client/clientset/versioned"
+	internalinterfaces "github.com/knative/pkg/client/informers/externalversions/internalinterfaces"
+	v1alpha3 "github.com/knative/pkg/client/listers/istio/v1alpha3"
 	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	runtime "k8s.io/apimachinery/pkg/runtime"
 	watch "k8s.io/apimachinery/pkg/watch"
-	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
-	kubernetes "k8s.io/client-go/kubernetes"
-	v1beta1 "k8s.io/client-go/listers/networking/v1beta1"
 	cache "k8s.io/client-go/tools/cache"
 )
 
-// IngressInformer provides access to a shared informer and lister for
-// Ingresses.
-type IngressInformer interface {
+// GatewayInformer provides access to a shared informer and lister for
+// Gateways.
+type GatewayInformer interface {
 	Informer() cache.SharedIndexInformer
-	Lister() v1beta1.IngressLister
+	Lister() v1alpha3.GatewayLister
 }
 
-type ingressInformer struct {
+type gatewayInformer struct {
 	factory          internalinterfaces.SharedInformerFactory
 	tweakListOptions internalinterfaces.TweakListOptionsFunc
 	namespace        string
 }
 
-// NewIngressInformer constructs a new informer for Ingress type.
+// NewGatewayInformer constructs a new informer for Gateway type.
 // Always prefer using an informer factory to get a shared informer instead of getting an independent
 // one. This reduces memory footprint and number of connections to the server.
-func NewIngressInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
-	return NewFilteredIngressInformer(client, namespace, resyncPeriod, indexers, nil)
+func NewGatewayInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
+	return NewFilteredGatewayInformer(client, namespace, resyncPeriod, indexers, nil)
 }
 
-// NewFilteredIngressInformer constructs a new informer for Ingress type.
+// NewFilteredGatewayInformer constructs a new informer for Gateway type.
 // Always prefer using an informer factory to get a shared informer instead of getting an independent
 // one. This reduces memory footprint and number of connections to the server.
-func NewFilteredIngressInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
+func NewFilteredGatewayInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
 	return cache.NewSharedIndexInformer(
 		&cache.ListWatch{
 			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
 				if tweakListOptions != nil {
 					tweakListOptions(&options)
 				}
-				return client.NetworkingV1beta1().Ingresses(namespace).List(options)
+				return client.NetworkingV1alpha3().Gateways(namespace).List(options)
 			},
 			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
 				if tweakListOptions != nil {
 					tweakListOptions(&options)
 				}
-				return client.NetworkingV1beta1().Ingresses(namespace).Watch(options)
+				return client.NetworkingV1alpha3().Gateways(namespace).Watch(options)
 			},
 		},
-		&networkingv1beta1.Ingress{},
+		&istiov1alpha3.Gateway{},
 		resyncPeriod,
 		indexers,
 	)
 }
 
-func (f *ingressInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
-	return NewFilteredIngressInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
+func (f *gatewayInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
+	return NewFilteredGatewayInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
}
 
-func (f *ingressInformer) Informer() cache.SharedIndexInformer {
-	return f.factory.InformerFor(&networkingv1beta1.Ingress{}, f.defaultInformer)
+func (f *gatewayInformer) Informer() cache.SharedIndexInformer {
+	return f.factory.InformerFor(&istiov1alpha3.Gateway{}, f.defaultInformer)
 }
 
-func (f *ingressInformer) Lister() v1beta1.IngressLister {
-	return v1beta1.NewIngressLister(f.Informer().GetIndexer())
+func (f *gatewayInformer) Lister() v1alpha3.GatewayLister {
+	return v1alpha3.NewGatewayLister(f.Informer().GetIndexer())
 }
diff --git a/vendor/k8s.io/client-go/informers/networking/v1beta1/interface.go b/vendor/github.com/knative/pkg/client/informers/externalversions/istio/v1alpha3/interface.go
similarity index 53%
rename from vendor/k8s.io/client-go/informers/networking/v1beta1/interface.go
rename to vendor/github.com/knative/pkg/client/informers/externalversions/istio/v1alpha3/interface.go
index ab170dfc8..bd4808d71 100644
--- a/vendor/k8s.io/client-go/informers/networking/v1beta1/interface.go
+++ b/vendor/github.com/knative/pkg/client/informers/externalversions/istio/v1alpha3/interface.go
@@ -1,5 +1,5 @@
 /*
-Copyright The Kubernetes Authors.
+Copyright 2019 The Knative Authors
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -16,16 +16,20 @@ limitations under the License.
 
 // Code generated by informer-gen. DO NOT EDIT.
 
-package v1beta1
+package v1alpha3
 
 import (
-	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
+	internalinterfaces "github.com/knative/pkg/client/informers/externalversions/internalinterfaces"
 )
 
 // Interface provides access to all the informers in this group version.
 type Interface interface {
-	// Ingresses returns a IngressInformer.
-	Ingresses() IngressInformer
+	// DestinationRules returns a DestinationRuleInformer.
+	DestinationRules() DestinationRuleInformer
+	// Gateways returns a GatewayInformer.
+	Gateways() GatewayInformer
+	// VirtualServices returns a VirtualServiceInformer.
+	VirtualServices() VirtualServiceInformer
 }
 
 type version struct {
@@ -39,7 +43,17 @@ func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakList
 	return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
 }
 
-// Ingresses returns a IngressInformer.
-func (v *version) Ingresses() IngressInformer {
-	return &ingressInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
+// DestinationRules returns a DestinationRuleInformer.
+func (v *version) DestinationRules() DestinationRuleInformer {
+	return &destinationRuleInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
+}
+
+// Gateways returns a GatewayInformer.
+func (v *version) Gateways() GatewayInformer {
+	return &gatewayInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
+}
+
+// VirtualServices returns a VirtualServiceInformer.
+func (v *version) VirtualServices() VirtualServiceInformer {
+	return &virtualServiceInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
 }
diff --git a/vendor/k8s.io/client-go/informers/extensions/v1beta1/networkpolicy.go b/vendor/github.com/knative/pkg/client/informers/externalversions/istio/v1alpha3/virtualservice.go
similarity index 53%
rename from vendor/k8s.io/client-go/informers/extensions/v1beta1/networkpolicy.go
rename to vendor/github.com/knative/pkg/client/informers/externalversions/istio/v1alpha3/virtualservice.go
index 92f4f0400..cecb442ee 100644
--- a/vendor/k8s.io/client-go/informers/extensions/v1beta1/networkpolicy.go
+++ b/vendor/github.com/knative/pkg/client/informers/externalversions/istio/v1alpha3/virtualservice.go
@@ -1,5 +1,5 @@
 /*
-Copyright The Kubernetes Authors.
+Copyright 2019 The Knative Authors
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -16,74 +16,74 @@ limitations under the License.
 
 // Code generated by informer-gen. DO NOT EDIT.
 
-package v1beta1
+package v1alpha3
 
 import (
 	time "time"
 
-	extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
+	istiov1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3"
+	versioned "github.com/knative/pkg/client/clientset/versioned"
+	internalinterfaces "github.com/knative/pkg/client/informers/externalversions/internalinterfaces"
+	v1alpha3 "github.com/knative/pkg/client/listers/istio/v1alpha3"
 	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	runtime "k8s.io/apimachinery/pkg/runtime"
 	watch "k8s.io/apimachinery/pkg/watch"
-	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
-	kubernetes "k8s.io/client-go/kubernetes"
-	v1beta1 "k8s.io/client-go/listers/extensions/v1beta1"
 	cache "k8s.io/client-go/tools/cache"
 )
 
-// NetworkPolicyInformer provides access to a shared informer and lister for
-// NetworkPolicies.
-type NetworkPolicyInformer interface {
+// VirtualServiceInformer provides access to a shared informer and lister for
+// VirtualServices.
+type VirtualServiceInformer interface {
 	Informer() cache.SharedIndexInformer
-	Lister() v1beta1.NetworkPolicyLister
+	Lister() v1alpha3.VirtualServiceLister
 }
 
-type networkPolicyInformer struct {
+type virtualServiceInformer struct {
 	factory          internalinterfaces.SharedInformerFactory
 	tweakListOptions internalinterfaces.TweakListOptionsFunc
 	namespace        string
 }
 
-// NewNetworkPolicyInformer constructs a new informer for NetworkPolicy type.
+// NewVirtualServiceInformer constructs a new informer for VirtualService type.
 // Always prefer using an informer factory to get a shared informer instead of getting an independent
 // one. This reduces memory footprint and number of connections to the server.
-func NewNetworkPolicyInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
-	return NewFilteredNetworkPolicyInformer(client, namespace, resyncPeriod, indexers, nil)
+func NewVirtualServiceInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
+	return NewFilteredVirtualServiceInformer(client, namespace, resyncPeriod, indexers, nil)
 }
 
-// NewFilteredNetworkPolicyInformer constructs a new informer for NetworkPolicy type.
+// NewFilteredVirtualServiceInformer constructs a new informer for VirtualService type.
 // Always prefer using an informer factory to get a shared informer instead of getting an independent
 // one. This reduces memory footprint and number of connections to the server.
-func NewFilteredNetworkPolicyInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
+func NewFilteredVirtualServiceInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
 	return cache.NewSharedIndexInformer(
 		&cache.ListWatch{
 			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
 				if tweakListOptions != nil {
 					tweakListOptions(&options)
 				}
-				return client.ExtensionsV1beta1().NetworkPolicies(namespace).List(options)
+				return client.NetworkingV1alpha3().VirtualServices(namespace).List(options)
 			},
 			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
 				if tweakListOptions != nil {
 					tweakListOptions(&options)
 				}
-				return client.ExtensionsV1beta1().NetworkPolicies(namespace).Watch(options)
+				return client.NetworkingV1alpha3().VirtualServices(namespace).Watch(options)
 			},
 		},
-		&extensionsv1beta1.NetworkPolicy{},
+		&istiov1alpha3.VirtualService{},
 		resyncPeriod,
 		indexers,
 	)
 }
 
-func (f *networkPolicyInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
-	return NewFilteredNetworkPolicyInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
+func (f *virtualServiceInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
+	return NewFilteredVirtualServiceInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
 }
 
-func (f *networkPolicyInformer) Informer() cache.SharedIndexInformer {
-	return f.factory.InformerFor(&extensionsv1beta1.NetworkPolicy{}, f.defaultInformer)
+func (f *virtualServiceInformer) Informer() cache.SharedIndexInformer {
+	return f.factory.InformerFor(&istiov1alpha3.VirtualService{}, f.defaultInformer)
 }
 
-func (f *networkPolicyInformer) Lister() v1beta1.NetworkPolicyLister {
-	return v1beta1.NewNetworkPolicyLister(f.Informer().GetIndexer())
+func (f *virtualServiceInformer) Lister() v1alpha3.VirtualServiceLister {
+	return v1alpha3.NewVirtualServiceLister(f.Informer().GetIndexer())
 }
diff --git a/vendor/github.com/knative/pkg/client/injection/client/client.go b/vendor/github.com/knative/pkg/client/injection/client/client.go
new file mode 100644
index 000000000..0eb124722
--- /dev/null
+++ b/vendor/github.com/knative/pkg/client/injection/client/client.go
@@ -0,0 +1,49 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by injection-gen. DO NOT EDIT.
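+
+// A hedged orientation note (not generated text): importing this package
+// registers the clientset with injection.Default at init time, so a process
+// wired through knative/pkg injection can pull the client from its context
+// instead of constructing one. A minimal sketch, assuming an injection-aware
+// context and an illustrative namespace ns:
+//
+//	cs := Get(ctx)
+//	list, err := cs.NetworkingV1alpha3().VirtualServices(ns).List(v1.ListOptions{})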
+
+package client
+
+import (
+	"context"
+
+	versioned "github.com/knative/pkg/client/clientset/versioned"
+	injection "github.com/knative/pkg/injection"
+	logging "github.com/knative/pkg/logging"
+	rest "k8s.io/client-go/rest"
+)
+
+func init() {
+	injection.Default.RegisterClient(withClient)
+}
+
+// Key is used as the key for associating information with a context.Context.
+type Key struct{}
+
+func withClient(ctx context.Context, cfg *rest.Config) context.Context {
+	return context.WithValue(ctx, Key{}, versioned.NewForConfigOrDie(cfg))
+}
+
+// Get extracts the versioned.Interface client from the context.
+func Get(ctx context.Context) versioned.Interface {
+	untyped := ctx.Value(Key{})
+	if untyped == nil {
+		logging.FromContext(ctx).Fatalf(
+			"Unable to fetch %T from context.", (versioned.Interface)(nil))
+	}
+	return untyped.(versioned.Interface)
+}
diff --git a/vendor/github.com/knative/pkg/client/injection/client/fake/fake.go b/vendor/github.com/knative/pkg/client/injection/client/fake/fake.go
new file mode 100644
index 000000000..ae3688909
--- /dev/null
+++ b/vendor/github.com/knative/pkg/client/injection/client/fake/fake.go
@@ -0,0 +1,54 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by injection-gen. DO NOT EDIT.
+
+package fake
+
+import (
+	"context"
+
+	fake "github.com/knative/pkg/client/clientset/versioned/fake"
+	client "github.com/knative/pkg/client/injection/client"
+	injection "github.com/knative/pkg/injection"
+	logging "github.com/knative/pkg/logging"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/client-go/rest"
+)
+
+func init() {
+	injection.Fake.RegisterClient(withClient)
+}
+
+func withClient(ctx context.Context, cfg *rest.Config) context.Context {
+	ctx, _ = With(ctx)
+	return ctx
+}
+
+func With(ctx context.Context, objects ...runtime.Object) (context.Context, *fake.Clientset) {
+	cs := fake.NewSimpleClientset(objects...)
+	return context.WithValue(ctx, client.Key{}, cs), cs
+}
+
+// Get extracts the fake versioned clientset from the context.
+func Get(ctx context.Context) *fake.Clientset {
+	untyped := ctx.Value(client.Key{})
+	if untyped == nil {
+		logging.FromContext(ctx).Fatalf(
+			"Unable to fetch %T from context.", (*fake.Clientset)(nil))
+	}
+	return untyped.(*fake.Clientset)
+}
diff --git a/vendor/github.com/knative/pkg/client/injection/informers/authentication/factory/authenticationfactory.go b/vendor/github.com/knative/pkg/client/injection/informers/authentication/factory/authenticationfactory.go
new file mode 100644
index 000000000..2be38ccaa
--- /dev/null
+++ b/vendor/github.com/knative/pkg/client/injection/informers/authentication/factory/authenticationfactory.go
@@ -0,0 +1,52 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by injection-gen. DO NOT EDIT.
+
+package authenticationfactory
+
+import (
+	"context"
+
+	externalversions "github.com/knative/pkg/client/informers/externalversions"
+	client "github.com/knative/pkg/client/injection/client"
+	controller "github.com/knative/pkg/controller"
+	injection "github.com/knative/pkg/injection"
+	logging "github.com/knative/pkg/logging"
+)
+
+func init() {
+	injection.Default.RegisterInformerFactory(withInformerFactory)
+}
+
+// Key is used as the key for associating information with a context.Context.
+type Key struct{}
+
+func withInformerFactory(ctx context.Context) context.Context {
+	c := client.Get(ctx)
+	return context.WithValue(ctx, Key{},
+		externalversions.NewSharedInformerFactory(c, controller.GetResyncPeriod(ctx)))
+}
+
+// Get extracts the InformerFactory from the context.
+func Get(ctx context.Context) externalversions.SharedInformerFactory {
+	untyped := ctx.Value(Key{})
+	if untyped == nil {
+		logging.FromContext(ctx).Fatalf(
+			"Unable to fetch %T from context.", (externalversions.SharedInformerFactory)(nil))
+	}
+	return untyped.(externalversions.SharedInformerFactory)
+}
diff --git a/vendor/github.com/knative/pkg/client/injection/informers/authentication/factory/fake/fake.go b/vendor/github.com/knative/pkg/client/injection/informers/authentication/factory/fake/fake.go
new file mode 100644
index 000000000..813706b95
--- /dev/null
+++ b/vendor/github.com/knative/pkg/client/injection/informers/authentication/factory/fake/fake.go
@@ -0,0 +1,41 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by injection-gen. DO NOT EDIT.
+
+package fake
+
+import (
+	"context"
+
+	externalversions "github.com/knative/pkg/client/informers/externalversions"
+	fake "github.com/knative/pkg/client/injection/client/fake"
+	factory "github.com/knative/pkg/client/injection/informers/authentication/factory"
+	controller "github.com/knative/pkg/controller"
+	injection "github.com/knative/pkg/injection"
+)
+
+var Get = factory.Get
+
+func init() {
+	injection.Fake.RegisterInformerFactory(withInformerFactory)
+}
+
+func withInformerFactory(ctx context.Context) context.Context {
+	c := fake.Get(ctx)
+	return context.WithValue(ctx, factory.Key{},
+		externalversions.NewSharedInformerFactory(c, controller.GetResyncPeriod(ctx)))
+}
diff --git a/vendor/github.com/knative/pkg/client/injection/informers/authentication/v1alpha1/policy/fake/fake.go b/vendor/github.com/knative/pkg/client/injection/informers/authentication/v1alpha1/policy/fake/fake.go
new file mode 100644
index 000000000..bd8c1e974
--- /dev/null
+++ b/vendor/github.com/knative/pkg/client/injection/informers/authentication/v1alpha1/policy/fake/fake.go
@@ -0,0 +1,40 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by injection-gen. DO NOT EDIT.
+
+package fake
+
+import (
+	"context"
+
+	fake "github.com/knative/pkg/client/injection/informers/authentication/factory/fake"
+	policy "github.com/knative/pkg/client/injection/informers/authentication/v1alpha1/policy"
+	controller "github.com/knative/pkg/controller"
+	injection "github.com/knative/pkg/injection"
+)
+
+var Get = policy.Get
+
+func init() {
+	injection.Fake.RegisterInformer(withInformer)
+}
+
+func withInformer(ctx context.Context) (context.Context, controller.Informer) {
+	f := fake.Get(ctx)
+	inf := f.Authentication().V1alpha1().Policies()
+	return context.WithValue(ctx, policy.Key{}, inf), inf.Informer()
+}
diff --git a/vendor/github.com/knative/pkg/client/injection/informers/authentication/v1alpha1/policy/policy.go b/vendor/github.com/knative/pkg/client/injection/informers/authentication/v1alpha1/policy/policy.go
new file mode 100644
index 000000000..449afb544
--- /dev/null
+++ b/vendor/github.com/knative/pkg/client/injection/informers/authentication/v1alpha1/policy/policy.go
@@ -0,0 +1,52 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by injection-gen. DO NOT EDIT.
+
+package policy
+
+import (
+	"context"
+
+	v1alpha1 "github.com/knative/pkg/client/informers/externalversions/authentication/v1alpha1"
+	factory "github.com/knative/pkg/client/injection/informers/authentication/factory"
+	controller "github.com/knative/pkg/controller"
+	injection "github.com/knative/pkg/injection"
+	logging "github.com/knative/pkg/logging"
+)
+
+func init() {
+	injection.Default.RegisterInformer(withInformer)
+}
+
+// Key is used for associating the Informer inside the context.Context.
+type Key struct{}
+
+func withInformer(ctx context.Context) (context.Context, controller.Informer) {
+	f := factory.Get(ctx)
+	inf := f.Authentication().V1alpha1().Policies()
+	return context.WithValue(ctx, Key{}, inf), inf.Informer()
+}
+
+// Get extracts the typed informer from the context.
+func Get(ctx context.Context) v1alpha1.PolicyInformer {
+	untyped := ctx.Value(Key{})
+	if untyped == nil {
+		logging.FromContext(ctx).Fatalf(
+			"Unable to fetch %T from context.", (v1alpha1.PolicyInformer)(nil))
+	}
+	return untyped.(v1alpha1.PolicyInformer)
+}
diff --git a/vendor/github.com/knative/pkg/client/injection/informers/istio/factory/fake/fake.go b/vendor/github.com/knative/pkg/client/injection/informers/istio/factory/fake/fake.go
new file mode 100644
index 000000000..8180c2e58
--- /dev/null
+++ b/vendor/github.com/knative/pkg/client/injection/informers/istio/factory/fake/fake.go
@@ -0,0 +1,41 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by injection-gen. DO NOT EDIT.
+
+package fake
+
+import (
+	"context"
+
+	externalversions "github.com/knative/pkg/client/informers/externalversions"
+	fake "github.com/knative/pkg/client/injection/client/fake"
+	factory "github.com/knative/pkg/client/injection/informers/istio/factory"
+	controller "github.com/knative/pkg/controller"
+	injection "github.com/knative/pkg/injection"
+)
+
+var Get = factory.Get
+
+func init() {
+	injection.Fake.RegisterInformerFactory(withInformerFactory)
+}
+
+func withInformerFactory(ctx context.Context) context.Context {
+	c := fake.Get(ctx)
+	return context.WithValue(ctx, factory.Key{},
+		externalversions.NewSharedInformerFactory(c, controller.GetResyncPeriod(ctx)))
+}
diff --git a/vendor/github.com/knative/pkg/client/injection/informers/istio/factory/istiofactory.go b/vendor/github.com/knative/pkg/client/injection/informers/istio/factory/istiofactory.go
new file mode 100644
index 000000000..3dde0a3b8
--- /dev/null
+++ b/vendor/github.com/knative/pkg/client/injection/informers/istio/factory/istiofactory.go
@@ -0,0 +1,52 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by injection-gen. DO NOT EDIT.
+
+package istiofactory
+
+import (
+	"context"
+
+	externalversions "github.com/knative/pkg/client/informers/externalversions"
+	client "github.com/knative/pkg/client/injection/client"
+	controller "github.com/knative/pkg/controller"
+	injection "github.com/knative/pkg/injection"
+	logging "github.com/knative/pkg/logging"
+)
+
+func init() {
+	injection.Default.RegisterInformerFactory(withInformerFactory)
+}
+
+// Key is used as the key for associating information with a context.Context.
+type Key struct{}
+
+func withInformerFactory(ctx context.Context) context.Context {
+	c := client.Get(ctx)
+	return context.WithValue(ctx, Key{},
+		externalversions.NewSharedInformerFactory(c, controller.GetResyncPeriod(ctx)))
+}
+
+// Get extracts the InformerFactory from the context.
+func Get(ctx context.Context) externalversions.SharedInformerFactory {
+	untyped := ctx.Value(Key{})
+	if untyped == nil {
+		logging.FromContext(ctx).Fatalf(
+			"Unable to fetch %T from context.", (externalversions.SharedInformerFactory)(nil))
+	}
+	return untyped.(externalversions.SharedInformerFactory)
+}
diff --git a/vendor/github.com/knative/pkg/client/injection/informers/istio/v1alpha3/destinationrule/destinationrule.go b/vendor/github.com/knative/pkg/client/injection/informers/istio/v1alpha3/destinationrule/destinationrule.go
new file mode 100644
index 000000000..eca63acd5
--- /dev/null
+++ b/vendor/github.com/knative/pkg/client/injection/informers/istio/v1alpha3/destinationrule/destinationrule.go
@@ -0,0 +1,52 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by injection-gen. DO NOT EDIT.
+
+package destinationrule
+
+import (
+	"context"
+
+	v1alpha3 "github.com/knative/pkg/client/informers/externalversions/istio/v1alpha3"
+	factory "github.com/knative/pkg/client/injection/informers/istio/factory"
+	controller "github.com/knative/pkg/controller"
+	injection "github.com/knative/pkg/injection"
+	logging "github.com/knative/pkg/logging"
+)
+
+func init() {
+	injection.Default.RegisterInformer(withInformer)
+}
+
+// Key is used for associating the Informer inside the context.Context.
+type Key struct{}
+
+func withInformer(ctx context.Context) (context.Context, controller.Informer) {
+	f := factory.Get(ctx)
+	inf := f.Networking().V1alpha3().DestinationRules()
+	return context.WithValue(ctx, Key{}, inf), inf.Informer()
+}
+
+// Get extracts the typed informer from the context.
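+//
+// A hedged sketch of typical use in an injection-aware controller constructor
+// (not generated text; handler is illustrative):
+//
+//	inf := Get(ctx)
+//	lister := inf.Lister()
+//	inf.Informer().AddEventHandler(handler)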
+func Get(ctx context.Context) v1alpha3.DestinationRuleInformer {
+	untyped := ctx.Value(Key{})
+	if untyped == nil {
+		logging.FromContext(ctx).Fatalf(
+			"Unable to fetch %T from context.", (v1alpha3.DestinationRuleInformer)(nil))
+	}
+	return untyped.(v1alpha3.DestinationRuleInformer)
+}
diff --git a/vendor/github.com/knative/pkg/client/injection/informers/istio/v1alpha3/destinationrule/fake/fake.go b/vendor/github.com/knative/pkg/client/injection/informers/istio/v1alpha3/destinationrule/fake/fake.go
new file mode 100644
index 000000000..2df358ca4
--- /dev/null
+++ b/vendor/github.com/knative/pkg/client/injection/informers/istio/v1alpha3/destinationrule/fake/fake.go
@@ -0,0 +1,40 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by injection-gen. DO NOT EDIT.
+
+package fake
+
+import (
+	"context"
+
+	fake "github.com/knative/pkg/client/injection/informers/istio/factory/fake"
+	destinationrule "github.com/knative/pkg/client/injection/informers/istio/v1alpha3/destinationrule"
+	controller "github.com/knative/pkg/controller"
+	injection "github.com/knative/pkg/injection"
+)
+
+var Get = destinationrule.Get
+
+func init() {
+	injection.Fake.RegisterInformer(withInformer)
+}
+
+func withInformer(ctx context.Context) (context.Context, controller.Informer) {
+	f := fake.Get(ctx)
+	inf := f.Networking().V1alpha3().DestinationRules()
+	return context.WithValue(ctx, destinationrule.Key{}, inf), inf.Informer()
+}
diff --git a/vendor/github.com/knative/pkg/client/injection/informers/istio/v1alpha3/gateway/fake/fake.go b/vendor/github.com/knative/pkg/client/injection/informers/istio/v1alpha3/gateway/fake/fake.go
new file mode 100644
index 000000000..b7ba4e9b2
--- /dev/null
+++ b/vendor/github.com/knative/pkg/client/injection/informers/istio/v1alpha3/gateway/fake/fake.go
@@ -0,0 +1,40 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by injection-gen. DO NOT EDIT.
+ +package fake + +import ( + "context" + + fake "github.com/knative/pkg/client/injection/informers/istio/factory/fake" + gateway "github.com/knative/pkg/client/injection/informers/istio/v1alpha3/gateway" + controller "github.com/knative/pkg/controller" + injection "github.com/knative/pkg/injection" +) + +var Get = gateway.Get + +func init() { + injection.Fake.RegisterInformer(withInformer) +} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := fake.Get(ctx) + inf := f.Networking().V1alpha3().Gateways() + return context.WithValue(ctx, gateway.Key{}, inf), inf.Informer() +} diff --git a/vendor/github.com/knative/pkg/client/injection/informers/istio/v1alpha3/gateway/gateway.go b/vendor/github.com/knative/pkg/client/injection/informers/istio/v1alpha3/gateway/gateway.go new file mode 100644 index 000000000..b2400e2dd --- /dev/null +++ b/vendor/github.com/knative/pkg/client/injection/informers/istio/v1alpha3/gateway/gateway.go @@ -0,0 +1,52 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. + +package gateway + +import ( + "context" + + v1alpha3 "github.com/knative/pkg/client/informers/externalversions/istio/v1alpha3" + factory "github.com/knative/pkg/client/injection/informers/istio/factory" + controller "github.com/knative/pkg/controller" + injection "github.com/knative/pkg/injection" + logging "github.com/knative/pkg/logging" +) + +func init() { + injection.Default.RegisterInformer(withInformer) +} + +// Key is used for associating the Informer inside the context.Context. +type Key struct{} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := factory.Get(ctx) + inf := f.Networking().V1alpha3().Gateways() + return context.WithValue(ctx, Key{}, inf), inf.Informer() +} + +// Get extracts the typed informer from the context. +func Get(ctx context.Context) v1alpha3.GatewayInformer { + untyped := ctx.Value(Key{}) + if untyped == nil { + logging.FromContext(ctx).Fatalf( + "Unable to fetch %T from context.", (v1alpha3.GatewayInformer)(nil)) + } + return untyped.(v1alpha3.GatewayInformer) +} diff --git a/vendor/github.com/knative/pkg/client/injection/informers/istio/v1alpha3/virtualservice/fake/fake.go b/vendor/github.com/knative/pkg/client/injection/informers/istio/v1alpha3/virtualservice/fake/fake.go new file mode 100644 index 000000000..7ad5cb341 --- /dev/null +++ b/vendor/github.com/knative/pkg/client/injection/informers/istio/v1alpha3/virtualservice/fake/fake.go @@ -0,0 +1,40 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + fake "github.com/knative/pkg/client/injection/informers/istio/factory/fake" + virtualservice "github.com/knative/pkg/client/injection/informers/istio/v1alpha3/virtualservice" + controller "github.com/knative/pkg/controller" + injection "github.com/knative/pkg/injection" +) + +var Get = virtualservice.Get + +func init() { + injection.Fake.RegisterInformer(withInformer) +} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := fake.Get(ctx) + inf := f.Networking().V1alpha3().VirtualServices() + return context.WithValue(ctx, virtualservice.Key{}, inf), inf.Informer() +} diff --git a/vendor/github.com/knative/pkg/client/injection/informers/istio/v1alpha3/virtualservice/virtualservice.go b/vendor/github.com/knative/pkg/client/injection/informers/istio/v1alpha3/virtualservice/virtualservice.go new file mode 100644 index 000000000..7206062f9 --- /dev/null +++ b/vendor/github.com/knative/pkg/client/injection/informers/istio/v1alpha3/virtualservice/virtualservice.go @@ -0,0 +1,52 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. + +package virtualservice + +import ( + "context" + + v1alpha3 "github.com/knative/pkg/client/informers/externalversions/istio/v1alpha3" + factory "github.com/knative/pkg/client/injection/informers/istio/factory" + controller "github.com/knative/pkg/controller" + injection "github.com/knative/pkg/injection" + logging "github.com/knative/pkg/logging" +) + +func init() { + injection.Default.RegisterInformer(withInformer) +} + +// Key is used for associating the Informer inside the context.Context. +type Key struct{} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := factory.Get(ctx) + inf := f.Networking().V1alpha3().VirtualServices() + return context.WithValue(ctx, Key{}, inf), inf.Informer() +} + +// Get extracts the typed informer from the context. 
+func Get(ctx context.Context) v1alpha3.VirtualServiceInformer { + untyped := ctx.Value(Key{}) + if untyped == nil { + logging.FromContext(ctx).Fatalf( + "Unable to fetch %T from context.", (v1alpha3.VirtualServiceInformer)(nil)) + } + return untyped.(v1alpha3.VirtualServiceInformer) +} diff --git a/vendor/k8s.io/client-go/listers/coordination/v1/expansion_generated.go b/vendor/github.com/knative/pkg/client/listers/authentication/v1alpha1/expansion_generated.go similarity index 64% rename from vendor/k8s.io/client-go/listers/coordination/v1/expansion_generated.go rename to vendor/github.com/knative/pkg/client/listers/authentication/v1alpha1/expansion_generated.go index ddc494f1c..f62cb91a7 100644 --- a/vendor/k8s.io/client-go/listers/coordination/v1/expansion_generated.go +++ b/vendor/github.com/knative/pkg/client/listers/authentication/v1alpha1/expansion_generated.go @@ -1,5 +1,5 @@ /* -Copyright The Kubernetes Authors. +Copyright 2019 The Knative Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -16,12 +16,12 @@ limitations under the License. // Code generated by lister-gen. DO NOT EDIT. -package v1 +package v1alpha1 -// LeaseListerExpansion allows custom methods to be added to -// LeaseLister. -type LeaseListerExpansion interface{} +// PolicyListerExpansion allows custom methods to be added to +// PolicyLister. +type PolicyListerExpansion interface{} -// LeaseNamespaceListerExpansion allows custom methods to be added to -// LeaseNamespaceLister. -type LeaseNamespaceListerExpansion interface{} +// PolicyNamespaceListerExpansion allows custom methods to be added to +// PolicyNamespaceLister. +type PolicyNamespaceListerExpansion interface{} diff --git a/vendor/github.com/knative/pkg/client/listers/authentication/v1alpha1/policy.go b/vendor/github.com/knative/pkg/client/listers/authentication/v1alpha1/policy.go new file mode 100644 index 000000000..a8581d3e9 --- /dev/null +++ b/vendor/github.com/knative/pkg/client/listers/authentication/v1alpha1/policy.go @@ -0,0 +1,94 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1alpha1 "github.com/knative/pkg/apis/istio/authentication/v1alpha1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// PolicyLister helps list Policies. +type PolicyLister interface { + // List lists all Policies in the indexer. + List(selector labels.Selector) (ret []*v1alpha1.Policy, err error) + // Policies returns an object that can list and get Policies. + Policies(namespace string) PolicyNamespaceLister + PolicyListerExpansion +} + +// policyLister implements the PolicyLister interface. +type policyLister struct { + indexer cache.Indexer +} + +// NewPolicyLister returns a new PolicyLister. 
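+//
+// Illustrative usage only: the indexer is usually taken from a shared
+// informer, e.g. (hypothetical policyInformer variable)
+//
+//	lister := NewPolicyLister(policyInformer.Informer().GetIndexer())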
+func NewPolicyLister(indexer cache.Indexer) PolicyLister { + return &policyLister{indexer: indexer} +} + +// List lists all Policies in the indexer. +func (s *policyLister) List(selector labels.Selector) (ret []*v1alpha1.Policy, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.Policy)) + }) + return ret, err +} + +// Policies returns an object that can list and get Policies. +func (s *policyLister) Policies(namespace string) PolicyNamespaceLister { + return policyNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// PolicyNamespaceLister helps list and get Policies. +type PolicyNamespaceLister interface { + // List lists all Policies in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1alpha1.Policy, err error) + // Get retrieves the Policy from the indexer for a given namespace and name. + Get(name string) (*v1alpha1.Policy, error) + PolicyNamespaceListerExpansion +} + +// policyNamespaceLister implements the PolicyNamespaceLister +// interface. +type policyNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all Policies in the indexer for a given namespace. +func (s policyNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.Policy, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.Policy)) + }) + return ret, err +} + +// Get retrieves the Policy from the indexer for a given namespace and name. +func (s policyNamespaceLister) Get(name string) (*v1alpha1.Policy, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha1.Resource("policy"), name) + } + return obj.(*v1alpha1.Policy), nil +} diff --git a/vendor/github.com/knative/pkg/client/listers/istio/v1alpha3/destinationrule.go b/vendor/github.com/knative/pkg/client/listers/istio/v1alpha3/destinationrule.go new file mode 100644 index 000000000..ec1ff7556 --- /dev/null +++ b/vendor/github.com/knative/pkg/client/listers/istio/v1alpha3/destinationrule.go @@ -0,0 +1,94 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha3 + +import ( + v1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// DestinationRuleLister helps list DestinationRules. +type DestinationRuleLister interface { + // List lists all DestinationRules in the indexer. + List(selector labels.Selector) (ret []*v1alpha3.DestinationRule, err error) + // DestinationRules returns an object that can list and get DestinationRules. + DestinationRules(namespace string) DestinationRuleNamespaceLister + DestinationRuleListerExpansion +} + +// destinationRuleLister implements the DestinationRuleLister interface. 
+type destinationRuleLister struct { + indexer cache.Indexer +} + +// NewDestinationRuleLister returns a new DestinationRuleLister. +func NewDestinationRuleLister(indexer cache.Indexer) DestinationRuleLister { + return &destinationRuleLister{indexer: indexer} +} + +// List lists all DestinationRules in the indexer. +func (s *destinationRuleLister) List(selector labels.Selector) (ret []*v1alpha3.DestinationRule, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha3.DestinationRule)) + }) + return ret, err +} + +// DestinationRules returns an object that can list and get DestinationRules. +func (s *destinationRuleLister) DestinationRules(namespace string) DestinationRuleNamespaceLister { + return destinationRuleNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// DestinationRuleNamespaceLister helps list and get DestinationRules. +type DestinationRuleNamespaceLister interface { + // List lists all DestinationRules in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1alpha3.DestinationRule, err error) + // Get retrieves the DestinationRule from the indexer for a given namespace and name. + Get(name string) (*v1alpha3.DestinationRule, error) + DestinationRuleNamespaceListerExpansion +} + +// destinationRuleNamespaceLister implements the DestinationRuleNamespaceLister +// interface. +type destinationRuleNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all DestinationRules in the indexer for a given namespace. +func (s destinationRuleNamespaceLister) List(selector labels.Selector) (ret []*v1alpha3.DestinationRule, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha3.DestinationRule)) + }) + return ret, err +} + +// Get retrieves the DestinationRule from the indexer for a given namespace and name. +func (s destinationRuleNamespaceLister) Get(name string) (*v1alpha3.DestinationRule, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha3.Resource("destinationrule"), name) + } + return obj.(*v1alpha3.DestinationRule), nil +} diff --git a/vendor/github.com/knative/pkg/client/listers/istio/v1alpha3/expansion_generated.go b/vendor/github.com/knative/pkg/client/listers/istio/v1alpha3/expansion_generated.go new file mode 100644 index 000000000..f3e2ec937 --- /dev/null +++ b/vendor/github.com/knative/pkg/client/listers/istio/v1alpha3/expansion_generated.go @@ -0,0 +1,43 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha3 + +// DestinationRuleListerExpansion allows custom methods to be added to +// DestinationRuleLister. +type DestinationRuleListerExpansion interface{} + +// DestinationRuleNamespaceListerExpansion allows custom methods to be added to +// DestinationRuleNamespaceLister. 
+type DestinationRuleNamespaceListerExpansion interface{} + +// GatewayListerExpansion allows custom methods to be added to +// GatewayLister. +type GatewayListerExpansion interface{} + +// GatewayNamespaceListerExpansion allows custom methods to be added to +// GatewayNamespaceLister. +type GatewayNamespaceListerExpansion interface{} + +// VirtualServiceListerExpansion allows custom methods to be added to +// VirtualServiceLister. +type VirtualServiceListerExpansion interface{} + +// VirtualServiceNamespaceListerExpansion allows custom methods to be added to +// VirtualServiceNamespaceLister. +type VirtualServiceNamespaceListerExpansion interface{} diff --git a/vendor/github.com/knative/pkg/client/listers/istio/v1alpha3/gateway.go b/vendor/github.com/knative/pkg/client/listers/istio/v1alpha3/gateway.go new file mode 100644 index 000000000..62a78893a --- /dev/null +++ b/vendor/github.com/knative/pkg/client/listers/istio/v1alpha3/gateway.go @@ -0,0 +1,94 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha3 + +import ( + v1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// GatewayLister helps list Gateways. +type GatewayLister interface { + // List lists all Gateways in the indexer. + List(selector labels.Selector) (ret []*v1alpha3.Gateway, err error) + // Gateways returns an object that can list and get Gateways. + Gateways(namespace string) GatewayNamespaceLister + GatewayListerExpansion +} + +// gatewayLister implements the GatewayLister interface. +type gatewayLister struct { + indexer cache.Indexer +} + +// NewGatewayLister returns a new GatewayLister. +func NewGatewayLister(indexer cache.Indexer) GatewayLister { + return &gatewayLister{indexer: indexer} +} + +// List lists all Gateways in the indexer. +func (s *gatewayLister) List(selector labels.Selector) (ret []*v1alpha3.Gateway, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha3.Gateway)) + }) + return ret, err +} + +// Gateways returns an object that can list and get Gateways. +func (s *gatewayLister) Gateways(namespace string) GatewayNamespaceLister { + return gatewayNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// GatewayNamespaceLister helps list and get Gateways. +type GatewayNamespaceLister interface { + // List lists all Gateways in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1alpha3.Gateway, err error) + // Get retrieves the Gateway from the indexer for a given namespace and name. + Get(name string) (*v1alpha3.Gateway, error) + GatewayNamespaceListerExpansion +} + +// gatewayNamespaceLister implements the GatewayNamespaceLister +// interface. +type gatewayNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all Gateways in the indexer for a given namespace. 
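+//
+// Illustrative usage only (hypothetical gatewayLister variable):
+//
+//	gws, err := gatewayLister.Gateways("default").List(labels.Everything())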
+func (s gatewayNamespaceLister) List(selector labels.Selector) (ret []*v1alpha3.Gateway, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha3.Gateway)) + }) + return ret, err +} + +// Get retrieves the Gateway from the indexer for a given namespace and name. +func (s gatewayNamespaceLister) Get(name string) (*v1alpha3.Gateway, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha3.Resource("gateway"), name) + } + return obj.(*v1alpha3.Gateway), nil +} diff --git a/vendor/github.com/knative/pkg/client/listers/istio/v1alpha3/virtualservice.go b/vendor/github.com/knative/pkg/client/listers/istio/v1alpha3/virtualservice.go new file mode 100644 index 000000000..3284cda81 --- /dev/null +++ b/vendor/github.com/knative/pkg/client/listers/istio/v1alpha3/virtualservice.go @@ -0,0 +1,94 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha3 + +import ( + v1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// VirtualServiceLister helps list VirtualServices. +type VirtualServiceLister interface { + // List lists all VirtualServices in the indexer. + List(selector labels.Selector) (ret []*v1alpha3.VirtualService, err error) + // VirtualServices returns an object that can list and get VirtualServices. + VirtualServices(namespace string) VirtualServiceNamespaceLister + VirtualServiceListerExpansion +} + +// virtualServiceLister implements the VirtualServiceLister interface. +type virtualServiceLister struct { + indexer cache.Indexer +} + +// NewVirtualServiceLister returns a new VirtualServiceLister. +func NewVirtualServiceLister(indexer cache.Indexer) VirtualServiceLister { + return &virtualServiceLister{indexer: indexer} +} + +// List lists all VirtualServices in the indexer. +func (s *virtualServiceLister) List(selector labels.Selector) (ret []*v1alpha3.VirtualService, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha3.VirtualService)) + }) + return ret, err +} + +// VirtualServices returns an object that can list and get VirtualServices. +func (s *virtualServiceLister) VirtualServices(namespace string) VirtualServiceNamespaceLister { + return virtualServiceNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// VirtualServiceNamespaceLister helps list and get VirtualServices. +type VirtualServiceNamespaceLister interface { + // List lists all VirtualServices in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1alpha3.VirtualService, err error) + // Get retrieves the VirtualService from the indexer for a given namespace and name. 
+	Get(name string) (*v1alpha3.VirtualService, error)
+	VirtualServiceNamespaceListerExpansion
+}
+
+// virtualServiceNamespaceLister implements the VirtualServiceNamespaceLister
+// interface.
+type virtualServiceNamespaceLister struct {
+	indexer   cache.Indexer
+	namespace string
+}
+
+// List lists all VirtualServices in the indexer for a given namespace.
+func (s virtualServiceNamespaceLister) List(selector labels.Selector) (ret []*v1alpha3.VirtualService, err error) {
+	err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {
+		ret = append(ret, m.(*v1alpha3.VirtualService))
+	})
+	return ret, err
+}
+
+// Get retrieves the VirtualService from the indexer for a given namespace and name.
+func (s virtualServiceNamespaceLister) Get(name string) (*v1alpha3.VirtualService, error) {
+	obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
+	if err != nil {
+		return nil, err
+	}
+	if !exists {
+		return nil, errors.NewNotFound(v1alpha3.Resource("virtualservice"), name)
+	}
+	return obj.(*v1alpha3.VirtualService), nil
+}
diff --git a/vendor/github.com/knative/pkg/cloudevents/OWNERS b/vendor/github.com/knative/pkg/cloudevents/OWNERS
new file mode 100644
index 000000000..27343aba0
--- /dev/null
+++ b/vendor/github.com/knative/pkg/cloudevents/OWNERS
@@ -0,0 +1,4 @@
+# The OWNERS file is used by prow to automatically merge approved PRs.
+
+approvers:
+- cloudevents-approvers
diff --git a/vendor/github.com/knative/pkg/cloudevents/README.md b/vendor/github.com/knative/pkg/cloudevents/README.md
new file mode 100644
index 000000000..8e3e541f4
--- /dev/null
+++ b/vendor/github.com/knative/pkg/cloudevents/README.md
@@ -0,0 +1,151 @@
+# Knative CloudEvents SDK
+
+This library produces CloudEvents in version 0.1 compatible form. To learn more
+about CloudEvents, see the [Specification](https://github.com/cloudevents/spec).
+
+There are two roles the SDK fulfills: the [producer](#producer) and the
+[consumer](#consumer). The producer creates a cloud event in either
+[Binary](#binary) or [Structured](#structured) request format. The producer
+assembles and sends the event through an HTTP endpoint. The consumer will
+inspect the incoming HTTP request and select the correct decode format.
+
+This SDK should be wire-compatible with any other producer or consumer of the
+supported versions of CloudEvents.
+
+## Getting Started
+
+CloudEvents acts as the envelope in which to send a custom object. Define a
+CloudEvent type for the events you will be producing.
+
+Example CloudEvent Type: `dev.knative.cloudevent.example`
+
+Select a source to identify the originator of this CloudEvent. It should be a
+valid URI that represents the subject which created the CloudEvent (cloud
+bucket, git repo, etc.).
+
+Example CloudEvent Source: `https://github.com/knative/pkg#cloudevents-example`
+
+Finally, create a struct that will be the data inside the CloudEvent. For
+example:
+
+```go
+
+type Example struct {
+	Sequence int    `json:"id"`
+	Message  string `json:"message"`
+}
+
+```
+
+### Producer
+
+The producer creates a new `cloudevents.Client` and then sends 10 `Example`
+events to `"http://localhost:8080"`.
+
+```go
+
+package main
+
+import (
+	"log"
+
+	"github.com/knative/pkg/cloudevents"
+)
+
+type Example struct {
+	Sequence int    `json:"id"`
+	Message  string `json:"message"`
+}
+
+func main() {
+	c := cloudevents.NewClient(
+		"http://localhost:8080",
+		cloudevents.Builder{
+			Source:    "https://github.com/knative/pkg#cloudevents-example",
+			EventType: "dev.knative.cloudevent.example",
+		},
+	)
+	for i := 0; i < 10; i++ {
+		data := Example{
+			Message:  "hello, world!",
+			Sequence: i,
+		}
+		if err := c.Send(data); err != nil {
+			log.Printf("error sending: %v", err)
+		}
+	}
+}
+
+```
+
+### Consumer
+
+The consumer listens for a POST and then inspects the headers to determine
+how to decode the request.
+
+```go
+
+package main
+
+import (
+	"context"
+	"log"
+	"net/http"
+	"time"
+
+	"github.com/knative/pkg/cloudevents"
+)
+
+type Example struct {
+	Sequence int    `json:"id"`
+	Message  string `json:"message"`
+}
+
+func handler(ctx context.Context, data *Example) {
+	metadata := cloudevents.FromContext(ctx).AsV01()
+	log.Printf("[%s] %s %s: %d, %q", metadata.EventTime.Format(time.RFC3339), metadata.ContentType, metadata.Source, data.Sequence, data.Message)
+}
+
+func main() {
+	log.Print("listening on port 8080")
+	log.Fatal(http.ListenAndServe(":8080", cloudevents.Handler(handler)))
+}
+
+```
+
+## Request Formats
+
+### CloudEvents Version 0.1
+
+#### Binary
+
+Binary is the default, but to select the binary request format explicitly:
+
+```go
+
+	c := cloudevents.NewClient(
+		"http://localhost:8080",
+		cloudevents.Builder{
+			Source:    "https://github.com/knative/pkg#cloudevents-example",
+			EventType: "dev.knative.cloudevent.example",
+			Encoding:  cloudevents.BinaryV01,
+		},
+	)
+
+```
+
+#### Structured
+
+To select the structured request format:
+
+```go
+
+	c := cloudevents.NewClient(
+		"http://localhost:8080",
+		cloudevents.Builder{
+			Source:    "https://github.com/knative/pkg#cloudevents-example",
+			EventType: "dev.knative.cloudevent.example",
+			Encoding:  cloudevents.StructuredV01,
+		},
+	)
+
+```
diff --git a/vendor/github.com/knative/pkg/cloudevents/builder.go b/vendor/github.com/knative/pkg/cloudevents/builder.go
new file mode 100644
index 000000000..4cf3706d9
--- /dev/null
+++ b/vendor/github.com/knative/pkg/cloudevents/builder.go
@@ -0,0 +1,135 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cloudevents
+
+import (
+	"fmt"
+	"net/http"
+	"time"
+
+	"github.com/google/uuid"
+)
+
+// CloudEventEncoding is used to tell the builder which encoding to select.
+// The default is Binary.
+type CloudEventEncoding int
+
+const (
+	// Binary v0.1
+	BinaryV01 CloudEventEncoding = iota
+	// Structured v0.1
+	StructuredV01
+)
+
+// Builder holds settings that do not change over CloudEvents. It is intended
+// to represent a builder of only a single CloudEvent type.
+type Builder struct {
+	// A URI describing the event producer.
+	Source string
+	// Type of occurrence which has happened.
+	EventType string
+	// The version of the `eventType`; this is producer-specific.
+ EventTypeVersion string + // A link to the schema that the `data` attribute adheres to. + SchemaURL string + // Additional metadata without a well-defined structure. + Extensions map[string]interface{} + + // Encoding specifies the requested output encoding of the CloudEvent. + Encoding CloudEventEncoding +} + +// Build produces a http request with the constant data embedded in the builder +// merged with the new data provided in the build function. The request will +// send a pre-assembled cloud event to the given target. The target is assumed +// to be a URL with a scheme, ie: "http://localhost:8080" +func (b *Builder) Build(target string, data interface{}, overrides ...SendContext) (*http.Request, error) { + if len(overrides) > 1 { + return nil, fmt.Errorf("Build was called with more than one override") + } + + var overridesV01 *V01EventContext + if len(overrides) == 1 { + switch t := overrides[0].(type) { + case V01EventContext: + o := overrides[0].(V01EventContext) + overridesV01 = &o + default: + return nil, fmt.Errorf("Build was called with unknown override type %v", t) + } + } + // TODO: when V02 is supported this will have to shuffle a little. + ctx := b.cloudEventsContextV01(overridesV01) + + if ctx.Source == "" { + return nil, fmt.Errorf("ctx.Source resolved empty") + } + if ctx.EventType == "" { + return nil, fmt.Errorf("ctx.EventType resolved empty") + } + + switch b.Encoding { + case BinaryV01: + return Binary.NewRequest(target, data, ctx) + case StructuredV01: + return Structured.NewRequest(target, data, ctx) + default: + return nil, fmt.Errorf("unsupported encoding: %v", b.Encoding) + } +} + +// cloudEventsContext creates a CloudEvent context object, assumes +// application/json as the content type. +func (b *Builder) cloudEventsContextV01(overrides *V01EventContext) V01EventContext { + ctx := V01EventContext{ + CloudEventsVersion: CloudEventsVersion, + EventType: b.EventType, + EventID: uuid.New().String(), + EventTypeVersion: b.EventTypeVersion, + SchemaURL: b.SchemaURL, + Source: b.Source, + ContentType: "application/json", + EventTime: time.Now(), + Extensions: b.Extensions, + } + if overrides != nil { + if overrides.Source != "" { + ctx.Source = overrides.Source + } + if overrides.EventID != "" { + ctx.EventID = overrides.EventID + } + if overrides.EventType != "" { + ctx.EventType = overrides.EventType + } + if !overrides.EventTime.IsZero() { + ctx.EventTime = overrides.EventTime + } + if overrides.ContentType != "" { + ctx.ContentType = overrides.ContentType + } + if len(overrides.Extensions) > 0 { + if ctx.Extensions == nil { + ctx.Extensions = make(map[string]interface{}) + } + for k, v := range overrides.Extensions { + ctx.Extensions[k] = v + } + } + } + return ctx +} diff --git a/vendor/github.com/knative/pkg/cloudevents/client.go b/vendor/github.com/knative/pkg/cloudevents/client.go new file mode 100644 index 000000000..9846edf1d --- /dev/null +++ b/vendor/github.com/knative/pkg/cloudevents/client.go @@ -0,0 +1,81 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cloudevents + +import ( + "fmt" + "io/ioutil" + "net/http" +) + +// Client wraps Builder, and is intended to be configured for a single event +// type and target +type Client struct { + builder Builder + Target string +} + +// NewClient returns a CloudEvent Client used to send CloudEvents. It is +// intended that a user would create a new client for each tuple of eventType +// and target. This is an optional helper method to avoid the tricky creation +// of the embedded Builder struct. +func NewClient(target string, builder Builder) *Client { + c := &Client{ + builder: builder, + Target: target, + } + return c +} + +// Send creates a request based on the client's settings and sends the data +// struct to the target set for this client. It returns error if there was an +// issue sending the event, otherwise nil means the event was accepted. +func (c *Client) Send(data interface{}, overrides ...SendContext) error { + req, err := c.builder.Build(c.Target, data, overrides...) + if err != nil { + return err + } + client := &http.Client{} + resp, err := client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + if accepted(resp) { + return nil + } + return fmt.Errorf("error sending cloudevent: %s", status(resp)) +} + +// accepted is a helper method to understand if the response from the target +// accepted the CloudEvent. +func accepted(resp *http.Response) bool { + if resp.StatusCode >= 200 && resp.StatusCode < 300 { + return true + } + return false +} + +// status is a helper method to read the response of the target. +func status(resp *http.Response) string { + status := resp.Status + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return fmt.Sprintf("Status[%s] error reading response body: %v", status, err) + } + return fmt.Sprintf("Status[%s] %s", status, body) +} diff --git a/vendor/github.com/knative/pkg/cloudevents/doc.go b/vendor/github.com/knative/pkg/cloudevents/doc.go new file mode 100644 index 000000000..62bc3b02c --- /dev/null +++ b/vendor/github.com/knative/pkg/cloudevents/doc.go @@ -0,0 +1,22 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package cloudevents implements utilities for handling CloudEvents. +// For information on the spec, see +// https://github.com/cloudevents/spec/blob/v0.1/http-transport-binding.md +// and +// https://github.com/cloudevents/spec/blob/v0.1/spec.md +package cloudevents diff --git a/vendor/github.com/knative/pkg/cloudevents/encoding_binary.go b/vendor/github.com/knative/pkg/cloudevents/encoding_binary.go new file mode 100644 index 000000000..8ed4fbfaa --- /dev/null +++ b/vendor/github.com/knative/pkg/cloudevents/encoding_binary.go @@ -0,0 +1,125 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cloudevents
+
+// TODO(inlined): must add header encoding/decoding
+
+import (
+	"bytes"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+)
+
+const (
+	// HeaderCloudEventsVersion is the header for the version of Cloud Events
+	// used.
+	HeaderCloudEventsVersion = "CE-CloudEventsVersion"
+
+	// HeaderEventID is the header for the unique ID of this event.
+	HeaderEventID = "CE-EventID"
+
+	// HeaderEventTime is the OPTIONAL header for the time at which an event
+	// occurred.
+	HeaderEventTime = "CE-EventTime"
+
+	// HeaderEventType is the header for the type of event represented. Value SHOULD
+	// be in reverse-dns form.
+	HeaderEventType = "CE-EventType"
+
+	// HeaderEventTypeVersion is the OPTIONAL header for the version of the
+	// scheme for the event type.
+	HeaderEventTypeVersion = "CE-EventTypeVersion"
+
+	// HeaderSchemaURL is the OPTIONAL header for the schema of the event data.
+	HeaderSchemaURL = "CE-SchemaURL"
+
+	// HeaderSource is the header for the source which emitted this event.
+	HeaderSource = "CE-Source"
+
+	// HeaderExtensionsPrefix is the OPTIONAL header prefix for CloudEvents extensions
+	HeaderExtensionsPrefix = "CE-X-"
+
+	// Binary implements Binary encoding/decoding
+	Binary binary = 0
+)
+
+type binary int
+
+// BinarySender implements an interface for sending an EventContext
+// (possibly one of several versions) as a binary-encoded HTTP request.
+type BinarySender interface {
+	// AsHeaders converts this EventContext to a set of HTTP headers.
+	AsHeaders() (http.Header, error)
+}
+
+// BinaryLoader implements an interface for translating a binary encoding HTTP
+// request or response to an EventContext (possibly one of several versions).
+type BinaryLoader interface {
+	// FromHeaders copies data from the supplied HTTP headers into the object.
+	// Values will be defaulted if necessary.
+	FromHeaders(in http.Header) error
+}
+
+// FromRequest parses event data and context from an HTTP request.
+func (binary) FromRequest(data interface{}, r *http.Request) (LoadContext, error) {
+	var ec LoadContext
+	switch {
+	case r.Header.Get("CE-SpecVersion") == V02CloudEventsVersion:
+		ec = &V02EventContext{}
+	case r.Header.Get("CE-CloudEventsVersion") == V01CloudEventsVersion:
+		ec = &V01EventContext{}
+	default:
+		return nil, fmt.Errorf("Could not determine Cloud Events version from header: %+v", r.Header)
+	}
+
+	if err := ec.FromHeaders(r.Header); err != nil {
+		return nil, err
+	}
+
+	if err := unmarshalEventData(ec.DataContentType(), r.Body, data); err != nil {
+		return nil, err
+	}
+
+	return ec, nil
+}
+
+// NewRequest creates an HTTP request for Binary content encoding.
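+//
+// Illustrative usage only (hypothetical data and eventCtx values):
+//
+//	req, err := Binary.NewRequest("http://localhost:8080", data, eventCtx)
+//
+// where eventCtx is, for example, a V01EventContext.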
+func (t binary) NewRequest(urlString string, data interface{}, context SendContext) (*http.Request, error) {
+	url, err := url.Parse(urlString)
+	if err != nil {
+		return nil, err
+	}
+
+	h, err := context.AsHeaders()
+	if err != nil {
+		return nil, err
+	}
+
+	b, err := marshalEventData(h.Get("Content-Type"), data)
+	if err != nil {
+		return nil, err
+	}
+
+	return &http.Request{
+		Method: http.MethodPost,
+		URL:    url,
+		Header: h,
+		Body:   ioutil.NopCloser(bytes.NewReader(b)),
+	}, nil
+}
diff --git a/vendor/github.com/knative/pkg/cloudevents/encoding_structured.go b/vendor/github.com/knative/pkg/cloudevents/encoding_structured.go
new file mode 100644
index 000000000..8670241d3
--- /dev/null
+++ b/vendor/github.com/knative/pkg/cloudevents/encoding_structured.go
@@ -0,0 +1,143 @@
+/*
+Copyright 2018 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cloudevents
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"strings"
+)
+
+const (
+	// Structured implements the JSON structured encoding/decoding
+	Structured structured = 0
+)
+
+type structured int
+
+// StructuredSender implements an interface for translating an EventContext
+// (possibly one of several versions) to a structured encoding HTTP request.
+type StructuredSender interface {
+	// AsJSON encodes the object into a map from string to JSON data, which
+	// allows additional keys to be encoded later.
+	AsJSON() (map[string]json.RawMessage, error)
+}
+
+// StructuredLoader implements an interface for translating a structured
+// encoding HTTP request or response to an EventContext (possibly one of
+// several versions).
+type StructuredLoader interface {
+	// FromJSON assumes that the object has already been decoded into a raw map
+	// from string to json.RawMessage, because this is needed to extract the
+	// CloudEvents version.
+	FromJSON(map[string]json.RawMessage) error
+}
+
+// FromRequest parses a CloudEvent from structured content encoding.
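+//
+// Illustrative usage only (hypothetical Example type and httpReq value):
+//
+//	var data Example
+//	loadCtx, err := Structured.FromRequest(&data, httpReq)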
+func (structured) FromRequest(data interface{}, r *http.Request) (LoadContext, error) { + raw := make(map[string]json.RawMessage) + if err := json.NewDecoder(r.Body).Decode(&raw); err != nil { + return nil, err + } + + rawData := raw["data"] + delete(raw, "data") + + var ec LoadContext + v := "" + if err := json.Unmarshal(raw["specversion"], &v); err == nil && v == V02CloudEventsVersion { + ec = &V02EventContext{} + } else if err := json.Unmarshal(raw["cloudEventsVersion"], &v); err == nil && v == V01CloudEventsVersion { + ec = &V01EventContext{} + } else { + return nil, fmt.Errorf("Could not determine Cloud Events version from payload: %q", data) + } + + if err := ec.FromJSON(raw); err != nil { + return nil, err + } + + contentType := ec.DataContentType() + if contentType == "" { + contentType = contentTypeJSON + } + var reader io.Reader + if !isJSONEncoding(contentType) { + var jsonDecoded string + if err := json.Unmarshal(rawData, &jsonDecoded); err != nil { + return nil, fmt.Errorf("Could not JSON decode %q value %q", contentType, rawData) + } + reader = strings.NewReader(jsonDecoded) + } else { + reader = bytes.NewReader(rawData) + } + if err := unmarshalEventData(contentType, reader, data); err != nil { + return nil, err + } + return ec, nil +} + +// NewRequest creates an HTTP request for Structured content encoding. +func (structured) NewRequest(urlString string, data interface{}, context SendContext) (*http.Request, error) { + url, err := url.Parse(urlString) + if err != nil { + return nil, err + } + + fields, err := context.AsJSON() + if err != nil { + return nil, err + } + + // TODO: remove this defaulting? + contentType := context.DataContentType() + if contentType == "" { + contentType = contentTypeJSON + } + + dataBytes, err := marshalEventData(contentType, data) + if err != nil { + return nil, err + } + if isJSONEncoding(contentType) { + fields["data"] = json.RawMessage(dataBytes) + } else { + fields["data"], err = json.Marshal(string(dataBytes)) + if err != nil { + return nil, err + } + } + + b, err := json.Marshal(fields) + if err != nil { + return nil, err + } + + h := http.Header{} + h.Set(HeaderContentType, ContentTypeStructuredJSON) + return &http.Request{ + Method: http.MethodPost, + URL: url, + Header: h, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil +} diff --git a/vendor/github.com/knative/pkg/cloudevents/event.go b/vendor/github.com/knative/pkg/cloudevents/event.go new file mode 100644 index 000000000..478c1cfe3 --- /dev/null +++ b/vendor/github.com/knative/pkg/cloudevents/event.go @@ -0,0 +1,212 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cloudevents + +import ( + "context" + "encoding/json" + "encoding/xml" + "fmt" + "io" + "net/http" + "reflect" +) + +const ( + // ContentTypeStructuredJSON is the content-type for "Structured" encoding + // where an event envelope is written in JSON and the body is arbitrary + // data which might be an alternate encoding. 
+	ContentTypeStructuredJSON = "application/cloudevents+json"
+
+	// ContentTypeBinaryJSON is the content-type for "Binary" encoding where
+	// the event context is in HTTP headers and the body is JSON event data.
+	ContentTypeBinaryJSON = "application/json"
+
+	// TODO(inlined) what about charset additions?
+	contentTypeJSON = "application/json"
+	contentTypeXML  = "application/xml"
+
+	// HeaderContentType is the standard HTTP header "Content-Type"
+	HeaderContentType = "Content-Type"
+
+	// CloudEventsVersion is a legacy alias of V01CloudEventsVersion, for compatibility.
+	CloudEventsVersion = V01CloudEventsVersion
+)
+
+// EventContext is a legacy un-versioned alias, from when we thought that field names would stay the same.
+type EventContext = V01EventContext
+
+// HTTPMarshaller implements a scheme for decoding CloudEvents over HTTP.
+// Implementations are Binary, Structured, and Any.
+type HTTPMarshaller interface {
+	FromRequest(data interface{}, r *http.Request) (LoadContext, error)
+	NewRequest(urlString string, data interface{}, context SendContext) (*http.Request, error)
+}
+
+// ContextTranslator provides a set of translation methods between the
+// different versions of the CloudEvents spec, which allows programs to
+// interoperate with different versions of the CloudEvents spec by
+// converting EventContexts to their preferred version.
+type ContextTranslator interface {
+	// AsV01 provides a translation from whatever the "native" encoding of the
+	// CloudEvent was to the equivalent in v0.1 field names, moving fields to or
+	// from extensions as necessary.
+	AsV01() V01EventContext
+
+	// AsV02 provides a translation from whatever the "native" encoding of the
+	// CloudEvent was to the equivalent in v0.2 field names, moving fields to or
+	// from extensions as necessary.
+	AsV02() V02EventContext
+
+	// DataContentType returns the MIME content type for encoding data, which is
+	// needed by both encoding and decoding.
+	DataContentType() string
+}
+
+// SendContext provides an interface for encoding an EventContext (the set of
+// non-data event attributes of a CloudEvent) into an outgoing request.
+type SendContext interface {
+	ContextTranslator
+
+	StructuredSender
+	BinarySender
+}
+
+// LoadContext provides an interface for extracting information from an
+// EventContext (the set of non-data event attributes of a CloudEvent).
+type LoadContext interface {
+	ContextTranslator
+
+	StructuredLoader
+	BinaryLoader
+}
+
+// ContextType is a unified interface for both sending and loading the
+// CloudEvent data across versions.
+type ContextType interface {
+	ContextTranslator
+
+	StructuredSender
+	BinarySender
+
+	StructuredLoader
+	BinaryLoader
+}
+
+func anyError(errs ...error) error {
+	for _, err := range errs {
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func require(name string, value string) error {
+	if len(value) == 0 {
+		return fmt.Errorf("missing required field %q", name)
+	}
+	return nil
+}
+
+// The CloudEvents spec allows two forms of JSON encoding:
+// 1. The overall message (Structured JSON encoding)
+// 2. Just the event data, where the context will be in HTTP headers instead
+//
+// Case #1 actually includes case #2: in Structured encoding, the JSON
+// HTTP body itself allows for cross-encoding of the "data" field.
+// This method is only intended for checking the inner JSON encoding type.
+func isJSONEncoding(encoding string) bool {
+	return encoding == contentTypeJSON || encoding == "text/json"
+}
+
+func isXMLEncoding(encoding string) bool {
+	return encoding == contentTypeXML || encoding == "text/xml"
+}
+
+func unmarshalEventData(encoding string, reader io.Reader, data interface{}) error {
+	// The Handler tools allow developers to not ask for event data;
+	// in this case, just don't unmarshal anything.
+	if data == nil {
+		return nil
+	}
+
+	// If someone tried to unmarshal an event into an io.Reader, just assign our existing reader.
+	// (This is used by event.Mux to determine which type to unmarshal as.)
+	readerPtrType := reflect.TypeOf((*io.Reader)(nil))
+	if reflect.TypeOf(data).ConvertibleTo(readerPtrType) {
+		reflect.ValueOf(data).Elem().Set(reflect.ValueOf(reader))
+		return nil
+	}
+	if isJSONEncoding(encoding) || encoding == "" {
+		return json.NewDecoder(reader).Decode(&data)
+	}
+
+	if isXMLEncoding(encoding) {
+		return xml.NewDecoder(reader).Decode(&data)
+	}
+
+	return fmt.Errorf("Cannot decode content type %q", encoding)
+}
+
+func marshalEventData(encoding string, data interface{}) ([]byte, error) {
+	var b []byte
+	var err error
+
+	if isJSONEncoding(encoding) {
+		b, err = json.Marshal(data)
+	} else if isXMLEncoding(encoding) {
+		b, err = xml.Marshal(data)
+	} else {
+		err = fmt.Errorf("Cannot encode content type %q", encoding)
+	}
+
+	if err != nil {
+		return nil, err
+	}
+	return b, nil
+}
+
+// FromRequest parses a CloudEvent from any known encoding.
+func FromRequest(data interface{}, r *http.Request) (LoadContext, error) {
+	switch r.Header.Get(HeaderContentType) {
+	case ContentTypeStructuredJSON:
+		return Structured.FromRequest(data, r)
+	case ContentTypeBinaryJSON:
+		return Binary.FromRequest(data, r)
+	default:
+		// TODO: assume binary content mode
+		// (https://github.com/cloudevents/spec/blob/v0.1/http-transport-binding.md#3-http-message-mapping)
+		// and that data is ??? (io.Reader?, byte array?)
+		return nil, fmt.Errorf("Cannot handle encoding %q", r.Header.Get("Content-Type"))
+	}
+}
+
+// NewRequest creates an HTTP request for Structured content encoding.
+func NewRequest(urlString string, data interface{}, context SendContext) (*http.Request, error) {
+	return Structured.NewRequest(urlString, data, context)
+}
+
+// contextKeyType is an opaque key type used to store V01EventContexts in a context.Context.
+type contextKeyType struct{}
+
+var contextKey = contextKeyType{}
+
+// FromContext loads a V01EventContext from a normal context.Context.
+func FromContext(ctx context.Context) LoadContext {
+	return ctx.Value(contextKey).(LoadContext)
+}
diff --git a/vendor/github.com/knative/pkg/cloudevents/event_v01.go b/vendor/github.com/knative/pkg/cloudevents/event_v01.go
new file mode 100644
index 000000000..2ba67ca0c
--- /dev/null
+++ b/vendor/github.com/knative/pkg/cloudevents/event_v01.go
@@ -0,0 +1,236 @@
+/*
+Copyright 2018 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package cloudevents + +import ( + "encoding/json" + "fmt" + "net/http" + "strings" + "time" +) + +const ( + // V01CloudEventsVersion is the version of the CloudEvents spec targeted + // by this library. + V01CloudEventsVersion = "0.1" + + // v0.1 field names + fieldCloudEventsVersion = "CloudEventsVersion" + fieldEventID = "EventID" + fieldEventType = "EventType" +) + +// V01EventContext holds standard metadata about an event. See +// https://github.com/cloudevents/spec/blob/v0.1/spec.md#context-attributes for +// details on these fields. +type V01EventContext struct { + // The version of the CloudEvents specification used by the event. + CloudEventsVersion string `json:"cloudEventsVersion,omitempty"` + // ID of the event; must be non-empty and unique within the scope of the producer. + EventID string `json:"eventID"` + // Timestamp when the event happened. + EventTime time.Time `json:"eventTime,omitempty"` + // Type of occurrence which has happened. + EventType string `json:"eventType"` + // The version of the `eventType`; this is producer-specific. + EventTypeVersion string `json:"eventTypeVersion,omitempty"` + // A link to the schema that the `data` attribute adheres to. + SchemaURL string `json:"schemaURL,omitempty"` + // A MIME (RFC 2046) string describing the media type of `data`. + // TODO: Should an empty string assume `application/json`, or auto-detect the content? + ContentType string `json:"contentType,omitempty"` + // A URI describing the event producer. + Source string `json:"source"` + // Additional metadata without a well-defined structure. + Extensions map[string]interface{} `json:"extensions,omitempty"` +} + +// AsV01 implements the ContextTranslator interface. +func (ec V01EventContext) AsV01() V01EventContext { + return ec +} + +// AsV02 implements the ContextTranslator interface. +func (ec V01EventContext) AsV02() V02EventContext { + ret := V02EventContext{ + SpecVersion: V02CloudEventsVersion, + Type: ec.EventType, + Source: ec.Source, + ID: ec.EventID, + Time: ec.EventTime, + SchemaURL: ec.SchemaURL, + ContentType: ec.ContentType, + Extensions: make(map[string]interface{}), + } + // eventTypeVersion was retired in v0.2, so put it in an extension. + if ec.EventTypeVersion != "" { + ret.Extensions["eventtypeversion"] = ec.EventTypeVersion + } + for k, v := range ec.Extensions { + ret.Extensions[k] = v + } + return ret +} + +// AsHeaders implements the BinarySender interface. +func (ec V01EventContext) AsHeaders() (http.Header, error) { + h := http.Header{} + h.Set("CE-CloudEventsVersion", ec.CloudEventsVersion) + h.Set("CE-EventID", ec.EventID) + h.Set("CE-EventType", ec.EventType) + h.Set("CE-Source", ec.Source) + if ec.CloudEventsVersion == "" { + h.Set("CE-CloudEventsVersion", V01CloudEventsVersion) + } + if !ec.EventTime.IsZero() { + h.Set("CE-EventTime", ec.EventTime.Format(time.RFC3339Nano)) + } + if ec.EventTypeVersion != "" { + h.Set("CE-EventTypeVersion", ec.EventTypeVersion) + } + if ec.SchemaURL != "" { + h.Set("CE-SchemaUrl", ec.SchemaURL) + } + if ec.ContentType != "" { + h.Set("Content-Type", ec.ContentType) + } + for k, v := range ec.Extensions { + encoded, err := json.Marshal(v) + if err != nil { + return nil, err + } + // Preserve case in v0.1, even though HTTP headers are case-insensitive. + h["CE-X-"+k] = []string{string(encoded)} + } + return h, nil +} + +// FromHeaders implements the BinaryLoader interface. 
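+//
+// Illustrative usage only (hypothetical httpReq value):
+//
+//	var ec V01EventContext
+//	err := ec.FromHeaders(httpReq.Header)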
+func (ec *V01EventContext) FromHeaders(in http.Header) error { + missingField := func(name string) error { + if in.Get("CE-"+name) == "" { + return fmt.Errorf("Missing field %q in %v: %q", "CE-"+name, in, in.Get("CE-"+name)) + } + return nil + } + if err := anyError( + missingField("CloudEventsVersion"), + missingField("EventID"), + missingField("EventType"), + missingField("Source")); err != nil { + return err + } + data := V01EventContext{ + CloudEventsVersion: in.Get("CE-CloudEventsVersion"), + EventID: in.Get("CE-EventID"), + EventType: in.Get("CE-EventType"), + EventTypeVersion: in.Get("CE-EventTypeVersion"), + SchemaURL: in.Get("CE-SchemaURL"), + ContentType: in.Get("Content-Type"), + Source: in.Get("CE-Source"), + Extensions: make(map[string]interface{}), + } + if timeStr := in.Get("CE-EventTime"); timeStr != "" { + var err error + if data.EventTime, err = time.Parse(time.RFC3339Nano, timeStr); err != nil { + return err + } + } + for k, v := range in { + if strings.EqualFold(k[:len("CE-X-")], "CE-X-") { + key := k[len("CE-X-"):] + var tmp interface{} + if err := json.Unmarshal([]byte(v[0]), &tmp); err == nil { + data.Extensions[key] = tmp + } else { + // If we can't unmarshal the data, treat it as a string. + data.Extensions[key] = v[0] + } + } + } + *ec = data + return nil +} + +// AsJSON implements the StructuredSender interface. +func (ec V01EventContext) AsJSON() (map[string]json.RawMessage, error) { + ret := make(map[string]json.RawMessage) + err := anyError( + encodeKey(ret, "cloudEventsVersion", ec.CloudEventsVersion), + encodeKey(ret, "eventID", ec.EventID), + encodeKey(ret, "eventTime", ec.EventTime), + encodeKey(ret, "eventType", ec.EventType), + encodeKey(ret, "eventTypeVersion", ec.EventTypeVersion), + encodeKey(ret, "schemaURL", ec.SchemaURL), + encodeKey(ret, "contentType", ec.ContentType), + encodeKey(ret, "source", ec.Source), + encodeKey(ret, "extensions", ec.Extensions)) + return ret, err +} + +// DataContentType implements the StructuredSender interface. +func (ec V01EventContext) DataContentType() string { + return ec.ContentType +} + +// FromJSON implements the StructuredLoader interface. +func (ec *V01EventContext) FromJSON(in map[string]json.RawMessage) error { + data := V01EventContext{ + CloudEventsVersion: extractKey(in, "cloudEventsVersion"), + EventID: extractKey(in, "eventID"), + EventType: extractKey(in, "eventType"), + Source: extractKey(in, "source"), + } + var err error + if timeStr := extractKey(in, "eventTime"); timeStr != "" { + if data.EventTime, err = time.Parse(time.RFC3339Nano, timeStr); err != nil { + return err + } + } + extractKeyTo(in, "eventTypeVersion", &data.EventTypeVersion) + extractKeyTo(in, "schemaURL", &data.SchemaURL) + extractKeyTo(in, "contentType", &data.ContentType) + if len(in["extensions"]) == 0 { + in["extensions"] = []byte("{}") + } + if err = json.Unmarshal(in["extensions"], &data.Extensions); err != nil { + return err + } + *ec = data + return nil +} + +func encodeKey(out map[string]json.RawMessage, key string, value interface{}) (err error) { + if s, ok := value.(string); ok && s == "" { + // Skip empty strings. 
+ return nil + } + out[key], err = json.Marshal(value) + return +} + +func extractKey(in map[string]json.RawMessage, key string) (s string) { + extractKeyTo(in, key, &s) + return +} + +func extractKeyTo(in map[string]json.RawMessage, key string, out *string) error { + tmp := in[key] + delete(in, key) + return json.Unmarshal(tmp, out) +} diff --git a/vendor/github.com/knative/pkg/cloudevents/event_v02.go b/vendor/github.com/knative/pkg/cloudevents/event_v02.go new file mode 100644 index 000000000..dc64767cc --- /dev/null +++ b/vendor/github.com/knative/pkg/cloudevents/event_v02.go @@ -0,0 +1,261 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cloudevents + +import ( + "encoding/json" + "fmt" + "net/http" + "strings" + "time" +) + +const ( + // V02CloudEventsVersion is the version of the CloudEvents spec targeted + // by this library. + V02CloudEventsVersion = "0.2" + + // required attributes + fieldSpecVersion = "specversion" + fieldID = "id" + fieldType = "type" + fieldSource = "source" + fieldTime = "time" + fieldSchemaURL = "schemaurl" + fieldContentType = "contenttype" + headerContentType = "Content-Type" +) + +// V02EventContext represents the non-data attributes of a CloudEvents v0.2 +// event. +type V02EventContext struct { + // The version of the CloudEvents specification used by the event. + SpecVersion string `json:"specversion"` + // The type of the occurrence which has happened. + Type string `json:"type"` + // A URI describing the event producer. + Source string `json:"source"` + // ID of the event; must be non-empty and unique within the scope of the producer. + ID string `json:"id"` + // Timestamp when the event happened. + Time time.Time `json:"time,omitempty"` + // A link to the schema that the `data` attribute adheres to. + SchemaURL string `json:"schemaurl,omitempty"` + // A MIME (RFC2046) string describing the media type of `data`. + // TODO: Should an empty string assume `application/json`, `application/octet-stream`, or auto-detect the content? + ContentType string `json:"contenttype,omitempty"` + // Additional extension metadata beyond the base spec. + Extensions map[string]interface{} `json:"-,omitempty"` +} + +// AsV01 implements the ContextTranslator interface. +func (ec V02EventContext) AsV01() V01EventContext { + ret := V01EventContext{ + CloudEventsVersion: V01CloudEventsVersion, + EventID: ec.ID, + EventTime: ec.Time, + EventType: ec.Type, + SchemaURL: ec.SchemaURL, + ContentType: ec.ContentType, + Source: ec.Source, + Extensions: make(map[string]interface{}), + } + for k, v := range ec.Extensions { + // eventTypeVersion was retired in v0.2 + if strings.EqualFold(k, "eventTypeVersion") { + etv, ok := v.(string) + if ok { + ret.EventTypeVersion = etv + } + continue + } + ret.Extensions[k] = v + } + return ret +} + +// AsV02 implements the ContextTranslator interface. +func (ec V02EventContext) AsV02() V02EventContext { + return ec +} + +// AsHeaders implements the BinarySender interface. 
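To make the translation above concrete, a minimal sketch showing how AsV01 lifts the retired eventTypeVersion attribute back out of the v0.2 extensions map. Field values are made up for illustration:

    package main

    import (
    	"fmt"

    	"github.com/knative/pkg/cloudevents"
    )

    func main() {
    	v02 := cloudevents.V02EventContext{
    		SpecVersion: cloudevents.V02CloudEventsVersion,
    		Type:        "dev.example.ping",
    		Source:      "/example/producer",
    		ID:          "abc-123",
    		Extensions:  map[string]interface{}{"eventtypeversion": "v2"},
    	}
    	v01 := v02.AsV01()
    	fmt.Println(v01.EventTypeVersion) // "v2": restored to the dedicated v0.1 field
    }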
+func (ec V02EventContext) AsHeaders() (http.Header, error) {
+	h := http.Header{}
+	h.Set("CE-"+fieldSpecVersion, ec.SpecVersion)
+	h.Set("CE-"+fieldType, ec.Type)
+	h.Set("CE-"+fieldSource, ec.Source)
+	h.Set("CE-"+fieldID, ec.ID)
+	if ec.SpecVersion == "" {
+		h.Set("CE-"+fieldSpecVersion, V02CloudEventsVersion)
+	}
+	if !ec.Time.IsZero() {
+		h.Set("CE-"+fieldTime, ec.Time.Format(time.RFC3339Nano))
+	}
+	if ec.SchemaURL != "" {
+		h.Set("CE-"+fieldSchemaURL, ec.SchemaURL)
+	}
+	if ec.ContentType != "" {
+		h.Set(headerContentType, ec.ContentType)
+	}
+	for k, v := range ec.Extensions {
+		// Per spec, map-valued extensions are converted to a list of headers as:
+		// CE-attrib-key
+		if mapVal, ok := v.(map[string]interface{}); ok {
+			for subkey, subval := range mapVal {
+				encoded, err := json.Marshal(subval)
+				if err != nil {
+					return nil, err
+				}
+				h.Set("CE-"+k+"-"+subkey, string(encoded))
+			}
+			continue
+		}
+		encoded, err := json.Marshal(v)
+		if err != nil {
+			return nil, err
+		}
+		h.Set("CE-"+k, string(encoded))
+	}
+
+	return h, nil
+}
+
+// FromHeaders implements the BinaryLoader interface.
+func (ec *V02EventContext) FromHeaders(in http.Header) error {
+	missingField := func(name string) error {
+		if in.Get("CE-"+name) == "" {
+			return fmt.Errorf("missing field %q in %v: %q", "CE-"+name, in, in.Get("CE-"+name))
+		}
+		return nil
+	}
+	err := anyError(
+		missingField(fieldSpecVersion),
+		missingField(fieldID),
+		missingField(fieldType),
+		missingField(fieldSource),
+	)
+	if err != nil {
+		return err
+	}
+	data := V02EventContext{
+		ContentType: in.Get(headerContentType),
+		Extensions:  make(map[string]interface{}),
+	}
+	// Extensions and top-level fields are mixed under "CE-" headers.
+	// Extract them all here rather than trying to clear fields in headers.
+	for k, v := range in {
+		// Guard the slice below: header keys shorter than "CE-" (e.g. "TE")
+		// would otherwise panic.
+		if len(k) > len("CE-") && strings.EqualFold(k[:len("CE-")], "CE-") {
+			key, value := strings.ToLower(k[len("CE-"):]), v[0]
+			switch key {
+			case fieldSpecVersion:
+				data.SpecVersion = value
+			case fieldType:
+				data.Type = value
+			case fieldSource:
+				data.Source = value
+			case fieldID:
+				data.ID = value
+			case fieldSchemaURL:
+				data.SchemaURL = value
+			case fieldTime:
+				if data.Time, err = time.Parse(time.RFC3339Nano, value); err != nil {
+					return err
+				}
+			default:
+				var tmp interface{}
+				if err = json.Unmarshal([]byte(value), &tmp); err != nil {
+					tmp = value
+				}
+				// Per spec, map-valued extensions are converted to a list of headers as:
+				// CE-attrib-key. This is where things get a bit crazy... see
+				// https://github.com/cloudevents/spec/issues/367 for additional notes.
+				if strings.Contains(key, "-") {
+					items := strings.SplitN(key, "-", 2)
+					key, subkey := items[0], items[1]
+					if _, ok := data.Extensions[key]; !ok {
+						data.Extensions[key] = make(map[string]interface{})
+					}
+					if submap, ok := data.Extensions[key].(map[string]interface{}); ok {
+						submap[subkey] = tmp
+					}
+				} else {
+					data.Extensions[key] = tmp
+				}
+			}
+		}
+	}
+	*ec = data
+	return nil
+}
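A minimal sketch of the binary-content-mode parsing above, including the map-valued extension flattening it describes. The header values are made up for illustration:

    package main

    import (
    	"fmt"
    	"net/http"

    	"github.com/knative/pkg/cloudevents"
    )

    func main() {
    	h := http.Header{}
    	h.Set("CE-SpecVersion", "0.2")
    	h.Set("CE-Type", "dev.example.ping")
    	h.Set("CE-Source", "/example/producer")
    	h.Set("CE-ID", "abc-123")
    	// A map-valued extension arrives flattened as CE-<attribute>-<key>.
    	h.Set("CE-Trace-Span", `"span-1"`)

    	var ec cloudevents.V02EventContext
    	if err := ec.FromHeaders(h); err != nil {
    		panic(err)
    	}
    	fmt.Println(ec.Type, ec.Extensions["trace"]) // dev.example.ping map[span:span-1]
    }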
+// AsJSON implements the StructuredSender interface.
+func (ec V02EventContext) AsJSON() (map[string]json.RawMessage, error) {
+	ret := make(map[string]json.RawMessage)
+	err := anyError(
+		encodeKey(ret, fieldSpecVersion, ec.SpecVersion),
+		encodeKey(ret, fieldType, ec.Type),
+		encodeKey(ret, fieldSource, ec.Source),
+		encodeKey(ret, fieldID, ec.ID),
+		encodeKey(ret, fieldTime, ec.Time),
+		encodeKey(ret, fieldSchemaURL, ec.SchemaURL),
+		encodeKey(ret, fieldContentType, ec.ContentType),
+	)
+	if err != nil {
+		return nil, err
+	}
+	for k, v := range ec.Extensions {
+		if err = encodeKey(ret, k, v); err != nil {
+			return nil, err
+		}
+	}
+	return ret, nil
+}
+
+// DataContentType implements the StructuredSender interface.
+func (ec V02EventContext) DataContentType() string {
+	return ec.ContentType
+}
+
+// FromJSON implements the StructuredLoader interface.
+func (ec *V02EventContext) FromJSON(in map[string]json.RawMessage) error {
+	data := V02EventContext{
+		SpecVersion: extractKey(in, fieldSpecVersion),
+		Type:        extractKey(in, fieldType),
+		Source:      extractKey(in, fieldSource),
+		ID:          extractKey(in, fieldID),
+		Extensions:  make(map[string]interface{}),
+	}
+	var err error
+	if timeStr := extractKey(in, fieldTime); timeStr != "" {
+		if data.Time, err = time.Parse(time.RFC3339Nano, timeStr); err != nil {
+			return err
+		}
+	}
+	extractKeyTo(in, fieldSchemaURL, &data.SchemaURL)
+	extractKeyTo(in, fieldContentType, &data.ContentType)
+	// Extract the remaining items from in by converting to JSON and then
+	// unpacking into Extensions. This avoids having to do funny type
+	// checking/testing in the loop over values.
+	extensionsJSON, err := json.Marshal(in)
+	if err != nil {
+		return err
+	}
+	err = json.Unmarshal(extensionsJSON, &data.Extensions)
+	*ec = data
+	return err
+}
diff --git a/vendor/github.com/knative/pkg/cloudevents/handler.go b/vendor/github.com/knative/pkg/cloudevents/handler.go
new file mode 100644
index 000000000..3b8575a1c
--- /dev/null
+++ b/vendor/github.com/knative/pkg/cloudevents/handler.go
@@ -0,0 +1,401 @@
+/*
+Copyright 2018 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cloudevents
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"log"
+	"net/http"
+	"reflect"
+	"strings"
+
+	"github.com/davecgh/go-spew/spew"
+	"github.com/google/uuid"
+)
+
+type handler struct {
+	numIn    int
+	fnValue  reflect.Value
+	dataType reflect.Type
+}
+
+type failedHandler struct {
+	err error
+}
+
+type errAndHandler interface {
+	http.Handler
+	error
+}
+
+const (
+	inParamUsage  = "Expected a function taking either no parameters, a context.Context, or (context.Context, any)"
+	outParamUsage = "Expected a function returning either nothing, an error, (any, error), or (any, SendContext, error)"
+)
+
+var (
+	// FYI: Getting the type of an interface is a bit hard in Go because nil is special:
+	// 1. Structs & pointers have concrete types, whereas interfaces are actually tuples of
+	//    [implementation vtable, pointer].
+	// 2. Literals (such as nil) can be cast to any relevant type.
+	// Because TypeOf takes an interface{}, a nil interface reference would cast lossily when
+	// it leaves this stack frame. The workaround is to pass a pointer to an interface and then
+	// get the type of its reference.
+	// For example, see: https://play.golang.org/p/_dxLvdkvqvg
+	contextType     = reflect.TypeOf((*context.Context)(nil)).Elem()
+	errorType       = reflect.TypeOf((*error)(nil)).Elem()
+	sendContextType = reflect.TypeOf((*SendContext)(nil)).Elem()
+)
+
+// Verifies that the inputs to a function have a valid signature; returns an error otherwise.
+// Valid input signatures:
+// (), (context.Context), (context.Context, any)
+func validateInParamSignature(fnType reflect.Type) error {
+	switch fnType.NumIn() {
+	case 2:
+		fallthrough
+	case 1:
+		if !fnType.In(0).ConvertibleTo(contextType) {
+			return fmt.Errorf("%s; cannot convert parameter 0 from %s to context.Context", inParamUsage, fnType.In(0))
+		}
+		fallthrough
+	case 0:
+		return nil
+	default:
+		return fmt.Errorf("%s; function has too many parameters (%d)", inParamUsage, fnType.NumIn())
+	}
+}
+
+// Verifies that the outputs of a function have a valid signature; returns an error otherwise.
+// Valid output signatures:
+// (), (error), (any, error), (any, SendContext, error)
+func validateOutParamSignature(fnType reflect.Type) error {
+	switch fnType.NumOut() {
+	case 3:
+		contextType := fnType.Out(1)
+		if !contextType.ConvertibleTo(sendContextType) {
+			return fmt.Errorf("%s; cannot convert return type 1 from %s to SendContext", outParamUsage, contextType)
+		}
+		fallthrough
+	case 2:
+		fallthrough
+	case 1:
+		paramNo := fnType.NumOut() - 1
+		paramType := fnType.Out(paramNo)
+		if !paramType.ConvertibleTo(errorType) {
+			return fmt.Errorf("%s; cannot convert return type %d from %s to error", outParamUsage, paramNo, paramType)
+		}
+		fallthrough
+	case 0:
+		return nil
+	default:
+		return fmt.Errorf("%s; function has too many return types (%d)", outParamUsage, fnType.NumOut())
+	}
+}
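These two validators are what the exported Handler constructor (defined further below) runs before wrapping a function. A minimal sketch of one accepted and one rejected signature; the function bodies are made up for illustration:

    package main

    import (
    	"context"
    	"fmt"

    	"github.com/knative/pkg/cloudevents"
    )

    func main() {
    	// Accepted: (context.Context, payload) in, (result, error) out.
    	valid := cloudevents.Handler(func(ctx context.Context, data map[string]string) (string, error) {
    		return "handled", nil
    	})
    	_ = valid

    	// Rejected: too many inputs. The returned http.Handler also implements
    	// error and reports the validation failure on every request.
    	bad := cloudevents.Handler(func(a, b, c string) {})
    	if err, isErr := bad.(error); isErr {
    		fmt.Println("invalid handler:", err)
    	}
    }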
+// Verifies that a function has the right number of in and out params and that they are
+// of allowed types. Returns nil if the function is valid, and a handler that reports the
+// validation error otherwise.
+func validateFunction(fnType reflect.Type) errAndHandler {
+	if fnType.Kind() != reflect.Func {
+		return &failedHandler{err: errors.New("must pass a function to handle events")}
+	}
+	err := anyError(
+		validateInParamSignature(fnType),
+		validateOutParamSignature(fnType))
+	if err != nil {
+		return &failedHandler{err: err}
+	}
+	return nil
+}
+
+// Allocates a new instance of type t and returns:
+// asPtr is of type t if t is a pointer type and of type &t otherwise (used for unmarshalling)
+// asValue is a Value of type t pointing to the same data as asPtr
+func allocate(t reflect.Type) (asPtr interface{}, asValue reflect.Value) {
+	if t == nil {
+		return nil, reflect.Value{}
+	}
+	if t.Kind() == reflect.Ptr {
+		reflectPtr := reflect.New(t.Elem())
+		asPtr = reflectPtr.Interface()
+		asValue = reflectPtr
+	} else {
+		reflectPtr := reflect.New(t)
+		asPtr = reflectPtr.Interface()
+		asValue = reflectPtr.Elem()
+	}
+	return
+}
+
+func unwrapReturnValues(res []reflect.Value) (interface{}, SendContext, error) {
+	switch len(res) {
+	case 0:
+		return nil, nil, nil
+	case 1:
+		if res[0].IsNil() {
+			return nil, nil, nil
+		}
+		// Should be a safe cast due to validateFunction()
+		return nil, nil, res[0].Interface().(error)
+	case 2:
+		if res[1].IsNil() {
+			return res[0].Interface(), nil, nil
+		}
+		// Should be a safe cast due to validateFunction()
+		return nil, nil, res[1].Interface().(error)
+	case 3:
+		if res[2].IsNil() {
+			ec := res[1].Interface().(SendContext)
+			return res[0].Interface(), ec, nil
+		}
+		return nil, nil, res[2].Interface().(error)
+	default:
+		// Should never happen due to validateFunction()
+		panic("Cannot unmarshal more than 3 return values")
+	}
+}
+
+// Accepts the results from a handler function and translates them to an HTTP response
+func respondHTTP(outparams []reflect.Value, fn reflect.Value, w http.ResponseWriter) {
+	res, ec, err := unwrapReturnValues(outparams)
+
+	if err != nil {
+		log.Print("Failed to handle event: ", err)
+		w.WriteHeader(http.StatusInternalServerError)
+		w.Write([]byte(`Internal server error`))
+		return
+	}
+	if ec == nil {
+		eventType := strings.Replace(fn.Type().PkgPath(), "/", ".", -1)
+		if eventType != "" {
+			eventType += "."
+		}
+		eventType += fn.Type().Name()
+		if eventType == "" {
+			eventType = "dev.knative.pkg.cloudevents.unknown"
+		}
+		ec = &V01EventContext{
+			EventID:   uuid.New().String(),
+			EventType: eventType,
+			Source:    "unknown", // TODO: anything useful here, maybe incoming Host header?
+		}
+	}
+
+	if res != nil {
+		json, err := json.Marshal(res)
+		if err != nil {
+			log.Printf("Failed to marshal return value %+v: %s", res, err)
+			w.WriteHeader(http.StatusInternalServerError)
+			w.Write([]byte(`Internal server error`))
+			return
+		}
+		headers, err := ec.AsHeaders()
+		if err != nil {
+			log.Printf("Failed to marshal event context %+v: %s", ec, err)
+			w.WriteHeader(http.StatusInternalServerError)
+			w.Write([]byte("Internal server error"))
+			return
+		}
+		for k, v := range headers {
+			w.Header()[k] = v
+		}
+
+		w.Write(json)
+		return
+	}
+
+	w.WriteHeader(http.StatusNoContent)
+}
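A usage sketch for the Handler constructor documented next; the payload type, message field, and port are made up for illustration:

    package main

    import (
    	"context"
    	"log"
    	"net/http"

    	"github.com/knative/pkg/cloudevents"
    )

    type Payload struct {
    	Message string `json:"message"`
    }

    func main() {
    	// The event payload is decoded into p; returning nil yields 204 No Content.
    	h := cloudevents.Handler(func(ctx context.Context, p *Payload) error {
    		log.Printf("got message: %q", p.Message)
    		return nil
    	})
    	log.Fatal(http.ListenAndServe(":8080", h))
    }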
+// Handler creates an EventHandler that implements http.Handler.
+// If the fn parameter is not a valid type, Handler will produce an http.Handler that also
+// conforms to error and will respond to all HTTP requests with that error. Valid types of
+// fn are:
+//
+// * func()
+// * func() error
+// * func() (anything, error)
+// * func() (anything, SendContext, error)
+// * func(context.Context)
+// * func(context.Context) error
+// * func(context.Context) (anything, error)
+// * func(context.Context) (anything, SendContext, error)
+// * func(context.Context, anything)
+// * func(context.Context, anything) error
+// * func(context.Context, anything) (anything, error)
+// * func(context.Context, anything) (anything, SendContext, error)
+//
+// CloudEvent contexts are available from the context.Context parameter.
+// CloudEvent data will be deserialized into the "anything" parameter.
+// The library supports native decoding with both XML and JSON encoding.
+// To accept another advanced type, pass an io.Reader as the input parameter.
+//
+// HTTP responses are generated based on the return value of fn:
+// * any error return value will cause a StatusInternalServerError response
+// * a function with no return type or a function returning nil will cause a StatusNoContent response
+// * a function that returns a value will cause a StatusOK and render the response as JSON,
+//   with headers from a SendContext, if appropriate
+func Handler(fn interface{}) http.Handler {
+	fnType := reflect.TypeOf(fn)
+	err := validateFunction(fnType)
+	if err != nil {
+		return err
+	}
+	var dataType reflect.Type
+	if fnType.NumIn() == 2 {
+		dataType = fnType.In(1)
+	}
+
+	return &handler{
+		numIn:    fnType.NumIn(),
+		dataType: dataType,
+		fnValue:  reflect.ValueOf(fn),
+	}
+}
+
+// ServeHTTP implements http.Handler
+func (h *handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	args := make([]reflect.Value, 0, 2)
+
+	if h.numIn > 0 {
+		dataPtr, dataArg := allocate(h.dataType)
+		eventContext, err := FromRequest(dataPtr, r)
+		if err != nil {
+			log.Printf("Failed to handle request %s; error %s", spew.Sdump(r), err)
+			w.WriteHeader(http.StatusBadRequest)
+			w.Write([]byte(`Invalid request`))
+			return
+		}
+
+		ctx := r.Context()
+		ctx = context.WithValue(ctx, contextKey, eventContext)
+		args = append(args, reflect.ValueOf(ctx))
+
+		if h.numIn == 2 {
+			args = append(args, dataArg)
+		}
+	}
+
+	res := h.fnValue.Call(args)
+	respondHTTP(res, h.fnValue, w)
+}
+
+func (h failedHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	log.Print("Failed to handle event: ", h.Error())
+	w.WriteHeader(http.StatusNotImplemented)
+	w.Write([]byte(`Internal server error`))
+}
+
+func (h failedHandler) Error() string {
+	return h.err.Error()
+}
+
+// Mux allows developers to handle logically related groups of
+// functionality multiplexed based on the event type.
+// TODO: Consider dropping Mux or figure out how to handle non-JSON encoding.
+type Mux map[string]*handler
+
+// NewMux creates a new Mux
+func NewMux() Mux {
+	return make(map[string]*handler)
+}
+
+// Handle adds a new handler for a specific event type.
+// If the fn parameter is not a valid type, the endpoint will respond to all HTTP requests
+// with that error. Valid types of fn are:
+//
+// * func()
+// * func() error
+// * func() (anything, error)
+// * func(context.Context)
+// * func(context.Context) error
+// * func(context.Context) (anything, error)
+// * func(context.Context, anything)
+// * func(context.Context, anything) error
+// * func(context.Context, anything) (anything, error)
+//
+// CloudEvent contexts are available from the context.Context parameter.
+// CloudEvent data will be deserialized into the "anything" parameter.
+// The library supports native decoding with both XML and JSON encoding.
+// To accept another advanced type, pass an io.Reader as the input parameter.
+//
+// HTTP responses are generated based on the return value of fn:
+// * any error return value will cause a StatusInternalServerError response
+// * a function with no return type or a function returning nil will cause a StatusNoContent response
+// * a function that returns a value will cause a StatusOK and render the response as JSON
+func (m Mux) Handle(eventType string, fn interface{}) error {
+	fnType := reflect.TypeOf(fn)
+	err := validateFunction(fnType)
+	if err != nil {
+		return err
+	}
+	var dataType reflect.Type
+	if fnType.NumIn() == 2 {
+		dataType = fnType.In(1)
+	}
+	m[eventType] = &handler{
+		numIn:    fnType.NumIn(),
+		dataType: dataType,
+		fnValue:  reflect.ValueOf(fn),
+	}
+	return nil
+}
+
+// ServeHTTP implements http.Handler
+func (m Mux) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	var rawData io.Reader
+	eventContext, err := FromRequest(&rawData, r)
+	if err != nil {
+		log.Printf("Failed to handle request: %s %s", err, spew.Sdump(r))
+		w.WriteHeader(http.StatusBadRequest)
+		w.Write([]byte(`Invalid request`))
+		return
+	}
+
+	c := eventContext.AsV01()
+
+	h := m[c.EventType]
+	if h == nil {
+		log.Print("Could not find handler for event type ", c.EventType)
+		w.WriteHeader(http.StatusInternalServerError)
+		w.Write([]byte(fmt.Sprintf("Event type %q is not supported", c.EventType)))
+		return
+	}
+
+	args := make([]reflect.Value, 0, 2)
+	if h.numIn > 0 {
+		ctx := r.Context()
+		ctx = context.WithValue(ctx, contextKey, eventContext)
+		args = append(args, reflect.ValueOf(ctx))
+	}
+	if h.numIn == 2 {
+		dataPtr, dataArg := allocate(h.dataType)
+		if err := unmarshalEventData(c.ContentType, rawData, dataPtr); err != nil {
+			log.Print("Failed to parse event data: ", err)
+			w.WriteHeader(http.StatusBadRequest)
+			w.Write([]byte(`Invalid request`))
+			return
+		}
+		args = append(args, dataArg)
+	}
+
+	res := h.fnValue.Call(args)
+	respondHTTP(res, h.fnValue, w)
+}
diff --git a/vendor/github.com/knative/pkg/code-of-conduct.md b/vendor/github.com/knative/pkg/code-of-conduct.md
new file mode 100644
index 000000000..5f04b3187
--- /dev/null
+++ b/vendor/github.com/knative/pkg/code-of-conduct.md
@@ -0,0 +1,75 @@
+# Contributor Covenant Code of Conduct
+
+## Our Pledge
+
+In the interest of fostering an open and welcoming environment, we as
+contributors and maintainers pledge to making participation in our project and
+our community a harassment-free experience for everyone, regardless of age, body
+size, disability, ethnicity, gender identity and expression, level of
+experience, education, socio-economic status, nationality, personal appearance,
+race, religion, or sexual identity and orientation.
+ +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +- Using welcoming and inclusive language +- Being respectful of differing viewpoints and experiences +- Gracefully accepting constructive criticism +- Focusing on what is best for the community +- Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +- The use of sexualized language or imagery and unwelcome sexual attention or + advances +- Trolling, insulting/derogatory comments, and personal or political attacks +- Public or private harassment +- Publishing others' private information, such as a physical or electronic + address, without explicit permission +- Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or reject +comments, commits, code, wiki edits, issues, and other contributions that are +not aligned to this Code of Conduct, or to ban temporarily or permanently any +contributor for other behaviors that they deem inappropriate, threatening, +offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at +knative-code-of-conduct@googlegroups.com. All complaints will be reviewed and +investigated and will result in a response that is deemed necessary and +appropriate to the circumstances. The project team is obligated to maintain +confidentiality with regard to the reporter of an incident. Further details of +specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], +version 1.4, available at +https://www.contributor-covenant.org/version/1/4/code-of-conduct.html + +[homepage]: https://www.contributor-covenant.org diff --git a/vendor/github.com/knative/pkg/codegen/cmd/injection-gen/args/args.go b/vendor/github.com/knative/pkg/codegen/cmd/injection-gen/args/args.go new file mode 100644 index 000000000..a1a1a5a9e --- /dev/null +++ b/vendor/github.com/knative/pkg/codegen/cmd/injection-gen/args/args.go @@ -0,0 +1,61 @@ +/* +Copyright 2019 The Knative Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package args
+
+import (
+	"fmt"
+
+	"github.com/spf13/pflag"
+	"k8s.io/gengo/args"
+)
+
+// CustomArgs is used by the gengo framework to pass args specific to this generator.
+type CustomArgs struct {
+	VersionedClientSetPackage        string
+	ExternalVersionsInformersPackage string
+}
+
+// NewDefaults returns default arguments for the generator.
+func NewDefaults() (*args.GeneratorArgs, *CustomArgs) {
+	genericArgs := args.Default().WithoutDefaultFlagParsing()
+	customArgs := &CustomArgs{}
+	genericArgs.CustomArgs = customArgs
+	return genericArgs, customArgs
+}
+
+// AddFlags adds the generator flags to the flag set.
+func (ca *CustomArgs) AddFlags(fs *pflag.FlagSet) {
+	fs.StringVar(&ca.VersionedClientSetPackage, "versioned-clientset-package", ca.VersionedClientSetPackage, "the full package name for the versioned injection clientset to use")
+	fs.StringVar(&ca.ExternalVersionsInformersPackage, "external-versions-informers-package", ca.ExternalVersionsInformersPackage, "the full package name for the external versions injection informer to use")
+}
+
+// Validate checks the given arguments.
+func Validate(genericArgs *args.GeneratorArgs) error {
+	customArgs := genericArgs.CustomArgs.(*CustomArgs)
+
+	if len(genericArgs.OutputPackagePath) == 0 {
+		return fmt.Errorf("output package cannot be empty")
+	}
+	if len(customArgs.VersionedClientSetPackage) == 0 {
+		return fmt.Errorf("versioned clientset package cannot be empty")
+	}
+	if len(customArgs.ExternalVersionsInformersPackage) == 0 {
+		return fmt.Errorf("external versions informers package cannot be empty")
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/knative/pkg/codegen/cmd/injection-gen/generators/client.go b/vendor/github.com/knative/pkg/codegen/cmd/injection-gen/generators/client.go
new file mode 100644
index 000000000..ff42212fe
--- /dev/null
+++ b/vendor/github.com/knative/pkg/codegen/cmd/injection-gen/generators/client.go
@@ -0,0 +1,102 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package generators
+
+import (
+	"io"
+
+	"k8s.io/gengo/generator"
+	"k8s.io/gengo/namer"
+	"k8s.io/gengo/types"
+	"k8s.io/klog"
+)
+
+// clientGenerator produces a file of client injection code for a given
+// clientset.
+type clientGenerator struct {
+	generator.DefaultGen
+	outputPackage    string
+	imports          namer.ImportTracker
+	clientSetPackage string
+	filtered         bool
+}
+
+var _ generator.Generator = (*clientGenerator)(nil)
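The template this generator emits (see GenerateType below) registers itself with the injection framework at init time and exposes a Get accessor. A sketch of how such generated code is consumed from a reconciler; the import path is hypothetical, not part of this patch:

    package eventlistener

    import (
    	"context"

    	// Hypothetical path for a package emitted by this generator.
    	client "github.com/tektoncd/triggers/pkg/client/injection/client"
    )

    func reconcileSomething(ctx context.Context) {
    	// Get fatals via the context logger if the injection framework never
    	// ran withClient, e.g. outside an injection-aware binary.
    	cs := client.Get(ctx)
    	_ = cs // a versioned clientset Interface, ready for API calls
    }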
+func (g *clientGenerator) Filter(c *generator.Context, t *types.Type) bool {
+	// We generate a single client, so return true once.
+	if !g.filtered {
+		g.filtered = true
+		return true
+	}
+	return false
+}
+
+func (g *clientGenerator) Namers(c *generator.Context) namer.NameSystems {
+	return namer.NameSystems{
+		"raw": namer.NewRawNamer(g.outputPackage, g.imports),
+	}
+}
+
+func (g *clientGenerator) Imports(c *generator.Context) (imports []string) {
+	imports = append(imports, g.imports.ImportLines()...)
+	return
+}
+
+func (g *clientGenerator) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error {
+	sw := generator.NewSnippetWriter(w, c, "{{", "}}")
+
+	klog.V(5).Infof("processing type %v", t)
+
+	m := map[string]interface{}{
+		"clientSetNewForConfigOrDie": c.Universe.Function(types.Name{Package: g.clientSetPackage, Name: "NewForConfigOrDie"}),
+		"clientSetInterface":         c.Universe.Type(types.Name{Package: g.clientSetPackage, Name: "Interface"}),
+		"injectionRegisterClient":    c.Universe.Function(types.Name{Package: "github.com/knative/pkg/injection", Name: "Default.RegisterClient"}),
+		"restConfig":                 c.Universe.Type(types.Name{Package: "k8s.io/client-go/rest", Name: "Config"}),
+		"loggingFromContext": c.Universe.Function(types.Name{
+			Package: "github.com/knative/pkg/logging",
+			Name:    "FromContext",
+		}),
+	}
+
+	sw.Do(injectionClient, m)
+
+	return sw.Error()
+}
+
+var injectionClient = `
+func init() {
+	{{.injectionRegisterClient|raw}}(withClient)
+}
+
+// Key is used as the key for associating information with a context.Context.
+type Key struct{}
+
+func withClient(ctx context.Context, cfg *{{.restConfig|raw}}) context.Context {
+	return context.WithValue(ctx, Key{}, {{.clientSetNewForConfigOrDie|raw}}(cfg))
+}
+
+// Get extracts the {{.clientSetInterface|raw}} client from the context.
+func Get(ctx context.Context) {{.clientSetInterface|raw}} {
+	untyped := ctx.Value(Key{})
+	if untyped == nil {
+		{{.loggingFromContext|raw}}(ctx).Fatalf(
+			"Unable to fetch %T from context.", ({{.clientSetInterface|raw}})(nil))
+	}
+	return untyped.({{.clientSetInterface|raw}})
+}
+`
diff --git a/vendor/github.com/knative/pkg/codegen/cmd/injection-gen/generators/factory.go b/vendor/github.com/knative/pkg/codegen/cmd/injection-gen/generators/factory.go
new file mode 100644
index 000000000..0c25fde79
--- /dev/null
+++ b/vendor/github.com/knative/pkg/codegen/cmd/injection-gen/generators/factory.go
@@ -0,0 +1,106 @@
+/*
+Copyright 2019 The Knative Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package generators
+
+import (
+	"io"
+
+	"k8s.io/gengo/generator"
+	"k8s.io/gengo/namer"
+	"k8s.io/gengo/types"
+
+	"k8s.io/klog"
+)
+
+// factoryGenerator produces a file of factory injection for a given type.
+type factoryGenerator struct {
+	generator.DefaultGen
+	outputPackage                string
+	imports                      namer.ImportTracker
+	cachingClientSetPackage     string
+	sharedInformerFactoryPackage string
+	filtered                     bool
+}
+
+var _ generator.Generator = (*factoryGenerator)(nil)
+
+func (g *factoryGenerator) Filter(c *generator.Context, t *types.Type) bool {
+	// We generate a single factory, so return true once.
+	if !g.filtered {
+		g.filtered = true
+		return true
+	}
+	return false
+}
+
+func (g *factoryGenerator) Namers(c *generator.Context) namer.NameSystems {
+	return namer.NameSystems{
+		"raw": namer.NewRawNamer(g.outputPackage, g.imports),
+	}
+}
+
+func (g *factoryGenerator) Imports(c *generator.Context) (imports []string) {
+	imports = append(imports, g.imports.ImportLines()...)
+	return
+}
+
+func (g *factoryGenerator) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error {
+	sw := generator.NewSnippetWriter(w, c, "{{", "}}")
+
+	klog.V(5).Infof("processing type %v", t)
+
+	m := map[string]interface{}{
+		"cachingClientGet":                  c.Universe.Type(types.Name{Package: g.cachingClientSetPackage, Name: "Get"}),
+		"informersNewSharedInformerFactory": c.Universe.Function(types.Name{Package: g.sharedInformerFactoryPackage, Name: "NewSharedInformerFactory"}),
+		"informersSharedInformerFactory":    c.Universe.Function(types.Name{Package: g.sharedInformerFactoryPackage, Name: "SharedInformerFactory"}),
+		"injectionRegisterInformerFactory":  c.Universe.Type(types.Name{Package: "github.com/knative/pkg/injection", Name: "Default.RegisterInformerFactory"}),
+		"controllerGetResyncPeriod":         c.Universe.Type(types.Name{Package: "github.com/knative/pkg/controller", Name: "GetResyncPeriod"}),
+		"loggingFromContext": c.Universe.Function(types.Name{
+			Package: "github.com/knative/pkg/logging",
+			Name:    "FromContext",
+		}),
+	}
+
+	sw.Do(injectionFactory, m)
+
+	return sw.Error()
+}
+
+var injectionFactory = `
+func init() {
+	{{.injectionRegisterInformerFactory|raw}}(withInformerFactory)
+}
+
+// Key is used as the key for associating information with a context.Context.
+type Key struct{}
+
+func withInformerFactory(ctx context.Context) context.Context {
+	c := {{.cachingClientGet|raw}}(ctx)
+	return context.WithValue(ctx, Key{},
+		{{.informersNewSharedInformerFactory|raw}}(c, {{.controllerGetResyncPeriod|raw}}(ctx)))
+}
+
+// Get extracts the InformerFactory from the context.
+func Get(ctx context.Context) {{.informersSharedInformerFactory|raw}} {
+	untyped := ctx.Value(Key{})
+	if untyped == nil {
+		{{.loggingFromContext|raw}}(ctx).Fatalf(
+			"Unable to fetch %T from context.", ({{.informersSharedInformerFactory|raw}})(nil))
+	}
+	return untyped.({{.informersSharedInformerFactory|raw}})
+}
+`
diff --git a/vendor/github.com/knative/pkg/codegen/cmd/injection-gen/generators/fakeclient.go b/vendor/github.com/knative/pkg/codegen/cmd/injection-gen/generators/fakeclient.go
new file mode 100644
index 000000000..36babebf1
--- /dev/null
+++ b/vendor/github.com/knative/pkg/codegen/cmd/injection-gen/generators/fakeclient.go
@@ -0,0 +1,109 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package generators
+
+import (
+	"io"
+
+	"k8s.io/gengo/generator"
+	"k8s.io/gengo/namer"
+	"k8s.io/gengo/types"
+	"k8s.io/klog"
+)
+
+// fakeClientGenerator produces a file of fake client injection for a given
+// clientset.
+type fakeClientGenerator struct {
+	generator.DefaultGen
+	outputPackage string
+	imports       namer.ImportTracker
+	filtered      bool
+
+	fakeClientPkg      string
+	clientInjectionPkg string
+}
+
+var _ generator.Generator = (*fakeClientGenerator)(nil)
+
+func (g *fakeClientGenerator) Filter(c *generator.Context, t *types.Type) bool {
+	// We generate a single client, so return true once.
+	if !g.filtered {
+		g.filtered = true
+		return true
+	}
+	return false
+}
+
+func (g *fakeClientGenerator) Namers(c *generator.Context) namer.NameSystems {
+	return namer.NameSystems{
+		"raw": namer.NewRawNamer(g.outputPackage, g.imports),
+	}
+}
+
+func (g *fakeClientGenerator) Imports(c *generator.Context) (imports []string) {
+	imports = append(imports, g.imports.ImportLines()...)
+	return
+}
+
+func (g *fakeClientGenerator) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error {
+	sw := generator.NewSnippetWriter(w, c, "{{", "}}")
+
+	klog.V(5).Infof("processing type %v", t)
+
+	m := map[string]interface{}{
+		"clientKey":  c.Universe.Type(types.Name{Package: g.clientInjectionPkg, Name: "Key"}),
+		"fakeClient": c.Universe.Type(types.Name{Package: g.fakeClientPkg, Name: "Clientset"}),
+		"injectionRegisterClient": c.Universe.Function(types.Name{
+			Package: "github.com/knative/pkg/injection",
+			Name:    "Fake.RegisterClient",
+		}),
+		"loggingFromContext": c.Universe.Function(types.Name{
+			Package: "github.com/knative/pkg/logging",
+			Name:    "FromContext",
+		}),
+	}
+
+	sw.Do(injectionFakeClient, m)
+
+	return sw.Error()
+}
+
+var injectionFakeClient = `
+func init() {
+	{{.injectionRegisterClient|raw}}(withClient)
+}
+
+func withClient(ctx context.Context, cfg *rest.Config) context.Context {
+	ctx, _ = With(ctx)
+	return ctx
+}
+
+func With(ctx context.Context, objects ...runtime.Object) (context.Context, *{{.fakeClient|raw}}) {
+	cs := fake.NewSimpleClientset(objects...)
+	return context.WithValue(ctx, {{.clientKey|raw}}{}, cs), cs
+}
+
+// Get extracts the Kubernetes client from the context.
+func Get(ctx context.Context) *{{.fakeClient|raw}} {
+	untyped := ctx.Value({{.clientKey|raw}}{})
+	if untyped == nil {
+		{{.loggingFromContext|raw}}(ctx).Fatalf(
+			"Unable to fetch %T from context.", (*{{.fakeClient|raw}})(nil))
+	}
+	return untyped.(*fake.Clientset)
+}
`
diff --git a/vendor/github.com/knative/pkg/codegen/cmd/injection-gen/generators/fakefactory.go b/vendor/github.com/knative/pkg/codegen/cmd/injection-gen/generators/fakefactory.go
new file mode 100644
index 000000000..1c0953d4b
--- /dev/null
+++ b/vendor/github.com/knative/pkg/codegen/cmd/injection-gen/generators/fakefactory.go
@@ -0,0 +1,97 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package generators
+
+import (
+	"io"
+
+	"k8s.io/gengo/generator"
+	"k8s.io/gengo/namer"
+	"k8s.io/gengo/types"
+	"k8s.io/klog"
+)
+
+// fakeFactoryGenerator produces a file of fake informer factory injection for
+// a given type.
+type fakeFactoryGenerator struct {
+	generator.DefaultGen
+	outputPackage string
+	imports       namer.ImportTracker
+	filtered      bool
+
+	factoryInjectionPkg          string
+	fakeClientInjectionPkg       string
+	sharedInformerFactoryPackage string
+}
+
+var _ generator.Generator = (*fakeFactoryGenerator)(nil)
+
+func (g *fakeFactoryGenerator) Filter(c *generator.Context, t *types.Type) bool {
+	// We generate a single factory, so return true once.
+	if !g.filtered {
+		g.filtered = true
+		return true
+	}
+	return false
+}
+
+func (g *fakeFactoryGenerator) Namers(c *generator.Context) namer.NameSystems {
+	return namer.NameSystems{
+		"raw": namer.NewRawNamer(g.outputPackage, g.imports),
+	}
+}
+
+func (g *fakeFactoryGenerator) Imports(c *generator.Context) (imports []string) {
+	imports = append(imports, g.imports.ImportLines()...)
+	return
+}
+
+func (g *fakeFactoryGenerator) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error {
+	sw := generator.NewSnippetWriter(w, c, "{{", "}}")
+
+	klog.V(5).Infof("processing type %v", t)
+
+	m := map[string]interface{}{
+		"factoryKey":                        c.Universe.Type(types.Name{Package: g.factoryInjectionPkg, Name: "Key"}),
+		"factoryGet":                        c.Universe.Function(types.Name{Package: g.factoryInjectionPkg, Name: "Get"}),
+		"clientGet":                         c.Universe.Function(types.Name{Package: g.fakeClientInjectionPkg, Name: "Get"}),
+		"informersNewSharedInformerFactory": c.Universe.Function(types.Name{Package: g.sharedInformerFactoryPackage, Name: "NewSharedInformerFactory"}),
+		"injectionRegisterInformerFactory": c.Universe.Function(types.Name{
+			Package: "github.com/knative/pkg/injection",
+			Name:    "Fake.RegisterInformerFactory",
+		}),
+		"controllerGetResyncPeriod": c.Universe.Type(types.Name{Package: "github.com/knative/pkg/controller", Name: "GetResyncPeriod"}),
+	}
+
+	sw.Do(injectionFakeInformerFactory, m)
+
+	return sw.Error()
+}
+
+var injectionFakeInformerFactory = `
+var Get = {{.factoryGet|raw}}
+
+func init() {
+	{{.injectionRegisterInformerFactory|raw}}(withInformerFactory)
+}
+
+func withInformerFactory(ctx context.Context) context.Context {
+	c := {{.clientGet|raw}}(ctx)
+	return context.WithValue(ctx, {{.factoryKey|raw}}{},
+		{{.informersNewSharedInformerFactory|raw}}(c, {{.controllerGetResyncPeriod|raw}}(ctx)))
+}
+`
diff --git a/vendor/github.com/knative/pkg/codegen/cmd/injection-gen/generators/fakeinformer.go b/vendor/github.com/knative/pkg/codegen/cmd/injection-gen/generators/fakeinformer.go
new file mode 100644
index 000000000..6dde06070
--- /dev/null
+++ b/vendor/github.com/knative/pkg/codegen/cmd/injection-gen/generators/fakeinformer.go
@@ -0,0 +1,114 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package generators
+
+import (
+	"io"
+
+	clientgentypes "k8s.io/code-generator/cmd/client-gen/types"
+	"k8s.io/gengo/generator"
+	"k8s.io/gengo/namer"
+	"k8s.io/gengo/types"
+	"k8s.io/klog"
+)
+
+// fakeInformerGenerator produces a file of fake informer injection for a given
+// GroupVersion and type.
+type fakeInformerGenerator struct { + generator.DefaultGen + outputPackage string + imports namer.ImportTracker + filtered bool + + typeToGenerate *types.Type + groupVersion clientgentypes.GroupVersion + groupGoName string + informerInjectionPkg string + fakeFactoryInjectionPkg string +} + +var _ generator.Generator = (*fakeInformerGenerator)(nil) + +func (g *fakeInformerGenerator) Filter(c *generator.Context, t *types.Type) bool { + // Only process the type for this informer generator. + return t == g.typeToGenerate +} + +func (g *fakeInformerGenerator) Namers(c *generator.Context) namer.NameSystems { + publicPluralNamer := &ExceptionNamer{ + Exceptions: map[string]string{ + // these exceptions are used to deconflict the generated code + // you can put your fully qualified package like + // to generate a name that doesn't conflict with your group. + // "k8s.io/apis/events/v1beta1.Event": "EventResource" + }, + KeyFunc: func(t *types.Type) string { + return t.Name.Package + "." + t.Name.Name + }, + Delegate: namer.NewPublicPluralNamer(map[string]string{ + "Endpoints": "Endpoints", + }), + } + + return namer.NameSystems{ + "raw": namer.NewRawNamer(g.outputPackage, g.imports), + "publicPlural": publicPluralNamer, + } +} + +func (g *fakeInformerGenerator) Imports(c *generator.Context) (imports []string) { + imports = append(imports, g.imports.ImportLines()...) + return +} + +func (g *fakeInformerGenerator) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error { + sw := generator.NewSnippetWriter(w, c, "{{", "}}") + + klog.V(5).Infof("processing type %v", t) + + m := map[string]interface{}{ + "informerKey": c.Universe.Type(types.Name{Package: g.informerInjectionPkg, Name: "Key"}), + "informerGet": c.Universe.Function(types.Name{Package: g.informerInjectionPkg, Name: "Get"}), + "factoryGet": c.Universe.Function(types.Name{Package: g.fakeFactoryInjectionPkg, Name: "Get"}), + "group": namer.IC(g.groupGoName), + "type": t, + "version": namer.IC(g.groupVersion.Version.String()), + "controllerInformer": c.Universe.Type(types.Name{Package: "github.com/knative/pkg/controller", Name: "Informer"}), + "injectionRegisterInformer": c.Universe.Function(types.Name{ + Package: "github.com/knative/pkg/injection", + Name: "Fake.RegisterInformer", + }), + } + + sw.Do(injectionFakeInformer, m) + + return sw.Error() +} + +var injectionFakeInformer = ` +var Get = {{.informerGet|raw}} + +func init() { + {{.injectionRegisterInformer|raw}}(withInformer) +} + +func withInformer(ctx context.Context) (context.Context, {{.controllerInformer|raw}}) { + f := {{.factoryGet|raw}}(ctx) + inf := f.{{.group}}().{{.version}}().{{.type|publicPlural}}() + return context.WithValue(ctx, {{.informerKey|raw}}{}, inf), inf.Informer() +} +` diff --git a/vendor/github.com/knative/pkg/codegen/cmd/injection-gen/generators/informer.go b/vendor/github.com/knative/pkg/codegen/cmd/injection-gen/generators/informer.go new file mode 100644 index 000000000..8fcaeefcc --- /dev/null +++ b/vendor/github.com/knative/pkg/codegen/cmd/injection-gen/generators/informer.go @@ -0,0 +1,123 @@ +/* +Copyright 2019 The Knative Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package generators
+
+import (
+	"io"
+
+	clientgentypes "k8s.io/code-generator/cmd/client-gen/types"
+	"k8s.io/gengo/generator"
+	"k8s.io/gengo/namer"
+	"k8s.io/gengo/types"
+	"k8s.io/klog"
+)
+
+// injectionGenerator produces a file of informer injection for a given
+// GroupVersion and type.
+type injectionGenerator struct {
+	generator.DefaultGen
+	outputPackage               string
+	groupVersion                clientgentypes.GroupVersion
+	groupGoName                 string
+	typeToGenerate              *types.Type
+	imports                     namer.ImportTracker
+	typedInformerPackage        string
+	groupInformerFactoryPackage string
+}
+
+var _ generator.Generator = (*injectionGenerator)(nil)
+
+func (g *injectionGenerator) Filter(c *generator.Context, t *types.Type) bool {
+	// Only process the type for this informer generator.
+	return t == g.typeToGenerate
+}
+
+func (g *injectionGenerator) Namers(c *generator.Context) namer.NameSystems {
+	publicPluralNamer := &ExceptionNamer{
+		Exceptions: map[string]string{
+			// these exceptions are used to deconflict the generated code
+			// you can put your fully qualified package like
+			// to generate a name that doesn't conflict with your group.
+			// "k8s.io/apis/events/v1beta1.Event": "EventResource"
+		},
+		KeyFunc: func(t *types.Type) string {
+			return t.Name.Package + "." + t.Name.Name
+		},
+		Delegate: namer.NewPublicPluralNamer(map[string]string{
+			"Endpoints": "Endpoints",
+		}),
+	}
+
+	return namer.NameSystems{
+		"raw":          namer.NewRawNamer(g.outputPackage, g.imports),
+		"publicPlural": publicPluralNamer,
+	}
+}
+
+func (g *injectionGenerator) Imports(c *generator.Context) (imports []string) {
+	imports = append(imports, g.imports.ImportLines()...)
+	return
+}
+
+func (g *injectionGenerator) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error {
+	sw := generator.NewSnippetWriter(w, c, "{{", "}}")
+
+	klog.V(5).Infof("processing type %v", t)
+
+	m := map[string]interface{}{
+		"group":                     namer.IC(g.groupGoName),
+		"type":                      t,
+		"version":                   namer.IC(g.groupVersion.Version.String()),
+		"injectionRegisterInformer": c.Universe.Type(types.Name{Package: "github.com/knative/pkg/injection", Name: "Default.RegisterInformer"}),
+		"controllerInformer":        c.Universe.Type(types.Name{Package: "github.com/knative/pkg/controller", Name: "Informer"}),
+		"informersTypedInformer":    c.Universe.Type(types.Name{Package: g.typedInformerPackage, Name: t.Name.Name + "Informer"}),
+		"factoryGet":                c.Universe.Type(types.Name{Package: g.groupInformerFactoryPackage, Name: "Get"}),
+		"loggingFromContext": c.Universe.Function(types.Name{
+			Package: "github.com/knative/pkg/logging",
+			Name:    "FromContext",
+		}),
+	}
+
+	sw.Do(injectionInformer, m)
+
+	return sw.Error()
+}
+
+var injectionInformer = `
+func init() {
+	{{.injectionRegisterInformer|raw}}(withInformer)
+}
+
+// Key is used for associating the Informer inside the context.Context.
+type Key struct{}
+
+func withInformer(ctx context.Context) (context.Context, {{.controllerInformer|raw}}) {
+	f := {{.factoryGet|raw}}(ctx)
+	inf := f.{{.group}}().{{.version}}().{{.type|publicPlural}}()
+	return context.WithValue(ctx, Key{}, inf), inf.Informer()
+}
+
+// Get extracts the typed informer from the context.
+func Get(ctx context.Context) {{.informersTypedInformer|raw}} {
+	untyped := ctx.Value(Key{})
+	if untyped == nil {
+		{{.loggingFromContext|raw}}(ctx).Fatalf(
+			"Unable to fetch %T from context.", ({{.informersTypedInformer|raw}})(nil))
+	}
+	return untyped.({{.informersTypedInformer|raw}})
+}
`
diff --git a/vendor/github.com/knative/pkg/codegen/cmd/injection-gen/generators/namesystems.go b/vendor/github.com/knative/pkg/codegen/cmd/injection-gen/generators/namesystems.go
new file mode 100644
index 000000000..19749529d
--- /dev/null
+++ b/vendor/github.com/knative/pkg/codegen/cmd/injection-gen/generators/namesystems.go
@@ -0,0 +1,71 @@
+/*
+Copyright 2019 The Knative Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package generators
+
+import (
+	"strings"
+
+	"k8s.io/gengo/namer"
+	"k8s.io/gengo/types"
+)
+
+// NameSystems returns the name system used by the generators in this package.
+func NameSystems() namer.NameSystems {
+	pluralExceptions := map[string]string{
+		"Endpoints": "Endpoints",
+	}
+	return namer.NameSystems{
+		"public":             namer.NewPublicNamer(0),
+		"private":            namer.NewPrivateNamer(0),
+		"raw":                namer.NewRawNamer("", nil),
+		"publicPlural":       namer.NewPublicPluralNamer(pluralExceptions),
+		"allLowercasePlural": namer.NewAllLowercasePluralNamer(pluralExceptions),
+		"lowercaseSingular":  &lowercaseSingularNamer{},
+	}
+}
+
+// lowercaseSingularNamer implements Namer
+type lowercaseSingularNamer struct{}
+
+// Name returns t's name in all lowercase.
+func (n *lowercaseSingularNamer) Name(t *types.Type) string {
+	return strings.ToLower(t.Name.Name)
+}
+
+// DefaultNameSystem returns the default name system for ordering the types to be
+// processed by the generators in this package.
+func DefaultNameSystem() string {
+	return "public"
+}
+
+// ExceptionNamer allows you to specify exceptional cases with exact names. This gives
+// you control over handling various conflicts, like group and resource names for instance.
+type ExceptionNamer struct {
+	Exceptions map[string]string
+	KeyFunc    func(*types.Type) string
+
+	Delegate namer.Namer
+}
+
+// Name provides the requested name for a type.
+func (n *ExceptionNamer) Name(t *types.Type) string {
+	key := n.KeyFunc(t)
+	if exception, ok := n.Exceptions[key]; ok {
+		return exception
+	}
+	return n.Delegate.Name(t)
+}
diff --git a/vendor/github.com/knative/pkg/codegen/cmd/injection-gen/generators/packages.go b/vendor/github.com/knative/pkg/codegen/cmd/injection-gen/generators/packages.go
new file mode 100644
index 000000000..f8359af9b
--- /dev/null
+++ b/vendor/github.com/knative/pkg/codegen/cmd/injection-gen/generators/packages.go
@@ -0,0 +1,360 @@
+/*
+Copyright 2019 The Knative Authors.
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package generators + +import ( + "fmt" + "path" + "path/filepath" + "strings" + + "k8s.io/code-generator/cmd/client-gen/generators/util" + clientgentypes "k8s.io/code-generator/cmd/client-gen/types" + "k8s.io/gengo/args" + "k8s.io/gengo/generator" + "k8s.io/gengo/namer" + "k8s.io/gengo/types" + "k8s.io/klog" + + informergenargs "github.com/knative/pkg/codegen/cmd/injection-gen/args" +) + +// Packages makes the client package definition. +func Packages(context *generator.Context, arguments *args.GeneratorArgs) generator.Packages { + boilerplate, err := arguments.LoadGoBoilerplate() + if err != nil { + klog.Fatalf("Failed loading boilerplate: %v", err) + } + + customArgs, ok := arguments.CustomArgs.(*informergenargs.CustomArgs) + if !ok { + klog.Fatalf("Wrong CustomArgs type: %T", arguments.CustomArgs) + } + + versionPackagePath := filepath.Join(arguments.OutputPackagePath) + + var packageList generator.Packages + typesForGroupVersion := make(map[clientgentypes.GroupVersion][]*types.Type) + + groupVersions := make(map[string]clientgentypes.GroupVersions) + groupGoNames := make(map[string]string) + for _, inputDir := range arguments.InputDirs { + p := context.Universe.Package(vendorless(inputDir)) + + objectMeta, _, err := objectMetaForPackage(p) // TODO: ignoring internal. + if err != nil { + klog.Fatal(err) + } + if objectMeta == nil { + // no types in this package had genclient + continue + } + + var gv clientgentypes.GroupVersion + var targetGroupVersions map[string]clientgentypes.GroupVersions + + parts := strings.Split(p.Path, "/") + gv.Group = clientgentypes.Group(parts[len(parts)-2]) + gv.Version = clientgentypes.Version(parts[len(parts)-1]) + targetGroupVersions = groupVersions + + groupPackageName := gv.Group.NonEmpty() + gvPackage := path.Clean(p.Path) + + // If there's a comment of the form "// +groupName=somegroup" or + // "// +groupName=somegroup.foo.bar.io", use the first field (somegroup) as the name of the + // group when generating. + if override := types.ExtractCommentTags("+", p.Comments)["groupName"]; override != nil { + gv.Group = clientgentypes.Group(override[0]) + } + + // If there's a comment of the form "// +groupGoName=SomeUniqueShortName", use that as + // the Go group identifier in CamelCase. 
It defaults to the CamelCased first segment of the group name.
+		groupGoNames[groupPackageName] = namer.IC(strings.Split(gv.Group.NonEmpty(), ".")[0])
+		if override := types.ExtractCommentTags("+", p.Comments)["groupGoName"]; override != nil {
+			groupGoNames[groupPackageName] = namer.IC(override[0])
+		}
+
+		var typesToGenerate []*types.Type
+		for _, t := range p.Types {
+			tags := util.MustParseClientGenTags(append(t.SecondClosestCommentLines, t.CommentLines...))
+			if !tags.GenerateClient || tags.NoVerbs || !tags.HasVerb("list") || !tags.HasVerb("watch") {
+				continue
+			}
+
+			typesToGenerate = append(typesToGenerate, t)
+
+			if _, ok := typesForGroupVersion[gv]; !ok {
+				typesForGroupVersion[gv] = []*types.Type{}
+			}
+			typesForGroupVersion[gv] = append(typesForGroupVersion[gv], t)
+		}
+		if len(typesToGenerate) == 0 {
+			continue
+		}
+
+		groupVersionsEntry, ok := targetGroupVersions[groupPackageName]
+		if !ok {
+			groupVersionsEntry = clientgentypes.GroupVersions{
+				PackageName: groupPackageName,
+				Group:       gv.Group,
+			}
+		}
+		groupVersionsEntry.Versions = append(groupVersionsEntry.Versions, clientgentypes.PackageVersion{Version: gv.Version, Package: gvPackage})
+		targetGroupVersions[groupPackageName] = groupVersionsEntry
+
+		orderer := namer.Orderer{Namer: namer.NewPrivateNamer(0)}
+		typesToGenerate = orderer.OrderTypes(typesToGenerate)
+
+		// Generate the client and fake.
+		packageList = append(packageList, versionClientsPackages(versionPackagePath, groupPackageName, gv, groupGoNames[groupPackageName], boilerplate, typesToGenerate, customArgs)...)
+
+		// Generate the informer factory and fake.
+		packageList = append(packageList, versionFactoryPackages(versionPackagePath, groupPackageName, gv, groupGoNames[groupPackageName], boilerplate, typesToGenerate, customArgs)...)
+
+		// Generate the informer and fake, for each type.
+		packageList = append(packageList, versionInformerPackages(versionPackagePath, groupPackageName, gv, groupGoNames[groupPackageName], boilerplate, typesToGenerate, customArgs)...)
+	}
+
+	return packageList
+}
+
+// objectMetaForPackage returns the type of ObjectMeta used by package p.
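The groupName and groupGoName tags consumed above are the standard k8s.io/gengo package annotations. A sketch of a doc.go that would drive them; the group name and package are made up for illustration:

    // +groupName=triggers.example.dev
    // +groupGoName=Triggers

    // Package v1alpha1 contains the API types for the triggers group.
    package v1alpha1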
+func objectMetaForPackage(p *types.Package) (*types.Type, bool, error) { + generatingForPackage := false + for _, t := range p.Types { + if !util.MustParseClientGenTags(append(t.SecondClosestCommentLines, t.CommentLines...)).GenerateClient { + continue + } + generatingForPackage = true + for _, member := range t.Members { + if member.Name == "ObjectMeta" { + return member.Type, isInternal(member), nil + } + } + } + if generatingForPackage { + return nil, false, fmt.Errorf("unable to find ObjectMeta for any types in package %s", p.Path) + } + return nil, false, nil +} + +// isInternal returns true if the tags for a member do not contain a json tag +func isInternal(m types.Member) bool { + return !strings.Contains(m.Tags, "json") +} + +func vendorless(p string) string { + if pos := strings.LastIndex(p, "/vendor/"); pos != -1 { + return p[pos+len("/vendor/"):] + } + return p +} + +func typedInformerPackage(groupPkgName string, gv clientgentypes.GroupVersion, externalVersionsInformersPackage string) string { + return filepath.Join(externalVersionsInformersPackage, groupPkgName, gv.Version.String()) +} + +func versionClientsPackages(basePackage string, groupPkgName string, gv clientgentypes.GroupVersion, groupGoName string, boilerplate []byte, typesToGenerate []*types.Type, customArgs *informergenargs.CustomArgs) []generator.Package { + packagePath := filepath.Join(basePackage, "client") + + vers := make([]generator.Package, 0, 2) + + // Impl + vers = append(vers, &generator.DefaultPackage{ + PackageName: "client", + PackagePath: packagePath, + HeaderText: boilerplate, + GeneratorFunc: func(c *generator.Context) (generators []generator.Generator) { + // Impl + generators = append(generators, &clientGenerator{ + DefaultGen: generator.DefaultGen{ + OptionalName: "client", + }, + outputPackage: packagePath, + imports: generator.NewImportTracker(), + clientSetPackage: customArgs.VersionedClientSetPackage, + }) + + return generators + }, + FilterFunc: func(c *generator.Context, t *types.Type) bool { + tags := util.MustParseClientGenTags(append(t.SecondClosestCommentLines, t.CommentLines...)) + return tags.GenerateClient && tags.HasVerb("list") && tags.HasVerb("watch") + }, + }) + + // Fake + vers = append(vers, &generator.DefaultPackage{ + PackageName: "fake", + PackagePath: packagePath + "/fake", + HeaderText: boilerplate, + GeneratorFunc: func(c *generator.Context) (generators []generator.Generator) { + + // Impl + generators = append(generators, &fakeClientGenerator{ + DefaultGen: generator.DefaultGen{ + OptionalName: "fake", + }, + outputPackage: packagePath + "/fake", + imports: generator.NewImportTracker(), + fakeClientPkg: customArgs.VersionedClientSetPackage + "/fake", + clientInjectionPkg: packagePath, + }) + + return generators + }, + FilterFunc: func(c *generator.Context, t *types.Type) bool { + tags := util.MustParseClientGenTags(append(t.SecondClosestCommentLines, t.CommentLines...)) + return tags.GenerateClient && tags.HasVerb("list") && tags.HasVerb("watch") + }, + }) + + return vers +} + +func versionFactoryPackages(basePackage string, groupPkgName string, gv clientgentypes.GroupVersion, groupGoName string, boilerplate []byte, typesToGenerate []*types.Type, customArgs *informergenargs.CustomArgs) []generator.Package { + packagePath := filepath.Join(basePackage, "informers", groupPkgName, "factory") + + vers := make([]generator.Package, 0, 2) + + // Impl + vers = append(vers, &generator.DefaultPackage{ + PackageName: strings.ToLower(groupPkgName + "factory"), + PackagePath: 
packagePath, + HeaderText: boilerplate, + GeneratorFunc: func(c *generator.Context) (generators []generator.Generator) { + // Impl + generators = append(generators, &factoryGenerator{ + DefaultGen: generator.DefaultGen{ + OptionalName: groupPkgName + "factory", + }, + outputPackage: packagePath, + cachingClientSetPackage: fmt.Sprintf("%s/client", basePackage), + sharedInformerFactoryPackage: customArgs.ExternalVersionsInformersPackage, + imports: generator.NewImportTracker(), + }) + + return generators + }, + FilterFunc: func(c *generator.Context, t *types.Type) bool { + tags := util.MustParseClientGenTags(append(t.SecondClosestCommentLines, t.CommentLines...)) + return tags.GenerateClient && tags.HasVerb("list") && tags.HasVerb("watch") + }, + }) + + // Fake + vers = append(vers, &generator.DefaultPackage{ + PackageName: "fake", + PackagePath: packagePath + "/fake", + HeaderText: boilerplate, + GeneratorFunc: func(c *generator.Context) (generators []generator.Generator) { + + // Impl + generators = append(generators, &fakeFactoryGenerator{ + DefaultGen: generator.DefaultGen{ + OptionalName: "fake", + }, + outputPackage: packagePath + "/fake", + factoryInjectionPkg: packagePath, + fakeClientInjectionPkg: fmt.Sprintf("%s/client/fake", basePackage), + sharedInformerFactoryPackage: customArgs.ExternalVersionsInformersPackage, + imports: generator.NewImportTracker(), + }) + + return generators + }, + FilterFunc: func(c *generator.Context, t *types.Type) bool { + tags := util.MustParseClientGenTags(append(t.SecondClosestCommentLines, t.CommentLines...)) + return tags.GenerateClient && tags.HasVerb("list") && tags.HasVerb("watch") + }, + }) + + return vers +} + +func versionInformerPackages(basePackage string, groupPkgName string, gv clientgentypes.GroupVersion, groupGoName string, boilerplate []byte, typesToGenerate []*types.Type, customArgs *informergenargs.CustomArgs) []generator.Package { + factoryPackagePath := filepath.Join(basePackage, "informers", groupPkgName, "factory") + packagePath := filepath.Join(basePackage, "informers", groupPkgName, strings.ToLower(gv.Version.NonEmpty())) + + vers := make([]generator.Package, 0, len(typesToGenerate)) + + for _, t := range typesToGenerate { + // Fix for golang iterator bug. 
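+		// (The GeneratorFunc closures below capture t; without this per-iteration
+		// copy they would all observe the final value of the loop variable.)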
+ t := t + + packagePath := packagePath + "/" + strings.ToLower(t.Name.Name) + typedInformerPackage := typedInformerPackage(groupPkgName, gv, customArgs.ExternalVersionsInformersPackage) + + // Impl + vers = append(vers, &generator.DefaultPackage{ + PackageName: strings.ToLower(t.Name.Name), + PackagePath: packagePath, + HeaderText: boilerplate, + GeneratorFunc: func(c *generator.Context) (generators []generator.Generator) { + // Impl + generators = append(generators, &injectionGenerator{ + DefaultGen: generator.DefaultGen{ + OptionalName: strings.ToLower(t.Name.Name), + }, + outputPackage: packagePath, + groupVersion: gv, + groupGoName: groupGoName, + typeToGenerate: t, + imports: generator.NewImportTracker(), + typedInformerPackage: typedInformerPackage, + groupInformerFactoryPackage: factoryPackagePath, + }) + + return generators + }, + FilterFunc: func(c *generator.Context, t *types.Type) bool { + tags := util.MustParseClientGenTags(append(t.SecondClosestCommentLines, t.CommentLines...)) + return tags.GenerateClient && tags.HasVerb("list") && tags.HasVerb("watch") + }, + }) + + // Fake + vers = append(vers, &generator.DefaultPackage{ + PackageName: "fake", + PackagePath: packagePath + "/fake", + HeaderText: boilerplate, + GeneratorFunc: func(c *generator.Context) (generators []generator.Generator) { + // Impl + generators = append(generators, &fakeInformerGenerator{ + DefaultGen: generator.DefaultGen{ + OptionalName: "fake", + }, + outputPackage: packagePath + "/fake", + imports: generator.NewImportTracker(), + typeToGenerate: t, + groupVersion: gv, + groupGoName: groupGoName, + informerInjectionPkg: packagePath, + fakeFactoryInjectionPkg: factoryPackagePath + "/fake", + }) + + return generators + }, + FilterFunc: func(c *generator.Context, t *types.Type) bool { + tags := util.MustParseClientGenTags(append(t.SecondClosestCommentLines, t.CommentLines...)) + return tags.GenerateClient && tags.HasVerb("list") && tags.HasVerb("watch") + }, + }) + } + return vers +} diff --git a/vendor/github.com/knative/pkg/codegen/cmd/injection-gen/main.go b/vendor/github.com/knative/pkg/codegen/cmd/injection-gen/main.go new file mode 100644 index 000000000..4120d65cc --- /dev/null +++ b/vendor/github.com/knative/pkg/codegen/cmd/injection-gen/main.go @@ -0,0 +1,59 @@ +/* +Copyright 2019 The Knative Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "flag" + "path/filepath" + + "k8s.io/code-generator/pkg/util" + "k8s.io/gengo/args" + "k8s.io/klog" + + generatorargs "github.com/knative/pkg/codegen/cmd/injection-gen/args" + "github.com/knative/pkg/codegen/cmd/injection-gen/generators" + "github.com/spf13/pflag" +) + +func main() { + klog.InitFlags(nil) + genericArgs, customArgs := generatorargs.NewDefaults() + + // Override defaults. 
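+	// These are fallback values only; wrapper scripts such as hack/generate-knative.sh
+	// are expected to override them via the --go-header-file and --output-package flags.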
+ genericArgs.GoHeaderFilePath = filepath.Join(args.DefaultSourceTree(), util.BoilerplatePath()) + genericArgs.OutputPackagePath = "k8s.io/kubernetes/pkg/client/injection/informers/informers_generated" + + genericArgs.AddFlags(pflag.CommandLine) + customArgs.AddFlags(pflag.CommandLine) + flag.Set("logtostderr", "true") + pflag.CommandLine.AddGoFlagSet(flag.CommandLine) + pflag.Parse() + + if err := generatorargs.Validate(genericArgs); err != nil { + klog.Fatalf("Error: %v", err) + } + + // Run it. + if err := genericArgs.Execute( + generators.NameSystems(), + generators.DefaultNameSystem(), + generators.Packages, + ); err != nil { + klog.Fatalf("Error: %v", err) + } + klog.V(2).Info("Completed successfully.") +} diff --git a/vendor/github.com/knative/pkg/configmap/OWNERS b/vendor/github.com/knative/pkg/configmap/OWNERS new file mode 100644 index 000000000..2480fc6d4 --- /dev/null +++ b/vendor/github.com/knative/pkg/configmap/OWNERS @@ -0,0 +1,4 @@ +# The OWNERS file is used by prow to automatically merge approved PRs. + +approvers: +- configmap-approvers diff --git a/vendor/github.com/knative/pkg/configmap/testing/configmap.go b/vendor/github.com/knative/pkg/configmap/testing/configmap.go new file mode 100644 index 000000000..57646a7f8 --- /dev/null +++ b/vendor/github.com/knative/pkg/configmap/testing/configmap.go @@ -0,0 +1,97 @@ +/* +Copyright 2019 The Knative Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testing + +import ( + "fmt" + "io/ioutil" + "strings" + "testing" + + "github.com/ghodss/yaml" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/sets" +) + +const ExampleKey = "_example" + +// ConfigMapFromTestFile creates a v1.ConfigMap from a YAML file +// It loads the YAML file from the testdata folder. +func ConfigMapFromTestFile(t *testing.T, name string, allowed ...string) *corev1.ConfigMap { + t.Helper() + + cm, _ := ConfigMapsFromTestFile(t, name, allowed...) + return cm +} + +// configMapsFromTestFile creates two corev1.ConfigMap resources from the config +// file read from the testdata directory: +// 1. The raw configmap read in. +// 2. A second version of the configmap augmenting `data:` with what's parsed from the value of `_example:` +func ConfigMapsFromTestFile(t *testing.T, name string, allowed ...string) (*corev1.ConfigMap, *corev1.ConfigMap) { + t.Helper() + + b, err := ioutil.ReadFile(fmt.Sprintf("testdata/%s.yaml", name)) + if err != nil { + t.Fatalf("ReadFile() = %v", err) + } + + var orig corev1.ConfigMap + + // Use github.com/ghodss/yaml since it reads json struct + // tags so things unmarshal properly + if err := yaml.Unmarshal(b, &orig); err != nil { + t.Fatalf("yaml.Unmarshal() = %v", err) + } + + // We expect each of the allowed keys, and a key holding an example + // configuration for us to validate. 
+ allowed = append(allowed, ExampleKey) + + if len(orig.Data) != len(allowed) { + // See here for why we only check in empty ConfigMaps: + // https://github.com/knative/serving/issues/2668 + t.Errorf("Data = %v, wanted %v", orig.Data, allowed) + } + allow := sets.NewString(allowed...) + for key := range orig.Data { + if !allow.Has(key) { + t.Errorf("Encountered key %q in %q that wasn't on the allowed list", key, name) + } + } + // With the length and membership checks, we know that the keyspace matches. + + exampleBody := orig.Data[ExampleKey] + // Check that exampleBody does not have lines that end in a trailing space, + for i, line := range strings.Split(exampleBody, "\n") { + if strings.HasSuffix(line, " ") { + t.Errorf("line %d of %q example contains trailing spaces", i, name) + } + } + + // Parse exampleBody into exemplar.Data + exemplar := orig.DeepCopy() + if err := yaml.Unmarshal([]byte(exampleBody), &exemplar.Data); err != nil { + t.Fatalf("yaml.Unmarshal() = %v", err) + } + // Augment the sample with actual configuration + for k, v := range orig.Data { + if _, ok := exemplar.Data[k]; ok { + continue + } + exemplar.Data[k] = v + } + + return &orig, exemplar +} diff --git a/vendor/github.com/knative/pkg/controller/OWNERS b/vendor/github.com/knative/pkg/controller/OWNERS new file mode 100644 index 000000000..afa22257a --- /dev/null +++ b/vendor/github.com/knative/pkg/controller/OWNERS @@ -0,0 +1,4 @@ +# The OWNERS file is used by prow to automatically merge approved PRs. + +approvers: +- controller-approvers diff --git a/vendor/github.com/knative/pkg/controller/controller.go b/vendor/github.com/knative/pkg/controller/controller.go new file mode 100644 index 000000000..4471b9016 --- /dev/null +++ b/vendor/github.com/knative/pkg/controller/controller.go @@ -0,0 +1,474 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + "context" + "fmt" + "sync" + "time" + + "github.com/google/uuid" + + "go.uber.org/zap" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/tools/record" + "k8s.io/client-go/util/workqueue" + + "github.com/knative/pkg/kmeta" + "github.com/knative/pkg/logging" + "github.com/knative/pkg/logging/logkey" +) + +const ( + falseString = "false" + trueString = "true" + + // DefaultResyncPeriod is the default duration that is used when no + // resync period is associated with a controllers initialization context. + DefaultResyncPeriod = 10 * time.Hour +) + +var ( + // DefaultThreadsPerController is the number of threads to use + // when processing the controller's workqueue. Controller binaries + // may adjust this process-wide default. For finer control, invoke + // Run on the controller directly. + DefaultThreadsPerController = 2 +) + +// Reconciler is the interface that controller implementations are expected +// to implement, so that the shared controller.Impl can drive work through it. 
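+//
+// A minimal sketch of an implementation (names are illustrative only):
+//
+//	type myReconciler struct{ lister corev1listers.ServiceLister }
+//
+//	func (r *myReconciler) Reconcile(ctx context.Context, key string) error {
+//		namespace, name, err := cache.SplitMetaNamespaceKey(key)
+//		if err != nil {
+//			return err
+//		}
+//		_, err = r.lister.Services(namespace).Get(name)
+//		return err // wrap with NewPermanentError to suppress retries
+//	}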
+type Reconciler interface { + Reconcile(ctx context.Context, key string) error +} + +// PassNew makes it simple to create an UpdateFunc for use with +// cache.ResourceEventHandlerFuncs that can delegate the same methods +// as AddFunc/DeleteFunc but passing through only the second argument +// (which is the "new" object). +func PassNew(f func(interface{})) func(interface{}, interface{}) { + return func(first, second interface{}) { + f(second) + } +} + +// HandleAll wraps the provided handler function into a cache.ResourceEventHandler +// that sends all events to the given handler. For Updates, only the new object +// is forwarded. +func HandleAll(h func(interface{})) cache.ResourceEventHandler { + return cache.ResourceEventHandlerFuncs{ + AddFunc: h, + UpdateFunc: PassNew(h), + DeleteFunc: h, + } +} + +// Filter makes it simple to create FilterFunc's for use with +// cache.FilteringResourceEventHandler that filter based on the +// schema.GroupVersionKind of the controlling resources. +func Filter(gvk schema.GroupVersionKind) func(obj interface{}) bool { + return func(obj interface{}) bool { + if object, ok := obj.(metav1.Object); ok { + owner := metav1.GetControllerOf(object) + return owner != nil && + owner.APIVersion == gvk.GroupVersion().String() && + owner.Kind == gvk.Kind + } + return false + } +} + +// FilterWithNameAndNamespace makes it simple to create FilterFunc's for use with +// cache.FilteringResourceEventHandler that filter based on a namespace and a name. +func FilterWithNameAndNamespace(namespace, name string) func(obj interface{}) bool { + return func(obj interface{}) bool { + if object, ok := obj.(metav1.Object); ok { + return name == object.GetName() && + namespace == object.GetNamespace() + } + return false + } +} + +// Impl is our core controller implementation. It handles queuing and feeding work +// from the queue to an implementation of Reconciler. +type Impl struct { + // Reconciler is the workhorse of this controller, it is fed the keys + // from the workqueue to process. Public for testing. + Reconciler Reconciler + + // WorkQueue is a rate limited work queue. This is used to queue work to be + // processed instead of performing it as soon as a change happens. This + // means we can ensure we only process a fixed amount of resources at a + // time, and makes it easy to ensure we are never processing the same item + // simultaneously in two different workers. + WorkQueue workqueue.RateLimitingInterface + + // Sugared logger is easier to use but is not as performant as the + // raw logger. In performance critical paths, call logger.Desugar() + // and use the returned raw logger instead. In addition to the + // performance benefits, raw logger also preserves type-safety at + // the expense of slightly greater verbosity. + logger *zap.SugaredLogger + + // StatsReporter is used to send common controller metrics. + statsReporter StatsReporter +} + +// NewImpl instantiates an instance of our controller that will feed work to the +// provided Reconciler as it is enqueued. 
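+// For example, in a controller constructor (names illustrative):
+//
+//	impl := NewImpl(&myReconciler{}, logger, "my-controller")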
+func NewImpl(r Reconciler, logger *zap.SugaredLogger, workQueueName string) *Impl {
+	return NewImplWithStats(r, logger, workQueueName, MustNewStatsReporter(workQueueName, logger))
+}
+
+func NewImplWithStats(r Reconciler, logger *zap.SugaredLogger, workQueueName string, reporter StatsReporter) *Impl {
+	return &Impl{
+		Reconciler: r,
+		WorkQueue: workqueue.NewNamedRateLimitingQueue(
+			workqueue.DefaultControllerRateLimiter(),
+			workQueueName,
+		),
+		logger:        logger,
+		statsReporter: reporter,
+	}
+}
+
+// EnqueueAfter takes a resource, converts it into a namespace/name string,
+// and schedules it via EnqueueKeyAfter.
+func (c *Impl) EnqueueAfter(obj interface{}, after time.Duration) {
+	key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj)
+	if err != nil {
+		c.logger.Errorw("Enqueue", zap.Error(err))
+		return
+	}
+	c.EnqueueKeyAfter(key, after)
+}
+
+// Enqueue takes a resource, converts it into a namespace/name string,
+// and passes it to EnqueueKey.
+func (c *Impl) Enqueue(obj interface{}) {
+	key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj)
+	if err != nil {
+		c.logger.Errorw("Enqueue", zap.Error(err))
+		return
+	}
+	c.EnqueueKey(key)
+}
+
+// EnqueueControllerOf takes a resource, identifies its controller resource,
+// converts it into a namespace/name string, and passes that to EnqueueKey.
+func (c *Impl) EnqueueControllerOf(obj interface{}) {
+	object, err := kmeta.DeletionHandlingAccessor(obj)
+	if err != nil {
+		c.logger.Error(err)
+		return
+	}
+
+	// If we can determine the controller ref of this object, then
+	// add that object to our workqueue.
+	if owner := metav1.GetControllerOf(object); owner != nil {
+		c.EnqueueKey(object.GetNamespace() + "/" + owner.Name)
+	}
+}
+
+// EnqueueLabelOfNamespaceScopedResource returns an Enqueue func that
+// takes a resource, identifies its controller resource through the given
+// namespace and name labels, converts it into a namespace/name string, and
+// passes that to EnqueueKey. The controller resource must be namespace-scoped.
+func (c *Impl) EnqueueLabelOfNamespaceScopedResource(namespaceLabel, nameLabel string) func(obj interface{}) {
+	return func(obj interface{}) {
+		object, err := kmeta.DeletionHandlingAccessor(obj)
+		if err != nil {
+			c.logger.Error(err)
+			return
+		}
+
+		labels := object.GetLabels()
+		controllerKey, ok := labels[nameLabel]
+		if !ok {
+			c.logger.Debugf("Object %s/%s does not have a referring name label %s",
+				object.GetNamespace(), object.GetName(), nameLabel)
+			return
+		}
+
+		if namespaceLabel != "" {
+			controllerNamespace, ok := labels[namespaceLabel]
+			if !ok {
+				c.logger.Debugf("Object %s/%s does not have a referring namespace label %s",
+					object.GetNamespace(), object.GetName(), namespaceLabel)
+				return
+			}
+
+			c.EnqueueKey(fmt.Sprintf("%s/%s", controllerNamespace, controllerKey))
+			return
+		}
+
+		// Pass through the object's own namespace if no namespace label is specified.
+		// This covers the scenario where the object and the parent resource are in
+		// the same namespace, e.g. to enqueue the revision of an endpoint.
+		c.EnqueueKey(fmt.Sprintf("%s/%s", object.GetNamespace(), controllerKey))
+	}
+}
+
+// EnqueueLabelOfClusterScopedResource returns an Enqueue func
+// that takes a resource, identifies its controller resource through
+// the given name label, and passes it to EnqueueKey.
+// The controller resource must be cluster-scoped.
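+// For example, with an informer event handler (label name illustrative):
+//
+//	informer.Informer().AddEventHandler(HandleAll(
+//		impl.EnqueueLabelOfClusterScopedResource("example.dev/parentName")))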
+func (c *Impl) EnqueueLabelOfClusterScopedResource(nameLabel string) func(obj interface{}) {
+	return func(obj interface{}) {
+		object, err := kmeta.DeletionHandlingAccessor(obj)
+		if err != nil {
+			c.logger.Error(err)
+			return
+		}
+
+		labels := object.GetLabels()
+		controllerKey, ok := labels[nameLabel]
+		if !ok {
+			c.logger.Debugf("Object %s/%s does not have a referring name label %s",
+				object.GetNamespace(), object.GetName(), nameLabel)
+			return
+		}
+
+		c.EnqueueKey(controllerKey)
+	}
+}
+
+// EnqueueKey takes a namespace/name string and puts it onto the work queue.
+func (c *Impl) EnqueueKey(key string) {
+	c.WorkQueue.Add(key)
+}
+
+// EnqueueKeyAfter takes a namespace/name string and schedules its execution in
+// the work queue after the given delay.
+func (c *Impl) EnqueueKeyAfter(key string, delay time.Duration) {
+	c.WorkQueue.AddAfter(key, delay)
+}
+
+// Run starts the controller's worker threads, the number of which is threadiness.
+// It then blocks until stopCh is closed, at which point it shuts down its internal
+// work queue and waits for workers to finish processing their current work items.
+func (c *Impl) Run(threadiness int, stopCh <-chan struct{}) error {
+	defer runtime.HandleCrash()
+	sg := sync.WaitGroup{}
+	defer sg.Wait()
+	defer c.WorkQueue.ShutDown()
+
+	// Launch workers to process resources that get enqueued to our workqueue.
+	logger := c.logger
+	logger.Info("Starting controller and workers")
+	for i := 0; i < threadiness; i++ {
+		sg.Add(1)
+		go func() {
+			defer sg.Done()
+			for c.processNextWorkItem() {
+			}
+		}()
+	}
+
+	logger.Info("Started workers")
+	<-stopCh
+	logger.Info("Shutting down workers")
+
+	return nil
+}
+
+// processNextWorkItem will read a single work item off the workqueue and
+// attempt to process it by calling Reconcile on our Reconciler.
+func (c *Impl) processNextWorkItem() bool {
+	obj, shutdown := c.WorkQueue.Get()
+	if shutdown {
+		return false
+	}
+	key := obj.(string)
+
+	startTime := time.Now()
+	// Send the metrics for the current queue depth
+	c.statsReporter.ReportQueueDepth(int64(c.WorkQueue.Len()))
+
+	// We call Done here so the workqueue knows we have finished
+	// processing this item. We also must remember to call Forget if
+	// reconcile succeeds. If a transient error occurs, we do not call
+	// Forget and put the item back to the queue with an increased
+	// delay.
+	defer c.WorkQueue.Done(key)
+
+	var err error
+	defer func() {
+		status := trueString
+		if err != nil {
+			status = falseString
+		}
+		c.statsReporter.ReportReconcile(time.Since(startTime), key, status)
+	}()
+
+	// Embed the key into the logger and attach that to the context we pass
+	// to the Reconciler.
+	logger := c.logger.With(zap.String(logkey.TraceId, uuid.New().String()), zap.String(logkey.Key, key))
+	ctx := logging.WithLogger(context.TODO(), logger)
+
+	// Run Reconcile, passing it the namespace/name string of the
+	// resource to be synced.
+	if err = c.Reconciler.Reconcile(ctx, key); err != nil {
+		c.handleErr(err, key)
+		logger.Infof("Reconcile failed. Time taken: %v.", time.Since(startTime))
+		return true
+	}
+
+	// Finally, if no error occurs we Forget this item so it does not
+	// have any delay when another change happens.
+	c.WorkQueue.Forget(key)
+	logger.Infof("Reconcile succeeded. Time taken: %v.", time.Since(startTime))
+
+	return true
+}
+
+func (c *Impl) handleErr(err error, key string) {
+	c.logger.Errorw("Reconcile error", zap.Error(err))
+
+	// Re-queue the key if it's a transient error.
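+	// AddRateLimited applies the workqueue's rate limiter (for NewImpl, the
+	// default controller rate limiter: per-item exponential backoff combined
+	// with an overall token bucket), so failing keys are retried with growing delay.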
+	if !IsPermanentError(err) {
+		c.WorkQueue.AddRateLimited(key)
+		return
+	}
+
+	c.WorkQueue.Forget(key)
+}
+
+// GlobalResync enqueues all objects from the passed SharedInformer.
+func (c *Impl) GlobalResync(si cache.SharedInformer) {
+	for _, key := range si.GetStore().ListKeys() {
+		c.EnqueueKey(key)
+	}
+}
+
+// NewPermanentError returns a new instance of permanentError.
+// Users can wrap an error as permanentError with this in Reconcile
+// when they do not expect the key to get re-queued.
+func NewPermanentError(err error) error {
+	return permanentError{e: err}
+}
+
+// permanentError is an error that is considered non-transient.
+// Keys are not re-queued when Reconcile returns this error.
+type permanentError struct {
+	e error
+}
+
+// IsPermanentError returns true if the given error is a permanentError.
+func IsPermanentError(err error) bool {
+	switch err.(type) {
+	case permanentError:
+		return true
+	default:
+		return false
+	}
+}
+
+// Error implements the error interface.
+func (err permanentError) Error() string {
+	if err.e == nil {
+		return ""
+	}
+
+	return err.e.Error()
+}
+
+// Informer is the group of methods that a type must implement to be passed to
+// StartInformers.
+type Informer interface {
+	Run(<-chan struct{})
+	HasSynced() bool
+}
+
+// StartInformers kicks off all of the passed informers and then waits for all
+// of them to synchronize.
+func StartInformers(stopCh <-chan struct{}, informers ...Informer) error {
+	for _, informer := range informers {
+		informer := informer
+		go informer.Run(stopCh)
+	}
+
+	for i, informer := range informers {
+		if ok := cache.WaitForCacheSync(stopCh, informer.HasSynced); !ok {
+			return fmt.Errorf("Failed to wait for cache at index %d to sync", i)
+		}
+	}
+	return nil
+}
+
+// StartAll kicks off all of the passed controllers with DefaultThreadsPerController.
+func StartAll(stopCh <-chan struct{}, controllers ...*Impl) {
+	wg := sync.WaitGroup{}
+	// Start all of the controllers.
+	for _, ctrlr := range controllers {
+		wg.Add(1)
+		go func(c *Impl) {
+			defer wg.Done()
+			c.Run(DefaultThreadsPerController, stopCh)
+		}(ctrlr)
+	}
+	wg.Wait()
+}
+
+// resyncPeriodKey is attached to contexts passed to controller constructors to
+// associate a resync period.
+type resyncPeriodKey struct{}
+
+// WithResyncPeriod associates the given resync period with the given context in
+// the context that is returned.
+func WithResyncPeriod(ctx context.Context, resync time.Duration) context.Context {
+	return context.WithValue(ctx, resyncPeriodKey{}, resync)
+}
+
+// GetResyncPeriod returns the resync period associated with the given context.
+// When none is specified, a default resync period is used.
+func GetResyncPeriod(ctx context.Context) time.Duration {
+	rp := ctx.Value(resyncPeriodKey{})
+	if rp == nil {
+		return DefaultResyncPeriod
+	}
+	return rp.(time.Duration)
+}
+
+// GetTrackerLease fetches the tracker lease from the controller context.
+func GetTrackerLease(ctx context.Context) time.Duration {
+	return 3 * GetResyncPeriod(ctx)
+}
+
+// erKey is used to associate record.EventRecorders with contexts.
+type erKey struct{}
+
+// WithEventRecorder attaches the given record.EventRecorder to the provided context
+// in the returned context.
+func WithEventRecorder(ctx context.Context, er record.EventRecorder) context.Context {
+	return context.WithValue(ctx, erKey{}, er)
+}
+
+// GetEventRecorder attempts to look up the record.EventRecorder on a given context.
+// It may return nil if none is found.
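+// Callers should nil-check the result before emitting events, e.g.:
+//
+//	if recorder := GetEventRecorder(ctx); recorder != nil {
+//		recorder.Eventf(obj, corev1.EventTypeNormal, "Synced", "synced %q", key)
+//	}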
+func GetEventRecorder(ctx context.Context) record.EventRecorder {
+	untyped := ctx.Value(erKey{})
+	if untyped == nil {
+		return nil
+	}
+	return untyped.(record.EventRecorder)
+}
diff --git a/vendor/github.com/knative/pkg/controller/helper.go b/vendor/github.com/knative/pkg/controller/helper.go
new file mode 100644
index 000000000..887d715ea
--- /dev/null
+++ b/vendor/github.com/knative/pkg/controller/helper.go
@@ -0,0 +1,67 @@
+/*
+Copyright 2018 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    https://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package controller
+
+import (
+	"k8s.io/apimachinery/pkg/api/meta"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/client-go/tools/cache"
+
+	"github.com/knative/pkg/kmeta"
+)
+
+type Callback func(interface{})
+
+func EnsureTypeMeta(f Callback, gvk schema.GroupVersionKind) Callback {
+	apiVersion, kind := gvk.ToAPIVersionAndKind()
+
+	return func(untyped interface{}) {
+		typed, err := kmeta.DeletionHandlingAccessor(untyped)
+		if err != nil {
+			// TODO: We should consider logging here.
+			return
+		}
+		// We need to populate TypeMeta, but cannot trample the
+		// informer's copy.
+		// TODO(mattmoor): Avoid the copy if TypeMeta is set.
+		copy := typed.DeepCopyObject()
+
+		accessor, err := meta.TypeAccessor(copy)
+		if err != nil {
+			return
+		}
+		accessor.SetAPIVersion(apiVersion)
+		accessor.SetKind(kind)
+
+		// Pass in the mutated copy (accessor is not just a type cast)
+		f(copy)
+	}
+}
+
+// SendGlobalUpdates triggers an update event for all objects from the
+// passed SharedInformer.
+//
+// Since this is triggered not by a real update of these objects
+// themselves, we have no way of knowing what, if anything, changed,
+// so we call handler.OnUpdate(obj, obj) for all of them regardless
+// of whether they have changed.
+func SendGlobalUpdates(si cache.SharedInformer, handler cache.ResourceEventHandler) {
+	store := si.GetStore()
+	for _, obj := range store.List() {
+		handler.OnUpdate(obj, obj)
+	}
+}
diff --git a/vendor/github.com/knative/pkg/controller/stats_reporter.go b/vendor/github.com/knative/pkg/controller/stats_reporter.go
new file mode 100644
index 000000000..2dff988ed
--- /dev/null
+++ b/vendor/github.com/knative/pkg/controller/stats_reporter.go
@@ -0,0 +1,148 @@
+/*
+Copyright 2018 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package controller + +import ( + "context" + "errors" + "time" + + "github.com/knative/pkg/metrics" + "go.opencensus.io/stats" + "go.opencensus.io/stats/view" + "go.opencensus.io/tag" + "go.uber.org/zap" +) + +var ( + workQueueDepthStat = stats.Int64("work_queue_depth", "Depth of the work queue", stats.UnitNone) + reconcileCountStat = stats.Int64("reconcile_count", "Number of reconcile operations", stats.UnitNone) + reconcileLatencyStat = stats.Int64("reconcile_latency", "Latency of reconcile operations", stats.UnitMilliseconds) + + // reconcileDistribution defines the bucket boundaries for the histogram of reconcile latency metric. + // Bucket boundaries are 10ms, 100ms, 1s, 10s, 30s and 60s. + reconcileDistribution = view.Distribution(10, 100, 1000, 10000, 30000, 60000) + + // Create the tag keys that will be used to add tags to our measurements. + // Tag keys must conform to the restrictions described in + // go.opencensus.io/tag/validate.go. Currently those restrictions are: + // - length between 1 and 255 inclusive + // - characters are printable US-ASCII + reconcilerTagKey = mustNewTagKey("reconciler") + keyTagKey = mustNewTagKey("key") + successTagKey = mustNewTagKey("success") +) + +func init() { + // Create views to see our measurements. This can return an error if + // a previously-registered view has the same name with a different value. + // View name defaults to the measure name if unspecified. + err := view.Register( + &view.View{ + Description: "Depth of the work queue", + Measure: workQueueDepthStat, + Aggregation: view.LastValue(), + TagKeys: []tag.Key{reconcilerTagKey}, + }, + &view.View{ + Description: "Number of reconcile operations", + Measure: reconcileCountStat, + Aggregation: view.Count(), + TagKeys: []tag.Key{reconcilerTagKey, keyTagKey, successTagKey}, + }, + &view.View{ + Description: "Latency of reconcile operations", + Measure: reconcileLatencyStat, + Aggregation: reconcileDistribution, + TagKeys: []tag.Key{reconcilerTagKey, keyTagKey, successTagKey}, + }, + ) + if err != nil { + panic(err) + } +} + +// StatsReporter defines the interface for sending metrics +type StatsReporter interface { + // ReportQueueDepth reports the queue depth metric + ReportQueueDepth(v int64) error + + // ReportReconcile reports the count and latency metrics for a reconcile operation + ReportReconcile(duration time.Duration, key, success string) error +} + +// Reporter holds cached metric objects to report metrics +type reporter struct { + reconciler string + globalCtx context.Context +} + +// NewStatsReporter creates a reporter that collects and reports metrics +func NewStatsReporter(reconciler string) (StatsReporter, error) { + // Reconciler tag is static. Create a context containing that and cache it. + ctx, err := tag.New( + context.Background(), + tag.Insert(reconcilerTagKey, reconciler)) + if err != nil { + return nil, err + } + + return &reporter{reconciler: reconciler, globalCtx: ctx}, nil +} + +// MustNewStatsReporter creates a new instance of StatsReporter. +// Logs fatally if creation fails. 
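+// It is intended for initialization paths where failure should abort startup,
+// e.g. reporter := MustNewStatsReporter("my-controller", logger).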
+func MustNewStatsReporter(reconciler string, logger *zap.SugaredLogger) StatsReporter { + stats, err := NewStatsReporter(reconciler) + if err != nil { + logger.Fatalw("Failed to initialize the stats reporter", zap.Error(err)) + } + return stats +} + +// ReportQueueDepth reports the queue depth metric +func (r *reporter) ReportQueueDepth(v int64) error { + if r.globalCtx == nil { + return errors.New("reporter is not initialized correctly") + } + metrics.Record(r.globalCtx, workQueueDepthStat.M(v)) + return nil +} + +// ReportReconcile reports the count and latency metrics for a reconcile operation +func (r *reporter) ReportReconcile(duration time.Duration, key, success string) error { + ctx, err := tag.New( + context.Background(), + tag.Insert(reconcilerTagKey, r.reconciler), + tag.Insert(keyTagKey, key), + tag.Insert(successTagKey, success)) + if err != nil { + return err + } + + metrics.Record(ctx, reconcileCountStat.M(1)) + metrics.Record(ctx, reconcileLatencyStat.M(int64(duration/time.Millisecond))) + return nil +} + +func mustNewTagKey(s string) tag.Key { + tagKey, err := tag.NewKey(s) + if err != nil { + panic(err) + } + return tagKey +} diff --git a/vendor/github.com/knative/pkg/controller/testing/fake_stats_reporter.go b/vendor/github.com/knative/pkg/controller/testing/fake_stats_reporter.go new file mode 100644 index 000000000..506754dc1 --- /dev/null +++ b/vendor/github.com/knative/pkg/controller/testing/fake_stats_reporter.go @@ -0,0 +1,65 @@ +/* +Copyright 2017 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testing + +import ( + "sync" + "time" +) + +// FakeStatsReporter is a fake implementation of StatsReporter +type FakeStatsReporter struct { + queueDepths []int64 + reconcileData []FakeReconcileStatData + Lock sync.Mutex +} + +// FakeReconcileStatData is used to record the calls to ReportReconcile +type FakeReconcileStatData struct { + Duration time.Duration + Key, Success string +} + +// ReportQueueDepth records the call and returns success. +func (r *FakeStatsReporter) ReportQueueDepth(v int64) error { + r.Lock.Lock() + defer r.Lock.Unlock() + r.queueDepths = append(r.queueDepths, v) + return nil +} + +// ReportReconcile records the call and returns success. 
+func (r *FakeStatsReporter) ReportReconcile(duration time.Duration, key, success string) error {
+	r.Lock.Lock()
+	defer r.Lock.Unlock()
+	r.reconcileData = append(r.reconcileData, FakeReconcileStatData{duration, key, success})
+	return nil
+}
+
+// GetQueueDepths returns the recorded queue depth values
+func (r *FakeStatsReporter) GetQueueDepths() []int64 {
+	r.Lock.Lock()
+	defer r.Lock.Unlock()
+	return r.queueDepths
+}
+
+// GetReconcileData returns the recorded reconcile data
+func (r *FakeStatsReporter) GetReconcileData() []FakeReconcileStatData {
+	r.Lock.Lock()
+	defer r.Lock.Unlock()
+	return r.reconcileData
+}
diff --git a/vendor/github.com/knative/pkg/hack/OWNERS b/vendor/github.com/knative/pkg/hack/OWNERS
new file mode 100644
index 000000000..c50adc849
--- /dev/null
+++ b/vendor/github.com/knative/pkg/hack/OWNERS
@@ -0,0 +1,10 @@
+# The OWNERS file is used by prow to automatically merge approved PRs.
+
+approvers:
+- productivity-approvers
+
+reviewers:
+- productivity-reviewers
+
+labels:
+- area/test-and-release
diff --git a/vendor/github.com/knative/pkg/hack/boilerplate/boilerplate.go.txt b/vendor/github.com/knative/pkg/hack/boilerplate/boilerplate.go.txt
new file mode 100644
index 000000000..1f43b023a
--- /dev/null
+++ b/vendor/github.com/knative/pkg/hack/boilerplate/boilerplate.go.txt
@@ -0,0 +1,15 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
diff --git a/vendor/github.com/knative/pkg/hack/generate-knative.sh b/vendor/github.com/knative/pkg/hack/generate-knative.sh
new file mode 100755
index 000000000..d13d49108
--- /dev/null
+++ b/vendor/github.com/knative/pkg/hack/generate-knative.sh
@@ -0,0 +1,78 @@
+#!/usr/bin/env bash

+# Copyright 2019 The Knative Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+# generate-groups generates everything for a project with external types only, e.g. a project based
+# on CustomResourceDefinitions.
+
+if [ "$#" -lt 4 ] || [ "${1}" == "--help" ]; then
+  cat <<EOF
+Usage: $(basename $0) <generators> <output-package> <apis-package> <groups-versions> ...
+
+  <generators>        the generators comma separated to run (deepcopy,defaulter,client,lister,informer) or "all".
+  <output-package>    the output package name (e.g. github.com/example/project/pkg/generated).
+  <apis-package>      the external types dir (e.g. github.com/example/api or github.com/example/project/pkg/apis).
+  <groups-versions>   the groups and their versions in the format "groupA:v1,v2 groupB:v1 groupC:v2", relative
+                      to <apis-package>.
+  ...                 arbitrary flags passed to all generator binaries.
+ + +Examples: + $(basename $0) all github.com/example/project/pkg/client github.com/example/project/pkg/apis "foo:v1 bar:v1alpha1,v1beta1" + $(basename $0) injection,foo github.com/example/project/pkg/client github.com/example/project/pkg/apis "foo:v1 bar:v1alpha1,v1beta1" +EOF + exit 0 +fi + +GENS="$1" +OUTPUT_PKG="$2" +APIS_PKG="$3" +GROUPS_WITH_VERSIONS="$4" +shift 4 + +( + # To support running this script from anywhere, we have to first cd into this directory + # so we can install the tools. + cd $(dirname "${0}") + go install ../codegen/cmd/injection-gen +) + +function codegen::join() { local IFS="$1"; shift; echo "$*"; } + +# enumerate group versions +FQ_APIS=() # e.g. k8s.io/api/apps/v1 +for GVs in ${GROUPS_WITH_VERSIONS}; do + IFS=: read G Vs <<<"${GVs}" + + # enumerate versions + for V in ${Vs//,/ }; do + FQ_APIS+=(${APIS_PKG}/${G}/${V}) + done +done + +if grep -qw "injection" <<<"${GENS}"; then + echo "Generating injection for ${GROUPS_WITH_VERSIONS} at ${OUTPUT_PKG}/injection" + + ${GOPATH}/bin/injection-gen \ + --input-dirs $(codegen::join , "${FQ_APIS[@]}") \ + --versioned-clientset-package ${OUTPUT_PKG}/clientset/versioned \ + --external-versions-informers-package ${OUTPUT_PKG}/informers/externalversions \ + --output-package ${OUTPUT_PKG}/injection \ + "$@" +fi diff --git a/vendor/github.com/knative/pkg/hack/update-codegen.sh b/vendor/github.com/knative/pkg/hack/update-codegen.sh new file mode 100755 index 000000000..ed4695d85 --- /dev/null +++ b/vendor/github.com/knative/pkg/hack/update-codegen.sh @@ -0,0 +1,55 @@ +#!/usr/bin/env bash + +# Copyright 2018 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -o errexit +set -o nounset +set -o pipefail + +source $(dirname $0)/../vendor/github.com/knative/test-infra/scripts/library.sh + +CODEGEN_PKG=${CODEGEN_PKG:-$(cd ${REPO_ROOT_DIR}; ls -d -1 ./vendor/k8s.io/code-generator 2>/dev/null || echo ../code-generator)} + +go install ./vendor/k8s.io/code-generator/cmd/deepcopy-gen + +# generate the code with: +# --output-base because this script should also be able to run inside the vendor dir of +# k8s.io/kubernetes. The output-base is needed for the generators to output into the vendor dir +# instead of the $GOPATH directly. For normal projects this can be dropped. +${CODEGEN_PKG}/generate-groups.sh "deepcopy,client,informer,lister" \ + github.com/knative/pkg/client github.com/knative/pkg/apis \ + "istio:v1alpha3 istio/authentication:v1alpha1" \ + --go-header-file ${REPO_ROOT_DIR}/hack/boilerplate/boilerplate.go.txt + +# Knative Injection +${REPO_ROOT_DIR}/hack/generate-knative.sh "injection" \ + github.com/knative/pkg/client github.com/knative/pkg/apis \ + "istio:v1alpha3 istio/authentication:v1alpha1" \ + --go-header-file ${REPO_ROOT_DIR}/hack/boilerplate/boilerplate.go.txt + +# Only deepcopy the Duck types, as they are not real resources. 
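+# (The duck:v1alpha1 and duck:v1beta1 groups get deepcopy functions only; no
+# clients, informers, or listers are generated for them.)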
+${CODEGEN_PKG}/generate-groups.sh "deepcopy" \ + github.com/knative/pkg/client github.com/knative/pkg/apis \ + "duck:v1alpha1,v1beta1" \ + --go-header-file ${REPO_ROOT_DIR}/hack/boilerplate/boilerplate.go.txt + +# Depends on generate-groups.sh to install bin/deepcopy-gen +${GOPATH}/bin/deepcopy-gen --input-dirs \ + github.com/knative/pkg/apis,github.com/knative/pkg/logging,github.com/knative/pkg/testing \ + -O zz_generated.deepcopy \ + --go-header-file ${REPO_ROOT_DIR}/hack/boilerplate/boilerplate.go.txt + +# Make sure our dependencies are up-to-date +${REPO_ROOT_DIR}/hack/update-deps.sh diff --git a/vendor/github.com/knative/pkg/hack/update-deps.sh b/vendor/github.com/knative/pkg/hack/update-deps.sh new file mode 100755 index 000000000..2213d6510 --- /dev/null +++ b/vendor/github.com/knative/pkg/hack/update-deps.sh @@ -0,0 +1,29 @@ +#!/usr/bin/env bash + +# Copyright 2018 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -o errexit +set -o nounset +set -o pipefail + +source $(dirname $0)/../vendor/github.com/knative/test-infra/scripts/library.sh + +cd ${REPO_ROOT_DIR} + +# Ensure we have everything we need under vendor/ +dep ensure + +rm -rf $(find vendor/ -name 'OWNERS') +rm -rf $(find vendor/ -name '*_test.go') diff --git a/vendor/github.com/knative/pkg/hack/verify-codegen.sh b/vendor/github.com/knative/pkg/hack/verify-codegen.sh new file mode 100755 index 000000000..18bbe693f --- /dev/null +++ b/vendor/github.com/knative/pkg/hack/verify-codegen.sh @@ -0,0 +1,74 @@ +#!/usr/bin/env bash + +# Copyright 2018 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +set -o errexit +set -o nounset +set -o pipefail + +source $(dirname $0)/../vendor/github.com/knative/test-infra/scripts/library.sh + +readonly TMP_DIFFROOT="$(mktemp -d ${REPO_ROOT_DIR}/tmpdiffroot.XXXXXX)" + +cleanup() { + rm -rf "${TMP_DIFFROOT}" +} + +trap "cleanup" EXIT SIGINT + +cleanup + +# Save working tree state +mkdir -p "${TMP_DIFFROOT}" + +cp -aR \ + "${REPO_ROOT_DIR}/Gopkg.lock" \ + "${REPO_ROOT_DIR}/apis" \ + "${REPO_ROOT_DIR}/logging" \ + "${REPO_ROOT_DIR}/testing" \ + "${TMP_DIFFROOT}" + +"${REPO_ROOT_DIR}/hack/update-codegen.sh" +echo "Diffing ${REPO_ROOT_DIR} against freshly generated codegen" +ret=0 + +diff -Naupr --no-dereference \ + "${REPO_ROOT_DIR}/Gopkg.lock" "${TMP_DIFFROOT}/Gopkg.lock" || ret=1 + +diff -Naupr --no-dereference \ + "${REPO_ROOT_DIR}/apis" "${TMP_DIFFROOT}/apis" || ret=1 + +diff -Naupr --no-dereference \ + "${REPO_ROOT_DIR}/logging" "${TMP_DIFFROOT}/logging" || ret=1 + +diff -Naupr --no-dereference \ + "${REPO_ROOT_DIR}/testing" "${TMP_DIFFROOT}/testing" || ret=1 + +# Restore working tree state +rm -fr \ + "${REPO_ROOT_DIR}/Gopkg.lock" \ + "${REPO_ROOT_DIR}/apis" \ + "${REPO_ROOT_DIR}/logging" \ + "${REPO_ROOT_DIR}/testing" + +cp -aR "${TMP_DIFFROOT}"/* "${REPO_ROOT_DIR}" + +if [[ $ret -eq 0 ]] +then + echo "${REPO_ROOT_DIR} up to date." + else + echo "${REPO_ROOT_DIR} is out of date. Please run hack/update-codegen.sh" + exit 1 +fi diff --git a/vendor/github.com/knative/pkg/injection/OWNERS b/vendor/github.com/knative/pkg/injection/OWNERS new file mode 100644 index 000000000..dda47512a --- /dev/null +++ b/vendor/github.com/knative/pkg/injection/OWNERS @@ -0,0 +1,5 @@ +# The OWNERS file is used by prow to automatically merge approved PRs. + +approvers: +- mattmoor +- n3wscott diff --git a/vendor/github.com/knative/pkg/injection/README.md b/vendor/github.com/knative/pkg/injection/README.md new file mode 100644 index 000000000..fce4aefc3 --- /dev/null +++ b/vendor/github.com/knative/pkg/injection/README.md @@ -0,0 +1,218 @@ +# Knative Dependency Injection + +This library supports the production of controller processes with minimal +boilerplate outside of the reconciler implementation. + +## Building Controllers + +To adopt this model of controller construction, implementations should start +with the following controller constructor: + +```go +import ( + "context" + + "github.com/knative/pkg/configmap" + "github.com/knative/pkg/controller" + "github.com/knative/pkg/logging" +) + +func NewController(ctx context.Context, cmw configmap.Watcher) *controller.Impl { + logger := logging.FromContext(ctx) + + // TODO(you): Access informers + + c := &Reconciler{ + // TODO(you): Pass listers, clients, and other stuff. + } + impl := controller.NewImpl(c, logger, "NameOfController") + + // TODO(you): Set up event handlers. + + return impl +} +``` + +## Consuming Informers + +Knative controllers use "informers" to set up the various event hooks needed to +queue work, and pass the "listers" fed by the informers' caches to the nested +"Reconciler" for accessing objects. + +Our controller constructor is passed a `context.Context` onto which we inject +any informers we access. The accessors for these informers are in little stub +libraries, which we have hand rolled for Kubernetes (more on how to generate +these below). + +```go +import ( + // These are how you access a client or informer off of the "ctx" passed + // to set up the controller. 
+ "github.com/knative/pkg/injection/clients/kubeclient" + svcinformer "github.com/knative/pkg/injection/informers/kubeinformers/corev1/service" + + // Other imports ... +) + +func NewController(ctx context.Context, cmw configmap.Watcher) *controller.Impl { + logger := logging.FromContext(ctx) + + // Access informers + svcInformer := svcinformer.Get(ctx) + + c := &Reconciler{ + // Pass the lister and client to the Reconciler. + Client: kubeclient.Get(ctx), + ServiceLister: svcInformer.Lister(), + } + impl := controller.NewImpl(c, logger, "NameOfController") + + // Set up event handlers. + svcInformer.Informer().AddEventHandler(...) + + return impl +} + +``` + +> How it works: by importing the accessor for a client or informer we link it +> and trigger the `init()` method for its package to run at startup. Each of +> these libraries registers themselves similar to our `init()` and controller +> processes can leverage this to setup and inject all of the registered things +> onto a context to pass to your `NewController()`. + +## Testing Controllers + +Similar to `injection.Default`, we also have `injection.Fake`. While linking the +normal accessors sets up the former, linking their fakes set up the latter. + +``` +import ( + "testing" + + // Link the fakes for any informers our controller accesses. + _ "github.com/knative/pkg/injection/informers/kubeinformers/corev1/service/fake" + + "k8s.io/client-go/rest" + "github.com/knative/pkg/injection" + logtesting "github.com/knative/pkg/logging/testing" +) + +func TestFoo(t *testing.T) { + ctx := logtesting.TestContextWithLogger(t) + + // Setup a context from all of the injected fakes. + ctx, _ = injection.Fake.SetupInformers(ctx, &rest.Config{}) + cmw := configmap.NewStaticWatcher(...) + ctrl := NewController(ctx, cmw) + + // Test the controller process. +} +``` + +The fake clients also support manually setting up contexts seeded with objects: + +``` +import ( + "testing" + + fakekubeclient "github.com/knative/pkg/injection/clients/kubeclient/fake" + + "k8s.io/client-go/rest" + "github.com/knative/pkg/injection" + logtesting "github.com/knative/pkg/logging/testing" +) + +func TestFoo(t *testing.T) { + ctx := logtesting.TestContextWithLogger(t) + + objs := []runtime.Object{ + // Some list of initial objects in the client. + } + + ctx, kubeClient := fakekubeclient.With(ctx, objs...) + + // The fake clients returned by our library are the actual fake type, + // which enables us to access test-specific methods, e.g. + kubeClient.AppendReactor(...) + + c := &Reconciler{ + Client: kubeClient, + } + + // Test the reconciler... +} +``` + +## Starting controllers + +All we do is import the controller packages and pass their constructors along +with a component name to our shared main. Then our shared main method sets it +all up and runs our controllers. + +```go +package main + +import ( + // The set of controllers this process will run. + "github.com/knative/foo/pkg/reconciler/bar" + "github.com/knative/baz/pkg/reconciler/blah" + + // This defines the shared main for injected controllers. + "github.com/knative/pkg/injection/sharedmain" +) + +func main() { + sharedmain.Main("component-name", + bar.NewController, + blah.NewController, + ) +} + +``` + +## Generating Injection Stubs. + +To make generating stubs simple, we have harnessed the Kubernetes +code-generation tooling to produce `injection-gen`. 
Similar to how you might
+ordinarily run the other `foo-gen` processes:
+
+```shell
+CODEGEN_PKG=${CODEGEN_PKG:-$(cd ${REPO_ROOT}; ls -d -1 ./vendor/k8s.io/code-generator 2>/dev/null || echo ../code-generator)}
+
+${CODEGEN_PKG}/generate-groups.sh "deepcopy,client,informer,lister" \
+  github.com/knative/sample-controller/pkg/client github.com/knative/sample-controller/pkg/apis \
+  "samples:v1alpha1" \
+  --go-header-file ${REPO_ROOT}/hack/boilerplate/boilerplate.go.txt
+```
+
+To run `injection-gen`, you run the following (replacing the import path and API
+group):
+
+```shell
+KNATIVE_CODEGEN_PKG=${KNATIVE_CODEGEN_PKG:-$(cd ${REPO_ROOT}; ls -d -1 ./vendor/github.com/knative/pkg 2>/dev/null || echo ../pkg)}
+
+${KNATIVE_CODEGEN_PKG}/hack/generate-knative.sh "injection" \
+  github.com/knative/sample-controller/pkg/client github.com/knative/sample-controller/pkg/apis \
+  "samples:v1alpha1" \
+  --go-header-file ${REPO_ROOT}/hack/boilerplate/boilerplate.go.txt
+```
+
+To ensure the appropriate tooling is vendored, add the following to
+`Gopkg.toml`:
+
+```toml
+required = [
+  "github.com/knative/pkg/codegen/cmd/injection-gen",
+]
+
+# .. Constraints
+
+# Keeps things like the generate-knative.sh script
+[[prune.project]]
+  name = "github.com/knative/pkg"
+  unused-packages = false
+  non-go = false
+```
diff --git a/vendor/github.com/knative/pkg/injection/clients.go b/vendor/github.com/knative/pkg/injection/clients.go
new file mode 100644
index 000000000..5c464924c
--- /dev/null
+++ b/vendor/github.com/knative/pkg/injection/clients.go
@@ -0,0 +1,42 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package injection
+
+import (
+	"context"
+
+	"k8s.io/client-go/rest"
+)
+
+// ClientInjector holds the type of a callback that attaches a particular
+// client type to a context.
+type ClientInjector func(context.Context, *rest.Config) context.Context
+
+func (i *impl) RegisterClient(ci ClientInjector) {
+	i.m.Lock()
+	defer i.m.Unlock()
+
+	i.clients = append(i.clients, ci)
+}
+
+func (i *impl) GetClients() []ClientInjector {
+	i.m.RLock()
+	defer i.m.RUnlock()
+
+	// Copy the slice before returning.
+	return append(i.clients[:0:0], i.clients...)
+}
diff --git a/vendor/github.com/knative/pkg/injection/clients/apiextclient/apiext.go b/vendor/github.com/knative/pkg/injection/clients/apiextclient/apiext.go
new file mode 100644
index 000000000..258ce7ff0
--- /dev/null
+++ b/vendor/github.com/knative/pkg/injection/clients/apiextclient/apiext.go
@@ -0,0 +1,49 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package apiextclient + +import ( + "context" + + "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" + "k8s.io/client-go/rest" + + "github.com/knative/pkg/injection" + "github.com/knative/pkg/logging" +) + +func init() { + injection.Default.RegisterClient(withClient) +} + +// Key is used as the key for associating information +// with a context.Context. +type Key struct{} + +func withClient(ctx context.Context, cfg *rest.Config) context.Context { + return context.WithValue(ctx, Key{}, clientset.NewForConfigOrDie(cfg)) +} + +// Get extracts the Kubernetes Api Extensions client from the context. +func Get(ctx context.Context) clientset.Interface { + untyped := ctx.Value(Key{}) + if untyped == nil { + logging.FromContext(ctx).Panicf( + "Unable to fetch %T from context.", (clientset.Interface)(nil)) + } + return untyped.(clientset.Interface) +} diff --git a/vendor/github.com/knative/pkg/injection/clients/apiextclient/fake/fake.go b/vendor/github.com/knative/pkg/injection/clients/apiextclient/fake/fake.go new file mode 100644 index 000000000..ab3dc6815 --- /dev/null +++ b/vendor/github.com/knative/pkg/injection/clients/apiextclient/fake/fake.go @@ -0,0 +1,53 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + "context" + + "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/fake" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/rest" + + "github.com/knative/pkg/injection" + "github.com/knative/pkg/injection/clients/apiextclient" + "github.com/knative/pkg/logging" +) + +func init() { + injection.Fake.RegisterClient(withClient) +} + +func withClient(ctx context.Context, cfg *rest.Config) context.Context { + ctx, _ = With(ctx) + return ctx +} + +func With(ctx context.Context, objects ...runtime.Object) (context.Context, *fake.Clientset) { + cs := fake.NewSimpleClientset(objects...) + return context.WithValue(ctx, apiextclient.Key{}, cs), cs +} + +// Get extracts the Kubernetes Api Extensions client from the context. +func Get(ctx context.Context) *fake.Clientset { + untyped := ctx.Value(apiextclient.Key{}) + if untyped == nil { + logging.FromContext(ctx).Panicf( + "Unable to fetch %T from context.", (*fake.Clientset)(nil)) + } + return untyped.(*fake.Clientset) +} diff --git a/vendor/github.com/knative/pkg/injection/clients/dynamicclient/dynamicclient.go b/vendor/github.com/knative/pkg/injection/clients/dynamicclient/dynamicclient.go new file mode 100644 index 000000000..27e55b8a4 --- /dev/null +++ b/vendor/github.com/knative/pkg/injection/clients/dynamicclient/dynamicclient.go @@ -0,0 +1,49 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package dynamicclient + +import ( + "context" + + "k8s.io/client-go/dynamic" + "k8s.io/client-go/rest" + + "github.com/knative/pkg/injection" + "github.com/knative/pkg/logging" +) + +func init() { + injection.Default.RegisterClient(withClient) +} + +// Key is used as the key for associating information +// with a context.Context. +type Key struct{} + +func withClient(ctx context.Context, cfg *rest.Config) context.Context { + return context.WithValue(ctx, Key{}, dynamic.NewForConfigOrDie(cfg)) +} + +// Get extracts the Dynamic client from the context. +func Get(ctx context.Context) dynamic.Interface { + untyped := ctx.Value(Key{}) + if untyped == nil { + logging.FromContext(ctx).Panicf( + "Unable to fetch %T from context.", (dynamic.Interface)(nil)) + } + return untyped.(dynamic.Interface) +} diff --git a/vendor/github.com/knative/pkg/injection/clients/dynamicclient/fake/fake.go b/vendor/github.com/knative/pkg/injection/clients/dynamicclient/fake/fake.go new file mode 100644 index 000000000..62759e614 --- /dev/null +++ b/vendor/github.com/knative/pkg/injection/clients/dynamicclient/fake/fake.go @@ -0,0 +1,53 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + "context" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/dynamic/fake" + "k8s.io/client-go/rest" + + "github.com/knative/pkg/injection" + "github.com/knative/pkg/injection/clients/dynamicclient" + "github.com/knative/pkg/logging" +) + +func init() { + injection.Fake.RegisterClient(withClient) +} + +func withClient(ctx context.Context, cfg *rest.Config) context.Context { + ctx, _ = With(ctx, runtime.NewScheme()) + return ctx +} + +func With(ctx context.Context, scheme *runtime.Scheme, objects ...runtime.Object) (context.Context, *fake.FakeDynamicClient) { + cs := fake.NewSimpleDynamicClient(scheme, objects...) + return context.WithValue(ctx, dynamicclient.Key{}, cs), cs +} + +// Get extracts the Kubernetes client from the context. 
+func Get(ctx context.Context) *fake.FakeDynamicClient { + untyped := ctx.Value(dynamicclient.Key{}) + if untyped == nil { + logging.FromContext(ctx).Panicf( + "Unable to fetch %T from context.", (*fake.FakeDynamicClient)(nil)) + } + return untyped.(*fake.FakeDynamicClient) +} diff --git a/vendor/github.com/knative/pkg/injection/clients/kubeclient/fake/fake.go b/vendor/github.com/knative/pkg/injection/clients/kubeclient/fake/fake.go new file mode 100644 index 000000000..1c7806191 --- /dev/null +++ b/vendor/github.com/knative/pkg/injection/clients/kubeclient/fake/fake.go @@ -0,0 +1,53 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + "context" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/kubernetes/fake" + "k8s.io/client-go/rest" + + "github.com/knative/pkg/injection" + "github.com/knative/pkg/injection/clients/kubeclient" + "github.com/knative/pkg/logging" +) + +func init() { + injection.Fake.RegisterClient(withClient) +} + +func withClient(ctx context.Context, cfg *rest.Config) context.Context { + ctx, _ = With(ctx) + return ctx +} + +func With(ctx context.Context, objects ...runtime.Object) (context.Context, *fake.Clientset) { + cs := fake.NewSimpleClientset(objects...) + return context.WithValue(ctx, kubeclient.Key{}, cs), cs +} + +// Get extracts the Kubernetes client from the context. +func Get(ctx context.Context) *fake.Clientset { + untyped := ctx.Value(kubeclient.Key{}) + if untyped == nil { + logging.FromContext(ctx).Panicf( + "Unable to fetch %T from context.", (*fake.Clientset)(nil)) + } + return untyped.(*fake.Clientset) +} diff --git a/vendor/github.com/knative/pkg/injection/clients/kubeclient/kubeclient.go b/vendor/github.com/knative/pkg/injection/clients/kubeclient/kubeclient.go new file mode 100644 index 000000000..1cc05f64d --- /dev/null +++ b/vendor/github.com/knative/pkg/injection/clients/kubeclient/kubeclient.go @@ -0,0 +1,49 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kubeclient + +import ( + "context" + + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + + "github.com/knative/pkg/injection" + "github.com/knative/pkg/logging" +) + +func init() { + injection.Default.RegisterClient(withClient) +} + +// Key is used as the key for associating information +// with a context.Context. 
+type Key struct{} + +func withClient(ctx context.Context, cfg *rest.Config) context.Context { + return context.WithValue(ctx, Key{}, kubernetes.NewForConfigOrDie(cfg)) +} + +// Get extracts the Kubernetes client from the context. +func Get(ctx context.Context) kubernetes.Interface { + untyped := ctx.Value(Key{}) + if untyped == nil { + logging.FromContext(ctx).Panicf( + "Unable to fetch %T from context.", (kubernetes.Interface)(nil)) + } + return untyped.(kubernetes.Interface) +} diff --git a/vendor/github.com/knative/pkg/injection/doc.go b/vendor/github.com/knative/pkg/injection/doc.go new file mode 100644 index 000000000..857e3d728 --- /dev/null +++ b/vendor/github.com/knative/pkg/injection/doc.go @@ -0,0 +1,68 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package injection defines the mechanisms through which clients, informers +// and shared informer factories are injected into a shared controller binary +// implementation. +// +// There are two primary contexts where the usage of the injection package is +// interesting. The first is in the context of implementations of +// `controller.Reconciler` being wrapped in a `*controller.Impl`: +// +// import ( +// // Simply linking this triggers the injection of the informer, which links +// // the factory triggering its injection, and which links the client, +// // triggering its injection. All you need to know is that it works :) +// deployinformer "github.com/knative/pkg/injection/informers/kubeinformers/appsv1/deployment" +// "github.com/knative/pkg/injection" +// ) +// +// func NewController(ctx context.Context) *controller.Impl { +// deploymentInformer := deployinformer.Get(ctx) +// // Pass deploymentInformer.Lister() to Reconciler +// ... +// // Set up events on deploymentInformer.Informer() +// ... +// } +// +// Then in `package main` the entire controller process can be set up via: +// +// package main +// +// import ( +// // The set of controllers this controller process runs. +// // Linking these will register their transitive dependencies, after +// // which the shared main can set up the rest. +// "github.com/knative/foo/pkg/reconciler/matt" +// "github.com/knative/foo/pkg/reconciler/scott" +// "github.com/knative/foo/pkg/reconciler/ville" +// "github.com/knative/foo/pkg/reconciler/dave" +// +// // This defines the shared main for injected controllers. +// "github.com/knative/pkg/injection/sharedmain" +// ) +// +// func main() { +// sharedmain.Main("my-component", +// // We pass in the list of controllers to construct, and that's it! +// // If we forget to add this, go will complain about the unused import. 
+// matt.NewController, +// scott.NewController, +// ville.NewController, +// dave.NewController, +// ) +// } +package injection diff --git a/vendor/github.com/knative/pkg/injection/factories.go b/vendor/github.com/knative/pkg/injection/factories.go new file mode 100644 index 000000000..fc913612a --- /dev/null +++ b/vendor/github.com/knative/pkg/injection/factories.go @@ -0,0 +1,40 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package injection + +import ( + "context" +) + +// InformerFactoryInjector holds the type of a callback that attaches a particular +// factory type to a context. +type InformerFactoryInjector func(context.Context) context.Context + +func (i *impl) RegisterInformerFactory(ifi InformerFactoryInjector) { + i.m.Lock() + defer i.m.Unlock() + + i.factories = append(i.factories, ifi) +} + +func (i *impl) GetInformerFactories() []InformerFactoryInjector { + i.m.RLock() + defer i.m.RUnlock() + + // Copy the slice before returning. + return append(i.factories[:0:0], i.factories...) +} diff --git a/vendor/github.com/knative/pkg/injection/informers.go b/vendor/github.com/knative/pkg/injection/informers.go new file mode 100644 index 000000000..0425ae498 --- /dev/null +++ b/vendor/github.com/knative/pkg/injection/informers.go @@ -0,0 +1,68 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package injection + +import ( + "context" + + "k8s.io/client-go/rest" + + "github.com/knative/pkg/controller" +) + +// InformerInjector holds the type of a callback that attaches a particular +// informer type to a context. +type InformerInjector func(context.Context) (context.Context, controller.Informer) + +func (i *impl) RegisterInformer(ii InformerInjector) { + i.m.Lock() + defer i.m.Unlock() + + i.informers = append(i.informers, ii) +} + +func (i *impl) GetInformers() []InformerInjector { + i.m.RLock() + defer i.m.RUnlock() + + // Copy the slice before returning. + return append(i.informers[:0:0], i.informers...) +} + +func (i *impl) SetupInformers(ctx context.Context, cfg *rest.Config) (context.Context, []controller.Informer) { + // Based on the reconcilers we have linked, build up a set of clients and inject + // them onto the context. + for _, ci := range i.GetClients() { + ctx = ci(ctx, cfg) + } + + // Based on the reconcilers we have linked, build up a set of informer factories + // and inject them onto the context. 
+	for _, ifi := range i.GetInformerFactories() {
+		ctx = ifi(ctx)
+	}
+
+	// Based on the reconcilers we have linked, build up a set of informers
+	// and inject them onto the context.
+	var inf controller.Informer
+	informers := make([]controller.Informer, 0, len(i.GetInformers()))
+	for _, ii := range i.GetInformers() {
+		ctx, inf = ii(ctx)
+		informers = append(informers, inf)
+	}
+	return ctx, informers
+}
diff --git a/vendor/github.com/knative/pkg/injection/informers/apiextinformers/apiextensionsv1beta1/crd/crd.go b/vendor/github.com/knative/pkg/injection/informers/apiextinformers/apiextensionsv1beta1/crd/crd.go
new file mode 100644
index 000000000..3ea6e9fb3
--- /dev/null
+++ b/vendor/github.com/knative/pkg/injection/informers/apiextinformers/apiextensionsv1beta1/crd/crd.go
@@ -0,0 +1,52 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package crd
+
+import (
+	"context"
+
+	apiextv1beta1 "k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/apiextensions/v1beta1"
+
+	"github.com/knative/pkg/controller"
+	"github.com/knative/pkg/injection"
+	"github.com/knative/pkg/injection/informers/apiextinformers/factory"
+	"github.com/knative/pkg/logging"
+)
+
+func init() {
+	injection.Default.RegisterInformer(withInformer)
+}
+
+// Key is used as the key for associating information
+// with a context.Context.
+type Key struct{}
+
+func withInformer(ctx context.Context) (context.Context, controller.Informer) {
+	f := factory.Get(ctx)
+	inf := f.Apiextensions().V1beta1().CustomResourceDefinitions()
+	return context.WithValue(ctx, Key{}, inf), inf.Informer()
+}
+
+// Get extracts the Kubernetes CustomResourceDefinition informer from the context.
+func Get(ctx context.Context) apiextv1beta1.CustomResourceDefinitionInformer {
+	untyped := ctx.Value(Key{})
+	if untyped == nil {
+		logging.FromContext(ctx).Panicf(
+			"Unable to fetch %T from context.", (apiextv1beta1.CustomResourceDefinitionInformer)(nil))
+	}
+	return untyped.(apiextv1beta1.CustomResourceDefinitionInformer)
+}
diff --git a/vendor/github.com/knative/pkg/injection/informers/apiextinformers/apiextensionsv1beta1/crd/fake/fake.go b/vendor/github.com/knative/pkg/injection/informers/apiextinformers/apiextensionsv1beta1/crd/fake/fake.go
new file mode 100644
index 000000000..d501712c4
--- /dev/null
+++ b/vendor/github.com/knative/pkg/injection/informers/apiextinformers/apiextensionsv1beta1/crd/fake/fake.go
@@ -0,0 +1,38 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package fake + +import ( + "context" + + "github.com/knative/pkg/controller" + "github.com/knative/pkg/injection" + "github.com/knative/pkg/injection/informers/apiextinformers/apiextensionsv1beta1/crd" + "github.com/knative/pkg/injection/informers/apiextinformers/factory/fake" +) + +var Get = crd.Get + +func init() { + injection.Fake.RegisterInformer(withInformer) +} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := fake.Get(ctx) + inf := f.Apiextensions().V1beta1().CustomResourceDefinitions() + return context.WithValue(ctx, crd.Key{}, inf), inf.Informer() +} diff --git a/vendor/github.com/knative/pkg/injection/informers/apiextinformers/factory/factory.go b/vendor/github.com/knative/pkg/injection/informers/apiextinformers/factory/factory.go new file mode 100644 index 000000000..8ce6976b9 --- /dev/null +++ b/vendor/github.com/knative/pkg/injection/informers/apiextinformers/factory/factory.go @@ -0,0 +1,53 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package factory + +import ( + "context" + + "github.com/knative/pkg/logging" + + informers "k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions" + + "github.com/knative/pkg/controller" + "github.com/knative/pkg/injection" + "github.com/knative/pkg/injection/clients/apiextclient" +) + +func init() { + injection.Default.RegisterInformerFactory(withInformerFactory) +} + +// Key is used as the key for associating information +// with a context.Context. +type Key struct{} + +func withInformerFactory(ctx context.Context) context.Context { + axc := apiextclient.Get(ctx) + return context.WithValue(ctx, Key{}, + informers.NewSharedInformerFactory(axc, controller.GetResyncPeriod(ctx))) +} + +// Get extracts the Kubernetes Api Extensions InformerFactory from the context. +func Get(ctx context.Context) informers.SharedInformerFactory { + untyped := ctx.Value(Key{}) + if untyped == nil { + logging.FromContext(ctx).Panicf( + "Unable to fetch %T from context.", (informers.SharedInformerFactory)(nil)) + } + return untyped.(informers.SharedInformerFactory) +} diff --git a/vendor/github.com/knative/pkg/injection/informers/apiextinformers/factory/fake/fake.go b/vendor/github.com/knative/pkg/injection/informers/apiextinformers/factory/fake/fake.go new file mode 100644 index 000000000..ec3d3d6aa --- /dev/null +++ b/vendor/github.com/knative/pkg/injection/informers/apiextinformers/factory/fake/fake.go @@ -0,0 +1,40 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + "context" + + informers "k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions" + + "github.com/knative/pkg/controller" + "github.com/knative/pkg/injection" + "github.com/knative/pkg/injection/clients/apiextclient/fake" + "github.com/knative/pkg/injection/informers/apiextinformers/factory" +) + +var Get = factory.Get + +func init() { + injection.Fake.RegisterInformerFactory(withInformerFactory) +} + +func withInformerFactory(ctx context.Context) context.Context { + kc := fake.Get(ctx) + return context.WithValue(ctx, factory.Key{}, + informers.NewSharedInformerFactory(kc, controller.GetResyncPeriod(ctx))) +} diff --git a/vendor/github.com/knative/pkg/injection/informers/kubeinformers/appsv1/deployment/deployment.go b/vendor/github.com/knative/pkg/injection/informers/kubeinformers/appsv1/deployment/deployment.go new file mode 100644 index 000000000..6f712497c --- /dev/null +++ b/vendor/github.com/knative/pkg/injection/informers/kubeinformers/appsv1/deployment/deployment.go @@ -0,0 +1,52 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package deployment + +import ( + "context" + + appsv1 "k8s.io/client-go/informers/apps/v1" + + "github.com/knative/pkg/controller" + "github.com/knative/pkg/injection" + "github.com/knative/pkg/injection/informers/kubeinformers/factory" + "github.com/knative/pkg/logging" +) + +func init() { + injection.Default.RegisterInformer(withInformer) +} + +// Key is used as the key for associating information +// with a context.Context. +type Key struct{} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := factory.Get(ctx) + inf := f.Apps().V1().Deployments() + return context.WithValue(ctx, Key{}, inf), inf.Informer() +} + +// Get extracts the Kubernetes Deployment informer from the context. +func Get(ctx context.Context) appsv1.DeploymentInformer { + untyped := ctx.Value(Key{}) + if untyped == nil { + logging.FromContext(ctx).Panicf( + "Unable to fetch %T from context.", (appsv1.DeploymentInformer)(nil)) + } + return untyped.(appsv1.DeploymentInformer) +} diff --git a/vendor/github.com/knative/pkg/injection/informers/kubeinformers/appsv1/deployment/fake/fake.go b/vendor/github.com/knative/pkg/injection/informers/kubeinformers/appsv1/deployment/fake/fake.go new file mode 100644 index 000000000..51cebf9e8 --- /dev/null +++ b/vendor/github.com/knative/pkg/injection/informers/kubeinformers/appsv1/deployment/fake/fake.go @@ -0,0 +1,38 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + "context" + + "github.com/knative/pkg/controller" + "github.com/knative/pkg/injection" + "github.com/knative/pkg/injection/informers/kubeinformers/appsv1/deployment" + "github.com/knative/pkg/injection/informers/kubeinformers/factory/fake" +) + +var Get = deployment.Get + +func init() { + injection.Fake.RegisterInformer(withInformer) +} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := fake.Get(ctx) + inf := f.Apps().V1().Deployments() + return context.WithValue(ctx, deployment.Key{}, inf), inf.Informer() +} diff --git a/vendor/github.com/knative/pkg/injection/informers/kubeinformers/autoscalingv1/hpa/fake/fake.go b/vendor/github.com/knative/pkg/injection/informers/kubeinformers/autoscalingv1/hpa/fake/fake.go new file mode 100644 index 000000000..4e33caed3 --- /dev/null +++ b/vendor/github.com/knative/pkg/injection/informers/kubeinformers/autoscalingv1/hpa/fake/fake.go @@ -0,0 +1,38 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + "context" + + "github.com/knative/pkg/controller" + "github.com/knative/pkg/injection" + "github.com/knative/pkg/injection/informers/kubeinformers/autoscalingv1/hpa" + "github.com/knative/pkg/injection/informers/kubeinformers/factory/fake" +) + +var Get = hpa.Get + +func init() { + injection.Fake.RegisterInformer(withInformer) +} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := fake.Get(ctx) + inf := f.Autoscaling().V1().HorizontalPodAutoscalers() + return context.WithValue(ctx, hpa.Key{}, inf), inf.Informer() +} diff --git a/vendor/github.com/knative/pkg/injection/informers/kubeinformers/autoscalingv1/hpa/hpa.go b/vendor/github.com/knative/pkg/injection/informers/kubeinformers/autoscalingv1/hpa/hpa.go new file mode 100644 index 000000000..59d361524 --- /dev/null +++ b/vendor/github.com/knative/pkg/injection/informers/kubeinformers/autoscalingv1/hpa/hpa.go @@ -0,0 +1,52 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package hpa + +import ( + "context" + + autoscalingv1 "k8s.io/client-go/informers/autoscaling/v1" + + "github.com/knative/pkg/controller" + "github.com/knative/pkg/injection" + "github.com/knative/pkg/injection/informers/kubeinformers/factory" + "github.com/knative/pkg/logging" +) + +func init() { + injection.Default.RegisterInformer(withInformer) +} + +// Key is used as the key for associating information +// with a context.Context. +type Key struct{} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := factory.Get(ctx) + inf := f.Autoscaling().V1().HorizontalPodAutoscalers() + return context.WithValue(ctx, Key{}, inf), inf.Informer() +} + +// Get extracts the Kubernetes Hpa informer from the context. +func Get(ctx context.Context) autoscalingv1.HorizontalPodAutoscalerInformer { + untyped := ctx.Value(Key{}) + if untyped == nil { + logging.FromContext(ctx).Panicf( + "Unable to fetch %T from context.", (autoscalingv1.HorizontalPodAutoscalerInformer)(nil)) + } + return untyped.(autoscalingv1.HorizontalPodAutoscalerInformer) +} diff --git a/vendor/github.com/knative/pkg/injection/informers/kubeinformers/autoscalingv2beta1/hpa/fake/fake.go b/vendor/github.com/knative/pkg/injection/informers/kubeinformers/autoscalingv2beta1/hpa/fake/fake.go new file mode 100644 index 000000000..5a9fabd0c --- /dev/null +++ b/vendor/github.com/knative/pkg/injection/informers/kubeinformers/autoscalingv2beta1/hpa/fake/fake.go @@ -0,0 +1,38 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + "context" + + "github.com/knative/pkg/controller" + "github.com/knative/pkg/injection" + "github.com/knative/pkg/injection/informers/kubeinformers/autoscalingv2beta1/hpa" + "github.com/knative/pkg/injection/informers/kubeinformers/factory/fake" +) + +var Get = hpa.Get + +func init() { + injection.Fake.RegisterInformer(withInformer) +} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := fake.Get(ctx) + inf := f.Autoscaling().V2beta1().HorizontalPodAutoscalers() + return context.WithValue(ctx, hpa.Key{}, inf), inf.Informer() +} diff --git a/vendor/github.com/knative/pkg/injection/informers/kubeinformers/autoscalingv2beta1/hpa/hpa.go b/vendor/github.com/knative/pkg/injection/informers/kubeinformers/autoscalingv2beta1/hpa/hpa.go new file mode 100644 index 000000000..5e2f482a7 --- /dev/null +++ b/vendor/github.com/knative/pkg/injection/informers/kubeinformers/autoscalingv2beta1/hpa/hpa.go @@ -0,0 +1,52 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package hpa + +import ( + "context" + + autoscalingv2beta1 "k8s.io/client-go/informers/autoscaling/v2beta1" + + "github.com/knative/pkg/controller" + "github.com/knative/pkg/injection" + "github.com/knative/pkg/injection/informers/kubeinformers/factory" + "github.com/knative/pkg/logging" +) + +func init() { + injection.Default.RegisterInformer(withInformer) +} + +// Key is used as the key for associating information +// with a context.Context. +type Key struct{} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := factory.Get(ctx) + inf := f.Autoscaling().V2beta1().HorizontalPodAutoscalers() + return context.WithValue(ctx, Key{}, inf), inf.Informer() +} + +// Get extracts the Kubernetes Hpa informer from the context. +func Get(ctx context.Context) autoscalingv2beta1.HorizontalPodAutoscalerInformer { + untyped := ctx.Value(Key{}) + if untyped == nil { + logging.FromContext(ctx).Panicf( + "Unable to fetch %T from context.", (autoscalingv2beta1.HorizontalPodAutoscalerInformer)(nil)) + } + return untyped.(autoscalingv2beta1.HorizontalPodAutoscalerInformer) +} diff --git a/vendor/github.com/knative/pkg/injection/informers/kubeinformers/batchv1/job/fake/fake.go b/vendor/github.com/knative/pkg/injection/informers/kubeinformers/batchv1/job/fake/fake.go new file mode 100644 index 000000000..a0cb5d729 --- /dev/null +++ b/vendor/github.com/knative/pkg/injection/informers/kubeinformers/batchv1/job/fake/fake.go @@ -0,0 +1,38 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + "context" + + "github.com/knative/pkg/controller" + "github.com/knative/pkg/injection" + "github.com/knative/pkg/injection/informers/kubeinformers/batchv1/job" + "github.com/knative/pkg/injection/informers/kubeinformers/factory/fake" +) + +var Get = job.Get + +func init() { + injection.Fake.RegisterInformer(withInformer) +} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := fake.Get(ctx) + inf := f.Batch().V1().Jobs() + return context.WithValue(ctx, job.Key{}, inf), inf.Informer() +} diff --git a/vendor/github.com/knative/pkg/injection/informers/kubeinformers/batchv1/job/job.go b/vendor/github.com/knative/pkg/injection/informers/kubeinformers/batchv1/job/job.go new file mode 100644 index 000000000..422b1efe9 --- /dev/null +++ b/vendor/github.com/knative/pkg/injection/informers/kubeinformers/batchv1/job/job.go @@ -0,0 +1,52 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package job + +import ( + "context" + + batchv1 "k8s.io/client-go/informers/batch/v1" + + "github.com/knative/pkg/controller" + "github.com/knative/pkg/injection" + "github.com/knative/pkg/injection/informers/kubeinformers/factory" + "github.com/knative/pkg/logging" +) + +func init() { + injection.Default.RegisterInformer(withInformer) +} + +// Key is used as the key for associating information +// with a context.Context. +type Key struct{} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := factory.Get(ctx) + inf := f.Batch().V1().Jobs() + return context.WithValue(ctx, Key{}, inf), inf.Informer() +} + +// Get extracts the Kubernetes Job informer from the context. +func Get(ctx context.Context) batchv1.JobInformer { + untyped := ctx.Value(Key{}) + if untyped == nil { + logging.FromContext(ctx).Panicf( + "Unable to fetch %T from context.", (batchv1.JobInformer)(nil)) + } + return untyped.(batchv1.JobInformer) +} diff --git a/vendor/github.com/knative/pkg/injection/informers/kubeinformers/corev1/configmap/configmap.go b/vendor/github.com/knative/pkg/injection/informers/kubeinformers/corev1/configmap/configmap.go new file mode 100644 index 000000000..815d64fde --- /dev/null +++ b/vendor/github.com/knative/pkg/injection/informers/kubeinformers/corev1/configmap/configmap.go @@ -0,0 +1,52 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package configmap + +import ( + "context" + + corev1 "k8s.io/client-go/informers/core/v1" + + "github.com/knative/pkg/controller" + "github.com/knative/pkg/injection" + "github.com/knative/pkg/injection/informers/kubeinformers/factory" + "github.com/knative/pkg/logging" +) + +func init() { + injection.Default.RegisterInformer(withInformer) +} + +// Key is used as the key for associating information +// with a context.Context. +type Key struct{} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := factory.Get(ctx) + inf := f.Core().V1().ConfigMaps() + return context.WithValue(ctx, Key{}, inf), inf.Informer() +} + +// Get extracts the Kubernetes ConfigMap informer from the context. 
+func Get(ctx context.Context) corev1.ConfigMapInformer { + untyped := ctx.Value(Key{}) + if untyped == nil { + logging.FromContext(ctx).Panicf( + "Unable to fetch %T from context.", (corev1.ConfigMapInformer)(nil)) + } + return untyped.(corev1.ConfigMapInformer) +} diff --git a/vendor/github.com/knative/pkg/injection/informers/kubeinformers/corev1/configmap/fake/fake.go b/vendor/github.com/knative/pkg/injection/informers/kubeinformers/corev1/configmap/fake/fake.go new file mode 100644 index 000000000..089cb95f0 --- /dev/null +++ b/vendor/github.com/knative/pkg/injection/informers/kubeinformers/corev1/configmap/fake/fake.go @@ -0,0 +1,38 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + "context" + + "github.com/knative/pkg/controller" + "github.com/knative/pkg/injection" + "github.com/knative/pkg/injection/informers/kubeinformers/corev1/configmap" + "github.com/knative/pkg/injection/informers/kubeinformers/factory/fake" +) + +var Get = configmap.Get + +func init() { + injection.Fake.RegisterInformer(withInformer) +} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := fake.Get(ctx) + inf := f.Core().V1().ConfigMaps() + return context.WithValue(ctx, configmap.Key{}, inf), inf.Informer() +} diff --git a/vendor/github.com/knative/pkg/injection/informers/kubeinformers/corev1/endpoints/endpoints.go b/vendor/github.com/knative/pkg/injection/informers/kubeinformers/corev1/endpoints/endpoints.go new file mode 100644 index 000000000..e3bb337c5 --- /dev/null +++ b/vendor/github.com/knative/pkg/injection/informers/kubeinformers/corev1/endpoints/endpoints.go @@ -0,0 +1,52 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package endpoints + +import ( + "context" + + corev1 "k8s.io/client-go/informers/core/v1" + + "github.com/knative/pkg/controller" + "github.com/knative/pkg/injection" + "github.com/knative/pkg/injection/informers/kubeinformers/factory" + "github.com/knative/pkg/logging" +) + +func init() { + injection.Default.RegisterInformer(withInformer) +} + +// Key is used as the key for associating information +// with a context.Context. +type Key struct{} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := factory.Get(ctx) + inf := f.Core().V1().Endpoints() + return context.WithValue(ctx, Key{}, inf), inf.Informer() +} + +// Get extracts the Kubernetes Endpoints informer from the context. 
+func Get(ctx context.Context) corev1.EndpointsInformer { + untyped := ctx.Value(Key{}) + if untyped == nil { + logging.FromContext(ctx).Panicf( + "Unable to fetch %T from context.", (corev1.EndpointsInformer)(nil)) + } + return untyped.(corev1.EndpointsInformer) +} diff --git a/vendor/github.com/knative/pkg/injection/informers/kubeinformers/corev1/endpoints/fake/fake.go b/vendor/github.com/knative/pkg/injection/informers/kubeinformers/corev1/endpoints/fake/fake.go new file mode 100644 index 000000000..2504c1bde --- /dev/null +++ b/vendor/github.com/knative/pkg/injection/informers/kubeinformers/corev1/endpoints/fake/fake.go @@ -0,0 +1,38 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + "context" + + "github.com/knative/pkg/controller" + "github.com/knative/pkg/injection" + "github.com/knative/pkg/injection/informers/kubeinformers/corev1/endpoints" + "github.com/knative/pkg/injection/informers/kubeinformers/factory/fake" +) + +var Get = endpoints.Get + +func init() { + injection.Fake.RegisterInformer(withInformer) +} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := fake.Get(ctx) + inf := f.Core().V1().Endpoints() + return context.WithValue(ctx, endpoints.Key{}, inf), inf.Informer() +} diff --git a/vendor/github.com/knative/pkg/injection/informers/kubeinformers/corev1/namespace/fake/fake.go b/vendor/github.com/knative/pkg/injection/informers/kubeinformers/corev1/namespace/fake/fake.go new file mode 100644 index 000000000..fa76a3628 --- /dev/null +++ b/vendor/github.com/knative/pkg/injection/informers/kubeinformers/corev1/namespace/fake/fake.go @@ -0,0 +1,38 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package fake + +import ( + "context" + + "github.com/knative/pkg/controller" + "github.com/knative/pkg/injection" + "github.com/knative/pkg/injection/informers/kubeinformers/corev1/namespace" + "github.com/knative/pkg/injection/informers/kubeinformers/factory/fake" +) + +var Get = namespace.Get + +func init() { + injection.Fake.RegisterInformer(withInformer) +} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := fake.Get(ctx) + inf := f.Core().V1().Namespaces() + return context.WithValue(ctx, namespace.Key{}, inf), inf.Informer() +} diff --git a/vendor/github.com/knative/pkg/injection/informers/kubeinformers/corev1/namespace/namespace.go b/vendor/github.com/knative/pkg/injection/informers/kubeinformers/corev1/namespace/namespace.go new file mode 100644 index 000000000..ffc3237e6 --- /dev/null +++ b/vendor/github.com/knative/pkg/injection/informers/kubeinformers/corev1/namespace/namespace.go @@ -0,0 +1,52 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package namespace + +import ( + "context" + + corev1 "k8s.io/client-go/informers/core/v1" + + "github.com/knative/pkg/controller" + "github.com/knative/pkg/injection" + "github.com/knative/pkg/injection/informers/kubeinformers/factory" + "github.com/knative/pkg/logging" +) + +func init() { + injection.Default.RegisterInformer(withInformer) +} + +// Key is used as the key for associating information +// with a context.Context. +type Key struct{} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := factory.Get(ctx) + inf := f.Core().V1().Namespaces() + return context.WithValue(ctx, Key{}, inf), inf.Informer() +} + +// Get extracts the Kubernetes Namespace informer from the context. +func Get(ctx context.Context) corev1.NamespaceInformer { + untyped := ctx.Value(Key{}) + if untyped == nil { + logging.FromContext(ctx).Panicf( + "Unable to fetch %T from context.", (corev1.NamespaceInformer)(nil)) + } + return untyped.(corev1.NamespaceInformer) +} diff --git a/vendor/github.com/knative/pkg/injection/informers/kubeinformers/corev1/pod/fake/fake.go b/vendor/github.com/knative/pkg/injection/informers/kubeinformers/corev1/pod/fake/fake.go new file mode 100644 index 000000000..a4b8c8cee --- /dev/null +++ b/vendor/github.com/knative/pkg/injection/informers/kubeinformers/corev1/pod/fake/fake.go @@ -0,0 +1,38 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package fake + +import ( + "context" + + "github.com/knative/pkg/controller" + "github.com/knative/pkg/injection" + "github.com/knative/pkg/injection/informers/kubeinformers/corev1/pod" + "github.com/knative/pkg/injection/informers/kubeinformers/factory/fake" +) + +var Get = pod.Get + +func init() { + injection.Fake.RegisterInformer(withInformer) +} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := fake.Get(ctx) + inf := f.Core().V1().Pods() + return context.WithValue(ctx, pod.Key{}, inf), inf.Informer() +} diff --git a/vendor/github.com/knative/pkg/injection/informers/kubeinformers/corev1/pod/pod.go b/vendor/github.com/knative/pkg/injection/informers/kubeinformers/corev1/pod/pod.go new file mode 100644 index 000000000..2ce0f56a4 --- /dev/null +++ b/vendor/github.com/knative/pkg/injection/informers/kubeinformers/corev1/pod/pod.go @@ -0,0 +1,52 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package pod + +import ( + "context" + + corev1 "k8s.io/client-go/informers/core/v1" + + "github.com/knative/pkg/controller" + "github.com/knative/pkg/injection" + "github.com/knative/pkg/injection/informers/kubeinformers/factory" + "github.com/knative/pkg/logging" +) + +func init() { + injection.Default.RegisterInformer(withInformer) +} + +// Key is used as the key for associating information +// with a context.Context. +type Key struct{} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := factory.Get(ctx) + inf := f.Core().V1().Pods() + return context.WithValue(ctx, Key{}, inf), inf.Informer() +} + +// Get extracts the Kubernetes Pod informer from the context. +func Get(ctx context.Context) corev1.PodInformer { + untyped := ctx.Value(Key{}) + if untyped == nil { + logging.FromContext(ctx).Panicf( + "Unable to fetch %T from context.", (corev1.PodInformer)(nil)) + } + return untyped.(corev1.PodInformer) +} diff --git a/vendor/github.com/knative/pkg/injection/informers/kubeinformers/corev1/secret/fake/fake.go b/vendor/github.com/knative/pkg/injection/informers/kubeinformers/corev1/secret/fake/fake.go new file mode 100644 index 000000000..37a4a5153 --- /dev/null +++ b/vendor/github.com/knative/pkg/injection/informers/kubeinformers/corev1/secret/fake/fake.go @@ -0,0 +1,38 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package fake + +import ( + "context" + + "github.com/knative/pkg/controller" + "github.com/knative/pkg/injection" + "github.com/knative/pkg/injection/informers/kubeinformers/corev1/secret" + "github.com/knative/pkg/injection/informers/kubeinformers/factory/fake" +) + +var Get = secret.Get + +func init() { + injection.Fake.RegisterInformer(withInformer) +} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := fake.Get(ctx) + inf := f.Core().V1().Secrets() + return context.WithValue(ctx, secret.Key{}, inf), inf.Informer() +} diff --git a/vendor/github.com/knative/pkg/injection/informers/kubeinformers/corev1/secret/secret.go b/vendor/github.com/knative/pkg/injection/informers/kubeinformers/corev1/secret/secret.go new file mode 100644 index 000000000..30a9dc069 --- /dev/null +++ b/vendor/github.com/knative/pkg/injection/informers/kubeinformers/corev1/secret/secret.go @@ -0,0 +1,52 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package secret + +import ( + "context" + + corev1 "k8s.io/client-go/informers/core/v1" + + "github.com/knative/pkg/controller" + "github.com/knative/pkg/injection" + "github.com/knative/pkg/injection/informers/kubeinformers/factory" + "github.com/knative/pkg/logging" +) + +func init() { + injection.Default.RegisterInformer(withInformer) +} + +// Key is used as the key for associating information +// with a context.Context. +type Key struct{} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := factory.Get(ctx) + inf := f.Core().V1().Secrets() + return context.WithValue(ctx, Key{}, inf), inf.Informer() +} + +// Get extracts the Kubernetes Secret informer from the context. +func Get(ctx context.Context) corev1.SecretInformer { + untyped := ctx.Value(Key{}) + if untyped == nil { + logging.FromContext(ctx).Panicf( + "Unable to fetch %T from context.", (corev1.SecretInformer)(nil)) + } + return untyped.(corev1.SecretInformer) +} diff --git a/vendor/github.com/knative/pkg/injection/informers/kubeinformers/corev1/service/fake/fake.go b/vendor/github.com/knative/pkg/injection/informers/kubeinformers/corev1/service/fake/fake.go new file mode 100644 index 000000000..8c52d19ab --- /dev/null +++ b/vendor/github.com/knative/pkg/injection/informers/kubeinformers/corev1/service/fake/fake.go @@ -0,0 +1,38 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package fake + +import ( + "context" + + "github.com/knative/pkg/controller" + "github.com/knative/pkg/injection" + "github.com/knative/pkg/injection/informers/kubeinformers/corev1/service" + "github.com/knative/pkg/injection/informers/kubeinformers/factory/fake" +) + +var Get = service.Get + +func init() { + injection.Fake.RegisterInformer(withInformer) +} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := fake.Get(ctx) + inf := f.Core().V1().Services() + return context.WithValue(ctx, service.Key{}, inf), inf.Informer() +} diff --git a/vendor/github.com/knative/pkg/injection/informers/kubeinformers/corev1/service/service.go b/vendor/github.com/knative/pkg/injection/informers/kubeinformers/corev1/service/service.go new file mode 100644 index 000000000..442b29cea --- /dev/null +++ b/vendor/github.com/knative/pkg/injection/informers/kubeinformers/corev1/service/service.go @@ -0,0 +1,52 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package service + +import ( + "context" + + corev1 "k8s.io/client-go/informers/core/v1" + + "github.com/knative/pkg/controller" + "github.com/knative/pkg/injection" + "github.com/knative/pkg/injection/informers/kubeinformers/factory" + "github.com/knative/pkg/logging" +) + +func init() { + injection.Default.RegisterInformer(withInformer) +} + +// Key is used as the key for associating information +// with a context.Context. +type Key struct{} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := factory.Get(ctx) + inf := f.Core().V1().Services() + return context.WithValue(ctx, Key{}, inf), inf.Informer() +} + +// Get extracts the Kubernetes Service informer from the context. +func Get(ctx context.Context) corev1.ServiceInformer { + untyped := ctx.Value(Key{}) + if untyped == nil { + logging.FromContext(ctx).Panicf( + "Unable to fetch %T from context.", (corev1.ServiceInformer)(nil)) + } + return untyped.(corev1.ServiceInformer) +} diff --git a/vendor/github.com/knative/pkg/injection/informers/kubeinformers/corev1/serviceaccount/fake/fake.go b/vendor/github.com/knative/pkg/injection/informers/kubeinformers/corev1/serviceaccount/fake/fake.go new file mode 100644 index 000000000..f4e075895 --- /dev/null +++ b/vendor/github.com/knative/pkg/injection/informers/kubeinformers/corev1/serviceaccount/fake/fake.go @@ -0,0 +1,38 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package fake + +import ( + "context" + + "github.com/knative/pkg/controller" + "github.com/knative/pkg/injection" + "github.com/knative/pkg/injection/informers/kubeinformers/corev1/serviceaccount" + "github.com/knative/pkg/injection/informers/kubeinformers/factory/fake" +) + +var Get = serviceaccount.Get + +func init() { + injection.Fake.RegisterInformer(withInformer) +} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := fake.Get(ctx) + inf := f.Core().V1().ServiceAccounts() + return context.WithValue(ctx, serviceaccount.Key{}, inf), inf.Informer() +} diff --git a/vendor/github.com/knative/pkg/injection/informers/kubeinformers/corev1/serviceaccount/service.go b/vendor/github.com/knative/pkg/injection/informers/kubeinformers/corev1/serviceaccount/service.go new file mode 100644 index 000000000..7dc45f01c --- /dev/null +++ b/vendor/github.com/knative/pkg/injection/informers/kubeinformers/corev1/serviceaccount/service.go @@ -0,0 +1,52 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package serviceaccount + +import ( + "context" + + corev1 "k8s.io/client-go/informers/core/v1" + + "github.com/knative/pkg/controller" + "github.com/knative/pkg/injection" + "github.com/knative/pkg/injection/informers/kubeinformers/factory" + "github.com/knative/pkg/logging" +) + +func init() { + injection.Default.RegisterInformer(withInformer) +} + +// Key is used as the key for associating information +// with a context.Context. +type Key struct{} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := factory.Get(ctx) + inf := f.Core().V1().ServiceAccounts() + return context.WithValue(ctx, Key{}, inf), inf.Informer() +} + +// Get extracts the Kubernetes Service Account informer from the context. +func Get(ctx context.Context) corev1.ServiceAccountInformer { + untyped := ctx.Value(Key{}) + if untyped == nil { + logging.FromContext(ctx).Panicf( + "Unable to fetch %T from context.", (corev1.ServiceAccountInformer)(nil)) + } + return untyped.(corev1.ServiceAccountInformer) +} diff --git a/vendor/github.com/knative/pkg/injection/informers/kubeinformers/factory/factory.go b/vendor/github.com/knative/pkg/injection/informers/kubeinformers/factory/factory.go new file mode 100644 index 000000000..d1a68de11 --- /dev/null +++ b/vendor/github.com/knative/pkg/injection/informers/kubeinformers/factory/factory.go @@ -0,0 +1,52 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package factory + +import ( + "context" + + "k8s.io/client-go/informers" + + "github.com/knative/pkg/controller" + "github.com/knative/pkg/injection" + "github.com/knative/pkg/injection/clients/kubeclient" + "github.com/knative/pkg/logging" +) + +func init() { + injection.Default.RegisterInformerFactory(withInformerFactory) +} + +// Key is used as the key for associating information +// with a context.Context. +type Key struct{} + +func withInformerFactory(ctx context.Context) context.Context { + kc := kubeclient.Get(ctx) + return context.WithValue(ctx, Key{}, + informers.NewSharedInformerFactory(kc, controller.GetResyncPeriod(ctx))) +} + +// Get extracts the Kubernetes InformerFactory from the context. +func Get(ctx context.Context) informers.SharedInformerFactory { + untyped := ctx.Value(Key{}) + if untyped == nil { + logging.FromContext(ctx).Panicf( + "Unable to fetch %T from context.", (informers.SharedInformerFactory)(nil)) + } + return untyped.(informers.SharedInformerFactory) +} diff --git a/vendor/github.com/knative/pkg/injection/informers/kubeinformers/factory/fake/fake.go b/vendor/github.com/knative/pkg/injection/informers/kubeinformers/factory/fake/fake.go new file mode 100644 index 000000000..7588aec5a --- /dev/null +++ b/vendor/github.com/knative/pkg/injection/informers/kubeinformers/factory/fake/fake.go @@ -0,0 +1,40 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + "context" + + "k8s.io/client-go/informers" + + "github.com/knative/pkg/controller" + "github.com/knative/pkg/injection" + "github.com/knative/pkg/injection/clients/kubeclient/fake" + "github.com/knative/pkg/injection/informers/kubeinformers/factory" +) + +var Get = factory.Get + +func init() { + injection.Fake.RegisterInformerFactory(withInformerFactory) +} + +func withInformerFactory(ctx context.Context) context.Context { + kc := fake.Get(ctx) + return context.WithValue(ctx, factory.Key{}, + informers.NewSharedInformerFactory(kc, controller.GetResyncPeriod(ctx))) +} diff --git a/vendor/github.com/knative/pkg/injection/informers/kubeinformers/rbacv1/rolebinding/fake/fake.go b/vendor/github.com/knative/pkg/injection/informers/kubeinformers/rbacv1/rolebinding/fake/fake.go new file mode 100644 index 000000000..deedcba4f --- /dev/null +++ b/vendor/github.com/knative/pkg/injection/informers/kubeinformers/rbacv1/rolebinding/fake/fake.go @@ -0,0 +1,38 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package fake + +import ( + "context" + + "github.com/knative/pkg/controller" + "github.com/knative/pkg/injection" + "github.com/knative/pkg/injection/informers/kubeinformers/factory/fake" + "github.com/knative/pkg/injection/informers/kubeinformers/rbacv1/rolebinding" +) + +var Get = rolebinding.Get + +func init() { + injection.Fake.RegisterInformer(withInformer) +} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := fake.Get(ctx) + inf := f.Rbac().V1().RoleBindings() + return context.WithValue(ctx, rolebinding.Key{}, inf), inf.Informer() +} diff --git a/vendor/github.com/knative/pkg/injection/informers/kubeinformers/rbacv1/rolebinding/rolebinding.go b/vendor/github.com/knative/pkg/injection/informers/kubeinformers/rbacv1/rolebinding/rolebinding.go new file mode 100644 index 000000000..f7dca0d23 --- /dev/null +++ b/vendor/github.com/knative/pkg/injection/informers/kubeinformers/rbacv1/rolebinding/rolebinding.go @@ -0,0 +1,52 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package rolebinding + +import ( + "context" + + rbacv1 "k8s.io/client-go/informers/rbac/v1" + + "github.com/knative/pkg/controller" + "github.com/knative/pkg/injection" + "github.com/knative/pkg/injection/informers/kubeinformers/factory" + "github.com/knative/pkg/logging" +) + +func init() { + injection.Default.RegisterInformer(withInformer) +} + +// Key is used as the key for associating information +// with a context.Context. +type Key struct{} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := factory.Get(ctx) + inf := f.Rbac().V1().RoleBindings() + return context.WithValue(ctx, Key{}, inf), inf.Informer() +} + +// Get extracts the Kubernetes Role Binding informer from the context. +func Get(ctx context.Context) rbacv1.RoleBindingInformer { + untyped := ctx.Value(Key{}) + if untyped == nil { + logging.FromContext(ctx).Panicf( + "Unable to fetch %T from context.", (rbacv1.RoleBindingInformer)(nil)) + } + return untyped.(rbacv1.RoleBindingInformer) +} diff --git a/vendor/github.com/knative/pkg/injection/interface.go b/vendor/github.com/knative/pkg/injection/interface.go new file mode 100644 index 000000000..6413b0619 --- /dev/null +++ b/vendor/github.com/knative/pkg/injection/interface.go @@ -0,0 +1,84 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package injection + +import ( + "context" + "sync" + + "k8s.io/client-go/rest" + + "github.com/knative/pkg/configmap" + "github.com/knative/pkg/controller" +) + +// Interface is the interface for interacting with injection +// implementations, such as our Default and Fake below. +type Interface interface { + // RegisterClient registers a new injector callback for associating + // a new client with a context. + RegisterClient(ClientInjector) + + // GetClients fetches all of the registered client injectors. + GetClients() []ClientInjector + + // RegisterInformerFactory registers a new injector callback for associating + // a new informer factory with a context. + RegisterInformerFactory(InformerFactoryInjector) + + // GetInformerFactories fetches all of the registered informer factory injectors. + GetInformerFactories() []InformerFactoryInjector + + // RegisterInformer registers a new injector callback for associating + // a new informer with a context. + RegisterInformer(InformerInjector) + + // GetInformers fetches all of the registered informer injectors. + GetInformers() []InformerInjector + + // SetupInformers runs all of the injectors against a context, starting with + // the clients and the given rest.Config. The resulting context is returned + // along with a list of the .Informer() for each of the injected informers, + // which is suitable for passing to controller.StartInformers(). + // This does not setup or start any controllers. + SetupInformers(context.Context, *rest.Config) (context.Context, []controller.Informer) +} + +type ControllerConstructor func(context.Context, configmap.Watcher) *controller.Impl + +var ( + // Check that impl implements Interface + _ Interface = (*impl)(nil) + + // Default is the injection interface with which informers should register + // to make themselves available to the controller process when reconcilers + // are being run for real. + Default Interface = &impl{} + + // Fake is the injection interface with which informers should register + // to make themselves available to the controller process when it is being + // unit tested. + Fake Interface = &impl{} +) + +type impl struct { + m sync.RWMutex + + clients []ClientInjector + factories []InformerFactoryInjector + informers []InformerInjector +} diff --git a/vendor/github.com/knative/pkg/injection/sharedmain/main.go b/vendor/github.com/knative/pkg/injection/sharedmain/main.go new file mode 100644 index 000000000..08c1ef2bf --- /dev/null +++ b/vendor/github.com/knative/pkg/injection/sharedmain/main.go @@ -0,0 +1,113 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package sharedmain
+
+import (
+	"context"
+	"flag"
+	"log"
+
+	"k8s.io/client-go/rest"
+	"k8s.io/client-go/tools/clientcmd"
+
+	"github.com/knative/pkg/configmap"
+	"github.com/knative/pkg/controller"
+	"github.com/knative/pkg/injection"
+	"github.com/knative/pkg/injection/clients/kubeclient"
+	"github.com/knative/pkg/logging"
+	"github.com/knative/pkg/metrics"
+	"github.com/knative/pkg/signals"
+	"github.com/knative/pkg/system"
+	"go.uber.org/zap"
+)
+
+func Main(component string, ctors ...injection.ControllerConstructor) {
+	// Set up signals so we handle the first shutdown signal gracefully.
+	MainWithContext(signals.NewContext(), component, ctors...)
+}
+
+func MainWithContext(ctx context.Context, component string, ctors ...injection.ControllerConstructor) {
+	var (
+		masterURL  = flag.String("master", "", "The address of the Kubernetes API server. Overrides any value in kubeconfig. Only required if out-of-cluster.")
+		kubeconfig = flag.String("kubeconfig", "", "Path to a kubeconfig. Only required if out-of-cluster.")
+	)
+	flag.Parse()
+
+	cfg, err := clientcmd.BuildConfigFromFlags(*masterURL, *kubeconfig)
+	if err != nil {
+		log.Fatal("Error building kubeconfig:", err)
+	}
+	MainWithConfig(ctx, component, cfg, ctors...)
+}
+
+func MainWithConfig(ctx context.Context, component string, cfg *rest.Config, ctors ...injection.ControllerConstructor) {
+	// Set up our logger.
+	loggingConfigMap, err := configmap.Load("/etc/config-logging")
+	if err != nil {
+		log.Fatal("Error loading logging configuration:", err)
+	}
+	loggingConfig, err := logging.NewConfigFromMap(loggingConfigMap)
+	if err != nil {
+		log.Fatal("Error parsing logging configuration:", err)
+	}
+	logger, atomicLevel := logging.NewLoggerFromConfig(loggingConfig, component)
+	defer flush(logger)
+	ctx = logging.WithLogger(ctx, logger)
+
+	logger.Infof("Registering %d clients", len(injection.Default.GetClients()))
+	logger.Infof("Registering %d informer factories", len(injection.Default.GetInformerFactories()))
+	logger.Infof("Registering %d informers", len(injection.Default.GetInformers()))
+	logger.Infof("Registering %d controllers", len(ctors))
+
+	// Adjust our client's rate limits based on the number of controllers we are running.
+	cfg.QPS = float32(len(ctors)) * rest.DefaultQPS
+	cfg.Burst = len(ctors) * rest.DefaultBurst
+
+	ctx, informers := injection.Default.SetupInformers(ctx, cfg)
+
+	// TODO(mattmoor): This should itself take a context and be injection-based.
+	cmw := configmap.NewInformedWatcher(kubeclient.Get(ctx), system.Namespace())
+
+	// Based on the reconcilers we have linked, build up the set of controllers to run.
+	controllers := make([]*controller.Impl, 0, len(ctors))
+	for _, cf := range ctors {
+		controllers = append(controllers, cf(ctx, cmw))
+	}
+
+	// Watch the logging config map and dynamically update logging levels.
+	cmw.Watch(logging.ConfigMapName(), logging.UpdateLevelFromConfigMap(logger, atomicLevel, component))
+	// Watch the observability config map and dynamically update metrics exporter.
+	cmw.Watch(metrics.ConfigMapName(), metrics.UpdateExporterFromConfigMap(component, logger))
+	if err := cmw.Start(ctx.Done()); err != nil {
+		logger.Fatalw("Failed to start configuration manager", zap.Error(err))
+	}
+
+	// Start all of the informers and wait for them to sync.
+	logger.Info("Starting informers.")
+	if err := controller.StartInformers(ctx.Done(), informers...); err != nil {
+		logger.Fatalw("Failed to start informers", zap.Error(err))
+	}
+
+	// Start all of the controllers.
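+	// Note: controller.StartAll blocks until the stop channel is closed, so
+	// MainWithConfig does not return until shutdown is signaled.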
+ logger.Info("Starting controllers...") + controller.StartAll(ctx.Done(), controllers...) +} + +func flush(logger *zap.SugaredLogger) { + logger.Sync() + metrics.FlushExporter() +} diff --git a/vendor/github.com/knative/pkg/kmeta/OWNERS b/vendor/github.com/knative/pkg/kmeta/OWNERS new file mode 100644 index 000000000..29b0d9f25 --- /dev/null +++ b/vendor/github.com/knative/pkg/kmeta/OWNERS @@ -0,0 +1,4 @@ +# The OWNERS file is used by prow to automatically merge approved PRs. + +approvers: +- kmeta-approvers diff --git a/vendor/github.com/knative/pkg/kmeta/accessor.go b/vendor/github.com/knative/pkg/kmeta/accessor.go new file mode 100644 index 000000000..07c69beda --- /dev/null +++ b/vendor/github.com/knative/pkg/kmeta/accessor.go @@ -0,0 +1,94 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kmeta + +import ( + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/tools/cache" +) + +// Accessor is a collection of interfaces from metav1.TypeMeta, +// runtime.Object and metav1.Object that Kubernetes API types +// registered with runtime.Scheme must support. +type Accessor interface { + // Interfaces for metav1.TypeMeta + GroupVersionKind() schema.GroupVersionKind + SetGroupVersionKind(gvk schema.GroupVersionKind) + + // Interfaces for runtime.Object + GetObjectKind() schema.ObjectKind + DeepCopyObject() runtime.Object + + // Interfaces for metav1.Object + GetNamespace() string + SetNamespace(namespace string) + GetName() string + SetName(name string) + GetGenerateName() string + SetGenerateName(name string) + GetUID() types.UID + SetUID(uid types.UID) + GetResourceVersion() string + SetResourceVersion(version string) + GetGeneration() int64 + SetGeneration(generation int64) + GetSelfLink() string + SetSelfLink(selfLink string) + GetCreationTimestamp() metav1.Time + SetCreationTimestamp(timestamp metav1.Time) + GetDeletionTimestamp() *metav1.Time + SetDeletionTimestamp(timestamp *metav1.Time) + GetDeletionGracePeriodSeconds() *int64 + SetDeletionGracePeriodSeconds(*int64) + GetLabels() map[string]string + SetLabels(labels map[string]string) + GetAnnotations() map[string]string + SetAnnotations(annotations map[string]string) + GetInitializers() *metav1.Initializers + SetInitializers(initializers *metav1.Initializers) + GetFinalizers() []string + SetFinalizers(finalizers []string) + GetOwnerReferences() []metav1.OwnerReference + SetOwnerReferences([]metav1.OwnerReference) + GetClusterName() string + SetClusterName(clusterName string) +} + +// DeletionHandlingAccessor tries to convert given interface into Accessor first; +// and to handle deletion, it try to fetch info from DeletedFinalStateUnknown on failure. 
+// The name is a reference to cache.DeletionHandlingMetaNamespaceKeyFunc +func DeletionHandlingAccessor(obj interface{}) (Accessor, error) { + accessor, ok := obj.(Accessor) + if !ok { + // To handle obj deletion, try to fetch info from DeletedFinalStateUnknown. + tombstone, ok := obj.(cache.DeletedFinalStateUnknown) + if !ok { + return nil, fmt.Errorf("Couldn't get Accessor from tombstone %#v", obj) + } + accessor, ok = tombstone.Obj.(Accessor) + if !ok { + return nil, fmt.Errorf("The object that Tombstone contained is not of kmeta.Accessor %#v", obj) + } + } + + return accessor, nil +} diff --git a/vendor/github.com/knative/pkg/kmeta/doc.go b/vendor/github.com/knative/pkg/kmeta/doc.go new file mode 100644 index 000000000..53ff38d72 --- /dev/null +++ b/vendor/github.com/knative/pkg/kmeta/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package kmeta provides Knative utilities for operating on Kubernetes +// resources' ObjectMeta. +package kmeta diff --git a/vendor/github.com/knative/pkg/kmeta/labels.go b/vendor/github.com/knative/pkg/kmeta/labels.go new file mode 100644 index 000000000..f9a72d8bf --- /dev/null +++ b/vendor/github.com/knative/pkg/kmeta/labels.go @@ -0,0 +1,114 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kmeta + +import ( + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/selection" +) + +// The methods in this file are used for managing subresources in cases where +// a controller instantiates different resources for each version of itself. +// There are two sets of methods available here: +// * `*VersionLabel*`: these methods act on `metadata.resourceVersion` and +// create new labels for EVERY change to the resource (incl. `/status`). +// * `*GenerationLabel*`: these methods act on `metadata.generation` and +// create new labels for changes to the resource's "spec" (typically, but +// some K8s resources change `metadata.generation` for annotations as well +// e.g. Deployment). +// +// For example, if an A might instantiate N B's at version 1 and M B's at +// version 2 then it can use MakeVersionLabels to decorate each subresource +// with the appropriate labels for the version at which it was instantiated. +// +// During reconciliation, MakeVersionLabelSelector can be used with the +// informer listers to access the appropriate subresources for the current +// version of the parent resource. 
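+// For example (an illustrative sketch; `parent` and `lister` are assumed):
+//
+//	selector := kmeta.MakeVersionLabelSelector(parent)
+//	children, err := lister.List(selector)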
+// +// Likewise during reconciliation, MakeOldVersionLabelSelector can be used +// with the API client's DeleteCollection method to clean up subresources +// for older versions of the resource. + +// MakeVersionLabels constructs a set of labels to apply to subresources +// instantiated at this version of the parent resource, so that we can +// efficiently select them. +func MakeVersionLabels(om metav1.ObjectMetaAccessor) labels.Set { + return map[string]string{ + "controller": string(om.GetObjectMeta().GetUID()), + "version": om.GetObjectMeta().GetResourceVersion(), + } +} + +// MakeVersionLabelSelector constructs a selector for subresources +// instantiated at this version of the parent resource. This keys +// off of the labels populated by MakeVersionLabels. +func MakeVersionLabelSelector(om metav1.ObjectMetaAccessor) labels.Selector { + return labels.SelectorFromSet(MakeVersionLabels(om)) +} + +// MakeOldVersionLabelSelector constructs a selector for subresources +// instantiated at an older version of the parent resource. This keys +// off of the labels populated by MakeVersionLabels. +func MakeOldVersionLabelSelector(om metav1.ObjectMetaAccessor) labels.Selector { + return labels.NewSelector().Add( + mustNewRequirement("controller", selection.Equals, []string{string(om.GetObjectMeta().GetUID())}), + mustNewRequirement("version", selection.NotEquals, []string{om.GetObjectMeta().GetResourceVersion()}), + ) +} + +// MakeGenerationLabels constructs a set of labels to apply to subresources +// instantiated at this version of the parent resource, so that we can +// efficiently select them. +func MakeGenerationLabels(om metav1.ObjectMetaAccessor) labels.Set { + return map[string]string{ + "controller": string(om.GetObjectMeta().GetUID()), + "generation": genStr(om), + } +} + +// MakeGenerationLabelSelector constructs a selector for subresources +// instantiated at this version of the parent resource. This keys +// off of the labels populated by MakeGenerationLabels. +func MakeGenerationLabelSelector(om metav1.ObjectMetaAccessor) labels.Selector { + return labels.SelectorFromSet(MakeGenerationLabels(om)) +} + +// MakeOldGenerationLabelSelector constructs a selector for subresources +// instantiated at an older version of the parent resource. This keys +// off of the labels populated by MakeGenerationLabels. +func MakeOldGenerationLabelSelector(om metav1.ObjectMetaAccessor) labels.Selector { + return labels.NewSelector().Add( + mustNewRequirement("controller", selection.Equals, []string{string(om.GetObjectMeta().GetUID())}), + mustNewRequirement("generation", selection.NotEquals, []string{genStr(om)}), + ) +} + +func genStr(om metav1.ObjectMetaAccessor) string { + return fmt.Sprintf("%05d", om.GetObjectMeta().GetGeneration()) +} + +// mustNewRequirement panics if there are any errors constructing our selectors. 
+func mustNewRequirement(key string, op selection.Operator, vals []string) labels.Requirement { + r, err := labels.NewRequirement(key, op, vals) + if err != nil { + panic(fmt.Sprintf("mustNewRequirement(%v, %v, %v) = %v", key, op, vals, err)) + } + return *r +} diff --git a/vendor/github.com/knative/pkg/kmeta/owner_references.go b/vendor/github.com/knative/pkg/kmeta/owner_references.go new file mode 100644 index 000000000..2e9a1289f --- /dev/null +++ b/vendor/github.com/knative/pkg/kmeta/owner_references.go @@ -0,0 +1,38 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kmeta + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// OwnerRefable indicates that a particular type has sufficient +// information to produce a metav1.OwnerReference to an object. +type OwnerRefable interface { + metav1.ObjectMetaAccessor + + // GetGroupVersionKind returns a GroupVersionKind. The name is chosen + // to avoid collision with TypeMeta's GroupVersionKind() method. + // See: https://issues.k8s.io/3030 + GetGroupVersionKind() schema.GroupVersionKind +} + +// NewControllerRef creates an OwnerReference pointing to the given controller. +func NewControllerRef(obj OwnerRefable) *metav1.OwnerReference { + return metav1.NewControllerRef(obj.GetObjectMeta(), obj.GetGroupVersionKind()) +} diff --git a/vendor/github.com/knative/pkg/logging/OWNERS b/vendor/github.com/knative/pkg/logging/OWNERS new file mode 100644 index 000000000..fa4854ba0 --- /dev/null +++ b/vendor/github.com/knative/pkg/logging/OWNERS @@ -0,0 +1,4 @@ +# The OWNERS file is used by prow to automatically merge approved PRs. + +approvers: +- logging-approvers diff --git a/vendor/github.com/knative/pkg/logging/config.go b/vendor/github.com/knative/pkg/logging/config.go new file mode 100644 index 000000000..236a44986 --- /dev/null +++ b/vendor/github.com/knative/pkg/logging/config.go @@ -0,0 +1,198 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package logging + +import ( + "encoding/json" + "errors" + "fmt" + "os" + "strings" + + "go.uber.org/zap" + "go.uber.org/zap/zapcore" + corev1 "k8s.io/api/core/v1" + + "github.com/knative/pkg/changeset" + "github.com/knative/pkg/logging/logkey" +) + +const ConfigMapNameEnv = "CONFIG_LOGGING_NAME" + +// NewLogger creates a logger with the supplied configuration. +// In addition to the logger, it returns AtomicLevel that can +// be used to change the logging level at runtime. 
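+// For example (an illustrative sketch; configJSON is assumed to hold the
+// zap JSON configuration):
+//
+//	logger, level := NewLogger(configJSON, "info")
+//	defer logger.Sync()
+//	level.SetLevel(zapcore.DebugLevel) // raise verbosity at runtime
+//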
+// If configuration is empty, a fallback configuration is used.
+// If configuration cannot be used to instantiate a logger,
+// the same fallback configuration is used.
+func NewLogger(configJSON string, levelOverride string, opts ...zap.Option) (*zap.SugaredLogger, zap.AtomicLevel) {
+	logger, atomicLevel, err := newLoggerFromConfig(configJSON, levelOverride, opts)
+	if err == nil {
+		return enrichLoggerWithCommitID(logger.Sugar()), atomicLevel
+	}
+
+	loggingCfg := zap.NewProductionConfig()
+	if len(levelOverride) > 0 {
+		if level, err := levelFromString(levelOverride); err == nil {
+			loggingCfg.Level = zap.NewAtomicLevelAt(*level)
+		}
+	}
+
+	logger, err2 := loggingCfg.Build(opts...)
+	if err2 != nil {
+		panic(err2)
+	}
+	return enrichLoggerWithCommitID(logger.Named("fallback-logger").Sugar()), loggingCfg.Level
+}
+
+func enrichLoggerWithCommitID(logger *zap.SugaredLogger) *zap.SugaredLogger {
+	commitID, err := changeset.Get()
+	if err == nil {
+		// Enrich logs with the GitHub commit ID.
+		return logger.With(zap.String(logkey.GitHubCommitID, commitID))
+	}
+
+	logger.Warnf("Fetching the GitHub commit ID from kodata failed: %v", err)
+	return logger
+}
+
+// NewLoggerFromConfig creates a logger using the provided Config.
+func NewLoggerFromConfig(config *Config, name string, opts ...zap.Option) (*zap.SugaredLogger, zap.AtomicLevel) {
+	logger, level := NewLogger(config.LoggingConfig, config.LoggingLevel[name].String(), opts...)
+	return logger.Named(name), level
+}
+
+func newLoggerFromConfig(configJSON string, levelOverride string, opts []zap.Option) (*zap.Logger, zap.AtomicLevel, error) {
+	if len(configJSON) == 0 {
+		return nil, zap.AtomicLevel{}, errors.New("empty logging configuration")
+	}
+
+	var loggingCfg zap.Config
+	if err := json.Unmarshal([]byte(configJSON), &loggingCfg); err != nil {
+		return nil, zap.AtomicLevel{}, err
+	}
+
+	if len(levelOverride) > 0 {
+		if level, err := levelFromString(levelOverride); err == nil {
+			loggingCfg.Level = zap.NewAtomicLevelAt(*level)
+		}
+	}
+
+	logger, err := loggingCfg.Build(opts...)
+	if err != nil {
+		return nil, zap.AtomicLevel{}, err
+	}
+
+	logger.Info("Successfully created the logger.", zap.String(logkey.JSONConfig, configJSON))
+	logger.Sugar().Infof("Logging level set to %v", loggingCfg.Level)
+	return logger, loggingCfg.Level, nil
+}
+
+// Config contains the configuration defined in the logging ConfigMap.
+// +k8s:deepcopy-gen=true
+type Config struct {
+	LoggingConfig string
+	LoggingLevel  map[string]zapcore.Level
+}
+
+const defaultZLC = `{
+  "level": "info",
+  "development": false,
+  "outputPaths": ["stdout"],
+  "errorOutputPaths": ["stderr"],
+  "encoding": "json",
+  "encoderConfig": {
+    "timeKey": "ts",
+    "levelKey": "level",
+    "nameKey": "logger",
+    "callerKey": "caller",
+    "messageKey": "msg",
+    "stacktraceKey": "stacktrace",
+    "lineEnding": "",
+    "levelEncoder": "",
+    "timeEncoder": "iso8601",
+    "durationEncoder": "",
+    "callerEncoder": ""
+  }
+}`
+
+// NewConfigFromMap creates a Config from the supplied map.
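+// For example (illustrative), a ConfigMap data section such as:
+//
+//	zap-logger-config: '{"level": "info", "encoding": "json"}'
+//	loglevel.controller: debug
+//
+// yields LoggingLevel["controller"] == zapcore.DebugLevel.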
+func NewConfigFromMap(data map[string]string) (*Config, error) {
+	lc := &Config{}
+	if zlc, ok := data["zap-logger-config"]; ok {
+		lc.LoggingConfig = zlc
+	} else {
+		lc.LoggingConfig = defaultZLC
+	}
+
+	lc.LoggingLevel = make(map[string]zapcore.Level)
+	for k, v := range data {
+		if component := strings.TrimPrefix(k, "loglevel."); component != k && component != "" {
+			if len(v) > 0 {
+				level, err := levelFromString(v)
+				if err != nil {
+					return nil, err
+				}
+				lc.LoggingLevel[component] = *level
+			}
+		}
+	}
+	return lc, nil
+}
+
+// NewConfigFromConfigMap creates a Config from the supplied ConfigMap.
+func NewConfigFromConfigMap(configMap *corev1.ConfigMap) (*Config, error) {
+	return NewConfigFromMap(configMap.Data)
+}
+
+func levelFromString(level string) (*zapcore.Level, error) {
+	var zapLevel zapcore.Level
+	if err := zapLevel.UnmarshalText([]byte(level)); err != nil {
+		return nil, fmt.Errorf("invalid logging level: %v", level)
+	}
+	return &zapLevel, nil
+}
+
+// UpdateLevelFromConfigMap returns a helper func that can be used to update the logging level
+// when a config map is updated.
+func UpdateLevelFromConfigMap(logger *zap.SugaredLogger, atomicLevel zap.AtomicLevel,
+	levelKey string) func(configMap *corev1.ConfigMap) {
+	return func(configMap *corev1.ConfigMap) {
+		loggingConfig, err := NewConfigFromConfigMap(configMap)
+		if err != nil {
+			logger.Errorw("Failed to parse the logging configmap. Previous config map will be used.", zap.Error(err))
+			return
+		}
+
+		level := loggingConfig.LoggingLevel[levelKey]
+		if atomicLevel.Level() != level {
+			logger.Infof("Updating logging level for %v from %v to %v.", levelKey, atomicLevel.Level(), level)
+			atomicLevel.SetLevel(level)
+		}
+	}
+}
+
+// ConfigMapName gets the name of the logging ConfigMap.
+func ConfigMapName() string {
+	cm := os.Getenv(ConfigMapNameEnv)
+	if cm == "" {
+		return "config-logging"
+	}
+	return cm
+}
diff --git a/vendor/github.com/knative/pkg/logging/logger.go b/vendor/github.com/knative/pkg/logging/logger.go
new file mode 100644
index 000000000..903dc5825
--- /dev/null
+++ b/vendor/github.com/knative/pkg/logging/logger.go
@@ -0,0 +1,57 @@
+/*
+Copyright 2018 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package logging
+
+import (
+	"context"
+
+	"go.uber.org/zap"
+)
+
+type loggerKey struct{}
+
+// This logger is used when there is no logger attached to the context.
+// Rather than returning nil and causing a panic, we will use the fallback
+// logger. The fallback logger is tagged with logger=fallback to make sure
+// that code that doesn't set the logger correctly can be caught at runtime.
+var fallbackLogger *zap.SugaredLogger
+
+func init() {
+	if logger, err := zap.NewProduction(); err != nil {
+		// We failed to create a fallback logger. Our fallback
+		// unfortunately falls back to noop.
+		fallbackLogger = zap.NewNop().Sugar()
+	} else {
+		fallbackLogger = logger.Named("fallback").Sugar()
+	}
+}
+
+// WithLogger returns a copy of the parent context in which the
+// value associated with the logger key is the supplied logger.
+func WithLogger(ctx context.Context, logger *zap.SugaredLogger) context.Context {
+	return context.WithValue(ctx, loggerKey{}, logger)
+}
+
+// FromContext returns the logger stored in the context.
+// It returns the fallback logger if no logger is set in the context,
+// or if the stored value is not of the correct type.
+func FromContext(ctx context.Context) *zap.SugaredLogger {
+	if logger, ok := ctx.Value(loggerKey{}).(*zap.SugaredLogger); ok {
+		return logger
+	}
+	return fallbackLogger
+}
diff --git a/vendor/github.com/knative/pkg/logging/logkey/constants.go b/vendor/github.com/knative/pkg/logging/logkey/constants.go
new file mode 100644
index 000000000..e90abec97
--- /dev/null
+++ b/vendor/github.com/knative/pkg/logging/logkey/constants.go
@@ -0,0 +1,65 @@
+/*
+Copyright 2018 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package logkey
+
+const (
+	// ControllerType is the key used for controller type in structured logs
+	ControllerType = "knative.dev/controller"
+
+	// Key is the key (namespace/name) being reconciled.
+	Key = "knative.dev/key"
+
+	// TraceId is the key used to track an asynchronous or long running operation.
+ TraceId = "knative.dev/traceid" + + // Namespace is the key used for namespace in structured logs + Namespace = "knative.dev/namespace" + + // JSONConfig is the key used for JSON configurations (not to be confused by the Configuration object) + JSONConfig = "knative.dev/jsonconfig" + + // Kind is the key used to represent kind of an object in logs + Kind = "knative.dev/kind" + + // Name is the key used to represent name of an object in logs + Name = "knative.dev/name" + + // Operation is the key used to represent an operation in logs + Operation = "knative.dev/operation" + + // Resource is the key used to represent a resource in logs + Resource = "knative.dev/resource" + + // SubResource is a generic key used to represent a sub-resource in logs + SubResource = "knative.dev/subresource" + + // UserInfo is the key used to represent a user information in logs + UserInfo = "knative.dev/userinfo" + + // Pod is the key used to represent a pod's name in logs + Pod = "knative.dev/pod" + + // Deployment is the key used to represent a deployment's name in logs + Deployment = "knative.dev/deployment" + + // KubernetesService is the key used to represent a Kubernetes service name in logs + KubernetesService = "knative.dev/k8sservice" + + // GitHubCommitID is the key used to represent the GitHub Commit ID where the + // Knative component was built from in logs + GitHubCommitID = "commit" +) diff --git a/vendor/github.com/knative/pkg/logging/testing/util.go b/vendor/github.com/knative/pkg/logging/testing/util.go new file mode 100644 index 000000000..05179648f --- /dev/null +++ b/vendor/github.com/knative/pkg/logging/testing/util.go @@ -0,0 +1,68 @@ +/* +Copyright 2018 The Knative Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testing + +import ( + "context" + "sync" + "testing" + + "go.uber.org/zap" + "go.uber.org/zap/zaptest" + + "github.com/knative/pkg/logging" +) + +var ( + loggers = make(map[string]*zap.SugaredLogger) + m sync.Mutex +) + +// TestLogger gets a logger to use in unit and end to end tests +func TestLogger(t *testing.T) *zap.SugaredLogger { + m.Lock() + defer m.Unlock() + + logger, ok := loggers[t.Name()] + + if ok { + return logger + } + + opts := zaptest.WrapOptions( + zap.AddCaller(), + zap.Development(), + ) + + logger = zaptest.NewLogger(t, opts).Sugar().Named(t.Name()) + loggers[t.Name()] = logger + + return logger +} + +// ClearAll removes all the testing loggers. +// `go test -count=X` executes runs in the same process, thus the map +// persists between the runs, but the `t` will no longer be valid and will +// cause a panic deep inside testing code. 
+func ClearAll() { + loggers = make(map[string]*zap.SugaredLogger) +} + +// TestContextWithLogger returns a context with a logger to be used in tests +func TestContextWithLogger(t *testing.T) context.Context { + return logging.WithLogger(context.TODO(), TestLogger(t)) +} diff --git a/vendor/github.com/knative/pkg/logging/zz_generated.deepcopy.go b/vendor/github.com/knative/pkg/logging/zz_generated.deepcopy.go new file mode 100644 index 000000000..8611e9395 --- /dev/null +++ b/vendor/github.com/knative/pkg/logging/zz_generated.deepcopy.go @@ -0,0 +1,48 @@ +// +build !ignore_autogenerated + +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package logging + +import ( + zapcore "go.uber.org/zap/zapcore" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Config) DeepCopyInto(out *Config) { + *out = *in + if in.LoggingLevel != nil { + in, out := &in.LoggingLevel, &out.LoggingLevel + *out = make(map[string]zapcore.Level, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Config. +func (in *Config) DeepCopy() *Config { + if in == nil { + return nil + } + out := new(Config) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/knative/pkg/metrics/OWNERS b/vendor/github.com/knative/pkg/metrics/OWNERS new file mode 100644 index 000000000..6d3966df4 --- /dev/null +++ b/vendor/github.com/knative/pkg/metrics/OWNERS @@ -0,0 +1,4 @@ +# The OWNERS file is used by prow to automatically merge approved PRs. + +approvers: +- metrics-approvers diff --git a/vendor/github.com/knative/pkg/metrics/config.go b/vendor/github.com/knative/pkg/metrics/config.go new file mode 100644 index 000000000..cdce9b53f --- /dev/null +++ b/vendor/github.com/knative/pkg/metrics/config.go @@ -0,0 +1,293 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "errors" + "fmt" + "os" + "path" + "strconv" + "strings" + "time" + + "go.uber.org/zap" + corev1 "k8s.io/api/core/v1" +) + +const ( + DomainEnv = "METRICS_DOMAIN" + ConfigMapNameEnv = "CONFIG_OBSERVABILITY_NAME" +) + +// metricsBackend specifies the backend to use for metrics +type metricsBackend string + +const ( + // The following keys are used to configure metrics reporting. 
+	// See https://github.com/knative/serving/blob/master/config/config-observability.yaml
+	// for details.
+	AllowStackdriverCustomMetricsKey = "metrics.allow-stackdriver-custom-metrics"
+	BackendDestinationKey            = "metrics.backend-destination"
+	ReportingPeriodKey               = "metrics.reporting-period-seconds"
+	StackdriverProjectIDKey          = "metrics.stackdriver-project-id"
+
+	// Stackdriver is used for the Stackdriver backend
+	Stackdriver metricsBackend = "stackdriver"
+	// Prometheus is used for the Prometheus backend
+	Prometheus metricsBackend = "prometheus"
+
+	defaultBackendEnvName = "DEFAULT_METRICS_BACKEND"
+
+	defaultPrometheusPort = 9090
+	maxPrometheusPort     = 65535
+	minPrometheusPort     = 1024
+)
+
+// ExporterOptions contains options for configuring the exporter.
+type ExporterOptions struct {
+	// Domain is the metrics domain, e.g. "knative.dev". Must be present.
+	Domain string
+
+	// Component is the name of the component that emits the metrics, e.g.
+	// "activator", "queue_proxy". Should contain only letters and underscores.
+	// Must be present.
+	Component string
+
+	// PrometheusPort is the port on which to expose metrics if the metrics
+	// backend is Prometheus. It should be between minPrometheusPort and
+	// maxPrometheusPort. A value of 0 means the default port, 9090. It is
+	// ignored if the metrics backend is not Prometheus.
+	PrometheusPort int
+
+	// ConfigMap is the data from the config map config-observability. Must be present.
+	// See https://github.com/knative/serving/blob/master/config/config-observability.yaml
+	// for details.
+	ConfigMap map[string]string
+}
+
+type metricsConfig struct {
+	// The metrics domain, e.g. "serving.knative.dev" or "build.knative.dev".
+	domain string
+	// The component that emits the metrics, e.g. "activator", "autoscaler".
+	component string
+	// The metrics backend destination.
+	backendDestination metricsBackend
+	// reportingPeriod specifies the interval between reporting aggregated views.
+	// If the duration is less than or equal to zero, it enables the default behavior.
+	reportingPeriod time.Duration
+
+	// ---- Prometheus specific below ----
+	// prometheusPort is the port where metrics are exposed in Prometheus
+	// format. It defaults to 9090.
+	prometheusPort int
+
+	// ---- Stackdriver specific below ----
+	// stackdriverProjectID is the Stackdriver project ID to which the stats data are
+	// uploaded. This is not the GCP project ID.
+	stackdriverProjectID string
+	// allowStackdriverCustomMetrics indicates whether it is allowed to send metrics to
+	// Stackdriver using the "global" resource type and a custom metric type if the
+	// metrics are not supported by the "knative_revision" resource type. Setting this
+	// flag to "true" could cause extra Stackdriver charges.
+	// If backendDestination is not Stackdriver, this is ignored.
+	allowStackdriverCustomMetrics bool
+	// isStackdriverBackend is true if backendDestination equals "stackdriver".
+	// Store this in a variable to reduce string comparison operations.
+	isStackdriverBackend bool
+	// stackdriverMetricTypePrefix is the metric domain joined with the component, e.g.
+	// "knative.dev/serving/activator". Store this in a variable to reduce string
+	// join operations.
+	stackdriverMetricTypePrefix string
+	// stackdriverCustomMetricTypePrefix is "custom.googleapis.com/knative.dev" joined
+	// with the component, e.g. "custom.googleapis.com/knative.dev/serving/activator".
+	// Store this in a variable to reduce string join operations.
+	stackdriverCustomMetricTypePrefix string
+}
+
+func getMetricsConfig(ops ExporterOptions, logger *zap.SugaredLogger) (*metricsConfig, error) {
+	var mc metricsConfig
+
+	if ops.Domain == "" {
+		return nil, errors.New("metrics domain cannot be empty")
+	}
+	mc.domain = ops.Domain
+
+	if ops.Component == "" {
+		return nil, errors.New("metrics component name cannot be empty")
+	}
+	mc.component = ops.Component
+
+	if ops.ConfigMap == nil {
+		return nil, errors.New("metrics config map cannot be empty")
+	}
+	m := ops.ConfigMap
+	// Read the backend setting from the environment variable first.
+	backend := os.Getenv(defaultBackendEnvName)
+	if backend == "" {
+		// Use Prometheus if DEFAULT_METRICS_BACKEND does not exist or is empty.
+		backend = string(Prometheus)
+	}
+	// Override the backend if it is set in the config map.
+	if backendFromConfig, ok := m[BackendDestinationKey]; ok {
+		backend = backendFromConfig
+	}
+	lb := metricsBackend(strings.ToLower(backend))
+	switch lb {
+	case Stackdriver, Prometheus:
+		mc.backendDestination = lb
+	default:
+		return nil, fmt.Errorf("unsupported metrics backend value %q", backend)
+	}
+
+	if mc.backendDestination == Prometheus {
+		pp := ops.PrometheusPort
+		if pp == 0 {
+			pp = defaultPrometheusPort
+		}
+		if pp < minPrometheusPort || pp > maxPrometheusPort {
+			return nil, fmt.Errorf("invalid port %v, should be between %v and %v", pp, minPrometheusPort, maxPrometheusPort)
+		}
+		mc.prometheusPort = pp
+	}
+
+	// If StackdriverProjectIDKey is not provided for the Stackdriver backend destination,
+	// OpenCensus will try to use the application default credentials. If those are not
+	// available, OpenCensus will fail to create the metrics exporter.
+	if mc.backendDestination == Stackdriver {
+		mc.stackdriverProjectID = m[StackdriverProjectIDKey]
+		mc.isStackdriverBackend = true
+		mc.stackdriverMetricTypePrefix = path.Join(mc.domain, mc.component)
+		mc.stackdriverCustomMetricTypePrefix = path.Join(customMetricTypePrefix, mc.component)
+		if ascmStr, ok := m[AllowStackdriverCustomMetricsKey]; ok && ascmStr != "" {
+			ascmBool, err := strconv.ParseBool(ascmStr)
+			if err != nil {
+				return nil, fmt.Errorf("invalid %s value %q", AllowStackdriverCustomMetricsKey, ascmStr)
+			}
+			mc.allowStackdriverCustomMetrics = ascmBool
+		}
+	}
+
+	// If a reporting period is specified, use the value from the configuration.
+	// If not, set a default value based on the selected backend.
+	// Each exporter makes different promises about what the lowest supported
+	// reporting period is. For Stackdriver, this value is 1 minute.
+	// For Prometheus, we will use a lower value since the exporter doesn't
+	// push anything but just responds to pull requests, and shorter durations
+	// do not really hurt the performance and we rely on the scraping configuration.
+	if repStr, ok := m[ReportingPeriodKey]; ok && repStr != "" {
+		repInt, err := strconv.Atoi(repStr)
+		if err != nil {
+			return nil, fmt.Errorf("invalid %s value %q", ReportingPeriodKey, repStr)
+		}
+		mc.reportingPeriod = time.Duration(repInt) * time.Second
+	} else if mc.backendDestination == Stackdriver {
+		mc.reportingPeriod = 60 * time.Second
+	} else if mc.backendDestination == Prometheus {
+		mc.reportingPeriod = 5 * time.Second
+	}
+
+	return &mc, nil
+}
+
+// UpdateExporterFromConfigMap returns a helper func that can be used to update the exporter
+// when a config map is updated.
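+// For example, sharedmain in this patch wires it to a configmap.InformedWatcher (cmw):
+//
+//	cmw.Watch(metrics.ConfigMapName(), metrics.UpdateExporterFromConfigMap(component, logger))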
+func UpdateExporterFromConfigMap(component string, logger *zap.SugaredLogger) func(configMap *corev1.ConfigMap) { + domain := Domain() + return func(configMap *corev1.ConfigMap) { + UpdateExporter(ExporterOptions{ + Domain: domain, + Component: component, + ConfigMap: configMap.Data, + }, logger) + } +} + +// UpdateExporter updates the exporter based on the given ExporterOptions. +func UpdateExporter(ops ExporterOptions, logger *zap.SugaredLogger) error { + newConfig, err := getMetricsConfig(ops, logger) + if err != nil { + if ce := getCurMetricsExporter(); ce == nil { + // Fail the process if there doesn't exist an exporter. + logger.Errorw("Failed to get a valid metrics config", zap.Error(err)) + } else { + logger.Errorw("Failed to get a valid metrics config; Skip updating the metrics exporter", zap.Error(err)) + } + return err + } + + if isNewExporterRequired(newConfig) { + logger.Info("Flushing the existing exporter before setting up the new exporter.") + FlushExporter() + e, err := newMetricsExporter(newConfig, logger) + if err != nil { + logger.Errorf("Failed to update a new metrics exporter based on metric config %v. error: %v", newConfig, err) + return err + } + existingConfig := getCurMetricsConfig() + setCurMetricsExporter(e) + logger.Infof("Successfully updated the metrics exporter; old config: %v; new config %v", existingConfig, newConfig) + } + + setCurMetricsConfig(newConfig) + return nil +} + +// isNewExporterRequired compares the non-nil newConfig against curMetricsConfig. When backend changes, +// or stackdriver project ID changes for stackdriver backend, we need to update the metrics exporter. +func isNewExporterRequired(newConfig *metricsConfig) bool { + cc := getCurMetricsConfig() + if cc == nil || newConfig.backendDestination != cc.backendDestination { + return true + } else if newConfig.backendDestination == Stackdriver && newConfig.stackdriverProjectID != cc.stackdriverProjectID { + return true + } + + return false +} + +// ConfigMapName gets the name of the metrics ConfigMap +func ConfigMapName() string { + cm := os.Getenv(ConfigMapNameEnv) + if cm == "" { + return "config-observability" + } + return cm +} + +// Domain holds the metrics domain to use for surfacing metrics. +func Domain() string { + if domain := os.Getenv(DomainEnv); domain != "" { + return domain + } + + panic(fmt.Sprintf(`The environment variable %q is not set + +If this is a process running on Kubernetes, then it should be specifying +this via: + + env: + - name: %s + value: knative.dev/some-repository + +If this is a Go unit test consuming metric.Domain() then it should add the +following import: + +import ( + _ "github.com/knative/pkg/metrics/testing" +)`, DomainEnv, DomainEnv)) +} diff --git a/vendor/k8s.io/api/node/v1beta1/doc.go b/vendor/github.com/knative/pkg/metrics/doc.go similarity index 71% rename from vendor/k8s.io/api/node/v1beta1/doc.go rename to vendor/github.com/knative/pkg/metrics/doc.go index e87583cea..631bb5966 100644 --- a/vendor/k8s.io/api/node/v1beta1/doc.go +++ b/vendor/github.com/knative/pkg/metrics/doc.go @@ -1,12 +1,9 @@ /* -Copyright 2019 The Kubernetes Authors. - +Copyright 2018 The Knative Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -14,10 +11,6 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:deepcopy-gen=package -// +k8s:protobuf-gen=package -// +k8s:openapi-gen=true - -// +groupName=node.k8s.io - -package v1beta1 // import "k8s.io/api/node/v1beta1" +// Package metrics provides Knative utilities for exporting metrics to Stackdriver +// backend or Prometheus backend based on config-observability settings. +package metrics diff --git a/vendor/github.com/knative/pkg/metrics/exporter.go b/vendor/github.com/knative/pkg/metrics/exporter.go new file mode 100644 index 000000000..e20637e3a --- /dev/null +++ b/vendor/github.com/knative/pkg/metrics/exporter.go @@ -0,0 +1,106 @@ +/* +Copyright 2018 The Knative Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "fmt" + "sync" + + "go.opencensus.io/stats/view" + "go.uber.org/zap" +) + +var ( + curMetricsExporter view.Exporter + curMetricsConfig *metricsConfig + metricsMux sync.Mutex +) + +type flushable interface { + // Flush waits for metrics to be uploaded. + Flush() +} + +// newMetricsExporter gets a metrics exporter based on the config. +func newMetricsExporter(config *metricsConfig, logger *zap.SugaredLogger) (view.Exporter, error) { + // If there is a Prometheus Exporter server running, stop it. + resetCurPromSrv() + ce := getCurMetricsExporter() + if ce != nil { + // UnregisterExporter is idempotent and it can be called multiple times for the same exporter + // without side effects. + view.UnregisterExporter(ce) + } + var err error + var e view.Exporter + switch config.backendDestination { + case Stackdriver: + e, err = newStackdriverExporter(config, logger) + case Prometheus: + e, err = newPrometheusExporter(config, logger) + default: + err = fmt.Errorf("Unsupported metrics backend %v", config.backendDestination) + } + if err != nil { + return nil, err + } + return e, nil +} + +func getCurMetricsExporter() view.Exporter { + metricsMux.Lock() + defer metricsMux.Unlock() + return curMetricsExporter +} + +func setCurMetricsExporter(e view.Exporter) { + metricsMux.Lock() + defer metricsMux.Unlock() + view.RegisterExporter(e) + curMetricsExporter = e +} + +func getCurMetricsConfig() *metricsConfig { + metricsMux.Lock() + defer metricsMux.Unlock() + return curMetricsConfig +} + +func setCurMetricsConfig(c *metricsConfig) { + metricsMux.Lock() + defer metricsMux.Unlock() + if c != nil { + view.SetReportingPeriod(c.reportingPeriod) + } else { + // Setting to 0 enables the default behavior. + view.SetReportingPeriod(0) + } + curMetricsConfig = c +} + +// FlushExporter waits for exported data to be uploaded. +// This should be called before the process shuts down or exporter is replaced. 
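+// For example, sharedmain's flush helper defers this work at shutdown:
+//
+//	defer flush(logger) // flush calls logger.Sync() and metrics.FlushExporter()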
+// Return value indicates whether the exporter is flushable or not. +func FlushExporter() bool { + e := getCurMetricsExporter() + if e == nil { + return false + } + + if f, ok := e.(flushable); ok { + f.Flush() + return true + } + return false +} diff --git a/vendor/github.com/knative/pkg/metrics/gcp_metadata.go b/vendor/github.com/knative/pkg/metrics/gcp_metadata.go new file mode 100644 index 000000000..ed64fb733 --- /dev/null +++ b/vendor/github.com/knative/pkg/metrics/gcp_metadata.go @@ -0,0 +1,40 @@ +/* +Copyright 2019 The Knative Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "cloud.google.com/go/compute/metadata" + "github.com/knative/pkg/metrics/metricskey" +) + +func retrieveGCPMetadata() *gcpMetadata { + gm := gcpMetadata{ + project: metricskey.ValueUnknown, + location: metricskey.ValueUnknown, + cluster: metricskey.ValueUnknown, + } + project, err := metadata.NumericProjectID() + if err == nil && project != "" { + gm.project = project + } + location, err := metadata.Zone() + if err == nil && location != "" { + gm.location = location + } + cluster, err := metadata.InstanceAttributeValue("cluster-name") + if err == nil && cluster != "" { + gm.cluster = cluster + } + return &gm +} diff --git a/vendor/github.com/knative/pkg/metrics/metricskey/constants.go b/vendor/github.com/knative/pkg/metrics/metricskey/constants.go new file mode 100644 index 000000000..f941f222a --- /dev/null +++ b/vendor/github.com/knative/pkg/metrics/metricskey/constants.go @@ -0,0 +1,79 @@ +/* +Copyright 2018 The Knative Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metricskey + +import "k8s.io/apimachinery/pkg/util/sets" + +const ( + // ResourceTypeKnativeRevision is the Stackdriver resource type for Knative revision + ResourceTypeKnativeRevision = "knative_revision" + + // LabelProject is the label for project (e.g. GCP GAIA ID, AWS project name) + LabelProject = "project_id" + + // LabelLocation is the label for location (e.g. 
GCE zone, AWS region) where the service is deployed + LabelLocation = "location" + + // LabelClusterName is the label for the immutable name of the cluster + LabelClusterName = "cluster_name" + + // LabelNamespaceName is the label for the immutable name of the namespace that the service is deployed in + LabelNamespaceName = "namespace_name" + + // LabelServiceName is the label for the deployed service name + LabelServiceName = "service_name" + + // LabelRouteName is the label for the immutable name of the route that receives the request + LabelRouteName = "route_name" + + // LabelConfigurationName is the label for the configuration which created the monitored revision + LabelConfigurationName = "configuration_name" + + // LabelRevisionName is the label for the monitored revision + LabelRevisionName = "revision_name" + + // ValueUnknown is the default value if the field is unknown, e.g. project will be unknown if Knative + // is not running on GKE. + ValueUnknown = "unknown" +) + +var ( + // KnativeRevisionLabels stores the set of resource labels for resource type knative_revision. + // LabelRouteName is optional, so it is not in this map; it is added as an extra label. + KnativeRevisionLabels = sets.NewString( + LabelProject, + LabelLocation, + LabelClusterName, + LabelNamespaceName, + LabelServiceName, + LabelConfigurationName, + LabelRevisionName, + ) + + // KnativeRevisionMetrics stores a set of metric types which are supported + // by resource type knative_revision. + KnativeRevisionMetrics = sets.NewString( + "knative.dev/serving/activator/request_count", + "knative.dev/serving/activator/request_latencies", + "knative.dev/serving/autoscaler/desired_pods", + "knative.dev/serving/autoscaler/requested_pods", + "knative.dev/serving/autoscaler/actual_pods", + "knative.dev/serving/autoscaler/stable_request_concurrency", + "knative.dev/serving/autoscaler/panic_request_concurrency", + "knative.dev/serving/autoscaler/target_concurrency_per_pod", + "knative.dev/serving/autoscaler/panic_mode", + "knative.dev/serving/revision/request_count", + "knative.dev/serving/revision/request_latencies", + ) +) diff --git a/vendor/github.com/knative/pkg/metrics/monitored_resources.go b/vendor/github.com/knative/pkg/metrics/monitored_resources.go new file mode 100644 index 000000000..295fb778f --- /dev/null +++ b/vendor/github.com/knative/pkg/metrics/monitored_resources.go @@ -0,0 +1,53 @@ +/* +Copyright 2018 The Knative Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
+*/ + +package metrics + +import ( + "github.com/knative/pkg/metrics/metricskey" +) + +type gcpMetadata struct { + project string + location string + cluster string +} + +type KnativeRevision struct { + Project string + Location string + ClusterName string + NamespaceName string + ServiceName string + ConfigurationName string + RevisionName string +} + +func (kr *KnativeRevision) MonitoredResource() (resType string, labels map[string]string) { + labels = map[string]string{ + metricskey.LabelProject: kr.Project, + metricskey.LabelLocation: kr.Location, + metricskey.LabelClusterName: kr.ClusterName, + metricskey.LabelNamespaceName: kr.NamespaceName, + metricskey.LabelServiceName: kr.ServiceName, + metricskey.LabelConfigurationName: kr.ConfigurationName, + metricskey.LabelRevisionName: kr.RevisionName, + } + return "knative_revision", labels +} + +type Global struct{} + +func (g *Global) MonitoredResource() (resType string, labels map[string]string) { + return "global", nil +} diff --git a/vendor/github.com/knative/pkg/metrics/prometheus_exporter.go b/vendor/github.com/knative/pkg/metrics/prometheus_exporter.go new file mode 100644 index 000000000..c3c0d55d0 --- /dev/null +++ b/vendor/github.com/knative/pkg/metrics/prometheus_exporter.go @@ -0,0 +1,74 @@ +/* +Copyright 2019 The Knative Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "fmt" + "net/http" + "sync" + + "go.opencensus.io/exporter/prometheus" + "go.opencensus.io/stats/view" + "go.uber.org/zap" +) + +var ( + curPromSrv *http.Server + curPromSrvMux sync.Mutex +) + +func newPrometheusExporter(config *metricsConfig, logger *zap.SugaredLogger) (view.Exporter, error) { + e, err := prometheus.NewExporter(prometheus.Options{Namespace: config.component}) + if err != nil { + logger.Errorw("Failed to create the Prometheus exporter.", zap.Error(err)) + return nil, err + } + logger.Infof("Created Opencensus Prometheus exporter with config: %v. 
Starting the server for the Prometheus exporter.", config) + // Start the server for Prometheus scraping + go func() { + srv := startNewPromSrv(e, config.prometheusPort) + srv.ListenAndServe() + }() + return e, nil +} + +func getCurPromSrv() *http.Server { + curPromSrvMux.Lock() + defer curPromSrvMux.Unlock() + return curPromSrv +} + +func resetCurPromSrv() { + curPromSrvMux.Lock() + defer curPromSrvMux.Unlock() + if curPromSrv != nil { + curPromSrv.Close() + curPromSrv = nil + } +} + +func startNewPromSrv(e *prometheus.Exporter, port int) *http.Server { + sm := http.NewServeMux() + sm.Handle("/metrics", e) + curPromSrvMux.Lock() + defer curPromSrvMux.Unlock() + if curPromSrv != nil { + curPromSrv.Close() + } + curPromSrv = &http.Server{ + Addr: fmt.Sprintf(":%v", port), + Handler: sm, + } + return curPromSrv +} diff --git a/vendor/github.com/knative/pkg/metrics/record.go b/vendor/github.com/knative/pkg/metrics/record.go new file mode 100644 index 000000000..98a007cfe --- /dev/null +++ b/vendor/github.com/knative/pkg/metrics/record.go @@ -0,0 +1,56 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "context" + "path" + + "github.com/knative/pkg/metrics/metricskey" + "go.opencensus.io/stats" +) + +// Record decides whether to record one measurement via OpenCensus based on the +// following conditions: +// 1) No package-level metrics config. In this case it just proxies to OpenCensus +// based on the assumption that users expect the metrics to be recorded when +// they call this function. Users must ensure the metrics config is set before +// using this function to get the expected behavior. +// 2) The backend is not Stackdriver. +// 3) The backend is Stackdriver and it is allowed to use custom metrics. +// 4) The backend is Stackdriver and the metric is a "knative_revision" built-in metric. +func Record(ctx context.Context, ms stats.Measurement) { + mc := getCurMetricsConfig() + + // Condition 1) + if mc == nil { + stats.Record(ctx, ms) + return + } + + // Condition 2) and 3) + if !mc.isStackdriverBackend || mc.allowStackdriverCustomMetrics { + stats.Record(ctx, ms) + return + } + + // Condition 4) + metricType := path.Join(mc.stackdriverMetricTypePrefix, ms.Measure().Name()) + if metricskey.KnativeRevisionMetrics.Has(metricType) { + stats.Record(ctx, ms) + } +}
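Record is the drop-in replacement for direct stats.Record calls in components that may run against either backend. A brief usage sketch (the measure name below is illustrative, not part of this patch):

    // Sketch: routing a measurement through the gating logic in Record.
    var reconcileCount = stats.Int64("reconcile_count", "Number of reconciles", stats.UnitDimensionless)

    func reportReconcile(ctx context.Context) {
        // With no package-level config this proxies straight to stats.Record,
        // so unit tests need no exporter; with a Stackdriver config it filters
        // out metrics that are neither built-in nor allowed as custom metrics.
        metrics.Record(ctx, reconcileCount.M(1))
    }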
diff --git a/vendor/github.com/knative/pkg/metrics/stackdriver_exporter.go b/vendor/github.com/knative/pkg/metrics/stackdriver_exporter.go new file mode 100644 index 000000000..60bf1d5f3 --- /dev/null +++ b/vendor/github.com/knative/pkg/metrics/stackdriver_exporter.go @@ -0,0 +1,141 @@ +/* +Copyright 2019 The Knative Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "path" + + "contrib.go.opencensus.io/exporter/stackdriver" + "contrib.go.opencensus.io/exporter/stackdriver/monitoredresource" + "github.com/knative/pkg/metrics/metricskey" + "go.opencensus.io/stats/view" + "go.opencensus.io/tag" + "go.uber.org/zap" +) + +// customMetricTypePrefix is the metric type prefix for metrics that are not +// supported by resource type knative_revision. +// See: https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors#MetricDescriptor +const customMetricTypePrefix = "custom.googleapis.com/knative.dev" + +var ( + // gcpMetadataFunc is the function used to fetch GCP metadata. + // In production usage, this is always set to the function retrieveGCPMetadata. + // In unit tests this is set to a fake one to avoid calling the GCP metadata + // service. + gcpMetadataFunc func() *gcpMetadata + + // newStackdriverExporterFunc is the function used to create a new Stackdriver + // exporter. + // In production usage, this is always set to the function newOpencensusSDExporter. + // In unit tests this is set to a fake one to avoid calling the actual Google API + // service. + newStackdriverExporterFunc func(stackdriver.Options) (view.Exporter, error) +) + +func init() { + // Set gcpMetadataFunc to call the GCP metadata service. + gcpMetadataFunc = retrieveGCPMetadata + + newStackdriverExporterFunc = newOpencensusSDExporter +} + +func newOpencensusSDExporter(o stackdriver.Options) (view.Exporter, error) { + return stackdriver.NewExporter(o) +} + +func newStackdriverExporter(config *metricsConfig, logger *zap.SugaredLogger) (view.Exporter, error) { + gm := gcpMetadataFunc() + mtf := getMetricTypeFunc(config.stackdriverMetricTypePrefix, config.stackdriverCustomMetricTypePrefix) + e, err := newStackdriverExporterFunc(stackdriver.Options{ + ProjectID: config.stackdriverProjectID, + GetMetricDisplayName: mtf, // Use metric type for display name for custom metrics. No impact on built-in metrics. + GetMetricType: mtf, + GetMonitoredResource: getMonitoredResourceFunc(config.stackdriverMetricTypePrefix, gm), + DefaultMonitoringLabels: &stackdriver.Labels{}, + }) + if err != nil { + logger.Errorw("Failed to create the Stackdriver exporter.", zap.Error(err)) + return nil, err + } + logger.Infof("Created Opencensus Stackdriver exporter with config %v", config) + return e, nil +} + +func getMonitoredResourceFunc(metricTypePrefix string, gm *gcpMetadata) func(v *view.View, tags []tag.Tag) ([]tag.Tag, monitoredresource.Interface) { + return func(view *view.View, tags []tag.Tag) ([]tag.Tag, monitoredresource.Interface) { + metricType := path.Join(metricTypePrefix, view.Measure.Name()) + if metricskey.KnativeRevisionMetrics.Has(metricType) { + return getKnativeRevisionMonitoredResource(view, tags, gm) + } + // Metric not supported by knative_revision; use the "global" resource type. + return getGlobalMonitoredResource(view, tags) + } +}
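The two package-private function variables above exist purely as test seams, as their comments note. A sketch of how an in-package unit test might stub them (the test name and fakeExporter double are illustrative assumptions):

    // Sketch (in package metrics): avoid real GCP calls in tests.
    func TestStackdriverExporterWithFakeMetadata(t *testing.T) {
        gcpMetadataFunc = func() *gcpMetadata {
            return &gcpMetadata{project: "p", location: "l", cluster: "c"}
        }
        newStackdriverExporterFunc = func(o stackdriver.Options) (view.Exporter, error) {
            // Assert on o.ProjectID etc. here instead of hitting Google APIs.
            return &fakeExporter{}, nil // fakeExporter is a hypothetical test double.
        }
        // ... call newStackdriverExporter(config, logger) and verify the result ...
    }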
+func getKnativeRevisionMonitoredResource( + v *view.View, tags []tag.Tag, gm *gcpMetadata) ([]tag.Tag, monitoredresource.Interface) { + tagsMap := getTagsMap(tags) + kr := &KnativeRevision{ + // The first three resource labels are from metadata. + Project: gm.project, + Location: gm.location, + ClusterName: gm.cluster, + // The remaining resource labels come from the metric labels. + NamespaceName: valueOrUnknown(metricskey.LabelNamespaceName, tagsMap), + ServiceName: valueOrUnknown(metricskey.LabelServiceName, tagsMap), + ConfigurationName: valueOrUnknown(metricskey.LabelConfigurationName, tagsMap), + RevisionName: valueOrUnknown(metricskey.LabelRevisionName, tagsMap), + } + + var newTags []tag.Tag + for _, t := range tags { + // Keep the metric labels that are not resource labels. + if !metricskey.KnativeRevisionLabels.Has(t.Key.Name()) { + newTags = append(newTags, t) + } + } + + return newTags, kr +} + +func getTagsMap(tags []tag.Tag) map[string]string { + tagsMap := map[string]string{} + for _, t := range tags { + tagsMap[t.Key.Name()] = t.Value + } + return tagsMap +} + +func valueOrUnknown(key string, tagsMap map[string]string) string { + if value, ok := tagsMap[key]; ok { + return value + } + return metricskey.ValueUnknown +} + +func getGlobalMonitoredResource(v *view.View, tags []tag.Tag) ([]tag.Tag, monitoredresource.Interface) { + return tags, &Global{} +} + +func getMetricTypeFunc(metricTypePrefix, customMetricTypePrefix string) func(view *view.View) string { + return func(view *view.View) string { + metricType := path.Join(metricTypePrefix, view.Measure.Name()) + if metricskey.KnativeRevisionMetrics.Has(metricType) { + return metricType + } + // Metric not supported by knative_revision; use the custom domain. + return path.Join(customMetricTypePrefix, view.Measure.Name()) + } +} diff --git a/vendor/github.com/knative/pkg/metrics/testing/config.go b/vendor/github.com/knative/pkg/metrics/testing/config.go new file mode 100644 index 000000000..5b19816d7 --- /dev/null +++ b/vendor/github.com/knative/pkg/metrics/testing/config.go @@ -0,0 +1,27 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testing + +import ( + "os" + + "github.com/knative/pkg/metrics" +) + +func init() { + os.Setenv(metrics.DomainEnv, "knative.dev/testing") +} diff --git a/vendor/github.com/knative/pkg/ptr/doc.go b/vendor/github.com/knative/pkg/ptr/doc.go new file mode 100644 index 000000000..1ebcea284 --- /dev/null +++ b/vendor/github.com/knative/pkg/ptr/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package ptr holds utilities for taking pointer references to values.
+package ptr diff --git a/vendor/github.com/knative/pkg/ptr/ptr.go b/vendor/github.com/knative/pkg/ptr/ptr.go new file mode 100644 index 000000000..356464733 --- /dev/null +++ b/vendor/github.com/knative/pkg/ptr/ptr.go @@ -0,0 +1,41 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package ptr + +// Int32 is a helper for turning integers into pointers for use in +// API types that want *int32. +func Int32(i int32) *int32 { + return &i +} + +// Int64 is a helper for turning integers into pointers for use in +// API types that want *int64. +func Int64(i int64) *int64 { + return &i +} + +// Bool is a helper for turning bools into pointers for use in +// API types that want *bool. +func Bool(b bool) *bool { + return &b +} + +// String is a helper for turning strings into pointers for use in +// API types that want *string. +func String(s string) *string { + return &s +} diff --git a/vendor/github.com/knative/pkg/reconciler/testing/actions.go b/vendor/github.com/knative/pkg/reconciler/testing/actions.go new file mode 100644 index 000000000..7dc967b92 --- /dev/null +++ b/vendor/github.com/knative/pkg/reconciler/testing/actions.go @@ -0,0 +1,76 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testing + +import ( + "fmt" + + clientgotesting "k8s.io/client-go/testing" +) + +// Actions stores list of Actions recorded by the reactors. +type Actions struct { + Gets []clientgotesting.GetAction + Creates []clientgotesting.CreateAction + Updates []clientgotesting.UpdateAction + Deletes []clientgotesting.DeleteAction + DeleteCollections []clientgotesting.DeleteCollectionAction + Patches []clientgotesting.PatchAction +} + +// ActionRecorder contains list of K8s request actions. +type ActionRecorder interface { + Actions() []clientgotesting.Action +} + +// ActionRecorderList is a list of ActionRecorder objects. +type ActionRecorderList []ActionRecorder + +// ActionsByVerb fills in Actions objects, sorting the actions +// by verb. 
+func (l ActionRecorderList) ActionsByVerb() (Actions, error) { + var a Actions + + for _, recorder := range l { + for _, action := range recorder.Actions() { + switch action.GetVerb() { + case "get": + a.Gets = append(a.Gets, + action.(clientgotesting.GetAction)) + case "create": + a.Creates = append(a.Creates, + action.(clientgotesting.CreateAction)) + case "update": + a.Updates = append(a.Updates, + action.(clientgotesting.UpdateAction)) + case "delete": + a.Deletes = append(a.Deletes, + action.(clientgotesting.DeleteAction)) + case "delete-collection": + a.DeleteCollections = append(a.DeleteCollections, + action.(clientgotesting.DeleteCollectionAction)) + case "patch": + a.Patches = append(a.Patches, + action.(clientgotesting.PatchAction)) + case "list", "watch": // avoid 'unexpected verb list/watch' error + default: + return a, fmt.Errorf("unexpected verb %v: %+v", action.GetVerb(), action) + } + } + } + return a, nil +} diff --git a/vendor/github.com/knative/pkg/reconciler/testing/clock.go b/vendor/github.com/knative/pkg/reconciler/testing/clock.go new file mode 100644 index 000000000..44ba77cdb --- /dev/null +++ b/vendor/github.com/knative/pkg/reconciler/testing/clock.go @@ -0,0 +1,29 @@ +/* +Copyright 2019 The Knative Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testing + +import ( + "time" +) + +type FakeClock struct { + Time time.Time +} + +func (c FakeClock) Now() time.Time { + return c.Time +} diff --git a/vendor/k8s.io/code-generator/_examples/MixedCase/listers/example/v1/expansion_generated.go b/vendor/github.com/knative/pkg/reconciler/testing/context.go similarity index 50% rename from vendor/k8s.io/code-generator/_examples/MixedCase/listers/example/v1/expansion_generated.go rename to vendor/github.com/knative/pkg/reconciler/testing/context.go index 2681a29f4..f9692c810 100644 --- a/vendor/k8s.io/code-generator/_examples/MixedCase/listers/example/v1/expansion_generated.go +++ b/vendor/github.com/knative/pkg/reconciler/testing/context.go @@ -1,5 +1,5 @@ /* -Copyright The Kubernetes Authors. +Copyright 2019 The Knative Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,18 +14,22 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by lister-gen. DO NOT EDIT. +package testing -package v1 +import ( + "context" + "testing" -// ClusterTestTypeListerExpansion allows custom methods to be added to -// ClusterTestTypeLister. -type ClusterTestTypeListerExpansion interface{} + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/record" -// TestTypeListerExpansion allows custom methods to be added to -// TestTypeLister. -type TestTypeListerExpansion interface{} + "github.com/knative/pkg/controller" + "github.com/knative/pkg/injection" + logtesting "github.com/knative/pkg/logging/testing" +) -// TestTypeNamespaceListerExpansion allows custom methods to be added to -// TestTypeNamespaceLister. 
-type TestTypeNamespaceListerExpansion interface{} +func SetupFakeContext(t *testing.T) (context.Context, []controller.Informer) { + ctx := logtesting.TestContextWithLogger(t) + ctx = controller.WithEventRecorder(ctx, record.NewFakeRecorder(1000)) + return injection.Fake.SetupInformers(ctx, &rest.Config{}) +} diff --git a/vendor/github.com/knative/pkg/reconciler/testing/events.go b/vendor/github.com/knative/pkg/reconciler/testing/events.go new file mode 100644 index 000000000..498b04f7e --- /dev/null +++ b/vendor/github.com/knative/pkg/reconciler/testing/events.go @@ -0,0 +1,44 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testing + +import ( + "fmt" + + "k8s.io/client-go/tools/record" +) + +// EventList exports all events seen during reconciliation through a fake event recorder +// whose event channel has a buffer of the given size. +type EventList struct { + Recorder *record.FakeRecorder +} + +// Events iterates over the events received on the fake event recorder's channel and returns them all. +func (l EventList) Events() []string { + close(l.Recorder.Events) + events := []string{} + for e := range l.Recorder.Events { + events = append(events, e) + } + return events +} + +// Eventf formats as FakeRecorder does. +func Eventf(eventType, reason, messageFmt string, args ...interface{}) string { + return fmt.Sprintf(eventType+" "+reason+" "+messageFmt, args...) +}
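Eventf mirrors FakeRecorder's formatting, which is what makes string-equality assertions against EventList workable. A sketch of that pairing (assumed test code; pod and t are placeholders supplied by the enclosing test):

    // Sketch: pairing EventList with Eventf in an assertion.
    recorder := record.NewFakeRecorder(10)
    el := EventList{Recorder: recorder}
    recorder.Eventf(pod, corev1.EventTypeNormal, "Synced", "synced %q", "default/foo")

    want := Eventf(corev1.EventTypeNormal, "Synced", "synced %q", "default/foo")
    // Events closes and drains the recorder's channel, so call it only once.
    if got := el.Events(); len(got) != 1 || got[0] != want {
        t.Errorf("Events() = %v, want [%s]", got, want)
    }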
diff --git a/vendor/github.com/knative/pkg/reconciler/testing/generate_name_reactor.go b/vendor/github.com/knative/pkg/reconciler/testing/generate_name_reactor.go new file mode 100644 index 000000000..52ac44951 --- /dev/null +++ b/vendor/github.com/knative/pkg/reconciler/testing/generate_name_reactor.go @@ -0,0 +1,86 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testing + +import ( + "fmt" + "sync/atomic" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + clientgotesting "k8s.io/client-go/testing" +) + +// GenerateNameReactor will simulate the k8s API server +// and generate a name for resources whose metadata.generateName +// property is set. This happens only for CreateAction types. +// +// This generator is deterministic (unlike k8s) and uses a global +// counter to help make test names predictable +type GenerateNameReactor struct { + count int64 +} + +// Handles contains all the logic to generate the name and mutate +// the create action object. +// +// This is a hack: 'React' is passed a DeepCopy of the action, so this +// is the only opportunity to mutate the action in the ReactionChain +// while still letting the remaining reactors execute. +// +// We should push changes upstream to client-go to help us with +// mocking +func (r *GenerateNameReactor) Handles(action clientgotesting.Action) bool { + create, ok := action.(clientgotesting.CreateAction) + + if !ok { + return false + } + + objMeta, err := meta.Accessor(create.GetObject()) + + if err != nil { + return false + } + + if objMeta.GetName() != "" { + return false + } + + if objMeta.GetGenerateName() == "" { + return false + } + + val := atomic.AddInt64(&r.count, 1) + + objMeta.SetName(fmt.Sprintf("%s%05d", objMeta.GetGenerateName(), val)) + + return false +} + +// React is a no-op function. +func (r *GenerateNameReactor) React(action clientgotesting.Action) (handled bool, ret runtime.Object, err error) { + return false, nil, nil +} + +var _ clientgotesting.Reactor = (*GenerateNameReactor)(nil) + +// PrependGenerateNameReactor will instrument a client-go testing Fake +// with a reactor that simulates 'generateName' functionality +func PrependGenerateNameReactor(f *clientgotesting.Fake) { + f.ReactionChain = append([]clientgotesting.Reactor{&GenerateNameReactor{}}, f.ReactionChain...) +}
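A sketch of how a test might install this reactor on a client-go fake clientset (the ConfigMap and the 2019-era Create signature are assumptions for illustration):

    // Sketch: deterministic generateName on a fake clientset.
    client := fake.NewSimpleClientset() // k8s.io/client-go/kubernetes/fake
    PrependGenerateNameReactor(&client.Fake)

    cm := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{GenerateName: "cfg-"}}
    created, _ := client.CoreV1().ConfigMaps("default").Create(cm)
    // created.Name is now deterministic: "cfg-00001", then "cfg-00002", ...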
diff --git a/vendor/github.com/knative/pkg/reconciler/testing/hooks.go b/vendor/github.com/knative/pkg/reconciler/testing/hooks.go new file mode 100644 index 000000000..cde3d7d21 --- /dev/null +++ b/vendor/github.com/knative/pkg/reconciler/testing/hooks.go @@ -0,0 +1,183 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package testing includes utilities for testing controllers. +package testing + +import ( + "errors" + "sync" + "sync/atomic" + "time" + + "k8s.io/apimachinery/pkg/runtime" + kubetesting "k8s.io/client-go/testing" +) + +// HookResult is the return value of hook functions. +type HookResult bool + +const ( + // HookComplete indicates the hook function completed, and WaitForHooks should + // not wait for it. + HookComplete HookResult = true + // HookIncomplete indicates the hook function is incomplete, and WaitForHooks + // should wait for it to complete. + HookIncomplete HookResult = false +) + +/* +CreateHookFunc is a function for handling a Create hook. Its runtime.Object +parameter will be the Kubernetes resource created. The resource can be cast +to its actual type like this: + + pod := obj.(*v1.Pod) + +A return value of true marks the hook as completed. Returning false allows +the hook to run again when the next resource of the requested type is +created. +*/ +type CreateHookFunc func(runtime.Object) HookResult + +/* +UpdateHookFunc is a function for handling an update hook. Its runtime.Object +parameter will be the Kubernetes resource updated. The resource can be cast +to its actual type like this: + + pod := obj.(*v1.Pod) + +A return value of true marks the hook as completed. Returning false allows +the hook to run again when the next resource of the requested type is +updated. +*/ +type UpdateHookFunc func(runtime.Object) HookResult + +/* +DeleteHookFunc is a function for handling a delete hook. Its name parameter will +be the name of the resource deleted. The resource itself is not available to +the reactor. +*/ +type DeleteHookFunc func(string) HookResult + +/* +Hooks is a utility struct that simplifies controller testing with fake +clients. A Hooks struct allows attaching hook functions to actions (create, +update, delete) on a specified resource type within a fake client and ensuring +that all hooks complete in a timely manner. +*/ +type Hooks struct { + completionCh chan int32 + completionIndex int32 + + // Denotes whether the registered hooks should no longer be called + // because they have already been waited upon. + // This uses a Mutex over a channel to guarantee that after WaitForHooks + // returns no hooked functions will be called. + closed bool + mutex sync.RWMutex +} + +// NewHooks returns a Hooks struct that can be used to attach hooks to one or +// more fake clients and wait for all hooks to complete. +// TODO(grantr): Allow validating that a hook never fires +func NewHooks() *Hooks { + return &Hooks{ + completionCh: make(chan int32, 100), + completionIndex: -1, + } +} + +// OnCreate attaches a create hook to the given Fake. The hook function is +// executed every time a resource of the given type is created. +func (h *Hooks) OnCreate(fake *kubetesting.Fake, resource string, rf CreateHookFunc) { + index := atomic.AddInt32(&h.completionIndex, 1) + fake.PrependReactor("create", resource, func(a kubetesting.Action) (bool, runtime.Object, error) { + obj := a.(kubetesting.CreateActionImpl).Object + + h.mutex.RLock() + defer h.mutex.RUnlock() + if !h.closed && rf(obj) == HookComplete { + h.completionCh <- index + } + return false, nil, nil + }) +} + +// OnUpdate attaches an update hook to the given Fake. The hook function is +// executed every time a resource of the given type is updated. +func (h *Hooks) OnUpdate(fake *kubetesting.Fake, resource string, rf UpdateHookFunc) { + index := atomic.AddInt32(&h.completionIndex, 1) + fake.PrependReactor("update", resource, func(a kubetesting.Action) (bool, runtime.Object, error) { + obj := a.(kubetesting.UpdateActionImpl).Object + + h.mutex.RLock() + defer h.mutex.RUnlock() + if !h.closed && rf(obj) == HookComplete { + h.completionCh <- index + } + return false, nil, nil + }) +} + +// OnDelete attaches a delete hook to the given Fake. The hook function is +// executed every time a resource of the given type is deleted. +func (h *Hooks) OnDelete(fake *kubetesting.Fake, resource string, rf DeleteHookFunc) { + index := atomic.AddInt32(&h.completionIndex, 1) + fake.PrependReactor("delete", resource, func(a kubetesting.Action) (bool, runtime.Object, error) { + name := a.(kubetesting.DeleteActionImpl).Name + + h.mutex.RLock() + defer h.mutex.RUnlock() + if !h.closed && rf(name) == HookComplete { + h.completionCh <- index + } + return false, nil, nil + }) +} + +// WaitForHooks waits until all attached hooks have returned true at least once.
+// If the given timeout expires before that happens, an error is returned. +// The registered actions will no longer be executed after WaitForHooks has +// returned. +func (h *Hooks) WaitForHooks(timeout time.Duration) error { + defer func() { + h.mutex.Lock() + defer h.mutex.Unlock() + h.closed = true + }() + + ci := int(atomic.LoadInt32(&h.completionIndex)) + if ci == -1 { + return nil + } + + // Convert index to count. + ci++ + timer := time.After(timeout) + hookCompletions := map[int32]HookResult{} + for { + select { + case i := <-h.completionCh: + hookCompletions[i] = HookComplete + if len(hookCompletions) == ci { + atomic.StoreInt32(&h.completionIndex, -1) + return nil + } + case <-timer: + return errors.New("timed out waiting for hooks to complete") + } + } +} diff --git a/vendor/github.com/knative/pkg/reconciler/testing/reactions.go b/vendor/github.com/knative/pkg/reconciler/testing/reactions.go new file mode 100644 index 000000000..9ce0e4044 --- /dev/null +++ b/vendor/github.com/knative/pkg/reconciler/testing/reactions.go @@ -0,0 +1,66 @@ +/* +Copyright 2019 The Knative Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testing + +import ( + "context" + "fmt" + + "k8s.io/apimachinery/pkg/runtime" + clientgotesting "k8s.io/client-go/testing" + + "github.com/knative/pkg/apis" +) + +// InduceFailure is used in conjunction with TableTest's WithReactors field. +// Tests that want to induce a failure in a row of a TableTest would add: +// WithReactors: []clientgotesting.ReactionFunc{ +// // Makes calls to create revisions return an error. +// InduceFailure("create", "revisions"), +// }, +func InduceFailure(verb, resource string) clientgotesting.ReactionFunc { + return func(action clientgotesting.Action) (handled bool, ret runtime.Object, err error) { + if !action.Matches(verb, resource) { + return false, nil, nil + } + return true, nil, fmt.Errorf("inducing failure for %s %s", action.GetVerb(), action.GetResource().Resource) + } +} + +func ValidateCreates(ctx context.Context, action clientgotesting.Action) (handled bool, ret runtime.Object, err error) { + got := action.(clientgotesting.CreateAction).GetObject() + obj, ok := got.(apis.Validatable) + if !ok { + return false, nil, nil + } + if err := obj.Validate(ctx); err != nil { + return true, nil, err + } + return false, nil, nil +} + +func ValidateUpdates(ctx context.Context, action clientgotesting.Action) (handled bool, ret runtime.Object, err error) { + got := action.(clientgotesting.UpdateAction).GetObject() + obj, ok := got.(apis.Validatable) + if !ok { + return false, nil, nil + } + if err := obj.Validate(ctx); err != nil { + return true, nil, err + } + return false, nil, nil +} diff --git a/vendor/github.com/knative/pkg/reconciler/testing/sorter.go b/vendor/github.com/knative/pkg/reconciler/testing/sorter.go new file mode 100644 index 000000000..27061b69d --- /dev/null +++ b/vendor/github.com/knative/pkg/reconciler/testing/sorter.go @@ -0,0 +1,93 @@ +/* +Copyright 2019 The Knative Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package testing + +import ( + "fmt" + "reflect" + + "k8s.io/apimachinery/pkg/runtime" + util_runtime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/client-go/tools/cache" +) + +func NewObjectSorter(scheme *runtime.Scheme) ObjectSorter { + cache := make(map[reflect.Type]cache.Indexer) + + for _, v := range scheme.AllKnownTypes() { + cache[v] = emptyIndexer() + } + + ls := ObjectSorter{ + cache: cache, + } + + return ls +} + +type ObjectSorter struct { + cache map[reflect.Type]cache.Indexer +} + +func (o *ObjectSorter) AddObjects(objs ...runtime.Object) { + for _, obj := range objs { + t := reflect.TypeOf(obj).Elem() + indexer, ok := o.cache[t] + if !ok { + panic(fmt.Sprintf("Unrecognized type %T", obj)) + } + indexer.Add(obj) + } +} + +func (o *ObjectSorter) ObjectsForScheme(scheme *runtime.Scheme) []runtime.Object { + var objs []runtime.Object + + for _, t := range scheme.AllKnownTypes() { + indexer := o.cache[t] + for _, item := range indexer.List() { + objs = append(objs, item.(runtime.Object)) + } + } + + return objs +} + +func (o *ObjectSorter) ObjectsForSchemeFunc(funcs ...func(scheme *runtime.Scheme) error) []runtime.Object { + scheme := runtime.NewScheme() + + for _, addToScheme := range funcs { + util_runtime.Must(addToScheme(scheme)) + } + + return o.ObjectsForScheme(scheme) +} + +func (o *ObjectSorter) IndexerForObjectType(obj runtime.Object) cache.Indexer { + objType := reflect.TypeOf(obj).Elem() + + indexer, ok := o.cache[objType] + + if !ok { + panic(fmt.Sprintf("indexer for type %v doesn't exist", objType.Name())) + } + + return indexer +} + +func emptyIndexer() cache.Indexer { + return cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{}) +} diff --git a/vendor/github.com/knative/pkg/reconciler/testing/stats.go b/vendor/github.com/knative/pkg/reconciler/testing/stats.go new file mode 100644 index 000000000..1d389a15d --- /dev/null +++ b/vendor/github.com/knative/pkg/reconciler/testing/stats.go @@ -0,0 +1,40 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package testing + +import ( + "fmt" + "time" +) + +// FakeStatsReporter is a fake implementation of StatsReporter +type FakeStatsReporter struct { + servicesReady map[string]int +} + +func (r *FakeStatsReporter) ReportServiceReady(namespace, service string, d time.Duration) error { + key := fmt.Sprintf("%s/%s", namespace, service) + if r.servicesReady == nil { + r.servicesReady = make(map[string]int) + } + r.servicesReady[key]++ + return nil +} + +func (r *FakeStatsReporter) GetServiceReadyStats() map[string]int { + return r.servicesReady +} diff --git a/vendor/github.com/knative/pkg/reconciler/testing/table.go b/vendor/github.com/knative/pkg/reconciler/testing/table.go new file mode 100644 index 000000000..2d445cd06 --- /dev/null +++ b/vendor/github.com/knative/pkg/reconciler/testing/table.go @@ -0,0 +1,365 @@ +/* +Copyright 2019 The Knative Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testing + +import ( + "context" + "path" + "reflect" + "strings" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "github.com/knative/pkg/controller" + "github.com/knative/pkg/kmeta" + _ "github.com/knative/pkg/system/testing" // Setup system.Namespace() + "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/apimachinery/pkg/runtime" + clientgotesting "k8s.io/client-go/testing" + "k8s.io/client-go/tools/cache" +) + +// TableRow holds a single row of our table test. +type TableRow struct { + // Name is a descriptive name for this test suitable as a first argument to t.Run() + Name string + + // Ctx is the context to pass to Reconcile. Defaults to context.Background() + Ctx context.Context + + // Objects holds the state of the world at the onset of reconciliation. + Objects []runtime.Object + + // Key is the parameter to reconciliation. + // This has the form "namespace/name". + Key string + + // WantErr holds whether we should expect the reconciliation to result in an error. + WantErr bool + + // WantCreates holds the ordered list of Create calls we expect during reconciliation. + WantCreates []runtime.Object + + // WantUpdates holds the ordered list of Update calls we expect during reconciliation. + WantUpdates []clientgotesting.UpdateActionImpl + + // WantStatusUpdates holds the ordered list of Update calls, with `status` subresource set, + // that we expect during reconciliation. + WantStatusUpdates []clientgotesting.UpdateActionImpl + + // WantDeletes holds the ordered list of Delete calls we expect during reconciliation. + WantDeletes []clientgotesting.DeleteActionImpl + + // WantDeleteCollections holds the ordered list of DeleteCollection calls we expect during reconciliation. + WantDeleteCollections []clientgotesting.DeleteCollectionActionImpl + + // WantPatches holds the ordered list of Patch calls we expect during reconciliation. + WantPatches []clientgotesting.PatchActionImpl + + // WantEvents holds the ordered list of events we expect during reconciliation. 
+ WantEvents []string + + // WantServiceReadyStats holds the ServiceReady stats we expect during reconciliation. + WantServiceReadyStats map[string]int + + // WithReactors is a set of functions that are installed as Reactors for the execution + // of this row of the table-driven test. + WithReactors []clientgotesting.ReactionFunc + + // For cluster-scoped resources like ClusterIngress, it does not have to be + // in the same namespace as its child resources. + SkipNamespaceValidation bool +} + +func objKey(o runtime.Object) string { + on := o.(kmeta.Accessor) + // namespace + name is not unique, and the tests don't populate k8s kind + // information, so use Go's type name as part of the key. + return path.Join(reflect.TypeOf(o).String(), on.GetNamespace(), on.GetName()) +} + +// Factory returns a Reconciler.Interface to perform reconciliation in a table test, +// ActionRecorderList/EventList to capture k8s actions/events produced during reconciliation +// and FakeStatsReporter to capture stats. +type Factory func(*testing.T, *TableRow) (controller.Reconciler, ActionRecorderList, EventList, *FakeStatsReporter) + +// Test executes the single table test. +func (r *TableRow) Test(t *testing.T, factory Factory) { + t.Helper() + c, recorderList, eventList, statsReporter := factory(t, r) + + // Set context to not be nil. + ctx := r.Ctx + if ctx == nil { + ctx = context.Background() + } + + // Run the Reconcile we're testing. + if err := c.Reconcile(ctx, r.Key); (err != nil) != r.WantErr { + t.Errorf("Reconcile() error = %v, WantErr %v", err, r.WantErr) + } + + expectedNamespace, _, _ := cache.SplitMetaNamespaceKey(r.Key) + + actions, err := recorderList.ActionsByVerb() + if err != nil { + t.Errorf("Error capturing actions by verb: %q", err) + } + + // Previous state is used to diff against the expected resource state for update requests that were missed. + objPrevState := map[string]runtime.Object{} + for _, o := range r.Objects { + objPrevState[objKey(o)] = o + } + + for i, want := range r.WantCreates { + if i >= len(actions.Creates) { + t.Errorf("Missing create: %#v", want) + continue + } + got := actions.Creates[i] + obj := got.GetObject() + objPrevState[objKey(obj)] = obj + + if !r.SkipNamespaceValidation && got.GetNamespace() != expectedNamespace { + t.Errorf("Unexpected action[%d]: %#v", i, got) + } + + if diff := cmp.Diff(want, obj, ignoreLastTransitionTime, safeDeployDiff, cmpopts.EquateEmpty()); diff != "" { + t.Errorf("Unexpected create (-want, +got): %s", diff) + } + } + if got, want := len(actions.Creates), len(r.WantCreates); got > want { + for _, extra := range actions.Creates[want:] { + t.Errorf("Extra create: %#v", extra.GetObject()) + } + } + + updates := filterUpdatesWithSubresource("", actions.Updates) + for i, want := range r.WantUpdates { + if i >= len(updates) { + wo := want.GetObject() + key := objKey(wo) + oldObj, ok := objPrevState[key] + if !ok { + t.Errorf("Object %s was never created: want: %#v", key, wo) + continue + } + t.Errorf("Missing update for %s (-want, +prevState): %s", key, + cmp.Diff(wo, oldObj, ignoreLastTransitionTime, safeDeployDiff, cmpopts.EquateEmpty())) + continue + } + + if want.GetSubresource() != "" { + t.Errorf("Expectation was invalid - it should not include a subresource: %#v", want) + } + + got := updates[i].GetObject() + + // Update the object state.
+ objPrevState[objKey(got)] = got + + if diff := cmp.Diff(want.GetObject(), got, ignoreLastTransitionTime, safeDeployDiff, cmpopts.EquateEmpty()); diff != "" { + t.Errorf("Unexpected update (-want, +got): %s", diff) + } + } + if got, want := len(updates), len(r.WantUpdates); got > want { + for _, extra := range updates[want:] { + t.Errorf("Extra update: %#v", extra.GetObject()) + } + } + + // TODO(#2843): refactor. + statusUpdates := filterUpdatesWithSubresource("status", actions.Updates) + for i, want := range r.WantStatusUpdates { + if i >= len(statusUpdates) { + wo := want.GetObject() + key := objKey(wo) + oldObj, ok := objPrevState[key] + if !ok { + t.Errorf("Object %s was never created: want: %#v", key, wo) + continue + } + t.Errorf("Missing status update for %s (-want, +prevState): %s", key, + cmp.Diff(wo, oldObj, ignoreLastTransitionTime, safeDeployDiff, cmpopts.EquateEmpty())) + continue + } + + got := statusUpdates[i].GetObject() + + // Update the object state. + objPrevState[objKey(got)] = got + + if diff := cmp.Diff(want.GetObject(), got, ignoreLastTransitionTime, safeDeployDiff, cmpopts.EquateEmpty()); diff != "" { + t.Errorf("Unexpected status update (-want, +got): %s\nFull: %v", diff, got) + } + } + if got, want := len(statusUpdates), len(r.WantStatusUpdates); got > want { + for _, extra := range statusUpdates[want:] { + wo := extra.GetObject() + key := objKey(wo) + oldObj, ok := objPrevState[key] + if !ok { + t.Errorf("Object %s was never created: want: %#v", key, wo) + continue + } + t.Errorf("Extra status update for %s (-extra, +prevState): %s", key, + cmp.Diff(wo, oldObj, ignoreLastTransitionTime, safeDeployDiff, cmpopts.EquateEmpty())) + } + } + + if len(statusUpdates)+len(updates) != len(actions.Updates) { + var unexpected []runtime.Object + + for _, update := range actions.Updates { + if update.GetSubresource() != "status" && update.GetSubresource() != "" { + unexpected = append(unexpected, update.GetObject()) + } + } + + t.Errorf("Unexpected subresource updates occurred %#v", unexpected) + } + + for i, want := range r.WantDeletes { + if i >= len(actions.Deletes) { + t.Errorf("Missing delete: %#v", want) + continue + } + got := actions.Deletes[i] + if got.GetName() != want.GetName() { + t.Errorf("Unexpected delete[%d]: %#v", i, got) + } + if !r.SkipNamespaceValidation && got.GetNamespace() != expectedNamespace { + t.Errorf("Unexpected delete[%d]: %#v", i, got) + } + } + if got, want := len(actions.Deletes), len(r.WantDeletes); got > want { + for _, extra := range actions.Deletes[want:] { + t.Errorf("Extra delete: %s/%s", extra.GetNamespace(), extra.GetName()) + } + } + + for i, want := range r.WantDeleteCollections { + if i >= len(actions.DeleteCollections) { + t.Errorf("Missing delete-collection: %#v", want) + continue + } + got := actions.DeleteCollections[i] + if got, want := got.GetListRestrictions().Labels, want.GetListRestrictions().Labels; (got != nil) != (want != nil) || got.String() != want.String() { + t.Errorf("Unexpected delete-collection[%d].Labels = %v, wanted %v", i, got, want) + } + if got, want := got.GetListRestrictions().Fields, want.GetListRestrictions().Fields; (got != nil) != (want != nil) || got.String() != want.String() { + t.Errorf("Unexpected delete-collection[%d].Fields = %v, wanted %v", i, got, want) + } + if !r.SkipNamespaceValidation && got.GetNamespace() != expectedNamespace { + t.Errorf("Unexpected delete-collection[%d]: %#v, wanted %s", i, got, expectedNamespace) + } + } + if got, want := len(actions.DeleteCollections), 
len(r.WantDeleteCollections); got > want { + for _, extra := range actions.DeleteCollections[want:] { + t.Errorf("Extra delete-collection: %#v", extra) + } + } + + for i, want := range r.WantPatches { + if i >= len(actions.Patches) { + t.Errorf("Missing patch: %#v; raw: %s", want, string(want.GetPatch())) + continue + } + + got := actions.Patches[i] + if got.GetName() != want.GetName() { + t.Errorf("Unexpected patch[%d]: %#v", i, got) + } + if !r.SkipNamespaceValidation && got.GetNamespace() != expectedNamespace { + t.Errorf("Unexpected patch[%d]: %#v", i, got) + } + if diff := cmp.Diff(string(want.GetPatch()), string(got.GetPatch())); diff != "" { + t.Errorf("Unexpected patch(-want, +got): %s", diff) + } + } + if got, want := len(actions.Patches), len(r.WantPatches); got > want { + for _, extra := range actions.Patches[want:] { + t.Errorf("Extra patch: %#v; raw: %s", extra, string(extra.GetPatch())) + } + } + + gotEvents := eventList.Events() + for i, want := range r.WantEvents { + if i >= len(gotEvents) { + t.Errorf("Missing event: %s", want) + continue + } + + if diff := cmp.Diff(want, gotEvents[i]); diff != "" { + t.Errorf("unexpected event(-want, +got): %s", diff) + } + } + if got, want := len(gotEvents), len(r.WantEvents); got > want { + for _, extra := range gotEvents[want:] { + t.Errorf("Extra event: %s", extra) + } + } + + gotStats := statsReporter.GetServiceReadyStats() + if diff := cmp.Diff(r.WantServiceReadyStats, gotStats); diff != "" { + t.Errorf("Unexpected service ready stats (-want, +got): %s", diff) + } +} + +func filterUpdatesWithSubresource( + subresource string, + actions []clientgotesting.UpdateAction) (result []clientgotesting.UpdateAction) { + for _, action := range actions { + if action.GetSubresource() == subresource { + result = append(result, action) + } + } + return +} + +// TableTest represents a list of TableRow test instances. +type TableTest []TableRow + +// Test executes the whole suite of the table tests. +func (tt TableTest) Test(t *testing.T, factory Factory) { + t.Helper() + for _, test := range tt { + // Record the original objects in table. + originObjects := []runtime.Object{} + for _, obj := range test.Objects { + originObjects = append(originObjects, obj.DeepCopyObject()) + } + t.Run(test.Name, func(t *testing.T) { + t.Helper() + test.Test(t, factory) + }) + // Validate that the cached objects were not mutated by the controller loops. + if diff := cmp.Diff(originObjects, test.Objects, safeDeployDiff, cmpopts.EquateEmpty()); diff != "" { + t.Errorf("Unexpected objects in test %s (-want, +got): %v", test.Name, diff) + } + } +} + +var ( + ignoreLastTransitionTime = cmp.FilterPath(func(p cmp.Path) bool { + return strings.HasSuffix(p.String(), "LastTransitionTime.Inner.Time") + }, cmp.Ignore()) + + safeDeployDiff = cmpopts.IgnoreUnexported(resource.Quantity{}) +) diff --git a/vendor/k8s.io/code-generator/_examples/MixedCase/apis/example/v1/zz_generated.defaults.go b/vendor/github.com/knative/pkg/reconciler/testing/tracker.go similarity index 56% rename from vendor/k8s.io/code-generator/_examples/MixedCase/apis/example/v1/zz_generated.defaults.go rename to vendor/github.com/knative/pkg/reconciler/testing/tracker.go index cce2e603a..893ba86aa 100644 --- a/vendor/k8s.io/code-generator/_examples/MixedCase/apis/example/v1/zz_generated.defaults.go +++ b/vendor/github.com/knative/pkg/reconciler/testing/tracker.go @@ -1,7 +1,5 @@ -// +build !ignore_autogenerated - /* -Copyright The Kubernetes Authors.
+Copyright 2019 The Knative Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -16,17 +14,21 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by defaulter-gen. DO NOT EDIT. - -package v1 +package testing import ( - runtime "k8s.io/apimachinery/pkg/runtime" + corev1 "k8s.io/api/core/v1" + + "github.com/knative/pkg/tracker" ) -// RegisterDefaults adds defaulters functions to the given scheme. -// Public to allow building arbitrary schemes. -// All generated defaulters are covering - they call all nested defaulters. -func RegisterDefaults(scheme *runtime.Scheme) error { - return nil -} +// NullTracker implements Tracker. +type NullTracker struct{} + +var _ tracker.Interface = (*NullTracker)(nil) + +// OnChanged implements OnChanged. +func (*NullTracker) OnChanged(interface{}) {} + +// Track implements Track. +func (*NullTracker) Track(corev1.ObjectReference, interface{}) error { return nil } diff --git a/vendor/github.com/knative/pkg/reconciler/testing/util.go b/vendor/github.com/knative/pkg/reconciler/testing/util.go new file mode 100644 index 000000000..14689693f --- /dev/null +++ b/vendor/github.com/knative/pkg/reconciler/testing/util.go @@ -0,0 +1,85 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package testing includes utilities for testing controllers. +package testing + +import ( + "regexp" + "testing" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/tools/cache" +) + +// KeyOrDie returns the string key of the Kubernetes object or panics if a key +// cannot be generated. +func KeyOrDie(obj interface{}) string { + key, err := cache.MetaNamespaceKeyFunc(obj) + if err != nil { + panic(err) + } + return key +} + +// ExpectNormalEventDelivery returns a hook function that can be passed to a +// Hooks.OnCreate() call to verify that an event of type Normal was created +// matching the given regular expression. For this expectation to be effective +// the test must also call Hooks.WaitForHooks(). +func ExpectNormalEventDelivery(t *testing.T, messageRegexp string) CreateHookFunc { + t.Helper() + wantRegexp, err := regexp.Compile(messageRegexp) + if err != nil { + t.Fatalf("Invalid regular expression: %v", err) + } + return func(obj runtime.Object) HookResult { + t.Helper() + event := obj.(*corev1.Event) + if !wantRegexp.MatchString(event.Message) { + return HookIncomplete + } + t.Logf("Got an event message matching %q: %q", wantRegexp, event.Message) + if got, want := event.Type, corev1.EventTypeNormal; got != want { + t.Errorf("unexpected event Type: %q expected: %q", got, want) + } + return HookComplete + } +} + +// ExpectWarningEventDelivery returns a hook function that can be passed to a +// Hooks.OnCreate() call to verify that an event of type Warning was created +// matching the given regular expression. 
For this expectation to be effective
+// the test must also call Hooks.WaitForHooks().
+func ExpectWarningEventDelivery(t *testing.T, messageRegexp string) CreateHookFunc {
+	t.Helper()
+	wantRegexp, err := regexp.Compile(messageRegexp)
+	if err != nil {
+		t.Fatalf("Invalid regular expression: %v", err)
+	}
+	return func(obj runtime.Object) HookResult {
+		t.Helper()
+		event := obj.(*corev1.Event)
+		if !wantRegexp.MatchString(event.Message) {
+			return HookIncomplete
+		}
+		t.Logf("Got an event message matching %q: %q", wantRegexp, event.Message)
+		if got, want := event.Type, corev1.EventTypeWarning; got != want {
+			t.Errorf("unexpected event Type: %q expected: %q", got, want)
+		}
+		return HookComplete
+	}
+}
diff --git a/vendor/github.com/knative/pkg/signals/signal.go b/vendor/github.com/knative/pkg/signals/signal.go
new file mode 100644
index 000000000..01299e3ce
--- /dev/null
+++ b/vendor/github.com/knative/pkg/signals/signal.go
@@ -0,0 +1,83 @@
+/*
+Copyright 2018 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package signals

+import (
+	"context"
+	"errors"
+	"os"
+	"os/signal"
+	"time"
+)
+
+var onlyOneSignalHandler = make(chan struct{})
+
+// SetupSignalHandler registers handlers for SIGTERM and SIGINT. A stop channel is
+// returned which is closed on the first of these signals. If a second signal is
+// caught, the program is terminated with exit code 1.
+func SetupSignalHandler() (stopCh <-chan struct{}) {
+	close(onlyOneSignalHandler) // panics when called twice
+
+	stop := make(chan struct{})
+	c := make(chan os.Signal, 2)
+	signal.Notify(c, shutdownSignals...)
+	go func() {
+		<-c
+		close(stop)
+		<-c
+		os.Exit(1) // second signal. Exit directly.
+	}()
+
+	return stop
+}
+
+// NewContext creates a new context with SetupSignalHandler()
+// as our Done() channel.
+func NewContext() context.Context {
+	return &signalContext{stopCh: SetupSignalHandler()}
+}
+
+type signalContext struct {
+	stopCh <-chan struct{}
+}
+
+// Deadline implements context.Context
+func (scc *signalContext) Deadline() (deadline time.Time, ok bool) {
+	return
+}
+
+// Done implements context.Context
+func (scc *signalContext) Done() <-chan struct{} {
+	return scc.stopCh
+}
+
+// Err implements context.Context
+func (scc *signalContext) Err() error {
+	select {
+	case _, ok := <-scc.Done():
+		if !ok {
+			return errors.New("received a termination signal")
+		}
+	default:
+	}
+	return nil
+}
+
+// Value implements context.Context
+func (scc *signalContext) Value(key interface{}) interface{} {
+	return nil
+}
diff --git a/vendor/github.com/knative/pkg/signals/signal_posix.go b/vendor/github.com/knative/pkg/signals/signal_posix.go
new file mode 100644
index 000000000..b3537d0e5
--- /dev/null
+++ b/vendor/github.com/knative/pkg/signals/signal_posix.go
@@ -0,0 +1,26 @@
+// +build !windows
+
+/*
+Copyright 2018 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package signals + +import ( + "os" + "syscall" +) + +var shutdownSignals = []os.Signal{os.Interrupt, syscall.SIGTERM} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/generated_expansion.go b/vendor/github.com/knative/pkg/signals/signal_windows.go similarity index 81% rename from vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/generated_expansion.go rename to vendor/github.com/knative/pkg/signals/signal_windows.go index ab24f3734..a5a4026fa 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/generated_expansion.go +++ b/vendor/github.com/knative/pkg/signals/signal_windows.go @@ -1,5 +1,5 @@ /* -Copyright The Kubernetes Authors. +Copyright 2018 The Knative Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,8 +14,10 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by client-gen. DO NOT EDIT. +package signals -package v1 +import ( + "os" +) -type LeaseExpansion interface{} +var shutdownSignals = []os.Signal{os.Interrupt} diff --git a/vendor/k8s.io/api/auditregistration/v1alpha1/doc.go b/vendor/github.com/knative/pkg/system/clock.go similarity index 69% rename from vendor/k8s.io/api/auditregistration/v1alpha1/doc.go rename to vendor/github.com/knative/pkg/system/clock.go index ae8f76714..7d99d9b5c 100644 --- a/vendor/k8s.io/api/auditregistration/v1alpha1/doc.go +++ b/vendor/github.com/knative/pkg/system/clock.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Kubernetes Authors. +Copyright 2019 The Knative Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,10 +14,19 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:deepcopy-gen=package -// +k8s:protobuf-gen=package -// +k8s:openapi-gen=true +package system -// +groupName=auditregistration.k8s.io +import ( + "time" +) -package v1alpha1 // import "k8s.io/api/auditregistration/v1alpha1" +// Mockable interface for time based testing +type Clock interface { + Now() time.Time +} + +type RealClock struct{} + +func (RealClock) Now() time.Time { + return time.Now() +} diff --git a/vendor/github.com/knative/pkg/system/names.go b/vendor/github.com/knative/pkg/system/names.go new file mode 100644 index 000000000..fdd6a576e --- /dev/null +++ b/vendor/github.com/knative/pkg/system/names.go @@ -0,0 +1,52 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package system + +import ( + "fmt" + "os" +) + +const ( + NamespaceEnvKey = "SYSTEM_NAMESPACE" +) + +// Namespace holds the K8s namespace where our serving system +// components run. +func Namespace() string { + if ns := os.Getenv(NamespaceEnvKey); ns != "" { + return ns + } + + panic(fmt.Sprintf(`The environment variable %q is not set + +If this is a process running on Kubernetes, then it should be using the downward +API to initialize this variable via: + + env: + - name: %s + valueFrom: + fieldRef: + fieldPath: metadata.namespace + +If this is a Go unit test consuming system.Namespace() then it should add the +following import: + +import ( + _ "github.com/knative/pkg/system/testing" +)`, NamespaceEnvKey, NamespaceEnvKey)) +} diff --git a/vendor/github.com/knative/pkg/system/testing/names.go b/vendor/github.com/knative/pkg/system/testing/names.go new file mode 100644 index 000000000..ac4945a96 --- /dev/null +++ b/vendor/github.com/knative/pkg/system/testing/names.go @@ -0,0 +1,27 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testing + +import ( + "os" + + "github.com/knative/pkg/system" +) + +func init() { + os.Setenv(system.NamespaceEnvKey, "knative-testing") +} diff --git a/vendor/github.com/knative/pkg/test/OWNERS b/vendor/github.com/knative/pkg/test/OWNERS new file mode 100644 index 000000000..c50adc849 --- /dev/null +++ b/vendor/github.com/knative/pkg/test/OWNERS @@ -0,0 +1,10 @@ +# The OWNERS file is used by prow to automatically merge approved PRs. + +approvers: +- productivity-approvers + +reviewers: +- productivity-reviewers + +labels: +- area/test-and-release diff --git a/vendor/github.com/knative/pkg/test/README.md b/vendor/github.com/knative/pkg/test/README.md new file mode 100644 index 000000000..422e5ed60 --- /dev/null +++ b/vendor/github.com/knative/pkg/test/README.md @@ -0,0 +1,259 @@ +# Test + +This directory contains tests and testing docs. + +- [Test library](#test-library) contains code you can use in your `knative` + tests +- [Flags](#flags) added by [the test library](#test-library) +- [Unit tests](#running-unit-tests) currently reside in the codebase alongside + the code they test + +## Running unit tests + +To run all unit tests: + +```bash +go test ./... +``` + +## Test library + +You can use the test library in this dir to: + +- [Use common test flags](#use-common-test-flags) +- [Output logs](#output-logs) +- [Emit metrics](#emit-metrics) +- [Ensure test cleanup](#ensure-test-cleanup) + +### Use common test flags + +These flags are useful for running against an existing cluster, making use of +your existing +[environment setup](https://github.com/knative/serving/blob/master/DEVELOPMENT.md#environment-setup). + +By importing `github.com/knative/pkg/test` you get access to a global variable +called `test.Flags` which holds the values of +[the command line flags](/test/README.md#flags). 
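+For example, a test can log the namespace it is targeting straight from these
+flags: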
+
+```go
+logger.Infof("Using namespace %s", test.Flags.Namespace)
+```
+
+_See [e2e_flags.go](./e2e_flags.go)._
+
+### Output logs
+
+[When tests are run with the `--logverbose` option](README.md#output-verbose-logs),
+debug logs will be emitted to stdout.
+
+We are using a generic
+[FormatLogger](https://github.com/knative/pkg/blob/master/test/logging/logging.go#L49)
+into which any existing logger that satisfies it can be passed. Tests can use the
+generic [logging methods](https://golang.org/pkg/testing/#T) to log info and
+error logs. All the common methods accept a generic FormatLogger as a parameter
+and tests can pass in `t.Logf` like this:
+
+```go
+_, err = pkgTest.WaitForEndpointState(
+    clients.KubeClient,
+    t.Logf,
+    ...),
+```
+
+_See [logging.go](./logging/logging.go)._
+
+### Emit metrics
+
+You can emit metrics from your tests using
+[the opencensus library](https://github.com/census-instrumentation/opencensus-go),
+which
+[is being used inside Knative as well](https://github.com/knative/serving/blob/master/docs/telemetry.md).
+These metrics will be emitted by the test if the test is run with
+[the `--emitmetrics` option](#metrics-flag).
+
+You can record arbitrary metrics with
+[`stats.Record`](https://github.com/census-instrumentation/opencensus-go#stats)
+or measure latency by creating an instance of
+[`trace.Span`](https://github.com/census-instrumentation/opencensus-go#traces)
+by using the helper method [`logging.GetEmitableSpan()`](../logging/logger.go):
+
+```go
+span := logging.GetEmitableSpan(context.Background(), "MyMetric")
+```
+
+- These traces will be emitted automatically by
+  [the generic crd polling functions](#check-knative-serving-resources).
+- The traces are emitted by [a custom metric exporter](./logging/logging.go)
+  that uses the global logger instance.
+
+#### Metric format
+
+When a `trace` metric is emitted, the format is
+`metric <name> <startTime> <endTime> <duration>`. The name of the metric is
+arbitrary and can be any string. The values are:
+
+- `metric` - Indicates this log is a metric
+- `<name>` - Arbitrary string identifying the metric
+- `<startTime>` - Unix time in nanoseconds when measurement started
+- `<endTime>` - Unix time in nanoseconds when measurement ended
+- `<duration>` - The difference in ms between the startTime and endTime
+
+For example:
+
+```bash
+metric WaitForConfigurationState/prodxiparjxt/ConfigurationUpdatedWithRevision 1529980772357637397 1529980772431586609 73.949212ms
+```
+
+_The [`Wait` methods](#check-knative-serving-resources) (which poll resources)
+will prefix the metric names with the name of the function, and if applicable,
+the name of the resource, separated by `/`. In the example above,
+`WaitForConfigurationState` is the name of the function, and `prodxiparjxt` is
+the name of the configuration resource being polled.
+`ConfigurationUpdatedWithRevision` is the string passed to
+`WaitForConfigurationState` by the caller to identify what state is being polled
+for._
+
+### Check Knative Serving resources
+
+_WARNING: this code also exists in
+[`knative/serving`](https://github.com/knative/serving/blob/master/test/adding_tests.md#make-requests-against-deployed-services)._
+
+After creating Knative Serving resources or making changes to them, you will
+need to wait for the system to realize those changes. You can use the Knative
+Serving CRD check and polling methods to check that the resources are either in
+or reach the desired state.
+
+The `WaitFor*` functions use the kubernetes
+[`wait` package](https://godoc.org/k8s.io/apimachinery/pkg/util/wait). 
To poll
+they use
+[`PollImmediate`](https://godoc.org/k8s.io/apimachinery/pkg/util/wait#PollImmediate)
+and the return values of the function you provide behave the same as
+[`ConditionFunc`](https://godoc.org/k8s.io/apimachinery/pkg/util/wait#ConditionFunc):
+a `bool` to indicate if the function should stop or continue polling, and an
+`error` to indicate if there has been an error.
+
+For example, you can poll a `Configuration` object to find the name of the
+`Revision` that was created for it:
+
+```go
+var revisionName string
+err := test.WaitForConfigurationState(
+    clients.ServingClient, configName, func(c *v1alpha1.Configuration) (bool, error) {
+        if c.Status.LatestCreatedRevisionName != "" {
+            revisionName = c.Status.LatestCreatedRevisionName
+            return true, nil
+        }
+        return false, nil
+    }, "ConfigurationUpdatedWithRevision")
+```
+
+_[Metrics will be emitted](#emit-metrics) for these `Wait` methods, tracking how
+long tests poll for._
+
+_See [kube_checks.go](./kube_checks.go)._
+
+### Ensure test cleanup
+
+To ensure your test is cleaned up, you should defer cleanup to execute after
+your test completes and also ensure the cleanup occurs if the test is
+interrupted:
+
+```go
+defer tearDown(clients)
+test.CleanupOnInterrupt(func() { tearDown(clients) })
+```
+
+_See [cleanup.go](./cleanup.go)._
+
+## Flags
+
+Importing [the test library](#test-library) adds flags that are useful for
+end-to-end tests that need to run against a cluster.
+
+Tests importing [`github.com/knative/pkg/test`](#test-library) recognize these
+flags:
+
+- [`--kubeconfig`](#specifying-kubeconfig)
+- [`--cluster`](#specifying-cluster)
+- [`--ingressendpoint`](#specifying-ingress-endpoint)
+- [`--namespace`](#specifying-namespace)
+- [`--logverbose`](#output-verbose-logs)
+- [`--emitmetrics`](#metrics-flag)
+
+### Specifying kubeconfig
+
+By default the tests will use the
+[kubeconfig file](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/)
+at `~/.kube/config`. If there is an error getting the current user, it will use
+`kubeconfig` instead as the default value. You can specify a different config
+file with the argument `--kubeconfig`.
+
+To run tests with a non-default kubeconfig file:
+
+```bash
+go test ./test --kubeconfig /my/path/kubeconfig
+```
+
+### Specifying cluster
+
+The `--cluster` argument lets you use a different cluster than
+[your specified kubeconfig's](#specifying-kubeconfig) active context.
+
+```bash
+go test ./test --cluster your-cluster-name
+```
+
+The current cluster names can be obtained by running:
+
+```bash
+kubectl config get-clusters
+```
+
+### Specifying ingress endpoint
+
+The `--ingressendpoint` argument lets you specify a static URL to use as the
+ingress server during tests. This is useful for Kubernetes configurations which
+do not provide external IPs.
+
+```bash
+go test ./test --ingressendpoint <ip>:32380
+```
+
+### Specifying namespace
+
+The `--namespace` argument lets you specify the namespace to use for the tests.
+By default, tests will use `serving-tests`.
+
+```bash
+go test ./test --namespace your-namespace-name
+```
+
+### Output verbose logs
+
+The `--logverbose` argument lets you see verbose test logs and k8s logs.
+
+```bash
+go test ./test --logverbose
+```
+
+### Metrics flag
+
+Running tests with the `--emitmetrics` argument will cause latency metrics to be
+emitted by the tests.
+
+```bash
+go test ./test --emitmetrics
+```
+
+- To add additional metrics to a test, see
+  [emitting metrics](https://github.com/knative/pkg/tree/master/test#emit-metrics).
+- For more info on the format of the metrics, see + [metric format](https://github.com/knative/pkg/tree/master/test#emit-metrics). + +[minikube]: https://kubernetes.io/docs/setup/minikube/ + +--- + +Except as otherwise noted, the content of this page is licensed under the +[Creative Commons Attribution 4.0 License](https://creativecommons.org/licenses/by/4.0/), +and code samples are licensed under the +[Apache 2.0 License](https://www.apache.org/licenses/LICENSE-2.0). diff --git a/vendor/github.com/knative/pkg/test/cleanup.go b/vendor/github.com/knative/pkg/test/cleanup.go new file mode 100644 index 000000000..aa2c860fd --- /dev/null +++ b/vendor/github.com/knative/pkg/test/cleanup.go @@ -0,0 +1,40 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// cleanup allows you to define a cleanup function that will be executed +// if your test is interrupted. + +package test + +import ( + "os" + "os/signal" + + "github.com/knative/pkg/test/logging" +) + +// CleanupOnInterrupt will execute the function cleanup if an interrupt signal is caught +func CleanupOnInterrupt(cleanup func(), logf logging.FormatLogger) { + c := make(chan os.Signal, 1) + signal.Notify(c, os.Interrupt) + go func() { + for range c { + logf("Test interrupted, cleaning up.") + cleanup() + os.Exit(1) + } + }() +} diff --git a/vendor/github.com/knative/pkg/test/clients.go b/vendor/github.com/knative/pkg/test/clients.go new file mode 100644 index 000000000..fbd9e6583 --- /dev/null +++ b/vendor/github.com/knative/pkg/test/clients.go @@ -0,0 +1,114 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file contains an object which encapsulates k8s clients which are useful for e2e tests. + +package test + +import ( + "fmt" + "strings" + + "github.com/knative/pkg/test/logging" + "github.com/knative/pkg/test/spoof" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + k8styped "k8s.io/client-go/kubernetes/typed/core/v1" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" +) + +// KubeClient holds instances of interfaces for making requests to kubernetes client. 
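+//
+// A minimal usage sketch (the pod and container names below are hypothetical):
+//
+//  client, err := NewKubeClient(Flags.Kubeconfig, Flags.Cluster)
+//  if err != nil { ... }
+//  logs, err := client.PodLogs("my-pod", "user-container", "default")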
+type KubeClient struct {
+	Kube *kubernetes.Clientset
+}
+
+// NewSpoofingClient returns a spoofing client to make requests
+func NewSpoofingClient(client *KubeClient, logf logging.FormatLogger, domain string, resolvable bool) (*spoof.SpoofingClient, error) {
+	return spoof.New(client.Kube, logf, domain, resolvable, Flags.IngressEndpoint)
+}
+
+// NewKubeClient instantiates and returns several clientsets required for making requests to the
+// cluster specified by the combination of clusterName and configPath. Clients can make requests within a namespace.
+func NewKubeClient(configPath string, clusterName string) (*KubeClient, error) {
+	cfg, err := BuildClientConfig(configPath, clusterName)
+	if err != nil {
+		return nil, err
+	}
+
+	k, err := kubernetes.NewForConfig(cfg)
+	if err != nil {
+		return nil, err
+	}
+	return &KubeClient{Kube: k}, nil
+}
+
+// BuildClientConfig builds the client config specified by the config path and the cluster name
+func BuildClientConfig(kubeConfigPath string, clusterName string) (*rest.Config, error) {
+	overrides := clientcmd.ConfigOverrides{}
+	// Override the cluster name if provided.
+	if clusterName != "" {
+		overrides.Context.Cluster = clusterName
+	}
+	return clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
+		&clientcmd.ClientConfigLoadingRules{ExplicitPath: kubeConfigPath},
+		&overrides).ClientConfig()
+}
+
+// UpdateConfigMap updates the config map configName in the namespace name with the given values
+func (client *KubeClient) UpdateConfigMap(name string, configName string, values map[string]string) error {
+	configMap, err := client.GetConfigMap(name).Get(configName, metav1.GetOptions{})
+	if err != nil {
+		return err
+	}
+
+	for key, value := range values {
+		configMap.Data[key] = value
+	}
+
+	_, err = client.GetConfigMap(name).Update(configMap)
+	return err
+}
+
+// GetConfigMap gets the ConfigMap interface for the given namespace.
+func (client *KubeClient) GetConfigMap(name string) k8styped.ConfigMapInterface {
+	return client.Kube.CoreV1().ConfigMaps(name)
+}
+
+// CreatePod will create a Pod
+func (client *KubeClient) CreatePod(pod *corev1.Pod) (*corev1.Pod, error) {
+	pods := client.Kube.CoreV1().Pods(pod.GetNamespace())
+	return pods.Create(pod)
+}
+
+// PodLogs returns Pod logs for the given Pod and Container in the namespace
+func (client *KubeClient) PodLogs(podName, containerName, namespace string) ([]byte, error) {
+	pods := client.Kube.CoreV1().Pods(namespace)
+	podList, err := pods.List(metav1.ListOptions{})
+	if err != nil {
+		return nil, err
+	}
+	for _, pod := range podList.Items {
+		if strings.Contains(pod.Name, podName) {
+			result := pods.GetLogs(pod.Name, &corev1.PodLogOptions{
+				Container: containerName,
+			}).Do()
+			return result.Raw()
+		}
+	}
+	return nil, fmt.Errorf("Could not find logs for %s/%s", podName, containerName)
+}
diff --git a/vendor/github.com/knative/pkg/test/crd.go b/vendor/github.com/knative/pkg/test/crd.go
new file mode 100644
index 000000000..ac74d09a7
--- /dev/null
+++ b/vendor/github.com/knative/pkg/test/crd.go
@@ -0,0 +1,95 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file contains functions that construct boilerplate CRD definitions. + +package test + +import ( + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + nginxPort = 80 + nginxName = "nginx" + nginxImage = "nginx:1.7.9" +) + +// ServiceAccount returns ServiceAccount object in given namespace +func ServiceAccount(name string, namespace string) *corev1.ServiceAccount { + return &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + } +} + +// ClusterRoleBinding returns ClusterRoleBinding for given subject and role +func ClusterRoleBinding(name string, namespace string, serviceAccount string, role string) *rbacv1.ClusterRoleBinding { + return &rbacv1.ClusterRoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + Subjects: []rbacv1.Subject{ + { + Kind: "ServiceAccount", + Name: serviceAccount, + Namespace: namespace, + }, + }, + RoleRef: rbacv1.RoleRef{ + Kind: "ClusterRole", + Name: role, + APIGroup: "rbac.authorization.k8s.io", + }, + } +} + +// CoreV1ObjectReference returns a corev1.ObjectReference for the given name, kind and apiversion +func CoreV1ObjectReference(kind, apiversion, name string) *corev1.ObjectReference { + return &corev1.ObjectReference{ + Kind: kind, + APIVersion: apiversion, + Name: name, + } +} + +// NginxPod returns nginx pod defined in given namespace +func NginxPod(namespace string) *corev1.Pod { + return &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: nginxName, + Namespace: namespace, + Annotations: map[string]string{"sidecar.istio.io/inject": "true"}, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: nginxName, + Image: nginxImage, + Ports: []corev1.ContainerPort{ + { + ContainerPort: nginxPort, + }, + }, + }, + }, + }, + } +} diff --git a/vendor/github.com/knative/pkg/test/e2e_flags.go b/vendor/github.com/knative/pkg/test/e2e_flags.go new file mode 100644 index 000000000..b5d911fdf --- /dev/null +++ b/vendor/github.com/knative/pkg/test/e2e_flags.go @@ -0,0 +1,82 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file contains logic to encapsulate flags which are needed to specify +// what cluster, etc. to use for e2e tests. + +package test + +import ( + "flag" + "fmt" + "os" + "os/user" + "path" +) + +// Flags holds the command line flags or defaults for settings in the user's environment. +// See EnvironmentFlags for a list of supported fields. +var Flags = initializeFlags() + +// EnvironmentFlags define the flags that are needed to run the e2e tests. 
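+// Each field is populated by the corresponding command-line flag registered in
+// initializeFlags below.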
+type EnvironmentFlags struct { + Cluster string // K8s cluster (defaults to cluster in kubeconfig) + Kubeconfig string // Path to kubeconfig (defaults to ./kube/config) + Namespace string // K8s namespace (blank by default, to be overwritten by test suite) + IngressEndpoint string // Host to use for ingress endpoint + LogVerbose bool // Enable verbose logging + EmitMetrics bool // Emit metrics + DockerRepo string // Docker repo (defaults to $KO_DOCKER_REPO) + Tag string // Tag for test images +} + +func initializeFlags() *EnvironmentFlags { + var f EnvironmentFlags + flag.StringVar(&f.Cluster, "cluster", "", + "Provide the cluster to test against. Defaults to the current cluster in kubeconfig.") + + var defaultKubeconfig string + if usr, err := user.Current(); err == nil { + defaultKubeconfig = path.Join(usr.HomeDir, ".kube/config") + } + + flag.StringVar(&f.Kubeconfig, "kubeconfig", defaultKubeconfig, + "Provide the path to the `kubeconfig` file you'd like to use for these tests. The `current-context` will be used.") + + flag.StringVar(&f.Namespace, "namespace", "", + "Provide the namespace you would like to use for these tests.") + + flag.StringVar(&f.IngressEndpoint, "ingressendpoint", "", "Provide a static endpoint url to the ingress server used during tests.") + + flag.BoolVar(&f.LogVerbose, "logverbose", false, + "Set this flag to true if you would like to see verbose logging.") + + flag.BoolVar(&f.EmitMetrics, "emitmetrics", false, + "Set this flag to true if you would like tests to emit metrics, e.g. latency of resources being realized in the system.") + + defaultRepo := os.Getenv("KO_DOCKER_REPO") + flag.StringVar(&f.DockerRepo, "dockerrepo", defaultRepo, + "Provide the uri of the docker repo you have uploaded the test image to using `uploadtestimage.sh`. Defaults to $KO_DOCKER_REPO") + + flag.StringVar(&f.Tag, "tag", "latest", "Provide the version tag for the test images.") + + return &f +} + +// ImagePath is a helper function to prefix image name with repo and suffix with tag +func ImagePath(name string) string { + return fmt.Sprintf("%s/%s:%s", Flags.DockerRepo, name, Flags.Tag) +} diff --git a/vendor/github.com/knative/pkg/test/helpers/data.go b/vendor/github.com/knative/pkg/test/helpers/data.go new file mode 100644 index 000000000..e982b2c33 --- /dev/null +++ b/vendor/github.com/knative/pkg/test/helpers/data.go @@ -0,0 +1,77 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package helpers + +import ( + "math/rand" + "strings" + "time" + "unicode" +) + +const ( + letterBytes = "abcdefghijklmnopqrstuvwxyz" + randSuffixLen = 8 + sep = '-' +) + +func init() { + rand.Seed(time.Now().UTC().UnixNano()) +} + +// AppendRandomString will generate a random string that begins with prefix. +// This is useful if you want to make sure that your tests can run at the same +// time against the same environment without conflicting. +// This method will use "-" as the separator between the prefix and +// the random suffix. 
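+// For example, AppendRandomString("e2e") might return something like
+// "e2e-vkfxkxdu" (a hypothetical output; the suffix is always randSuffixLen
+// lowercase letters).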
+// This method will seed rand with the current time when the package is initialized. +func AppendRandomString(prefix string) string { + suffix := make([]byte, randSuffixLen) + + for i := range suffix { + suffix[i] = letterBytes[rand.Intn(len(letterBytes))] + } + + return strings.Join([]string{prefix, string(suffix)}, string(sep)) +} + +// MakeK8sNamePrefix converts each chunk of non-alphanumeric character into a single dash +// and also convert camelcase tokens into dash-delimited lowercase tokens. +func MakeK8sNamePrefix(s string) string { + var sb strings.Builder + newToken := false + for _, c := range s { + if !(unicode.IsLetter(c) || unicode.IsNumber(c)) { + newToken = true + continue + } + if sb.Len() > 0 && (newToken || unicode.IsUpper(c)) { + sb.WriteRune(sep) + } + sb.WriteRune(unicode.ToLower(c)) + newToken = false + } + return sb.String() +} + +// GetBaseFuncName returns the baseFuncName parsed from the fullFuncName. +// eg. test/e2e.TestMain will return TestMain. +func GetBaseFuncName(fullFuncName string) string { + baseFuncName := fullFuncName[strings.LastIndex(fullFuncName, "/")+1:] + baseFuncName = baseFuncName[strings.LastIndex(baseFuncName, ".")+1:] + return baseFuncName +} diff --git a/vendor/github.com/knative/pkg/test/ingress/ingress.go b/vendor/github.com/knative/pkg/test/ingress/ingress.go new file mode 100644 index 000000000..cea5da5cb --- /dev/null +++ b/vendor/github.com/knative/pkg/test/ingress/ingress.go @@ -0,0 +1,72 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package ingress + +import ( + "fmt" + "os" + + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" +) + +const ( + // TODO(tcnghia): These probably shouldn't be hard-coded here? + istioIngressNamespace = "istio-system" + istioIngressName = "istio-ingressgateway" +) + +// GetIngressEndpoint gets the endpoint IP or hostname to use for the service. +func GetIngressEndpoint(kubeClientset *kubernetes.Clientset) (*string, error) { + ingressName := istioIngressName + if gatewayOverride := os.Getenv("GATEWAY_OVERRIDE"); gatewayOverride != "" { + ingressName = gatewayOverride + } + ingressNamespace := istioIngressNamespace + if gatewayNsOverride := os.Getenv("GATEWAY_NAMESPACE_OVERRIDE"); gatewayNsOverride != "" { + ingressNamespace = gatewayNsOverride + } + + ingress, err := kubeClientset.CoreV1().Services(ingressNamespace).Get(ingressName, metav1.GetOptions{}) + if err != nil { + return nil, err + } + endpoint, err := EndpointFromService(ingress) + if err != nil { + return nil, err + } + return &endpoint, nil +} + +// EndpointFromService extracts the endpoint from the service's ingress. 
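+// It expects exactly one load-balancer ingress entry on the Service and prefers
+// that entry's IP, falling back to its hostname.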
+func EndpointFromService(svc *v1.Service) (string, error) { + ingresses := svc.Status.LoadBalancer.Ingress + if len(ingresses) != 1 { + return "", fmt.Errorf("Expected exactly one ingress load balancer, instead had %d: %v", len(ingresses), ingresses) + } + itu := ingresses[0] + + switch { + case itu.IP != "": + return itu.IP, nil + case itu.Hostname != "": + return itu.Hostname, nil + default: + return "", fmt.Errorf("Expected ingress loadbalancer IP or hostname for %s to be set, instead was empty", svc.Name) + } +} diff --git a/vendor/github.com/knative/pkg/test/kube_checks.go b/vendor/github.com/knative/pkg/test/kube_checks.go new file mode 100644 index 000000000..1b78914f5 --- /dev/null +++ b/vendor/github.com/knative/pkg/test/kube_checks.go @@ -0,0 +1,132 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// kube_checks contains functions which poll Kubernetes objects until +// they get into the state desired by the caller or time out. + +package test + +import ( + "context" + "fmt" + "strings" + "time" + + "github.com/knative/pkg/test/logging" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" + k8styped "k8s.io/client-go/kubernetes/typed/core/v1" +) + +const ( + interval = 1 * time.Second + podTimeout = 8 * time.Minute + logTimeout = 1 * time.Minute +) + +// WaitForDeploymentState polls the status of the Deployment called name +// from client every interval until inState returns `true` indicating it +// is done, returns an error or timeout. desc will be used to name the metric +// that is emitted to track how long it took for name to get into the state checked by inState. +func WaitForDeploymentState(client *KubeClient, name string, inState func(d *appsv1.Deployment) (bool, error), desc string, namespace string, timeout time.Duration) error { + d := client.Kube.AppsV1().Deployments(namespace) + span := logging.GetEmitableSpan(context.Background(), fmt.Sprintf("WaitForDeploymentState/%s/%s", name, desc)) + defer span.End() + + return wait.PollImmediate(interval, timeout, func() (bool, error) { + d, err := d.Get(name, metav1.GetOptions{}) + if err != nil { + return true, err + } + return inState(d) + }) +} + +// WaitForPodListState polls the status of the PodList +// from client every interval until inState returns `true` indicating it +// is done, returns an error or timeout. desc will be used to name the metric +// that is emitted to track how long it took to get into the state checked by inState. 
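+// The poll uses the package-level interval and podTimeout constants.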
+func WaitForPodListState(client *KubeClient, inState func(p *corev1.PodList) (bool, error), desc string, namespace string) error {
+	p := client.Kube.CoreV1().Pods(namespace)
+	span := logging.GetEmitableSpan(context.Background(), fmt.Sprintf("WaitForPodListState/%s", desc))
+	defer span.End()
+
+	return wait.PollImmediate(interval, podTimeout, func() (bool, error) {
+		p, err := p.List(metav1.ListOptions{})
+		if err != nil {
+			return true, err
+		}
+		return inState(p)
+	})
+}
+
+// GetConfigMap gets the configmaps for a given namespace
+func GetConfigMap(client *KubeClient, namespace string) k8styped.ConfigMapInterface {
+	return client.Kube.CoreV1().ConfigMaps(namespace)
+}
+
+// DeploymentScaledToZeroFunc returns a func that evaluates if a deployment has scaled to 0 pods
+func DeploymentScaledToZeroFunc() func(d *appsv1.Deployment) (bool, error) {
+	return func(d *appsv1.Deployment) (bool, error) {
+		return d.Status.ReadyReplicas == 0, nil
+	}
+}
+
+// WaitForLogContent waits until logs for the given Pod/Container include the given content.
+// If the content is not present within the timeout, it returns an error.
+func WaitForLogContent(client *KubeClient, podName, containerName, namespace, content string) error {
+	return wait.PollImmediate(interval, logTimeout, func() (bool, error) {
+		logs, err := client.PodLogs(podName, containerName, namespace)
+		if err != nil {
+			return true, err
+		}
+		return strings.Contains(string(logs), content), nil
+	})
+}
+
+// WaitForAllPodsRunning waits for all the pods to be in running state
+func WaitForAllPodsRunning(client *KubeClient, namespace string) error {
+	return WaitForPodListState(client, PodsRunning, "PodsAreRunning", namespace)
+}
+
+// WaitForPodRunning waits for the given pod to be in running state
+func WaitForPodRunning(client *KubeClient, name string, namespace string) error {
+	p := client.Kube.CoreV1().Pods(namespace)
+	return wait.PollImmediate(interval, podTimeout, func() (bool, error) {
+		p, err := p.Get(name, metav1.GetOptions{})
+		if err != nil {
+			return true, err
+		}
+		return PodRunning(p), nil
+	})
+}
+
+// PodsRunning will check the status conditions of the pod list and return true if all pods are Running
+func PodsRunning(podList *corev1.PodList) (bool, error) {
+	for _, pod := range podList.Items {
+		if isRunning := PodRunning(&pod); !isRunning {
+			return false, nil
+		}
+	}
+	return true, nil
+}
+
+// PodRunning will check the status conditions of the pod and return true if it's Running or Succeeded
+func PodRunning(pod *corev1.Pod) bool {
+	return pod.Status.Phase == corev1.PodRunning || pod.Status.Phase == corev1.PodSucceeded
+}
diff --git a/vendor/github.com/knative/pkg/test/logging/logging.go b/vendor/github.com/knative/pkg/test/logging/logging.go
new file mode 100644
index 000000000..2c641bd7a
--- /dev/null
+++ b/vendor/github.com/knative/pkg/test/logging/logging.go
@@ -0,0 +1,146 @@
+/*
+Copyright 2018 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// logging.go contains the logic to configure and interact with the
+// logging and metrics libraries.
+
+package logging
+
+import (
+	"context"
+	"flag"
+	"fmt"
+	"strings"
+	"time"
+
+	"github.com/davecgh/go-spew/spew"
+	"github.com/golang/glog"
+	"github.com/knative/pkg/logging"
+	"go.opencensus.io/stats/view"
+	"go.opencensus.io/trace"
+	"go.uber.org/zap"
+)
+
+const (
+	// VerboseLogLevel defines verbose log level as 10
+	VerboseLogLevel glog.Level = 10
+
+	// 1 second was chosen arbitrarily
+	metricViewReportingPeriod = 1 * time.Second
+
+	// prefix attached to metric name that indicates to the
+	// ExportSpan method that span needs to be emitted.
+	emitableSpanNamePrefix = "emitspan-"
+)
+
+// FormatLogger is a printf style function for logging in tests.
+type FormatLogger func(template string, args ...interface{})
+
+var logger *zap.SugaredLogger
+
+var exporter *zapMetricExporter
+
+// zapMetricExporter is a stats and trace exporter that logs the
+// exported data to the provided (probably test specific) zap logger.
+// It conforms to the view.Exporter and trace.Exporter interfaces.
+type zapMetricExporter struct {
+	logger *zap.SugaredLogger
+}
+
+// ExportView will emit the view data vd (i.e. the stats that have been
+// recorded) to the zap logger.
+func (e *zapMetricExporter) ExportView(vd *view.Data) {
+	// We are not currently consuming these metrics, so for now we'll just
+	// dump the view.Data object as is.
+	e.logger.Debug(spew.Sprint(vd))
+}
+
+// GetEmitableSpan starts and returns a trace.Span with a name that
+// is used by the ExportSpan method to emit the span.
+func GetEmitableSpan(ctx context.Context, metricName string) *trace.Span {
+	_, span := trace.StartSpan(ctx, emitableSpanNamePrefix+metricName)
+	return span
+}
+
+// ExportSpan will emit the trace data to the zap logger. The span is emitted
+// only if its name is prefixed with the emitableSpanNamePrefix constant.
+func (e *zapMetricExporter) ExportSpan(vd *trace.SpanData) {
+	if strings.HasPrefix(vd.Name, emitableSpanNamePrefix) {
+		duration := vd.EndTime.Sub(vd.StartTime)
+		// We will start the log entry with `metric` to identify it as a metric for parsing
+		e.logger.Infof("metric %s %d %d %s", vd.Name[len(emitableSpanNamePrefix):], vd.StartTime.UnixNano(), vd.EndTime.UnixNano(), duration)
+	}
+}
+
+func newLogger(logLevel string) *zap.SugaredLogger {
+	configJSONTemplate := `{
+	  "level": "%s",
+	  "encoding": "console",
+	  "outputPaths": ["stdout"],
+	  "errorOutputPaths": ["stderr"],
+	  "encoderConfig": {
+	    "timeKey": "ts",
+	    "levelKey": "level",
+	    "nameKey": "logger",
+	    "callerKey": "caller",
+	    "messageKey": "msg",
+	    "stacktraceKey": "stacktrace",
+	    "lineEnding": "",
+	    "levelEncoder": "",
+	    "timeEncoder": "iso8601",
+	    "durationEncoder": "",
+	    "callerEncoder": ""
+	  }
+	}`
+	configJSON := fmt.Sprintf(configJSONTemplate, logLevel)
+	l, _ := logging.NewLogger(string(configJSON), logLevel, zap.AddCallerSkip(1))
+	return l
+}
+
+// InitializeMetricExporter initializes the metric exporter logger
+func InitializeMetricExporter(context string) {
+	// If there was a previously registered exporter, unregister it so we only emit
+	// the metrics in the current context.
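+	// (The exporter is stored in a package-level variable and is not protected by
+	// a lock, so this function is not safe for concurrent use.)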
+	if exporter != nil {
+		view.UnregisterExporter(exporter)
+		trace.UnregisterExporter(exporter)
+	}
+
+	logger := logger.Named(context)
+
+	exporter = &zapMetricExporter{logger: logger}
+	view.RegisterExporter(exporter)
+	trace.RegisterExporter(exporter)
+
+	view.SetReportingPeriod(metricViewReportingPeriod)
+	trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()})
+}
+
+// InitializeLogger initializes the base logger
+func InitializeLogger(logVerbose bool) {
+	logLevel := "info"
+	if logVerbose {
+		// Both gLog and "go test" use -v flag. The code below is a workaround so that we can still set the v value for gLog
+		flag.StringVar(&logLevel, "logLevel", fmt.Sprint(VerboseLogLevel), "verbose log level")
+		flag.Lookup("v").Value.Set(logLevel)
+		glog.Infof("Logging set to verbose mode with logLevel %d", VerboseLogLevel)
+
+		logLevel = "debug"
+	}
+
+	logger = newLogger(logLevel)
+}
diff --git a/vendor/github.com/knative/pkg/test/monitoring/doc.go b/vendor/github.com/knative/pkg/test/monitoring/doc.go
new file mode 100644
index 000000000..b15d7c726
--- /dev/null
+++ b/vendor/github.com/knative/pkg/test/monitoring/doc.go
@@ -0,0 +1,32 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+Package monitoring provides common methods for all the monitoring components used in the tests.
+
+This package exposes the following methods:
+
+	CheckPortAvailability(port int) error
+		Checks if the given port is available
+	GetPods(kubeClientset *kubernetes.Clientset, app string) (*v1.PodList, error)
+		Gets the list of pods that satisfy the label selector app=<app>
+	Cleanup(pid int) error
+		Kills the current port forwarding process running in the background
+	PortForward(logf logging.FormatLogger, podList *v1.PodList, localPort, remotePort int) (int, error)
+		Creates a background process that will port forward the first pod from the local to the remote port
+		It returns the process id for the background process created.
+*/
+package monitoring
diff --git a/vendor/github.com/knative/pkg/test/monitoring/monitoring.go b/vendor/github.com/knative/pkg/test/monitoring/monitoring.go
new file mode 100644
index 000000000..ccbad42b1
--- /dev/null
+++ b/vendor/github.com/knative/pkg/test/monitoring/monitoring.go
@@ -0,0 +1,85 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package monitoring
+
+import (
+	"fmt"
+	"net"
+	"os"
+	"os/exec"
+	"strings"
+
+	"github.com/knative/pkg/test/logging"
+	v1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/client-go/kubernetes"
+)
+
+// CheckPortAvailability checks to see if the port is available on the machine.
+func CheckPortAvailability(port int) error {
+	server, err := net.Listen("tcp", fmt.Sprintf(":%d", port))
+	if err != nil {
+		// Port is likely taken
+		return err
+	}
+	server.Close()
+
+	return nil
+}
+
+// GetPods retrieves the currently existing podlist for the app in the monitoring namespace.
+// This uses app=<app> as the label selector for selecting pods.
+func GetPods(kubeClientset *kubernetes.Clientset, app, namespace string) (*v1.PodList, error) {
+	pods, err := kubeClientset.CoreV1().Pods(namespace).List(metav1.ListOptions{LabelSelector: fmt.Sprintf("app=%s", app)})
+	if err == nil && len(pods.Items) == 0 {
+		err = fmt.Errorf("No %s Pod found on the cluster. Ensure monitoring is switched on for your Knative Setup", app)
+	}
+
+	return pods, err
+}
+
+// Cleanup will clean up the background process used for port forwarding
+func Cleanup(pid int) error {
+	ps := os.Process{Pid: pid}
+	return ps.Kill()
+}
+
+// PortForward sets up a local port forward to the pod specified by the "app" label in the given namespace
+func PortForward(logf logging.FormatLogger, podList *v1.PodList, localPort, remotePort int, namespace string) (int, error) {
+	podName := podList.Items[0].Name
+	portFwdCmd := fmt.Sprintf("kubectl port-forward %s %d:%d -n %s", podName, localPort, remotePort, namespace)
+	portFwdProcess, err := executeCmdBackground(logf, portFwdCmd)
+
+	if err != nil {
+		return 0, fmt.Errorf("Failed to port forward: %v", err)
+	}
+
+	logf("running %s port-forward in background, pid = %d", podName, portFwdProcess.Pid)
+	return portFwdProcess.Pid, nil
+}
+
+// executeCmdBackground starts a background process and returns the Process if it succeeds
+func executeCmdBackground(logf logging.FormatLogger, format string, args ...interface{}) (*os.Process, error) {
+	cmd := fmt.Sprintf(format, args...)
+	logf("Executing command: %s", cmd)
+	parts := strings.Split(cmd, " ")
+	c := exec.Command(parts[0], parts[1:]...) // #nosec
+	if err := c.Start(); err != nil {
+		return nil, fmt.Errorf("%s command failed: %v", cmd, err)
+	}
+	return c.Process, nil
+}
diff --git a/vendor/github.com/knative/pkg/test/presubmit-tests.sh b/vendor/github.com/knative/pkg/test/presubmit-tests.sh
new file mode 100755
index 000000000..c3861911f
--- /dev/null
+++ b/vendor/github.com/knative/pkg/test/presubmit-tests.sh
@@ -0,0 +1,31 @@
+#!/usr/bin/env bash
+
+# Copyright 2018 The Knative Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This script runs the presubmit tests, in the right order.
+# It is started by prow for each PR.
+# For convenience, it can also be executed manually.
+
+# Markdown linting failures don't show up properly in Gubernator, resulting
+# in a net-negative contributor experience.
+export DISABLE_MD_LINTING=1 + +source $(dirname $0)/../vendor/github.com/knative/test-infra/scripts/presubmit-tests.sh + +# TODO(#17): Write integration tests. + +# We use the default build, unit and integration test runners. + +main $@ diff --git a/vendor/github.com/knative/pkg/test/request.go b/vendor/github.com/knative/pkg/test/request.go new file mode 100644 index 000000000..1735b6073 --- /dev/null +++ b/vendor/github.com/knative/pkg/test/request.go @@ -0,0 +1,154 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// request contains logic to make polling HTTP requests against an endpoint with optional host spoofing. + +package test + +import ( + "context" + "fmt" + "net/http" + "net/url" + "strings" + "time" + + "github.com/knative/pkg/test/logging" + "github.com/knative/pkg/test/spoof" +) + +// Retrying modifies a ResponseChecker to retry certain response codes. +func Retrying(rc spoof.ResponseChecker, codes ...int) spoof.ResponseChecker { + return func(resp *spoof.Response) (bool, error) { + for _, code := range codes { + if resp.StatusCode == code { + // Returning (false, nil) causes SpoofingClient.Poll to retry. + // sc.logger.Infof("Retrying for code %v", resp.StatusCode) + return false, nil + } + } + + // If we didn't match any retryable codes, invoke the ResponseChecker that we wrapped. + return rc(resp) + } +} + +// IsOneOfStatusCodes checks that the response code is equal to the given one. +func IsOneOfStatusCodes(codes ...int) spoof.ResponseChecker { + return func(resp *spoof.Response) (bool, error) { + for _, code := range codes { + if resp.StatusCode == code { + return true, nil + } + } + + return true, fmt.Errorf("status = %d, want one of: %v", resp.StatusCode, codes) + } +} + +// IsStatusOK checks that the response code is a 200. +func IsStatusOK(resp *spoof.Response) (bool, error) { + return IsOneOfStatusCodes(http.StatusOK)(resp) +} + +// MatchesBody checks that the *first* response body matches the "expected" body, otherwise failing. +func MatchesBody(expected string) spoof.ResponseChecker { + return func(resp *spoof.Response) (bool, error) { + if !strings.Contains(string(resp.Body), expected) { + // Returning (true, err) causes SpoofingClient.Poll to fail. + return true, fmt.Errorf("body = %s, want: %s", string(resp.Body), expected) + } + + return true, nil + } +} + +// EventuallyMatchesBody checks that the response body *eventually* matches the expected body. +// TODO(#1178): Delete me. We don't want to need this; we should be waiting for an appropriate Status instead. +func EventuallyMatchesBody(expected string) spoof.ResponseChecker { + return func(resp *spoof.Response) (bool, error) { + if !strings.Contains(string(resp.Body), expected) { + // Returning (false, nil) causes SpoofingClient.Poll to retry. + return false, nil + } + + return true, nil + } +} + +// MatchesAllOf combines multiple ResponseCheckers to one ResponseChecker with a logical AND. The +// checkers are executed in order. 
The first function to trigger an error or a retry will short-circuit +// the other functions (they will not be executed). +// +// This is useful for combining a body with a status check like: +// MatchesAllOf(IsStatusOK, MatchesBody("test")) +// +// The MatchesBody check will only be executed after the IsStatusOK has passed. +func MatchesAllOf(checkers ...spoof.ResponseChecker) spoof.ResponseChecker { + return func(resp *spoof.Response) (bool, error) { + for _, checker := range checkers { + done, err := checker(resp) + if err != nil || !done { + return done, err + } + } + return true, nil + } +} + +// WaitForEndpointState will poll an endpoint until inState indicates the state is achieved, +// or default timeout is reached. +// If resolvableDomain is false, it will use kubeClientset to look up the ingress and spoof +// the domain in the request headers, otherwise it will make the request directly to domain. +// desc will be used to name the metric that is emitted to track how long it took for the +// domain to get into the state checked by inState. Commas in `desc` must be escaped. +func WaitForEndpointState(kubeClient *KubeClient, logf logging.FormatLogger, theURL string, inState spoof.ResponseChecker, desc string, resolvable bool) (*spoof.Response, error) { + return WaitForEndpointStateWithTimeout(kubeClient, logf, theURL, inState, desc, resolvable, spoof.RequestTimeout) +} + +// WaitForEndpointStateWithTimeout will poll an endpoint until inState indicates the state is achieved +// or the provided timeout is achieved. +// If resolvableDomain is false, it will use kubeClientset to look up the ingress and spoof +// the domain in the request headers, otherwise it will make the request directly to domain. +// desc will be used to name the metric that is emitted to track how long it took for the +// domain to get into the state checked by inState. Commas in `desc` must be escaped. +func WaitForEndpointStateWithTimeout( + kubeClient *KubeClient, logf logging.FormatLogger, theURL string, inState spoof.ResponseChecker, + desc string, resolvable bool, timeout time.Duration) (*spoof.Response, error) { + defer logging.GetEmitableSpan(context.Background(), fmt.Sprintf("WaitForEndpointState/%s", desc)).End() + + // Try parsing the "theURL" with and without a scheme. + asURL, err := url.Parse(fmt.Sprintf("http://%s", theURL)) + if err != nil { + asURL, err = url.Parse(theURL) + if err != nil { + return nil, err + } + } + + req, err := http.NewRequest(http.MethodGet, asURL.String(), nil) + if err != nil { + return nil, err + } + + client, err := NewSpoofingClient(kubeClient, logf, asURL.Hostname(), resolvable) + if err != nil { + return nil, err + } + client.RequestTimeout = timeout + + return client.Poll(req, inState) +} diff --git a/vendor/github.com/knative/pkg/test/spoof/error_checks.go b/vendor/github.com/knative/pkg/test/spoof/error_checks.go new file mode 100644 index 000000000..0cd2995ca --- /dev/null +++ b/vendor/github.com/knative/pkg/test/spoof/error_checks.go @@ -0,0 +1,52 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
diff --git a/vendor/github.com/knative/pkg/test/spoof/error_checks.go b/vendor/github.com/knative/pkg/test/spoof/error_checks.go
new file mode 100644
index 000000000..0cd2995ca
--- /dev/null
+++ b/vendor/github.com/knative/pkg/test/spoof/error_checks.go
@@ -0,0 +1,52 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// spoof contains logic to make polling HTTP requests against an endpoint with optional host spoofing.
+
+package spoof
+
+import (
+	"net"
+	"strings"
+)
+
+func isTCPTimeout(e error) bool {
+	err, ok := e.(net.Error)
+	return err != nil && ok && err.Timeout()
+}
+
+func isDNSError(err error) bool {
+	if err == nil {
+		return false
+	}
+	// Casting to url.Error and inspecting the nested error seems to be
+	// less robust than a string check.
+	msg := strings.ToLower(err.Error())
+	// Example error message:
+	// > Get http://this.url.does.not.exist: dial tcp: lookup this.url.does.not.exist on 127.0.0.1:53: no such host
+	return strings.Contains(msg, "no such host") || strings.Contains(msg, ":53")
+}
+
+func isTCPConnectRefuse(err error) bool {
+	// The alternative for the string check is:
+	// errNo := (((err.(*url.Error)).Err.(*net.OpError)).Err.(*os.SyscallError).Err).(syscall.Errno)
+	// if errNo == syscall.Errno(0x6f) {...}
+	// But with assertions, of course.
+	if err != nil && strings.Contains(err.Error(), "connect: connection refused") {
+		return true
+	}
+	return false
+}
diff --git a/vendor/github.com/knative/pkg/test/spoof/spoof.go b/vendor/github.com/knative/pkg/test/spoof/spoof.go
new file mode 100644
index 000000000..f23d3049e
--- /dev/null
+++ b/vendor/github.com/knative/pkg/test/spoof/spoof.go
@@ -0,0 +1,241 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// spoof contains logic to make polling HTTP requests against an endpoint with optional host spoofing.
+
+package spoof
+
+import (
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"time"
+
+	ingress "github.com/knative/pkg/test/ingress"
+	"github.com/knative/pkg/test/logging"
+	"github.com/knative/pkg/test/zipkin"
+	"github.com/pkg/errors"
+	"k8s.io/apimachinery/pkg/util/wait"
+	"k8s.io/client-go/kubernetes"
+
+	"go.opencensus.io/plugin/ochttp"
+	"go.opencensus.io/plugin/ochttp/propagation/b3"
+	"go.opencensus.io/trace"
+)
+
+const (
+	requestInterval = 1 * time.Second
+	// RequestTimeout is the default timeout for the polling requests.
+	RequestTimeout = 5 * time.Minute
+	// pollReqHeader is the name of the temporary HTTP header added to an http.Request to indicate
+	// that it is a SpoofingClient.Poll request. This header is removed before the call is made to the backend.
+	pollReqHeader = "X-Kn-Poll-Request-Do-Not-Trace"
+)
+
+// Response is a stripped-down subset of http.Response. It is primarily useful
+// for ResponseCheckers to inspect the response body without consuming it.
+// Notably, Body is a byte slice instead of an io.ReadCloser.
+type Response struct {
+	Status     string
+	StatusCode int
+	Header     http.Header
+	Body       []byte
+}
+
+func (r *Response) String() string {
+	return fmt.Sprintf("status: %d, body: %s, headers: %v", r.StatusCode, string(r.Body), r.Header)
+}
+
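The (done, err) contract of the ResponseChecker type defined just below is easiest to see in a custom checker. A small illustrative sketch, assuming only the types in this package:

```go
package spoof

import "net/http"

// retryServerErrors is an illustrative custom checker: it keeps polling
// through 5xx responses and accepts anything else.
func retryServerErrors(resp *Response) (bool, error) {
	if resp.StatusCode >= http.StatusInternalServerError {
		// (false, nil): not done yet and no failure; Poll will retry.
		return false, nil
	}
	// (true, nil): done successfully; Poll returns this response.
	return true, nil
}
```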
+// Interface defines the actions that can be performed by the spoofing client.
+type Interface interface {
+	Do(*http.Request) (*Response, error)
+	Poll(*http.Request, ResponseChecker) (*Response, error)
+}
+
+// https://medium.com/stupid-gopher-tricks/ensuring-go-interface-satisfaction-at-compile-time-1ed158e8fa17
+var _ Interface = (*SpoofingClient)(nil)
+
+// ResponseChecker is used to determine when SpoofingClient.Poll is done polling.
+// This allows you to predicate wait.PollImmediate on the request's http.Response.
+//
+// See the apimachinery wait package:
+// https://github.com/kubernetes/apimachinery/blob/cf7ae2f57dabc02a3d215f15ca61ae1446f3be8f/pkg/util/wait/wait.go#L172
+type ResponseChecker func(resp *Response) (done bool, err error)
+
+// SpoofingClient is a minimal HTTP client wrapper that spoofs the domain of requests
+// for non-resolvable domains.
+type SpoofingClient struct {
+	Client          *http.Client
+	RequestInterval time.Duration
+	RequestTimeout  time.Duration
+
+	endpoint string
+	domain   string
+
+	logf logging.FormatLogger
+}
+
+// New returns a SpoofingClient that rewrites requests if the target domain is not resolvable.
+// It does this by looking up the ingress at construction time, so reusing a client will not
+// follow the ingress if it moves (or if there are multiple ingresses).
+//
+// If that's a problem, see test/request.go#WaitForEndpointState for oneshot spoofing.
+func New(kubeClientset *kubernetes.Clientset, logf logging.FormatLogger, domain string, resolvable bool, endpointOverride string) (*SpoofingClient, error) {
+	sc := SpoofingClient{
+		Client:          &http.Client{Transport: &ochttp.Transport{Propagation: &b3.HTTPFormat{}}}, // Using the ochttp Transport is required for zipkin tracing.
+		RequestInterval: requestInterval,
+		RequestTimeout:  RequestTimeout,
+		logf:            logf,
+	}
+
+	if !resolvable {
+		e := &endpointOverride
+		if endpointOverride == "" {
+			var err error
+			// If the domain that the Route controller is configured to assign to Route.Status.Domain
+			// (the domainSuffix) is not resolvable, we need to retrieve the endpoint and spoof
+			// the Host in our requests.
+			e, err = ingress.GetIngressEndpoint(kubeClientset)
+			if err != nil {
+				return nil, err
+			}
+		}
+
+		sc.endpoint = *e
+		sc.domain = domain
+	} else {
+		// If the domain is resolvable, we can use it directly when we make requests.
+		sc.endpoint = domain
+	}
+
+	return &sc, nil
+}
+
+// Do dispatches to the underlying http.Client.Do, spoofing domains as needed
+// and transforming the http.Response into a spoof.Response.
+// Each response is augmented with a "ZipkinTraceID" header that identifies the zipkin trace corresponding to the request.
+func (sc *SpoofingClient) Do(req *http.Request) (*Response, error) {
+	// Controls the Host header, for spoofing.
+	if sc.domain != "" {
+		req.Host = sc.domain
+	}
+
+	// Controls the actual resolution.
+	if sc.endpoint != "" {
+		req.URL.Host = sc.endpoint
+	}
+
+	// Starting span to capture zipkin trace.
+	traceContext, span := trace.StartSpan(req.Context(), "SpoofingClient-Trace")
+	defer span.End()
+
+	// Check to see if the call to this method is coming from a Poll call.
+	logZipkinTrace := true
+	if req.Header.Get(pollReqHeader) != "" {
+		req.Header.Del(pollReqHeader)
+		logZipkinTrace = false
+	}
+	resp, err := sc.Client.Do(req.WithContext(traceContext))
+	if err != nil {
+		return nil, err
+	}
+
+	defer resp.Body.Close()
+
+	resp.Header.Add(zipkin.ZipkinTraceIDHeader, span.SpanContext().TraceID.String())
+	body, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return nil, err
+	}
+
+	spoofResp := &Response{
+		Status:     resp.Status,
+		StatusCode: resp.StatusCode,
+		Header:     resp.Header,
+		Body:       body,
+	}
+
+	if logZipkinTrace {
+		sc.logZipkinTrace(spoofResp)
+	}
+
+	return spoofResp, nil
+}
+
+// Poll executes an http request until it satisfies the inState condition or encounters an error.
+func (sc *SpoofingClient) Poll(req *http.Request, inState ResponseChecker) (*Response, error) {
+	var (
+		resp *Response
+		err  error
+	)
+
+	err = wait.PollImmediate(sc.RequestInterval, sc.RequestTimeout, func() (bool, error) {
+		// As we may make multiple Do calls as part of a single Poll, we add this temporary header
+		// to the request to tell Do not to log the Zipkin trace itself; it is handled by this
+		// method instead.
+		req.Header.Add(pollReqHeader, "True")
+		resp, err = sc.Do(req)
+		if err != nil {
+			if isTCPTimeout(err) {
+				sc.logf("Retrying %s for TCP timeout %v", req.URL.String(), err)
+				return false, nil
+			}
+			// Retrying on DNS error, since we may be using xip.io or nip.io in tests.
+			if isDNSError(err) {
+				sc.logf("Retrying %s for DNS error %v", req.URL.String(), err)
+				return false, nil
+			}
+			// Repeat the poll on `connection refused` errors, which are usually transient Istio errors.
+			if isTCPConnectRefuse(err) {
+				sc.logf("Retrying %s for connection refused %v", req.URL.String(), err)
+				return false, nil
+			}
+			return true, err
+		}
+
+		return inState(resp)
+	})
+
+	if resp != nil {
+		sc.logZipkinTrace(resp)
+	}
+
+	if err != nil {
+		return resp, errors.Wrapf(err, "response: %s did not pass checks", resp)
+	}
+	return resp, nil
+}
+
+// logZipkinTrace logs the Zipkin trace for the given spoofResp.
+// We only log Zipkin traces for HTTP server errors, i.e. for HTTP status codes in the [500, 600) range.
+func (sc *SpoofingClient) logZipkinTrace(spoofResp *Response) {
+	if !zipkin.ZipkinTracingEnabled || spoofResp.StatusCode < http.StatusInternalServerError || spoofResp.StatusCode >= 600 {
+		return
+	}
+
+	traceID := spoofResp.Header.Get(zipkin.ZipkinTraceIDHeader)
+	sc.logf("Logging Zipkin Trace for: %s", traceID)
+
+	// Sleep to ensure all traces are correctly pushed to the backend.
+	time.Sleep(5 * time.Second)
+
+	json, err := zipkin.JSONTrace(traceID)
+	if err != nil {
+		sc.logf("Error getting zipkin trace: %v", err)
+	}
+
+	sc.logf("%s", json)
+}
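For callers that need more control than WaitForEndpointState offers, the client can be used directly. A sketch, reusing the illustrative checker pattern from earlier; the domain is hypothetical and the clientset is assumed to be built elsewhere:

```go
package e2e

import (
	"net/http"
	"testing"

	"github.com/knative/pkg/test/spoof"
	"k8s.io/client-go/kubernetes"
)

// pollDirectly shows direct SpoofingClient use; the domain is hypothetical.
func pollDirectly(t *testing.T, kubeClientset *kubernetes.Clientset) {
	sc, err := spoof.New(kubeClientset, t.Logf, "hello.default.example.com", false, "")
	if err != nil {
		t.Fatalf("failed to create spoofing client: %v", err)
	}

	req, err := http.NewRequest(http.MethodGet, "http://hello.default.example.com", nil)
	if err != nil {
		t.Fatalf("failed to build request: %v", err)
	}

	// Poll re-issues the request until the checker reports done or fails;
	// TCP timeouts, DNS errors, and connection refusals are retried internally.
	resp, err := sc.Poll(req, func(r *spoof.Response) (bool, error) {
		return r.StatusCode == http.StatusOK, nil
	})
	if err != nil {
		t.Fatalf("polling failed: %v", err)
	}
	t.Logf("final response: %s", resp)
}
```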
diff --git a/vendor/github.com/knative/pkg/test/zipkin/doc.go b/vendor/github.com/knative/pkg/test/zipkin/doc.go
new file mode 100644
index 000000000..0ce3c54fd
--- /dev/null
+++ b/vendor/github.com/knative/pkg/test/zipkin/doc.go
@@ -0,0 +1,41 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+Package zipkin adds Zipkin tracing support that can be used in conjunction with
+SpoofingClient to log zipkin traces for requests that have encountered server errors,
+i.e. HTTP requests that have an HTTP status in the [500, 600) range.
+
+This package exposes the following methods:
+
+	SetupZipkinTracing(*kubernetes.Clientset, logging.FormatLogger)
+	SetupZipkinTracing sets up zipkin tracing by setting up port-forwarding from
+	localhost to the zipkin pod on the cluster. On successful setup this method sets
+	the ZipkinTracingEnabled flag to true.
+
+	CleanupZipkinTracingSetup(logging.FormatLogger)
+	CleanupZipkinTracingSetup cleans up the zipkin tracing setup by tearing down the
+	port-forwarding set up by the call to SetupZipkinTracing. This method also sets
+	the ZipkinTracingEnabled flag to false.
+
+A general flow for a test suite to use the Zipkin tracing support is as follows:
+
+	1. Call SetupZipkinTracing in TestMain.
+	2. Use SpoofingClient to make HTTP requests.
+	3. Call CleanupZipkinTracingSetup on cleanup after tests are executed.
+*/
+package zipkin
diff --git a/vendor/github.com/knative/pkg/test/zipkin/util.go b/vendor/github.com/knative/pkg/test/zipkin/util.go
new file mode 100644
index 000000000..add07498f
--- /dev/null
+++ b/vendor/github.com/knative/pkg/test/zipkin/util.go
@@ -0,0 +1,145 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// util.go has constants and helper methods useful for zipkin tracing support.
+
+package zipkin
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"io/ioutil"
+	"net/http"
+	"sync"
+
+	"github.com/knative/pkg/test/logging"
+	"github.com/knative/pkg/test/monitoring"
+	"go.opencensus.io/trace"
+	"k8s.io/client-go/kubernetes"
+)
+
+const (
+	// ZipkinTraceIDHeader is the HTTP response header key used to store the Zipkin Trace ID.
+	ZipkinTraceIDHeader = "ZIPKIN_TRACE_ID"
+
+	// ZipkinPort is the port exposed by the Zipkin Pod.
+	// https://github.com/knative/serving/blob/master/config/monitoring/200-common/100-zipkin.yaml#L25 configures the Zipkin port on the cluster.
+	ZipkinPort = 9411
+
+	// ZipkinTraceEndpoint is the port-forwarded zipkin endpoint.
+	ZipkinTraceEndpoint = "http://localhost:9411/api/v2/trace/"
+
+	// app is the name of this component.
+	// This will be used as a label selector.
+	app = "zipkin"
+
+	// istioNs is the namespace we are using for istio components.
+	istioNs = "istio-system"
+)
+
+var (
+	zipkinPortForwardPID int
+
+	// ZipkinTracingEnabled indicates whether zipkin tracing is enabled.
+	ZipkinTracingEnabled = false
+
+	// setupOnce ensures we execute the zipkin setup only once.
+	setupOnce sync.Once
+
+	// teardownOnce ensures we execute the zipkin cleanup only if zipkin is set up, and only once.
+	teardownOnce sync.Once
+)
+
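The flow described in doc.go above is easiest to see end to end. A sketch of a TestMain following it; the kubeconfig path is hypothetical and the clientset construction uses the standard client-go helpers:

```go
package e2e

import (
	"log"
	"os"
	"testing"

	"github.com/knative/pkg/test/zipkin"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func TestMain(m *testing.M) {
	// Build the clientset from a hypothetical kubeconfig path.
	config, err := clientcmd.BuildConfigFromFlags("", "/home/user/.kube/config")
	if err != nil {
		log.Fatalf("failed to load kubeconfig: %v", err)
	}
	kubeClientset, err := kubernetes.NewForConfig(config)
	if err != nil {
		log.Fatalf("failed to create clientset: %v", err)
	}

	// Set up port-forwarding to the zipkin pod and enable AlwaysSample.
	zipkin.SetupZipkinTracing(kubeClientset, log.Printf)

	code := m.Run()

	// Tear down the port-forward; this is a no-op if setup never succeeded.
	zipkin.CleanupZipkinTracingSetup(log.Printf)
	os.Exit(code)
}
```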
+// SetupZipkinTracing sets up zipkin tracing, which involves:
+// 1. Setting up port-forwarding from localhost to the zipkin pod on the cluster
+//    (the pid of the process doing the port-forward is stored in a global variable).
+// 2. Enabling the AlwaysSample config for tracing.
+func SetupZipkinTracing(kubeClientset *kubernetes.Clientset, logf logging.FormatLogger) {
+	setupOnce.Do(func() {
+		if err := monitoring.CheckPortAvailability(ZipkinPort); err != nil {
+			logf("Zipkin port not available on the machine: %v", err)
+			return
+		}
+
+		zipkinPods, err := monitoring.GetPods(kubeClientset, app, istioNs)
+		if err != nil {
+			logf("Error retrieving Zipkin pod details: %v", err)
+			return
+		}
+
+		zipkinPortForwardPID, err = monitoring.PortForward(logf, zipkinPods, ZipkinPort, ZipkinPort, istioNs)
+		if err != nil {
+			logf("Error starting kubectl port-forward command: %v", err)
+			return
+		}
+
+		logf("Zipkin port-forward process started with PID: %d", zipkinPortForwardPID)
+
+		// Applying the AlwaysSample config to ensure we propagate the zipkin header for every request made by this client.
+		trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()})
+		logf("Successfully set up SpoofingClient for Zipkin Tracing")
+		ZipkinTracingEnabled = true
+	})
+}
+
+// CleanupZipkinTracingSetup cleans up the Zipkin tracing setup on the machine. This involves killing the process performing the port-forward.
+func CleanupZipkinTracingSetup(logf logging.FormatLogger) {
+	teardownOnce.Do(func() {
+		if !ZipkinTracingEnabled {
+			return
+		}
+
+		if err := monitoring.Cleanup(zipkinPortForwardPID); err != nil {
+			logf("Encountered error killing port-forward process in CleanupZipkinTracingSetup(): %v", err)
+			return
+		}
+
+		ZipkinTracingEnabled = false
+	})
+}
+
+// CheckZipkinPortAvailability checks to see if the Zipkin port is available on the machine.
+// It returns an error if the port is not available, i.e. if something (such as the port-forward) is already bound to it.
+func CheckZipkinPortAvailability() error {
+	return monitoring.CheckPortAvailability(ZipkinPort)
+}
+
+// JSONTrace returns a trace for the given traceID in JSON format.
+func JSONTrace(traceID string) (string, error) {
+	// Check that zipkin port-forwarding is set up: if the local port is still
+	// available, nothing is listening and there is no trace to fetch.
+	if CheckZipkinPortAvailability() == nil {
+		return "", errors.New("zipkin port-forwarding is not set up")
+	}
+
+	resp, err := http.Get(ZipkinTraceEndpoint + traceID)
+	if err != nil {
+		return "", err
+	}
+	defer resp.Body.Close()
+
+	trace, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return "", err
+	}
+
+	var prettyJSON bytes.Buffer
+	err = json.Indent(&prettyJSON, trace, "", "\t")
+	if err != nil {
+		return "", err
+	}
+
+	return prettyJSON.String(), nil
+}
diff --git a/vendor/github.com/knative/pkg/testing/doc.go b/vendor/github.com/knative/pkg/testing/doc.go
new file mode 100644
index 000000000..5862dfb06
--- /dev/null
+++ b/vendor/github.com/knative/pkg/testing/doc.go
@@ -0,0 +1,19 @@
+/*
+Copyright 2018 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+*/ + +// +k8s:deepcopy-gen=package +// +groupName=pkg.knative.dev +package testing diff --git a/vendor/github.com/knative/pkg/testing/inner_default_resource.go b/vendor/github.com/knative/pkg/testing/inner_default_resource.go new file mode 100644 index 000000000..5ee675648 --- /dev/null +++ b/vendor/github.com/knative/pkg/testing/inner_default_resource.go @@ -0,0 +1,157 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testing + +import ( + "context" + + "github.com/knative/pkg/apis" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// InnerDefaultResource is a simple resource that's compatible with our webhook. It differs from +// Resource by not omitting empty `spec`, so can change when it round trips +// JSON -> Golang type -> JSON. +type InnerDefaultResource struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // Note that this does _not_ have omitempty. So when JSON is round tripped through the Golang + // type, `spec: {}` will automatically be injected. + Spec InnerDefaultSpec `json:"spec"` + + // Status is a simple status. + Status InnerDefaultStatus `json:"status,omitempty"` +} + +// InnerDefaultSpec is the spec for InnerDefaultResource. +type InnerDefaultSpec struct { + Generation int64 `json:"generation,omitempty"` + + FieldWithDefault string `json:"fieldWithDefault,omitempty"` + + // Deprecated: This field is deprecated. + DeprecatedField string `json:"field,omitempty"` + + SubFields *InnerDefaultSubSpec `json:"subfields,omitempty"` +} + +// InnerDefaultSubSpec is a helper to test strict deprecated validation. +type InnerDefaultSubSpec struct { + // Deprecated: This field is deprecated. + DeprecatedString string `json:"string,omitempty"` + + // Deprecated: This field is deprecated. + DeprecatedStringPtr *string `json:"stringPtr,omitempty"` + + // Deprecated: This field is deprecated. + DeprecatedInt int64 `json:"int,omitempty"` + + // Deprecated: This field is deprecated. + DeprecatedIntPtr *int64 `json:"intPtr,omitempty"` + + // Deprecated: This field is deprecated. + DeprecatedMap map[string]string `json:"map,omitempty"` + + // Deprecated: This field is deprecated. + DeprecatedSlice []string `json:"slice,omitempty"` + + // Deprecated: This field is deprecated. + DeprecatedStruct InnerDefaultStruct `json:"struct,omitempty"` + + // Deprecated: This field is deprecated. + DeprecatedStructPtr *InnerDefaultStruct `json:"structPtr,omitempty"` + + InlinedStruct `json:",inline"` + *InlinedPtrStruct `json:",inline"` + + // Deprecated: This field is deprecated. + DeprecatedNotJson string +} + +// Adding complication helper. +type InnerDefaultStruct struct { + FieldAsString string `json:"fieldAsString,omitempty"` + + // Deprecated: This field is deprecated. + DeprecatedField string `json:"field,omitempty"` +} + +type InlinedStruct struct { + // Deprecated: This field is deprecated. 
+	DeprecatedField string `json:"fieldA,omitempty"`
+	*InlinedPtrStruct `json:",inline"`
+}
+
+type InlinedPtrStruct struct {
+	// Deprecated: This field is deprecated.
+	DeprecatedField string `json:"fieldB,omitempty"`
+}
+
+// InnerDefaultStatus is the status for InnerDefaultResource.
+type InnerDefaultStatus struct {
+	FieldAsString string `json:"fieldAsString,omitempty"`
+}
+
+// Check that InnerDefaultResource may be validated and defaulted.
+var _ apis.Validatable = (*InnerDefaultResource)(nil)
+var _ apis.Defaultable = (*InnerDefaultResource)(nil)
+
+// SetDefaults sets default values.
+func (i *InnerDefaultResource) SetDefaults(ctx context.Context) {
+	i.Spec.SetDefaults(ctx)
+}
+
+// SetDefaults sets default values.
+func (cs *InnerDefaultSpec) SetDefaults(ctx context.Context) {
+	if cs.FieldWithDefault == "" {
+		cs.FieldWithDefault = "I'm a default."
+	}
+}
+
+// Validate validates the resource.
+func (i *InnerDefaultResource) Validate(ctx context.Context) *apis.FieldError {
+	var errs *apis.FieldError
+	if apis.IsInUpdate(ctx) {
+		org := apis.GetBaseline(ctx).(*InnerDefaultResource)
+		errs = apis.CheckDeprecatedUpdate(ctx, i.Spec, org.Spec).ViaField("spec")
+		if i.Spec.SubFields != nil {
+			var orgSubFields interface{}
+			if org != nil && org.Spec.SubFields != nil {
+				orgSubFields = org.Spec.SubFields
+			}
+
+			errs = errs.Also(apis.CheckDeprecatedUpdate(ctx, i.Spec.SubFields, orgSubFields).ViaField("spec", "subFields"))
+
+			var orgDepStruct interface{}
+			if orgSubFields != nil {
+				orgDepStruct = org.Spec.SubFields.DeprecatedStruct
+			}
+
+			errs = errs.Also(apis.CheckDeprecatedUpdate(ctx, i.Spec.SubFields.DeprecatedStruct, orgDepStruct).ViaField("spec", "subFields", "deprecatedStruct"))
+		}
+	} else {
+		errs = apis.CheckDeprecated(ctx, i.Spec).ViaField("spec")
+		if i.Spec.SubFields != nil {
+			errs = errs.Also(apis.CheckDeprecated(ctx, i.Spec.SubFields).ViaField("spec", "subFields").
+				Also(apis.CheckDeprecated(ctx, i.Spec.SubFields.DeprecatedStruct).ViaField("deprecatedStruct")))
+		}
+	}
+	return errs
+}
diff --git a/vendor/github.com/knative/pkg/testing/register.go b/vendor/github.com/knative/pkg/testing/register.go
new file mode 100644
index 000000000..911c90fbd
--- /dev/null
+++ b/vendor/github.com/knative/pkg/testing/register.go
@@ -0,0 +1,42 @@
+/*
+Copyright 2018 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package testing
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// SchemeGroupVersion is the group version used to register these objects.
+var SchemeGroupVersion = schema.GroupVersion{Group: "pkg.knative.dev", Version: "v2"}
+
+var (
+	SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
+	AddToScheme   = SchemeBuilder.AddToScheme
+)
+
+// addKnownTypes adds the list of known types to Scheme.
+func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes( + SchemeGroupVersion, + &Resource{}, + (&Resource{}).GetListType(), + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/github.com/knative/pkg/testing/resource.go b/vendor/github.com/knative/pkg/testing/resource.go new file mode 100644 index 000000000..d553f8d47 --- /dev/null +++ b/vendor/github.com/knative/pkg/testing/resource.go @@ -0,0 +1,190 @@ +/* +Copyright 2017 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testing + +import ( + "context" + "fmt" + + "github.com/knative/pkg/apis" + "github.com/knative/pkg/kmp" + + authenticationv1 "k8s.io/api/authentication/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Resource is a simple resource that's compatible with our webhook +type Resource struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec ResourceSpec `json:"spec,omitempty"` +} + +const ( + // CreatorAnnotation is the annotation that denotes the user that created the resource. + CreatorAnnotation = "testing.knative.dev/creator" + // UpdaterAnnotation is the annotation that denotes the user that last updated the resource. + UpdaterAnnotation = "testing.knative.dev/updater" +) + +// Check that Resource may be validated and defaulted. +var _ apis.Validatable = (*Resource)(nil) +var _ apis.Defaultable = (*Resource)(nil) +var _ apis.Immutable = (*Resource)(nil) +var _ apis.Listable = (*Resource)(nil) + +// ResourceSpec represents test resource spec. +type ResourceSpec struct { + FieldWithDefault string `json:"fieldWithDefault,omitempty"` + FieldWithContextDefault string `json:"fieldWithContextDefault,omitempty"` + FieldWithValidation string `json:"fieldWithValidation,omitempty"` + FieldThatsImmutable string `json:"fieldThatsImmutable,omitempty"` + FieldThatsImmutableWithDefault string `json:"fieldThatsImmutableWithDefault,omitempty"` +} + +// SetDefaults sets the defaults on the object. +func (c *Resource) SetDefaults(ctx context.Context) { + c.Spec.SetDefaults(ctx) + + if apis.IsInUpdate(ctx) { + old := apis.GetBaseline(ctx).(*Resource) + c.AnnotateUserInfo(ctx, old, apis.GetUserInfo(ctx)) + } else { + c.AnnotateUserInfo(ctx, nil, apis.GetUserInfo(ctx)) + } +} + +// AnnotateUserInfo satisfies the Annotatable interface. +func (c *Resource) AnnotateUserInfo(ctx context.Context, prev *Resource, ui *authenticationv1.UserInfo) { + a := c.ObjectMeta.GetAnnotations() + if a == nil { + a = map[string]string{} + } + userName := ui.Username + + // If previous is nil (i.e. this is `Create` operation), + // then we set both fields. + // Otherwise copy creator from the previous state. + if prev == nil { + a[CreatorAnnotation] = userName + } else { + // No spec update ==> bail out. 
+ if ok, _ := kmp.SafeEqual(prev.Spec, c.Spec); ok { + if prev.ObjectMeta.GetAnnotations() != nil { + a[CreatorAnnotation] = prev.ObjectMeta.GetAnnotations()[CreatorAnnotation] + userName = prev.ObjectMeta.GetAnnotations()[UpdaterAnnotation] + } + } else { + if prev.ObjectMeta.GetAnnotations() != nil { + a[CreatorAnnotation] = prev.ObjectMeta.GetAnnotations()[CreatorAnnotation] + } + } + } + // Regardless of `old` set the updater. + a[UpdaterAnnotation] = userName + c.ObjectMeta.SetAnnotations(a) +} + +func (c *Resource) Validate(ctx context.Context) *apis.FieldError { + err := c.Spec.Validate(ctx).ViaField("spec") + + if apis.IsInUpdate(ctx) { + original := apis.GetBaseline(ctx).(*Resource) + err = err.Also(c.CheckImmutableFields(ctx, original)) + } + return err +} + +type onContextKey struct{} + +// WithValue returns a WithContext for attaching an OnContext with the given value. +func WithValue(ctx context.Context, val string) context.Context { + return context.WithValue(ctx, onContextKey{}, &OnContext{Value: val}) +} + +// OnContext is a struct for holding a value attached to a context. +type OnContext struct { + Value string +} + +// SetDefaults sets the defaults on the spec. +func (cs *ResourceSpec) SetDefaults(ctx context.Context) { + if cs.FieldWithDefault == "" { + cs.FieldWithDefault = "I'm a default." + } + if cs.FieldWithContextDefault == "" { + oc, ok := ctx.Value(onContextKey{}).(*OnContext) + if ok { + cs.FieldWithContextDefault = oc.Value + } + } + if cs.FieldThatsImmutableWithDefault == "" { + cs.FieldThatsImmutableWithDefault = "this is another default value" + } +} + +func (cs *ResourceSpec) Validate(ctx context.Context) *apis.FieldError { + if cs.FieldWithValidation != "magic value" { + return apis.ErrInvalidValue(cs.FieldWithValidation, "fieldWithValidation") + } + return nil +} + +func (current *Resource) CheckImmutableFields(ctx context.Context, og apis.Immutable) *apis.FieldError { + original, ok := og.(*Resource) + if !ok { + return &apis.FieldError{Message: "The provided original was not a Resource"} + } + + if original.Spec.FieldThatsImmutable != current.Spec.FieldThatsImmutable { + return &apis.FieldError{ + Message: "Immutable field changed", + Paths: []string{"spec.fieldThatsImmutable"}, + Details: fmt.Sprintf("got: %v, want: %v", current.Spec.FieldThatsImmutable, + original.Spec.FieldThatsImmutable), + } + } + + if original.Spec.FieldThatsImmutableWithDefault != current.Spec.FieldThatsImmutableWithDefault { + return &apis.FieldError{ + Message: "Immutable field changed", + Paths: []string{"spec.fieldThatsImmutableWithDefault"}, + Details: fmt.Sprintf("got: %v, want: %v", current.Spec.FieldThatsImmutableWithDefault, + original.Spec.FieldThatsImmutableWithDefault), + } + } + return nil +} + +// GetListType implements apis.Listable +func (r *Resource) GetListType() runtime.Object { + return &ResourceList{} +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ResourceList is a list of Resource resources +type ResourceList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + Items []Resource `json:"items"` +} diff --git a/vendor/github.com/knative/pkg/testing/zz_generated.deepcopy.go b/vendor/github.com/knative/pkg/testing/zz_generated.deepcopy.go new file mode 100644 index 000000000..7ae6d1791 --- /dev/null +++ b/vendor/github.com/knative/pkg/testing/zz_generated.deepcopy.go @@ -0,0 +1,285 @@ +// +build !ignore_autogenerated + +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, 
Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package testing + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InlinedPtrStruct) DeepCopyInto(out *InlinedPtrStruct) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InlinedPtrStruct. +func (in *InlinedPtrStruct) DeepCopy() *InlinedPtrStruct { + if in == nil { + return nil + } + out := new(InlinedPtrStruct) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InlinedStruct) DeepCopyInto(out *InlinedStruct) { + *out = *in + if in.InlinedPtrStruct != nil { + in, out := &in.InlinedPtrStruct, &out.InlinedPtrStruct + *out = new(InlinedPtrStruct) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InlinedStruct. +func (in *InlinedStruct) DeepCopy() *InlinedStruct { + if in == nil { + return nil + } + out := new(InlinedStruct) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InnerDefaultResource) DeepCopyInto(out *InnerDefaultResource) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InnerDefaultResource. +func (in *InnerDefaultResource) DeepCopy() *InnerDefaultResource { + if in == nil { + return nil + } + out := new(InnerDefaultResource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *InnerDefaultResource) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InnerDefaultSpec) DeepCopyInto(out *InnerDefaultSpec) { + *out = *in + if in.SubFields != nil { + in, out := &in.SubFields, &out.SubFields + *out = new(InnerDefaultSubSpec) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InnerDefaultSpec. +func (in *InnerDefaultSpec) DeepCopy() *InnerDefaultSpec { + if in == nil { + return nil + } + out := new(InnerDefaultSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InnerDefaultStatus) DeepCopyInto(out *InnerDefaultStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InnerDefaultStatus. 
+func (in *InnerDefaultStatus) DeepCopy() *InnerDefaultStatus { + if in == nil { + return nil + } + out := new(InnerDefaultStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InnerDefaultStruct) DeepCopyInto(out *InnerDefaultStruct) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InnerDefaultStruct. +func (in *InnerDefaultStruct) DeepCopy() *InnerDefaultStruct { + if in == nil { + return nil + } + out := new(InnerDefaultStruct) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InnerDefaultSubSpec) DeepCopyInto(out *InnerDefaultSubSpec) { + *out = *in + if in.DeprecatedStringPtr != nil { + in, out := &in.DeprecatedStringPtr, &out.DeprecatedStringPtr + *out = new(string) + **out = **in + } + if in.DeprecatedIntPtr != nil { + in, out := &in.DeprecatedIntPtr, &out.DeprecatedIntPtr + *out = new(int64) + **out = **in + } + if in.DeprecatedMap != nil { + in, out := &in.DeprecatedMap, &out.DeprecatedMap + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.DeprecatedSlice != nil { + in, out := &in.DeprecatedSlice, &out.DeprecatedSlice + *out = make([]string, len(*in)) + copy(*out, *in) + } + out.DeprecatedStruct = in.DeprecatedStruct + if in.DeprecatedStructPtr != nil { + in, out := &in.DeprecatedStructPtr, &out.DeprecatedStructPtr + *out = new(InnerDefaultStruct) + **out = **in + } + in.InlinedStruct.DeepCopyInto(&out.InlinedStruct) + if in.InlinedPtrStruct != nil { + in, out := &in.InlinedPtrStruct, &out.InlinedPtrStruct + *out = new(InlinedPtrStruct) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InnerDefaultSubSpec. +func (in *InnerDefaultSubSpec) DeepCopy() *InnerDefaultSubSpec { + if in == nil { + return nil + } + out := new(InnerDefaultSubSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OnContext) DeepCopyInto(out *OnContext) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OnContext. +func (in *OnContext) DeepCopy() *OnContext { + if in == nil { + return nil + } + out := new(OnContext) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Resource) DeepCopyInto(out *Resource) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Resource. +func (in *Resource) DeepCopy() *Resource { + if in == nil { + return nil + } + out := new(Resource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Resource) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourceList) DeepCopyInto(out *ResourceList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Resource, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceList. +func (in *ResourceList) DeepCopy() *ResourceList { + if in == nil { + return nil + } + out := new(ResourceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ResourceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceSpec) DeepCopyInto(out *ResourceSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceSpec. +func (in *ResourceSpec) DeepCopy() *ResourceSpec { + if in == nil { + return nil + } + out := new(ResourceSpec) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/node/v1alpha1/doc.go b/vendor/github.com/knative/pkg/tracing/config/doc.go similarity index 74% rename from vendor/k8s.io/api/node/v1alpha1/doc.go rename to vendor/github.com/knative/pkg/tracing/config/doc.go index dfe99540b..757ea4057 100644 --- a/vendor/k8s.io/api/node/v1alpha1/doc.go +++ b/vendor/github.com/knative/pkg/tracing/config/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Kubernetes Authors. +Copyright 2019 The Knative Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -15,9 +15,7 @@ limitations under the License. */ // +k8s:deepcopy-gen=package -// +k8s:protobuf-gen=package -// +k8s:openapi-gen=true -// +groupName=node.k8s.io - -package v1alpha1 // import "k8s.io/api/node/v1alpha1" +// Package config holds the typed objects that define the schemas for +// assorted ConfigMap objects on which the Revision controller depends. +package config diff --git a/vendor/github.com/knative/pkg/tracing/config/tracing.go b/vendor/github.com/knative/pkg/tracing/config/tracing.go new file mode 100644 index 000000000..5d21fcf5d --- /dev/null +++ b/vendor/github.com/knative/pkg/tracing/config/tracing.go @@ -0,0 +1,95 @@ +/* +Copyright 2019 The Knative Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package config
+
+import (
+	"errors"
+	"fmt"
+	"strconv"
+
+	corev1 "k8s.io/api/core/v1"
+)
+
+const (
+	// ConfigName is the name of the configmap.
+	ConfigName = "config-tracing"
+
+	enableKey         = "enable"
+	zipkinEndpointKey = "zipkin-endpoint"
+	debugKey          = "debug"
+	sampleRateKey     = "sample-rate"
+)
+
+// Config holds the configuration for tracers.
+type Config struct {
+	Enable         bool
+	ZipkinEndpoint string
+	Debug          bool
+	SampleRate     float64
+}
+
+// Equals returns true if two Configs are identical.
+func (cfg *Config) Equals(other *Config) bool {
+	return other.Enable == cfg.Enable && other.ZipkinEndpoint == cfg.ZipkinEndpoint && other.Debug == cfg.Debug && other.SampleRate == cfg.SampleRate
+}
+
+// NewTracingConfigFromMap returns a Config given a map corresponding to a ConfigMap.
+func NewTracingConfigFromMap(cfgMap map[string]string) (*Config, error) {
+	tc := Config{
+		Enable:     false,
+		Debug:      false,
+		SampleRate: 0.1,
+	}
+	if enable, ok := cfgMap[enableKey]; ok {
+		enableBool, err := strconv.ParseBool(enable)
+		if err != nil {
+			return nil, fmt.Errorf("Failed parsing tracing config %q: %v", enableKey, err)
+		}
+		tc.Enable = enableBool
+	}
+
+	if endpoint, ok := cfgMap[zipkinEndpointKey]; !ok {
+		if tc.Enable {
+			return nil, errors.New("Tracing enabled but no zipkin endpoint specified")
+		}
+	} else {
+		tc.ZipkinEndpoint = endpoint
+	}
+
+	if debug, ok := cfgMap[debugKey]; ok {
+		debugBool, err := strconv.ParseBool(debug)
+		if err != nil {
+			return nil, fmt.Errorf("Failed parsing tracing config %q: %v", debugKey, err)
+		}
+		tc.Debug = debugBool
+	}
+
+	if sampleRate, ok := cfgMap[sampleRateKey]; ok {
+		sampleRateFloat, err := strconv.ParseFloat(sampleRate, 64)
+		if err != nil {
+			return nil, fmt.Errorf("Failed to parse sampleRate in tracing config: %v", err)
+		}
+		tc.SampleRate = sampleRateFloat
+	}
+
+	return &tc, nil
+}
+
+// NewTracingConfigFromConfigMap returns a Config for the given configmap.
+func NewTracingConfigFromConfigMap(config *corev1.ConfigMap) (*Config, error) {
+	return NewTracingConfigFromMap(config.Data)
+}
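To make the key names concrete, a sketch of parsing a config-tracing ConfigMap payload; the zipkin endpoint value is a hypothetical in-cluster address:

```go
package main

import (
	"fmt"
	"log"

	"github.com/knative/pkg/tracing/config"
)

func main() {
	// The map mirrors the Data field of a "config-tracing" ConfigMap.
	cfg, err := config.NewTracingConfigFromMap(map[string]string{
		"enable":          "true",
		"zipkin-endpoint": "http://zipkin.istio-system.svc.cluster.local:9411/api/v2/spans",
		"debug":           "false",
		"sample-rate":     "0.5",
	})
	if err != nil {
		log.Fatalf("failed to parse tracing config: %v", err)
	}
	fmt.Printf("tracing enabled=%t sample-rate=%v\n", cfg.Enable, cfg.SampleRate)
}
```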
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/deepcopy.go b/vendor/github.com/knative/pkg/tracing/config/zz_generated.deepcopy.go
similarity index 51%
rename from vendor/k8s.io/apimachinery/pkg/apis/meta/v1/deepcopy.go
rename to vendor/github.com/knative/pkg/tracing/config/zz_generated.deepcopy.go
index 8751d0524..6889d929d 100644
--- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/deepcopy.go
+++ b/vendor/github.com/knative/pkg/tracing/config/zz_generated.deepcopy.go
@@ -1,5 +1,7 @@
+// +build !ignore_autogenerated
+
 /*
-Copyright 2019 The Kubernetes Authors.
+Copyright 2019 The Knative Authors
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -14,33 +16,22 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-package v1
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package config
 
-import (
-	"k8s.io/apimachinery/pkg/runtime"
-)
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Config) DeepCopyInto(out *Config) {
+	*out = *in
+	return
+}
 
-func (in *TableRow) DeepCopy() *TableRow {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Config.
+func (in *Config) DeepCopy() *Config {
 	if in == nil {
 		return nil
 	}
-
-	out := new(TableRow)
-
-	if in.Cells != nil {
-		out.Cells = make([]interface{}, len(in.Cells))
-		for i := range in.Cells {
-			out.Cells[i] = runtime.DeepCopyJSONValue(in.Cells[i])
-		}
-	}
-
-	if in.Conditions != nil {
-		out.Conditions = make([]TableRowCondition, len(in.Conditions))
-		for i := range in.Conditions {
-			in.Conditions[i].DeepCopyInto(&out.Conditions[i])
-		}
-	}
-
-	in.Object.DeepCopyInto(&out.Object)
+	out := new(Config)
+	in.DeepCopyInto(out)
 	return out
 }
diff --git a/vendor/github.com/knative/pkg/tracing/http.go b/vendor/github.com/knative/pkg/tracing/http.go
new file mode 100644
index 000000000..c0ecf4723
--- /dev/null
+++ b/vendor/github.com/knative/pkg/tracing/http.go
@@ -0,0 +1,28 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package tracing
+
+import (
+	"net/http"
+
+	"go.opencensus.io/plugin/ochttp"
+)
+
+// HTTPSpanMiddleware is an http.Handler middleware that creates spans for the HTTP endpoint.
+func HTTPSpanMiddleware(next http.Handler) http.Handler {
+	return &ochttp.Handler{Handler: next}
+}
diff --git a/vendor/github.com/knative/pkg/tracing/opencensus.go b/vendor/github.com/knative/pkg/tracing/opencensus.go
new file mode 100644
index 000000000..e022164f3
--- /dev/null
+++ b/vendor/github.com/knative/pkg/tracing/opencensus.go
@@ -0,0 +1,136 @@
+package tracing
+
+import (
+	"errors"
+	"sync"
+
+	"github.com/knative/pkg/tracing/config"
+	zipkinmodel "github.com/openzipkin/zipkin-go/model"
+	zipkinreporter "github.com/openzipkin/zipkin-go/reporter"
+	"go.opencensus.io/exporter/zipkin"
+	"go.opencensus.io/trace"
+)
+
+// ConfigOption is the interface for adding additional exporters and configuring opencensus tracing.
+type ConfigOption func(*config.Config)
+
+// OpenCensusTracer is responsible for managing and updating the configuration of OpenCensus tracing.
+type OpenCensusTracer struct {
+	curCfg         *config.Config
+	configOptions  []ConfigOption
+	zipkinReporter zipkinreporter.Reporter
+	zipkinExporter trace.Exporter
+}
+
+// OpenCensus tracing keeps state in globals, therefore we can only run one OpenCensusTracer at a time.
+var (
+	octMutex  sync.Mutex
+	globalOct *OpenCensusTracer
+)
+
+func NewOpenCensusTracer(configOptions ...ConfigOption) *OpenCensusTracer {
+	return &OpenCensusTracer{
+		configOptions: configOptions,
+	}
+}
+
+func (oct *OpenCensusTracer) ApplyConfig(cfg *config.Config) error {
+	err := oct.acquireGlobal()
+	defer octMutex.Unlock()
+	if err != nil {
+		return err
+	}
+
+	// Short circuit if our config hasn't changed.
+	if oct.curCfg != nil && oct.curCfg.Equals(cfg) {
+		return nil
+	}
+
+	// Apply config options.
+	for _, configOpt := range oct.configOptions {
+		configOpt(cfg)
+	}
+
+	// Set config.
+	trace.ApplyConfig(*createOCTConfig(cfg))
+
+	return nil
+}
+
+func (oct *OpenCensusTracer) Finish() error {
+	err := oct.acquireGlobal()
+	defer octMutex.Unlock()
+	if err != nil {
+		return errors.New("Finish called on an OpenCensusTracer which is not the global OpenCensusTracer")
+	}
+
+	for _, configOpt := range oct.configOptions {
+		configOpt(nil)
+	}
+	globalOct = nil
+
+	return nil
+}
+
+func (oct *OpenCensusTracer) acquireGlobal() error {
+	octMutex.Lock()
+
+	if globalOct == nil {
+		globalOct = oct
+	} else if globalOct != oct {
+		return errors.New("an OpenCensusTracer already exists and only one can be run at a time")
+	}
+
+	return nil
+}
+
+func createOCTConfig(cfg *config.Config) *trace.Config {
+	octCfg := trace.Config{}
+
+	if cfg.Enable {
+		if cfg.Debug {
+			octCfg.DefaultSampler = trace.AlwaysSample()
+		} else {
+			octCfg.DefaultSampler = trace.ProbabilitySampler(cfg.SampleRate)
+		}
+	} else {
+		octCfg.DefaultSampler = trace.NeverSample()
+	}
+
+	return &octCfg
+}
+
+func WithZipkinExporter(reporterFact ZipkinReporterFactory, endpoint *zipkinmodel.Endpoint) ConfigOption {
+	return func(cfg *config.Config) {
+		var (
+			reporter zipkinreporter.Reporter
+			exporter trace.Exporter
+		)
+
+		if cfg != nil && cfg.Enable {
+			// Initialize our reporter / exporter.
+			// Do this before cleanup to minimize the time where we have duplicate exporters.
+			// Assign to the outer reporter/exporter (no ":=") so they remain visible below.
+			rep, err := reporterFact(cfg)
+			if err != nil {
+				// TODO(greghaynes) log this error
+				return
+			}
+			reporter = rep
+			exporter = zipkin.NewExporter(reporter, endpoint)
+			trace.RegisterExporter(exporter)
+		}
+
+		// We know this is set because we are called with the acquireGlobal lock held.
+		oct := globalOct
+		if oct.zipkinExporter != nil {
+			trace.UnregisterExporter(oct.zipkinExporter)
+		}
+
+		if oct.zipkinReporter != nil {
+			// TODO(greghaynes) log this error
+			_ = oct.zipkinReporter.Close()
+		}
+
+		oct.zipkinReporter = reporter
+		oct.zipkinExporter = exporter
+	}
+}
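A sketch of how a process might wire the tracer to its tracing config, using the CreateZipkinReporter factory from the file below; the service name, port, and endpoint are hypothetical:

```go
package main

import (
	"log"

	"github.com/knative/pkg/tracing"
	"github.com/knative/pkg/tracing/config"
	zipkinmodel "github.com/openzipkin/zipkin-go/model"
)

func main() {
	// The local endpoint identifies this process in recorded spans.
	endpoint := &zipkinmodel.Endpoint{ServiceName: "my-controller", Port: 8080}

	oct := tracing.NewOpenCensusTracer(
		tracing.WithZipkinExporter(tracing.CreateZipkinReporter, endpoint),
	)

	cfg, err := config.NewTracingConfigFromMap(map[string]string{
		"enable":          "true",
		"zipkin-endpoint": "http://zipkin.istio-system.svc.cluster.local:9411/api/v2/spans",
	})
	if err != nil {
		log.Fatalf("failed to parse tracing config: %v", err)
	}

	// ApplyConfig can be called again whenever the ConfigMap changes.
	if err := oct.ApplyConfig(cfg); err != nil {
		log.Fatalf("failed to apply tracing config: %v", err)
	}
	defer oct.Finish()
}
```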
diff --git a/vendor/github.com/knative/pkg/tracing/zipkin.go b/vendor/github.com/knative/pkg/tracing/zipkin.go
new file mode 100644
index 000000000..c455c47ed
--- /dev/null
+++ b/vendor/github.com/knative/pkg/tracing/zipkin.go
@@ -0,0 +1,36 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package tracing
+
+import (
+	zipkinreporter "github.com/openzipkin/zipkin-go/reporter"
+	httpreporter "github.com/openzipkin/zipkin-go/reporter/http"
+
+	"github.com/knative/pkg/tracing/config"
+)
+
+// ZipkinReporterFactory is a factory function which creates a reporter given a config.
+type ZipkinReporterFactory func(*config.Config) (zipkinreporter.Reporter, error)
+
+// CreateZipkinReporter returns a zipkin reporter. If ZipkinEndpoint is not specified it returns
+// a noop reporter.
+func CreateZipkinReporter(cfg *config.Config) (zipkinreporter.Reporter, error) {
+	if cfg.ZipkinEndpoint == "" {
+		return zipkinreporter.NewNoopReporter(), nil
+	}
+	return httpreporter.NewReporter(cfg.ZipkinEndpoint), nil
+}
diff --git a/vendor/github.com/knative/pkg/tracker/doc.go b/vendor/github.com/knative/pkg/tracker/doc.go
new file mode 100644
index 000000000..a54e6affe
--- /dev/null
+++ b/vendor/github.com/knative/pkg/tracker/doc.go
@@ -0,0 +1,21 @@
+/*
+Copyright 2018 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package tracker defines a utility to enable Reconcilers to trigger
+// reconciliations when objects that are cross-referenced change, so
+// that the level-based reconciliation can react to the change. The
+// prototypical cross-reference in Kubernetes is corev1.ObjectReference.
+package tracker
diff --git a/vendor/github.com/knative/pkg/tracker/enqueue.go b/vendor/github.com/knative/pkg/tracker/enqueue.go
new file mode 100644
index 000000000..ed225c0c4
--- /dev/null
+++ b/vendor/github.com/knative/pkg/tracker/enqueue.go
@@ -0,0 +1,169 @@
+/*
+Copyright 2018 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package tracker
+
+import (
+	"fmt"
+	"sort"
+	"strings"
+	"sync"
+	"time"
+
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/util/validation"
+	"k8s.io/client-go/tools/cache"
+
+	"github.com/knative/pkg/kmeta"
+)
+
+// New returns an implementation of Interface that lets a Reconciler
+// register a particular resource as watching an ObjectReference for
+// a particular lease duration. This watch must be refreshed
+// periodically (e.g.
by a controller resync) or it will expire. +// +// When OnChanged is called by the informer for a particular +// GroupVersionKind, the provided callback is called with the "key" +// of each object actively watching the changed object. +func New(callback func(string), lease time.Duration) Interface { + return &impl{ + leaseDuration: lease, + cb: callback, + } +} + +type impl struct { + m sync.Mutex + // mapping maps from an object reference to the set of + // keys for objects watching it. + mapping map[corev1.ObjectReference]set + + // The amount of time that an object may watch another + // before having to renew the lease. + leaseDuration time.Duration + + cb func(string) +} + +// Check that impl implements Interface. +var _ Interface = (*impl)(nil) + +// set is a map from keys to expirations +type set map[string]time.Time + +// Track implements Interface. +func (i *impl) Track(ref corev1.ObjectReference, obj interface{}) error { + invalidFields := map[string][]string{ + "APIVersion": validation.IsQualifiedName(ref.APIVersion), + "Kind": validation.IsCIdentifier(ref.Kind), + "Namespace": validation.IsDNS1123Label(ref.Namespace), + "Name": validation.IsDNS1123Subdomain(ref.Name), + } + fieldErrors := []string{} + for k, v := range invalidFields { + for _, msg := range v { + fieldErrors = append(fieldErrors, fmt.Sprintf("%s: %s", k, msg)) + } + } + if len(fieldErrors) > 0 { + sort.Strings(fieldErrors) + return fmt.Errorf("Invalid ObjectReference:\n%s", strings.Join(fieldErrors, "\n")) + } + + key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) + if err != nil { + return err + } + + i.m.Lock() + defer i.m.Unlock() + if i.mapping == nil { + i.mapping = make(map[corev1.ObjectReference]set) + } + + l, ok := i.mapping[ref] + if !ok { + l = set{} + } + if expiry, ok := l[key]; !ok || isExpired(expiry) { + // When covering an uncovered key, immediately call the + // registered callback to ensure that the following pattern + // doesn't create problems: + // foo, err := lister.Get(key) + // // Later... + // err := tracker.Track(fooRef, parent) + // In this example, "Later" represents a window where "foo" may + // have changed or been created while the Track is not active. + // The simplest way of eliminating such a window is to call the + // callback to "catch up" immediately following new + // registrations. + i.cb(key) + } + // Overwrite the key with a new expiration. + l[key] = time.Now().Add(i.leaseDuration) + + i.mapping[ref] = l + return nil +} + +func objectReference(item kmeta.Accessor) corev1.ObjectReference { + gvk := item.GroupVersionKind() + apiVersion, kind := gvk.ToAPIVersionAndKind() + return corev1.ObjectReference{ + APIVersion: apiVersion, + Kind: kind, + Namespace: item.GetNamespace(), + Name: item.GetName(), + } +} + +func isExpired(expiry time.Time) bool { + return time.Now().After(expiry) +} + +// OnChanged implements Interface. +func (i *impl) OnChanged(obj interface{}) { + item, err := kmeta.DeletionHandlingAccessor(obj) + if err != nil { + // TODO(mattmoor): We should consider logging here. + return + } + + or := objectReference(item) + + // TODO(mattmoor): Consider locking the mapping (global) for a + // smaller scope and leveraging a per-set lock to guard its access. + i.m.Lock() + defer i.m.Unlock() + s, ok := i.mapping[or] + if !ok { + // TODO(mattmoor): We should consider logging here. + return + } + + for key, expiry := range s { + // If the expiration has lapsed, then delete the key. 
+		if isExpired(expiry) {
+			delete(s, key)
+			continue
+		}
+		i.cb(key)
+	}
+
+	if len(s) == 0 {
+		delete(i.mapping, or)
+	}
+}
diff --git a/vendor/github.com/knative/pkg/tracker/interface.go b/vendor/github.com/knative/pkg/tracker/interface.go
new file mode 100644
index 000000000..6481a839d
--- /dev/null
+++ b/vendor/github.com/knative/pkg/tracker/interface.go
@@ -0,0 +1,33 @@
+/*
+Copyright 2018 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package tracker
+
+import (
+	corev1 "k8s.io/api/core/v1"
+)
+
+// Interface defines the interface through which an object can register
+// that it is tracking another object by reference.
+type Interface interface {
+	// Track tells us that "obj" is tracking changes to the
+	// referenced object.
+	Track(ref corev1.ObjectReference, obj interface{}) error
+
+	// OnChanged is a callback to register with the InformerFactory
+	// so that we are notified for appropriate object changes.
+	OnChanged(obj interface{})
+}
diff --git a/vendor/github.com/knative/pkg/version/version.go b/vendor/github.com/knative/pkg/version/version.go
new file mode 100644
index 000000000..86e2db4b5
--- /dev/null
+++ b/vendor/github.com/knative/pkg/version/version.go
@@ -0,0 +1,55 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package version
+
+import (
+	"fmt"
+
+	"github.com/rogpeppe/go-internal/semver"
+	"k8s.io/apimachinery/pkg/version"
+)
+
+// ServerVersioner is an interface to mock the `ServerVersion`
+// method of the Kubernetes client's Discovery interface.
+// In an application, `kubeClient.Discovery()` can be used to
+// satisfy this interface.
+type ServerVersioner interface {
+	ServerVersion() (*version.Info, error)
+}
+
+var minimumVersion = "v1.11.0"
+
+// CheckMinimumVersion checks if the currently installed version of
+// Kubernetes is compatible with the minimum version required.
+// Returns an error if it's not.
+//
+// A Kubernetes discovery client can be passed in as the versioner
+// like `CheckMinimumVersion(kubeClient.Discovery())`.
+func CheckMinimumVersion(versioner ServerVersioner) error {
+	v, err := versioner.ServerVersion()
+	if err != nil {
+		return err
+	}
+	currentVersion := semver.Canonical(v.String())
+
+	// Compare returns 1 if the first version is greater than the
+	// second version.
+ if semver.Compare(minimumVersion, currentVersion) == 1 { + return fmt.Errorf("kubernetes version %q is not compatible, need at least %q", currentVersion, minimumVersion) + } + return nil +} diff --git a/vendor/github.com/knative/pkg/webhook/OWNERS b/vendor/github.com/knative/pkg/webhook/OWNERS new file mode 100644 index 000000000..b87878d94 --- /dev/null +++ b/vendor/github.com/knative/pkg/webhook/OWNERS @@ -0,0 +1,4 @@ +# The OWNERS file is used by prow to automatically merge approved PRs. + +approvers: +- webhook-approvers diff --git a/vendor/github.com/knative/pkg/webhook/certs.go b/vendor/github.com/knative/pkg/webhook/certs.go new file mode 100644 index 000000000..ba0ee7393 --- /dev/null +++ b/vendor/github.com/knative/pkg/webhook/certs.go @@ -0,0 +1,165 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package webhook + +import ( + "context" + "crypto/rand" + "crypto/rsa" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "errors" + "math/big" + "time" + + "go.uber.org/zap" + + "github.com/knative/pkg/logging" +) + +const ( + organization = "knative.dev" +) + +// Create the common parts of the cert. These don't change between +// the root/CA cert and the server cert. +func createCertTemplate(name, namespace string) (*x509.Certificate, error) { + serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128) + serialNumber, err := rand.Int(rand.Reader, serialNumberLimit) + if err != nil { + return nil, errors.New("failed to generate serial number: " + err.Error()) + } + + serviceName := name + "." 
+ namespace
+	serviceNames := []string{
+		name,
+		serviceName,
+		serviceName + ".svc",
+		serviceName + ".svc.cluster.local",
+	}
+
+	tmpl := x509.Certificate{
+		SerialNumber:          serialNumber,
+		Subject:               pkix.Name{Organization: []string{organization}},
+		SignatureAlgorithm:    x509.SHA256WithRSA,
+		NotBefore:             time.Now(),
+		NotAfter:              time.Now().AddDate(1, 0, 0), // valid for 1 year
+		BasicConstraintsValid: true,
+		DNSNames:              serviceNames,
+	}
+	return &tmpl, nil
+}
+
+// Create cert template suitable for CA and hence signing
+func createCACertTemplate(name, namespace string) (*x509.Certificate, error) {
+	rootCert, err := createCertTemplate(name, namespace)
+	if err != nil {
+		return nil, err
+	}
+	// Make it into a CA cert and change it so we can use it to sign certs
+	rootCert.IsCA = true
+	rootCert.KeyUsage = x509.KeyUsageCertSign | x509.KeyUsageDigitalSignature
+	rootCert.ExtKeyUsage = []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth}
+	return rootCert, nil
+}
+
+// Create cert template that we can use on the server for TLS
+func createServerCertTemplate(name, namespace string) (*x509.Certificate, error) {
+	serverCert, err := createCertTemplate(name, namespace)
+	if err != nil {
+		return nil, err
+	}
+	serverCert.KeyUsage = x509.KeyUsageDigitalSignature
+	serverCert.ExtKeyUsage = []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}
+	return serverCert, err
+}
+
+// Actually sign the cert and return things in a form that we can use later on
+func createCert(template, parent *x509.Certificate, pub interface{}, parentPriv interface{}) (
+	cert *x509.Certificate, certPEM []byte, err error) {
+
+	certDER, err := x509.CreateCertificate(rand.Reader, template, parent, pub, parentPriv)
+	if err != nil {
+		return
+	}
+	cert, err = x509.ParseCertificate(certDER)
+	if err != nil {
+		return
+	}
+	b := pem.Block{Type: "CERTIFICATE", Bytes: certDER}
+	certPEM = pem.EncodeToMemory(&b)
+	return
+}
+
+func createCA(ctx context.Context, name, namespace string) (*rsa.PrivateKey, *x509.Certificate, []byte, error) {
+	logger := logging.FromContext(ctx)
+	rootKey, err := rsa.GenerateKey(rand.Reader, 2048)
+	if err != nil {
+		logger.Errorw("error generating random key", zap.Error(err))
+		return nil, nil, nil, err
+	}
+
+	rootCertTmpl, err := createCACertTemplate(name, namespace)
+	if err != nil {
+		logger.Errorw("error generating CA cert", zap.Error(err))
+		return nil, nil, nil, err
+	}
+
+	rootCert, rootCertPEM, err := createCert(rootCertTmpl, rootCertTmpl, &rootKey.PublicKey, rootKey)
+	if err != nil {
+		logger.Errorw("error signing the CA cert", zap.Error(err))
+		return nil, nil, nil, err
+	}
+	return rootKey, rootCert, rootCertPEM, nil
+}
+
+// CreateCerts creates and returns a CA certificate, and a certificate and
+// key for the server. serverKey and serverCert are used by the server
+// to establish trust with clients; the CA certificate is used by the
+// client to verify the server's authentication chain.
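+//
+// A hypothetical usage sketch (names are illustrative; the secret
+// handling that normally surrounds this is omitted):
+//
+//	serverKey, serverCert, caCert, err := CreateCerts(ctx, "my-webhook", "my-namespace")
+//	if err != nil { ... }
+//	tlsCfg, err := makeTLSConfig(serverCert, serverKey, caCert, tls.NoClientCert)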
+func CreateCerts(ctx context.Context, name, namespace string) (serverKey, serverCert, caCert []byte, err error) { + logger := logging.FromContext(ctx) + // First create a CA certificate and private key + caKey, caCertificate, caCertificatePEM, err := createCA(ctx, name, namespace) + if err != nil { + return nil, nil, nil, err + } + + // Then create the private key for the serving cert + servKey, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + logger.Errorw("error generating random key", zap.Error(err)) + return nil, nil, nil, err + } + servCertTemplate, err := createServerCertTemplate(name, namespace) + if err != nil { + logger.Errorw("failed to create the server certificate template", zap.Error(err)) + return nil, nil, nil, err + } + + // create a certificate which wraps the server's public key, sign it with the CA private key + _, servCertPEM, err := createCert(servCertTemplate, caCertificate, &servKey.PublicKey, caKey) + if err != nil { + logger.Errorw("error signing server certificate template", zap.Error(err)) + return nil, nil, nil, err + } + servKeyPEM := pem.EncodeToMemory(&pem.Block{ + Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(servKey), + }) + return servKeyPEM, servCertPEM, caCertificatePEM, nil +} diff --git a/vendor/github.com/knative/pkg/webhook/webhook.go b/vendor/github.com/knative/pkg/webhook/webhook.go new file mode 100644 index 000000000..8ed641765 --- /dev/null +++ b/vendor/github.com/knative/pkg/webhook/webhook.go @@ -0,0 +1,619 @@ +/* +Copyright 2017 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package webhook + +import ( + "bytes" + "context" + "crypto/tls" + "crypto/x509" + "encoding/json" + "errors" + "fmt" + "net/http" + "sort" + "strings" + "time" + + "go.uber.org/zap" + + "github.com/knative/pkg/apis" + "github.com/knative/pkg/apis/duck" + "github.com/knative/pkg/kmp" + "github.com/knative/pkg/logging" + "github.com/knative/pkg/logging/logkey" + + "github.com/markbates/inflect" + "github.com/mattbaird/jsonpatch" + admissionv1beta1 "k8s.io/api/admission/v1beta1" + admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/kubernetes" + clientadmissionregistrationv1beta1 "k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1" +) + +const ( + secretServerKey = "server-key.pem" + secretServerCert = "server-cert.pem" + secretCACert = "ca-cert.pem" +) + +var ( + deploymentKind = appsv1.SchemeGroupVersion.WithKind("Deployment") + errMissingNewObject = errors.New("the new object may not be nil") +) + +// ControllerOptions contains the configuration for the webhook +type ControllerOptions struct { + // WebhookName is the name of the webhook we create to handle + // mutations before they get stored in the storage. 
+	WebhookName string
+
+	// ServiceName is the service name of the webhook.
+	ServiceName string
+
+	// DeploymentName is the deployment name of the webhook.
+	DeploymentName string
+
+	// SecretName is the name of k8s secret that contains the webhook
+	// server key/cert and corresponding CA cert that signed them. The
+	// server key/cert are used to serve the webhook and the CA cert
+	// is provided to k8s apiserver during admission controller
+	// registration.
+	SecretName string
+
+	// Namespace is the namespace in which everything above lives.
+	Namespace string
+
+	// Port where the webhook is served. Per k8s admission
+	// registration requirements this should be 443 unless there is
+	// only a single port for the service.
+	Port int
+
+	// RegistrationDelay controls how long after the webhook is
+	// started admission registration occurs. This is used to avoid
+	// potential races where registration completes and k8s apiserver
+	// invokes the webhook before the HTTP server is started.
+	RegistrationDelay time.Duration
+
+	// ClientAuth declares the policy the webhook server will follow for
+	// TLS Client Authentication.
+	// The default value is tls.NoClientCert.
+	ClientAuth tls.ClientAuthType
+}
+
+// ResourceCallback defines a signature for resource-specific (Route, Configuration, etc.)
+// handlers that can validate and mutate an object. If a non-nil error is returned, object creation
+// is denied. Mutations should be appended to the patches operations.
+type ResourceCallback func(patches *[]jsonpatch.JsonPatchOperation, old GenericCRD, new GenericCRD) error
+
+// ResourceDefaulter defines a signature for resource-specific (Route, Configuration, etc.)
+// handlers that can set defaults on an object. If a non-nil error is returned, object creation
+// is denied. Mutations should be appended to the patches operations.
+type ResourceDefaulter func(patches *[]jsonpatch.JsonPatchOperation, crd GenericCRD) error
+
+// AdmissionController implements the external admission webhook for
+// validation and mutation of resource configuration.
+type AdmissionController struct {
+	Client   kubernetes.Interface
+	Options  ControllerOptions
+	Handlers map[schema.GroupVersionKind]GenericCRD
+	Logger   *zap.SugaredLogger
+
+	WithContext           func(context.Context) context.Context
+	DisallowUnknownFields bool
+}
+
+func nop(ctx context.Context) context.Context {
+	return ctx
+}
+
+// GenericCRD is the interface definition that allows us to perform the generic
+// CRD actions like deciding whether to increment generation and so forth.
+type GenericCRD interface {
+	apis.Defaultable
+	apis.Validatable
+	runtime.Object
+}
+
+// getAPIServerExtensionCACert gets the Kubernetes aggregate apiserver
+// client CA cert used by the validator.
+//
+// NOTE: this certificate is provided by Kubernetes. We do not control
+// its name or location.
+func getAPIServerExtensionCACert(cl kubernetes.Interface) ([]byte, error) {
+	const name = "extension-apiserver-authentication"
+	c, err := cl.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(name, metav1.GetOptions{})
+	if err != nil {
+		return nil, err
+	}
+	const caFileName = "requestheader-client-ca-file"
+	pem, ok := c.Data[caFileName]
+	if !ok {
+		return nil, fmt.Errorf("cannot find %s in ConfigMap %s: ConfigMap.Data is %#v", caFileName, name, c.Data)
+	}
+	return []byte(pem), nil
+}
+
+// makeTLSConfig makes a TLS configuration suitable for use with the server
+func makeTLSConfig(serverCert, serverKey, caCert []byte, clientAuthType tls.ClientAuthType) (*tls.Config, error) {
+	caCertPool := x509.NewCertPool()
+	caCertPool.AppendCertsFromPEM(caCert)
+	cert, err := tls.X509KeyPair(serverCert, serverKey)
+	if err != nil {
+		return nil, err
+	}
+	return &tls.Config{
+		Certificates: []tls.Certificate{cert},
+		ClientCAs:    caCertPool,
+		ClientAuth:   clientAuthType,
+	}, nil
+}
+
+func getOrGenerateKeyCertsFromSecret(ctx context.Context, client kubernetes.Interface,
+	options *ControllerOptions) (serverKey, serverCert, caCert []byte, err error) {
+	logger := logging.FromContext(ctx)
+	secret, err := client.CoreV1().Secrets(options.Namespace).Get(options.SecretName, metav1.GetOptions{})
+	if err != nil {
+		if !apierrors.IsNotFound(err) {
+			return nil, nil, nil, err
+		}
+		logger.Info("Did not find existing secret, creating one")
+		newSecret, err := generateSecret(ctx, options)
+		if err != nil {
+			return nil, nil, nil, err
+		}
+		secret, err = client.CoreV1().Secrets(newSecret.Namespace).Create(newSecret)
+		if err != nil && !apierrors.IsAlreadyExists(err) {
+			return nil, nil, nil, err
+		}
+		// Something else might have created it concurrently; try fetching it one more time
+		secret, err = client.CoreV1().Secrets(options.Namespace).Get(options.SecretName, metav1.GetOptions{})
+		if err != nil {
+			return nil, nil, nil, err
+		}
+	}
+
+	var ok bool
+	if serverKey, ok = secret.Data[secretServerKey]; !ok {
+		return nil, nil, nil, errors.New("server key missing")
+	}
+	if serverCert, ok = secret.Data[secretServerCert]; !ok {
+		return nil, nil, nil, errors.New("server cert missing")
+	}
+	if caCert, ok = secret.Data[secretCACert]; !ok {
+		return nil, nil, nil, errors.New("ca cert missing")
+	}
+	return serverKey, serverCert, caCert, nil
+}
+
+// validate performs validation on the provided "new" CRD.
+// For legacy purposes, this also does apis.Immutable validation,
+// which is deprecated and will be removed in a future release.
+func validate(ctx context.Context, new GenericCRD) error {
+	if apis.IsInUpdate(ctx) {
+		old := apis.GetBaseline(ctx)
+		if immutableNew, ok := new.(apis.Immutable); ok {
+			immutableOld, ok := old.(apis.Immutable)
+			if !ok {
+				return fmt.Errorf("unexpected type mismatch %T vs. %T", old, new)
+			}
+			if err := immutableNew.CheckImmutableFields(ctx, immutableOld); err != nil {
+				return err
+			}
+		}
+	}
+
+	// Can't just `return new.Validate()` because it doesn't properly nil-check.
+	if err := new.Validate(ctx); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// setDefaults simply leverages apis.Defaultable to set defaults.
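+// For example (illustrative values, not from the original source): if a
+// type's SetDefaults fills in a missing spec.replicas, the diff between
+// the copy taken before and the defaulted object yields a patch entry
+// like {"op":"add","path":"/spec/replicas","value":1}.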
+func setDefaults(ctx context.Context, patches duck.JSONPatch, crd GenericCRD) (duck.JSONPatch, error) {
+	before, after := crd.DeepCopyObject(), crd
+	after.SetDefaults(ctx)
+
+	patch, err := duck.CreatePatch(before, after)
+	if err != nil {
+		return nil, err
+	}
+
+	return append(patches, patch...), nil
+}
+
+func configureCerts(ctx context.Context, client kubernetes.Interface, options *ControllerOptions) (*tls.Config, []byte, error) {
+	var apiServerCACert []byte
+	if options.ClientAuth >= tls.VerifyClientCertIfGiven {
+		var err error
+		apiServerCACert, err = getAPIServerExtensionCACert(client)
+		if err != nil {
+			return nil, nil, err
+		}
+	}
+
+	serverKey, serverCert, caCert, err := getOrGenerateKeyCertsFromSecret(ctx, client, options)
+	if err != nil {
+		return nil, nil, err
+	}
+	tlsConfig, err := makeTLSConfig(serverCert, serverKey, apiServerCACert, options.ClientAuth)
+	if err != nil {
+		return nil, nil, err
+	}
+	return tlsConfig, caCert, nil
+}
+
+// Run implements the admission controller run loop.
+func (ac *AdmissionController) Run(stop <-chan struct{}) error {
+	logger := ac.Logger
+	ctx := logging.WithLogger(context.TODO(), logger)
+	tlsConfig, caCert, err := configureCerts(ctx, ac.Client, &ac.Options)
+	if err != nil {
+		logger.Errorw("could not configure admission webhook certs", zap.Error(err))
+		return err
+	}
+
+	server := &http.Server{
+		Handler:   ac,
+		Addr:      fmt.Sprintf(":%v", ac.Options.Port),
+		TLSConfig: tlsConfig,
+	}
+
+	logger.Info("Found certificates for webhook...")
+	if ac.Options.RegistrationDelay != 0 {
+		logger.Infof("Delaying admission webhook registration for %v", ac.Options.RegistrationDelay)
+	}
+
+	select {
+	case <-time.After(ac.Options.RegistrationDelay):
+		cl := ac.Client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations()
+		if err := ac.register(ctx, cl, caCert); err != nil {
+			logger.Errorw("failed to register webhook", zap.Error(err))
+			return err
+		}
+		logger.Info("Successfully registered webhook")
+	case <-stop:
+		return nil
+	}
+
+	serverBootstrapErrCh := make(chan struct{})
+	go func() {
+		if err := server.ListenAndServeTLS("", ""); err != nil {
+			logger.Errorw("ListenAndServeTLS for admission webhook returned error", zap.Error(err))
+			close(serverBootstrapErrCh)
+		}
+	}()
+
+	select {
+	case <-stop:
+		return server.Close()
+	case <-serverBootstrapErrCh:
+		return errors.New("webhook server bootstrap failed")
+	}
+}
+
+// register registers the external admission webhook for the resource
+// types handled by this admission controller.
+func (ac *AdmissionController) register(
+	ctx context.Context, client clientadmissionregistrationv1beta1.MutatingWebhookConfigurationInterface, caCert []byte) error { // nolint: lll
+	logger := logging.FromContext(ctx)
+	failurePolicy := admissionregistrationv1beta1.Fail
+
+	var rules []admissionregistrationv1beta1.RuleWithOperations
+	for gvk := range ac.Handlers {
+		plural := strings.ToLower(inflect.Pluralize(gvk.Kind))
+
+		rules = append(rules, admissionregistrationv1beta1.RuleWithOperations{
+			Operations: []admissionregistrationv1beta1.OperationType{
+				admissionregistrationv1beta1.Create,
+				admissionregistrationv1beta1.Update,
+			},
+			Rule: admissionregistrationv1beta1.Rule{
+				APIGroups:   []string{gvk.Group},
+				APIVersions: []string{gvk.Version},
+				Resources:   []string{plural + "/*"},
+			},
+		})
+	}
+
+	// Sort the rules by Group, Version, Kind so that things are deterministically ordered.
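+	// (Determinism matters here: ac.Handlers is a map, so the rules above
+	// are collected in random order, and an unsorted slice would spuriously
+	// differ from the in-cluster configuration when diffed below.)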
+ sort.Slice(rules, func(i, j int) bool { + lhs, rhs := rules[i], rules[j] + if lhs.APIGroups[0] != rhs.APIGroups[0] { + return lhs.APIGroups[0] < rhs.APIGroups[0] + } + if lhs.APIVersions[0] != rhs.APIVersions[0] { + return lhs.APIVersions[0] < rhs.APIVersions[0] + } + return lhs.Resources[0] < rhs.Resources[0] + }) + + webhook := &admissionregistrationv1beta1.MutatingWebhookConfiguration{ + ObjectMeta: metav1.ObjectMeta{ + Name: ac.Options.WebhookName, + }, + Webhooks: []admissionregistrationv1beta1.Webhook{{ + Name: ac.Options.WebhookName, + Rules: rules, + ClientConfig: admissionregistrationv1beta1.WebhookClientConfig{ + Service: &admissionregistrationv1beta1.ServiceReference{ + Namespace: ac.Options.Namespace, + Name: ac.Options.ServiceName, + }, + CABundle: caCert, + }, + FailurePolicy: &failurePolicy, + }}, + } + + // Set the owner to our deployment. + deployment, err := ac.Client.Apps().Deployments(ac.Options.Namespace).Get(ac.Options.DeploymentName, metav1.GetOptions{}) + if err != nil { + return fmt.Errorf("failed to fetch our deployment: %v", err) + } + deploymentRef := metav1.NewControllerRef(deployment, deploymentKind) + webhook.OwnerReferences = append(webhook.OwnerReferences, *deploymentRef) + + // Try to create the webhook and if it already exists validate webhook rules. + _, err = client.Create(webhook) + if err != nil { + if !apierrors.IsAlreadyExists(err) { + return fmt.Errorf("failed to create a webhook: %v", err) + } + logger.Info("Webhook already exists") + configuredWebhook, err := client.Get(ac.Options.WebhookName, metav1.GetOptions{}) + if err != nil { + return fmt.Errorf("error retrieving webhook: %v", err) + } + if ok, err := kmp.SafeEqual(configuredWebhook.Webhooks, webhook.Webhooks); err != nil { + return fmt.Errorf("error diffing webhooks: %v", err) + } else if !ok { + logger.Info("Updating webhook") + // Set the ResourceVersion as required by update. + webhook.ObjectMeta.ResourceVersion = configuredWebhook.ObjectMeta.ResourceVersion + if _, err := client.Update(webhook); err != nil { + return fmt.Errorf("failed to update webhook: %s", err) + } + } else { + logger.Info("Webhook is already valid") + } + } else { + logger.Info("Created a webhook") + } + return nil +} + +// ServeHTTP implements the external admission webhook for mutating +// serving resources. +func (ac *AdmissionController) ServeHTTP(w http.ResponseWriter, r *http.Request) { + logger := ac.Logger + logger.Infof("Webhook ServeHTTP request=%#v", r) + + // Verify the content type is accurate. 
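+	// (The Kubernetes API server sends AdmissionReview payloads as
+	// application/json, so any other content type is rejected outright.)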
+	contentType := r.Header.Get("Content-Type")
+	if contentType != "application/json" {
+		http.Error(w, "invalid Content-Type, want `application/json`", http.StatusUnsupportedMediaType)
+		return
+	}
+
+	var review admissionv1beta1.AdmissionReview
+	if err := json.NewDecoder(r.Body).Decode(&review); err != nil {
+		http.Error(w, fmt.Sprintf("could not decode body: %v", err), http.StatusBadRequest)
+		return
+	}
+
+	logger = logger.With(
+		zap.String(logkey.Kind, fmt.Sprint(review.Request.Kind)),
+		zap.String(logkey.Namespace, review.Request.Namespace),
+		zap.String(logkey.Name, review.Request.Name),
+		zap.String(logkey.Operation, fmt.Sprint(review.Request.Operation)),
+		zap.String(logkey.Resource, fmt.Sprint(review.Request.Resource)),
+		zap.String(logkey.SubResource, fmt.Sprint(review.Request.SubResource)),
+		zap.String(logkey.UserInfo, fmt.Sprint(review.Request.UserInfo)))
+	ctx := logging.WithLogger(r.Context(), logger)
+
+	if ac.WithContext != nil {
+		ctx = ac.WithContext(ctx)
+	}
+
+	reviewResponse := ac.admit(ctx, review.Request)
+	var response admissionv1beta1.AdmissionReview
+	if reviewResponse != nil {
+		response.Response = reviewResponse
+		response.Response.UID = review.Request.UID
+	}
+
+	logger.Infof("AdmissionReview for %#v: %s/%s response=%#v",
+		review.Request.Kind, review.Request.Namespace, review.Request.Name, reviewResponse)
+
+	if err := json.NewEncoder(w).Encode(response); err != nil {
+		http.Error(w, fmt.Sprintf("could not encode response: %v", err), http.StatusInternalServerError)
+		return
+	}
+}
+
+func makeErrorStatus(reason string, args ...interface{}) *admissionv1beta1.AdmissionResponse {
+	result := apierrors.NewBadRequest(fmt.Sprintf(reason, args...)).Status()
+	return &admissionv1beta1.AdmissionResponse{
+		Result:  &result,
+		Allowed: false,
+	}
+}
+
+func (ac *AdmissionController) admit(ctx context.Context, request *admissionv1beta1.AdmissionRequest) *admissionv1beta1.AdmissionResponse {
+	logger := logging.FromContext(ctx)
+	switch request.Operation {
+	case admissionv1beta1.Create, admissionv1beta1.Update:
+	default:
+		logger.Infof("Unhandled webhook operation, letting it through %v", request.Operation)
+		return &admissionv1beta1.AdmissionResponse{Allowed: true}
+	}
+
+	patchBytes, err := ac.mutate(ctx, request)
+	if err != nil {
+		return makeErrorStatus("mutation failed: %v", err)
+	}
+	logger.Infof("Kind: %q PatchBytes: %v", request.Kind, string(patchBytes))
+
+	return &admissionv1beta1.AdmissionResponse{
+		Patch:   patchBytes,
+		Allowed: true,
+		PatchType: func() *admissionv1beta1.PatchType {
+			pt := admissionv1beta1.PatchTypeJSONPatch
+			return &pt
+		}(),
+	}
+}
+
+func (ac *AdmissionController) mutate(ctx context.Context, req *admissionv1beta1.AdmissionRequest) ([]byte, error) {
+	kind := req.Kind
+	newBytes := req.Object.Raw
+	oldBytes := req.OldObject.Raw
+	// Why, oh why are these different types...
+	gvk := schema.GroupVersionKind{
+		Group:   kind.Group,
+		Version: kind.Version,
+		Kind:    kind.Kind,
+	}
+
+	logger := logging.FromContext(ctx)
+	handler, ok := ac.Handlers[gvk]
+	if !ok {
+		logger.Errorf("Unhandled kind: %v", gvk)
+		return nil, fmt.Errorf("unhandled kind: %v", gvk)
+	}
+
+	// nil values denote absence of `old` (create) or `new` (delete) objects.
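+	// (On CREATE, req.OldObject.Raw is empty, so oldObj stays nil below;
+	// on UPDATE both payloads are populated. DELETE never reaches here,
+	// since admit() only forwards Create and Update operations.)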
+	var oldObj, newObj GenericCRD
+
+	if len(newBytes) != 0 {
+		newObj = handler.DeepCopyObject().(GenericCRD)
+		newDecoder := json.NewDecoder(bytes.NewBuffer(newBytes))
+		if ac.DisallowUnknownFields {
+			newDecoder.DisallowUnknownFields()
+		}
+		if err := newDecoder.Decode(&newObj); err != nil {
+			return nil, fmt.Errorf("cannot decode incoming new object: %v", err)
+		}
+	}
+	if len(oldBytes) != 0 {
+		oldObj = handler.DeepCopyObject().(GenericCRD)
+		oldDecoder := json.NewDecoder(bytes.NewBuffer(oldBytes))
+		if ac.DisallowUnknownFields {
+			oldDecoder.DisallowUnknownFields()
+		}
+		if err := oldDecoder.Decode(&oldObj); err != nil {
+			return nil, fmt.Errorf("cannot decode incoming old object: %v", err)
+		}
+	}
+	var patches duck.JSONPatch
+
+	var err error
+	// Skip this step if the type we're dealing with is a duck type, since it is inherently
+	// incomplete and this will patch away all of the unspecified fields.
+	if _, ok := newObj.(duck.Populatable); !ok {
+		// Add these before defaulting fields, otherwise defaulting may cause an illegal patch
+		// because it expects the fields added by round-tripping through the Golang type to be
+		// present already.
+		rtp, err := roundTripPatch(newBytes, newObj)
+		if err != nil {
+			return nil, fmt.Errorf("cannot create patch for round tripped newBytes: %v", err)
+		}
+		patches = append(patches, rtp...)
+	}
+
+	// Set up the context for defaulting and validation
+	if oldObj != nil {
+		// Copy the old object and set defaults so that we don't reject our own
+		// defaulting done earlier in the webhook.
+		oldObj = oldObj.DeepCopyObject().(GenericCRD)
+		oldObj.SetDefaults(ctx)
+
+		if req.SubResource == "" {
+			ctx = apis.WithinUpdate(ctx, oldObj)
+		} else {
+			ctx = apis.WithinSubResourceUpdate(ctx, oldObj, req.SubResource)
+		}
+	} else {
+		ctx = apis.WithinCreate(ctx)
+	}
+	ctx = apis.WithUserInfo(ctx, &req.UserInfo)
+
+	// Default the new object.
+	if patches, err = setDefaults(ctx, patches, newObj); err != nil {
+		logger.Errorw("Failed the resource specific defaulter", zap.Error(err))
+		// Return the error message as-is to give the defaulter callback
+		// discretion over (our portion of) the message that the user sees.
+		return nil, err
+	}
+
+	// None of the validators will accept a nil value for newObj.
+	if newObj == nil {
+		return nil, errMissingNewObject
+	}
+	if err := validate(ctx, newObj); err != nil {
+		logger.Errorw("Failed the resource specific validation", zap.Error(err))
+		// Return the error message as-is to give the validation callback
+		// discretion over (our portion of) the message that the user sees.
+		return nil, err
+	}
+
+	return json.Marshal(patches)
+}
+
+// roundTripPatch generates the JSONPatch that corresponds to round-tripping the given bytes
+// through the Golang type (JSON -> Golang type -> JSON), because it is not always true that
+// bytes == json.Marshal(json.Unmarshal(bytes)).
+//
+// For example, if bytes did not contain a 'spec' field and the Golang type specifies its 'spec'
+// field without omitempty, then by round tripping through the Golang type, we would have added
+// `'spec': {}`.
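+//
+// Concretely (an illustrative example, not from the original source):
+// `{"metadata":{"name":"x"}}` decoded into such a type and re-encoded
+// becomes `{"metadata":{"name":"x"},"spec":{}}`, producing the patch
+// [{"op":"add","path":"/spec","value":{}}].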
+func roundTripPatch(bytes []byte, unmarshalled interface{}) (duck.JSONPatch, error) {
+	if unmarshalled == nil {
+		return duck.JSONPatch{}, nil
+	}
+	marshaledBytes, err := json.Marshal(unmarshalled)
+	if err != nil {
+		return nil, fmt.Errorf("cannot marshal interface: %v", err)
+	}
+	return jsonpatch.CreatePatch(bytes, marshaledBytes)
+}
+
+func generateSecret(ctx context.Context, options *ControllerOptions) (*corev1.Secret, error) {
+	serverKey, serverCert, caCert, err := CreateCerts(ctx, options.ServiceName, options.Namespace)
+	if err != nil {
+		return nil, err
+	}
+	return &corev1.Secret{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      options.SecretName,
+			Namespace: options.Namespace,
+		},
+		Data: map[string][]byte{
+			secretServerKey:  serverKey,
+			secretServerCert: serverCert,
+			secretCACert:     caCert,
+		},
+	}, nil
+}
diff --git a/vendor/github.com/knative/pkg/websocket/connection.go b/vendor/github.com/knative/pkg/websocket/connection.go
new file mode 100644
index 000000000..e37202e5f
--- /dev/null
+++ b/vendor/github.com/knative/pkg/websocket/connection.go
@@ -0,0 +1,325 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package websocket
+
+import (
+	"bytes"
+	"encoding/gob"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"sync"
+	"time"
+
+	"go.uber.org/zap"
+
+	"k8s.io/apimachinery/pkg/util/wait"
+
+	"github.com/gorilla/websocket"
+)
+
+var (
+	// ErrConnectionNotEstablished is returned by methods that need a connection
+	// but no connection has been established yet.
+	ErrConnectionNotEstablished = errors.New("connection has not yet been established")
+
+	// errShuttingDown is returned internally once the shutdown signal has been sent.
+	errShuttingDown = errors.New("shutdown in progress")
+
+	// pongTimeout defines the amount of time allowed between two pongs to arrive
+	// before the connection is considered broken.
+	pongTimeout = 10 * time.Second
+)
+
+// rawConnection is an interface defining the methods needed
+// from a websocket connection
+type rawConnection interface {
+	WriteMessage(messageType int, data []byte) error
+	NextReader() (int, io.Reader, error)
+	Close() error
+
+	SetReadDeadline(deadline time.Time) error
+	SetPongHandler(func(string) error)
+}
+
+// ManagedConnection represents a websocket connection.
+type ManagedConnection struct {
+	connection        rawConnection
+	connectionFactory func() (rawConnection, error)
+
+	closeChan chan struct{}
+	closeOnce sync.Once
+
+	// Used to capture asynchronous processes to be waited
+	// on when shutting the connection down.
+	processingWg sync.WaitGroup
+
+	// If set, messages will be forwarded to this channel
+	messageChan chan []byte
+
+	// This mutex controls access to the connection reference
+	// itself.
+	connectionLock sync.RWMutex
+
+	// Gorilla's documentation states that one reader and
+	// one writer are allowed concurrently.
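+	// The two mutexes below therefore serialize readers and writers
+	// independently, so a blocked reader never starves writers.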
+	readerLock sync.Mutex
+	writerLock sync.Mutex
+
+	// Used for the exponential backoff when connecting
+	connectionBackoff wait.Backoff
+}
+
+// NewDurableSendingConnection creates a new websocket connection
+// that can only send messages to the endpoint it connects to.
+// The connection will continuously be kept alive and reconnected
+// in case of a loss of connectivity.
+func NewDurableSendingConnection(target string, logger *zap.SugaredLogger) *ManagedConnection {
+	return NewDurableConnection(target, nil, logger)
+}
+
+// NewDurableConnection creates a new websocket connection that
+// passes incoming messages to the given message channel. It can also
+// send messages to the endpoint it connects to.
+// The connection will continuously be kept alive and reconnected
+// in case of a loss of connectivity.
+//
+// Note: The given channel needs to be drained after calling `Shutdown`
+// to not cause any deadlocks. If the channel's buffer is likely to be
+// filled, this needs to happen in separate goroutines, i.e.
+//
+//	go func() {conn.Shutdown(); close(messageChan)}
+//	go func() {for range messageChan {}}
+func NewDurableConnection(target string, messageChan chan []byte, logger *zap.SugaredLogger) *ManagedConnection {
+	websocketConnectionFactory := func() (rawConnection, error) {
+		dialer := &websocket.Dialer{
+			HandshakeTimeout: 3 * time.Second,
+		}
+		conn, _, err := dialer.Dial(target, nil)
+		return conn, err
+	}
+
+	c := newConnection(websocketConnectionFactory, messageChan)
+
+	// Keep the connection alive asynchronously and reconnect on
+	// connection failure.
+	c.processingWg.Add(1)
+	go func() {
+		defer c.processingWg.Done()
+
+		for {
+			select {
+			default:
+				logger.Infof("Connecting to %q", target)
+				if err := c.connect(); err != nil {
+					logger.Errorw(fmt.Sprintf("Connecting to %q failed", target), zap.Error(err))
+					continue
+				}
+				logger.Infof("Connected to %q", target)
+				if err := c.keepalive(); err != nil {
+					logger.Errorw(fmt.Sprintf("Connection to %q broke down, reconnecting...", target), zap.Error(err))
+				}
+				if err := c.closeConnection(); err != nil {
+					logger.Errorw("Failed to close the connection after crashing", zap.Error(err))
+				}
+			case <-c.closeChan:
+				logger.Infof("Connection to %q is being shutdown", target)
+				return
+			}
+		}
+	}()
+
+	// Keep sending pings 3 times per pongTimeout interval.
+	c.processingWg.Add(1)
+	go func() {
+		defer c.processingWg.Done()
+
+		ticker := time.NewTicker(pongTimeout / 3)
+		defer ticker.Stop()
+		for {
+			select {
+			case <-ticker.C:
+				if err := c.write(websocket.PingMessage, []byte{}); err != nil {
+					logger.Errorw("Failed to send ping message", zap.Error(err))
+				}
+			case <-c.closeChan:
+				return
+			}
+		}
+	}()
+
+	return c
+}
+
+// newConnection creates a new connection primitive.
+func newConnection(connFactory func() (rawConnection, error), messageChan chan []byte) *ManagedConnection {
+	conn := &ManagedConnection{
+		connectionFactory: connFactory,
+		closeChan:         make(chan struct{}),
+		messageChan:       messageChan,
+		connectionBackoff: wait.Backoff{
+			Duration: 100 * time.Millisecond,
+			Factor:   1.3,
+			Steps:    20,
+			Jitter:   0.5,
+		},
+	}
+
+	return conn
+}
+
+// connect tries to establish a websocket connection.
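+// With the backoff configured in newConnection (100ms base, factor 1.3,
+// 20 steps, 0.5 jitter) the retries stretch over roughly a minute before
+// ExponentialBackoff gives up (a rough estimate, not a guarantee).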
+func (c *ManagedConnection) connect() error {
+	var err error
+	wait.ExponentialBackoff(c.connectionBackoff, func() (bool, error) {
+		select {
+		default:
+			var conn rawConnection
+			conn, err = c.connectionFactory()
+			if err != nil {
+				return false, nil
+			}
+
+			// Setting the read deadline will cause NextReader in read
+			// to fail if it is exceeded. This deadline is reset each
+			// time we receive a pong message so we know the connection
+			// is still intact.
+			conn.SetReadDeadline(time.Now().Add(pongTimeout))
+			conn.SetPongHandler(func(string) error {
+				conn.SetReadDeadline(time.Now().Add(pongTimeout))
+				return nil
+			})
+
+			c.connectionLock.Lock()
+			defer c.connectionLock.Unlock()
+
+			c.connection = conn
+			return true, nil
+		case <-c.closeChan:
+			err = errShuttingDown
+			return false, err
+		}
+	})
+
+	return err
+}
+
+// keepalive keeps the connection open.
+func (c *ManagedConnection) keepalive() error {
+	for {
+		select {
+		default:
+			if err := c.read(); err != nil {
+				return err
+			}
+		case <-c.closeChan:
+			return errShuttingDown
+		}
+	}
+}
+
+// closeConnection closes the underlying websocket connection.
+func (c *ManagedConnection) closeConnection() error {
+	c.connectionLock.Lock()
+	defer c.connectionLock.Unlock()
+
+	if c.connection != nil {
+		err := c.connection.Close()
+		c.connection = nil
+		return err
+	}
+	return nil
+}
+
+// read reads the next message from the connection.
+// If a messageChan is supplied and the current message type is not
+// a control message, the message is sent to that channel.
+func (c *ManagedConnection) read() error {
+	c.connectionLock.RLock()
+	defer c.connectionLock.RUnlock()
+
+	if c.connection == nil {
+		return ErrConnectionNotEstablished
+	}
+
+	c.readerLock.Lock()
+	defer c.readerLock.Unlock()
+
+	messageType, reader, err := c.connection.NextReader()
+	if err != nil {
+		return err
+	}
+
+	// Send the message to the channel if it's an application-level message
+	// and if that channel is set.
+	// TODO(markusthoemmes): Return the messageType along with the payload.
+	if c.messageChan != nil && (messageType == websocket.TextMessage || messageType == websocket.BinaryMessage) {
+		if message, _ := ioutil.ReadAll(reader); message != nil {
+			c.messageChan <- message
+		}
+	}
+
+	return nil
+}
+
+func (c *ManagedConnection) write(messageType int, body []byte) error {
+	c.connectionLock.RLock()
+	defer c.connectionLock.RUnlock()
+
+	if c.connection == nil {
+		return ErrConnectionNotEstablished
+	}
+
+	c.writerLock.Lock()
+	defer c.writerLock.Unlock()
+
+	return c.connection.WriteMessage(messageType, body)
+}
+
+// Status checks the status of the websocket connection.
+func (c *ManagedConnection) Status() error {
+	c.connectionLock.RLock()
+	defer c.connectionLock.RUnlock()
+
+	if c.connection == nil {
+		return ErrConnectionNotEstablished
+	}
+	return nil
+}
+
+// Send sends an encodable message over the websocket connection.
+func (c *ManagedConnection) Send(msg interface{}) error {
+	var b bytes.Buffer
+	enc := gob.NewEncoder(&b)
+	if err := enc.Encode(msg); err != nil {
+		return err
+	}
+
+	return c.write(websocket.BinaryMessage, b.Bytes())
+}
+
+// Shutdown closes the websocket connection.
+func (c *ManagedConnection) Shutdown() error {
+	c.closeOnce.Do(func() {
+		close(c.closeChan)
+	})
+
+	err := c.closeConnection()
+	c.processingWg.Wait()
+	return err
+}
diff --git a/vendor/github.com/knative/pkg/websocket/hijack.go b/vendor/github.com/knative/pkg/websocket/hijack.go
new file mode 100644
index 000000000..f1ac07573
--- /dev/null
+++ b/vendor/github.com/knative/pkg/websocket/hijack.go
@@ -0,0 +1,32 @@
+/*
+Copyright 2019 The Knative Authors
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+    http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package websocket
+
+import (
+	"bufio"
+	"fmt"
+	"net"
+	"net/http"
+)
+
+// HijackIfPossible calls Hijack() on the given http.ResponseWriter if it implements
+// the http.Hijacker interface, which is required for net/http/httputil/reverseproxy
+// to handle connection upgrades/protocol switching. Otherwise it returns an error.
+func HijackIfPossible(w http.ResponseWriter) (net.Conn, *bufio.ReadWriter, error) {
+	hj, ok := w.(http.Hijacker)
+	if !ok {
+		return nil, nil, fmt.Errorf("wrapped writer of type %T can't be hijacked", w)
+	}
+	return hj.Hijack()
+}
diff --git a/vendor/github.com/knative/test-infra/scripts/README.md b/vendor/github.com/knative/test-infra/scripts/README.md
deleted file mode 100644
index 6a8d6f48b..000000000
--- a/vendor/github.com/knative/test-infra/scripts/README.md
+++ /dev/null
@@ -1,293 +0,0 @@
-# Helper scripts
-
-This directory contains helper scripts used by Prow test jobs, as well as local
-development scripts.
-
-## Using the `presubmit-tests.sh` helper script
-
-This is a helper script to run the presubmit tests. To use it:
-
-1. Source this script.
-
-1. [optional] Define the function `build_tests()`. If you don't define this
-   function, the default action for running the build tests is to:
-
-   - check markdown files
-   - run `go build` on the entire repo
-   - run `/hack/verify-codegen.sh` (if it exists)
-   - check licenses in all go packages
-
-   The markdown link checker tool doesn't check `localhost` links by default.
-   Its configuration file, `markdown-link-check-config.json`, lives in the
-   `test-infra/scripts` directory. To override it, create a file with the same
-   name, containing the custom config, in the `/test` directory.
-
-   The markdown lint tool ignores long lines by default. Its configuration file,
-   `markdown-lint-config.rc`, lives in the `test-infra/scripts` directory. To
-   override it, create a file with the same name, containing the custom config,
-   in the `/test` directory.
-
-1. [optional] Customize the default build test runner, if you're using it. Set
-   the following environment variables if the default values don't fit your
-   needs:
-
-   - `DISABLE_MD_LINTING`: Disable linting markdown files, defaults to 0
-     (false).
-   - `DISABLE_MD_LINK_CHECK`: Disable checking links in markdown files, defaults
-     to 0 (false).
-   - `PRESUBMIT_TEST_FAIL_FAST`: Fail the presubmit test immediately if a test
-     fails, defaults to 0 (false).
-
-1. [optional] Define the functions `pre_build_tests()` and/or
-   `post_build_tests()`.
-   These functions will be called before or after the
-   build tests (either your custom one or the default action) and will cause the
-   test to fail if they don't return success.
-
-1. [optional] Define the function `unit_tests()`. If you don't define this
-   function, the default action for running the unit tests is to run all go
-   tests in the repo.
-
-1. [optional] Define the functions `pre_unit_tests()` and/or
-   `post_unit_tests()`. These functions will be called before or after the unit
-   tests (either your custom one or the default action) and will cause the test
-   to fail if they don't return success.
-
-1. [optional] Define the function `integration_tests()`. If you don't define
-   this function, the default action for running the integration tests is to
-   run all `./test/e2e-*tests.sh` scripts, in sequence.
-
-1. [optional] Define the functions `pre_integration_tests()` and/or
-   `post_integration_tests()`. These functions will be called before or after
-   the integration tests (either your custom one or the default action) and will
-   cause the test to fail if they don't return success.
-
-1. Call the `main()` function passing `$@` (without quotes).
-
-Running the script without parameters, or with the `--all-tests` flag, causes all
-tests to be executed, in the right order (i.e., build, then unit, then
-integration tests).
-
-Use the flags `--build-tests`, `--unit-tests` and `--integration-tests` to run a
-specific set of tests. The flag `--emit-metrics` is used to emit metrics when
-running the tests, and is automatically handled by the default action for
-integration tests (see above).
-
-The script will automatically skip all presubmit tests for PRs where all changed
-files are exempt from tests (e.g., a PR changing only the `OWNERS` file).
-
-Also, for PRs touching only markdown files, the unit and integration tests are
-skipped.
-
-### Sample presubmit test script
-
-```bash
-source vendor/github.com/knative/test-infra/scripts/presubmit-tests.sh
-
-function post_build_tests() {
-  echo "Cleaning up after build tests"
-  rm -fr ./build-cache
-}
-
-function unit_tests() {
-  make -C tests test
-}
-
-function pre_integration_tests() {
-  echo "Cleaning up before integration tests"
-  rm -fr ./staging-area
-}
-
-# We use the default integration test runner.
-
-main $@
-```
-
-## Using the `e2e-tests.sh` helper script
-
-This is a helper script for Knative E2E test scripts. To use it:
-
-1. [optional] Customize the test cluster. Set the following environment
-   variables if the default values don't fit your needs:
-
-   - `E2E_CLUSTER_REGION`: Cluster region, defaults to `us-central1`.
-   - `E2E_CLUSTER_BACKUP_REGIONS`: Space-separated list of regions to retry test
-     cluster creation in case of stockout. Defaults to `us-west1 us-east1`.
-   - `E2E_CLUSTER_ZONE`: Cluster zone (e.g., `a`), defaults to none (i.e. use a
-     regional cluster).
-   - `E2E_CLUSTER_BACKUP_ZONES`: Space-separated list of zones to retry test
-     cluster creation in case of stockout. If defined,
-     `E2E_CLUSTER_BACKUP_REGIONS` will be ignored; it defaults to none.
-   - `E2E_CLUSTER_MACHINE`: Cluster node machine type, defaults to
-     `n1-standard-4`.
-   - `E2E_MIN_CLUSTER_NODES`: Minimum number of nodes in the cluster when
-     autoscaling, defaults to 1.
-   - `E2E_MAX_CLUSTER_NODES`: Maximum number of nodes in the cluster when
-     autoscaling, defaults to 3.
-
-1. Source the script.
-
-1. [optional] Write the `knative_setup()` function, which will set up your
-   system under test (e.g., Knative Serving).
This function won't be called if - you use the `--skip-knative-setup` flag. - -1. [optional] Write the `knative_teardown()` function, which will tear down your - system under test (e.g., Knative Serving). This function won't be called if - you use the `--skip-knative-setup` flag. - -1. [optional] Write the `test_setup()` function, which will set up the test - resources. - -1. [optional] Write the `test_teardown()` function, which will tear down the - test resources. - -1. [optional] Write the `cluster_setup()` function, which will set up any - resources before the test cluster is created. - -1. [optional] Write the `cluster_teardown()` function, which will tear down any - resources after the test cluster is destroyed. - -1. [optional] Write the `dump_extra_cluster_state()` function. It will be called - when a test fails, and can dump extra information about the current state of - the cluster (typically using `kubectl`). - -1. [optional] Write the `parse_flags()` function. It will be called whenever an - unrecognized flag is passed to the script, allowing you to define your own - flags. The function must return 0 if the flag is unrecognized, or the number - of items to skip in the command line if the flag was parsed successfully. For - example, return 1 for a simple flag, and 2 for a flag with a parameter. - -1. Call the `initialize()` function passing `$@` (without quotes). - -1. Write logic for the end-to-end tests. Run all go tests using `go_test_e2e()` - (or `report_go_test()` if you need a more fine-grained control) and call - `fail_test()` or `success()` if any of them failed. The environment variable - `KO_DOCKER_REPO` and `E2E_PROJECT_ID` will be set according to the test - cluster. You can also use the following boolean (0 is false, 1 is true) - environment variables for the logic: - - - `EMIT_METRICS`: true if `--emit-metrics` was passed. - - All environment variables above are marked read-only. - -**Notes:** - -1. Calling your script without arguments will create a new cluster in the GCP - project `$PROJECT_ID` and run the tests against it. - -1. Calling your script with `--run-tests` and the variable `KO_DOCKER_REPO` set - will immediately start the tests against the cluster currently configured for - `kubectl`. - -1. By default Istio is installed on the cluster via Addon, use - `--skip-istio-addon` if you choose not to have it preinstalled. - -1. You can force running the tests against a specific GKE cluster version by - using the `--cluster-version` flag and passing a full version as the flag - value. - -### Sample end-to-end test script - -This script will test that the latest Knative Serving nightly release works. It -defines a special flag (`--no-knative-wait`) that causes the script not to wait -for Knative Serving to be up before running the tests. It also requires that the -test cluster is created in a specific region, `us-west2`. - -```bash - -# This test requires a cluster in LA -E2E_CLUSTER_REGION=us-west2 - -source vendor/github.com/knative/test-infra/scripts/e2e-tests.sh - -function knative_setup() { - start_latest_knative_serving - if (( WAIT_FOR_KNATIVE )); then - wait_until_pods_running knative-serving || fail_test "Knative Serving not up" - fi -} - -function parse_flags() { - if [[ "$1" == "--no-knative-wait" ]]; then - WAIT_FOR_KNATIVE=0 - return 1 - fi - return 0 -} - -WAIT_FOR_KNATIVE=1 - -initialize $@ - -# TODO: use go_test_e2e to run the tests. 
-kubectl get pods || fail_test
-
-success
-
-```
-
-## Using the `release.sh` helper script
-
-This is a helper script for Knative release scripts. To use it:
-
-1. Source the script.
-
-1. [optional] By default, the release script will run
-   `./test/presubmit-tests.sh` as the release validation tests. If you need to
-   run something else, set the environment variable `VALIDATION_TESTS` to the
-   executable to run.
-
-1. Write logic for building the release in a function named `build_release()`.
-   Set the environment variable `ARTIFACTS_TO_PUBLISH` to the list of files
-   created, space separated. Use the following boolean (0 is false, 1 is true)
-   and string environment variables for the logic:
-
-   - `RELEASE_VERSION`: contains the release version if `--version` was passed.
-     This also overrides the value of the `TAG` variable to `v$RELEASE_VERSION`.
-   - `RELEASE_BRANCH`: contains the release branch if `--branch` was passed.
-     Otherwise it's empty and `master` HEAD will be considered the release
-     branch.
-   - `RELEASE_NOTES`: contains the filename with the release notes if
-     `--release-notes` was passed. The release notes are a simple markdown file.
-   - `RELEASE_GCS_BUCKET`: contains the GCS bucket name to store the manifests
-     if `--release-gcs` was passed, otherwise the default value
-     `knative-nightly/` will be used. It is empty if `--publish` was not
-     passed.
-   - `BUILD_COMMIT_HASH`: the commit short hash for the current repo. If the
-     current git tree is dirty, it will have `-dirty` appended to it.
-   - `BUILD_YYYYMMDD`: current UTC date in `YYYYMMDD` format.
-   - `BUILD_TIMESTAMP`: human-readable UTC timestamp in `YYYY-MM-DD HH:MM:SS`
-     format.
-   - `BUILD_TAG`: a tag in the form `v$BUILD_YYYYMMDD-$BUILD_COMMIT_HASH`.
-   - `KO_DOCKER_REPO`: contains the GCR to store the images if `--release-gcr`
-     was passed, otherwise the default value `gcr.io/knative-nightly` will be
-     used. It is set to `ko.local` if `--publish` was not passed.
-   - `SKIP_TESTS`: true if `--skip-tests` was passed. This is handled
-     automatically.
-   - `TAG_RELEASE`: true if `--tag-release` was passed. In this case, the
-     environment variable `TAG` will contain the release tag in the form
-     `$BUILD_TAG`.
-   - `PUBLISH_RELEASE`: true if `--publish` was passed. In this case, the
-     environment variable `KO_FLAGS` will be updated with the `-L` option and
-     `TAG` will contain the release tag in the form `v$RELEASE_VERSION`.
-   - `PUBLISH_TO_GITHUB`: true if `--version`, `--branch` and
-     `--publish-release` were passed.
-
-   All boolean environment variables default to false for safety.
-
-   All environment variables above, except `KO_FLAGS`, are marked read-only once
-   `main()` is called (see below).
-
-1. Call the `main()` function passing `$@` (without quotes).
-
-### Sample release script
-
-```bash
-source vendor/github.com/knative/test-infra/scripts/release.sh
-
-function build_release() {
-  # config/ contains the manifests
-  ko resolve ${KO_FLAGS} -f config/ > release.yaml
-  ARTIFACTS_TO_PUBLISH="release.yaml"
-}
-
-main $@
-```
diff --git a/vendor/github.com/knative/test-infra/scripts/e2e-tests.sh b/vendor/github.com/knative/test-infra/scripts/e2e-tests.sh
deleted file mode 100755
index ea01eeae6..000000000
--- a/vendor/github.com/knative/test-infra/scripts/e2e-tests.sh
+++ /dev/null
@@ -1,473 +0,0 @@
-#!/bin/bash
-
-# Copyright 2019 The Knative Authors
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# This is a helper script for Knative E2E test scripts.
-# See README.md for instructions on how to use it.
-
-source $(dirname ${BASH_SOURCE})/library.sh
-
-# Build a resource name based on $E2E_BASE_NAME, a suffix and $BUILD_NUMBER.
-# Restricts the name length to 40 chars (the limit for resource names in GCP).
-# Name will have the form $E2E_BASE_NAME-$BUILD_NUMBER.
-# Parameters: $1 - name suffix
-function build_resource_name() {
-  local prefix=${E2E_BASE_NAME}-$1
-  local suffix=${BUILD_NUMBER}
-  # Restrict suffix length to 20 chars
-  if [[ -n "${suffix}" ]]; then
-    suffix=${suffix:${#suffix}<20?0:-20}
-  fi
-  local name="${prefix:0:20}${suffix}"
-  # Ensure name doesn't end with "-"
-  echo "${name%-}"
-}
-
-# Test cluster parameters
-
-# Configurable parameters
-# export E2E_CLUSTER_REGION and E2E_CLUSTER_ZONE as they're used in the cluster setup subprocess
-export E2E_CLUSTER_REGION=${E2E_CLUSTER_REGION:-us-central1}
-# By default we use regional clusters.
-export E2E_CLUSTER_ZONE=${E2E_CLUSTER_ZONE:-}
-
-# Default backup regions in case of stockouts; by default we don't fall back to a different zone in the same region
-readonly E2E_CLUSTER_BACKUP_REGIONS=${E2E_CLUSTER_BACKUP_REGIONS:-us-west1 us-east1}
-readonly E2E_CLUSTER_BACKUP_ZONES=${E2E_CLUSTER_BACKUP_ZONES:-}
-
-readonly E2E_CLUSTER_MACHINE=${E2E_CLUSTER_MACHINE:-n1-standard-4}
-readonly E2E_GKE_ENVIRONMENT=${E2E_GKE_ENVIRONMENT:-prod}
-readonly E2E_GKE_COMMAND_GROUP=${E2E_GKE_COMMAND_GROUP:-beta}
-
-# Each knative repository may have a different cluster size requirement here,
-# so we allow calling code to set these parameters. If they are not set we
-# use some sane defaults.
-readonly E2E_MIN_CLUSTER_NODES=${E2E_MIN_CLUSTER_NODES:-1}
-readonly E2E_MAX_CLUSTER_NODES=${E2E_MAX_CLUSTER_NODES:-3}
-
-readonly E2E_BASE_NAME="k${REPO_NAME}"
-readonly E2E_CLUSTER_NAME=$(build_resource_name e2e-cls)
-readonly E2E_NETWORK_NAME=$(build_resource_name e2e-net)
-readonly TEST_RESULT_FILE=/tmp/${E2E_BASE_NAME}-e2e-result
-
-# Flag whether test is using a boskos GCP project
-IS_BOSKOS=0
-
-# Tear down the test resources.
-function teardown_test_resources() {
-  # On boskos, save time and don't tear down, as the cluster will be destroyed anyway.
-  (( IS_BOSKOS )) && return
-  header "Tearing down test environment"
-  function_exists test_teardown && test_teardown
-  (( ! SKIP_KNATIVE_SETUP )) && function_exists knative_teardown && knative_teardown
-  # Delete the kubernetes source downloaded by kubetest
-  rm -fr kubernetes kubernetes.tar.gz
-}
-
-# Run the given E2E tests. Assume tests are tagged e2e, unless `-tags=XXX` is passed.
-# Parameters: $1..$n - any go test flags, then directories containing the tests to run.
-function go_test_e2e() {
-  local test_options=""
-  local go_options=""
-  (( EMIT_METRICS )) && test_options="-emitmetrics"
-  [[ ! " $@" == *" -tags="* ]] && go_options="-tags=e2e"
-  report_go_test -v -count=1 ${go_options} $@ ${test_options}
-}
-
-# Dump info about the test cluster. If dump_extra_cluster_state() is defined, calls it too.
-# This is intended to be called when a test fails, to provide debugging information.
-function dump_cluster_state() {
-  echo "***************************************"
-  echo "***         E2E TEST FAILED         ***"
-  echo "***    Start of information dump    ***"
-  echo "***************************************"
-  echo ">>> All resources:"
-  kubectl get all --all-namespaces
-  echo ">>> Services:"
-  kubectl get services --all-namespaces
-  echo ">>> Events:"
-  kubectl get events --all-namespaces
-  function_exists dump_extra_cluster_state && dump_extra_cluster_state
-  echo "***************************************"
-  echo "***         E2E TEST FAILED         ***"
-  echo "***     End of information dump     ***"
-  echo "***************************************"
-}
-
-# On a Prow job, save some metadata about the test for Testgrid.
-function save_metadata() {
-  (( ! IS_PROW )) && return
-  local geo_key="Region"
-  local geo_value="${E2E_CLUSTER_REGION}"
-  if [[ -n "${E2E_CLUSTER_ZONE}" ]]; then
-    geo_key="Zone"
-    geo_value="${E2E_CLUSTER_REGION}-${E2E_CLUSTER_ZONE}"
-  fi
-  local cluster_version="$(gcloud container clusters list --project=${E2E_PROJECT_ID} --format='value(currentMasterVersion)')"
-  cat << EOF > ${ARTIFACTS}/metadata.json
-{
-  "E2E:${geo_key}": "${geo_value}",
-  "E2E:Machine": "${E2E_CLUSTER_MACHINE}",
-  "E2E:Version": "${cluster_version}",
-  "E2E:MinNodes": "${E2E_MIN_CLUSTER_NODES}",
-  "E2E:MaxNodes": "${E2E_MAX_CLUSTER_NODES}"
-}
-EOF
-}
-
-# Set E2E_CLUSTER_VERSION to a specific GKE version.
-# Parameters: $1 - target GKE version (X.Y, X.Y.Z, X.Y.Z-gke.W, default or gke-latest).
-#             $2 - region[-zone] where the cluster will be created.
-function resolve_k8s_version() {
-  local target_version="$1"
-  if [[ "${target_version}" == "default" ]]; then
-    local version="$(gcloud container get-server-config \
-      --format='value(defaultClusterVersion)' \
-      --zone=$2)"
-    [[ -z "${version}" ]] && return 1
-    E2E_CLUSTER_VERSION="${version}"
-    echo "Using default version, ${E2E_CLUSTER_VERSION}"
-    return 0
-  fi
-  # Fetch valid versions
-  local versions="$(gcloud container get-server-config \
-    --format='value(validMasterVersions)' \
-    --zone=$2)"
-  [[ -z "${versions}" ]] && return 1
-  local gke_versions=($(echo -n "${versions//;/ }"))
-  echo "Available GKE versions in $2 are [${versions//;/, }]"
-  if [[ "${target_version}" == "gke-latest" ]]; then
-    # Get first (latest) version, excluding the "-gke.#" suffix
-    E2E_CLUSTER_VERSION="${gke_versions[0]}"
-    echo "Using latest version, ${E2E_CLUSTER_VERSION}"
-  else
-    local latest="$(echo "${gke_versions[@]}" | tr ' ' '\n' | grep -E ^${target_version} | cut -f1 -d- | sort | tail -1)"
-    if [[ -z "${latest}" ]]; then
-      echo "ERROR: version ${target_version} is not available"
-      return 1
-    fi
-    E2E_CLUSTER_VERSION="${latest}"
-    echo "Using ${E2E_CLUSTER_VERSION} for supplied version ${target_version}"
-  fi
-  return 0
-}
-
-# Create a test cluster with kubetest and call the current script again.
-function create_test_cluster() {
-  # Fail fast during setup.
-  set -o errexit
-  set -o pipefail
-
-  if function_exists cluster_setup; then
-    cluster_setup || fail_test "cluster setup failed"
-  fi
-
-  echo "Cluster will have a minimum of ${E2E_MIN_CLUSTER_NODES} and a maximum of ${E2E_MAX_CLUSTER_NODES} nodes."
-
-  # Smallest cluster required to run the end-to-end-tests
-  local CLUSTER_CREATION_ARGS=(
-    --gke-create-command="container clusters create --quiet --enable-autoscaling --min-nodes=${E2E_MIN_CLUSTER_NODES} --max-nodes=${E2E_MAX_CLUSTER_NODES} --scopes=cloud-platform --enable-basic-auth --no-issue-client-certificate ${GKE_ADDONS} ${EXTRA_CLUSTER_CREATION_FLAGS[@]}"
-    --gke-shape={\"default\":{\"Nodes\":${E2E_MIN_CLUSTER_NODES}\,\"MachineType\":\"${E2E_CLUSTER_MACHINE}\"}}
-    --provider=gke
-    --deployment=gke
-    --cluster="${E2E_CLUSTER_NAME}"
-    --gcp-network="${E2E_NETWORK_NAME}"
-    --gcp-node-image="${SERVING_GKE_IMAGE}"
-    --gke-environment="${E2E_GKE_ENVIRONMENT}"
-    --gke-command-group="${E2E_GKE_COMMAND_GROUP}"
-    --test=false
-    --up
-  )
-  if (( ! IS_BOSKOS )); then
-    CLUSTER_CREATION_ARGS+=(--gcp-project=${GCP_PROJECT})
-  fi
-  # SSH keys are not used, but kubetest checks for their existence.
-  # Touch them so that if they don't exist, empty files are created to satisfy the check.
-  mkdir -p $HOME/.ssh
-  touch $HOME/.ssh/google_compute_engine.pub
-  touch $HOME/.ssh/google_compute_engine
-  # Assume test failed (see details in set_test_return_code()).
-  set_test_return_code 1
-  local gcloud_project="${GCP_PROJECT}"
-  [[ -z "${gcloud_project}" ]] && gcloud_project="$(gcloud config get-value project)"
-  echo "gcloud project is ${gcloud_project}"
-  echo "gcloud user is $(gcloud config get-value core/account)"
-  (( IS_BOSKOS )) && echo "Using boskos for the test cluster"
-  [[ -n "${GCP_PROJECT}" ]] && echo "GCP project for test cluster is ${GCP_PROJECT}"
-  echo "Test script is ${E2E_SCRIPT}"
-  # Set arguments for this script again
-  local test_cmd_args="--run-tests"
-  (( EMIT_METRICS )) && test_cmd_args+=" --emit-metrics"
-  (( SKIP_KNATIVE_SETUP )) && test_cmd_args+=" --skip-knative-setup"
-  [[ -n "${GCP_PROJECT}" ]] && test_cmd_args+=" --gcp-project ${GCP_PROJECT}"
-  [[ -n "${E2E_SCRIPT_CUSTOM_FLAGS[@]}" ]] && test_cmd_args+=" ${E2E_SCRIPT_CUSTOM_FLAGS[@]}"
-  local extra_flags=()
-  # If using boskos, save time and let it tear down the cluster
-  (( ! IS_BOSKOS )) && extra_flags+=(--down)
-
-  # Set a minimal kubernetes environment that satisfies kubetest
-  # TODO(adrcunha): Remove once https://github.com/kubernetes/test-infra/issues/13029 is fixed.
-  local kubedir="$(mktemp -d --tmpdir kubernetes.XXXXXXXXXX)"
-  local test_wrapper="${kubedir}/e2e-test.sh"
-  mkdir ${kubedir}/cluster
-  ln -s "$(which kubectl)" ${kubedir}/cluster/kubectl.sh
-  echo "#!/bin/bash" > ${test_wrapper}
-  echo "cd $(pwd) && set -x" >> ${test_wrapper}
-  echo "${E2E_SCRIPT} ${test_cmd_args}" >> ${test_wrapper}
-  chmod +x ${test_wrapper}
-  cd ${kubedir}
-
-  # Create cluster and run the tests
-  create_test_cluster_with_retries "${CLUSTER_CREATION_ARGS[@]}" \
-    --test-cmd "${test_wrapper}" \
-    ${extra_flags[@]} \
-    ${EXTRA_KUBETEST_FLAGS[@]}
-  echo "Test subprocess exited with code $?"
-  # Ignore any errors below; this is a best-effort cleanup and shouldn't affect the test result.
-  set +o errexit
-  function_exists cluster_teardown && cluster_teardown
-  local result=$(get_test_return_code)
-  echo "Artifacts were written to ${ARTIFACTS}"
-  echo "Test result code is ${result}"
-  exit ${result}
-}
-
-# Retry backup regions/zones if cluster creation failed due to stockout.
-# Parameters: $1..$n - any kubetest flags other than the geo flag.
-function create_test_cluster_with_retries() {
-  local cluster_creation_log=/tmp/${E2E_BASE_NAME}-cluster_creation-log
-  # zone_not_provided is a placeholder for e2e_cluster_zone to make the for loop below work
-  local zone_not_provided="zone_not_provided"
-
-  local e2e_cluster_regions=(${E2E_CLUSTER_REGION})
-  local e2e_cluster_zones=(${E2E_CLUSTER_ZONE})
-
-  if [[ -n "${E2E_CLUSTER_BACKUP_ZONES}" ]]; then
-    e2e_cluster_zones+=(${E2E_CLUSTER_BACKUP_ZONES})
-  elif [[ -n "${E2E_CLUSTER_BACKUP_REGIONS}" ]]; then
-    e2e_cluster_regions+=(${E2E_CLUSTER_BACKUP_REGIONS})
-    e2e_cluster_zones=(${zone_not_provided})
-  else
-    echo "No backup region/zone set; cluster creation will fail in case of stockout"
-  fi
-
-  local e2e_cluster_target_version="${E2E_CLUSTER_VERSION}"
-  for e2e_cluster_region in "${e2e_cluster_regions[@]}"; do
-    for e2e_cluster_zone in "${e2e_cluster_zones[@]}"; do
-      E2E_CLUSTER_REGION=${e2e_cluster_region}
-      E2E_CLUSTER_ZONE=${e2e_cluster_zone}
-      [[ "${E2E_CLUSTER_ZONE}" == "${zone_not_provided}" ]] && E2E_CLUSTER_ZONE=""
-      local cluster_creation_zone="${E2E_CLUSTER_REGION}"
-      [[ -n "${E2E_CLUSTER_ZONE}" ]] && cluster_creation_zone="${E2E_CLUSTER_REGION}-${E2E_CLUSTER_ZONE}"
-      resolve_k8s_version ${e2e_cluster_target_version} ${cluster_creation_zone} || return 1
-
-      header "Creating test cluster ${E2E_CLUSTER_VERSION} in ${cluster_creation_zone}"
-      # Don't fail the test for kubetest, as it might incorrectly report test failure
-      # if teardown fails (for details, see success() below)
-      set +o errexit
-      export CLUSTER_API_VERSION=${E2E_CLUSTER_VERSION}
-      run_go_tool k8s.io/test-infra/kubetest \
-        kubetest "$@" --gcp-region=${cluster_creation_zone} 2>&1 | tee ${cluster_creation_log}
-
-      # Exit if test succeeded
-      [[ "$(get_test_return_code)" == "0" ]] && return 0
-      # Retry if cluster creation failed because of:
-      # - stockout (https://github.com/knative/test-infra/issues/592)
-      # - latest GKE not available in this region/zone yet (https://github.com/knative/test-infra/issues/694)
-      [[ -z "$(grep -Fo 'does not have enough resources available to fulfill' ${cluster_creation_log})" \
-          && -z "$(grep -Fo 'ResponseError: code=400, message=No valid versions with the prefix' ${cluster_creation_log})" \
-          && -z "$(grep -Po 'ResponseError: code=400, message=Master version "[0-9a-z\-\.]+" is unsupported' ${cluster_creation_log})" ]] \
-          && return 1
-    done
-  done
-  echo "No more region/zones to try, quitting"
-  return 1
-}
-
-# Setup the test cluster for running the tests.
-function setup_test_cluster() {
-  # Fail fast during setup.
-  set -o errexit
-  set -o pipefail
-
-  header "Setting up test cluster"
-
-  # Set the actual project the test cluster resides in.
-  # It will be a project assigned by Boskos if the test is running on Prow,
-  # otherwise it will be ${GCP_PROJECT}, set up by the user.
-  export E2E_PROJECT_ID="$(gcloud config get-value project)"
-  readonly E2E_PROJECT_ID
-
-  # Save some metadata about cluster creation for use in Prow and Testgrid
-  save_metadata
-
-  local k8s_user=$(gcloud config get-value core/account)
-  local k8s_cluster=$(kubectl config current-context)
-
-  is_protected_cluster ${k8s_cluster} && \
-    abort "kubeconfig context set to ${k8s_cluster}, which is forbidden"
-
-  # If the cluster-admin role isn't set, this is a brand new cluster.
-  # Set up the admin role and also KO_DOCKER_REPO.
-  if [[ -z "$(kubectl get clusterrolebinding cluster-admin-binding 2> /dev/null)" ]]; then
-    acquire_cluster_admin_role ${k8s_user} ${E2E_CLUSTER_NAME} ${E2E_CLUSTER_REGION} ${E2E_CLUSTER_ZONE}
-    kubectl config set-context ${k8s_cluster} --namespace=default
-    # Incorporate an element of randomness to ensure that each run properly publishes images.
-    export KO_DOCKER_REPO=gcr.io/${E2E_PROJECT_ID}/${E2E_BASE_NAME}-e2e-img/${RANDOM}
-  fi
-
-  # Safety checks
-  is_protected_gcr ${KO_DOCKER_REPO} && \
-    abort "\$KO_DOCKER_REPO set to ${KO_DOCKER_REPO}, which is forbidden"
-
-  echo "- Project is ${E2E_PROJECT_ID}"
-  echo "- Cluster is ${k8s_cluster}"
-  echo "- User is ${k8s_user}"
-  echo "- Docker is ${KO_DOCKER_REPO}"
-
-  export KO_DATA_PATH="${REPO_ROOT_DIR}/.git"
-
-  trap teardown_test_resources EXIT
-
-  # Handle failures ourselves, so we can dump useful info.
-  set +o errexit
-  set +o pipefail
-
-  if (( ! SKIP_KNATIVE_SETUP )) && function_exists knative_setup; then
-    # Wait for Istio installation to complete, if necessary, before calling knative_setup.
-    (( ! SKIP_ISTIO_ADDON )) && (wait_until_batch_job_complete istio-system || return 1)
-    knative_setup || fail_test "Knative setup failed"
-  fi
-  if function_exists test_setup; then
-    test_setup || fail_test "test setup failed"
-  fi
-}
-
-# Gets the exit code of the test script.
-# For more details, see set_test_return_code().
-function get_test_return_code() {
-  echo $(cat ${TEST_RESULT_FILE})
-}
-
-# Set the return code that the test script will return.
-# Parameters: $1 - return code (0-255)
-function set_test_return_code() {
-  # kubetest teardown might fail and thus incorrectly report failure of the
-  # script, even if the tests pass.
-  # We store the real test result to return it later, ignoring any teardown
-  # failure in kubetest.
-  # TODO(adrcunha): Get rid of this workaround.
-  echo -n "$1" > ${TEST_RESULT_FILE}
-}
-
-# Signal (as return code and in the logs) that all E2E tests passed.
-function success() {
-  set_test_return_code 0
-  echo "**************************************"
-  echo "***        E2E TESTS PASSED        ***"
-  echo "**************************************"
-  exit 0
-}
-
-# Exit test, dumping current state info.
-# Parameters: $1 - error message (optional).
-function fail_test() {
-  set_test_return_code 1
-  [[ -n $1 ]] && echo "ERROR: $1"
-  dump_cluster_state
-  exit 1
-}
-
-RUN_TESTS=0
-EMIT_METRICS=0
-SKIP_KNATIVE_SETUP=0
-SKIP_ISTIO_ADDON=0
-GCP_PROJECT=""
-E2E_SCRIPT=""
-E2E_CLUSTER_VERSION=""
-GKE_ADDONS=""
-EXTRA_CLUSTER_CREATION_FLAGS=()
-EXTRA_KUBETEST_FLAGS=()
-E2E_SCRIPT_CUSTOM_FLAGS=()
-
-# Parse flags and initialize the test cluster.
-function initialize() {
-  E2E_SCRIPT="$(get_canonical_path $0)"
-  E2E_CLUSTER_VERSION="${SERVING_GKE_VERSION}"
-
-  cd ${REPO_ROOT_DIR}
-  while [[ $# -ne 0 ]]; do
-    local parameter=$1
-    # Try parsing flag as a custom one.
-    if function_exists parse_flags; then
-      parse_flags $@
-      local skip=$?
-      if [[ ${skip} -ne 0 ]]; then
-        # Skip parsed flag (and possibly argument) and continue
-        # Also save it so it's passed through to the test script
-        for ((i=1;i<=skip;i++)); do
-          E2E_SCRIPT_CUSTOM_FLAGS+=("$1")
-          shift
-        done
-        continue
-      fi
-    fi
-    # Try parsing flag as a standard one.
-    case ${parameter} in
-      --run-tests) RUN_TESTS=1 ;;
-      --emit-metrics) EMIT_METRICS=1 ;;
-      --skip-knative-setup) SKIP_KNATIVE_SETUP=1 ;;
-      --skip-istio-addon) SKIP_ISTIO_ADDON=1 ;;
-      *)
-        [[ $# -ge 2 ]] || abort "missing parameter after $1"
-        shift
-        case ${parameter} in
-          --gcp-project) GCP_PROJECT=$1 ;;
-          --cluster-version) E2E_CLUSTER_VERSION=$1 ;;
-          --cluster-creation-flag) EXTRA_CLUSTER_CREATION_FLAGS+=($1) ;;
-          --kubetest-flag) EXTRA_KUBETEST_FLAGS+=($1) ;;
-          *) abort "unknown option ${parameter}" ;;
-        esac
-    esac
-    shift
-  done
-
-  # Use PROJECT_ID if set, unless --gcp-project was used.
-  if [[ -n "${PROJECT_ID:-}" && -z "${GCP_PROJECT}" ]]; then
-    echo "\$PROJECT_ID is set to '${PROJECT_ID}', using it to run the tests"
-    GCP_PROJECT="${PROJECT_ID}"
-  fi
-  if (( ! IS_PROW )) && (( ! RUN_TESTS )) && [[ -z "${GCP_PROJECT}" ]]; then
-    abort "set \$PROJECT_ID or use --gcp-project to select the GCP project where the tests are run"
-  fi
-
-  (( IS_PROW )) && [[ -z "${GCP_PROJECT}" ]] && IS_BOSKOS=1
-
-  (( SKIP_ISTIO_ADDON )) || GKE_ADDONS="--addons=Istio"
-
-  readonly RUN_TESTS
-  readonly EMIT_METRICS
-  readonly GCP_PROJECT
-  readonly IS_BOSKOS
-  readonly EXTRA_CLUSTER_CREATION_FLAGS
-  readonly EXTRA_KUBETEST_FLAGS
-  readonly SKIP_KNATIVE_SETUP
-  readonly GKE_ADDONS
-
-  if (( ! RUN_TESTS )); then
-    create_test_cluster
-  else
-    setup_test_cluster
-  fi
-}
diff --git a/vendor/github.com/knative/test-infra/scripts/library.sh b/vendor/github.com/knative/test-infra/scripts/library.sh
deleted file mode 100755
index 68d688d96..000000000
--- a/vendor/github.com/knative/test-infra/scripts/library.sh
+++ /dev/null
@@ -1,525 +0,0 @@
-#!/bin/bash
-
-# Copyright 2018 The Knative Authors
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# This is a collection of useful bash functions and constants, intended
-# to be used in test scripts and the like. It doesn't do anything when
-# called from the command line.
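A short sketch of how these helpers are meant to be consumed: a test script sources the file and then calls the functions directly. The caller path and the namespace below are illustrative assumptions, not something this library prescribes.

    #!/bin/bash
    # Source the vendored library (the exact path depends on the repo layout).
    source $(dirname $0)/../vendor/github.com/knative/test-infra/scripts/library.sh

    header "Deploying the component under test"
    kubectl apply -f config/ || abort "deployment failed"
    # wait_until_pods_running polls for up to 5 minutes before giving up.
    wait_until_pods_running my-test-namespace || abort "pods never came up"
    # Run the repo's go tests and emit a JUnit XML report into ${ARTIFACTS}.
    report_go_test ./pkg/...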
- -# GCP project where all tests related resources live -readonly KNATIVE_TESTS_PROJECT=knative-tests - -# Default GKE version to be used with Knative Serving -readonly SERVING_GKE_VERSION=gke-latest -readonly SERVING_GKE_IMAGE=cos - -# Conveniently set GOPATH if unset -if [[ -z "${GOPATH:-}" ]]; then - export GOPATH="$(go env GOPATH)" - if [[ -z "${GOPATH}" ]]; then - echo "WARNING: GOPATH not set and go binary unable to provide it" - fi -fi - -# Useful environment variables -[[ -n "${PROW_JOB_ID:-}" ]] && IS_PROW=1 || IS_PROW=0 -readonly IS_PROW -[[ -z "${REPO_ROOT_DIR:-}" ]] && REPO_ROOT_DIR="$(git rev-parse --show-toplevel)" -readonly REPO_ROOT_DIR -readonly REPO_NAME="$(basename ${REPO_ROOT_DIR})" - -# Useful flags about the current OS -IS_LINUX=0 -IS_OSX=0 -IS_WINDOWS=0 -case "${OSTYPE}" in - darwin*) IS_OSX=1 ;; - linux*) IS_LINUX=1 ;; - msys*) IS_WINDOWS=1 ;; - *) echo "** Internal error in library.sh, unknown OS '${OSTYPE}'" ; exit 1 ;; -esac -readonly IS_LINUX -readonly IS_OSX -readonly IS_WINDOWS - -# Set ARTIFACTS to an empty temp dir if unset -if [[ -z "${ARTIFACTS:-}" ]]; then - export ARTIFACTS="$(mktemp -d)" -fi - -# On a Prow job, redirect stderr to stdout so it's synchronously added to log -(( IS_PROW )) && exec 2>&1 - -# Print error message and exit 1 -# Parameters: $1..$n - error message to be displayed -function abort() { - echo "error: $@" - exit 1 -} - -# Display a box banner. -# Parameters: $1 - character to use for the box. -# $2 - banner message. -function make_banner() { - local msg="$1$1$1$1 $2 $1$1$1$1" - local border="${msg//[-0-9A-Za-z _.,\/()\']/$1}" - echo -e "${border}\n${msg}\n${border}" - # TODO(adrcunha): Remove once logs have timestamps on Prow - # For details, see https://github.com/kubernetes/test-infra/issues/10100 - echo -e "$1$1$1$1 $(TZ='America/Los_Angeles' date)\n${border}" -} - -# Simple header for logging purposes. -function header() { - local upper="$(echo $1 | tr a-z A-Z)" - make_banner "=" "${upper}" -} - -# Simple subheader for logging purposes. -function subheader() { - make_banner "-" "$1" -} - -# Simple warning banner for logging purposes. -function warning() { - make_banner "!" "$1" -} - -# Checks whether the given function exists. -function function_exists() { - [[ "$(type -t $1)" == "function" ]] -} - -# Waits until the given object doesn't exist. -# Parameters: $1 - the kind of the object. -# $2 - object's name. -# $3 - namespace (optional). -function wait_until_object_does_not_exist() { - local KUBECTL_ARGS="get $1 $2" - local DESCRIPTION="$1 $2" - - if [[ -n $3 ]]; then - KUBECTL_ARGS="get -n $3 $1 $2" - DESCRIPTION="$1 $3/$2" - fi - echo -n "Waiting until ${DESCRIPTION} does not exist" - for i in {1..150}; do # timeout after 5 minutes - if ! kubectl ${KUBECTL_ARGS} > /dev/null 2>&1; then - echo -e "\n${DESCRIPTION} does not exist" - return 0 - fi - echo -n "." - sleep 2 - done - echo -e "\n\nERROR: timeout waiting for ${DESCRIPTION} not to exist" - kubectl ${KUBECTL_ARGS} - return 1 -} - -# Waits until all pods are running in the given namespace. -# Parameters: $1 - namespace. 
-function wait_until_pods_running() { - echo -n "Waiting until all pods in namespace $1 are up" - for i in {1..150}; do # timeout after 5 minutes - local pods="$(kubectl get pods --no-headers -n $1 2>/dev/null)" - # All pods must be running - local not_running=$(echo "${pods}" | grep -v Running | grep -v Completed | wc -l) - if [[ -n "${pods}" && ${not_running} -eq 0 ]]; then - local all_ready=1 - while read pod ; do - local status=(`echo -n ${pod} | cut -f2 -d' ' | tr '/' ' '`) - # All containers must be ready - [[ -z ${status[0]} ]] && all_ready=0 && break - [[ -z ${status[1]} ]] && all_ready=0 && break - [[ ${status[0]} -lt 1 ]] && all_ready=0 && break - [[ ${status[1]} -lt 1 ]] && all_ready=0 && break - [[ ${status[0]} -ne ${status[1]} ]] && all_ready=0 && break - done <<< "$(echo "${pods}" | grep -v Completed)" - if (( all_ready )); then - echo -e "\nAll pods are up:\n${pods}" - return 0 - fi - fi - echo -n "." - sleep 2 - done - echo -e "\n\nERROR: timeout waiting for pods to come up\n${pods}" - return 1 -} - -# Waits until all batch jobs complete in the given namespace. -# Parameters: $1 - namespace. -function wait_until_batch_job_complete() { - echo -n "Waiting until all batch jobs in namespace $1 run to completion." - for i in {1..150}; do # timeout after 5 minutes - local jobs=$(kubectl get jobs -n $1 --no-headers \ - -ocustom-columns='n:{.metadata.name},c:{.spec.completions},s:{.status.succeeded}') - # All jobs must be complete - local not_complete=$(echo "${jobs}" | awk '{if ($2!=$3) print $0}' | wc -l) - if [[ ${not_complete} -eq 0 ]]; then - echo -e "\nAll jobs are complete:\n${jobs}" - return 0 - fi - echo -n "." - sleep 2 - done - echo -e "\n\nERROR: timeout waiting for jobs to complete\n${jobs}" - return 1 -} - -# Waits until the given service has an external address (IP/hostname). -# Parameters: $1 - namespace. -# $2 - service name. -function wait_until_service_has_external_ip() { - echo -n "Waiting until service $2 in namespace $1 has an external address (IP/hostname)" - for i in {1..150}; do # timeout after 15 minutes - local ip=$(kubectl get svc -n $1 $2 -o jsonpath="{.status.loadBalancer.ingress[0].ip}") - if [[ -n "${ip}" ]]; then - echo -e "\nService $2.$1 has IP $ip" - return 0 - fi - local hostname=$(kubectl get svc -n $1 $2 -o jsonpath="{.status.loadBalancer.ingress[0].hostname}") - if [[ -n "${hostname}" ]]; then - echo -e "\nService $2.$1 has hostname $hostname" - return 0 - fi - echo -n "." - sleep 6 - done - echo -e "\n\nERROR: timeout waiting for service $2.$1 to have an external address" - kubectl get pods -n $1 - return 1 -} - -# Waits for the endpoint to be routable. -# Parameters: $1 - External ingress IP address. -# $2 - cluster hostname. -function wait_until_routable() { - echo -n "Waiting until cluster $2 at $1 has a routable endpoint" - for i in {1..150}; do # timeout after 5 minutes - local val=$(curl -H "Host: $2" "http://$1" 2>/dev/null) - if [[ -n "$val" ]]; then - echo -e "\nEndpoint is now routable" - return 0 - fi - echo -n "." - sleep 2 - done - echo -e "\n\nERROR: Timed out waiting for endpoint to be routable" - return 1 -} - -# Returns the name of the first pod of the given app. -# Parameters: $1 - app name. -# $2 - namespace (optional). -function get_app_pod() { - local pods=($(get_app_pods $1 $2)) - echo "${pods[0]}" -} - -# Returns the name of all pods of the given app. -# Parameters: $1 - app name. -# $2 - namespace (optional). 
-function get_app_pods() {
-  local namespace=""
-  [[ -n $2 ]] && namespace="-n $2"
-  kubectl get pods ${namespace} --selector=app=$1 --output=jsonpath="{.items[*].metadata.name}"
-}
-
-# Capitalize the first letter of each word.
-# Parameters: $1..$n - words to capitalize.
-function capitalize() {
-  local capitalized=()
-  for word in $@; do
-    local initial="$(echo ${word:0:1}| tr 'a-z' 'A-Z')"
-    capitalized+=("${initial}${word:1}")
-  done
-  echo "${capitalized[@]}"
-}
-
-# Dumps pod logs for the given app.
-# Parameters: $1 - app name.
-#             $2 - namespace.
-function dump_app_logs() {
-  echo ">>> ${REPO_NAME_FORMATTED} $1 logs:"
-  for pod in $(get_app_pods "$1" "$2")
-  do
-    echo ">>> Pod: $pod"
-    kubectl -n "$2" logs "$pod" -c "$1"
-  done
-}
-
-# Sets the given user as cluster admin.
-# Parameters: $1 - user
-#             $2 - cluster name
-#             $3 - cluster region
-#             $4 - cluster zone, optional
-function acquire_cluster_admin_role() {
-  echo "Acquiring cluster-admin role for user '$1'"
-  local geoflag="--region=$3"
-  [[ -n $4 ]] && geoflag="--zone=$3-$4"
-  # Get the password of the admin and use it, as the service account (or the user)
-  # might not have the necessary permission.
-  local password=$(gcloud --format="value(masterAuth.password)" \
-    container clusters describe $2 ${geoflag})
-  if [[ -n "${password}" ]]; then
-    # Cluster created with basic authentication
-    kubectl config set-credentials cluster-admin \
-      --username=admin --password=${password}
-  else
-    local cert=$(mktemp)
-    local key=$(mktemp)
-    echo "Certificate in ${cert}, key in ${key}"
-    gcloud --format="value(masterAuth.clientCertificate)" \
-      container clusters describe $2 ${geoflag} | base64 -d > ${cert}
-    gcloud --format="value(masterAuth.clientKey)" \
-      container clusters describe $2 ${geoflag} | base64 -d > ${key}
-    kubectl config set-credentials cluster-admin \
-      --client-certificate=${cert} --client-key=${key}
-  fi
-  kubectl config set-context $(kubectl config current-context) \
-    --user=cluster-admin
-  kubectl create clusterrolebinding cluster-admin-binding \
-    --clusterrole=cluster-admin \
-    --user=$1
-  # Reset back to the default account
-  gcloud container clusters get-credentials \
-    $2 ${geoflag} --project $(gcloud config get-value project)
-}
-
-# Runs go tests and generates a JUnit summary.
-# Parameters: $1... - parameters to go test
-function report_go_test() {
-  # Run tests in verbose mode to capture details.
-  # go doesn't like repeating -v, so remove it if passed.
-  local args=" $@ "
-  local go_test="go test -race -v ${args/ -v / }"
-  # Just run regular go tests if not on Prow.
-  echo "Running tests with '${go_test}'"
-  local report=$(mktemp)
-  ${go_test} | tee ${report}
-  local failed=( ${PIPESTATUS[@]} )
-  [[ ${failed[0]} -eq 0 ]] && failed=${failed[1]} || failed=${failed[0]}
-  echo "Finished run, return code is ${failed}"
-  # Install go-junit-report if necessary.
-  run_go_tool github.com/jstemmer/go-junit-report go-junit-report --help > /dev/null 2>&1
-  local xml=$(mktemp ${ARTIFACTS}/junit_XXXXXXXX.xml)
-  cat ${report} \
-      | go-junit-report \
-      | sed -e "s#\"github.com/knative/${REPO_NAME}/#\"#g" \
-      > ${xml}
-  echo "XML report written to ${xml}"
-  if (( ! IS_PROW )); then
-    # Keep the suffix, so files are related.
-    local logfile=${xml/junit_/go_test_}
-    logfile=${logfile/.xml/.log}
-    cp ${report} ${logfile}
-    echo "Test log written to ${logfile}"
-  fi
-  return ${failed}
-}
-
-# Install Knative Serving in the current cluster.
-# Parameters: $1 - Knative Serving manifest.
-function start_knative_serving() {
-  header "Starting Knative Serving"
-  subheader "Installing Knative Serving"
-  echo "Installing Serving CRDs from $1"
-  kubectl apply --selector knative.dev/crd-install=true -f "$1"
-  echo "Installing the rest of serving components from $1"
-  kubectl apply -f "$1"
-  wait_until_pods_running knative-serving || return 1
-}
-
-# Install the stable release of Knative Serving in the current cluster.
-# Parameters: $1 - Knative Serving version number, e.g. 0.6.0.
-function start_release_knative_serving() {
-  start_knative_serving "https://storage.googleapis.com/knative-releases/serving/previous/v$1/serving.yaml"
-}
-
-# Install the latest stable Knative Serving in the current cluster.
-function start_latest_knative_serving() {
-  start_knative_serving "${KNATIVE_SERVING_RELEASE}"
-}
-
-# Run a go tool, installing it first if necessary.
-# Parameters: $1 - tool package/dir for go get/install.
-#             $2 - tool to run.
-#             $3..$n - parameters passed to the tool.
-function run_go_tool() {
-  local tool=$2
-  if [[ -z "$(which ${tool})" ]]; then
-    local action=get
-    [[ $1 =~ ^[\./].* ]] && action=install
-    go ${action} $1
-  fi
-  shift 2
-  ${tool} "$@"
-}
-
-# Run dep-collector to update licenses.
-# Parameters: $1 - output file, relative to repo root dir.
-#             $2...$n - directories and files to inspect.
-function update_licenses() {
-  cd ${REPO_ROOT_DIR} || return 1
-  local dst=$1
-  shift
-  run_go_tool github.com/knative/test-infra/tools/dep-collector dep-collector $@ > ./${dst}
-}
-
-# Run dep-collector to check for forbidden licenses.
-# Parameters: $1...$n - directories and files to inspect.
-function check_licenses() {
-  # Fetch the google/licenseclassifier for its license db
-  rm -fr ${GOPATH}/src/github.com/google/licenseclassifier
-  go get -u github.com/google/licenseclassifier
-  # Check that we don't have any forbidden licenses in our images.
-  run_go_tool github.com/knative/test-infra/tools/dep-collector dep-collector -check $@
-}
-
-# Run the given linter on the given files, checking it exists first.
-# Parameters: $1 - tool
-#             $2 - tool purpose (for error message if tool not installed)
-#             $3 - tool parameters (quote if multiple parameters used)
-#             $4..$n - files to run linter on
-function run_lint_tool() {
-  local checker=$1
-  local params=$3
-  if ! hash ${checker} 2>/dev/null; then
-    warning "${checker} not installed, not $2"
-    return 127
-  fi
-  shift 3
-  local failed=0
-  for file in $@; do
-    ${checker} ${params} ${file} || failed=1
-  done
-  return ${failed}
-}
-
-# Check links in the given markdown files.
-# Parameters: $1...$n - files to inspect
-function check_links_in_markdown() {
-  # https://github.com/raviqqe/liche
-  local config="${REPO_ROOT_DIR}/test/markdown-link-check-config.rc"
-  [[ ! -e ${config} ]] && config="${_TEST_INFRA_SCRIPTS_DIR}/markdown-link-check-config.rc"
-  local options="$(grep '^-' ${config} | tr \"\n\" ' ')"
-  run_lint_tool liche "checking links in markdown files" "-d ${REPO_ROOT_DIR} ${options}" $@
-}
-
-# Check format of the given markdown files.
-# Parameters: $1..$n - files to inspect
-function lint_markdown() {
-  # https://github.com/markdownlint/markdownlint
-  local config="${REPO_ROOT_DIR}/test/markdown-lint-config.rc"
-  [[ ! -e ${config} ]] && config="${_TEST_INFRA_SCRIPTS_DIR}/markdown-lint-config.rc"
-  run_lint_tool mdl "linting markdown files" "-c ${config}" $@
-}
-
-# Return whether the given parameter is an integer.
-# Parameters: $1 - integer to check
-function is_int() {
-  [[ -n $1 && $1 =~ ^[0-9]+$ ]]
-}
-
-# Return whether the given parameter is the knative release/nightly GCR.
-# Parameters: $1 - full GCR name, e.g. gcr.io/knative-foo-bar
-function is_protected_gcr() {
-  [[ -n $1 && $1 =~ ^gcr.io/knative-(releases|nightly)/?$ ]]
-}
-
-# Return whether the given parameter is any cluster under ${KNATIVE_TESTS_PROJECT}.
-# Parameters: $1 - Kubernetes cluster context (output of kubectl config current-context)
-function is_protected_cluster() {
-  # Example: gke_knative-tests_us-central1-f_prow
-  [[ -n $1 && $1 =~ ^gke_${KNATIVE_TESTS_PROJECT}_us\-[a-zA-Z0-9]+\-[a-z]+_[a-z0-9\-]+$ ]]
-}
-
-# Return whether the given parameter is ${KNATIVE_TESTS_PROJECT}.
-# Parameters: $1 - project name
-function is_protected_project() {
-  [[ -n $1 && "$1" == "${KNATIVE_TESTS_PROJECT}" ]]
-}
-
-# Remove symlinks in a path that are broken or lead outside the repo.
-# Parameters: $1 - path name, e.g. vendor
-function remove_broken_symlinks() {
-  for link in $(find $1 -type l); do
-    # Remove broken symlinks
-    if [[ ! -e ${link} ]]; then
-      unlink ${link}
-      continue
-    fi
-    # Get canonical path to target, remove if outside the repo
-    local target="$(ls -l ${link})"
-    target="${target##* -> }"
-    [[ ${target} == /* ]] || target="./${target}"
-    target="$(cd `dirname ${link}` && cd ${target%/*} && echo $PWD/${target##*/})"
-    if [[ ${target} != *github.com/knative/* ]]; then
-      unlink ${link}
-      continue
-    fi
-  done
-}
-
-# Returns the canonical path of a filesystem object.
-# Parameters: $1 - path to return in canonical form
-#             $2 - base dir for relative links; optional, defaults to current
-function get_canonical_path() {
-  # We don't use readlink because it's not available on every platform.
-  local path=$1
-  local pwd=${2:-.}
-  [[ ${path} == /* ]] || path="${pwd}/${path}"
-  echo "$(cd ${path%/*} && echo $PWD/${path##*/})"
-}
-
-# Returns the URL to the latest manifest for the given Knative project.
-# Parameters: $1 - repository name of the given project
-#             $2 - name of the yaml file, without extension
-function get_latest_knative_yaml_source() {
-  local branch_name=""
-  local repo_name="$1"
-  local yaml_name="$2"
-  # Get the branch name from Prow's env var, see https://github.com/kubernetes/test-infra/blob/master/prow/jobs.md.
-  # Otherwise, try getting the current branch from git.
-  (( IS_PROW )) && branch_name="${PULL_BASE_REF:-}"
-  [[ -z "${branch_name}" ]] && branch_name="$(git rev-parse --abbrev-ref HEAD)"
-  # If it's a release branch, the yaml source URL should point to a specific version.
-  if [[ ${branch_name} =~ ^release-[0-9\.]+$ ]]; then
-    # Get the latest tag name for the current branch, which is likely formatted as v0.5.0
-    local tag_name="$(git describe --tags --abbrev=0)"
-    # The given repo might not have this tag, so we need to find its latest release manifest with the same major&minor version.
-    local major_minor="$(echo ${tag_name} | cut -d. -f1-2)"
-    local yaml_source_path="$(gsutil ls gs://knative-releases/${repo_name}/previous/${major_minor}.*/${yaml_name}.yaml \
-      | sort \
-      | tail -n 1 \
-      | cut -b6-)"
-    echo "https://storage.googleapis.com/${yaml_source_path}"
-  # If it's not a release branch, the yaml source URL should point to the nightly build.
-  else
-    echo "https://storage.googleapis.com/knative-nightly/${repo_name}/latest/${yaml_name}.yaml"
-  fi
-}
-
-# Initializations that depend on previous functions.
-# These MUST come last.
-
-readonly _TEST_INFRA_SCRIPTS_DIR="$(dirname $(get_canonical_path ${BASH_SOURCE[0]}))"
-readonly REPO_NAME_FORMATTED="Knative $(capitalize ${REPO_NAME//-/})"
-
-# Public latest nightly or release yaml files.
-readonly KNATIVE_SERVING_RELEASE="$(get_latest_knative_yaml_source "serving" "serving")"
-readonly KNATIVE_BUILD_RELEASE="$(get_latest_knative_yaml_source "build" "build")"
-readonly KNATIVE_EVENTING_RELEASE="$(get_latest_knative_yaml_source "eventing" "release")"
diff --git a/vendor/github.com/knative/test-infra/scripts/markdown-link-check-config.rc b/vendor/github.com/knative/test-infra/scripts/markdown-link-check-config.rc
deleted file mode 100644
index 9d802a0d4..000000000
--- a/vendor/github.com/knative/test-infra/scripts/markdown-link-check-config.rc
+++ /dev/null
@@ -1,5 +0,0 @@
-# For help, see
-# https://github.com/raviqqe/liche/blob/master/README.md
-
-# Don't check localhost links
--x "^https?://localhost($|[:/].*)"
diff --git a/vendor/github.com/knative/test-infra/scripts/markdown-lint-config.rc b/vendor/github.com/knative/test-infra/scripts/markdown-lint-config.rc
deleted file mode 100644
index 461f891a2..000000000
--- a/vendor/github.com/knative/test-infra/scripts/markdown-lint-config.rc
+++ /dev/null
@@ -1,5 +0,0 @@
-# For help, see
-# https://github.com/markdownlint/markdownlint/blob/master/docs/configuration.md
-
-# Ignore long lines
-rules "~MD013"
diff --git a/vendor/github.com/knative/test-infra/scripts/presubmit-tests.sh b/vendor/github.com/knative/test-infra/scripts/presubmit-tests.sh
deleted file mode 100755
index 72d8a3bb8..000000000
--- a/vendor/github.com/knative/test-infra/scripts/presubmit-tests.sh
+++ /dev/null
@@ -1,381 +0,0 @@
-#!/bin/bash
-
-# Copyright 2018 The Knative Authors
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# This is a helper script for Knative presubmit test scripts.
-# See README.md for instructions on how to use it.
-
-source $(dirname ${BASH_SOURCE})/library.sh
-
-# Custom configuration of presubmit tests
-readonly DISABLE_MD_LINTING=${DISABLE_MD_LINTING:-0}
-readonly DISABLE_MD_LINK_CHECK=${DISABLE_MD_LINK_CHECK:-0}
-readonly PRESUBMIT_TEST_FAIL_FAST=${PRESUBMIT_TEST_FAIL_FAST:-0}
-
-# Extensions or file patterns that don't require presubmit tests.
-readonly NO_PRESUBMIT_FILES=(\.png \.gitignore \.gitattributes ^OWNERS ^OWNERS_ALIASES ^AUTHORS)
-
-# Flag if this is a presubmit run or not.
-(( IS_PROW )) && [[ -n "${PULL_PULL_SHA}" ]] && IS_PRESUBMIT=1 || IS_PRESUBMIT=0
-readonly IS_PRESUBMIT
-
-# List of changed files on presubmit, LF separated.
-CHANGED_FILES=""
-
-# Flags that this PR is exempt from presubmit tests.
-IS_PRESUBMIT_EXEMPT_PR=0
-
-# Flags that this PR contains only changes to documentation.
-IS_DOCUMENTATION_PR=0
-
-# Returns true if the PR only contains the given file regexes.
-# Parameters: $1 - file regexes, space separated.
-function pr_only_contains() {
-  [[ -z "$(echo "${CHANGED_FILES}" | grep -v "\(${1// /\\|}\)$")" ]]
-}
-
-# List changed files in the current PR.
-# This is implemented as a function so it can be mocked in unit tests.
-function list_changed_files() {
-  /workspace/githubhelper -list-changed-files
-}
-
-# Initialize flags and context for presubmit tests:
-# CHANGED_FILES, IS_PRESUBMIT_EXEMPT_PR and IS_DOCUMENTATION_PR.
-function initialize_environment() {
-  CHANGED_FILES=""
-  IS_PRESUBMIT_EXEMPT_PR=0
-  IS_DOCUMENTATION_PR=0
-  (( ! IS_PRESUBMIT )) && return
-  CHANGED_FILES="$(list_changed_files)"
-  if [[ -n "${CHANGED_FILES}" ]]; then
-    echo -e "Changed files in commit ${PULL_PULL_SHA}:\n${CHANGED_FILES}"
-    local no_presubmit_files="${NO_PRESUBMIT_FILES[*]}"
-    pr_only_contains "${no_presubmit_files}" && IS_PRESUBMIT_EXEMPT_PR=1
-    # A documentation PR must contain markdown files
-    if pr_only_contains "\.md ${no_presubmit_files}"; then
-      [[ -n "$(echo "${CHANGED_FILES}" | grep '\.md')" ]] && IS_DOCUMENTATION_PR=1
-    fi
-  else
-    header "NO CHANGED FILES REPORTED, ASSUMING IT'S AN ERROR AND RUNNING TESTS ANYWAY"
-  fi
-  readonly CHANGED_FILES
-  readonly IS_DOCUMENTATION_PR
-  readonly IS_PRESUBMIT_EXEMPT_PR
-}
-
-# Display a pass/fail banner for a test group.
-# Parameters: $1 - test group name (e.g., build)
-#             $2 - result (0=passed, 1=failed)
-function results_banner() {
-  local result
-  [[ $2 -eq 0 ]] && result="PASSED" || result="FAILED"
-  header "$1 tests ${result}"
-}
-
-# Create a JUnit XML for a failure.
-# Parameters: $1 - check name as an identifier (e.g., PresubmitBuildTest)
-#             $2 - failure message (can contain newlines)
-function create_junit_xml() {
-  local xml="$(mktemp ${ARTIFACTS}/junit_XXXXXXXX.xml)"
-  # Transform newlines into HTML code.
-  local msg="$(echo -n "$2" | sed 's/$/\&#xA;/g' | tr -d '\n')"
-  cat << EOF > "${xml}"
-<testsuites>
-  <testsuite tests="1" failures="1" time="0.000" name="$1">
-    <testcase classname="" name="$1" time="0.0">
-      <failure message="Failed" type="">${msg}</failure>
-    </testcase>
-  </testsuite>
-</testsuites>
-EOF
-}
-
-# Run build tests. If there's no `build_tests` function, run the default
-# build test runner.
-function run_build_tests() {
-  (( ! RUN_BUILD_TESTS )) && return 0
-  header "Running build tests"
-  local failed=0
-  # Run pre-build tests, if any
-  if function_exists pre_build_tests; then
-    pre_build_tests || failed=1
-  fi
-  # Don't run build tests if pre-build tests failed
-  if (( ! failed )); then
-    if function_exists build_tests; then
-      build_tests || failed=1
-    else
-      default_build_test_runner || failed=1
-    fi
-  fi
-  # Don't run post-build tests if pre/build tests failed
-  if (( ! failed )) && function_exists post_build_tests; then
-    post_build_tests || failed=1
-  fi
-  results_banner "Build" ${failed}
-  return ${failed}
-}
-
-# Perform markdown build tests if necessary, unless disabled.
-function markdown_build_tests() {
-  (( DISABLE_MD_LINTING && DISABLE_MD_LINK_CHECK )) && return 0
-  # Get changed markdown files (ignore /vendor and deleted files)
-  local mdfiles=""
-  for file in $(echo "${CHANGED_FILES}" | grep \.md$ | grep -v ^vendor/); do
-    [[ -f "${file}" ]] && mdfiles="${mdfiles} ${file}"
-  done
-  [[ -z "${mdfiles}" ]] && return 0
-  local failed=0
-  if (( ! DISABLE_MD_LINTING )); then
-    subheader "Linting the markdown files"
-    lint_markdown ${mdfiles} || failed=1
-  fi
-  if (( ! 
DISABLE_MD_LINK_CHECK )); then
-    subheader "Checking links in the markdown files"
-    check_links_in_markdown ${mdfiles} || failed=1
-  fi
-  return ${failed}
-}
-
-# Default build test runner that:
-# * check markdown files
-# * `go build` on the entire repo
-# * run `/hack/verify-codegen.sh` (if it exists)
-# * check licenses in all go packages
-function default_build_test_runner() {
-  local failed=0
-  # Perform markdown build checks first
-  markdown_build_tests || failed=1
-  # For documentation PRs, just check the md files
-  (( IS_DOCUMENTATION_PR )) && return ${failed}
-  # Skip build test if there is no go code
-  local go_pkg_dirs="$(go list ./...)"
-  [[ -z "${go_pkg_dirs}" ]] && return ${failed}
-  # Ensure all the code builds
-  subheader "Checking that go code builds"
-  local report=$(mktemp)
-  go build -v ./... 2>&1 | tee ${report}
-  local build_failed=( ${PIPESTATUS[@]} )
-  if [[ ${build_failed[0]} -ne 0 ]]; then
-    failed=1
-    # Consider an error message everything that's not a package name.
-    local errors="$(grep -v '^github.com/' "${report}" | sort | uniq)"
-    create_junit_xml PresubmitBuildTest "${errors}"
-  fi
-  # Get all build tags in go code (ignore /vendor)
-  local tags="$(grep -r '// +build' . \
-      | grep -v '^./vendor/' | cut -f3 -d' ' | sort | uniq | tr '\n' ' ')"
-  if [[ -n "${tags}" ]]; then
-    go test -run=^$ -tags="${tags}" ./... || failed=1
-  fi
-  if [[ -f ./hack/verify-codegen.sh ]]; then
-    subheader "Checking autogenerated code is up-to-date"
-    ./hack/verify-codegen.sh || failed=1
-  fi
-  # Check that we don't have any forbidden licenses in our images.
-  subheader "Checking for forbidden licenses"
-  check_licenses ${go_pkg_dirs} || failed=1
-  return ${failed}
-}
-
-# Run unit tests. If there's no `unit_tests` function, run the default
-# unit test runner.
-function run_unit_tests() {
-  (( ! RUN_UNIT_TESTS )) && return 0
-  if (( IS_DOCUMENTATION_PR )); then
-    header "Documentation only PR, skipping unit tests"
-    return 0
-  fi
-  header "Running unit tests"
-  local failed=0
-  # Run pre-unit tests, if any
-  if function_exists pre_unit_tests; then
-    pre_unit_tests || failed=1
-  fi
-  # Don't run unit tests if pre-unit tests failed
-  if (( ! failed )); then
-    if function_exists unit_tests; then
-      unit_tests || failed=1
-    else
-      default_unit_test_runner || failed=1
-    fi
-  fi
-  # Don't run post-unit tests if pre/unit tests failed
-  if (( ! failed )) && function_exists post_unit_tests; then
-    post_unit_tests || failed=1
-  fi
-  results_banner "Unit" ${failed}
-  return ${failed}
-}
-
-# Default unit test runner that runs all go tests in the repo.
-function default_unit_test_runner() {
-  report_go_test ./...
-}
-
-# Run integration tests. If there's no `integration_tests` function, run the
-# default integration test runner.
-function run_integration_tests() {
-  # Don't run integration tests if not requested OR on documentation PRs
-  (( ! RUN_INTEGRATION_TESTS )) && return 0
-  if (( IS_DOCUMENTATION_PR )); then
-    header "Documentation only PR, skipping integration tests"
-    return 0
-  fi
-  header "Running integration tests"
-  local failed=0
-  # Run pre-integration tests, if any
-  if function_exists pre_integration_tests; then
-    pre_integration_tests || failed=1
-  fi
-  # Don't run integration tests if pre-integration tests failed
-  if (( ! failed )); then
-    if function_exists integration_tests; then
-      integration_tests || failed=1
-    else
-      default_integration_test_runner || failed=1
-    fi
-  fi
-  # Don't run post-integration tests if pre/integration tests failed
-  if (( ! 
failed )) && function_exists post_integration_tests; then - post_integration_tests || failed=1 - fi - results_banner "Integration" ${failed} - return ${failed} -} - -# Default integration test runner that runs all `test/e2e-*tests.sh`. -function default_integration_test_runner() { - local options="" - local failed=0 - (( EMIT_METRICS )) && options="--emit-metrics" - for e2e_test in $(find test/ -name e2e-*tests.sh); do - echo "Running integration test ${e2e_test}" - if ! ${e2e_test} ${options}; then - failed=1 - fi - done - return ${failed} -} - -# Options set by command-line flags. -RUN_BUILD_TESTS=0 -RUN_UNIT_TESTS=0 -RUN_INTEGRATION_TESTS=0 -EMIT_METRICS=0 - -# Process flags and run tests accordingly. -function main() { - initialize_environment - if (( IS_PRESUBMIT_EXEMPT_PR )) && (( ! IS_DOCUMENTATION_PR )); then - header "Commit only contains changes that don't require tests, skipping" - exit 0 - fi - - # Show the version of the tools we're using - if (( IS_PROW )); then - # Disable gcloud update notifications - gcloud config set component_manager/disable_update_check true - header "Current test setup" - echo ">> gcloud SDK version" - gcloud version - echo ">> kubectl version" - kubectl version --client - echo ">> go version" - go version - echo ">> git version" - git version - echo ">> ko built from commit" - [[ -f /ko_version ]] && cat /ko_version || echo "unknown" - echo ">> bazel version" - [[ -f /bazel_version ]] && cat /bazel_version || echo "unknown" - if [[ "${DOCKER_IN_DOCKER_ENABLED}" == "true" ]]; then - echo ">> docker version" - docker version - fi - # node/pod names are important for debugging purposes, but they are missing - # after migrating from bootstrap to podutil. - # Report it here with the same logic as in bootstrap until it is fixed. - # (https://github.com/kubernetes/test-infra/blob/09bd4c6709dc64308406443f8996f90cf3b40ed1/jenkins/bootstrap.py#L588) - # TODO(chaodaiG): follow up on https://github.com/kubernetes/test-infra/blob/0fabd2ea816daa8c15d410c77a0c93c0550b283f/prow/initupload/run.go#L49 - echo ">> node name" - echo "$(curl -H "Metadata-Flavor: Google" 'http://169.254.169.254/computeMetadata/v1/instance/name' 2> /dev/null)" - echo ">> pod name" - echo ${HOSTNAME} - fi - - [[ -z $1 ]] && set -- "--all-tests" - - local TEST_TO_RUN="" - - while [[ $# -ne 0 ]]; do - local parameter=$1 - case ${parameter} in - --build-tests) RUN_BUILD_TESTS=1 ;; - --unit-tests) RUN_UNIT_TESTS=1 ;; - --integration-tests) RUN_INTEGRATION_TESTS=1 ;; - --emit-metrics) EMIT_METRICS=1 ;; - --all-tests) - RUN_BUILD_TESTS=1 - RUN_UNIT_TESTS=1 - RUN_INTEGRATION_TESTS=1 - ;; - --run-test) - shift - [[ $# -ge 1 ]] || abort "missing executable after --run-test" - TEST_TO_RUN=$1 - ;; - *) abort "error: unknown option ${parameter}" ;; - esac - shift - done - - readonly RUN_BUILD_TESTS - readonly RUN_UNIT_TESTS - readonly RUN_INTEGRATION_TESTS - readonly EMIT_METRICS - readonly TEST_TO_RUN - - cd ${REPO_ROOT_DIR} - - # Tests to be performed, in the right order if --all-tests is passed. 
- - local failed=0 - - if [[ -n "${TEST_TO_RUN}" ]]; then - if (( RUN_BUILD_TESTS || RUN_UNIT_TESTS || RUN_INTEGRATION_TESTS )); then - abort "--run-test must be used alone" - fi - # If this is a presubmit run, but a documentation-only PR, don't run the test - if (( IS_PRESUBMIT && IS_DOCUMENTATION_PR )); then - header "Documentation only PR, skipping running custom test" - exit 0 - fi - ${TEST_TO_RUN} || failed=1 - fi - - run_build_tests || failed=1 - # If PRESUBMIT_TEST_FAIL_FAST is set to true, don't run unit tests if build tests failed - if (( ! PRESUBMIT_TEST_FAIL_FAST )) || (( ! failed )); then - run_unit_tests || failed=1 - fi - # If PRESUBMIT_TEST_FAIL_FAST is set to true, don't run integration tests if build/unit tests failed - if (( ! PRESUBMIT_TEST_FAIL_FAST )) || (( ! failed )); then - run_integration_tests || failed=1 - fi - - exit ${failed} -} diff --git a/vendor/github.com/knative/test-infra/scripts/release.sh b/vendor/github.com/knative/test-infra/scripts/release.sh deleted file mode 100755 index e3f92b47a..000000000 --- a/vendor/github.com/knative/test-infra/scripts/release.sh +++ /dev/null @@ -1,558 +0,0 @@ -#!/bin/bash - -# Copyright 2018 The Knative Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# This is a helper script for Knative release scripts. -# See README.md for instructions on how to use it. - -source $(dirname ${BASH_SOURCE})/library.sh - -# GitHub upstream. -readonly KNATIVE_UPSTREAM="https://github.com/knative/${REPO_NAME}" - -# GCRs for Knative releases. -readonly NIGHTLY_GCR="gcr.io/knative-nightly/github.com/knative/${REPO_NAME}" -readonly RELEASE_GCR="gcr.io/knative-releases/github.com/knative/${REPO_NAME}" - -# Georeplicate images to {us,eu,asia}.gcr.io -readonly GEO_REPLICATION=(us eu asia) - -# Simple banner for logging purposes. -# Parameters: $1 - message to display. -function banner() { - make_banner "@" "$1" -} - -# Tag images in the yaml files if $TAG is not empty. -# $KO_DOCKER_REPO is the registry containing the images to tag with $TAG. -# Parameters: $1..$n - files to parse for images (non .yaml files are ignored). -function tag_images_in_yamls() { - [[ -z ${TAG} ]] && return 0 - local SRC_DIR="${GOPATH}/src/" - local DOCKER_BASE="${KO_DOCKER_REPO}/${REPO_ROOT_DIR/$SRC_DIR}" - local GEO_REGIONS="${GEO_REPLICATION[@]} " - echo "Tagging any images under '${DOCKER_BASE}' with ${TAG}" - for file in $@; do - [[ "${file##*.}" != "yaml" ]] && continue - echo "Inspecting ${file}" - for image in $(grep -o "${DOCKER_BASE}/[a-z\./-]\+@sha256:[0-9a-f]\+" ${file}); do - for region in "" ${GEO_REGIONS// /. }; do - gcloud -q container images add-tag ${image} ${region}${image%%@*}:${TAG} - done - done - done -} - -# Copy the given files to the $RELEASE_GCS_BUCKET bucket's "latest" directory. -# If $TAG is not empty, also copy them to $RELEASE_GCS_BUCKET bucket's "previous" directory. -# Parameters: $1..$n - files to copy. 
-function publish_to_gcs() {
-  function verbose_gsutil_cp {
-    local DEST="gs://${RELEASE_GCS_BUCKET}/$1/"
-    shift
-    echo "Publishing [$@] to ${DEST}"
-    gsutil -m cp $@ ${DEST}
-  }
-  # Before publishing the files, clean up the `latest` dir if it exists.
-  local latest_dir="gs://${RELEASE_GCS_BUCKET}/latest"
-  if [[ -n "$(gsutil ls ${latest_dir} 2> /dev/null)" ]]; then
-    echo "Cleaning up '${latest_dir}' first"
-    gsutil -m rm ${latest_dir}/**
-  fi
-  verbose_gsutil_cp latest $@
-  [[ -n ${TAG} ]] && verbose_gsutil_cp previous/${TAG} $@
-}
-
-# These are global environment variables.
-SKIP_TESTS=0
-PRESUBMIT_TEST_FAIL_FAST=1
-TAG_RELEASE=0
-PUBLISH_RELEASE=0
-PUBLISH_TO_GITHUB=0
-TAG=""
-BUILD_COMMIT_HASH=""
-BUILD_YYYYMMDD=""
-BUILD_TIMESTAMP=""
-BUILD_TAG=""
-RELEASE_VERSION=""
-RELEASE_NOTES=""
-RELEASE_BRANCH=""
-RELEASE_GCS_BUCKET="knative-nightly/${REPO_NAME}"
-KO_FLAGS="-P"
-VALIDATION_TESTS="./test/presubmit-tests.sh"
-YAMLS_TO_PUBLISH=""
-ARTIFACTS_TO_PUBLISH=""
-FROM_NIGHTLY_RELEASE=""
-FROM_NIGHTLY_RELEASE_GCS=""
-export KO_DOCKER_REPO="gcr.io/knative-nightly"
-export GITHUB_TOKEN=""
-
-# Convenience function to run the hub tool.
-# Parameters: $1..$n - arguments to hub.
-function hub_tool() {
-  run_go_tool github.com/github/hub hub $@
-}
-
-# Shortcut to "git push" that handles authentication.
-# Parameters: $1..$n - arguments to "git push <repo>".
-function git_push() {
-  local repo_url="${KNATIVE_UPSTREAM}"
-  [[ -n "${GITHUB_TOKEN}" ]] && repo_url="${repo_url/:\/\//:\/\/${GITHUB_TOKEN}@}"
-  git push ${repo_url} $@
-}
-
-# Return the master version of a release.
-# For example, "v0.2.1" returns "0.2".
-# Parameters: $1 - release version label.
-function master_version() {
-  local release="${1//v/}"
-  local tokens=(${release//\./ })
-  echo "${tokens[0]}.${tokens[1]}"
-}
-
-# Return the release build number of a release.
-# For example, "v0.2.1" returns "1".
-# Parameters: $1 - release version label.
-function release_build_number() {
-  local tokens=(${1//\./ })
-  echo "${tokens[2]}"
-}
-
-# Return the short commit SHA from a release tag.
-# For example, "v20010101-deadbeef" returns "deadbeef".
-function hash_from_tag() {
-  local tokens=(${1//-/ })
-  echo "${tokens[1]}"
-}
-
-# Setup the repository upstream, if not set.
-function setup_upstream() {
-  # hub and checkout need the upstream URL to be set
-  # TODO(adrcunha): Use "git remote get-url" once available on Prow.
-  local upstream="$(git config --get remote.upstream.url)"
-  echo "Remote upstream URL is '${upstream}'"
-  if [[ -z "${upstream}" ]]; then
-    echo "Setting remote upstream URL to '${KNATIVE_UPSTREAM}'"
-    git remote add upstream ${KNATIVE_UPSTREAM}
-  fi
-}
-
-# Fetch the release branch, so we can check it out.
-function setup_branch() {
-  [[ -z "${RELEASE_BRANCH}" ]] && return
-  git fetch ${KNATIVE_UPSTREAM} ${RELEASE_BRANCH}:upstream/${RELEASE_BRANCH}
-}
-
-# Setup version, branch and release notes for an auto release.
-function prepare_auto_release() {
-  echo "Auto release requested"
-  TAG_RELEASE=1
-  PUBLISH_RELEASE=1
-
-  git fetch --all || abort "error fetching branches/tags from remote"
-  local tags="$(git tag | cut -d 'v' -f2 | cut -d '.' 
-f1-2 | sort | uniq)"
-  local branches="$( { (git branch -r | grep upstream/release-) ; (git branch | grep release-); } | cut -d '-' -f2 | sort | uniq)"
-
-  echo "Versions released (from tags): [" ${tags} "]"
-  echo "Versions released (from branches): [" ${branches} "]"
-
-  local release_number=""
-  for i in ${branches}; do
-    release_number="${i}"
-    for j in ${tags}; do
-      if [[ "${i}" == "${j}" ]]; then
-        release_number=""
-      fi
-    done
-  done
-
-  if [[ -z "${release_number}" ]]; then
-    echo "*** No new release will be generated, as no new branches exist"
-    exit 0
-  fi
-
-  RELEASE_VERSION="${release_number}.0"
-  RELEASE_BRANCH="release-${release_number}"
-  echo "Will create release ${RELEASE_VERSION} from branch ${RELEASE_BRANCH}"
-  # If --release-notes not used, add a placeholder
-  if [[ -z "${RELEASE_NOTES}" ]]; then
-    RELEASE_NOTES="$(mktemp)"
-    echo "[add release notes here]" > ${RELEASE_NOTES}
-  fi
-}
-
-# Setup version, branch and release notes for a "dot" release.
-function prepare_dot_release() {
-  echo "Dot release requested"
-  TAG_RELEASE=1
-  PUBLISH_RELEASE=1
-  # List latest release
-  local releases # don't combine with the line below, or $? will be 0
-  releases="$(hub_tool release)"
-  [[ $? -eq 0 ]] || abort "cannot list releases"
-  # If --release-branch passed, restrict to that release
-  if [[ -n "${RELEASE_BRANCH}" ]]; then
-    local version_filter="v${RELEASE_BRANCH##release-}"
-    echo "Dot release will be generated for ${version_filter}"
-    releases="$(echo "${releases}" | grep ^${version_filter})"
-  fi
-  local last_version="$(echo "${releases}" | grep '^v[0-9]\+\.[0-9]\+\.[0-9]\+$' | sort -r | head -1)"
-  [[ -n "${last_version}" ]] || abort "no previous release exists"
-  if [[ -z "${RELEASE_BRANCH}" ]]; then
-    echo "Last release is ${last_version}"
-    # Determine branch
-    local major_minor_version="$(master_version ${last_version})"
-    RELEASE_BRANCH="release-${major_minor_version}"
-    echo "Last release branch is ${RELEASE_BRANCH}"
-  fi
-  # Ensure there are new commits in the branch, otherwise we don't create a new release
-  setup_branch
-  local last_release_commit="$(git rev-list -n 1 ${last_version})"
-  local release_branch_commit="$(git rev-list -n 1 upstream/${RELEASE_BRANCH})"
-  [[ -n "${last_release_commit}" ]] || abort "cannot get last release commit"
-  [[ -n "${release_branch_commit}" ]] || abort "cannot get release branch last commit"
-  if [[ "${last_release_commit}" == "${release_branch_commit}" ]]; then
-    echo "*** Branch ${RELEASE_BRANCH} is at commit ${release_branch_commit}"
-    echo "*** Branch ${RELEASE_BRANCH} has no new cherry-picks since release ${last_version}"
-    echo "*** No dot release will be generated, as no changes exist"
-    exit 0
-  fi
-  # Create new release version number
-  local last_build="$(release_build_number ${last_version})"
-  RELEASE_VERSION="${major_minor_version}.$(( last_build + 1 ))"
-  echo "Will create release ${RELEASE_VERSION} at commit ${release_branch_commit}"
-  # If --release-notes not used, copy from the latest release
-  if [[ -z "${RELEASE_NOTES}" ]]; then
-    RELEASE_NOTES="$(mktemp)"
-    hub_tool release show -f "%b" ${last_version} > ${RELEASE_NOTES}
-    echo "Release notes from ${last_version} copied to ${RELEASE_NOTES}"
-  fi
-}
-
-# Setup source nightly image for a release.
-function prepare_from_nightly_release() { - echo "Release from nightly requested" - SKIP_TESTS=1 - if [[ "${FROM_NIGHTLY_RELEASE}" == "latest" ]]; then - echo "Finding the latest nightly release" - find_latest_nightly "${NIGHTLY_GCR}" || abort "cannot find the latest nightly release" - echo "Latest nightly is ${FROM_NIGHTLY_RELEASE}" - fi - readonly FROM_NIGHTLY_RELEASE_GCS="gs://knative-nightly/${REPO_NAME}/previous/${FROM_NIGHTLY_RELEASE}" - gsutil ls -d "${FROM_NIGHTLY_RELEASE_GCS}" > /dev/null \ - || abort "nightly release ${FROM_NIGHTLY_RELEASE} doesn't exist" -} - -# Build a release from an existing nightly one. -function build_from_nightly_release() { - banner "Building the release" - echo "Fetching manifests from nightly" - local yamls_dir="$(mktemp -d)" - gsutil -m cp -r "${FROM_NIGHTLY_RELEASE_GCS}/*" "${yamls_dir}" || abort "error fetching manifests" - # Update references to release GCR - for yaml in ${yamls_dir}/*.yaml; do - sed -i -e "s#${NIGHTLY_GCR}#${RELEASE_GCR}#" "${yaml}" - done - ARTIFACTS_TO_PUBLISH="$(find ${yamls_dir} -name '*.yaml' -printf '%p ')" - echo "Copying nightly images" - copy_nightly_images_to_release_gcr "${NIGHTLY_GCR}" "${FROM_NIGHTLY_RELEASE}" - # Create a release branch from the nightly release tag. - local commit="$(hash_from_tag ${FROM_NIGHTLY_RELEASE})" - echo "Creating release branch ${RELEASE_BRANCH} at commit ${commit}" - git checkout -b ${RELEASE_BRANCH} ${commit} || abort "cannot create branch" - git_push upstream ${RELEASE_BRANCH} || abort "cannot push branch" -} - -# Build a release from source. -function build_from_source() { - run_validation_tests ${VALIDATION_TESTS} - banner "Building the release" - build_release - # Do not use `||` above or any error will be swallowed. - if [[ $? -ne 0 ]]; then - abort "error building the release" - fi -} - -# Copy tagged images from the nightly GCR to the release GCR, tagging them 'latest'. -# This is a recursive function, first call must pass $NIGHTLY_GCR as first parameter. -# Parameters: $1 - GCR to recurse into. -# $2 - tag to be used to select images to copy. -function copy_nightly_images_to_release_gcr() { - for entry in $(gcloud --format="value(name)" container images list --repository="$1"); do - copy_nightly_images_to_release_gcr "${entry}" "$2" - # Copy each image with the given nightly tag - for x in $(gcloud --format="value(tags)" container images list-tags "${entry}" --filter="tags=$2" --limit=1); do - local path="${entry/${NIGHTLY_GCR}}" # Image "path" (remove GCR part) - local dst="${RELEASE_GCR}${path}:latest" - gcloud container images add-tag "${entry}:$2" "${dst}" || abort "error copying image" - done - done -} - -# Recurse into GCR and find the nightly tag of the first `latest` image found. -# Parameters: $1 - GCR to recurse into. -function find_latest_nightly() { - for entry in $(gcloud --format="value(name)" container images list --repository="$1"); do - find_latest_nightly "${entry}" && return 0 - for tag in $(gcloud --format="value(tags)" container images list-tags "${entry}" \ - --filter="tags=latest" --limit=1); do - local tags=( ${tag//,/ } ) - # Skip if more than one nightly tag, as we don't know what's the latest. - if [[ ${#tags[@]} -eq 2 ]]; then - local nightly_tag="${tags[@]/latest}" # Remove 'latest' tag - FROM_NIGHTLY_RELEASE="${nightly_tag// /}" # Remove spaces - return 0 - fi - done - done - return 1 -} - -# Parses flags and sets environment variables accordingly. 
-function parse_flags() { - local has_gcr_flag=0 - local has_gcs_flag=0 - local is_dot_release=0 - local is_auto_release=0 - - cd ${REPO_ROOT_DIR} - while [[ $# -ne 0 ]]; do - local parameter=$1 - case ${parameter} in - --skip-tests) SKIP_TESTS=1 ;; - --tag-release) TAG_RELEASE=1 ;; - --notag-release) TAG_RELEASE=0 ;; - --publish) PUBLISH_RELEASE=1 ;; - --nopublish) PUBLISH_RELEASE=0 ;; - --dot-release) is_dot_release=1 ;; - --auto-release) is_auto_release=1 ;; - --from-latest-nightly) FROM_NIGHTLY_RELEASE=latest ;; - *) - [[ $# -ge 2 ]] || abort "missing parameter after $1" - shift - case ${parameter} in - --github-token) - [[ ! -f "$1" ]] && abort "file $1 doesn't exist" - # Remove any trailing newline/space from token - GITHUB_TOKEN="$(echo -n $(cat $1))" - [[ -n "${GITHUB_TOKEN}" ]] || abort "file $1 is empty" - ;; - --release-gcr) - KO_DOCKER_REPO=$1 - has_gcr_flag=1 - ;; - --release-gcs) - RELEASE_GCS_BUCKET=$1 - has_gcs_flag=1 - ;; - --version) - [[ $1 =~ ^[0-9]+\.[0-9]+\.[0-9]+$ ]] || abort "version format must be '[0-9].[0-9].[0-9]'" - RELEASE_VERSION=$1 - ;; - --branch) - [[ $1 =~ ^release-[0-9]+\.[0-9]+$ ]] || abort "branch name must be 'release-[0-9].[0-9]'" - RELEASE_BRANCH=$1 - ;; - --release-notes) - [[ ! -f "$1" ]] && abort "file $1 doesn't exist" - RELEASE_NOTES=$1 - ;; - --from-nightly) - [[ $1 =~ ^v[0-9]+-[0-9a-f]+$ ]] || abort "nightly tag must be 'vYYYYMMDD-commithash'" - FROM_NIGHTLY_RELEASE=$1 - ;; - *) abort "unknown option ${parameter}" ;; - esac - esac - shift - done - - # Do auto release unless release is forced - if (( is_auto_release )); then - (( is_dot_release )) && abort "cannot have both --dot-release and --auto-release set simultaneously" - [[ -n "${RELEASE_VERSION}" ]] && abort "cannot have both --version and --auto-release set simultaneously" - [[ -n "${RELEASE_BRANCH}" ]] && abort "cannot have both --branch and --auto-release set simultaneously" - [[ -n "${FROM_NIGHTLY_RELEASE}" ]] && abort "cannot have --auto-release with a nightly source" - setup_upstream - prepare_auto_release - fi - - # Setup source nightly image - if [[ -n "${FROM_NIGHTLY_RELEASE}" ]]; then - (( is_dot_release )) && abort "dot releases are built from source" - [[ -z "${RELEASE_VERSION}" ]] && abort "release version must be specified with --version" - # TODO(adrcunha): "dot" releases from release branches require releasing nightlies - # for such branches, which we don't do yet. - [[ "${RELEASE_VERSION}" =~ ^[0-9]+\.[0-9]+\.0$ ]] || abort "version format must be 'X.Y.0'" - RELEASE_BRANCH="release-$(master_version ${RELEASE_VERSION})" - prepare_from_nightly_release - setup_upstream - fi - - # Setup dot releases - if (( is_dot_release )); then - setup_upstream - prepare_dot_release - fi - - # Update KO_DOCKER_REPO and KO_FLAGS if we're not publishing. - if (( ! 
PUBLISH_RELEASE )); then - (( has_gcr_flag )) && echo "Not publishing the release, GCR flag is ignored" - (( has_gcs_flag )) && echo "Not publishing the release, GCS flag is ignored" - KO_DOCKER_REPO="ko.local" - KO_FLAGS="-L ${KO_FLAGS}" - RELEASE_GCS_BUCKET="" - fi - - # Get the commit, excluding any tags but keeping the "dirty" flag - BUILD_COMMIT_HASH="$(git describe --always --dirty --match '^$')" - [[ -n "${BUILD_COMMIT_HASH}" ]] || abort "error getting the current commit" - BUILD_YYYYMMDD="$(date -u +%Y%m%d)" - BUILD_TIMESTAMP="$(date -u '+%Y-%m-%d %H:%M:%S')" - BUILD_TAG="v${BUILD_YYYYMMDD}-${BUILD_COMMIT_HASH}" - - (( TAG_RELEASE )) && TAG="${BUILD_TAG}" - [[ -n "${RELEASE_VERSION}" ]] && TAG="v${RELEASE_VERSION}" - [[ -n "${RELEASE_VERSION}" && -n "${RELEASE_BRANCH}" ]] && (( PUBLISH_RELEASE )) && PUBLISH_TO_GITHUB=1 - - readonly BUILD_COMMIT_HASH - readonly BUILD_YYYYMMDD - readonly BUILD_TIMESTAMP - readonly BUILD_TAG - readonly SKIP_TESTS - readonly TAG_RELEASE - readonly PUBLISH_RELEASE - readonly PUBLISH_TO_GITHUB - readonly TAG - readonly RELEASE_VERSION - readonly RELEASE_NOTES - readonly RELEASE_BRANCH - readonly RELEASE_GCS_BUCKET - readonly KO_DOCKER_REPO - readonly VALIDATION_TESTS - readonly FROM_NIGHTLY_RELEASE -} - -# Run tests (unless --skip-tests was passed). Conveniently displays a banner indicating so. -# Parameters: $1 - executable that runs the tests. -function run_validation_tests() { - if (( ! SKIP_TESTS )); then - banner "Running release validation tests" - # Run tests. - if ! $1; then - banner "Release validation tests failed, aborting" - exit 1 - fi - fi -} - -# Publishes the generated artifacts to GCS, GitHub, etc. -# Parameters: $1..$n - files to add to the release. -function publish_artifacts() { - (( ! PUBLISH_RELEASE )) && return - tag_images_in_yamls ${ARTIFACTS_TO_PUBLISH} - publish_to_gcs ${ARTIFACTS_TO_PUBLISH} - publish_to_github ${ARTIFACTS_TO_PUBLISH} - banner "New release published successfully" -} - -# Entry point for a release script. -function main() { - function_exists build_release || abort "function 'build_release()' not defined" - [[ -x ${VALIDATION_TESTS} ]] || abort "test script '${VALIDATION_TESTS}' doesn't exist" - parse_flags $@ - # Log what will be done and where. 
- banner "Release configuration" - echo "- gcloud user: $(gcloud config get-value core/account)" - echo "- Go path: ${GOPATH}" - echo "- Repository root: ${REPO_ROOT_DIR}" - echo "- Destination GCR: ${KO_DOCKER_REPO}" - (( SKIP_TESTS )) && echo "- Tests will NOT be run" || echo "- Tests will be run" - if (( TAG_RELEASE )); then - echo "- Artifacts will be tagged '${TAG}'" - else - echo "- Artifacts WILL NOT be tagged" - fi - if (( PUBLISH_RELEASE )); then - echo "- Release WILL BE published to '${RELEASE_GCS_BUCKET}'" - else - echo "- Release will not be published" - fi - if (( PUBLISH_TO_GITHUB )); then - echo "- Release WILL BE published to GitHub" - fi - if [[ -n "${FROM_NIGHTLY_RELEASE}" ]]; then - echo "- Release will be A COPY OF '${FROM_NIGHTLY_RELEASE}' nightly" - else - echo "- Release will be BUILT FROM SOURCE" - [[ -n "${RELEASE_BRANCH}" ]] && echo "- Release will be built from branch '${RELEASE_BRANCH}'" - fi - [[ -n "${RELEASE_NOTES}" ]] && echo "- Release notes are generated from '${RELEASE_NOTES}'" - - # Checkout specific branch, if necessary - if [[ -n "${RELEASE_BRANCH}" && -z "${FROM_NIGHTLY_RELEASE}" ]]; then - setup_upstream - setup_branch - git checkout upstream/${RELEASE_BRANCH} || abort "cannot checkout branch ${RELEASE_BRANCH}" - fi - - if [[ -n "${FROM_NIGHTLY_RELEASE}" ]]; then - build_from_nightly_release - else - set -e -o pipefail - build_from_source - set +e +o pipefail - fi - # TODO(adrcunha): Remove once all repos use ARTIFACTS_TO_PUBLISH. - [[ -z "${ARTIFACTS_TO_PUBLISH}" ]] && ARTIFACTS_TO_PUBLISH="${YAMLS_TO_PUBLISH}" - [[ -z "${ARTIFACTS_TO_PUBLISH}" ]] && abort "no artifacts were generated" - # Ensure no empty file will be published. - for artifact in ${ARTIFACTS_TO_PUBLISH}; do - [[ -s ${artifact} ]] || abort "Artifact ${artifact} is empty" - done - echo "New release built successfully" - publish_artifacts -} - -# Publishes a new release on GitHub, also git tagging it (unless this is not a versioned release). -# Parameters: $1..$n - files to add to the release. -function publish_to_github() { - (( PUBLISH_TO_GITHUB )) || return 0 - local title="${REPO_NAME_FORMATTED} release ${TAG}" - local attachments=() - local description="$(mktemp)" - local attachments_dir="$(mktemp -d)" - local commitish="" - # Copy files to a separate dir - for artifact in $@; do - cp ${artifact} ${attachments_dir}/ - attachments+=("--attach=${artifact}#$(basename ${artifact})") - done - echo -e "${title}\n" > ${description} - if [[ -n "${RELEASE_NOTES}" ]]; then - cat ${RELEASE_NOTES} >> ${description} - fi - git tag -a ${TAG} -m "${title}" - git_push tag ${TAG} - - [[ -n "${RELEASE_BRANCH}" ]] && commitish="--commitish=${RELEASE_BRANCH}" - hub_tool release create \ - --prerelease \ - ${attachments[@]} \ - --file=${description} \ - ${commitish} \ - ${TAG} -} diff --git a/vendor/github.com/knative/test-infra/tools/dep-collector/README.md b/vendor/github.com/knative/test-infra/tools/dep-collector/README.md deleted file mode 100644 index 418eef9d3..000000000 --- a/vendor/github.com/knative/test-infra/tools/dep-collector/README.md +++ /dev/null @@ -1,91 +0,0 @@ -# dep-collector - -`dep-collector` is a tool for gathering up a collection of licenses for Go -dependencies that have been pulled into the idiomatic `vendor/` directory. The -resulting file from running `dep-collector` is intended for inclusion in -container images to respect the licenses of the included software. 
- -## Basic Usage - -You can run `dep-collector` on one or more Go import paths as entrypoints, and -it will: - -1. Walk the transitive dependencies to identify vendored software packages, -1. Search for licenses for each vendored dependency, -1. Dump a file containing the licenses for each vendored import. - -For example (single import path): - -```shell -$ dep-collector . -=========================================================== -Import: github.com/mattmoor/dep-collector/vendor/github.com/google/licenseclassifier - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ -... - -``` - -For example (multiple import paths): - -```shell -$ dep-collector ./cmd/controller ./cmd/sleeper - -=========================================================== -Import: github.com/mattmoor/warm-image/vendor/cloud.google.com/go - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ -``` - -## CSV Usage - -You can also run `dep-collector` in a mode that produces CSV output, including -basic classification of the license. - -> In order to run dep-collector in this mode, you must first run: go get -> github.com/google/licenseclassifier - -For example: - -```shell -$ dep-collector -csv . -github.com/google/licenseclassifier,Static,,https://github.com/mattmoor/dep-collector/blob/master/vendor/github.com/google/licenseclassifier/LICENSE,Apache-2.0 -github.com/google/licenseclassifier/stringclassifier,Static,,https://github.com/mattmoor/dep-collector/blob/master/vendor/github.com/google/licenseclassifier/stringclassifier/LICENSE,Apache-2.0 -github.com/sergi/go-diff,Static,,https://github.com/mattmoor/dep-collector/blob/master/vendor/github.com/sergi/go-diff/LICENSE,MIT - -``` - -The columns here are: - -- Import Path, -- How the dependency is linked in (always reports "static"), -- A column for whether any modifications have been made (always empty), -- The URL by which to access the license file (assumes `master`), -- A classification of what license this is - ([using this](https://github.com/google/licenseclassifier)). - -## Check mode - -`dep-collector` also includes a mode that will check for "forbidden" licenses. - -> In order to run dep-collector in this mode, you must first run: go get -> github.com/google/licenseclassifier - -For example (failing): - -```shell -$ dep-collector -check ./foo/bar/baz -2018/07/20 22:01:29 Error checking license collection: Errors validating licenses: -Found matching forbidden license in "foo.io/bar/vendor/github.com/BurntSushi/toml":WTFPL -``` - -For example (passing): - -```shell -$ dep-collector -check . -2018/07/20 22:29:09 No errors found. -``` diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE b/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE new file mode 100644 index 000000000..8dada3eda --- /dev/null +++ b/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE b/vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE new file mode 100644 index 000000000..5d8cb5b72 --- /dev/null +++ b/vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE @@ -0,0 +1 @@ +Copyright 2012 Matt T. Proud (matt.proud@gmail.com) diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go new file mode 100644 index 000000000..258c0636a --- /dev/null +++ b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go @@ -0,0 +1,75 @@ +// Copyright 2013 Matt T. Proud +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pbutil + +import ( + "encoding/binary" + "errors" + "io" + + "github.com/golang/protobuf/proto" +) + +var errInvalidVarint = errors.New("invalid varint32 encountered") + +// ReadDelimited decodes a message from the provided length-delimited stream, +// where the length is encoded as 32-bit varint prefix to the message body. +// It returns the total number of bytes read and any applicable error. This is +// roughly equivalent to the companion Java API's +// MessageLite#parseDelimitedFrom. As per the reader contract, this function +// calls r.Read repeatedly as required until exactly one message including its +// prefix is read and decoded (or an error has occurred). The function never +// reads more bytes from the stream than required. The function never returns +// an error if a message has been read and decoded correctly, even if the end +// of the stream has been reached in doing so. In that case, any subsequent +// calls return (0, io.EOF). +func ReadDelimited(r io.Reader, m proto.Message) (n int, err error) { + // Per AbstractParser#parsePartialDelimitedFrom with + // CodedInputStream#readRawVarint32. + var headerBuf [binary.MaxVarintLen32]byte + var bytesRead, varIntBytes int + var messageLength uint64 + for varIntBytes == 0 { // i.e. no varint has been decoded yet. + if bytesRead >= len(headerBuf) { + return bytesRead, errInvalidVarint + } + // We have to read byte by byte here to avoid reading more bytes + // than required. Each read byte is appended to what we have + // read before. + newBytesRead, err := r.Read(headerBuf[bytesRead : bytesRead+1]) + if newBytesRead == 0 { + if err != nil { + return bytesRead, err + } + // A Reader should not return (0, nil), but if it does, + // it should be treated as no-op (according to the + // Reader contract). So let's go on... + continue + } + bytesRead += newBytesRead + // Now present everything read so far to the varint decoder and + // see if a varint can be decoded already. 
+		messageLength, varIntBytes = proto.DecodeVarint(headerBuf[:bytesRead])
+	}
+
+	messageBuf := make([]byte, messageLength)
+	newBytesRead, err := io.ReadFull(r, messageBuf)
+	bytesRead += newBytesRead
+	if err != nil {
+		return bytesRead, err
+	}
+
+	return bytesRead, proto.Unmarshal(messageBuf, m)
+}
diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go
new file mode 100644
index 000000000..c318385cb
--- /dev/null
+++ b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go
@@ -0,0 +1,16 @@
+// Copyright 2013 Matt T. Proud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package pbutil provides record length-delimited Protocol Buffer streaming.
+package pbutil
diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go
new file mode 100644
index 000000000..8fb59ad22
--- /dev/null
+++ b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go
@@ -0,0 +1,46 @@
+// Copyright 2013 Matt T. Proud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pbutil
+
+import (
+	"encoding/binary"
+	"io"
+
+	"github.com/golang/protobuf/proto"
+)
+
+// WriteDelimited encodes and writes a message to the provided writer, prefixed
+// with a 32-bit varint indicating the length of the encoded message, producing
+// a length-delimited record stream, which can be used to chain together
+// encoded messages of the same type in a file. It returns the total
+// number of bytes written and any applicable error. This is roughly
+// equivalent to the companion Java API's MessageLite#writeDelimitedTo.
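Together with `ReadDelimited` above, `WriteDelimited` (the function itself follows below) gives a complete framing scheme: each record is a uvarint length prefix followed by the marshaled message bytes. As a usage sketch, assuming the `StringValue` wrapper from `github.com/golang/protobuf/ptypes/wrappers` as a stand-in `proto.Message`:

```go
package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/golang/protobuf/ptypes/wrappers"
	"github.com/matttproud/golang_protobuf_extensions/pbutil"
)

func main() {
	var buf bytes.Buffer

	// Append two length-delimited records to the same stream.
	for _, s := range []string{"hello", "world"} {
		if _, err := pbutil.WriteDelimited(&buf, &wrappers.StringValue{Value: s}); err != nil {
			log.Fatal(err)
		}
	}

	// Read records back until the stream is exhausted (io.EOF).
	for {
		msg := &wrappers.StringValue{}
		if _, err := pbutil.ReadDelimited(&buf, msg); err != nil {
			break
		}
		fmt.Println(msg.Value)
	}
}
```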
+func WriteDelimited(w io.Writer, m proto.Message) (n int, err error) { + buffer, err := proto.Marshal(m) + if err != nil { + return 0, err + } + + var buf [binary.MaxVarintLen32]byte + encodedLength := binary.PutUvarint(buf[:], uint64(len(buffer))) + + sync, err := w.Write(buf[:encodedLength]) + if err != nil { + return sync, err + } + + n, err = w.Write(buffer) + return n + sync, err +} diff --git a/vendor/github.com/petar/GoLLRB/AUTHORS b/vendor/github.com/petar/GoLLRB/AUTHORS new file mode 100644 index 000000000..78d1de495 --- /dev/null +++ b/vendor/github.com/petar/GoLLRB/AUTHORS @@ -0,0 +1,4 @@ +Petar Maymounkov +Vadim Vygonets +Ian Smith +Martin Bruse diff --git a/vendor/github.com/petar/GoLLRB/LICENSE b/vendor/github.com/petar/GoLLRB/LICENSE new file mode 100644 index 000000000..b75312c78 --- /dev/null +++ b/vendor/github.com/petar/GoLLRB/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2010, Petar Maymounkov +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + +(*) Redistributions of source code must retain the above copyright notice, this list +of conditions and the following disclaimer. + +(*) Redistributions in binary form must reproduce the above copyright notice, this +list of conditions and the following disclaimer in the documentation and/or +other materials provided with the distribution. + +(*) Neither the name of Petar Maymounkov nor the names of its contributors may be +used to endorse or promote products derived from this software without specific +prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/petar/GoLLRB/llrb/avgvar.go b/vendor/github.com/petar/GoLLRB/llrb/avgvar.go new file mode 100644 index 000000000..2d7e2a326 --- /dev/null +++ b/vendor/github.com/petar/GoLLRB/llrb/avgvar.go @@ -0,0 +1,39 @@ +// Copyright 2010 Petar Maymounkov. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package llrb + +import "math" + +// avgVar maintains the average and variance of a stream of numbers +// in a space-efficient manner. 
+type avgVar struct {
+	count      int64
+	sum, sumsq float64
+}
+
+func (av *avgVar) Init() {
+	av.count = 0
+	av.sum = 0.0
+	av.sumsq = 0.0
+}
+
+func (av *avgVar) Add(sample float64) {
+	av.count++
+	av.sum += sample
+	av.sumsq += sample * sample
+}
+
+func (av *avgVar) GetCount() int64 { return av.count }
+
+func (av *avgVar) GetAvg() float64 { return av.sum / float64(av.count) }
+
+func (av *avgVar) GetTotal() float64 { return av.sum }
+
+func (av *avgVar) GetVar() float64 {
+	a := av.GetAvg()
+	return av.sumsq/float64(av.count) - a*a
+}
+
+func (av *avgVar) GetStdDev() float64 { return math.Sqrt(av.GetVar()) }
diff --git a/vendor/github.com/petar/GoLLRB/llrb/iterator.go b/vendor/github.com/petar/GoLLRB/llrb/iterator.go
new file mode 100644
index 000000000..3449253c2
--- /dev/null
+++ b/vendor/github.com/petar/GoLLRB/llrb/iterator.go
@@ -0,0 +1,95 @@
+package llrb
+
+// ItemIterator is called once per item visited by a traversal; returning
+// false stops the traversal early.
+type ItemIterator func(i Item) bool
+
+//func (t *Tree) Ascend(iterator ItemIterator) {
+//	t.AscendGreaterOrEqual(Inf(-1), iterator)
+//}
+
+// AscendRange calls iterator, in ascending order, for each element x
+// satisfying greaterOrEqual <= x < lessThan.
+func (t *LLRB) AscendRange(greaterOrEqual, lessThan Item, iterator ItemIterator) {
+	t.ascendRange(t.root, greaterOrEqual, lessThan, iterator)
+}
+
+func (t *LLRB) ascendRange(h *Node, inf, sup Item, iterator ItemIterator) bool {
+	if h == nil {
+		return true
+	}
+	if !less(h.Item, sup) {
+		return t.ascendRange(h.Left, inf, sup, iterator)
+	}
+	if less(h.Item, inf) {
+		return t.ascendRange(h.Right, inf, sup, iterator)
+	}
+
+	if !t.ascendRange(h.Left, inf, sup, iterator) {
+		return false
+	}
+	if !iterator(h.Item) {
+		return false
+	}
+	return t.ascendRange(h.Right, inf, sup, iterator)
+}
+
+// AscendGreaterOrEqual will call iterator once for each element greater than
+// or equal to pivot in ascending order. It will stop whenever the iterator
+// returns false.
+func (t *LLRB) AscendGreaterOrEqual(pivot Item, iterator ItemIterator) {
+	t.ascendGreaterOrEqual(t.root, pivot, iterator)
+}
+
+func (t *LLRB) ascendGreaterOrEqual(h *Node, pivot Item, iterator ItemIterator) bool {
+	if h == nil {
+		return true
+	}
+	if !less(h.Item, pivot) {
+		if !t.ascendGreaterOrEqual(h.Left, pivot, iterator) {
+			return false
+		}
+		if !iterator(h.Item) {
+			return false
+		}
+	}
+	return t.ascendGreaterOrEqual(h.Right, pivot, iterator)
+}
+
+// AscendLessThan will call iterator once for each element lower than
+// pivot in ascending order. It will stop whenever the iterator returns false.
+func (t *LLRB) AscendLessThan(pivot Item, iterator ItemIterator) {
+	t.ascendLessThan(t.root, pivot, iterator)
+}
+
+func (t *LLRB) ascendLessThan(h *Node, pivot Item, iterator ItemIterator) bool {
+	if h == nil {
+		return true
+	}
+	if !t.ascendLessThan(h.Left, pivot, iterator) {
+		return false
+	}
+	if less(h.Item, pivot) {
+		if !iterator(h.Item) {
+			return false
+		}
+		return t.ascendLessThan(h.Right, pivot, iterator)
+	}
+	return true
+}
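These ascend/descend helpers are the package's only traversal API; there is no cursor or iterator type. A minimal sketch of a bounded in-order walk, using the `llrb.Int` convenience type that `util.go` (further down in this patch) provides:

```go
package main

import (
	"fmt"

	"github.com/petar/GoLLRB/llrb"
)

func main() {
	t := llrb.New()
	for _, v := range []llrb.Int{5, 1, 9, 3, 7} {
		t.ReplaceOrInsert(v)
	}

	// Visit every item x with 3 <= x < 9, in ascending order.
	// Returning false from the iterator would stop the walk early.
	t.AscendRange(llrb.Int(3), llrb.Int(9), func(i llrb.Item) bool {
		fmt.Println(int(i.(llrb.Int))) // 3, 5, 7
		return true
	})
}
```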
+// DescendLessOrEqual will call iterator once for each element less than or
+// equal to the pivot, in descending order. It will stop whenever the iterator
+// returns false.
+func (t *LLRB) DescendLessOrEqual(pivot Item, iterator ItemIterator) {
+	t.descendLessOrEqual(t.root, pivot, iterator)
+}
+
+func (t *LLRB) descendLessOrEqual(h *Node, pivot Item, iterator ItemIterator) bool {
+	if h == nil {
+		return true
+	}
+	if less(h.Item, pivot) || !less(pivot, h.Item) {
+		if !t.descendLessOrEqual(h.Right, pivot, iterator) {
+			return false
+		}
+		if !iterator(h.Item) {
+			return false
+		}
+	}
+	return t.descendLessOrEqual(h.Left, pivot, iterator)
+}
diff --git a/vendor/github.com/petar/GoLLRB/llrb/llrb-stats.go b/vendor/github.com/petar/GoLLRB/llrb/llrb-stats.go
new file mode 100644
index 000000000..592bc9b2b
--- /dev/null
+++ b/vendor/github.com/petar/GoLLRB/llrb/llrb-stats.go
@@ -0,0 +1,46 @@
+// Copyright 2010 Petar Maymounkov. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package llrb
+
+// GetHeight returns an item in the tree with key @key, and its height in the tree.
+func (t *LLRB) GetHeight(key Item) (result Item, depth int) {
+	return t.getHeight(t.root, key)
+}
+
+func (t *LLRB) getHeight(h *Node, item Item) (Item, int) {
+	if h == nil {
+		return nil, 0
+	}
+	if less(item, h.Item) {
+		result, depth := t.getHeight(h.Left, item)
+		return result, depth + 1
+	}
+	if less(h.Item, item) {
+		result, depth := t.getHeight(h.Right, item)
+		return result, depth + 1
+	}
+	return h.Item, 0
+}
+
+// HeightStats returns the average and standard deviation of the height
+// of elements in the tree.
+func (t *LLRB) HeightStats() (avg, stddev float64) {
+	av := &avgVar{}
+	heightStats(t.root, 0, av)
+	return av.GetAvg(), av.GetStdDev()
+}
+
+func heightStats(h *Node, d int, av *avgVar) {
+	if h == nil {
+		return
+	}
+	av.Add(float64(d))
+	if h.Left != nil {
+		heightStats(h.Left, d+1, av)
+	}
+	if h.Right != nil {
+		heightStats(h.Right, d+1, av)
+	}
+}
diff --git a/vendor/github.com/petar/GoLLRB/llrb/llrb.go b/vendor/github.com/petar/GoLLRB/llrb/llrb.go
new file mode 100644
index 000000000..9edde2fca
--- /dev/null
+++ b/vendor/github.com/petar/GoLLRB/llrb/llrb.go
@@ -0,0 +1,456 @@
+// Copyright 2010 Petar Maymounkov. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// A Left-Leaning Red-Black (LLRB) implementation of 2-3 balanced binary search trees,
+// based on the following work:
+//
+//   http://www.cs.princeton.edu/~rs/talks/LLRB/08Penn.pdf
+//   http://www.cs.princeton.edu/~rs/talks/LLRB/LLRB.pdf
+//   http://www.cs.princeton.edu/~rs/talks/LLRB/Java/RedBlackBST.java
+//
+// 2-3 trees (and the run-time equivalent 2-3-4 trees) are the de facto standard BST
+// algorithms found in implementations of Python, Java, and other libraries. The LLRB
+// implementation of 2-3 trees is a recent improvement on the traditional implementation,
+// observed and documented by Robert Sedgewick.
+//
+package llrb
+
+// LLRB is a Left-Leaning Red-Black (LLRB) implementation of 2-3 trees.
+type LLRB struct {
+	count int
+	root  *Node
+}
+
+// Node is a single tree node, exported so external code can serialize and
+// deserialize trees via Root/SetRoot.
+type Node struct {
+	Item
+	Left, Right *Node // Pointers to left and right child nodes
+	Black       bool  // If set, the color of the link (incoming from the parent) is black
+	// In the LLRB, new nodes are always red, hence the zero-value for node
+}
+
+// Item is the element type stored in the tree; Less must define a total order.
+type Item interface {
+	Less(than Item) bool
+}
+
+// less compares x and y, treating the pinf/ninf sentinels as greater than
+// and smaller than every other item, respectively.
+func less(x, y Item) bool {
+	if x == pinf {
+		return false
+	}
+	if x == ninf {
+		return true
+	}
+	return x.Less(y)
+}
+
+// Inf returns an Item that is "bigger than" any other item, if sign is positive.
+// Otherwise it returns an Item that is "smaller than" any other item.
+func Inf(sign int) Item {
+	if sign == 0 {
+		panic("sign")
+	}
+	if sign > 0 {
+		return pinf
+	}
+	return ninf
+}
+
+var (
+	ninf = nInf{}
+	pinf = pInf{}
+)
+
+type nInf struct{}
+
+func (nInf) Less(Item) bool {
+	return true
+}
+
+type pInf struct{}
+
+func (pInf) Less(Item) bool {
+	return false
+}
+
+// New allocates a new tree.
+func New() *LLRB {
+	return &LLRB{}
+}
+
+// SetRoot sets the root node of the tree.
+// It is intended to be used by functions that deserialize the tree.
+func (t *LLRB) SetRoot(r *Node) {
+	t.root = r
+}
+
+// Root returns the root node of the tree.
+// It is intended to be used by functions that serialize the tree.
+func (t *LLRB) Root() *Node {
+	return t.root
+}
+
+// Len returns the number of nodes in the tree.
+func (t *LLRB) Len() int { return t.count }
+
+// Has returns true if the tree contains an element whose order is the same as that of key.
+func (t *LLRB) Has(key Item) bool {
+	return t.Get(key) != nil
+}
+
+// Get retrieves an element from the tree whose order is the same as that of key.
+func (t *LLRB) Get(key Item) Item {
+	h := t.root
+	for h != nil {
+		switch {
+		case less(key, h.Item):
+			h = h.Left
+		case less(h.Item, key):
+			h = h.Right
+		default:
+			return h.Item
+		}
+	}
+	return nil
+}
+
+// Min returns the minimum element in the tree.
+func (t *LLRB) Min() Item {
+	h := t.root
+	if h == nil {
+		return nil
+	}
+	for h.Left != nil {
+		h = h.Left
+	}
+	return h.Item
+}
+
+// Max returns the maximum element in the tree.
+func (t *LLRB) Max() Item {
+	h := t.root
+	if h == nil {
+		return nil
+	}
+	for h.Right != nil {
+		h = h.Right
+	}
+	return h.Item
+}
+
+// ReplaceOrInsertBulk calls ReplaceOrInsert on each of the given items.
+func (t *LLRB) ReplaceOrInsertBulk(items ...Item) {
+	for _, i := range items {
+		t.ReplaceOrInsert(i)
+	}
+}
+
+// InsertNoReplaceBulk calls InsertNoReplace on each of the given items.
+func (t *LLRB) InsertNoReplaceBulk(items ...Item) {
+	for _, i := range items {
+		t.InsertNoReplace(i)
+	}
+}
+
+// ReplaceOrInsert inserts item into the tree. If an existing
+// element has the same order, it is removed from the tree and returned.
+func (t *LLRB) ReplaceOrInsert(item Item) Item {
+	if item == nil {
+		panic("inserting nil item")
+	}
+	var replaced Item
+	t.root, replaced = t.replaceOrInsert(t.root, item)
+	t.root.Black = true
+	if replaced == nil {
+		t.count++
+	}
+	return replaced
+}
+
+func (t *LLRB) replaceOrInsert(h *Node, item Item) (*Node, Item) {
+	if h == nil {
+		return newNode(item), nil
+	}
+
+	h = walkDownRot23(h)
+
+	var replaced Item
+	if less(item, h.Item) { // BUG
+		h.Left, replaced = t.replaceOrInsert(h.Left, item)
+	} else if less(h.Item, item) {
+		h.Right, replaced = t.replaceOrInsert(h.Right, item)
+	} else {
+		replaced, h.Item = h.Item, item
+	}
+
+	h = walkUpRot23(h)
+
+	return h, replaced
+}
+
+// InsertNoReplace inserts item into the tree. If an existing
+// element has the same order, both elements remain in the tree.
+func (t *LLRB) InsertNoReplace(item Item) {
+	if item == nil {
+		panic("inserting nil item")
+	}
+	t.root = t.insertNoReplace(t.root, item)
+	t.root.Black = true
+	t.count++
+}
+
+func (t *LLRB) insertNoReplace(h *Node, item Item) *Node {
+	if h == nil {
+		return newNode(item)
+	}
+
+	h = walkDownRot23(h)
+
+	if less(item, h.Item) {
+		h.Left = t.insertNoReplace(h.Left, item)
+	} else {
+		h.Right = t.insertNoReplace(h.Right, item)
+	}
+
+	return walkUpRot23(h)
+}
+
+// Rotation driver routines for 2-3 algorithm
+
+func walkDownRot23(h *Node) *Node { return h }
+
+func walkUpRot23(h *Node) *Node {
+	if isRed(h.Right) && !isRed(h.Left) {
+		h = rotateLeft(h)
+	}
+
+	if isRed(h.Left) && isRed(h.Left.Left) {
+		h = rotateRight(h)
+	}
+
+	if isRed(h.Left) && isRed(h.Right) {
+		flip(h)
+	}
+
+	return h
+}
+
+// Rotation driver routines for 2-3-4 algorithm
+
+func walkDownRot234(h *Node) *Node {
+	if isRed(h.Left) && isRed(h.Right) {
+		flip(h)
+	}
+
+	return h
+}
+
+func walkUpRot234(h *Node) *Node {
+	if isRed(h.Right) && !isRed(h.Left) {
+		h = rotateLeft(h)
+	}
+
+	if isRed(h.Left) && isRed(h.Left.Left) {
+		h = rotateRight(h)
+	}
+
+	return h
+}
+
+// DeleteMin deletes the minimum element in the tree and returns the
+// deleted item, or nil otherwise.
+func (t *LLRB) DeleteMin() Item {
+	var deleted Item
+	t.root, deleted = deleteMin(t.root)
+	if t.root != nil {
+		t.root.Black = true
+	}
+	if deleted != nil {
+		t.count--
+	}
+	return deleted
+}
+
+// deleteMin code for LLRB 2-3 trees
+func deleteMin(h *Node) (*Node, Item) {
+	if h == nil {
+		return nil, nil
+	}
+	if h.Left == nil {
+		return nil, h.Item
+	}
+
+	if !isRed(h.Left) && !isRed(h.Left.Left) {
+		h = moveRedLeft(h)
+	}
+
+	var deleted Item
+	h.Left, deleted = deleteMin(h.Left)
+
+	return fixUp(h), deleted
+}
+
+// DeleteMax deletes the maximum element in the tree and returns
+// the deleted item, or nil otherwise.
+func (t *LLRB) DeleteMax() Item {
+	var deleted Item
+	t.root, deleted = deleteMax(t.root)
+	if t.root != nil {
+		t.root.Black = true
+	}
+	if deleted != nil {
+		t.count--
+	}
+	return deleted
+}
+
+func deleteMax(h *Node) (*Node, Item) {
+	if h == nil {
+		return nil, nil
+	}
+	if isRed(h.Left) {
+		h = rotateRight(h)
+	}
+	if h.Right == nil {
+		return nil, h.Item
+	}
+	if !isRed(h.Right) && !isRed(h.Right.Left) {
+		h = moveRedRight(h)
+	}
+	var deleted Item
+	h.Right, deleted = deleteMax(h.Right)
+
+	return fixUp(h), deleted
+}
+
+// Delete deletes an item from the tree whose key equals key.
+// The deleted item is returned; otherwise nil is returned.
+func (t *LLRB) Delete(key Item) Item {
+	var deleted Item
+	t.root, deleted = t.delete(t.root, key)
+	if t.root != nil {
+		t.root.Black = true
+	}
+	if deleted != nil {
+		t.count--
+	}
+	return deleted
+}
+
+func (t *LLRB) delete(h *Node, item Item) (*Node, Item) {
+	var deleted Item
+	if h == nil {
+		return nil, nil
+	}
+	if less(item, h.Item) {
+		if h.Left == nil { // item not present.
Nothing to delete + return h, nil + } + if !isRed(h.Left) && !isRed(h.Left.Left) { + h = moveRedLeft(h) + } + h.Left, deleted = t.delete(h.Left, item) + } else { + if isRed(h.Left) { + h = rotateRight(h) + } + // If @item equals @h.Item and no right children at @h + if !less(h.Item, item) && h.Right == nil { + return nil, h.Item + } + // PETAR: Added 'h.Right != nil' below + if h.Right != nil && !isRed(h.Right) && !isRed(h.Right.Left) { + h = moveRedRight(h) + } + // If @item equals @h.Item, and (from above) 'h.Right != nil' + if !less(h.Item, item) { + var subDeleted Item + h.Right, subDeleted = deleteMin(h.Right) + if subDeleted == nil { + panic("logic") + } + deleted, h.Item = h.Item, subDeleted + } else { // Else, @item is bigger than @h.Item + h.Right, deleted = t.delete(h.Right, item) + } + } + + return fixUp(h), deleted +} + +// Internal node manipulation routines + +func newNode(item Item) *Node { return &Node{Item: item} } + +func isRed(h *Node) bool { + if h == nil { + return false + } + return !h.Black +} + +func rotateLeft(h *Node) *Node { + x := h.Right + if x.Black { + panic("rotating a black link") + } + h.Right = x.Left + x.Left = h + x.Black = h.Black + h.Black = false + return x +} + +func rotateRight(h *Node) *Node { + x := h.Left + if x.Black { + panic("rotating a black link") + } + h.Left = x.Right + x.Right = h + x.Black = h.Black + h.Black = false + return x +} + +// REQUIRE: Left and Right children must be present +func flip(h *Node) { + h.Black = !h.Black + h.Left.Black = !h.Left.Black + h.Right.Black = !h.Right.Black +} + +// REQUIRE: Left and Right children must be present +func moveRedLeft(h *Node) *Node { + flip(h) + if isRed(h.Right.Left) { + h.Right = rotateRight(h.Right) + h = rotateLeft(h) + flip(h) + } + return h +} + +// REQUIRE: Left and Right children must be present +func moveRedRight(h *Node) *Node { + flip(h) + if isRed(h.Left.Left) { + h = rotateRight(h) + flip(h) + } + return h +} + +func fixUp(h *Node) *Node { + if isRed(h.Right) { + h = rotateLeft(h) + } + + if isRed(h.Left) && isRed(h.Left.Left) { + h = rotateRight(h) + } + + if isRed(h.Left) && isRed(h.Right) { + flip(h) + } + + return h +} diff --git a/vendor/github.com/petar/GoLLRB/llrb/util.go b/vendor/github.com/petar/GoLLRB/llrb/util.go new file mode 100644 index 000000000..63dbdb2df --- /dev/null +++ b/vendor/github.com/petar/GoLLRB/llrb/util.go @@ -0,0 +1,17 @@ +// Copyright 2010 Petar Maymounkov. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
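The mutation entry points above differ mainly in how they treat equal keys, which is easy to miss from the signatures alone. A small sketch of the observable behavior, again relying on the `llrb.Int` convenience type from `util.go` just below:

```go
package main

import (
	"fmt"

	"github.com/petar/GoLLRB/llrb"
)

func main() {
	t := llrb.New()

	// ReplaceOrInsert keeps at most one element per key and
	// returns the element it displaced, if any.
	fmt.Println(t.ReplaceOrInsert(llrb.Int(1))) // <nil>
	fmt.Println(t.ReplaceOrInsert(llrb.Int(1))) // 1

	// InsertNoReplace keeps duplicates.
	t.InsertNoReplace(llrb.Int(1))
	fmt.Println(t.Len()) // 2

	// Delete returns the removed item, or nil if the key is absent.
	fmt.Println(t.Delete(llrb.Int(1)))  // 1
	fmt.Println(t.DeleteMin())          // 1 (the remaining duplicate)
	fmt.Println(t.Delete(llrb.Int(42))) // <nil>
}
```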
+ +package llrb + +type Int int + +func (x Int) Less(than Item) bool { + return x < than.(Int) +} + +type String string + +func (x String) Less(than Item) bool { + return x < than.(String) +} diff --git a/vendor/github.com/peterbourgon/diskv/LICENSE b/vendor/github.com/peterbourgon/diskv/LICENSE new file mode 100644 index 000000000..41ce7f16e --- /dev/null +++ b/vendor/github.com/peterbourgon/diskv/LICENSE @@ -0,0 +1,19 @@ +Copyright (c) 2011-2012 Peter Bourgon + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/peterbourgon/diskv/compression.go b/vendor/github.com/peterbourgon/diskv/compression.go new file mode 100644 index 000000000..5192b0273 --- /dev/null +++ b/vendor/github.com/peterbourgon/diskv/compression.go @@ -0,0 +1,64 @@ +package diskv + +import ( + "compress/flate" + "compress/gzip" + "compress/zlib" + "io" +) + +// Compression is an interface that Diskv uses to implement compression of +// data. Writer takes a destination io.Writer and returns a WriteCloser that +// compresses all data written through it. Reader takes a source io.Reader and +// returns a ReadCloser that decompresses all data read through it. You may +// define these methods on your own type, or use one of the NewCompression +// helpers. +type Compression interface { + Writer(dst io.Writer) (io.WriteCloser, error) + Reader(src io.Reader) (io.ReadCloser, error) +} + +// NewGzipCompression returns a Gzip-based Compression. +func NewGzipCompression() Compression { + return NewGzipCompressionLevel(flate.DefaultCompression) +} + +// NewGzipCompressionLevel returns a Gzip-based Compression with the given level. +func NewGzipCompressionLevel(level int) Compression { + return &genericCompression{ + wf: func(w io.Writer) (io.WriteCloser, error) { return gzip.NewWriterLevel(w, level) }, + rf: func(r io.Reader) (io.ReadCloser, error) { return gzip.NewReader(r) }, + } +} + +// NewZlibCompression returns a Zlib-based Compression. +func NewZlibCompression() Compression { + return NewZlibCompressionLevel(flate.DefaultCompression) +} + +// NewZlibCompressionLevel returns a Zlib-based Compression with the given level. +func NewZlibCompressionLevel(level int) Compression { + return NewZlibCompressionLevelDict(level, nil) +} + +// NewZlibCompressionLevelDict returns a Zlib-based Compression with the given +// level, based on the given dictionary. 
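Any of these `Compression` constructors can be dropped into the `Options` struct that `diskv.go` (below) defines; values are then compressed on write and decompressed on read, transparently to the caller. A minimal sketch, with an illustrative base path:

```go
package main

import (
	"fmt"
	"log"

	"github.com/peterbourgon/diskv"
)

func main() {
	d := diskv.New(diskv.Options{
		BasePath:     "example-data",             // illustrative directory
		CacheSizeMax: 1024 * 1024,                // 1 MB read cache
		Compression:  diskv.NewGzipCompression(), // gzip values on disk
	})

	if err := d.Write("alpha", []byte("some value")); err != nil {
		log.Fatal(err)
	}
	v, err := d.Read("alpha") // decompressed transparently
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(v))
}
```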
+func NewZlibCompressionLevelDict(level int, dict []byte) Compression { + return &genericCompression{ + func(w io.Writer) (io.WriteCloser, error) { return zlib.NewWriterLevelDict(w, level, dict) }, + func(r io.Reader) (io.ReadCloser, error) { return zlib.NewReaderDict(r, dict) }, + } +} + +type genericCompression struct { + wf func(w io.Writer) (io.WriteCloser, error) + rf func(r io.Reader) (io.ReadCloser, error) +} + +func (g *genericCompression) Writer(dst io.Writer) (io.WriteCloser, error) { + return g.wf(dst) +} + +func (g *genericCompression) Reader(src io.Reader) (io.ReadCloser, error) { + return g.rf(src) +} diff --git a/vendor/github.com/peterbourgon/diskv/diskv.go b/vendor/github.com/peterbourgon/diskv/diskv.go new file mode 100644 index 000000000..0716da351 --- /dev/null +++ b/vendor/github.com/peterbourgon/diskv/diskv.go @@ -0,0 +1,726 @@ +// Diskv (disk-vee) is a simple, persistent, key-value store. +// It stores all data flatly on the filesystem. + +package diskv + +import ( + "bytes" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + "sync" + "syscall" +) + +const ( + defaultBasePath = "diskv" + defaultFilePerm os.FileMode = 0666 + defaultPathPerm os.FileMode = 0777 +) + +// PathKey represents a string key that has been transformed to +// a directory and file name where the content will eventually +// be stored +type PathKey struct { + Path []string + FileName string + originalKey string +} + +var ( + defaultAdvancedTransform = func(s string) *PathKey { return &PathKey{Path: []string{}, FileName: s} } + defaultInverseTransform = func(pathKey *PathKey) string { return pathKey.FileName } + errCanceled = errors.New("canceled") + errEmptyKey = errors.New("empty key") + errBadKey = errors.New("bad key") + errImportDirectory = errors.New("can't import a directory") +) + +// TransformFunction transforms a key into a slice of strings, with each +// element in the slice representing a directory in the file path where the +// key's entry will eventually be stored. +// +// For example, if TransformFunc transforms "abcdef" to ["ab", "cde", "f"], +// the final location of the data file will be /ab/cde/f/abcdef +type TransformFunction func(s string) []string + +// AdvancedTransformFunction transforms a key into a PathKey. +// +// A PathKey contains a slice of strings, where each element in the slice +// represents a directory in the file path where the key's entry will eventually +// be stored, as well as the filename. +// +// For example, if AdvancedTransformFunc transforms "abcdef/file.txt" to the +// PathKey {Path: ["ab", "cde", "f"], FileName: "file.txt"}, the final location +// of the data file will be /ab/cde/f/file.txt. +// +// You must provide an InverseTransformFunction if you use an +// AdvancedTransformFunction. +type AdvancedTransformFunction func(s string) *PathKey + +// InverseTransformFunction takes a PathKey and converts it back to a Diskv key. +// In effect, it's the opposite of an AdvancedTransformFunction. +type InverseTransformFunction func(pathKey *PathKey) string + +// Options define a set of properties that dictate Diskv behavior. +// All values are optional. +type Options struct { + BasePath string + Transform TransformFunction + AdvancedTransform AdvancedTransformFunction + InverseTransform InverseTransformFunction + CacheSizeMax uint64 // bytes + PathPerm os.FileMode + FilePerm os.FileMode + // If TempDir is set, it will enable filesystem atomic writes by + // writing temporary files to that location before being moved + // to BasePath. 
+ // Note that TempDir MUST be on the same device/partition as + // BasePath. + TempDir string + + Index Index + IndexLess LessFunction + + Compression Compression +} + +// Diskv implements the Diskv interface. You shouldn't construct Diskv +// structures directly; instead, use the New constructor. +type Diskv struct { + Options + mu sync.RWMutex + cache map[string][]byte + cacheSize uint64 +} + +// New returns an initialized Diskv structure, ready to use. +// If the path identified by baseDir already contains data, +// it will be accessible, but not yet cached. +func New(o Options) *Diskv { + if o.BasePath == "" { + o.BasePath = defaultBasePath + } + + if o.AdvancedTransform == nil { + if o.Transform == nil { + o.AdvancedTransform = defaultAdvancedTransform + } else { + o.AdvancedTransform = convertToAdvancedTransform(o.Transform) + } + if o.InverseTransform == nil { + o.InverseTransform = defaultInverseTransform + } + } else { + if o.InverseTransform == nil { + panic("You must provide an InverseTransform function in advanced mode") + } + } + + if o.PathPerm == 0 { + o.PathPerm = defaultPathPerm + } + if o.FilePerm == 0 { + o.FilePerm = defaultFilePerm + } + + d := &Diskv{ + Options: o, + cache: map[string][]byte{}, + cacheSize: 0, + } + + if d.Index != nil && d.IndexLess != nil { + d.Index.Initialize(d.IndexLess, d.Keys(nil)) + } + + return d +} + +// convertToAdvancedTransform takes a classic Transform function and +// converts it to the new AdvancedTransform +func convertToAdvancedTransform(oldFunc func(s string) []string) AdvancedTransformFunction { + return func(s string) *PathKey { + return &PathKey{Path: oldFunc(s), FileName: s} + } +} + +// Write synchronously writes the key-value pair to disk, making it immediately +// available for reads. Write relies on the filesystem to perform an eventual +// sync to physical media. If you need stronger guarantees, see WriteStream. +func (d *Diskv) Write(key string, val []byte) error { + return d.WriteStream(key, bytes.NewReader(val), false) +} + +// WriteString writes a string key-value pair to disk +func (d *Diskv) WriteString(key string, val string) error { + return d.Write(key, []byte(val)) +} + +func (d *Diskv) transform(key string) (pathKey *PathKey) { + pathKey = d.AdvancedTransform(key) + pathKey.originalKey = key + return pathKey +} + +// WriteStream writes the data represented by the io.Reader to the disk, under +// the provided key. If sync is true, WriteStream performs an explicit sync on +// the file as soon as it's written. +// +// bytes.Buffer provides io.Reader semantics for basic data types. +func (d *Diskv) WriteStream(key string, r io.Reader, sync bool) error { + if len(key) <= 0 { + return errEmptyKey + } + + pathKey := d.transform(key) + + // Ensure keys cannot evaluate to paths that would not exist + for _, pathPart := range pathKey.Path { + if strings.ContainsRune(pathPart, os.PathSeparator) { + return errBadKey + } + } + + if strings.ContainsRune(pathKey.FileName, os.PathSeparator) { + return errBadKey + } + + d.mu.Lock() + defer d.mu.Unlock() + + return d.writeStreamWithLock(pathKey, r, sync) +} + +// createKeyFileWithLock either creates the key file directly, or +// creates a temporary file in TempDir if it is set. 
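The `AdvancedTransform`/`InverseTransform` pair documented above must be exact inverses of each other, or keys enumerated from disk will not round-trip. A sketch that maps slash-separated keys onto a matching directory tree (base path illustrative):

```go
package main

import (
	"fmt"
	"log"
	"strings"

	"github.com/peterbourgon/diskv"
)

func main() {
	d := diskv.New(diskv.Options{
		BasePath: "nested-data", // illustrative directory
		// "alpha/beta/gamma.txt" is stored as nested-data/alpha/beta/gamma.txt.
		AdvancedTransform: func(key string) *diskv.PathKey {
			parts := strings.Split(key, "/")
			return &diskv.PathKey{
				Path:     parts[:len(parts)-1],
				FileName: parts[len(parts)-1],
			}
		},
		// Must invert AdvancedTransform exactly.
		InverseTransform: func(pk *diskv.PathKey) string {
			return strings.Join(append(pk.Path, pk.FileName), "/")
		},
	})

	if err := d.Write("alpha/beta/gamma.txt", []byte("hello")); err != nil {
		log.Fatal(err)
	}
	v, _ := d.Read("alpha/beta/gamma.txt")
	fmt.Println(string(v))
}
```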
+func (d *Diskv) createKeyFileWithLock(pathKey *PathKey) (*os.File, error) { + if d.TempDir != "" { + if err := os.MkdirAll(d.TempDir, d.PathPerm); err != nil { + return nil, fmt.Errorf("temp mkdir: %s", err) + } + f, err := ioutil.TempFile(d.TempDir, "") + if err != nil { + return nil, fmt.Errorf("temp file: %s", err) + } + + if err := f.Chmod(d.FilePerm); err != nil { + f.Close() // error deliberately ignored + os.Remove(f.Name()) // error deliberately ignored + return nil, fmt.Errorf("chmod: %s", err) + } + return f, nil + } + + mode := os.O_WRONLY | os.O_CREATE | os.O_TRUNC // overwrite if exists + f, err := os.OpenFile(d.completeFilename(pathKey), mode, d.FilePerm) + if err != nil { + return nil, fmt.Errorf("open file: %s", err) + } + return f, nil +} + +// writeStream does no input validation checking. +func (d *Diskv) writeStreamWithLock(pathKey *PathKey, r io.Reader, sync bool) error { + if err := d.ensurePathWithLock(pathKey); err != nil { + return fmt.Errorf("ensure path: %s", err) + } + + f, err := d.createKeyFileWithLock(pathKey) + if err != nil { + return fmt.Errorf("create key file: %s", err) + } + + wc := io.WriteCloser(&nopWriteCloser{f}) + if d.Compression != nil { + wc, err = d.Compression.Writer(f) + if err != nil { + f.Close() // error deliberately ignored + os.Remove(f.Name()) // error deliberately ignored + return fmt.Errorf("compression writer: %s", err) + } + } + + if _, err := io.Copy(wc, r); err != nil { + f.Close() // error deliberately ignored + os.Remove(f.Name()) // error deliberately ignored + return fmt.Errorf("i/o copy: %s", err) + } + + if err := wc.Close(); err != nil { + f.Close() // error deliberately ignored + os.Remove(f.Name()) // error deliberately ignored + return fmt.Errorf("compression close: %s", err) + } + + if sync { + if err := f.Sync(); err != nil { + f.Close() // error deliberately ignored + os.Remove(f.Name()) // error deliberately ignored + return fmt.Errorf("file sync: %s", err) + } + } + + if err := f.Close(); err != nil { + return fmt.Errorf("file close: %s", err) + } + + fullPath := d.completeFilename(pathKey) + if f.Name() != fullPath { + if err := os.Rename(f.Name(), fullPath); err != nil { + os.Remove(f.Name()) // error deliberately ignored + return fmt.Errorf("rename: %s", err) + } + } + + if d.Index != nil { + d.Index.Insert(pathKey.originalKey) + } + + d.bustCacheWithLock(pathKey.originalKey) // cache only on read + + return nil +} + +// Import imports the source file into diskv under the destination key. If the +// destination key already exists, it's overwritten. If move is true, the +// source file is removed after a successful import. 
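As the comment above `createKeyFileWithLock` notes, setting `TempDir` routes every write through a temporary file that is later renamed into place, which makes each write atomic on POSIX filesystems so long as `TempDir` and `BasePath` are on the same device. A sketch, with both paths illustrative:

```go
package main

import (
	"log"

	"github.com/peterbourgon/diskv"
)

func main() {
	d := diskv.New(diskv.Options{
		BasePath: "atomic-data", // final resting place of values
		TempDir:  "atomic-tmp",  // must share a device/partition with BasePath
	})

	// The value becomes visible under its key only once the temp
	// file has been renamed into place; readers never see a
	// partially written file.
	if err := d.Write("config", []byte(`{"ok":true}`)); err != nil {
		log.Fatal(err)
	}
}
```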
+func (d *Diskv) Import(srcFilename, dstKey string, move bool) (err error) { + if dstKey == "" { + return errEmptyKey + } + + if fi, err := os.Stat(srcFilename); err != nil { + return err + } else if fi.IsDir() { + return errImportDirectory + } + + dstPathKey := d.transform(dstKey) + + d.mu.Lock() + defer d.mu.Unlock() + + if err := d.ensurePathWithLock(dstPathKey); err != nil { + return fmt.Errorf("ensure path: %s", err) + } + + if move { + if err := syscall.Rename(srcFilename, d.completeFilename(dstPathKey)); err == nil { + d.bustCacheWithLock(dstPathKey.originalKey) + return nil + } else if err != syscall.EXDEV { + // If it failed due to being on a different device, fall back to copying + return err + } + } + + f, err := os.Open(srcFilename) + if err != nil { + return err + } + defer f.Close() + err = d.writeStreamWithLock(dstPathKey, f, false) + if err == nil && move { + err = os.Remove(srcFilename) + } + return err +} + +// Read reads the key and returns the value. +// If the key is available in the cache, Read won't touch the disk. +// If the key is not in the cache, Read will have the side-effect of +// lazily caching the value. +func (d *Diskv) Read(key string) ([]byte, error) { + rc, err := d.ReadStream(key, false) + if err != nil { + return []byte{}, err + } + defer rc.Close() + return ioutil.ReadAll(rc) +} + +// ReadString reads the key and returns a string value +// In case of error, an empty string is returned +func (d *Diskv) ReadString(key string) string { + value, _ := d.Read(key) + return string(value) +} + +// ReadStream reads the key and returns the value (data) as an io.ReadCloser. +// If the value is cached from a previous read, and direct is false, +// ReadStream will use the cached value. Otherwise, it will return a handle to +// the file on disk, and cache the data on read. +// +// If direct is true, ReadStream will lazily delete any cached value for the +// key, and return a direct handle to the file on disk. +// +// If compression is enabled, ReadStream taps into the io.Reader stream prior +// to decompression, and caches the compressed data. +func (d *Diskv) ReadStream(key string, direct bool) (io.ReadCloser, error) { + + pathKey := d.transform(key) + d.mu.RLock() + defer d.mu.RUnlock() + + if val, ok := d.cache[key]; ok { + if !direct { + buf := bytes.NewReader(val) + if d.Compression != nil { + return d.Compression.Reader(buf) + } + return ioutil.NopCloser(buf), nil + } + + go func() { + d.mu.Lock() + defer d.mu.Unlock() + d.uncacheWithLock(key, uint64(len(val))) + }() + } + + return d.readWithRLock(pathKey) +} + +// read ignores the cache, and returns an io.ReadCloser representing the +// decompressed data for the given key, streamed from the disk. Clients should +// acquire a read lock on the Diskv and check the cache themselves before +// calling read. 
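`Import` and `ReadStream` cover the bulk paths: moving an existing file into the store under a key, and streaming a value back out without buffering it whole. A sketch (the source file path is illustrative):

```go
package main

import (
	"io"
	"log"
	"os"

	"github.com/peterbourgon/diskv"
)

func main() {
	d := diskv.New(diskv.Options{BasePath: "stream-data"})

	// Move an existing file into the store under key "report".
	if err := d.Import("/tmp/report.txt", "report", true); err != nil {
		log.Fatal(err)
	}

	// direct=true skips (and lazily evicts) the in-memory cache,
	// returning a handle straight to the file on disk.
	rc, err := d.ReadStream("report", true)
	if err != nil {
		log.Fatal(err)
	}
	defer rc.Close()
	if _, err := io.Copy(os.Stdout, rc); err != nil {
		log.Fatal(err)
	}
}
```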
+func (d *Diskv) readWithRLock(pathKey *PathKey) (io.ReadCloser, error) { + filename := d.completeFilename(pathKey) + + fi, err := os.Stat(filename) + if err != nil { + return nil, err + } + if fi.IsDir() { + return nil, os.ErrNotExist + } + + f, err := os.Open(filename) + if err != nil { + return nil, err + } + + var r io.Reader + if d.CacheSizeMax > 0 { + r = newSiphon(f, d, pathKey.originalKey) + } else { + r = &closingReader{f} + } + + var rc = io.ReadCloser(ioutil.NopCloser(r)) + if d.Compression != nil { + rc, err = d.Compression.Reader(r) + if err != nil { + return nil, err + } + } + + return rc, nil +} + +// closingReader provides a Reader that automatically closes the +// embedded ReadCloser when it reaches EOF +type closingReader struct { + rc io.ReadCloser +} + +func (cr closingReader) Read(p []byte) (int, error) { + n, err := cr.rc.Read(p) + if err == io.EOF { + if closeErr := cr.rc.Close(); closeErr != nil { + return n, closeErr // close must succeed for Read to succeed + } + } + return n, err +} + +// siphon is like a TeeReader: it copies all data read through it to an +// internal buffer, and moves that buffer to the cache at EOF. +type siphon struct { + f *os.File + d *Diskv + key string + buf *bytes.Buffer +} + +// newSiphon constructs a siphoning reader that represents the passed file. +// When a successful series of reads ends in an EOF, the siphon will write +// the buffered data to Diskv's cache under the given key. +func newSiphon(f *os.File, d *Diskv, key string) io.Reader { + return &siphon{ + f: f, + d: d, + key: key, + buf: &bytes.Buffer{}, + } +} + +// Read implements the io.Reader interface for siphon. +func (s *siphon) Read(p []byte) (int, error) { + n, err := s.f.Read(p) + + if err == nil { + return s.buf.Write(p[0:n]) // Write must succeed for Read to succeed + } + + if err == io.EOF { + s.d.cacheWithoutLock(s.key, s.buf.Bytes()) // cache may fail + if closeErr := s.f.Close(); closeErr != nil { + return n, closeErr // close must succeed for Read to succeed + } + return n, err + } + + return n, err +} + +// Erase synchronously erases the given key from the disk and the cache. +func (d *Diskv) Erase(key string) error { + pathKey := d.transform(key) + d.mu.Lock() + defer d.mu.Unlock() + + d.bustCacheWithLock(key) + + // erase from index + if d.Index != nil { + d.Index.Delete(key) + } + + // erase from disk + filename := d.completeFilename(pathKey) + if s, err := os.Stat(filename); err == nil { + if s.IsDir() { + return errBadKey + } + if err = os.Remove(filename); err != nil { + return err + } + } else { + // Return err as-is so caller can do os.IsNotExist(err). + return err + } + + // clean up and return + d.pruneDirsWithLock(key) + return nil +} + +// EraseAll will delete all of the data from the store, both in the cache and on +// the disk. Note that EraseAll doesn't distinguish diskv-related data from non- +// diskv-related data. Care should be taken to always specify a diskv base +// directory that is exclusively for diskv data. +func (d *Diskv) EraseAll() error { + d.mu.Lock() + defer d.mu.Unlock() + d.cache = make(map[string][]byte) + d.cacheSize = 0 + if d.TempDir != "" { + os.RemoveAll(d.TempDir) // errors ignored + } + return os.RemoveAll(d.BasePath) +} + +// Has returns true if the given key exists. 
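+//
+// For example (an illustrative sketch; the keys and value are assumptions):
+//
+//	_ = d.Write("alpha", []byte("beta")) // error handling elided
+//	d.Has("alpha")                       // true, answered from cache or disk
+//	d.Has("gamma")                       // false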
+func (d *Diskv) Has(key string) bool { + pathKey := d.transform(key) + d.mu.Lock() + defer d.mu.Unlock() + + if _, ok := d.cache[key]; ok { + return true + } + + filename := d.completeFilename(pathKey) + s, err := os.Stat(filename) + if err != nil { + return false + } + if s.IsDir() { + return false + } + + return true +} + +// Keys returns a channel that will yield every key accessible by the store, +// in undefined order. If a cancel channel is provided, closing it will +// terminate and close the keys channel. +func (d *Diskv) Keys(cancel <-chan struct{}) <-chan string { + return d.KeysPrefix("", cancel) +} + +// KeysPrefix returns a channel that will yield every key accessible by the +// store with the given prefix, in undefined order. If a cancel channel is +// provided, closing it will terminate and close the keys channel. If the +// provided prefix is the empty string, all keys will be yielded. +func (d *Diskv) KeysPrefix(prefix string, cancel <-chan struct{}) <-chan string { + var prepath string + if prefix == "" { + prepath = d.BasePath + } else { + prefixKey := d.transform(prefix) + prepath = d.pathFor(prefixKey) + } + c := make(chan string) + go func() { + filepath.Walk(prepath, d.walker(c, prefix, cancel)) + close(c) + }() + return c +} + +// walker returns a function which satisfies the filepath.WalkFunc interface. +// It sends every non-directory file entry down the channel c. +func (d *Diskv) walker(c chan<- string, prefix string, cancel <-chan struct{}) filepath.WalkFunc { + return func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + + relPath, _ := filepath.Rel(d.BasePath, path) + dir, file := filepath.Split(relPath) + pathSplit := strings.Split(dir, string(filepath.Separator)) + pathSplit = pathSplit[:len(pathSplit)-1] + + pathKey := &PathKey{ + Path: pathSplit, + FileName: file, + } + + key := d.InverseTransform(pathKey) + + if info.IsDir() || !strings.HasPrefix(key, prefix) { + return nil // "pass" + } + + select { + case c <- key: + case <-cancel: + return errCanceled + } + + return nil + } +} + +// pathFor returns the absolute path for location on the filesystem where the +// data for the given key will be stored. +func (d *Diskv) pathFor(pathKey *PathKey) string { + return filepath.Join(d.BasePath, filepath.Join(pathKey.Path...)) +} + +// ensurePathWithLock is a helper function that generates all necessary +// directories on the filesystem for the given key. +func (d *Diskv) ensurePathWithLock(pathKey *PathKey) error { + return os.MkdirAll(d.pathFor(pathKey), d.PathPerm) +} + +// completeFilename returns the absolute path to the file for the given key. +func (d *Diskv) completeFilename(pathKey *PathKey) string { + return filepath.Join(d.pathFor(pathKey), pathKey.FileName) +} + +// cacheWithLock attempts to cache the given key-value pair in the store's +// cache. It can fail if the value is larger than the cache's maximum size. +func (d *Diskv) cacheWithLock(key string, val []byte) error { + valueSize := uint64(len(val)) + if err := d.ensureCacheSpaceWithLock(valueSize); err != nil { + return fmt.Errorf("%s; not caching", err) + } + + // be very strict about memory guarantees + if (d.cacheSize + valueSize) > d.CacheSizeMax { + panic(fmt.Sprintf("failed to make room for value (%d/%d)", valueSize, d.CacheSizeMax)) + } + + d.cache[key] = val + d.cacheSize += valueSize + return nil +} + +// cacheWithoutLock acquires the store's (write) mutex and calls cacheWithLock. 
+func (d *Diskv) cacheWithoutLock(key string, val []byte) error { + d.mu.Lock() + defer d.mu.Unlock() + return d.cacheWithLock(key, val) +} + +func (d *Diskv) bustCacheWithLock(key string) { + if val, ok := d.cache[key]; ok { + d.uncacheWithLock(key, uint64(len(val))) + } +} + +func (d *Diskv) uncacheWithLock(key string, sz uint64) { + d.cacheSize -= sz + delete(d.cache, key) +} + +// pruneDirsWithLock deletes empty directories in the path walk leading to the +// key k. Typically this function is called after an Erase is made. +func (d *Diskv) pruneDirsWithLock(key string) error { + pathlist := d.transform(key).Path + for i := range pathlist { + dir := filepath.Join(d.BasePath, filepath.Join(pathlist[:len(pathlist)-i]...)) + + // thanks to Steven Blenkinsop for this snippet + switch fi, err := os.Stat(dir); true { + case err != nil: + return err + case !fi.IsDir(): + panic(fmt.Sprintf("corrupt dirstate at %s", dir)) + } + + nlinks, err := filepath.Glob(filepath.Join(dir, "*")) + if err != nil { + return err + } else if len(nlinks) > 0 { + return nil // has subdirs -- do not prune + } + if err = os.Remove(dir); err != nil { + return err + } + } + + return nil +} + +// ensureCacheSpaceWithLock deletes entries from the cache in arbitrary order +// until the cache has at least valueSize bytes available. +func (d *Diskv) ensureCacheSpaceWithLock(valueSize uint64) error { + if valueSize > d.CacheSizeMax { + return fmt.Errorf("value size (%d bytes) too large for cache (%d bytes)", valueSize, d.CacheSizeMax) + } + + safe := func() bool { return (d.cacheSize + valueSize) <= d.CacheSizeMax } + + for key, val := range d.cache { + if safe() { + break + } + + d.uncacheWithLock(key, uint64(len(val))) + } + + if !safe() { + panic(fmt.Sprintf("%d bytes still won't fit in the cache! (max %d bytes)", valueSize, d.CacheSizeMax)) + } + + return nil +} + +// nopWriteCloser wraps an io.Writer and provides a no-op Close method to +// satisfy the io.WriteCloser interface. +type nopWriteCloser struct { + io.Writer +} + +func (wc *nopWriteCloser) Write(p []byte) (int, error) { return wc.Writer.Write(p) } +func (wc *nopWriteCloser) Close() error { return nil } diff --git a/vendor/github.com/peterbourgon/diskv/index.go b/vendor/github.com/peterbourgon/diskv/index.go new file mode 100644 index 000000000..96fee5152 --- /dev/null +++ b/vendor/github.com/peterbourgon/diskv/index.go @@ -0,0 +1,115 @@ +package diskv + +import ( + "sync" + + "github.com/google/btree" +) + +// Index is a generic interface for things that can +// provide an ordered list of keys. +type Index interface { + Initialize(less LessFunction, keys <-chan string) + Insert(key string) + Delete(key string) + Keys(from string, n int) []string +} + +// LessFunction is used to initialize an Index of keys in a specific order. +type LessFunction func(string, string) bool + +// btreeString is a custom data type that satisfies the BTree Less interface, +// making the strings it wraps sortable by the BTree package. +type btreeString struct { + s string + l LessFunction +} + +// Less satisfies the BTree.Less interface using the btreeString's LessFunction. +func (s btreeString) Less(i btree.Item) bool { + return s.l(s.s, i.(btreeString).s) +} + +// BTreeIndex is an implementation of the Index interface using google/btree. +type BTreeIndex struct { + sync.RWMutex + LessFunction + *btree.BTree +} + +// Initialize populates the BTree tree with data from the keys channel, +// according to the passed less function. It's destructive to the BTreeIndex. 
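+//
+// The index is normally wired up through diskv's Options rather than
+// initialized by hand; New initializes the index from the store's existing
+// keys when both Index and IndexLess are set. An illustrative sketch (the
+// base path, key, and less function are assumptions):
+//
+//	d := New(Options{
+//		BasePath:  "index-demo",
+//		Index:     &BTreeIndex{},
+//		IndexLess: func(a, b string) bool { return a < b },
+//	})
+//	_ = d.Write("k1", []byte("v1"))
+//	fmt.Println(d.Index.Keys("", 10)) // up to 10 keys, in key order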
+func (i *BTreeIndex) Initialize(less LessFunction, keys <-chan string) { + i.Lock() + defer i.Unlock() + i.LessFunction = less + i.BTree = rebuild(less, keys) +} + +// Insert inserts the given key (only) into the BTree tree. +func (i *BTreeIndex) Insert(key string) { + i.Lock() + defer i.Unlock() + if i.BTree == nil || i.LessFunction == nil { + panic("uninitialized index") + } + i.BTree.ReplaceOrInsert(btreeString{s: key, l: i.LessFunction}) +} + +// Delete removes the given key (only) from the BTree tree. +func (i *BTreeIndex) Delete(key string) { + i.Lock() + defer i.Unlock() + if i.BTree == nil || i.LessFunction == nil { + panic("uninitialized index") + } + i.BTree.Delete(btreeString{s: key, l: i.LessFunction}) +} + +// Keys yields a maximum of n keys in order. If the passed 'from' key is empty, +// Keys will return the first n keys. If the passed 'from' key is non-empty, the +// first key in the returned slice will be the key that immediately follows the +// passed key, in key order. +func (i *BTreeIndex) Keys(from string, n int) []string { + i.RLock() + defer i.RUnlock() + + if i.BTree == nil || i.LessFunction == nil { + panic("uninitialized index") + } + + if i.BTree.Len() <= 0 { + return []string{} + } + + btreeFrom := btreeString{s: from, l: i.LessFunction} + skipFirst := true + if len(from) <= 0 || !i.BTree.Has(btreeFrom) { + // no such key, so fabricate an always-smallest item + btreeFrom = btreeString{s: "", l: func(string, string) bool { return true }} + skipFirst = false + } + + keys := []string{} + iterator := func(i btree.Item) bool { + keys = append(keys, i.(btreeString).s) + return len(keys) < n + } + i.BTree.AscendGreaterOrEqual(btreeFrom, iterator) + + if skipFirst && len(keys) > 0 { + keys = keys[1:] + } + + return keys +} + +// rebuildIndex does the work of regenerating the index +// with the given keys. +func rebuild(less LessFunction, keys <-chan string) *btree.BTree { + tree := btree.New(2) + for key := range keys { + tree.ReplaceOrInsert(btreeString{s: key, l: less}) + } + return tree +} diff --git a/vendor/github.com/prometheus/client_golang/LICENSE b/vendor/github.com/prometheus/client_golang/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/prometheus/client_golang/NOTICE b/vendor/github.com/prometheus/client_golang/NOTICE new file mode 100644 index 000000000..dd878a30e --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/NOTICE @@ -0,0 +1,23 @@ +Prometheus instrumentation library for Go applications +Copyright 2012-2015 The Prometheus Authors + +This product includes software developed at +SoundCloud Ltd. (http://soundcloud.com/). + + +The following components are included in this product: + +perks - a fork of https://github.com/bmizerany/perks +https://github.com/beorn7/perks +Copyright 2013-2015 Blake Mizerany, Björn Rabenstein +See https://github.com/beorn7/perks/blob/master/README.md for license details. + +Go support for Protocol Buffers - Google's data interchange format +http://github.com/golang/protobuf/ +Copyright 2010 The Go Authors +See source code for license details. + +Support for streaming Protocol Buffer messages for the Go language (golang). 
+https://github.com/matttproud/golang_protobuf_extensions +Copyright 2013 Matt T. Proud +Licensed under the Apache License, Version 2.0 diff --git a/vendor/github.com/prometheus/client_golang/prometheus/build_info.go b/vendor/github.com/prometheus/client_golang/prometheus/build_info.go new file mode 100644 index 000000000..288f0e854 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/build_info.go @@ -0,0 +1,29 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build go1.12 + +package prometheus + +import "runtime/debug" + +// readBuildInfo is a wrapper around debug.ReadBuildInfo for Go 1.12+. +func readBuildInfo() (path, version, sum string) { + path, version, sum = "unknown", "unknown", "unknown" + if bi, ok := debug.ReadBuildInfo(); ok { + path = bi.Main.Path + version = bi.Main.Version + sum = bi.Main.Sum + } + return +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/build_info_pre_1.12.go b/vendor/github.com/prometheus/client_golang/prometheus/build_info_pre_1.12.go new file mode 100644 index 000000000..6609e2877 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/build_info_pre_1.12.go @@ -0,0 +1,22 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !go1.12 + +package prometheus + +// readBuildInfo is a wrapper around debug.ReadBuildInfo for Go versions before +// 1.12. Remove this whole file once the minimum supported Go version is 1.12. +func readBuildInfo() (path, version, sum string) { + return "unknown", "unknown", "unknown" +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collector.go b/vendor/github.com/prometheus/client_golang/prometheus/collector.go new file mode 100644 index 000000000..1e839650d --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/collector.go @@ -0,0 +1,120 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +// Collector is the interface implemented by anything that can be used by +// Prometheus to collect metrics. A Collector has to be registered for +// collection. See Registerer.Register. +// +// The stock metrics provided by this package (Gauge, Counter, Summary, +// Histogram, Untyped) are also Collectors (which only ever collect one metric, +// namely itself). An implementer of Collector may, however, collect multiple +// metrics in a coordinated fashion and/or create metrics on the fly. Examples +// for collectors already implemented in this library are the metric vectors +// (i.e. collection of multiple instances of the same Metric but with different +// label values) like GaugeVec or SummaryVec, and the ExpvarCollector. +type Collector interface { + // Describe sends the super-set of all possible descriptors of metrics + // collected by this Collector to the provided channel and returns once + // the last descriptor has been sent. The sent descriptors fulfill the + // consistency and uniqueness requirements described in the Desc + // documentation. + // + // It is valid if one and the same Collector sends duplicate + // descriptors. Those duplicates are simply ignored. However, two + // different Collectors must not send duplicate descriptors. + // + // Sending no descriptor at all marks the Collector as “unchecked”, + // i.e. no checks will be performed at registration time, and the + // Collector may yield any Metric it sees fit in its Collect method. + // + // This method idempotently sends the same descriptors throughout the + // lifetime of the Collector. It may be called concurrently and + // therefore must be implemented in a concurrency safe way. + // + // If a Collector encounters an error while executing this method, it + // must send an invalid descriptor (created with NewInvalidDesc) to + // signal the error to the registry. + Describe(chan<- *Desc) + // Collect is called by the Prometheus registry when collecting + // metrics. The implementation sends each collected metric via the + // provided channel and returns once the last metric has been sent. The + // descriptor of each sent metric is one of those returned by Describe + // (unless the Collector is unchecked, see above). Returned metrics that + // share the same descriptor must differ in their variable label + // values. + // + // This method may be called concurrently and must therefore be + // implemented in a concurrency safe way. Blocking occurs at the expense + // of total performance of rendering all registered metrics. Ideally, + // Collector implementations support concurrent readers. + Collect(chan<- Metric) +} + +// DescribeByCollect is a helper to implement the Describe method of a custom +// Collector. It collects the metrics from the provided Collector and sends +// their descriptors to the provided channel. +// +// If a Collector collects the same metrics throughout its lifetime, its +// Describe method can simply be implemented as: +// +// func (c customCollector) Describe(ch chan<- *Desc) { +// DescribeByCollect(c, ch) +// } +// +// However, this will not work if the metrics collected change dynamically over +// the lifetime of the Collector in a way that their combined set of descriptors +// changes as well. The shortcut implementation will then violate the contract +// of the Describe method. 
If a Collector sometimes collects no metrics at all +// (for example vectors like CounterVec, GaugeVec, etc., which only collect +// metrics after a metric with a fully specified label set has been accessed), +// it might even get registered as an unchecked Collector (cf. the Register +// method of the Registerer interface). Hence, only use this shortcut +// implementation of Describe if you are certain to fulfill the contract. +// +// The Collector example demonstrates a use of DescribeByCollect. +func DescribeByCollect(c Collector, descs chan<- *Desc) { + metrics := make(chan Metric) + go func() { + c.Collect(metrics) + close(metrics) + }() + for m := range metrics { + descs <- m.Desc() + } +} + +// selfCollector implements Collector for a single Metric so that the Metric +// collects itself. Add it as an anonymous field to a struct that implements +// Metric, and call init with the Metric itself as an argument. +type selfCollector struct { + self Metric +} + +// init provides the selfCollector with a reference to the metric it is supposed +// to collect. It is usually called within the factory function to create a +// metric. See example. +func (c *selfCollector) init(self Metric) { + c.self = self +} + +// Describe implements Collector. +func (c *selfCollector) Describe(ch chan<- *Desc) { + ch <- c.self.Desc() +} + +// Collect implements Collector. +func (c *selfCollector) Collect(ch chan<- Metric) { + ch <- c.self +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/counter.go b/vendor/github.com/prometheus/client_golang/prometheus/counter.go new file mode 100644 index 000000000..d463e36d3 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/counter.go @@ -0,0 +1,277 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "errors" + "math" + "sync/atomic" + + dto "github.com/prometheus/client_model/go" +) + +// Counter is a Metric that represents a single numerical value that only ever +// goes up. That implies that it cannot be used to count items whose number can +// also go down, e.g. the number of currently running goroutines. Those +// "counters" are represented by Gauges. +// +// A Counter is typically used to count requests served, tasks completed, errors +// occurred, etc. +// +// To create Counter instances, use NewCounter. +type Counter interface { + Metric + Collector + + // Inc increments the counter by 1. Use Add to increment it by arbitrary + // non-negative values. + Inc() + // Add adds the given value to the counter. It panics if the value is < + // 0. + Add(float64) +} + +// CounterOpts is an alias for Opts. See there for doc comments. +type CounterOpts Opts + +// NewCounter creates a new Counter based on the provided CounterOpts. +// +// The returned implementation tracks the counter value in two separate +// variables, a float64 and a uint64. 
The latter is used to track calls of the +// Inc method and calls of the Add method with a value that can be represented +// as a uint64. This allows atomic increments of the counter with optimal +// performance. (It is common to have an Inc call in very hot execution paths.) +// Both internal tracking values are added up in the Write method. This has to +// be taken into account when it comes to precision and overflow behavior. +func NewCounter(opts CounterOpts) Counter { + desc := NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ) + result := &counter{desc: desc, labelPairs: desc.constLabelPairs} + result.init(result) // Init self-collection. + return result +} + +type counter struct { + // valBits contains the bits of the represented float64 value, while + // valInt stores values that are exact integers. Both have to go first + // in the struct to guarantee alignment for atomic operations. + // http://golang.org/pkg/sync/atomic/#pkg-note-BUG + valBits uint64 + valInt uint64 + + selfCollector + desc *Desc + + labelPairs []*dto.LabelPair +} + +func (c *counter) Desc() *Desc { + return c.desc +} + +func (c *counter) Add(v float64) { + if v < 0 { + panic(errors.New("counter cannot decrease in value")) + } + ival := uint64(v) + if float64(ival) == v { + atomic.AddUint64(&c.valInt, ival) + return + } + + for { + oldBits := atomic.LoadUint64(&c.valBits) + newBits := math.Float64bits(math.Float64frombits(oldBits) + v) + if atomic.CompareAndSwapUint64(&c.valBits, oldBits, newBits) { + return + } + } +} + +func (c *counter) Inc() { + atomic.AddUint64(&c.valInt, 1) +} + +func (c *counter) Write(out *dto.Metric) error { + fval := math.Float64frombits(atomic.LoadUint64(&c.valBits)) + ival := atomic.LoadUint64(&c.valInt) + val := fval + float64(ival) + + return populateMetric(CounterValue, val, c.labelPairs, out) +} + +// CounterVec is a Collector that bundles a set of Counters that all share the +// same Desc, but have different values for their variable labels. This is used +// if you want to count the same thing partitioned by various dimensions +// (e.g. number of HTTP requests, partitioned by response code and +// method). Create instances with NewCounterVec. +type CounterVec struct { + *metricVec +} + +// NewCounterVec creates a new CounterVec based on the provided CounterOpts and +// partitioned by the given label names. +func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec { + desc := NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + labelNames, + opts.ConstLabels, + ) + return &CounterVec{ + metricVec: newMetricVec(desc, func(lvs ...string) Metric { + if len(lvs) != len(desc.variableLabels) { + panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs)) + } + result := &counter{desc: desc, labelPairs: makeLabelPairs(desc, lvs)} + result.init(result) // Init self-collection. + return result + }), + } +} + +// GetMetricWithLabelValues returns the Counter for the given slice of label +// values (same order as the VariableLabels in Desc). If that combination of +// label values is accessed for the first time, a new Counter is created. +// +// It is possible to call this method without using the returned Counter to only +// create the new Counter but leave it at its starting value 0. See also the +// SummaryVec example. 
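+//
+// A short sketch of the common pattern (hypothetical; the metric name and
+// label values are assumptions):
+//
+//	requests := NewCounterVec(
+//		CounterOpts{Name: "http_requests_total", Help: "Count of HTTP requests."},
+//		[]string{"code", "method"},
+//	)
+//	if c, err := requests.GetMetricWithLabelValues("404", "GET"); err == nil {
+//		c.Inc()
+//	}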
+// +// Keeping the Counter for later use is possible (and should be considered if +// performance is critical), but keep in mind that Reset, DeleteLabelValues and +// Delete can be used to delete the Counter from the CounterVec. In that case, +// the Counter will still exist, but it will not be exported anymore, even if a +// Counter with the same label values is created later. +// +// An error is returned if the number of label values is not the same as the +// number of VariableLabels in Desc (minus any curried labels). +// +// Note that for more than one label value, this method is prone to mistakes +// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as +// an alternative to avoid that type of mistake. For higher label numbers, the +// latter has a much more readable (albeit more verbose) syntax, but it comes +// with a performance overhead (for creating and processing the Labels map). +// See also the GaugeVec example. +func (v *CounterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) { + metric, err := v.metricVec.getMetricWithLabelValues(lvs...) + if metric != nil { + return metric.(Counter), err + } + return nil, err +} + +// GetMetricWith returns the Counter for the given Labels map (the label names +// must match those of the VariableLabels in Desc). If that label map is +// accessed for the first time, a new Counter is created. Implications of +// creating a Counter without using it and keeping the Counter for later use are +// the same as for GetMetricWithLabelValues. +// +// An error is returned if the number and names of the Labels are inconsistent +// with those of the VariableLabels in Desc (minus any curried labels). +// +// This method is used for the same purpose as +// GetMetricWithLabelValues(...string). See there for pros and cons of the two +// methods. +func (v *CounterVec) GetMetricWith(labels Labels) (Counter, error) { + metric, err := v.metricVec.getMetricWith(labels) + if metric != nil { + return metric.(Counter), err + } + return nil, err +} + +// WithLabelValues works as GetMetricWithLabelValues, but panics where +// GetMetricWithLabelValues would have returned an error. Not returning an +// error allows shortcuts like +// myVec.WithLabelValues("404", "GET").Add(42) +func (v *CounterVec) WithLabelValues(lvs ...string) Counter { + c, err := v.GetMetricWithLabelValues(lvs...) + if err != nil { + panic(err) + } + return c +} + +// With works as GetMetricWith, but panics where GetMetricWithLabels would have +// returned an error. Not returning an error allows shortcuts like +// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42) +func (v *CounterVec) With(labels Labels) Counter { + c, err := v.GetMetricWith(labels) + if err != nil { + panic(err) + } + return c +} + +// CurryWith returns a vector curried with the provided labels, i.e. the +// returned vector has those labels pre-set for all labeled operations performed +// on it. The cardinality of the curried vector is reduced accordingly. The +// order of the remaining labels stays the same (just with the curried labels +// taken out of the sequence – which is relevant for the +// (GetMetric)WithLabelValues methods). It is possible to curry a curried +// vector, but only with labels not yet used for currying before. +// +// The metrics contained in the CounterVec are shared between the curried and +// uncurried vectors. They are just accessed differently. Curried and uncurried +// vectors behave identically in terms of collection. 
Only one must be +// registered with a given registry (usually the uncurried version). The Reset +// method deletes all metrics, even if called on a curried vector. +func (v *CounterVec) CurryWith(labels Labels) (*CounterVec, error) { + vec, err := v.curryWith(labels) + if vec != nil { + return &CounterVec{vec}, err + } + return nil, err +} + +// MustCurryWith works as CurryWith but panics where CurryWith would have +// returned an error. +func (v *CounterVec) MustCurryWith(labels Labels) *CounterVec { + vec, err := v.CurryWith(labels) + if err != nil { + panic(err) + } + return vec +} + +// CounterFunc is a Counter whose value is determined at collect time by calling a +// provided function. +// +// To create CounterFunc instances, use NewCounterFunc. +type CounterFunc interface { + Metric + Collector +} + +// NewCounterFunc creates a new CounterFunc based on the provided +// CounterOpts. The value reported is determined by calling the given function +// from within the Write method. Take into account that metric collection may +// happen concurrently. If that results in concurrent calls to Write, like in +// the case where a CounterFunc is directly registered with Prometheus, the +// provided function must be concurrency-safe. The function should also honor +// the contract for a Counter (values only go up, not down), but compliance will +// not be checked. +func NewCounterFunc(opts CounterOpts, function func() float64) CounterFunc { + return newValueFunc(NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ), CounterValue, function) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/desc.go b/vendor/github.com/prometheus/client_golang/prometheus/desc.go new file mode 100644 index 000000000..1d034f871 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/desc.go @@ -0,0 +1,184 @@ +// Copyright 2016 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "errors" + "fmt" + "sort" + "strings" + + "github.com/golang/protobuf/proto" + "github.com/prometheus/common/model" + + dto "github.com/prometheus/client_model/go" +) + +// Desc is the descriptor used by every Prometheus Metric. It is essentially +// the immutable meta-data of a Metric. The normal Metric implementations +// included in this package manage their Desc under the hood. Users only have to +// deal with Desc if they use advanced features like the ExpvarCollector or +// custom Collectors and Metrics. +// +// Descriptors registered with the same registry have to fulfill certain +// consistency and uniqueness criteria if they share the same fully-qualified +// name: They must have the same help string and the same label names (aka label +// dimensions) in each, constLabels and variableLabels, but they must differ in +// the values of the constLabels. +// +// Descriptors that share the same fully-qualified names and the same label +// values of their constLabels are considered equal. 
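+//
+// For example, a hypothetical descriptor (the metric name, help string, and
+// labels below are illustrative, not taken from this package):
+//
+//	desc := NewDesc(
+//		"queue_depth",                      // fully-qualified metric name
+//		"Current depth of the work queue.", // help string
+//		[]string{"queue"},                  // variable label names
+//		Labels{"shard": "0"},               // constant labels
+//	)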
+// +// Use NewDesc to create new Desc instances. +type Desc struct { + // fqName has been built from Namespace, Subsystem, and Name. + fqName string + // help provides some helpful information about this metric. + help string + // constLabelPairs contains precalculated DTO label pairs based on + // the constant labels. + constLabelPairs []*dto.LabelPair + // VariableLabels contains names of labels for which the metric + // maintains variable values. + variableLabels []string + // id is a hash of the values of the ConstLabels and fqName. This + // must be unique among all registered descriptors and can therefore be + // used as an identifier of the descriptor. + id uint64 + // dimHash is a hash of the label names (preset and variable) and the + // Help string. Each Desc with the same fqName must have the same + // dimHash. + dimHash uint64 + // err is an error that occurred during construction. It is reported on + // registration time. + err error +} + +// NewDesc allocates and initializes a new Desc. Errors are recorded in the Desc +// and will be reported on registration time. variableLabels and constLabels can +// be nil if no such labels should be set. fqName must not be empty. +// +// variableLabels only contain the label names. Their label values are variable +// and therefore not part of the Desc. (They are managed within the Metric.) +// +// For constLabels, the label values are constant. Therefore, they are fully +// specified in the Desc. See the Collector example for a usage pattern. +func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *Desc { + d := &Desc{ + fqName: fqName, + help: help, + variableLabels: variableLabels, + } + if !model.IsValidMetricName(model.LabelValue(fqName)) { + d.err = fmt.Errorf("%q is not a valid metric name", fqName) + return d + } + // labelValues contains the label values of const labels (in order of + // their sorted label names) plus the fqName (at position 0). + labelValues := make([]string, 1, len(constLabels)+1) + labelValues[0] = fqName + labelNames := make([]string, 0, len(constLabels)+len(variableLabels)) + labelNameSet := map[string]struct{}{} + // First add only the const label names and sort them... + for labelName := range constLabels { + if !checkLabelName(labelName) { + d.err = fmt.Errorf("%q is not a valid label name for metric %q", labelName, fqName) + return d + } + labelNames = append(labelNames, labelName) + labelNameSet[labelName] = struct{}{} + } + sort.Strings(labelNames) + // ... so that we can now add const label values in the order of their names. + for _, labelName := range labelNames { + labelValues = append(labelValues, constLabels[labelName]) + } + // Validate the const label values. They can't have a wrong cardinality, so + // use in len(labelValues) as expectedNumberOfValues. + if err := validateLabelValues(labelValues, len(labelValues)); err != nil { + d.err = err + return d + } + // Now add the variable label names, but prefix them with something that + // cannot be in a regular label name. That prevents matching the label + // dimension with a different mix between preset and variable labels. 
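+	// For example, a variable label "code" is tracked internally as "$code",
+	// so it cannot collide with a const label that is literally named "code"
+	// (the label name here is illustrative).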
+ for _, labelName := range variableLabels { + if !checkLabelName(labelName) { + d.err = fmt.Errorf("%q is not a valid label name for metric %q", labelName, fqName) + return d + } + labelNames = append(labelNames, "$"+labelName) + labelNameSet[labelName] = struct{}{} + } + if len(labelNames) != len(labelNameSet) { + d.err = errors.New("duplicate label names") + return d + } + + vh := hashNew() + for _, val := range labelValues { + vh = hashAdd(vh, val) + vh = hashAddByte(vh, separatorByte) + } + d.id = vh + // Sort labelNames so that order doesn't matter for the hash. + sort.Strings(labelNames) + // Now hash together (in this order) the help string and the sorted + // label names. + lh := hashNew() + lh = hashAdd(lh, help) + lh = hashAddByte(lh, separatorByte) + for _, labelName := range labelNames { + lh = hashAdd(lh, labelName) + lh = hashAddByte(lh, separatorByte) + } + d.dimHash = lh + + d.constLabelPairs = make([]*dto.LabelPair, 0, len(constLabels)) + for n, v := range constLabels { + d.constLabelPairs = append(d.constLabelPairs, &dto.LabelPair{ + Name: proto.String(n), + Value: proto.String(v), + }) + } + sort.Sort(labelPairSorter(d.constLabelPairs)) + return d +} + +// NewInvalidDesc returns an invalid descriptor, i.e. a descriptor with the +// provided error set. If a collector returning such a descriptor is registered, +// registration will fail with the provided error. NewInvalidDesc can be used by +// a Collector to signal inability to describe itself. +func NewInvalidDesc(err error) *Desc { + return &Desc{ + err: err, + } +} + +func (d *Desc) String() string { + lpStrings := make([]string, 0, len(d.constLabelPairs)) + for _, lp := range d.constLabelPairs { + lpStrings = append( + lpStrings, + fmt.Sprintf("%s=%q", lp.GetName(), lp.GetValue()), + ) + } + return fmt.Sprintf( + "Desc{fqName: %q, help: %q, constLabels: {%s}, variableLabels: %v}", + d.fqName, + d.help, + strings.Join(lpStrings, ","), + d.variableLabels, + ) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/doc.go b/vendor/github.com/prometheus/client_golang/prometheus/doc.go new file mode 100644 index 000000000..01977de66 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/doc.go @@ -0,0 +1,200 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package prometheus is the core instrumentation package. It provides metrics +// primitives to instrument code for monitoring. It also offers a registry for +// metrics. Sub-packages allow to expose the registered metrics via HTTP +// (package promhttp) or push them to a Pushgateway (package push). There is +// also a sub-package promauto, which provides metrics constructors with +// automatic registration. +// +// All exported functions and methods are safe to be used concurrently unless +// specified otherwise. 
+// +// A Basic Example +// +// As a starting point, a very basic usage example: +// +// package main +// +// import ( +// "log" +// "net/http" +// +// "github.com/prometheus/client_golang/prometheus" +// "github.com/prometheus/client_golang/prometheus/promhttp" +// ) +// +// var ( +// cpuTemp = prometheus.NewGauge(prometheus.GaugeOpts{ +// Name: "cpu_temperature_celsius", +// Help: "Current temperature of the CPU.", +// }) +// hdFailures = prometheus.NewCounterVec( +// prometheus.CounterOpts{ +// Name: "hd_errors_total", +// Help: "Number of hard-disk errors.", +// }, +// []string{"device"}, +// ) +// ) +// +// func init() { +// // Metrics have to be registered to be exposed: +// prometheus.MustRegister(cpuTemp) +// prometheus.MustRegister(hdFailures) +// } +// +// func main() { +// cpuTemp.Set(65.3) +// hdFailures.With(prometheus.Labels{"device":"/dev/sda"}).Inc() +// +// // The Handler function provides a default handler to expose metrics +// // via an HTTP server. "/metrics" is the usual endpoint for that. +// http.Handle("/metrics", promhttp.Handler()) +// log.Fatal(http.ListenAndServe(":8080", nil)) +// } +// +// +// This is a complete program that exports two metrics, a Gauge and a Counter, +// the latter with a label attached to turn it into a (one-dimensional) vector. +// +// Metrics +// +// The number of exported identifiers in this package might appear a bit +// overwhelming. However, in addition to the basic plumbing shown in the example +// above, you only need to understand the different metric types and their +// vector versions for basic usage. Furthermore, if you are not concerned with +// fine-grained control of when and how to register metrics with the registry, +// have a look at the promauto package, which will effectively allow you to +// ignore registration altogether in simple cases. +// +// Above, you have already touched the Counter and the Gauge. There are two more +// advanced metric types: the Summary and Histogram. A more thorough description +// of those four metric types can be found in the Prometheus docs: +// https://prometheus.io/docs/concepts/metric_types/ +// +// A fifth "type" of metric is Untyped. It behaves like a Gauge, but signals the +// Prometheus server not to assume anything about its type. +// +// In addition to the fundamental metric types Gauge, Counter, Summary, +// Histogram, and Untyped, a very important part of the Prometheus data model is +// the partitioning of samples along dimensions called labels, which results in +// metric vectors. The fundamental types are GaugeVec, CounterVec, SummaryVec, +// HistogramVec, and UntypedVec. +// +// While only the fundamental metric types implement the Metric interface, both +// the metrics and their vector versions implement the Collector interface. A +// Collector manages the collection of a number of Metrics, but for convenience, +// a Metric can also “collect itself”. Note that Gauge, Counter, Summary, +// Histogram, and Untyped are interfaces themselves while GaugeVec, CounterVec, +// SummaryVec, HistogramVec, and UntypedVec are not. +// +// To create instances of Metrics and their vector versions, you need a suitable +// …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts, HistogramOpts, or +// UntypedOpts. +// +// Custom Collectors and constant Metrics +// +// While you could create your own implementations of Metric, most likely you +// will only ever implement the Collector interface on your own. 
At a first +// glance, a custom Collector seems handy to bundle Metrics for common +// registration (with the prime example of the different metric vectors above, +// which bundle all the metrics of the same name but with different labels). +// +// There is a more involved use case, too: If you already have metrics +// available, created outside of the Prometheus context, you don't need the +// interface of the various Metric types. You essentially want to mirror the +// existing numbers into Prometheus Metrics during collection. An own +// implementation of the Collector interface is perfect for that. You can create +// Metric instances “on the fly” using NewConstMetric, NewConstHistogram, and +// NewConstSummary (and their respective Must… versions). That will happen in +// the Collect method. The Describe method has to return separate Desc +// instances, representative of the “throw-away” metrics to be created later. +// NewDesc comes in handy to create those Desc instances. Alternatively, you +// could return no Desc at all, which will mark the Collector “unchecked”. No +// checks are performed at registration time, but metric consistency will still +// be ensured at scrape time, i.e. any inconsistencies will lead to scrape +// errors. Thus, with unchecked Collectors, the responsibility to not collect +// metrics that lead to inconsistencies in the total scrape result lies with the +// implementer of the Collector. While this is not a desirable state, it is +// sometimes necessary. The typical use case is a situation where the exact +// metrics to be returned by a Collector cannot be predicted at registration +// time, but the implementer has sufficient knowledge of the whole system to +// guarantee metric consistency. +// +// The Collector example illustrates the use case. You can also look at the +// source code of the processCollector (mirroring process metrics), the +// goCollector (mirroring Go metrics), or the expvarCollector (mirroring expvar +// metrics) as examples that are used in this package itself. +// +// If you just need to call a function to get a single float value to collect as +// a metric, GaugeFunc, CounterFunc, or UntypedFunc might be interesting +// shortcuts. +// +// Advanced Uses of the Registry +// +// While MustRegister is the by far most common way of registering a Collector, +// sometimes you might want to handle the errors the registration might cause. +// As suggested by the name, MustRegister panics if an error occurs. With the +// Register function, the error is returned and can be handled. +// +// An error is returned if the registered Collector is incompatible or +// inconsistent with already registered metrics. The registry aims for +// consistency of the collected metrics according to the Prometheus data model. +// Inconsistencies are ideally detected at registration time, not at collect +// time. The former will usually be detected at start-up time of a program, +// while the latter will only happen at scrape time, possibly not even on the +// first scrape if the inconsistency only becomes relevant later. That is the +// main reason why a Collector and a Metric have to describe themselves to the +// registry. +// +// So far, everything we did operated on the so-called default registry, as it +// can be found in the global DefaultRegisterer variable. With NewRegistry, you +// can create a custom registry, or you can even implement the Registerer or +// Gatherer interfaces yourself. 
The methods Register and Unregister work in the +// same way on a custom registry as the global functions Register and Unregister +// on the default registry. +// +// There are a number of uses for custom registries: You can use registries with +// special properties, see NewPedanticRegistry. You can avoid global state, as +// it is imposed by the DefaultRegisterer. You can use multiple registries at +// the same time to expose different metrics in different ways. You can use +// separate registries for testing purposes. +// +// Also note that the DefaultRegisterer comes registered with a Collector for Go +// runtime metrics (via NewGoCollector) and a Collector for process metrics (via +// NewProcessCollector). With a custom registry, you are in control and decide +// for yourself which Collectors to register. +// +// HTTP Exposition +// +// The Registry implements the Gatherer interface. The caller of the Gather +// method can then expose the gathered metrics in some way. Usually, the metrics +// are served via HTTP on the /metrics endpoint. That's happening in the example +// above. The tools to expose metrics via HTTP are in the promhttp sub-package. +// +// Pushing to the Pushgateway +// +// Functions for pushing to the Pushgateway can be found in the push sub-package. +// +// Graphite Bridge +// +// Functions and examples to push metrics from a Gatherer to Graphite can be +// found in the graphite sub-package. +// +// Other Means of Exposition +// +// More ways of exposing metrics can easily be added by following the approaches +// of the existing implementations. +package prometheus diff --git a/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go new file mode 100644 index 000000000..18a99d5fa --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go @@ -0,0 +1,119 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "encoding/json" + "expvar" +) + +type expvarCollector struct { + exports map[string]*Desc +} + +// NewExpvarCollector returns a newly allocated expvar Collector that still has +// to be registered with a Prometheus registry. +// +// An expvar Collector collects metrics from the expvar interface. It provides a +// quick way to expose numeric values that are already exported via expvar as +// Prometheus metrics. Note that the data models of expvar and Prometheus are +// fundamentally different, and that the expvar Collector is inherently slower +// than native Prometheus metrics. Thus, the expvar Collector is probably great +// for experiments and prototyping, but you should seriously consider a more +// direct implementation of Prometheus metrics for monitoring production +// systems. +// +// The exports map has the following meaning: +// +// The keys in the map correspond to expvar keys, i.e.
for every expvar key you +// want to export as Prometheus metric, you need an entry in the exports +// map. The descriptor mapped to each key describes how to export the expvar +// value. It defines the name and the help string of the Prometheus metric +// proxying the expvar value. The type will always be Untyped. +// +// For descriptors without variable labels, the expvar value must be a number or +// a bool. The number is then directly exported as the Prometheus sample +// value. (For a bool, 'false' translates to 0 and 'true' to 1). Expvar values +// that are not numbers or bools are silently ignored. +// +// If the descriptor has one variable label, the expvar value must be an expvar +// map. The keys in the expvar map become the various values of the one +// Prometheus label. The values in the expvar map must be numbers or bools again +// as above. +// +// For descriptors with more than one variable label, the expvar must be a +// nested expvar map, i.e. where the values of the topmost map are maps again +// etc. until a depth is reached that corresponds to the number of labels. The +// leaves of that structure must be numbers or bools as above to serve as the +// sample values. +// +// Anything that does not fit into the scheme above is silently ignored. +func NewExpvarCollector(exports map[string]*Desc) Collector { + return &expvarCollector{ + exports: exports, + } +} + +// Describe implements Collector. +func (e *expvarCollector) Describe(ch chan<- *Desc) { + for _, desc := range e.exports { + ch <- desc + } +} + +// Collect implements Collector. +func (e *expvarCollector) Collect(ch chan<- Metric) { + for name, desc := range e.exports { + var m Metric + expVar := expvar.Get(name) + if expVar == nil { + continue + } + var v interface{} + labels := make([]string, len(desc.variableLabels)) + if err := json.Unmarshal([]byte(expVar.String()), &v); err != nil { + ch <- NewInvalidMetric(desc, err) + continue + } + var processValue func(v interface{}, i int) + processValue = func(v interface{}, i int) { + if i >= len(labels) { + copiedLabels := append(make([]string, 0, len(labels)), labels...) + switch v := v.(type) { + case float64: + m = MustNewConstMetric(desc, UntypedValue, v, copiedLabels...) + case bool: + if v { + m = MustNewConstMetric(desc, UntypedValue, 1, copiedLabels...) + } else { + m = MustNewConstMetric(desc, UntypedValue, 0, copiedLabels...) + } + default: + return + } + ch <- m + return + } + vm, ok := v.(map[string]interface{}) + if !ok { + return + } + for lv, val := range vm { + labels[i] = lv + processValue(val, i+1) + } + } + processValue(v, 0) + } +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/fnv.go b/vendor/github.com/prometheus/client_golang/prometheus/fnv.go new file mode 100644 index 000000000..3d383a735 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/fnv.go @@ -0,0 +1,42 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package prometheus + +// Inline and byte-free variant of hash/fnv's fnv64a. + +const ( + offset64 = 14695981039346656037 + prime64 = 1099511628211 +) + +// hashNew initializes a new fnv64a hash value. +func hashNew() uint64 { + return offset64 +} + +// hashAdd adds a string to a fnv64a hash value, returning the updated hash. +func hashAdd(h uint64, s string) uint64 { + for i := 0; i < len(s); i++ { + h ^= uint64(s[i]) + h *= prime64 + } + return h +} + +// hashAddByte adds a byte to a fnv64a hash value, returning the updated hash. +func hashAddByte(h uint64, b byte) uint64 { + h ^= uint64(b) + h *= prime64 + return h +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go new file mode 100644 index 000000000..71d406bd9 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go @@ -0,0 +1,286 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "math" + "sync/atomic" + "time" + + dto "github.com/prometheus/client_model/go" +) + +// Gauge is a Metric that represents a single numerical value that can +// arbitrarily go up and down. +// +// A Gauge is typically used for measured values like temperatures or current +// memory usage, but also "counts" that can go up and down, like the number of +// running goroutines. +// +// To create Gauge instances, use NewGauge. +type Gauge interface { + Metric + Collector + + // Set sets the Gauge to an arbitrary value. + Set(float64) + // Inc increments the Gauge by 1. Use Add to increment it by arbitrary + // values. + Inc() + // Dec decrements the Gauge by 1. Use Sub to decrement it by arbitrary + // values. + Dec() + // Add adds the given value to the Gauge. (The value can be negative, + // resulting in a decrease of the Gauge.) + Add(float64) + // Sub subtracts the given value from the Gauge. (The value can be + // negative, resulting in an increase of the Gauge.) + Sub(float64) + + // SetToCurrentTime sets the Gauge to the current Unix time in seconds. + SetToCurrentTime() +} + +// GaugeOpts is an alias for Opts. See there for doc comments. +type GaugeOpts Opts + +// NewGauge creates a new Gauge based on the provided GaugeOpts. +// +// The returned implementation is optimized for a fast Set method. If you have a +// choice for managing the value of a Gauge via Set vs. Inc/Dec/Add/Sub, pick +// the former. For example, the Inc method of the returned Gauge is slower than +// the Inc method of a Counter returned by NewCounter. This matches the typical +// scenarios for Gauges and Counters, where the former tends to be Set-heavy and +// the latter Inc-heavy. +func NewGauge(opts GaugeOpts) Gauge { + desc := NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ) + result := &gauge{desc: desc, labelPairs: desc.constLabelPairs} + result.init(result) // Init self-collection.
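+ // init stores this gauge in the embedded selfCollector, whose Describe and + // Collect simply hand the metric itself back. That is what lets a plain + // Gauge double as a Collector without a separate wrapper.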
+ return result +} + +type gauge struct { + // valBits contains the bits of the represented float64 value. It has + // to go first in the struct to guarantee alignment for atomic + // operations. http://golang.org/pkg/sync/atomic/#pkg-note-BUG + valBits uint64 + + selfCollector + + desc *Desc + labelPairs []*dto.LabelPair +} + +func (g *gauge) Desc() *Desc { + return g.desc +} + +func (g *gauge) Set(val float64) { + atomic.StoreUint64(&g.valBits, math.Float64bits(val)) +} + +func (g *gauge) SetToCurrentTime() { + g.Set(float64(time.Now().UnixNano()) / 1e9) +} + +func (g *gauge) Inc() { + g.Add(1) +} + +func (g *gauge) Dec() { + g.Add(-1) +} + +func (g *gauge) Add(val float64) { + for { + oldBits := atomic.LoadUint64(&g.valBits) + newBits := math.Float64bits(math.Float64frombits(oldBits) + val) + if atomic.CompareAndSwapUint64(&g.valBits, oldBits, newBits) { + return + } + } +} + +func (g *gauge) Sub(val float64) { + g.Add(val * -1) +} + +func (g *gauge) Write(out *dto.Metric) error { + val := math.Float64frombits(atomic.LoadUint64(&g.valBits)) + return populateMetric(GaugeValue, val, g.labelPairs, out) +} + +// GaugeVec is a Collector that bundles a set of Gauges that all share the same +// Desc, but have different values for their variable labels. This is used if +// you want to count the same thing partitioned by various dimensions +// (e.g. number of operations queued, partitioned by user and operation +// type). Create instances with NewGaugeVec. +type GaugeVec struct { + *metricVec +} + +// NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and +// partitioned by the given label names. +func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec { + desc := NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + labelNames, + opts.ConstLabels, + ) + return &GaugeVec{ + metricVec: newMetricVec(desc, func(lvs ...string) Metric { + if len(lvs) != len(desc.variableLabels) { + panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs)) + } + result := &gauge{desc: desc, labelPairs: makeLabelPairs(desc, lvs)} + result.init(result) // Init self-collection. + return result + }), + } +} + +// GetMetricWithLabelValues returns the Gauge for the given slice of label +// values (same order as the VariableLabels in Desc). If that combination of +// label values is accessed for the first time, a new Gauge is created. +// +// It is possible to call this method without using the returned Gauge to only +// create the new Gauge but leave it at its starting value 0. See also the +// SummaryVec example. +// +// Keeping the Gauge for later use is possible (and should be considered if +// performance is critical), but keep in mind that Reset, DeleteLabelValues and +// Delete can be used to delete the Gauge from the GaugeVec. In that case, the +// Gauge will still exist, but it will not be exported anymore, even if a +// Gauge with the same label values is created later. See also the CounterVec +// example. +// +// An error is returned if the number of label values is not the same as the +// number of VariableLabels in Desc (minus any curried labels). +// +// Note that for more than one label value, this method is prone to mistakes +// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as +// an alternative to avoid that type of mistake. For higher label numbers, the +// latter has a much more readable (albeit more verbose) syntax, but it comes +// with a performance overhead (for creating and processing the Labels map). 
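To illustrate the trade-off just described between the panicking shortcuts and the error-returning accessors, a brief sketch (metric and label names are invented; imports as in the basic example of the package documentation):

	opsQueued := prometheus.NewGaugeVec(
		prometheus.GaugeOpts{
			Name: "example_ops_queued",
			Help: "Number of queued operations, partitioned by user.",
		},
		[]string{"user"},
	)

	// Panicking shortcut: fine when the label values are hard-coded.
	opsQueued.WithLabelValues("alice").Inc()

	// Error-returning variant: preferable when label values arrive from
	// outside and the cardinality might be wrong.
	if g, err := opsQueued.GetMetricWithLabelValues("bob"); err == nil {
		g.Set(7)
	}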
+func (v *GaugeVec) GetMetricWithLabelValues(lvs ...string) (Gauge, error) { + metric, err := v.metricVec.getMetricWithLabelValues(lvs...) + if metric != nil { + return metric.(Gauge), err + } + return nil, err +} + +// GetMetricWith returns the Gauge for the given Labels map (the label names +// must match those of the VariableLabels in Desc). If that label map is +// accessed for the first time, a new Gauge is created. Implications of +// creating a Gauge without using it and keeping the Gauge for later use are +// the same as for GetMetricWithLabelValues. +// +// An error is returned if the number and names of the Labels are inconsistent +// with those of the VariableLabels in Desc (minus any curried labels). +// +// This method is used for the same purpose as +// GetMetricWithLabelValues(...string). See there for pros and cons of the two +// methods. +func (v *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) { + metric, err := v.metricVec.getMetricWith(labels) + if metric != nil { + return metric.(Gauge), err + } + return nil, err +} + +// WithLabelValues works as GetMetricWithLabelValues, but panics where +// GetMetricWithLabelValues would have returned an error. Not returning an +// error allows shortcuts like +// myVec.WithLabelValues("404", "GET").Add(42) +func (v *GaugeVec) WithLabelValues(lvs ...string) Gauge { + g, err := v.GetMetricWithLabelValues(lvs...) + if err != nil { + panic(err) + } + return g +} + +// With works as GetMetricWith, but panics where GetMetricWith would have +// returned an error. Not returning an error allows shortcuts like +// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42) +func (v *GaugeVec) With(labels Labels) Gauge { + g, err := v.GetMetricWith(labels) + if err != nil { + panic(err) + } + return g +} + +// CurryWith returns a vector curried with the provided labels, i.e. the +// returned vector has those labels pre-set for all labeled operations performed +// on it. The cardinality of the curried vector is reduced accordingly. The +// order of the remaining labels stays the same (just with the curried labels +// taken out of the sequence – which is relevant for the +// (GetMetric)WithLabelValues methods). It is possible to curry a curried +// vector, but only with labels not yet used for currying before. +// +// The metrics contained in the GaugeVec are shared between the curried and +// uncurried vectors. They are just accessed differently. Curried and uncurried +// vectors behave identically in terms of collection. Only one must be +// registered with a given registry (usually the uncurried version). The Reset +// method deletes all metrics, even if called on a curried vector. +func (v *GaugeVec) CurryWith(labels Labels) (*GaugeVec, error) { + vec, err := v.curryWith(labels) + if vec != nil { + return &GaugeVec{vec}, err + } + return nil, err +} + +// MustCurryWith works as CurryWith but panics where CurryWith would have +// returned an error. +func (v *GaugeVec) MustCurryWith(labels Labels) *GaugeVec { + vec, err := v.CurryWith(labels) + if err != nil { + panic(err) + } + return vec +} + +// GaugeFunc is a Gauge whose value is determined at collect time by calling a +// provided function. +// +// To create GaugeFunc instances, use NewGaugeFunc. +type GaugeFunc interface { + Metric + Collector +} + +// NewGaugeFunc creates a new GaugeFunc based on the provided GaugeOpts. The +// value reported is determined by calling the given function from within the +// Write method.
Take into account that metric collection may happen +// concurrently. If that results in concurrent calls to Write, like in the case +// where a GaugeFunc is directly registered with Prometheus, the provided +// function must be concurrency-safe. +func NewGaugeFunc(opts GaugeOpts, function func() float64) GaugeFunc { + return newValueFunc(NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ), GaugeValue, function) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go new file mode 100644 index 000000000..dc9247fed --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go @@ -0,0 +1,396 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "runtime" + "runtime/debug" + "sync" + "time" +) + +type goCollector struct { + goroutinesDesc *Desc + threadsDesc *Desc + gcDesc *Desc + goInfoDesc *Desc + + // ms... are memstats related. + msLast *runtime.MemStats // Previously collected memstats. + msLastTimestamp time.Time + msMtx sync.Mutex // Protects msLast and msLastTimestamp. + msMetrics memStatsMetrics + msRead func(*runtime.MemStats) // For mocking in tests. + msMaxWait time.Duration // Wait time for fresh memstats. + msMaxAge time.Duration // Maximum allowed age of old memstats. +} + +// NewGoCollector returns a collector that exports metrics about the current Go +// process. This includes memory stats. To collect those, runtime.ReadMemStats +// is called. This requires “stopping the world”, which usually only happens for +// garbage collection (GC). Take the following implications into account when +// deciding whether to use the Go collector: +// +// 1. The more frequently metrics are collected, the more relevant the +// performance impact of stopping the world becomes. However, with Go1.9 or later the +// stop-the-world time per metrics collection is very short (~25µs) so that the +// performance impact will only matter in rare cases. With older Go +// versions, the stop-the-world duration depends on the heap size and can be +// quite significant (~1.7 ms/GiB as per +// https://go-review.googlesource.com/c/go/+/34937). +// +// 2. During an ongoing GC, nothing else can stop the world. Therefore, if the +// metrics collection happens to coincide with GC, it will only complete after +// GC has finished. Usually, GC is fast enough to not cause problems. However, +// with a very large heap, GC might take multiple seconds, which is enough to +// cause scrape timeouts in common setups. To avoid this problem, the Go +// collector will use the memstats from a previous collection if +// runtime.ReadMemStats takes more than 1s. However, if there are no previously +// collected memstats, or their collection was more than 5m ago, the collection +// will block until runtime.ReadMemStats succeeds.
(The problem might be solved +// in Go1.13, see https://github.com/golang/go/issues/19812 for the related Go +// issue.) +func NewGoCollector() Collector { + return &goCollector{ + goroutinesDesc: NewDesc( + "go_goroutines", + "Number of goroutines that currently exist.", + nil, nil), + threadsDesc: NewDesc( + "go_threads", + "Number of OS threads created.", + nil, nil), + gcDesc: NewDesc( + "go_gc_duration_seconds", + "A summary of the GC invocation durations.", + nil, nil), + goInfoDesc: NewDesc( + "go_info", + "Information about the Go environment.", + nil, Labels{"version": runtime.Version()}), + msLast: &runtime.MemStats{}, + msRead: runtime.ReadMemStats, + msMaxWait: time.Second, + msMaxAge: 5 * time.Minute, + msMetrics: memStatsMetrics{ + { + desc: NewDesc( + memstatNamespace("alloc_bytes"), + "Number of bytes allocated and still in use.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.Alloc) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("alloc_bytes_total"), + "Total number of bytes allocated, even if freed.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.TotalAlloc) }, + valType: CounterValue, + }, { + desc: NewDesc( + memstatNamespace("sys_bytes"), + "Number of bytes obtained from system.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.Sys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("lookups_total"), + "Total number of pointer lookups.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.Lookups) }, + valType: CounterValue, + }, { + desc: NewDesc( + memstatNamespace("mallocs_total"), + "Total number of mallocs.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.Mallocs) }, + valType: CounterValue, + }, { + desc: NewDesc( + memstatNamespace("frees_total"), + "Total number of frees.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.Frees) }, + valType: CounterValue, + }, { + desc: NewDesc( + memstatNamespace("heap_alloc_bytes"), + "Number of heap bytes allocated and still in use.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapAlloc) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("heap_sys_bytes"), + "Number of heap bytes obtained from system.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapSys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("heap_idle_bytes"), + "Number of heap bytes waiting to be used.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapIdle) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("heap_inuse_bytes"), + "Number of heap bytes that are in use.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapInuse) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("heap_released_bytes"), + "Number of heap bytes released to OS.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapReleased) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("heap_objects"), + "Number of allocated objects.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapObjects) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("stack_inuse_bytes"), + "Number of bytes in use by the stack allocator.", + nil, nil, + ), + eval: func(ms 
*runtime.MemStats) float64 { return float64(ms.StackInuse) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("stack_sys_bytes"), + "Number of bytes obtained from system for stack allocator.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackSys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("mspan_inuse_bytes"), + "Number of bytes in use by mspan structures.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanInuse) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("mspan_sys_bytes"), + "Number of bytes used for mspan structures obtained from system.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanSys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("mcache_inuse_bytes"), + "Number of bytes in use by mcache structures.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheInuse) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("mcache_sys_bytes"), + "Number of bytes used for mcache structures obtained from system.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheSys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("buck_hash_sys_bytes"), + "Number of bytes used by the profiling bucket hash table.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.BuckHashSys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("gc_sys_bytes"), + "Number of bytes used for garbage collection system metadata.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.GCSys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("other_sys_bytes"), + "Number of bytes used for other system allocations.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.OtherSys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("next_gc_bytes"), + "Number of heap bytes when next garbage collection will take place.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.NextGC) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("last_gc_time_seconds"), + "Number of seconds since 1970 of last garbage collection.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.LastGC) / 1e9 }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("gc_cpu_fraction"), + "The fraction of this program's available CPU time used by the GC since the program started.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return ms.GCCPUFraction }, + valType: GaugeValue, + }, + }, + } +} + +func memstatNamespace(s string) string { + return "go_memstats_" + s +} + +// Describe returns all descriptions of the collector. +func (c *goCollector) Describe(ch chan<- *Desc) { + ch <- c.goroutinesDesc + ch <- c.threadsDesc + ch <- c.gcDesc + ch <- c.goInfoDesc + for _, i := range c.msMetrics { + ch <- i.desc + } +} + +// Collect returns the current state of all metrics of the collector. +func (c *goCollector) Collect(ch chan<- Metric) { + var ( + ms = &runtime.MemStats{} + done = make(chan struct{}) + ) + // Start reading memstats first as it might take a while. 
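+ // The flow below: kick off the memstats read in a goroutine, emit the cheap + // metrics (goroutines, threads, GC summary, go_info) in the meantime, then + // wait at most msMaxWait for the fresh memstats. If the read is too slow, + // fall back to the previous memstats as long as they are younger than + // msMaxAge; otherwise block until the read completes.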
+ go func() { + c.msRead(ms) + c.msMtx.Lock() + c.msLast = ms + c.msLastTimestamp = time.Now() + c.msMtx.Unlock() + close(done) + }() + + ch <- MustNewConstMetric(c.goroutinesDesc, GaugeValue, float64(runtime.NumGoroutine())) + n, _ := runtime.ThreadCreateProfile(nil) + ch <- MustNewConstMetric(c.threadsDesc, GaugeValue, float64(n)) + + var stats debug.GCStats + stats.PauseQuantiles = make([]time.Duration, 5) + debug.ReadGCStats(&stats) + + quantiles := make(map[float64]float64) + for idx, pq := range stats.PauseQuantiles[1:] { + quantiles[float64(idx+1)/float64(len(stats.PauseQuantiles)-1)] = pq.Seconds() + } + quantiles[0.0] = stats.PauseQuantiles[0].Seconds() + ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), stats.PauseTotal.Seconds(), quantiles) + + ch <- MustNewConstMetric(c.goInfoDesc, GaugeValue, 1) + + timer := time.NewTimer(c.msMaxWait) + select { + case <-done: // Our own ReadMemStats succeeded in time. Use it. + timer.Stop() // Important for high collection frequencies to not pile up timers. + c.msCollect(ch, ms) + return + case <-timer.C: // Time out, use last memstats if possible. Continue below. + } + c.msMtx.Lock() + if time.Since(c.msLastTimestamp) < c.msMaxAge { + // Last memstats are recent enough. Collect from them under the lock. + c.msCollect(ch, c.msLast) + c.msMtx.Unlock() + return + } + // If we are here, the last memstats are too old or don't exist. We have + // to wait until our own ReadMemStats finally completes. For that to + // happen, we have to release the lock. + c.msMtx.Unlock() + <-done + c.msCollect(ch, ms) +} + +func (c *goCollector) msCollect(ch chan<- Metric, ms *runtime.MemStats) { + for _, i := range c.msMetrics { + ch <- MustNewConstMetric(i.desc, i.valType, i.eval(ms)) + } +} + +// memStatsMetrics provide description, value, and value type for memstat metrics. +type memStatsMetrics []struct { + desc *Desc + eval func(*runtime.MemStats) float64 + valType ValueType +} + +// NewBuildInfoCollector returns a collector collecting a single metric +// "go_build_info" with the constant value 1 and three labels "path", "version", +// and "checksum". Their label values contain the main module path, version, and +// checksum, respectively. The labels will only have meaningful values if the +// binary is built with Go module support and from source code retrieved from +// the source repository (rather than the local file system). This is usually +// accomplished by building from outside of GOPATH, specifying the full address +// of the main package, e.g. "GO111MODULE=on go run +// github.com/prometheus/client_golang/examples/random". If built without Go +// module support, all label values will be "unknown". If built with Go module +// support but using the source code from the local file system, the "path" will +// be set appropriately, but "checksum" will be empty and "version" will be +// "(devel)". +// +// This collector uses only the build information for the main module. See +// https://github.com/povilasv/prommod for an example of a collector for the +// module dependencies. 
+func NewBuildInfoCollector() Collector { + path, version, sum := readBuildInfo() + c := &selfCollector{MustNewConstMetric( + NewDesc( + "go_build_info", + "Build information about the main Go module.", + nil, Labels{"path": path, "version": version, "checksum": sum}, + ), + GaugeValue, 1)} + c.init(c.self) + return c +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go new file mode 100644 index 000000000..d7ea67bd2 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go @@ -0,0 +1,586 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "fmt" + "math" + "runtime" + "sort" + "sync" + "sync/atomic" + + "github.com/golang/protobuf/proto" + + dto "github.com/prometheus/client_model/go" +) + +// A Histogram counts individual observations from an event or sample stream in +// configurable buckets. Similar to a summary, it also provides a sum of +// observations and an observation count. +// +// On the Prometheus server, quantiles can be calculated from a Histogram using +// the histogram_quantile function in the query language. +// +// Note that Histograms, in contrast to Summaries, can be aggregated with the +// Prometheus query language (see the documentation for detailed +// procedures). However, Histograms require the user to pre-define suitable +// buckets, and they are in general less accurate. The Observe method of a +// Histogram has a very low performance overhead in comparison with the Observe +// method of a Summary. +// +// To create Histogram instances, use NewHistogram. +type Histogram interface { + Metric + Collector + + // Observe adds a single observation to the histogram. + Observe(float64) +} + +// bucketLabel is used for the label that defines the upper bound of a +// bucket of a histogram ("le" -> "less or equal"). +const bucketLabel = "le" + +// DefBuckets are the default Histogram buckets. The default buckets are +// tailored to broadly measure the response time (in seconds) of a network +// service. Most likely, however, you will be required to define buckets +// customized to your use case. +var ( + DefBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10} + + errBucketLabelNotAllowed = fmt.Errorf( + "%q is not allowed as label name in histograms", bucketLabel, + ) +) + +// LinearBuckets creates 'count' buckets, each 'width' wide, where the lowest +// bucket has an upper bound of 'start'. The final +Inf bucket is not counted +// and not included in the returned slice. The returned slice is meant to be +// used for the Buckets field of HistogramOpts. +// +// The function panics if 'count' is zero or negative. 
+func LinearBuckets(start, width float64, count int) []float64 { + if count < 1 { + panic("LinearBuckets needs a positive count") + } + buckets := make([]float64, count) + for i := range buckets { + buckets[i] = start + start += width + } + return buckets +} + +// ExponentialBuckets creates 'count' buckets, where the lowest bucket has an +// upper bound of 'start' and each following bucket's upper bound is 'factor' +// times the previous bucket's upper bound. The final +Inf bucket is not counted +// and not included in the returned slice. The returned slice is meant to be +// used for the Buckets field of HistogramOpts. +// +// The function panics if 'count' is 0 or negative, if 'start' is 0 or negative, +// or if 'factor' is less than or equal to 1. +func ExponentialBuckets(start, factor float64, count int) []float64 { + if count < 1 { + panic("ExponentialBuckets needs a positive count") + } + if start <= 0 { + panic("ExponentialBuckets needs a positive start value") + } + if factor <= 1 { + panic("ExponentialBuckets needs a factor greater than 1") + } + buckets := make([]float64, count) + for i := range buckets { + buckets[i] = start + start *= factor + } + return buckets +} + +// HistogramOpts bundles the options for creating a Histogram metric. It is +// mandatory to set Name to a non-empty string. All other fields are optional +// and can safely be left at their zero value, although it is strongly +// encouraged to set a Help string. +type HistogramOpts struct { + // Namespace, Subsystem, and Name are components of the fully-qualified + // name of the Histogram (created by joining these components with + // "_"). Only Name is mandatory, the others merely help structuring the + // name. Note that the fully-qualified name of the Histogram must be a + // valid Prometheus metric name. + Namespace string + Subsystem string + Name string + + // Help provides information about this Histogram. + // + // Metrics with the same fully-qualified name must have the same Help + // string. + Help string + + // ConstLabels are used to attach fixed labels to this metric. Metrics + // with the same fully-qualified name must have the same label names in + // their ConstLabels. + // + // ConstLabels are only used rarely. In particular, do not use them to + // attach the same labels to all your metrics. Those use cases are + // better covered by target labels set by the scraping Prometheus + // server, or by one specific metric (e.g. a build_info or a + // machine_role metric). See also + // https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels,-not-static-scraped-labels + ConstLabels Labels + + // Buckets defines the buckets into which observations are counted. Each + // element in the slice is the upper inclusive bound of a bucket. The + // values must be sorted in strictly increasing order. There is no need + // to add a highest bucket with +Inf bound; it will be added + // implicitly. The default value is DefBuckets. + Buckets []float64 +} + +// NewHistogram creates a new Histogram based on the provided HistogramOpts. It +// panics if the buckets in HistogramOpts are not in strictly increasing order.
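To make the bucket helpers and HistogramOpts above concrete, a short sketch (the metric name and bucket layout are invented; imports as in the basic example of the package documentation):

	// Five linear buckets: 0.05, 0.10, 0.15, 0.20, 0.25 (+Inf is implicit).
	reqDuration := prometheus.NewHistogram(prometheus.HistogramOpts{
		Name:    "example_request_duration_seconds",
		Help:    "Request duration in seconds (illustrative).",
		Buckets: prometheus.LinearBuckets(0.05, 0.05, 5),
	})
	reqDuration.Observe(0.13) // Counted in the 0.15 bucket and all wider ones.

	// Exponential alternative: 0.001, 0.002, 0.004, ..., doubling ten times.
	expo := prometheus.ExponentialBuckets(0.001, 2, 10)
	_ = expo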
+func NewHistogram(opts HistogramOpts) Histogram { + return newHistogram( + NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ), + opts, + ) +} + +func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogram { + if len(desc.variableLabels) != len(labelValues) { + panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, labelValues)) + } + + for _, n := range desc.variableLabels { + if n == bucketLabel { + panic(errBucketLabelNotAllowed) + } + } + for _, lp := range desc.constLabelPairs { + if lp.GetName() == bucketLabel { + panic(errBucketLabelNotAllowed) + } + } + + if len(opts.Buckets) == 0 { + opts.Buckets = DefBuckets + } + + h := &histogram{ + desc: desc, + upperBounds: opts.Buckets, + labelPairs: makeLabelPairs(desc, labelValues), + counts: [2]*histogramCounts{&histogramCounts{}, &histogramCounts{}}, + } + for i, upperBound := range h.upperBounds { + if i < len(h.upperBounds)-1 { + if upperBound >= h.upperBounds[i+1] { + panic(fmt.Errorf( + "histogram buckets must be in increasing order: %f >= %f", + upperBound, h.upperBounds[i+1], + )) + } + } else { + if math.IsInf(upperBound, +1) { + // The +Inf bucket is implicit. Remove it here. + h.upperBounds = h.upperBounds[:i] + } + } + } + // Finally we know the final length of h.upperBounds and can make buckets + // for both counts: + h.counts[0].buckets = make([]uint64, len(h.upperBounds)) + h.counts[1].buckets = make([]uint64, len(h.upperBounds)) + + h.init(h) // Init self-collection. + return h +} + +type histogramCounts struct { + // sumBits contains the bits of the float64 representing the sum of all + // observations. sumBits and count have to go first in the struct to + // guarantee alignment for atomic operations. + // http://golang.org/pkg/sync/atomic/#pkg-note-BUG + sumBits uint64 + count uint64 + buckets []uint64 +} + +type histogram struct { + // countAndHotIdx enables lock-free writes with use of atomic updates. + // The most significant bit is the hot index [0 or 1] of the count field + // below. Observe calls update the hot one. All remaining bits count the + // number of Observe calls. Observe starts by incrementing this counter, + // and finishes by incrementing the count field in the respective + // histogramCounts, as a marker for completion. + // + // Calls of the Write method (which are non-mutating reads from the + // perspective of the histogram) swap the hot and cold counts under the + // writeMtx lock. A cooldown is awaited (while locked) by comparing the number of + // observations with the initiation count. Once they match, the + // last observation on the now cool one has completed. All cool fields must + // be merged into the new hot before releasing writeMtx. + // + // Fields with atomic access first! See alignment constraint: + // http://golang.org/pkg/sync/atomic/#pkg-note-BUG + countAndHotIdx uint64 + + selfCollector + desc *Desc + writeMtx sync.Mutex // Only used in the Write method. + + // Two counts, one is "hot" for lock-free observations, the other is + // "cold" for writing out a dto.Metric. It has to be an array of + // pointers to guarantee 64bit alignment of the histogramCounts, see + // http://golang.org/pkg/sync/atomic/#pkg-note-BUG.
+ counts [2]*histogramCounts + + upperBounds []float64 + labelPairs []*dto.LabelPair +} + +func (h *histogram) Desc() *Desc { + return h.desc +} + +func (h *histogram) Observe(v float64) { + // TODO(beorn7): For small numbers of buckets (<30), a linear search is + // slightly faster than the binary search. If we really care, we could + // switch from one search strategy to the other depending on the number + // of buckets. + // + // Microbenchmarks (BenchmarkHistogramNoLabels): + // 11 buckets: 38.3 ns/op linear - binary 48.7 ns/op + // 100 buckets: 78.1 ns/op linear - binary 54.9 ns/op + // 300 buckets: 154 ns/op linear - binary 61.6 ns/op + i := sort.SearchFloat64s(h.upperBounds, v) + + // We increment h.countAndHotIdx so that the counter in the lower + // 63 bits gets incremented. At the same time, we get the new value + // back, which we can use to find the currently-hot counts. + n := atomic.AddUint64(&h.countAndHotIdx, 1) + hotCounts := h.counts[n>>63] + + if i < len(h.upperBounds) { + atomic.AddUint64(&hotCounts.buckets[i], 1) + } + for { + oldBits := atomic.LoadUint64(&hotCounts.sumBits) + newBits := math.Float64bits(math.Float64frombits(oldBits) + v) + if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) { + break + } + } + // Increment count last as we take it as a signal that the observation + // is complete. + atomic.AddUint64(&hotCounts.count, 1) +} + +func (h *histogram) Write(out *dto.Metric) error { + // For simplicity, we protect this whole method by a mutex. It is not in + // the hot path, i.e. Observe is called much more often than Write. The + // complication of making Write lock-free isn't worth it, if possible at + // all. + h.writeMtx.Lock() + defer h.writeMtx.Unlock() + + // Adding 1<<63 switches the hot index (from 0 to 1 or from 1 to 0) + // without touching the count bits. See the struct comments for a full + // description of the algorithm. + n := atomic.AddUint64(&h.countAndHotIdx, 1<<63) + // count is contained unchanged in the lower 63 bits. + count := n & ((1 << 63) - 1) + // The most significant bit tells us which counts is hot. The complement + // is thus the cold one. + hotCounts := h.counts[n>>63] + coldCounts := h.counts[(^n)>>63] + + // Await cooldown. + for count != atomic.LoadUint64(&coldCounts.count) { + runtime.Gosched() // Let observations get work done. + } + + his := &dto.Histogram{ + Bucket: make([]*dto.Bucket, len(h.upperBounds)), + SampleCount: proto.Uint64(count), + SampleSum: proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))), + } + var cumCount uint64 + for i, upperBound := range h.upperBounds { + cumCount += atomic.LoadUint64(&coldCounts.buckets[i]) + his.Bucket[i] = &dto.Bucket{ + CumulativeCount: proto.Uint64(cumCount), + UpperBound: proto.Float64(upperBound), + } + } + + out.Histogram = his + out.Label = h.labelPairs + + // Finally add all the cold counts to the new hot counts and reset the cold counts. 
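+ // The sum is float64 bits stored in a uint64, so it cannot be merged with + // a single atomic add; the CAS loop below retries until adding the cold sum + // to the hot sum succeeds without racing a concurrent Observe.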
+ atomic.AddUint64(&hotCounts.count, count) + atomic.StoreUint64(&coldCounts.count, 0) + for { + oldBits := atomic.LoadUint64(&hotCounts.sumBits) + newBits := math.Float64bits(math.Float64frombits(oldBits) + his.GetSampleSum()) + if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) { + atomic.StoreUint64(&coldCounts.sumBits, 0) + break + } + } + for i := range h.upperBounds { + atomic.AddUint64(&hotCounts.buckets[i], atomic.LoadUint64(&coldCounts.buckets[i])) + atomic.StoreUint64(&coldCounts.buckets[i], 0) + } + return nil +} + +// HistogramVec is a Collector that bundles a set of Histograms that all share the +// same Desc, but have different values for their variable labels. This is used +// if you want to count the same thing partitioned by various dimensions +// (e.g. HTTP request latencies, partitioned by status code and method). Create +// instances with NewHistogramVec. +type HistogramVec struct { + *metricVec +} + +// NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and +// partitioned by the given label names. +func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec { + desc := NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + labelNames, + opts.ConstLabels, + ) + return &HistogramVec{ + metricVec: newMetricVec(desc, func(lvs ...string) Metric { + return newHistogram(desc, opts, lvs...) + }), + } +} + +// GetMetricWithLabelValues returns the Histogram for the given slice of label +// values (same order as the VariableLabels in Desc). If that combination of +// label values is accessed for the first time, a new Histogram is created. +// +// It is possible to call this method without using the returned Histogram to only +// create the new Histogram but leave it at its starting value, a Histogram without +// any observations. +// +// Keeping the Histogram for later use is possible (and should be considered if +// performance is critical), but keep in mind that Reset, DeleteLabelValues and +// Delete can be used to delete the Histogram from the HistogramVec. In that case, the +// Histogram will still exist, but it will not be exported anymore, even if a +// Histogram with the same label values is created later. See also the CounterVec +// example. +// +// An error is returned if the number of label values is not the same as the +// number of VariableLabels in Desc (minus any curried labels). +// +// Note that for more than one label value, this method is prone to mistakes +// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as +// an alternative to avoid that type of mistake. For higher label numbers, the +// latter has a much more readable (albeit more verbose) syntax, but it comes +// with a performance overhead (for creating and processing the Labels map). +// See also the GaugeVec example. +func (v *HistogramVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) { + metric, err := v.metricVec.getMetricWithLabelValues(lvs...) + if metric != nil { + return metric.(Observer), err + } + return nil, err +} + +// GetMetricWith returns the Histogram for the given Labels map (the label names +// must match those of the VariableLabels in Desc). If that label map is +// accessed for the first time, a new Histogram is created. Implications of +// creating a Histogram without using it and keeping the Histogram for later use +// are the same as for GetMetricWithLabelValues. 
+// +// An error is returned if the number and names of the Labels are inconsistent +// with those of the VariableLabels in Desc (minus any curried labels). +// +// This method is used for the same purpose as +// GetMetricWithLabelValues(...string). See there for pros and cons of the two +// methods. +func (v *HistogramVec) GetMetricWith(labels Labels) (Observer, error) { + metric, err := v.metricVec.getMetricWith(labels) + if metric != nil { + return metric.(Observer), err + } + return nil, err +} + +// WithLabelValues works as GetMetricWithLabelValues, but panics where +// GetMetricWithLabelValues would have returned an error. Not returning an +// error allows shortcuts like +// myVec.WithLabelValues("404", "GET").Observe(42.21) +func (v *HistogramVec) WithLabelValues(lvs ...string) Observer { + h, err := v.GetMetricWithLabelValues(lvs...) + if err != nil { + panic(err) + } + return h +} + +// With works as GetMetricWith but panics where GetMetricWith would have +// returned an error. Not returning an error allows shortcuts like +// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21) +func (v *HistogramVec) With(labels Labels) Observer { + h, err := v.GetMetricWith(labels) + if err != nil { + panic(err) + } + return h +} + +// CurryWith returns a vector curried with the provided labels, i.e. the +// returned vector has those labels pre-set for all labeled operations performed +// on it. The cardinality of the curried vector is reduced accordingly. The +// order of the remaining labels stays the same (just with the curried labels +// taken out of the sequence – which is relevant for the +// (GetMetric)WithLabelValues methods). It is possible to curry a curried +// vector, but only with labels not yet used for currying before. +// +// The metrics contained in the HistogramVec are shared between the curried and +// uncurried vectors. They are just accessed differently. Curried and uncurried +// vectors behave identically in terms of collection. Only one must be +// registered with a given registry (usually the uncurried version). The Reset +// method deletes all metrics, even if called on a curried vector. +func (v *HistogramVec) CurryWith(labels Labels) (ObserverVec, error) { + vec, err := v.curryWith(labels) + if vec != nil { + return &HistogramVec{vec}, err + } + return nil, err +} + +// MustCurryWith works as CurryWith but panics where CurryWith would have +// returned an error. +func (v *HistogramVec) MustCurryWith(labels Labels) ObserverVec { + vec, err := v.CurryWith(labels) + if err != nil { + panic(err) + } + return vec +} + +type constHistogram struct { + desc *Desc + count uint64 + sum float64 + buckets map[float64]uint64 + labelPairs []*dto.LabelPair +} + +func (h *constHistogram) Desc() *Desc { + return h.desc +} + +func (h *constHistogram) Write(out *dto.Metric) error { + his := &dto.Histogram{} + buckets := make([]*dto.Bucket, 0, len(h.buckets)) + + his.SampleCount = proto.Uint64(h.count) + his.SampleSum = proto.Float64(h.sum) + + for upperBound, count := range h.buckets { + buckets = append(buckets, &dto.Bucket{ + CumulativeCount: proto.Uint64(count), + UpperBound: proto.Float64(upperBound), + }) + } + + if len(buckets) > 0 { + sort.Sort(buckSort(buckets)) + } + his.Bucket = buckets + + out.Histogram = his + out.Label = h.labelPairs + + return nil +} + +// NewConstHistogram returns a metric representing a Prometheus histogram with +// fixed values for the count, sum, and bucket counts.
As those parameters +// cannot be changed, the returned value does not implement the Histogram +// interface (but only the Metric interface). Users of this package will not +// have much use for it in regular operations. However, when implementing custom +// Collectors, it is useful as a throw-away metric that is generated on the fly +// to send to Prometheus in the Collect method. +// +// buckets is a map of upper bounds to cumulative counts, excluding the +Inf +// bucket. +// +// NewConstHistogram returns an error if the length of labelValues is not +// consistent with the variable labels in Desc or if Desc is invalid. +func NewConstHistogram( + desc *Desc, + count uint64, + sum float64, + buckets map[float64]uint64, + labelValues ...string, +) (Metric, error) { + if desc.err != nil { + return nil, desc.err + } + if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil { + return nil, err + } + return &constHistogram{ + desc: desc, + count: count, + sum: sum, + buckets: buckets, + labelPairs: makeLabelPairs(desc, labelValues), + }, nil +} + +// MustNewConstHistogram is a version of NewConstHistogram that panics where +// NewConstHistogram would have returned an error. +func MustNewConstHistogram( + desc *Desc, + count uint64, + sum float64, + buckets map[float64]uint64, + labelValues ...string, +) Metric { + m, err := NewConstHistogram(desc, count, sum, buckets, labelValues...) + if err != nil { + panic(err) + } + return m +} + +type buckSort []*dto.Bucket + +func (s buckSort) Len() int { + return len(s) +} + +func (s buckSort) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s buckSort) Less(i, j int) bool { + return s[i].GetUpperBound() < s[j].GetUpperBound() +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go new file mode 100644 index 000000000..351c26e1a --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go @@ -0,0 +1,85 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +import ( + "sort" + + dto "github.com/prometheus/client_model/go" +) + +// metricSorter is a sortable slice of *dto.Metric. +type metricSorter []*dto.Metric + +func (s metricSorter) Len() int { + return len(s) +} + +func (s metricSorter) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s metricSorter) Less(i, j int) bool { + if len(s[i].Label) != len(s[j].Label) { + // This should not happen. The metrics are + // inconsistent. However, we have to deal with it, as + // people might use custom collectors or metric family injection + // to create inconsistent metrics. So let's simply compare the + // number of labels in this case. That will still yield + // reproducible sorting.
+ return len(s[i].Label) < len(s[j].Label) + } + for n, lp := range s[i].Label { + vi := lp.GetValue() + vj := s[j].Label[n].GetValue() + if vi != vj { + return vi < vj + } + } + + // We should never arrive here. Multiple metrics with the same + // label set in the same scrape will lead to undefined ingestion + // behavior. However, as above, we have to provide stable sorting + // here, even for inconsistent metrics. So sort equal metrics + // by their timestamp, with missing timestamps (implying "now") + // coming last. + if s[i].TimestampMs == nil { + return false + } + if s[j].TimestampMs == nil { + return true + } + return s[i].GetTimestampMs() < s[j].GetTimestampMs() +} + +// NormalizeMetricFamilies returns a MetricFamily slice with empty +// MetricFamilies pruned and the remaining MetricFamilies sorted by name within +// the slice, with the contained Metrics sorted within each MetricFamily. +func NormalizeMetricFamilies(metricFamiliesByName map[string]*dto.MetricFamily) []*dto.MetricFamily { + for _, mf := range metricFamiliesByName { + sort.Sort(metricSorter(mf.Metric)) + } + names := make([]string, 0, len(metricFamiliesByName)) + for name, mf := range metricFamiliesByName { + if len(mf.Metric) > 0 { + names = append(names, name) + } + } + sort.Strings(names) + result := make([]*dto.MetricFamily, 0, len(names)) + for _, name := range names { + result = append(result, metricFamiliesByName[name]) + } + return result +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/labels.go b/vendor/github.com/prometheus/client_golang/prometheus/labels.go new file mode 100644 index 000000000..2744443ac --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/labels.go @@ -0,0 +1,87 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "errors" + "fmt" + "strings" + "unicode/utf8" + + "github.com/prometheus/common/model" +) + +// Labels represents a collection of label name -> value mappings. This type is +// commonly used with the With(Labels) and GetMetricWith(Labels) methods of +// metric vector Collectors, e.g.: +// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42) +// +// The other use-case is the specification of constant label pairs in Opts or to +// create a Desc. +type Labels map[string]string + +// reservedLabelPrefix is a prefix which is not legal in user-supplied +// label names. 
+const reservedLabelPrefix = "__" + +var errInconsistentCardinality = errors.New("inconsistent label cardinality") + +func makeInconsistentCardinalityError(fqName string, labels, labelValues []string) error { + return fmt.Errorf( + "%s: %q has %d variable labels named %q but %d values %q were provided", + errInconsistentCardinality, fqName, + len(labels), labels, + len(labelValues), labelValues, + ) +} + +func validateValuesInLabels(labels Labels, expectedNumberOfValues int) error { + if len(labels) != expectedNumberOfValues { + return fmt.Errorf( + "%s: expected %d label values but got %d in %#v", + errInconsistentCardinality, expectedNumberOfValues, + len(labels), labels, + ) + } + + for name, val := range labels { + if !utf8.ValidString(val) { + return fmt.Errorf("label %s: value %q is not valid UTF-8", name, val) + } + } + + return nil +} + +func validateLabelValues(vals []string, expectedNumberOfValues int) error { + if len(vals) != expectedNumberOfValues { + return fmt.Errorf( + "%s: expected %d label values but got %d in %#v", + errInconsistentCardinality, expectedNumberOfValues, + len(vals), vals, + ) + } + + for _, val := range vals { + if !utf8.ValidString(val) { + return fmt.Errorf("label value %q is not valid UTF-8", val) + } + } + + return nil +} + +func checkLabelName(l string) bool { + return model.LabelName(l).IsValid() && !strings.HasPrefix(l, reservedLabelPrefix) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/metric.go new file mode 100644 index 000000000..55e6d86d5 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/metric.go @@ -0,0 +1,174 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "strings" + "time" + + "github.com/golang/protobuf/proto" + + dto "github.com/prometheus/client_model/go" +) + +const separatorByte byte = 255 + +// A Metric models a single sample value with its meta data being exported to +// Prometheus. Implementations of Metric in this package are Gauge, Counter, +// Histogram, Summary, and Untyped. +type Metric interface { + // Desc returns the descriptor for the Metric. This method idempotently + // returns the same descriptor throughout the lifetime of the + // Metric. The returned descriptor is immutable by contract. A Metric + // unable to describe itself must return an invalid descriptor (created + // with NewInvalidDesc). + Desc() *Desc + // Write encodes the Metric into a "Metric" Protocol Buffer data + // transmission object. + // + // Metric implementations must observe concurrency safety as reads of + // this metric may occur at any time, and any blocking occurs at the + // expense of total performance of rendering all registered + // metrics. Ideally, Metric implementations should support concurrent + // readers. 
+	//
+	// While populating dto.Metric, it is the responsibility of the
+	// implementation to ensure validity of the Metric protobuf (like valid
+	// UTF-8 strings or syntactically valid metric and label names). It is
+	// recommended to sort labels lexicographically. Callers of Write should
+	// still ensure sorting if they depend on it.
+	Write(*dto.Metric) error
+	// TODO(beorn7): The original rationale of passing in a pre-allocated
+	// dto.Metric protobuf to save allocations has disappeared. The
+	// signature of this method should be changed to "Write() (*dto.Metric,
+	// error)".
+}
+
+// Opts bundles the options for creating most Metric types. Each metric
+// implementation XXX has its own XXXOpts type, but in most cases, it is just
+// an alias of this type (which might change when the requirement arises).
+//
+// It is mandatory to set Name to a non-empty string. All other fields are
+// optional and can safely be left at their zero value, although it is strongly
+// encouraged to set a Help string.
+type Opts struct {
+	// Namespace, Subsystem, and Name are components of the fully-qualified
+	// name of the Metric (created by joining these components with
+	// "_"). Only Name is mandatory, the others merely help structuring the
+	// name. Note that the fully-qualified name of the metric must be a
+	// valid Prometheus metric name.
+	Namespace string
+	Subsystem string
+	Name      string
+
+	// Help provides information about this metric.
+	//
+	// Metrics with the same fully-qualified name must have the same Help
+	// string.
+	Help string
+
+	// ConstLabels are used to attach fixed labels to this metric. Metrics
+	// with the same fully-qualified name must have the same label names in
+	// their ConstLabels.
+	//
+	// ConstLabels are only used rarely. In particular, do not use them to
+	// attach the same labels to all your metrics. Those use cases are
+	// better covered by target labels set by the scraping Prometheus
+	// server, or by one specific metric (e.g. a build_info or a
+	// machine_role metric). See also
+	// https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels,-not-static-scraped-labels
+	ConstLabels Labels
+}
+
+// BuildFQName joins the given three name components by "_". Empty name
+// components are ignored. If the name parameter itself is empty, an empty
+// string is returned, no matter what. Metric implementations included in this
+// library use this function internally to generate the fully-qualified metric
+// name from the name component in their Opts. Users of the library will only
+// need this function if they implement their own Metric or instantiate a Desc
+// (with NewDesc) directly.
+func BuildFQName(namespace, subsystem, name string) string {
+	if name == "" {
+		return ""
+	}
+	switch {
+	case namespace != "" && subsystem != "":
+		return strings.Join([]string{namespace, subsystem, name}, "_")
+	case namespace != "":
+		return strings.Join([]string{namespace, name}, "_")
+	case subsystem != "":
+		return strings.Join([]string{subsystem, name}, "_")
+	}
+	return name
+}
+
+// labelPairSorter implements sort.Interface. It is used to sort a slice of
+// dto.LabelPair pointers.
+type labelPairSorter []*dto.LabelPair + +func (s labelPairSorter) Len() int { + return len(s) +} + +func (s labelPairSorter) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s labelPairSorter) Less(i, j int) bool { + return s[i].GetName() < s[j].GetName() +} + +type invalidMetric struct { + desc *Desc + err error +} + +// NewInvalidMetric returns a metric whose Write method always returns the +// provided error. It is useful if a Collector finds itself unable to collect +// a metric and wishes to report an error to the registry. +func NewInvalidMetric(desc *Desc, err error) Metric { + return &invalidMetric{desc, err} +} + +func (m *invalidMetric) Desc() *Desc { return m.desc } + +func (m *invalidMetric) Write(*dto.Metric) error { return m.err } + +type timestampedMetric struct { + Metric + t time.Time +} + +func (m timestampedMetric) Write(pb *dto.Metric) error { + e := m.Metric.Write(pb) + pb.TimestampMs = proto.Int64(m.t.Unix()*1000 + int64(m.t.Nanosecond()/1000000)) + return e +} + +// NewMetricWithTimestamp returns a new Metric wrapping the provided Metric in a +// way that it has an explicit timestamp set to the provided Time. This is only +// useful in rare cases as the timestamp of a Prometheus metric should usually +// be set by the Prometheus server during scraping. Exceptions include mirroring +// metrics with given timestamps from other metric +// sources. +// +// NewMetricWithTimestamp works best with MustNewConstMetric, +// MustNewConstHistogram, and MustNewConstSummary, see example. +// +// Currently, the exposition formats used by Prometheus are limited to +// millisecond resolution. Thus, the provided time will be rounded down to the +// next full millisecond value. +func NewMetricWithTimestamp(t time.Time, m Metric) Metric { + return timestampedMetric{Metric: m, t: t} +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/observer.go b/vendor/github.com/prometheus/client_golang/prometheus/observer.go new file mode 100644 index 000000000..5806cd09e --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/observer.go @@ -0,0 +1,52 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +// Observer is the interface that wraps the Observe method, which is used by +// Histogram and Summary to add observations. +type Observer interface { + Observe(float64) +} + +// The ObserverFunc type is an adapter to allow the use of ordinary +// functions as Observers. If f is a function with the appropriate +// signature, ObserverFunc(f) is an Observer that calls f. +// +// This adapter is usually used in connection with the Timer type, and there are +// two general use cases: +// +// The most common one is to use a Gauge as the Observer for a Timer. +// See the "Gauge" Timer example. +// +// The more advanced use case is to create a function that dynamically decides +// which Observer to use for observing the duration. See the "Complex" Timer +// example. 
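+//
+// A minimal sketch of the Gauge use case (NewTimer and ObserveDuration are
+// assumed to be defined elsewhere in this package, in timer.go):
+//
+//	timer := prometheus.NewTimer(prometheus.ObserverFunc(gauge.Set))
+//	defer timer.ObserveDuration()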
+type ObserverFunc func(float64) + +// Observe calls f(value). It implements Observer. +func (f ObserverFunc) Observe(value float64) { + f(value) +} + +// ObserverVec is an interface implemented by `HistogramVec` and `SummaryVec`. +type ObserverVec interface { + GetMetricWith(Labels) (Observer, error) + GetMetricWithLabelValues(lvs ...string) (Observer, error) + With(Labels) Observer + WithLabelValues(...string) Observer + CurryWith(Labels) (ObserverVec, error) + MustCurryWith(Labels) ObserverVec + + Collector +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go new file mode 100644 index 000000000..9b8097942 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go @@ -0,0 +1,151 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "errors" + "os" +) + +type processCollector struct { + collectFn func(chan<- Metric) + pidFn func() (int, error) + reportErrors bool + cpuTotal *Desc + openFDs, maxFDs *Desc + vsize, maxVsize *Desc + rss *Desc + startTime *Desc +} + +// ProcessCollectorOpts defines the behavior of a process metrics collector +// created with NewProcessCollector. +type ProcessCollectorOpts struct { + // PidFn returns the PID of the process the collector collects metrics + // for. It is called upon each collection. By default, the PID of the + // current process is used, as determined on construction time by + // calling os.Getpid(). + PidFn func() (int, error) + // If non-empty, each of the collected metrics is prefixed by the + // provided string and an underscore ("_"). + Namespace string + // If true, any error encountered during collection is reported as an + // invalid metric (see NewInvalidMetric). Otherwise, errors are ignored + // and the collected metrics will be incomplete. (Possibly, no metrics + // will be collected at all.) While that's usually not desired, it is + // appropriate for the common "mix-in" of process metrics, where process + // metrics are nice to have, but failing to collect them should not + // disrupt the collection of the remaining metrics. + ReportErrors bool +} + +// NewProcessCollector returns a collector which exports the current state of +// process metrics including CPU, memory and file descriptor usage as well as +// the process start time. The detailed behavior is defined by the provided +// ProcessCollectorOpts. The zero value of ProcessCollectorOpts creates a +// collector for the current process with an empty namespace string and no error +// reporting. +// +// The collector only works on operating systems with a Linux-style proc +// filesystem and on Microsoft Windows. On other operating systems, it will not +// collect any metrics. 
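+//
+// A typical use is to register the collector once at startup, sketched here
+// with the default registerer (MustRegister is assumed from elsewhere in this
+// package):
+//
+//	prometheus.MustRegister(prometheus.NewProcessCollector(
+//		prometheus.ProcessCollectorOpts{},
+//	))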
+func NewProcessCollector(opts ProcessCollectorOpts) Collector { + ns := "" + if len(opts.Namespace) > 0 { + ns = opts.Namespace + "_" + } + + c := &processCollector{ + reportErrors: opts.ReportErrors, + cpuTotal: NewDesc( + ns+"process_cpu_seconds_total", + "Total user and system CPU time spent in seconds.", + nil, nil, + ), + openFDs: NewDesc( + ns+"process_open_fds", + "Number of open file descriptors.", + nil, nil, + ), + maxFDs: NewDesc( + ns+"process_max_fds", + "Maximum number of open file descriptors.", + nil, nil, + ), + vsize: NewDesc( + ns+"process_virtual_memory_bytes", + "Virtual memory size in bytes.", + nil, nil, + ), + maxVsize: NewDesc( + ns+"process_virtual_memory_max_bytes", + "Maximum amount of virtual memory available in bytes.", + nil, nil, + ), + rss: NewDesc( + ns+"process_resident_memory_bytes", + "Resident memory size in bytes.", + nil, nil, + ), + startTime: NewDesc( + ns+"process_start_time_seconds", + "Start time of the process since unix epoch in seconds.", + nil, nil, + ), + } + + if opts.PidFn == nil { + pid := os.Getpid() + c.pidFn = func() (int, error) { return pid, nil } + } else { + c.pidFn = opts.PidFn + } + + // Set up process metric collection if supported by the runtime. + if canCollectProcess() { + c.collectFn = c.processCollect + } else { + c.collectFn = func(ch chan<- Metric) { + c.reportError(ch, nil, errors.New("process metrics not supported on this platform")) + } + } + + return c +} + +// Describe returns all descriptions of the collector. +func (c *processCollector) Describe(ch chan<- *Desc) { + ch <- c.cpuTotal + ch <- c.openFDs + ch <- c.maxFDs + ch <- c.vsize + ch <- c.maxVsize + ch <- c.rss + ch <- c.startTime +} + +// Collect returns the current state of all metrics of the collector. +func (c *processCollector) Collect(ch chan<- Metric) { + c.collectFn(ch) +} + +func (c *processCollector) reportError(ch chan<- Metric, desc *Desc, err error) { + if !c.reportErrors { + return + } + if desc == nil { + desc = NewInvalidDesc(err) + } + ch <- NewInvalidMetric(desc, err) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go new file mode 100644 index 000000000..3117461cd --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go @@ -0,0 +1,65 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// +build !windows + +package prometheus + +import ( + "github.com/prometheus/procfs" +) + +func canCollectProcess() bool { + _, err := procfs.NewDefaultFS() + return err == nil +} + +func (c *processCollector) processCollect(ch chan<- Metric) { + pid, err := c.pidFn() + if err != nil { + c.reportError(ch, nil, err) + return + } + + p, err := procfs.NewProc(pid) + if err != nil { + c.reportError(ch, nil, err) + return + } + + if stat, err := p.Stat(); err == nil { + ch <- MustNewConstMetric(c.cpuTotal, CounterValue, stat.CPUTime()) + ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(stat.VirtualMemory())) + ch <- MustNewConstMetric(c.rss, GaugeValue, float64(stat.ResidentMemory())) + if startTime, err := stat.StartTime(); err == nil { + ch <- MustNewConstMetric(c.startTime, GaugeValue, startTime) + } else { + c.reportError(ch, c.startTime, err) + } + } else { + c.reportError(ch, nil, err) + } + + if fds, err := p.FileDescriptorsLen(); err == nil { + ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(fds)) + } else { + c.reportError(ch, c.openFDs, err) + } + + if limits, err := p.Limits(); err == nil { + ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(limits.OpenFiles)) + ch <- MustNewConstMetric(c.maxVsize, GaugeValue, float64(limits.AddressSpace)) + } else { + c.reportError(ch, nil, err) + } +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go new file mode 100644 index 000000000..e0b935d1f --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go @@ -0,0 +1,112 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package prometheus + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +func canCollectProcess() bool { + return true +} + +var ( + modpsapi = syscall.NewLazyDLL("psapi.dll") + modkernel32 = syscall.NewLazyDLL("kernel32.dll") + + procGetProcessMemoryInfo = modpsapi.NewProc("GetProcessMemoryInfo") + procGetProcessHandleCount = modkernel32.NewProc("GetProcessHandleCount") +) + +type processMemoryCounters struct { + // https://docs.microsoft.com/en-us/windows/desktop/api/psapi/ns-psapi-_process_memory_counters_ex + _ uint32 + PageFaultCount uint32 + PeakWorkingSetSize uint64 + WorkingSetSize uint64 + QuotaPeakPagedPoolUsage uint64 + QuotaPagedPoolUsage uint64 + QuotaPeakNonPagedPoolUsage uint64 + QuotaNonPagedPoolUsage uint64 + PagefileUsage uint64 + PeakPagefileUsage uint64 + PrivateUsage uint64 +} + +func getProcessMemoryInfo(handle windows.Handle) (processMemoryCounters, error) { + mem := processMemoryCounters{} + r1, _, err := procGetProcessMemoryInfo.Call( + uintptr(handle), + uintptr(unsafe.Pointer(&mem)), + uintptr(unsafe.Sizeof(mem)), + ) + if r1 != 1 { + return mem, err + } else { + return mem, nil + } +} + +func getProcessHandleCount(handle windows.Handle) (uint32, error) { + var count uint32 + r1, _, err := procGetProcessHandleCount.Call( + uintptr(handle), + uintptr(unsafe.Pointer(&count)), + ) + if r1 != 1 { + return 0, err + } else { + return count, nil + } +} + +func (c *processCollector) processCollect(ch chan<- Metric) { + h, err := windows.GetCurrentProcess() + if err != nil { + c.reportError(ch, nil, err) + return + } + + var startTime, exitTime, kernelTime, userTime windows.Filetime + err = windows.GetProcessTimes(h, &startTime, &exitTime, &kernelTime, &userTime) + if err != nil { + c.reportError(ch, nil, err) + return + } + ch <- MustNewConstMetric(c.startTime, GaugeValue, float64(startTime.Nanoseconds()/1e9)) + ch <- MustNewConstMetric(c.cpuTotal, CounterValue, fileTimeToSeconds(kernelTime)+fileTimeToSeconds(userTime)) + + mem, err := getProcessMemoryInfo(h) + if err != nil { + c.reportError(ch, nil, err) + return + } + ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(mem.PrivateUsage)) + ch <- MustNewConstMetric(c.rss, GaugeValue, float64(mem.WorkingSetSize)) + + handles, err := getProcessHandleCount(h) + if err != nil { + c.reportError(ch, nil, err) + return + } + ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(handles)) + ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(16*1024*1024)) // Windows has a hard-coded max limit, not per-process. +} + +func fileTimeToSeconds(ft windows.Filetime) float64 { + return float64(uint64(ft.HighDateTime)<<32+uint64(ft.LowDateTime)) / 1e7 +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go new file mode 100644 index 000000000..fa535684f --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go @@ -0,0 +1,357 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package promhttp + +import ( + "bufio" + "io" + "net" + "net/http" +) + +const ( + closeNotifier = 1 << iota + flusher + hijacker + readerFrom + pusher +) + +type delegator interface { + http.ResponseWriter + + Status() int + Written() int64 +} + +type responseWriterDelegator struct { + http.ResponseWriter + + status int + written int64 + wroteHeader bool + observeWriteHeader func(int) +} + +func (r *responseWriterDelegator) Status() int { + return r.status +} + +func (r *responseWriterDelegator) Written() int64 { + return r.written +} + +func (r *responseWriterDelegator) WriteHeader(code int) { + r.status = code + r.wroteHeader = true + r.ResponseWriter.WriteHeader(code) + if r.observeWriteHeader != nil { + r.observeWriteHeader(code) + } +} + +func (r *responseWriterDelegator) Write(b []byte) (int, error) { + if !r.wroteHeader { + r.WriteHeader(http.StatusOK) + } + n, err := r.ResponseWriter.Write(b) + r.written += int64(n) + return n, err +} + +type closeNotifierDelegator struct{ *responseWriterDelegator } +type flusherDelegator struct{ *responseWriterDelegator } +type hijackerDelegator struct{ *responseWriterDelegator } +type readerFromDelegator struct{ *responseWriterDelegator } +type pusherDelegator struct{ *responseWriterDelegator } + +func (d closeNotifierDelegator) CloseNotify() <-chan bool { + //lint:ignore SA1019 http.CloseNotifier is deprecated but we don't want to + //remove support from client_golang yet. + return d.ResponseWriter.(http.CloseNotifier).CloseNotify() +} +func (d flusherDelegator) Flush() { + d.ResponseWriter.(http.Flusher).Flush() +} +func (d hijackerDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) { + return d.ResponseWriter.(http.Hijacker).Hijack() +} +func (d readerFromDelegator) ReadFrom(re io.Reader) (int64, error) { + if !d.wroteHeader { + d.WriteHeader(http.StatusOK) + } + n, err := d.ResponseWriter.(io.ReaderFrom).ReadFrom(re) + d.written += n + return n, err +} +func (d pusherDelegator) Push(target string, opts *http.PushOptions) error { + return d.ResponseWriter.(http.Pusher).Push(target, opts) +} + +var pickDelegator = make([]func(*responseWriterDelegator) delegator, 32) + +func init() { + // TODO(beorn7): Code generation would help here. 
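+	// Each index below is the bitwise OR of the closeNotifier, flusher,
+	// hijacker, readerFrom, and pusher flags, so pickDelegator[id] returns
+	// a delegator exposing exactly the optional interfaces detected on the
+	// wrapped http.ResponseWriter in newDelegator below.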
+ pickDelegator[0] = func(d *responseWriterDelegator) delegator { // 0 + return d + } + pickDelegator[closeNotifier] = func(d *responseWriterDelegator) delegator { // 1 + return closeNotifierDelegator{d} + } + pickDelegator[flusher] = func(d *responseWriterDelegator) delegator { // 2 + return flusherDelegator{d} + } + pickDelegator[flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 3 + return struct { + *responseWriterDelegator + http.Flusher + http.CloseNotifier + }{d, flusherDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[hijacker] = func(d *responseWriterDelegator) delegator { // 4 + return hijackerDelegator{d} + } + pickDelegator[hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 5 + return struct { + *responseWriterDelegator + http.Hijacker + http.CloseNotifier + }{d, hijackerDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 6 + return struct { + *responseWriterDelegator + http.Hijacker + http.Flusher + }{d, hijackerDelegator{d}, flusherDelegator{d}} + } + pickDelegator[hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 7 + return struct { + *responseWriterDelegator + http.Hijacker + http.Flusher + http.CloseNotifier + }{d, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[readerFrom] = func(d *responseWriterDelegator) delegator { // 8 + return readerFromDelegator{d} + } + pickDelegator[readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 9 + return struct { + *responseWriterDelegator + io.ReaderFrom + http.CloseNotifier + }{d, readerFromDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 10 + return struct { + *responseWriterDelegator + io.ReaderFrom + http.Flusher + }{d, readerFromDelegator{d}, flusherDelegator{d}} + } + pickDelegator[readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 11 + return struct { + *responseWriterDelegator + io.ReaderFrom + http.Flusher + http.CloseNotifier + }{d, readerFromDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 12 + return struct { + *responseWriterDelegator + io.ReaderFrom + http.Hijacker + }{d, readerFromDelegator{d}, hijackerDelegator{d}} + } + pickDelegator[readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 13 + return struct { + *responseWriterDelegator + io.ReaderFrom + http.Hijacker + http.CloseNotifier + }{d, readerFromDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 14 + return struct { + *responseWriterDelegator + io.ReaderFrom + http.Hijacker + http.Flusher + }{d, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}} + } + pickDelegator[readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 15 + return struct { + *responseWriterDelegator + io.ReaderFrom + http.Hijacker + http.Flusher + http.CloseNotifier + }{d, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[pusher] = func(d *responseWriterDelegator) delegator { // 16 + return pusherDelegator{d} + } + pickDelegator[pusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 17 
+ return struct { + *responseWriterDelegator + http.Pusher + http.CloseNotifier + }{d, pusherDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[pusher+flusher] = func(d *responseWriterDelegator) delegator { // 18 + return struct { + *responseWriterDelegator + http.Pusher + http.Flusher + }{d, pusherDelegator{d}, flusherDelegator{d}} + } + pickDelegator[pusher+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 19 + return struct { + *responseWriterDelegator + http.Pusher + http.Flusher + http.CloseNotifier + }{d, pusherDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[pusher+hijacker] = func(d *responseWriterDelegator) delegator { // 20 + return struct { + *responseWriterDelegator + http.Pusher + http.Hijacker + }{d, pusherDelegator{d}, hijackerDelegator{d}} + } + pickDelegator[pusher+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 21 + return struct { + *responseWriterDelegator + http.Pusher + http.Hijacker + http.CloseNotifier + }{d, pusherDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[pusher+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 22 + return struct { + *responseWriterDelegator + http.Pusher + http.Hijacker + http.Flusher + }{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}} + } + pickDelegator[pusher+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { //23 + return struct { + *responseWriterDelegator + http.Pusher + http.Hijacker + http.Flusher + http.CloseNotifier + }{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[pusher+readerFrom] = func(d *responseWriterDelegator) delegator { // 24 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + }{d, pusherDelegator{d}, readerFromDelegator{d}} + } + pickDelegator[pusher+readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 25 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + http.CloseNotifier + }{d, pusherDelegator{d}, readerFromDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[pusher+readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 26 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + http.Flusher + }{d, pusherDelegator{d}, readerFromDelegator{d}, flusherDelegator{d}} + } + pickDelegator[pusher+readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 27 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + http.Flusher + http.CloseNotifier + }{d, pusherDelegator{d}, readerFromDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[pusher+readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 28 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + http.Hijacker + }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}} + } + pickDelegator[pusher+readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 29 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + http.Hijacker + http.CloseNotifier + }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[pusher+readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 30 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + 
http.Hijacker
+			http.Flusher
+		}{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}}
+	}
+	pickDelegator[pusher+readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 31
+		return struct {
+			*responseWriterDelegator
+			http.Pusher
+			io.ReaderFrom
+			http.Hijacker
+			http.Flusher
+			http.CloseNotifier
+		}{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
+	}
+}
+
+func newDelegator(w http.ResponseWriter, observeWriteHeaderFunc func(int)) delegator {
+	d := &responseWriterDelegator{
+		ResponseWriter:     w,
+		observeWriteHeader: observeWriteHeaderFunc,
+	}
+
+	id := 0
+	//lint:ignore SA1019 http.CloseNotifier is deprecated but we don't want to
+	//remove support from client_golang yet.
+	if _, ok := w.(http.CloseNotifier); ok {
+		id += closeNotifier
+	}
+	if _, ok := w.(http.Flusher); ok {
+		id += flusher
+	}
+	if _, ok := w.(http.Hijacker); ok {
+		id += hijacker
+	}
+	if _, ok := w.(io.ReaderFrom); ok {
+		id += readerFrom
+	}
+	if _, ok := w.(http.Pusher); ok {
+		id += pusher
+	}
+
+	return pickDelegator[id](d)
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go
new file mode 100644
index 000000000..cea5a90fd
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go
@@ -0,0 +1,349 @@
+// Copyright 2016 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package promhttp provides tooling around HTTP servers and clients.
+//
+// First, the package allows the creation of http.Handler instances to expose
+// Prometheus metrics via HTTP. promhttp.Handler acts on the
+// prometheus.DefaultGatherer. With HandlerFor, you can create a handler for a
+// custom registry or anything that implements the Gatherer interface. It also
+// allows the creation of handlers that act differently on errors or that log
+// errors.
+//
+// Second, the package provides tooling to instrument instances of http.Handler
+// via middleware. Middleware wrappers follow the naming scheme
+// InstrumentHandlerX, where X describes the intended use of the middleware.
+// See each function's doc comment for specific details.
+//
+// Finally, the package allows for an http.RoundTripper to be instrumented via
+// middleware. Middleware wrappers follow the naming scheme
+// InstrumentRoundTripperX, where X describes the intended use of the
+// middleware. See each function's doc comment for specific details.
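+//
+// In the simplest case, metrics are exposed with the Handler function defined
+// below (a sketch; the net/http and log imports are assumed):
+//
+//	http.Handle("/metrics", promhttp.Handler())
+//	log.Fatal(http.ListenAndServe(":8080", nil))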
+package promhttp
+
+import (
+	"compress/gzip"
+	"fmt"
+	"io"
+	"net/http"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/prometheus/common/expfmt"
+
+	"github.com/prometheus/client_golang/prometheus"
+)
+
+const (
+	contentTypeHeader     = "Content-Type"
+	contentEncodingHeader = "Content-Encoding"
+	acceptEncodingHeader  = "Accept-Encoding"
+)
+
+var gzipPool = sync.Pool{
+	New: func() interface{} {
+		return gzip.NewWriter(nil)
+	},
+}
+
+// Handler returns an http.Handler for the prometheus.DefaultGatherer, using
+// default HandlerOpts, i.e. it reports the first error as an HTTP error, has
+// no error logging, and applies compression if requested by the client.
+//
+// The returned http.Handler is already instrumented using the
+// InstrumentMetricHandler function and the prometheus.DefaultRegisterer. If you
+// create multiple http.Handlers by separate calls of the Handler function, the
+// metrics used for instrumentation will be shared between them, providing
+// global scrape counts.
+//
+// This function is meant to cover the bulk of basic use cases. If you are doing
+// anything that requires more customization (including using a non-default
+// Gatherer, different instrumentation, and non-default HandlerOpts), use the
+// HandlerFor function. See there for details.
+func Handler() http.Handler {
+	return InstrumentMetricHandler(
+		prometheus.DefaultRegisterer, HandlerFor(prometheus.DefaultGatherer, HandlerOpts{}),
+	)
+}
+
+// HandlerFor returns an uninstrumented http.Handler for the provided
+// Gatherer. The behavior of the Handler is defined by the provided
+// HandlerOpts. Thus, HandlerFor is useful to create http.Handlers for custom
+// Gatherers, with non-default HandlerOpts, and/or with custom (or no)
+// instrumentation. Use the InstrumentMetricHandler function to apply the same
+// kind of instrumentation as is used by the Handler function.
+func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler {
+	var (
+		inFlightSem chan struct{}
+		errCnt      = prometheus.NewCounterVec(
+			prometheus.CounterOpts{
+				Name: "promhttp_metric_handler_errors_total",
+				Help: "Total number of internal errors encountered by the promhttp metric handler.",
+			},
+			[]string{"cause"},
+		)
+	)
+
+	if opts.MaxRequestsInFlight > 0 {
+		inFlightSem = make(chan struct{}, opts.MaxRequestsInFlight)
+	}
+	if opts.Registry != nil {
+		// Initialize all possibilities that can occur below.
+		errCnt.WithLabelValues("gathering")
+		errCnt.WithLabelValues("encoding")
+		if err := opts.Registry.Register(errCnt); err != nil {
+			if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
+				errCnt = are.ExistingCollector.(*prometheus.CounterVec)
+			} else {
+				panic(err)
+			}
+		}
+	}
+
+	h := http.HandlerFunc(func(rsp http.ResponseWriter, req *http.Request) {
+		if inFlightSem != nil {
+			select {
+			case inFlightSem <- struct{}{}: // All good, carry on.
+				defer func() { <-inFlightSem }()
+			default:
+				http.Error(rsp, fmt.Sprintf(
+					"Limit of concurrent requests reached (%d), try again later.", opts.MaxRequestsInFlight,
+				), http.StatusServiceUnavailable)
+				return
+			}
+		}
+		mfs, err := reg.Gather()
+		if err != nil {
+			if opts.ErrorLog != nil {
+				opts.ErrorLog.Println("error gathering metrics:", err)
+			}
+			errCnt.WithLabelValues("gathering").Inc()
+			switch opts.ErrorHandling {
+			case PanicOnError:
+				panic(err)
+			case ContinueOnError:
+				if len(mfs) == 0 {
+					// Still report the error if no metrics have been gathered.
+ httpError(rsp, err) + return + } + case HTTPErrorOnError: + httpError(rsp, err) + return + } + } + + contentType := expfmt.Negotiate(req.Header) + header := rsp.Header() + header.Set(contentTypeHeader, string(contentType)) + + w := io.Writer(rsp) + if !opts.DisableCompression && gzipAccepted(req.Header) { + header.Set(contentEncodingHeader, "gzip") + gz := gzipPool.Get().(*gzip.Writer) + defer gzipPool.Put(gz) + + gz.Reset(w) + defer gz.Close() + + w = gz + } + + enc := expfmt.NewEncoder(w, contentType) + + var lastErr error + for _, mf := range mfs { + if err := enc.Encode(mf); err != nil { + lastErr = err + if opts.ErrorLog != nil { + opts.ErrorLog.Println("error encoding and sending metric family:", err) + } + errCnt.WithLabelValues("encoding").Inc() + switch opts.ErrorHandling { + case PanicOnError: + panic(err) + case ContinueOnError: + // Handled later. + case HTTPErrorOnError: + httpError(rsp, err) + return + } + } + } + + if lastErr != nil { + httpError(rsp, lastErr) + } + }) + + if opts.Timeout <= 0 { + return h + } + return http.TimeoutHandler(h, opts.Timeout, fmt.Sprintf( + "Exceeded configured timeout of %v.\n", + opts.Timeout, + )) +} + +// InstrumentMetricHandler is usually used with an http.Handler returned by the +// HandlerFor function. It instruments the provided http.Handler with two +// metrics: A counter vector "promhttp_metric_handler_requests_total" to count +// scrapes partitioned by HTTP status code, and a gauge +// "promhttp_metric_handler_requests_in_flight" to track the number of +// simultaneous scrapes. This function idempotently registers collectors for +// both metrics with the provided Registerer. It panics if the registration +// fails. The provided metrics are useful to see how many scrapes hit the +// monitored target (which could be from different Prometheus servers or other +// scrapers), and how often they overlap (which would result in more than one +// scrape in flight at the same time). Note that the scrapes-in-flight gauge +// will contain the scrape by which it is exposed, while the scrape counter will +// only get incremented after the scrape is complete (as only then the status +// code is known). For tracking scrape durations, use the +// "scrape_duration_seconds" gauge created by the Prometheus server upon each +// scrape. +func InstrumentMetricHandler(reg prometheus.Registerer, handler http.Handler) http.Handler { + cnt := prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "promhttp_metric_handler_requests_total", + Help: "Total number of scrapes by HTTP status code.", + }, + []string{"code"}, + ) + // Initialize the most likely HTTP status codes. + cnt.WithLabelValues("200") + cnt.WithLabelValues("500") + cnt.WithLabelValues("503") + if err := reg.Register(cnt); err != nil { + if are, ok := err.(prometheus.AlreadyRegisteredError); ok { + cnt = are.ExistingCollector.(*prometheus.CounterVec) + } else { + panic(err) + } + } + + gge := prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "promhttp_metric_handler_requests_in_flight", + Help: "Current number of scrapes being served.", + }) + if err := reg.Register(gge); err != nil { + if are, ok := err.(prometheus.AlreadyRegisteredError); ok { + gge = are.ExistingCollector.(prometheus.Gauge) + } else { + panic(err) + } + } + + return InstrumentHandlerCounter(cnt, InstrumentHandlerInFlight(gge, handler)) +} + +// HandlerErrorHandling defines how a Handler serving metrics will handle +// errors. 
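+//
+// The mode is selected through the ErrorHandling field of HandlerOpts, for
+// example (a sketch; reg is any prometheus.Gatherer):
+//
+//	h := promhttp.HandlerFor(reg, promhttp.HandlerOpts{
+//		ErrorHandling: promhttp.ContinueOnError,
+//	})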
+type HandlerErrorHandling int
+
+// These constants cause handlers serving metrics to behave as described if
+// errors are encountered.
+const (
+	// Serve an HTTP status code 500 upon the first error
+	// encountered. Report the error message in the body.
+	HTTPErrorOnError HandlerErrorHandling = iota
+	// Ignore errors and try to serve as many metrics as possible. However,
+	// if no metrics can be served, serve an HTTP status code 500 and the
+	// last error message in the body. Only use this in deliberate "best
+	// effort" metrics collection scenarios. In this case, it is highly
+	// recommended to provide other means of detecting errors: By setting an
+	// ErrorLog in HandlerOpts, the errors are logged. By providing a
+	// Registry in HandlerOpts, the exposed metrics include an error counter
+	// "promhttp_metric_handler_errors_total", which can be used for
+	// alerts.
+	ContinueOnError
+	// Panic upon the first error encountered (useful for "crash only" apps).
+	PanicOnError
+)
+
+// Logger is the minimal interface HandlerOpts needs for logging. Note that
+// log.Logger from the standard library implements this interface, and it is
+// easy to implement by custom loggers, if they don't do so already anyway.
+type Logger interface {
+	Println(v ...interface{})
+}
+
+// HandlerOpts specifies options for how to serve metrics via an http.Handler.
+// The zero value of HandlerOpts is a reasonable default.
+type HandlerOpts struct {
+	// ErrorLog specifies an optional logger for errors collecting and
+	// serving metrics. If nil, errors are not logged at all.
+	ErrorLog Logger
+	// ErrorHandling defines how errors are handled. Note that errors are
+	// logged regardless of the configured ErrorHandling, provided that
+	// ErrorLog is not nil.
+	ErrorHandling HandlerErrorHandling
+	// If Registry is not nil, it is used to register a metric
+	// "promhttp_metric_handler_errors_total", partitioned by "cause". A
+	// failed registration causes a panic. Note that this error counter is
+	// different from the instrumentation you get from the various
+	// InstrumentHandler... helpers. It counts errors that don't necessarily
+	// result in a non-2xx HTTP status code. There are two typical cases:
+	// (1) Encoding errors that only happen after streaming of the HTTP body
+	// has already started (and the status code 200 has been sent). This
+	// should only happen with custom collectors. (2) Collection errors with
+	// no effect on the HTTP status code because ErrorHandling is set to
+	// ContinueOnError.
+	Registry prometheus.Registerer
+	// If DisableCompression is true, the handler will never compress the
+	// response, even if requested by the client.
+	DisableCompression bool
+	// The number of concurrent HTTP requests is limited to
+	// MaxRequestsInFlight. Additional requests are responded to with 503
+	// Service Unavailable and a suitable message in the body. If
+	// MaxRequestsInFlight is 0 or negative, no limit is applied.
+	MaxRequestsInFlight int
+	// If handling a request takes longer than Timeout, it is responded to
+	// with 503 Service Unavailable and a suitable message. No timeout is
+	// applied if Timeout is 0 or negative. Note that with the current
+	// implementation, reaching the timeout simply ends the HTTP requests as
+	// described above (and even that only if sending of the body hasn't
+	// started yet), while the bulk work of gathering all the metrics keeps
+	// running in the background (with the eventual result to be thrown
+	// away). Until the implementation is improved, it is recommended to
+	// implement a separate timeout in potentially slow Collectors.
+	Timeout time.Duration
+}
+
+// gzipAccepted returns whether the client will accept gzip-encoded content.
+func gzipAccepted(header http.Header) bool {
+	a := header.Get(acceptEncodingHeader)
+	parts := strings.Split(a, ",")
+	for _, part := range parts {
+		part = strings.TrimSpace(part)
+		if part == "gzip" || strings.HasPrefix(part, "gzip;") {
+			return true
+		}
+	}
+	return false
+}
+
+// httpError removes any content-encoding header and then calls http.Error with
+// the provided error and http.StatusInternalServerError. Error contents are
+// supposed to be uncompressed plain text. However, same as with a plain
+// http.Error, any header settings will be void if the header has already been
+// sent. The error message will still be written to the writer, but it will
+// probably be of limited use.
+func httpError(rsp http.ResponseWriter, err error) {
+	rsp.Header().Del(contentEncodingHeader)
+	http.Error(
+		rsp,
+		"An error has occurred while serving metrics:\n\n"+err.Error(),
+		http.StatusInternalServerError,
+	)
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go
new file mode 100644
index 000000000..83c49b66a
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go
@@ -0,0 +1,219 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package promhttp
+
+import (
+	"crypto/tls"
+	"net/http"
+	"net/http/httptrace"
+	"time"
+
+	"github.com/prometheus/client_golang/prometheus"
+)
+
+// The RoundTripperFunc type is an adapter to allow the use of ordinary
+// functions as RoundTrippers. If f is a function with the appropriate
+// signature, RoundTripperFunc(f) is a RoundTripper that calls f.
+type RoundTripperFunc func(req *http.Request) (*http.Response, error)
+
+// RoundTrip implements the RoundTripper interface.
+func (rt RoundTripperFunc) RoundTrip(r *http.Request) (*http.Response, error) {
+	return rt(r)
+}
+
+// InstrumentRoundTripperInFlight is a middleware that wraps the provided
+// http.RoundTripper. It sets the provided prometheus.Gauge to the number of
+// requests currently handled by the wrapped http.RoundTripper.
+//
+// See the example for InstrumentRoundTripperDuration for example usage.
+func InstrumentRoundTripperInFlight(gauge prometheus.Gauge, next http.RoundTripper) RoundTripperFunc {
+	return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
+		gauge.Inc()
+		defer gauge.Dec()
+		return next.RoundTrip(r)
+	})
+}
+
+// InstrumentRoundTripperCounter is a middleware that wraps the provided
+// http.RoundTripper to observe the request result with the provided CounterVec.
+// The CounterVec must have zero, one, or two non-const non-curried labels. For
+// those, the only allowed label names are "code" and "method". The function
+// panics otherwise. Partitioning of the CounterVec happens by HTTP status code
+// and/or HTTP method if the respective instance label names are present in the
+// CounterVec. For unpartitioned counting, use a CounterVec with zero labels.
+//
+// If the wrapped RoundTripper panics or returns a non-nil error, the Counter
+// is not incremented.
+//
+// See the example for InstrumentRoundTripperDuration for example usage.
+func InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.RoundTripper) RoundTripperFunc {
+	code, method := checkLabels(counter)
+
+	return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
+		resp, err := next.RoundTrip(r)
+		if err == nil {
+			counter.With(labels(code, method, r.Method, resp.StatusCode)).Inc()
+		}
+		return resp, err
+	})
+}
+
+// InstrumentRoundTripperDuration is a middleware that wraps the provided
+// http.RoundTripper to observe the request duration with the provided
+// ObserverVec. The ObserverVec must have zero, one, or two non-const
+// non-curried labels. For those, the only allowed label names are "code" and
+// "method". The function panics otherwise. The Observe method of the Observer
+// in the ObserverVec is called with the request duration in
+// seconds. Partitioning happens by HTTP status code and/or HTTP method if the
+// respective instance label names are present in the ObserverVec. For
+// unpartitioned observations, use an ObserverVec with zero labels. Note that
+// partitioning of Histograms is expensive and should be used judiciously.
+//
+// If the wrapped RoundTripper panics or returns a non-nil error, no values are
+// reported.
+//
+// Note that this method is only guaranteed to never observe negative durations
+// if used with Go1.9+.
+func InstrumentRoundTripperDuration(obs prometheus.ObserverVec, next http.RoundTripper) RoundTripperFunc {
+	code, method := checkLabels(obs)
+
+	return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
+		start := time.Now()
+		resp, err := next.RoundTrip(r)
+		if err == nil {
+			obs.With(labels(code, method, r.Method, resp.StatusCode)).Observe(time.Since(start).Seconds())
+		}
+		return resp, err
+	})
+}
+
+// InstrumentTrace is used to offer flexibility in instrumenting the available
+// httptrace.ClientTrace hook functions. Each function is passed a float64
+// representing the time in seconds since the start of the http request. A user
+// may choose to use separately bucketed Histograms, or implement custom
+// instance labels on a per function basis.
+type InstrumentTrace struct {
+	GotConn              func(float64)
+	PutIdleConn          func(float64)
+	GotFirstResponseByte func(float64)
+	Got100Continue       func(float64)
+	DNSStart             func(float64)
+	DNSDone              func(float64)
+	ConnectStart         func(float64)
+	ConnectDone          func(float64)
+	TLSHandshakeStart    func(float64)
+	TLSHandshakeDone     func(float64)
+	WroteHeaders         func(float64)
+	Wait100Continue      func(float64)
+	WroteRequest         func(float64)
+}
+
+// InstrumentRoundTripperTrace is a middleware that wraps the provided
+// RoundTripper and reports times to hook functions provided in the
+// InstrumentTrace struct. Hook functions that are not present in the provided
+// InstrumentTrace struct are ignored. Times reported to the hook functions are
+// time since the start of the request. Only with Go1.9+, those times are
+// guaranteed to never be negative. (Earlier Go versions are not using a
+// monotonic clock.) Note that partitioning of Histograms is expensive and
+// should be used judiciously.
+//
+// For hook functions that receive an error as an argument, no observations are
+// made in the event of a non-nil error value.
+//
+// See the example for InstrumentRoundTripperDuration for example usage.
+func InstrumentRoundTripperTrace(it *InstrumentTrace, next http.RoundTripper) RoundTripperFunc {
+	return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
+		start := time.Now()
+
+		trace := &httptrace.ClientTrace{
+			GotConn: func(_ httptrace.GotConnInfo) {
+				if it.GotConn != nil {
+					it.GotConn(time.Since(start).Seconds())
+				}
+			},
+			PutIdleConn: func(err error) {
+				if err != nil {
+					return
+				}
+				if it.PutIdleConn != nil {
+					it.PutIdleConn(time.Since(start).Seconds())
+				}
+			},
+			DNSStart: func(_ httptrace.DNSStartInfo) {
+				if it.DNSStart != nil {
+					it.DNSStart(time.Since(start).Seconds())
+				}
+			},
+			DNSDone: func(_ httptrace.DNSDoneInfo) {
+				if it.DNSDone != nil {
+					it.DNSDone(time.Since(start).Seconds())
+				}
+			},
+			ConnectStart: func(_, _ string) {
+				if it.ConnectStart != nil {
+					it.ConnectStart(time.Since(start).Seconds())
+				}
+			},
+			ConnectDone: func(_, _ string, err error) {
+				if err != nil {
+					return
+				}
+				if it.ConnectDone != nil {
+					it.ConnectDone(time.Since(start).Seconds())
+				}
+			},
+			GotFirstResponseByte: func() {
+				if it.GotFirstResponseByte != nil {
+					it.GotFirstResponseByte(time.Since(start).Seconds())
+				}
+			},
+			Got100Continue: func() {
+				if it.Got100Continue != nil {
+					it.Got100Continue(time.Since(start).Seconds())
+				}
+			},
+			TLSHandshakeStart: func() {
+				if it.TLSHandshakeStart != nil {
+					it.TLSHandshakeStart(time.Since(start).Seconds())
+				}
+			},
+			TLSHandshakeDone: func(_ tls.ConnectionState, err error) {
+				if err != nil {
+					return
+				}
+				if it.TLSHandshakeDone != nil {
+					it.TLSHandshakeDone(time.Since(start).Seconds())
+				}
+			},
+			WroteHeaders: func() {
+				if it.WroteHeaders != nil {
+					it.WroteHeaders(time.Since(start).Seconds())
+				}
+			},
+			Wait100Continue: func() {
+				if it.Wait100Continue != nil {
+					it.Wait100Continue(time.Since(start).Seconds())
+				}
+			},
+			WroteRequest: func(_ httptrace.WroteRequestInfo) {
+				if it.WroteRequest != nil {
+					it.WroteRequest(time.Since(start).Seconds())
+				}
+			},
+		}
+		r = r.WithContext(httptrace.WithClientTrace(r.Context(), trace))
+
+		return next.RoundTrip(r)
+	})
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go
new file mode 100644
index 000000000..9db243805
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go
@@ -0,0 +1,447 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package promhttp + +import ( + "errors" + "net/http" + "strconv" + "strings" + "time" + + dto "github.com/prometheus/client_model/go" + + "github.com/prometheus/client_golang/prometheus" +) + +// magicString is used for the hacky label test in checkLabels. Remove once fixed. +const magicString = "zZgWfBxLqvG8kc8IMv3POi2Bb0tZI3vAnBx+gBaFi9FyPzB/CzKUer1yufDa" + +// InstrumentHandlerInFlight is a middleware that wraps the provided +// http.Handler. It sets the provided prometheus.Gauge to the number of +// requests currently handled by the wrapped http.Handler. +// +// See the example for InstrumentHandlerDuration for example usage. +func InstrumentHandlerInFlight(g prometheus.Gauge, next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + g.Inc() + defer g.Dec() + next.ServeHTTP(w, r) + }) +} + +// InstrumentHandlerDuration is a middleware that wraps the provided +// http.Handler to observe the request duration with the provided ObserverVec. +// The ObserverVec must have zero, one, or two non-const non-curried labels. For +// those, the only allowed label names are "code" and "method". The function +// panics otherwise. The Observe method of the Observer in the ObserverVec is +// called with the request duration in seconds. Partitioning happens by HTTP +// status code and/or HTTP method if the respective instance label names are +// present in the ObserverVec. For unpartitioned observations, use an +// ObserverVec with zero labels. Note that partitioning of Histograms is +// expensive and should be used judiciously. +// +// If the wrapped Handler does not set a status code, a status code of 200 is assumed. +// +// If the wrapped Handler panics, no values are reported. +// +// Note that this method is only guaranteed to never observe negative durations +// if used with Go1.9+. +func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc { + code, method := checkLabels(obs) + + if code { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + now := time.Now() + d := newDelegator(w, nil) + next.ServeHTTP(d, r) + + obs.With(labels(code, method, r.Method, d.Status())).Observe(time.Since(now).Seconds()) + }) + } + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + now := time.Now() + next.ServeHTTP(w, r) + obs.With(labels(code, method, r.Method, 0)).Observe(time.Since(now).Seconds()) + }) +} + +// InstrumentHandlerCounter is a middleware that wraps the provided http.Handler +// to observe the request result with the provided CounterVec. The CounterVec +// must have zero, one, or two non-const non-curried labels. For those, the only +// allowed label names are "code" and "method". The function panics +// otherwise. Partitioning of the CounterVec happens by HTTP status code and/or +// HTTP method if the respective instance label names are present in the +// CounterVec. For unpartitioned counting, use a CounterVec with zero labels. +// +// If the wrapped Handler does not set a status code, a status code of 200 is assumed. +// +// If the wrapped Handler panics, the Counter is not incremented. +// +// See the example for InstrumentHandlerDuration for example usage. 
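+//
+// A sketch of chaining it with InstrumentHandlerInFlight above (counter and
+// gauge are assumed to be registered elsewhere):
+//
+//	chain := promhttp.InstrumentHandlerCounter(counter,
+//		promhttp.InstrumentHandlerInFlight(gauge, handler))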
+func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler) http.HandlerFunc { + code, method := checkLabels(counter) + + if code { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + d := newDelegator(w, nil) + next.ServeHTTP(d, r) + counter.With(labels(code, method, r.Method, d.Status())).Inc() + }) + } + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + next.ServeHTTP(w, r) + counter.With(labels(code, method, r.Method, 0)).Inc() + }) +} + +// InstrumentHandlerTimeToWriteHeader is a middleware that wraps the provided +// http.Handler to observe with the provided ObserverVec the request duration +// until the response headers are written. The ObserverVec must have zero, one, +// or two non-const non-curried labels. For those, the only allowed label names +// are "code" and "method". The function panics otherwise. The Observe method of +// the Observer in the ObserverVec is called with the request duration in +// seconds. Partitioning happens by HTTP status code and/or HTTP method if the +// respective instance label names are present in the ObserverVec. For +// unpartitioned observations, use an ObserverVec with zero labels. Note that +// partitioning of Histograms is expensive and should be used judiciously. +// +// If the wrapped Handler panics before calling WriteHeader, no value is +// reported. +// +// Note that this method is only guaranteed to never observe negative durations +// if used with Go1.9+. +// +// See the example for InstrumentHandlerDuration for example usage. +func InstrumentHandlerTimeToWriteHeader(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc { + code, method := checkLabels(obs) + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + now := time.Now() + d := newDelegator(w, func(status int) { + obs.With(labels(code, method, r.Method, status)).Observe(time.Since(now).Seconds()) + }) + next.ServeHTTP(d, r) + }) +} + +// InstrumentHandlerRequestSize is a middleware that wraps the provided +// http.Handler to observe the request size with the provided ObserverVec. The +// ObserverVec must have zero, one, or two non-const non-curried labels. For +// those, the only allowed label names are "code" and "method". The function +// panics otherwise. The Observe method of the Observer in the ObserverVec is +// called with the request size in bytes. Partitioning happens by HTTP status +// code and/or HTTP method if the respective instance label names are present in +// the ObserverVec. For unpartitioned observations, use an ObserverVec with zero +// labels. Note that partitioning of Histograms is expensive and should be used +// judiciously. +// +// If the wrapped Handler does not set a status code, a status code of 200 is assumed. +// +// If the wrapped Handler panics, no values are reported. +// +// See the example for InstrumentHandlerDuration for example usage. 
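+//
+// A minimal sketch (illustrative only; the histogram and uploadHandler below
+// are hypothetical):
+//
+//	reqSize := prometheus.NewHistogramVec(
+//		prometheus.HistogramOpts{
+//			Name:    "request_size_bytes",
+//			Help:    "Approximate size of incoming requests in bytes.",
+//			Buckets: prometheus.ExponentialBuckets(100, 10, 5),
+//		},
+//		[]string{"method"},
+//	)
+//	prometheus.MustRegister(reqSize)
+//	http.Handle("/upload", InstrumentHandlerRequestSize(reqSize, uploadHandler))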
+func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc { + code, method := checkLabels(obs) + + if code { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + d := newDelegator(w, nil) + next.ServeHTTP(d, r) + size := computeApproximateRequestSize(r) + obs.With(labels(code, method, r.Method, d.Status())).Observe(float64(size)) + }) + } + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + next.ServeHTTP(w, r) + size := computeApproximateRequestSize(r) + obs.With(labels(code, method, r.Method, 0)).Observe(float64(size)) + }) +} + +// InstrumentHandlerResponseSize is a middleware that wraps the provided +// http.Handler to observe the response size with the provided ObserverVec. The +// ObserverVec must have zero, one, or two non-const non-curried labels. For +// those, the only allowed label names are "code" and "method". The function +// panics otherwise. The Observe method of the Observer in the ObserverVec is +// called with the response size in bytes. Partitioning happens by HTTP status +// code and/or HTTP method if the respective instance label names are present in +// the ObserverVec. For unpartitioned observations, use an ObserverVec with zero +// labels. Note that partitioning of Histograms is expensive and should be used +// judiciously. +// +// If the wrapped Handler does not set a status code, a status code of 200 is assumed. +// +// If the wrapped Handler panics, no values are reported. +// +// See the example for InstrumentHandlerDuration for example usage. +func InstrumentHandlerResponseSize(obs prometheus.ObserverVec, next http.Handler) http.Handler { + code, method := checkLabels(obs) + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + d := newDelegator(w, nil) + next.ServeHTTP(d, r) + obs.With(labels(code, method, r.Method, d.Status())).Observe(float64(d.Written())) + }) +} + +func checkLabels(c prometheus.Collector) (code bool, method bool) { + // TODO(beorn7): Remove this hacky way to check for instance labels + // once Descriptors can have their dimensionality queried. + var ( + desc *prometheus.Desc + m prometheus.Metric + pm dto.Metric + lvs []string + ) + + // Get the Desc from the Collector. + descc := make(chan *prometheus.Desc, 1) + c.Describe(descc) + + select { + case desc = <-descc: + default: + panic("no description provided by collector") + } + select { + case <-descc: + panic("more than one description provided by collector") + default: + } + + close(descc) + + // Create a ConstMetric with the Desc. Since we don't know how many + // variable labels there are, try for as long as it needs. + for err := errors.New("dummy"); err != nil; lvs = append(lvs, magicString) { + m, err = prometheus.NewConstMetric(desc, prometheus.UntypedValue, 0, lvs...) + } + + // Write out the metric into a proto message and look at the labels. + // If the value is not the magicString, it is a constLabel, which doesn't interest us. + // If the label is curried, it doesn't interest us. + // In all other cases, only "code" or "method" is allowed. 
+	if err := m.Write(&pm); err != nil {
+		panic("error checking metric for labels")
+	}
+	for _, label := range pm.Label {
+		name, value := label.GetName(), label.GetValue()
+		if value != magicString || isLabelCurried(c, name) {
+			continue
+		}
+		switch name {
+		case "code":
+			code = true
+		case "method":
+			method = true
+		default:
+			panic("metric partitioned with non-supported labels")
+		}
+	}
+	return
+}
+
+func isLabelCurried(c prometheus.Collector, label string) bool {
+	// This is even hackier than the label test above.
+	// We essentially try to curry again and see if it works.
+	// But for that, we need to type-convert to the two
+	// types we use here, ObserverVec or *CounterVec.
+	switch v := c.(type) {
+	case *prometheus.CounterVec:
+		if _, err := v.CurryWith(prometheus.Labels{label: "dummy"}); err == nil {
+			return false
+		}
+	case prometheus.ObserverVec:
+		if _, err := v.CurryWith(prometheus.Labels{label: "dummy"}); err == nil {
+			return false
+		}
+	default:
+		panic("unsupported metric vec type")
+	}
+	return true
+}
+
+// emptyLabels is a one-time allocation for non-partitioned metrics to avoid
+// unnecessary allocations on each request.
+var emptyLabels = prometheus.Labels{}
+
+func labels(code, method bool, reqMethod string, status int) prometheus.Labels {
+	if !(code || method) {
+		return emptyLabels
+	}
+	labels := prometheus.Labels{}
+
+	if code {
+		labels["code"] = sanitizeCode(status)
+	}
+	if method {
+		labels["method"] = sanitizeMethod(reqMethod)
+	}
+
+	return labels
+}
+
+func computeApproximateRequestSize(r *http.Request) int {
+	s := 0
+	if r.URL != nil {
+		s += len(r.URL.String())
+	}
+
+	s += len(r.Method)
+	s += len(r.Proto)
+	for name, values := range r.Header {
+		s += len(name)
+		for _, value := range values {
+			s += len(value)
+		}
+	}
+	s += len(r.Host)
+
+	// N.B. r.Form and r.MultipartForm are assumed to be included in r.URL.
+
+	if r.ContentLength != -1 {
+		s += int(r.ContentLength)
+	}
+	return s
+}
+
+func sanitizeMethod(m string) string {
+	switch m {
+	case "GET", "get":
+		return "get"
+	case "PUT", "put":
+		return "put"
+	case "HEAD", "head":
+		return "head"
+	case "POST", "post":
+		return "post"
+	case "DELETE", "delete":
+		return "delete"
+	case "CONNECT", "connect":
+		return "connect"
+	case "OPTIONS", "options":
+		return "options"
+	case "NOTIFY", "notify":
+		return "notify"
+	default:
+		return strings.ToLower(m)
+	}
+}
+
+// If the wrapped http.Handler has not set a status code, i.e. the value is
+// currently 0, sanitizeCode will return 200, for consistency with behavior in
+// the stdlib.
+func sanitizeCode(s int) string { + switch s { + case 100: + return "100" + case 101: + return "101" + + case 200, 0: + return "200" + case 201: + return "201" + case 202: + return "202" + case 203: + return "203" + case 204: + return "204" + case 205: + return "205" + case 206: + return "206" + + case 300: + return "300" + case 301: + return "301" + case 302: + return "302" + case 304: + return "304" + case 305: + return "305" + case 307: + return "307" + + case 400: + return "400" + case 401: + return "401" + case 402: + return "402" + case 403: + return "403" + case 404: + return "404" + case 405: + return "405" + case 406: + return "406" + case 407: + return "407" + case 408: + return "408" + case 409: + return "409" + case 410: + return "410" + case 411: + return "411" + case 412: + return "412" + case 413: + return "413" + case 414: + return "414" + case 415: + return "415" + case 416: + return "416" + case 417: + return "417" + case 418: + return "418" + + case 500: + return "500" + case 501: + return "501" + case 502: + return "502" + case 503: + return "503" + case 504: + return "504" + case 505: + return "505" + + case 428: + return "428" + case 429: + return "429" + case 431: + return "431" + case 511: + return "511" + + default: + return strconv.Itoa(s) + } +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/vendor/github.com/prometheus/client_golang/prometheus/registry.go new file mode 100644 index 000000000..6c32516aa --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/registry.go @@ -0,0 +1,945 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "bytes" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "sort" + "strings" + "sync" + "unicode/utf8" + + "github.com/golang/protobuf/proto" + "github.com/prometheus/common/expfmt" + + dto "github.com/prometheus/client_model/go" + + "github.com/prometheus/client_golang/prometheus/internal" +) + +const ( + // Capacity for the channel to collect metrics and descriptors. + capMetricChan = 1000 + capDescChan = 10 +) + +// DefaultRegisterer and DefaultGatherer are the implementations of the +// Registerer and Gatherer interface a number of convenience functions in this +// package act on. Initially, both variables point to the same Registry, which +// has a process collector (currently on Linux only, see NewProcessCollector) +// and a Go collector (see NewGoCollector, in particular the note about +// stop-the-world implication with Go versions older than 1.9) already +// registered. This approach to keep default instances as global state mirrors +// the approach of other packages in the Go standard library. Note that there +// are caveats. Change the variables with caution and only if you understand the +// consequences. Users who want to avoid global state altogether should not use +// the convenience functions and act on custom instances instead. 
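+//
+// A sketch of opting out of the global state (illustrative; myCollector is
+// hypothetical):
+//
+//	reg := prometheus.NewRegistry()
+//	reg.MustRegister(myCollector) // instead of prometheus.MustRegister
+//	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))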
+var (
+	defaultRegistry              = NewRegistry()
+	DefaultRegisterer Registerer = defaultRegistry
+	DefaultGatherer   Gatherer   = defaultRegistry
+)
+
+func init() {
+	MustRegister(NewProcessCollector(ProcessCollectorOpts{}))
+	MustRegister(NewGoCollector())
+}
+
+// NewRegistry creates a new vanilla Registry without any Collectors
+// pre-registered.
+func NewRegistry() *Registry {
+	return &Registry{
+		collectorsByID:  map[uint64]Collector{},
+		descIDs:         map[uint64]struct{}{},
+		dimHashesByName: map[string]uint64{},
+	}
+}
+
+// NewPedanticRegistry returns a registry that checks during collection if each
+// collected Metric is consistent with its reported Desc, and if the Desc has
+// actually been registered with the registry. Unchecked Collectors (those whose
+// Describe method does not yield any descriptors) are excluded from the check.
+//
+// Usually, a Registry will be happy as long as the union of all collected
+// Metrics is consistent and valid even if some metrics are not consistent with
+// their own Desc or a Desc provided by their registered Collector. Well-behaved
+// Collectors and Metrics will only provide consistent Descs. This Registry is
+// useful to test the implementation of Collectors and Metrics.
+func NewPedanticRegistry() *Registry {
+	r := NewRegistry()
+	r.pedanticChecksEnabled = true
+	return r
+}
+
+// Registerer is the interface for the part of a registry in charge of
+// registering and unregistering. Users of custom registries should use
+// Registerer as type for registration purposes (rather than the Registry type
+// directly). In that way, they are free to use custom Registerer
+// implementations (e.g. for testing purposes).
+type Registerer interface {
+	// Register registers a new Collector to be included in metrics
+	// collection. It returns an error if the descriptors provided by the
+	// Collector are invalid or if they — in combination with descriptors of
+	// already registered Collectors — do not fulfill the consistency and
+	// uniqueness criteria described in the documentation of metric.Desc.
+	//
+	// If the provided Collector is equal to a Collector already registered
+	// (which includes the case of re-registering the same Collector), the
+	// returned error is an instance of AlreadyRegisteredError, which
+	// contains the previously registered Collector.
+	//
+	// A Collector whose Describe method does not yield any Desc is treated
+	// as unchecked. Registration will always succeed. No check for
+	// re-registering (see previous paragraph) is performed. Thus, the
+	// caller is responsible for not double-registering the same unchecked
+	// Collector, and for providing a Collector that will not cause
+	// inconsistent metrics on collection. (This would lead to scrape
+	// errors.)
+	Register(Collector) error
+	// MustRegister works like Register but registers any number of
+	// Collectors and panics upon the first registration that causes an
+	// error.
+	MustRegister(...Collector)
+	// Unregister unregisters the Collector that equals the Collector passed
+	// in as an argument. (Two Collectors are considered equal if their
+	// Describe method yields the same set of descriptors.) The function
+	// returns whether a Collector was unregistered. Note that an unchecked
+	// Collector cannot be unregistered (as its Describe method does not
+	// yield any descriptor).
+	//
+	// Note that even after unregistering, it will not be possible to
+	// register a new Collector that is inconsistent with the unregistered
+	// Collector, e.g.
a Collector collecting metrics with the same name but + // a different help string. The rationale here is that the same registry + // instance must only collect consistent metrics throughout its + // lifetime. + Unregister(Collector) bool +} + +// Gatherer is the interface for the part of a registry in charge of gathering +// the collected metrics into a number of MetricFamilies. The Gatherer interface +// comes with the same general implication as described for the Registerer +// interface. +type Gatherer interface { + // Gather calls the Collect method of the registered Collectors and then + // gathers the collected metrics into a lexicographically sorted slice + // of uniquely named MetricFamily protobufs. Gather ensures that the + // returned slice is valid and self-consistent so that it can be used + // for valid exposition. As an exception to the strict consistency + // requirements described for metric.Desc, Gather will tolerate + // different sets of label names for metrics of the same metric family. + // + // Even if an error occurs, Gather attempts to gather as many metrics as + // possible. Hence, if a non-nil error is returned, the returned + // MetricFamily slice could be nil (in case of a fatal error that + // prevented any meaningful metric collection) or contain a number of + // MetricFamily protobufs, some of which might be incomplete, and some + // might be missing altogether. The returned error (which might be a + // MultiError) explains the details. Note that this is mostly useful for + // debugging purposes. If the gathered protobufs are to be used for + // exposition in actual monitoring, it is almost always better to not + // expose an incomplete result and instead disregard the returned + // MetricFamily protobufs in case the returned error is non-nil. + Gather() ([]*dto.MetricFamily, error) +} + +// Register registers the provided Collector with the DefaultRegisterer. +// +// Register is a shortcut for DefaultRegisterer.Register(c). See there for more +// details. +func Register(c Collector) error { + return DefaultRegisterer.Register(c) +} + +// MustRegister registers the provided Collectors with the DefaultRegisterer and +// panics if any error occurs. +// +// MustRegister is a shortcut for DefaultRegisterer.MustRegister(cs...). See +// there for more details. +func MustRegister(cs ...Collector) { + DefaultRegisterer.MustRegister(cs...) +} + +// Unregister removes the registration of the provided Collector from the +// DefaultRegisterer. +// +// Unregister is a shortcut for DefaultRegisterer.Unregister(c). See there for +// more details. +func Unregister(c Collector) bool { + return DefaultRegisterer.Unregister(c) +} + +// GathererFunc turns a function into a Gatherer. +type GathererFunc func() ([]*dto.MetricFamily, error) + +// Gather implements Gatherer. +func (gf GathererFunc) Gather() ([]*dto.MetricFamily, error) { + return gf() +} + +// AlreadyRegisteredError is returned by the Register method if the Collector to +// be registered has already been registered before, or a different Collector +// that collects the same metrics has been registered before. Registration fails +// in that case, but you can detect from the kind of error what has +// happened. The error contains fields for the existing Collector and the +// (rejected) new Collector that equals the existing one. This can be used to +// find out if an equal Collector has been registered before and switch over to +// using the old one, as demonstrated in the example. 
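+//
+// A sketch of that switch-over pattern (illustrative; reqCounter is
+// hypothetical):
+//
+//	if err := prometheus.Register(reqCounter); err != nil {
+//		if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
+//			// A Counter for this metric was registered before. Use it.
+//			reqCounter = are.ExistingCollector.(prometheus.Counter)
+//		} else {
+//			panic(err)
+//		}
+//	}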
+type AlreadyRegisteredError struct {
+	ExistingCollector, NewCollector Collector
+}
+
+func (err AlreadyRegisteredError) Error() string {
+	return "duplicate metrics collector registration attempted"
+}
+
+// MultiError is a slice of errors implementing the error interface. It is used
+// by a Gatherer to report multiple errors during MetricFamily gathering.
+type MultiError []error
+
+func (errs MultiError) Error() string {
+	if len(errs) == 0 {
+		return ""
+	}
+	buf := &bytes.Buffer{}
+	fmt.Fprintf(buf, "%d error(s) occurred:", len(errs))
+	for _, err := range errs {
+		fmt.Fprintf(buf, "\n* %s", err)
+	}
+	return buf.String()
+}
+
+// Append appends the provided error if it is not nil.
+func (errs *MultiError) Append(err error) {
+	if err != nil {
+		*errs = append(*errs, err)
+	}
+}
+
+// MaybeUnwrap returns nil if len(errs) is 0. It returns the first and only
+// contained error as error if len(errs) is 1. In all other cases, it returns
+// the MultiError directly. This is helpful for returning a MultiError in a way
+// that only uses the MultiError if needed.
+func (errs MultiError) MaybeUnwrap() error {
+	switch len(errs) {
+	case 0:
+		return nil
+	case 1:
+		return errs[0]
+	default:
+		return errs
+	}
+}
+
+// Registry registers Prometheus collectors, collects their metrics, and gathers
+// them into MetricFamilies for exposition. It implements both Registerer and
+// Gatherer. The zero value is not usable. Create instances with NewRegistry or
+// NewPedanticRegistry.
+type Registry struct {
+	mtx                   sync.RWMutex
+	collectorsByID        map[uint64]Collector // ID is a hash of the descIDs.
+	descIDs               map[uint64]struct{}
+	dimHashesByName       map[string]uint64
+	uncheckedCollectors   []Collector
+	pedanticChecksEnabled bool
+}
+
+// Register implements Registerer.
+func (r *Registry) Register(c Collector) error {
+	var (
+		descChan           = make(chan *Desc, capDescChan)
+		newDescIDs         = map[uint64]struct{}{}
+		newDimHashesByName = map[string]uint64{}
+		collectorID        uint64 // Just a sum of all desc IDs.
+		duplicateDescErr   error
+	)
+	go func() {
+		c.Describe(descChan)
+		close(descChan)
+	}()
+	r.mtx.Lock()
+	defer func() {
+		// Drain channel in case of premature return to not leak a goroutine.
+		for range descChan {
+		}
+		r.mtx.Unlock()
+	}()
+	// Conduct various tests...
+	for desc := range descChan {
+
+		// Is the descriptor valid at all?
+		if desc.err != nil {
+			return fmt.Errorf("descriptor %s is invalid: %s", desc, desc.err)
+		}
+
+		// Is the descID unique?
+		// (In other words: Is the fqName + constLabel combination unique?)
+		if _, exists := r.descIDs[desc.id]; exists {
+			duplicateDescErr = fmt.Errorf("descriptor %s already exists with the same fully-qualified name and const label values", desc)
+		}
+		// If it is not a duplicate desc in this collector, add it to
+		// the collectorID. (We allow duplicate descs within the same
+		// collector, but their existence must be a no-op.)
+		if _, exists := newDescIDs[desc.id]; !exists {
+			newDescIDs[desc.id] = struct{}{}
+			collectorID += desc.id
+		}
+
+		// Are all the label names and the help string consistent with
+		// previous descriptors of the same name?
+		// First check existing descriptors...
+		if dimHash, exists := r.dimHashesByName[desc.fqName]; exists {
+			if dimHash != desc.dimHash {
+				return fmt.Errorf("a previously registered descriptor with the same fully-qualified name as %s has different label names or a different help string", desc)
+			}
+		} else {
+			// ...then check the new descriptors already seen.
+ if dimHash, exists := newDimHashesByName[desc.fqName]; exists { + if dimHash != desc.dimHash { + return fmt.Errorf("descriptors reported by collector have inconsistent label names or help strings for the same fully-qualified name, offender is %s", desc) + } + } else { + newDimHashesByName[desc.fqName] = desc.dimHash + } + } + } + // A Collector yielding no Desc at all is considered unchecked. + if len(newDescIDs) == 0 { + r.uncheckedCollectors = append(r.uncheckedCollectors, c) + return nil + } + if existing, exists := r.collectorsByID[collectorID]; exists { + switch e := existing.(type) { + case *wrappingCollector: + return AlreadyRegisteredError{ + ExistingCollector: e.unwrapRecursively(), + NewCollector: c, + } + default: + return AlreadyRegisteredError{ + ExistingCollector: e, + NewCollector: c, + } + } + } + // If the collectorID is new, but at least one of the descs existed + // before, we are in trouble. + if duplicateDescErr != nil { + return duplicateDescErr + } + + // Only after all tests have passed, actually register. + r.collectorsByID[collectorID] = c + for hash := range newDescIDs { + r.descIDs[hash] = struct{}{} + } + for name, dimHash := range newDimHashesByName { + r.dimHashesByName[name] = dimHash + } + return nil +} + +// Unregister implements Registerer. +func (r *Registry) Unregister(c Collector) bool { + var ( + descChan = make(chan *Desc, capDescChan) + descIDs = map[uint64]struct{}{} + collectorID uint64 // Just a sum of the desc IDs. + ) + go func() { + c.Describe(descChan) + close(descChan) + }() + for desc := range descChan { + if _, exists := descIDs[desc.id]; !exists { + collectorID += desc.id + descIDs[desc.id] = struct{}{} + } + } + + r.mtx.RLock() + if _, exists := r.collectorsByID[collectorID]; !exists { + r.mtx.RUnlock() + return false + } + r.mtx.RUnlock() + + r.mtx.Lock() + defer r.mtx.Unlock() + + delete(r.collectorsByID, collectorID) + for id := range descIDs { + delete(r.descIDs, id) + } + // dimHashesByName is left untouched as those must be consistent + // throughout the lifetime of a program. + return true +} + +// MustRegister implements Registerer. +func (r *Registry) MustRegister(cs ...Collector) { + for _, c := range cs { + if err := r.Register(c); err != nil { + panic(err) + } + } +} + +// Gather implements Gatherer. +func (r *Registry) Gather() ([]*dto.MetricFamily, error) { + var ( + checkedMetricChan = make(chan Metric, capMetricChan) + uncheckedMetricChan = make(chan Metric, capMetricChan) + metricHashes = map[uint64]struct{}{} + wg sync.WaitGroup + errs MultiError // The collected errors to return in the end. + registeredDescIDs map[uint64]struct{} // Only used for pedantic checks + ) + + r.mtx.RLock() + goroutineBudget := len(r.collectorsByID) + len(r.uncheckedCollectors) + metricFamiliesByName := make(map[string]*dto.MetricFamily, len(r.dimHashesByName)) + checkedCollectors := make(chan Collector, len(r.collectorsByID)) + uncheckedCollectors := make(chan Collector, len(r.uncheckedCollectors)) + for _, collector := range r.collectorsByID { + checkedCollectors <- collector + } + for _, collector := range r.uncheckedCollectors { + uncheckedCollectors <- collector + } + // In case pedantic checks are enabled, we have to copy the map before + // giving up the RLock. 
+	if r.pedanticChecksEnabled {
+		registeredDescIDs = make(map[uint64]struct{}, len(r.descIDs))
+		for id := range r.descIDs {
+			registeredDescIDs[id] = struct{}{}
+		}
+	}
+	r.mtx.RUnlock()
+
+	wg.Add(goroutineBudget)
+
+	collectWorker := func() {
+		for {
+			select {
+			case collector := <-checkedCollectors:
+				collector.Collect(checkedMetricChan)
+			case collector := <-uncheckedCollectors:
+				collector.Collect(uncheckedMetricChan)
+			default:
+				return
+			}
+			wg.Done()
+		}
+	}
+
+	// Start the first worker now to make sure at least one is running.
+	go collectWorker()
+	goroutineBudget--
+
+	// Close checkedMetricChan and uncheckedMetricChan once all collectors
+	// are collected.
+	go func() {
+		wg.Wait()
+		close(checkedMetricChan)
+		close(uncheckedMetricChan)
+	}()
+
+	// Drain checkedMetricChan and uncheckedMetricChan in case of premature return.
+	defer func() {
+		if checkedMetricChan != nil {
+			for range checkedMetricChan {
+			}
+		}
+		if uncheckedMetricChan != nil {
+			for range uncheckedMetricChan {
+			}
+		}
+	}()
+
+	// Copy the channel references so we can nil them out later to remove
+	// them from the select statements below.
+	cmc := checkedMetricChan
+	umc := uncheckedMetricChan
+
+	for {
+		select {
+		case metric, ok := <-cmc:
+			if !ok {
+				cmc = nil
+				break
+			}
+			errs.Append(processMetric(
+				metric, metricFamiliesByName,
+				metricHashes,
+				registeredDescIDs,
+			))
+		case metric, ok := <-umc:
+			if !ok {
+				umc = nil
+				break
+			}
+			errs.Append(processMetric(
+				metric, metricFamiliesByName,
+				metricHashes,
+				nil,
+			))
+		default:
+			if goroutineBudget <= 0 || len(checkedCollectors)+len(uncheckedCollectors) == 0 {
+				// All collectors are already being worked on or
+				// we have already started as many goroutines as
+				// there are collectors. Do the same as above,
+				// just without the default.
+				select {
+				case metric, ok := <-cmc:
+					if !ok {
+						cmc = nil
+						break
+					}
+					errs.Append(processMetric(
+						metric, metricFamiliesByName,
+						metricHashes,
+						registeredDescIDs,
+					))
+				case metric, ok := <-umc:
+					if !ok {
+						umc = nil
+						break
+					}
+					errs.Append(processMetric(
+						metric, metricFamiliesByName,
+						metricHashes,
+						nil,
+					))
+				}
+				break
+			}
+			// Start more workers.
+			go collectWorker()
+			goroutineBudget--
+			runtime.Gosched()
+		}
+		// Once both checkedMetricChan and uncheckedMetricChan are closed
+		// and drained, the contraption above will nil out cmc and umc,
+		// and then we can leave the collect loop here.
+		if cmc == nil && umc == nil {
+			break
+		}
+	}
+	return internal.NormalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap()
+}
+
+// WriteToTextfile calls Gather on the provided Gatherer, encodes the result in the
+// Prometheus text format, and writes it to a temporary file. Upon success, the
+// temporary file is renamed to the provided filename.
+//
+// This is intended for use with the textfile collector of the node exporter.
+// Note that the node exporter expects the filename to be suffixed with ".prom".
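+//
+// A minimal sketch (illustrative; the output path is hypothetical):
+//
+//	if err := prometheus.WriteToTextfile(
+//		"/var/lib/node_exporter/textfile/my_job.prom",
+//		prometheus.DefaultGatherer,
+//	); err != nil {
+//		log.Fatal(err)
+//	}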
+func WriteToTextfile(filename string, g Gatherer) error { + tmp, err := ioutil.TempFile(filepath.Dir(filename), filepath.Base(filename)) + if err != nil { + return err + } + defer os.Remove(tmp.Name()) + + mfs, err := g.Gather() + if err != nil { + return err + } + for _, mf := range mfs { + if _, err := expfmt.MetricFamilyToText(tmp, mf); err != nil { + return err + } + } + if err := tmp.Close(); err != nil { + return err + } + + if err := os.Chmod(tmp.Name(), 0644); err != nil { + return err + } + return os.Rename(tmp.Name(), filename) +} + +// processMetric is an internal helper method only used by the Gather method. +func processMetric( + metric Metric, + metricFamiliesByName map[string]*dto.MetricFamily, + metricHashes map[uint64]struct{}, + registeredDescIDs map[uint64]struct{}, +) error { + desc := metric.Desc() + // Wrapped metrics collected by an unchecked Collector can have an + // invalid Desc. + if desc.err != nil { + return desc.err + } + dtoMetric := &dto.Metric{} + if err := metric.Write(dtoMetric); err != nil { + return fmt.Errorf("error collecting metric %v: %s", desc, err) + } + metricFamily, ok := metricFamiliesByName[desc.fqName] + if ok { // Existing name. + if metricFamily.GetHelp() != desc.help { + return fmt.Errorf( + "collected metric %s %s has help %q but should have %q", + desc.fqName, dtoMetric, desc.help, metricFamily.GetHelp(), + ) + } + // TODO(beorn7): Simplify switch once Desc has type. + switch metricFamily.GetType() { + case dto.MetricType_COUNTER: + if dtoMetric.Counter == nil { + return fmt.Errorf( + "collected metric %s %s should be a Counter", + desc.fqName, dtoMetric, + ) + } + case dto.MetricType_GAUGE: + if dtoMetric.Gauge == nil { + return fmt.Errorf( + "collected metric %s %s should be a Gauge", + desc.fqName, dtoMetric, + ) + } + case dto.MetricType_SUMMARY: + if dtoMetric.Summary == nil { + return fmt.Errorf( + "collected metric %s %s should be a Summary", + desc.fqName, dtoMetric, + ) + } + case dto.MetricType_UNTYPED: + if dtoMetric.Untyped == nil { + return fmt.Errorf( + "collected metric %s %s should be Untyped", + desc.fqName, dtoMetric, + ) + } + case dto.MetricType_HISTOGRAM: + if dtoMetric.Histogram == nil { + return fmt.Errorf( + "collected metric %s %s should be a Histogram", + desc.fqName, dtoMetric, + ) + } + default: + panic("encountered MetricFamily with invalid type") + } + } else { // New name. + metricFamily = &dto.MetricFamily{} + metricFamily.Name = proto.String(desc.fqName) + metricFamily.Help = proto.String(desc.help) + // TODO(beorn7): Simplify switch once Desc has type. + switch { + case dtoMetric.Gauge != nil: + metricFamily.Type = dto.MetricType_GAUGE.Enum() + case dtoMetric.Counter != nil: + metricFamily.Type = dto.MetricType_COUNTER.Enum() + case dtoMetric.Summary != nil: + metricFamily.Type = dto.MetricType_SUMMARY.Enum() + case dtoMetric.Untyped != nil: + metricFamily.Type = dto.MetricType_UNTYPED.Enum() + case dtoMetric.Histogram != nil: + metricFamily.Type = dto.MetricType_HISTOGRAM.Enum() + default: + return fmt.Errorf("empty metric collected: %s", dtoMetric) + } + if err := checkSuffixCollisions(metricFamily, metricFamiliesByName); err != nil { + return err + } + metricFamiliesByName[desc.fqName] = metricFamily + } + if err := checkMetricConsistency(metricFamily, dtoMetric, metricHashes); err != nil { + return err + } + if registeredDescIDs != nil { + // Is the desc registered at all? 
+ if _, exist := registeredDescIDs[desc.id]; !exist { + return fmt.Errorf( + "collected metric %s %s with unregistered descriptor %s", + metricFamily.GetName(), dtoMetric, desc, + ) + } + if err := checkDescConsistency(metricFamily, dtoMetric, desc); err != nil { + return err + } + } + metricFamily.Metric = append(metricFamily.Metric, dtoMetric) + return nil +} + +// Gatherers is a slice of Gatherer instances that implements the Gatherer +// interface itself. Its Gather method calls Gather on all Gatherers in the +// slice in order and returns the merged results. Errors returned from the +// Gather calls are all returned in a flattened MultiError. Duplicate and +// inconsistent Metrics are skipped (first occurrence in slice order wins) and +// reported in the returned error. +// +// Gatherers can be used to merge the Gather results from multiple +// Registries. It also provides a way to directly inject existing MetricFamily +// protobufs into the gathering by creating a custom Gatherer with a Gather +// method that simply returns the existing MetricFamily protobufs. Note that no +// registration is involved (in contrast to Collector registration), so +// obviously registration-time checks cannot happen. Any inconsistencies between +// the gathered MetricFamilies are reported as errors by the Gather method, and +// inconsistent Metrics are dropped. Invalid parts of the MetricFamilies +// (e.g. syntactically invalid metric or label names) will go undetected. +type Gatherers []Gatherer + +// Gather implements Gatherer. +func (gs Gatherers) Gather() ([]*dto.MetricFamily, error) { + var ( + metricFamiliesByName = map[string]*dto.MetricFamily{} + metricHashes = map[uint64]struct{}{} + errs MultiError // The collected errors to return in the end. + ) + + for i, g := range gs { + mfs, err := g.Gather() + if err != nil { + if multiErr, ok := err.(MultiError); ok { + for _, err := range multiErr { + errs = append(errs, fmt.Errorf("[from Gatherer #%d] %s", i+1, err)) + } + } else { + errs = append(errs, fmt.Errorf("[from Gatherer #%d] %s", i+1, err)) + } + } + for _, mf := range mfs { + existingMF, exists := metricFamiliesByName[mf.GetName()] + if exists { + if existingMF.GetHelp() != mf.GetHelp() { + errs = append(errs, fmt.Errorf( + "gathered metric family %s has help %q but should have %q", + mf.GetName(), mf.GetHelp(), existingMF.GetHelp(), + )) + continue + } + if existingMF.GetType() != mf.GetType() { + errs = append(errs, fmt.Errorf( + "gathered metric family %s has type %s but should have %s", + mf.GetName(), mf.GetType(), existingMF.GetType(), + )) + continue + } + } else { + existingMF = &dto.MetricFamily{} + existingMF.Name = mf.Name + existingMF.Help = mf.Help + existingMF.Type = mf.Type + if err := checkSuffixCollisions(existingMF, metricFamiliesByName); err != nil { + errs = append(errs, err) + continue + } + metricFamiliesByName[mf.GetName()] = existingMF + } + for _, m := range mf.Metric { + if err := checkMetricConsistency(existingMF, m, metricHashes); err != nil { + errs = append(errs, err) + continue + } + existingMF.Metric = append(existingMF.Metric, m) + } + } + } + return internal.NormalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap() +} + +// checkSuffixCollisions checks for collisions with the “magic” suffixes the +// Prometheus text format and the internal metric representation of the +// Prometheus server add while flattening Summaries and Histograms. 
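+//
+// For example (added for illustration): a counter named "x_count" collides
+// with a previously collected histogram named "x", and a histogram named "x"
+// collides with previously collected metrics named "x_count", "x_sum", or
+// "x_bucket".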
+func checkSuffixCollisions(mf *dto.MetricFamily, mfs map[string]*dto.MetricFamily) error { + var ( + newName = mf.GetName() + newType = mf.GetType() + newNameWithoutSuffix = "" + ) + switch { + case strings.HasSuffix(newName, "_count"): + newNameWithoutSuffix = newName[:len(newName)-6] + case strings.HasSuffix(newName, "_sum"): + newNameWithoutSuffix = newName[:len(newName)-4] + case strings.HasSuffix(newName, "_bucket"): + newNameWithoutSuffix = newName[:len(newName)-7] + } + if newNameWithoutSuffix != "" { + if existingMF, ok := mfs[newNameWithoutSuffix]; ok { + switch existingMF.GetType() { + case dto.MetricType_SUMMARY: + if !strings.HasSuffix(newName, "_bucket") { + return fmt.Errorf( + "collected metric named %q collides with previously collected summary named %q", + newName, newNameWithoutSuffix, + ) + } + case dto.MetricType_HISTOGRAM: + return fmt.Errorf( + "collected metric named %q collides with previously collected histogram named %q", + newName, newNameWithoutSuffix, + ) + } + } + } + if newType == dto.MetricType_SUMMARY || newType == dto.MetricType_HISTOGRAM { + if _, ok := mfs[newName+"_count"]; ok { + return fmt.Errorf( + "collected histogram or summary named %q collides with previously collected metric named %q", + newName, newName+"_count", + ) + } + if _, ok := mfs[newName+"_sum"]; ok { + return fmt.Errorf( + "collected histogram or summary named %q collides with previously collected metric named %q", + newName, newName+"_sum", + ) + } + } + if newType == dto.MetricType_HISTOGRAM { + if _, ok := mfs[newName+"_bucket"]; ok { + return fmt.Errorf( + "collected histogram named %q collides with previously collected metric named %q", + newName, newName+"_bucket", + ) + } + } + return nil +} + +// checkMetricConsistency checks if the provided Metric is consistent with the +// provided MetricFamily. It also hashes the Metric labels and the MetricFamily +// name. If the resulting hash is already in the provided metricHashes, an error +// is returned. If not, it is added to metricHashes. +func checkMetricConsistency( + metricFamily *dto.MetricFamily, + dtoMetric *dto.Metric, + metricHashes map[uint64]struct{}, +) error { + name := metricFamily.GetName() + + // Type consistency with metric family. 
+ if metricFamily.GetType() == dto.MetricType_GAUGE && dtoMetric.Gauge == nil || + metricFamily.GetType() == dto.MetricType_COUNTER && dtoMetric.Counter == nil || + metricFamily.GetType() == dto.MetricType_SUMMARY && dtoMetric.Summary == nil || + metricFamily.GetType() == dto.MetricType_HISTOGRAM && dtoMetric.Histogram == nil || + metricFamily.GetType() == dto.MetricType_UNTYPED && dtoMetric.Untyped == nil { + return fmt.Errorf( + "collected metric %q { %s} is not a %s", + name, dtoMetric, metricFamily.GetType(), + ) + } + + previousLabelName := "" + for _, labelPair := range dtoMetric.GetLabel() { + labelName := labelPair.GetName() + if labelName == previousLabelName { + return fmt.Errorf( + "collected metric %q { %s} has two or more labels with the same name: %s", + name, dtoMetric, labelName, + ) + } + if !checkLabelName(labelName) { + return fmt.Errorf( + "collected metric %q { %s} has a label with an invalid name: %s", + name, dtoMetric, labelName, + ) + } + if dtoMetric.Summary != nil && labelName == quantileLabel { + return fmt.Errorf( + "collected metric %q { %s} must not have an explicit %q label", + name, dtoMetric, quantileLabel, + ) + } + if !utf8.ValidString(labelPair.GetValue()) { + return fmt.Errorf( + "collected metric %q { %s} has a label named %q whose value is not utf8: %#v", + name, dtoMetric, labelName, labelPair.GetValue()) + } + previousLabelName = labelName + } + + // Is the metric unique (i.e. no other metric with the same name and the same labels)? + h := hashNew() + h = hashAdd(h, name) + h = hashAddByte(h, separatorByte) + // Make sure label pairs are sorted. We depend on it for the consistency + // check. + if !sort.IsSorted(labelPairSorter(dtoMetric.Label)) { + // We cannot sort dtoMetric.Label in place as it is immutable by contract. + copiedLabels := make([]*dto.LabelPair, len(dtoMetric.Label)) + copy(copiedLabels, dtoMetric.Label) + sort.Sort(labelPairSorter(copiedLabels)) + dtoMetric.Label = copiedLabels + } + for _, lp := range dtoMetric.Label { + h = hashAdd(h, lp.GetName()) + h = hashAddByte(h, separatorByte) + h = hashAdd(h, lp.GetValue()) + h = hashAddByte(h, separatorByte) + } + if _, exists := metricHashes[h]; exists { + return fmt.Errorf( + "collected metric %q { %s} was collected before with the same name and label values", + name, dtoMetric, + ) + } + metricHashes[h] = struct{}{} + return nil +} + +func checkDescConsistency( + metricFamily *dto.MetricFamily, + dtoMetric *dto.Metric, + desc *Desc, +) error { + // Desc help consistency with metric family help. + if metricFamily.GetHelp() != desc.help { + return fmt.Errorf( + "collected metric %s %s has help %q but should have %q", + metricFamily.GetName(), dtoMetric, metricFamily.GetHelp(), desc.help, + ) + } + + // Is the desc consistent with the content of the metric? 
+ lpsFromDesc := make([]*dto.LabelPair, len(desc.constLabelPairs), len(dtoMetric.Label)) + copy(lpsFromDesc, desc.constLabelPairs) + for _, l := range desc.variableLabels { + lpsFromDesc = append(lpsFromDesc, &dto.LabelPair{ + Name: proto.String(l), + }) + } + if len(lpsFromDesc) != len(dtoMetric.Label) { + return fmt.Errorf( + "labels in collected metric %s %s are inconsistent with descriptor %s", + metricFamily.GetName(), dtoMetric, desc, + ) + } + sort.Sort(labelPairSorter(lpsFromDesc)) + for i, lpFromDesc := range lpsFromDesc { + lpFromMetric := dtoMetric.Label[i] + if lpFromDesc.GetName() != lpFromMetric.GetName() || + lpFromDesc.Value != nil && lpFromDesc.GetValue() != lpFromMetric.GetValue() { + return fmt.Errorf( + "labels in collected metric %s %s are inconsistent with descriptor %s", + metricFamily.GetName(), dtoMetric, desc, + ) + } + } + return nil +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/summary.go b/vendor/github.com/prometheus/client_golang/prometheus/summary.go new file mode 100644 index 000000000..c970fdee0 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/summary.go @@ -0,0 +1,736 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "fmt" + "math" + "runtime" + "sort" + "sync" + "sync/atomic" + "time" + + "github.com/beorn7/perks/quantile" + "github.com/golang/protobuf/proto" + + dto "github.com/prometheus/client_model/go" +) + +// quantileLabel is used for the label that defines the quantile in a +// summary. +const quantileLabel = "quantile" + +// A Summary captures individual observations from an event or sample stream and +// summarizes them in a manner similar to traditional summary statistics: 1. sum +// of observations, 2. observation count, 3. rank estimations. +// +// A typical use-case is the observation of request latencies. By default, a +// Summary provides the median, the 90th and the 99th percentile of the latency +// as rank estimations. However, the default behavior will change in the +// upcoming v1.0.0 of the library. There will be no rank estimations at all by +// default. For a sane transition, it is recommended to set the desired rank +// estimations explicitly. +// +// Note that the rank estimations cannot be aggregated in a meaningful way with +// the Prometheus query language (i.e. you cannot average or add them). If you +// need aggregatable quantiles (e.g. you want the 99th percentile latency of all +// queries served across all instances of a service), consider the Histogram +// metric type. See the Prometheus documentation for more details. +// +// To create Summary instances, use NewSummary. +type Summary interface { + Metric + Collector + + // Observe adds a single observation to the summary. + Observe(float64) +} + +var errQuantileLabelNotAllowed = fmt.Errorf( + "%q is not allowed as label name in summaries", quantileLabel, +) + +// Default values for SummaryOpts. 
+const (
+	// DefMaxAge is the default duration for which observations stay
+	// relevant.
+	DefMaxAge time.Duration = 10 * time.Minute
+	// DefAgeBuckets is the default number of buckets used to calculate the
+	// age of observations.
+	DefAgeBuckets = 5
+	// DefBufCap is the standard buffer size for collecting Summary observations.
+	DefBufCap = 500
+)
+
+// SummaryOpts bundles the options for creating a Summary metric. It is
+// mandatory to set Name to a non-empty string. While all other fields are
+// optional and can safely be left at their zero value, it is recommended to set
+// a help string and to explicitly set the Objectives field to the desired value
+// as the default value will change in the upcoming v1.0.0 of the library.
+type SummaryOpts struct {
+	// Namespace, Subsystem, and Name are components of the fully-qualified
+	// name of the Summary (created by joining these components with
+	// "_"). Only Name is mandatory, the others merely help structuring the
+	// name. Note that the fully-qualified name of the Summary must be a
+	// valid Prometheus metric name.
+	Namespace string
+	Subsystem string
+	Name      string
+
+	// Help provides information about this Summary.
+	//
+	// Metrics with the same fully-qualified name must have the same Help
+	// string.
+	Help string
+
+	// ConstLabels are used to attach fixed labels to this metric. Metrics
+	// with the same fully-qualified name must have the same label names in
+	// their ConstLabels.
+	//
+	// Due to the way a Summary is represented in the Prometheus text format
+	// and how it is handled by the Prometheus server internally, “quantile”
+	// is an illegal label name. Construction of a Summary or SummaryVec
+	// will panic if this label name is used in ConstLabels.
+	//
+	// ConstLabels are only used rarely. In particular, do not use them to
+	// attach the same labels to all your metrics. Those use cases are
+	// better covered by target labels set by the scraping Prometheus
+	// server, or by one specific metric (e.g. a build_info or a
+	// machine_role metric). See also
+	// https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels,-not-static-scraped-labels
+	ConstLabels Labels
+
+	// Objectives defines the quantile rank estimates with their respective
+	// absolute error. If Objectives[q] = e, then the value reported for q
+	// will be the φ-quantile value for some φ between q-e and q+e. The
+	// default value is an empty map, resulting in a summary without
+	// quantiles.
+	Objectives map[float64]float64
+
+	// MaxAge defines the duration for which an observation stays relevant
+	// for the summary. Must be positive. The default value is DefMaxAge.
+	MaxAge time.Duration
+
+	// AgeBuckets is the number of buckets used to exclude observations that
+	// are older than MaxAge from the summary. A higher number has a
+	// resource penalty, so only increase it if the higher resolution is
+	// really required. For very high observation rates, you might want to
+	// reduce the number of age buckets. With only one age bucket, you will
+	// effectively see a complete reset of the summary each time MaxAge has
+	// passed. The default value is DefAgeBuckets.
+	AgeBuckets uint32
+
+	// BufCap defines the default sample stream buffer size. The default
+	// value of DefBufCap should suffice for most uses. If there is a need
+	// to increase the value, a multiple of 500 is recommended (because that
+	// is the internal buffer size of the underlying package
+	// "github.com/beorn7/perks/quantile").
+	BufCap uint32
+}
+
+// Problem with the sliding-window decay algorithm... The Merge method of
+// perks/quantile is actually not working as advertised - and it might be
+// unfixable, as the underlying algorithm is apparently not capable of merging
+// summaries in the first place. To avoid using Merge, we are currently adding
+// observations to _each_ age bucket, i.e. the effort to add a sample is
+// essentially multiplied by the number of age buckets. When rotating age
+// buckets, we empty the previous head stream. On scrape time, we simply take
+// the quantiles from the head stream (no merging required). Result: More effort
+// on observation time, less effort on scrape time, which is exactly the
+// opposite of what we try to accomplish, but at least the results are correct.
+//
+// The quite elegant previous contraption to merge the age buckets efficiently
+// on scrape time (see code up to commit 6b9530d72ea715f0ba612c0120e6e09fbf1d49d0)
+// can't be used anymore.
+
+// NewSummary creates a new Summary based on the provided SummaryOpts.
+func NewSummary(opts SummaryOpts) Summary {
+	return newSummary(
+		NewDesc(
+			BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+			opts.Help,
+			nil,
+			opts.ConstLabels,
+		),
+		opts,
+	)
+}
+
+func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
+	if len(desc.variableLabels) != len(labelValues) {
+		panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, labelValues))
+	}
+
+	for _, n := range desc.variableLabels {
+		if n == quantileLabel {
+			panic(errQuantileLabelNotAllowed)
+		}
+	}
+	for _, lp := range desc.constLabelPairs {
+		if lp.GetName() == quantileLabel {
+			panic(errQuantileLabelNotAllowed)
+		}
+	}
+
+	if opts.Objectives == nil {
+		opts.Objectives = map[float64]float64{}
+	}
+
+	if opts.MaxAge < 0 {
+		panic(fmt.Errorf("illegal max age MaxAge=%v", opts.MaxAge))
+	}
+	if opts.MaxAge == 0 {
+		opts.MaxAge = DefMaxAge
+	}
+
+	if opts.AgeBuckets == 0 {
+		opts.AgeBuckets = DefAgeBuckets
+	}
+
+	if opts.BufCap == 0 {
+		opts.BufCap = DefBufCap
+	}
+
+	if len(opts.Objectives) == 0 {
+		// Use the lock-free implementation of a Summary without objectives.
+		s := &noObjectivesSummary{
+			desc:       desc,
+			labelPairs: makeLabelPairs(desc, labelValues),
+			counts:     [2]*summaryCounts{&summaryCounts{}, &summaryCounts{}},
+		}
+		s.init(s) // Init self-collection.
+		return s
+	}
+
+	s := &summary{
+		desc: desc,
+
+		objectives:       opts.Objectives,
+		sortedObjectives: make([]float64, 0, len(opts.Objectives)),
+
+		labelPairs: makeLabelPairs(desc, labelValues),
+
+		hotBuf:         make([]float64, 0, opts.BufCap),
+		coldBuf:        make([]float64, 0, opts.BufCap),
+		streamDuration: opts.MaxAge / time.Duration(opts.AgeBuckets),
+	}
+	s.headStreamExpTime = time.Now().Add(s.streamDuration)
+	s.hotBufExpTime = s.headStreamExpTime
+
+	for i := uint32(0); i < opts.AgeBuckets; i++ {
+		s.streams = append(s.streams, s.newStream())
+	}
+	s.headStream = s.streams[0]
+
+	for qu := range s.objectives {
+		s.sortedObjectives = append(s.sortedObjectives, qu)
+	}
+	sort.Float64s(s.sortedObjectives)
+
+	s.init(s) // Init self-collection.
+	return s
+}
+
+type summary struct {
+	selfCollector
+
+	bufMtx sync.Mutex // Protects hotBuf and hotBufExpTime.
+	mtx    sync.Mutex // Protects every other moving part.
+	// Lock bufMtx before mtx if both are needed.
+ + desc *Desc + + objectives map[float64]float64 + sortedObjectives []float64 + + labelPairs []*dto.LabelPair + + sum float64 + cnt uint64 + + hotBuf, coldBuf []float64 + + streams []*quantile.Stream + streamDuration time.Duration + headStream *quantile.Stream + headStreamIdx int + headStreamExpTime, hotBufExpTime time.Time +} + +func (s *summary) Desc() *Desc { + return s.desc +} + +func (s *summary) Observe(v float64) { + s.bufMtx.Lock() + defer s.bufMtx.Unlock() + + now := time.Now() + if now.After(s.hotBufExpTime) { + s.asyncFlush(now) + } + s.hotBuf = append(s.hotBuf, v) + if len(s.hotBuf) == cap(s.hotBuf) { + s.asyncFlush(now) + } +} + +func (s *summary) Write(out *dto.Metric) error { + sum := &dto.Summary{} + qs := make([]*dto.Quantile, 0, len(s.objectives)) + + s.bufMtx.Lock() + s.mtx.Lock() + // Swap bufs even if hotBuf is empty to set new hotBufExpTime. + s.swapBufs(time.Now()) + s.bufMtx.Unlock() + + s.flushColdBuf() + sum.SampleCount = proto.Uint64(s.cnt) + sum.SampleSum = proto.Float64(s.sum) + + for _, rank := range s.sortedObjectives { + var q float64 + if s.headStream.Count() == 0 { + q = math.NaN() + } else { + q = s.headStream.Query(rank) + } + qs = append(qs, &dto.Quantile{ + Quantile: proto.Float64(rank), + Value: proto.Float64(q), + }) + } + + s.mtx.Unlock() + + if len(qs) > 0 { + sort.Sort(quantSort(qs)) + } + sum.Quantile = qs + + out.Summary = sum + out.Label = s.labelPairs + return nil +} + +func (s *summary) newStream() *quantile.Stream { + return quantile.NewTargeted(s.objectives) +} + +// asyncFlush needs bufMtx locked. +func (s *summary) asyncFlush(now time.Time) { + s.mtx.Lock() + s.swapBufs(now) + + // Unblock the original goroutine that was responsible for the mutation + // that triggered the compaction. But hold onto the global non-buffer + // state mutex until the operation finishes. + go func() { + s.flushColdBuf() + s.mtx.Unlock() + }() +} + +// rotateStreams needs mtx AND bufMtx locked. +func (s *summary) maybeRotateStreams() { + for !s.hotBufExpTime.Equal(s.headStreamExpTime) { + s.headStream.Reset() + s.headStreamIdx++ + if s.headStreamIdx >= len(s.streams) { + s.headStreamIdx = 0 + } + s.headStream = s.streams[s.headStreamIdx] + s.headStreamExpTime = s.headStreamExpTime.Add(s.streamDuration) + } +} + +// flushColdBuf needs mtx locked. +func (s *summary) flushColdBuf() { + for _, v := range s.coldBuf { + for _, stream := range s.streams { + stream.Insert(v) + } + s.cnt++ + s.sum += v + } + s.coldBuf = s.coldBuf[0:0] + s.maybeRotateStreams() +} + +// swapBufs needs mtx AND bufMtx locked, coldBuf must be empty. +func (s *summary) swapBufs(now time.Time) { + if len(s.coldBuf) != 0 { + panic("coldBuf is not empty") + } + s.hotBuf, s.coldBuf = s.coldBuf, s.hotBuf + // hotBuf is now empty and gets new expiration set. + for now.After(s.hotBufExpTime) { + s.hotBufExpTime = s.hotBufExpTime.Add(s.streamDuration) + } +} + +type summaryCounts struct { + // sumBits contains the bits of the float64 representing the sum of all + // observations. sumBits and count have to go first in the struct to + // guarantee alignment for atomic operations. + // http://golang.org/pkg/sync/atomic/#pkg-note-BUG + sumBits uint64 + count uint64 +} + +type noObjectivesSummary struct { + // countAndHotIdx enables lock-free writes with use of atomic updates. + // The most significant bit is the hot index [0 or 1] of the count field + // below. Observe calls update the hot one. All remaining bits count the + // number of Observe calls. 
Observe starts by incrementing this counter,
+	// and finishes by incrementing the count field in the respective
+	// summaryCounts, as a marker for completion.
+	//
+	// Calls of the Write method (which are non-mutating reads from the
+	// perspective of the summary) swap the hot and cold counts under the
+	// writeMtx lock. A cooldown is awaited (while locked) by comparing the
+	// number of observations with the initiation count. Once they match, the
+	// last observation on the now-cold counts has completed. All cold fields
+	// must be merged into the new hot counts before releasing writeMtx.
+
+	// Fields with atomic access first! See alignment constraint:
+	// http://golang.org/pkg/sync/atomic/#pkg-note-BUG
+	countAndHotIdx uint64
+
+	selfCollector
+	desc     *Desc
+	writeMtx sync.Mutex // Only used in the Write method.
+
+	// Two counts, one is "hot" for lock-free observations, the other is
+	// "cold" for writing out a dto.Metric. It has to be an array of
+	// pointers to guarantee 64bit alignment of the histogramCounts, see
+	// http://golang.org/pkg/sync/atomic/#pkg-note-BUG.
+	counts [2]*summaryCounts
+
+	labelPairs []*dto.LabelPair
+}
+
+func (s *noObjectivesSummary) Desc() *Desc {
+	return s.desc
+}
+
+func (s *noObjectivesSummary) Observe(v float64) {
+	// We increment s.countAndHotIdx so that the counter in the lower
+	// 63 bits gets incremented. At the same time, we get the new value
+	// back, which we can use to find the currently-hot counts.
+	n := atomic.AddUint64(&s.countAndHotIdx, 1)
+	hotCounts := s.counts[n>>63]
+
+	for {
+		oldBits := atomic.LoadUint64(&hotCounts.sumBits)
+		newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
+		if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) {
+			break
+		}
+	}
+	// Increment count last as we take it as a signal that the observation
+	// is complete.
+	atomic.AddUint64(&hotCounts.count, 1)
+}
+
+func (s *noObjectivesSummary) Write(out *dto.Metric) error {
+	// For simplicity, we protect this whole method by a mutex. It is not in
+	// the hot path, i.e. Observe is called much more often than Write. The
+	// complication of making Write lock-free isn't worth it, if possible at
+	// all.
+	s.writeMtx.Lock()
+	defer s.writeMtx.Unlock()
+
+	// Adding 1<<63 switches the hot index (from 0 to 1 or from 1 to 0)
+	// without touching the count bits. See the struct comments for a full
+	// description of the algorithm.
+	n := atomic.AddUint64(&s.countAndHotIdx, 1<<63)
+	// count is contained unchanged in the lower 63 bits.
+	count := n & ((1 << 63) - 1)
+	// The most significant bit tells us which of the two counts is hot. The
+	// complement is thus the cold one.
+	hotCounts := s.counts[n>>63]
+	coldCounts := s.counts[(^n)>>63]
+
+	// Await cooldown.
+	for count != atomic.LoadUint64(&coldCounts.count) {
+		runtime.Gosched() // Let observations get work done.
+	}
+
+	sum := &dto.Summary{
+		SampleCount: proto.Uint64(count),
+		SampleSum:   proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))),
+	}
+
+	out.Summary = sum
+	out.Label = s.labelPairs
+
+	// Finally add all the cold counts to the new hot counts and reset the cold counts.
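+	// The count can be transferred with a plain atomic add, but the sum is
+	// stored as raw float64 bits in a uint64, so it cannot; the CAS loop
+	// below retries until no concurrent Observe has changed the bits
+	// between the load and the swap.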
+ atomic.AddUint64(&hotCounts.count, count) + atomic.StoreUint64(&coldCounts.count, 0) + for { + oldBits := atomic.LoadUint64(&hotCounts.sumBits) + newBits := math.Float64bits(math.Float64frombits(oldBits) + sum.GetSampleSum()) + if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) { + atomic.StoreUint64(&coldCounts.sumBits, 0) + break + } + } + return nil +} + +type quantSort []*dto.Quantile + +func (s quantSort) Len() int { + return len(s) +} + +func (s quantSort) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s quantSort) Less(i, j int) bool { + return s[i].GetQuantile() < s[j].GetQuantile() +} + +// SummaryVec is a Collector that bundles a set of Summaries that all share the +// same Desc, but have different values for their variable labels. This is used +// if you want to count the same thing partitioned by various dimensions +// (e.g. HTTP request latencies, partitioned by status code and method). Create +// instances with NewSummaryVec. +type SummaryVec struct { + *metricVec +} + +// NewSummaryVec creates a new SummaryVec based on the provided SummaryOpts and +// partitioned by the given label names. +// +// Due to the way a Summary is represented in the Prometheus text format and how +// it is handled by the Prometheus server internally, “quantile” is an illegal +// label name. NewSummaryVec will panic if this label name is used. +func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec { + for _, ln := range labelNames { + if ln == quantileLabel { + panic(errQuantileLabelNotAllowed) + } + } + desc := NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + labelNames, + opts.ConstLabels, + ) + return &SummaryVec{ + metricVec: newMetricVec(desc, func(lvs ...string) Metric { + return newSummary(desc, opts, lvs...) + }), + } +} + +// GetMetricWithLabelValues returns the Summary for the given slice of label +// values (same order as the VariableLabels in Desc). If that combination of +// label values is accessed for the first time, a new Summary is created. +// +// It is possible to call this method without using the returned Summary to only +// create the new Summary but leave it at its starting value, a Summary without +// any observations. +// +// Keeping the Summary for later use is possible (and should be considered if +// performance is critical), but keep in mind that Reset, DeleteLabelValues and +// Delete can be used to delete the Summary from the SummaryVec. In that case, +// the Summary will still exist, but it will not be exported anymore, even if a +// Summary with the same label values is created later. See also the CounterVec +// example. +// +// An error is returned if the number of label values is not the same as the +// number of VariableLabels in Desc (minus any curried labels). +// +// Note that for more than one label value, this method is prone to mistakes +// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as +// an alternative to avoid that type of mistake. For higher label numbers, the +// latter has a much more readable (albeit more verbose) syntax, but it comes +// with a performance overhead (for creating and processing the Labels map). +// See also the GaugeVec example. +func (v *SummaryVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) { + metric, err := v.metricVec.getMetricWithLabelValues(lvs...) 
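+	// The type assertion below is safe: every metric in this vector is
+	// created by the newSummary factory passed to newMetricVec, and all
+	// summary implementations satisfy Observer.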
+	if metric != nil {
+		return metric.(Observer), err
+	}
+	return nil, err
+}
+
+// GetMetricWith returns the Summary for the given Labels map (the label names
+// must match those of the VariableLabels in Desc). If that label map is
+// accessed for the first time, a new Summary is created. Implications of
+// creating a Summary without using it and keeping the Summary for later use are
+// the same as for GetMetricWithLabelValues.
+//
+// An error is returned if the number and names of the Labels are inconsistent
+// with those of the VariableLabels in Desc (minus any curried labels).
+//
+// This method is used for the same purpose as
+// GetMetricWithLabelValues(...string). See there for pros and cons of the two
+// methods.
+func (v *SummaryVec) GetMetricWith(labels Labels) (Observer, error) {
+	metric, err := v.metricVec.getMetricWith(labels)
+	if metric != nil {
+		return metric.(Observer), err
+	}
+	return nil, err
+}
+
+// WithLabelValues works as GetMetricWithLabelValues, but panics where
+// GetMetricWithLabelValues would have returned an error. Not returning an
+// error allows shortcuts like
+//	myVec.WithLabelValues("404", "GET").Observe(42.21)
+func (v *SummaryVec) WithLabelValues(lvs ...string) Observer {
+	s, err := v.GetMetricWithLabelValues(lvs...)
+	if err != nil {
+		panic(err)
+	}
+	return s
+}
+
+// With works as GetMetricWith, but panics where GetMetricWith would have
+// returned an error. Not returning an error allows shortcuts like
+//	myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21)
+func (v *SummaryVec) With(labels Labels) Observer {
+	s, err := v.GetMetricWith(labels)
+	if err != nil {
+		panic(err)
+	}
+	return s
+}
+
+// CurryWith returns a vector curried with the provided labels, i.e. the
+// returned vector has those labels pre-set for all labeled operations performed
+// on it. The cardinality of the curried vector is reduced accordingly. The
+// order of the remaining labels stays the same (just with the curried labels
+// taken out of the sequence, which is relevant for the
+// (GetMetric)WithLabelValues methods). It is possible to curry a curried
+// vector, but only with labels not yet used for currying before.
+//
+// The metrics contained in the SummaryVec are shared between the curried and
+// uncurried vectors. They are just accessed differently. Curried and uncurried
+// vectors behave identically in terms of collection. Only one must be
+// registered with a given registry (usually the uncurried version). The Reset
+// method deletes all metrics, even if called on a curried vector.
+func (v *SummaryVec) CurryWith(labels Labels) (ObserverVec, error) {
+	vec, err := v.curryWith(labels)
+	if vec != nil {
+		return &SummaryVec{vec}, err
+	}
+	return nil, err
+}
+
+// MustCurryWith works as CurryWith but panics where CurryWith would have
+// returned an error.
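+//
+// A hedged usage sketch (the vector and label names here are illustrative,
+// not part of this package's API surface):
+//
+//	latency := NewSummaryVec(SummaryOpts{Name: "req_seconds"}, []string{"method", "code"})
+//	get := latency.MustCurryWith(Labels{"method": "GET"})
+//	// Observes the same series as latency.WithLabelValues("GET", "200").
+//	get.WithLabelValues("200").Observe(0.03)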
+func (v *SummaryVec) MustCurryWith(labels Labels) ObserverVec {
+	vec, err := v.CurryWith(labels)
+	if err != nil {
+		panic(err)
+	}
+	return vec
+}
+
+type constSummary struct {
+	desc       *Desc
+	count      uint64
+	sum        float64
+	quantiles  map[float64]float64
+	labelPairs []*dto.LabelPair
+}
+
+func (s *constSummary) Desc() *Desc {
+	return s.desc
+}
+
+func (s *constSummary) Write(out *dto.Metric) error {
+	sum := &dto.Summary{}
+	qs := make([]*dto.Quantile, 0, len(s.quantiles))
+
+	sum.SampleCount = proto.Uint64(s.count)
+	sum.SampleSum = proto.Float64(s.sum)
+
+	for rank, q := range s.quantiles {
+		qs = append(qs, &dto.Quantile{
+			Quantile: proto.Float64(rank),
+			Value:    proto.Float64(q),
+		})
+	}
+
+	if len(qs) > 0 {
+		sort.Sort(quantSort(qs))
+	}
+	sum.Quantile = qs
+
+	out.Summary = sum
+	out.Label = s.labelPairs
+
+	return nil
+}
+
+// NewConstSummary returns a metric representing a Prometheus summary with fixed
+// values for the count, sum, and quantiles. As those parameters cannot be
+// changed, the returned value does not implement the Summary interface (but
+// only the Metric interface). Users of this package will not have much use for
+// it in regular operations. However, when implementing custom Collectors, it is
+// useful as a throw-away metric that is generated on the fly to send it to
+// Prometheus in the Collect method.
+//
+// quantiles maps ranks to quantile values. For example, a median latency of
+// 0.23s and a 99th percentile latency of 0.56s would be expressed as:
+//	map[float64]float64{0.5: 0.23, 0.99: 0.56}
+//
+// NewConstSummary returns an error if the length of labelValues is not
+// consistent with the variable labels in Desc or if Desc is invalid.
+func NewConstSummary(
+	desc *Desc,
+	count uint64,
+	sum float64,
+	quantiles map[float64]float64,
+	labelValues ...string,
+) (Metric, error) {
+	if desc.err != nil {
+		return nil, desc.err
+	}
+	if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil {
+		return nil, err
+	}
+	return &constSummary{
+		desc:       desc,
+		count:      count,
+		sum:        sum,
+		quantiles:  quantiles,
+		labelPairs: makeLabelPairs(desc, labelValues),
+	}, nil
+}
+
+// MustNewConstSummary is a version of NewConstSummary that panics where
+// NewConstSummary would have returned an error.
+func MustNewConstSummary(
+	desc *Desc,
+	count uint64,
+	sum float64,
+	quantiles map[float64]float64,
+	labelValues ...string,
+) Metric {
+	m, err := NewConstSummary(desc, count, sum, quantiles, labelValues...)
+	if err != nil {
+		panic(err)
+	}
+	return m
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/timer.go b/vendor/github.com/prometheus/client_golang/prometheus/timer.go
new file mode 100644
index 000000000..8d5f10523
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/timer.go
@@ -0,0 +1,54 @@
+// Copyright 2016 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import "time"
+
+// Timer is a helper type to time functions.
Use NewTimer to create new +// instances. +type Timer struct { + begin time.Time + observer Observer +} + +// NewTimer creates a new Timer. The provided Observer is used to observe a +// duration in seconds. Timer is usually used to time a function call in the +// following way: +// func TimeMe() { +// timer := NewTimer(myHistogram) +// defer timer.ObserveDuration() +// // Do actual work. +// } +func NewTimer(o Observer) *Timer { + return &Timer{ + begin: time.Now(), + observer: o, + } +} + +// ObserveDuration records the duration passed since the Timer was created with +// NewTimer. It calls the Observe method of the Observer provided during +// construction with the duration in seconds as an argument. The observed +// duration is also returned. ObserveDuration is usually called with a defer +// statement. +// +// Note that this method is only guaranteed to never observe negative durations +// if used with Go1.9+. +func (t *Timer) ObserveDuration() time.Duration { + d := time.Since(t.begin) + if t.observer != nil { + t.observer.Observe(d.Seconds()) + } + return d +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/untyped.go b/vendor/github.com/prometheus/client_golang/prometheus/untyped.go new file mode 100644 index 000000000..0f9ce63f4 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/untyped.go @@ -0,0 +1,42 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +// UntypedOpts is an alias for Opts. See there for doc comments. +type UntypedOpts Opts + +// UntypedFunc works like GaugeFunc but the collected metric is of type +// "Untyped". UntypedFunc is useful to mirror an external metric of unknown +// type. +// +// To create UntypedFunc instances, use NewUntypedFunc. +type UntypedFunc interface { + Metric + Collector +} + +// NewUntypedFunc creates a new UntypedFunc based on the provided +// UntypedOpts. The value reported is determined by calling the given function +// from within the Write method. Take into account that metric collection may +// happen concurrently. If that results in concurrent calls to Write, like in +// the case where an UntypedFunc is directly registered with Prometheus, the +// provided function must be concurrency-safe. +func NewUntypedFunc(opts UntypedOpts, function func() float64) UntypedFunc { + return newValueFunc(NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ), UntypedValue, function) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/value.go b/vendor/github.com/prometheus/client_golang/prometheus/value.go new file mode 100644 index 000000000..eb248f108 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/value.go @@ -0,0 +1,162 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "fmt" + "sort" + + "github.com/golang/protobuf/proto" + + dto "github.com/prometheus/client_model/go" +) + +// ValueType is an enumeration of metric types that represent a simple value. +type ValueType int + +// Possible values for the ValueType enum. +const ( + _ ValueType = iota + CounterValue + GaugeValue + UntypedValue +) + +// valueFunc is a generic metric for simple values retrieved on collect time +// from a function. It implements Metric and Collector. Its effective type is +// determined by ValueType. This is a low-level building block used by the +// library to back the implementations of CounterFunc, GaugeFunc, and +// UntypedFunc. +type valueFunc struct { + selfCollector + + desc *Desc + valType ValueType + function func() float64 + labelPairs []*dto.LabelPair +} + +// newValueFunc returns a newly allocated valueFunc with the given Desc and +// ValueType. The value reported is determined by calling the given function +// from within the Write method. Take into account that metric collection may +// happen concurrently. If that results in concurrent calls to Write, like in +// the case where a valueFunc is directly registered with Prometheus, the +// provided function must be concurrency-safe. +func newValueFunc(desc *Desc, valueType ValueType, function func() float64) *valueFunc { + result := &valueFunc{ + desc: desc, + valType: valueType, + function: function, + labelPairs: makeLabelPairs(desc, nil), + } + result.init(result) + return result +} + +func (v *valueFunc) Desc() *Desc { + return v.desc +} + +func (v *valueFunc) Write(out *dto.Metric) error { + return populateMetric(v.valType, v.function(), v.labelPairs, out) +} + +// NewConstMetric returns a metric with one fixed value that cannot be +// changed. Users of this package will not have much use for it in regular +// operations. However, when implementing custom Collectors, it is useful as a +// throw-away metric that is generated on the fly to send it to Prometheus in +// the Collect method. NewConstMetric returns an error if the length of +// labelValues is not consistent with the variable labels in Desc or if Desc is +// invalid. +func NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) (Metric, error) { + if desc.err != nil { + return nil, desc.err + } + if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil { + return nil, err + } + return &constMetric{ + desc: desc, + valType: valueType, + val: value, + labelPairs: makeLabelPairs(desc, labelValues), + }, nil +} + +// MustNewConstMetric is a version of NewConstMetric that panics where +// NewConstMetric would have returned an error. +func MustNewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) Metric { + m, err := NewConstMetric(desc, valueType, value, labelValues...) 
+	if err != nil {
+		panic(err)
+	}
+	return m
+}
+
+type constMetric struct {
+	desc       *Desc
+	valType    ValueType
+	val        float64
+	labelPairs []*dto.LabelPair
+}
+
+func (m *constMetric) Desc() *Desc {
+	return m.desc
+}
+
+func (m *constMetric) Write(out *dto.Metric) error {
+	return populateMetric(m.valType, m.val, m.labelPairs, out)
+}
+
+func populateMetric(
+	t ValueType,
+	v float64,
+	labelPairs []*dto.LabelPair,
+	m *dto.Metric,
+) error {
+	m.Label = labelPairs
+	switch t {
+	case CounterValue:
+		m.Counter = &dto.Counter{Value: proto.Float64(v)}
+	case GaugeValue:
+		m.Gauge = &dto.Gauge{Value: proto.Float64(v)}
+	case UntypedValue:
+		m.Untyped = &dto.Untyped{Value: proto.Float64(v)}
+	default:
+		return fmt.Errorf("encountered unknown type %v", t)
+	}
+	return nil
+}
+
+func makeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair {
+	totalLen := len(desc.variableLabels) + len(desc.constLabelPairs)
+	if totalLen == 0 {
+		// Super fast path.
+		return nil
+	}
+	if len(desc.variableLabels) == 0 {
+		// Moderately fast path.
+		return desc.constLabelPairs
+	}
+	labelPairs := make([]*dto.LabelPair, 0, totalLen)
+	for i, n := range desc.variableLabels {
+		labelPairs = append(labelPairs, &dto.LabelPair{
+			Name:  proto.String(n),
+			Value: proto.String(labelValues[i]),
+		})
+	}
+	labelPairs = append(labelPairs, desc.constLabelPairs...)
+	sort.Sort(labelPairSorter(labelPairs))
+	return labelPairs
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vec.go b/vendor/github.com/prometheus/client_golang/prometheus/vec.go
new file mode 100644
index 000000000..14ed9e856
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/vec.go
@@ -0,0 +1,472 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+	"fmt"
+	"sync"
+
+	"github.com/prometheus/common/model"
+)
+
+// metricVec is a Collector to bundle metrics of the same name that differ in
+// their label values. metricVec is not used directly (and therefore
+// unexported). It is used as a building block for implementations of vectors of
+// a given metric type, like GaugeVec, CounterVec, SummaryVec, and HistogramVec.
+// It also handles label currying. It is backed by a metricMap internally,
+// which is shared between differently curried copies of the same vector.
+type metricVec struct {
+	*metricMap
+
+	curry []curriedLabelValue
+
+	// hashAdd and hashAddByte can be replaced for testing collision handling.
+	hashAdd     func(h uint64, s string) uint64
+	hashAddByte func(h uint64, b byte) uint64
+}
+
+// newMetricVec returns an initialized metricVec.
+func newMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *metricVec {
+	return &metricVec{
+		metricMap: &metricMap{
+			metrics:   map[uint64][]metricWithLabelValues{},
+			desc:      desc,
+			newMetric: newMetric,
+		},
+		hashAdd:     hashAdd,
+		hashAddByte: hashAddByte,
+	}
+}
+
+// DeleteLabelValues removes the metric where the variable labels are the same
+// as those passed in as labels (same order as the VariableLabels in Desc).
It +// returns true if a metric was deleted. +// +// It is not an error if the number of label values is not the same as the +// number of VariableLabels in Desc. However, such inconsistent label count can +// never match an actual metric, so the method will always return false in that +// case. +// +// Note that for more than one label value, this method is prone to mistakes +// caused by an incorrect order of arguments. Consider Delete(Labels) as an +// alternative to avoid that type of mistake. For higher label numbers, the +// latter has a much more readable (albeit more verbose) syntax, but it comes +// with a performance overhead (for creating and processing the Labels map). +// See also the CounterVec example. +func (m *metricVec) DeleteLabelValues(lvs ...string) bool { + h, err := m.hashLabelValues(lvs) + if err != nil { + return false + } + + return m.metricMap.deleteByHashWithLabelValues(h, lvs, m.curry) +} + +// Delete deletes the metric where the variable labels are the same as those +// passed in as labels. It returns true if a metric was deleted. +// +// It is not an error if the number and names of the Labels are inconsistent +// with those of the VariableLabels in Desc. However, such inconsistent Labels +// can never match an actual metric, so the method will always return false in +// that case. +// +// This method is used for the same purpose as DeleteLabelValues(...string). See +// there for pros and cons of the two methods. +func (m *metricVec) Delete(labels Labels) bool { + h, err := m.hashLabels(labels) + if err != nil { + return false + } + + return m.metricMap.deleteByHashWithLabels(h, labels, m.curry) +} + +func (m *metricVec) curryWith(labels Labels) (*metricVec, error) { + var ( + newCurry []curriedLabelValue + oldCurry = m.curry + iCurry int + ) + for i, label := range m.desc.variableLabels { + val, ok := labels[label] + if iCurry < len(oldCurry) && oldCurry[iCurry].index == i { + if ok { + return nil, fmt.Errorf("label name %q is already curried", label) + } + newCurry = append(newCurry, oldCurry[iCurry]) + iCurry++ + } else { + if !ok { + continue // Label stays uncurried. 
+ } + newCurry = append(newCurry, curriedLabelValue{i, val}) + } + } + if l := len(oldCurry) + len(labels) - len(newCurry); l > 0 { + return nil, fmt.Errorf("%d unknown label(s) found during currying", l) + } + + return &metricVec{ + metricMap: m.metricMap, + curry: newCurry, + hashAdd: m.hashAdd, + hashAddByte: m.hashAddByte, + }, nil +} + +func (m *metricVec) getMetricWithLabelValues(lvs ...string) (Metric, error) { + h, err := m.hashLabelValues(lvs) + if err != nil { + return nil, err + } + + return m.metricMap.getOrCreateMetricWithLabelValues(h, lvs, m.curry), nil +} + +func (m *metricVec) getMetricWith(labels Labels) (Metric, error) { + h, err := m.hashLabels(labels) + if err != nil { + return nil, err + } + + return m.metricMap.getOrCreateMetricWithLabels(h, labels, m.curry), nil +} + +func (m *metricVec) hashLabelValues(vals []string) (uint64, error) { + if err := validateLabelValues(vals, len(m.desc.variableLabels)-len(m.curry)); err != nil { + return 0, err + } + + var ( + h = hashNew() + curry = m.curry + iVals, iCurry int + ) + for i := 0; i < len(m.desc.variableLabels); i++ { + if iCurry < len(curry) && curry[iCurry].index == i { + h = m.hashAdd(h, curry[iCurry].value) + iCurry++ + } else { + h = m.hashAdd(h, vals[iVals]) + iVals++ + } + h = m.hashAddByte(h, model.SeparatorByte) + } + return h, nil +} + +func (m *metricVec) hashLabels(labels Labels) (uint64, error) { + if err := validateValuesInLabels(labels, len(m.desc.variableLabels)-len(m.curry)); err != nil { + return 0, err + } + + var ( + h = hashNew() + curry = m.curry + iCurry int + ) + for i, label := range m.desc.variableLabels { + val, ok := labels[label] + if iCurry < len(curry) && curry[iCurry].index == i { + if ok { + return 0, fmt.Errorf("label name %q is already curried", label) + } + h = m.hashAdd(h, curry[iCurry].value) + iCurry++ + } else { + if !ok { + return 0, fmt.Errorf("label name %q missing in label map", label) + } + h = m.hashAdd(h, val) + } + h = m.hashAddByte(h, model.SeparatorByte) + } + return h, nil +} + +// metricWithLabelValues provides the metric and its label values for +// disambiguation on hash collision. +type metricWithLabelValues struct { + values []string + metric Metric +} + +// curriedLabelValue sets the curried value for a label at the given index. +type curriedLabelValue struct { + index int + value string +} + +// metricMap is a helper for metricVec and shared between differently curried +// metricVecs. +type metricMap struct { + mtx sync.RWMutex // Protects metrics. + metrics map[uint64][]metricWithLabelValues + desc *Desc + newMetric func(labelValues ...string) Metric +} + +// Describe implements Collector. It will send exactly one Desc to the provided +// channel. +func (m *metricMap) Describe(ch chan<- *Desc) { + ch <- m.desc +} + +// Collect implements Collector. +func (m *metricMap) Collect(ch chan<- Metric) { + m.mtx.RLock() + defer m.mtx.RUnlock() + + for _, metrics := range m.metrics { + for _, metric := range metrics { + ch <- metric.metric + } + } +} + +// Reset deletes all metrics in this vector. +func (m *metricMap) Reset() { + m.mtx.Lock() + defer m.mtx.Unlock() + + for h := range m.metrics { + delete(m.metrics, h) + } +} + +// deleteByHashWithLabelValues removes the metric from the hash bucket h. If +// there are multiple matches in the bucket, use lvs to select a metric and +// remove only that metric. 
+func (m *metricMap) deleteByHashWithLabelValues(
+	h uint64, lvs []string, curry []curriedLabelValue,
+) bool {
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
+
+	metrics, ok := m.metrics[h]
+	if !ok {
+		return false
+	}
+
+	i := findMetricWithLabelValues(metrics, lvs, curry)
+	if i >= len(metrics) {
+		return false
+	}
+
+	if len(metrics) > 1 {
+		m.metrics[h] = append(metrics[:i], metrics[i+1:]...)
+	} else {
+		delete(m.metrics, h)
+	}
+	return true
+}
+
+// deleteByHashWithLabels removes the metric from the hash bucket h. If there
+// are multiple matches in the bucket, use lvs to select a metric and remove
+// only that metric.
+func (m *metricMap) deleteByHashWithLabels(
+	h uint64, labels Labels, curry []curriedLabelValue,
+) bool {
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
+
+	metrics, ok := m.metrics[h]
+	if !ok {
+		return false
+	}
+	i := findMetricWithLabels(m.desc, metrics, labels, curry)
+	if i >= len(metrics) {
+		return false
+	}
+
+	if len(metrics) > 1 {
+		m.metrics[h] = append(metrics[:i], metrics[i+1:]...)
+	} else {
+		delete(m.metrics, h)
+	}
+	return true
+}
+
+// getOrCreateMetricWithLabelValues retrieves the metric by hash and label
+// values or creates it and returns the new one.
+//
+// This function holds the mutex.
+func (m *metricMap) getOrCreateMetricWithLabelValues(
+	hash uint64, lvs []string, curry []curriedLabelValue,
+) Metric {
+	m.mtx.RLock()
+	metric, ok := m.getMetricWithHashAndLabelValues(hash, lvs, curry)
+	m.mtx.RUnlock()
+	if ok {
+		return metric
+	}
+
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
+	metric, ok = m.getMetricWithHashAndLabelValues(hash, lvs, curry)
+	if !ok {
+		inlinedLVs := inlineLabelValues(lvs, curry)
+		metric = m.newMetric(inlinedLVs...)
+		m.metrics[hash] = append(m.metrics[hash], metricWithLabelValues{values: inlinedLVs, metric: metric})
+	}
+	return metric
+}
+
+// getOrCreateMetricWithLabels retrieves the metric by hash and label names and
+// values or creates it and returns the new one.
+//
+// This function holds the mutex.
+func (m *metricMap) getOrCreateMetricWithLabels(
+	hash uint64, labels Labels, curry []curriedLabelValue,
+) Metric {
+	m.mtx.RLock()
+	metric, ok := m.getMetricWithHashAndLabels(hash, labels, curry)
+	m.mtx.RUnlock()
+	if ok {
+		return metric
+	}
+
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
+	metric, ok = m.getMetricWithHashAndLabels(hash, labels, curry)
+	if !ok {
+		lvs := extractLabelValues(m.desc, labels, curry)
+		metric = m.newMetric(lvs...)
+		m.metrics[hash] = append(m.metrics[hash], metricWithLabelValues{values: lvs, metric: metric})
+	}
+	return metric
+}
+
+// getMetricWithHashAndLabelValues gets a metric while handling possible
+// collisions in the hash space. Must be called while holding the read mutex.
+func (m *metricMap) getMetricWithHashAndLabelValues(
+	h uint64, lvs []string, curry []curriedLabelValue,
+) (Metric, bool) {
+	metrics, ok := m.metrics[h]
+	if ok {
+		if i := findMetricWithLabelValues(metrics, lvs, curry); i < len(metrics) {
+			return metrics[i].metric, true
+		}
+	}
+	return nil, false
+}
+
+// getMetricWithHashAndLabels gets a metric while handling possible collisions in
+// the hash space. Must be called while holding the read mutex.
+func (m *metricMap) getMetricWithHashAndLabels( + h uint64, labels Labels, curry []curriedLabelValue, +) (Metric, bool) { + metrics, ok := m.metrics[h] + if ok { + if i := findMetricWithLabels(m.desc, metrics, labels, curry); i < len(metrics) { + return metrics[i].metric, true + } + } + return nil, false +} + +// findMetricWithLabelValues returns the index of the matching metric or +// len(metrics) if not found. +func findMetricWithLabelValues( + metrics []metricWithLabelValues, lvs []string, curry []curriedLabelValue, +) int { + for i, metric := range metrics { + if matchLabelValues(metric.values, lvs, curry) { + return i + } + } + return len(metrics) +} + +// findMetricWithLabels returns the index of the matching metric or len(metrics) +// if not found. +func findMetricWithLabels( + desc *Desc, metrics []metricWithLabelValues, labels Labels, curry []curriedLabelValue, +) int { + for i, metric := range metrics { + if matchLabels(desc, metric.values, labels, curry) { + return i + } + } + return len(metrics) +} + +func matchLabelValues(values []string, lvs []string, curry []curriedLabelValue) bool { + if len(values) != len(lvs)+len(curry) { + return false + } + var iLVs, iCurry int + for i, v := range values { + if iCurry < len(curry) && curry[iCurry].index == i { + if v != curry[iCurry].value { + return false + } + iCurry++ + continue + } + if v != lvs[iLVs] { + return false + } + iLVs++ + } + return true +} + +func matchLabels(desc *Desc, values []string, labels Labels, curry []curriedLabelValue) bool { + if len(values) != len(labels)+len(curry) { + return false + } + iCurry := 0 + for i, k := range desc.variableLabels { + if iCurry < len(curry) && curry[iCurry].index == i { + if values[i] != curry[iCurry].value { + return false + } + iCurry++ + continue + } + if values[i] != labels[k] { + return false + } + } + return true +} + +func extractLabelValues(desc *Desc, labels Labels, curry []curriedLabelValue) []string { + labelValues := make([]string, len(labels)+len(curry)) + iCurry := 0 + for i, k := range desc.variableLabels { + if iCurry < len(curry) && curry[iCurry].index == i { + labelValues[i] = curry[iCurry].value + iCurry++ + continue + } + labelValues[i] = labels[k] + } + return labelValues +} + +func inlineLabelValues(lvs []string, curry []curriedLabelValue) []string { + labelValues := make([]string, len(lvs)+len(curry)) + var iCurry, iLVs int + for i := range labelValues { + if iCurry < len(curry) && curry[iCurry].index == i { + labelValues[i] = curry[iCurry].value + iCurry++ + continue + } + labelValues[i] = lvs[iLVs] + iLVs++ + } + return labelValues +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/wrap.go b/vendor/github.com/prometheus/client_golang/prometheus/wrap.go new file mode 100644 index 000000000..e303eef6d --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/wrap.go @@ -0,0 +1,200 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package prometheus + +import ( + "fmt" + "sort" + + "github.com/golang/protobuf/proto" + + dto "github.com/prometheus/client_model/go" +) + +// WrapRegistererWith returns a Registerer wrapping the provided +// Registerer. Collectors registered with the returned Registerer will be +// registered with the wrapped Registerer in a modified way. The modified +// Collector adds the provided Labels to all Metrics it collects (as +// ConstLabels). The Metrics collected by the unmodified Collector must not +// duplicate any of those labels. +// +// WrapRegistererWith provides a way to add fixed labels to a subset of +// Collectors. It should not be used to add fixed labels to all metrics exposed. +// +// Conflicts between Collectors registered through the original Registerer with +// Collectors registered through the wrapping Registerer will still be +// detected. Any AlreadyRegisteredError returned by the Register method of +// either Registerer will contain the ExistingCollector in the form it was +// provided to the respective registry. +// +// The Collector example demonstrates a use of WrapRegistererWith. +func WrapRegistererWith(labels Labels, reg Registerer) Registerer { + return &wrappingRegisterer{ + wrappedRegisterer: reg, + labels: labels, + } +} + +// WrapRegistererWithPrefix returns a Registerer wrapping the provided +// Registerer. Collectors registered with the returned Registerer will be +// registered with the wrapped Registerer in a modified way. The modified +// Collector adds the provided prefix to the name of all Metrics it collects. +// +// WrapRegistererWithPrefix is useful to have one place to prefix all metrics of +// a sub-system. To make this work, register metrics of the sub-system with the +// wrapping Registerer returned by WrapRegistererWithPrefix. It is rarely useful +// to use the same prefix for all metrics exposed. In particular, do not prefix +// metric names that are standardized across applications, as that would break +// horizontal monitoring, for example the metrics provided by the Go collector +// (see NewGoCollector) and the process collector (see NewProcessCollector). (In +// fact, those metrics are already prefixed with “go_” or “process_”, +// respectively.) +// +// Conflicts between Collectors registered through the original Registerer with +// Collectors registered through the wrapping Registerer will still be +// detected. Any AlreadyRegisteredError returned by the Register method of +// either Registerer will contain the ExistingCollector in the form it was +// provided to the respective registry. 
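+//
+// A minimal, hypothetical sketch (the registry and collector names are
+// assumed for illustration, not part of this package):
+//
+//	reg := NewRegistry()
+//	wrapped := WrapRegistererWithPrefix("triggers_", reg)
+//	wrapped.MustRegister(eventCount) // exposed as "triggers_<metric name>"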
+func WrapRegistererWithPrefix(prefix string, reg Registerer) Registerer {
+	return &wrappingRegisterer{
+		wrappedRegisterer: reg,
+		prefix:            prefix,
+	}
+}
+
+type wrappingRegisterer struct {
+	wrappedRegisterer Registerer
+	prefix            string
+	labels            Labels
+}
+
+func (r *wrappingRegisterer) Register(c Collector) error {
+	return r.wrappedRegisterer.Register(&wrappingCollector{
+		wrappedCollector: c,
+		prefix:           r.prefix,
+		labels:           r.labels,
+	})
+}
+
+func (r *wrappingRegisterer) MustRegister(cs ...Collector) {
+	for _, c := range cs {
+		if err := r.Register(c); err != nil {
+			panic(err)
+		}
+	}
+}
+
+func (r *wrappingRegisterer) Unregister(c Collector) bool {
+	return r.wrappedRegisterer.Unregister(&wrappingCollector{
+		wrappedCollector: c,
+		prefix:           r.prefix,
+		labels:           r.labels,
+	})
+}
+
+type wrappingCollector struct {
+	wrappedCollector Collector
+	prefix           string
+	labels           Labels
+}
+
+func (c *wrappingCollector) Collect(ch chan<- Metric) {
+	wrappedCh := make(chan Metric)
+	go func() {
+		c.wrappedCollector.Collect(wrappedCh)
+		close(wrappedCh)
+	}()
+	for m := range wrappedCh {
+		ch <- &wrappingMetric{
+			wrappedMetric: m,
+			prefix:        c.prefix,
+			labels:        c.labels,
+		}
+	}
+}
+
+func (c *wrappingCollector) Describe(ch chan<- *Desc) {
+	wrappedCh := make(chan *Desc)
+	go func() {
+		c.wrappedCollector.Describe(wrappedCh)
+		close(wrappedCh)
+	}()
+	for desc := range wrappedCh {
+		ch <- wrapDesc(desc, c.prefix, c.labels)
+	}
+}
+
+func (c *wrappingCollector) unwrapRecursively() Collector {
+	switch wc := c.wrappedCollector.(type) {
+	case *wrappingCollector:
+		return wc.unwrapRecursively()
+	default:
+		return wc
+	}
+}
+
+type wrappingMetric struct {
+	wrappedMetric Metric
+	prefix        string
+	labels        Labels
+}
+
+func (m *wrappingMetric) Desc() *Desc {
+	return wrapDesc(m.wrappedMetric.Desc(), m.prefix, m.labels)
+}
+
+func (m *wrappingMetric) Write(out *dto.Metric) error {
+	if err := m.wrappedMetric.Write(out); err != nil {
+		return err
+	}
+	if len(m.labels) == 0 {
+		// No wrapping labels.
+		return nil
+	}
+	for ln, lv := range m.labels {
+		out.Label = append(out.Label, &dto.LabelPair{
+			Name:  proto.String(ln),
+			Value: proto.String(lv),
+		})
+	}
+	sort.Sort(labelPairSorter(out.Label))
+	return nil
+}
+
+func wrapDesc(desc *Desc, prefix string, labels Labels) *Desc {
+	constLabels := Labels{}
+	for _, lp := range desc.constLabelPairs {
+		constLabels[*lp.Name] = *lp.Value
+	}
+	for ln, lv := range labels {
+		if _, alreadyUsed := constLabels[ln]; alreadyUsed {
+			return &Desc{
+				fqName:          desc.fqName,
+				help:            desc.help,
+				variableLabels:  desc.variableLabels,
+				constLabelPairs: desc.constLabelPairs,
+				err:             fmt.Errorf("attempted wrapping with already existing label name %q", ln),
+			}
+		}
+		constLabels[ln] = lv
+	}
+	// NewDesc will do remaining validations.
+	newDesc := NewDesc(prefix+desc.fqName, desc.help, desc.variableLabels, constLabels)
+	// Propagate errors if there were any. This will override any error
+	// created by NewDesc above, i.e. earlier errors get precedence.
+	if desc.err != nil {
+		newDesc.err = desc.err
+	}
+	return newDesc
+}
diff --git a/vendor/github.com/prometheus/client_model/LICENSE b/vendor/github.com/prometheus/client_model/LICENSE
new file mode 100644
index 000000000..261eeb9e9
--- /dev/null
+++ b/vendor/github.com/prometheus/client_model/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
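
The vendored client_golang APIs above (summary.go, timer.go, vec.go, wrap.go) are what the controller's metrics plumbing builds on. Below is a hedged, illustrative sketch of how they fit together; the metric and registry names are invented for this example and do not appear in this patch.

	package main

	import (
		"net/http"

		"github.com/prometheus/client_golang/prometheus"
		"github.com/prometheus/client_golang/prometheus/promhttp"
	)

	// reconcileLatency is a hypothetical SummaryVec tracking reconcile
	// durations, partitioned by outcome.
	var reconcileLatency = prometheus.NewSummaryVec(
		prometheus.SummaryOpts{
			Name:       "reconcile_duration_seconds",
			Help:       "Duration of reconcile loops.",
			Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
		},
		[]string{"result"},
	)

	func main() {
		// Register against a dedicated registry, namespaced via the
		// wrapping Registerer from wrap.go.
		reg := prometheus.NewRegistry()
		prometheus.WrapRegistererWithPrefix("controller_", reg).MustRegister(reconcileLatency)

		// Time one unit of work with the Timer helper from timer.go.
		timer := prometheus.NewTimer(reconcileLatency.WithLabelValues("success"))
		// ... reconcile ...
		timer.ObserveDuration()

		http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
		_ = http.ListenAndServe(":9090", nil)
	}
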
diff --git a/vendor/github.com/prometheus/client_model/NOTICE b/vendor/github.com/prometheus/client_model/NOTICE new file mode 100644 index 000000000..20110e410 --- /dev/null +++ b/vendor/github.com/prometheus/client_model/NOTICE @@ -0,0 +1,5 @@ +Data model artifacts for Prometheus. +Copyright 2012-2015 The Prometheus Authors + +This product includes software developed at +SoundCloud Ltd. (http://soundcloud.com/). diff --git a/vendor/github.com/prometheus/client_model/go/metrics.pb.go b/vendor/github.com/prometheus/client_model/go/metrics.pb.go new file mode 100644 index 000000000..9805432c2 --- /dev/null +++ b/vendor/github.com/prometheus/client_model/go/metrics.pb.go @@ -0,0 +1,629 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: metrics.proto + +package io_prometheus_client // import "github.com/prometheus/client_model/go" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type MetricType int32 + +const ( + MetricType_COUNTER MetricType = 0 + MetricType_GAUGE MetricType = 1 + MetricType_SUMMARY MetricType = 2 + MetricType_UNTYPED MetricType = 3 + MetricType_HISTOGRAM MetricType = 4 +) + +var MetricType_name = map[int32]string{ + 0: "COUNTER", + 1: "GAUGE", + 2: "SUMMARY", + 3: "UNTYPED", + 4: "HISTOGRAM", +} +var MetricType_value = map[string]int32{ + "COUNTER": 0, + "GAUGE": 1, + "SUMMARY": 2, + "UNTYPED": 3, + "HISTOGRAM": 4, +} + +func (x MetricType) Enum() *MetricType { + p := new(MetricType) + *p = x + return p +} +func (x MetricType) String() string { + return proto.EnumName(MetricType_name, int32(x)) +} +func (x *MetricType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(MetricType_value, data, "MetricType") + if err != nil { + return err + } + *x = MetricType(value) + return nil +} +func (MetricType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{0} +} + +type LabelPair struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LabelPair) Reset() { *m = LabelPair{} } +func (m *LabelPair) String() string { return proto.CompactTextString(m) } +func (*LabelPair) ProtoMessage() {} +func (*LabelPair) Descriptor() ([]byte, []int) { + return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{0} +} +func (m *LabelPair) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_LabelPair.Unmarshal(m, b) +} +func (m *LabelPair) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_LabelPair.Marshal(b, m, deterministic) +} +func (dst *LabelPair) XXX_Merge(src proto.Message) { + xxx_messageInfo_LabelPair.Merge(dst, src) +} +func (m *LabelPair) XXX_Size() int { + return xxx_messageInfo_LabelPair.Size(m) +} +func (m *LabelPair) XXX_DiscardUnknown() { + xxx_messageInfo_LabelPair.DiscardUnknown(m) +} + +var xxx_messageInfo_LabelPair proto.InternalMessageInfo + +func (m 
*LabelPair) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *LabelPair) GetValue() string { + if m != nil && m.Value != nil { + return *m.Value + } + return "" +} + +type Gauge struct { + Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Gauge) Reset() { *m = Gauge{} } +func (m *Gauge) String() string { return proto.CompactTextString(m) } +func (*Gauge) ProtoMessage() {} +func (*Gauge) Descriptor() ([]byte, []int) { + return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{1} +} +func (m *Gauge) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Gauge.Unmarshal(m, b) +} +func (m *Gauge) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Gauge.Marshal(b, m, deterministic) +} +func (dst *Gauge) XXX_Merge(src proto.Message) { + xxx_messageInfo_Gauge.Merge(dst, src) +} +func (m *Gauge) XXX_Size() int { + return xxx_messageInfo_Gauge.Size(m) +} +func (m *Gauge) XXX_DiscardUnknown() { + xxx_messageInfo_Gauge.DiscardUnknown(m) +} + +var xxx_messageInfo_Gauge proto.InternalMessageInfo + +func (m *Gauge) GetValue() float64 { + if m != nil && m.Value != nil { + return *m.Value + } + return 0 +} + +type Counter struct { + Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Counter) Reset() { *m = Counter{} } +func (m *Counter) String() string { return proto.CompactTextString(m) } +func (*Counter) ProtoMessage() {} +func (*Counter) Descriptor() ([]byte, []int) { + return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{2} +} +func (m *Counter) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Counter.Unmarshal(m, b) +} +func (m *Counter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Counter.Marshal(b, m, deterministic) +} +func (dst *Counter) XXX_Merge(src proto.Message) { + xxx_messageInfo_Counter.Merge(dst, src) +} +func (m *Counter) XXX_Size() int { + return xxx_messageInfo_Counter.Size(m) +} +func (m *Counter) XXX_DiscardUnknown() { + xxx_messageInfo_Counter.DiscardUnknown(m) +} + +var xxx_messageInfo_Counter proto.InternalMessageInfo + +func (m *Counter) GetValue() float64 { + if m != nil && m.Value != nil { + return *m.Value + } + return 0 +} + +type Quantile struct { + Quantile *float64 `protobuf:"fixed64,1,opt,name=quantile" json:"quantile,omitempty"` + Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Quantile) Reset() { *m = Quantile{} } +func (m *Quantile) String() string { return proto.CompactTextString(m) } +func (*Quantile) ProtoMessage() {} +func (*Quantile) Descriptor() ([]byte, []int) { + return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{3} +} +func (m *Quantile) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Quantile.Unmarshal(m, b) +} +func (m *Quantile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Quantile.Marshal(b, m, deterministic) +} +func (dst *Quantile) XXX_Merge(src proto.Message) { + xxx_messageInfo_Quantile.Merge(dst, src) +} +func (m *Quantile) XXX_Size() int { + return xxx_messageInfo_Quantile.Size(m) +} +func (m *Quantile) 
XXX_DiscardUnknown() { + xxx_messageInfo_Quantile.DiscardUnknown(m) +} + +var xxx_messageInfo_Quantile proto.InternalMessageInfo + +func (m *Quantile) GetQuantile() float64 { + if m != nil && m.Quantile != nil { + return *m.Quantile + } + return 0 +} + +func (m *Quantile) GetValue() float64 { + if m != nil && m.Value != nil { + return *m.Value + } + return 0 +} + +type Summary struct { + SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"` + SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"` + Quantile []*Quantile `protobuf:"bytes,3,rep,name=quantile" json:"quantile,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Summary) Reset() { *m = Summary{} } +func (m *Summary) String() string { return proto.CompactTextString(m) } +func (*Summary) ProtoMessage() {} +func (*Summary) Descriptor() ([]byte, []int) { + return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{4} +} +func (m *Summary) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Summary.Unmarshal(m, b) +} +func (m *Summary) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Summary.Marshal(b, m, deterministic) +} +func (dst *Summary) XXX_Merge(src proto.Message) { + xxx_messageInfo_Summary.Merge(dst, src) +} +func (m *Summary) XXX_Size() int { + return xxx_messageInfo_Summary.Size(m) +} +func (m *Summary) XXX_DiscardUnknown() { + xxx_messageInfo_Summary.DiscardUnknown(m) +} + +var xxx_messageInfo_Summary proto.InternalMessageInfo + +func (m *Summary) GetSampleCount() uint64 { + if m != nil && m.SampleCount != nil { + return *m.SampleCount + } + return 0 +} + +func (m *Summary) GetSampleSum() float64 { + if m != nil && m.SampleSum != nil { + return *m.SampleSum + } + return 0 +} + +func (m *Summary) GetQuantile() []*Quantile { + if m != nil { + return m.Quantile + } + return nil +} + +type Untyped struct { + Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Untyped) Reset() { *m = Untyped{} } +func (m *Untyped) String() string { return proto.CompactTextString(m) } +func (*Untyped) ProtoMessage() {} +func (*Untyped) Descriptor() ([]byte, []int) { + return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{5} +} +func (m *Untyped) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Untyped.Unmarshal(m, b) +} +func (m *Untyped) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Untyped.Marshal(b, m, deterministic) +} +func (dst *Untyped) XXX_Merge(src proto.Message) { + xxx_messageInfo_Untyped.Merge(dst, src) +} +func (m *Untyped) XXX_Size() int { + return xxx_messageInfo_Untyped.Size(m) +} +func (m *Untyped) XXX_DiscardUnknown() { + xxx_messageInfo_Untyped.DiscardUnknown(m) +} + +var xxx_messageInfo_Untyped proto.InternalMessageInfo + +func (m *Untyped) GetValue() float64 { + if m != nil && m.Value != nil { + return *m.Value + } + return 0 +} + +type Histogram struct { + SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"` + SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"` + Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + 
XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Histogram) Reset() { *m = Histogram{} } +func (m *Histogram) String() string { return proto.CompactTextString(m) } +func (*Histogram) ProtoMessage() {} +func (*Histogram) Descriptor() ([]byte, []int) { + return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{6} +} +func (m *Histogram) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Histogram.Unmarshal(m, b) +} +func (m *Histogram) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Histogram.Marshal(b, m, deterministic) +} +func (dst *Histogram) XXX_Merge(src proto.Message) { + xxx_messageInfo_Histogram.Merge(dst, src) +} +func (m *Histogram) XXX_Size() int { + return xxx_messageInfo_Histogram.Size(m) +} +func (m *Histogram) XXX_DiscardUnknown() { + xxx_messageInfo_Histogram.DiscardUnknown(m) +} + +var xxx_messageInfo_Histogram proto.InternalMessageInfo + +func (m *Histogram) GetSampleCount() uint64 { + if m != nil && m.SampleCount != nil { + return *m.SampleCount + } + return 0 +} + +func (m *Histogram) GetSampleSum() float64 { + if m != nil && m.SampleSum != nil { + return *m.SampleSum + } + return 0 +} + +func (m *Histogram) GetBucket() []*Bucket { + if m != nil { + return m.Bucket + } + return nil +} + +type Bucket struct { + CumulativeCount *uint64 `protobuf:"varint,1,opt,name=cumulative_count,json=cumulativeCount" json:"cumulative_count,omitempty"` + UpperBound *float64 `protobuf:"fixed64,2,opt,name=upper_bound,json=upperBound" json:"upper_bound,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Bucket) Reset() { *m = Bucket{} } +func (m *Bucket) String() string { return proto.CompactTextString(m) } +func (*Bucket) ProtoMessage() {} +func (*Bucket) Descriptor() ([]byte, []int) { + return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{7} +} +func (m *Bucket) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Bucket.Unmarshal(m, b) +} +func (m *Bucket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Bucket.Marshal(b, m, deterministic) +} +func (dst *Bucket) XXX_Merge(src proto.Message) { + xxx_messageInfo_Bucket.Merge(dst, src) +} +func (m *Bucket) XXX_Size() int { + return xxx_messageInfo_Bucket.Size(m) +} +func (m *Bucket) XXX_DiscardUnknown() { + xxx_messageInfo_Bucket.DiscardUnknown(m) +} + +var xxx_messageInfo_Bucket proto.InternalMessageInfo + +func (m *Bucket) GetCumulativeCount() uint64 { + if m != nil && m.CumulativeCount != nil { + return *m.CumulativeCount + } + return 0 +} + +func (m *Bucket) GetUpperBound() float64 { + if m != nil && m.UpperBound != nil { + return *m.UpperBound + } + return 0 +} + +type Metric struct { + Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"` + Gauge *Gauge `protobuf:"bytes,2,opt,name=gauge" json:"gauge,omitempty"` + Counter *Counter `protobuf:"bytes,3,opt,name=counter" json:"counter,omitempty"` + Summary *Summary `protobuf:"bytes,4,opt,name=summary" json:"summary,omitempty"` + Untyped *Untyped `protobuf:"bytes,5,opt,name=untyped" json:"untyped,omitempty"` + Histogram *Histogram `protobuf:"bytes,7,opt,name=histogram" json:"histogram,omitempty"` + TimestampMs *int64 `protobuf:"varint,6,opt,name=timestamp_ms,json=timestampMs" json:"timestamp_ms,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Metric) Reset() { *m = Metric{} } 
+func (m *Metric) String() string { return proto.CompactTextString(m) } +func (*Metric) ProtoMessage() {} +func (*Metric) Descriptor() ([]byte, []int) { + return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{8} +} +func (m *Metric) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Metric.Unmarshal(m, b) +} +func (m *Metric) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Metric.Marshal(b, m, deterministic) +} +func (dst *Metric) XXX_Merge(src proto.Message) { + xxx_messageInfo_Metric.Merge(dst, src) +} +func (m *Metric) XXX_Size() int { + return xxx_messageInfo_Metric.Size(m) +} +func (m *Metric) XXX_DiscardUnknown() { + xxx_messageInfo_Metric.DiscardUnknown(m) +} + +var xxx_messageInfo_Metric proto.InternalMessageInfo + +func (m *Metric) GetLabel() []*LabelPair { + if m != nil { + return m.Label + } + return nil +} + +func (m *Metric) GetGauge() *Gauge { + if m != nil { + return m.Gauge + } + return nil +} + +func (m *Metric) GetCounter() *Counter { + if m != nil { + return m.Counter + } + return nil +} + +func (m *Metric) GetSummary() *Summary { + if m != nil { + return m.Summary + } + return nil +} + +func (m *Metric) GetUntyped() *Untyped { + if m != nil { + return m.Untyped + } + return nil +} + +func (m *Metric) GetHistogram() *Histogram { + if m != nil { + return m.Histogram + } + return nil +} + +func (m *Metric) GetTimestampMs() int64 { + if m != nil && m.TimestampMs != nil { + return *m.TimestampMs + } + return 0 +} + +type MetricFamily struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Help *string `protobuf:"bytes,2,opt,name=help" json:"help,omitempty"` + Type *MetricType `protobuf:"varint,3,opt,name=type,enum=io.prometheus.client.MetricType" json:"type,omitempty"` + Metric []*Metric `protobuf:"bytes,4,rep,name=metric" json:"metric,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MetricFamily) Reset() { *m = MetricFamily{} } +func (m *MetricFamily) String() string { return proto.CompactTextString(m) } +func (*MetricFamily) ProtoMessage() {} +func (*MetricFamily) Descriptor() ([]byte, []int) { + return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{9} +} +func (m *MetricFamily) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MetricFamily.Unmarshal(m, b) +} +func (m *MetricFamily) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MetricFamily.Marshal(b, m, deterministic) +} +func (dst *MetricFamily) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetricFamily.Merge(dst, src) +} +func (m *MetricFamily) XXX_Size() int { + return xxx_messageInfo_MetricFamily.Size(m) +} +func (m *MetricFamily) XXX_DiscardUnknown() { + xxx_messageInfo_MetricFamily.DiscardUnknown(m) +} + +var xxx_messageInfo_MetricFamily proto.InternalMessageInfo + +func (m *MetricFamily) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *MetricFamily) GetHelp() string { + if m != nil && m.Help != nil { + return *m.Help + } + return "" +} + +func (m *MetricFamily) GetType() MetricType { + if m != nil && m.Type != nil { + return *m.Type + } + return MetricType_COUNTER +} + +func (m *MetricFamily) GetMetric() []*Metric { + if m != nil { + return m.Metric + } + return nil +} + +func init() { + proto.RegisterType((*LabelPair)(nil), "io.prometheus.client.LabelPair") + proto.RegisterType((*Gauge)(nil), "io.prometheus.client.Gauge") + 
proto.RegisterType((*Counter)(nil), "io.prometheus.client.Counter") + proto.RegisterType((*Quantile)(nil), "io.prometheus.client.Quantile") + proto.RegisterType((*Summary)(nil), "io.prometheus.client.Summary") + proto.RegisterType((*Untyped)(nil), "io.prometheus.client.Untyped") + proto.RegisterType((*Histogram)(nil), "io.prometheus.client.Histogram") + proto.RegisterType((*Bucket)(nil), "io.prometheus.client.Bucket") + proto.RegisterType((*Metric)(nil), "io.prometheus.client.Metric") + proto.RegisterType((*MetricFamily)(nil), "io.prometheus.client.MetricFamily") + proto.RegisterEnum("io.prometheus.client.MetricType", MetricType_name, MetricType_value) +} + +func init() { proto.RegisterFile("metrics.proto", fileDescriptor_metrics_c97c9a2b9560cb8f) } + +var fileDescriptor_metrics_c97c9a2b9560cb8f = []byte{ + // 591 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x54, 0x4f, 0x4f, 0xdb, 0x4e, + 0x14, 0xfc, 0x99, 0xd8, 0x09, 0x7e, 0x86, 0x5f, 0xad, 0x15, 0x07, 0xab, 0x2d, 0x25, 0xcd, 0x89, + 0xf6, 0x10, 0x54, 0x04, 0xaa, 0x44, 0xdb, 0x03, 0x50, 0x1a, 0x2a, 0xd5, 0x40, 0x37, 0xc9, 0x81, + 0x5e, 0xac, 0x8d, 0x59, 0x25, 0x56, 0xbd, 0xb6, 0x6b, 0xef, 0x22, 0xe5, 0xdc, 0x43, 0xbf, 0x47, + 0xbf, 0x68, 0xab, 0xfd, 0xe3, 0x18, 0x24, 0xc3, 0xa9, 0xb7, 0xb7, 0xf3, 0x66, 0xde, 0x8e, 0x77, + 0xc7, 0x0b, 0x9b, 0x8c, 0xf2, 0x32, 0x89, 0xab, 0x61, 0x51, 0xe6, 0x3c, 0x47, 0x5b, 0x49, 0x2e, + 0x2b, 0x46, 0xf9, 0x82, 0x8a, 0x6a, 0x18, 0xa7, 0x09, 0xcd, 0xf8, 0xe0, 0x10, 0xdc, 0x2f, 0x64, + 0x46, 0xd3, 0x2b, 0x92, 0x94, 0x08, 0x81, 0x9d, 0x11, 0x46, 0x03, 0xab, 0x6f, 0xed, 0xba, 0x58, + 0xd5, 0x68, 0x0b, 0x9c, 0x5b, 0x92, 0x0a, 0x1a, 0xac, 0x29, 0x50, 0x2f, 0x06, 0xdb, 0xe0, 0x8c, + 0x88, 0x98, 0xdf, 0x69, 0x4b, 0x8d, 0x55, 0xb7, 0x77, 0xa0, 0x77, 0x9a, 0x8b, 0x8c, 0xd3, 0xf2, + 0x01, 0xc2, 0x7b, 0x58, 0xff, 0x2a, 0x48, 0xc6, 0x93, 0x94, 0xa2, 0xa7, 0xb0, 0xfe, 0xc3, 0xd4, + 0x86, 0xb4, 0x5a, 0xdf, 0xdf, 0x7d, 0xa5, 0xfe, 0x65, 0x41, 0x6f, 0x2c, 0x18, 0x23, 0xe5, 0x12, + 0xbd, 0x84, 0x8d, 0x8a, 0xb0, 0x22, 0xa5, 0x51, 0x2c, 0x77, 0x54, 0x13, 0x6c, 0xec, 0x69, 0x4c, + 0x99, 0x40, 0xdb, 0x00, 0x86, 0x52, 0x09, 0x66, 0x26, 0xb9, 0x1a, 0x19, 0x0b, 0x86, 0x8e, 0xee, + 0xec, 0xdf, 0xe9, 0x77, 0x76, 0xbd, 0xfd, 0x17, 0xc3, 0xb6, 0xb3, 0x1a, 0xd6, 0x8e, 0x1b, 0x7f, + 0xf2, 0x43, 0xa7, 0x19, 0x5f, 0x16, 0xf4, 0xe6, 0x81, 0x0f, 0xfd, 0x69, 0x81, 0x7b, 0x9e, 0x54, + 0x3c, 0x9f, 0x97, 0x84, 0xfd, 0x03, 0xb3, 0x07, 0xd0, 0x9d, 0x89, 0xf8, 0x3b, 0xe5, 0xc6, 0xea, + 0xf3, 0x76, 0xab, 0x27, 0x8a, 0x83, 0x0d, 0x77, 0x30, 0x81, 0xae, 0x46, 0xd0, 0x2b, 0xf0, 0x63, + 0xc1, 0x44, 0x4a, 0x78, 0x72, 0x7b, 0xdf, 0xc5, 0x93, 0x06, 0xd7, 0x4e, 0x76, 0xc0, 0x13, 0x45, + 0x41, 0xcb, 0x68, 0x96, 0x8b, 0xec, 0xc6, 0x58, 0x01, 0x05, 0x9d, 0x48, 0x64, 0xf0, 0x67, 0x0d, + 0xba, 0xa1, 0xca, 0x18, 0x3a, 0x04, 0x27, 0x95, 0x31, 0x0a, 0x2c, 0xe5, 0x6a, 0xa7, 0xdd, 0xd5, + 0x2a, 0x69, 0x58, 0xb3, 0xd1, 0x1b, 0x70, 0xe6, 0x32, 0x46, 0x6a, 0xb8, 0xb7, 0xff, 0xac, 0x5d, + 0xa6, 0x92, 0x86, 0x35, 0x13, 0xbd, 0x85, 0x5e, 0xac, 0xa3, 0x15, 0x74, 0x94, 0x68, 0xbb, 0x5d, + 0x64, 0xf2, 0x87, 0x6b, 0xb6, 0x14, 0x56, 0x3a, 0x33, 0x81, 0xfd, 0x98, 0xd0, 0x04, 0x0b, 0xd7, + 0x6c, 0x29, 0x14, 0xfa, 0x8e, 0x03, 0xe7, 0x31, 0xa1, 0x09, 0x02, 0xae, 0xd9, 0xe8, 0x03, 0xb8, + 0x8b, 0xfa, 0xea, 0x83, 0x9e, 0x92, 0x3e, 0x70, 0x30, 0xab, 0x84, 0xe0, 0x46, 0x21, 0xc3, 0xc2, + 0x13, 0x46, 0x2b, 0x4e, 0x58, 0x11, 0xb1, 0x2a, 0xe8, 0xf6, 0xad, 0xdd, 0x0e, 0xf6, 0x56, 0x58, + 0x58, 0x0d, 0x7e, 0x5b, 0xb0, 0xa1, 0x6f, 
0xe0, 0x13, 0x61, 0x49, 0xba, 0x6c, 0xfd, 0x83, 0x11, + 0xd8, 0x0b, 0x9a, 0x16, 0xe6, 0x07, 0x56, 0x35, 0x3a, 0x00, 0x5b, 0x7a, 0x54, 0x47, 0xf8, 0xff, + 0x7e, 0xbf, 0xdd, 0x95, 0x9e, 0x3c, 0x59, 0x16, 0x14, 0x2b, 0xb6, 0x0c, 0x9f, 0x7e, 0x53, 0x02, + 0xfb, 0xb1, 0xf0, 0x69, 0x1d, 0x36, 0xdc, 0xd7, 0x21, 0x40, 0x33, 0x09, 0x79, 0xd0, 0x3b, 0xbd, + 0x9c, 0x5e, 0x4c, 0xce, 0xb0, 0xff, 0x1f, 0x72, 0xc1, 0x19, 0x1d, 0x4f, 0x47, 0x67, 0xbe, 0x25, + 0xf1, 0xf1, 0x34, 0x0c, 0x8f, 0xf1, 0xb5, 0xbf, 0x26, 0x17, 0xd3, 0x8b, 0xc9, 0xf5, 0xd5, 0xd9, + 0x47, 0xbf, 0x83, 0x36, 0xc1, 0x3d, 0xff, 0x3c, 0x9e, 0x5c, 0x8e, 0xf0, 0x71, 0xe8, 0xdb, 0x27, + 0x18, 0x5a, 0x5f, 0xb2, 0x6f, 0x47, 0xf3, 0x84, 0x2f, 0xc4, 0x6c, 0x18, 0xe7, 0x6c, 0xaf, 0xe9, + 0xee, 0xe9, 0x6e, 0xc4, 0xf2, 0x1b, 0x9a, 0xee, 0xcd, 0xf3, 0x77, 0x49, 0x1e, 0x35, 0xdd, 0x48, + 0x77, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0x45, 0x21, 0x7f, 0x64, 0x2b, 0x05, 0x00, 0x00, +} diff --git a/vendor/github.com/prometheus/client_model/ruby/LICENSE b/vendor/github.com/prometheus/client_model/ruby/LICENSE new file mode 100644 index 000000000..11069edd7 --- /dev/null +++ b/vendor/github.com/prometheus/client_model/ruby/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/github.com/prometheus/common/LICENSE b/vendor/github.com/prometheus/common/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/vendor/github.com/prometheus/common/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
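Before the expfmt sources below, a quick orientation may help: the vendored prometheus/common/expfmt package supplies the decoders and encoders for the Prometheus exposition formats, built on the generated client_model types added above. The following is a minimal, illustrative sketch (not part of the patch itself; it assumes the vendored packages resolve on the import path) that parses a text-format exposition and re-encodes it:

package main

import (
	"bytes"
	"log"
	"os"

	"github.com/prometheus/common/expfmt"
)

func main() {
	// A tiny text-format exposition, as a Prometheus client would serve it.
	in := bytes.NewBufferString("# HELP http_requests_total Total HTTP requests.\n" +
		"# TYPE http_requests_total counter\n" +
		"http_requests_total{code=\"200\"} 1027\n")

	// TextParser's zero value is ready to use (see text_parse.go below).
	var parser expfmt.TextParser
	fams, err := parser.TextToMetricFamilies(in)
	if err != nil {
		log.Fatal(err)
	}

	// Re-encode each parsed MetricFamily in the text format (see encode.go below).
	enc := expfmt.NewEncoder(os.Stdout, expfmt.FmtText)
	for _, mf := range fams {
		if err := enc.Encode(mf); err != nil {
			log.Fatal(err)
		}
	}
}

TextParser, NewEncoder, and FmtText are all defined in the expfmt files added below, so the sketch exercises only code that this patch vendors.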
diff --git a/vendor/github.com/prometheus/common/NOTICE b/vendor/github.com/prometheus/common/NOTICE new file mode 100644 index 000000000..636a2c1a5 --- /dev/null +++ b/vendor/github.com/prometheus/common/NOTICE @@ -0,0 +1,5 @@ +Common libraries shared by Prometheus Go components. +Copyright 2015 The Prometheus Authors + +This product includes software developed at +SoundCloud Ltd. (http://soundcloud.com/). diff --git a/vendor/github.com/prometheus/common/expfmt/decode.go b/vendor/github.com/prometheus/common/expfmt/decode.go new file mode 100644 index 000000000..c092723e8 --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/decode.go @@ -0,0 +1,429 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package expfmt + +import ( + "fmt" + "io" + "math" + "mime" + "net/http" + + dto "github.com/prometheus/client_model/go" + + "github.com/matttproud/golang_protobuf_extensions/pbutil" + "github.com/prometheus/common/model" +) + +// Decoder types decode an input stream into metric families. +type Decoder interface { + Decode(*dto.MetricFamily) error +} + +// DecodeOptions contains options used by the Decoder and in sample extraction. +type DecodeOptions struct { + // Timestamp is added to each value from the stream that has no explicit timestamp set. + Timestamp model.Time +} + +// ResponseFormat extracts the correct format from a HTTP response header. +// If no matching format can be found FormatUnknown is returned. +func ResponseFormat(h http.Header) Format { + ct := h.Get(hdrContentType) + + mediatype, params, err := mime.ParseMediaType(ct) + if err != nil { + return FmtUnknown + } + + const textType = "text/plain" + + switch mediatype { + case ProtoType: + if p, ok := params["proto"]; ok && p != ProtoProtocol { + return FmtUnknown + } + if e, ok := params["encoding"]; ok && e != "delimited" { + return FmtUnknown + } + return FmtProtoDelim + + case textType: + if v, ok := params["version"]; ok && v != TextVersion { + return FmtUnknown + } + return FmtText + } + + return FmtUnknown +} + +// NewDecoder returns a new decoder based on the given input format. +// If the input format does not imply otherwise, a text format decoder is returned. +func NewDecoder(r io.Reader, format Format) Decoder { + switch format { + case FmtProtoDelim: + return &protoDecoder{r: r} + } + return &textDecoder{r: r} +} + +// protoDecoder implements the Decoder interface for protocol buffers. +type protoDecoder struct { + r io.Reader +} + +// Decode implements the Decoder interface. 
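+// It reads one length-delimited MetricFamily message from the underlying +// reader and rejects families with an invalid metric name or invalid label +// names or values.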
+func (d *protoDecoder) Decode(v *dto.MetricFamily) error { + _, err := pbutil.ReadDelimited(d.r, v) + if err != nil { + return err + } + if !model.IsValidMetricName(model.LabelValue(v.GetName())) { + return fmt.Errorf("invalid metric name %q", v.GetName()) + } + for _, m := range v.GetMetric() { + if m == nil { + continue + } + for _, l := range m.GetLabel() { + if l == nil { + continue + } + if !model.LabelValue(l.GetValue()).IsValid() { + return fmt.Errorf("invalid label value %q", l.GetValue()) + } + if !model.LabelName(l.GetName()).IsValid() { + return fmt.Errorf("invalid label name %q", l.GetName()) + } + } + } + return nil +} + +// textDecoder implements the Decoder interface for the text protocol. +type textDecoder struct { + r io.Reader + p TextParser + fams []*dto.MetricFamily +} + +// Decode implements the Decoder interface. +func (d *textDecoder) Decode(v *dto.MetricFamily) error { + // TODO(fabxc): Wrap this as a line reader to make streaming safer. + if len(d.fams) == 0 { + // No cached metric families, read everything and parse metrics. + fams, err := d.p.TextToMetricFamilies(d.r) + if err != nil { + return err + } + if len(fams) == 0 { + return io.EOF + } + d.fams = make([]*dto.MetricFamily, 0, len(fams)) + for _, f := range fams { + d.fams = append(d.fams, f) + } + } + + *v = *d.fams[0] + d.fams = d.fams[1:] + + return nil +} + +// SampleDecoder wraps a Decoder to extract samples from the metric families +// decoded by the wrapped Decoder. +type SampleDecoder struct { + Dec Decoder + Opts *DecodeOptions + + f dto.MetricFamily +} + +// Decode calls the Decode method of the wrapped Decoder and then extracts the +// samples from the decoded MetricFamily into the provided model.Vector. +func (sd *SampleDecoder) Decode(s *model.Vector) error { + err := sd.Dec.Decode(&sd.f) + if err != nil { + return err + } + *s, err = extractSamples(&sd.f, sd.Opts) + return err +} + +// ExtractSamples builds a slice of samples from the provided metric +// families. If an error occurs during sample extraction, it continues to +// extract from the remaining metric families. The returned error is the last +// error that has occurred. +func ExtractSamples(o *DecodeOptions, fams ...*dto.MetricFamily) (model.Vector, error) { + var ( + all model.Vector + lastErr error + ) + for _, f := range fams { + some, err := extractSamples(f, o) + if err != nil { + lastErr = err + continue + } + all = append(all, some...)
+ } + return all, lastErr +} + +func extractSamples(f *dto.MetricFamily, o *DecodeOptions) (model.Vector, error) { + switch f.GetType() { + case dto.MetricType_COUNTER: + return extractCounter(o, f), nil + case dto.MetricType_GAUGE: + return extractGauge(o, f), nil + case dto.MetricType_SUMMARY: + return extractSummary(o, f), nil + case dto.MetricType_UNTYPED: + return extractUntyped(o, f), nil + case dto.MetricType_HISTOGRAM: + return extractHistogram(o, f), nil + } + return nil, fmt.Errorf("expfmt.extractSamples: unknown metric family type %v", f.GetType()) +} + +func extractCounter(o *DecodeOptions, f *dto.MetricFamily) model.Vector { + samples := make(model.Vector, 0, len(f.Metric)) + + for _, m := range f.Metric { + if m.Counter == nil { + continue + } + + lset := make(model.LabelSet, len(m.Label)+1) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.MetricNameLabel] = model.LabelValue(f.GetName()) + + smpl := &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(m.Counter.GetValue()), + } + + if m.TimestampMs != nil { + smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) + } else { + smpl.Timestamp = o.Timestamp + } + + samples = append(samples, smpl) + } + + return samples +} + +func extractGauge(o *DecodeOptions, f *dto.MetricFamily) model.Vector { + samples := make(model.Vector, 0, len(f.Metric)) + + for _, m := range f.Metric { + if m.Gauge == nil { + continue + } + + lset := make(model.LabelSet, len(m.Label)+1) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.MetricNameLabel] = model.LabelValue(f.GetName()) + + smpl := &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(m.Gauge.GetValue()), + } + + if m.TimestampMs != nil { + smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) + } else { + smpl.Timestamp = o.Timestamp + } + + samples = append(samples, smpl) + } + + return samples +} + +func extractUntyped(o *DecodeOptions, f *dto.MetricFamily) model.Vector { + samples := make(model.Vector, 0, len(f.Metric)) + + for _, m := range f.Metric { + if m.Untyped == nil { + continue + } + + lset := make(model.LabelSet, len(m.Label)+1) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.MetricNameLabel] = model.LabelValue(f.GetName()) + + smpl := &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(m.Untyped.GetValue()), + } + + if m.TimestampMs != nil { + smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) + } else { + smpl.Timestamp = o.Timestamp + } + + samples = append(samples, smpl) + } + + return samples +} + +func extractSummary(o *DecodeOptions, f *dto.MetricFamily) model.Vector { + samples := make(model.Vector, 0, len(f.Metric)) + + for _, m := range f.Metric { + if m.Summary == nil { + continue + } + + timestamp := o.Timestamp + if m.TimestampMs != nil { + timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) + } + + for _, q := range m.Summary.Quantile { + lset := make(model.LabelSet, len(m.Label)+2) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + // BUG(matt): Update other names to "quantile". 
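+ // The quantile value is attached as the "quantile" label on each sample.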
+ lset[model.LabelName(model.QuantileLabel)] = model.LabelValue(fmt.Sprint(q.GetQuantile())) + lset[model.MetricNameLabel] = model.LabelValue(f.GetName()) + + samples = append(samples, &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(q.GetValue()), + Timestamp: timestamp, + }) + } + + lset := make(model.LabelSet, len(m.Label)+1) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum") + + samples = append(samples, &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(m.Summary.GetSampleSum()), + Timestamp: timestamp, + }) + + lset = make(model.LabelSet, len(m.Label)+1) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count") + + samples = append(samples, &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(m.Summary.GetSampleCount()), + Timestamp: timestamp, + }) + } + + return samples +} + +func extractHistogram(o *DecodeOptions, f *dto.MetricFamily) model.Vector { + samples := make(model.Vector, 0, len(f.Metric)) + + for _, m := range f.Metric { + if m.Histogram == nil { + continue + } + + timestamp := o.Timestamp + if m.TimestampMs != nil { + timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) + } + + infSeen := false + + for _, q := range m.Histogram.Bucket { + lset := make(model.LabelSet, len(m.Label)+2) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.LabelName(model.BucketLabel)] = model.LabelValue(fmt.Sprint(q.GetUpperBound())) + lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket") + + if math.IsInf(q.GetUpperBound(), +1) { + infSeen = true + } + + samples = append(samples, &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(q.GetCumulativeCount()), + Timestamp: timestamp, + }) + } + + lset := make(model.LabelSet, len(m.Label)+1) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum") + + samples = append(samples, &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(m.Histogram.GetSampleSum()), + Timestamp: timestamp, + }) + + lset = make(model.LabelSet, len(m.Label)+1) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count") + + count := &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(m.Histogram.GetSampleCount()), + Timestamp: timestamp, + } + samples = append(samples, count) + + if !infSeen { + // Append an infinity bucket sample. 
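+ // The implicit +Inf bucket counts every observation, so its cumulative + // count equals the _count sample computed above.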
+ lset := make(model.LabelSet, len(m.Label)+2) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.LabelName(model.BucketLabel)] = model.LabelValue("+Inf") + lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket") + + samples = append(samples, &model.Sample{ + Metric: model.Metric(lset), + Value: count.Value, + Timestamp: timestamp, + }) + } + } + + return samples +} diff --git a/vendor/github.com/prometheus/common/expfmt/encode.go b/vendor/github.com/prometheus/common/expfmt/encode.go new file mode 100644 index 000000000..11839ed65 --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/encode.go @@ -0,0 +1,88 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package expfmt + +import ( + "fmt" + "io" + "net/http" + + "github.com/golang/protobuf/proto" + "github.com/matttproud/golang_protobuf_extensions/pbutil" + "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg" + + dto "github.com/prometheus/client_model/go" +) + +// Encoder types encode metric families into an underlying wire protocol. +type Encoder interface { + Encode(*dto.MetricFamily) error +} + +type encoder func(*dto.MetricFamily) error + +func (e encoder) Encode(v *dto.MetricFamily) error { + return e(v) +} + +// Negotiate returns the Content-Type based on the given Accept header. +// If no appropriate accepted type is found, FmtText is returned. +func Negotiate(h http.Header) Format { + for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) { + // Check for protocol buffer + if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol { + switch ac.Params["encoding"] { + case "delimited": + return FmtProtoDelim + case "text": + return FmtProtoText + case "compact-text": + return FmtProtoCompact + } + } + // Check for text format. + ver := ac.Params["version"] + if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") { + return FmtText + } + } + return FmtText +} + +// NewEncoder returns a new encoder based on content type negotiation. 
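+// It panics on an unknown Format, so callers should pass one of the Fmt* +// constants defined in this package, typically the result of Negotiate.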
+func NewEncoder(w io.Writer, format Format) Encoder { + switch format { + case FmtProtoDelim: + return encoder(func(v *dto.MetricFamily) error { + _, err := pbutil.WriteDelimited(w, v) + return err + }) + case FmtProtoCompact: + return encoder(func(v *dto.MetricFamily) error { + _, err := fmt.Fprintln(w, v.String()) + return err + }) + case FmtProtoText: + return encoder(func(v *dto.MetricFamily) error { + _, err := fmt.Fprintln(w, proto.MarshalTextString(v)) + return err + }) + case FmtText: + return encoder(func(v *dto.MetricFamily) error { + _, err := MetricFamilyToText(w, v) + return err + }) + } + panic("expfmt.NewEncoder: unknown format") +} diff --git a/vendor/github.com/prometheus/common/expfmt/expfmt.go b/vendor/github.com/prometheus/common/expfmt/expfmt.go new file mode 100644 index 000000000..c71bcb981 --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/expfmt.go @@ -0,0 +1,38 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package expfmt contains tools for reading and writing Prometheus metrics. +package expfmt + +// Format specifies the HTTP content type of the different wire protocols. +type Format string + +// Constants to assemble the Content-Type values for the different wire protocols. +const ( + TextVersion = "0.0.4" + ProtoType = `application/vnd.google.protobuf` + ProtoProtocol = `io.prometheus.client.MetricFamily` + ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";" + + // The Content-Type values for the different wire protocols. + FmtUnknown Format = `` + FmtText Format = `text/plain; version=` + TextVersion + `; charset=utf-8` + FmtProtoDelim Format = ProtoFmt + ` encoding=delimited` + FmtProtoText Format = ProtoFmt + ` encoding=text` + FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text` +) + +const ( + hdrContentType = "Content-Type" + hdrAccept = "Accept" +) diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz.go b/vendor/github.com/prometheus/common/expfmt/fuzz.go new file mode 100644 index 000000000..dc2eedeef --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/fuzz.go @@ -0,0 +1,36 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Build only when actually fuzzing +// +build gofuzz + +package expfmt + +import "bytes" + +// Fuzz text metric parser with github.com/dvyukov/go-fuzz: +// +// go-fuzz-build github.com/prometheus/common/expfmt +// go-fuzz -bin expfmt-fuzz.zip -workdir fuzz +// +// Further input samples should go in the folder fuzz/corpus. +func Fuzz(in []byte) int { + parser := TextParser{} + _, err := parser.TextToMetricFamilies(bytes.NewReader(in)) + + if err != nil { + return 0 + } + + return 1 +} diff --git a/vendor/github.com/prometheus/common/expfmt/text_create.go b/vendor/github.com/prometheus/common/expfmt/text_create.go new file mode 100644 index 000000000..8e473d0fe --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/text_create.go @@ -0,0 +1,468 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package expfmt + +import ( + "bytes" + "fmt" + "io" + "math" + "strconv" + "strings" + "sync" + + "github.com/prometheus/common/model" + + dto "github.com/prometheus/client_model/go" +) + +// enhancedWriter has all the enhanced write functions needed here. bytes.Buffer +// implements it. +type enhancedWriter interface { + io.Writer + WriteRune(r rune) (n int, err error) + WriteString(s string) (n int, err error) + WriteByte(c byte) error +} + +const ( + initialBufSize = 512 + initialNumBufSize = 24 +) + +var ( + bufPool = sync.Pool{ + New: func() interface{} { + return bytes.NewBuffer(make([]byte, 0, initialBufSize)) + }, + } + numBufPool = sync.Pool{ + New: func() interface{} { + b := make([]byte, 0, initialNumBufSize) + return &b + }, + } +) + +// MetricFamilyToText converts a MetricFamily proto message into text format and +// writes the resulting lines to 'out'. It returns the number of bytes written +// and any error encountered. The output will have the same order as the input, +// no further sorting is performed. Furthermore, this function assumes the input +// is already sanitized and does not perform any sanity checks. If the input +// contains duplicate metrics or invalid metric or label names, the conversion +// will result in invalid text format output. +// +// This method fulfills the type 'prometheus.encoder'. +func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (written int, err error) { + // Fail-fast checks. + if len(in.Metric) == 0 { + return 0, fmt.Errorf("MetricFamily has no metrics: %s", in) + } + name := in.GetName() + if name == "" { + return 0, fmt.Errorf("MetricFamily has no name: %s", in) + } + + // Try the interface upgrade. If it doesn't work, we'll use a + // bytes.Buffer from the sync.Pool and write out its content to out in a + // single go in the end. + w, ok := out.(enhancedWriter) + if !ok { + b := bufPool.Get().(*bytes.Buffer) + b.Reset() + w = b + defer func() { + bWritten, bErr := out.Write(b.Bytes()) + written = bWritten + if err == nil { + err = bErr + } + bufPool.Put(b) + }() + } + + var n int + + // Comments, first HELP, then TYPE.
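+ // The text format emits, per family, an optional '# HELP <name> <text>' + // line, a mandatory '# TYPE <name> <type>' line, and one line per sample.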
+ if in.Help != nil { + n, err = w.WriteString("# HELP ") + written += n + if err != nil { + return + } + n, err = w.WriteString(name) + written += n + if err != nil { + return + } + err = w.WriteByte(' ') + written++ + if err != nil { + return + } + n, err = writeEscapedString(w, *in.Help, false) + written += n + if err != nil { + return + } + err = w.WriteByte('\n') + written++ + if err != nil { + return + } + } + n, err = w.WriteString("# TYPE ") + written += n + if err != nil { + return + } + n, err = w.WriteString(name) + written += n + if err != nil { + return + } + metricType := in.GetType() + switch metricType { + case dto.MetricType_COUNTER: + n, err = w.WriteString(" counter\n") + case dto.MetricType_GAUGE: + n, err = w.WriteString(" gauge\n") + case dto.MetricType_SUMMARY: + n, err = w.WriteString(" summary\n") + case dto.MetricType_UNTYPED: + n, err = w.WriteString(" untyped\n") + case dto.MetricType_HISTOGRAM: + n, err = w.WriteString(" histogram\n") + default: + return written, fmt.Errorf("unknown metric type %s", metricType.String()) + } + written += n + if err != nil { + return + } + + // Finally the samples, one line for each. + for _, metric := range in.Metric { + switch metricType { + case dto.MetricType_COUNTER: + if metric.Counter == nil { + return written, fmt.Errorf( + "expected counter in metric %s %s", name, metric, + ) + } + n, err = writeSample( + w, name, "", metric, "", 0, + metric.Counter.GetValue(), + ) + case dto.MetricType_GAUGE: + if metric.Gauge == nil { + return written, fmt.Errorf( + "expected gauge in metric %s %s", name, metric, + ) + } + n, err = writeSample( + w, name, "", metric, "", 0, + metric.Gauge.GetValue(), + ) + case dto.MetricType_UNTYPED: + if metric.Untyped == nil { + return written, fmt.Errorf( + "expected untyped in metric %s %s", name, metric, + ) + } + n, err = writeSample( + w, name, "", metric, "", 0, + metric.Untyped.GetValue(), + ) + case dto.MetricType_SUMMARY: + if metric.Summary == nil { + return written, fmt.Errorf( + "expected summary in metric %s %s", name, metric, + ) + } + for _, q := range metric.Summary.Quantile { + n, err = writeSample( + w, name, "", metric, + model.QuantileLabel, q.GetQuantile(), + q.GetValue(), + ) + written += n + if err != nil { + return + } + } + n, err = writeSample( + w, name, "_sum", metric, "", 0, + metric.Summary.GetSampleSum(), + ) + written += n + if err != nil { + return + } + n, err = writeSample( + w, name, "_count", metric, "", 0, + float64(metric.Summary.GetSampleCount()), + ) + case dto.MetricType_HISTOGRAM: + if metric.Histogram == nil { + return written, fmt.Errorf( + "expected histogram in metric %s %s", name, metric, + ) + } + infSeen := false + for _, b := range metric.Histogram.Bucket { + n, err = writeSample( + w, name, "_bucket", metric, + model.BucketLabel, b.GetUpperBound(), + float64(b.GetCumulativeCount()), + ) + written += n + if err != nil { + return + } + if math.IsInf(b.GetUpperBound(), +1) { + infSeen = true + } + } + if !infSeen { + n, err = writeSample( + w, name, "_bucket", metric, + model.BucketLabel, math.Inf(+1), + float64(metric.Histogram.GetSampleCount()), + ) + written += n + if err != nil { + return + } + } + n, err = writeSample( + w, name, "_sum", metric, "", 0, + metric.Histogram.GetSampleSum(), + ) + written += n + if err != nil { + return + } + n, err = writeSample( + w, name, "_count", metric, "", 0, + float64(metric.Histogram.GetSampleCount()), + ) + default: + return written, fmt.Errorf( + "unexpected type in metric %s %s", name, metric, + ) + } + 
written += n + if err != nil { + return + } + } + return +} + +// writeSample writes a single sample in text format to w, given the metric +// name, the metric proto message itself, optionally an additional label name +// with a float64 value (use empty string as label name if not required), and +// the value. The function returns the number of bytes written and any error +// encountered. +func writeSample( + w enhancedWriter, + name, suffix string, + metric *dto.Metric, + additionalLabelName string, additionalLabelValue float64, + value float64, +) (int, error) { + var written int + n, err := w.WriteString(name) + written += n + if err != nil { + return written, err + } + if suffix != "" { + n, err = w.WriteString(suffix) + written += n + if err != nil { + return written, err + } + } + n, err = writeLabelPairs( + w, metric.Label, additionalLabelName, additionalLabelValue, + ) + written += n + if err != nil { + return written, err + } + err = w.WriteByte(' ') + written++ + if err != nil { + return written, err + } + n, err = writeFloat(w, value) + written += n + if err != nil { + return written, err + } + if metric.TimestampMs != nil { + err = w.WriteByte(' ') + written++ + if err != nil { + return written, err + } + n, err = writeInt(w, *metric.TimestampMs) + written += n + if err != nil { + return written, err + } + } + err = w.WriteByte('\n') + written++ + if err != nil { + return written, err + } + return written, nil +} + +// writeLabelPairs converts a slice of LabelPair proto messages plus the +// explicitly given additional label pair into text formatted as required by the +// text format and writes it to 'w'. An empty slice in combination with an empty +// string 'additionalLabelName' results in nothing being written. Otherwise, the +// label pairs are written, escaped as required by the text format, and enclosed +// in '{...}'. The function returns the number of bytes written and any error +// encountered. +func writeLabelPairs( + w enhancedWriter, + in []*dto.LabelPair, + additionalLabelName string, additionalLabelValue float64, +) (int, error) { + if len(in) == 0 && additionalLabelName == "" { + return 0, nil + } + var ( + written int + separator byte = '{' + ) + for _, lp := range in { + err := w.WriteByte(separator) + written++ + if err != nil { + return written, err + } + n, err := w.WriteString(lp.GetName()) + written += n + if err != nil { + return written, err + } + n, err = w.WriteString(`="`) + written += n + if err != nil { + return written, err + } + n, err = writeEscapedString(w, lp.GetValue(), true) + written += n + if err != nil { + return written, err + } + err = w.WriteByte('"') + written++ + if err != nil { + return written, err + } + separator = ',' + } + if additionalLabelName != "" { + err := w.WriteByte(separator) + written++ + if err != nil { + return written, err + } + n, err := w.WriteString(additionalLabelName) + written += n + if err != nil { + return written, err + } + n, err = w.WriteString(`="`) + written += n + if err != nil { + return written, err + } + n, err = writeFloat(w, additionalLabelValue) + written += n + if err != nil { + return written, err + } + err = w.WriteByte('"') + written++ + if err != nil { + return written, err + } + } + err := w.WriteByte('}') + written++ + if err != nil { + return written, err + } + return written, nil +} + +// writeEscapedString replaces '\' by '\\', new line character by '\n', and - if +// includeDoubleQuote is true - '"' by '\"'. 
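+
+// Escaping example (invented label value): with includeDoubleQuote=true, the
+// two-line value
+//
+//	C:\temp "new"
+//	folder
+//
+// is written as C:\\temp \"new\"\nfolder, so every sample stays on one line.
+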
+var ( + escaper = strings.NewReplacer("\\", `\\`, "\n", `\n`) + quotedEscaper = strings.NewReplacer("\\", `\\`, "\n", `\n`, "\"", `\"`) +) + +func writeEscapedString(w enhancedWriter, v string, includeDoubleQuote bool) (int, error) { + if includeDoubleQuote { + return quotedEscaper.WriteString(w, v) + } else { + return escaper.WriteString(w, v) + } +} + +// writeFloat is equivalent to fmt.Fprint with a float64 argument but hardcodes +// a few common cases for increased efficiency. For non-hardcoded cases, it uses +// strconv.AppendFloat to avoid allocations, similar to writeInt. +func writeFloat(w enhancedWriter, f float64) (int, error) { + switch { + case f == 1: + return 1, w.WriteByte('1') + case f == 0: + return 1, w.WriteByte('0') + case f == -1: + return w.WriteString("-1") + case math.IsNaN(f): + return w.WriteString("NaN") + case math.IsInf(f, +1): + return w.WriteString("+Inf") + case math.IsInf(f, -1): + return w.WriteString("-Inf") + default: + bp := numBufPool.Get().(*[]byte) + *bp = strconv.AppendFloat((*bp)[:0], f, 'g', -1, 64) + written, err := w.Write(*bp) + numBufPool.Put(bp) + return written, err + } +} + +// writeInt is equivalent to fmt.Fprint with an int64 argument but uses +// strconv.AppendInt with a byte slice taken from a sync.Pool to avoid +// allocations. +func writeInt(w enhancedWriter, i int64) (int, error) { + bp := numBufPool.Get().(*[]byte) + *bp = strconv.AppendInt((*bp)[:0], i, 10) + written, err := w.Write(*bp) + numBufPool.Put(bp) + return written, err +} diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go new file mode 100644 index 000000000..ec3d86ba7 --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/text_parse.go @@ -0,0 +1,757 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package expfmt + +import ( + "bufio" + "bytes" + "fmt" + "io" + "math" + "strconv" + "strings" + + dto "github.com/prometheus/client_model/go" + + "github.com/golang/protobuf/proto" + "github.com/prometheus/common/model" +) + +// A stateFn is a function that represents a state in a state machine. By +// executing it, the state is progressed to the next state. The stateFn returns +// another stateFn, which represents the new state. The end state is represented +// by nil. +type stateFn func() stateFn + +// ParseError signals errors while parsing the simple and flat text-based +// exchange format. +type ParseError struct { + Line int + Msg string +} + +// Error implements the error interface. +func (e ParseError) Error() string { + return fmt.Sprintf("text format parsing error in line %d: %s", e.Line, e.Msg) +} + +// TextParser is used to parse the simple and flat text-based exchange format. Its +// zero value is ready to use. +type TextParser struct { + metricFamiliesByName map[string]*dto.MetricFamily + buf *bufio.Reader // Where the parsed input is read through. + err error // Most recent error. 
+ lineCount int // Tracks the line count for error messages. + currentByte byte // The most recent byte read. + currentToken bytes.Buffer // Re-used each time a token has to be gathered from multiple bytes. + currentMF *dto.MetricFamily + currentMetric *dto.Metric + currentLabelPair *dto.LabelPair + + // The remaining member variables are only used for summaries/histograms. + currentLabels map[string]string // All labels including '__name__' but excluding 'quantile'/'le' + // Summary specific. + summaries map[uint64]*dto.Metric // Key is created with LabelsToSignature. + currentQuantile float64 + // Histogram specific. + histograms map[uint64]*dto.Metric // Key is created with LabelsToSignature. + currentBucket float64 + // These tell us if the currently processed line ends in '_count' or + // '_sum' respectively and belongs to a summary/histogram, representing the sample + // count and sum of that summary/histogram. + currentIsSummaryCount, currentIsSummarySum bool + currentIsHistogramCount, currentIsHistogramSum bool +} + +// TextToMetricFamilies reads 'in' as the simple and flat text-based exchange +// format and creates MetricFamily proto messages. It returns the MetricFamily +// proto messages in a map where the metric names are the keys, along with any +// error encountered. +// +// If the input contains duplicate metrics (i.e. lines with the same metric name +// and exactly the same label set), the resulting MetricFamily will contain +// duplicate Metric proto messages. The same is true for duplicate label +// names. Checks for duplicates have to be performed separately, if required. +// Also note that neither the metrics within each MetricFamily nor the label +// pairs within each Metric are sorted. Sorting is not required for the most +// frequent use of this method, which is sample ingestion in the Prometheus +// server. However, for presentation purposes, you might want to sort the +// metrics, and in some cases, you must sort the labels, e.g. for consumption by +// the metric family injection hook of the Prometheus registry. +// +// Summaries and histograms are rather special beasts. You would probably not +// use them in the simple text format anyway. This method can deal with +// summaries and histograms if they are presented in exactly the way the +// text.Create function creates them. +// +// This method must not be called concurrently. If you want to parse different +// input concurrently, instantiate a separate Parser for each goroutine. +func (p *TextParser) TextToMetricFamilies(in io.Reader) (map[string]*dto.MetricFamily, error) { + p.reset(in) + for nextState := p.startOfLine; nextState != nil; nextState = nextState() { + // Magic happens here... + } + // Get rid of empty metric families. + for k, mf := range p.metricFamiliesByName { + if len(mf.GetMetric()) == 0 { + delete(p.metricFamiliesByName, k) + } + } + // If p.err is io.EOF now, we have run into a premature end of the input + // stream. Turn this error into something nicer and more + // meaningful. (io.EOF is often used as a signal for the legitimate end + // of an input stream.)
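+
+// A minimal usage sketch (invented input; each goroutine needs its own
+// TextParser, since the parser must not be used concurrently):
+//
+//	var parser TextParser
+//	in := strings.NewReader("# TYPE requests_total counter\nrequests_total 42\n")
+//	families, err := parser.TextToMetricFamilies(in)
+//	if err != nil {
+//		// handle the parse error
+//	}
+//	fmt.Println(families["requests_total"].GetType()) // prints "COUNTER"
+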
+ if p.err == io.EOF { + p.parseError("unexpected end of input stream") + } + return p.metricFamiliesByName, p.err +} + +func (p *TextParser) reset(in io.Reader) { + p.metricFamiliesByName = map[string]*dto.MetricFamily{} + if p.buf == nil { + p.buf = bufio.NewReader(in) + } else { + p.buf.Reset(in) + } + p.err = nil + p.lineCount = 0 + if p.summaries == nil || len(p.summaries) > 0 { + p.summaries = map[uint64]*dto.Metric{} + } + if p.histograms == nil || len(p.histograms) > 0 { + p.histograms = map[uint64]*dto.Metric{} + } + p.currentQuantile = math.NaN() + p.currentBucket = math.NaN() +} + +// startOfLine represents the state where the next byte read from p.buf is the +// start of a line (or whitespace leading up to it). +func (p *TextParser) startOfLine() stateFn { + p.lineCount++ + if p.skipBlankTab(); p.err != nil { + // End of input reached. This is the only case where + // that is not an error but a signal that we are done. + p.err = nil + return nil + } + switch p.currentByte { + case '#': + return p.startComment + case '\n': + return p.startOfLine // Empty line, start the next one. + } + return p.readingMetricName +} + +// startComment represents the state where the next byte read from p.buf is the +// start of a comment (or whitespace leading up to it). +func (p *TextParser) startComment() stateFn { + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentByte == '\n' { + return p.startOfLine + } + if p.readTokenUntilWhitespace(); p.err != nil { + return nil // Unexpected end of input. + } + // If we have hit the end of line already, there is nothing left + // to do. This is not considered a syntax error. + if p.currentByte == '\n' { + return p.startOfLine + } + keyword := p.currentToken.String() + if keyword != "HELP" && keyword != "TYPE" { + // Generic comment, ignore by fast forwarding to end of line. + for p.currentByte != '\n' { + if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil { + return nil // Unexpected end of input. + } + } + return p.startOfLine + } + // There is something. Next has to be a metric name. + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.readTokenAsMetricName(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentByte == '\n' { + // At the end of the line already. + // Again, this is not considered a syntax error. + return p.startOfLine + } + if !isBlankOrTab(p.currentByte) { + p.parseError("invalid metric name in comment") + return nil + } + p.setOrCreateCurrentMF() + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentByte == '\n' { + // At the end of the line already. + // Again, this is not considered a syntax error. + return p.startOfLine + } + switch keyword { + case "HELP": + return p.readingHelp + case "TYPE": + return p.readingType + } + panic(fmt.Sprintf("code error: unexpected keyword %q", keyword)) +} + +// readingMetricName represents the state where the last byte read (now in +// p.currentByte) is the first byte of a metric name. +func (p *TextParser) readingMetricName() stateFn { + if p.readTokenAsMetricName(); p.err != nil { + return nil + } + if p.currentToken.Len() == 0 { + p.parseError("invalid metric name") + return nil + } + p.setOrCreateCurrentMF() + // Now is the time to fix the type if it hasn't happened yet. 
+ if p.currentMF.Type == nil { + p.currentMF.Type = dto.MetricType_UNTYPED.Enum() + } + p.currentMetric = &dto.Metric{} + // Do not append the newly created currentMetric to + // currentMF.Metric right now. First wait if this is a summary, + // and the metric exists already, which we can only know after + // having read all the labels. + if p.skipBlankTabIfCurrentBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + return p.readingLabels +} + +// readingLabels represents the state where the last byte read (now in +// p.currentByte) is either the first byte of the label set (i.e. a '{'), or the +// first byte of the value (otherwise). +func (p *TextParser) readingLabels() stateFn { + // Summaries/histograms are special. We have to reset the + // currentLabels map, currentQuantile and currentBucket before starting to + // read labels. + if p.currentMF.GetType() == dto.MetricType_SUMMARY || p.currentMF.GetType() == dto.MetricType_HISTOGRAM { + p.currentLabels = map[string]string{} + p.currentLabels[string(model.MetricNameLabel)] = p.currentMF.GetName() + p.currentQuantile = math.NaN() + p.currentBucket = math.NaN() + } + if p.currentByte != '{' { + return p.readingValue + } + return p.startLabelName +} + +// startLabelName represents the state where the next byte read from p.buf is +// the start of a label name (or whitespace leading up to it). +func (p *TextParser) startLabelName() stateFn { + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentByte == '}' { + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + return p.readingValue + } + if p.readTokenAsLabelName(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentToken.Len() == 0 { + p.parseError(fmt.Sprintf("invalid label name for metric %q", p.currentMF.GetName())) + return nil + } + p.currentLabelPair = &dto.LabelPair{Name: proto.String(p.currentToken.String())} + if p.currentLabelPair.GetName() == string(model.MetricNameLabel) { + p.parseError(fmt.Sprintf("label name %q is reserved", model.MetricNameLabel)) + return nil + } + // Special summary/histogram treatment. Don't add 'quantile' and 'le' + // labels to 'real' labels. + if !(p.currentMF.GetType() == dto.MetricType_SUMMARY && p.currentLabelPair.GetName() == model.QuantileLabel) && + !(p.currentMF.GetType() == dto.MetricType_HISTOGRAM && p.currentLabelPair.GetName() == model.BucketLabel) { + p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPair) + } + if p.skipBlankTabIfCurrentBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentByte != '=' { + p.parseError(fmt.Sprintf("expected '=' after label name, found %q", p.currentByte)) + return nil + } + return p.startLabelValue +} + +// startLabelValue represents the state where the next byte read from p.buf is +// the start of a (quoted) label value (or whitespace leading up to it). +func (p *TextParser) startLabelValue() stateFn { + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. 
+ } + if p.currentByte != '"' { + p.parseError(fmt.Sprintf("expected '\"' at start of label value, found %q", p.currentByte)) + return nil + } + if p.readTokenAsLabelValue(); p.err != nil { + return nil + } + if !model.LabelValue(p.currentToken.String()).IsValid() { + p.parseError(fmt.Sprintf("invalid label value %q", p.currentToken.String())) + return nil + } + p.currentLabelPair.Value = proto.String(p.currentToken.String()) + // Special treatment of summaries: + // - Quantile labels are special, will result in dto.Quantile later. + // - Other labels have to be added to currentLabels for signature calculation. + if p.currentMF.GetType() == dto.MetricType_SUMMARY { + if p.currentLabelPair.GetName() == model.QuantileLabel { + if p.currentQuantile, p.err = strconv.ParseFloat(p.currentLabelPair.GetValue(), 64); p.err != nil { + // Create a more helpful error message. + p.parseError(fmt.Sprintf("expected float as value for 'quantile' label, got %q", p.currentLabelPair.GetValue())) + return nil + } + } else { + p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue() + } + } + // Similar special treatment of histograms. + if p.currentMF.GetType() == dto.MetricType_HISTOGRAM { + if p.currentLabelPair.GetName() == model.BucketLabel { + if p.currentBucket, p.err = strconv.ParseFloat(p.currentLabelPair.GetValue(), 64); p.err != nil { + // Create a more helpful error message. + p.parseError(fmt.Sprintf("expected float as value for 'le' label, got %q", p.currentLabelPair.GetValue())) + return nil + } + } else { + p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue() + } + } + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + switch p.currentByte { + case ',': + return p.startLabelName + + case '}': + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + return p.readingValue + default: + p.parseError(fmt.Sprintf("unexpected end of label value %q", p.currentLabelPair.GetValue())) + return nil + } +} + +// readingValue represents the state where the last byte read (now in +// p.currentByte) is the first byte of the sample value (i.e. a float). +func (p *TextParser) readingValue() stateFn { + // When we are here, we have read all the labels, so for the + // special case of a summary/histogram, we can finally find out + // if the metric already exists. + if p.currentMF.GetType() == dto.MetricType_SUMMARY { + signature := model.LabelsToSignature(p.currentLabels) + if summary := p.summaries[signature]; summary != nil { + p.currentMetric = summary + } else { + p.summaries[signature] = p.currentMetric + p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) + } + } else if p.currentMF.GetType() == dto.MetricType_HISTOGRAM { + signature := model.LabelsToSignature(p.currentLabels) + if histogram := p.histograms[signature]; histogram != nil { + p.currentMetric = histogram + } else { + p.histograms[signature] = p.currentMetric + p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) + } + } else { + p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) + } + if p.readTokenUntilWhitespace(); p.err != nil { + return nil // Unexpected end of input. + } + value, err := strconv.ParseFloat(p.currentToken.String(), 64) + if err != nil { + // Create a more helpful error message. 
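+
+	// For orientation, a sketch of the exposition lines this branch merges
+	// into a single dto.Metric (invented sample data):
+	//
+	//	# TYPE rpc_duration_seconds summary
+	//	rpc_duration_seconds{quantile="0.5"} 4.0
+	//	rpc_duration_seconds{quantile="0.9"} 8.0
+	//	rpc_duration_seconds_sum 53423.0
+	//	rpc_duration_seconds_count 9001
+	//
+	// All four sample lines share the same label signature (only '__name__'
+	// here, since 'quantile' is excluded), so they land in one Metric's
+	// Summary field.
+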
+ p.parseError(fmt.Sprintf("expected float as value, got %q", p.currentToken.String())) + return nil + } + switch p.currentMF.GetType() { + case dto.MetricType_COUNTER: + p.currentMetric.Counter = &dto.Counter{Value: proto.Float64(value)} + case dto.MetricType_GAUGE: + p.currentMetric.Gauge = &dto.Gauge{Value: proto.Float64(value)} + case dto.MetricType_UNTYPED: + p.currentMetric.Untyped = &dto.Untyped{Value: proto.Float64(value)} + case dto.MetricType_SUMMARY: + // *sigh* + if p.currentMetric.Summary == nil { + p.currentMetric.Summary = &dto.Summary{} + } + switch { + case p.currentIsSummaryCount: + p.currentMetric.Summary.SampleCount = proto.Uint64(uint64(value)) + case p.currentIsSummarySum: + p.currentMetric.Summary.SampleSum = proto.Float64(value) + case !math.IsNaN(p.currentQuantile): + p.currentMetric.Summary.Quantile = append( + p.currentMetric.Summary.Quantile, + &dto.Quantile{ + Quantile: proto.Float64(p.currentQuantile), + Value: proto.Float64(value), + }, + ) + } + case dto.MetricType_HISTOGRAM: + // *sigh* + if p.currentMetric.Histogram == nil { + p.currentMetric.Histogram = &dto.Histogram{} + } + switch { + case p.currentIsHistogramCount: + p.currentMetric.Histogram.SampleCount = proto.Uint64(uint64(value)) + case p.currentIsHistogramSum: + p.currentMetric.Histogram.SampleSum = proto.Float64(value) + case !math.IsNaN(p.currentBucket): + p.currentMetric.Histogram.Bucket = append( + p.currentMetric.Histogram.Bucket, + &dto.Bucket{ + UpperBound: proto.Float64(p.currentBucket), + CumulativeCount: proto.Uint64(uint64(value)), + }, + ) + } + default: + p.err = fmt.Errorf("unexpected type for metric name %q", p.currentMF.GetName()) + } + if p.currentByte == '\n' { + return p.startOfLine + } + return p.startTimestamp +} + +// startTimestamp represents the state where the next byte read from p.buf is +// the start of the timestamp (or whitespace leading up to it). +func (p *TextParser) startTimestamp() stateFn { + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.readTokenUntilWhitespace(); p.err != nil { + return nil // Unexpected end of input. + } + timestamp, err := strconv.ParseInt(p.currentToken.String(), 10, 64) + if err != nil { + // Create a more helpful error message. + p.parseError(fmt.Sprintf("expected integer as timestamp, got %q", p.currentToken.String())) + return nil + } + p.currentMetric.TimestampMs = proto.Int64(timestamp) + if p.readTokenUntilNewline(false); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentToken.Len() > 0 { + p.parseError(fmt.Sprintf("spurious string after timestamp: %q", p.currentToken.String())) + return nil + } + return p.startOfLine +} + +// readingHelp represents the state where the last byte read (now in +// p.currentByte) is the first byte of the docstring after 'HELP'. +func (p *TextParser) readingHelp() stateFn { + if p.currentMF.Help != nil { + p.parseError(fmt.Sprintf("second HELP line for metric name %q", p.currentMF.GetName())) + return nil + } + // Rest of line is the docstring. + if p.readTokenUntilNewline(true); p.err != nil { + return nil // Unexpected end of input. + } + p.currentMF.Help = proto.String(p.currentToken.String()) + return p.startOfLine +} + +// readingType represents the state where the last byte read (now in +// p.currentByte) is the first byte of the type hint after 'TYPE'.
+func (p *TextParser) readingType() stateFn { + if p.currentMF.Type != nil { + p.parseError(fmt.Sprintf("second TYPE line for metric name %q, or TYPE reported after samples", p.currentMF.GetName())) + return nil + } + // Rest of line is the type. + if p.readTokenUntilNewline(false); p.err != nil { + return nil // Unexpected end of input. + } + metricType, ok := dto.MetricType_value[strings.ToUpper(p.currentToken.String())] + if !ok { + p.parseError(fmt.Sprintf("unknown metric type %q", p.currentToken.String())) + return nil + } + p.currentMF.Type = dto.MetricType(metricType).Enum() + return p.startOfLine +} + +// parseError sets p.err to a ParseError at the current line with the given +// message. +func (p *TextParser) parseError(msg string) { + p.err = ParseError{ + Line: p.lineCount, + Msg: msg, + } +} + +// skipBlankTab reads (and discards) bytes from p.buf until it encounters a byte +// that is neither ' ' nor '\t'. That byte is left in p.currentByte. +func (p *TextParser) skipBlankTab() { + for { + if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil || !isBlankOrTab(p.currentByte) { + return + } + } +} + +// skipBlankTabIfCurrentBlankTab works exactly as skipBlankTab but doesn't do +// anything if p.currentByte is neither ' ' nor '\t'. +func (p *TextParser) skipBlankTabIfCurrentBlankTab() { + if isBlankOrTab(p.currentByte) { + p.skipBlankTab() + } +} + +// readTokenUntilWhitespace copies bytes from p.buf into p.currentToken. The +// first byte considered is the byte already read (now in p.currentByte). The +// first whitespace byte encountered is still copied into p.currentByte, but not +// into p.currentToken. +func (p *TextParser) readTokenUntilWhitespace() { + p.currentToken.Reset() + for p.err == nil && !isBlankOrTab(p.currentByte) && p.currentByte != '\n' { + p.currentToken.WriteByte(p.currentByte) + p.currentByte, p.err = p.buf.ReadByte() + } +} + +// readTokenUntilNewline copies bytes from p.buf into p.currentToken. The first +// byte considered is the byte already read (now in p.currentByte). The first +// newline byte encountered is still copied into p.currentByte, but not into +// p.currentToken. If recognizeEscapeSequence is true, two escape sequences are +// recognized: '\\' translates into '\', and '\n' into a line-feed character. +// All other escape sequences are invalid and cause an error. +func (p *TextParser) readTokenUntilNewline(recognizeEscapeSequence bool) { + p.currentToken.Reset() + escaped := false + for p.err == nil { + if recognizeEscapeSequence && escaped { + switch p.currentByte { + case '\\': + p.currentToken.WriteByte(p.currentByte) + case 'n': + p.currentToken.WriteByte('\n') + default: + p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte)) + return + } + escaped = false + } else { + switch p.currentByte { + case '\n': + return + case '\\': + escaped = true + default: + p.currentToken.WriteByte(p.currentByte) + } + } + p.currentByte, p.err = p.buf.ReadByte() + } +} + +// readTokenAsMetricName copies a metric name from p.buf into p.currentToken. +// The first byte considered is the byte already read (now in p.currentByte). +// The first byte not part of a metric name is still copied into p.currentByte, +// but not into p.currentToken. 
+func (p *TextParser) readTokenAsMetricName() { + p.currentToken.Reset() + if !isValidMetricNameStart(p.currentByte) { + return + } + for { + p.currentToken.WriteByte(p.currentByte) + p.currentByte, p.err = p.buf.ReadByte() + if p.err != nil || !isValidMetricNameContinuation(p.currentByte) { + return + } + } +} + +// readTokenAsLabelName copies a label name from p.buf into p.currentToken. +// The first byte considered is the byte already read (now in p.currentByte). +// The first byte not part of a label name is still copied into p.currentByte, +// but not into p.currentToken. +func (p *TextParser) readTokenAsLabelName() { + p.currentToken.Reset() + if !isValidLabelNameStart(p.currentByte) { + return + } + for { + p.currentToken.WriteByte(p.currentByte) + p.currentByte, p.err = p.buf.ReadByte() + if p.err != nil || !isValidLabelNameContinuation(p.currentByte) { + return + } + } +} + +// readTokenAsLabelValue copies a label value from p.buf into p.currentToken. +// In contrast to the other 'readTokenAs...' functions, which start with the +// last read byte in p.currentByte, this method ignores p.currentByte and starts +// with reading a new byte from p.buf. The first byte not part of a label value +// is still copied into p.currentByte, but not into p.currentToken. +func (p *TextParser) readTokenAsLabelValue() { + p.currentToken.Reset() + escaped := false + for { + if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil { + return + } + if escaped { + switch p.currentByte { + case '"', '\\': + p.currentToken.WriteByte(p.currentByte) + case 'n': + p.currentToken.WriteByte('\n') + default: + p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte)) + return + } + escaped = false + continue + } + switch p.currentByte { + case '"': + return + case '\n': + p.parseError(fmt.Sprintf("label value %q contains unescaped new-line", p.currentToken.String())) + return + case '\\': + escaped = true + default: + p.currentToken.WriteByte(p.currentByte) + } + } +} + +func (p *TextParser) setOrCreateCurrentMF() { + p.currentIsSummaryCount = false + p.currentIsSummarySum = false + p.currentIsHistogramCount = false + p.currentIsHistogramSum = false + name := p.currentToken.String() + if p.currentMF = p.metricFamiliesByName[name]; p.currentMF != nil { + return + } + // Try out if this is a _sum or _count for a summary/histogram. 
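+
+	// Illustration of the lookup (invented names, using the helpers defined
+	// below):
+	//
+	//	summaryMetricName("rpc_duration_seconds_count")       // "rpc_duration_seconds"
+	//	histogramMetricName("http_request_size_bytes_bucket") // "http_request_size_bytes"
+	//
+	// If a family by the trimmed name exists with the matching type, the
+	// sample is recorded as that family's count/sum/bucket instead of
+	// creating a new family.
+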
+ summaryName := summaryMetricName(name) + if p.currentMF = p.metricFamiliesByName[summaryName]; p.currentMF != nil { + if p.currentMF.GetType() == dto.MetricType_SUMMARY { + if isCount(name) { + p.currentIsSummaryCount = true + } + if isSum(name) { + p.currentIsSummarySum = true + } + return + } + } + histogramName := histogramMetricName(name) + if p.currentMF = p.metricFamiliesByName[histogramName]; p.currentMF != nil { + if p.currentMF.GetType() == dto.MetricType_HISTOGRAM { + if isCount(name) { + p.currentIsHistogramCount = true + } + if isSum(name) { + p.currentIsHistogramSum = true + } + return + } + } + p.currentMF = &dto.MetricFamily{Name: proto.String(name)} + p.metricFamiliesByName[name] = p.currentMF +} + +func isValidLabelNameStart(b byte) bool { + return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' +} + +func isValidLabelNameContinuation(b byte) bool { + return isValidLabelNameStart(b) || (b >= '0' && b <= '9') +} + +func isValidMetricNameStart(b byte) bool { + return isValidLabelNameStart(b) || b == ':' +} + +func isValidMetricNameContinuation(b byte) bool { + return isValidLabelNameContinuation(b) || b == ':' +} + +func isBlankOrTab(b byte) bool { + return b == ' ' || b == '\t' +} + +func isCount(name string) bool { + return len(name) > 6 && name[len(name)-6:] == "_count" +} + +func isSum(name string) bool { + return len(name) > 4 && name[len(name)-4:] == "_sum" +} + +func isBucket(name string) bool { + return len(name) > 7 && name[len(name)-7:] == "_bucket" +} + +func summaryMetricName(name string) string { + switch { + case isCount(name): + return name[:len(name)-6] + case isSum(name): + return name[:len(name)-4] + default: + return name + } +} + +func histogramMetricName(name string) string { + switch { + case isCount(name): + return name[:len(name)-6] + case isSum(name): + return name[:len(name)-4] + case isBucket(name): + return name[:len(name)-7] + default: + return name + } +} diff --git a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go new file mode 100644 index 000000000..26e92288c --- /dev/null +++ b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go @@ -0,0 +1,162 @@ +/* +Copyright (c) 2011, Open Knowledge Foundation Ltd. +All rights reserved. + +HTTP Content-Type Autonegotiation. + +The functions in this package implement the behaviour specified in +http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + Neither the name of the Open Knowledge Foundation Ltd. nor the + names of its contributors may be used to endorse or promote + products derived from this software without specific prior written + permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +*/ +package goautoneg + +import ( + "sort" + "strconv" + "strings" +) + +// Structure to represent a clause in an HTTP Accept Header +type Accept struct { + Type, SubType string + Q float64 + Params map[string]string +} + +// For internal use, so that we can use the sort interface +type accept_slice []Accept + +func (accept accept_slice) Len() int { + slice := []Accept(accept) + return len(slice) +} + +func (accept accept_slice) Less(i, j int) bool { + slice := []Accept(accept) + ai, aj := slice[i], slice[j] + if ai.Q > aj.Q { + return true + } + if ai.Type != "*" && aj.Type == "*" { + return true + } + if ai.SubType != "*" && aj.SubType == "*" { + return true + } + return false +} + +func (accept accept_slice) Swap(i, j int) { + slice := []Accept(accept) + slice[i], slice[j] = slice[j], slice[i] +} + +// Parse an Accept Header string returning a sorted list +// of clauses +func ParseAccept(header string) (accept []Accept) { + parts := strings.Split(header, ",") + accept = make([]Accept, 0, len(parts)) + for _, part := range parts { + part := strings.Trim(part, " ") + + a := Accept{} + a.Params = make(map[string]string) + a.Q = 1.0 + + mrp := strings.Split(part, ";") + + media_range := mrp[0] + sp := strings.Split(media_range, "/") + a.Type = strings.Trim(sp[0], " ") + + switch { + case len(sp) == 1 && a.Type == "*": + a.SubType = "*" + case len(sp) == 2: + a.SubType = strings.Trim(sp[1], " ") + default: + continue + } + + if len(mrp) == 1 { + accept = append(accept, a) + continue + } + + for _, param := range mrp[1:] { + sp := strings.SplitN(param, "=", 2) + if len(sp) != 2 { + continue + } + token := strings.Trim(sp[0], " ") + if token == "q" { + a.Q, _ = strconv.ParseFloat(sp[1], 32) + } else { + a.Params[token] = strings.Trim(sp[1], " ") + } + } + + accept = append(accept, a) + } + + slice := accept_slice(accept) + sort.Sort(slice) + + return +} + +// Negotiate the most appropriate content_type given the accept header +// and a list of alternatives. 
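+
+// A minimal sketch of both helpers (hypothetical header and alternatives):
+//
+//	clauses := ParseAccept("application/json;q=0.8, text/*;q=0.5")
+//	// clauses[0] is application/json (highest q), then text/*.
+//	ct := Negotiate("application/json;q=0.8, text/*;q=0.5", []string{
+//		"text/plain",
+//		"application/json",
+//	})
+//	// ct == "application/json"
+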
+func Negotiate(header string, alternatives []string) (content_type string) { + asp := make([][]string, 0, len(alternatives)) + for _, ctype := range alternatives { + asp = append(asp, strings.SplitN(ctype, "/", 2)) + } + for _, clause := range ParseAccept(header) { + for i, ctsp := range asp { + if clause.Type == ctsp[0] && clause.SubType == ctsp[1] { + content_type = alternatives[i] + return + } + if clause.Type == ctsp[0] && clause.SubType == "*" { + content_type = alternatives[i] + return + } + if clause.Type == "*" && clause.SubType == "*" { + content_type = alternatives[i] + return + } + } + } + return +} diff --git a/vendor/github.com/prometheus/common/model/alert.go b/vendor/github.com/prometheus/common/model/alert.go new file mode 100644 index 000000000..35e739c7a --- /dev/null +++ b/vendor/github.com/prometheus/common/model/alert.go @@ -0,0 +1,136 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "fmt" + "time" +) + +type AlertStatus string + +const ( + AlertFiring AlertStatus = "firing" + AlertResolved AlertStatus = "resolved" +) + +// Alert is a generic representation of an alert in the Prometheus eco-system. +type Alert struct { + // Label value pairs for purpose of aggregation, matching, and disposition + // dispatching. This must minimally include an "alertname" label. + Labels LabelSet `json:"labels"` + + // Extra key/value information which does not define alert identity. + Annotations LabelSet `json:"annotations"` + + // The known time range for this alert. Both ends are optional. + StartsAt time.Time `json:"startsAt,omitempty"` + EndsAt time.Time `json:"endsAt,omitempty"` + GeneratorURL string `json:"generatorURL"` +} + +// Name returns the name of the alert. It is equivalent to the "alertname" label. +func (a *Alert) Name() string { + return string(a.Labels[AlertNameLabel]) +} + +// Fingerprint returns a unique hash for the alert. It is equivalent to +// the fingerprint of the alert's label set. +func (a *Alert) Fingerprint() Fingerprint { + return a.Labels.Fingerprint() +} + +func (a *Alert) String() string { + s := fmt.Sprintf("%s[%s]", a.Name(), a.Fingerprint().String()[:7]) + if a.Resolved() { + return s + "[resolved]" + } + return s + "[active]" +} + +// Resolved returns true iff the activity interval ended in the past. +func (a *Alert) Resolved() bool { + return a.ResolvedAt(time.Now()) +} + +// ResolvedAt returns true iff the activity interval ended before +// the given timestamp. +func (a *Alert) ResolvedAt(ts time.Time) bool { + if a.EndsAt.IsZero() { + return false + } + return !a.EndsAt.After(ts) +} + +// Status returns the status of the alert. +func (a *Alert) Status() AlertStatus { + if a.Resolved() { + return AlertResolved + } + return AlertFiring +} + +// Validate checks whether the alert data is inconsistent.
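+
+// A minimal usage sketch (hypothetical alert; Validate, below, rejects it
+// because EndsAt precedes StartsAt):
+//
+//	a := &Alert{
+//		Labels:   LabelSet{AlertNameLabel: "HighErrorRate"},
+//		StartsAt: time.Now(),
+//		EndsAt:   time.Now().Add(-time.Hour),
+//	}
+//	fmt.Println(a.Status())   // "resolved" (EndsAt lies in the past)
+//	fmt.Println(a.Validate()) // "start time must be before end time"
+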
+func (a *Alert) Validate() error { + if a.StartsAt.IsZero() { + return fmt.Errorf("start time missing") + } + if !a.EndsAt.IsZero() && a.EndsAt.Before(a.StartsAt) { + return fmt.Errorf("start time must be before end time") + } + if err := a.Labels.Validate(); err != nil { + return fmt.Errorf("invalid label set: %s", err) + } + if len(a.Labels) == 0 { + return fmt.Errorf("at least one label pair required") + } + if err := a.Annotations.Validate(); err != nil { + return fmt.Errorf("invalid annotations: %s", err) + } + return nil +} + +// Alerts is a list of alerts that can be sorted in chronological order. +type Alerts []*Alert + +func (as Alerts) Len() int { return len(as) } +func (as Alerts) Swap(i, j int) { as[i], as[j] = as[j], as[i] } + +func (as Alerts) Less(i, j int) bool { + if as[i].StartsAt.Before(as[j].StartsAt) { + return true + } + if as[i].EndsAt.Before(as[j].EndsAt) { + return true + } + return as[i].Fingerprint() < as[j].Fingerprint() +} + +// HasFiring returns true iff one of the alerts is not resolved. +func (as Alerts) HasFiring() bool { + for _, a := range as { + if !a.Resolved() { + return true + } + } + return false +} + +// Status returns AlertFiring iff at least one of the alerts is firing. +func (as Alerts) Status() AlertStatus { + if as.HasFiring() { + return AlertFiring + } + return AlertResolved +} diff --git a/vendor/github.com/prometheus/common/model/fingerprinting.go b/vendor/github.com/prometheus/common/model/fingerprinting.go new file mode 100644 index 000000000..fc4de4106 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/fingerprinting.go @@ -0,0 +1,105 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "fmt" + "strconv" +) + +// Fingerprint provides a hash-capable representation of a Metric. +// For our purposes, FNV-1A 64-bit is used. +type Fingerprint uint64 + +// FingerprintFromString transforms a string representation into a Fingerprint. +func FingerprintFromString(s string) (Fingerprint, error) { + num, err := strconv.ParseUint(s, 16, 64) + return Fingerprint(num), err +} + +// ParseFingerprint parses the input string into a fingerprint. +func ParseFingerprint(s string) (Fingerprint, error) { + num, err := strconv.ParseUint(s, 16, 64) + if err != nil { + return 0, err + } + return Fingerprint(num), nil +} + +func (f Fingerprint) String() string { + return fmt.Sprintf("%016x", uint64(f)) +} + +// Fingerprints represents a collection of Fingerprint subject to a given +// natural sorting scheme. It implements sort.Interface. +type Fingerprints []Fingerprint + +// Len implements sort.Interface. +func (f Fingerprints) Len() int { + return len(f) +} + +// Less implements sort.Interface. +func (f Fingerprints) Less(i, j int) bool { + return f[i] < f[j] +} + +// Swap implements sort.Interface. +func (f Fingerprints) Swap(i, j int) { + f[i], f[j] = f[j], f[i] +} + +// FingerprintSet is a set of Fingerprints.
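+
+// A round-trip sketch (arbitrary example value; String zero-pads with %016x):
+//
+//	fp := Fingerprint(0x8241e1264da6b2be)
+//	s := fp.String()                 // "8241e1264da6b2be"
+//	back, err := ParseFingerprint(s)
+//	// err == nil && back == fp
+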
+type FingerprintSet map[Fingerprint]struct{} + +// Equal returns true if both sets contain the same elements (and not more). +func (s FingerprintSet) Equal(o FingerprintSet) bool { + if len(s) != len(o) { + return false + } + + for k := range s { + if _, ok := o[k]; !ok { + return false + } + } + + return true +} + +// Intersection returns the elements contained in both sets. +func (s FingerprintSet) Intersection(o FingerprintSet) FingerprintSet { + myLength, otherLength := len(s), len(o) + if myLength == 0 || otherLength == 0 { + return FingerprintSet{} + } + + subSet := s + superSet := o + + if otherLength < myLength { + subSet = o + superSet = s + } + + out := FingerprintSet{} + + for k := range subSet { + if _, ok := superSet[k]; ok { + out[k] = struct{}{} + } + } + + return out +} diff --git a/vendor/github.com/prometheus/common/model/fnv.go b/vendor/github.com/prometheus/common/model/fnv.go new file mode 100644 index 000000000..038fc1c90 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/fnv.go @@ -0,0 +1,42 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +// Inline and byte-free variant of hash/fnv's fnv64a. + +const ( + offset64 = 14695981039346656037 + prime64 = 1099511628211 +) + +// hashNew initializes a new fnv64a hash value. +func hashNew() uint64 { + return offset64 +} + +// hashAdd adds a string to a fnv64a hash value, returning the updated hash. +func hashAdd(h uint64, s string) uint64 { + for i := 0; i < len(s); i++ { + h ^= uint64(s[i]) + h *= prime64 + } + return h +} + +// hashAddByte adds a byte to a fnv64a hash value, returning the updated hash. +func hashAddByte(h uint64, b byte) uint64 { + h ^= uint64(b) + h *= prime64 + return h +} diff --git a/vendor/github.com/prometheus/common/model/labels.go b/vendor/github.com/prometheus/common/model/labels.go new file mode 100644 index 000000000..41051a01a --- /dev/null +++ b/vendor/github.com/prometheus/common/model/labels.go @@ -0,0 +1,210 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "encoding/json" + "fmt" + "regexp" + "strings" + "unicode/utf8" +) + +const ( + // AlertNameLabel is the name of the label containing an alert's name. + AlertNameLabel = "alertname" + + // ExportedLabelPrefix is the prefix to prepend to the label names present in + // exported metrics if a label of the same name is added by the server.
+ ExportedLabelPrefix = "exported_" + + // MetricNameLabel is the label name indicating the metric name of a + // timeseries. + MetricNameLabel = "__name__" + + // SchemeLabel is the name of the label that holds the scheme on which to + // scrape a target. + SchemeLabel = "__scheme__" + + // AddressLabel is the name of the label that holds the address of + // a scrape target. + AddressLabel = "__address__" + + // MetricsPathLabel is the name of the label that holds the path on which to + // scrape a target. + MetricsPathLabel = "__metrics_path__" + + // ReservedLabelPrefix is a prefix which is not legal in user-supplied + // label names. + ReservedLabelPrefix = "__" + + // MetaLabelPrefix is a prefix for labels that provide meta information. + // Labels with this prefix are used for intermediate label processing and + // will not be attached to time series. + MetaLabelPrefix = "__meta_" + + // TmpLabelPrefix is a prefix for temporary labels as part of relabelling. + // Labels with this prefix are used for intermediate label processing and + // will not be attached to time series. This is reserved for use in + // Prometheus configuration files by users. + TmpLabelPrefix = "__tmp_" + + // ParamLabelPrefix is a prefix for labels that provide URL parameters + // used to scrape a target. + ParamLabelPrefix = "__param_" + + // JobLabel is the label name indicating the job from which a timeseries + // was scraped. + JobLabel = "job" + + // InstanceLabel is the label name used for the instance label. + InstanceLabel = "instance" + + // BucketLabel is used for the label that defines the upper bound of a + // bucket of a histogram ("le" -> "less or equal"). + BucketLabel = "le" + + // QuantileLabel is used for the label that defines the quantile in a + // summary. + QuantileLabel = "quantile" +) + +// LabelNameRE is a regular expression matching valid label names. Note that the +// IsValid method of LabelName performs the same check but faster than a match +// with this regular expression. +var LabelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$") + +// A LabelName is a key for a LabelSet or Metric. It has a value associated +// therewith. +type LabelName string + +// IsValid is true iff the label name matches the pattern of LabelNameRE. This +// method, however, does not use LabelNameRE for the check but a much faster +// hardcoded implementation. +func (ln LabelName) IsValid() bool { + if len(ln) == 0 { + return false + } + for i, b := range ln { + if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) { + return false + } + } + return true +} + +// UnmarshalYAML implements the yaml.Unmarshaler interface. +func (ln *LabelName) UnmarshalYAML(unmarshal func(interface{}) error) error { + var s string + if err := unmarshal(&s); err != nil { + return err + } + if !LabelName(s).IsValid() { + return fmt.Errorf("%q is not a valid label name", s) + } + *ln = LabelName(s) + return nil +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (ln *LabelName) UnmarshalJSON(b []byte) error { + var s string + if err := json.Unmarshal(b, &s); err != nil { + return err + } + if !LabelName(s).IsValid() { + return fmt.Errorf("%q is not a valid label name", s) + } + *ln = LabelName(s) + return nil +} + +// LabelNames is a sortable LabelName slice. It implements sort.Interface.
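+
+// Examples of the fast validity check (same semantics as LabelNameRE):
+//
+//	LabelName("http_method").IsValid() // true
+//	LabelName("0abc").IsValid()        // false: must not start with a digit
+//	LabelName("label-name").IsValid()  // false: '-' is not allowed
+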
+type LabelNames []LabelName + +func (l LabelNames) Len() int { + return len(l) +} + +func (l LabelNames) Less(i, j int) bool { + return l[i] < l[j] +} + +func (l LabelNames) Swap(i, j int) { + l[i], l[j] = l[j], l[i] +} + +func (l LabelNames) String() string { + labelStrings := make([]string, 0, len(l)) + for _, label := range l { + labelStrings = append(labelStrings, string(label)) + } + return strings.Join(labelStrings, ", ") +} + +// A LabelValue is an associated value for a LabelName. +type LabelValue string + +// IsValid returns true iff the string is a valid UTF8. +func (lv LabelValue) IsValid() bool { + return utf8.ValidString(string(lv)) +} + +// LabelValues is a sortable LabelValue slice. It implements sort.Interface. +type LabelValues []LabelValue + +func (l LabelValues) Len() int { + return len(l) +} + +func (l LabelValues) Less(i, j int) bool { + return string(l[i]) < string(l[j]) +} + +func (l LabelValues) Swap(i, j int) { + l[i], l[j] = l[j], l[i] +} + +// LabelPair pairs a name with a value. +type LabelPair struct { + Name LabelName + Value LabelValue +} + +// LabelPairs is a sortable slice of LabelPair pointers. It implements +// sort.Interface. +type LabelPairs []*LabelPair + +func (l LabelPairs) Len() int { + return len(l) +} + +func (l LabelPairs) Less(i, j int) bool { + switch { + case l[i].Name > l[j].Name: + return false + case l[i].Name < l[j].Name: + return true + case l[i].Value > l[j].Value: + return false + case l[i].Value < l[j].Value: + return true + default: + return false + } +} + +func (l LabelPairs) Swap(i, j int) { + l[i], l[j] = l[j], l[i] +} diff --git a/vendor/github.com/prometheus/common/model/labelset.go b/vendor/github.com/prometheus/common/model/labelset.go new file mode 100644 index 000000000..6eda08a73 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/labelset.go @@ -0,0 +1,169 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "encoding/json" + "fmt" + "sort" + "strings" +) + +// A LabelSet is a collection of LabelName and LabelValue pairs. The LabelSet +// may be fully-qualified down to the point where it may resolve to a single +// Metric in the data store or not. All operations that occur within the realm +// of a LabelSet can emit a vector of Metric entities to which the LabelSet may +// match. +type LabelSet map[LabelName]LabelValue + +// Validate checks whether all names and values in the label set +// are valid. +func (ls LabelSet) Validate() error { + for ln, lv := range ls { + if !ln.IsValid() { + return fmt.Errorf("invalid name %q", ln) + } + if !lv.IsValid() { + return fmt.Errorf("invalid value %q", lv) + } + } + return nil +} + +// Equal returns true iff both label sets have exactly the same key/value pairs. 
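+
+// A minimal usage sketch (invented labels):
+//
+//	ls := LabelSet{"job": "api", "instance": "host:9090"}
+//	if err := ls.Validate(); err != nil {
+//		// a label name or value was invalid
+//	}
+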
+func (ls LabelSet) Equal(o LabelSet) bool { + if len(ls) != len(o) { + return false + } + for ln, lv := range ls { + olv, ok := o[ln] + if !ok { + return false + } + if olv != lv { + return false + } + } + return true +} + +// Before compares the metrics, using the following criteria: +// +// If m has fewer labels than o, it is before o. If it has more, it is not. +// +// If the number of labels is the same, the superset of all label names is +// sorted alphanumerically. The first differing label pair found in that order +// determines the outcome: If the label does not exist at all in m, then m is +// before o, and vice versa. Otherwise the label value is compared +// alphanumerically. +// +// If m and o are equal, the method returns false. +func (ls LabelSet) Before(o LabelSet) bool { + if len(ls) < len(o) { + return true + } + if len(ls) > len(o) { + return false + } + + lns := make(LabelNames, 0, len(ls)+len(o)) + for ln := range ls { + lns = append(lns, ln) + } + for ln := range o { + lns = append(lns, ln) + } + // It's probably not worth it to de-dup lns. + sort.Sort(lns) + for _, ln := range lns { + mlv, ok := ls[ln] + if !ok { + return true + } + olv, ok := o[ln] + if !ok { + return false + } + if mlv < olv { + return true + } + if mlv > olv { + return false + } + } + return false +} + +// Clone returns a copy of the label set. +func (ls LabelSet) Clone() LabelSet { + lsn := make(LabelSet, len(ls)) + for ln, lv := range ls { + lsn[ln] = lv + } + return lsn +} + +// Merge is a helper function to non-destructively merge two label sets. +func (l LabelSet) Merge(other LabelSet) LabelSet { + result := make(LabelSet, len(l)) + + for k, v := range l { + result[k] = v + } + + for k, v := range other { + result[k] = v + } + + return result +} + +func (l LabelSet) String() string { + lstrs := make([]string, 0, len(l)) + for l, v := range l { + lstrs = append(lstrs, fmt.Sprintf("%s=%q", l, v)) + } + + sort.Strings(lstrs) + return fmt.Sprintf("{%s}", strings.Join(lstrs, ", ")) +} + +// Fingerprint returns the LabelSet's fingerprint. +func (ls LabelSet) Fingerprint() Fingerprint { + return labelSetToFingerprint(ls) +} + +// FastFingerprint returns the LabelSet's Fingerprint calculated by a faster hashing +// algorithm, which is, however, more susceptible to hash collisions. +func (ls LabelSet) FastFingerprint() Fingerprint { + return labelSetToFastFingerprint(ls) +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (l *LabelSet) UnmarshalJSON(b []byte) error { + var m map[LabelName]LabelValue + if err := json.Unmarshal(b, &m); err != nil { + return err + } + // encoding/json only unmarshals maps of the form map[string]T. It treats + // LabelName as a string and does not call its UnmarshalJSON method. + // Thus, we have to replicate the behavior here. + for ln := range m { + if !ln.IsValid() { + return fmt.Errorf("%q is not a valid label name", ln) + } + } + *l = LabelSet(m) + return nil +} diff --git a/vendor/github.com/prometheus/common/model/metric.go b/vendor/github.com/prometheus/common/model/metric.go new file mode 100644 index 000000000..00804b7fe --- /dev/null +++ b/vendor/github.com/prometheus/common/model/metric.go @@ -0,0 +1,102 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "fmt" + "regexp" + "sort" + "strings" +) + +var ( + // MetricNameRE is a regular expression matching valid metric + // names. Note that the IsValidMetricName function performs the same + // check but faster than a match with this regular expression. + MetricNameRE = regexp.MustCompile(`^[a-zA-Z_:][a-zA-Z0-9_:]*$`) +) + +// A Metric is similar to a LabelSet, but the key difference is that a Metric is +// a singleton and refers to one and only one stream of samples. +type Metric LabelSet + +// Equal compares the metrics. +func (m Metric) Equal(o Metric) bool { + return LabelSet(m).Equal(LabelSet(o)) +} + +// Before compares the metrics' underlying label sets. +func (m Metric) Before(o Metric) bool { + return LabelSet(m).Before(LabelSet(o)) +} + +// Clone returns a copy of the Metric. +func (m Metric) Clone() Metric { + clone := make(Metric, len(m)) + for k, v := range m { + clone[k] = v + } + return clone +} + +func (m Metric) String() string { + metricName, hasName := m[MetricNameLabel] + numLabels := len(m) - 1 + if !hasName { + numLabels = len(m) + } + labelStrings := make([]string, 0, numLabels) + for label, value := range m { + if label != MetricNameLabel { + labelStrings = append(labelStrings, fmt.Sprintf("%s=%q", label, value)) + } + } + + switch numLabels { + case 0: + if hasName { + return string(metricName) + } + return "{}" + default: + sort.Strings(labelStrings) + return fmt.Sprintf("%s{%s}", metricName, strings.Join(labelStrings, ", ")) + } +} + +// Fingerprint returns a Metric's Fingerprint. +func (m Metric) Fingerprint() Fingerprint { + return LabelSet(m).Fingerprint() +} + +// FastFingerprint returns a Metric's Fingerprint calculated by a faster hashing +// algorithm, which is, however, more susceptible to hash collisions. +func (m Metric) FastFingerprint() Fingerprint { + return LabelSet(m).FastFingerprint() +} + +// IsValidMetricName returns true iff name matches the pattern of MetricNameRE. +// This function, however, does not use MetricNameRE for the check but a much +// faster hardcoded implementation. +func IsValidMetricName(n LabelValue) bool { + if len(n) == 0 { + return false + } + for i, b := range n { + if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || b == ':' || (b >= '0' && b <= '9' && i > 0)) { + return false + } + } + return true +} diff --git a/vendor/github.com/prometheus/common/model/model.go b/vendor/github.com/prometheus/common/model/model.go new file mode 100644 index 000000000..a7b969170 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/model.go @@ -0,0 +1,16 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package model contains common data structures that are shared across
+// Prometheus components and libraries.
+package model
diff --git a/vendor/github.com/prometheus/common/model/signature.go b/vendor/github.com/prometheus/common/model/signature.go
new file mode 100644
index 000000000..8762b13c6
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/signature.go
@@ -0,0 +1,144 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+	"sort"
+)
+
+// SeparatorByte is a byte that cannot occur in valid UTF-8 sequences and is
+// used to separate label names, label values, and other strings from each other
+// when calculating their combined hash value (aka signature aka fingerprint).
+const SeparatorByte byte = 255
+
+var (
+	// cache the signature of an empty label set.
+	emptyLabelSignature = hashNew()
+)
+
+// LabelsToSignature returns a quasi-unique signature (i.e., fingerprint) for a
+// given label set. (Collisions are possible but unlikely if the number of label
+// sets the function is applied to is small.)
+func LabelsToSignature(labels map[string]string) uint64 {
+	if len(labels) == 0 {
+		return emptyLabelSignature
+	}
+
+	labelNames := make([]string, 0, len(labels))
+	for labelName := range labels {
+		labelNames = append(labelNames, labelName)
+	}
+	sort.Strings(labelNames)
+
+	sum := hashNew()
+	for _, labelName := range labelNames {
+		sum = hashAdd(sum, labelName)
+		sum = hashAddByte(sum, SeparatorByte)
+		sum = hashAdd(sum, labels[labelName])
+		sum = hashAddByte(sum, SeparatorByte)
+	}
+	return sum
+}
+
+// labelSetToFingerprint works exactly as LabelsToSignature but takes a LabelSet as
+// parameter (rather than a label map) and returns a Fingerprint.
+func labelSetToFingerprint(ls LabelSet) Fingerprint {
+	if len(ls) == 0 {
+		return Fingerprint(emptyLabelSignature)
+	}
+
+	labelNames := make(LabelNames, 0, len(ls))
+	for labelName := range ls {
+		labelNames = append(labelNames, labelName)
+	}
+	sort.Sort(labelNames)
+
+	sum := hashNew()
+	for _, labelName := range labelNames {
+		sum = hashAdd(sum, string(labelName))
+		sum = hashAddByte(sum, SeparatorByte)
+		sum = hashAdd(sum, string(ls[labelName]))
+		sum = hashAddByte(sum, SeparatorByte)
+	}
+	return Fingerprint(sum)
+}
+
+// labelSetToFastFingerprint works like labelSetToFingerprint but uses a faster
+// and less allocation-heavy hash function, which is more prone to hash
+// collisions. Therefore, collision detection should be applied.
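+//
+// Both flavors are reachable through the exported LabelSet methods; as an
+// illustrative sketch (not upstream documentation):
+//
+//	ls := LabelSet{"job": "api", "instance": "host-1"}
+//	fp := ls.Fingerprint()      // sort-based, collision-resistant signature
+//	ffp := ls.FastFingerprint() // XOR-combined, faster but collision-prone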
+func labelSetToFastFingerprint(ls LabelSet) Fingerprint {
+	if len(ls) == 0 {
+		return Fingerprint(emptyLabelSignature)
+	}
+
+	var result uint64
+	for labelName, labelValue := range ls {
+		sum := hashNew()
+		sum = hashAdd(sum, string(labelName))
+		sum = hashAddByte(sum, SeparatorByte)
+		sum = hashAdd(sum, string(labelValue))
+		result ^= sum
+	}
+	return Fingerprint(result)
+}
+
+// SignatureForLabels works like LabelsToSignature but takes a Metric as
+// parameter (rather than a label map) and only includes the labels with the
+// specified LabelNames in the signature calculation. The labels passed in
+// will be sorted by this function.
+func SignatureForLabels(m Metric, labels ...LabelName) uint64 {
+	if len(labels) == 0 {
+		return emptyLabelSignature
+	}
+
+	sort.Sort(LabelNames(labels))
+
+	sum := hashNew()
+	for _, label := range labels {
+		sum = hashAdd(sum, string(label))
+		sum = hashAddByte(sum, SeparatorByte)
+		sum = hashAdd(sum, string(m[label]))
+		sum = hashAddByte(sum, SeparatorByte)
+	}
+	return sum
+}
+
+// SignatureWithoutLabels works like LabelsToSignature but takes a Metric as
+// parameter (rather than a label map) and excludes the labels with any of the
+// specified LabelNames from the signature calculation.
+func SignatureWithoutLabels(m Metric, labels map[LabelName]struct{}) uint64 {
+	if len(m) == 0 {
+		return emptyLabelSignature
+	}
+
+	labelNames := make(LabelNames, 0, len(m))
+	for labelName := range m {
+		if _, exclude := labels[labelName]; !exclude {
+			labelNames = append(labelNames, labelName)
+		}
+	}
+	if len(labelNames) == 0 {
+		return emptyLabelSignature
+	}
+	sort.Sort(labelNames)
+
+	sum := hashNew()
+	for _, labelName := range labelNames {
+		sum = hashAdd(sum, string(labelName))
+		sum = hashAddByte(sum, SeparatorByte)
+		sum = hashAdd(sum, string(m[labelName]))
+		sum = hashAddByte(sum, SeparatorByte)
+	}
+	return sum
+}
diff --git a/vendor/github.com/prometheus/common/model/silence.go b/vendor/github.com/prometheus/common/model/silence.go
new file mode 100644
index 000000000..bb99889d2
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/silence.go
@@ -0,0 +1,106 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+	"encoding/json"
+	"fmt"
+	"regexp"
+	"time"
+)
+
+// A Matcher describes how the value of a given label is matched.
+type Matcher struct {
+	Name    LabelName `json:"name"`
+	Value   string    `json:"value"`
+	IsRegex bool      `json:"isRegex"`
+}
+
+func (m *Matcher) UnmarshalJSON(b []byte) error {
+	type plain Matcher
+	if err := json.Unmarshal(b, (*plain)(m)); err != nil {
+		return err
+	}
+
+	if len(m.Name) == 0 {
+		return fmt.Errorf("label name in matcher must not be empty")
+	}
+	if m.IsRegex {
+		if _, err := regexp.Compile(m.Value); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// Validate returns nil iff all fields of the matcher have valid values.
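+//
+// For example (an illustrative sketch only):
+//
+//	m := &Matcher{Name: "job", Value: "api-.*", IsRegex: true}
+//	if err := m.Validate(); err != nil {
+//		// handle the invalid matcher
+//	}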
+func (m *Matcher) Validate() error {
+	if !m.Name.IsValid() {
+		return fmt.Errorf("invalid name %q", m.Name)
+	}
+	if m.IsRegex {
+		if _, err := regexp.Compile(m.Value); err != nil {
+			return fmt.Errorf("invalid regular expression %q", m.Value)
+		}
+	} else if !LabelValue(m.Value).IsValid() || len(m.Value) == 0 {
+		return fmt.Errorf("invalid value %q", m.Value)
+	}
+	return nil
+}
+
+// Silence defines the representation of a silence definition in the Prometheus
+// eco-system.
+type Silence struct {
+	ID uint64 `json:"id,omitempty"`
+
+	Matchers []*Matcher `json:"matchers"`
+
+	StartsAt time.Time `json:"startsAt"`
+	EndsAt   time.Time `json:"endsAt"`
+
+	CreatedAt time.Time `json:"createdAt,omitempty"`
+	CreatedBy string    `json:"createdBy"`
+	Comment   string    `json:"comment,omitempty"`
+}
+
+// Validate returns nil iff all fields of the silence have valid values.
+func (s *Silence) Validate() error {
+	if len(s.Matchers) == 0 {
+		return fmt.Errorf("at least one matcher required")
+	}
+	for _, m := range s.Matchers {
+		if err := m.Validate(); err != nil {
+			return fmt.Errorf("invalid matcher: %s", err)
+		}
+	}
+	if s.StartsAt.IsZero() {
+		return fmt.Errorf("start time missing")
+	}
+	if s.EndsAt.IsZero() {
+		return fmt.Errorf("end time missing")
+	}
+	if s.EndsAt.Before(s.StartsAt) {
+		return fmt.Errorf("end time must not be before start time")
+	}
+	if s.CreatedBy == "" {
+		return fmt.Errorf("creator information missing")
+	}
+	if s.Comment == "" {
+		return fmt.Errorf("comment missing")
+	}
+	if s.CreatedAt.IsZero() {
+		return fmt.Errorf("creation timestamp missing")
+	}
+	return nil
+}
diff --git a/vendor/github.com/prometheus/common/model/time.go b/vendor/github.com/prometheus/common/model/time.go
new file mode 100644
index 000000000..7b0064fdb
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/time.go
@@ -0,0 +1,270 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+	"fmt"
+	"math"
+	"regexp"
+	"strconv"
+	"strings"
+	"time"
+)
+
+const (
+	// minimumTick is the minimum supported time resolution. It has to
+	// divide time.Second evenly for the conversions below to be exact.
+	minimumTick = time.Millisecond
+	// second is the number of minimum ticks in one second.
+	second = int64(time.Second / minimumTick)
+	// The number of nanoseconds per minimum tick.
+	nanosPerTick = int64(minimumTick / time.Nanosecond)
+
+	// Earliest is the earliest Time representable. Handy for
+	// initializing a high watermark.
+	Earliest = Time(math.MinInt64)
+	// Latest is the latest Time representable. Handy for initializing
+	// a low watermark.
+	Latest = Time(math.MaxInt64)
+)
+
+// Time is the number of milliseconds since the epoch
+// (1970-01-01 00:00 UTC) excluding leap seconds.
+type Time int64
+
+// Interval describes an interval between two timestamps.
+type Interval struct {
+	Start, End Time
+}
+
+// Now returns the current time as a Time.
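+//
+// A small usage sketch (illustrative only):
+//
+//	t := Now()                      // ticks (milliseconds) since the epoch
+//	later := t.Add(5 * time.Minute) // arithmetic stays within Time
+//	d := later.Sub(t)               // back to a time.Duration: 5m0s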
+func Now() Time {
+	return TimeFromUnixNano(time.Now().UnixNano())
+}
+
+// TimeFromUnix returns the Time equivalent to the Unix Time t
+// provided in seconds.
+func TimeFromUnix(t int64) Time {
+	return Time(t * second)
+}
+
+// TimeFromUnixNano returns the Time equivalent to the Unix Time
+// t provided in nanoseconds.
+func TimeFromUnixNano(t int64) Time {
+	return Time(t / nanosPerTick)
+}
+
+// Equal reports whether two Times represent the same instant.
+func (t Time) Equal(o Time) bool {
+	return t == o
+}
+
+// Before reports whether the Time t is before o.
+func (t Time) Before(o Time) bool {
+	return t < o
+}
+
+// After reports whether the Time t is after o.
+func (t Time) After(o Time) bool {
+	return t > o
+}
+
+// Add returns the Time t + d.
+func (t Time) Add(d time.Duration) Time {
+	return t + Time(d/minimumTick)
+}
+
+// Sub returns the Duration t - o.
+func (t Time) Sub(o Time) time.Duration {
+	return time.Duration(t-o) * minimumTick
+}
+
+// Time returns the time.Time representation of t.
+func (t Time) Time() time.Time {
+	return time.Unix(int64(t)/second, (int64(t)%second)*nanosPerTick)
+}
+
+// Unix returns t as a Unix time, the number of seconds elapsed
+// since January 1, 1970 UTC.
+func (t Time) Unix() int64 {
+	return int64(t) / second
+}
+
+// UnixNano returns t as a Unix time, the number of nanoseconds elapsed
+// since January 1, 1970 UTC.
+func (t Time) UnixNano() int64 {
+	return int64(t) * nanosPerTick
+}
+
+// The number of digits after the dot.
+var dotPrecision = int(math.Log10(float64(second)))
+
+// String returns a string representation of the Time.
+func (t Time) String() string {
+	return strconv.FormatFloat(float64(t)/float64(second), 'f', -1, 64)
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (t Time) MarshalJSON() ([]byte, error) {
+	return []byte(t.String()), nil
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (t *Time) UnmarshalJSON(b []byte) error {
+	p := strings.Split(string(b), ".")
+	switch len(p) {
+	case 1:
+		v, err := strconv.ParseInt(string(p[0]), 10, 64)
+		if err != nil {
+			return err
+		}
+		*t = Time(v * second)
+
+	case 2:
+		v, err := strconv.ParseInt(string(p[0]), 10, 64)
+		if err != nil {
+			return err
+		}
+		v *= second
+
+		prec := dotPrecision - len(p[1])
+		if prec < 0 {
+			p[1] = p[1][:dotPrecision]
+		} else if prec > 0 {
+			p[1] = p[1] + strings.Repeat("0", prec)
+		}
+
+		va, err := strconv.ParseInt(p[1], 10, 32)
+		if err != nil {
+			return err
+		}
+
+		// If the value was something like -0.1, the sign is lost during
+		// parsing because of the leading zero; this ensures that we
+		// capture it.
+		if len(p[0]) > 0 && p[0][0] == '-' && v+va > 0 {
+			*t = Time(v+va) * -1
+		} else {
+			*t = Time(v + va)
+		}
+
+	default:
+		return fmt.Errorf("invalid time %q", string(b))
+	}
+	return nil
+}
+
+// Duration wraps time.Duration. It is used to parse the custom duration format
+// from YAML.
+// This type should not propagate beyond the scope of input/output processing.
+type Duration time.Duration
+
+// Set implements pflag/flag.Value.
+func (d *Duration) Set(s string) error {
+	var err error
+	*d, err = ParseDuration(s)
+	return err
+}
+
+// Type implements pflag.Value.
+func (d *Duration) Type() string {
+	return "duration"
+}
+
+var durationRE = regexp.MustCompile("^([0-9]+)(y|w|d|h|m|s|ms)$")
+
+// ParseDuration parses a string into a Duration, assuming that a year
+// always has 365d, a week always has 7d, and a day always has 24h.
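+//
+// For instance (an illustrative sketch):
+//
+//	d, err := ParseDuration("2h") // Duration(2 * time.Hour), nil error
+//	d, err = ParseDuration("1y")  // 365 days worth of time.Duration
+//	d, err = ParseDuration("90")  // error: a unit suffix is required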
+func ParseDuration(durationStr string) (Duration, error) { + matches := durationRE.FindStringSubmatch(durationStr) + if len(matches) != 3 { + return 0, fmt.Errorf("not a valid duration string: %q", durationStr) + } + var ( + n, _ = strconv.Atoi(matches[1]) + dur = time.Duration(n) * time.Millisecond + ) + switch unit := matches[2]; unit { + case "y": + dur *= 1000 * 60 * 60 * 24 * 365 + case "w": + dur *= 1000 * 60 * 60 * 24 * 7 + case "d": + dur *= 1000 * 60 * 60 * 24 + case "h": + dur *= 1000 * 60 * 60 + case "m": + dur *= 1000 * 60 + case "s": + dur *= 1000 + case "ms": + // Value already correct + default: + return 0, fmt.Errorf("invalid time unit in duration string: %q", unit) + } + return Duration(dur), nil +} + +func (d Duration) String() string { + var ( + ms = int64(time.Duration(d) / time.Millisecond) + unit = "ms" + ) + if ms == 0 { + return "0s" + } + factors := map[string]int64{ + "y": 1000 * 60 * 60 * 24 * 365, + "w": 1000 * 60 * 60 * 24 * 7, + "d": 1000 * 60 * 60 * 24, + "h": 1000 * 60 * 60, + "m": 1000 * 60, + "s": 1000, + "ms": 1, + } + + switch int64(0) { + case ms % factors["y"]: + unit = "y" + case ms % factors["w"]: + unit = "w" + case ms % factors["d"]: + unit = "d" + case ms % factors["h"]: + unit = "h" + case ms % factors["m"]: + unit = "m" + case ms % factors["s"]: + unit = "s" + } + return fmt.Sprintf("%v%v", ms/factors[unit], unit) +} + +// MarshalYAML implements the yaml.Marshaler interface. +func (d Duration) MarshalYAML() (interface{}, error) { + return d.String(), nil +} + +// UnmarshalYAML implements the yaml.Unmarshaler interface. +func (d *Duration) UnmarshalYAML(unmarshal func(interface{}) error) error { + var s string + if err := unmarshal(&s); err != nil { + return err + } + dur, err := ParseDuration(s) + if err != nil { + return err + } + *d = dur + return nil +} diff --git a/vendor/github.com/prometheus/common/model/value.go b/vendor/github.com/prometheus/common/model/value.go new file mode 100644 index 000000000..c9d8fb1a2 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/value.go @@ -0,0 +1,416 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "encoding/json" + "fmt" + "math" + "sort" + "strconv" + "strings" +) + +var ( + // ZeroSamplePair is the pseudo zero-value of SamplePair used to signal a + // non-existing sample pair. It is a SamplePair with timestamp Earliest and + // value 0.0. Note that the natural zero value of SamplePair has a timestamp + // of 0, which is possible to appear in a real SamplePair and thus not + // suitable to signal a non-existing SamplePair. + ZeroSamplePair = SamplePair{Timestamp: Earliest} + + // ZeroSample is the pseudo zero-value of Sample used to signal a + // non-existing sample. It is a Sample with timestamp Earliest, value 0.0, + // and metric nil. Note that the natural zero value of Sample has a timestamp + // of 0, which is possible to appear in a real Sample and thus not suitable + // to signal a non-existing Sample. 
+ ZeroSample = Sample{Timestamp: Earliest} +) + +// A SampleValue is a representation of a value for a given sample at a given +// time. +type SampleValue float64 + +// MarshalJSON implements json.Marshaler. +func (v SampleValue) MarshalJSON() ([]byte, error) { + return json.Marshal(v.String()) +} + +// UnmarshalJSON implements json.Unmarshaler. +func (v *SampleValue) UnmarshalJSON(b []byte) error { + if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' { + return fmt.Errorf("sample value must be a quoted string") + } + f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64) + if err != nil { + return err + } + *v = SampleValue(f) + return nil +} + +// Equal returns true if the value of v and o is equal or if both are NaN. Note +// that v==o is false if both are NaN. If you want the conventional float +// behavior, use == to compare two SampleValues. +func (v SampleValue) Equal(o SampleValue) bool { + if v == o { + return true + } + return math.IsNaN(float64(v)) && math.IsNaN(float64(o)) +} + +func (v SampleValue) String() string { + return strconv.FormatFloat(float64(v), 'f', -1, 64) +} + +// SamplePair pairs a SampleValue with a Timestamp. +type SamplePair struct { + Timestamp Time + Value SampleValue +} + +// MarshalJSON implements json.Marshaler. +func (s SamplePair) MarshalJSON() ([]byte, error) { + t, err := json.Marshal(s.Timestamp) + if err != nil { + return nil, err + } + v, err := json.Marshal(s.Value) + if err != nil { + return nil, err + } + return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (s *SamplePair) UnmarshalJSON(b []byte) error { + v := [...]json.Unmarshaler{&s.Timestamp, &s.Value} + return json.Unmarshal(b, &v) +} + +// Equal returns true if this SamplePair and o have equal Values and equal +// Timestamps. The semantics of Value equality is defined by SampleValue.Equal. +func (s *SamplePair) Equal(o *SamplePair) bool { + return s == o || (s.Value.Equal(o.Value) && s.Timestamp.Equal(o.Timestamp)) +} + +func (s SamplePair) String() string { + return fmt.Sprintf("%s @[%s]", s.Value, s.Timestamp) +} + +// Sample is a sample pair associated with a metric. +type Sample struct { + Metric Metric `json:"metric"` + Value SampleValue `json:"value"` + Timestamp Time `json:"timestamp"` +} + +// Equal compares first the metrics, then the timestamp, then the value. The +// semantics of value equality is defined by SampleValue.Equal. +func (s *Sample) Equal(o *Sample) bool { + if s == o { + return true + } + + if !s.Metric.Equal(o.Metric) { + return false + } + if !s.Timestamp.Equal(o.Timestamp) { + return false + } + + return s.Value.Equal(o.Value) +} + +func (s Sample) String() string { + return fmt.Sprintf("%s => %s", s.Metric, SamplePair{ + Timestamp: s.Timestamp, + Value: s.Value, + }) +} + +// MarshalJSON implements json.Marshaler. +func (s Sample) MarshalJSON() ([]byte, error) { + v := struct { + Metric Metric `json:"metric"` + Value SamplePair `json:"value"` + }{ + Metric: s.Metric, + Value: SamplePair{ + Timestamp: s.Timestamp, + Value: s.Value, + }, + } + + return json.Marshal(&v) +} + +// UnmarshalJSON implements json.Unmarshaler. 
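+//
+// The accepted wire format mirrors MarshalJSON above; illustratively, with
+// made-up values (a sketch, not normative):
+//
+//	{"metric":{"__name__":"up","job":"api"},"value":[1435781451.781,"1"]}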
+func (s *Sample) UnmarshalJSON(b []byte) error {
+	v := struct {
+		Metric Metric     `json:"metric"`
+		Value  SamplePair `json:"value"`
+	}{
+		Metric: s.Metric,
+		Value: SamplePair{
+			Timestamp: s.Timestamp,
+			Value:     s.Value,
+		},
+	}
+
+	if err := json.Unmarshal(b, &v); err != nil {
+		return err
+	}
+
+	s.Metric = v.Metric
+	s.Timestamp = v.Value.Timestamp
+	s.Value = v.Value.Value
+
+	return nil
+}
+
+// Samples is a sortable Sample slice. It implements sort.Interface.
+type Samples []*Sample
+
+func (s Samples) Len() int {
+	return len(s)
+}
+
+// Less compares first the metrics, then the timestamp.
+func (s Samples) Less(i, j int) bool {
+	switch {
+	case s[i].Metric.Before(s[j].Metric):
+		return true
+	case s[j].Metric.Before(s[i].Metric):
+		return false
+	case s[i].Timestamp.Before(s[j].Timestamp):
+		return true
+	default:
+		return false
+	}
+}
+
+func (s Samples) Swap(i, j int) {
+	s[i], s[j] = s[j], s[i]
+}
+
+// Equal compares two sets of samples and returns true if they are equal.
+func (s Samples) Equal(o Samples) bool {
+	if len(s) != len(o) {
+		return false
+	}
+
+	for i, sample := range s {
+		if !sample.Equal(o[i]) {
+			return false
+		}
+	}
+	return true
+}
+
+// SampleStream is a stream of Values belonging to an attached Metric.
+type SampleStream struct {
+	Metric Metric       `json:"metric"`
+	Values []SamplePair `json:"values"`
+}
+
+func (ss SampleStream) String() string {
+	vals := make([]string, len(ss.Values))
+	for i, v := range ss.Values {
+		vals[i] = v.String()
+	}
+	return fmt.Sprintf("%s =>\n%s", ss.Metric, strings.Join(vals, "\n"))
+}
+
+// Value is a generic interface for values resulting from a query evaluation.
+type Value interface {
+	Type() ValueType
+	String() string
+}
+
+func (Matrix) Type() ValueType  { return ValMatrix }
+func (Vector) Type() ValueType  { return ValVector }
+func (*Scalar) Type() ValueType { return ValScalar }
+func (*String) Type() ValueType { return ValString }
+
+// ValueType enumerates the kinds of Value a query evaluation can produce.
+type ValueType int
+
+const (
+	ValNone ValueType = iota
+	ValScalar
+	ValVector
+	ValMatrix
+	ValString
+)
+
+// MarshalJSON implements json.Marshaler.
+func (et ValueType) MarshalJSON() ([]byte, error) {
+	return json.Marshal(et.String())
+}
+
+func (et *ValueType) UnmarshalJSON(b []byte) error {
+	var s string
+	if err := json.Unmarshal(b, &s); err != nil {
+		return err
+	}
+	switch s {
+	case "":
+		*et = ValNone
+	case "scalar":
+		*et = ValScalar
+	case "vector":
+		*et = ValVector
+	case "matrix":
+		*et = ValMatrix
+	case "string":
+		*et = ValString
+	default:
+		return fmt.Errorf("unknown value type %q", s)
+	}
+	return nil
+}
+
+func (e ValueType) String() string {
+	switch e {
+	case ValNone:
+		return ""
+	case ValScalar:
+		return "scalar"
+	case ValVector:
+		return "vector"
+	case ValMatrix:
+		return "matrix"
+	case ValString:
+		return "string"
+	}
+	panic("ValueType.String: unhandled value type")
+}
+
+// Scalar is a scalar value evaluated at the set timestamp.
+type Scalar struct {
+	Value     SampleValue `json:"value"`
+	Timestamp Time        `json:"timestamp"`
+}
+
+func (s Scalar) String() string {
+	return fmt.Sprintf("scalar: %v @[%v]", s.Value, s.Timestamp)
+}
+
+// MarshalJSON implements json.Marshaler.
+func (s Scalar) MarshalJSON() ([]byte, error) {
+	v := strconv.FormatFloat(float64(s.Value), 'f', -1, 64)
+	return json.Marshal([...]interface{}{s.Timestamp, string(v)})
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
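+//
+// The expected wire format is a two-element array pairing a timestamp with a
+// string-encoded value; illustratively (a sketch with made-up numbers):
+//
+//	[1435781451.781,"3.14"]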
+func (s *Scalar) UnmarshalJSON(b []byte) error { + var f string + v := [...]interface{}{&s.Timestamp, &f} + + if err := json.Unmarshal(b, &v); err != nil { + return err + } + + value, err := strconv.ParseFloat(f, 64) + if err != nil { + return fmt.Errorf("error parsing sample value: %s", err) + } + s.Value = SampleValue(value) + return nil +} + +// String is a string value evaluated at the set timestamp. +type String struct { + Value string `json:"value"` + Timestamp Time `json:"timestamp"` +} + +func (s *String) String() string { + return s.Value +} + +// MarshalJSON implements json.Marshaler. +func (s String) MarshalJSON() ([]byte, error) { + return json.Marshal([]interface{}{s.Timestamp, s.Value}) +} + +// UnmarshalJSON implements json.Unmarshaler. +func (s *String) UnmarshalJSON(b []byte) error { + v := [...]interface{}{&s.Timestamp, &s.Value} + return json.Unmarshal(b, &v) +} + +// Vector is basically only an alias for Samples, but the +// contract is that in a Vector, all Samples have the same timestamp. +type Vector []*Sample + +func (vec Vector) String() string { + entries := make([]string, len(vec)) + for i, s := range vec { + entries[i] = s.String() + } + return strings.Join(entries, "\n") +} + +func (vec Vector) Len() int { return len(vec) } +func (vec Vector) Swap(i, j int) { vec[i], vec[j] = vec[j], vec[i] } + +// Less compares first the metrics, then the timestamp. +func (vec Vector) Less(i, j int) bool { + switch { + case vec[i].Metric.Before(vec[j].Metric): + return true + case vec[j].Metric.Before(vec[i].Metric): + return false + case vec[i].Timestamp.Before(vec[j].Timestamp): + return true + default: + return false + } +} + +// Equal compares two sets of samples and returns true if they are equal. +func (vec Vector) Equal(o Vector) bool { + if len(vec) != len(o) { + return false + } + + for i, sample := range vec { + if !sample.Equal(o[i]) { + return false + } + } + return true +} + +// Matrix is a list of time series. +type Matrix []*SampleStream + +func (m Matrix) Len() int { return len(m) } +func (m Matrix) Less(i, j int) bool { return m[i].Metric.Before(m[j].Metric) } +func (m Matrix) Swap(i, j int) { m[i], m[j] = m[j], m[i] } + +func (mat Matrix) String() string { + matCp := make(Matrix, len(mat)) + copy(matCp, mat) + sort.Sort(matCp) + + strs := make([]string, len(matCp)) + + for i, ss := range matCp { + strs[i] = ss.String() + } + + return strings.Join(strs, "\n") +} diff --git a/vendor/github.com/prometheus/procfs/LICENSE b/vendor/github.com/prometheus/procfs/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/prometheus/procfs/NOTICE b/vendor/github.com/prometheus/procfs/NOTICE
new file mode 100644
index 000000000..53c5e9aa1
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/NOTICE
@@ -0,0 +1,7 @@
+procfs provides functions to retrieve system, kernel and process
+metrics from the pseudo-filesystem proc.
+
+Copyright 2014-2015 The Prometheus Authors
+
+This product includes software developed at
+SoundCloud Ltd. (http://soundcloud.com/).
diff --git a/vendor/github.com/prometheus/procfs/buddyinfo.go b/vendor/github.com/prometheus/procfs/buddyinfo.go
new file mode 100644
index 000000000..63d4229a4
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/buddyinfo.go
@@ -0,0 +1,85 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"os"
+	"strconv"
+	"strings"
+)
+
+// A BuddyInfo is the details parsed from /proc/buddyinfo.
+// The data is composed of an array of free fragment counts, one per size.
+// The sizes are 2^n*PAGE_SIZE, where n is the array index.
+type BuddyInfo struct {
+	Node  string
+	Zone  string
+	Sizes []float64
+}
+
+// BuddyInfo reads the buddyinfo statistics from the specified `proc` filesystem.
+func (fs FS) BuddyInfo() ([]BuddyInfo, error) {
+	file, err := os.Open(fs.proc.Path("buddyinfo"))
+	if err != nil {
+		return nil, err
+	}
+	defer file.Close()
+
+	return parseBuddyInfo(file)
+}
+
+func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) {
+	var (
+		buddyInfo   = []BuddyInfo{}
+		scanner     = bufio.NewScanner(r)
+		bucketCount = -1
+	)
+
+	for scanner.Scan() {
+		var err error
+		line := scanner.Text()
+		parts := strings.Fields(line)
+
+		if len(parts) < 4 {
+			return nil, fmt.Errorf("invalid number of fields when parsing buddyinfo")
+		}
+
+		node := strings.TrimRight(parts[1], ",")
+		zone := strings.TrimRight(parts[3], ",")
+		arraySize := len(parts[4:])
+
+		if bucketCount == -1 {
+			bucketCount = arraySize
+		} else {
+			if bucketCount != arraySize {
+				return nil, fmt.Errorf("mismatch in number of buddyinfo buckets, previous count %d, new count %d", bucketCount, arraySize)
+			}
+		}
+
+		sizes := make([]float64, arraySize)
+		for i := 0; i < arraySize; i++ {
+			sizes[i], err = strconv.ParseFloat(parts[i+4], 64)
+			if err != nil {
+				return nil, fmt.Errorf("invalid value in buddyinfo: %s", err)
+			}
+		}
+
+		buddyInfo = append(buddyInfo, BuddyInfo{node, zone, sizes})
+	}
+
+	return buddyInfo, scanner.Err()
+}
diff --git a/vendor/github.com/prometheus/procfs/doc.go b/vendor/github.com/prometheus/procfs/doc.go
new file mode 100644
index 000000000..e2acd6d40
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/doc.go
@@ -0,0 +1,45 @@
+// Copyright 2014 Prometheus Team
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package procfs provides functions to retrieve system, kernel and process +// metrics from the pseudo-filesystem proc. +// +// Example: +// +// package main +// +// import ( +// "fmt" +// "log" +// +// "github.com/prometheus/procfs" +// ) +// +// func main() { +// p, err := procfs.Self() +// if err != nil { +// log.Fatalf("could not get process: %s", err) +// } +// +// stat, err := p.NewStat() +// if err != nil { +// log.Fatalf("could not get process stat: %s", err) +// } +// +// fmt.Printf("command: %s\n", stat.Comm) +// fmt.Printf("cpu time: %fs\n", stat.CPUTime()) +// fmt.Printf("vsize: %dB\n", stat.VirtualMemory()) +// fmt.Printf("rss: %dB\n", stat.ResidentMemory()) +// } +// +package procfs diff --git a/vendor/github.com/prometheus/procfs/fs.go b/vendor/github.com/prometheus/procfs/fs.go new file mode 100644 index 000000000..0102ab0fd --- /dev/null +++ b/vendor/github.com/prometheus/procfs/fs.go @@ -0,0 +1,43 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "github.com/prometheus/procfs/internal/fs" +) + +// FS represents the pseudo-filesystem sys, which provides an interface to +// kernel data structures. +type FS struct { + proc fs.FS +} + +// DefaultMountPoint is the common mount point of the proc filesystem. +const DefaultMountPoint = fs.DefaultProcMountPoint + +// NewDefaultFS returns a new proc FS mounted under the default proc mountPoint. +// It will error if the mount point directory can't be read or is a file. +func NewDefaultFS() (FS, error) { + return NewFS(DefaultMountPoint) +} + +// NewFS returns a new proc FS mounted under the given proc mountPoint. It will error +// if the mount point directory can't be read or is a file. +func NewFS(mountPoint string) (FS, error) { + fs, err := fs.NewFS(mountPoint) + if err != nil { + return FS{}, err + } + return FS{fs}, nil +} diff --git a/vendor/github.com/prometheus/procfs/internal/fs/fs.go b/vendor/github.com/prometheus/procfs/internal/fs/fs.go new file mode 100644 index 000000000..7ddfd6b6e --- /dev/null +++ b/vendor/github.com/prometheus/procfs/internal/fs/fs.go @@ -0,0 +1,55 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fs
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+)
+
+const (
+	// DefaultProcMountPoint is the common mount point of the proc filesystem.
+	DefaultProcMountPoint = "/proc"
+
+	// DefaultSysMountPoint is the common mount point of the sys filesystem.
+	DefaultSysMountPoint = "/sys"
+
+	// DefaultConfigfsMountPoint is the common mount point of the configfs.
+	DefaultConfigfsMountPoint = "/sys/kernel/config"
+)
+
+// FS represents a pseudo-filesystem, normally /proc or /sys, which provides an
+// interface to kernel data structures.
+type FS string
+
+// NewFS returns a new FS mounted under the given mountPoint. It will error
+// if the mount point can't be read.
+func NewFS(mountPoint string) (FS, error) {
+	info, err := os.Stat(mountPoint)
+	if err != nil {
+		return "", fmt.Errorf("could not read %s: %s", mountPoint, err)
+	}
+	if !info.IsDir() {
+		return "", fmt.Errorf("mount point %s is not a directory", mountPoint)
+	}
+
+	return FS(mountPoint), nil
+}
+
+// Path appends the given path elements to the filesystem path, adding separators
+// as necessary.
+func (fs FS) Path(p ...string) string {
+	return filepath.Join(append([]string{string(fs)}, p...)...)
+}
diff --git a/vendor/github.com/prometheus/procfs/ipvs.go b/vendor/github.com/prometheus/procfs/ipvs.go
new file mode 100644
index 000000000..2d6cb8d1c
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/ipvs.go
@@ -0,0 +1,239 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bufio"
+	"encoding/hex"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net"
+	"os"
+	"strconv"
+	"strings"
+)
+
+// IPVSStats holds IPVS statistics, as exposed by the kernel in `/proc/net/ip_vs_stats`.
+type IPVSStats struct {
+	// Total count of connections.
+	Connections uint64
+	// Total incoming packets processed.
+	IncomingPackets uint64
+	// Total outgoing packets processed.
+	OutgoingPackets uint64
+	// Total incoming traffic.
+	IncomingBytes uint64
+	// Total outgoing traffic.
+	OutgoingBytes uint64
+}
+
+// IPVSBackendStatus holds current metrics of one virtual / real address pair.
+type IPVSBackendStatus struct {
+	// The local (virtual) IP address.
+	LocalAddress net.IP
+	// The remote (real) IP address.
+	RemoteAddress net.IP
+	// The local (virtual) port.
+	LocalPort uint16
+	// The remote (real) port.
+	RemotePort uint16
+	// The local firewall mark.
+	LocalMark string
+	// The transport protocol (TCP, UDP).
+	Proto string
+	// The current number of active connections for this virtual/real address pair.
+ ActiveConn uint64 + // The current number of inactive connections for this virtual/real address pair. + InactConn uint64 + // The current weight of this virtual/real address pair. + Weight uint64 +} + +// IPVSStats reads the IPVS statistics from the specified `proc` filesystem. +func (fs FS) IPVSStats() (IPVSStats, error) { + file, err := os.Open(fs.proc.Path("net/ip_vs_stats")) + if err != nil { + return IPVSStats{}, err + } + defer file.Close() + + return parseIPVSStats(file) +} + +// parseIPVSStats performs the actual parsing of `ip_vs_stats`. +func parseIPVSStats(file io.Reader) (IPVSStats, error) { + var ( + statContent []byte + statLines []string + statFields []string + stats IPVSStats + ) + + statContent, err := ioutil.ReadAll(file) + if err != nil { + return IPVSStats{}, err + } + + statLines = strings.SplitN(string(statContent), "\n", 4) + if len(statLines) != 4 { + return IPVSStats{}, errors.New("ip_vs_stats corrupt: too short") + } + + statFields = strings.Fields(statLines[2]) + if len(statFields) != 5 { + return IPVSStats{}, errors.New("ip_vs_stats corrupt: unexpected number of fields") + } + + stats.Connections, err = strconv.ParseUint(statFields[0], 16, 64) + if err != nil { + return IPVSStats{}, err + } + stats.IncomingPackets, err = strconv.ParseUint(statFields[1], 16, 64) + if err != nil { + return IPVSStats{}, err + } + stats.OutgoingPackets, err = strconv.ParseUint(statFields[2], 16, 64) + if err != nil { + return IPVSStats{}, err + } + stats.IncomingBytes, err = strconv.ParseUint(statFields[3], 16, 64) + if err != nil { + return IPVSStats{}, err + } + stats.OutgoingBytes, err = strconv.ParseUint(statFields[4], 16, 64) + if err != nil { + return IPVSStats{}, err + } + + return stats, nil +} + +// IPVSBackendStatus reads and returns the status of all (virtual,real) server pairs from the specified `proc` filesystem. 
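+//
+// Typical use goes through an FS handle; as an illustrative sketch (assuming
+// proc is mounted at the default /proc):
+//
+//	fs, err := procfs.NewDefaultFS()
+//	if err != nil {
+//		// handle error
+//	}
+//	backends, err := fs.IPVSBackendStatus()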
+func (fs FS) IPVSBackendStatus() ([]IPVSBackendStatus, error) { + file, err := os.Open(fs.proc.Path("net/ip_vs")) + if err != nil { + return nil, err + } + defer file.Close() + + return parseIPVSBackendStatus(file) +} + +func parseIPVSBackendStatus(file io.Reader) ([]IPVSBackendStatus, error) { + var ( + status []IPVSBackendStatus + scanner = bufio.NewScanner(file) + proto string + localMark string + localAddress net.IP + localPort uint16 + err error + ) + + for scanner.Scan() { + fields := strings.Fields(scanner.Text()) + if len(fields) == 0 { + continue + } + switch { + case fields[0] == "IP" || fields[0] == "Prot" || fields[1] == "RemoteAddress:Port": + continue + case fields[0] == "TCP" || fields[0] == "UDP": + if len(fields) < 2 { + continue + } + proto = fields[0] + localMark = "" + localAddress, localPort, err = parseIPPort(fields[1]) + if err != nil { + return nil, err + } + case fields[0] == "FWM": + if len(fields) < 2 { + continue + } + proto = fields[0] + localMark = fields[1] + localAddress = nil + localPort = 0 + case fields[0] == "->": + if len(fields) < 6 { + continue + } + remoteAddress, remotePort, err := parseIPPort(fields[1]) + if err != nil { + return nil, err + } + weight, err := strconv.ParseUint(fields[3], 10, 64) + if err != nil { + return nil, err + } + activeConn, err := strconv.ParseUint(fields[4], 10, 64) + if err != nil { + return nil, err + } + inactConn, err := strconv.ParseUint(fields[5], 10, 64) + if err != nil { + return nil, err + } + status = append(status, IPVSBackendStatus{ + LocalAddress: localAddress, + LocalPort: localPort, + LocalMark: localMark, + RemoteAddress: remoteAddress, + RemotePort: remotePort, + Proto: proto, + Weight: weight, + ActiveConn: activeConn, + InactConn: inactConn, + }) + } + } + return status, nil +} + +func parseIPPort(s string) (net.IP, uint16, error) { + var ( + ip net.IP + err error + ) + + switch len(s) { + case 13: + ip, err = hex.DecodeString(s[0:8]) + if err != nil { + return nil, 0, err + } + case 46: + ip = net.ParseIP(s[1:40]) + if ip == nil { + return nil, 0, fmt.Errorf("invalid IPv6 address: %s", s[1:40]) + } + default: + return nil, 0, fmt.Errorf("unexpected IP:Port: %s", s) + } + + portString := s[len(s)-4:] + if len(portString) != 4 { + return nil, 0, fmt.Errorf("unexpected port string format: %s", portString) + } + port, err := strconv.ParseUint(portString, 16, 16) + if err != nil { + return nil, 0, err + } + + return ip, uint16(port), nil +} diff --git a/vendor/github.com/prometheus/procfs/mdstat.go b/vendor/github.com/prometheus/procfs/mdstat.go new file mode 100644 index 000000000..2af3ada18 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/mdstat.go @@ -0,0 +1,194 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package procfs + +import ( + "fmt" + "io/ioutil" + "regexp" + "strconv" + "strings" +) + +var ( + statusLineRE = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[[U_]+\]`) + recoveryLineRE = regexp.MustCompile(`\((\d+)/\d+\)`) +) + +// MDStat holds info parsed from /proc/mdstat. +type MDStat struct { + // Name of the device. + Name string + // activity-state of the device. + ActivityState string + // Number of active disks. + DisksActive int64 + // Total number of disks the device requires. + DisksTotal int64 + // Number of failed disks. + DisksFailed int64 + // Spare disks in the device. + DisksSpare int64 + // Number of blocks the device holds. + BlocksTotal int64 + // Number of blocks on the device that are in sync. + BlocksSynced int64 +} + +// MDStat parses an mdstat-file (/proc/mdstat) and returns a slice of +// structs containing the relevant info. More information available here: +// https://raid.wiki.kernel.org/index.php/Mdstat +func (fs FS) MDStat() ([]MDStat, error) { + data, err := ioutil.ReadFile(fs.proc.Path("mdstat")) + if err != nil { + return nil, fmt.Errorf("error parsing mdstat %s: %s", fs.proc.Path("mdstat"), err) + } + mdstat, err := parseMDStat(data) + if err != nil { + return nil, fmt.Errorf("error parsing mdstat %s: %s", fs.proc.Path("mdstat"), err) + } + return mdstat, nil +} + +// parseMDStat parses data from mdstat file (/proc/mdstat) and returns a slice of +// structs containing the relevant info. +func parseMDStat(mdStatData []byte) ([]MDStat, error) { + mdStats := []MDStat{} + lines := strings.Split(string(mdStatData), "\n") + + for i, line := range lines { + if strings.TrimSpace(line) == "" || line[0] == ' ' || + strings.HasPrefix(line, "Personalities") || + strings.HasPrefix(line, "unused") { + continue + } + + deviceFields := strings.Fields(line) + if len(deviceFields) < 3 { + return nil, fmt.Errorf("not enough fields in mdline (expected at least 3): %s", line) + } + mdName := deviceFields[0] // mdx + state := deviceFields[2] // active or inactive + + if len(lines) <= i+3 { + return nil, fmt.Errorf( + "error parsing %s: too few lines for md device", + mdName, + ) + } + + // Failed disks have the suffix (F) & Spare disks have the suffix (S). + fail := int64(strings.Count(line, "(F)")) + spare := int64(strings.Count(line, "(S)")) + active, total, size, err := evalStatusLine(lines[i], lines[i+1]) + + if err != nil { + return nil, fmt.Errorf("error parsing md device lines: %s", err) + } + + syncLineIdx := i + 2 + if strings.Contains(lines[i+2], "bitmap") { // skip bitmap line + syncLineIdx++ + } + + // If device is syncing at the moment, get the number of currently + // synced bytes, otherwise that number equals the size of the device. + syncedBlocks := size + recovering := strings.Contains(lines[syncLineIdx], "recovery") + resyncing := strings.Contains(lines[syncLineIdx], "resync") + + // Append recovery and resyncing state info. + if recovering || resyncing { + if recovering { + state = "recovering" + } else { + state = "resyncing" + } + + // Handle case when resync=PENDING or resync=DELAYED. 
+			if strings.Contains(lines[syncLineIdx], "PENDING") ||
+				strings.Contains(lines[syncLineIdx], "DELAYED") {
+				syncedBlocks = 0
+			} else {
+				syncedBlocks, err = evalRecoveryLine(lines[syncLineIdx])
+				if err != nil {
+					return nil, fmt.Errorf("error parsing sync line in md device %s: %s", mdName, err)
+				}
+			}
+		}
+
+		mdStats = append(mdStats, MDStat{
+			Name:          mdName,
+			ActivityState: state,
+			DisksActive:   active,
+			DisksFailed:   fail,
+			DisksSpare:    spare,
+			DisksTotal:    total,
+			BlocksTotal:   size,
+			BlocksSynced:  syncedBlocks,
+		})
+	}
+
+	return mdStats, nil
+}
+
+func evalStatusLine(deviceLine, statusLine string) (active, total, size int64, err error) {
+	sizeStr := strings.Fields(statusLine)[0]
+	size, err = strconv.ParseInt(sizeStr, 10, 64)
+	if err != nil {
+		return 0, 0, 0, fmt.Errorf("unexpected statusLine %s: %s", statusLine, err)
+	}
+
+	if strings.Contains(deviceLine, "raid0") || strings.Contains(deviceLine, "linear") {
+		// In the device deviceLine, only disks have a number associated with them in [].
+		total = int64(strings.Count(deviceLine, "["))
+		return total, total, size, nil
+	}
+
+	if strings.Contains(deviceLine, "inactive") {
+		return 0, 0, size, nil
+	}
+
+	matches := statusLineRE.FindStringSubmatch(statusLine)
+	if len(matches) != 4 {
+		return 0, 0, 0, fmt.Errorf("couldn't find all the substring matches: %s", statusLine)
+	}
+
+	total, err = strconv.ParseInt(matches[2], 10, 64)
+	if err != nil {
+		return 0, 0, 0, fmt.Errorf("unexpected statusLine %s: %s", statusLine, err)
+	}
+
+	active, err = strconv.ParseInt(matches[3], 10, 64)
+	if err != nil {
+		return 0, 0, 0, fmt.Errorf("unexpected statusLine %s: %s", statusLine, err)
+	}
+
+	return active, total, size, nil
+}
+
+func evalRecoveryLine(recoveryLine string) (syncedBlocks int64, err error) {
+	matches := recoveryLineRE.FindStringSubmatch(recoveryLine)
+	if len(matches) != 2 {
+		return 0, fmt.Errorf("unexpected recoveryLine: %s", recoveryLine)
+	}
+
+	syncedBlocks, err = strconv.ParseInt(matches[1], 10, 64)
+	if err != nil {
+		return 0, fmt.Errorf("%s in recoveryLine: %s", err, recoveryLine)
+	}
+
+	return syncedBlocks, nil
+}
diff --git a/vendor/github.com/prometheus/procfs/mountinfo.go b/vendor/github.com/prometheus/procfs/mountinfo.go
new file mode 100644
index 000000000..61fa61887
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/mountinfo.go
@@ -0,0 +1,178 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"os"
+	"strconv"
+	"strings"
+)
+
+var validOptionalFields = map[string]bool{
+	"shared":         true,
+	"master":         true,
+	"propagate_from": true,
+	"unbindable":     true,
+}
+
+// A MountInfo is a type that describes the details and options of each mount,
+// parsed from /proc/self/mountinfo. The fields in each entry of
+// /proc/self/mountinfo are described in the following man page:
+// http://man7.org/linux/man-pages/man5/proc.5.html
+type MountInfo struct {
+	// Unique Id for the mount
+	MountId int
+	// The Id of the parent mount
+	ParentId int
+	// The value of `st_dev` for the files on this FS
+	MajorMinorVer string
+	// The pathname of the directory in the FS that forms
+	// the root for this mount
+	Root string
+	// The pathname of the mount point relative to the root
+	MountPoint string
+	// Mount options
+	Options map[string]string
+	// Zero or more optional fields
+	OptionalFields map[string]string
+	// The Filesystem type
+	FSType string
+	// FS specific information or "none"
+	Source string
+	// Superblock options
+	SuperOptions map[string]string
+}
+
+// getStringSliceElement returns the element of parts at idx if it exists,
+// else the given default value.
+func getStringSliceElement(parts []string, idx int, defaultValue string) string {
+	if idx >= len(parts) {
+		return defaultValue
+	}
+	return parts[idx]
+}
+
+// parseMountInfo reads each line of the mountinfo file and returns a list of
+// parsed MountInfo structs.
+func parseMountInfo(r io.Reader) ([]*MountInfo, error) {
+	mounts := []*MountInfo{}
+	scanner := bufio.NewScanner(r)
+	for scanner.Scan() {
+		mountString := scanner.Text()
+		parsedMounts, err := parseMountInfoString(mountString)
+		if err != nil {
+			return nil, err
+		}
+		mounts = append(mounts, parsedMounts)
+	}
+
+	err := scanner.Err()
+	return mounts, err
+}
+
+// parseMountInfoString parses a mountinfo file line and converts it to a
+// MountInfo struct. An important check is for the hyphen separator: if it
+// does not exist, the line is malformed.
+func parseMountInfoString(mountString string) (*MountInfo, error) {
+	var err error
+
+	// The number of optional fields can be zero, hence these checks to ensure
+	// we do not populate the wrong values in the wrong spots.
+	// Search for the separator as a standalone " - " field so that hyphens
+	// inside paths do not match.
+	separatorIndex := strings.Index(mountString, " - ")
+	if separatorIndex == -1 {
+		return nil, fmt.Errorf("no separator found in mountinfo string: %s", mountString)
+	}
+	beforeFields := strings.Fields(mountString[:separatorIndex])
+	afterFields := strings.Fields(mountString[separatorIndex+3:])
+	if (len(beforeFields) + len(afterFields)) < 7 {
+		return nil, fmt.Errorf("too few fields")
+	}
+
+	mount := &MountInfo{
+		MajorMinorVer:  getStringSliceElement(beforeFields, 2, ""),
+		Root:           getStringSliceElement(beforeFields, 3, ""),
+		MountPoint:     getStringSliceElement(beforeFields, 4, ""),
+		Options:        mountOptionsParser(getStringSliceElement(beforeFields, 5, "")),
+		OptionalFields: nil,
+		FSType:         getStringSliceElement(afterFields, 0, ""),
+		Source:         getStringSliceElement(afterFields, 1, ""),
+		SuperOptions:   mountOptionsParser(getStringSliceElement(afterFields, 2, "")),
+	}
+
+	mount.MountId, err = strconv.Atoi(getStringSliceElement(beforeFields, 0, ""))
+	if err != nil {
+		return nil, fmt.Errorf("failed to parse mount ID")
+	}
+	mount.ParentId, err = strconv.Atoi(getStringSliceElement(beforeFields, 1, ""))
+	if err != nil {
+		return nil, fmt.Errorf("failed to parse parent ID")
+	}
+	// The line may have optional fields, a space-separated list of values.
+	// Example: shared:2 master:7
+	if len(beforeFields) > 6 {
+		mount.OptionalFields = make(map[string]string)
+		optionalFields := beforeFields[6:]
+		for _, field := range optionalFields {
+			optionSplit := strings.Split(field, ":")
+			target, value := optionSplit[0], ""
+			if len(optionSplit) == 2 {
+				value = optionSplit[1]
+			}
+			// Check that the 'keys' of the optional fields in the mountinfo line are acceptable.
+			// Allowed 'keys' are shared, master, propagate_from, unbindable.
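+			// For example (illustrative), the field "shared:2" yields target
+			// "shared" with value "2", while "unbindable" carries no value.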
+			if _, ok := validOptionalFields[target]; ok {
+				mount.OptionalFields[target] = value
+			}
+		}
+	}
+	return mount, nil
+}
+
+// mountOptionsParser parses the mount options and superblock options into a map.
+func mountOptionsParser(mountOptions string) map[string]string {
+	opts := make(map[string]string)
+	options := strings.Split(mountOptions, ",")
+	for _, opt := range options {
+		splitOption := strings.Split(opt, "=")
+		if len(splitOption) < 2 {
+			key := splitOption[0]
+			opts[key] = ""
+		} else {
+			key, value := splitOption[0], splitOption[1]
+			opts[key] = value
+		}
+	}
+	return opts
+}
+
+// GetMounts retrieves mountinfo information from `/proc/self/mountinfo`.
+func GetMounts() ([]*MountInfo, error) {
+	f, err := os.Open("/proc/self/mountinfo")
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+	return parseMountInfo(f)
+}
+
+// GetProcMounts retrieves mountinfo information from a process's
+// `/proc/<pid>/mountinfo`.
+func GetProcMounts(pid int) ([]*MountInfo, error) {
+	f, err := os.Open(fmt.Sprintf("/proc/%d/mountinfo", pid))
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+	return parseMountInfo(f)
+}
diff --git a/vendor/github.com/prometheus/procfs/mountstats.go b/vendor/github.com/prometheus/procfs/mountstats.go
new file mode 100644
index 000000000..35b2ef351
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/mountstats.go
@@ -0,0 +1,621 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+// While implementing parsing of /proc/[pid]/mountstats, this blog was used
+// heavily as a reference:
+// https://utcc.utoronto.ca/~cks/space/blog/linux/NFSMountstatsIndex
+//
+// Special thanks to Chris Siebenmann for all of his posts explaining the
+// various statistics available for NFS.
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"strconv"
+	"strings"
+	"time"
+)
+
+// Constants shared between multiple functions.
+const (
+	deviceEntryLen = 8
+
+	fieldBytesLen  = 8
+	fieldEventsLen = 27
+
+	statVersion10 = "1.0"
+	statVersion11 = "1.1"
+
+	fieldTransport10TCPLen = 10
+	fieldTransport10UDPLen = 7
+
+	fieldTransport11TCPLen = 13
+	fieldTransport11UDPLen = 10
+)
+
+// A Mount is a device mount parsed from /proc/[pid]/mountstats.
+type Mount struct {
+	// Name of the device.
+	Device string
+	// The mount point of the device.
+	Mount string
+	// The filesystem type used by the device.
+	Type string
+	// If available, additional statistics related to this Mount.
+	// Use a type assertion to determine if additional statistics are available.
+	Stats MountStats
+}
+
+// A MountStats is a type which contains detailed statistics for a specific
+// type of Mount.
+type MountStats interface {
+	mountStats()
+}
+
+// A MountStatsNFS is a MountStats implementation for NFSv3 and v4 mounts.
+type MountStatsNFS struct {
+	// The version of statistics provided.
+	StatVersion string
+	// The mount options of the NFS mount.
+	Opts map[string]string
+	// The age of the NFS mount.
+	Age time.Duration
+	// Statistics related to byte counters for various operations.
+ Bytes NFSBytesStats + // Statistics related to various NFS event occurrences. + Events NFSEventsStats + // Statistics broken down by filesystem operation. + Operations []NFSOperationStats + // Statistics about the NFS RPC transport. + Transport NFSTransportStats +} + +// mountStats implements MountStats. +func (m MountStatsNFS) mountStats() {} + +// A NFSBytesStats contains statistics about the number of bytes read and written +// by an NFS client to and from an NFS server. +type NFSBytesStats struct { + // Number of bytes read using the read() syscall. + Read uint64 + // Number of bytes written using the write() syscall. + Write uint64 + // Number of bytes read using the read() syscall in O_DIRECT mode. + DirectRead uint64 + // Number of bytes written using the write() syscall in O_DIRECT mode. + DirectWrite uint64 + // Number of bytes read from the NFS server, in total. + ReadTotal uint64 + // Number of bytes written to the NFS server, in total. + WriteTotal uint64 + // Number of pages read directly via mmap()'d files. + ReadPages uint64 + // Number of pages written directly via mmap()'d files. + WritePages uint64 +} + +// A NFSEventsStats contains statistics about NFS event occurrences. +type NFSEventsStats struct { + // Number of times cached inode attributes are re-validated from the server. + InodeRevalidate uint64 + // Number of times cached dentry nodes are re-validated from the server. + DnodeRevalidate uint64 + // Number of times an inode cache is cleared. + DataInvalidate uint64 + // Number of times cached inode attributes are invalidated. + AttributeInvalidate uint64 + // Number of times files or directories have been open()'d. + VFSOpen uint64 + // Number of times a directory lookup has occurred. + VFSLookup uint64 + // Number of times permissions have been checked. + VFSAccess uint64 + // Number of updates (and potential writes) to pages. + VFSUpdatePage uint64 + // Number of pages read directly via mmap()'d files. + VFSReadPage uint64 + // Number of times a group of pages have been read. + VFSReadPages uint64 + // Number of pages written directly via mmap()'d files. + VFSWritePage uint64 + // Number of times a group of pages have been written. + VFSWritePages uint64 + // Number of times directory entries have been read with getdents(). + VFSGetdents uint64 + // Number of times attributes have been set on inodes. + VFSSetattr uint64 + // Number of pending writes that have been forcefully flushed to the server. + VFSFlush uint64 + // Number of times fsync() has been called on directories and files. + VFSFsync uint64 + // Number of times locking has been attempted on a file. + VFSLock uint64 + // Number of times files have been closed and released. + VFSFileRelease uint64 + // Unknown. Possibly unused. + CongestionWait uint64 + // Number of times files have been truncated. + Truncation uint64 + // Number of times a file has been grown due to writes beyond its existing end. + WriteExtension uint64 + // Number of times a file was removed while still open by another process. + SillyRename uint64 + // Number of times the NFS server gave less data than expected while reading. + ShortRead uint64 + // Number of times the NFS server wrote less data than expected while writing. + ShortWrite uint64 + // Number of times the NFS server indicated EJUKEBOX; retrieving data from + // offline storage. + JukeboxDelay uint64 + // Number of NFS v4.1+ pNFS reads. + PNFSRead uint64 + // Number of NFS v4.1+ pNFS writes. 
+	PNFSWrite uint64
+}
+
+// A NFSOperationStats contains statistics for a single operation.
+type NFSOperationStats struct {
+	// The name of the operation.
+	Operation string
+	// Number of requests performed for this operation.
+	Requests uint64
+	// Number of times an actual RPC request has been transmitted for this operation.
+	Transmissions uint64
+	// Number of times a request has had a major timeout.
+	MajorTimeouts uint64
+	// Number of bytes sent for this operation, including RPC headers and payload.
+	BytesSent uint64
+	// Number of bytes received for this operation, including RPC headers and payload.
+	BytesReceived uint64
+	// Duration all requests spent queued for transmission before they were sent.
+	CumulativeQueueMilliseconds uint64
+	// Duration it took to get a reply back after the request was transmitted.
+	CumulativeTotalResponseMilliseconds uint64
+	// Duration from when a request was enqueued to when it was completely handled.
+	CumulativeTotalRequestMilliseconds uint64
+}
+
+// A NFSTransportStats contains statistics for the NFS mount RPC requests and
+// responses.
+type NFSTransportStats struct {
+	// The transport protocol used for the NFS mount.
+	Protocol string
+	// The local port used for the NFS mount.
+	Port uint64
+	// Number of times the client has had to establish a connection from scratch
+	// to the NFS server.
+	Bind uint64
+	// Number of times the client has made a TCP connection to the NFS server.
+	Connect uint64
+	// Duration (in jiffies, a kernel internal unit of time) the NFS mount has
+	// spent waiting for connections to the server to be established.
+	ConnectIdleTime uint64
+	// Duration since the NFS mount last saw any RPC traffic.
+	IdleTimeSeconds uint64
+	// Number of RPC requests for this mount sent to the NFS server.
+	Sends uint64
+	// Number of RPC responses for this mount received from the NFS server.
+	Receives uint64
+	// Number of times the NFS server sent a response with a transaction ID
+	// unknown to this client.
+	BadTransactionIDs uint64
+	// A running counter, incremented on each request as the current difference
+	// between sends and receives.
+	CumulativeActiveRequests uint64
+	// A running counter, incremented on each request by the current backlog
+	// queue size.
+	CumulativeBacklog uint64
+
+	// Stats below only available with stat version 1.1.
+
+	// Maximum number of simultaneously active RPC requests ever used.
+	MaximumRPCSlotsUsed uint64
+	// A running counter, incremented on each request as the current size of the
+	// sending queue.
+	CumulativeSendingQueue uint64
+	// A running counter, incremented on each request as the current size of the
+	// pending queue.
+	CumulativePendingQueue uint64
+}
+
+// parseMountStats parses a /proc/[pid]/mountstats file and returns a slice
+// of Mount structures containing detailed information about each mount.
+// If available, statistics for each mount are parsed as well.
+func parseMountStats(r io.Reader) ([]*Mount, error) {
+	const (
+		device            = "device"
+		statVersionPrefix = "statvers="
+
+		nfs3Type = "nfs"
+		nfs4Type = "nfs4"
+	)
+
+	var mounts []*Mount
+
+	s := bufio.NewScanner(r)
+	for s.Scan() {
+		// Only look for device entries in this function
+		ss := strings.Fields(string(s.Bytes()))
+		if len(ss) == 0 || ss[0] != device {
+			continue
+		}
+
+		m, err := parseMount(ss)
+		if err != nil {
+			return nil, err
+		}
+
+		// Does this mount also possess statistics information?
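+		// An illustrative entry carrying statistics (format assumed from the
+		// parsing below): "device fs:/export mounted on /mnt with fstype nfs4 statvers=1.1".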
+ if len(ss) > deviceEntryLen { + // Only NFSv3 and v4 are supported for parsing statistics + if m.Type != nfs3Type && m.Type != nfs4Type { + return nil, fmt.Errorf("cannot parse MountStats for fstype %q", m.Type) + } + + statVersion := strings.TrimPrefix(ss[8], statVersionPrefix) + + stats, err := parseMountStatsNFS(s, statVersion) + if err != nil { + return nil, err + } + + m.Stats = stats + } + + mounts = append(mounts, m) + } + + return mounts, s.Err() +} + +// parseMount parses an entry in /proc/[pid]/mountstats in the format: +// device [device] mounted on [mount] with fstype [type] +func parseMount(ss []string) (*Mount, error) { + if len(ss) < deviceEntryLen { + return nil, fmt.Errorf("invalid device entry: %v", ss) + } + + // Check for specific words appearing at specific indices to ensure + // the format is consistent with what we expect + format := []struct { + i int + s string + }{ + {i: 0, s: "device"}, + {i: 2, s: "mounted"}, + {i: 3, s: "on"}, + {i: 5, s: "with"}, + {i: 6, s: "fstype"}, + } + + for _, f := range format { + if ss[f.i] != f.s { + return nil, fmt.Errorf("invalid device entry: %v", ss) + } + } + + return &Mount{ + Device: ss[1], + Mount: ss[4], + Type: ss[7], + }, nil +} + +// parseMountStatsNFS parses a MountStatsNFS by scanning additional information +// related to NFS statistics. +func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, error) { + // Field indicators for parsing specific types of data + const ( + fieldOpts = "opts:" + fieldAge = "age:" + fieldBytes = "bytes:" + fieldEvents = "events:" + fieldPerOpStats = "per-op" + fieldTransport = "xprt:" + ) + + stats := &MountStatsNFS{ + StatVersion: statVersion, + } + + for s.Scan() { + ss := strings.Fields(string(s.Bytes())) + if len(ss) == 0 { + break + } + if len(ss) < 2 { + return nil, fmt.Errorf("not enough information for NFS stats: %v", ss) + } + + switch ss[0] { + case fieldOpts: + if stats.Opts == nil { + stats.Opts = map[string]string{} + } + for _, opt := range strings.Split(ss[1], ",") { + split := strings.Split(opt, "=") + if len(split) == 2 { + stats.Opts[split[0]] = split[1] + } else { + stats.Opts[opt] = "" + } + } + case fieldAge: + // Age integer is in seconds + d, err := time.ParseDuration(ss[1] + "s") + if err != nil { + return nil, err + } + + stats.Age = d + case fieldBytes: + bstats, err := parseNFSBytesStats(ss[1:]) + if err != nil { + return nil, err + } + + stats.Bytes = *bstats + case fieldEvents: + estats, err := parseNFSEventsStats(ss[1:]) + if err != nil { + return nil, err + } + + stats.Events = *estats + case fieldTransport: + if len(ss) < 3 { + return nil, fmt.Errorf("not enough information for NFS transport stats: %v", ss) + } + + tstats, err := parseNFSTransportStats(ss[1:], statVersion) + if err != nil { + return nil, err + } + + stats.Transport = *tstats + } + + // When encountering "per-operation statistics", we must break this + // loop and parse them separately to ensure we can terminate parsing + // before reaching another device entry; hence why this 'if' statement + // is not just another switch case + if ss[0] == fieldPerOpStats { + break + } + } + + if err := s.Err(); err != nil { + return nil, err + } + + // NFS per-operation stats appear last before the next device entry + perOpStats, err := parseNFSOperationStats(s) + if err != nil { + return nil, err + } + + stats.Operations = perOpStats + + return stats, nil +} + +// parseNFSBytesStats parses a NFSBytesStats line using an input set of +// integer fields. 
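+// A hypothetical input, matching the eight counters of NFSBytesStats in order:
+//
+//	[]string{"1207640230", "0", "0", "0", "1210214218", "0", "295483", "0"}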
+func parseNFSBytesStats(ss []string) (*NFSBytesStats, error) { + if len(ss) != fieldBytesLen { + return nil, fmt.Errorf("invalid NFS bytes stats: %v", ss) + } + + ns := make([]uint64, 0, fieldBytesLen) + for _, s := range ss { + n, err := strconv.ParseUint(s, 10, 64) + if err != nil { + return nil, err + } + + ns = append(ns, n) + } + + return &NFSBytesStats{ + Read: ns[0], + Write: ns[1], + DirectRead: ns[2], + DirectWrite: ns[3], + ReadTotal: ns[4], + WriteTotal: ns[5], + ReadPages: ns[6], + WritePages: ns[7], + }, nil +} + +// parseNFSEventsStats parses a NFSEventsStats line using an input set of +// integer fields. +func parseNFSEventsStats(ss []string) (*NFSEventsStats, error) { + if len(ss) != fieldEventsLen { + return nil, fmt.Errorf("invalid NFS events stats: %v", ss) + } + + ns := make([]uint64, 0, fieldEventsLen) + for _, s := range ss { + n, err := strconv.ParseUint(s, 10, 64) + if err != nil { + return nil, err + } + + ns = append(ns, n) + } + + return &NFSEventsStats{ + InodeRevalidate: ns[0], + DnodeRevalidate: ns[1], + DataInvalidate: ns[2], + AttributeInvalidate: ns[3], + VFSOpen: ns[4], + VFSLookup: ns[5], + VFSAccess: ns[6], + VFSUpdatePage: ns[7], + VFSReadPage: ns[8], + VFSReadPages: ns[9], + VFSWritePage: ns[10], + VFSWritePages: ns[11], + VFSGetdents: ns[12], + VFSSetattr: ns[13], + VFSFlush: ns[14], + VFSFsync: ns[15], + VFSLock: ns[16], + VFSFileRelease: ns[17], + CongestionWait: ns[18], + Truncation: ns[19], + WriteExtension: ns[20], + SillyRename: ns[21], + ShortRead: ns[22], + ShortWrite: ns[23], + JukeboxDelay: ns[24], + PNFSRead: ns[25], + PNFSWrite: ns[26], + }, nil +} + +// parseNFSOperationStats parses a slice of NFSOperationStats by scanning +// additional information about per-operation statistics until an empty +// line is reached. +func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) { + const ( + // Number of expected fields in each per-operation statistics set + numFields = 9 + ) + + var ops []NFSOperationStats + + for s.Scan() { + ss := strings.Fields(string(s.Bytes())) + if len(ss) == 0 { + // Must break when reading a blank line after per-operation stats to + // enable top-level function to parse the next device entry + break + } + + if len(ss) != numFields { + return nil, fmt.Errorf("invalid NFS per-operations stats: %v", ss) + } + + // Skip string operation name for integers + ns := make([]uint64, 0, numFields-1) + for _, st := range ss[1:] { + n, err := strconv.ParseUint(st, 10, 64) + if err != nil { + return nil, err + } + + ns = append(ns, n) + } + + ops = append(ops, NFSOperationStats{ + Operation: strings.TrimSuffix(ss[0], ":"), + Requests: ns[0], + Transmissions: ns[1], + MajorTimeouts: ns[2], + BytesSent: ns[3], + BytesReceived: ns[4], + CumulativeQueueMilliseconds: ns[5], + CumulativeTotalResponseMilliseconds: ns[6], + CumulativeTotalRequestMilliseconds: ns[7], + }) + } + + return ops, s.Err() +} + +// parseNFSTransportStats parses a NFSTransportStats line using an input set of +// integer fields matched to a specific stats version. +func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats, error) { + // Extract the protocol field. 
It is the only string value in the line.
+	protocol := ss[0]
+	ss = ss[1:]
+
+	switch statVersion {
+	case statVersion10:
+		var expectedLength int
+		if protocol == "tcp" {
+			expectedLength = fieldTransport10TCPLen
+		} else if protocol == "udp" {
+			expectedLength = fieldTransport10UDPLen
+		} else {
+			return nil, fmt.Errorf("invalid NFS protocol \"%s\" in stats 1.0 statement: %v", protocol, ss)
+		}
+		if len(ss) != expectedLength {
+			return nil, fmt.Errorf("invalid NFS transport stats 1.0 statement: %v", ss)
+		}
+	case statVersion11:
+		var expectedLength int
+		if protocol == "tcp" {
+			expectedLength = fieldTransport11TCPLen
+		} else if protocol == "udp" {
+			expectedLength = fieldTransport11UDPLen
+		} else {
+			return nil, fmt.Errorf("invalid NFS protocol \"%s\" in stats 1.1 statement: %v", protocol, ss)
+		}
+		if len(ss) != expectedLength {
+			return nil, fmt.Errorf("invalid NFS transport stats 1.1 statement: %v", ss)
+		}
+	default:
+		return nil, fmt.Errorf("unrecognized NFS transport stats version: %q", statVersion)
+	}
+
+	// Allocate enough for v1.1 stats since zero value for v1.1 stats will be okay
+	// in a v1.0 response. Since the stat length is bigger for TCP stats, we use
+	// the TCP length here.
+	//
+	// Note: slice length must be set to length of v1.1 stats to avoid a panic when
+	// only v1.0 stats are present.
+	// See: https://github.com/prometheus/node_exporter/issues/571.
+	ns := make([]uint64, fieldTransport11TCPLen)
+	for i, s := range ss {
+		n, err := strconv.ParseUint(s, 10, 64)
+		if err != nil {
+			return nil, err
+		}
+
+		ns[i] = n
+	}
+
+	// The fields differ depending on the transport protocol (TCP or UDP)
+	// From https://utcc.utoronto.ca/%7Ecks/space/blog/linux/NFSMountstatsXprt
+	//
+	// For the udp RPC transport there is no connection count, connect idle time,
+	// or idle time (fields #3, #4, and #5); all other fields are the same. So
+	// we set them to 0 here.
+	if protocol == "udp" {
+		ns = append(ns[:2], append(make([]uint64, 3), ns[2:]...)...)
+	}
+
+	return &NFSTransportStats{
+		Protocol:                 protocol,
+		Port:                     ns[0],
+		Bind:                     ns[1],
+		Connect:                  ns[2],
+		ConnectIdleTime:          ns[3],
+		IdleTimeSeconds:          ns[4],
+		Sends:                    ns[5],
+		Receives:                 ns[6],
+		BadTransactionIDs:        ns[7],
+		CumulativeActiveRequests: ns[8],
+		CumulativeBacklog:        ns[9],
+		MaximumRPCSlotsUsed:      ns[10],
+		CumulativeSendingQueue:   ns[11],
+		CumulativePendingQueue:   ns[12],
+	}, nil
+}
diff --git a/vendor/github.com/prometheus/procfs/net_dev.go b/vendor/github.com/prometheus/procfs/net_dev.go
new file mode 100644
index 000000000..a0b7a0119
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/net_dev.go
@@ -0,0 +1,206 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bufio"
+	"errors"
+	"os"
+	"sort"
+	"strconv"
+	"strings"
+)
+
+// NetDevLine is a single line parsed from /proc/net/dev or /proc/[pid]/net/dev.
+type NetDevLine struct {
+	Name string `json:"name"` // The name of the interface.
+ RxBytes uint64 `json:"rx_bytes"` // Cumulative count of bytes received. + RxPackets uint64 `json:"rx_packets"` // Cumulative count of packets received. + RxErrors uint64 `json:"rx_errors"` // Cumulative count of receive errors encountered. + RxDropped uint64 `json:"rx_dropped"` // Cumulative count of packets dropped while receiving. + RxFIFO uint64 `json:"rx_fifo"` // Cumulative count of FIFO buffer errors. + RxFrame uint64 `json:"rx_frame"` // Cumulative count of packet framing errors. + RxCompressed uint64 `json:"rx_compressed"` // Cumulative count of compressed packets received by the device driver. + RxMulticast uint64 `json:"rx_multicast"` // Cumulative count of multicast frames received by the device driver. + TxBytes uint64 `json:"tx_bytes"` // Cumulative count of bytes transmitted. + TxPackets uint64 `json:"tx_packets"` // Cumulative count of packets transmitted. + TxErrors uint64 `json:"tx_errors"` // Cumulative count of transmit errors encountered. + TxDropped uint64 `json:"tx_dropped"` // Cumulative count of packets dropped while transmitting. + TxFIFO uint64 `json:"tx_fifo"` // Cumulative count of FIFO buffer errors. + TxCollisions uint64 `json:"tx_collisions"` // Cumulative count of collisions detected on the interface. + TxCarrier uint64 `json:"tx_carrier"` // Cumulative count of carrier losses detected by the device driver. + TxCompressed uint64 `json:"tx_compressed"` // Cumulative count of compressed packets transmitted by the device driver. +} + +// NetDev is parsed from /proc/net/dev or /proc/[pid]/net/dev. The map keys +// are interface names. +type NetDev map[string]NetDevLine + +// NetDev returns kernel/system statistics read from /proc/net/dev. +func (fs FS) NetDev() (NetDev, error) { + return newNetDev(fs.proc.Path("net/dev")) +} + +// NetDev returns kernel/system statistics read from /proc/[pid]/net/dev. +func (p Proc) NetDev() (NetDev, error) { + return newNetDev(p.path("net/dev")) +} + +// newNetDev creates a new NetDev from the contents of the given file. +func newNetDev(file string) (NetDev, error) { + f, err := os.Open(file) + if err != nil { + return NetDev{}, err + } + defer f.Close() + + netDev := NetDev{} + s := bufio.NewScanner(f) + for n := 0; s.Scan(); n++ { + // Skip the 2 header lines. + if n < 2 { + continue + } + + line, err := netDev.parseLine(s.Text()) + if err != nil { + return netDev, err + } + + netDev[line.Name] = *line + } + + return netDev, s.Err() +} + +// parseLine parses a single line from the /proc/net/dev file. Header lines +// must be filtered prior to calling this method. 
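+// An illustrative raw line (interface name, then 8 RX and 8 TX counters):
+//
+//	eth0: 68210035  520993    0    0    0     0          0         0  9315587  158601    0    0    0     0       0          0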
+func (netDev NetDev) parseLine(rawLine string) (*NetDevLine, error) { + parts := strings.SplitN(rawLine, ":", 2) + if len(parts) != 2 { + return nil, errors.New("invalid net/dev line, missing colon") + } + fields := strings.Fields(strings.TrimSpace(parts[1])) + + var err error + line := &NetDevLine{} + + // Interface Name + line.Name = strings.TrimSpace(parts[0]) + if line.Name == "" { + return nil, errors.New("invalid net/dev line, empty interface name") + } + + // RX + line.RxBytes, err = strconv.ParseUint(fields[0], 10, 64) + if err != nil { + return nil, err + } + line.RxPackets, err = strconv.ParseUint(fields[1], 10, 64) + if err != nil { + return nil, err + } + line.RxErrors, err = strconv.ParseUint(fields[2], 10, 64) + if err != nil { + return nil, err + } + line.RxDropped, err = strconv.ParseUint(fields[3], 10, 64) + if err != nil { + return nil, err + } + line.RxFIFO, err = strconv.ParseUint(fields[4], 10, 64) + if err != nil { + return nil, err + } + line.RxFrame, err = strconv.ParseUint(fields[5], 10, 64) + if err != nil { + return nil, err + } + line.RxCompressed, err = strconv.ParseUint(fields[6], 10, 64) + if err != nil { + return nil, err + } + line.RxMulticast, err = strconv.ParseUint(fields[7], 10, 64) + if err != nil { + return nil, err + } + + // TX + line.TxBytes, err = strconv.ParseUint(fields[8], 10, 64) + if err != nil { + return nil, err + } + line.TxPackets, err = strconv.ParseUint(fields[9], 10, 64) + if err != nil { + return nil, err + } + line.TxErrors, err = strconv.ParseUint(fields[10], 10, 64) + if err != nil { + return nil, err + } + line.TxDropped, err = strconv.ParseUint(fields[11], 10, 64) + if err != nil { + return nil, err + } + line.TxFIFO, err = strconv.ParseUint(fields[12], 10, 64) + if err != nil { + return nil, err + } + line.TxCollisions, err = strconv.ParseUint(fields[13], 10, 64) + if err != nil { + return nil, err + } + line.TxCarrier, err = strconv.ParseUint(fields[14], 10, 64) + if err != nil { + return nil, err + } + line.TxCompressed, err = strconv.ParseUint(fields[15], 10, 64) + if err != nil { + return nil, err + } + + return line, nil +} + +// Total aggregates the values across interfaces and returns a new NetDevLine. +// The Name field will be a sorted comma separated list of interface names. 
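+// A minimal usage sketch (assuming netDev was obtained via fs.NetDev()):
+//
+//	total := netDev.Total()
+//	fmt.Printf("%s: %d bytes received\n", total.Name, total.RxBytes)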
+func (netDev NetDev) Total() NetDevLine {
+	total := NetDevLine{}
+
+	names := make([]string, 0, len(netDev))
+	for _, ifc := range netDev {
+		names = append(names, ifc.Name)
+		total.RxBytes += ifc.RxBytes
+		total.RxPackets += ifc.RxPackets
+		total.RxErrors += ifc.RxErrors
+		total.RxDropped += ifc.RxDropped
+		total.RxFIFO += ifc.RxFIFO
+		total.RxFrame += ifc.RxFrame
+		total.RxCompressed += ifc.RxCompressed
+		total.RxMulticast += ifc.RxMulticast
+		total.TxBytes += ifc.TxBytes
+		total.TxPackets += ifc.TxPackets
+		total.TxErrors += ifc.TxErrors
+		total.TxDropped += ifc.TxDropped
+		total.TxFIFO += ifc.TxFIFO
+		total.TxCollisions += ifc.TxCollisions
+		total.TxCarrier += ifc.TxCarrier
+		total.TxCompressed += ifc.TxCompressed
+	}
+	sort.Strings(names)
+	total.Name = strings.Join(names, ", ")
+
+	return total
+}
diff --git a/vendor/github.com/prometheus/procfs/net_unix.go b/vendor/github.com/prometheus/procfs/net_unix.go
new file mode 100644
index 000000000..240340a83
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/net_unix.go
@@ -0,0 +1,275 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bufio"
+	"errors"
+	"fmt"
+	"io"
+	"os"
+	"strconv"
+	"strings"
+)
+
+// For the proc file format details,
+// see https://elixir.bootlin.com/linux/v4.17/source/net/unix/af_unix.c#L2815
+// and https://elixir.bootlin.com/linux/latest/source/include/uapi/linux/net.h#L48.
+
+const (
+	netUnixKernelPtrIdx = iota
+	netUnixRefCountIdx
+	_
+	netUnixFlagsIdx
+	netUnixTypeIdx
+	netUnixStateIdx
+	netUnixInodeIdx
+
+	// Inode and Path are optional.
+	netUnixStaticFieldsCnt = 6
+)
+
+const (
+	netUnixTypeStream    = 1
+	netUnixTypeDgram     = 2
+	netUnixTypeSeqpacket = 5
+
+	netUnixFlagListen = 1 << 16
+
+	netUnixStateUnconnected  = 1
+	netUnixStateConnecting   = 2
+	netUnixStateConnected    = 3
+	netUnixStateDisconnected = 4
+)
+
+var errInvalidKernelPtrFmt = errors.New("invalid Num (the kernel table slot number) format")
+
+// NetUnixType is the type of the type field.
+type NetUnixType uint64
+
+// NetUnixFlags is the type of the flags field.
+type NetUnixFlags uint64
+
+// NetUnixState is the type of the state field.
+type NetUnixState uint64
+
+// NetUnixLine represents a line of /proc/net/unix.
+type NetUnixLine struct {
+	KernelPtr string
+	RefCount  uint64
+	Protocol  uint64
+	Flags     NetUnixFlags
+	Type      NetUnixType
+	State     NetUnixState
+	Inode     uint64
+	Path      string
+}
+
+// NetUnix holds the data read from /proc/net/unix.
+type NetUnix struct {
+	Rows []*NetUnixLine
+}
+
+// NewNetUnix returns data read from /proc/net/unix.
+func NewNetUnix() (*NetUnix, error) {
+	fs, err := NewFS(DefaultMountPoint)
+	if err != nil {
+		return nil, err
+	}
+
+	return fs.NewNetUnix()
+}
+
+// NewNetUnix returns data read from /proc/net/unix.
+func (fs FS) NewNetUnix() (*NetUnix, error) {
+	return NewNetUnixByPath(fs.proc.Path("net/unix"))
+}
+
+// NewNetUnixByPath returns data read from /proc/net/unix by file path.
+// It may return an error with partially parsed data if an error occurs after
+// some data has been parsed.
+func NewNetUnixByPath(path string) (*NetUnix, error) {
+	f, err := os.Open(path)
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+	return NewNetUnixByReader(f)
+}
+
+// NewNetUnixByReader returns data read from /proc/net/unix by a reader.
+// It may return an error with partially parsed data if an error occurs after
+// some data has been parsed.
+func NewNetUnixByReader(reader io.Reader) (*NetUnix, error) {
+	nu := &NetUnix{
+		Rows: make([]*NetUnixLine, 0, 32),
+	}
+	scanner := bufio.NewScanner(reader)
+	// Omit the header line.
+	scanner.Scan()
+	header := scanner.Text()
+	// The man page of proc(5) does not mention an Inode field,
+	// but in practice it exists.
+	// This code works for both cases.
+	hasInode := strings.Contains(header, "Inode")
+
+	minFieldsCnt := netUnixStaticFieldsCnt
+	if hasInode {
+		minFieldsCnt++
+	}
+	for scanner.Scan() {
+		line := scanner.Text()
+		item, err := nu.parseLine(line, hasInode, minFieldsCnt)
+		if err != nil {
+			return nu, err
+		}
+		nu.Rows = append(nu.Rows, item)
+	}
+
+	return nu, scanner.Err()
+}
+
+func (u *NetUnix) parseLine(line string, hasInode bool, minFieldsCnt int) (*NetUnixLine, error) {
+	fields := strings.Fields(line)
+	fieldsLen := len(fields)
+	if fieldsLen < minFieldsCnt {
+		return nil, fmt.Errorf(
+			"Parse Unix domain failed: expected at least %d fields but got %d",
+			minFieldsCnt, fieldsLen)
+	}
+	kernelPtr, err := u.parseKernelPtr(fields[netUnixKernelPtrIdx])
+	if err != nil {
+		return nil, fmt.Errorf("Parse Unix domain num(%s) failed: %s", fields[netUnixKernelPtrIdx], err)
+	}
+	users, err := u.parseUsers(fields[netUnixRefCountIdx])
+	if err != nil {
+		return nil, fmt.Errorf("Parse Unix domain ref count(%s) failed: %s", fields[netUnixRefCountIdx], err)
+	}
+	flags, err := u.parseFlags(fields[netUnixFlagsIdx])
+	if err != nil {
+		return nil, fmt.Errorf("Parse Unix domain flags(%s) failed: %s", fields[netUnixFlagsIdx], err)
+	}
+	typ, err := u.parseType(fields[netUnixTypeIdx])
+	if err != nil {
+		return nil, fmt.Errorf("Parse Unix domain type(%s) failed: %s", fields[netUnixTypeIdx], err)
+	}
+	state, err := u.parseState(fields[netUnixStateIdx])
+	if err != nil {
+		return nil, fmt.Errorf("Parse Unix domain state(%s) failed: %s", fields[netUnixStateIdx], err)
+	}
+	var inode uint64
+	if hasInode {
+		inodeStr := fields[netUnixInodeIdx]
+		inode, err = u.parseInode(inodeStr)
+		if err != nil {
+			return nil, fmt.Errorf("Parse Unix domain inode(%s) failed: %s", inodeStr, err)
+		}
+	}
+
+	nuLine := &NetUnixLine{
+		KernelPtr: kernelPtr,
+		RefCount:  users,
+		Type:      typ,
+		Flags:     flags,
+		State:     state,
+		Inode:     inode,
+	}
+
+	// Path field is optional.
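+	// A listening socket line, for example, may end with a filesystem path
+	// such as "/run/systemd/private" (illustrative).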
+ if fieldsLen > minFieldsCnt { + pathIdx := netUnixInodeIdx + 1 + if !hasInode { + pathIdx-- + } + nuLine.Path = fields[pathIdx] + } + + return nuLine, nil +} + +func (u NetUnix) parseKernelPtr(str string) (string, error) { + if !strings.HasSuffix(str, ":") { + return "", errInvalidKernelPtrFmt + } + return str[:len(str)-1], nil +} + +func (u NetUnix) parseUsers(hexStr string) (uint64, error) { + return strconv.ParseUint(hexStr, 16, 32) +} + +func (u NetUnix) parseProtocol(hexStr string) (uint64, error) { + return strconv.ParseUint(hexStr, 16, 32) +} + +func (u NetUnix) parseType(hexStr string) (NetUnixType, error) { + typ, err := strconv.ParseUint(hexStr, 16, 16) + if err != nil { + return 0, err + } + return NetUnixType(typ), nil +} + +func (u NetUnix) parseFlags(hexStr string) (NetUnixFlags, error) { + flags, err := strconv.ParseUint(hexStr, 16, 32) + if err != nil { + return 0, err + } + return NetUnixFlags(flags), nil +} + +func (u NetUnix) parseState(hexStr string) (NetUnixState, error) { + st, err := strconv.ParseInt(hexStr, 16, 8) + if err != nil { + return 0, err + } + return NetUnixState(st), nil +} + +func (u NetUnix) parseInode(inodeStr string) (uint64, error) { + return strconv.ParseUint(inodeStr, 10, 64) +} + +func (t NetUnixType) String() string { + switch t { + case netUnixTypeStream: + return "stream" + case netUnixTypeDgram: + return "dgram" + case netUnixTypeSeqpacket: + return "seqpacket" + } + return "unknown" +} + +func (f NetUnixFlags) String() string { + switch f { + case netUnixFlagListen: + return "listen" + default: + return "default" + } +} + +func (s NetUnixState) String() string { + switch s { + case netUnixStateUnconnected: + return "unconnected" + case netUnixStateConnecting: + return "connecting" + case netUnixStateConnected: + return "connected" + case netUnixStateDisconnected: + return "disconnected" + } + return "unknown" +} diff --git a/vendor/github.com/prometheus/procfs/proc.go b/vendor/github.com/prometheus/procfs/proc.go new file mode 100644 index 000000000..41c148d06 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc.go @@ -0,0 +1,281 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bytes" + "fmt" + "io/ioutil" + "os" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/fs" +) + +// Proc provides information about a running process. +type Proc struct { + // The process ID. + PID int + + fs fs.FS +} + +// Procs represents a list of Proc structs. +type Procs []Proc + +func (p Procs) Len() int { return len(p) } +func (p Procs) Swap(i, j int) { p[i], p[j] = p[j], p[i] } +func (p Procs) Less(i, j int) bool { return p[i].PID < p[j].PID } + +// Self returns a process for the current process read via /proc/self. +func Self() (Proc, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return Proc{}, err + } + return fs.Self() +} + +// NewProc returns a process for the given pid under /proc. 
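+// A minimal usage sketch (PID 1 is purely illustrative):
+//
+//	p, err := procfs.NewProc(1)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	comm, err := p.Comm()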
+func NewProc(pid int) (Proc, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return Proc{}, err + } + return fs.Proc(pid) +} + +// AllProcs returns a list of all currently available processes under /proc. +func AllProcs() (Procs, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return Procs{}, err + } + return fs.AllProcs() +} + +// Self returns a process for the current process. +func (fs FS) Self() (Proc, error) { + p, err := os.Readlink(fs.proc.Path("self")) + if err != nil { + return Proc{}, err + } + pid, err := strconv.Atoi(strings.Replace(p, string(fs.proc), "", -1)) + if err != nil { + return Proc{}, err + } + return fs.Proc(pid) +} + +// NewProc returns a process for the given pid. +// +// Deprecated: use fs.Proc() instead +func (fs FS) NewProc(pid int) (Proc, error) { + return fs.Proc(pid) +} + +// Proc returns a process for the given pid. +func (fs FS) Proc(pid int) (Proc, error) { + if _, err := os.Stat(fs.proc.Path(strconv.Itoa(pid))); err != nil { + return Proc{}, err + } + return Proc{PID: pid, fs: fs.proc}, nil +} + +// AllProcs returns a list of all currently available processes. +func (fs FS) AllProcs() (Procs, error) { + d, err := os.Open(fs.proc.Path()) + if err != nil { + return Procs{}, err + } + defer d.Close() + + names, err := d.Readdirnames(-1) + if err != nil { + return Procs{}, fmt.Errorf("could not read %s: %s", d.Name(), err) + } + + p := Procs{} + for _, n := range names { + pid, err := strconv.ParseInt(n, 10, 64) + if err != nil { + continue + } + p = append(p, Proc{PID: int(pid), fs: fs.proc}) + } + + return p, nil +} + +// CmdLine returns the command line of a process. +func (p Proc) CmdLine() ([]string, error) { + f, err := os.Open(p.path("cmdline")) + if err != nil { + return nil, err + } + defer f.Close() + + data, err := ioutil.ReadAll(f) + if err != nil { + return nil, err + } + + if len(data) < 1 { + return []string{}, nil + } + + return strings.Split(string(bytes.TrimRight(data, string("\x00"))), string(byte(0))), nil +} + +// Comm returns the command name of a process. +func (p Proc) Comm() (string, error) { + f, err := os.Open(p.path("comm")) + if err != nil { + return "", err + } + defer f.Close() + + data, err := ioutil.ReadAll(f) + if err != nil { + return "", err + } + + return strings.TrimSpace(string(data)), nil +} + +// Executable returns the absolute path of the executable command of a process. +func (p Proc) Executable() (string, error) { + exe, err := os.Readlink(p.path("exe")) + if os.IsNotExist(err) { + return "", nil + } + + return exe, err +} + +// Cwd returns the absolute path to the current working directory of the process. +func (p Proc) Cwd() (string, error) { + wd, err := os.Readlink(p.path("cwd")) + if os.IsNotExist(err) { + return "", nil + } + + return wd, err +} + +// RootDir returns the absolute path to the process's root directory (as set by chroot) +func (p Proc) RootDir() (string, error) { + rdir, err := os.Readlink(p.path("root")) + if os.IsNotExist(err) { + return "", nil + } + + return rdir, err +} + +// FileDescriptors returns the currently open file descriptors of a process. 
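+// A minimal usage sketch (assuming p was obtained via procfs.Self()):
+//
+//	fds, err := p.FileDescriptors()
+//	if err == nil {
+//		fmt.Printf("%d open file descriptors\n", len(fds))
+//	}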
+func (p Proc) FileDescriptors() ([]uintptr, error) { + names, err := p.fileDescriptors() + if err != nil { + return nil, err + } + + fds := make([]uintptr, len(names)) + for i, n := range names { + fd, err := strconv.ParseInt(n, 10, 32) + if err != nil { + return nil, fmt.Errorf("could not parse fd %s: %s", n, err) + } + fds[i] = uintptr(fd) + } + + return fds, nil +} + +// FileDescriptorTargets returns the targets of all file descriptors of a process. +// If a file descriptor is not a symlink to a file (like a socket), that value will be the empty string. +func (p Proc) FileDescriptorTargets() ([]string, error) { + names, err := p.fileDescriptors() + if err != nil { + return nil, err + } + + targets := make([]string, len(names)) + + for i, name := range names { + target, err := os.Readlink(p.path("fd", name)) + if err == nil { + targets[i] = target + } + } + + return targets, nil +} + +// FileDescriptorsLen returns the number of currently open file descriptors of +// a process. +func (p Proc) FileDescriptorsLen() (int, error) { + fds, err := p.fileDescriptors() + if err != nil { + return 0, err + } + + return len(fds), nil +} + +// MountStats retrieves statistics and configuration for mount points in a +// process's namespace. +func (p Proc) MountStats() ([]*Mount, error) { + f, err := os.Open(p.path("mountstats")) + if err != nil { + return nil, err + } + defer f.Close() + + return parseMountStats(f) +} + +// MountInfo retrieves mount information for mount points in a +// process's namespace. +// It supplies information missing in `/proc/self/mounts` and +// fixes various other problems with that file too. +func (p Proc) MountInfo() ([]*MountInfo, error) { + f, err := os.Open(p.path("mountinfo")) + if err != nil { + return nil, err + } + defer f.Close() + + return parseMountInfo(f) +} + +func (p Proc) fileDescriptors() ([]string, error) { + d, err := os.Open(p.path("fd")) + if err != nil { + return nil, err + } + defer d.Close() + + names, err := d.Readdirnames(-1) + if err != nil { + return nil, fmt.Errorf("could not read %s: %s", d.Name(), err) + } + + return names, nil +} + +func (p Proc) path(pa ...string) string { + return p.fs.Path(append([]string{strconv.Itoa(p.PID)}, pa...)...) +} diff --git a/vendor/github.com/prometheus/procfs/proc_environ.go b/vendor/github.com/prometheus/procfs/proc_environ.go new file mode 100644 index 000000000..7172bb586 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_environ.go @@ -0,0 +1,43 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package procfs
+
+import (
+	"io/ioutil"
+	"os"
+	"strings"
+)
+
+// Environ reads process environments from /proc/<pid>/environ.
+func (p Proc) Environ() ([]string, error) {
+	environments := make([]string, 0)
+
+	f, err := os.Open(p.path("environ"))
+	if err != nil {
+		return environments, err
+	}
+	defer f.Close()
+
+	data, err := ioutil.ReadAll(f)
+	if err != nil {
+		return environments, err
+	}
+
+	environments = strings.Split(string(data), "\000")
+	if len(environments) > 0 {
+		environments = environments[:len(environments)-1]
+	}
+
+	return environments, nil
+}
diff --git a/vendor/github.com/prometheus/procfs/proc_io.go b/vendor/github.com/prometheus/procfs/proc_io.go
new file mode 100644
index 000000000..0ff89b1ce
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_io.go
@@ -0,0 +1,65 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"fmt"
+	"io/ioutil"
+	"os"
+)
+
+// ProcIO models the content of /proc/<pid>/io.
+type ProcIO struct {
+	// Chars read.
+	RChar uint64
+	// Chars written.
+	WChar uint64
+	// Read syscalls.
+	SyscR uint64
+	// Write syscalls.
+	SyscW uint64
+	// Bytes read.
+	ReadBytes uint64
+	// Bytes written.
+	WriteBytes uint64
+	// Bytes written, but taking into account truncation. See
+	// Documentation/filesystems/proc.txt in the kernel sources for
+	// detailed explanation.
+	CancelledWriteBytes int64
+}
+
+// IO creates a new ProcIO instance from a given Proc instance.
+func (p Proc) IO() (ProcIO, error) {
+	pio := ProcIO{}
+
+	f, err := os.Open(p.path("io"))
+	if err != nil {
+		return pio, err
+	}
+	defer f.Close()
+
+	data, err := ioutil.ReadAll(f)
+	if err != nil {
+		return pio, err
+	}
+
+	ioFormat := "rchar: %d\nwchar: %d\nsyscr: %d\nsyscw: %d\n" +
+		"read_bytes: %d\nwrite_bytes: %d\n" +
+		"cancelled_write_bytes: %d\n"
+
+	_, err = fmt.Sscanf(string(data), ioFormat, &pio.RChar, &pio.WChar, &pio.SyscR,
+		&pio.SyscW, &pio.ReadBytes, &pio.WriteBytes, &pio.CancelledWriteBytes)
+
+	return pio, err
+}
diff --git a/vendor/github.com/prometheus/procfs/proc_limits.go b/vendor/github.com/prometheus/procfs/proc_limits.go
new file mode 100644
index 000000000..91ee24df8
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_limits.go
@@ -0,0 +1,157 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package procfs + +import ( + "bufio" + "fmt" + "os" + "regexp" + "strconv" +) + +// ProcLimits represents the soft limits for each of the process's resource +// limits. For more information see getrlimit(2): +// http://man7.org/linux/man-pages/man2/getrlimit.2.html. +type ProcLimits struct { + // CPU time limit in seconds. + CPUTime int64 + // Maximum size of files that the process may create. + FileSize int64 + // Maximum size of the process's data segment (initialized data, + // uninitialized data, and heap). + DataSize int64 + // Maximum size of the process stack in bytes. + StackSize int64 + // Maximum size of a core file. + CoreFileSize int64 + // Limit of the process's resident set in pages. + ResidentSet int64 + // Maximum number of processes that can be created for the real user ID of + // the calling process. + Processes int64 + // Value one greater than the maximum file descriptor number that can be + // opened by this process. + OpenFiles int64 + // Maximum number of bytes of memory that may be locked into RAM. + LockedMemory int64 + // Maximum size of the process's virtual memory address space in bytes. + AddressSpace int64 + // Limit on the combined number of flock(2) locks and fcntl(2) leases that + // this process may establish. + FileLocks int64 + // Limit of signals that may be queued for the real user ID of the calling + // process. + PendingSignals int64 + // Limit on the number of bytes that can be allocated for POSIX message + // queues for the real user ID of the calling process. + MsqqueueSize int64 + // Limit of the nice priority set using setpriority(2) or nice(2). + NicePriority int64 + // Limit of the real-time priority set using sched_setscheduler(2) or + // sched_setparam(2). + RealtimePriority int64 + // Limit (in microseconds) on the amount of CPU time that a process + // scheduled under a real-time scheduling policy may consume without making + // a blocking system call. + RealtimeTimeout int64 +} + +const ( + limitsFields = 3 + limitsUnlimited = "unlimited" +) + +var ( + limitsDelimiter = regexp.MustCompile(" +") +) + +// NewLimits returns the current soft limits of the process. +// +// Deprecated: use p.Limits() instead +func (p Proc) NewLimits() (ProcLimits, error) { + return p.Limits() +} + +// Limits returns the current soft limits of the process. 
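+// A minimal usage sketch (illustrative; a value of -1 means unlimited):
+//
+//	l, err := p.Limits()
+//	if err == nil {
+//		fmt.Println("max open files:", l.OpenFiles)
+//	}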
+func (p Proc) Limits() (ProcLimits, error) {
+	f, err := os.Open(p.path("limits"))
+	if err != nil {
+		return ProcLimits{}, err
+	}
+	defer f.Close()
+
+	var (
+		l = ProcLimits{}
+		s = bufio.NewScanner(f)
+	)
+	for s.Scan() {
+		fields := limitsDelimiter.Split(s.Text(), limitsFields)
+		if len(fields) != limitsFields {
+			return ProcLimits{}, fmt.Errorf(
+				"couldn't parse %s line %s", f.Name(), s.Text())
+		}
+
+		switch fields[0] {
+		case "Max cpu time":
+			l.CPUTime, err = parseInt(fields[1])
+		case "Max file size":
+			l.FileSize, err = parseInt(fields[1])
+		case "Max data size":
+			l.DataSize, err = parseInt(fields[1])
+		case "Max stack size":
+			l.StackSize, err = parseInt(fields[1])
+		case "Max core file size":
+			l.CoreFileSize, err = parseInt(fields[1])
+		case "Max resident set":
+			l.ResidentSet, err = parseInt(fields[1])
+		case "Max processes":
+			l.Processes, err = parseInt(fields[1])
+		case "Max open files":
+			l.OpenFiles, err = parseInt(fields[1])
+		case "Max locked memory":
+			l.LockedMemory, err = parseInt(fields[1])
+		case "Max address space":
+			l.AddressSpace, err = parseInt(fields[1])
+		case "Max file locks":
+			l.FileLocks, err = parseInt(fields[1])
+		case "Max pending signals":
+			l.PendingSignals, err = parseInt(fields[1])
+		case "Max msgqueue size":
+			l.MsqqueueSize, err = parseInt(fields[1])
+		case "Max nice priority":
+			l.NicePriority, err = parseInt(fields[1])
+		case "Max realtime priority":
+			l.RealtimePriority, err = parseInt(fields[1])
+		case "Max realtime timeout":
+			l.RealtimeTimeout, err = parseInt(fields[1])
+		}
+		if err != nil {
+			return ProcLimits{}, err
+		}
+	}
+
+	return l, s.Err()
+}
+
+func parseInt(s string) (int64, error) {
+	if s == limitsUnlimited {
+		return -1, nil
+	}
+	i, err := strconv.ParseInt(s, 10, 64)
+	if err != nil {
+		return 0, fmt.Errorf("couldn't parse value %s: %s", s, err)
+	}
+	return i, nil
+}
diff --git a/vendor/github.com/prometheus/procfs/proc_ns.go b/vendor/github.com/prometheus/procfs/proc_ns.go
new file mode 100644
index 000000000..c66740ff7
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_ns.go
@@ -0,0 +1,68 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"fmt"
+	"os"
+	"strconv"
+	"strings"
+)
+
+// Namespace represents a single namespace of a process.
+type Namespace struct {
+	Type  string // Namespace type.
+	Inode uint32 // Inode number of the namespace. If two processes are in the same namespace their inodes will match.
+}
+
+// Namespaces contains all of the namespaces that the process is contained in.
+type Namespaces map[string]Namespace
+
+// Namespaces reads from /proc/<pid>/ns/* to get the namespaces of which the
+// process is a member.
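+// A minimal usage sketch (illustrative):
+//
+//	ns, err := p.Namespaces()
+//	if err == nil {
+//		fmt.Println("net namespace inode:", ns["net"].Inode)
+//	}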
+func (p Proc) Namespaces() (Namespaces, error) {
+	d, err := os.Open(p.path("ns"))
+	if err != nil {
+		return nil, err
+	}
+	defer d.Close()
+
+	names, err := d.Readdirnames(-1)
+	if err != nil {
+		return nil, fmt.Errorf("failed to read contents of ns dir: %v", err)
+	}
+
+	ns := make(Namespaces, len(names))
+	for _, name := range names {
+		target, err := os.Readlink(p.path("ns", name))
+		if err != nil {
+			return nil, err
+		}
+
+		fields := strings.SplitN(target, ":", 2)
+		if len(fields) != 2 {
+			return nil, fmt.Errorf("failed to parse namespace type and inode from '%v'", target)
+		}
+
+		typ := fields[0]
+		inode, err := strconv.ParseUint(strings.Trim(fields[1], "[]"), 10, 32)
+		if err != nil {
+			return nil, fmt.Errorf("failed to parse inode from '%v': %v", fields[1], err)
+		}
+
+		ns[name] = Namespace{typ, uint32(inode)}
+	}
+
+	return ns, nil
+}
diff --git a/vendor/github.com/prometheus/procfs/proc_psi.go b/vendor/github.com/prometheus/procfs/proc_psi.go
new file mode 100644
index 000000000..46fe26626
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_psi.go
@@ -0,0 +1,101 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+// The PSI / pressure interface is described at
+// https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/Documentation/accounting/psi.txt
+// Each resource (cpu, io, memory, ...) is exposed as a single file.
+// Each file may contain up to two lines, one for "some" pressure and one for "full" pressure.
+// Each line contains several averages (over n seconds) and a total in µs.
+//
+// Example io pressure file:
+// > some avg10=0.06 avg60=0.21 avg300=0.99 total=8537362
+// > full avg10=0.00 avg60=0.13 avg300=0.96 total=8183134
+
+import (
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"strings"
+)
+
+const lineFormat = "avg10=%f avg60=%f avg300=%f total=%d"
+
+// PSILine is a single line of values as returned by /proc/pressure/*.
+// The Avg entries are averages over n seconds, as a percentage.
+// The Total line is in microseconds.
+type PSILine struct {
+	Avg10  float64
+	Avg60  float64
+	Avg300 float64
+	Total  uint64
+}
+
+// PSIStats represents pressure stall information from /proc/pressure/*.
+// Some indicates the share of time in which at least some tasks are stalled.
+// Full indicates the share of time in which all non-idle tasks are stalled simultaneously.
+type PSIStats struct {
+	Some *PSILine
+	Full *PSILine
+}
+
+// PSIStatsForResource reads pressure stall information for the specified
+// resource from /proc/pressure/<resource>. At time of writing this can be
+// either "cpu", "memory" or "io".
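+// A minimal usage sketch (illustrative):
+//
+//	stats, err := fs.PSIStatsForResource("cpu")
+//	if err == nil && stats.Some != nil {
+//		fmt.Println("cpu some avg10:", stats.Some.Avg10)
+//	}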
+func (fs FS) PSIStatsForResource(resource string) (PSIStats, error) { + file, err := os.Open(fs.proc.Path(fmt.Sprintf("%s/%s", "pressure", resource))) + if err != nil { + return PSIStats{}, fmt.Errorf("psi_stats: unavailable for %s", resource) + } + + defer file.Close() + return parsePSIStats(resource, file) +} + +// parsePSIStats parses the specified file for pressure stall information +func parsePSIStats(resource string, file io.Reader) (PSIStats, error) { + psiStats := PSIStats{} + stats, err := ioutil.ReadAll(file) + if err != nil { + return psiStats, fmt.Errorf("psi_stats: unable to read data for %s", resource) + } + + for _, l := range strings.Split(string(stats), "\n") { + prefix := strings.Split(l, " ")[0] + switch prefix { + case "some": + psi := PSILine{} + _, err := fmt.Sscanf(l, fmt.Sprintf("some %s", lineFormat), &psi.Avg10, &psi.Avg60, &psi.Avg300, &psi.Total) + if err != nil { + return PSIStats{}, err + } + psiStats.Some = &psi + case "full": + psi := PSILine{} + _, err := fmt.Sscanf(l, fmt.Sprintf("full %s", lineFormat), &psi.Avg10, &psi.Avg60, &psi.Avg300, &psi.Total) + if err != nil { + return PSIStats{}, err + } + psiStats.Full = &psi + default: + // If we encounter a line with an unknown prefix, ignore it and move on + // Should new measurement types be added in the future we'll simply ignore them instead + // of erroring on retrieval + continue + } + } + + return psiStats, nil +} diff --git a/vendor/github.com/prometheus/procfs/proc_stat.go b/vendor/github.com/prometheus/procfs/proc_stat.go new file mode 100644 index 000000000..dbde1fa0d --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_stat.go @@ -0,0 +1,198 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bytes" + "fmt" + "io/ioutil" + "os" + + "github.com/prometheus/procfs/internal/fs" +) + +// Originally, this USER_HZ value was dynamically retrieved via a sysconf call +// which required cgo. However, that caused a lot of problems regarding +// cross-compilation. Alternatives such as running a binary to determine the +// value, or trying to derive it in some other way were all problematic. After +// much research it was determined that USER_HZ is actually hardcoded to 100 on +// all Go-supported platforms as of the time of this writing. This is why we +// decided to hardcode it here as well. It is not impossible that there could +// be systems with exceptions, but they should be very exotic edge cases, and +// in that case, the worst outcome will be two misreported metrics. +// +// See also the following discussions: +// +// - https://github.com/prometheus/node_exporter/issues/52 +// - https://github.com/prometheus/procfs/pull/2 +// - http://stackoverflow.com/questions/17410841/how-does-user-hz-solve-the-jiffy-scaling-issue +const userHZ = 100 + +// ProcStat provides status information about the process, +// read from /proc/[pid]/stat. +type ProcStat struct { + // The process ID. 
+ PID int + // The filename of the executable. + Comm string + // The process state. + State string + // The PID of the parent of this process. + PPID int + // The process group ID of the process. + PGRP int + // The session ID of the process. + Session int + // The controlling terminal of the process. + TTY int + // The ID of the foreground process group of the controlling terminal of + // the process. + TPGID int + // The kernel flags word of the process. + Flags uint + // The number of minor faults the process has made which have not required + // loading a memory page from disk. + MinFlt uint + // The number of minor faults that the process's waited-for children have + // made. + CMinFlt uint + // The number of major faults the process has made which have required + // loading a memory page from disk. + MajFlt uint + // The number of major faults that the process's waited-for children have + // made. + CMajFlt uint + // Amount of time that this process has been scheduled in user mode, + // measured in clock ticks. + UTime uint + // Amount of time that this process has been scheduled in kernel mode, + // measured in clock ticks. + STime uint + // Amount of time that this process's waited-for children have been + // scheduled in user mode, measured in clock ticks. + CUTime uint + // Amount of time that this process's waited-for children have been + // scheduled in kernel mode, measured in clock ticks. + CSTime uint + // For processes running a real-time scheduling policy, this is the negated + // scheduling priority, minus one. + Priority int + // The nice value, a value in the range 19 (low priority) to -20 (high + // priority). + Nice int + // Number of threads in this process. + NumThreads int + // The time the process started after system boot, the value is expressed + // in clock ticks. + Starttime uint64 + // Virtual memory size in bytes. + VSize uint + // Resident set size in pages. + RSS int + + proc fs.FS +} + +// NewStat returns the current status information of the process. +// +// Deprecated: use p.Stat() instead +func (p Proc) NewStat() (ProcStat, error) { + return p.Stat() +} + +// Stat returns the current status information of the process. +func (p Proc) Stat() (ProcStat, error) { + f, err := os.Open(p.path("stat")) + if err != nil { + return ProcStat{}, err + } + defer f.Close() + + data, err := ioutil.ReadAll(f) + if err != nil { + return ProcStat{}, err + } + + var ( + ignore int + + s = ProcStat{PID: p.PID, proc: p.fs} + l = bytes.Index(data, []byte("(")) + r = bytes.LastIndex(data, []byte(")")) + ) + + if l < 0 || r < 0 { + return ProcStat{}, fmt.Errorf( + "unexpected format, couldn't extract comm: %s", + data, + ) + } + + s.Comm = string(data[l+1 : r]) + _, err = fmt.Fscan( + bytes.NewBuffer(data[r+2:]), + &s.State, + &s.PPID, + &s.PGRP, + &s.Session, + &s.TTY, + &s.TPGID, + &s.Flags, + &s.MinFlt, + &s.CMinFlt, + &s.MajFlt, + &s.CMajFlt, + &s.UTime, + &s.STime, + &s.CUTime, + &s.CSTime, + &s.Priority, + &s.Nice, + &s.NumThreads, + &ignore, + &s.Starttime, + &s.VSize, + &s.RSS, + ) + if err != nil { + return ProcStat{}, err + } + + return s, nil +} + +// VirtualMemory returns the virtual memory size in bytes. +func (s ProcStat) VirtualMemory() uint { + return s.VSize +} + +// ResidentMemory returns the resident memory size in bytes. +func (s ProcStat) ResidentMemory() int { + return s.RSS * os.Getpagesize() +} + +// StartTime returns the unix timestamp of the process in seconds. 
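+//
+// The value is derived from the kernel boot time plus the process start time
+// in clock ticks (BootTime + Starttime/userHZ), as the implementation below
+// shows. A brief illustrative sketch of converting it to a time.Time:
+//
+//	started, err := s.StartTime()
+//	if err == nil {
+//		fmt.Println("process started at", time.Unix(int64(started), 0))
+//	}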
+func (s ProcStat) StartTime() (float64, error) {
+	fs := FS{proc: s.proc}
+	stat, err := fs.Stat()
+	if err != nil {
+		return 0, err
+	}
+	return float64(stat.BootTime) + (float64(s.Starttime) / userHZ), nil
+}
+
+// CPUTime returns the total CPU user and system time in seconds.
+func (s ProcStat) CPUTime() float64 {
+	return float64(s.UTime+s.STime) / userHZ
+}
diff --git a/vendor/github.com/prometheus/procfs/proc_status.go b/vendor/github.com/prometheus/procfs/proc_status.go
new file mode 100644
index 000000000..6b4b61f71
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_status.go
@@ -0,0 +1,162 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bytes"
+	"io/ioutil"
+	"os"
+	"strconv"
+	"strings"
+)
+
+// ProcStatus provides status information about the process,
+// read from /proc/[pid]/status.
+type ProcStatus struct {
+	// The process ID.
+	PID int
+	// The process name.
+	Name string
+
+	// Peak virtual memory size.
+	VmPeak uint64
+	// Virtual memory size.
+	VmSize uint64
+	// Locked memory size.
+	VmLck uint64
+	// Pinned memory size.
+	VmPin uint64
+	// Peak resident set size.
+	VmHWM uint64
+	// Resident set size (sum of RssAnon, RssFile and RssShmem).
+	VmRSS uint64
+	// Size of resident anonymous memory.
+	RssAnon uint64
+	// Size of resident file mappings.
+	RssFile uint64
+	// Size of resident shared memory.
+	RssShmem uint64
+	// Size of data segments.
+	VmData uint64
+	// Size of stack segments.
+	VmStk uint64
+	// Size of text segments.
+	VmExe uint64
+	// Shared library code size.
+	VmLib uint64
+	// Page table entries size.
+	VmPTE uint64
+	// Size of second-level page tables.
+	VmPMD uint64
+	// Swapped-out virtual memory size by anonymous private pages.
+	VmSwap uint64
+	// Size of hugetlb memory portions.
+	HugetlbPages uint64
+
+	// Number of voluntary context switches.
+	VoluntaryCtxtSwitches uint64
+	// Number of involuntary context switches.
+	NonVoluntaryCtxtSwitches uint64
+}
+
+// NewStatus returns the current status information of the process.
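+//
+// A minimal usage sketch (not part of the vendored API docs; p is a Proc):
+//
+//	status, err := p.NewStatus()
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Printf("%s: VmRSS=%d bytes, ctxt switches=%d\n",
+//		status.Name, status.VmRSS, status.TotalCtxtSwitches())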
+func (p Proc) NewStatus() (ProcStatus, error) {
+	f, err := os.Open(p.path("status"))
+	if err != nil {
+		return ProcStatus{}, err
+	}
+	defer f.Close()
+
+	data, err := ioutil.ReadAll(f)
+	if err != nil {
+		return ProcStatus{}, err
+	}
+
+	s := ProcStatus{PID: p.PID}
+
+	lines := strings.Split(string(data), "\n")
+	for _, line := range lines {
+		if !bytes.Contains([]byte(line), []byte(":")) {
+			continue
+		}
+
+		kv := strings.SplitN(line, ":", 2)
+
+		// strip surrounding spaces
+		k := strings.TrimSpace(kv[0])
+		v := strings.TrimSpace(kv[1])
+		// strip the "kB" unit suffix; trimming only the suffix avoids
+		// mangling string values that happen to start with 'k' or 'B'
+		v = strings.TrimSuffix(v, " kB")
+
+		// convert the value to an int when possible; the error check can be
+		// skipped because vKBytes is not used when the value is a string
+		vKBytes, _ := strconv.ParseUint(v, 10, 64)
+		// convert kB to B
+		vBytes := vKBytes * 1024
+
+		s.fillStatus(k, v, vKBytes, vBytes)
+	}
+
+	return s, nil
+}
+
+func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintBytes uint64) {
+	switch k {
+	case "Name":
+		s.Name = vString
+	case "VmPeak":
+		s.VmPeak = vUintBytes
+	case "VmSize":
+		s.VmSize = vUintBytes
+	case "VmLck":
+		s.VmLck = vUintBytes
+	case "VmPin":
+		s.VmPin = vUintBytes
+	case "VmHWM":
+		s.VmHWM = vUintBytes
+	case "VmRSS":
+		s.VmRSS = vUintBytes
+	case "RssAnon":
+		s.RssAnon = vUintBytes
+	case "RssFile":
+		s.RssFile = vUintBytes
+	case "RssShmem":
+		s.RssShmem = vUintBytes
+	case "VmData":
+		s.VmData = vUintBytes
+	case "VmStk":
+		s.VmStk = vUintBytes
+	case "VmExe":
+		s.VmExe = vUintBytes
+	case "VmLib":
+		s.VmLib = vUintBytes
+	case "VmPTE":
+		s.VmPTE = vUintBytes
+	case "VmPMD":
+		s.VmPMD = vUintBytes
+	case "VmSwap":
+		s.VmSwap = vUintBytes
+	case "HugetlbPages":
+		s.HugetlbPages = vUintBytes
+	case "voluntary_ctxt_switches":
+		s.VoluntaryCtxtSwitches = vUint
+	case "nonvoluntary_ctxt_switches":
+		s.NonVoluntaryCtxtSwitches = vUint
+	}
+}
+
+// TotalCtxtSwitches returns the total number of context switches.
+func (s ProcStatus) TotalCtxtSwitches() uint64 {
+	return s.VoluntaryCtxtSwitches + s.NonVoluntaryCtxtSwitches
+}
diff --git a/vendor/github.com/prometheus/procfs/stat.go b/vendor/github.com/prometheus/procfs/stat.go
new file mode 100644
index 000000000..6661ee03a
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/stat.go
@@ -0,0 +1,244 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"os"
+	"strconv"
+	"strings"
+
+	"github.com/prometheus/procfs/internal/fs"
+)
+
+// CPUStat shows how much time the CPU has spent in various states.
+type CPUStat struct {
+	User      float64
+	Nice      float64
+	System    float64
+	Idle      float64
+	Iowait    float64
+	IRQ       float64
+	SoftIRQ   float64
+	Steal     float64
+	Guest     float64
+	GuestNice float64
+}
+
+// SoftIRQStat represents the softirq statistics as exported in the procfs stat file.
+// A nice introduction can be found at https://0xax.gitbooks.io/linux-insides/content/interrupts/interrupts-9.html +// It is possible to get per-cpu stats by reading /proc/softirqs +type SoftIRQStat struct { + Hi uint64 + Timer uint64 + NetTx uint64 + NetRx uint64 + Block uint64 + BlockIoPoll uint64 + Tasklet uint64 + Sched uint64 + Hrtimer uint64 + Rcu uint64 +} + +// Stat represents kernel/system statistics. +type Stat struct { + // Boot time in seconds since the Epoch. + BootTime uint64 + // Summed up cpu statistics. + CPUTotal CPUStat + // Per-CPU statistics. + CPU []CPUStat + // Number of times interrupts were handled, which contains numbered and unnumbered IRQs. + IRQTotal uint64 + // Number of times a numbered IRQ was triggered. + IRQ []uint64 + // Number of times a context switch happened. + ContextSwitches uint64 + // Number of times a process was created. + ProcessCreated uint64 + // Number of processes currently running. + ProcessesRunning uint64 + // Number of processes currently blocked (waiting for IO). + ProcessesBlocked uint64 + // Number of times a softirq was scheduled. + SoftIRQTotal uint64 + // Detailed softirq statistics. + SoftIRQ SoftIRQStat +} + +// Parse a cpu statistics line and returns the CPUStat struct plus the cpu id (or -1 for the overall sum). +func parseCPUStat(line string) (CPUStat, int64, error) { + cpuStat := CPUStat{} + var cpu string + + count, err := fmt.Sscanf(line, "%s %f %f %f %f %f %f %f %f %f %f", + &cpu, + &cpuStat.User, &cpuStat.Nice, &cpuStat.System, &cpuStat.Idle, + &cpuStat.Iowait, &cpuStat.IRQ, &cpuStat.SoftIRQ, &cpuStat.Steal, + &cpuStat.Guest, &cpuStat.GuestNice) + + if err != nil && err != io.EOF { + return CPUStat{}, -1, fmt.Errorf("couldn't parse %s (cpu): %s", line, err) + } + if count == 0 { + return CPUStat{}, -1, fmt.Errorf("couldn't parse %s (cpu): 0 elements parsed", line) + } + + cpuStat.User /= userHZ + cpuStat.Nice /= userHZ + cpuStat.System /= userHZ + cpuStat.Idle /= userHZ + cpuStat.Iowait /= userHZ + cpuStat.IRQ /= userHZ + cpuStat.SoftIRQ /= userHZ + cpuStat.Steal /= userHZ + cpuStat.Guest /= userHZ + cpuStat.GuestNice /= userHZ + + if cpu == "cpu" { + return cpuStat, -1, nil + } + + cpuID, err := strconv.ParseInt(cpu[3:], 10, 64) + if err != nil { + return CPUStat{}, -1, fmt.Errorf("couldn't parse %s (cpu/cpuid): %s", line, err) + } + + return cpuStat, cpuID, nil +} + +// Parse a softirq line. +func parseSoftIRQStat(line string) (SoftIRQStat, uint64, error) { + softIRQStat := SoftIRQStat{} + var total uint64 + var prefix string + + _, err := fmt.Sscanf(line, "%s %d %d %d %d %d %d %d %d %d %d %d", + &prefix, &total, + &softIRQStat.Hi, &softIRQStat.Timer, &softIRQStat.NetTx, &softIRQStat.NetRx, + &softIRQStat.Block, &softIRQStat.BlockIoPoll, + &softIRQStat.Tasklet, &softIRQStat.Sched, + &softIRQStat.Hrtimer, &softIRQStat.Rcu) + + if err != nil { + return SoftIRQStat{}, 0, fmt.Errorf("couldn't parse %s (softirq): %s", line, err) + } + + return softIRQStat, total, nil +} + +// NewStat returns information about current cpu/process statistics. +// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt +// +// Deprecated: use fs.Stat() instead +func NewStat() (Stat, error) { + fs, err := NewFS(fs.DefaultProcMountPoint) + if err != nil { + return Stat{}, err + } + return fs.Stat() +} + +// NewStat returns information about current cpu/process statistics. 
+// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt
+//
+// Deprecated: use fs.Stat() instead
+func (fs FS) NewStat() (Stat, error) {
+	return fs.Stat()
+}
+
+// Stat returns information about current cpu/process statistics.
+// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt
+func (fs FS) Stat() (Stat, error) {
+	f, err := os.Open(fs.proc.Path("stat"))
+	if err != nil {
+		return Stat{}, err
+	}
+	defer f.Close()
+
+	stat := Stat{}
+
+	scanner := bufio.NewScanner(f)
+	for scanner.Scan() {
+		line := scanner.Text()
+		parts := strings.Fields(line)
+		// require at least <key> <value>
+		if len(parts) < 2 {
+			continue
+		}
+		switch {
+		case parts[0] == "btime":
+			if stat.BootTime, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
+				return Stat{}, fmt.Errorf("couldn't parse %s (btime): %s", parts[1], err)
+			}
+		case parts[0] == "intr":
+			if stat.IRQTotal, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
+				return Stat{}, fmt.Errorf("couldn't parse %s (intr): %s", parts[1], err)
+			}
+			numberedIRQs := parts[2:]
+			stat.IRQ = make([]uint64, len(numberedIRQs))
+			for i, count := range numberedIRQs {
+				if stat.IRQ[i], err = strconv.ParseUint(count, 10, 64); err != nil {
+					return Stat{}, fmt.Errorf("couldn't parse %s (intr%d): %s", count, i, err)
+				}
+			}
+		case parts[0] == "ctxt":
+			if stat.ContextSwitches, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
+				return Stat{}, fmt.Errorf("couldn't parse %s (ctxt): %s", parts[1], err)
+			}
+		case parts[0] == "processes":
+			if stat.ProcessCreated, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
+				return Stat{}, fmt.Errorf("couldn't parse %s (processes): %s", parts[1], err)
+			}
+		case parts[0] == "procs_running":
+			if stat.ProcessesRunning, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
+				return Stat{}, fmt.Errorf("couldn't parse %s (procs_running): %s", parts[1], err)
+			}
+		case parts[0] == "procs_blocked":
+			if stat.ProcessesBlocked, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
+				return Stat{}, fmt.Errorf("couldn't parse %s (procs_blocked): %s", parts[1], err)
+			}
+		case parts[0] == "softirq":
+			softIRQStats, total, err := parseSoftIRQStat(line)
+			if err != nil {
+				return Stat{}, err
+			}
+			stat.SoftIRQTotal = total
+			stat.SoftIRQ = softIRQStats
+		case strings.HasPrefix(parts[0], "cpu"):
+			cpuStat, cpuID, err := parseCPUStat(line)
+			if err != nil {
+				return Stat{}, err
+			}
+			if cpuID == -1 {
+				stat.CPUTotal = cpuStat
+			} else {
+				for int64(len(stat.CPU)) <= cpuID {
+					stat.CPU = append(stat.CPU, CPUStat{})
+				}
+				stat.CPU[cpuID] = cpuStat
+			}
+		}
+	}
+
+	if err := scanner.Err(); err != nil {
+		return Stat{}, fmt.Errorf("couldn't parse %s: %s", f.Name(), err)
+	}
+
+	return stat, nil
+}
diff --git a/vendor/github.com/prometheus/procfs/xfrm.go b/vendor/github.com/prometheus/procfs/xfrm.go
new file mode 100644
index 000000000..30aa417d5
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/xfrm.go
@@ -0,0 +1,187 @@
+// Copyright 2017 Prometheus Team
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bufio"
+	"fmt"
+	"os"
+	"strconv"
+	"strings"
+)
+
+// XfrmStat models the contents of /proc/net/xfrm_stat.
+type XfrmStat struct {
+	// All errors which are not matched by others
+	XfrmInError int
+	// No buffer is left
+	XfrmInBufferError int
+	// Header Error
+	XfrmInHdrError int
+	// No state found
+	// i.e. either inbound SPI, address, or IPSEC protocol at SA is wrong
+	XfrmInNoStates int
+	// Transformation protocol specific error
+	// e.g. SA Key is wrong
+	XfrmInStateProtoError int
+	// Transformation mode specific error
+	XfrmInStateModeError int
+	// Sequence error
+	// e.g. sequence number is out of window
+	XfrmInStateSeqError int
+	// State is expired
+	XfrmInStateExpired int
+	// State has mismatch option
+	// e.g. UDP encapsulation type is mismatched
+	XfrmInStateMismatch int
+	// State is invalid
+	XfrmInStateInvalid int
+	// No matching template for states
+	// e.g. Inbound SAs are correct but SP rule is wrong
+	XfrmInTmplMismatch int
+	// No policy is found for states
+	// e.g. Inbound SAs are correct but no SP is found
+	XfrmInNoPols int
+	// Policy discards
+	XfrmInPolBlock int
+	// Policy error
+	XfrmInPolError int
+	// All errors which are not matched by others
+	XfrmOutError int
+	// Bundle generation error
+	XfrmOutBundleGenError int
+	// Bundle check error
+	XfrmOutBundleCheckError int
+	// No state was found
+	XfrmOutNoStates int
+	// Transformation protocol specific error
+	XfrmOutStateProtoError int
+	// Transformation mode specific error
+	XfrmOutStateModeError int
+	// Sequence error
+	// i.e. sequence number overflow
+	XfrmOutStateSeqError int
+	// State is expired
+	XfrmOutStateExpired int
+	// Policy discards
+	XfrmOutPolBlock int
+	// Policy is dead
+	XfrmOutPolDead int
+	// Policy error
+	XfrmOutPolError int
+	XfrmFwdHdrError int
+	XfrmOutStateInvalid int
+	XfrmAcquireError int
+}
+
+// NewXfrmStat reads the xfrm_stat statistics.
+func NewXfrmStat() (XfrmStat, error) {
+	fs, err := NewFS(DefaultMountPoint)
+	if err != nil {
+		return XfrmStat{}, err
+	}
+
+	return fs.NewXfrmStat()
+}
+
+// NewXfrmStat reads the xfrm_stat statistics from the 'proc' filesystem.
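+//
+// A minimal usage sketch (not part of the vendored API docs; fs is an FS):
+//
+//	stat, err := fs.NewXfrmStat()
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println("inbound xfrm errors:", stat.XfrmInError)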
+func (fs FS) NewXfrmStat() (XfrmStat, error) { + file, err := os.Open(fs.proc.Path("net/xfrm_stat")) + if err != nil { + return XfrmStat{}, err + } + defer file.Close() + + var ( + x = XfrmStat{} + s = bufio.NewScanner(file) + ) + + for s.Scan() { + fields := strings.Fields(s.Text()) + + if len(fields) != 2 { + return XfrmStat{}, fmt.Errorf( + "couldn't parse %s line %s", file.Name(), s.Text()) + } + + name := fields[0] + value, err := strconv.Atoi(fields[1]) + if err != nil { + return XfrmStat{}, err + } + + switch name { + case "XfrmInError": + x.XfrmInError = value + case "XfrmInBufferError": + x.XfrmInBufferError = value + case "XfrmInHdrError": + x.XfrmInHdrError = value + case "XfrmInNoStates": + x.XfrmInNoStates = value + case "XfrmInStateProtoError": + x.XfrmInStateProtoError = value + case "XfrmInStateModeError": + x.XfrmInStateModeError = value + case "XfrmInStateSeqError": + x.XfrmInStateSeqError = value + case "XfrmInStateExpired": + x.XfrmInStateExpired = value + case "XfrmInStateInvalid": + x.XfrmInStateInvalid = value + case "XfrmInTmplMismatch": + x.XfrmInTmplMismatch = value + case "XfrmInNoPols": + x.XfrmInNoPols = value + case "XfrmInPolBlock": + x.XfrmInPolBlock = value + case "XfrmInPolError": + x.XfrmInPolError = value + case "XfrmOutError": + x.XfrmOutError = value + case "XfrmInStateMismatch": + x.XfrmInStateMismatch = value + case "XfrmOutBundleGenError": + x.XfrmOutBundleGenError = value + case "XfrmOutBundleCheckError": + x.XfrmOutBundleCheckError = value + case "XfrmOutNoStates": + x.XfrmOutNoStates = value + case "XfrmOutStateProtoError": + x.XfrmOutStateProtoError = value + case "XfrmOutStateModeError": + x.XfrmOutStateModeError = value + case "XfrmOutStateSeqError": + x.XfrmOutStateSeqError = value + case "XfrmOutStateExpired": + x.XfrmOutStateExpired = value + case "XfrmOutPolBlock": + x.XfrmOutPolBlock = value + case "XfrmOutPolDead": + x.XfrmOutPolDead = value + case "XfrmOutPolError": + x.XfrmOutPolError = value + case "XfrmFwdHdrError": + x.XfrmFwdHdrError = value + case "XfrmOutStateInvalid": + x.XfrmOutStateInvalid = value + case "XfrmAcquireError": + x.XfrmAcquireError = value + } + + } + + return x, s.Err() +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/config/default.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/config/default.go index ba591d4fe..f9521ee45 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/config/default.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/config/default.go @@ -19,6 +19,7 @@ package config import ( "fmt" "strconv" + "time" corev1 "k8s.io/api/core/v1" ) @@ -27,6 +28,7 @@ const ( // ConfigName is the name of the configmap DefaultsConfigName = "config-defaults" DefaultTimeoutMinutes = 60 + NoTimeoutDuration = 0 * time.Minute defaultTimeoutMinutesKey = "default-timeout-minutes" ) diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/config/zz_generated.deepcopy.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/config/zz_generated.deepcopy.go index b9be3f53a..8abb6b777 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/config/zz_generated.deepcopy.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/config/zz_generated.deepcopy.go @@ -16,7 +16,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This file was autogenerated by deepcopy-gen. Do not edit it manually! +// Code generated by deepcopy-gen. DO NOT EDIT. 
package config diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pipeline_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pipeline_validation.go index ce6f6171d..2f9ad98b8 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pipeline_validation.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pipeline_validation.go @@ -33,7 +33,7 @@ func (p *Pipeline) Validate(ctx context.Context) *apis.FieldError { if err := validateObjectMetadata(p.GetObjectMeta()); err != nil { return err.ViaField("metadata") } - return nil + return p.Spec.Validate(ctx) } func validateDeclaredResources(ps *PipelineSpec) error { diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pipelinerun_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pipelinerun_types.go index d4b937265..f59113cef 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pipelinerun_types.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pipelinerun_types.go @@ -62,6 +62,11 @@ type PipelineRunSpec struct { // Refer to Go's ParseDuration documentation for expected format: https://golang.org/pkg/time/#ParseDuration // +optional Timeout *metav1.Duration `json:"timeout,omitempty"` + + // PodTemplate holds pod specific configuration + PodTemplate PodTemplate `json:"podTemplate,omitempty"` + + // FIXME(vdemeester) Deprecated // NodeSelector is a selector which must be true for the pod to fit on a node. // Selector which must match a node's labels for the pod to be scheduled on that node. // More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pipelinerun_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pipelinerun_validation.go index fba755b9c..0a64d38b8 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pipelinerun_validation.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pipelinerun_validation.go @@ -51,8 +51,8 @@ func (ps *PipelineRunSpec) Validate(ctx context.Context) *apis.FieldError { if ps.Timeout != nil { // timeout should be a valid duration of at least 0. - if ps.Timeout.Duration <= 0 { - return apis.ErrInvalidValue(fmt.Sprintf("%s should be > 0", ps.Timeout.Duration.String()), "spec.timeout") + if ps.Timeout.Duration < 0 { + return apis.ErrInvalidValue(fmt.Sprintf("%s should be >= 0", ps.Timeout.Duration.String()), "spec.timeout") } } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pod.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pod.go new file mode 100644 index 000000000..3deca0768 --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pod.go @@ -0,0 +1,62 @@ +/* +Copyright 2019 The Tekton Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package v1alpha1
+
+import (
+	corev1 "k8s.io/api/core/v1"
+)
+
+// PodTemplate holds pod specific configuration
+type PodTemplate struct {
+	// NodeSelector is a selector which must be true for the pod to fit on a node.
+	// Selector which must match a node's labels for the pod to be scheduled on that node.
+	// More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
+	// +optional
+	NodeSelector map[string]string `json:"nodeSelector,omitempty"`
+
+	// If specified, the pod's tolerations.
+	// +optional
+	Tolerations []corev1.Toleration `json:"tolerations,omitempty"`
+
+	// If specified, the pod's scheduling constraints
+	// +optional
+	Affinity *corev1.Affinity `json:"affinity,omitempty"`
+
+	// SecurityContext holds pod-level security attributes and common container settings.
+	// Optional: Defaults to empty. See type description for default values of each field.
+	// +optional
+	SecurityContext *corev1.PodSecurityContext `json:"securityContext,omitempty"`
+
+	// List of volumes that can be mounted by containers belonging to the pod.
+	// More info: https://kubernetes.io/docs/concepts/storage/volumes
+	// +optional
+	Volumes []corev1.Volume `json:"volumes,omitempty" patchStrategy:"merge,retainKeys" patchMergeKey:"name" protobuf:"bytes,1,rep,name=volumes"`
+}
+
+// CombinedPodTemplate takes a PodTemplate (either from TaskRun or PipelineRun)
+// and merges it with the deprecated fields that were inlined.
+func CombinedPodTemplate(template PodTemplate, deprecatedNodeSelector map[string]string, deprecatedTolerations []corev1.Toleration, deprecatedAffinity *corev1.Affinity) PodTemplate {
+	if len(template.NodeSelector) == 0 && len(deprecatedNodeSelector) != 0 {
+		template.NodeSelector = deprecatedNodeSelector
+	}
+	if len(template.Tolerations) == 0 && len(deprecatedTolerations) != 0 {
+		template.Tolerations = deprecatedTolerations
+	}
+	if template.Affinity == nil && deprecatedAffinity != nil {
+		template.Affinity = deprecatedAffinity
+	}
+	return template
+}
diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/taskrun_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/taskrun_types.go
index 4f4968bdb..dc51212ff 100644
--- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/taskrun_types.go
+++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/taskrun_types.go
@@ -53,6 +53,11 @@ type TaskRunSpec struct {
 	// Refer Go's ParseDuration documentation for expected format: https://golang.org/pkg/time/#ParseDuration
 	// +optional
 	Timeout *metav1.Duration `json:"timeout,omitempty"`
+
+	// PodTemplate holds pod specific configuration
+	PodTemplate PodTemplate `json:"podTemplate,omitempty"`
+
+	// FIXME(vdemeester) Deprecated
 	// NodeSelector is a selector which must be true for the pod to fit on a node.
 	// Selector which must match a node's labels for the pod to be scheduled on that node.
// More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/taskrun_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/taskrun_validation.go index 08cc6a2cb..755a281d6 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/taskrun_validation.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/taskrun_validation.go @@ -66,6 +66,13 @@ func (ts *TaskRunSpec) Validate(ctx context.Context) *apis.FieldError { } } + if ts.Timeout != nil { + // timeout should be a valid duration of at least 0. + if ts.Timeout.Duration < 0 { + return apis.ErrInvalidValue(fmt.Sprintf("%s should be >= 0", ts.Timeout.Duration.String()), "spec.timeout") + } + } + return nil } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/zz_generated.deepcopy.go index d95226a45..e3866d797 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/zz_generated.deepcopy.go @@ -16,13 +16,13 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This file was autogenerated by deepcopy-gen. Do not edit it manually! +// Code generated by deepcopy-gen. DO NOT EDIT. package v1alpha1 import ( v1 "k8s.io/api/core/v1" - meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) @@ -52,12 +52,8 @@ func (in *ArtifactPVC) DeepCopyInto(out *ArtifactPVC) { *out = *in if in.PersistentVolumeClaim != nil { in, out := &in.PersistentVolumeClaim, &out.PersistentVolumeClaim - if *in == nil { - *out = nil - } else { - *out = new(v1.PersistentVolumeClaim) - (*in).DeepCopyInto(*out) - } + *out = new(v1.PersistentVolumeClaim) + (*in).DeepCopyInto(*out) } return } @@ -181,12 +177,15 @@ func (in *DAG) DeepCopyInto(out *DAG) { in, out := &in.Nodes, &out.Nodes *out = make(map[string]*Node, len(*in)) for key, val := range *in { + var outVal *Node if val == nil { (*out)[key] = nil } else { - (*out)[key] = new(Node) - val.DeepCopyInto((*out)[key]) + in, out := &val, &outVal + *out = new(Node) + (*in).DeepCopyInto(*out) } + (*out)[key] = outVal } } return @@ -289,11 +288,10 @@ func (in *Node) DeepCopyInto(out *Node) { in, out := &in.Prev, &out.Prev *out = make([]*Node, len(*in)) for i := range *in { - if (*in)[i] == nil { - (*out)[i] = nil - } else { - (*out)[i] = new(Node) - (*in)[i].DeepCopyInto((*out)[i]) + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(Node) + (*in).DeepCopyInto(*out) } } } @@ -301,11 +299,10 @@ func (in *Node) DeepCopyInto(out *Node) { in, out := &in.Next, &out.Next *out = make([]*Node, len(*in)) for i := range *in { - if (*in)[i] == nil { - (*out)[i] = nil - } else { - (*out)[i] = new(Node) - (*in)[i].DeepCopyInto((*out)[i]) + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(Node) + (*in).DeepCopyInto(*out) } } } @@ -707,22 +704,15 @@ func (in *PipelineRunSpec) DeepCopyInto(out *PipelineRunSpec) { } if in.Results != nil { in, out := &in.Results, &out.Results - if *in == nil { - *out = nil - } else { - *out = new(Results) - **out = **in - } + *out = new(Results) + **out = **in } if in.Timeout != nil { in, out := &in.Timeout, &out.Timeout - if *in == nil { - *out = nil - } else { - *out 
= new(meta_v1.Duration) - **out = **in - } + *out = new(metav1.Duration) + **out = **in } + in.PodTemplate.DeepCopyInto(&out.PodTemplate) if in.NodeSelector != nil { in, out := &in.NodeSelector, &out.NodeSelector *out = make(map[string]string, len(*in)) @@ -739,12 +729,8 @@ func (in *PipelineRunSpec) DeepCopyInto(out *PipelineRunSpec) { } if in.Affinity != nil { in, out := &in.Affinity, &out.Affinity - if *in == nil { - *out = nil - } else { - *out = new(v1.Affinity) - (*in).DeepCopyInto(*out) - } + *out = new(v1.Affinity) + (*in).DeepCopyInto(*out) } return } @@ -781,41 +767,30 @@ func (in *PipelineRunStatus) DeepCopyInto(out *PipelineRunStatus) { in.Status.DeepCopyInto(&out.Status) if in.Results != nil { in, out := &in.Results, &out.Results - if *in == nil { - *out = nil - } else { - *out = new(Results) - **out = **in - } + *out = new(Results) + **out = **in } if in.StartTime != nil { in, out := &in.StartTime, &out.StartTime - if *in == nil { - *out = nil - } else { - *out = new(meta_v1.Time) - (*in).DeepCopyInto(*out) - } + *out = (*in).DeepCopy() } if in.CompletionTime != nil { in, out := &in.CompletionTime, &out.CompletionTime - if *in == nil { - *out = nil - } else { - *out = new(meta_v1.Time) - (*in).DeepCopyInto(*out) - } + *out = (*in).DeepCopy() } if in.TaskRuns != nil { in, out := &in.TaskRuns, &out.TaskRuns *out = make(map[string]*PipelineRunTaskRunStatus, len(*in)) for key, val := range *in { + var outVal *PipelineRunTaskRunStatus if val == nil { (*out)[key] = nil } else { - (*out)[key] = new(PipelineRunTaskRunStatus) - val.DeepCopyInto((*out)[key]) + in, out := &val, &outVal + *out = new(PipelineRunTaskRunStatus) + (*in).DeepCopyInto(*out) } + (*out)[key] = outVal } } return @@ -836,12 +811,8 @@ func (in *PipelineRunTaskRunStatus) DeepCopyInto(out *PipelineRunTaskRunStatus) *out = *in if in.Status != nil { in, out := &in.Status, &out.Status - if *in == nil { - *out = nil - } else { - *out = new(TaskRunStatus) - (*in).DeepCopyInto(*out) - } + *out = new(TaskRunStatus) + (*in).DeepCopyInto(*out) } return } @@ -916,12 +887,8 @@ func (in *PipelineTask) DeepCopyInto(out *PipelineTask) { } if in.Resources != nil { in, out := &in.Resources, &out.Resources - if *in == nil { - *out = nil - } else { - *out = new(PipelineTaskResources) - (*in).DeepCopyInto(*out) - } + *out = new(PipelineTaskResources) + (*in).DeepCopyInto(*out) } if in.Params != nil { in, out := &in.Params, &out.Params @@ -1038,6 +1005,53 @@ func (in *PipelineTaskRun) DeepCopy() *PipelineTaskRun { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PodTemplate) DeepCopyInto(out *PodTemplate) { + *out = *in + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]v1.Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Affinity != nil { + in, out := &in.Affinity, &out.Affinity + *out = new(v1.Affinity) + (*in).DeepCopyInto(*out) + } + if in.SecurityContext != nil { + in, out := &in.SecurityContext, &out.SecurityContext + *out = new(v1.PodSecurityContext) + (*in).DeepCopyInto(*out) + } + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make([]v1.Volume, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodTemplate. +func (in *PodTemplate) DeepCopy() *PodTemplate { + if in == nil { + return nil + } + out := new(PodTemplate) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PullRequestResource) DeepCopyInto(out *PullRequestResource) { *out = *in @@ -1206,12 +1220,8 @@ func (in *TaskResourceBinding) DeepCopyInto(out *TaskResourceBinding) { out.ResourceRef = in.ResourceRef if in.ResourceSpec != nil { in, out := &in.ResourceSpec, &out.ResourceSpec - if *in == nil { - *out = nil - } else { - *out = new(PipelineResourceSpec) - (*in).DeepCopyInto(*out) - } + *out = new(PipelineResourceSpec) + (*in).DeepCopyInto(*out) } if in.Paths != nil { in, out := &in.Paths, &out.Paths @@ -1350,40 +1360,25 @@ func (in *TaskRunSpec) DeepCopyInto(out *TaskRunSpec) { in.Outputs.DeepCopyInto(&out.Outputs) if in.Results != nil { in, out := &in.Results, &out.Results - if *in == nil { - *out = nil - } else { - *out = new(Results) - **out = **in - } + *out = new(Results) + **out = **in } if in.TaskRef != nil { in, out := &in.TaskRef, &out.TaskRef - if *in == nil { - *out = nil - } else { - *out = new(TaskRef) - **out = **in - } + *out = new(TaskRef) + **out = **in } if in.TaskSpec != nil { in, out := &in.TaskSpec, &out.TaskSpec - if *in == nil { - *out = nil - } else { - *out = new(TaskSpec) - (*in).DeepCopyInto(*out) - } + *out = new(TaskSpec) + (*in).DeepCopyInto(*out) } if in.Timeout != nil { in, out := &in.Timeout, &out.Timeout - if *in == nil { - *out = nil - } else { - *out = new(meta_v1.Duration) - **out = **in - } + *out = new(metav1.Duration) + **out = **in } + in.PodTemplate.DeepCopyInto(&out.PodTemplate) if in.NodeSelector != nil { in, out := &in.NodeSelector, &out.NodeSelector *out = make(map[string]string, len(*in)) @@ -1400,12 +1395,8 @@ func (in *TaskRunSpec) DeepCopyInto(out *TaskRunSpec) { } if in.Affinity != nil { in, out := &in.Affinity, &out.Affinity - if *in == nil { - *out = nil - } else { - *out = new(v1.Affinity) - (*in).DeepCopyInto(*out) - } + *out = new(v1.Affinity) + (*in).DeepCopyInto(*out) } return } @@ -1426,30 +1417,16 @@ func (in *TaskRunStatus) DeepCopyInto(out *TaskRunStatus) { in.Status.DeepCopyInto(&out.Status) if in.Results != nil { in, out := &in.Results, &out.Results - if *in == nil { - *out = nil - } else { - *out = new(Results) - **out = **in - } + *out = new(Results) + **out = **in } if in.StartTime != nil { in, out := &in.StartTime, &out.StartTime - if *in == nil { - *out = nil - } else { - *out 
= new(meta_v1.Time) - (*in).DeepCopyInto(*out) - } + *out = (*in).DeepCopy() } if in.CompletionTime != nil { in, out := &in.CompletionTime, &out.CompletionTime - if *in == nil { - *out = nil - } else { - *out = new(meta_v1.Time) - (*in).DeepCopyInto(*out) - } + *out = (*in).DeepCopy() } if in.Steps != nil { in, out := &in.Steps, &out.Steps @@ -1488,21 +1465,13 @@ func (in *TaskSpec) DeepCopyInto(out *TaskSpec) { *out = *in if in.Inputs != nil { in, out := &in.Inputs, &out.Inputs - if *in == nil { - *out = nil - } else { - *out = new(Inputs) - (*in).DeepCopyInto(*out) - } + *out = new(Inputs) + (*in).DeepCopyInto(*out) } if in.Outputs != nil { in, out := &in.Outputs, &out.Outputs - if *in == nil { - *out = nil - } else { - *out = new(Outputs) - (*in).DeepCopyInto(*out) - } + *out = new(Outputs) + (*in).DeepCopyInto(*out) } if in.Steps != nil { in, out := &in.Steps, &out.Steps @@ -1520,21 +1489,13 @@ func (in *TaskSpec) DeepCopyInto(out *TaskSpec) { } if in.StepTemplate != nil { in, out := &in.StepTemplate, &out.StepTemplate - if *in == nil { - *out = nil - } else { - *out = new(v1.Container) - (*in).DeepCopyInto(*out) - } + *out = new(v1.Container) + (*in).DeepCopyInto(*out) } if in.ContainerTemplate != nil { in, out := &in.ContainerTemplate, &out.ContainerTemplate - if *in == nil { - *out = nil - } else { - *out = new(v1.Container) - (*in).DeepCopyInto(*out) - } + *out = new(v1.Container) + (*in).DeepCopyInto(*out) } return } diff --git a/vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/clientset.go b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/clientset.go similarity index 70% rename from vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/clientset.go rename to vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/clientset.go index c2052dfd4..4f2e5dbfd 100644 --- a/vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/clientset.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/clientset.go @@ -1,5 +1,5 @@ /* -Copyright The Kubernetes Authors. +Copyright 2019 The Tekton Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -19,27 +19,35 @@ limitations under the License. package versioned import ( + tektonv1alpha1 "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1" discovery "k8s.io/client-go/discovery" rest "k8s.io/client-go/rest" flowcontrol "k8s.io/client-go/util/flowcontrol" - examplev1 "k8s.io/code-generator/_examples/MixedCase/clientset/versioned/typed/example/v1" ) type Interface interface { Discovery() discovery.DiscoveryInterface - ExampleV1() examplev1.ExampleV1Interface + TektonV1alpha1() tektonv1alpha1.TektonV1alpha1Interface + // Deprecated: please explicitly pick a version if possible. + Tekton() tektonv1alpha1.TektonV1alpha1Interface } // Clientset contains the clients for groups. Each group has exactly one // version included in a Clientset. type Clientset struct { *discovery.DiscoveryClient - exampleV1 *examplev1.ExampleV1Client + tektonV1alpha1 *tektonv1alpha1.TektonV1alpha1Client } -// ExampleV1 retrieves the ExampleV1Client -func (c *Clientset) ExampleV1() examplev1.ExampleV1Interface { - return c.exampleV1 +// TektonV1alpha1 retrieves the TektonV1alpha1Client +func (c *Clientset) TektonV1alpha1() tektonv1alpha1.TektonV1alpha1Interface { + return c.tektonV1alpha1 +} + +// Deprecated: Tekton retrieves the default version of TektonClient. 
+// Please explicitly pick a version. +func (c *Clientset) Tekton() tektonv1alpha1.TektonV1alpha1Interface { + return c.tektonV1alpha1 } // Discovery retrieves the DiscoveryClient @@ -58,7 +66,7 @@ func NewForConfig(c *rest.Config) (*Clientset, error) { } var cs Clientset var err error - cs.exampleV1, err = examplev1.NewForConfig(&configShallowCopy) + cs.tektonV1alpha1, err = tektonv1alpha1.NewForConfig(&configShallowCopy) if err != nil { return nil, err } @@ -74,7 +82,7 @@ func NewForConfig(c *rest.Config) (*Clientset, error) { // panics if there is an error in the config. func NewForConfigOrDie(c *rest.Config) *Clientset { var cs Clientset - cs.exampleV1 = examplev1.NewForConfigOrDie(c) + cs.tektonV1alpha1 = tektonv1alpha1.NewForConfigOrDie(c) cs.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c) return &cs @@ -83,7 +91,7 @@ func NewForConfigOrDie(c *rest.Config) *Clientset { // New creates a new Clientset for the given RESTClient. func New(c rest.Interface) *Clientset { var cs Clientset - cs.exampleV1 = examplev1.New(c) + cs.tektonV1alpha1 = tektonv1alpha1.New(c) cs.DiscoveryClient = discovery.NewDiscoveryClient(c) return &cs diff --git a/vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/doc.go b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/doc.go similarity index 95% rename from vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/doc.go rename to vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/doc.go index 41721ca52..c166b8dc1 100644 --- a/vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/doc.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/doc.go @@ -1,5 +1,5 @@ /* -Copyright The Kubernetes Authors. +Copyright 2019 The Tekton Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/scheme/doc.go b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme/doc.go similarity index 95% rename from vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/scheme/doc.go rename to vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme/doc.go index 7dc375616..c543eeb57 100644 --- a/vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/scheme/doc.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme/doc.go @@ -1,5 +1,5 @@ /* -Copyright The Kubernetes Authors. +Copyright 2019 The Tekton Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme/register.go b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme/register.go new file mode 100644 index 000000000..b59db5f18 --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme/register.go @@ -0,0 +1,56 @@ +/* +Copyright 2019 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package scheme + +import ( + tektonv1alpha1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +var Scheme = runtime.NewScheme() +var Codecs = serializer.NewCodecFactory(Scheme) +var ParameterCodec = runtime.NewParameterCodec(Scheme) +var localSchemeBuilder = runtime.SchemeBuilder{ + tektonv1alpha1.AddToScheme, +} + +// AddToScheme adds all types of this clientset into the given scheme. This allows composition +// of clientsets, like in: +// +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) +// +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// +// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types +// correctly. +var AddToScheme = localSchemeBuilder.AddToScheme + +func init() { + v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"}) + utilruntime.Must(AddToScheme(Scheme)) +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/clustertask.go b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/clustertask.go new file mode 100644 index 000000000..b92785315 --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/clustertask.go @@ -0,0 +1,147 @@ +/* +Copyright 2019 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1alpha1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" + scheme "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// ClusterTasksGetter has a method to return a ClusterTaskInterface. +// A group's client should implement this interface. +type ClusterTasksGetter interface { + ClusterTasks() ClusterTaskInterface +} + +// ClusterTaskInterface has methods to work with ClusterTask resources. 
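+//
+// A minimal usage sketch (not generated code; it assumes a clientset built
+// with versioned.NewForConfig, and uses this file's v1 meta import alias):
+//
+//	ct, err := clientset.TektonV1alpha1().ClusterTasks().Get("build", v1.GetOptions{})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println(ct.Name)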
+type ClusterTaskInterface interface { + Create(*v1alpha1.ClusterTask) (*v1alpha1.ClusterTask, error) + Update(*v1alpha1.ClusterTask) (*v1alpha1.ClusterTask, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1alpha1.ClusterTask, error) + List(opts v1.ListOptions) (*v1alpha1.ClusterTaskList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.ClusterTask, err error) + ClusterTaskExpansion +} + +// clusterTasks implements ClusterTaskInterface +type clusterTasks struct { + client rest.Interface +} + +// newClusterTasks returns a ClusterTasks +func newClusterTasks(c *TektonV1alpha1Client) *clusterTasks { + return &clusterTasks{ + client: c.RESTClient(), + } +} + +// Get takes name of the clusterTask, and returns the corresponding clusterTask object, and an error if there is any. +func (c *clusterTasks) Get(name string, options v1.GetOptions) (result *v1alpha1.ClusterTask, err error) { + result = &v1alpha1.ClusterTask{} + err = c.client.Get(). + Resource("clustertasks"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ClusterTasks that match those selectors. +func (c *clusterTasks) List(opts v1.ListOptions) (result *v1alpha1.ClusterTaskList, err error) { + result = &v1alpha1.ClusterTaskList{} + err = c.client.Get(). + Resource("clustertasks"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested clusterTasks. +func (c *clusterTasks) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Resource("clustertasks"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Create takes the representation of a clusterTask and creates it. Returns the server's representation of the clusterTask, and an error, if there is any. +func (c *clusterTasks) Create(clusterTask *v1alpha1.ClusterTask) (result *v1alpha1.ClusterTask, err error) { + result = &v1alpha1.ClusterTask{} + err = c.client.Post(). + Resource("clustertasks"). + Body(clusterTask). + Do(). + Into(result) + return +} + +// Update takes the representation of a clusterTask and updates it. Returns the server's representation of the clusterTask, and an error, if there is any. +func (c *clusterTasks) Update(clusterTask *v1alpha1.ClusterTask) (result *v1alpha1.ClusterTask, err error) { + result = &v1alpha1.ClusterTask{} + err = c.client.Put(). + Resource("clustertasks"). + Name(clusterTask.Name). + Body(clusterTask). + Do(). + Into(result) + return +} + +// Delete takes name of the clusterTask and deletes it. Returns an error if one occurs. +func (c *clusterTasks) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Resource("clustertasks"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *clusterTasks) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Resource("clustertasks"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched clusterTask. 
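+//
+// For example (illustrative only, assuming a JSON merge patch and a
+// TektonV1alpha1Interface value named client):
+//
+//	patched, err := client.ClusterTasks().Patch("build", types.MergePatchType,
+//		[]byte(`{"metadata":{"labels":{"team":"ci"}}}`))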
+func (c *clusterTasks) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.ClusterTask, err error) { + result = &v1alpha1.ClusterTask{} + err = c.client.Patch(pt). + Resource("clustertasks"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/doc.go b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/doc.go similarity index 95% rename from vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/doc.go rename to vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/doc.go index df51baa4d..8151c2ea6 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/doc.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/doc.go @@ -1,5 +1,5 @@ /* -Copyright The Kubernetes Authors. +Copyright 2019 The Tekton Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/generated_expansion.go b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/generated_expansion.go new file mode 100644 index 000000000..915b82f91 --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/generated_expansion.go @@ -0,0 +1,31 @@ +/* +Copyright 2019 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +type ClusterTaskExpansion interface{} + +type PipelineExpansion interface{} + +type PipelineResourceExpansion interface{} + +type PipelineRunExpansion interface{} + +type TaskExpansion interface{} + +type TaskRunExpansion interface{} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/pipeline.go b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/pipeline.go new file mode 100644 index 000000000..a5292b53f --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/pipeline.go @@ -0,0 +1,174 @@ +/* +Copyright 2019 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + v1alpha1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" + scheme "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// PipelinesGetter has a method to return a PipelineInterface. +// A group's client should implement this interface. +type PipelinesGetter interface { + Pipelines(namespace string) PipelineInterface +} + +// PipelineInterface has methods to work with Pipeline resources. +type PipelineInterface interface { + Create(*v1alpha1.Pipeline) (*v1alpha1.Pipeline, error) + Update(*v1alpha1.Pipeline) (*v1alpha1.Pipeline, error) + UpdateStatus(*v1alpha1.Pipeline) (*v1alpha1.Pipeline, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1alpha1.Pipeline, error) + List(opts v1.ListOptions) (*v1alpha1.PipelineList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.Pipeline, err error) + PipelineExpansion +} + +// pipelines implements PipelineInterface +type pipelines struct { + client rest.Interface + ns string +} + +// newPipelines returns a Pipelines +func newPipelines(c *TektonV1alpha1Client, namespace string) *pipelines { + return &pipelines{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the pipeline, and returns the corresponding pipeline object, and an error if there is any. +func (c *pipelines) Get(name string, options v1.GetOptions) (result *v1alpha1.Pipeline, err error) { + result = &v1alpha1.Pipeline{} + err = c.client.Get(). + Namespace(c.ns). + Resource("pipelines"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Pipelines that match those selectors. +func (c *pipelines) List(opts v1.ListOptions) (result *v1alpha1.PipelineList, err error) { + result = &v1alpha1.PipelineList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("pipelines"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested pipelines. +func (c *pipelines) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("pipelines"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Create takes the representation of a pipeline and creates it. Returns the server's representation of the pipeline, and an error, if there is any. +func (c *pipelines) Create(pipeline *v1alpha1.Pipeline) (result *v1alpha1.Pipeline, err error) { + result = &v1alpha1.Pipeline{} + err = c.client.Post(). + Namespace(c.ns). + Resource("pipelines"). + Body(pipeline). + Do(). + Into(result) + return +} + +// Update takes the representation of a pipeline and updates it. Returns the server's representation of the pipeline, and an error, if there is any. +func (c *pipelines) Update(pipeline *v1alpha1.Pipeline) (result *v1alpha1.Pipeline, err error) { + result = &v1alpha1.Pipeline{} + err = c.client.Put(). + Namespace(c.ns). + Resource("pipelines"). + Name(pipeline.Name). + Body(pipeline). + Do(). 
+ Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *pipelines) UpdateStatus(pipeline *v1alpha1.Pipeline) (result *v1alpha1.Pipeline, err error) { + result = &v1alpha1.Pipeline{} + err = c.client.Put(). + Namespace(c.ns). + Resource("pipelines"). + Name(pipeline.Name). + SubResource("status"). + Body(pipeline). + Do(). + Into(result) + return +} + +// Delete takes name of the pipeline and deletes it. Returns an error if one occurs. +func (c *pipelines) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("pipelines"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *pipelines) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("pipelines"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched pipeline. +func (c *pipelines) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.Pipeline, err error) { + result = &v1alpha1.Pipeline{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("pipelines"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/pipeline_client.go b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/pipeline_client.go new file mode 100644 index 000000000..e8202b4da --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/pipeline_client.go @@ -0,0 +1,115 @@ +/* +Copyright 2019 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1alpha1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" + "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + rest "k8s.io/client-go/rest" +) + +type TektonV1alpha1Interface interface { + RESTClient() rest.Interface + ClusterTasksGetter + PipelinesGetter + PipelineResourcesGetter + PipelineRunsGetter + TasksGetter + TaskRunsGetter +} + +// TektonV1alpha1Client is used to interact with features provided by the tekton.dev group. 
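+//
+// A minimal construction sketch (illustrative; assumes a *rest.Config named
+// cfg obtained from a kubeconfig or the in-cluster environment):
+//
+//	client, err := NewForConfig(cfg)
+//	if err != nil {
+//		// handle the bad config
+//	}
+//	runs, err := client.PipelineRuns("default").List(v1.ListOptions{})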
+type TektonV1alpha1Client struct { + restClient rest.Interface +} + +func (c *TektonV1alpha1Client) ClusterTasks() ClusterTaskInterface { + return newClusterTasks(c) +} + +func (c *TektonV1alpha1Client) Pipelines(namespace string) PipelineInterface { + return newPipelines(c, namespace) +} + +func (c *TektonV1alpha1Client) PipelineResources(namespace string) PipelineResourceInterface { + return newPipelineResources(c, namespace) +} + +func (c *TektonV1alpha1Client) PipelineRuns(namespace string) PipelineRunInterface { + return newPipelineRuns(c, namespace) +} + +func (c *TektonV1alpha1Client) Tasks(namespace string) TaskInterface { + return newTasks(c, namespace) +} + +func (c *TektonV1alpha1Client) TaskRuns(namespace string) TaskRunInterface { + return newTaskRuns(c, namespace) +} + +// NewForConfig creates a new TektonV1alpha1Client for the given config. +func NewForConfig(c *rest.Config) (*TektonV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &TektonV1alpha1Client{client}, nil +} + +// NewForConfigOrDie creates a new TektonV1alpha1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *TektonV1alpha1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new TektonV1alpha1Client for the given RESTClient. +func New(c rest.Interface) *TektonV1alpha1Client { + return &TektonV1alpha1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1alpha1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *TektonV1alpha1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/pipelineresource.go b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/pipelineresource.go new file mode 100644 index 000000000..64761fec2 --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/pipelineresource.go @@ -0,0 +1,174 @@ +/* +Copyright 2019 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + v1alpha1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" + scheme "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// PipelineResourcesGetter has a method to return a PipelineResourceInterface. +// A group's client should implement this interface. +type PipelineResourcesGetter interface { + PipelineResources(namespace string) PipelineResourceInterface +} + +// PipelineResourceInterface has methods to work with PipelineResource resources. +type PipelineResourceInterface interface { + Create(*v1alpha1.PipelineResource) (*v1alpha1.PipelineResource, error) + Update(*v1alpha1.PipelineResource) (*v1alpha1.PipelineResource, error) + UpdateStatus(*v1alpha1.PipelineResource) (*v1alpha1.PipelineResource, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1alpha1.PipelineResource, error) + List(opts v1.ListOptions) (*v1alpha1.PipelineResourceList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.PipelineResource, err error) + PipelineResourceExpansion +} + +// pipelineResources implements PipelineResourceInterface +type pipelineResources struct { + client rest.Interface + ns string +} + +// newPipelineResources returns a PipelineResources +func newPipelineResources(c *TektonV1alpha1Client, namespace string) *pipelineResources { + return &pipelineResources{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the pipelineResource, and returns the corresponding pipelineResource object, and an error if there is any. +func (c *pipelineResources) Get(name string, options v1.GetOptions) (result *v1alpha1.PipelineResource, err error) { + result = &v1alpha1.PipelineResource{} + err = c.client.Get(). + Namespace(c.ns). + Resource("pipelineresources"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of PipelineResources that match those selectors. +func (c *pipelineResources) List(opts v1.ListOptions) (result *v1alpha1.PipelineResourceList, err error) { + result = &v1alpha1.PipelineResourceList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("pipelineresources"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested pipelineResources. +func (c *pipelineResources) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("pipelineresources"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Create takes the representation of a pipelineResource and creates it. Returns the server's representation of the pipelineResource, and an error, if there is any. +func (c *pipelineResources) Create(pipelineResource *v1alpha1.PipelineResource) (result *v1alpha1.PipelineResource, err error) { + result = &v1alpha1.PipelineResource{} + err = c.client.Post(). + Namespace(c.ns). + Resource("pipelineresources"). + Body(pipelineResource). + Do(). + Into(result) + return +} + +// Update takes the representation of a pipelineResource and updates it. 
Returns the server's representation of the pipelineResource, and an error, if there is any. +func (c *pipelineResources) Update(pipelineResource *v1alpha1.PipelineResource) (result *v1alpha1.PipelineResource, err error) { + result = &v1alpha1.PipelineResource{} + err = c.client.Put(). + Namespace(c.ns). + Resource("pipelineresources"). + Name(pipelineResource.Name). + Body(pipelineResource). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *pipelineResources) UpdateStatus(pipelineResource *v1alpha1.PipelineResource) (result *v1alpha1.PipelineResource, err error) { + result = &v1alpha1.PipelineResource{} + err = c.client.Put(). + Namespace(c.ns). + Resource("pipelineresources"). + Name(pipelineResource.Name). + SubResource("status"). + Body(pipelineResource). + Do(). + Into(result) + return +} + +// Delete takes name of the pipelineResource and deletes it. Returns an error if one occurs. +func (c *pipelineResources) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("pipelineresources"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *pipelineResources) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("pipelineresources"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched pipelineResource. +func (c *pipelineResources) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.PipelineResource, err error) { + result = &v1alpha1.PipelineResource{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("pipelineresources"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/pipelinerun.go b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/pipelinerun.go new file mode 100644 index 000000000..52248eb38 --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/pipelinerun.go @@ -0,0 +1,174 @@ +/* +Copyright 2019 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1alpha1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" + scheme "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// PipelineRunsGetter has a method to return a PipelineRunInterface. 
+// A group's client should implement this interface. +type PipelineRunsGetter interface { + PipelineRuns(namespace string) PipelineRunInterface +} + +// PipelineRunInterface has methods to work with PipelineRun resources. +type PipelineRunInterface interface { + Create(*v1alpha1.PipelineRun) (*v1alpha1.PipelineRun, error) + Update(*v1alpha1.PipelineRun) (*v1alpha1.PipelineRun, error) + UpdateStatus(*v1alpha1.PipelineRun) (*v1alpha1.PipelineRun, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1alpha1.PipelineRun, error) + List(opts v1.ListOptions) (*v1alpha1.PipelineRunList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.PipelineRun, err error) + PipelineRunExpansion +} + +// pipelineRuns implements PipelineRunInterface +type pipelineRuns struct { + client rest.Interface + ns string +} + +// newPipelineRuns returns a PipelineRuns +func newPipelineRuns(c *TektonV1alpha1Client, namespace string) *pipelineRuns { + return &pipelineRuns{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the pipelineRun, and returns the corresponding pipelineRun object, and an error if there is any. +func (c *pipelineRuns) Get(name string, options v1.GetOptions) (result *v1alpha1.PipelineRun, err error) { + result = &v1alpha1.PipelineRun{} + err = c.client.Get(). + Namespace(c.ns). + Resource("pipelineruns"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of PipelineRuns that match those selectors. +func (c *pipelineRuns) List(opts v1.ListOptions) (result *v1alpha1.PipelineRunList, err error) { + result = &v1alpha1.PipelineRunList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("pipelineruns"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested pipelineRuns. +func (c *pipelineRuns) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("pipelineruns"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Create takes the representation of a pipelineRun and creates it. Returns the server's representation of the pipelineRun, and an error, if there is any. +func (c *pipelineRuns) Create(pipelineRun *v1alpha1.PipelineRun) (result *v1alpha1.PipelineRun, err error) { + result = &v1alpha1.PipelineRun{} + err = c.client.Post(). + Namespace(c.ns). + Resource("pipelineruns"). + Body(pipelineRun). + Do(). + Into(result) + return +} + +// Update takes the representation of a pipelineRun and updates it. Returns the server's representation of the pipelineRun, and an error, if there is any. +func (c *pipelineRuns) Update(pipelineRun *v1alpha1.PipelineRun) (result *v1alpha1.PipelineRun, err error) { + result = &v1alpha1.PipelineRun{} + err = c.client.Put(). + Namespace(c.ns). + Resource("pipelineruns"). + Name(pipelineRun.Name). + Body(pipelineRun). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
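+// Note that an UpdateStatus call writes only to the status subresource; any
+// spec changes carried in the object are ignored by the API server. A
+// minimal sketch (illustrative; pr is an assumed, freshly fetched
+// *v1alpha1.PipelineRun whose status fields were just mutated):
+//
+//	updated, err := tektonClient.TektonV1alpha1().PipelineRuns(pr.Namespace).UpdateStatus(pr)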
+ +func (c *pipelineRuns) UpdateStatus(pipelineRun *v1alpha1.PipelineRun) (result *v1alpha1.PipelineRun, err error) { + result = &v1alpha1.PipelineRun{} + err = c.client.Put(). + Namespace(c.ns). + Resource("pipelineruns"). + Name(pipelineRun.Name). + SubResource("status"). + Body(pipelineRun). + Do(). + Into(result) + return +} + +// Delete takes name of the pipelineRun and deletes it. Returns an error if one occurs. +func (c *pipelineRuns) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("pipelineruns"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *pipelineRuns) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("pipelineruns"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched pipelineRun. +func (c *pipelineRuns) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.PipelineRun, err error) { + result = &v1alpha1.PipelineRun{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("pipelineruns"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/task.go b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/task.go new file mode 100644 index 000000000..8fd3c348d --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/task.go @@ -0,0 +1,157 @@ +/* +Copyright 2019 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1alpha1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" + scheme "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// TasksGetter has a method to return a TaskInterface. +// A group's client should implement this interface. +type TasksGetter interface { + Tasks(namespace string) TaskInterface +} + +// TaskInterface has methods to work with Task resources. 
+type TaskInterface interface { + Create(*v1alpha1.Task) (*v1alpha1.Task, error) + Update(*v1alpha1.Task) (*v1alpha1.Task, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1alpha1.Task, error) + List(opts v1.ListOptions) (*v1alpha1.TaskList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.Task, err error) + TaskExpansion +} + +// tasks implements TaskInterface +type tasks struct { + client rest.Interface + ns string +} + +// newTasks returns a Tasks +func newTasks(c *TektonV1alpha1Client, namespace string) *tasks { + return &tasks{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the task, and returns the corresponding task object, and an error if there is any. +func (c *tasks) Get(name string, options v1.GetOptions) (result *v1alpha1.Task, err error) { + result = &v1alpha1.Task{} + err = c.client.Get(). + Namespace(c.ns). + Resource("tasks"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Tasks that match those selectors. +func (c *tasks) List(opts v1.ListOptions) (result *v1alpha1.TaskList, err error) { + result = &v1alpha1.TaskList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("tasks"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested tasks. +func (c *tasks) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("tasks"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Create takes the representation of a task and creates it. Returns the server's representation of the task, and an error, if there is any. +func (c *tasks) Create(task *v1alpha1.Task) (result *v1alpha1.Task, err error) { + result = &v1alpha1.Task{} + err = c.client.Post(). + Namespace(c.ns). + Resource("tasks"). + Body(task). + Do(). + Into(result) + return +} + +// Update takes the representation of a task and updates it. Returns the server's representation of the task, and an error, if there is any. +func (c *tasks) Update(task *v1alpha1.Task) (result *v1alpha1.Task, err error) { + result = &v1alpha1.Task{} + err = c.client.Put(). + Namespace(c.ns). + Resource("tasks"). + Name(task.Name). + Body(task). + Do(). + Into(result) + return +} + +// Delete takes name of the task and deletes it. Returns an error if one occurs. +func (c *tasks) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("tasks"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *tasks) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("tasks"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched task. +func (c *tasks) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.Task, err error) { + result = &v1alpha1.Task{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("tasks"). + SubResource(subresources...). 
+ Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/taskrun.go b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/taskrun.go new file mode 100644 index 000000000..48d8774d6 --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/taskrun.go @@ -0,0 +1,174 @@ +/* +Copyright 2019 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1alpha1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" + scheme "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// TaskRunsGetter has a method to return a TaskRunInterface. +// A group's client should implement this interface. +type TaskRunsGetter interface { + TaskRuns(namespace string) TaskRunInterface +} + +// TaskRunInterface has methods to work with TaskRun resources. +type TaskRunInterface interface { + Create(*v1alpha1.TaskRun) (*v1alpha1.TaskRun, error) + Update(*v1alpha1.TaskRun) (*v1alpha1.TaskRun, error) + UpdateStatus(*v1alpha1.TaskRun) (*v1alpha1.TaskRun, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1alpha1.TaskRun, error) + List(opts v1.ListOptions) (*v1alpha1.TaskRunList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.TaskRun, err error) + TaskRunExpansion +} + +// taskRuns implements TaskRunInterface +type taskRuns struct { + client rest.Interface + ns string +} + +// newTaskRuns returns a TaskRuns +func newTaskRuns(c *TektonV1alpha1Client, namespace string) *taskRuns { + return &taskRuns{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the taskRun, and returns the corresponding taskRun object, and an error if there is any. +func (c *taskRuns) Get(name string, options v1.GetOptions) (result *v1alpha1.TaskRun, err error) { + result = &v1alpha1.TaskRun{} + err = c.client.Get(). + Namespace(c.ns). + Resource("taskruns"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of TaskRuns that match those selectors. +func (c *taskRuns) List(opts v1.ListOptions) (result *v1alpha1.TaskRunList, err error) { + result = &v1alpha1.TaskRunList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("taskruns"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested taskRuns. 
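+//
+// A minimal consumption sketch (illustrative; assumes a built clientset and
+// that the caller eventually calls Stop on the returned watch):
+//
+//	w, err := tektonClient.TektonV1alpha1().TaskRuns("default").Watch(v1.ListOptions{})
+//	if err != nil {
+//		// handle the failed watch
+//	}
+//	for event := range w.ResultChan() {
+//		// event.Type is e.g. watch.Added or watch.Modified;
+//		// event.Object holds the changed TaskRun.
+//	}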
+func (c *taskRuns) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("taskruns"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Create takes the representation of a taskRun and creates it. Returns the server's representation of the taskRun, and an error, if there is any. +func (c *taskRuns) Create(taskRun *v1alpha1.TaskRun) (result *v1alpha1.TaskRun, err error) { + result = &v1alpha1.TaskRun{} + err = c.client.Post(). + Namespace(c.ns). + Resource("taskruns"). + Body(taskRun). + Do(). + Into(result) + return +} + +// Update takes the representation of a taskRun and updates it. Returns the server's representation of the taskRun, and an error, if there is any. +func (c *taskRuns) Update(taskRun *v1alpha1.TaskRun) (result *v1alpha1.TaskRun, err error) { + result = &v1alpha1.TaskRun{} + err = c.client.Put(). + Namespace(c.ns). + Resource("taskruns"). + Name(taskRun.Name). + Body(taskRun). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *taskRuns) UpdateStatus(taskRun *v1alpha1.TaskRun) (result *v1alpha1.TaskRun, err error) { + result = &v1alpha1.TaskRun{} + err = c.client.Put(). + Namespace(c.ns). + Resource("taskruns"). + Name(taskRun.Name). + SubResource("status"). + Body(taskRun). + Do(). + Into(result) + return +} + +// Delete takes name of the taskRun and deletes it. Returns an error if one occurs. +func (c *taskRuns) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("taskruns"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *taskRuns) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("taskruns"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched taskRun. +func (c *taskRuns) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.TaskRun, err error) { + result = &v1alpha1.TaskRun{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("taskruns"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/injection/client/client.go b/vendor/github.com/tektoncd/pipeline/pkg/client/injection/client/client.go new file mode 100644 index 000000000..5d9fa69ff --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/injection/client/client.go @@ -0,0 +1,49 @@ +/* +Copyright 2019 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. 
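+
+// A minimal usage sketch (illustrative; assumes the process was started
+// through knative/pkg's injection framework, which stores the client in the
+// context before controller constructors run):
+//
+//	func NewController(ctx context.Context) {
+//		tektonClient := client.Get(ctx)
+//		// use tektonClient.TektonV1alpha1() ...
+//	}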
+ +package client + +import ( + "context" + + injection "github.com/knative/pkg/injection" + logging "github.com/knative/pkg/logging" + versioned "github.com/tektoncd/pipeline/pkg/client/clientset/versioned" + rest "k8s.io/client-go/rest" +) + +func init() { + injection.Default.RegisterClient(withClient) +} + +// Key is used as the key for associating information with a context.Context. +type Key struct{} + +func withClient(ctx context.Context, cfg *rest.Config) context.Context { + return context.WithValue(ctx, Key{}, versioned.NewForConfigOrDie(cfg)) +} + +// Get extracts the versioned.Interface client from the context. +func Get(ctx context.Context) versioned.Interface { + untyped := ctx.Value(Key{}) + if untyped == nil { + logging.FromContext(ctx).Fatalf( + "Unable to fetch %T from context.", (versioned.Interface)(nil)) + } + return untyped.(versioned.Interface) +} diff --git a/vendor/go.opencensus.io/AUTHORS b/vendor/go.opencensus.io/AUTHORS new file mode 100644 index 000000000..e491a9e7f --- /dev/null +++ b/vendor/go.opencensus.io/AUTHORS @@ -0,0 +1 @@ +Google Inc. diff --git a/vendor/go.opencensus.io/LICENSE b/vendor/go.opencensus.io/LICENSE new file mode 100644 index 000000000..7a4a3ea24 --- /dev/null +++ b/vendor/go.opencensus.io/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/vendor/go.opencensus.io/exporter/prometheus/prometheus.go b/vendor/go.opencensus.io/exporter/prometheus/prometheus.go new file mode 100644 index 000000000..203bd38ad --- /dev/null +++ b/vendor/go.opencensus.io/exporter/prometheus/prometheus.go @@ -0,0 +1,295 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
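+
+// A minimal wiring sketch (illustrative; assumes views are registered and an
+// HTTP server is started elsewhere in the process):
+//
+//	exporter, err := prometheus.NewExporter(prometheus.Options{Namespace: "demo"})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	view.RegisterExporter(exporter)
+//	http.Handle("/metrics", exporter)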
+ +// Package prometheus contains a Prometheus exporter that supports exporting +// OpenCensus views as Prometheus metrics. +package prometheus // import "go.opencensus.io/exporter/prometheus" + +import ( + "bytes" + "fmt" + "log" + "net/http" + "sync" + + "go.opencensus.io/internal" + "go.opencensus.io/stats/view" + "go.opencensus.io/tag" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promhttp" +) + +// Exporter exports stats to Prometheus, users need +// to register the exporter as an http.Handler to be +// able to export. +type Exporter struct { + opts Options + g prometheus.Gatherer + c *collector + handler http.Handler +} + +// Options contains options for configuring the exporter. +type Options struct { + Namespace string + Registry *prometheus.Registry + OnError func(err error) + ConstLabels prometheus.Labels // ConstLabels will be set as labels on all views. +} + +// NewExporter returns an exporter that exports stats to Prometheus. +func NewExporter(o Options) (*Exporter, error) { + if o.Registry == nil { + o.Registry = prometheus.NewRegistry() + } + collector := newCollector(o, o.Registry) + e := &Exporter{ + opts: o, + g: o.Registry, + c: collector, + handler: promhttp.HandlerFor(o.Registry, promhttp.HandlerOpts{}), + } + return e, nil +} + +var _ http.Handler = (*Exporter)(nil) +var _ view.Exporter = (*Exporter)(nil) + +func (c *collector) registerViews(views ...*view.View) { + count := 0 + for _, view := range views { + sig := viewSignature(c.opts.Namespace, view) + c.registeredViewsMu.Lock() + _, ok := c.registeredViews[sig] + c.registeredViewsMu.Unlock() + + if !ok { + desc := prometheus.NewDesc( + viewName(c.opts.Namespace, view), + view.Description, + tagKeysToLabels(view.TagKeys), + c.opts.ConstLabels, + ) + c.registeredViewsMu.Lock() + c.registeredViews[sig] = desc + c.registeredViewsMu.Unlock() + count++ + } + } + if count == 0 { + return + } + + c.ensureRegisteredOnce() +} + +// ensureRegisteredOnce invokes reg.Register on the collector itself +// exactly once to ensure that we don't get errors such as +// cannot register the collector: descriptor Desc{fqName: *} +// already exists with the same fully-qualified name and const label values +// which is documented by Prometheus at +// https://github.com/prometheus/client_golang/blob/fcc130e101e76c5d303513d0e28f4b6d732845c7/prometheus/registry.go#L89-L101 +func (c *collector) ensureRegisteredOnce() { + c.registerOnce.Do(func() { + if err := c.reg.Register(c); err != nil { + c.opts.onError(fmt.Errorf("cannot register the collector: %v", err)) + } + }) + +} + +func (o *Options) onError(err error) { + if o.OnError != nil { + o.OnError(err) + } else { + log.Printf("Failed to export to Prometheus: %v", err) + } +} + +// ExportView exports to the Prometheus if view data has one or more rows. +// Each OpenCensus AggregationData will be converted to +// corresponding Prometheus Metric: SumData will be converted +// to Untyped Metric, CountData will be a Counter Metric, +// DistributionData will be a Histogram Metric. +func (e *Exporter) ExportView(vd *view.Data) { + if len(vd.Rows) == 0 { + return + } + e.c.addViewData(vd) +} + +// ServeHTTP serves the Prometheus endpoint. +func (e *Exporter) ServeHTTP(w http.ResponseWriter, r *http.Request) { + e.handler.ServeHTTP(w, r) +} + +// collector implements prometheus.Collector +type collector struct { + opts Options + mu sync.Mutex // mu guards all the fields. 
+ + registerOnce sync.Once + + // reg helps collector register views dynamically. + reg *prometheus.Registry + + // viewData are accumulated and atomically + // appended to on every Export invocation, from + // stats. These views are cleared out when + // Collect is invoked and the cycle is repeated. + viewData map[string]*view.Data + + registeredViewsMu sync.Mutex + // registeredViews maps a view to a prometheus desc. + registeredViews map[string]*prometheus.Desc +} + +func (c *collector) addViewData(vd *view.Data) { + c.registerViews(vd.View) + sig := viewSignature(c.opts.Namespace, vd.View) + + c.mu.Lock() + c.viewData[sig] = vd + c.mu.Unlock() +} + +func (c *collector) Describe(ch chan<- *prometheus.Desc) { + c.registeredViewsMu.Lock() + registered := make(map[string]*prometheus.Desc) + for k, desc := range c.registeredViews { + registered[k] = desc + } + c.registeredViewsMu.Unlock() + + for _, desc := range registered { + ch <- desc + } +} + +// Collect fetches the statistics from OpenCensus +// and delivers them as Prometheus Metrics. +// Collect is invoked everytime a prometheus.Gatherer is run +// for example when the HTTP endpoint is invoked by Prometheus. +func (c *collector) Collect(ch chan<- prometheus.Metric) { + // We need a copy of all the view data up until this point. + viewData := c.cloneViewData() + + for _, vd := range viewData { + sig := viewSignature(c.opts.Namespace, vd.View) + c.registeredViewsMu.Lock() + desc := c.registeredViews[sig] + c.registeredViewsMu.Unlock() + + for _, row := range vd.Rows { + metric, err := c.toMetric(desc, vd.View, row) + if err != nil { + c.opts.onError(err) + } else { + ch <- metric + } + } + } + +} + +func (c *collector) toMetric(desc *prometheus.Desc, v *view.View, row *view.Row) (prometheus.Metric, error) { + switch data := row.Data.(type) { + case *view.CountData: + return prometheus.NewConstMetric(desc, prometheus.CounterValue, float64(data.Value), tagValues(row.Tags, v.TagKeys)...) + + case *view.DistributionData: + points := make(map[float64]uint64) + // Histograms are cumulative in Prometheus. + // Get cumulative bucket counts. + cumCount := uint64(0) + for i, b := range v.Aggregation.Buckets { + cumCount += uint64(data.CountPerBucket[i]) + points[b] = cumCount + } + return prometheus.NewConstHistogram(desc, uint64(data.Count), data.Sum(), points, tagValues(row.Tags, v.TagKeys)...) + + case *view.SumData: + return prometheus.NewConstMetric(desc, prometheus.UntypedValue, data.Value, tagValues(row.Tags, v.TagKeys)...) + + case *view.LastValueData: + return prometheus.NewConstMetric(desc, prometheus.GaugeValue, data.Value, tagValues(row.Tags, v.TagKeys)...) + + default: + return nil, fmt.Errorf("aggregation %T is not yet supported", v.Aggregation) + } +} + +func tagKeysToLabels(keys []tag.Key) (labels []string) { + for _, key := range keys { + labels = append(labels, internal.Sanitize(key.Name())) + } + return labels +} + +func newCollector(opts Options, registrar *prometheus.Registry) *collector { + return &collector{ + reg: registrar, + opts: opts, + registeredViews: make(map[string]*prometheus.Desc), + viewData: make(map[string]*view.Data), + } +} + +func tagValues(t []tag.Tag, expectedKeys []tag.Key) []string { + var values []string + // Add empty string for all missing keys in the tags map. 
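+	// For example, given expectedKeys [a b c] and tags [{b "x"}], the loop
+	// below produces ["", "x", ""]; the logic assumes both slices are
+	// ordered consistently by key.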
+ idx := 0 + for _, t := range t { + for t.Key != expectedKeys[idx] { + idx++ + values = append(values, "") + } + values = append(values, t.Value) + idx++ + } + for idx < len(expectedKeys) { + idx++ + values = append(values, "") + } + return values +} + +func viewName(namespace string, v *view.View) string { + var name string + if namespace != "" { + name = namespace + "_" + } + return name + internal.Sanitize(v.Name) +} + +func viewSignature(namespace string, v *view.View) string { + var buf bytes.Buffer + buf.WriteString(viewName(namespace, v)) + for _, k := range v.TagKeys { + buf.WriteString("-" + k.Name()) + } + return buf.String() +} + +func (c *collector) cloneViewData() map[string]*view.Data { + c.mu.Lock() + defer c.mu.Unlock() + + viewDataCopy := make(map[string]*view.Data) + for sig, viewData := range c.viewData { + viewDataCopy[sig] = viewData + } + return viewDataCopy +} diff --git a/vendor/go.opencensus.io/internal/internal.go b/vendor/go.opencensus.io/internal/internal.go new file mode 100644 index 000000000..9a638781c --- /dev/null +++ b/vendor/go.opencensus.io/internal/internal.go @@ -0,0 +1,37 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal // import "go.opencensus.io/internal" + +import ( + "fmt" + "time" + + opencensus "go.opencensus.io" +) + +// UserAgent is the user agent to be added to the outgoing +// requests from the exporters. +var UserAgent = fmt.Sprintf("opencensus-go/%s", opencensus.Version()) + +// MonotonicEndTime returns the end time at present +// but offset from start, monotonically. +// +// The monotonic clock is used in subtractions hence +// the duration since start added back to start gives +// end as a monotonic time. +// See https://golang.org/pkg/time/#hdr-Monotonic_Clocks +func MonotonicEndTime(start time.Time) time.Time { + return start.Add(time.Now().Sub(start)) +} diff --git a/vendor/go.opencensus.io/internal/sanitize.go b/vendor/go.opencensus.io/internal/sanitize.go new file mode 100644 index 000000000..de8ccf236 --- /dev/null +++ b/vendor/go.opencensus.io/internal/sanitize.go @@ -0,0 +1,50 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +import ( + "strings" + "unicode" +) + +const labelKeySizeLimit = 100 + +// Sanitize returns a string that is trunacated to 100 characters if it's too +// long, and replaces non-alphanumeric characters to underscores. 
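+//
+// For example, under these rules "metric-name" becomes "metric_name",
+// "0errors" becomes "key_0errors", and "_tmp" becomes "key_tmp".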
+func Sanitize(s string) string { + if len(s) == 0 { + return s + } + if len(s) > labelKeySizeLimit { + s = s[:labelKeySizeLimit] + } + s = strings.Map(sanitizeRune, s) + if unicode.IsDigit(rune(s[0])) { + s = "key_" + s + } + if s[0] == '_' { + s = "key" + s + } + return s +} + +// converts anything that is not a letter or digit to an underscore +func sanitizeRune(r rune) rune { + if unicode.IsLetter(r) || unicode.IsDigit(r) { + return r + } + // Everything else turns into an underscore + return '_' +} diff --git a/vendor/go.opencensus.io/internal/tagencoding/tagencoding.go b/vendor/go.opencensus.io/internal/tagencoding/tagencoding.go new file mode 100644 index 000000000..41b2c3fc0 --- /dev/null +++ b/vendor/go.opencensus.io/internal/tagencoding/tagencoding.go @@ -0,0 +1,75 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Package tagencoding contains the tag encoding +// used internally by the stats collector. +package tagencoding // import "go.opencensus.io/internal/tagencoding" + +// Values represents the encoded buffer for the values. +type Values struct { + Buffer []byte + WriteIndex int + ReadIndex int +} + +func (vb *Values) growIfRequired(expected int) { + if len(vb.Buffer)-vb.WriteIndex < expected { + tmp := make([]byte, 2*(len(vb.Buffer)+1)+expected) + copy(tmp, vb.Buffer) + vb.Buffer = tmp + } +} + +// WriteValue is the helper method to encode Values from map[Key][]byte. +func (vb *Values) WriteValue(v []byte) { + length := len(v) & 0xff + vb.growIfRequired(1 + length) + + // writing length of v + vb.Buffer[vb.WriteIndex] = byte(length) + vb.WriteIndex++ + + if length == 0 { + // No value was encoded for this key + return + } + + // writing v + copy(vb.Buffer[vb.WriteIndex:], v[:length]) + vb.WriteIndex += length +} + +// ReadValue is the helper method to decode Values to a map[Key][]byte. +func (vb *Values) ReadValue() []byte { + // read length of v + length := int(vb.Buffer[vb.ReadIndex]) + vb.ReadIndex++ + if length == 0 { + // No value was encoded for this key + return nil + } + + // read value of v + v := make([]byte, length) + endIdx := vb.ReadIndex + length + copy(v, vb.Buffer[vb.ReadIndex:endIdx]) + vb.ReadIndex = endIdx + return v +} + +// Bytes returns a reference to already written bytes in the Buffer. +func (vb *Values) Bytes() []byte { + return vb.Buffer[:vb.WriteIndex] +} diff --git a/vendor/go.opencensus.io/internal/traceinternals.go b/vendor/go.opencensus.io/internal/traceinternals.go new file mode 100644 index 000000000..073af7b47 --- /dev/null +++ b/vendor/go.opencensus.io/internal/traceinternals.go @@ -0,0 +1,53 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License.
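Aside, not part of the vendored sources: the unexported tagValues helper in the Prometheus exporter above pads missing tags with empty strings so every row lines up with the view's full key list. A minimal standalone sketch of that alignment logic, with invented names, assuming the row's tags arrive sorted in the same order as the expected keys (as OpenCensus tag maps are):

package main

import "fmt"

type kv struct{ key, value string }

// alignValues mirrors tagValues: walk the expected keys in order and
// emit "" for every key the row does not carry.
func alignValues(tags []kv, expectedKeys []string) []string {
	var values []string
	idx := 0
	for _, t := range tags {
		for t.key != expectedKeys[idx] {
			idx++
			values = append(values, "")
		}
		values = append(values, t.value)
		idx++
	}
	for idx < len(expectedKeys) {
		idx++
		values = append(values, "")
	}
	return values
}

func main() {
	// Expected keys: method, status, zone; the row only has method and zone.
	got := alignValues(
		[]kv{{"method", "GET"}, {"zone", "us-east1"}},
		[]string{"method", "status", "zone"},
	)
	fmt.Println(got) // [GET  us-east1]; the empty string fills the missing status
}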
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +import ( + "time" +) + +// Trace allows internal access to some trace functionality. +// TODO(#412): remove this +var Trace interface{} + +// LocalSpanStoreEnabled is true if the local span store is enabled. +var LocalSpanStoreEnabled bool + +// BucketConfiguration stores the number of samples to store for span buckets +// for successful and failed spans for a particular span name. +type BucketConfiguration struct { + Name string + MaxRequestsSucceeded int + MaxRequestsErrors int +} + +// PerMethodSummary is a summary of the spans stored for a single span name. +type PerMethodSummary struct { + Active int + LatencyBuckets []LatencyBucketSummary + ErrorBuckets []ErrorBucketSummary +} + +// LatencyBucketSummary is a summary of a latency bucket. +type LatencyBucketSummary struct { + MinLatency, MaxLatency time.Duration + Size int +} + +// ErrorBucketSummary is a summary of an error bucket. +type ErrorBucketSummary struct { + ErrorCode int32 + Size int +} diff --git a/vendor/go.opencensus.io/metric/metricdata/doc.go b/vendor/go.opencensus.io/metric/metricdata/doc.go new file mode 100644 index 000000000..52a7b3bf8 --- /dev/null +++ b/vendor/go.opencensus.io/metric/metricdata/doc.go @@ -0,0 +1,19 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package metricdata contains the metrics data model. +// +// This is an EXPERIMENTAL package, and may change in arbitrary ways without +// notice. +package metricdata // import "go.opencensus.io/metric/metricdata" diff --git a/vendor/go.opencensus.io/metric/metricdata/exemplar.go b/vendor/go.opencensus.io/metric/metricdata/exemplar.go new file mode 100644 index 000000000..cdbeef058 --- /dev/null +++ b/vendor/go.opencensus.io/metric/metricdata/exemplar.go @@ -0,0 +1,33 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metricdata + +import ( + "time" +) + +// Exemplar is an example data point associated with each bucket of a +// distribution type aggregation.
+// +// Their purpose is to provide an example of the kind of thing +// (request, RPC, trace span, etc.) that resulted in that measurement. +type Exemplar struct { + Value float64 // the value that was recorded + Timestamp time.Time // the time the value was recorded + Attachments Attachments // attachments (if any) +} + +// Attachments is a map of extra values associated with a recorded data point. +type Attachments map[string]interface{} diff --git a/vendor/go.opencensus.io/metric/metricdata/label.go b/vendor/go.opencensus.io/metric/metricdata/label.go new file mode 100644 index 000000000..87c55b9c8 --- /dev/null +++ b/vendor/go.opencensus.io/metric/metricdata/label.go @@ -0,0 +1,28 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metricdata + +// LabelValue represents the value of a label. +// The zero value represents a missing label value, which may be treated +// differently from an empty string value by some back ends. +type LabelValue struct { + Value string // string value of the label + Present bool // flag that indicates whether a value is present or not +} + +// NewLabelValue creates a new non-nil LabelValue that represents the given string. +func NewLabelValue(val string) LabelValue { + return LabelValue{Value: val, Present: true} +} diff --git a/vendor/go.opencensus.io/metric/metricdata/metric.go b/vendor/go.opencensus.io/metric/metricdata/metric.go new file mode 100644 index 000000000..6ccdec583 --- /dev/null +++ b/vendor/go.opencensus.io/metric/metricdata/metric.go @@ -0,0 +1,46 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metricdata + +import ( + "time" + + "go.opencensus.io/resource" +) + +// Descriptor holds metadata about a metric. +type Descriptor struct { + Name string // full name of the metric + Description string // human-readable description + Unit Unit // units for the measure + Type Type // type of measure + LabelKeys []string // label keys +} + +// Metric represents a quantity measured against a resource with different +// label value combinations. +type Metric struct { + Descriptor Descriptor // metric descriptor + Resource *resource.Resource // resource against which this was measured + TimeSeries []*TimeSeries // one time series for each combination of label values +} + +// TimeSeries is a sequence of points associated with a combination of label +// values.
+type TimeSeries struct { + LabelValues []LabelValue // label values, same order as keys in the metric descriptor + Points []Point // points sequence + StartTime time.Time // time we started recording this time series +} diff --git a/vendor/go.opencensus.io/metric/metricdata/point.go b/vendor/go.opencensus.io/metric/metricdata/point.go new file mode 100644 index 000000000..7fe057b19 --- /dev/null +++ b/vendor/go.opencensus.io/metric/metricdata/point.go @@ -0,0 +1,193 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metricdata + +import ( + "time" +) + +// Point is a single data point of a time series. +type Point struct { + // Time is the point in time that this point represents in a time series. + Time time.Time + // Value is the value of this point. Prefer using ReadValue to switching on + // the value type, since new value types might be added. + Value interface{} +} + +//go:generate stringer -type ValueType + +// NewFloat64Point creates a new Point holding a float64 value. +func NewFloat64Point(t time.Time, val float64) Point { + return Point{ + Value: val, + Time: t, + } +} + +// NewInt64Point creates a new Point holding an int64 value. +func NewInt64Point(t time.Time, val int64) Point { + return Point{ + Value: val, + Time: t, + } +} + +// NewDistributionPoint creates a new Point holding a Distribution value. +func NewDistributionPoint(t time.Time, val *Distribution) Point { + return Point{ + Value: val, + Time: t, + } +} + +// NewSummaryPoint creates a new Point holding a Summary value. +func NewSummaryPoint(t time.Time, val *Summary) Point { + return Point{ + Value: val, + Time: t, + } +} + +// ValueVisitor allows reading the value of a point. +type ValueVisitor interface { + VisitFloat64Value(float64) + VisitInt64Value(int64) + VisitDistributionValue(*Distribution) + VisitSummaryValue(*Summary) +} + +// ReadValue accepts a ValueVisitor and calls the appropriate method with the +// value of this point. +// Consumers of Point should use this in preference to switching on the type +// of the value directly, since new value types may be added. +func (p Point) ReadValue(vv ValueVisitor) { + switch v := p.Value.(type) { + case int64: + vv.VisitInt64Value(v) + case float64: + vv.VisitFloat64Value(v) + case *Distribution: + vv.VisitDistributionValue(v) + case *Summary: + vv.VisitSummaryValue(v) + default: + panic("unexpected value type") + } +} + +// Distribution contains summary statistics for a population of values. It +// optionally contains a histogram representing the distribution of those +// values across a set of buckets. +type Distribution struct { + // Count is the number of values in the population. Must be non-negative. This value + // must equal the sum of the values in bucket_counts if a histogram is + // provided. + Count int64 + // Sum is the sum of the values in the population. If count is zero then this field + // must be zero. 
+ Sum float64 + // SumOfSquaredDeviation is the sum of squared deviations from the mean of the values in the + // population. For values x_i this is: + // + // Sum[i=1..n]((x_i - mean)^2) + // + // Knuth, "The Art of Computer Programming", Vol. 2, page 323, 3rd edition + // describes Welford's method for accumulating this sum in one pass. + // + // If count is zero then this field must be zero. + SumOfSquaredDeviation float64 + // BucketOptions describes the bounds of the histogram buckets in this + // distribution. + // + // A Distribution may optionally contain a histogram of the values in the + // population. + // + // If nil, there is no associated histogram. + BucketOptions *BucketOptions + // Buckets contains the histogram bucket counts. If the distribution does not + // have a histogram, this field is omitted. If there is a histogram, then the + // sum of the values in Buckets must equal the value in the Count field of + // the distribution. + Buckets []Bucket +} + +// BucketOptions describes the bounds of the histogram buckets in this +// distribution. +type BucketOptions struct { + // Bounds specifies a set of bucket upper bounds. + // This defines len(bounds) + 1 (= N) buckets. The boundaries for bucket + // index i are: + // + // [0, Bounds[i]) for i == 0 + // [Bounds[i-1], Bounds[i]) for 0 < i < N-1 + // [Bounds[i-1], +infinity) for i == N-1 + Bounds []float64 +} + +// Bucket represents a single bucket (value range) in a distribution. +type Bucket struct { + // Count is the number of values in this bucket of the histogram, as described in + // bucket_bounds. + Count int64 + // Exemplar associated with this bucket (if any). + Exemplar *Exemplar +} + +// Summary is a representation of percentiles. +type Summary struct { + // Count is the cumulative count (if available). + Count int64 + // Sum is the cumulative sum of values (if available). + Sum float64 + // HasCountAndSum is true if Count and Sum are available. + HasCountAndSum bool + // Snapshot represents percentiles calculated over an arbitrary time window. + // The values in this struct can be reset at arbitrary unknown times, with + // the requirement that all of them are reset at the same time. + Snapshot Snapshot +} + +// Snapshot represents percentiles over an arbitrary time. +// The values in this struct can be reset at arbitrary unknown times, with +// the requirement that all of them are reset at the same time. +type Snapshot struct { + // Count is the number of values in the snapshot. Optional since some systems don't + // expose this. Set to 0 if not available. + Count int64 + // Sum is the sum of values in the snapshot. Optional since some systems don't + // expose this. If count is 0 then this field must be zero. + Sum float64 + // Percentiles is a map from percentile (range (0-100.0]) to the value of + // the percentile. + Percentiles map[float64]float64 +} + +//go:generate stringer -type Type + +// Type is the overall type of metric, including its value type and whether it +// represents a cumulative total (since the start time) or if it represents a +// gauge value. +type Type int + +// Metric types.
+const ( + TypeGaugeInt64 Type = iota + TypeGaugeFloat64 + TypeGaugeDistribution + TypeCumulativeInt64 + TypeCumulativeFloat64 + TypeCumulativeDistribution + TypeSummary +) diff --git a/vendor/go.opencensus.io/metric/metricdata/type_string.go b/vendor/go.opencensus.io/metric/metricdata/type_string.go new file mode 100644 index 000000000..c3f8ec27b --- /dev/null +++ b/vendor/go.opencensus.io/metric/metricdata/type_string.go @@ -0,0 +1,16 @@ +// Code generated by "stringer -type Type"; DO NOT EDIT. + +package metricdata + +import "strconv" + +const _Type_name = "TypeGaugeInt64TypeGaugeFloat64TypeGaugeDistributionTypeCumulativeInt64TypeCumulativeFloat64TypeCumulativeDistributionTypeSummary" + +var _Type_index = [...]uint8{0, 14, 30, 51, 70, 91, 117, 128} + +func (i Type) String() string { + if i < 0 || i >= Type(len(_Type_index)-1) { + return "Type(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _Type_name[_Type_index[i]:_Type_index[i+1]] +} diff --git a/vendor/go.opencensus.io/metric/metricdata/unit.go b/vendor/go.opencensus.io/metric/metricdata/unit.go new file mode 100644 index 000000000..b483a1371 --- /dev/null +++ b/vendor/go.opencensus.io/metric/metricdata/unit.go @@ -0,0 +1,27 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metricdata + +// Unit is a string encoded according to the case-sensitive abbreviations from the +// Unified Code for Units of Measure: http://unitsofmeasure.org/ucum.html +type Unit string + +// Predefined units. To record against a unit not represented here, create your +// own Unit type constant from a string. +const ( + UnitDimensionless Unit = "1" + UnitBytes Unit = "By" + UnitMilliseconds Unit = "ms" +) diff --git a/vendor/go.opencensus.io/metric/metricproducer/manager.go b/vendor/go.opencensus.io/metric/metricproducer/manager.go new file mode 100644 index 000000000..ca1f39049 --- /dev/null +++ b/vendor/go.opencensus.io/metric/metricproducer/manager.go @@ -0,0 +1,78 @@ +// Copyright 2019, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metricproducer + +import ( + "sync" +) + +// Manager maintains a list of active producers. Producers can register +// with the manager to allow readers to read all metrics provided by them. +// Readers can retrieve all producers registered with the manager, +// read metrics from the producers and export them. 
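Aside, not part of the vendored sources: a minimal sketch of how the metricdata types above compose into a complete metric. The metric name and label value are invented for illustration; one LabelValue is supplied per LabelKey, in the same order:

package main

import (
	"fmt"
	"time"

	"go.opencensus.io/metric/metricdata"
)

func main() {
	now := time.Now()
	m := &metricdata.Metric{
		Descriptor: metricdata.Descriptor{
			Name:        "example.com/measures/active_sessions", // invented name
			Description: "Number of active sessions.",
			Unit:        metricdata.UnitDimensionless,
			Type:        metricdata.TypeGaugeInt64,
			LabelKeys:   []string{"zone"},
		},
		TimeSeries: []*metricdata.TimeSeries{{
			LabelValues: []metricdata.LabelValue{metricdata.NewLabelValue("us-east1")},
			Points:      []metricdata.Point{metricdata.NewInt64Point(now, 42)},
			StartTime:   now,
		}},
	}
	fmt.Println(m.Descriptor.Name, len(m.TimeSeries))
}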
+type Manager struct { + mu sync.RWMutex + producers map[Producer]struct{} +} + +var prodMgr *Manager +var once sync.Once + +// GlobalManager is a single instance of producer manager +// that is used by all producers and all readers. +func GlobalManager() *Manager { + once.Do(func() { + prodMgr = &Manager{} + prodMgr.producers = make(map[Producer]struct{}) + }) + return prodMgr +} + +// AddProducer adds the producer to the Manager if it is not already present. +func (pm *Manager) AddProducer(producer Producer) { + if producer == nil { + return + } + pm.mu.Lock() + defer pm.mu.Unlock() + pm.producers[producer] = struct{}{} +} + +// DeleteProducer deletes the producer from the Manager if it is present. +func (pm *Manager) DeleteProducer(producer Producer) { + if producer == nil { + return + } + pm.mu.Lock() + defer pm.mu.Unlock() + delete(pm.producers, producer) +} + +// GetAll returns a slice of all producers currently registered with +// the Manager. For each call it generates a new slice. The slice +// should not be cached as registration may change at any time. It is +// typically called periodically by an exporter to read metrics from +// the producers. +func (pm *Manager) GetAll() []Producer { + pm.mu.Lock() + defer pm.mu.Unlock() + producers := make([]Producer, len(pm.producers)) + i := 0 + for producer := range pm.producers { + producers[i] = producer + i++ + } + return producers +} diff --git a/vendor/go.opencensus.io/metric/metricproducer/producer.go b/vendor/go.opencensus.io/metric/metricproducer/producer.go new file mode 100644 index 000000000..6cee9ed17 --- /dev/null +++ b/vendor/go.opencensus.io/metric/metricproducer/producer.go @@ -0,0 +1,28 @@ +// Copyright 2019, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metricproducer + +import ( + "go.opencensus.io/metric/metricdata" +) + +// Producer is a source of metrics. +type Producer interface { + // Read should return the current values of all metrics supported by this + // metric provider. + // The returned metrics should be unique for each combination of name and + // resource. + Read() []*metricdata.Metric +} diff --git a/vendor/go.opencensus.io/opencensus.go b/vendor/go.opencensus.io/opencensus.go new file mode 100644 index 000000000..d2565f1e2 --- /dev/null +++ b/vendor/go.opencensus.io/opencensus.go @@ -0,0 +1,21 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package opencensus contains Go support for OpenCensus.
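Aside, not part of the vendored sources: a hypothetical Producer registered with the global manager above, sketching the read path an exporter would use:

package main

import (
	"fmt"

	"go.opencensus.io/metric/metricdata"
	"go.opencensus.io/metric/metricproducer"
)

// staticProducer is an invented Producer that always reports the same metrics.
type staticProducer struct {
	metrics []*metricdata.Metric
}

func (p *staticProducer) Read() []*metricdata.Metric { return p.metrics }

func main() {
	p := &staticProducer{}
	metricproducer.GlobalManager().AddProducer(p)
	defer metricproducer.GlobalManager().DeleteProducer(p)

	// An exporter would call GetAll periodically and export each Read result.
	for _, producer := range metricproducer.GlobalManager().GetAll() {
		fmt.Println(len(producer.Read()), "metrics")
	}
}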
+package opencensus // import "go.opencensus.io" + +// Version is the current release version of OpenCensus in use. +func Version() string { + return "0.21.0" +} diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/client.go b/vendor/go.opencensus.io/plugin/ocgrpc/client.go new file mode 100644 index 000000000..a6c466ae8 --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ocgrpc/client.go @@ -0,0 +1,56 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ocgrpc + +import ( + "go.opencensus.io/trace" + "golang.org/x/net/context" + + "google.golang.org/grpc/stats" +) + +// ClientHandler implements a gRPC stats.Handler for recording OpenCensus stats and +// traces. Use with gRPC clients only. +type ClientHandler struct { + // StartOptions allows configuring the StartOptions used to create new spans. + // + // StartOptions.SpanKind will always be set to trace.SpanKindClient + // for spans started by this handler. + StartOptions trace.StartOptions +} + +// HandleConn exists to satisfy gRPC stats.Handler. +func (c *ClientHandler) HandleConn(ctx context.Context, cs stats.ConnStats) { + // no-op +} + +// TagConn exists to satisfy gRPC stats.Handler. +func (c *ClientHandler) TagConn(ctx context.Context, cti *stats.ConnTagInfo) context.Context { + // no-op + return ctx +} + +// HandleRPC implements per-RPC tracing and stats instrumentation. +func (c *ClientHandler) HandleRPC(ctx context.Context, rs stats.RPCStats) { + traceHandleRPC(ctx, rs) + statsHandleRPC(ctx, rs) +} + +// TagRPC implements per-RPC context management. +func (c *ClientHandler) TagRPC(ctx context.Context, rti *stats.RPCTagInfo) context.Context { + ctx = c.traceTagRPC(ctx, rti) + ctx = c.statsTagRPC(ctx, rti) + return ctx +} diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/client_metrics.go b/vendor/go.opencensus.io/plugin/ocgrpc/client_metrics.go new file mode 100644 index 000000000..abe978b67 --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ocgrpc/client_metrics.go @@ -0,0 +1,107 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
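Aside, not part of the vendored sources: a minimal sketch of wiring the ClientHandler above into a gRPC client. The target address is hypothetical, and DefaultClientViews is declared just below in client_metrics.go:

package main

import (
	"log"

	"go.opencensus.io/plugin/ocgrpc"
	"go.opencensus.io/stats/view"
	"google.golang.org/grpc"
)

func main() {
	// Register the default client views so the RPC measures are aggregated.
	if err := view.Register(ocgrpc.DefaultClientViews...); err != nil {
		log.Fatal(err)
	}
	// Attach the handler; TagRPC and HandleRPC then run for every outgoing RPC.
	conn, err := grpc.Dial("localhost:50051",
		grpc.WithInsecure(),
		grpc.WithStatsHandler(&ocgrpc.ClientHandler{}))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}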
+// + +package ocgrpc + +import ( + "go.opencensus.io/stats" + "go.opencensus.io/stats/view" + "go.opencensus.io/tag" +) + +// The following variables are measures recorded by ClientHandler: +var ( + ClientSentMessagesPerRPC = stats.Int64("grpc.io/client/sent_messages_per_rpc", "Number of messages sent in the RPC (always 1 for non-streaming RPCs).", stats.UnitDimensionless) + ClientSentBytesPerRPC = stats.Int64("grpc.io/client/sent_bytes_per_rpc", "Total bytes sent across all request messages per RPC.", stats.UnitBytes) + ClientReceivedMessagesPerRPC = stats.Int64("grpc.io/client/received_messages_per_rpc", "Number of response messages received per RPC (always 1 for non-streaming RPCs).", stats.UnitDimensionless) + ClientReceivedBytesPerRPC = stats.Int64("grpc.io/client/received_bytes_per_rpc", "Total bytes received across all response messages per RPC.", stats.UnitBytes) + ClientRoundtripLatency = stats.Float64("grpc.io/client/roundtrip_latency", "Time between first byte of request sent to last byte of response received, or terminal error.", stats.UnitMilliseconds) + ClientServerLatency = stats.Float64("grpc.io/client/server_latency", `Propagated from the server and should have the same value as "grpc.io/server/latency".`, stats.UnitMilliseconds) +) + +// Predefined views may be registered to collect data for the above measures. +// As always, you may also define your own custom views over measures collected by this +// package. These are declared as a convenience only; none are registered by +// default. +var ( + ClientSentBytesPerRPCView = &view.View{ + Measure: ClientSentBytesPerRPC, + Name: "grpc.io/client/sent_bytes_per_rpc", + Description: "Distribution of bytes sent per RPC, by method.", + TagKeys: []tag.Key{KeyClientMethod}, + Aggregation: DefaultBytesDistribution, + } + + ClientReceivedBytesPerRPCView = &view.View{ + Measure: ClientReceivedBytesPerRPC, + Name: "grpc.io/client/received_bytes_per_rpc", + Description: "Distribution of bytes received per RPC, by method.", + TagKeys: []tag.Key{KeyClientMethod}, + Aggregation: DefaultBytesDistribution, + } + + ClientRoundtripLatencyView = &view.View{ + Measure: ClientRoundtripLatency, + Name: "grpc.io/client/roundtrip_latency", + Description: "Distribution of round-trip latency, by method.", + TagKeys: []tag.Key{KeyClientMethod}, + Aggregation: DefaultMillisecondsDistribution, + } + + ClientCompletedRPCsView = &view.View{ + Measure: ClientRoundtripLatency, + Name: "grpc.io/client/completed_rpcs", + Description: "Count of RPCs by method and status.", + TagKeys: []tag.Key{KeyClientMethod, KeyClientStatus}, + Aggregation: view.Count(), + } + + ClientSentMessagesPerRPCView = &view.View{ + Measure: ClientSentMessagesPerRPC, + Name: "grpc.io/client/sent_messages_per_rpc", + Description: "Distribution of sent messages count per RPC, by method.", + TagKeys: []tag.Key{KeyClientMethod}, + Aggregation: DefaultMessageCountDistribution, + } + + ClientReceivedMessagesPerRPCView = &view.View{ + Measure: ClientReceivedMessagesPerRPC, + Name: "grpc.io/client/received_messages_per_rpc", + Description: "Distribution of received messages count per RPC, by method.", + TagKeys: []tag.Key{KeyClientMethod}, + Aggregation: DefaultMessageCountDistribution, + } + + ClientServerLatencyView = &view.View{ + Measure: ClientServerLatency, + Name: "grpc.io/client/server_latency", + Description: "Distribution of server latency as viewed by client, by method.", + TagKeys: []tag.Key{KeyClientMethod}, + Aggregation: DefaultMillisecondsDistribution, + } +) + +// 
DefaultClientViews are the default client views provided by this package. +var DefaultClientViews = []*view.View{ + ClientSentBytesPerRPCView, + ClientReceivedBytesPerRPCView, + ClientRoundtripLatencyView, + ClientCompletedRPCsView, +} + +// TODO(jbd): Add roundtrip_latency, uncompressed_request_bytes, uncompressed_response_bytes, request_count, response_count. +// TODO(acetechnologist): This is temporary and will need to be replaced by a +// mechanism to load these defaults from a common repository/config shared by +// all supported languages. Likely a serialized protobuf of these defaults. diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/client_stats_handler.go b/vendor/go.opencensus.io/plugin/ocgrpc/client_stats_handler.go new file mode 100644 index 000000000..303c607f6 --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ocgrpc/client_stats_handler.go @@ -0,0 +1,49 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package ocgrpc + +import ( + "time" + + "go.opencensus.io/tag" + "golang.org/x/net/context" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/stats" +) + +// statsTagRPC gets the tag.Map populated by the application code, serializes +// its tags into the GRPC metadata in order to be sent to the server. +func (h *ClientHandler) statsTagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context { + startTime := time.Now() + if info == nil { + if grpclog.V(2) { + grpclog.Info("clientHandler.TagRPC called with nil info.") + } + return ctx + } + + d := &rpcData{ + startTime: startTime, + method: info.FullMethodName, + } + ts := tag.FromContext(ctx) + if ts != nil { + encoded := tag.Encode(ts) + ctx = stats.SetTags(ctx, encoded) + } + + return context.WithValue(ctx, rpcDataKey, d) +} diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/doc.go b/vendor/go.opencensus.io/plugin/ocgrpc/doc.go new file mode 100644 index 000000000..1370323fb --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ocgrpc/doc.go @@ -0,0 +1,19 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package ocgrpc contains OpenCensus stats and trace +// integrations for gRPC. +// +// Use ServerHandler for servers and ClientHandler for clients.
+package ocgrpc // import "go.opencensus.io/plugin/ocgrpc" diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/server.go b/vendor/go.opencensus.io/plugin/ocgrpc/server.go new file mode 100644 index 000000000..b67b3e2be --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ocgrpc/server.go @@ -0,0 +1,80 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ocgrpc + +import ( + "go.opencensus.io/trace" + "golang.org/x/net/context" + + "google.golang.org/grpc/stats" +) + +// ServerHandler implements gRPC stats.Handler recording OpenCensus stats and +// traces. Use with gRPC servers. +// +// When installed (see Example), tracing metadata is read from inbound RPCs +// by default. If no tracing metadata is present, or if the tracing metadata is +// present but the SpanContext isn't sampled, then a new trace may be started +// (as determined by Sampler). +type ServerHandler struct { + // IsPublicEndpoint may be set to true to always start a new trace around + // each RPC. Any SpanContext in the RPC metadata will be added as a linked + // span instead of making it the parent of the span created around the + // server RPC. + // + // Be aware that if you leave this false (the default) on a public-facing + // server, callers will be able to send tracing metadata in gRPC headers + // and trigger traces in your backend. + IsPublicEndpoint bool + + // StartOptions to use for spans started around RPCs handled by this server. + // + // These will apply even if there is tracing metadata already + // present on the inbound RPC but the SpanContext is not sampled. This + // ensures that each service has some opportunity to be traced. If you would + // like to not add any additional traces for this gRPC service, set: + // + // StartOptions.Sampler = trace.ProbabilitySampler(0.0) + // + // StartOptions.SpanKind will always be set to trace.SpanKindServer + // for spans started by this handler. + StartOptions trace.StartOptions +} + +var _ stats.Handler = (*ServerHandler)(nil) + +// HandleConn exists to satisfy gRPC stats.Handler. +func (s *ServerHandler) HandleConn(ctx context.Context, cs stats.ConnStats) { + // no-op +} + +// TagConn exists to satisfy gRPC stats.Handler. +func (s *ServerHandler) TagConn(ctx context.Context, cti *stats.ConnTagInfo) context.Context { + // no-op + return ctx +} + +// HandleRPC implements per-RPC tracing and stats instrumentation. +func (s *ServerHandler) HandleRPC(ctx context.Context, rs stats.RPCStats) { + traceHandleRPC(ctx, rs) + statsHandleRPC(ctx, rs) +} + +// TagRPC implements per-RPC context management.
+func (s *ServerHandler) TagRPC(ctx context.Context, rti *stats.RPCTagInfo) context.Context { + ctx = s.traceTagRPC(ctx, rti) + ctx = s.statsTagRPC(ctx, rti) + return ctx +} diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/server_metrics.go b/vendor/go.opencensus.io/plugin/ocgrpc/server_metrics.go new file mode 100644 index 000000000..609d9ed24 --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ocgrpc/server_metrics.go @@ -0,0 +1,97 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package ocgrpc + +import ( + "go.opencensus.io/stats" + "go.opencensus.io/stats/view" + "go.opencensus.io/tag" +) + +// The following variables are measures recorded by ServerHandler: +var ( + ServerReceivedMessagesPerRPC = stats.Int64("grpc.io/server/received_messages_per_rpc", "Number of messages received in each RPC. Has value 1 for non-streaming RPCs.", stats.UnitDimensionless) + ServerReceivedBytesPerRPC = stats.Int64("grpc.io/server/received_bytes_per_rpc", "Total bytes received across all messages per RPC.", stats.UnitBytes) + ServerSentMessagesPerRPC = stats.Int64("grpc.io/server/sent_messages_per_rpc", "Number of messages sent in each RPC. Has value 1 for non-streaming RPCs.", stats.UnitDimensionless) + ServerSentBytesPerRPC = stats.Int64("grpc.io/server/sent_bytes_per_rpc", "Total bytes sent across all response messages per RPC.", stats.UnitBytes) + ServerLatency = stats.Float64("grpc.io/server/server_latency", "Time between first byte of request received to last byte of response sent, or terminal error.", stats.UnitMilliseconds) +) + +// TODO(acetechnologist): This is temporary and will need to be replaced by a +// mechanism to load these defaults from a common repository/config shared by +// all supported languages. Likely a serialized protobuf of these defaults. + +// Predefined views may be registered to collect data for the above measures. +// As always, you may also define your own custom views over measures collected by this +// package. These are declared as a convenience only; none are registered by +// default.
+var ( + ServerReceivedBytesPerRPCView = &view.View{ + Name: "grpc.io/server/received_bytes_per_rpc", + Description: "Distribution of received bytes per RPC, by method.", + Measure: ServerReceivedBytesPerRPC, + TagKeys: []tag.Key{KeyServerMethod}, + Aggregation: DefaultBytesDistribution, + } + + ServerSentBytesPerRPCView = &view.View{ + Name: "grpc.io/server/sent_bytes_per_rpc", + Description: "Distribution of total sent bytes per RPC, by method.", + Measure: ServerSentBytesPerRPC, + TagKeys: []tag.Key{KeyServerMethod}, + Aggregation: DefaultBytesDistribution, + } + + ServerLatencyView = &view.View{ + Name: "grpc.io/server/server_latency", + Description: "Distribution of server latency in milliseconds, by method.", + TagKeys: []tag.Key{KeyServerMethod}, + Measure: ServerLatency, + Aggregation: DefaultMillisecondsDistribution, + } + + ServerCompletedRPCsView = &view.View{ + Name: "grpc.io/server/completed_rpcs", + Description: "Count of RPCs by method and status.", + TagKeys: []tag.Key{KeyServerMethod, KeyServerStatus}, + Measure: ServerLatency, + Aggregation: view.Count(), + } + + ServerReceivedMessagesPerRPCView = &view.View{ + Name: "grpc.io/server/received_messages_per_rpc", + Description: "Distribution of messages received count per RPC, by method.", + TagKeys: []tag.Key{KeyServerMethod}, + Measure: ServerReceivedMessagesPerRPC, + Aggregation: DefaultMessageCountDistribution, + } + + ServerSentMessagesPerRPCView = &view.View{ + Name: "grpc.io/server/sent_messages_per_rpc", + Description: "Distribution of messages sent count per RPC, by method.", + TagKeys: []tag.Key{KeyServerMethod}, + Measure: ServerSentMessagesPerRPC, + Aggregation: DefaultMessageCountDistribution, + } +) + +// DefaultServerViews are the default server views provided by this package. +var DefaultServerViews = []*view.View{ + ServerReceivedBytesPerRPCView, + ServerSentBytesPerRPCView, + ServerLatencyView, + ServerCompletedRPCsView, +} diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/server_stats_handler.go b/vendor/go.opencensus.io/plugin/ocgrpc/server_stats_handler.go new file mode 100644 index 000000000..7847c1a91 --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ocgrpc/server_stats_handler.go @@ -0,0 +1,63 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package ocgrpc + +import ( + "time" + + "golang.org/x/net/context" + + "go.opencensus.io/tag" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/stats" +) + +// statsTagRPC gets the metadata from gRPC context, extracts the encoded tags from +// it and creates a new tag.Map and puts them into the returned context. 
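Aside, not part of the vendored sources: the server-side counterpart to the earlier client sketch, registering the DefaultServerViews above and attaching ServerHandler. The listen address is hypothetical:

package main

import (
	"log"
	"net"

	"go.opencensus.io/plugin/ocgrpc"
	"go.opencensus.io/stats/view"
	"google.golang.org/grpc"
)

func main() {
	// Register the default server views so the RPC measures are aggregated.
	if err := view.Register(ocgrpc.DefaultServerViews...); err != nil {
		log.Fatal(err)
	}
	lis, err := net.Listen("tcp", ":50051")
	if err != nil {
		log.Fatal(err)
	}
	// Wire the OpenCensus handler into the server; every inbound RPC is then
	// tagged in TagRPC and measured in HandleRPC.
	srv := grpc.NewServer(grpc.StatsHandler(&ocgrpc.ServerHandler{}))
	if err := srv.Serve(lis); err != nil {
		log.Fatal(err)
	}
}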
+func (h *ServerHandler) statsTagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context { + startTime := time.Now() + if info == nil { + if grpclog.V(2) { + grpclog.Infof("opencensus: TagRPC called with nil info.") + } + return ctx + } + d := &rpcData{ + startTime: startTime, + method: info.FullMethodName, + } + propagated := h.extractPropagatedTags(ctx) + ctx = tag.NewContext(ctx, propagated) + ctx, _ = tag.New(ctx, tag.Upsert(KeyServerMethod, methodName(info.FullMethodName))) + return context.WithValue(ctx, rpcDataKey, d) +} + +// extractPropagatedTags creates a new tag map containing the tags extracted from the +// gRPC metadata. +func (h *ServerHandler) extractPropagatedTags(ctx context.Context) *tag.Map { + buf := stats.Tags(ctx) + if buf == nil { + return nil + } + propagated, err := tag.Decode(buf) + if err != nil { + if grpclog.V(2) { + grpclog.Warningf("opencensus: Failed to decode tags from gRPC metadata: %v", err) + } + return nil + } + return propagated +} diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/stats_common.go b/vendor/go.opencensus.io/plugin/ocgrpc/stats_common.go new file mode 100644 index 000000000..e9991fe0f --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ocgrpc/stats_common.go @@ -0,0 +1,208 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package ocgrpc + +import ( + "context" + "strconv" + "strings" + "sync/atomic" + "time" + + ocstats "go.opencensus.io/stats" + "go.opencensus.io/stats/view" + "go.opencensus.io/tag" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" +) + +type grpcInstrumentationKey string + +// rpcData holds the instrumentation RPC data that is needed between the start +// and end of a call. It holds the info that this package needs to keep track +// of between the various GRPC events. +type rpcData struct { + // sentCount, sentBytes, recvCount and recvBytes have to be the first words + // in order to be 64-bit aligned on 32-bit architectures. + sentCount, sentBytes, recvCount, recvBytes int64 // access atomically + + // startTime represents the time at which TagRPC was invoked at the + // beginning of an RPC. It is an approximation of the time when the + // application code invoked GRPC code. + startTime time.Time + method string +} + +// The following variables define the default hard-coded auxiliary data used by +// both the default GRPC client and GRPC server metrics.
+var ( + DefaultBytesDistribution = view.Distribution(1024, 2048, 4096, 16384, 65536, 262144, 1048576, 4194304, 16777216, 67108864, 268435456, 1073741824, 4294967296) + DefaultMillisecondsDistribution = view.Distribution(0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000) + DefaultMessageCountDistribution = view.Distribution(1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536) +) + +// Server tags are applied to the context used to process each RPC, as well as +// the measures at the end of each RPC. +var ( + KeyServerMethod, _ = tag.NewKey("grpc_server_method") + KeyServerStatus, _ = tag.NewKey("grpc_server_status") +) + +// Client tags are applied to measures at the end of each RPC. +var ( + KeyClientMethod, _ = tag.NewKey("grpc_client_method") + KeyClientStatus, _ = tag.NewKey("grpc_client_status") +) + +var ( + rpcDataKey = grpcInstrumentationKey("opencensus-rpcData") +) + +func methodName(fullname string) string { + return strings.TrimLeft(fullname, "/") +} + +// statsHandleRPC processes the RPC events. +func statsHandleRPC(ctx context.Context, s stats.RPCStats) { + switch st := s.(type) { + case *stats.Begin, *stats.OutHeader, *stats.InHeader, *stats.InTrailer, *stats.OutTrailer: + // do nothing for client + case *stats.OutPayload: + handleRPCOutPayload(ctx, st) + case *stats.InPayload: + handleRPCInPayload(ctx, st) + case *stats.End: + handleRPCEnd(ctx, st) + default: + grpclog.Infof("unexpected stats: %T", st) + } +} + +func handleRPCOutPayload(ctx context.Context, s *stats.OutPayload) { + d, ok := ctx.Value(rpcDataKey).(*rpcData) + if !ok { + if grpclog.V(2) { + grpclog.Infoln("Failed to retrieve *rpcData from context.") + } + return + } + + atomic.AddInt64(&d.sentBytes, int64(s.Length)) + atomic.AddInt64(&d.sentCount, 1) +} + +func handleRPCInPayload(ctx context.Context, s *stats.InPayload) { + d, ok := ctx.Value(rpcDataKey).(*rpcData) + if !ok { + if grpclog.V(2) { + grpclog.Infoln("Failed to retrieve *rpcData from context.") + } + return + } + + atomic.AddInt64(&d.recvBytes, int64(s.Length)) + atomic.AddInt64(&d.recvCount, 1) +} + +func handleRPCEnd(ctx context.Context, s *stats.End) { + d, ok := ctx.Value(rpcDataKey).(*rpcData) + if !ok { + if grpclog.V(2) { + grpclog.Infoln("Failed to retrieve *rpcData from context.") + } + return + } + + elapsedTime := time.Since(d.startTime) + + var st string + if s.Error != nil { + s, ok := status.FromError(s.Error) + if ok { + st = statusCodeToString(s) + } + } else { + st = "OK" + } + + latencyMillis := float64(elapsedTime) / float64(time.Millisecond) + if s.Client { + ocstats.RecordWithTags(ctx, + []tag.Mutator{ + tag.Upsert(KeyClientMethod, methodName(d.method)), + tag.Upsert(KeyClientStatus, st), + }, + ClientSentBytesPerRPC.M(atomic.LoadInt64(&d.sentBytes)), + ClientSentMessagesPerRPC.M(atomic.LoadInt64(&d.sentCount)), + ClientReceivedMessagesPerRPC.M(atomic.LoadInt64(&d.recvCount)), + ClientReceivedBytesPerRPC.M(atomic.LoadInt64(&d.recvBytes)), + ClientRoundtripLatency.M(latencyMillis)) + } else { + ocstats.RecordWithTags(ctx, + []tag.Mutator{ + tag.Upsert(KeyServerStatus, st), + }, + ServerSentBytesPerRPC.M(atomic.LoadInt64(&d.sentBytes)), + ServerSentMessagesPerRPC.M(atomic.LoadInt64(&d.sentCount)), + ServerReceivedMessagesPerRPC.M(atomic.LoadInt64(&d.recvCount)), + ServerReceivedBytesPerRPC.M(atomic.LoadInt64(&d.recvBytes)), + ServerLatency.M(latencyMillis)) + } 
+} + +func statusCodeToString(s *status.Status) string { + // see https://github.com/grpc/grpc/blob/master/doc/statuscodes.md + switch c := s.Code(); c { + case codes.OK: + return "OK" + case codes.Canceled: + return "CANCELLED" + case codes.Unknown: + return "UNKNOWN" + case codes.InvalidArgument: + return "INVALID_ARGUMENT" + case codes.DeadlineExceeded: + return "DEADLINE_EXCEEDED" + case codes.NotFound: + return "NOT_FOUND" + case codes.AlreadyExists: + return "ALREADY_EXISTS" + case codes.PermissionDenied: + return "PERMISSION_DENIED" + case codes.ResourceExhausted: + return "RESOURCE_EXHAUSTED" + case codes.FailedPrecondition: + return "FAILED_PRECONDITION" + case codes.Aborted: + return "ABORTED" + case codes.OutOfRange: + return "OUT_OF_RANGE" + case codes.Unimplemented: + return "UNIMPLEMENTED" + case codes.Internal: + return "INTERNAL" + case codes.Unavailable: + return "UNAVAILABLE" + case codes.DataLoss: + return "DATA_LOSS" + case codes.Unauthenticated: + return "UNAUTHENTICATED" + default: + return "CODE_" + strconv.FormatInt(int64(c), 10) + } +} diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/trace_common.go b/vendor/go.opencensus.io/plugin/ocgrpc/trace_common.go new file mode 100644 index 000000000..720f381c2 --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ocgrpc/trace_common.go @@ -0,0 +1,107 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ocgrpc + +import ( + "strings" + + "google.golang.org/grpc/codes" + + "go.opencensus.io/trace" + "go.opencensus.io/trace/propagation" + "golang.org/x/net/context" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" +) + +const traceContextKey = "grpc-trace-bin" + +// TagRPC creates a new trace span for the client side of the RPC. +// +// It returns ctx with the new trace span added and a serialization of the +// SpanContext added to the outgoing gRPC metadata. +func (c *ClientHandler) traceTagRPC(ctx context.Context, rti *stats.RPCTagInfo) context.Context { + name := strings.TrimPrefix(rti.FullMethodName, "/") + name = strings.Replace(name, "/", ".", -1) + ctx, span := trace.StartSpan(ctx, name, + trace.WithSampler(c.StartOptions.Sampler), + trace.WithSpanKind(trace.SpanKindClient)) // span is ended by traceHandleRPC + traceContextBinary := propagation.Binary(span.SpanContext()) + return metadata.AppendToOutgoingContext(ctx, traceContextKey, string(traceContextBinary)) +} + +// TagRPC creates a new trace span for the server side of the RPC. +// +// It checks the incoming gRPC metadata in ctx for a SpanContext, and if +// it finds one, uses that SpanContext as the parent context of the new span. +// +// It returns ctx, with the new trace span added. 
+func (s *ServerHandler) traceTagRPC(ctx context.Context, rti *stats.RPCTagInfo) context.Context { + md, _ := metadata.FromIncomingContext(ctx) + name := strings.TrimPrefix(rti.FullMethodName, "/") + name = strings.Replace(name, "/", ".", -1) + traceContext := md[traceContextKey] + var ( + parent trace.SpanContext + haveParent bool + ) + if len(traceContext) > 0 { + // Metadata with keys ending in -bin are actually binary. They are base64 + // encoded before being put on the wire, see: + // https://github.com/grpc/grpc-go/blob/08d6261/Documentation/grpc-metadata.md#storing-binary-data-in-metadata + traceContextBinary := []byte(traceContext[0]) + parent, haveParent = propagation.FromBinary(traceContextBinary) + if haveParent && !s.IsPublicEndpoint { + ctx, _ := trace.StartSpanWithRemoteParent(ctx, name, parent, + trace.WithSpanKind(trace.SpanKindServer), + trace.WithSampler(s.StartOptions.Sampler), + ) + return ctx + } + } + ctx, span := trace.StartSpan(ctx, name, + trace.WithSpanKind(trace.SpanKindServer), + trace.WithSampler(s.StartOptions.Sampler)) + if haveParent { + span.AddLink(trace.Link{TraceID: parent.TraceID, SpanID: parent.SpanID, Type: trace.LinkTypeChild}) + } + return ctx +} + +func traceHandleRPC(ctx context.Context, rs stats.RPCStats) { + span := trace.FromContext(ctx) + // TODO: compressed and uncompressed sizes are not populated in every message. + switch rs := rs.(type) { + case *stats.Begin: + span.AddAttributes( + trace.BoolAttribute("Client", rs.Client), + trace.BoolAttribute("FailFast", rs.FailFast)) + case *stats.InPayload: + span.AddMessageReceiveEvent(0 /* TODO: messageID */, int64(rs.Length), int64(rs.WireLength)) + case *stats.OutPayload: + span.AddMessageSendEvent(0, int64(rs.Length), int64(rs.WireLength)) + case *stats.End: + if rs.Error != nil { + s, ok := status.FromError(rs.Error) + if ok { + span.SetStatus(trace.Status{Code: int32(s.Code()), Message: s.Message()}) + } else { + span.SetStatus(trace.Status{Code: int32(codes.Internal), Message: rs.Error.Error()}) + } + } + span.End() + } +} diff --git a/vendor/go.opencensus.io/plugin/ochttp/client.go b/vendor/go.opencensus.io/plugin/ochttp/client.go new file mode 100644 index 000000000..da815b2a7 --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ochttp/client.go @@ -0,0 +1,117 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ochttp + +import ( + "net/http" + "net/http/httptrace" + + "go.opencensus.io/trace" + "go.opencensus.io/trace/propagation" +) + +// Transport is an http.RoundTripper that instruments all outgoing requests with +// OpenCensus stats and tracing. +// +// The zero value is intended to be a useful default, but for +// now it's recommended that you explicitly set Propagation, since the default +// for this may change. +type Transport struct { + // Base may be set to wrap another http.RoundTripper that does the actual + // requests. By default http.DefaultTransport is used. 
+ // + // If base HTTP roundtripper implements CancelRequest, + // the returned round tripper will be cancelable. + Base http.RoundTripper + + // Propagation defines how traces are propagated. If unspecified, a default + // (currently B3 format) will be used. + Propagation propagation.HTTPFormat + + // StartOptions are applied to the span started by this Transport around each + // request. + // + // StartOptions.SpanKind will always be set to trace.SpanKindClient + // for spans started by this transport. + StartOptions trace.StartOptions + + // GetStartOptions allows setting start options per request. If set, + // StartOptions is ignored. + GetStartOptions func(*http.Request) trace.StartOptions + + // FormatSpanName holds the function to use for generating the span name + // from the information found in the outgoing HTTP Request. By default the + // name equals the URL Path. + FormatSpanName func(*http.Request) string + + // NewClientTrace may be set to a function allowing the current *trace.Span + // to be annotated with HTTP request event information emitted by the + // httptrace package. + NewClientTrace func(*http.Request, *trace.Span) *httptrace.ClientTrace + + // TODO: Implement tag propagation for HTTP. +} + +// RoundTrip implements http.RoundTripper, delegating to Base and recording stats and traces for the request. +func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { + rt := t.base() + if isHealthEndpoint(req.URL.Path) { + return rt.RoundTrip(req) + } + // TODO: remove excessive nesting of http.RoundTrippers here. + format := t.Propagation + if format == nil { + format = defaultFormat + } + spanNameFormatter := t.FormatSpanName + if spanNameFormatter == nil { + spanNameFormatter = spanNameFromURL + } + + startOpts := t.StartOptions + if t.GetStartOptions != nil { + startOpts = t.GetStartOptions(req) + } + + rt = &traceTransport{ + base: rt, + format: format, + startOptions: trace.StartOptions{ + Sampler: startOpts.Sampler, + SpanKind: trace.SpanKindClient, + }, + formatSpanName: spanNameFormatter, + newClientTrace: t.NewClientTrace, + } + rt = statsTransport{base: rt} + return rt.RoundTrip(req) +} + +func (t *Transport) base() http.RoundTripper { + if t.Base != nil { + return t.Base + } + return http.DefaultTransport +} + +// CancelRequest cancels an in-flight request by closing its connection. +func (t *Transport) CancelRequest(req *http.Request) { + type canceler interface { + CancelRequest(*http.Request) + } + if cr, ok := t.base().(canceler); ok { + cr.CancelRequest(req) + } +} diff --git a/vendor/go.opencensus.io/plugin/ochttp/client_stats.go b/vendor/go.opencensus.io/plugin/ochttp/client_stats.go new file mode 100644 index 000000000..17142aabe --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ochttp/client_stats.go @@ -0,0 +1,143 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
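Aside, not part of the vendored sources: a minimal sketch of using the Transport above from an HTTP client; the URL is hypothetical:

package main

import (
	"io"
	"io/ioutil"
	"log"
	"net/http"

	"go.opencensus.io/plugin/ochttp"
)

func main() {
	// Wrap the default transport; each request gets a client span plus stats.
	client := &http.Client{Transport: &ochttp.Transport{}}
	resp, err := client.Get("https://example.com/")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	// Draining the body lets the stats transport observe the response size.
	io.Copy(ioutil.Discard, resp.Body)
}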
+ +package ochttp + +import ( + "context" + "io" + "net/http" + "strconv" + "sync" + "time" + + "go.opencensus.io/stats" + "go.opencensus.io/tag" +) + +// statsTransport is an http.RoundTripper that collects stats for the outgoing requests. +type statsTransport struct { + base http.RoundTripper +} + +// RoundTrip implements http.RoundTripper, delegating to Base and recording stats for the request. +func (t statsTransport) RoundTrip(req *http.Request) (*http.Response, error) { + ctx, _ := tag.New(req.Context(), + tag.Upsert(KeyClientHost, req.Host), + tag.Upsert(Host, req.Host), + tag.Upsert(KeyClientPath, req.URL.Path), + tag.Upsert(Path, req.URL.Path), + tag.Upsert(KeyClientMethod, req.Method), + tag.Upsert(Method, req.Method)) + req = req.WithContext(ctx) + track := &tracker{ + start: time.Now(), + ctx: ctx, + } + if req.Body == nil { + // TODO: Handle cases where ContentLength is not set. + track.reqSize = -1 + } else if req.ContentLength > 0 { + track.reqSize = req.ContentLength + } + stats.Record(ctx, ClientRequestCount.M(1)) + + // Perform request. + resp, err := t.base.RoundTrip(req) + + if err != nil { + track.statusCode = http.StatusInternalServerError + track.end() + } else { + track.statusCode = resp.StatusCode + if req.Method != "HEAD" { + track.respContentLength = resp.ContentLength + } + if resp.Body == nil { + track.end() + } else { + track.body = resp.Body + resp.Body = wrappedBody(track, resp.Body) + } + } + return resp, err +} + +// CancelRequest cancels an in-flight request by closing its connection. +func (t statsTransport) CancelRequest(req *http.Request) { + type canceler interface { + CancelRequest(*http.Request) + } + if cr, ok := t.base.(canceler); ok { + cr.CancelRequest(req) + } +} + +type tracker struct { + ctx context.Context + respSize int64 + respContentLength int64 + reqSize int64 + start time.Time + body io.ReadCloser + statusCode int + endOnce sync.Once +} + +var _ io.ReadCloser = (*tracker)(nil) + +func (t *tracker) end() { + t.endOnce.Do(func() { + latencyMs := float64(time.Since(t.start)) / float64(time.Millisecond) + respSize := t.respSize + if t.respSize == 0 && t.respContentLength > 0 { + respSize = t.respContentLength + } + m := []stats.Measurement{ + ClientSentBytes.M(t.reqSize), + ClientReceivedBytes.M(respSize), + ClientRoundtripLatency.M(latencyMs), + ClientLatency.M(latencyMs), + ClientResponseBytes.M(t.respSize), + } + if t.reqSize >= 0 { + m = append(m, ClientRequestBytes.M(t.reqSize)) + } + + stats.RecordWithTags(t.ctx, []tag.Mutator{ + tag.Upsert(StatusCode, strconv.Itoa(t.statusCode)), + tag.Upsert(KeyClientStatus, strconv.Itoa(t.statusCode)), + }, m...) + }) +} + +func (t *tracker) Read(b []byte) (int, error) { + n, err := t.body.Read(b) + t.respSize += int64(n) + switch err { + case nil: + return n, nil + case io.EOF: + t.end() + } + return n, err +} + +func (t *tracker) Close() error { + // Invoking endSpan on Close will help catch the cases + // in which a read returned a non-nil error, we set the + // span status but didn't end the span. + t.end() + return t.body.Close() +} diff --git a/vendor/go.opencensus.io/plugin/ochttp/doc.go b/vendor/go.opencensus.io/plugin/ochttp/doc.go new file mode 100644 index 000000000..10e626b16 --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ochttp/doc.go @@ -0,0 +1,19 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package ochttp provides OpenCensus instrumentation for the net/http package.
+//
+// For server instrumentation, see Handler. For client-side instrumentation,
+// see Transport.
+package ochttp // import "go.opencensus.io/plugin/ochttp"
diff --git a/vendor/go.opencensus.io/plugin/ochttp/propagation/b3/b3.go b/vendor/go.opencensus.io/plugin/ochttp/propagation/b3/b3.go
new file mode 100644
index 000000000..2f1c7f006
--- /dev/null
+++ b/vendor/go.opencensus.io/plugin/ochttp/propagation/b3/b3.go
@@ -0,0 +1,123 @@
+// Copyright 2018, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package b3 contains a propagation.HTTPFormat implementation
+// for B3 propagation. See https://github.com/openzipkin/b3-propagation
+// for more details.
+package b3 // import "go.opencensus.io/plugin/ochttp/propagation/b3"
+
+import (
+ "encoding/hex"
+ "net/http"
+
+ "go.opencensus.io/trace"
+ "go.opencensus.io/trace/propagation"
+)
+
+// B3 headers that OpenCensus understands.
+const (
+ TraceIDHeader = "X-B3-TraceId"
+ SpanIDHeader = "X-B3-SpanId"
+ SampledHeader = "X-B3-Sampled"
+)
+
+// HTTPFormat implements propagation.HTTPFormat to propagate
+// traces in HTTP headers in B3 propagation format.
+// HTTPFormat skips the X-B3-ParentId and X-B3-Flags headers
+// because they carry additional fields that are not represented
+// in the OpenCensus span context. Spans created from the incoming
+// header will be the direct children of the client-side span.
+// Similarly, the receiver of the outgoing spans should use the
+// client-side span created by OpenCensus as the parent.
+type HTTPFormat struct{}
+
+var _ propagation.HTTPFormat = (*HTTPFormat)(nil)
+
+// SpanContextFromRequest extracts a B3 span context from incoming requests.
+func (f *HTTPFormat) SpanContextFromRequest(req *http.Request) (sc trace.SpanContext, ok bool) {
+ tid, ok := ParseTraceID(req.Header.Get(TraceIDHeader))
+ if !ok {
+ return trace.SpanContext{}, false
+ }
+ sid, ok := ParseSpanID(req.Header.Get(SpanIDHeader))
+ if !ok {
+ return trace.SpanContext{}, false
+ }
+ sampled, _ := ParseSampled(req.Header.Get(SampledHeader))
+ return trace.SpanContext{
+ TraceID: tid,
+ SpanID: sid,
+ TraceOptions: sampled,
+ }, true
+}
+
+// ParseTraceID parses the value of the X-B3-TraceId header.
+func ParseTraceID(tid string) (trace.TraceID, bool) {
+ if tid == "" {
+ return trace.TraceID{}, false
+ }
+ b, err := hex.DecodeString(tid)
+ if err != nil {
+ return trace.TraceID{}, false
+ }
+ var traceID trace.TraceID
+ if len(b) <= 8 {
+ // The lower 64-bits.
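+ // For example, a full 16-hex-digit (8-byte) value fills traceID[8:16],
+ // while shorter values are right-aligned within the lower 8 bytes.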
+ start := 8 + (8 - len(b)) + copy(traceID[start:], b) + } else { + start := 16 - len(b) + copy(traceID[start:], b) + } + + return traceID, true +} + +// ParseSpanID parses the value of the X-B3-SpanId or X-B3-ParentSpanId headers. +func ParseSpanID(sid string) (spanID trace.SpanID, ok bool) { + if sid == "" { + return trace.SpanID{}, false + } + b, err := hex.DecodeString(sid) + if err != nil { + return trace.SpanID{}, false + } + start := 8 - len(b) + copy(spanID[start:], b) + return spanID, true +} + +// ParseSampled parses the value of the X-B3-Sampled header. +func ParseSampled(sampled string) (trace.TraceOptions, bool) { + switch sampled { + case "true", "1": + return trace.TraceOptions(1), true + default: + return trace.TraceOptions(0), false + } +} + +// SpanContextToRequest modifies the given request to include B3 headers. +func (f *HTTPFormat) SpanContextToRequest(sc trace.SpanContext, req *http.Request) { + req.Header.Set(TraceIDHeader, hex.EncodeToString(sc.TraceID[:])) + req.Header.Set(SpanIDHeader, hex.EncodeToString(sc.SpanID[:])) + + var sampled string + if sc.IsSampled() { + sampled = "1" + } else { + sampled = "0" + } + req.Header.Set(SampledHeader, sampled) +} diff --git a/vendor/go.opencensus.io/plugin/ochttp/route.go b/vendor/go.opencensus.io/plugin/ochttp/route.go new file mode 100644 index 000000000..5e6a34307 --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ochttp/route.go @@ -0,0 +1,61 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ochttp + +import ( + "context" + "net/http" + + "go.opencensus.io/tag" +) + +// SetRoute sets the http_server_route tag to the given value. +// It's useful when an HTTP framework does not support the http.Handler interface +// and using WithRouteTag is not an option, but provides a way to hook into the request flow. +func SetRoute(ctx context.Context, route string) { + if a, ok := ctx.Value(addedTagsKey{}).(*addedTags); ok { + a.t = append(a.t, tag.Upsert(KeyServerRoute, route)) + } +} + +// WithRouteTag returns an http.Handler that records stats with the +// http_server_route tag set to the given value. +func WithRouteTag(handler http.Handler, route string) http.Handler { + return taggedHandlerFunc(func(w http.ResponseWriter, r *http.Request) []tag.Mutator { + addRoute := []tag.Mutator{tag.Upsert(KeyServerRoute, route)} + ctx, _ := tag.New(r.Context(), addRoute...) + r = r.WithContext(ctx) + handler.ServeHTTP(w, r) + return addRoute + }) +} + +// taggedHandlerFunc is a http.Handler that returns tags describing the +// processing of the request. These tags will be recorded along with the +// measures in this package at the end of the request. +type taggedHandlerFunc func(w http.ResponseWriter, r *http.Request) []tag.Mutator + +func (h taggedHandlerFunc) ServeHTTP(w http.ResponseWriter, r *http.Request) { + tags := h(w, r) + if a, ok := r.Context().Value(addedTagsKey{}).(*addedTags); ok { + a.t = append(a.t, tags...) 
+ }
+}
+
+type addedTagsKey struct{}
+
+type addedTags struct {
+ t []tag.Mutator
+}
diff --git a/vendor/go.opencensus.io/plugin/ochttp/server.go b/vendor/go.opencensus.io/plugin/ochttp/server.go
new file mode 100644
index 000000000..5fe15e89f
--- /dev/null
+++ b/vendor/go.opencensus.io/plugin/ochttp/server.go
@@ -0,0 +1,440 @@
+// Copyright 2018, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ochttp
+
+import (
+ "context"
+ "io"
+ "net/http"
+ "strconv"
+ "sync"
+ "time"
+
+ "go.opencensus.io/stats"
+ "go.opencensus.io/tag"
+ "go.opencensus.io/trace"
+ "go.opencensus.io/trace/propagation"
+)
+
+// Handler is an http.Handler wrapper to instrument your HTTP server with
+// OpenCensus. It supports both stats and tracing.
+//
+// Tracing
+//
+// This handler is aware of the incoming request's span, reading it from request
+// headers as configured using the Propagation field.
+// The extracted span can be accessed from the incoming request's
+// context.
+//
+// span := trace.FromContext(r.Context())
+//
+// The server span will be automatically ended at the end of ServeHTTP.
+type Handler struct {
+ // Propagation defines how traces are propagated. If unspecified,
+ // B3 propagation will be used.
+ Propagation propagation.HTTPFormat
+
+ // Handler is the handler used to handle the incoming request.
+ Handler http.Handler
+
+ // StartOptions are applied to the span started by this Handler around each
+ // request.
+ //
+ // StartOptions.SpanKind will always be set to trace.SpanKindServer
+ // for spans started by this handler.
+ StartOptions trace.StartOptions
+
+ // GetStartOptions allows setting start options per request. If set,
+ // StartOptions is ignored.
+ GetStartOptions func(*http.Request) trace.StartOptions
+
+ // IsPublicEndpoint should be set to true for publicly accessible HTTP(S)
+ // servers. If true, any trace metadata set on the incoming request will
+ // be added as a linked trace instead of being added as a parent of the
+ // current trace.
+ IsPublicEndpoint bool
+
+ // FormatSpanName holds the function to use for generating the span name
+ // from the information found in the incoming HTTP Request. By default the
+ // name equals the URL Path.
+ FormatSpanName func(*http.Request) string +} + +func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + var tags addedTags + r, traceEnd := h.startTrace(w, r) + defer traceEnd() + w, statsEnd := h.startStats(w, r) + defer statsEnd(&tags) + handler := h.Handler + if handler == nil { + handler = http.DefaultServeMux + } + r = r.WithContext(context.WithValue(r.Context(), addedTagsKey{}, &tags)) + handler.ServeHTTP(w, r) +} + +func (h *Handler) startTrace(w http.ResponseWriter, r *http.Request) (*http.Request, func()) { + if isHealthEndpoint(r.URL.Path) { + return r, func() {} + } + var name string + if h.FormatSpanName == nil { + name = spanNameFromURL(r) + } else { + name = h.FormatSpanName(r) + } + ctx := r.Context() + + startOpts := h.StartOptions + if h.GetStartOptions != nil { + startOpts = h.GetStartOptions(r) + } + + var span *trace.Span + sc, ok := h.extractSpanContext(r) + if ok && !h.IsPublicEndpoint { + ctx, span = trace.StartSpanWithRemoteParent(ctx, name, sc, + trace.WithSampler(startOpts.Sampler), + trace.WithSpanKind(trace.SpanKindServer)) + } else { + ctx, span = trace.StartSpan(ctx, name, + trace.WithSampler(startOpts.Sampler), + trace.WithSpanKind(trace.SpanKindServer), + ) + if ok { + span.AddLink(trace.Link{ + TraceID: sc.TraceID, + SpanID: sc.SpanID, + Type: trace.LinkTypeParent, + Attributes: nil, + }) + } + } + span.AddAttributes(requestAttrs(r)...) + return r.WithContext(ctx), span.End +} + +func (h *Handler) extractSpanContext(r *http.Request) (trace.SpanContext, bool) { + if h.Propagation == nil { + return defaultFormat.SpanContextFromRequest(r) + } + return h.Propagation.SpanContextFromRequest(r) +} + +func (h *Handler) startStats(w http.ResponseWriter, r *http.Request) (http.ResponseWriter, func(tags *addedTags)) { + ctx, _ := tag.New(r.Context(), + tag.Upsert(Host, r.Host), + tag.Upsert(Path, r.URL.Path), + tag.Upsert(Method, r.Method)) + track := &trackingResponseWriter{ + start: time.Now(), + ctx: ctx, + writer: w, + } + if r.Body == nil { + // TODO: Handle cases where ContentLength is not set. + track.reqSize = -1 + } else if r.ContentLength > 0 { + track.reqSize = r.ContentLength + } + stats.Record(ctx, ServerRequestCount.M(1)) + return track.wrappedResponseWriter(), track.end +} + +type trackingResponseWriter struct { + ctx context.Context + reqSize int64 + respSize int64 + start time.Time + statusCode int + statusLine string + endOnce sync.Once + writer http.ResponseWriter +} + +// Compile time assertion for ResponseWriter interface +var _ http.ResponseWriter = (*trackingResponseWriter)(nil) + +var logTagsErrorOnce sync.Once + +func (t *trackingResponseWriter) end(tags *addedTags) { + t.endOnce.Do(func() { + if t.statusCode == 0 { + t.statusCode = 200 + } + + span := trace.FromContext(t.ctx) + span.SetStatus(TraceStatus(t.statusCode, t.statusLine)) + span.AddAttributes(trace.Int64Attribute(StatusCodeAttribute, int64(t.statusCode))) + + m := []stats.Measurement{ + ServerLatency.M(float64(time.Since(t.start)) / float64(time.Millisecond)), + ServerResponseBytes.M(t.respSize), + } + if t.reqSize >= 0 { + m = append(m, ServerRequestBytes.M(t.reqSize)) + } + allTags := make([]tag.Mutator, len(tags.t)+1) + allTags[0] = tag.Upsert(StatusCode, strconv.Itoa(t.statusCode)) + copy(allTags[1:], tags.t) + stats.RecordWithTags(t.ctx, allTags, m...) 
+ }) +} + +func (t *trackingResponseWriter) Header() http.Header { + return t.writer.Header() +} + +func (t *trackingResponseWriter) Write(data []byte) (int, error) { + n, err := t.writer.Write(data) + t.respSize += int64(n) + return n, err +} + +func (t *trackingResponseWriter) WriteHeader(statusCode int) { + t.writer.WriteHeader(statusCode) + t.statusCode = statusCode + t.statusLine = http.StatusText(t.statusCode) +} + +// wrappedResponseWriter returns a wrapped version of the original +// ResponseWriter and only implements the same combination of additional +// interfaces as the original. +// This implementation is based on https://github.com/felixge/httpsnoop. +func (t *trackingResponseWriter) wrappedResponseWriter() http.ResponseWriter { + var ( + hj, i0 = t.writer.(http.Hijacker) + cn, i1 = t.writer.(http.CloseNotifier) + pu, i2 = t.writer.(http.Pusher) + fl, i3 = t.writer.(http.Flusher) + rf, i4 = t.writer.(io.ReaderFrom) + ) + + switch { + case !i0 && !i1 && !i2 && !i3 && !i4: + return struct { + http.ResponseWriter + }{t} + case !i0 && !i1 && !i2 && !i3 && i4: + return struct { + http.ResponseWriter + io.ReaderFrom + }{t, rf} + case !i0 && !i1 && !i2 && i3 && !i4: + return struct { + http.ResponseWriter + http.Flusher + }{t, fl} + case !i0 && !i1 && !i2 && i3 && i4: + return struct { + http.ResponseWriter + http.Flusher + io.ReaderFrom + }{t, fl, rf} + case !i0 && !i1 && i2 && !i3 && !i4: + return struct { + http.ResponseWriter + http.Pusher + }{t, pu} + case !i0 && !i1 && i2 && !i3 && i4: + return struct { + http.ResponseWriter + http.Pusher + io.ReaderFrom + }{t, pu, rf} + case !i0 && !i1 && i2 && i3 && !i4: + return struct { + http.ResponseWriter + http.Pusher + http.Flusher + }{t, pu, fl} + case !i0 && !i1 && i2 && i3 && i4: + return struct { + http.ResponseWriter + http.Pusher + http.Flusher + io.ReaderFrom + }{t, pu, fl, rf} + case !i0 && i1 && !i2 && !i3 && !i4: + return struct { + http.ResponseWriter + http.CloseNotifier + }{t, cn} + case !i0 && i1 && !i2 && !i3 && i4: + return struct { + http.ResponseWriter + http.CloseNotifier + io.ReaderFrom + }{t, cn, rf} + case !i0 && i1 && !i2 && i3 && !i4: + return struct { + http.ResponseWriter + http.CloseNotifier + http.Flusher + }{t, cn, fl} + case !i0 && i1 && !i2 && i3 && i4: + return struct { + http.ResponseWriter + http.CloseNotifier + http.Flusher + io.ReaderFrom + }{t, cn, fl, rf} + case !i0 && i1 && i2 && !i3 && !i4: + return struct { + http.ResponseWriter + http.CloseNotifier + http.Pusher + }{t, cn, pu} + case !i0 && i1 && i2 && !i3 && i4: + return struct { + http.ResponseWriter + http.CloseNotifier + http.Pusher + io.ReaderFrom + }{t, cn, pu, rf} + case !i0 && i1 && i2 && i3 && !i4: + return struct { + http.ResponseWriter + http.CloseNotifier + http.Pusher + http.Flusher + }{t, cn, pu, fl} + case !i0 && i1 && i2 && i3 && i4: + return struct { + http.ResponseWriter + http.CloseNotifier + http.Pusher + http.Flusher + io.ReaderFrom + }{t, cn, pu, fl, rf} + case i0 && !i1 && !i2 && !i3 && !i4: + return struct { + http.ResponseWriter + http.Hijacker + }{t, hj} + case i0 && !i1 && !i2 && !i3 && i4: + return struct { + http.ResponseWriter + http.Hijacker + io.ReaderFrom + }{t, hj, rf} + case i0 && !i1 && !i2 && i3 && !i4: + return struct { + http.ResponseWriter + http.Hijacker + http.Flusher + }{t, hj, fl} + case i0 && !i1 && !i2 && i3 && i4: + return struct { + http.ResponseWriter + http.Hijacker + http.Flusher + io.ReaderFrom + }{t, hj, fl, rf} + case i0 && !i1 && i2 && !i3 && !i4: + return struct { + http.ResponseWriter + 
http.Hijacker + http.Pusher + }{t, hj, pu} + case i0 && !i1 && i2 && !i3 && i4: + return struct { + http.ResponseWriter + http.Hijacker + http.Pusher + io.ReaderFrom + }{t, hj, pu, rf} + case i0 && !i1 && i2 && i3 && !i4: + return struct { + http.ResponseWriter + http.Hijacker + http.Pusher + http.Flusher + }{t, hj, pu, fl} + case i0 && !i1 && i2 && i3 && i4: + return struct { + http.ResponseWriter + http.Hijacker + http.Pusher + http.Flusher + io.ReaderFrom + }{t, hj, pu, fl, rf} + case i0 && i1 && !i2 && !i3 && !i4: + return struct { + http.ResponseWriter + http.Hijacker + http.CloseNotifier + }{t, hj, cn} + case i0 && i1 && !i2 && !i3 && i4: + return struct { + http.ResponseWriter + http.Hijacker + http.CloseNotifier + io.ReaderFrom + }{t, hj, cn, rf} + case i0 && i1 && !i2 && i3 && !i4: + return struct { + http.ResponseWriter + http.Hijacker + http.CloseNotifier + http.Flusher + }{t, hj, cn, fl} + case i0 && i1 && !i2 && i3 && i4: + return struct { + http.ResponseWriter + http.Hijacker + http.CloseNotifier + http.Flusher + io.ReaderFrom + }{t, hj, cn, fl, rf} + case i0 && i1 && i2 && !i3 && !i4: + return struct { + http.ResponseWriter + http.Hijacker + http.CloseNotifier + http.Pusher + }{t, hj, cn, pu} + case i0 && i1 && i2 && !i3 && i4: + return struct { + http.ResponseWriter + http.Hijacker + http.CloseNotifier + http.Pusher + io.ReaderFrom + }{t, hj, cn, pu, rf} + case i0 && i1 && i2 && i3 && !i4: + return struct { + http.ResponseWriter + http.Hijacker + http.CloseNotifier + http.Pusher + http.Flusher + }{t, hj, cn, pu, fl} + case i0 && i1 && i2 && i3 && i4: + return struct { + http.ResponseWriter + http.Hijacker + http.CloseNotifier + http.Pusher + http.Flusher + io.ReaderFrom + }{t, hj, cn, pu, fl, rf} + default: + return struct { + http.ResponseWriter + }{t} + } +} diff --git a/vendor/go.opencensus.io/plugin/ochttp/span_annotating_client_trace.go b/vendor/go.opencensus.io/plugin/ochttp/span_annotating_client_trace.go new file mode 100644 index 000000000..05c6c56cc --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ochttp/span_annotating_client_trace.go @@ -0,0 +1,169 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ochttp + +import ( + "crypto/tls" + "net/http" + "net/http/httptrace" + "strings" + + "go.opencensus.io/trace" +) + +type spanAnnotator struct { + sp *trace.Span +} + +// TODO: Remove NewSpanAnnotator at the next release. + +// NewSpanAnnotator returns a httptrace.ClientTrace which annotates +// all emitted httptrace events on the provided Span. +// Deprecated: Use NewSpanAnnotatingClientTrace instead +func NewSpanAnnotator(r *http.Request, s *trace.Span) *httptrace.ClientTrace { + return NewSpanAnnotatingClientTrace(r, s) +} + +// NewSpanAnnotatingClientTrace returns a httptrace.ClientTrace which annotates +// all emitted httptrace events on the provided Span. 
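+//
+// A minimal wiring sketch, using the Transport type from this package:
+//
+//	t := &ochttp.Transport{NewClientTrace: ochttp.NewSpanAnnotatingClientTrace}
+//	client := &http.Client{Transport: t}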
+func NewSpanAnnotatingClientTrace(_ *http.Request, s *trace.Span) *httptrace.ClientTrace { + sa := spanAnnotator{sp: s} + + return &httptrace.ClientTrace{ + GetConn: sa.getConn, + GotConn: sa.gotConn, + PutIdleConn: sa.putIdleConn, + GotFirstResponseByte: sa.gotFirstResponseByte, + Got100Continue: sa.got100Continue, + DNSStart: sa.dnsStart, + DNSDone: sa.dnsDone, + ConnectStart: sa.connectStart, + ConnectDone: sa.connectDone, + TLSHandshakeStart: sa.tlsHandshakeStart, + TLSHandshakeDone: sa.tlsHandshakeDone, + WroteHeaders: sa.wroteHeaders, + Wait100Continue: sa.wait100Continue, + WroteRequest: sa.wroteRequest, + } +} + +func (s spanAnnotator) getConn(hostPort string) { + attrs := []trace.Attribute{ + trace.StringAttribute("httptrace.get_connection.host_port", hostPort), + } + s.sp.Annotate(attrs, "GetConn") +} + +func (s spanAnnotator) gotConn(info httptrace.GotConnInfo) { + attrs := []trace.Attribute{ + trace.BoolAttribute("httptrace.got_connection.reused", info.Reused), + trace.BoolAttribute("httptrace.got_connection.was_idle", info.WasIdle), + } + if info.WasIdle { + attrs = append(attrs, + trace.StringAttribute("httptrace.got_connection.idle_time", info.IdleTime.String())) + } + s.sp.Annotate(attrs, "GotConn") +} + +// PutIdleConn implements a httptrace.ClientTrace hook +func (s spanAnnotator) putIdleConn(err error) { + var attrs []trace.Attribute + if err != nil { + attrs = append(attrs, + trace.StringAttribute("httptrace.put_idle_connection.error", err.Error())) + } + s.sp.Annotate(attrs, "PutIdleConn") +} + +func (s spanAnnotator) gotFirstResponseByte() { + s.sp.Annotate(nil, "GotFirstResponseByte") +} + +func (s spanAnnotator) got100Continue() { + s.sp.Annotate(nil, "Got100Continue") +} + +func (s spanAnnotator) dnsStart(info httptrace.DNSStartInfo) { + attrs := []trace.Attribute{ + trace.StringAttribute("httptrace.dns_start.host", info.Host), + } + s.sp.Annotate(attrs, "DNSStart") +} + +func (s spanAnnotator) dnsDone(info httptrace.DNSDoneInfo) { + var addrs []string + for _, addr := range info.Addrs { + addrs = append(addrs, addr.String()) + } + attrs := []trace.Attribute{ + trace.StringAttribute("httptrace.dns_done.addrs", strings.Join(addrs, " , ")), + } + if info.Err != nil { + attrs = append(attrs, + trace.StringAttribute("httptrace.dns_done.error", info.Err.Error())) + } + s.sp.Annotate(attrs, "DNSDone") +} + +func (s spanAnnotator) connectStart(network, addr string) { + attrs := []trace.Attribute{ + trace.StringAttribute("httptrace.connect_start.network", network), + trace.StringAttribute("httptrace.connect_start.addr", addr), + } + s.sp.Annotate(attrs, "ConnectStart") +} + +func (s spanAnnotator) connectDone(network, addr string, err error) { + attrs := []trace.Attribute{ + trace.StringAttribute("httptrace.connect_done.network", network), + trace.StringAttribute("httptrace.connect_done.addr", addr), + } + if err != nil { + attrs = append(attrs, + trace.StringAttribute("httptrace.connect_done.error", err.Error())) + } + s.sp.Annotate(attrs, "ConnectDone") +} + +func (s spanAnnotator) tlsHandshakeStart() { + s.sp.Annotate(nil, "TLSHandshakeStart") +} + +func (s spanAnnotator) tlsHandshakeDone(_ tls.ConnectionState, err error) { + var attrs []trace.Attribute + if err != nil { + attrs = append(attrs, + trace.StringAttribute("httptrace.tls_handshake_done.error", err.Error())) + } + s.sp.Annotate(attrs, "TLSHandshakeDone") +} + +func (s spanAnnotator) wroteHeaders() { + s.sp.Annotate(nil, "WroteHeaders") +} + +func (s spanAnnotator) wait100Continue() { + s.sp.Annotate(nil, 
"Wait100Continue") +} + +func (s spanAnnotator) wroteRequest(info httptrace.WroteRequestInfo) { + var attrs []trace.Attribute + if info.Err != nil { + attrs = append(attrs, + trace.StringAttribute("httptrace.wrote_request.error", info.Err.Error())) + } + s.sp.Annotate(attrs, "WroteRequest") +} diff --git a/vendor/go.opencensus.io/plugin/ochttp/stats.go b/vendor/go.opencensus.io/plugin/ochttp/stats.go new file mode 100644 index 000000000..63bbcda5e --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ochttp/stats.go @@ -0,0 +1,292 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ochttp + +import ( + "go.opencensus.io/stats" + "go.opencensus.io/stats/view" + "go.opencensus.io/tag" +) + +// Deprecated: client HTTP measures. +var ( + // Deprecated: Use a Count aggregation over one of the other client measures to achieve the same effect. + ClientRequestCount = stats.Int64( + "opencensus.io/http/client/request_count", + "Number of HTTP requests started", + stats.UnitDimensionless) + // Deprecated: Use ClientSentBytes. + ClientRequestBytes = stats.Int64( + "opencensus.io/http/client/request_bytes", + "HTTP request body size if set as ContentLength (uncompressed)", + stats.UnitBytes) + // Deprecated: Use ClientReceivedBytes. + ClientResponseBytes = stats.Int64( + "opencensus.io/http/client/response_bytes", + "HTTP response body size (uncompressed)", + stats.UnitBytes) + // Deprecated: Use ClientRoundtripLatency. + ClientLatency = stats.Float64( + "opencensus.io/http/client/latency", + "End-to-end latency", + stats.UnitMilliseconds) +) + +// The following client HTTP measures are supported for use in custom views. 
+var (
+ ClientSentBytes = stats.Int64(
+ "opencensus.io/http/client/sent_bytes",
+ "Total bytes sent in request body (not including headers)",
+ stats.UnitBytes,
+ )
+ ClientReceivedBytes = stats.Int64(
+ "opencensus.io/http/client/received_bytes",
+ "Total bytes received in response bodies (not including headers but including error responses with bodies)",
+ stats.UnitBytes,
+ )
+ ClientRoundtripLatency = stats.Float64(
+ "opencensus.io/http/client/roundtrip_latency",
+ "Time between first byte of request headers sent to last byte of response received, or terminal error",
+ stats.UnitMilliseconds,
+ )
+)
+
+// The following server HTTP measures are supported for use in custom views:
+var (
+ ServerRequestCount = stats.Int64(
+ "opencensus.io/http/server/request_count",
+ "Number of HTTP requests started",
+ stats.UnitDimensionless)
+ ServerRequestBytes = stats.Int64(
+ "opencensus.io/http/server/request_bytes",
+ "HTTP request body size if set as ContentLength (uncompressed)",
+ stats.UnitBytes)
+ ServerResponseBytes = stats.Int64(
+ "opencensus.io/http/server/response_bytes",
+ "HTTP response body size (uncompressed)",
+ stats.UnitBytes)
+ ServerLatency = stats.Float64(
+ "opencensus.io/http/server/latency",
+ "End-to-end latency",
+ stats.UnitMilliseconds)
+)
+
+// The following tags are applied to stats recorded by this package. Host, Path
+// and Method are applied to all measures. StatusCode is not applied to
+// ClientRequestCount or ServerRequestCount, since it is recorded before the status is known.
+var (
+ // Host is the value of the HTTP Host header.
+ //
+ // The value of this tag can be controlled by the HTTP client, so you need
+ // to watch out for potentially generating high-cardinality labels in your
+ // metrics backend if you use this tag in views.
+ Host, _ = tag.NewKey("http.host")
+
+ // StatusCode is the numeric HTTP response status code,
+ // or "error" if a transport error occurred and no status code was read.
+ StatusCode, _ = tag.NewKey("http.status")
+
+ // Path is the URL path (not including query string) in the request.
+ //
+ // The value of this tag can be controlled by the HTTP client, so you need
+ // to watch out for potentially generating high-cardinality labels in your
+ // metrics backend if you use this tag in views.
+ Path, _ = tag.NewKey("http.path")
+
+ // Method is the HTTP method of the request, capitalized (GET, POST, etc.).
+ Method, _ = tag.NewKey("http.method")
+
+ // KeyServerRoute is a low cardinality string representing the logical
+ // handler of the request. This is usually the pattern registered on a
+ // ServeMux (or similar string).
+ KeyServerRoute, _ = tag.NewKey("http_server_route")
+)
+
+// Client tag keys.
+var (
+ // KeyClientMethod is the HTTP method, capitalized (e.g. GET, POST, PUT, DELETE).
+ KeyClientMethod, _ = tag.NewKey("http_client_method")
+ // KeyClientPath is the URL path (not including query string).
+ KeyClientPath, _ = tag.NewKey("http_client_path")
+ // KeyClientStatus is the HTTP status code as an integer (e.g. 200, 404, 500), or "error" if no response status line was received.
+ KeyClientStatus, _ = tag.NewKey("http_client_status")
+ // KeyClientHost is the value of the request Host header.
+ KeyClientHost, _ = tag.NewKey("http_client_host")
+)
+
+// Default distributions used by views in this package.
+var ( + DefaultSizeDistribution = view.Distribution(1024, 2048, 4096, 16384, 65536, 262144, 1048576, 4194304, 16777216, 67108864, 268435456, 1073741824, 4294967296) + DefaultLatencyDistribution = view.Distribution(1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000) +) + +// Package ochttp provides some convenience views for client measures. +// You still need to register these views for data to actually be collected. +var ( + ClientSentBytesDistribution = &view.View{ + Name: "opencensus.io/http/client/sent_bytes", + Measure: ClientSentBytes, + Aggregation: DefaultSizeDistribution, + Description: "Total bytes sent in request body (not including headers), by HTTP method and response status", + TagKeys: []tag.Key{KeyClientMethod, KeyClientStatus}, + } + + ClientReceivedBytesDistribution = &view.View{ + Name: "opencensus.io/http/client/received_bytes", + Measure: ClientReceivedBytes, + Aggregation: DefaultSizeDistribution, + Description: "Total bytes received in response bodies (not including headers but including error responses with bodies), by HTTP method and response status", + TagKeys: []tag.Key{KeyClientMethod, KeyClientStatus}, + } + + ClientRoundtripLatencyDistribution = &view.View{ + Name: "opencensus.io/http/client/roundtrip_latency", + Measure: ClientRoundtripLatency, + Aggregation: DefaultLatencyDistribution, + Description: "End-to-end latency, by HTTP method and response status", + TagKeys: []tag.Key{KeyClientMethod, KeyClientStatus}, + } + + ClientCompletedCount = &view.View{ + Name: "opencensus.io/http/client/completed_count", + Measure: ClientRoundtripLatency, + Aggregation: view.Count(), + Description: "Count of completed requests, by HTTP method and response status", + TagKeys: []tag.Key{KeyClientMethod, KeyClientStatus}, + } +) + +// Deprecated: Old client Views. +var ( + // Deprecated: No direct replacement, but see ClientCompletedCount. + ClientRequestCountView = &view.View{ + Name: "opencensus.io/http/client/request_count", + Description: "Count of HTTP requests started", + Measure: ClientRequestCount, + Aggregation: view.Count(), + } + + // Deprecated: Use ClientSentBytesDistribution. + ClientRequestBytesView = &view.View{ + Name: "opencensus.io/http/client/request_bytes", + Description: "Size distribution of HTTP request body", + Measure: ClientSentBytes, + Aggregation: DefaultSizeDistribution, + } + + // Deprecated: Use ClientReceivedBytesDistribution instead. + ClientResponseBytesView = &view.View{ + Name: "opencensus.io/http/client/response_bytes", + Description: "Size distribution of HTTP response body", + Measure: ClientReceivedBytes, + Aggregation: DefaultSizeDistribution, + } + + // Deprecated: Use ClientRoundtripLatencyDistribution instead. + ClientLatencyView = &view.View{ + Name: "opencensus.io/http/client/latency", + Description: "Latency distribution of HTTP requests", + Measure: ClientRoundtripLatency, + Aggregation: DefaultLatencyDistribution, + } + + // Deprecated: Use ClientCompletedCount instead. + ClientRequestCountByMethod = &view.View{ + Name: "opencensus.io/http/client/request_count_by_method", + Description: "Client request count by HTTP method", + TagKeys: []tag.Key{Method}, + Measure: ClientSentBytes, + Aggregation: view.Count(), + } + + // Deprecated: Use ClientCompletedCount instead. 
+ ClientResponseCountByStatusCode = &view.View{ + Name: "opencensus.io/http/client/response_count_by_status_code", + Description: "Client response count by status code", + TagKeys: []tag.Key{StatusCode}, + Measure: ClientRoundtripLatency, + Aggregation: view.Count(), + } +) + +// Package ochttp provides some convenience views for server measures. +// You still need to register these views for data to actually be collected. +var ( + ServerRequestCountView = &view.View{ + Name: "opencensus.io/http/server/request_count", + Description: "Count of HTTP requests started", + Measure: ServerRequestCount, + Aggregation: view.Count(), + } + + ServerRequestBytesView = &view.View{ + Name: "opencensus.io/http/server/request_bytes", + Description: "Size distribution of HTTP request body", + Measure: ServerRequestBytes, + Aggregation: DefaultSizeDistribution, + } + + ServerResponseBytesView = &view.View{ + Name: "opencensus.io/http/server/response_bytes", + Description: "Size distribution of HTTP response body", + Measure: ServerResponseBytes, + Aggregation: DefaultSizeDistribution, + } + + ServerLatencyView = &view.View{ + Name: "opencensus.io/http/server/latency", + Description: "Latency distribution of HTTP requests", + Measure: ServerLatency, + Aggregation: DefaultLatencyDistribution, + } + + ServerRequestCountByMethod = &view.View{ + Name: "opencensus.io/http/server/request_count_by_method", + Description: "Server request count by HTTP method", + TagKeys: []tag.Key{Method}, + Measure: ServerRequestCount, + Aggregation: view.Count(), + } + + ServerResponseCountByStatusCode = &view.View{ + Name: "opencensus.io/http/server/response_count_by_status_code", + Description: "Server response count by status code", + TagKeys: []tag.Key{StatusCode}, + Measure: ServerLatency, + Aggregation: view.Count(), + } +) + +// DefaultClientViews are the default client views provided by this package. +// Deprecated: No replacement. Register the views you would like individually. +var DefaultClientViews = []*view.View{ + ClientRequestCountView, + ClientRequestBytesView, + ClientResponseBytesView, + ClientLatencyView, + ClientRequestCountByMethod, + ClientResponseCountByStatusCode, +} + +// DefaultServerViews are the default server views provided by this package. +// Deprecated: No replacement. Register the views you would like individually. +var DefaultServerViews = []*view.View{ + ServerRequestCountView, + ServerRequestBytesView, + ServerResponseBytesView, + ServerLatencyView, + ServerRequestCountByMethod, + ServerResponseCountByStatusCode, +} diff --git a/vendor/go.opencensus.io/plugin/ochttp/trace.go b/vendor/go.opencensus.io/plugin/ochttp/trace.go new file mode 100644 index 000000000..c23b97fb1 --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ochttp/trace.go @@ -0,0 +1,239 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
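+
+// A minimal sketch of a custom view over the client measures and tag keys
+// defined in stats.go above, cut by method and status; the view name is a
+// placeholder:
+//
+//	v := &view.View{
+//		Name:        "example.com/http/client/roundtrip_latency",
+//		Measure:     ochttp.ClientRoundtripLatency,
+//		Aggregation: ochttp.DefaultLatencyDistribution,
+//		TagKeys:     []tag.Key{ochttp.KeyClientMethod, ochttp.KeyClientStatus},
+//	}
+//	if err := view.Register(v); err != nil {
+//		log.Fatal(err)
+//	}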
+
+package ochttp
+
+import (
+ "io"
+ "net/http"
+ "net/http/httptrace"
+
+ "go.opencensus.io/plugin/ochttp/propagation/b3"
+ "go.opencensus.io/trace"
+ "go.opencensus.io/trace/propagation"
+)
+
+// TODO(jbd): Add godoc examples.
+
+var defaultFormat propagation.HTTPFormat = &b3.HTTPFormat{}
+
+// Attributes recorded on the span for the requests.
+// Only trace exporters will need them.
+const (
+ HostAttribute = "http.host"
+ MethodAttribute = "http.method"
+ PathAttribute = "http.path"
+ URLAttribute = "http.url"
+ UserAgentAttribute = "http.user_agent"
+ StatusCodeAttribute = "http.status_code"
+)
+
+type traceTransport struct {
+ base http.RoundTripper
+ startOptions trace.StartOptions
+ format propagation.HTTPFormat
+ formatSpanName func(*http.Request) string
+ newClientTrace func(*http.Request, *trace.Span) *httptrace.ClientTrace
+}
+
+// TODO(jbd): Add message events for request and response size.
+
+// RoundTrip creates a trace.Span and inserts it into the outgoing request's headers.
+// The created span can follow a parent span, if a parent is present in
+// the request's context.
+func (t *traceTransport) RoundTrip(req *http.Request) (*http.Response, error) {
+ name := t.formatSpanName(req)
+ // TODO(jbd): Discuss whether we want to prefix
+ // outgoing requests with Sent.
+ ctx, span := trace.StartSpan(req.Context(), name,
+ trace.WithSampler(t.startOptions.Sampler),
+ trace.WithSpanKind(trace.SpanKindClient))
+
+ if t.newClientTrace != nil {
+ req = req.WithContext(httptrace.WithClientTrace(ctx, t.newClientTrace(req, span)))
+ } else {
+ req = req.WithContext(ctx)
+ }
+
+ if t.format != nil {
+ // SpanContextToRequest will modify its Request argument, which is
+ // contrary to the contract for http.RoundTripper, so we need to
+ // pass it a copy of the Request.
+ // However, the Request struct itself was already copied by
+ // the WithContext calls above and so we just need to copy the header.
+ header := make(http.Header)
+ for k, v := range req.Header {
+ header[k] = v
+ }
+ req.Header = header
+ t.format.SpanContextToRequest(span.SpanContext(), req)
+ }
+
+ span.AddAttributes(requestAttrs(req)...)
+ resp, err := t.base.RoundTrip(req)
+ if err != nil {
+ span.SetStatus(trace.Status{Code: trace.StatusCodeUnknown, Message: err.Error()})
+ span.End()
+ return resp, err
+ }
+
+ span.AddAttributes(responseAttrs(resp)...)
+ span.SetStatus(TraceStatus(resp.StatusCode, resp.Status))
+
+ // span.End() will be invoked after
+ // a read from resp.Body returns io.EOF or when
+ // resp.Body.Close() is invoked.
+ bt := &bodyTracker{rc: resp.Body, span: span}
+ resp.Body = wrappedBody(bt, resp.Body)
+ return resp, err
+}
+
+// bodyTracker wraps a response.Body and invokes
+// trace.EndSpan on encountering io.EOF on reading
+// the body of the original response.
+type bodyTracker struct {
+ rc io.ReadCloser
+ span *trace.Span
+}
+
+var _ io.ReadCloser = (*bodyTracker)(nil)
+
+func (bt *bodyTracker) Read(b []byte) (int, error) {
+ n, err := bt.rc.Read(b)
+
+ switch err {
+ case nil:
+ return n, nil
+ case io.EOF:
+ bt.span.End()
+ default:
+ // For all other errors, set the span status
+ bt.span.SetStatus(trace.Status{
+ // Code 2 is trace.StatusCodeUnknown, the generic error status.
+ Code: 2,
+ Message: err.Error(),
+ })
+ }
+ return n, err
+}
+
+func (bt *bodyTracker) Close() error {
+ // Invoking endSpan on Close will help catch the cases
+ // in which a read returned a non-nil error, we set the
+ // span status but didn't end the span.
+ bt.span.End() + return bt.rc.Close() +} + +// CancelRequest cancels an in-flight request by closing its connection. +func (t *traceTransport) CancelRequest(req *http.Request) { + type canceler interface { + CancelRequest(*http.Request) + } + if cr, ok := t.base.(canceler); ok { + cr.CancelRequest(req) + } +} + +func spanNameFromURL(req *http.Request) string { + return req.URL.Path +} + +func requestAttrs(r *http.Request) []trace.Attribute { + userAgent := r.UserAgent() + + attrs := make([]trace.Attribute, 0, 5) + attrs = append(attrs, + trace.StringAttribute(PathAttribute, r.URL.Path), + trace.StringAttribute(URLAttribute, r.URL.String()), + trace.StringAttribute(HostAttribute, r.Host), + trace.StringAttribute(MethodAttribute, r.Method), + ) + + if userAgent != "" { + attrs = append(attrs, trace.StringAttribute(UserAgentAttribute, userAgent)) + } + + return attrs +} + +func responseAttrs(resp *http.Response) []trace.Attribute { + return []trace.Attribute{ + trace.Int64Attribute(StatusCodeAttribute, int64(resp.StatusCode)), + } +} + +// TraceStatus is a utility to convert the HTTP status code to a trace.Status that +// represents the outcome as closely as possible. +func TraceStatus(httpStatusCode int, statusLine string) trace.Status { + var code int32 + if httpStatusCode < 200 || httpStatusCode >= 400 { + code = trace.StatusCodeUnknown + } + switch httpStatusCode { + case 499: + code = trace.StatusCodeCancelled + case http.StatusBadRequest: + code = trace.StatusCodeInvalidArgument + case http.StatusGatewayTimeout: + code = trace.StatusCodeDeadlineExceeded + case http.StatusNotFound: + code = trace.StatusCodeNotFound + case http.StatusForbidden: + code = trace.StatusCodePermissionDenied + case http.StatusUnauthorized: // 401 is actually unauthenticated. + code = trace.StatusCodeUnauthenticated + case http.StatusTooManyRequests: + code = trace.StatusCodeResourceExhausted + case http.StatusNotImplemented: + code = trace.StatusCodeUnimplemented + case http.StatusServiceUnavailable: + code = trace.StatusCodeUnavailable + case http.StatusOK: + code = trace.StatusCodeOK + } + return trace.Status{Code: code, Message: codeToStr[code]} +} + +var codeToStr = map[int32]string{ + trace.StatusCodeOK: `OK`, + trace.StatusCodeCancelled: `CANCELLED`, + trace.StatusCodeUnknown: `UNKNOWN`, + trace.StatusCodeInvalidArgument: `INVALID_ARGUMENT`, + trace.StatusCodeDeadlineExceeded: `DEADLINE_EXCEEDED`, + trace.StatusCodeNotFound: `NOT_FOUND`, + trace.StatusCodeAlreadyExists: `ALREADY_EXISTS`, + trace.StatusCodePermissionDenied: `PERMISSION_DENIED`, + trace.StatusCodeResourceExhausted: `RESOURCE_EXHAUSTED`, + trace.StatusCodeFailedPrecondition: `FAILED_PRECONDITION`, + trace.StatusCodeAborted: `ABORTED`, + trace.StatusCodeOutOfRange: `OUT_OF_RANGE`, + trace.StatusCodeUnimplemented: `UNIMPLEMENTED`, + trace.StatusCodeInternal: `INTERNAL`, + trace.StatusCodeUnavailable: `UNAVAILABLE`, + trace.StatusCodeDataLoss: `DATA_LOSS`, + trace.StatusCodeUnauthenticated: `UNAUTHENTICATED`, +} + +func isHealthEndpoint(path string) bool { + // Health checking is pretty frequent and + // traces collected for health endpoints + // can be extremely noisy and expensive. + // Disable canonical health checking endpoints + // like /healthz and /_ah/health for now. 
+ if path == "/healthz" || path == "/_ah/health" {
+ return true
+ }
+ return false
+}
diff --git a/vendor/go.opencensus.io/plugin/ochttp/wrapped_body.go b/vendor/go.opencensus.io/plugin/ochttp/wrapped_body.go
new file mode 100644
index 000000000..7d75cae2b
--- /dev/null
+++ b/vendor/go.opencensus.io/plugin/ochttp/wrapped_body.go
@@ -0,0 +1,44 @@
+// Copyright 2019, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ochttp
+
+import (
+ "io"
+)
+
+// wrappedBody returns a wrapped version of the original
+// Body and only implements the same combination of additional
+// interfaces as the original.
+func wrappedBody(wrapper io.ReadCloser, body io.ReadCloser) io.ReadCloser {
+ var (
+ wr, i0 = body.(io.Writer)
+ )
+ switch {
+ case !i0:
+ return struct {
+ io.ReadCloser
+ }{wrapper}
+
+ case i0:
+ return struct {
+ io.ReadCloser
+ io.Writer
+ }{wrapper, wr}
+ default:
+ return struct {
+ io.ReadCloser
+ }{wrapper}
+ }
+}
diff --git a/vendor/go.opencensus.io/resource/resource.go b/vendor/go.opencensus.io/resource/resource.go
new file mode 100644
index 000000000..b1764e1d3
--- /dev/null
+++ b/vendor/go.opencensus.io/resource/resource.go
@@ -0,0 +1,164 @@
+// Copyright 2018, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package resource provides functionality for resources, which capture
+// identifying information about the entities for which signals are exported.
+package resource
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "regexp"
+ "sort"
+ "strconv"
+ "strings"
+)
+
+// Environment variables used by FromEnv to decode a resource.
+const (
+ EnvVarType = "OC_RESOURCE_TYPE"
+ EnvVarLabels = "OC_RESOURCE_LABELS"
+)
+
+// Resource describes an entity about which identifying information and metadata is exposed.
+// For example, a type "k8s.io/container" may hold labels describing the pod name and namespace.
+type Resource struct {
+ Type string
+ Labels map[string]string
+}
+
+// EncodeLabels encodes a labels map to a string as provided via the OC_RESOURCE_LABELS environment variable.
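+//
+// For example (an illustrative call):
+//
+//	EncodeLabels(map[string]string{"k8s.io/namespace": "default"})
+//	// returns `k8s.io/namespace="default"`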
+func EncodeLabels(labels map[string]string) string {
+ sortedKeys := make([]string, 0, len(labels))
+ for k := range labels {
+ sortedKeys = append(sortedKeys, k)
+ }
+ sort.Strings(sortedKeys)
+
+ s := ""
+ for i, k := range sortedKeys {
+ if i > 0 {
+ s += ","
+ }
+ s += k + "=" + strconv.Quote(labels[k])
+ }
+ return s
+}
+
+var labelRegex = regexp.MustCompile(`^\s*([[:ascii:]]{1,256}?)=("[[:ascii:]]{0,256}?")\s*,`)
+
+// DecodeLabels decodes a serialized label map as used in the OC_RESOURCE_LABELS variable.
+// A list of labels of the form `<key1>="<value1>",<key2>="<value2>",...` is accepted.
+// Domain names and paths are accepted as label keys.
+// Most users will want to use FromEnv instead.
+func DecodeLabels(s string) (map[string]string, error) {
+ m := map[string]string{}
+ // Ensure a trailing comma, which allows us to keep the regex simpler
+ s = strings.TrimRight(strings.TrimSpace(s), ",") + ","
+
+ for len(s) > 0 {
+ match := labelRegex.FindStringSubmatch(s)
+ if len(match) == 0 {
+ return nil, fmt.Errorf("invalid label formatting, remainder: %s", s)
+ }
+ v := match[2]
+ if v == "" {
+ v = match[3]
+ } else {
+ var err error
+ if v, err = strconv.Unquote(v); err != nil {
+ return nil, fmt.Errorf("invalid label formatting, remainder: %s, err: %s", s, err)
+ }
+ }
+ m[match[1]] = v
+
+ s = s[len(match[0]):]
+ }
+ return m, nil
+}
+
+// FromEnv is a detector that loads resource information from the OC_RESOURCE_TYPE
+// and OC_RESOURCE_LABELS environment variables.
+func FromEnv(context.Context) (*Resource, error) {
+ res := &Resource{
+ Type: strings.TrimSpace(os.Getenv(EnvVarType)),
+ }
+ labels := strings.TrimSpace(os.Getenv(EnvVarLabels))
+ if labels == "" {
+ return res, nil
+ }
+ var err error
+ if res.Labels, err = DecodeLabels(labels); err != nil {
+ return nil, err
+ }
+ return res, nil
+}
+
+var _ Detector = FromEnv
+
+// merge resource information from b into a. In case of a collision, a takes precedence.
+func merge(a, b *Resource) *Resource {
+ if a == nil {
+ return b
+ }
+ if b == nil {
+ return a
+ }
+ res := &Resource{
+ Type: a.Type,
+ Labels: map[string]string{},
+ }
+ if res.Type == "" {
+ res.Type = b.Type
+ }
+ for k, v := range b.Labels {
+ res.Labels[k] = v
+ }
+ // Labels from resource a overwrite labels from resource b.
+ for k, v := range a.Labels {
+ res.Labels[k] = v
+ }
+ return res
+}
+
+// Detector attempts to detect resource information.
+// If the detector cannot find resource information, the returned resource is nil but no
+// error is returned.
+// An error is only returned on unexpected failures.
+type Detector func(context.Context) (*Resource, error)
+
+// MultiDetector returns a Detector that calls all input detectors in order and
+// merges each result with the previous one. In case a type or label key is
+// already set, the first set value takes precedence.
+// It returns on the first error that a sub-detector encounters.
+func MultiDetector(detectors ...Detector) Detector {
+ return func(ctx context.Context) (*Resource, error) {
+ return detectAll(ctx, detectors...)
+ }
+}
+
+// detectAll calls all input detectors sequentially and merges each result with the previous one.
+// It returns on the first error that a sub-detector encounters.
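+//
+// For example, combined through MultiDetector (myDetector is a hypothetical
+// custom Detector):
+//
+//	detect := MultiDetector(FromEnv, myDetector)
+//	res, err := detect(context.Background())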
+func detectAll(ctx context.Context, detectors ...Detector) (*Resource, error) {
+ var res *Resource
+ for _, d := range detectors {
+ r, err := d(ctx)
+ if err != nil {
+ return nil, err
+ }
+ res = merge(res, r)
+ }
+ return res, nil
+}
diff --git a/vendor/go.opencensus.io/stats/doc.go b/vendor/go.opencensus.io/stats/doc.go
new file mode 100644
index 000000000..00d473ee0
--- /dev/null
+++ b/vendor/go.opencensus.io/stats/doc.go
@@ -0,0 +1,69 @@
+// Copyright 2017, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+/*
+Package stats contains support for OpenCensus stats recording.
+
+OpenCensus allows users to create typed measures, record measurements,
+aggregate the collected data, and export the aggregated data.
+
+Measures
+
+A measure represents a type of data point to be tracked and recorded.
+For example, latency, request Mb/s, and response Mb/s are measures
+to collect from a server.
+
+Measure constructors such as Int64 and Float64 automatically
+register the measure by the given name. Each registered measure needs
+to be unique by name. Measures also have a description and a unit.
+
+Libraries can define and export measures. Application authors can then
+create views and collect and break down measures by the tags they are
+interested in.
+
+Recording measurements
+
+Measurement is a data point to be collected for a measure. For example,
+for a latency (ms) measure, 100 is a measurement that represents a 100ms
+latency event. Measurements are created from measures with
+the current context. Tags from the current context are recorded with the
+measurements if there are any.
+
+Recorded measurements are dropped immediately if no views are registered for them.
+There is usually no need to conditionally enable and disable
+recording to reduce cost. Recording of measurements is cheap.
+
+Libraries can always record measurements, and applications can later decide
+on which measurements they want to collect by registering views. This allows
+libraries to turn on the instrumentation by default.
+
+Exemplars
+
+For a given recorded measurement, the associated exemplar is a diagnostic map
+that gives more information about the measurement.
+
+When aggregated using a Distribution aggregation, an exemplar is kept for each
+bucket in the Distribution. This allows you to easily find an example of a
+measurement that fell into each bucket.
+
+For example, if you also use the OpenCensus trace package and you
+record a measurement with a context that contains a sampled trace span,
+then the trace span will be added to the exemplar associated with the measurement.
+
+When exported to a supporting back end, you should be able to easily navigate
+to example traces that fell into each bucket in the Distribution.
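+
+For example, a minimal recording sketch (the measure name and value are
+placeholders):
+
+	var latencyMs = stats.Float64("example.com/latency", "Request latency", stats.UnitMilliseconds)
+
+	func handle(ctx context.Context) {
+		// Records 11.3ms against the tags carried by ctx.
+		stats.Record(ctx, latencyMs.M(11.3))
+	}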
+ +*/ +package stats // import "go.opencensus.io/stats" diff --git a/vendor/go.opencensus.io/stats/internal/record.go b/vendor/go.opencensus.io/stats/internal/record.go new file mode 100644 index 000000000..36935e629 --- /dev/null +++ b/vendor/go.opencensus.io/stats/internal/record.go @@ -0,0 +1,25 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +import ( + "go.opencensus.io/tag" +) + +// DefaultRecorder will be called for each Record call. +var DefaultRecorder func(tags *tag.Map, measurement interface{}, attachments map[string]interface{}) + +// SubscriptionReporter reports when a view subscribed with a measure. +var SubscriptionReporter func(measure string) diff --git a/vendor/go.opencensus.io/stats/measure.go b/vendor/go.opencensus.io/stats/measure.go new file mode 100644 index 000000000..1ffd3cefc --- /dev/null +++ b/vendor/go.opencensus.io/stats/measure.go @@ -0,0 +1,109 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package stats + +import ( + "sync" + "sync/atomic" +) + +// Measure represents a single numeric value to be tracked and recorded. +// For example, latency, request bytes, and response bytes could be measures +// to collect from a server. +// +// Measures by themselves have no outside effects. In order to be exported, +// the measure needs to be used in a View. If no Views are defined over a +// measure, there is very little cost in recording it. +type Measure interface { + // Name returns the name of this measure. + // + // Measure names are globally unique (among all libraries linked into your program). + // We recommend prefixing the measure name with a domain name relevant to your + // project or application. + // + // Measure names are never sent over the wire or exported to backends. + // They are only used to create Views. + Name() string + + // Description returns the human-readable description of this measure. + Description() string + + // Unit returns the units for the values this measure takes on. + // + // Units are encoded according to the case-sensitive abbreviations from the + // Unified Code for Units of Measure: http://unitsofmeasure.org/ucum.html + Unit() string +} + +// measureDescriptor is the untyped descriptor associated with each measure. +// Int64Measure and Float64Measure wrap measureDescriptor to provide typed +// recording APIs. +// Two Measures with the same name will have the same measureDescriptor. 
+type measureDescriptor struct { + subs int32 // access atomically + + name string + description string + unit string +} + +func (m *measureDescriptor) subscribe() { + atomic.StoreInt32(&m.subs, 1) +} + +func (m *measureDescriptor) subscribed() bool { + return atomic.LoadInt32(&m.subs) == 1 +} + +var ( + mu sync.RWMutex + measures = make(map[string]*measureDescriptor) +) + +func registerMeasureHandle(name, desc, unit string) *measureDescriptor { + mu.Lock() + defer mu.Unlock() + + if stored, ok := measures[name]; ok { + return stored + } + m := &measureDescriptor{ + name: name, + description: desc, + unit: unit, + } + measures[name] = m + return m +} + +// Measurement is the numeric value measured when recording stats. Each measure +// provides methods to create measurements of their kind. For example, Int64Measure +// provides M to convert an int64 into a measurement. +type Measurement struct { + v float64 + m Measure + desc *measureDescriptor +} + +// Value returns the value of the Measurement as a float64. +func (m Measurement) Value() float64 { + return m.v +} + +// Measure returns the Measure from which this Measurement was created. +func (m Measurement) Measure() Measure { + return m.m +} diff --git a/vendor/go.opencensus.io/stats/measure_float64.go b/vendor/go.opencensus.io/stats/measure_float64.go new file mode 100644 index 000000000..f02c1eda8 --- /dev/null +++ b/vendor/go.opencensus.io/stats/measure_float64.go @@ -0,0 +1,55 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package stats + +// Float64Measure is a measure for float64 values. +type Float64Measure struct { + desc *measureDescriptor +} + +// M creates a new float64 measurement. +// Use Record to record measurements. +func (m *Float64Measure) M(v float64) Measurement { + return Measurement{ + m: m, + desc: m.desc, + v: v, + } +} + +// Float64 creates a new measure for float64 values. +// +// See the documentation for interface Measure for more guidance on the +// parameters of this function. +func Float64(name, description, unit string) *Float64Measure { + mi := registerMeasureHandle(name, description, unit) + return &Float64Measure{mi} +} + +// Name returns the name of the measure. +func (m *Float64Measure) Name() string { + return m.desc.name +} + +// Description returns the description of the measure. +func (m *Float64Measure) Description() string { + return m.desc.description +} + +// Unit returns the unit of the measure. +func (m *Float64Measure) Unit() string { + return m.desc.unit +} diff --git a/vendor/go.opencensus.io/stats/measure_int64.go b/vendor/go.opencensus.io/stats/measure_int64.go new file mode 100644 index 000000000..d101d7973 --- /dev/null +++ b/vendor/go.opencensus.io/stats/measure_int64.go @@ -0,0 +1,55 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package stats + +// Int64Measure is a measure for int64 values. +type Int64Measure struct { + desc *measureDescriptor +} + +// M creates a new int64 measurement. +// Use Record to record measurements. +func (m *Int64Measure) M(v int64) Measurement { + return Measurement{ + m: m, + desc: m.desc, + v: float64(v), + } +} + +// Int64 creates a new measure for int64 values. +// +// See the documentation for interface Measure for more guidance on the +// parameters of this function. +func Int64(name, description, unit string) *Int64Measure { + mi := registerMeasureHandle(name, description, unit) + return &Int64Measure{mi} +} + +// Name returns the name of the measure. +func (m *Int64Measure) Name() string { + return m.desc.name +} + +// Description returns the description of the measure. +func (m *Int64Measure) Description() string { + return m.desc.description +} + +// Unit returns the unit of the measure. +func (m *Int64Measure) Unit() string { + return m.desc.unit +} diff --git a/vendor/go.opencensus.io/stats/record.go b/vendor/go.opencensus.io/stats/record.go new file mode 100644 index 000000000..d2af0a60d --- /dev/null +++ b/vendor/go.opencensus.io/stats/record.go @@ -0,0 +1,69 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package stats + +import ( + "context" + + "go.opencensus.io/stats/internal" + "go.opencensus.io/tag" +) + +func init() { + internal.SubscriptionReporter = func(measure string) { + mu.Lock() + measures[measure].subscribe() + mu.Unlock() + } +} + +// Record records one or multiple measurements with the same context at once. +// If there are any tags in the context, measurements will be tagged with them. +func Record(ctx context.Context, ms ...Measurement) { + recorder := internal.DefaultRecorder + if recorder == nil { + return + } + if len(ms) == 0 { + return + } + record := false + for _, m := range ms { + if m.desc.subscribed() { + record = true + break + } + } + if !record { + return + } + // TODO(songy23): fix attachments. + recorder(tag.FromContext(ctx), ms, map[string]interface{}{}) +} + +// RecordWithTags records one or multiple measurements at once. +// +// Measurements will be tagged with the tags in the context mutated by the mutators. +// RecordWithTags is useful if you want to record with tag mutations but don't want +// to propagate the mutations in the context. +func RecordWithTags(ctx context.Context, mutators []tag.Mutator, ms ...Measurement) error { + ctx, err := tag.New(ctx, mutators...) + if err != nil { + return err + } + Record(ctx, ms...) 
+ return nil +} diff --git a/vendor/go.opencensus.io/stats/units.go b/vendor/go.opencensus.io/stats/units.go new file mode 100644 index 000000000..6931a5f29 --- /dev/null +++ b/vendor/go.opencensus.io/stats/units.go @@ -0,0 +1,25 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package stats + +// Units are encoded according to the case-sensitive abbreviations from the +// Unified Code for Units of Measure: http://unitsofmeasure.org/ucum.html +const ( + UnitNone = "1" // Deprecated: Use UnitDimensionless. + UnitDimensionless = "1" + UnitBytes = "By" + UnitMilliseconds = "ms" +) diff --git a/vendor/go.opencensus.io/stats/view/aggregation.go b/vendor/go.opencensus.io/stats/view/aggregation.go new file mode 100644 index 000000000..b7f169b4a --- /dev/null +++ b/vendor/go.opencensus.io/stats/view/aggregation.go @@ -0,0 +1,120 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package view + +// AggType represents the type of aggregation function used on a View. +type AggType int + +// All available aggregation types. +const ( + AggTypeNone AggType = iota // no aggregation; reserved for future use. + AggTypeCount // the count aggregation, see Count. + AggTypeSum // the sum aggregation, see Sum. + AggTypeDistribution // the distribution aggregation, see Distribution. + AggTypeLastValue // the last value aggregation, see LastValue. +) + +func (t AggType) String() string { + return aggTypeName[t] +} + +var aggTypeName = map[AggType]string{ + AggTypeNone: "None", + AggTypeCount: "Count", + AggTypeSum: "Sum", + AggTypeDistribution: "Distribution", + AggTypeLastValue: "LastValue", +} + +// Aggregation represents a data aggregation method. Use one of the functions: +// Count, Sum, or Distribution to construct an Aggregation. +type Aggregation struct { + Type AggType // Type is the AggType of this Aggregation. + Buckets []float64 // Buckets are the bucket endpoints if this Aggregation represents a distribution, see Distribution. + + newData func() AggregationData +} + +var ( + aggCount = &Aggregation{ + Type: AggTypeCount, + newData: func() AggregationData { + return &CountData{} + }, + } + aggSum = &Aggregation{ + Type: AggTypeSum, + newData: func() AggregationData { + return &SumData{} + }, + } +) + +// Count indicates that data collected and aggregated +// with this method will be turned into a count value. 
+// For example, the total number of accepted requests can be
+// aggregated by using Count.
+func Count() *Aggregation {
+	return aggCount
+}
+
+// Sum indicates that data collected and aggregated
+// with this method will be summed up.
+// For example, accumulated request bytes can be aggregated by using
+// Sum.
+func Sum() *Aggregation {
+	return aggSum
+}
+
+// Distribution indicates that the desired aggregation is
+// a histogram distribution.
+//
+// A distribution aggregation may contain a histogram of the values in the
+// population. The bucket boundaries for that histogram are described
+// by the bounds. This defines len(bounds)+1 buckets.
+//
+// If len(bounds) >= 2 then the boundaries for bucket index i are:
+//
+//     [-infinity, bounds[i]) for i = 0
+//     [bounds[i-1], bounds[i]) for 0 < i < length
+//     [bounds[i-1], +infinity) for i = length
+//
+// If len(bounds) is 0 then there is no histogram associated with the
+// distribution. There will be a single bucket with boundaries
+// (-infinity, +infinity).
+//
+// If len(bounds) is 1 then there are no finite buckets, and that single
+// element is the common boundary of the overflow and underflow buckets.
+func Distribution(bounds ...float64) *Aggregation {
+	return &Aggregation{
+		Type:    AggTypeDistribution,
+		Buckets: bounds,
+		newData: func() AggregationData {
+			return newDistributionData(bounds)
+		},
+	}
+}
+
+// LastValue only reports the last value recorded using this
+// aggregation. All other measurements will be dropped.
+func LastValue() *Aggregation {
+	return &Aggregation{
+		Type: AggTypeLastValue,
+		newData: func() AggregationData {
+			return &LastValueData{}
+		},
+	}
+}
diff --git a/vendor/go.opencensus.io/stats/view/aggregation_data.go b/vendor/go.opencensus.io/stats/view/aggregation_data.go
new file mode 100644
index 000000000..d500e67f7
--- /dev/null
+++ b/vendor/go.opencensus.io/stats/view/aggregation_data.go
@@ -0,0 +1,293 @@
+// Copyright 2017, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package view
+
+import (
+	"math"
+	"time"
+
+	"go.opencensus.io/metric/metricdata"
+)
+
+// AggregationData represents an aggregated value from a collection.
+// They are reported on the view data during exporting.
+// Most users won't directly access aggregation data.
+type AggregationData interface {
+	isAggregationData() bool
+	addSample(v float64, attachments map[string]interface{}, t time.Time)
+	clone() AggregationData
+	equal(other AggregationData) bool
+	toPoint(t metricdata.Type, time time.Time) metricdata.Point
+}
+
+const epsilon = 1e-9
+
+// CountData is the aggregated data for the Count aggregation.
+// A count aggregation processes data and counts the recordings.
+//
+// Most users won't directly access count data.
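Tying the aggregation constructors above to the measures and views they serve, here is a minimal end-to-end sketch; it is not part of the vendored code, all names are invented, and view.Register appears later in this patch (worker.go).

package main

import (
	"context"
	"log"

	"go.opencensus.io/stats"
	"go.opencensus.io/stats/view"
)

// Illustrative only; not part of the vendored OpenCensus code.
var latencyMs = stats.Float64("example.com/measures/latency", "request latency", stats.UnitMilliseconds)

func main() {
	// Three bounds define four buckets: [-inf, 25), [25, 100), [100, 400), [400, +inf).
	v := &view.View{
		Name:        "example.com/views/latency",
		Description: "distribution of request latencies",
		Measure:     latencyMs,
		Aggregation: view.Distribution(25, 100, 400),
	}
	if err := view.Register(v); err != nil {
		log.Fatal(err)
	}
	stats.Record(context.Background(), latencyMs.M(96.5)) // lands in the [25, 100) bucket
}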
+type CountData struct {
+	Value int64
+}
+
+func (a *CountData) isAggregationData() bool { return true }
+
+func (a *CountData) addSample(_ float64, _ map[string]interface{}, _ time.Time) {
+	a.Value = a.Value + 1
+}
+
+func (a *CountData) clone() AggregationData {
+	return &CountData{Value: a.Value}
+}
+
+func (a *CountData) equal(other AggregationData) bool {
+	a2, ok := other.(*CountData)
+	if !ok {
+		return false
+	}
+
+	return a.Value == a2.Value
+}
+
+func (a *CountData) toPoint(metricType metricdata.Type, t time.Time) metricdata.Point {
+	switch metricType {
+	case metricdata.TypeCumulativeInt64:
+		return metricdata.NewInt64Point(t, a.Value)
+	default:
+		panic("unsupported metricdata.Type")
+	}
+}
+
+// SumData is the aggregated data for the Sum aggregation.
+// A sum aggregation processes data and sums up the recordings.
+//
+// Most users won't directly access sum data.
+type SumData struct {
+	Value float64
+}
+
+func (a *SumData) isAggregationData() bool { return true }
+
+func (a *SumData) addSample(v float64, _ map[string]interface{}, _ time.Time) {
+	a.Value += v
+}
+
+func (a *SumData) clone() AggregationData {
+	return &SumData{Value: a.Value}
+}
+
+func (a *SumData) equal(other AggregationData) bool {
+	a2, ok := other.(*SumData)
+	if !ok {
+		return false
+	}
+	return math.Pow(a.Value-a2.Value, 2) < epsilon
+}
+
+func (a *SumData) toPoint(metricType metricdata.Type, t time.Time) metricdata.Point {
+	switch metricType {
+	case metricdata.TypeCumulativeInt64:
+		return metricdata.NewInt64Point(t, int64(a.Value))
+	case metricdata.TypeCumulativeFloat64:
+		return metricdata.NewFloat64Point(t, a.Value)
+	default:
+		panic("unsupported metricdata.Type")
+	}
+}
+
+// DistributionData is the aggregated data for the
+// Distribution aggregation.
+//
+// Most users won't directly access distribution data.
+//
+// For a distribution with N bounds, the associated DistributionData will have
+// N+1 buckets.
+type DistributionData struct {
+	Count           int64   // number of data points aggregated
+	Min             float64 // minimum value in the distribution
+	Max             float64 // maximum value in the distribution
+	Mean            float64 // mean of the distribution
+	SumOfSquaredDev float64 // sum of the squared deviation from the mean
+	CountPerBucket  []int64 // number of occurrences per bucket
+	// ExemplarsPerBucket is a slice of the same length as CountPerBucket,
+	// containing an exemplar for the associated bucket, or nil.
+	ExemplarsPerBucket []*metricdata.Exemplar
+	bounds             []float64 // histogram distribution of the values
+}
+
+func newDistributionData(bounds []float64) *DistributionData {
+	bucketCount := len(bounds) + 1
+	return &DistributionData{
+		CountPerBucket:     make([]int64, bucketCount),
+		ExemplarsPerBucket: make([]*metricdata.Exemplar, bucketCount),
+		bounds:             bounds,
+		Min:                math.MaxFloat64,
+		Max:                math.SmallestNonzeroFloat64,
+	}
+}
+
+// Sum returns the sum of all samples collected.
+func (a *DistributionData) Sum() float64 { return a.Mean * float64(a.Count) }
+
+func (a *DistributionData) variance() float64 {
+	if a.Count <= 1 {
+		return 0
+	}
+	return a.SumOfSquaredDev / float64(a.Count-1)
+}
+
+func (a *DistributionData) isAggregationData() bool { return true }
+
+// TODO(songy23): support exemplar attachments.
+func (a *DistributionData) addSample(v float64, attachments map[string]interface{}, t time.Time) { + if v < a.Min { + a.Min = v + } + if v > a.Max { + a.Max = v + } + a.Count++ + a.addToBucket(v, attachments, t) + + if a.Count == 1 { + a.Mean = v + return + } + + oldMean := a.Mean + a.Mean = a.Mean + (v-a.Mean)/float64(a.Count) + a.SumOfSquaredDev = a.SumOfSquaredDev + (v-oldMean)*(v-a.Mean) +} + +func (a *DistributionData) addToBucket(v float64, attachments map[string]interface{}, t time.Time) { + var count *int64 + var i int + var b float64 + for i, b = range a.bounds { + if v < b { + count = &a.CountPerBucket[i] + break + } + } + if count == nil { // Last bucket. + i = len(a.bounds) + count = &a.CountPerBucket[i] + } + *count++ + if exemplar := getExemplar(v, attachments, t); exemplar != nil { + a.ExemplarsPerBucket[i] = exemplar + } +} + +func getExemplar(v float64, attachments map[string]interface{}, t time.Time) *metricdata.Exemplar { + if len(attachments) == 0 { + return nil + } + return &metricdata.Exemplar{ + Value: v, + Timestamp: t, + Attachments: attachments, + } +} + +func (a *DistributionData) clone() AggregationData { + c := *a + c.CountPerBucket = append([]int64(nil), a.CountPerBucket...) + c.ExemplarsPerBucket = append([]*metricdata.Exemplar(nil), a.ExemplarsPerBucket...) + return &c +} + +func (a *DistributionData) equal(other AggregationData) bool { + a2, ok := other.(*DistributionData) + if !ok { + return false + } + if a2 == nil { + return false + } + if len(a.CountPerBucket) != len(a2.CountPerBucket) { + return false + } + for i := range a.CountPerBucket { + if a.CountPerBucket[i] != a2.CountPerBucket[i] { + return false + } + } + return a.Count == a2.Count && a.Min == a2.Min && a.Max == a2.Max && math.Pow(a.Mean-a2.Mean, 2) < epsilon && math.Pow(a.variance()-a2.variance(), 2) < epsilon +} + +func (a *DistributionData) toPoint(metricType metricdata.Type, t time.Time) metricdata.Point { + switch metricType { + case metricdata.TypeCumulativeDistribution: + buckets := []metricdata.Bucket{} + for i := 0; i < len(a.CountPerBucket); i++ { + buckets = append(buckets, metricdata.Bucket{ + Count: a.CountPerBucket[i], + Exemplar: a.ExemplarsPerBucket[i], + }) + } + bucketOptions := &metricdata.BucketOptions{Bounds: a.bounds} + + val := &metricdata.Distribution{ + Count: a.Count, + Sum: a.Sum(), + SumOfSquaredDeviation: a.SumOfSquaredDev, + BucketOptions: bucketOptions, + Buckets: buckets, + } + return metricdata.NewDistributionPoint(t, val) + + default: + // TODO: [rghetia] when we have a use case for TypeGaugeDistribution. + panic("unsupported metricdata.Type") + } +} + +// LastValueData returns the last value recorded for LastValue aggregation. 
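DistributionData.addSample above maintains Count, Mean and SumOfSquaredDev incrementally (Welford's online algorithm), so the raw samples never have to be retained. A standalone sketch of the same update rule, using only the Go standard library:

package main

import "fmt"

// Illustrative only; mirrors the incremental mean/deviation update above.
func main() {
	samples := []float64{3, 5, 8, 13}
	var count int64
	var mean, sumSqDev float64
	for _, v := range samples {
		count++
		oldMean := mean
		mean += (v - mean) / float64(count)
		sumSqDev += (v - oldMean) * (v - mean)
	}
	// Matches DistributionData.variance(): sample variance over count-1.
	fmt.Printf("count=%d mean=%g variance=%g\n", count, mean, sumSqDev/float64(count-1))
}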
+type LastValueData struct {
+	Value float64
+}
+
+func (l *LastValueData) isAggregationData() bool {
+	return true
+}
+
+func (l *LastValueData) addSample(v float64, _ map[string]interface{}, _ time.Time) {
+	l.Value = v
+}
+
+func (l *LastValueData) clone() AggregationData {
+	return &LastValueData{l.Value}
+}
+
+func (l *LastValueData) equal(other AggregationData) bool {
+	a2, ok := other.(*LastValueData)
+	if !ok {
+		return false
+	}
+	return l.Value == a2.Value
+}
+
+func (l *LastValueData) toPoint(metricType metricdata.Type, t time.Time) metricdata.Point {
+	switch metricType {
+	case metricdata.TypeGaugeInt64:
+		return metricdata.NewInt64Point(t, int64(l.Value))
+	case metricdata.TypeGaugeFloat64:
+		return metricdata.NewFloat64Point(t, l.Value)
+	default:
+		panic("unsupported metricdata.Type")
+	}
+}
diff --git a/vendor/go.opencensus.io/stats/view/collector.go b/vendor/go.opencensus.io/stats/view/collector.go
new file mode 100644
index 000000000..8a6a2c0fd
--- /dev/null
+++ b/vendor/go.opencensus.io/stats/view/collector.go
@@ -0,0 +1,86 @@
+// Copyright 2017, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package view
+
+import (
+	"sort"
+	"time"
+
+	"go.opencensus.io/internal/tagencoding"
+	"go.opencensus.io/tag"
+)
+
+type collector struct {
+	// signatures maps each unique tag signature (values for all keys)
+	// to its aggregator.
+	signatures map[string]AggregationData
+	// Aggregation is the description of the aggregation to perform for this
+	// view.
+	a *Aggregation
+}
+
+func (c *collector) addSample(s string, v float64, attachments map[string]interface{}, t time.Time) {
+	aggregator, ok := c.signatures[s]
+	if !ok {
+		aggregator = c.a.newData()
+		c.signatures[s] = aggregator
+	}
+	aggregator.addSample(v, attachments, t)
+}
+
+// collectedRows returns a snapshot of the collected Row values.
+func (c *collector) collectedRows(keys []tag.Key) []*Row {
+	rows := make([]*Row, 0, len(c.signatures))
+	for sig, aggregator := range c.signatures {
+		tags := decodeTags([]byte(sig), keys)
+		row := &Row{Tags: tags, Data: aggregator.clone()}
+		rows = append(rows, row)
+	}
+	return rows
+}
+
+func (c *collector) clearRows() {
+	c.signatures = make(map[string]AggregationData)
+}
+
+// encodeWithKeys encodes the map by using values
+// only associated with the keys provided.
+func encodeWithKeys(m *tag.Map, keys []tag.Key) []byte {
+	vb := &tagencoding.Values{
+		Buffer: make([]byte, len(keys)),
+	}
+	for _, k := range keys {
+		v, _ := m.Value(k)
+		vb.WriteValue([]byte(v))
+	}
+	return vb.Bytes()
+}
+
+// decodeTags decodes tags from the buffer and
+// orders them by the keys.
+func decodeTags(buf []byte, keys []tag.Key) []tag.Tag {
+	vb := &tagencoding.Values{Buffer: buf}
+	var tags []tag.Tag
+	for _, k := range keys {
+		v := vb.ReadValue()
+		if v != nil {
+			tags = append(tags, tag.Tag{Key: k, Value: string(v)})
+		}
+	}
+	vb.ReadIndex = 0
+	sort.Slice(tags, func(i, j int) bool { return tags[i].Key.Name() < tags[j].Key.Name() })
+	return tags
+}
diff --git a/vendor/go.opencensus.io/stats/view/doc.go b/vendor/go.opencensus.io/stats/view/doc.go
new file mode 100644
index 000000000..dced225c3
--- /dev/null
+++ b/vendor/go.opencensus.io/stats/view/doc.go
@@ -0,0 +1,47 @@
+// Copyright 2017, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+// Package view contains support for collecting and exposing aggregates over stats.
+//
+// In order to collect measurements, views need to be defined and registered.
+// A view allows recorded measurements to be filtered and aggregated.
+//
+// All recorded measurements can be grouped by a list of tags.
+//
+// OpenCensus provides several aggregation methods: Count, Distribution, Sum, and LastValue.
+//
+// Count only counts the number of measurement points recorded.
+// Distribution provides statistical summary of the aggregated data by counting
+// how many recorded measurements fall into each bucket.
+// Sum adds up the measurement values.
+// LastValue just keeps track of the most recently recorded measurement value.
+// All aggregations are cumulative.
+//
+// Views can be registered and unregistered at any time during program execution.
+//
+// Libraries can define views but it is recommended that in most cases registering
+// views be left up to applications.
+//
+// Exporting
+//
+// Collected and aggregated data can be exported to a metric collection
+// backend by registering its exporter.
+//
+// Multiple exporters can be registered to upload the data to various
+// different back ends.
+package view // import "go.opencensus.io/stats/view"
+
+// TODO(acetechnologist): Add a link to the language independent OpenCensus
+// spec when it is available.
diff --git a/vendor/go.opencensus.io/stats/view/export.go b/vendor/go.opencensus.io/stats/view/export.go
new file mode 100644
index 000000000..7cb59718f
--- /dev/null
+++ b/vendor/go.opencensus.io/stats/view/export.go
@@ -0,0 +1,58 @@
+// Copyright 2017, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package view
+
+import "sync"
+
+var (
+	exportersMu sync.RWMutex // guards exporters
+	exporters   = make(map[Exporter]struct{})
+)
+
+// Exporter exports the collected records as view data.
+//
+// The ExportView method should return quickly; if an
+// Exporter takes a significant amount of time to
+// process a Data, that work should be done on another goroutine.
+//
+// It is safe to assume that ExportView will not be called concurrently from
+// multiple goroutines.
+//
+// The Data should not be modified.
+type Exporter interface {
+	ExportView(viewData *Data)
+}
+
+// RegisterExporter registers an exporter.
+// Collected data will be reported via all the
+// registered exporters. Once you no longer
+// want data to be exported, invoke UnregisterExporter
+// with the previously registered exporter.
+//
+// Binaries can register exporters, libraries shouldn't register exporters.
+func RegisterExporter(e Exporter) {
+	exportersMu.Lock()
+	defer exportersMu.Unlock()
+
+	exporters[e] = struct{}{}
+}
+
+// UnregisterExporter unregisters an exporter.
+func UnregisterExporter(e Exporter) {
+	exportersMu.Lock()
+	defer exportersMu.Unlock()
+
+	delete(exporters, e)
+}
diff --git a/vendor/go.opencensus.io/stats/view/view.go b/vendor/go.opencensus.io/stats/view/view.go
new file mode 100644
index 000000000..37f88e1d9
--- /dev/null
+++ b/vendor/go.opencensus.io/stats/view/view.go
@@ -0,0 +1,221 @@
+// Copyright 2017, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package view
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"reflect"
+	"sort"
+	"sync/atomic"
+	"time"
+
+	"go.opencensus.io/metric/metricdata"
+	"go.opencensus.io/stats"
+	"go.opencensus.io/tag"
+)
+
+// View allows users to aggregate the recorded stats.Measurements.
+// Views need to be passed to the Register function before data will be
+// collected and sent to Exporters.
+type View struct {
+	Name        string // Name of View. Must be unique. If unset, will default to the name of the Measure.
+	Description string // Description is a human-readable description for this view.
+
+	// TagKeys are the tag keys describing the grouping of this view.
+	// A single Row will be produced for each combination of associated tag values.
+	TagKeys []tag.Key
+
+	// Measure is a stats.Measure to aggregate in this view.
+	Measure stats.Measure
+
+	// Aggregation is the aggregation function to apply to the set of Measurements.
+	Aggregation *Aggregation
+}
+
+// WithName returns a copy of the View with a new name. This is useful for
+// renaming views to cope with limitations placed on metric names by various
+// backends.
+func (v *View) WithName(name string) *View {
+	vNew := *v
+	vNew.Name = name
+	return &vNew
+}
+
+// same compares two views and returns true if they represent the same aggregation.
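With the Exporter interface and RegisterExporter above, a consumer can receive each reporting batch. A minimal sketch follows; the printExporter type is invented for the example and is not part of the vendored code.

package main

import (
	"fmt"

	"go.opencensus.io/stats/view"
)

// printExporter is illustrative only; it dumps each exported batch to stdout.
type printExporter struct{}

func (printExporter) ExportView(vd *view.Data) {
	for _, row := range vd.Rows {
		fmt.Printf("%s [%v - %v] %v\n", vd.View.Name, vd.Start, vd.End, row)
	}
}

func main() {
	e := printExporter{}
	view.RegisterExporter(e)
	defer view.UnregisterExporter(e)
	// ... register views and record measurements; rows arrive on each reporting tick.
}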
+func (v *View) same(other *View) bool {
+	if v == other {
+		return true
+	}
+	if v == nil {
+		return false
+	}
+	return reflect.DeepEqual(v.Aggregation, other.Aggregation) &&
+		v.Measure.Name() == other.Measure.Name()
+}
+
+// ErrNegativeBucketBounds is the error returned if a histogram contains negative bounds.
+//
+// Deprecated: this should not be public.
+var ErrNegativeBucketBounds = errors.New("negative bucket bounds not supported")
+
+// canonicalize canonicalizes v by setting explicit
+// defaults for Name and Description and sorting the TagKeys.
+func (v *View) canonicalize() error {
+	if v.Measure == nil {
+		return fmt.Errorf("cannot register view %q: measure not set", v.Name)
+	}
+	if v.Aggregation == nil {
+		return fmt.Errorf("cannot register view %q: aggregation not set", v.Name)
+	}
+	if v.Name == "" {
+		v.Name = v.Measure.Name()
+	}
+	if v.Description == "" {
+		v.Description = v.Measure.Description()
+	}
+	if err := checkViewName(v.Name); err != nil {
+		return err
+	}
+	sort.Slice(v.TagKeys, func(i, j int) bool {
+		return v.TagKeys[i].Name() < v.TagKeys[j].Name()
+	})
+	sort.Float64s(v.Aggregation.Buckets)
+	for _, b := range v.Aggregation.Buckets {
+		if b < 0 {
+			return ErrNegativeBucketBounds
+		}
+	}
+	// drop 0 bucket silently.
+	v.Aggregation.Buckets = dropZeroBounds(v.Aggregation.Buckets...)
+
+	return nil
+}
+
+func dropZeroBounds(bounds ...float64) []float64 {
+	for i, bound := range bounds {
+		if bound > 0 {
+			return bounds[i:]
+		}
+	}
+	return []float64{}
+}
+
+// viewInternal is the internal representation of a View.
+type viewInternal struct {
+	view             *View  // view is the canonicalized View definition associated with this view.
+	subscribed       uint32 // 1 if someone is subscribed and data needs to be exported, use atomic to access
+	collector        *collector
+	metricDescriptor *metricdata.Descriptor
+}
+
+func newViewInternal(v *View) (*viewInternal, error) {
+	return &viewInternal{
+		view:             v,
+		collector:        &collector{make(map[string]AggregationData), v.Aggregation},
+		metricDescriptor: viewToMetricDescriptor(v),
+	}, nil
+}
+
+func (v *viewInternal) subscribe() {
+	atomic.StoreUint32(&v.subscribed, 1)
+}
+
+func (v *viewInternal) unsubscribe() {
+	atomic.StoreUint32(&v.subscribed, 0)
+}
+
+// isSubscribed returns true if the view is exporting
+// data by subscription.
+func (v *viewInternal) isSubscribed() bool {
+	return atomic.LoadUint32(&v.subscribed) == 1
+}
+
+func (v *viewInternal) clearRows() {
+	v.collector.clearRows()
+}
+
+func (v *viewInternal) collectedRows() []*Row {
+	return v.collector.collectedRows(v.view.TagKeys)
+}
+
+func (v *viewInternal) addSample(m *tag.Map, val float64, attachments map[string]interface{}, t time.Time) {
+	if !v.isSubscribed() {
+		return
+	}
+	sig := string(encodeWithKeys(m, v.view.TagKeys))
+	v.collector.addSample(sig, val, attachments, t)
+}
+
+// A Data is a set of rows about usage of the single measure associated
+// with the given view. Each row is specific to a unique set of tags.
+type Data struct {
+	View       *View
+	Start, End time.Time
+	Rows       []*Row
+}
+
+// Row is the collected value for a specific set of key value pairs a.k.a tags.
+type Row struct {
+	Tags []tag.Tag
+	Data AggregationData
+}
+
+func (r *Row) String() string {
+	var buffer bytes.Buffer
+	buffer.WriteString("{ ")
+	buffer.WriteString("{ ")
+	for _, t := range r.Tags {
+		buffer.WriteString(fmt.Sprintf("{%v %v}", t.Key.Name(), t.Value))
+	}
+	buffer.WriteString(" }")
+	buffer.WriteString(fmt.Sprintf("%v", r.Data))
+	buffer.WriteString(" }")
+	return buffer.String()
+}
+
+// Equal returns true if both rows are equal. Tags are expected to be ordered
+// by the key name. Even if both rows have the same tags, it will return false
+// when the tags appear in different orders.
+func (r *Row) Equal(other *Row) bool {
+	if r == other {
+		return true
+	}
+	return reflect.DeepEqual(r.Tags, other.Tags) && r.Data.equal(other.Data)
+}
+
+const maxNameLength = 255
+
+// Returns true if the given string contains only printable characters.
+func isPrintable(str string) bool {
+	for _, r := range str {
+		if !(r >= ' ' && r <= '~') {
+			return false
+		}
+	}
+	return true
+}
+
+func checkViewName(name string) error {
+	if len(name) > maxNameLength {
+		return fmt.Errorf("view name cannot be larger than %v", maxNameLength)
+	}
+	if !isPrintable(name) {
+		return fmt.Errorf("view name needs to be an ASCII string")
+	}
+	return nil
+}
diff --git a/vendor/go.opencensus.io/stats/view/view_to_metric.go b/vendor/go.opencensus.io/stats/view/view_to_metric.go
new file mode 100644
index 000000000..284299faf
--- /dev/null
+++ b/vendor/go.opencensus.io/stats/view/view_to_metric.go
@@ -0,0 +1,131 @@
+// Copyright 2019, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package view
+
+import (
+	"time"
+
+	"go.opencensus.io/metric/metricdata"
+	"go.opencensus.io/stats"
+)
+
+func getUnit(unit string) metricdata.Unit {
+	switch unit {
+	case "1":
+		return metricdata.UnitDimensionless
+	case "ms":
+		return metricdata.UnitMilliseconds
+	case "By":
+		return metricdata.UnitBytes
+	}
+	return metricdata.UnitDimensionless
+}
+
+func getType(v *View) metricdata.Type {
+	m := v.Measure
+	agg := v.Aggregation
+
+	switch agg.Type {
+	case AggTypeSum:
+		switch m.(type) {
+		case *stats.Int64Measure:
+			return metricdata.TypeCumulativeInt64
+		case *stats.Float64Measure:
+			return metricdata.TypeCumulativeFloat64
+		default:
+			panic("unexpected measure type")
+		}
+	case AggTypeDistribution:
+		return metricdata.TypeCumulativeDistribution
+	case AggTypeLastValue:
+		switch m.(type) {
+		case *stats.Int64Measure:
+			return metricdata.TypeGaugeInt64
+		case *stats.Float64Measure:
+			return metricdata.TypeGaugeFloat64
+		default:
+			panic("unexpected measure type")
+		}
+	case AggTypeCount:
+		switch m.(type) {
+		case *stats.Int64Measure:
+			return metricdata.TypeCumulativeInt64
+		case *stats.Float64Measure:
+			return metricdata.TypeCumulativeInt64
+		default:
+			panic("unexpected measure type")
+		}
+	default:
+		panic("unexpected aggregation type")
+	}
+}
+
+func getLabelKeys(v *View) []string {
+	labelKeys := []string{}
+	for _, k := range v.TagKeys {
+		labelKeys = append(labelKeys, k.Name())
+	}
+	return labelKeys
+}
+
+func viewToMetricDescriptor(v *View) *metricdata.Descriptor {
+	return &metricdata.Descriptor{
+		Name:        v.Name,
+		Description: v.Description,
+		Unit:        getUnit(v.Measure.Unit()),
+		Type:        getType(v),
+		LabelKeys:   getLabelKeys(v),
+	}
+}
+
+func toLabelValues(row *Row) []metricdata.LabelValue {
+	labelValues := []metricdata.LabelValue{}
+	for _, tag := range row.Tags {
+		labelValues = append(labelValues, metricdata.NewLabelValue(tag.Value))
+	}
+	return labelValues
+}
+
+func rowToTimeseries(v *viewInternal, row *Row, now time.Time, startTime time.Time) *metricdata.TimeSeries {
+	return &metricdata.TimeSeries{
+		Points:      []metricdata.Point{row.Data.toPoint(v.metricDescriptor.Type, now)},
+		LabelValues: toLabelValues(row),
+		StartTime:   startTime,
+	}
+}
+
+func viewToMetric(v *viewInternal, now time.Time, startTime time.Time) *metricdata.Metric {
+	if v.metricDescriptor.Type == metricdata.TypeGaugeInt64 ||
+		v.metricDescriptor.Type == metricdata.TypeGaugeFloat64 {
+		startTime = time.Time{}
+	}
+
+	rows := v.collectedRows()
+	if len(rows) == 0 {
+		return nil
+	}
+
+	ts := []*metricdata.TimeSeries{}
+	for _, row := range rows {
+		ts = append(ts, rowToTimeseries(v, row, now, startTime))
+	}
+
+	m := &metricdata.Metric{
+		Descriptor: *v.metricDescriptor,
+		TimeSeries: ts,
+	}
+	return m
+}
diff --git a/vendor/go.opencensus.io/stats/view/worker.go b/vendor/go.opencensus.io/stats/view/worker.go
new file mode 100644
index 000000000..37279b39e
--- /dev/null
+++ b/vendor/go.opencensus.io/stats/view/worker.go
@@ -0,0 +1,279 @@
+// Copyright 2017, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package view
+
+import (
+	"fmt"
+	"sync"
+	"time"
+
+	"go.opencensus.io/metric/metricdata"
+	"go.opencensus.io/metric/metricproducer"
+	"go.opencensus.io/stats"
+	"go.opencensus.io/stats/internal"
+	"go.opencensus.io/tag"
+)
+
+func init() {
+	defaultWorker = newWorker()
+	go defaultWorker.start()
+	internal.DefaultRecorder = record
+}
+
+type measureRef struct {
+	measure string
+	views   map[*viewInternal]struct{}
+}
+
+type worker struct {
+	measures   map[string]*measureRef
+	views      map[string]*viewInternal
+	startTimes map[*viewInternal]time.Time
+
+	timer      *time.Ticker
+	c          chan command
+	quit, done chan bool
+	mu         sync.RWMutex
+}
+
+var defaultWorker *worker
+
+var defaultReportingDuration = 10 * time.Second
+
+// Find returns a registered view associated with this name.
+// If no registered view is found, nil is returned.
+func Find(name string) (v *View) {
+	req := &getViewByNameReq{
+		name: name,
+		c:    make(chan *getViewByNameResp),
+	}
+	defaultWorker.c <- req
+	resp := <-req.c
+	return resp.v
+}
+
+// Register begins collecting data for the given views.
+// Once a view is registered, it reports data to the registered exporters.
+func Register(views ...*View) error {
+	req := &registerViewReq{
+		views: views,
+		err:   make(chan error),
+	}
+	defaultWorker.c <- req
+	return <-req.err
+}
+
+// Unregister the given views. Data will no longer be exported for these views
+// after Unregister returns.
+// It is not necessary to unregister from views you expect to collect for the
+// duration of your program execution.
+func Unregister(views ...*View) {
+	names := make([]string, len(views))
+	for i := range views {
+		names[i] = views[i].Name
+	}
+	req := &unregisterFromViewReq{
+		views: names,
+		done:  make(chan struct{}),
+	}
+	defaultWorker.c <- req
+	<-req.done
+}
+
+// RetrieveData gets a snapshot of the data collected for the view registered
+// with the given name. It is intended for testing only.
+func RetrieveData(viewName string) ([]*Row, error) {
+	req := &retrieveDataReq{
+		now: time.Now(),
+		v:   viewName,
+		c:   make(chan *retrieveDataResp),
+	}
+	defaultWorker.c <- req
+	resp := <-req.c
+	return resp.rows, resp.err
+}
+
+func record(tags *tag.Map, ms interface{}, attachments map[string]interface{}) {
+	req := &recordReq{
+		tm:          tags,
+		ms:          ms.([]stats.Measurement),
+		attachments: attachments,
+		t:           time.Now(),
+	}
+	defaultWorker.c <- req
+}
+
+// SetReportingPeriod sets the interval between reporting aggregated views in
+// the program. If duration is less than or equal to zero, it enables the
+// default behavior.
+//
+// Note: each exporter makes different promises about what the lowest supported
+// duration is. For example, the Stackdriver exporter recommends a value no
+// lower than 1 minute. Consult each exporter per your needs.
+func SetReportingPeriod(d time.Duration) {
+	// TODO(acetechnologist): ensure that the duration d is more than a certain
+	// value. e.g. 1s
+	req := &setReportingPeriodReq{
+		d: d,
+		c: make(chan bool),
+	}
+	defaultWorker.c <- req
+	<-req.c // don't return until the timer is set to the new duration.
+} + +func newWorker() *worker { + return &worker{ + measures: make(map[string]*measureRef), + views: make(map[string]*viewInternal), + startTimes: make(map[*viewInternal]time.Time), + timer: time.NewTicker(defaultReportingDuration), + c: make(chan command, 1024), + quit: make(chan bool), + done: make(chan bool), + } +} + +func (w *worker) start() { + prodMgr := metricproducer.GlobalManager() + prodMgr.AddProducer(w) + + for { + select { + case cmd := <-w.c: + cmd.handleCommand(w) + case <-w.timer.C: + w.reportUsage(time.Now()) + case <-w.quit: + w.timer.Stop() + close(w.c) + w.done <- true + return + } + } +} + +func (w *worker) stop() { + prodMgr := metricproducer.GlobalManager() + prodMgr.DeleteProducer(w) + + w.quit <- true + <-w.done +} + +func (w *worker) getMeasureRef(name string) *measureRef { + if mr, ok := w.measures[name]; ok { + return mr + } + mr := &measureRef{ + measure: name, + views: make(map[*viewInternal]struct{}), + } + w.measures[name] = mr + return mr +} + +func (w *worker) tryRegisterView(v *View) (*viewInternal, error) { + w.mu.Lock() + defer w.mu.Unlock() + vi, err := newViewInternal(v) + if err != nil { + return nil, err + } + if x, ok := w.views[vi.view.Name]; ok { + if !x.view.same(vi.view) { + return nil, fmt.Errorf("cannot register view %q; a different view with the same name is already registered", v.Name) + } + + // the view is already registered so there is nothing to do and the + // command is considered successful. + return x, nil + } + w.views[vi.view.Name] = vi + ref := w.getMeasureRef(vi.view.Measure.Name()) + ref.views[vi] = struct{}{} + return vi, nil +} + +func (w *worker) unregisterView(viewName string) { + w.mu.Lock() + defer w.mu.Unlock() + delete(w.views, viewName) +} + +func (w *worker) reportView(v *viewInternal, now time.Time) { + if !v.isSubscribed() { + return + } + rows := v.collectedRows() + _, ok := w.startTimes[v] + if !ok { + w.startTimes[v] = now + } + viewData := &Data{ + View: v.view, + Start: w.startTimes[v], + End: time.Now(), + Rows: rows, + } + exportersMu.Lock() + for e := range exporters { + e.ExportView(viewData) + } + exportersMu.Unlock() +} + +func (w *worker) reportUsage(now time.Time) { + for _, v := range w.views { + w.reportView(v, now) + } +} + +func (w *worker) toMetric(v *viewInternal, now time.Time) *metricdata.Metric { + if !v.isSubscribed() { + return nil + } + + _, ok := w.startTimes[v] + if !ok { + w.startTimes[v] = now + } + + var startTime time.Time + if v.metricDescriptor.Type == metricdata.TypeGaugeInt64 || + v.metricDescriptor.Type == metricdata.TypeGaugeFloat64 { + startTime = time.Time{} + } else { + startTime = w.startTimes[v] + } + + return viewToMetric(v, now, startTime) +} + +// Read reads all view data and returns them as metrics. +// It is typically invoked by metric reader to export stats in metric format. 
+func (w *worker) Read() []*metricdata.Metric {
+	w.mu.Lock()
+	defer w.mu.Unlock()
+	now := time.Now()
+	metrics := make([]*metricdata.Metric, 0, len(w.views))
+	for _, v := range w.views {
+		metric := w.toMetric(v, now)
+		if metric != nil {
+			metrics = append(metrics, metric)
+		}
+	}
+	return metrics
+}
diff --git a/vendor/go.opencensus.io/stats/view/worker_commands.go b/vendor/go.opencensus.io/stats/view/worker_commands.go
new file mode 100644
index 000000000..ba6203a50
--- /dev/null
+++ b/vendor/go.opencensus.io/stats/view/worker_commands.go
@@ -0,0 +1,182 @@
+// Copyright 2017, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package view
+
+import (
+	"errors"
+	"fmt"
+	"strings"
+	"time"
+
+	"go.opencensus.io/stats"
+	"go.opencensus.io/stats/internal"
+	"go.opencensus.io/tag"
+)
+
+type command interface {
+	handleCommand(w *worker)
+}
+
+// getViewByNameReq is the command to get a view given its name.
+type getViewByNameReq struct {
+	name string
+	c    chan *getViewByNameResp
+}
+
+type getViewByNameResp struct {
+	v *View
+}
+
+func (cmd *getViewByNameReq) handleCommand(w *worker) {
+	v := w.views[cmd.name]
+	if v == nil {
+		cmd.c <- &getViewByNameResp{nil}
+		return
+	}
+	cmd.c <- &getViewByNameResp{v.view}
+}
+
+// registerViewReq is the command to register a view.
+type registerViewReq struct {
+	views []*View
+	err   chan error
+}
+
+func (cmd *registerViewReq) handleCommand(w *worker) {
+	for _, v := range cmd.views {
+		if err := v.canonicalize(); err != nil {
+			cmd.err <- err
+			return
+		}
+	}
+	var errstr []string
+	for _, view := range cmd.views {
+		vi, err := w.tryRegisterView(view)
+		if err != nil {
+			errstr = append(errstr, fmt.Sprintf("%s: %v", view.Name, err))
+			continue
+		}
+		internal.SubscriptionReporter(view.Measure.Name())
+		vi.subscribe()
+	}
+	if len(errstr) > 0 {
+		cmd.err <- errors.New(strings.Join(errstr, "\n"))
+	} else {
+		cmd.err <- nil
+	}
+}
+
+// unregisterFromViewReq is the command to unregister from a view. It has no
+// impact on the data collection for clients that are pulling data from the
+// library.
+type unregisterFromViewReq struct {
+	views []string
+	done  chan struct{}
+}
+
+func (cmd *unregisterFromViewReq) handleCommand(w *worker) {
+	for _, name := range cmd.views {
+		vi, ok := w.views[name]
+		if !ok {
+			continue
+		}
+
+		// Report pending data for this view before removing it.
+		w.reportView(vi, time.Now())
+
+		vi.unsubscribe()
+		if !vi.isSubscribed() {
+			// this was the last subscription and view is not collecting anymore.
+			// The collected data can be cleared.
+			vi.clearRows()
+		}
+		w.unregisterView(name)
+	}
+	cmd.done <- struct{}{}
+}
+
+// retrieveDataReq is the command to retrieve data for a view.
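Since Register, the record path, and RetrieveData are all serialized through the same worker channel, a test can record and then immediately read back rows. A sketch follows; it is not part of the vendored code and the measure name is invented.

package main

import (
	"context"
	"fmt"
	"log"

	"go.opencensus.io/stats"
	"go.opencensus.io/stats/view"
)

// Illustrative only; not part of the vendored OpenCensus code.
func main() {
	requests := stats.Int64("example.com/measures/requests", "request count", stats.UnitDimensionless)
	// Name and Description default to the measure's during canonicalization.
	if err := view.Register(&view.View{Measure: requests, Aggregation: view.Count()}); err != nil {
		log.Fatal(err)
	}
	stats.Record(context.Background(), requests.M(1))

	rows, err := view.RetrieveData("example.com/measures/requests")
	if err != nil {
		log.Fatal(err)
	}
	for _, row := range rows {
		fmt.Println(row) // one row holding a CountData with Value 1
	}
}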
+type retrieveDataReq struct { + now time.Time + v string + c chan *retrieveDataResp +} + +type retrieveDataResp struct { + rows []*Row + err error +} + +func (cmd *retrieveDataReq) handleCommand(w *worker) { + vi, ok := w.views[cmd.v] + if !ok { + cmd.c <- &retrieveDataResp{ + nil, + fmt.Errorf("cannot retrieve data; view %q is not registered", cmd.v), + } + return + } + + if !vi.isSubscribed() { + cmd.c <- &retrieveDataResp{ + nil, + fmt.Errorf("cannot retrieve data; view %q has no subscriptions or collection is not forcibly started", cmd.v), + } + return + } + cmd.c <- &retrieveDataResp{ + vi.collectedRows(), + nil, + } +} + +// recordReq is the command to record data related to multiple measures +// at once. +type recordReq struct { + tm *tag.Map + ms []stats.Measurement + attachments map[string]interface{} + t time.Time +} + +func (cmd *recordReq) handleCommand(w *worker) { + for _, m := range cmd.ms { + if (m == stats.Measurement{}) { // not registered + continue + } + ref := w.getMeasureRef(m.Measure().Name()) + for v := range ref.views { + v.addSample(cmd.tm, m.Value(), cmd.attachments, time.Now()) + } + } +} + +// setReportingPeriodReq is the command to modify the duration between +// reporting the collected data to the registered clients. +type setReportingPeriodReq struct { + d time.Duration + c chan bool +} + +func (cmd *setReportingPeriodReq) handleCommand(w *worker) { + w.timer.Stop() + if cmd.d <= 0 { + w.timer = time.NewTicker(defaultReportingDuration) + } else { + w.timer = time.NewTicker(cmd.d) + } + cmd.c <- true +} diff --git a/vendor/go.opencensus.io/tag/context.go b/vendor/go.opencensus.io/tag/context.go new file mode 100644 index 000000000..b27d1b26b --- /dev/null +++ b/vendor/go.opencensus.io/tag/context.go @@ -0,0 +1,43 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package tag + +import ( + "context" +) + +// FromContext returns the tag map stored in the context. +func FromContext(ctx context.Context) *Map { + // The returned tag map shouldn't be mutated. + ts := ctx.Value(mapCtxKey) + if ts == nil { + return nil + } + return ts.(*Map) +} + +// NewContext creates a new context with the given tag map. +// To propagate a tag map to downstream methods and downstream RPCs, add a tag map +// to the current context. NewContext will return a copy of the current context, +// and put the tag map into the returned one. +// If there is already a tag map in the current context, it will be replaced with m. 
+func NewContext(ctx context.Context, m *Map) context.Context {
+	return context.WithValue(ctx, mapCtxKey, m)
+}
+
+type ctxKey struct{}
+
+var mapCtxKey = ctxKey{}
diff --git a/vendor/go.opencensus.io/tag/doc.go b/vendor/go.opencensus.io/tag/doc.go
new file mode 100644
index 000000000..da16b74e4
--- /dev/null
+++ b/vendor/go.opencensus.io/tag/doc.go
@@ -0,0 +1,26 @@
+// Copyright 2017, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+/*
+Package tag contains OpenCensus tags.
+
+Tags are key-value pairs. Tags provide additional cardinality to
+the OpenCensus instrumentation data.
+
+Tags can be propagated on the wire and in the same
+process via context.Context. Encode and Decode should be
+used to represent tags into their binary propagation form.
+*/
+package tag // import "go.opencensus.io/tag"
diff --git a/vendor/go.opencensus.io/tag/key.go b/vendor/go.opencensus.io/tag/key.go
new file mode 100644
index 000000000..ebbed9500
--- /dev/null
+++ b/vendor/go.opencensus.io/tag/key.go
@@ -0,0 +1,35 @@
+// Copyright 2017, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package tag
+
+// Key represents a tag key.
+type Key struct {
+	name string
+}
+
+// NewKey creates or retrieves a string key identified by name.
+// Calling NewKey repeatedly with the same name returns the same key.
+func NewKey(name string) (Key, error) {
+	if !checkKeyName(name) {
+		return Key{}, errInvalidKeyName
+	}
+	return Key{name: name}, nil
+}
+
+// Name returns the name of the key.
+func (k Key) Name() string {
+	return k.name
+}
diff --git a/vendor/go.opencensus.io/tag/map.go b/vendor/go.opencensus.io/tag/map.go
new file mode 100644
index 000000000..5b72ba6ad
--- /dev/null
+++ b/vendor/go.opencensus.io/tag/map.go
@@ -0,0 +1,197 @@
+// Copyright 2017, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
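The tag map in map.go below is usually built through tag.New and the mutators that follow it; combined with stats.RecordWithTags from earlier in this patch, a sketch of tagging measurements (not part of the vendored code; all names invented):

package main

import (
	"context"
	"log"

	"go.opencensus.io/stats"
	"go.opencensus.io/tag"
)

// Illustrative only; not part of the vendored OpenCensus code.
var (
	methodKey, _ = tag.NewKey("example.com/keys/method")
	latencyMs    = stats.Float64("example.com/measures/latency", "request latency", stats.UnitMilliseconds)
)

func main() {
	// Tag the context; measurements recorded against ctx carry the tag.
	ctx, err := tag.New(context.Background(), tag.Upsert(methodKey, "GET"))
	if err != nil {
		log.Fatal(err)
	}
	stats.Record(ctx, latencyMs.M(12.3))

	// Or mutate tags for a single record call without touching ctx.
	if err := stats.RecordWithTags(ctx, []tag.Mutator{tag.Upsert(methodKey, "POST")}, latencyMs.M(45.6)); err != nil {
		log.Fatal(err)
	}
}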
+//
+
+package tag
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"sort"
+)
+
+// Tag is a key value pair that can be propagated on wire.
+type Tag struct {
+	Key   Key
+	Value string
+}
+
+// Map is a map of tags. Use New to create a context containing
+// a new Map.
+type Map struct {
+	m map[Key]string
+}
+
+// Value returns the value for the key if a value for the key exists.
+func (m *Map) Value(k Key) (string, bool) {
+	if m == nil {
+		return "", false
+	}
+	v, ok := m.m[k]
+	return v, ok
+}
+
+func (m *Map) String() string {
+	if m == nil {
+		return "nil"
+	}
+	keys := make([]Key, 0, len(m.m))
+	for k := range m.m {
+		keys = append(keys, k)
+	}
+	sort.Slice(keys, func(i, j int) bool { return keys[i].Name() < keys[j].Name() })
+
+	var buffer bytes.Buffer
+	buffer.WriteString("{ ")
+	for _, k := range keys {
+		buffer.WriteString(fmt.Sprintf("{%v %v}", k.name, m.m[k]))
+	}
+	buffer.WriteString(" }")
+	return buffer.String()
+}
+
+func (m *Map) insert(k Key, v string) {
+	if _, ok := m.m[k]; ok {
+		return
+	}
+	m.m[k] = v
+}
+
+func (m *Map) update(k Key, v string) {
+	if _, ok := m.m[k]; ok {
+		m.m[k] = v
+	}
+}
+
+func (m *Map) upsert(k Key, v string) {
+	m.m[k] = v
+}
+
+func (m *Map) delete(k Key) {
+	delete(m.m, k)
+}
+
+func newMap() *Map {
+	return &Map{m: make(map[Key]string)}
+}
+
+// Mutator modifies a tag map.
+type Mutator interface {
+	Mutate(t *Map) (*Map, error)
+}
+
+// Insert returns a mutator that inserts a
+// value associated with k. If k already exists in the tag map,
+// the mutator doesn't update the value.
+func Insert(k Key, v string) Mutator {
+	return &mutator{
+		fn: func(m *Map) (*Map, error) {
+			if !checkValue(v) {
+				return nil, errInvalidValue
+			}
+			m.insert(k, v)
+			return m, nil
+		},
+	}
+}
+
+// Update returns a mutator that updates the
+// value of the tag associated with k with v. If k doesn't
+// exist in the tag map, the mutator doesn't insert the value.
+func Update(k Key, v string) Mutator {
+	return &mutator{
+		fn: func(m *Map) (*Map, error) {
+			if !checkValue(v) {
+				return nil, errInvalidValue
+			}
+			m.update(k, v)
+			return m, nil
+		},
+	}
+}
+
+// Upsert returns a mutator that upserts the
+// value of the tag associated with k with v. It inserts the
+// value if k doesn't exist already. It mutates the value
+// if k already exists.
+func Upsert(k Key, v string) Mutator {
+	return &mutator{
+		fn: func(m *Map) (*Map, error) {
+			if !checkValue(v) {
+				return nil, errInvalidValue
+			}
+			m.upsert(k, v)
+			return m, nil
+		},
+	}
+}
+
+// Delete returns a mutator that deletes
+// the value associated with k.
+func Delete(k Key) Mutator {
+	return &mutator{
+		fn: func(m *Map) (*Map, error) {
+			m.delete(k)
+			return m, nil
+		},
+	}
+}
+
+// New returns a new context that contains a tag map
+// originated from the incoming context and modified
+// with the provided mutators.
+func New(ctx context.Context, mutator ...Mutator) (context.Context, error) {
+	m := newMap()
+	orig := FromContext(ctx)
+	if orig != nil {
+		for k, v := range orig.m {
+			if !checkKeyName(k.Name()) {
+				return ctx, fmt.Errorf("key:%q: %v", k, errInvalidKeyName)
+			}
+			if !checkValue(v) {
+				return ctx, fmt.Errorf("key:%q value:%q: %v", k.Name(), v, errInvalidValue)
+			}
+			m.insert(k, v)
+		}
+	}
+	var err error
+	for _, mod := range mutator {
+		m, err = mod.Mutate(m)
+		if err != nil {
+			return ctx, err
+		}
+	}
+	return NewContext(ctx, m), nil
+}
+
+// Do is similar to pprof.Do: a convenience for installing the tags
+// from the context as Go profiler labels. This allows you to
+
+type mutator struct {
+	fn func(t *Map) (*Map, error)
+}
+
+func (m *mutator) Mutate(t *Map) (*Map, error) {
+	return m.fn(t)
+}
diff --git a/vendor/go.opencensus.io/tag/map_codec.go b/vendor/go.opencensus.io/tag/map_codec.go
new file mode 100644
index 000000000..e88e72777
--- /dev/null
+++ b/vendor/go.opencensus.io/tag/map_codec.go
@@ -0,0 +1,237 @@
+// Copyright 2017, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package tag
+
+import (
+	"encoding/binary"
+	"fmt"
+)
+
+// keyType defines the types of keys allowed. Currently only keyTypeString is
+// supported.
+type keyType byte
+
+const (
+	keyTypeString keyType = iota
+	keyTypeInt64
+	keyTypeTrue
+	keyTypeFalse
+
+	tagsVersionID = byte(0)
+)
+
+type encoderGRPC struct {
+	buf               []byte
+	writeIdx, readIdx int
+}
+
+// writeTagString writes the key type byte (keyTypeString = 0) followed by
+// the varint length-prefixed key and value strings.
+func (eg *encoderGRPC) writeTagString(k, v string) {
+	eg.writeByte(byte(keyTypeString))
+	eg.writeStringWithVarintLen(k)
+	eg.writeStringWithVarintLen(v)
+}
+
+func (eg *encoderGRPC) writeTagUint64(k string, i uint64) {
+	eg.writeByte(byte(keyTypeInt64))
+	eg.writeStringWithVarintLen(k)
+	eg.writeUint64(i)
+}
+
+func (eg *encoderGRPC) writeTagTrue(k string) {
+	eg.writeByte(byte(keyTypeTrue))
+	eg.writeStringWithVarintLen(k)
+}
+
+func (eg *encoderGRPC) writeTagFalse(k string) {
+	eg.writeByte(byte(keyTypeFalse))
+	eg.writeStringWithVarintLen(k)
+}
+
+func (eg *encoderGRPC) writeBytesWithVarintLen(bytes []byte) {
+	length := len(bytes)
+
+	eg.growIfRequired(binary.MaxVarintLen64 + length)
+	eg.writeIdx += binary.PutUvarint(eg.buf[eg.writeIdx:], uint64(length))
+	copy(eg.buf[eg.writeIdx:], bytes)
+	eg.writeIdx += length
+}
+
+func (eg *encoderGRPC) writeStringWithVarintLen(s string) {
+	length := len(s)
+
+	eg.growIfRequired(binary.MaxVarintLen64 + length)
+	eg.writeIdx += binary.PutUvarint(eg.buf[eg.writeIdx:], uint64(length))
+	copy(eg.buf[eg.writeIdx:], s)
+	eg.writeIdx += length
+}
+
+func (eg *encoderGRPC) writeByte(v byte) {
+	eg.growIfRequired(1)
+	eg.buf[eg.writeIdx] = v
+	eg.writeIdx++
+}
+
+func (eg *encoderGRPC) writeUint32(i uint32) {
+	eg.growIfRequired(4)
+	binary.LittleEndian.PutUint32(eg.buf[eg.writeIdx:], i)
+	eg.writeIdx += 4
+}
+
+func (eg *encoderGRPC) writeUint64(i uint64) {
+	eg.growIfRequired(8)
+	binary.LittleEndian.PutUint64(eg.buf[eg.writeIdx:], i)
+	eg.writeIdx += 8
+}
+
+func (eg *encoderGRPC) readByte() byte {
+	b := eg.buf[eg.readIdx]
+	eg.readIdx++
+	return b
+}
+
+func (eg *encoderGRPC) readUint32() uint32 {
+	i := binary.LittleEndian.Uint32(eg.buf[eg.readIdx:])
+	eg.readIdx += 4
+	return i
+}
+
+func (eg *encoderGRPC) readUint64() uint64 {
+	i := 
binary.LittleEndian.Uint64(eg.buf[eg.readIdx:]) + eg.readIdx += 8 + return i +} + +func (eg *encoderGRPC) readBytesWithVarintLen() ([]byte, error) { + if eg.readEnded() { + return nil, fmt.Errorf("unexpected end while readBytesWithVarintLen '%x' starting at idx '%v'", eg.buf, eg.readIdx) + } + length, valueStart := binary.Uvarint(eg.buf[eg.readIdx:]) + if valueStart <= 0 { + return nil, fmt.Errorf("unexpected end while readBytesWithVarintLen '%x' starting at idx '%v'", eg.buf, eg.readIdx) + } + + valueStart += eg.readIdx + valueEnd := valueStart + int(length) + if valueEnd > len(eg.buf) { + return nil, fmt.Errorf("malformed encoding: length:%v, upper:%v, maxLength:%v", length, valueEnd, len(eg.buf)) + } + + eg.readIdx = valueEnd + return eg.buf[valueStart:valueEnd], nil +} + +func (eg *encoderGRPC) readStringWithVarintLen() (string, error) { + bytes, err := eg.readBytesWithVarintLen() + if err != nil { + return "", err + } + return string(bytes), nil +} + +func (eg *encoderGRPC) growIfRequired(expected int) { + if len(eg.buf)-eg.writeIdx < expected { + tmp := make([]byte, 2*(len(eg.buf)+1)+expected) + copy(tmp, eg.buf) + eg.buf = tmp + } +} + +func (eg *encoderGRPC) readEnded() bool { + return eg.readIdx >= len(eg.buf) +} + +func (eg *encoderGRPC) bytes() []byte { + return eg.buf[:eg.writeIdx] +} + +// Encode encodes the tag map into a []byte. It is useful to propagate +// the tag maps on wire in binary format. +func Encode(m *Map) []byte { + if m == nil { + return nil + } + eg := &encoderGRPC{ + buf: make([]byte, len(m.m)), + } + eg.writeByte(byte(tagsVersionID)) + for k, v := range m.m { + eg.writeByte(byte(keyTypeString)) + eg.writeStringWithVarintLen(k.name) + eg.writeBytesWithVarintLen([]byte(v)) + } + return eg.bytes() +} + +// Decode decodes the given []byte into a tag map. +func Decode(bytes []byte) (*Map, error) { + ts := newMap() + err := DecodeEach(bytes, ts.upsert) + if err != nil { + // no partial failures + return nil, err + } + return ts, nil +} + +// DecodeEach decodes the given serialized tag map, calling handler for each +// tag key and value decoded. +func DecodeEach(bytes []byte, fn func(key Key, val string)) error { + eg := &encoderGRPC{ + buf: bytes, + } + if len(eg.buf) == 0 { + return nil + } + + version := eg.readByte() + if version > tagsVersionID { + return fmt.Errorf("cannot decode: unsupported version: %q; supports only up to: %q", version, tagsVersionID) + } + + for !eg.readEnded() { + typ := keyType(eg.readByte()) + + if typ != keyTypeString { + return fmt.Errorf("cannot decode: invalid key type: %q", typ) + } + + k, err := eg.readBytesWithVarintLen() + if err != nil { + return err + } + + v, err := eg.readBytesWithVarintLen() + if err != nil { + return err + } + + key, err := NewKey(string(k)) + if err != nil { + return err + } + val := string(v) + if !checkValue(val) { + return errInvalidValue + } + fn(key, val) + if err != nil { + return err + } + } + return nil +} diff --git a/vendor/go.opencensus.io/tag/profile_19.go b/vendor/go.opencensus.io/tag/profile_19.go new file mode 100644 index 000000000..f81cd0b4a --- /dev/null +++ b/vendor/go.opencensus.io/tag/profile_19.go @@ -0,0 +1,31 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build go1.9
+
+package tag
+
+import (
+	"context"
+	"runtime/pprof"
+)
+
+func do(ctx context.Context, f func(ctx context.Context)) {
+	m := FromContext(ctx)
+	if m == nil {
+		// No tag map in the context: run f without profiler labels
+		// instead of dereferencing the nil map below.
+		f(ctx)
+		return
+	}
+	keyvals := make([]string, 0, 2*len(m.m))
+	for k, v := range m.m {
+		keyvals = append(keyvals, k.Name(), v)
+	}
+	pprof.Do(ctx, pprof.Labels(keyvals...), f)
+}
diff --git a/vendor/go.opencensus.io/tag/profile_not19.go b/vendor/go.opencensus.io/tag/profile_not19.go
new file mode 100644
index 000000000..83adbce56
--- /dev/null
+++ b/vendor/go.opencensus.io/tag/profile_not19.go
@@ -0,0 +1,23 @@
+// Copyright 2018, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !go1.9
+
+package tag
+
+import "context"
+
+func do(ctx context.Context, f func(ctx context.Context)) {
+	f(ctx)
+}
diff --git a/vendor/go.opencensus.io/tag/validate.go b/vendor/go.opencensus.io/tag/validate.go
new file mode 100644
index 000000000..0939fc674
--- /dev/null
+++ b/vendor/go.opencensus.io/tag/validate.go
@@ -0,0 +1,56 @@
+// Copyright 2017, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tag
+
+import "errors"
+
+const (
+	maxKeyLength = 255
+
+	// Valid key/value characters are restricted to the printable US-ASCII
+	// subset (range 0x20 (' ') to 0x7e ('~')).
+	validKeyValueMin = 32
+	validKeyValueMax = 126
+)
+
+var (
+	errInvalidKeyName = errors.New("invalid key name: only ASCII characters accepted; max length is 255 characters")
+	errInvalidValue   = errors.New("invalid value: only ASCII characters accepted; max length is 255 characters")
+)
+
+func checkKeyName(name string) bool {
+	if len(name) == 0 {
+		return false
+	}
+	if len(name) > maxKeyLength {
+		return false
+	}
+	return isASCII(name)
+}
+
+func isASCII(s string) bool {
+	for _, c := range s {
+		if (c < validKeyValueMin) || (c > validKeyValueMax) {
+			return false
+		}
+	}
+	return true
+}
+
+func checkValue(v string) bool {
+	if len(v) > maxKeyLength {
+		return false
+	}
+	return isASCII(v)
+}
diff --git a/vendor/go.opencensus.io/trace/basetypes.go b/vendor/go.opencensus.io/trace/basetypes.go
new file mode 100644
index 000000000..0c54492a2
--- /dev/null
+++ b/vendor/go.opencensus.io/trace/basetypes.go
@@ -0,0 +1,119 @@
+// Copyright 2017, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trace
+
+import (
+	"fmt"
+	"time"
+)
+
+type (
+	// TraceID is a 16-byte identifier for a set of spans.
+	TraceID [16]byte
+
+	// SpanID is an 8-byte identifier for a single span.
+	SpanID [8]byte
+)
+
+func (t TraceID) String() string {
+	return fmt.Sprintf("%02x", t[:])
+}
+
+func (s SpanID) String() string {
+	return fmt.Sprintf("%02x", s[:])
+}
+
+// Annotation represents a text annotation with a set of attributes and a timestamp.
+type Annotation struct {
+	Time       time.Time
+	Message    string
+	Attributes map[string]interface{}
+}
+
+// Attribute represents a key-value pair on a span, link or annotation.
+// Construct with one of: BoolAttribute, Int64Attribute, Float64Attribute,
+// or StringAttribute.
+type Attribute struct {
+	key   string
+	value interface{}
+}
+
+// BoolAttribute returns a bool-valued attribute.
+func BoolAttribute(key string, value bool) Attribute {
+	return Attribute{key: key, value: value}
+}
+
+// Int64Attribute returns an int64-valued attribute.
+func Int64Attribute(key string, value int64) Attribute {
+	return Attribute{key: key, value: value}
+}
+
+// Float64Attribute returns a float64-valued attribute.
+func Float64Attribute(key string, value float64) Attribute {
+	return Attribute{key: key, value: value}
+}
+
+// StringAttribute returns a string-valued attribute.
+func StringAttribute(key string, value string) Attribute {
+	return Attribute{key: key, value: value}
+}
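+
+// Illustrative usage (a minimal sketch; the span variable and the attribute
+// names are hypothetical): attaching typed attributes to a span.
+//
+//	span.AddAttributes(
+//		trace.StringAttribute("user.id", "u-123"),
+//		trace.Int64Attribute("retry.count", 3),
+//		trace.Float64Attribute("cache.hit_ratio", 0.92),
+//		trace.BoolAttribute("cache.hit", true),
+//	)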
+
+// LinkType specifies the relationship between the span that had the link
+// added, and the linked span.
+type LinkType int32
+
+// LinkType values.
+const (
+	LinkTypeUnspecified LinkType = iota // The relationship of the two spans is unknown.
+	LinkTypeChild                       // The linked span is a child of the current span.
+	LinkTypeParent                      // The linked span is the parent of the current span.
+)
+
+// Link represents a reference from one span to another span.
+type Link struct {
+	TraceID TraceID
+	SpanID  SpanID
+	Type    LinkType
+	// Attributes is a set of attributes on the link.
+	Attributes map[string]interface{}
+}
+
+// MessageEventType specifies the type of message event.
+type MessageEventType int32
+
+// MessageEventType values.
+const (
+	MessageEventTypeUnspecified MessageEventType = iota // Unknown event type.
+	MessageEventTypeSent                                // Indicates a sent RPC message.
+	MessageEventTypeRecv                                // Indicates a received RPC message.
+)
+
+// MessageEvent represents an event describing a message sent or received on the network.
+type MessageEvent struct {
+	Time                 time.Time
+	EventType            MessageEventType
+	MessageID            int64
+	UncompressedByteSize int64
+	CompressedByteSize   int64
+}
+
+// Status is the status of a Span.
+type Status struct {
+	// Code is a status code. Zero indicates success.
+	//
+	// If Code will be propagated to Google APIs, it ideally should be a value from
+	// https://github.com/googleapis/googleapis/blob/master/google/rpc/code.proto .
+	Code    int32
+	Message string
+}
diff --git a/vendor/go.opencensus.io/trace/config.go b/vendor/go.opencensus.io/trace/config.go
new file mode 100644
index 000000000..775f8274f
--- /dev/null
+++ b/vendor/go.opencensus.io/trace/config.go
@@ -0,0 +1,86 @@
+// Copyright 2018, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trace
+
+import (
+	"sync"
+
+	"go.opencensus.io/trace/internal"
+)
+
+// Config represents the global tracing configuration.
+type Config struct {
+	// DefaultSampler is the default sampler used when creating new spans.
+	DefaultSampler Sampler
+
+	// IDGenerator is for internal use only.
+	IDGenerator internal.IDGenerator
+
+	// MaxAnnotationEventsPerSpan is max number of annotation events per span.
+	MaxAnnotationEventsPerSpan int
+
+	// MaxMessageEventsPerSpan is max number of message events per span.
+	MaxMessageEventsPerSpan int
+
+	// MaxAttributesPerSpan is max number of attributes per span.
+	MaxAttributesPerSpan int
+
+	// MaxLinksPerSpan is max number of links per span.
+	MaxLinksPerSpan int
+}
+
+var configWriteMu sync.Mutex
+
+const (
+	// DefaultMaxAnnotationEventsPerSpan is default max number of annotation events per span.
+	DefaultMaxAnnotationEventsPerSpan = 32
+
+	// DefaultMaxMessageEventsPerSpan is default max number of message events per span.
+	DefaultMaxMessageEventsPerSpan = 128
+
+	// DefaultMaxAttributesPerSpan is default max number of attributes per span.
+	DefaultMaxAttributesPerSpan = 32
+
+	// DefaultMaxLinksPerSpan is default max number of links per span.
+	DefaultMaxLinksPerSpan = 32
+)
+
+// ApplyConfig applies changes to the global tracing configuration.
+//
+// Fields not provided in the given config are preserved.
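+//
+// A minimal usage sketch (the 1% sampling rate is an arbitrary
+// illustrative value):
+//
+//	trace.ApplyConfig(trace.Config{
+//		DefaultSampler: trace.ProbabilitySampler(0.01),
+//	})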
+func ApplyConfig(cfg Config) { + configWriteMu.Lock() + defer configWriteMu.Unlock() + c := *config.Load().(*Config) + if cfg.DefaultSampler != nil { + c.DefaultSampler = cfg.DefaultSampler + } + if cfg.IDGenerator != nil { + c.IDGenerator = cfg.IDGenerator + } + if cfg.MaxAnnotationEventsPerSpan > 0 { + c.MaxAnnotationEventsPerSpan = cfg.MaxAnnotationEventsPerSpan + } + if cfg.MaxMessageEventsPerSpan > 0 { + c.MaxMessageEventsPerSpan = cfg.MaxMessageEventsPerSpan + } + if cfg.MaxAttributesPerSpan > 0 { + c.MaxAttributesPerSpan = cfg.MaxAttributesPerSpan + } + if cfg.MaxLinksPerSpan > 0 { + c.MaxLinksPerSpan = cfg.MaxLinksPerSpan + } + config.Store(&c) +} diff --git a/vendor/go.opencensus.io/trace/doc.go b/vendor/go.opencensus.io/trace/doc.go new file mode 100644 index 000000000..04b1ee4f3 --- /dev/null +++ b/vendor/go.opencensus.io/trace/doc.go @@ -0,0 +1,53 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package trace contains support for OpenCensus distributed tracing. + +The following assumes a basic familiarity with OpenCensus concepts. +See http://opencensus.io + + +Exporting Traces + +To export collected tracing data, register at least one exporter. You can use +one of the provided exporters or write your own. + + trace.RegisterExporter(exporter) + +By default, traces will be sampled relatively rarely. To change the sampling +frequency for your entire program, call ApplyConfig. Use a ProbabilitySampler +to sample a subset of traces, or use AlwaysSample to collect a trace on every run: + + trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()}) + +Be careful about using trace.AlwaysSample in a production application with +significant traffic: a new trace will be started and exported for every request. + +Adding Spans to a Trace + +A trace consists of a tree of spans. In Go, the current span is carried in a +context.Context. + +It is common to want to capture all the activity of a function call in a span. For +this to work, the function must take a context.Context as a parameter. Add these two +lines to the top of the function: + + ctx, span := trace.StartSpan(ctx, "example.com/Run") + defer span.End() + +StartSpan will create a new top-level span if the context +doesn't contain another span, otherwise it will create a child span. +*/ +package trace // import "go.opencensus.io/trace" diff --git a/vendor/go.opencensus.io/trace/evictedqueue.go b/vendor/go.opencensus.io/trace/evictedqueue.go new file mode 100644 index 000000000..ffc264f23 --- /dev/null +++ b/vendor/go.opencensus.io/trace/evictedqueue.go @@ -0,0 +1,38 @@ +// Copyright 2019, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trace
+
+type evictedQueue struct {
+	queue        []interface{}
+	capacity     int
+	droppedCount int
+}
+
+func newEvictedQueue(capacity int) *evictedQueue {
+	eq := &evictedQueue{
+		capacity: capacity,
+		queue:    make([]interface{}, 0),
+	}
+
+	return eq
+}
+
+func (eq *evictedQueue) add(value interface{}) {
+	if len(eq.queue) == eq.capacity {
+		eq.queue = eq.queue[1:]
+		eq.droppedCount++
+	}
+	eq.queue = append(eq.queue, value)
+}
diff --git a/vendor/go.opencensus.io/trace/export.go b/vendor/go.opencensus.io/trace/export.go
new file mode 100644
index 000000000..e0d9a4b99
--- /dev/null
+++ b/vendor/go.opencensus.io/trace/export.go
@@ -0,0 +1,97 @@
+// Copyright 2017, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trace
+
+import (
+	"sync"
+	"sync/atomic"
+	"time"
+)
+
+// Exporter is a type for functions that receive sampled trace spans.
+//
+// The ExportSpan method should be safe for concurrent use and should return
+// quickly; if an Exporter takes a significant amount of time to process a
+// SpanData, that work should be done on another goroutine.
+//
+// The SpanData should not be modified, but a pointer to it can be kept.
+type Exporter interface {
+	ExportSpan(s *SpanData)
+}
+
+type exportersMap map[Exporter]struct{}
+
+var (
+	exporterMu sync.Mutex
+	exporters  atomic.Value
+)
+
+// RegisterExporter adds to the list of Exporters that will receive sampled
+// trace spans.
+//
+// Binaries can register exporters, libraries shouldn't register exporters.
+func RegisterExporter(e Exporter) {
+	exporterMu.Lock()
+	// Copy-on-write: build a fresh map so readers can load the current set
+	// atomically without holding the lock.
+	newExporters := make(exportersMap)
+	if old, ok := exporters.Load().(exportersMap); ok {
+		for k, v := range old {
+			newExporters[k] = v
+		}
+	}
+	newExporters[e] = struct{}{}
+	exporters.Store(newExporters)
+	exporterMu.Unlock()
+}
+
+// UnregisterExporter removes the given Exporter from the list of Exporters
+// that receive sampled trace spans.
+func UnregisterExporter(e Exporter) {
+	exporterMu.Lock()
+	newExporters := make(exportersMap)
+	if old, ok := exporters.Load().(exportersMap); ok {
+		for k, v := range old {
+			newExporters[k] = v
+		}
+	}
+	delete(newExporters, e)
+	exporters.Store(newExporters)
+	exporterMu.Unlock()
+}
+
+// SpanData contains all the information collected by a Span.
+type SpanData struct {
+	SpanContext
+	ParentSpanID SpanID
+	SpanKind     int
+	Name         string
+	StartTime    time.Time
+	// The wall clock time of EndTime will be adjusted to always be offset
+	// from StartTime by the duration of the span.
+	EndTime time.Time
+	// The values of Attributes each have type string, bool, int64, or float64.
+ Attributes map[string]interface{} + Annotations []Annotation + MessageEvents []MessageEvent + Status + Links []Link + HasRemoteParent bool + DroppedAttributeCount int + DroppedAnnotationCount int + DroppedMessageEventCount int + DroppedLinkCount int + + // ChildSpanCount holds the number of child span created for this span. + ChildSpanCount int +} diff --git a/vendor/go.opencensus.io/trace/internal/internal.go b/vendor/go.opencensus.io/trace/internal/internal.go new file mode 100644 index 000000000..7e808d8f3 --- /dev/null +++ b/vendor/go.opencensus.io/trace/internal/internal.go @@ -0,0 +1,22 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package internal provides trace internals. +package internal + +// IDGenerator allows custom generators for TraceId and SpanId. +type IDGenerator interface { + NewTraceID() [16]byte + NewSpanID() [8]byte +} diff --git a/vendor/go.opencensus.io/trace/lrumap.go b/vendor/go.opencensus.io/trace/lrumap.go new file mode 100644 index 000000000..3f80a3368 --- /dev/null +++ b/vendor/go.opencensus.io/trace/lrumap.go @@ -0,0 +1,37 @@ +// Copyright 2019, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace + +import ( + "github.com/hashicorp/golang-lru/simplelru" +) + +type lruMap struct { + simpleLruMap *simplelru.LRU + droppedCount int +} + +func newLruMap(size int) *lruMap { + lm := &lruMap{} + lm.simpleLruMap, _ = simplelru.NewLRU(size, nil) + return lm +} + +func (lm *lruMap) add(key, value interface{}) { + evicted := lm.simpleLruMap.Add(key, value) + if evicted { + lm.droppedCount++ + } +} diff --git a/vendor/go.opencensus.io/trace/propagation/propagation.go b/vendor/go.opencensus.io/trace/propagation/propagation.go new file mode 100644 index 000000000..1eb190a96 --- /dev/null +++ b/vendor/go.opencensus.io/trace/propagation/propagation.go @@ -0,0 +1,108 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
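+
+// Illustrative round trip through the binary format (a minimal sketch; sc
+// stands for a SpanContext obtained from a started span):
+//
+//	buf := propagation.Binary(sc)          // 29-byte binary form, or nil
+//	sc2, ok := propagation.FromBinary(buf) // ok reports whether buf was well formed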
+ +// Package propagation implements the binary trace context format. +package propagation // import "go.opencensus.io/trace/propagation" + +// TODO: link to external spec document. + +// BinaryFormat format: +// +// Binary value: +// version_id: 1 byte representing the version id. +// +// For version_id = 0: +// +// version_format: +// field_format: +// +// Fields: +// +// TraceId: (field_id = 0, len = 16, default = "0000000000000000") - 16-byte array representing the trace_id. +// SpanId: (field_id = 1, len = 8, default = "00000000") - 8-byte array representing the span_id. +// TraceOptions: (field_id = 2, len = 1, default = "0") - 1-byte array representing the trace_options. +// +// Fields MUST be encoded using the field id order (smaller to higher). +// +// Valid value example: +// +// {0, 0, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 1, 97, +// 98, 99, 100, 101, 102, 103, 104, 2, 1} +// +// version_id = 0; +// trace_id = {64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79} +// span_id = {97, 98, 99, 100, 101, 102, 103, 104}; +// trace_options = {1}; + +import ( + "net/http" + + "go.opencensus.io/trace" +) + +// Binary returns the binary format representation of a SpanContext. +// +// If sc is the zero value, Binary returns nil. +func Binary(sc trace.SpanContext) []byte { + if sc == (trace.SpanContext{}) { + return nil + } + var b [29]byte + copy(b[2:18], sc.TraceID[:]) + b[18] = 1 + copy(b[19:27], sc.SpanID[:]) + b[27] = 2 + b[28] = uint8(sc.TraceOptions) + return b[:] +} + +// FromBinary returns the SpanContext represented by b. +// +// If b has an unsupported version ID or contains no TraceID, FromBinary +// returns with ok==false. +func FromBinary(b []byte) (sc trace.SpanContext, ok bool) { + if len(b) == 0 || b[0] != 0 { + return trace.SpanContext{}, false + } + b = b[1:] + if len(b) >= 17 && b[0] == 0 { + copy(sc.TraceID[:], b[1:17]) + b = b[17:] + } else { + return trace.SpanContext{}, false + } + if len(b) >= 9 && b[0] == 1 { + copy(sc.SpanID[:], b[1:9]) + b = b[9:] + } + if len(b) >= 2 && b[0] == 2 { + sc.TraceOptions = trace.TraceOptions(b[1]) + } + return sc, true +} + +// HTTPFormat implementations propagate span contexts +// in HTTP requests. +// +// SpanContextFromRequest extracts a span context from incoming +// requests. +// +// SpanContextToRequest modifies the given request to include the given +// span context. +type HTTPFormat interface { + SpanContextFromRequest(req *http.Request) (sc trace.SpanContext, ok bool) + SpanContextToRequest(sc trace.SpanContext, req *http.Request) +} + +// TODO(jbd): Find a more representative but short name for HTTPFormat. diff --git a/vendor/go.opencensus.io/trace/sampling.go b/vendor/go.opencensus.io/trace/sampling.go new file mode 100644 index 000000000..71c10f9e3 --- /dev/null +++ b/vendor/go.opencensus.io/trace/sampling.go @@ -0,0 +1,75 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
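+
+// Illustrative per-span sampler override (a minimal sketch; ctx and the
+// span name are hypothetical). The WithSampler option takes precedence over
+// the global DefaultSampler for this span:
+//
+//	ctx, span := trace.StartSpan(ctx, "db.query",
+//		trace.WithSampler(trace.ProbabilitySampler(0.1)))
+//	defer span.End()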
+ +package trace + +import ( + "encoding/binary" +) + +const defaultSamplingProbability = 1e-4 + +// Sampler decides whether a trace should be sampled and exported. +type Sampler func(SamplingParameters) SamplingDecision + +// SamplingParameters contains the values passed to a Sampler. +type SamplingParameters struct { + ParentContext SpanContext + TraceID TraceID + SpanID SpanID + Name string + HasRemoteParent bool +} + +// SamplingDecision is the value returned by a Sampler. +type SamplingDecision struct { + Sample bool +} + +// ProbabilitySampler returns a Sampler that samples a given fraction of traces. +// +// It also samples spans whose parents are sampled. +func ProbabilitySampler(fraction float64) Sampler { + if !(fraction >= 0) { + fraction = 0 + } else if fraction >= 1 { + return AlwaysSample() + } + + traceIDUpperBound := uint64(fraction * (1 << 63)) + return Sampler(func(p SamplingParameters) SamplingDecision { + if p.ParentContext.IsSampled() { + return SamplingDecision{Sample: true} + } + x := binary.BigEndian.Uint64(p.TraceID[0:8]) >> 1 + return SamplingDecision{Sample: x < traceIDUpperBound} + }) +} + +// AlwaysSample returns a Sampler that samples every trace. +// Be careful about using this sampler in a production application with +// significant traffic: a new trace will be started and exported for every +// request. +func AlwaysSample() Sampler { + return func(p SamplingParameters) SamplingDecision { + return SamplingDecision{Sample: true} + } +} + +// NeverSample returns a Sampler that samples no traces. +func NeverSample() Sampler { + return func(p SamplingParameters) SamplingDecision { + return SamplingDecision{Sample: false} + } +} diff --git a/vendor/go.opencensus.io/trace/spanbucket.go b/vendor/go.opencensus.io/trace/spanbucket.go new file mode 100644 index 000000000..fbabad34c --- /dev/null +++ b/vendor/go.opencensus.io/trace/spanbucket.go @@ -0,0 +1,130 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace + +import ( + "time" +) + +// samplePeriod is the minimum time between accepting spans in a single bucket. +const samplePeriod = time.Second + +// defaultLatencies contains the default latency bucket bounds. +// TODO: consider defaults, make configurable +var defaultLatencies = [...]time.Duration{ + 10 * time.Microsecond, + 100 * time.Microsecond, + time.Millisecond, + 10 * time.Millisecond, + 100 * time.Millisecond, + time.Second, + 10 * time.Second, + time.Minute, +} + +// bucket is a container for a set of spans for a particular error code or latency range. +type bucket struct { + nextTime time.Time // next time we can accept a span + buffer []*SpanData // circular buffer of spans + nextIndex int // location next SpanData should be placed in buffer + overflow bool // whether the circular buffer has wrapped around +} + +func makeBucket(bufferSize int) bucket { + return bucket{ + buffer: make([]*SpanData, bufferSize), + } +} + +// add adds a span to the bucket, if nextTime has been reached. 
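+// Spans whose EndTime falls before nextTime (that is, within samplePeriod
+// of the previously accepted span) are dropped, so a bucket accepts at most
+// one span per samplePeriod.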
+func (b *bucket) add(s *SpanData) { + if s.EndTime.Before(b.nextTime) { + return + } + if len(b.buffer) == 0 { + return + } + b.nextTime = s.EndTime.Add(samplePeriod) + b.buffer[b.nextIndex] = s + b.nextIndex++ + if b.nextIndex == len(b.buffer) { + b.nextIndex = 0 + b.overflow = true + } +} + +// size returns the number of spans in the bucket. +func (b *bucket) size() int { + if b.overflow { + return len(b.buffer) + } + return b.nextIndex +} + +// span returns the ith span in the bucket. +func (b *bucket) span(i int) *SpanData { + if !b.overflow { + return b.buffer[i] + } + if i < len(b.buffer)-b.nextIndex { + return b.buffer[b.nextIndex+i] + } + return b.buffer[b.nextIndex+i-len(b.buffer)] +} + +// resize changes the size of the bucket to n, keeping up to n existing spans. +func (b *bucket) resize(n int) { + cur := b.size() + newBuffer := make([]*SpanData, n) + if cur < n { + for i := 0; i < cur; i++ { + newBuffer[i] = b.span(i) + } + b.buffer = newBuffer + b.nextIndex = cur + b.overflow = false + return + } + for i := 0; i < n; i++ { + newBuffer[i] = b.span(i + cur - n) + } + b.buffer = newBuffer + b.nextIndex = 0 + b.overflow = true +} + +// latencyBucket returns the appropriate bucket number for a given latency. +func latencyBucket(latency time.Duration) int { + i := 0 + for i < len(defaultLatencies) && latency >= defaultLatencies[i] { + i++ + } + return i +} + +// latencyBucketBounds returns the lower and upper bounds for a latency bucket +// number. +// +// The lower bound is inclusive, the upper bound is exclusive (except for the +// last bucket.) +func latencyBucketBounds(index int) (lower time.Duration, upper time.Duration) { + if index == 0 { + return 0, defaultLatencies[index] + } + if index == len(defaultLatencies) { + return defaultLatencies[index-1], 1<<63 - 1 + } + return defaultLatencies[index-1], defaultLatencies[index] +} diff --git a/vendor/go.opencensus.io/trace/spanstore.go b/vendor/go.opencensus.io/trace/spanstore.go new file mode 100644 index 000000000..c442d9902 --- /dev/null +++ b/vendor/go.opencensus.io/trace/spanstore.go @@ -0,0 +1,306 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace + +import ( + "sync" + "time" + + "go.opencensus.io/internal" +) + +const ( + maxBucketSize = 100000 + defaultBucketSize = 10 +) + +var ( + ssmu sync.RWMutex // protects spanStores + spanStores = make(map[string]*spanStore) +) + +// This exists purely to avoid exposing internal methods used by z-Pages externally. +type internalOnly struct{} + +func init() { + //TODO(#412): remove + internal.Trace = &internalOnly{} +} + +// ReportActiveSpans returns the active spans for the given name. +func (i internalOnly) ReportActiveSpans(name string) []*SpanData { + s := spanStoreForName(name) + if s == nil { + return nil + } + var out []*SpanData + s.mu.Lock() + defer s.mu.Unlock() + for span := range s.active { + out = append(out, span.makeSpanData()) + } + return out +} + +// ReportSpansByError returns a sample of error spans. 
+// +// If code is nonzero, only spans with that status code are returned. +func (i internalOnly) ReportSpansByError(name string, code int32) []*SpanData { + s := spanStoreForName(name) + if s == nil { + return nil + } + var out []*SpanData + s.mu.Lock() + defer s.mu.Unlock() + if code != 0 { + if b, ok := s.errors[code]; ok { + for _, sd := range b.buffer { + if sd == nil { + break + } + out = append(out, sd) + } + } + } else { + for _, b := range s.errors { + for _, sd := range b.buffer { + if sd == nil { + break + } + out = append(out, sd) + } + } + } + return out +} + +// ConfigureBucketSizes sets the number of spans to keep per latency and error +// bucket for different span names. +func (i internalOnly) ConfigureBucketSizes(bcs []internal.BucketConfiguration) { + for _, bc := range bcs { + latencyBucketSize := bc.MaxRequestsSucceeded + if latencyBucketSize < 0 { + latencyBucketSize = 0 + } + if latencyBucketSize > maxBucketSize { + latencyBucketSize = maxBucketSize + } + errorBucketSize := bc.MaxRequestsErrors + if errorBucketSize < 0 { + errorBucketSize = 0 + } + if errorBucketSize > maxBucketSize { + errorBucketSize = maxBucketSize + } + spanStoreSetSize(bc.Name, latencyBucketSize, errorBucketSize) + } +} + +// ReportSpansPerMethod returns a summary of what spans are being stored for each span name. +func (i internalOnly) ReportSpansPerMethod() map[string]internal.PerMethodSummary { + out := make(map[string]internal.PerMethodSummary) + ssmu.RLock() + defer ssmu.RUnlock() + for name, s := range spanStores { + s.mu.Lock() + p := internal.PerMethodSummary{ + Active: len(s.active), + } + for code, b := range s.errors { + p.ErrorBuckets = append(p.ErrorBuckets, internal.ErrorBucketSummary{ + ErrorCode: code, + Size: b.size(), + }) + } + for i, b := range s.latency { + min, max := latencyBucketBounds(i) + p.LatencyBuckets = append(p.LatencyBuckets, internal.LatencyBucketSummary{ + MinLatency: min, + MaxLatency: max, + Size: b.size(), + }) + } + s.mu.Unlock() + out[name] = p + } + return out +} + +// ReportSpansByLatency returns a sample of successful spans. +// +// minLatency is the minimum latency of spans to be returned. +// maxLatency, if nonzero, is the maximum latency of spans to be returned. +func (i internalOnly) ReportSpansByLatency(name string, minLatency, maxLatency time.Duration) []*SpanData { + s := spanStoreForName(name) + if s == nil { + return nil + } + var out []*SpanData + s.mu.Lock() + defer s.mu.Unlock() + for i, b := range s.latency { + min, max := latencyBucketBounds(i) + if i+1 != len(s.latency) && max <= minLatency { + continue + } + if maxLatency != 0 && maxLatency < min { + continue + } + for _, sd := range b.buffer { + if sd == nil { + break + } + if minLatency != 0 || maxLatency != 0 { + d := sd.EndTime.Sub(sd.StartTime) + if d < minLatency { + continue + } + if maxLatency != 0 && d > maxLatency { + continue + } + } + out = append(out, sd) + } + } + return out +} + +// spanStore keeps track of spans stored for a particular span name. +// +// It contains all active spans; a sample of spans for failed requests, +// categorized by error code; and a sample of spans for successful requests, +// bucketed by latency. +type spanStore struct { + mu sync.Mutex // protects everything below. + active map[*Span]struct{} + errors map[int32]*bucket + latency []bucket + maxSpansPerErrorBucket int +} + +// newSpanStore creates a span store. 
+func newSpanStore(name string, latencyBucketSize int, errorBucketSize int) *spanStore { + s := &spanStore{ + active: make(map[*Span]struct{}), + latency: make([]bucket, len(defaultLatencies)+1), + maxSpansPerErrorBucket: errorBucketSize, + } + for i := range s.latency { + s.latency[i] = makeBucket(latencyBucketSize) + } + return s +} + +// spanStoreForName returns the spanStore for the given name. +// +// It returns nil if it doesn't exist. +func spanStoreForName(name string) *spanStore { + var s *spanStore + ssmu.RLock() + s, _ = spanStores[name] + ssmu.RUnlock() + return s +} + +// spanStoreForNameCreateIfNew returns the spanStore for the given name. +// +// It creates it if it didn't exist. +func spanStoreForNameCreateIfNew(name string) *spanStore { + ssmu.RLock() + s, ok := spanStores[name] + ssmu.RUnlock() + if ok { + return s + } + ssmu.Lock() + defer ssmu.Unlock() + s, ok = spanStores[name] + if ok { + return s + } + s = newSpanStore(name, defaultBucketSize, defaultBucketSize) + spanStores[name] = s + return s +} + +// spanStoreSetSize resizes the spanStore for the given name. +// +// It creates it if it didn't exist. +func spanStoreSetSize(name string, latencyBucketSize int, errorBucketSize int) { + ssmu.RLock() + s, ok := spanStores[name] + ssmu.RUnlock() + if ok { + s.resize(latencyBucketSize, errorBucketSize) + return + } + ssmu.Lock() + defer ssmu.Unlock() + s, ok = spanStores[name] + if ok { + s.resize(latencyBucketSize, errorBucketSize) + return + } + s = newSpanStore(name, latencyBucketSize, errorBucketSize) + spanStores[name] = s +} + +func (s *spanStore) resize(latencyBucketSize int, errorBucketSize int) { + s.mu.Lock() + for i := range s.latency { + s.latency[i].resize(latencyBucketSize) + } + for _, b := range s.errors { + b.resize(errorBucketSize) + } + s.maxSpansPerErrorBucket = errorBucketSize + s.mu.Unlock() +} + +// add adds a span to the active bucket of the spanStore. +func (s *spanStore) add(span *Span) { + s.mu.Lock() + s.active[span] = struct{}{} + s.mu.Unlock() +} + +// finished removes a span from the active set, and adds a corresponding +// SpanData to a latency or error bucket. +func (s *spanStore) finished(span *Span, sd *SpanData) { + latency := sd.EndTime.Sub(sd.StartTime) + if latency < 0 { + latency = 0 + } + code := sd.Status.Code + + s.mu.Lock() + delete(s.active, span) + if code == 0 { + s.latency[latencyBucket(latency)].add(sd) + } else { + if s.errors == nil { + s.errors = make(map[int32]*bucket) + } + if b := s.errors[code]; b != nil { + b.add(sd) + } else { + b := makeBucket(s.maxSpansPerErrorBucket) + s.errors[code] = &b + b.add(sd) + } + } + s.mu.Unlock() +} diff --git a/vendor/go.opencensus.io/trace/status_codes.go b/vendor/go.opencensus.io/trace/status_codes.go new file mode 100644 index 000000000..ec60effd1 --- /dev/null +++ b/vendor/go.opencensus.io/trace/status_codes.go @@ -0,0 +1,37 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
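+
+// Illustrative usage (a minimal sketch; span is a hypothetical started
+// span):
+//
+//	span.SetStatus(trace.Status{
+//		Code:    trace.StatusCodeNotFound,
+//		Message: "resource not found",
+//	})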
+ +package trace + +// Status codes for use with Span.SetStatus. These correspond to the status +// codes used by gRPC defined here: https://github.com/googleapis/googleapis/blob/master/google/rpc/code.proto +const ( + StatusCodeOK = 0 + StatusCodeCancelled = 1 + StatusCodeUnknown = 2 + StatusCodeInvalidArgument = 3 + StatusCodeDeadlineExceeded = 4 + StatusCodeNotFound = 5 + StatusCodeAlreadyExists = 6 + StatusCodePermissionDenied = 7 + StatusCodeResourceExhausted = 8 + StatusCodeFailedPrecondition = 9 + StatusCodeAborted = 10 + StatusCodeOutOfRange = 11 + StatusCodeUnimplemented = 12 + StatusCodeInternal = 13 + StatusCodeUnavailable = 14 + StatusCodeDataLoss = 15 + StatusCodeUnauthenticated = 16 +) diff --git a/vendor/go.opencensus.io/trace/trace.go b/vendor/go.opencensus.io/trace/trace.go new file mode 100644 index 000000000..38ead7bf0 --- /dev/null +++ b/vendor/go.opencensus.io/trace/trace.go @@ -0,0 +1,598 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace + +import ( + "context" + crand "crypto/rand" + "encoding/binary" + "fmt" + "math/rand" + "sync" + "sync/atomic" + "time" + + "go.opencensus.io/internal" + "go.opencensus.io/trace/tracestate" +) + +// Span represents a span of a trace. It has an associated SpanContext, and +// stores data accumulated while the span is active. +// +// Ideally users should interact with Spans by calling the functions in this +// package that take a Context parameter. +type Span struct { + // data contains information recorded about the span. + // + // It will be non-nil if we are exporting the span or recording events for it. + // Otherwise, data is nil, and the Span is simply a carrier for the + // SpanContext, so that the trace ID is propagated. + data *SpanData + mu sync.Mutex // protects the contents of *data (but not the pointer value.) + spanContext SpanContext + + // lruAttributes are capped at configured limit. When the capacity is reached an oldest entry + // is removed to create room for a new entry. + lruAttributes *lruMap + + // annotations are stored in FIFO queue capped by configured limit. + annotations *evictedQueue + + // messageEvents are stored in FIFO queue capped by configured limit. + messageEvents *evictedQueue + + // links are stored in FIFO queue capped by configured limit. + links *evictedQueue + + // spanStore is the spanStore this span belongs to, if any, otherwise it is nil. + *spanStore + endOnce sync.Once + + executionTracerTaskEnd func() // ends the execution tracer span +} + +// IsRecordingEvents returns true if events are being recorded for this span. +// Use this check to avoid computing expensive annotations when they will never +// be used. +func (s *Span) IsRecordingEvents() bool { + if s == nil { + return false + } + return s.data != nil +} + +// TraceOptions contains options associated with a trace span. +type TraceOptions uint32 + +// IsSampled returns true if the span will be exported. 
+func (sc SpanContext) IsSampled() bool {
+	return sc.TraceOptions.IsSampled()
+}
+
+// setIsSampled sets the TraceOptions bit that determines whether the span will be exported.
+func (sc *SpanContext) setIsSampled(sampled bool) {
+	if sampled {
+		sc.TraceOptions |= 1
+	} else {
+		sc.TraceOptions &= ^TraceOptions(1)
+	}
+}
+
+// IsSampled returns true if the span will be exported.
+func (t TraceOptions) IsSampled() bool {
+	return t&1 == 1
+}
+
+// SpanContext contains the state that must propagate across process boundaries.
+//
+// SpanContext is not an implementation of context.Context.
+// TODO: add reference to external Census docs for SpanContext.
+type SpanContext struct {
+	TraceID      TraceID
+	SpanID       SpanID
+	TraceOptions TraceOptions
+	Tracestate   *tracestate.Tracestate
+}
+
+type contextKey struct{}
+
+// FromContext returns the Span stored in a context, or nil if there isn't one.
+func FromContext(ctx context.Context) *Span {
+	s, _ := ctx.Value(contextKey{}).(*Span)
+	return s
+}
+
+// NewContext returns a new context with the given Span attached.
+func NewContext(parent context.Context, s *Span) context.Context {
+	return context.WithValue(parent, contextKey{}, s)
+}
+
+// All available span kinds. Span kind must be either one of these values.
+const (
+	SpanKindUnspecified = iota
+	SpanKindServer
+	SpanKindClient
+)
+
+// StartOptions contains options concerning how a span is started.
+type StartOptions struct {
+	// Sampler to consult for this Span. If provided, it is always consulted.
+	//
+	// If not provided, then the behavior differs based on whether
+	// the parent of this Span is remote, local, or there is no parent.
+	// In the case of a remote parent or no parent, the
+	// default sampler (see Config) will be consulted. Otherwise,
+	// when there is a non-remote parent, no new sampling decision will be made:
+	// we will preserve the sampling of the parent.
+	Sampler Sampler
+
+	// SpanKind represents the kind of a span. If none is set,
+	// SpanKindUnspecified is used.
+	SpanKind int
+}
+
+// StartOption applies changes to StartOptions.
+type StartOption func(*StartOptions)
+
+// WithSpanKind makes new spans to be created with the given kind.
+func WithSpanKind(spanKind int) StartOption {
+	return func(o *StartOptions) {
+		o.SpanKind = spanKind
+	}
+}
+
+// WithSampler makes new spans to be created with a custom sampler.
+// Otherwise, the global sampler is used.
+func WithSampler(sampler Sampler) StartOption {
+	return func(o *StartOptions) {
+		o.Sampler = sampler
+	}
+}
+
+// StartSpan starts a new child span of the current span in the context. If
+// there is no span in the context, it creates a new trace and span.
+//
+// Returned context contains the newly created span. You can use it to
+// propagate the returned span in process.
+func StartSpan(ctx context.Context, name string, o ...StartOption) (context.Context, *Span) {
+	var opts StartOptions
+	var parent SpanContext
+	if p := FromContext(ctx); p != nil {
+		p.addChild()
+		parent = p.spanContext
+	}
+	for _, op := range o {
+		op(&opts)
+	}
+	span := startSpanInternal(name, parent != SpanContext{}, parent, false, opts)
+
+	ctx, end := startExecutionTracerTask(ctx, name)
+	span.executionTracerTaskEnd = end
+	return NewContext(ctx, span), span
+}
+
+// StartSpanWithRemoteParent starts a new child span of the span from the given parent.
+//
+// If the incoming context contains a parent span, it is ignored. StartSpanWithRemoteParent is
+// preferred for cases where the parent is propagated via an incoming request.
+// +// Returned context contains the newly created span. You can use it to +// propagate the returned span in process. +func StartSpanWithRemoteParent(ctx context.Context, name string, parent SpanContext, o ...StartOption) (context.Context, *Span) { + var opts StartOptions + for _, op := range o { + op(&opts) + } + span := startSpanInternal(name, parent != SpanContext{}, parent, true, opts) + ctx, end := startExecutionTracerTask(ctx, name) + span.executionTracerTaskEnd = end + return NewContext(ctx, span), span +} + +func startSpanInternal(name string, hasParent bool, parent SpanContext, remoteParent bool, o StartOptions) *Span { + span := &Span{} + span.spanContext = parent + + cfg := config.Load().(*Config) + + if !hasParent { + span.spanContext.TraceID = cfg.IDGenerator.NewTraceID() + } + span.spanContext.SpanID = cfg.IDGenerator.NewSpanID() + sampler := cfg.DefaultSampler + + if !hasParent || remoteParent || o.Sampler != nil { + // If this span is the child of a local span and no Sampler is set in the + // options, keep the parent's TraceOptions. + // + // Otherwise, consult the Sampler in the options if it is non-nil, otherwise + // the default sampler. + if o.Sampler != nil { + sampler = o.Sampler + } + span.spanContext.setIsSampled(sampler(SamplingParameters{ + ParentContext: parent, + TraceID: span.spanContext.TraceID, + SpanID: span.spanContext.SpanID, + Name: name, + HasRemoteParent: remoteParent}).Sample) + } + + if !internal.LocalSpanStoreEnabled && !span.spanContext.IsSampled() { + return span + } + + span.data = &SpanData{ + SpanContext: span.spanContext, + StartTime: time.Now(), + SpanKind: o.SpanKind, + Name: name, + HasRemoteParent: remoteParent, + } + span.lruAttributes = newLruMap(cfg.MaxAttributesPerSpan) + span.annotations = newEvictedQueue(cfg.MaxAnnotationEventsPerSpan) + span.messageEvents = newEvictedQueue(cfg.MaxMessageEventsPerSpan) + span.links = newEvictedQueue(cfg.MaxLinksPerSpan) + + if hasParent { + span.data.ParentSpanID = parent.SpanID + } + if internal.LocalSpanStoreEnabled { + var ss *spanStore + ss = spanStoreForNameCreateIfNew(name) + if ss != nil { + span.spanStore = ss + ss.add(span) + } + } + + return span +} + +// End ends the span. +func (s *Span) End() { + if s == nil { + return + } + if s.executionTracerTaskEnd != nil { + s.executionTracerTaskEnd() + } + if !s.IsRecordingEvents() { + return + } + s.endOnce.Do(func() { + exp, _ := exporters.Load().(exportersMap) + mustExport := s.spanContext.IsSampled() && len(exp) > 0 + if s.spanStore != nil || mustExport { + sd := s.makeSpanData() + sd.EndTime = internal.MonotonicEndTime(sd.StartTime) + if s.spanStore != nil { + s.spanStore.finished(s, sd) + } + if mustExport { + for e := range exp { + e.ExportSpan(sd) + } + } + } + }) +} + +// makeSpanData produces a SpanData representing the current state of the Span. +// It requires that s.data is non-nil. 
+func (s *Span) makeSpanData() *SpanData { + var sd SpanData + s.mu.Lock() + sd = *s.data + if s.lruAttributes.simpleLruMap.Len() > 0 { + sd.Attributes = s.lruAttributesToAttributeMap() + sd.DroppedAttributeCount = s.lruAttributes.droppedCount + } + if len(s.annotations.queue) > 0 { + sd.Annotations = s.interfaceArrayToAnnotationArray() + sd.DroppedAnnotationCount = s.annotations.droppedCount + } + if len(s.messageEvents.queue) > 0 { + sd.MessageEvents = s.interfaceArrayToMessageEventArray() + sd.DroppedMessageEventCount = s.messageEvents.droppedCount + } + if len(s.links.queue) > 0 { + sd.Links = s.interfaceArrayToLinksArray() + sd.DroppedLinkCount = s.links.droppedCount + } + s.mu.Unlock() + return &sd +} + +// SpanContext returns the SpanContext of the span. +func (s *Span) SpanContext() SpanContext { + if s == nil { + return SpanContext{} + } + return s.spanContext +} + +// SetName sets the name of the span, if it is recording events. +func (s *Span) SetName(name string) { + if !s.IsRecordingEvents() { + return + } + s.mu.Lock() + s.data.Name = name + s.mu.Unlock() +} + +// SetStatus sets the status of the span, if it is recording events. +func (s *Span) SetStatus(status Status) { + if !s.IsRecordingEvents() { + return + } + s.mu.Lock() + s.data.Status = status + s.mu.Unlock() +} + +func (s *Span) interfaceArrayToLinksArray() []Link { + linksArr := make([]Link, 0) + for _, value := range s.links.queue { + linksArr = append(linksArr, value.(Link)) + } + return linksArr +} + +func (s *Span) interfaceArrayToMessageEventArray() []MessageEvent { + messageEventArr := make([]MessageEvent, 0) + for _, value := range s.messageEvents.queue { + messageEventArr = append(messageEventArr, value.(MessageEvent)) + } + return messageEventArr +} + +func (s *Span) interfaceArrayToAnnotationArray() []Annotation { + annotationArr := make([]Annotation, 0) + for _, value := range s.annotations.queue { + annotationArr = append(annotationArr, value.(Annotation)) + } + return annotationArr +} + +func (s *Span) lruAttributesToAttributeMap() map[string]interface{} { + attributes := make(map[string]interface{}) + for _, key := range s.lruAttributes.simpleLruMap.Keys() { + value, ok := s.lruAttributes.simpleLruMap.Get(key) + if ok { + keyStr := key.(string) + attributes[keyStr] = value + } + } + return attributes +} + +func (s *Span) copyToCappedAttributes(attributes []Attribute) { + for _, a := range attributes { + s.lruAttributes.add(a.key, a.value) + } +} + +func (s *Span) addChild() { + if !s.IsRecordingEvents() { + return + } + s.mu.Lock() + s.data.ChildSpanCount++ + s.mu.Unlock() +} + +// AddAttributes sets attributes in the span. +// +// Existing attributes whose keys appear in the attributes parameter are overwritten. +func (s *Span) AddAttributes(attributes ...Attribute) { + if !s.IsRecordingEvents() { + return + } + s.mu.Lock() + s.copyToCappedAttributes(attributes) + s.mu.Unlock() +} + +// copyAttributes copies a slice of Attributes into a map. +func copyAttributes(m map[string]interface{}, attributes []Attribute) { + for _, a := range attributes { + m[a.key] = a.value + } +} + +func (s *Span) lazyPrintfInternal(attributes []Attribute, format string, a ...interface{}) { + now := time.Now() + msg := fmt.Sprintf(format, a...) 
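+	// The annotation message is formatted eagerly, before the span lock is
+	// taken below; only the append to s.annotations needs the mutex.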
+	var m map[string]interface{}
+	s.mu.Lock()
+	if len(attributes) != 0 {
+		m = make(map[string]interface{})
+		copyAttributes(m, attributes)
+	}
+	s.annotations.add(Annotation{
+		Time:       now,
+		Message:    msg,
+		Attributes: m,
+	})
+	s.mu.Unlock()
+}
+
+func (s *Span) printStringInternal(attributes []Attribute, str string) {
+	now := time.Now()
+	var a map[string]interface{}
+	s.mu.Lock()
+	if len(attributes) != 0 {
+		a = make(map[string]interface{})
+		copyAttributes(a, attributes)
+	}
+	s.annotations.add(Annotation{
+		Time:       now,
+		Message:    str,
+		Attributes: a,
+	})
+	s.mu.Unlock()
+}
+
+// Annotate adds an annotation with attributes.
+// Attributes can be nil.
+func (s *Span) Annotate(attributes []Attribute, str string) {
+	if !s.IsRecordingEvents() {
+		return
+	}
+	s.printStringInternal(attributes, str)
+}
+
+// Annotatef adds an annotation with attributes.
+func (s *Span) Annotatef(attributes []Attribute, format string, a ...interface{}) {
+	if !s.IsRecordingEvents() {
+		return
+	}
+	s.lazyPrintfInternal(attributes, format, a...)
+}
+
+// AddMessageSendEvent adds a message send event to the span.
+//
+// messageID is an identifier for the message, which is recommended to be
+// unique in this span and the same between the send event and the receive
+// event (this allows the sender and receiver to correlate the message).
+// For example, this could be a sequence id.
+func (s *Span) AddMessageSendEvent(messageID, uncompressedByteSize, compressedByteSize int64) {
+	if !s.IsRecordingEvents() {
+		return
+	}
+	now := time.Now()
+	s.mu.Lock()
+	s.messageEvents.add(MessageEvent{
+		Time:                 now,
+		EventType:            MessageEventTypeSent,
+		MessageID:            messageID,
+		UncompressedByteSize: uncompressedByteSize,
+		CompressedByteSize:   compressedByteSize,
+	})
+	s.mu.Unlock()
+}
+
+// AddMessageReceiveEvent adds a message receive event to the span.
+//
+// messageID is an identifier for the message, which is recommended to be
+// unique in this span and the same between the send event and the receive
+// event (this allows the sender and receiver to correlate the message).
+// For example, this could be a sequence id.
+func (s *Span) AddMessageReceiveEvent(messageID, uncompressedByteSize, compressedByteSize int64) {
+	if !s.IsRecordingEvents() {
+		return
+	}
+	now := time.Now()
+	s.mu.Lock()
+	s.messageEvents.add(MessageEvent{
+		Time:                 now,
+		EventType:            MessageEventTypeRecv,
+		MessageID:            messageID,
+		UncompressedByteSize: uncompressedByteSize,
+		CompressedByteSize:   compressedByteSize,
+	})
+	s.mu.Unlock()
+}
+
+// AddLink adds a link to the span.
+func (s *Span) AddLink(l Link) {
+	if !s.IsRecordingEvents() {
+		return
+	}
+	s.mu.Lock()
+	s.links.add(l)
+	s.mu.Unlock()
+}
+
+func (s *Span) String() string {
+	if s == nil {
+		return ""
+	}
+	if s.data == nil {
+		return fmt.Sprintf("span %s", s.spanContext.SpanID)
+	}
+	s.mu.Lock()
+	str := fmt.Sprintf("span %s %q", s.spanContext.SpanID, s.data.Name)
+	s.mu.Unlock()
+	return str
+}
+
+var config atomic.Value // access atomically
+
+func init() {
+	gen := &defaultIDGenerator{}
+	// initialize traceID and spanID generators.
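+	// binary.Read fills each listed field with random bytes from
+	// crypto/rand: rngSeed seeds the math/rand source used for trace IDs,
+	// traceIDAdd perturbs its output, and nextSpanID/spanIDInc drive the
+	// span ID sequence. spanIDInc is forced odd below so that the additive
+	// sequence visits every uint64 value before repeating.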
+	var rngSeed int64
+	for _, p := range []interface{}{
+		&rngSeed, &gen.traceIDAdd, &gen.nextSpanID, &gen.spanIDInc,
+	} {
+		binary.Read(crand.Reader, binary.LittleEndian, p)
+	}
+	gen.traceIDRand = rand.New(rand.NewSource(rngSeed))
+	gen.spanIDInc |= 1
+
+	config.Store(&Config{
+		DefaultSampler:             ProbabilitySampler(defaultSamplingProbability),
+		IDGenerator:                gen,
+		MaxAttributesPerSpan:       DefaultMaxAttributesPerSpan,
+		MaxAnnotationEventsPerSpan: DefaultMaxAnnotationEventsPerSpan,
+		MaxMessageEventsPerSpan:    DefaultMaxMessageEventsPerSpan,
+		MaxLinksPerSpan:            DefaultMaxLinksPerSpan,
+	})
+}
+
+type defaultIDGenerator struct {
+	sync.Mutex
+
+	// Please keep these as the first fields
+	// so that these 8 byte fields will be aligned on addresses
+	// divisible by 8, on both 32-bit and 64-bit machines when
+	// performing atomic increments and accesses.
+	// See:
+	// * https://github.com/census-instrumentation/opencensus-go/issues/587
+	// * https://github.com/census-instrumentation/opencensus-go/issues/865
+	// * https://golang.org/pkg/sync/atomic/#pkg-note-BUG
+	nextSpanID uint64
+	spanIDInc  uint64
+
+	traceIDAdd  [2]uint64
+	traceIDRand *rand.Rand
+}
+
+// NewSpanID returns a non-zero span ID from a randomly-chosen sequence.
+func (gen *defaultIDGenerator) NewSpanID() [8]byte {
+	var id uint64
+	for id == 0 {
+		id = atomic.AddUint64(&gen.nextSpanID, gen.spanIDInc)
+	}
+	var sid [8]byte
+	binary.LittleEndian.PutUint64(sid[:], id)
+	return sid
+}
+
+// NewTraceID returns a non-zero trace ID from a randomly-chosen sequence.
+// The generator's mutex is acquired internally, so callers need not hold it.
+func (gen *defaultIDGenerator) NewTraceID() [16]byte {
+	var tid [16]byte
+	// Construct the trace ID from two outputs of traceIDRand, with a constant
+	// added to each half for additional entropy.
+	gen.Lock()
+	binary.LittleEndian.PutUint64(tid[0:8], gen.traceIDRand.Uint64()+gen.traceIDAdd[0])
+	binary.LittleEndian.PutUint64(tid[8:16], gen.traceIDRand.Uint64()+gen.traceIDAdd[1])
+	gen.Unlock()
+	return tid
+}
diff --git a/vendor/go.opencensus.io/trace/trace_go11.go b/vendor/go.opencensus.io/trace/trace_go11.go
new file mode 100644
index 000000000..b7d8aaf28
--- /dev/null
+++ b/vendor/go.opencensus.io/trace/trace_go11.go
@@ -0,0 +1,32 @@
+// Copyright 2018, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build go1.11
+
+package trace
+
+import (
+	"context"
+	t "runtime/trace"
+)
+
+func startExecutionTracerTask(ctx context.Context, name string) (context.Context, func()) {
+	if !t.IsEnabled() {
+		// Avoid additional overhead if
+		// runtime/trace is not enabled.
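+		// A no-op end function is returned so that callers can invoke it
+		// unconditionally.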
+		return ctx, func() {}
+	}
+	nctx, task := t.NewTask(ctx, name)
+	return nctx, task.End
+}
diff --git a/vendor/go.opencensus.io/trace/trace_nongo11.go b/vendor/go.opencensus.io/trace/trace_nongo11.go
new file mode 100644
index 000000000..e25419859
--- /dev/null
+++ b/vendor/go.opencensus.io/trace/trace_nongo11.go
@@ -0,0 +1,25 @@
+// Copyright 2018, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !go1.11
+
+package trace
+
+import (
+	"context"
+)
+
+func startExecutionTracerTask(ctx context.Context, name string) (context.Context, func()) {
+	return ctx, func() {}
+}
diff --git a/vendor/go.opencensus.io/trace/tracestate/tracestate.go b/vendor/go.opencensus.io/trace/tracestate/tracestate.go
new file mode 100644
index 000000000..2d6c713eb
--- /dev/null
+++ b/vendor/go.opencensus.io/trace/tracestate/tracestate.go
@@ -0,0 +1,147 @@
+// Copyright 2018, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package tracestate implements support for the Tracestate header of the
+// W3C TraceContext propagation format.
+package tracestate
+
+import (
+	"fmt"
+	"regexp"
+)
+
+const (
+	keyMaxSize       = 256
+	valueMaxSize     = 256
+	maxKeyValuePairs = 32
+)
+
+const (
+	keyWithoutVendorFormat = `[a-z][_0-9a-z\-\*\/]{0,255}`
+	keyWithVendorFormat    = `[a-z][_0-9a-z\-\*\/]{0,240}@[a-z][_0-9a-z\-\*\/]{0,13}`
+	keyFormat              = `(` + keyWithoutVendorFormat + `)|(` + keyWithVendorFormat + `)`
+	valueFormat            = `[\x20-\x2b\x2d-\x3c\x3e-\x7e]{0,255}[\x21-\x2b\x2d-\x3c\x3e-\x7e]`
+)
+
+var keyValidationRegExp = regexp.MustCompile(`^(` + keyFormat + `)$`)
+var valueValidationRegExp = regexp.MustCompile(`^(` + valueFormat + `)$`)
+
+// Tracestate represents tracing-system specific context in a list of key-value pairs. Tracestate allows different
+// vendors to propagate additional information and interoperate with their legacy ID formats.
+type Tracestate struct {
+	entries []Entry
+}
+
+// Entry represents one key-value pair in the Tracestate's list of key-value pairs.
+type Entry struct {
+	// Key is an opaque string of up to 256 printable characters. It MUST begin with a lowercase letter,
+	// and can only contain lowercase letters a-z, digits 0-9, underscores _, dashes -, asterisks *, and
+	// forward slashes /.
+	Key string
+
+	// Value is an opaque string of up to 256 printable ASCII (RFC 0020) characters (i.e., the
+	// range 0x20 to 0x7E), excluding comma (,) and equals (=).
+	Value string
+}
+
+// Entries returns a slice of Entry.
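+// Calling Entries on a nil *Tracestate is safe and returns nil.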
+func (ts *Tracestate) Entries() []Entry {
+	if ts == nil {
+		return nil
+	}
+	return ts.entries
+}
+
+func (ts *Tracestate) remove(key string) *Entry {
+	for index, entry := range ts.entries {
+		if entry.Key == key {
+			ts.entries = append(ts.entries[:index], ts.entries[index+1:]...)
+			return &entry
+		}
+	}
+	return nil
+}
+
+func (ts *Tracestate) add(entries []Entry) error {
+	for _, entry := range entries {
+		ts.remove(entry.Key)
+	}
+	if len(ts.entries)+len(entries) > maxKeyValuePairs {
+		return fmt.Errorf("adding %d key-value pairs to current %d pairs exceeds the limit of %d",
+			len(entries), len(ts.entries), maxKeyValuePairs)
+	}
+	ts.entries = append(entries, ts.entries...)
+	return nil
+}
+
+func isValid(entry Entry) bool {
+	return keyValidationRegExp.MatchString(entry.Key) &&
+		valueValidationRegExp.MatchString(entry.Value)
+}
+
+func containsDuplicateKey(entries ...Entry) (string, bool) {
+	keyMap := make(map[string]int)
+	for _, entry := range entries {
+		if _, ok := keyMap[entry.Key]; ok {
+			return entry.Key, true
+		}
+		keyMap[entry.Key] = 1
+	}
+	return "", false
+}
+
+func areEntriesValid(entries ...Entry) (*Entry, bool) {
+	for _, entry := range entries {
+		if !isValid(entry) {
+			return &entry, false
+		}
+	}
+	return nil, true
+}
+
+// New creates a Tracestate object from a parent and/or entries (key-value pairs).
+// Entries from the parent are copied if present. The entries passed to this function
+// are inserted in front of those copied from the parent. If an entry copied from the
+// parent contains the same key as one of the entries passed in, then the entry copied
+// from the parent is removed. See the add func.
+//
+// An error is returned with a nil Tracestate if
+// 1. one or more of the entries passed in are invalid.
+// 2. two or more entries in the input entries have the same key.
+// 3. the number of entries combined from the parent and the input entries exceeds maxKeyValuePairs.
+// (a duplicate entry is counted only once).
+func New(parent *Tracestate, entries ...Entry) (*Tracestate, error) {
+	if parent == nil && len(entries) == 0 {
+		return nil, nil
+	}
+	if entry, ok := areEntriesValid(entries...); !ok {
+		return nil, fmt.Errorf("key-value pair {%s, %s} is invalid", entry.Key, entry.Value)
+	}
+
+	if key, duplicate := containsDuplicateKey(entries...); duplicate {
+		return nil, fmt.Errorf("contains duplicate keys (%s)", key)
+	}
+
+	tracestate := Tracestate{}
+
+	if parent != nil && len(parent.entries) > 0 {
+		tracestate.entries = append([]Entry{}, parent.entries...)
+	}
+
+	err := tracestate.add(entries)
+	if err != nil {
+		return nil, err
+	}
+	return &tracestate, nil
}
diff --git a/vendor/go.uber.org/atomic/LICENSE.txt b/vendor/go.uber.org/atomic/LICENSE.txt
new file mode 100644
index 000000000..8765c9fbc
--- /dev/null
+++ b/vendor/go.uber.org/atomic/LICENSE.txt
@@ -0,0 +1,19 @@
+Copyright (c) 2016 Uber Technologies, Inc.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/go.uber.org/atomic/atomic.go b/vendor/go.uber.org/atomic/atomic.go new file mode 100644 index 000000000..1db6849fc --- /dev/null +++ b/vendor/go.uber.org/atomic/atomic.go @@ -0,0 +1,351 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package atomic provides simple wrappers around numerics to enforce atomic +// access. +package atomic + +import ( + "math" + "sync/atomic" + "time" +) + +// Int32 is an atomic wrapper around an int32. +type Int32 struct{ v int32 } + +// NewInt32 creates an Int32. +func NewInt32(i int32) *Int32 { + return &Int32{i} +} + +// Load atomically loads the wrapped value. +func (i *Int32) Load() int32 { + return atomic.LoadInt32(&i.v) +} + +// Add atomically adds to the wrapped int32 and returns the new value. +func (i *Int32) Add(n int32) int32 { + return atomic.AddInt32(&i.v, n) +} + +// Sub atomically subtracts from the wrapped int32 and returns the new value. +func (i *Int32) Sub(n int32) int32 { + return atomic.AddInt32(&i.v, -n) +} + +// Inc atomically increments the wrapped int32 and returns the new value. +func (i *Int32) Inc() int32 { + return i.Add(1) +} + +// Dec atomically decrements the wrapped int32 and returns the new value. +func (i *Int32) Dec() int32 { + return i.Sub(1) +} + +// CAS is an atomic compare-and-swap. +func (i *Int32) CAS(old, new int32) bool { + return atomic.CompareAndSwapInt32(&i.v, old, new) +} + +// Store atomically stores the passed value. +func (i *Int32) Store(n int32) { + atomic.StoreInt32(&i.v, n) +} + +// Swap atomically swaps the wrapped int32 and returns the old value. +func (i *Int32) Swap(n int32) int32 { + return atomic.SwapInt32(&i.v, n) +} + +// Int64 is an atomic wrapper around an int64. +type Int64 struct{ v int64 } + +// NewInt64 creates an Int64. +func NewInt64(i int64) *Int64 { + return &Int64{i} +} + +// Load atomically loads the wrapped value. 
+func (i *Int64) Load() int64 {
+	return atomic.LoadInt64(&i.v)
+}
+
+// Add atomically adds to the wrapped int64 and returns the new value.
+func (i *Int64) Add(n int64) int64 {
+	return atomic.AddInt64(&i.v, n)
+}
+
+// Sub atomically subtracts from the wrapped int64 and returns the new value.
+func (i *Int64) Sub(n int64) int64 {
+	return atomic.AddInt64(&i.v, -n)
+}
+
+// Inc atomically increments the wrapped int64 and returns the new value.
+func (i *Int64) Inc() int64 {
+	return i.Add(1)
+}
+
+// Dec atomically decrements the wrapped int64 and returns the new value.
+func (i *Int64) Dec() int64 {
+	return i.Sub(1)
+}
+
+// CAS is an atomic compare-and-swap.
+func (i *Int64) CAS(old, new int64) bool {
+	return atomic.CompareAndSwapInt64(&i.v, old, new)
+}
+
+// Store atomically stores the passed value.
+func (i *Int64) Store(n int64) {
+	atomic.StoreInt64(&i.v, n)
+}
+
+// Swap atomically swaps the wrapped int64 and returns the old value.
+func (i *Int64) Swap(n int64) int64 {
+	return atomic.SwapInt64(&i.v, n)
+}
+
+// Uint32 is an atomic wrapper around a uint32.
+type Uint32 struct{ v uint32 }
+
+// NewUint32 creates a Uint32.
+func NewUint32(i uint32) *Uint32 {
+	return &Uint32{i}
+}
+
+// Load atomically loads the wrapped value.
+func (i *Uint32) Load() uint32 {
+	return atomic.LoadUint32(&i.v)
+}
+
+// Add atomically adds to the wrapped uint32 and returns the new value.
+func (i *Uint32) Add(n uint32) uint32 {
+	return atomic.AddUint32(&i.v, n)
+}
+
+// Sub atomically subtracts from the wrapped uint32 and returns the new value.
+func (i *Uint32) Sub(n uint32) uint32 {
+	return atomic.AddUint32(&i.v, ^(n - 1))
+}
+
+// Inc atomically increments the wrapped uint32 and returns the new value.
+func (i *Uint32) Inc() uint32 {
+	return i.Add(1)
+}
+
+// Dec atomically decrements the wrapped uint32 and returns the new value.
+func (i *Uint32) Dec() uint32 {
+	return i.Sub(1)
+}
+
+// CAS is an atomic compare-and-swap.
+func (i *Uint32) CAS(old, new uint32) bool {
+	return atomic.CompareAndSwapUint32(&i.v, old, new)
+}
+
+// Store atomically stores the passed value.
+func (i *Uint32) Store(n uint32) {
+	atomic.StoreUint32(&i.v, n)
+}
+
+// Swap atomically swaps the wrapped uint32 and returns the old value.
+func (i *Uint32) Swap(n uint32) uint32 {
+	return atomic.SwapUint32(&i.v, n)
+}
+
+// Uint64 is an atomic wrapper around a uint64.
+type Uint64 struct{ v uint64 }
+
+// NewUint64 creates a Uint64.
+func NewUint64(i uint64) *Uint64 {
+	return &Uint64{i}
+}
+
+// Load atomically loads the wrapped value.
+func (i *Uint64) Load() uint64 {
+	return atomic.LoadUint64(&i.v)
+}
+
+// Add atomically adds to the wrapped uint64 and returns the new value.
+func (i *Uint64) Add(n uint64) uint64 {
+	return atomic.AddUint64(&i.v, n)
+}
+
+// Sub atomically subtracts from the wrapped uint64 and returns the new value.
+func (i *Uint64) Sub(n uint64) uint64 {
+	return atomic.AddUint64(&i.v, ^(n - 1))
+}
+
+// Inc atomically increments the wrapped uint64 and returns the new value.
+func (i *Uint64) Inc() uint64 {
+	return i.Add(1)
+}
+
+// Dec atomically decrements the wrapped uint64 and returns the new value.
+func (i *Uint64) Dec() uint64 {
+	return i.Sub(1)
+}
+
+// CAS is an atomic compare-and-swap.
+func (i *Uint64) CAS(old, new uint64) bool {
+	return atomic.CompareAndSwapUint64(&i.v, old, new)
+}
+
+// Store atomically stores the passed value.
+func (i *Uint64) Store(n uint64) {
+	atomic.StoreUint64(&i.v, n)
+}
+
+// Swap atomically swaps the wrapped uint64 and returns the old value.
+func (i *Uint64) Swap(n uint64) uint64 { + return atomic.SwapUint64(&i.v, n) +} + +// Bool is an atomic Boolean. +type Bool struct{ v uint32 } + +// NewBool creates a Bool. +func NewBool(initial bool) *Bool { + return &Bool{boolToInt(initial)} +} + +// Load atomically loads the Boolean. +func (b *Bool) Load() bool { + return truthy(atomic.LoadUint32(&b.v)) +} + +// CAS is an atomic compare-and-swap. +func (b *Bool) CAS(old, new bool) bool { + return atomic.CompareAndSwapUint32(&b.v, boolToInt(old), boolToInt(new)) +} + +// Store atomically stores the passed value. +func (b *Bool) Store(new bool) { + atomic.StoreUint32(&b.v, boolToInt(new)) +} + +// Swap sets the given value and returns the previous value. +func (b *Bool) Swap(new bool) bool { + return truthy(atomic.SwapUint32(&b.v, boolToInt(new))) +} + +// Toggle atomically negates the Boolean and returns the previous value. +func (b *Bool) Toggle() bool { + return truthy(atomic.AddUint32(&b.v, 1) - 1) +} + +func truthy(n uint32) bool { + return n&1 == 1 +} + +func boolToInt(b bool) uint32 { + if b { + return 1 + } + return 0 +} + +// Float64 is an atomic wrapper around float64. +type Float64 struct { + v uint64 +} + +// NewFloat64 creates a Float64. +func NewFloat64(f float64) *Float64 { + return &Float64{math.Float64bits(f)} +} + +// Load atomically loads the wrapped value. +func (f *Float64) Load() float64 { + return math.Float64frombits(atomic.LoadUint64(&f.v)) +} + +// Store atomically stores the passed value. +func (f *Float64) Store(s float64) { + atomic.StoreUint64(&f.v, math.Float64bits(s)) +} + +// Add atomically adds to the wrapped float64 and returns the new value. +func (f *Float64) Add(s float64) float64 { + for { + old := f.Load() + new := old + s + if f.CAS(old, new) { + return new + } + } +} + +// Sub atomically subtracts from the wrapped float64 and returns the new value. +func (f *Float64) Sub(s float64) float64 { + return f.Add(-s) +} + +// CAS is an atomic compare-and-swap. +func (f *Float64) CAS(old, new float64) bool { + return atomic.CompareAndSwapUint64(&f.v, math.Float64bits(old), math.Float64bits(new)) +} + +// Duration is an atomic wrapper around time.Duration +// https://godoc.org/time#Duration +type Duration struct { + v Int64 +} + +// NewDuration creates a Duration. +func NewDuration(d time.Duration) *Duration { + return &Duration{v: *NewInt64(int64(d))} +} + +// Load atomically loads the wrapped value. +func (d *Duration) Load() time.Duration { + return time.Duration(d.v.Load()) +} + +// Store atomically stores the passed value. +func (d *Duration) Store(n time.Duration) { + d.v.Store(int64(n)) +} + +// Add atomically adds to the wrapped time.Duration and returns the new value. +func (d *Duration) Add(n time.Duration) time.Duration { + return time.Duration(d.v.Add(int64(n))) +} + +// Sub atomically subtracts from the wrapped time.Duration and returns the new value. +func (d *Duration) Sub(n time.Duration) time.Duration { + return time.Duration(d.v.Sub(int64(n))) +} + +// Swap atomically swaps the wrapped time.Duration and returns the old value. +func (d *Duration) Swap(n time.Duration) time.Duration { + return time.Duration(d.v.Swap(int64(n))) +} + +// CAS is an atomic compare-and-swap. 
+func (d *Duration) CAS(old, new time.Duration) bool {
+	return d.v.CAS(int64(old), int64(new))
+}
+
+// Value shadows the type of the same name from sync/atomic
+// https://godoc.org/sync/atomic#Value
+type Value struct{ atomic.Value }
diff --git a/vendor/go.uber.org/atomic/error.go b/vendor/go.uber.org/atomic/error.go
new file mode 100644
index 000000000..0489d19ba
--- /dev/null
+++ b/vendor/go.uber.org/atomic/error.go
@@ -0,0 +1,55 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+// Error is an atomic type-safe wrapper around Value for errors.
+type Error struct{ v Value }
+
+// errorHolder is a non-nil holder for an error object.
+// atomic.Value panics when storing a nil object, so the error needs to be
+// wrapped in a valid holder object first.
+type errorHolder struct{ err error }
+
+// NewError creates a new atomic error object.
+func NewError(err error) *Error {
+	e := &Error{}
+	if err != nil {
+		e.Store(err)
+	}
+	return e
+}
+
+// Load atomically loads the wrapped error.
+func (e *Error) Load() error {
+	v := e.v.Load()
+	if v == nil {
+		return nil
+	}
+
+	eh := v.(errorHolder)
+	return eh.err
+}
+
+// Store atomically stores the error.
+// NOTE: a holder object is allocated on each Store call.
+func (e *Error) Store(err error) {
+	e.v.Store(errorHolder{err: err})
+}
diff --git a/vendor/go.uber.org/atomic/string.go b/vendor/go.uber.org/atomic/string.go
new file mode 100644
index 000000000..ede8136fa
--- /dev/null
+++ b/vendor/go.uber.org/atomic/string.go
@@ -0,0 +1,49 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +// String is an atomic type-safe wrapper around Value for strings. +type String struct{ v Value } + +// NewString creates a String. +func NewString(str string) *String { + s := &String{} + if str != "" { + s.Store(str) + } + return s +} + +// Load atomically loads the wrapped string. +func (s *String) Load() string { + v := s.v.Load() + if v == nil { + return "" + } + return v.(string) +} + +// Store atomically stores the passed string. +// Note: Converting the string to an interface{} to store in the Value +// requires an allocation. +func (s *String) Store(str string) { + s.v.Store(str) +} diff --git a/vendor/go.uber.org/multierr/LICENSE.txt b/vendor/go.uber.org/multierr/LICENSE.txt new file mode 100644 index 000000000..858e02475 --- /dev/null +++ b/vendor/go.uber.org/multierr/LICENSE.txt @@ -0,0 +1,19 @@ +Copyright (c) 2017 Uber Technologies, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/go.uber.org/multierr/error.go b/vendor/go.uber.org/multierr/error.go new file mode 100644 index 000000000..de6ce4736 --- /dev/null +++ b/vendor/go.uber.org/multierr/error.go @@ -0,0 +1,401 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+
+// Package multierr allows combining one or more errors together.
+//
+// Overview
+//
+// Errors can be combined with the use of the Combine function.
+//
+// 	multierr.Combine(
+// 		reader.Close(),
+// 		writer.Close(),
+// 		conn.Close(),
+// 	)
+//
+// If only two errors are being combined, the Append function may be used
+// instead.
+//
+// 	err = multierr.Append(reader.Close(), writer.Close())
+//
+// This makes it possible to record resource cleanup failures from deferred
+// blocks with the help of named return values.
+//
+// 	func sendRequest(req Request) (err error) {
+// 		conn, err := openConnection()
+// 		if err != nil {
+// 			return err
+// 		}
+// 		defer func() {
+// 			err = multierr.Append(err, conn.Close())
+// 		}()
+// 		// ...
+// 	}
+//
+// The underlying list of errors for a returned error object may be retrieved
+// with the Errors function.
+//
+// 	errors := multierr.Errors(err)
+// 	if len(errors) > 0 {
+// 		fmt.Println("The following errors occurred:")
+// 	}
+//
+// Advanced Usage
+//
+// Errors returned by Combine and Append MAY implement the following
+// interface.
+//
+// 	type errorGroup interface {
+// 		// Returns a slice containing the underlying list of errors.
+// 		//
+// 		// This slice MUST NOT be modified by the caller.
+// 		Errors() []error
+// 	}
+//
+// Note that if you need access to the list of errors behind a multierr error,
+// you should prefer using the Errors function. That said, if you need cheap
+// read-only access to the underlying errors slice, you can attempt to cast
+// the error to this interface. You MUST handle the failure case gracefully
+// because errors returned by Combine and Append are not guaranteed to
+// implement this interface.
+//
+// 	var errors []error
+// 	group, ok := err.(errorGroup)
+// 	if ok {
+// 		errors = group.Errors()
+// 	} else {
+// 		errors = []error{err}
+// 	}
+package multierr // import "go.uber.org/multierr"
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"strings"
+	"sync"
+
+	"go.uber.org/atomic"
+)
+
+var (
+	// Separator for single-line error messages.
+	_singlelineSeparator = []byte("; ")
+
+	_newline = []byte("\n")
+
+	// Prefix for multi-line messages
+	_multilinePrefix = []byte("the following errors occurred:")
+
+	// Prefix for the first and following lines of an item in a list of
+	// multi-line error messages.
+	//
+	// For example, if a single item is:
+	//
+	// 	foo
+	// 	bar
+	//
+	// It will become,
+	//
+	// 	- foo
+	// 	  bar
+	_multilineSeparator = []byte("\n - ")
+	_multilineIndent    = []byte("   ")
+)
+
+// _bufferPool is a pool of bytes.Buffers.
+var _bufferPool = sync.Pool{
+	New: func() interface{} {
+		return &bytes.Buffer{}
+	},
+}
+
+type errorGroup interface {
+	Errors() []error
+}
+
+// Errors returns a slice containing zero or more errors that the supplied
+// error is composed of. If the error is nil, the returned slice is empty.
+//
+// 	err := multierr.Append(r.Close(), w.Close())
+// 	errors := multierr.Errors(err)
+//
+// If the error is not composed of other errors, the returned slice contains
+// just the error that was passed in.
+//
+// Callers of this function are free to modify the returned slice.
+func Errors(err error) []error {
+	if err == nil {
+		return nil
+	}
+
+	// Note that we're casting to multiError, not errorGroup. Our contract is
+	// that returned errors MAY implement errorGroup. Errors, however, only
+	// has special behavior for multierr-specific error objects.
+ // + // This behavior can be expanded in the future but I think it's prudent to + // start with as little as possible in terms of contract and possibility + // of misuse. + eg, ok := err.(*multiError) + if !ok { + return []error{err} + } + + errors := eg.Errors() + result := make([]error, len(errors)) + copy(result, errors) + return result +} + +// multiError is an error that holds one or more errors. +// +// An instance of this is guaranteed to be non-empty and flattened. That is, +// none of the errors inside multiError are other multiErrors. +// +// multiError formats to a semi-colon delimited list of error messages with +// %v and with a more readable multi-line format with %+v. +type multiError struct { + copyNeeded atomic.Bool + errors []error +} + +var _ errorGroup = (*multiError)(nil) + +// Errors returns the list of underlying errors. +// +// This slice MUST NOT be modified. +func (merr *multiError) Errors() []error { + if merr == nil { + return nil + } + return merr.errors +} + +func (merr *multiError) Error() string { + if merr == nil { + return "" + } + + buff := _bufferPool.Get().(*bytes.Buffer) + buff.Reset() + + merr.writeSingleline(buff) + + result := buff.String() + _bufferPool.Put(buff) + return result +} + +func (merr *multiError) Format(f fmt.State, c rune) { + if c == 'v' && f.Flag('+') { + merr.writeMultiline(f) + } else { + merr.writeSingleline(f) + } +} + +func (merr *multiError) writeSingleline(w io.Writer) { + first := true + for _, item := range merr.errors { + if first { + first = false + } else { + w.Write(_singlelineSeparator) + } + io.WriteString(w, item.Error()) + } +} + +func (merr *multiError) writeMultiline(w io.Writer) { + w.Write(_multilinePrefix) + for _, item := range merr.errors { + w.Write(_multilineSeparator) + writePrefixLine(w, _multilineIndent, fmt.Sprintf("%+v", item)) + } +} + +// Writes s to the writer with the given prefix added before each line after +// the first. +func writePrefixLine(w io.Writer, prefix []byte, s string) { + first := true + for len(s) > 0 { + if first { + first = false + } else { + w.Write(prefix) + } + + idx := strings.IndexByte(s, '\n') + if idx < 0 { + idx = len(s) - 1 + } + + io.WriteString(w, s[:idx+1]) + s = s[idx+1:] + } +} + +type inspectResult struct { + // Number of top-level non-nil errors + Count int + + // Total number of errors including multiErrors + Capacity int + + // Index of the first non-nil error in the list. Value is meaningless if + // Count is zero. + FirstErrorIdx int + + // Whether the list contains at least one multiError + ContainsMultiError bool +} + +// Inspects the given slice of errors so that we can efficiently allocate +// space for it. +func inspect(errors []error) (res inspectResult) { + first := true + for i, err := range errors { + if err == nil { + continue + } + + res.Count++ + if first { + first = false + res.FirstErrorIdx = i + } + + if merr, ok := err.(*multiError); ok { + res.Capacity += len(merr.errors) + res.ContainsMultiError = true + } else { + res.Capacity++ + } + } + return +} + +// fromSlice converts the given list of errors into a single error. 
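+// It short-circuits the common cases: nil when every entry is nil, the
+// error itself when exactly one entry is non-nil, and the slice wrapped
+// as-is when it is already flat; otherwise it allocates a single flattened
+// slice of the non-nil errors.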
+func fromSlice(errors []error) error { + res := inspect(errors) + switch res.Count { + case 0: + return nil + case 1: + // only one non-nil entry + return errors[res.FirstErrorIdx] + case len(errors): + if !res.ContainsMultiError { + // already flat + return &multiError{errors: errors} + } + } + + nonNilErrs := make([]error, 0, res.Capacity) + for _, err := range errors[res.FirstErrorIdx:] { + if err == nil { + continue + } + + if nested, ok := err.(*multiError); ok { + nonNilErrs = append(nonNilErrs, nested.errors...) + } else { + nonNilErrs = append(nonNilErrs, err) + } + } + + return &multiError{errors: nonNilErrs} +} + +// Combine combines the passed errors into a single error. +// +// If zero arguments were passed or if all items are nil, a nil error is +// returned. +// +// Combine(nil, nil) // == nil +// +// If only a single error was passed, it is returned as-is. +// +// Combine(err) // == err +// +// Combine skips over nil arguments so this function may be used to combine +// together errors from operations that fail independently of each other. +// +// multierr.Combine( +// reader.Close(), +// writer.Close(), +// pipe.Close(), +// ) +// +// If any of the passed errors is a multierr error, it will be flattened along +// with the other errors. +// +// multierr.Combine(multierr.Combine(err1, err2), err3) +// // is the same as +// multierr.Combine(err1, err2, err3) +// +// The returned error formats into a readable multi-line error message if +// formatted with %+v. +// +// fmt.Sprintf("%+v", multierr.Combine(err1, err2)) +func Combine(errors ...error) error { + return fromSlice(errors) +} + +// Append appends the given errors together. Either value may be nil. +// +// This function is a specialization of Combine for the common case where +// there are only two errors. +// +// err = multierr.Append(reader.Close(), writer.Close()) +// +// The following pattern may also be used to record failure of deferred +// operations without losing information about the original error. +// +// func doSomething(..) (err error) { +// f := acquireResource() +// defer func() { +// err = multierr.Append(err, f.Close()) +// }() +func Append(left error, right error) error { + switch { + case left == nil: + return right + case right == nil: + return left + } + + if _, ok := right.(*multiError); !ok { + if l, ok := left.(*multiError); ok && !l.copyNeeded.Swap(true) { + // Common case where the error on the left is constantly being + // appended to. + errs := append(l.errors, right) + return &multiError{errors: errs} + } else if !ok { + // Both errors are single errors. + return &multiError{errors: []error{left, right}} + } + } + + // Either right or both, left and right, are multiErrors. Rely on usual + // expensive logic. + errors := [2]error{left, right} + return fromSlice(errors[0:]) +} diff --git a/vendor/go.uber.org/zap/LICENSE.txt b/vendor/go.uber.org/zap/LICENSE.txt new file mode 100644 index 000000000..6652bed45 --- /dev/null +++ b/vendor/go.uber.org/zap/LICENSE.txt @@ -0,0 +1,19 @@ +Copyright (c) 2016-2017 Uber Technologies, Inc. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/go.uber.org/zap/array.go b/vendor/go.uber.org/zap/array.go new file mode 100644 index 000000000..5be3704a3 --- /dev/null +++ b/vendor/go.uber.org/zap/array.go @@ -0,0 +1,320 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "time" + + "go.uber.org/zap/zapcore" +) + +// Array constructs a field with the given key and ArrayMarshaler. It provides +// a flexible, but still type-safe and efficient, way to add array-like types +// to the logging context. The struct's MarshalLogArray method is called lazily. +func Array(key string, val zapcore.ArrayMarshaler) Field { + return Field{Key: key, Type: zapcore.ArrayMarshalerType, Interface: val} +} + +// Bools constructs a field that carries a slice of bools. +func Bools(key string, bs []bool) Field { + return Array(key, bools(bs)) +} + +// ByteStrings constructs a field that carries a slice of []byte, each of which +// must be UTF-8 encoded text. +func ByteStrings(key string, bss [][]byte) Field { + return Array(key, byteStringsArray(bss)) +} + +// Complex128s constructs a field that carries a slice of complex numbers. +func Complex128s(key string, nums []complex128) Field { + return Array(key, complex128s(nums)) +} + +// Complex64s constructs a field that carries a slice of complex numbers. 
+func Complex64s(key string, nums []complex64) Field { + return Array(key, complex64s(nums)) +} + +// Durations constructs a field that carries a slice of time.Durations. +func Durations(key string, ds []time.Duration) Field { + return Array(key, durations(ds)) +} + +// Float64s constructs a field that carries a slice of floats. +func Float64s(key string, nums []float64) Field { + return Array(key, float64s(nums)) +} + +// Float32s constructs a field that carries a slice of floats. +func Float32s(key string, nums []float32) Field { + return Array(key, float32s(nums)) +} + +// Ints constructs a field that carries a slice of integers. +func Ints(key string, nums []int) Field { + return Array(key, ints(nums)) +} + +// Int64s constructs a field that carries a slice of integers. +func Int64s(key string, nums []int64) Field { + return Array(key, int64s(nums)) +} + +// Int32s constructs a field that carries a slice of integers. +func Int32s(key string, nums []int32) Field { + return Array(key, int32s(nums)) +} + +// Int16s constructs a field that carries a slice of integers. +func Int16s(key string, nums []int16) Field { + return Array(key, int16s(nums)) +} + +// Int8s constructs a field that carries a slice of integers. +func Int8s(key string, nums []int8) Field { + return Array(key, int8s(nums)) +} + +// Strings constructs a field that carries a slice of strings. +func Strings(key string, ss []string) Field { + return Array(key, stringArray(ss)) +} + +// Times constructs a field that carries a slice of time.Times. +func Times(key string, ts []time.Time) Field { + return Array(key, times(ts)) +} + +// Uints constructs a field that carries a slice of unsigned integers. +func Uints(key string, nums []uint) Field { + return Array(key, uints(nums)) +} + +// Uint64s constructs a field that carries a slice of unsigned integers. +func Uint64s(key string, nums []uint64) Field { + return Array(key, uint64s(nums)) +} + +// Uint32s constructs a field that carries a slice of unsigned integers. +func Uint32s(key string, nums []uint32) Field { + return Array(key, uint32s(nums)) +} + +// Uint16s constructs a field that carries a slice of unsigned integers. +func Uint16s(key string, nums []uint16) Field { + return Array(key, uint16s(nums)) +} + +// Uint8s constructs a field that carries a slice of unsigned integers. +func Uint8s(key string, nums []uint8) Field { + return Array(key, uint8s(nums)) +} + +// Uintptrs constructs a field that carries a slice of pointer addresses. +func Uintptrs(key string, us []uintptr) Field { + return Array(key, uintptrs(us)) +} + +// Errors constructs a field that carries a slice of errors. 
+func Errors(key string, errs []error) Field { + return Array(key, errArray(errs)) +} + +type bools []bool + +func (bs bools) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range bs { + arr.AppendBool(bs[i]) + } + return nil +} + +type byteStringsArray [][]byte + +func (bss byteStringsArray) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range bss { + arr.AppendByteString(bss[i]) + } + return nil +} + +type complex128s []complex128 + +func (nums complex128s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendComplex128(nums[i]) + } + return nil +} + +type complex64s []complex64 + +func (nums complex64s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendComplex64(nums[i]) + } + return nil +} + +type durations []time.Duration + +func (ds durations) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range ds { + arr.AppendDuration(ds[i]) + } + return nil +} + +type float64s []float64 + +func (nums float64s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendFloat64(nums[i]) + } + return nil +} + +type float32s []float32 + +func (nums float32s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendFloat32(nums[i]) + } + return nil +} + +type ints []int + +func (nums ints) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendInt(nums[i]) + } + return nil +} + +type int64s []int64 + +func (nums int64s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendInt64(nums[i]) + } + return nil +} + +type int32s []int32 + +func (nums int32s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendInt32(nums[i]) + } + return nil +} + +type int16s []int16 + +func (nums int16s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendInt16(nums[i]) + } + return nil +} + +type int8s []int8 + +func (nums int8s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendInt8(nums[i]) + } + return nil +} + +type stringArray []string + +func (ss stringArray) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range ss { + arr.AppendString(ss[i]) + } + return nil +} + +type times []time.Time + +func (ts times) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range ts { + arr.AppendTime(ts[i]) + } + return nil +} + +type uints []uint + +func (nums uints) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendUint(nums[i]) + } + return nil +} + +type uint64s []uint64 + +func (nums uint64s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendUint64(nums[i]) + } + return nil +} + +type uint32s []uint32 + +func (nums uint32s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendUint32(nums[i]) + } + return nil +} + +type uint16s []uint16 + +func (nums uint16s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendUint16(nums[i]) + } + return nil +} + +type uint8s []uint8 + +func (nums uint8s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendUint8(nums[i]) + } + return nil +} + +type uintptrs []uintptr + +func (nums uintptrs) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendUintptr(nums[i]) + } + return nil +} diff --git a/vendor/go.uber.org/zap/buffer/buffer.go 
b/vendor/go.uber.org/zap/buffer/buffer.go new file mode 100644 index 000000000..7592e8c63 --- /dev/null +++ b/vendor/go.uber.org/zap/buffer/buffer.go @@ -0,0 +1,115 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package buffer provides a thin wrapper around a byte slice. Unlike the +// standard library's bytes.Buffer, it supports a portion of the strconv +// package's zero-allocation formatters. +package buffer // import "go.uber.org/zap/buffer" + +import "strconv" + +const _size = 1024 // by default, create 1 KiB buffers + +// Buffer is a thin wrapper around a byte slice. It's intended to be pooled, so +// the only way to construct one is via a Pool. +type Buffer struct { + bs []byte + pool Pool +} + +// AppendByte writes a single byte to the Buffer. +func (b *Buffer) AppendByte(v byte) { + b.bs = append(b.bs, v) +} + +// AppendString writes a string to the Buffer. +func (b *Buffer) AppendString(s string) { + b.bs = append(b.bs, s...) +} + +// AppendInt appends an integer to the underlying buffer (assuming base 10). +func (b *Buffer) AppendInt(i int64) { + b.bs = strconv.AppendInt(b.bs, i, 10) +} + +// AppendUint appends an unsigned integer to the underlying buffer (assuming +// base 10). +func (b *Buffer) AppendUint(i uint64) { + b.bs = strconv.AppendUint(b.bs, i, 10) +} + +// AppendBool appends a bool to the underlying buffer. +func (b *Buffer) AppendBool(v bool) { + b.bs = strconv.AppendBool(b.bs, v) +} + +// AppendFloat appends a float to the underlying buffer. It doesn't quote NaN +// or +/- Inf. +func (b *Buffer) AppendFloat(f float64, bitSize int) { + b.bs = strconv.AppendFloat(b.bs, f, 'f', -1, bitSize) +} + +// Len returns the length of the underlying byte slice. +func (b *Buffer) Len() int { + return len(b.bs) +} + +// Cap returns the capacity of the underlying byte slice. +func (b *Buffer) Cap() int { + return cap(b.bs) +} + +// Bytes returns a mutable reference to the underlying byte slice. +func (b *Buffer) Bytes() []byte { + return b.bs +} + +// String returns a string copy of the underlying byte slice. +func (b *Buffer) String() string { + return string(b.bs) +} + +// Reset resets the underlying byte slice. Subsequent writes re-use the slice's +// backing array. +func (b *Buffer) Reset() { + b.bs = b.bs[:0] +} + +// Write implements io.Writer. +func (b *Buffer) Write(bs []byte) (int, error) { + b.bs = append(b.bs, bs...) 
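+	// Appending to the underlying slice cannot fail, so Write reports the
+	// full input length with a nil error, satisfying io.Writer.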
+ return len(bs), nil +} + +// TrimNewline trims any final "\n" byte from the end of the buffer. +func (b *Buffer) TrimNewline() { + if i := len(b.bs) - 1; i >= 0 { + if b.bs[i] == '\n' { + b.bs = b.bs[:i] + } + } +} + +// Free returns the Buffer to its Pool. +// +// Callers must not retain references to the Buffer after calling Free. +func (b *Buffer) Free() { + b.pool.put(b) +} diff --git a/vendor/go.uber.org/zap/buffer/pool.go b/vendor/go.uber.org/zap/buffer/pool.go new file mode 100644 index 000000000..8fb3e202c --- /dev/null +++ b/vendor/go.uber.org/zap/buffer/pool.go @@ -0,0 +1,49 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package buffer + +import "sync" + +// A Pool is a type-safe wrapper around a sync.Pool. +type Pool struct { + p *sync.Pool +} + +// NewPool constructs a new Pool. +func NewPool() Pool { + return Pool{p: &sync.Pool{ + New: func() interface{} { + return &Buffer{bs: make([]byte, 0, _size)} + }, + }} +} + +// Get retrieves a Buffer from the pool, creating one if necessary. +func (p Pool) Get() *Buffer { + buf := p.p.Get().(*Buffer) + buf.Reset() + buf.pool = p + return buf +} + +func (p Pool) put(buf *Buffer) { + p.p.Put(buf) +} diff --git a/vendor/go.uber.org/zap/config.go b/vendor/go.uber.org/zap/config.go new file mode 100644 index 000000000..6fe17d9e0 --- /dev/null +++ b/vendor/go.uber.org/zap/config.go @@ -0,0 +1,243 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "sort" + "time" + + "go.uber.org/zap/zapcore" +) + +// SamplingConfig sets a sampling strategy for the logger. Sampling caps the +// global CPU and I/O load that logging puts on your process while attempting +// to preserve a representative subset of your logs. +// +// Values configured here are per-second. See zapcore.NewSampler for details. +type SamplingConfig struct { + Initial int `json:"initial" yaml:"initial"` + Thereafter int `json:"thereafter" yaml:"thereafter"` +} + +// Config offers a declarative way to construct a logger. It doesn't do +// anything that can't be done with New, Options, and the various +// zapcore.WriteSyncer and zapcore.Core wrappers, but it's a simpler way to +// toggle common options. +// +// Note that Config intentionally supports only the most common options. More +// unusual logging setups (logging to network connections or message queues, +// splitting output between multiple files, etc.) are possible, but require +// direct use of the zapcore package. For sample code, see the package-level +// BasicConfiguration and AdvancedConfiguration examples. +// +// For an example showing runtime log level changes, see the documentation for +// AtomicLevel. +type Config struct { + // Level is the minimum enabled logging level. Note that this is a dynamic + // level, so calling Config.Level.SetLevel will atomically change the log + // level of all loggers descended from this config. + Level AtomicLevel `json:"level" yaml:"level"` + // Development puts the logger in development mode, which changes the + // behavior of DPanicLevel and takes stacktraces more liberally. + Development bool `json:"development" yaml:"development"` + // DisableCaller stops annotating logs with the calling function's file + // name and line number. By default, all logs are annotated. + DisableCaller bool `json:"disableCaller" yaml:"disableCaller"` + // DisableStacktrace completely disables automatic stacktrace capturing. By + // default, stacktraces are captured for WarnLevel and above logs in + // development and ErrorLevel and above in production. + DisableStacktrace bool `json:"disableStacktrace" yaml:"disableStacktrace"` + // Sampling sets a sampling policy. A nil SamplingConfig disables sampling. + Sampling *SamplingConfig `json:"sampling" yaml:"sampling"` + // Encoding sets the logger's encoding. Valid values are "json" and + // "console", as well as any third-party encodings registered via + // RegisterEncoder. + Encoding string `json:"encoding" yaml:"encoding"` + // EncoderConfig sets options for the chosen encoder. See + // zapcore.EncoderConfig for details. + EncoderConfig zapcore.EncoderConfig `json:"encoderConfig" yaml:"encoderConfig"` + // OutputPaths is a list of URLs or file paths to write logging output to. + // See Open for details. + OutputPaths []string `json:"outputPaths" yaml:"outputPaths"` + // ErrorOutputPaths is a list of URLs to write internal logger errors to. + // The default is standard error. + // + // Note that this setting only affects internal errors; for sample code that + // sends error-level logs to a different location from info- and debug-level + // logs, see the package-level AdvancedConfiguration example. 
+ ErrorOutputPaths []string `json:"errorOutputPaths" yaml:"errorOutputPaths"` + // InitialFields is a collection of fields to add to the root logger. + InitialFields map[string]interface{} `json:"initialFields" yaml:"initialFields"` +} + +// NewProductionEncoderConfig returns an opinionated EncoderConfig for +// production environments. +func NewProductionEncoderConfig() zapcore.EncoderConfig { + return zapcore.EncoderConfig{ + TimeKey: "ts", + LevelKey: "level", + NameKey: "logger", + CallerKey: "caller", + MessageKey: "msg", + StacktraceKey: "stacktrace", + LineEnding: zapcore.DefaultLineEnding, + EncodeLevel: zapcore.LowercaseLevelEncoder, + EncodeTime: zapcore.EpochTimeEncoder, + EncodeDuration: zapcore.SecondsDurationEncoder, + EncodeCaller: zapcore.ShortCallerEncoder, + } +} + +// NewProductionConfig is a reasonable production logging configuration. +// Logging is enabled at InfoLevel and above. +// +// It uses a JSON encoder, writes to standard error, and enables sampling. +// Stacktraces are automatically included on logs of ErrorLevel and above. +func NewProductionConfig() Config { + return Config{ + Level: NewAtomicLevelAt(InfoLevel), + Development: false, + Sampling: &SamplingConfig{ + Initial: 100, + Thereafter: 100, + }, + Encoding: "json", + EncoderConfig: NewProductionEncoderConfig(), + OutputPaths: []string{"stderr"}, + ErrorOutputPaths: []string{"stderr"}, + } +} + +// NewDevelopmentEncoderConfig returns an opinionated EncoderConfig for +// development environments. +func NewDevelopmentEncoderConfig() zapcore.EncoderConfig { + return zapcore.EncoderConfig{ + // Keys can be anything except the empty string. + TimeKey: "T", + LevelKey: "L", + NameKey: "N", + CallerKey: "C", + MessageKey: "M", + StacktraceKey: "S", + LineEnding: zapcore.DefaultLineEnding, + EncodeLevel: zapcore.CapitalLevelEncoder, + EncodeTime: zapcore.ISO8601TimeEncoder, + EncodeDuration: zapcore.StringDurationEncoder, + EncodeCaller: zapcore.ShortCallerEncoder, + } +} + +// NewDevelopmentConfig is a reasonable development logging configuration. +// Logging is enabled at DebugLevel and above. +// +// It enables development mode (which makes DPanicLevel logs panic), uses a +// console encoder, writes to standard error, and disables sampling. +// Stacktraces are automatically included on logs of WarnLevel and above. +func NewDevelopmentConfig() Config { + return Config{ + Level: NewAtomicLevelAt(DebugLevel), + Development: true, + Encoding: "console", + EncoderConfig: NewDevelopmentEncoderConfig(), + OutputPaths: []string{"stderr"}, + ErrorOutputPaths: []string{"stderr"}, + } +} + +// Build constructs a logger from the Config and Options. +func (cfg Config) Build(opts ...Option) (*Logger, error) { + enc, err := cfg.buildEncoder() + if err != nil { + return nil, err + } + + sink, errSink, err := cfg.openSinks() + if err != nil { + return nil, err + } + + log := New( + zapcore.NewCore(enc, sink, cfg.Level), + cfg.buildOptions(errSink)..., + ) + if len(opts) > 0 { + log = log.WithOptions(opts...) 
+ } + return log, nil +} + +func (cfg Config) buildOptions(errSink zapcore.WriteSyncer) []Option { + opts := []Option{ErrorOutput(errSink)} + + if cfg.Development { + opts = append(opts, Development()) + } + + if !cfg.DisableCaller { + opts = append(opts, AddCaller()) + } + + stackLevel := ErrorLevel + if cfg.Development { + stackLevel = WarnLevel + } + if !cfg.DisableStacktrace { + opts = append(opts, AddStacktrace(stackLevel)) + } + + if cfg.Sampling != nil { + opts = append(opts, WrapCore(func(core zapcore.Core) zapcore.Core { + return zapcore.NewSampler(core, time.Second, int(cfg.Sampling.Initial), int(cfg.Sampling.Thereafter)) + })) + } + + if len(cfg.InitialFields) > 0 { + fs := make([]Field, 0, len(cfg.InitialFields)) + keys := make([]string, 0, len(cfg.InitialFields)) + for k := range cfg.InitialFields { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + fs = append(fs, Any(k, cfg.InitialFields[k])) + } + opts = append(opts, Fields(fs...)) + } + + return opts +} + +func (cfg Config) openSinks() (zapcore.WriteSyncer, zapcore.WriteSyncer, error) { + sink, closeOut, err := Open(cfg.OutputPaths...) + if err != nil { + return nil, nil, err + } + errSink, _, err := Open(cfg.ErrorOutputPaths...) + if err != nil { + closeOut() + return nil, nil, err + } + return sink, errSink, nil +} + +func (cfg Config) buildEncoder() (zapcore.Encoder, error) { + return newEncoder(cfg.Encoding, cfg.EncoderConfig) +} diff --git a/vendor/go.uber.org/zap/doc.go b/vendor/go.uber.org/zap/doc.go new file mode 100644 index 000000000..8638dd1b9 --- /dev/null +++ b/vendor/go.uber.org/zap/doc.go @@ -0,0 +1,113 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package zap provides fast, structured, leveled logging. +// +// For applications that log in the hot path, reflection-based serialization +// and string formatting are prohibitively expensive - they're CPU-intensive +// and make many small allocations. Put differently, using json.Marshal and +// fmt.Fprintf to log tons of interface{} makes your application slow. +// +// Zap takes a different approach. It includes a reflection-free, +// zero-allocation JSON encoder, and the base Logger strives to avoid +// serialization overhead and allocations wherever possible. By building the +// high-level SugaredLogger on that foundation, zap lets users choose when +// they need to count every allocation and when they'd prefer a more familiar, +// loosely typed API. 
+// +// Choosing a Logger +// +// In contexts where performance is nice, but not critical, use the +// SugaredLogger. It's 4-10x faster than other structured logging packages and +// supports both structured and printf-style logging. Like log15 and go-kit, +// the SugaredLogger's structured logging APIs are loosely typed and accept a +// variadic number of key-value pairs. (For more advanced use cases, they also +// accept strongly typed fields - see the SugaredLogger.With documentation for +// details.) +// sugar := zap.NewExample().Sugar() +// defer sugar.Sync() +// sugar.Infow("failed to fetch URL", +// "url", "http://example.com", +// "attempt", 3, +// "backoff", time.Second, +// ) +// sugar.Infof("failed to fetch URL: %s", "http://example.com") +// +// By default, loggers are unbuffered. However, since zap's low-level APIs +// allow buffering, calling Sync before letting your process exit is a good +// habit. +// +// In the rare contexts where every microsecond and every allocation matter, +// use the Logger. It's even faster than the SugaredLogger and allocates far +// less, but it only supports strongly-typed, structured logging. +// logger := zap.NewExample() +// defer logger.Sync() +// logger.Info("failed to fetch URL", +// zap.String("url", "http://example.com"), +// zap.Int("attempt", 3), +// zap.Duration("backoff", time.Second), +// ) +// +// Choosing between the Logger and SugaredLogger doesn't need to be an +// application-wide decision: converting between the two is simple and +// inexpensive. +// logger := zap.NewExample() +// defer logger.Sync() +// sugar := logger.Sugar() +// plain := sugar.Desugar() +// +// Configuring Zap +// +// The simplest way to build a Logger is to use zap's opinionated presets: +// NewExample, NewProduction, and NewDevelopment. These presets build a logger +// with a single function call: +// logger, err := zap.NewProduction() +// if err != nil { +// log.Fatalf("can't initialize zap logger: %v", err) +// } +// defer logger.Sync() +// +// Presets are fine for small projects, but larger projects and organizations +// naturally require a bit more customization. For most users, zap's Config +// struct strikes the right balance between flexibility and convenience. See +// the package-level BasicConfiguration example for sample code. +// +// More unusual configurations (splitting output between files, sending logs +// to a message queue, etc.) are possible, but require direct use of +// go.uber.org/zap/zapcore. See the package-level AdvancedConfiguration +// example for sample code. +// +// Extending Zap +// +// The zap package itself is a relatively thin wrapper around the interfaces +// in go.uber.org/zap/zapcore. Extending zap to support a new encoding (e.g., +// BSON), a new log sink (e.g., Kafka), or something more exotic (perhaps an +// exception aggregation service, like Sentry or Rollbar) typically requires +// implementing the zapcore.Encoder, zapcore.WriteSyncer, or zapcore.Core +// interfaces. See the zapcore documentation for details. +// +// Similarly, package authors can use the high-performance Encoder and Core +// implementations in the zapcore package to build their own loggers. +// +// Frequently Asked Questions +// +// An FAQ covering everything from installation errors to design decisions is +// available at https://github.com/uber-go/zap/blob/master/FAQ.md. 
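+//
+// As one further, purely illustrative sketch (not one of the package's
+// testable examples), a preset Config can be customized before building:
+//  cfg := zap.NewProductionConfig()
+//  cfg.OutputPaths = []string{"stdout"}
+//  logger, err := cfg.Build()
+//  if err != nil {
+//  	log.Fatalf("can't build logger: %v", err)
+//  }
+//  defer logger.Sync()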
+package zap // import "go.uber.org/zap" diff --git a/vendor/go.uber.org/zap/encoder.go b/vendor/go.uber.org/zap/encoder.go new file mode 100644 index 000000000..2e9d3c341 --- /dev/null +++ b/vendor/go.uber.org/zap/encoder.go @@ -0,0 +1,75 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "errors" + "fmt" + "sync" + + "go.uber.org/zap/zapcore" +) + +var ( + errNoEncoderNameSpecified = errors.New("no encoder name specified") + + _encoderNameToConstructor = map[string]func(zapcore.EncoderConfig) (zapcore.Encoder, error){ + "console": func(encoderConfig zapcore.EncoderConfig) (zapcore.Encoder, error) { + return zapcore.NewConsoleEncoder(encoderConfig), nil + }, + "json": func(encoderConfig zapcore.EncoderConfig) (zapcore.Encoder, error) { + return zapcore.NewJSONEncoder(encoderConfig), nil + }, + } + _encoderMutex sync.RWMutex +) + +// RegisterEncoder registers an encoder constructor, which the Config struct +// can then reference. By default, the "json" and "console" encoders are +// registered. +// +// Attempting to register an encoder whose name is already taken returns an +// error. +func RegisterEncoder(name string, constructor func(zapcore.EncoderConfig) (zapcore.Encoder, error)) error { + _encoderMutex.Lock() + defer _encoderMutex.Unlock() + if name == "" { + return errNoEncoderNameSpecified + } + if _, ok := _encoderNameToConstructor[name]; ok { + return fmt.Errorf("encoder already registered for name %q", name) + } + _encoderNameToConstructor[name] = constructor + return nil +} + +func newEncoder(name string, encoderConfig zapcore.EncoderConfig) (zapcore.Encoder, error) { + _encoderMutex.RLock() + defer _encoderMutex.RUnlock() + if name == "" { + return nil, errNoEncoderNameSpecified + } + constructor, ok := _encoderNameToConstructor[name] + if !ok { + return nil, fmt.Errorf("no encoder registered for name %q", name) + } + return constructor(encoderConfig) +} diff --git a/vendor/go.uber.org/zap/error.go b/vendor/go.uber.org/zap/error.go new file mode 100644 index 000000000..65982a51e --- /dev/null +++ b/vendor/go.uber.org/zap/error.go @@ -0,0 +1,80 @@ +// Copyright (c) 2017 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "sync" + + "go.uber.org/zap/zapcore" +) + +var _errArrayElemPool = sync.Pool{New: func() interface{} { + return &errArrayElem{} +}} + +// Error is shorthand for the common idiom NamedError("error", err). +func Error(err error) Field { + return NamedError("error", err) +} + +// NamedError constructs a field that lazily stores err.Error() under the +// provided key. Errors which also implement fmt.Formatter (like those produced +// by github.com/pkg/errors) will also have their verbose representation stored +// under key+"Verbose". If passed a nil error, the field is a no-op. +// +// For the common case in which the key is simply "error", the Error function +// is shorter and less repetitive. +func NamedError(key string, err error) Field { + if err == nil { + return Skip() + } + return Field{Key: key, Type: zapcore.ErrorType, Interface: err} +} + +type errArray []error + +func (errs errArray) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range errs { + if errs[i] == nil { + continue + } + // To represent each error as an object with an "error" attribute and + // potentially an "errorVerbose" attribute, we need to wrap it in a + // type that implements LogObjectMarshaler. To prevent this from + // allocating, pool the wrapper type. + elem := _errArrayElemPool.Get().(*errArrayElem) + elem.error = errs[i] + arr.AppendObject(elem) + elem.error = nil + _errArrayElemPool.Put(elem) + } + return nil +} + +type errArrayElem struct { + error +} + +func (e *errArrayElem) MarshalLogObject(enc zapcore.ObjectEncoder) error { + // Re-use the error field's logic, which supports non-standard error types. + Error(e.error).AddTo(enc) + return nil +} diff --git a/vendor/go.uber.org/zap/field.go b/vendor/go.uber.org/zap/field.go new file mode 100644 index 000000000..5130e1347 --- /dev/null +++ b/vendor/go.uber.org/zap/field.go @@ -0,0 +1,310 @@ +// Copyright (c) 2016 Uber Technologies, Inc. 
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zap
+
+import (
+	"fmt"
+	"math"
+	"time"
+
+	"go.uber.org/zap/zapcore"
+)
+
+// Field is an alias for zapcore.Field. Aliasing this type dramatically
+// improves the navigability of this package's API documentation.
+type Field = zapcore.Field
+
+// Skip constructs a no-op field, which is often useful when handling invalid
+// inputs in other Field constructors.
+func Skip() Field {
+	return Field{Type: zapcore.SkipType}
+}
+
+// Binary constructs a field that carries an opaque binary blob.
+//
+// Binary data is serialized in an encoding-appropriate format. For example,
+// zap's JSON encoder base64-encodes binary blobs. To log UTF-8 encoded text,
+// use ByteString.
+func Binary(key string, val []byte) Field {
+	return Field{Key: key, Type: zapcore.BinaryType, Interface: val}
+}
+
+// Bool constructs a field that carries a bool.
+func Bool(key string, val bool) Field {
+	var ival int64
+	if val {
+		ival = 1
+	}
+	return Field{Key: key, Type: zapcore.BoolType, Integer: ival}
+}
+
+// ByteString constructs a field that carries UTF-8 encoded text as a []byte.
+// To log opaque binary blobs (which aren't necessarily valid UTF-8), use
+// Binary.
+func ByteString(key string, val []byte) Field {
+	return Field{Key: key, Type: zapcore.ByteStringType, Interface: val}
+}
+
+// Complex128 constructs a field that carries a complex number. Unlike most
+// numeric fields, this costs an allocation (to convert the complex128 to
+// interface{}).
+func Complex128(key string, val complex128) Field {
+	return Field{Key: key, Type: zapcore.Complex128Type, Interface: val}
+}
+
+// Complex64 constructs a field that carries a complex number. Unlike most
+// numeric fields, this costs an allocation (to convert the complex64 to
+// interface{}).
+func Complex64(key string, val complex64) Field {
+	return Field{Key: key, Type: zapcore.Complex64Type, Interface: val}
+}
+
+// Float64 constructs a field that carries a float64. The way the
+// floating-point value is represented is encoder-dependent, so marshaling is
+// necessarily lazy.
+func Float64(key string, val float64) Field {
+	return Field{Key: key, Type: zapcore.Float64Type, Integer: int64(math.Float64bits(val))}
+}
+
+// Float32 constructs a field that carries a float32. The way the
+// floating-point value is represented is encoder-dependent, so marshaling is
+// necessarily lazy.
+func Float32(key string, val float32) Field { + return Field{Key: key, Type: zapcore.Float32Type, Integer: int64(math.Float32bits(val))} +} + +// Int constructs a field with the given key and value. +func Int(key string, val int) Field { + return Int64(key, int64(val)) +} + +// Int64 constructs a field with the given key and value. +func Int64(key string, val int64) Field { + return Field{Key: key, Type: zapcore.Int64Type, Integer: val} +} + +// Int32 constructs a field with the given key and value. +func Int32(key string, val int32) Field { + return Field{Key: key, Type: zapcore.Int32Type, Integer: int64(val)} +} + +// Int16 constructs a field with the given key and value. +func Int16(key string, val int16) Field { + return Field{Key: key, Type: zapcore.Int16Type, Integer: int64(val)} +} + +// Int8 constructs a field with the given key and value. +func Int8(key string, val int8) Field { + return Field{Key: key, Type: zapcore.Int8Type, Integer: int64(val)} +} + +// String constructs a field with the given key and value. +func String(key string, val string) Field { + return Field{Key: key, Type: zapcore.StringType, String: val} +} + +// Uint constructs a field with the given key and value. +func Uint(key string, val uint) Field { + return Uint64(key, uint64(val)) +} + +// Uint64 constructs a field with the given key and value. +func Uint64(key string, val uint64) Field { + return Field{Key: key, Type: zapcore.Uint64Type, Integer: int64(val)} +} + +// Uint32 constructs a field with the given key and value. +func Uint32(key string, val uint32) Field { + return Field{Key: key, Type: zapcore.Uint32Type, Integer: int64(val)} +} + +// Uint16 constructs a field with the given key and value. +func Uint16(key string, val uint16) Field { + return Field{Key: key, Type: zapcore.Uint16Type, Integer: int64(val)} +} + +// Uint8 constructs a field with the given key and value. +func Uint8(key string, val uint8) Field { + return Field{Key: key, Type: zapcore.Uint8Type, Integer: int64(val)} +} + +// Uintptr constructs a field with the given key and value. +func Uintptr(key string, val uintptr) Field { + return Field{Key: key, Type: zapcore.UintptrType, Integer: int64(val)} +} + +// Reflect constructs a field with the given key and an arbitrary object. It uses +// an encoding-appropriate, reflection-based function to lazily serialize nearly +// any object into the logging context, but it's relatively slow and +// allocation-heavy. Outside tests, Any is always a better choice. +// +// If encoding fails (e.g., trying to serialize a map[int]string to JSON), Reflect +// includes the error message in the final log output. +func Reflect(key string, val interface{}) Field { + return Field{Key: key, Type: zapcore.ReflectType, Interface: val} +} + +// Namespace creates a named, isolated scope within the logger's context. All +// subsequent fields will be added to the new namespace. +// +// This helps prevent key collisions when injecting loggers into sub-components +// or third-party libraries. +func Namespace(key string) Field { + return Field{Key: key, Type: zapcore.NamespaceType} +} + +// Stringer constructs a field with the given key and the output of the value's +// String method. The Stringer's String method is called lazily. +func Stringer(key string, val fmt.Stringer) Field { + return Field{Key: key, Type: zapcore.StringerType, Interface: val} +} + +// Time constructs a Field with the given key and value. The encoder +// controls how the time is serialized. 
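+//
+// For instance, an illustrative sketch (assuming a *zap.Logger named logger):
+//  logger.Info("job finished", zap.Time("finishedAt", time.Now()))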
+func Time(key string, val time.Time) Field { + return Field{Key: key, Type: zapcore.TimeType, Integer: val.UnixNano(), Interface: val.Location()} +} + +// Stack constructs a field that stores a stacktrace of the current goroutine +// under provided key. Keep in mind that taking a stacktrace is eager and +// expensive (relatively speaking); this function both makes an allocation and +// takes about two microseconds. +func Stack(key string) Field { + // Returning the stacktrace as a string costs an allocation, but saves us + // from expanding the zapcore.Field union struct to include a byte slice. Since + // taking a stacktrace is already so expensive (~10us), the extra allocation + // is okay. + return String(key, takeStacktrace()) +} + +// Duration constructs a field with the given key and value. The encoder +// controls how the duration is serialized. +func Duration(key string, val time.Duration) Field { + return Field{Key: key, Type: zapcore.DurationType, Integer: int64(val)} +} + +// Object constructs a field with the given key and ObjectMarshaler. It +// provides a flexible, but still type-safe and efficient, way to add map- or +// struct-like user-defined types to the logging context. The struct's +// MarshalLogObject method is called lazily. +func Object(key string, val zapcore.ObjectMarshaler) Field { + return Field{Key: key, Type: zapcore.ObjectMarshalerType, Interface: val} +} + +// Any takes a key and an arbitrary value and chooses the best way to represent +// them as a field, falling back to a reflection-based approach only if +// necessary. +// +// Since byte/uint8 and rune/int32 are aliases, Any can't differentiate between +// them. To minimize surprises, []byte values are treated as binary blobs, byte +// values are treated as uint8, and runes are always treated as integers. 
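+//
+// As an illustrative sketch of that dispatch, the following pairs are
+// equivalent:
+//  zap.Any("attempt", 3)          // same as zap.Int("attempt", 3)
+//  zap.Any("payload", []byte{1})  // same as zap.Binary, not a []uint8 field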
+func Any(key string, value interface{}) Field { + switch val := value.(type) { + case zapcore.ObjectMarshaler: + return Object(key, val) + case zapcore.ArrayMarshaler: + return Array(key, val) + case bool: + return Bool(key, val) + case []bool: + return Bools(key, val) + case complex128: + return Complex128(key, val) + case []complex128: + return Complex128s(key, val) + case complex64: + return Complex64(key, val) + case []complex64: + return Complex64s(key, val) + case float64: + return Float64(key, val) + case []float64: + return Float64s(key, val) + case float32: + return Float32(key, val) + case []float32: + return Float32s(key, val) + case int: + return Int(key, val) + case []int: + return Ints(key, val) + case int64: + return Int64(key, val) + case []int64: + return Int64s(key, val) + case int32: + return Int32(key, val) + case []int32: + return Int32s(key, val) + case int16: + return Int16(key, val) + case []int16: + return Int16s(key, val) + case int8: + return Int8(key, val) + case []int8: + return Int8s(key, val) + case string: + return String(key, val) + case []string: + return Strings(key, val) + case uint: + return Uint(key, val) + case []uint: + return Uints(key, val) + case uint64: + return Uint64(key, val) + case []uint64: + return Uint64s(key, val) + case uint32: + return Uint32(key, val) + case []uint32: + return Uint32s(key, val) + case uint16: + return Uint16(key, val) + case []uint16: + return Uint16s(key, val) + case uint8: + return Uint8(key, val) + case []byte: + return Binary(key, val) + case uintptr: + return Uintptr(key, val) + case []uintptr: + return Uintptrs(key, val) + case time.Time: + return Time(key, val) + case []time.Time: + return Times(key, val) + case time.Duration: + return Duration(key, val) + case []time.Duration: + return Durations(key, val) + case error: + return NamedError(key, val) + case []error: + return Errors(key, val) + case fmt.Stringer: + return Stringer(key, val) + default: + return Reflect(key, val) + } +} diff --git a/vendor/go.uber.org/zap/flag.go b/vendor/go.uber.org/zap/flag.go new file mode 100644 index 000000000..131287507 --- /dev/null +++ b/vendor/go.uber.org/zap/flag.go @@ -0,0 +1,39 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "flag" + + "go.uber.org/zap/zapcore" +) + +// LevelFlag uses the standard library's flag.Var to declare a global flag +// with the specified name, default, and usage guidance. 
The returned value is
+// a pointer to the value of the flag.
+//
+// If you don't want to use the flag package's global state, you can use any
+// non-nil *Level as a flag.Value with your own *flag.FlagSet.
+func LevelFlag(name string, defaultLevel zapcore.Level, usage string) *zapcore.Level {
+	lvl := defaultLevel
+	flag.Var(&lvl, name, usage)
+	return &lvl
+}
diff --git a/vendor/go.uber.org/zap/global.go b/vendor/go.uber.org/zap/global.go
new file mode 100644
index 000000000..d02232e39
--- /dev/null
+++ b/vendor/go.uber.org/zap/global.go
@@ -0,0 +1,169 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zap
+
+import (
+	"bytes"
+	"fmt"
+	"log"
+	"os"
+	"sync"
+
+	"go.uber.org/zap/zapcore"
+)
+
+const (
+	_stdLogDefaultDepth      = 2
+	_loggerWriterDepth       = 2
+	_programmerErrorTemplate = "You've found a bug in zap! Please file a bug at " +
+		"https://github.com/uber-go/zap/issues/new and reference this error: %v"
+)
+
+var (
+	_globalMu sync.RWMutex
+	_globalL  = NewNop()
+	_globalS  = _globalL.Sugar()
+)
+
+// L returns the global Logger, which can be reconfigured with ReplaceGlobals.
+// It's safe for concurrent use.
+func L() *Logger {
+	_globalMu.RLock()
+	l := _globalL
+	_globalMu.RUnlock()
+	return l
+}
+
+// S returns the global SugaredLogger, which can be reconfigured with
+// ReplaceGlobals. It's safe for concurrent use.
+func S() *SugaredLogger {
+	_globalMu.RLock()
+	s := _globalS
+	_globalMu.RUnlock()
+	return s
+}
+
+// ReplaceGlobals replaces the global Logger and SugaredLogger, and returns a
+// function to restore the original values. It's safe for concurrent use.
+func ReplaceGlobals(logger *Logger) func() {
+	_globalMu.Lock()
+	prev := _globalL
+	_globalL = logger
+	_globalS = logger.Sugar()
+	_globalMu.Unlock()
+	return func() { ReplaceGlobals(prev) }
+}
+
+// NewStdLog returns a *log.Logger which writes to the supplied zap Logger at
+// InfoLevel. To redirect the standard library's package-global logging
+// functions, use RedirectStdLog instead.
+func NewStdLog(l *Logger) *log.Logger {
+	logger := l.WithOptions(AddCallerSkip(_stdLogDefaultDepth + _loggerWriterDepth))
+	f := logger.Info
+	return log.New(&loggerWriter{f}, "" /* prefix */, 0 /* flags */)
+}
+
+// NewStdLogAt returns a *log.Logger which writes to the supplied zap Logger
+// at the specified level.
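+//
+// An illustrative sketch (assuming a *zap.Logger named logger):
+//  std, err := zap.NewStdLogAt(logger, zap.WarnLevel)
+//  if err == nil {
+//  	std.Print("forwarded to zap at WarnLevel")
+//  }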
+func NewStdLogAt(l *Logger, level zapcore.Level) (*log.Logger, error) { + logger := l.WithOptions(AddCallerSkip(_stdLogDefaultDepth + _loggerWriterDepth)) + logFunc, err := levelToFunc(logger, level) + if err != nil { + return nil, err + } + return log.New(&loggerWriter{logFunc}, "" /* prefix */, 0 /* flags */), nil +} + +// RedirectStdLog redirects output from the standard library's package-global +// logger to the supplied logger at InfoLevel. Since zap already handles caller +// annotations, timestamps, etc., it automatically disables the standard +// library's annotations and prefixing. +// +// It returns a function to restore the original prefix and flags and reset the +// standard library's output to os.Stderr. +func RedirectStdLog(l *Logger) func() { + f, err := redirectStdLogAt(l, InfoLevel) + if err != nil { + // Can't get here, since passing InfoLevel to redirectStdLogAt always + // works. + panic(fmt.Sprintf(_programmerErrorTemplate, err)) + } + return f +} + +// RedirectStdLogAt redirects output from the standard library's package-global +// logger to the supplied logger at the specified level. Since zap already +// handles caller annotations, timestamps, etc., it automatically disables the +// standard library's annotations and prefixing. +// +// It returns a function to restore the original prefix and flags and reset the +// standard library's output to os.Stderr. +func RedirectStdLogAt(l *Logger, level zapcore.Level) (func(), error) { + return redirectStdLogAt(l, level) +} + +func redirectStdLogAt(l *Logger, level zapcore.Level) (func(), error) { + flags := log.Flags() + prefix := log.Prefix() + log.SetFlags(0) + log.SetPrefix("") + logger := l.WithOptions(AddCallerSkip(_stdLogDefaultDepth + _loggerWriterDepth)) + logFunc, err := levelToFunc(logger, level) + if err != nil { + return nil, err + } + log.SetOutput(&loggerWriter{logFunc}) + return func() { + log.SetFlags(flags) + log.SetPrefix(prefix) + log.SetOutput(os.Stderr) + }, nil +} + +func levelToFunc(logger *Logger, lvl zapcore.Level) (func(string, ...Field), error) { + switch lvl { + case DebugLevel: + return logger.Debug, nil + case InfoLevel: + return logger.Info, nil + case WarnLevel: + return logger.Warn, nil + case ErrorLevel: + return logger.Error, nil + case DPanicLevel: + return logger.DPanic, nil + case PanicLevel: + return logger.Panic, nil + case FatalLevel: + return logger.Fatal, nil + } + return nil, fmt.Errorf("unrecognized level: %q", lvl) +} + +type loggerWriter struct { + logFunc func(msg string, fields ...Field) +} + +func (l *loggerWriter) Write(p []byte) (int, error) { + p = bytes.TrimSpace(p) + l.logFunc(string(p)) + return len(p), nil +} diff --git a/vendor/go.uber.org/zap/http_handler.go b/vendor/go.uber.org/zap/http_handler.go new file mode 100644 index 000000000..1b0ecaca9 --- /dev/null +++ b/vendor/go.uber.org/zap/http_handler.go @@ -0,0 +1,81 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zap
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/http"
+
+	"go.uber.org/zap/zapcore"
+)
+
+// ServeHTTP is a simple JSON endpoint that can report on or change the current
+// logging level.
+//
+// GET requests return a JSON description of the current logging level. PUT
+// requests change the logging level and expect a payload like:
+//   {"level":"info"}
+//
+// It's perfectly safe to change the logging level while a program is running.
+func (lvl AtomicLevel) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	type errorResponse struct {
+		Error string `json:"error"`
+	}
+	type payload struct {
+		Level *zapcore.Level `json:"level"`
+	}
+
+	enc := json.NewEncoder(w)
+
+	switch r.Method {
+
+	case http.MethodGet:
+		current := lvl.Level()
+		enc.Encode(payload{Level: &current})
+
+	case http.MethodPut:
+		var req payload
+
+		if errmess := func() string {
+			if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
+				return fmt.Sprintf("Request body must be well-formed JSON: %v", err)
+			}
+			if req.Level == nil {
+				return "Must specify a logging level."
+			}
+			return ""
+		}(); errmess != "" {
+			w.WriteHeader(http.StatusBadRequest)
+			enc.Encode(errorResponse{Error: errmess})
+			return
+		}
+
+		lvl.SetLevel(*req.Level)
+		enc.Encode(req)
+
+	default:
+		w.WriteHeader(http.StatusMethodNotAllowed)
+		enc.Encode(errorResponse{
+			Error: "Only GET and PUT are supported.",
+		})
+	}
+}
diff --git a/vendor/go.uber.org/zap/internal/bufferpool/bufferpool.go b/vendor/go.uber.org/zap/internal/bufferpool/bufferpool.go
new file mode 100644
index 000000000..dad583aaa
--- /dev/null
+++ b/vendor/go.uber.org/zap/internal/bufferpool/bufferpool.go
@@ -0,0 +1,31 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// Package bufferpool houses zap's shared internal buffer pool. Third-party
+// packages can recreate the same functionality with buffer.NewPool.
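+//
+// A minimal, illustrative sketch of the equivalent public setup:
+//  pool := buffer.NewPool()
+//  buf := pool.Get()
+//  buf.AppendString("hello")
+//  fmt.Println(buf.String()) // "hello"
+//  buf.Free()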
+package bufferpool + +import "go.uber.org/zap/buffer" + +var ( + _pool = buffer.NewPool() + // Get retrieves a buffer from the pool, creating one if necessary. + Get = _pool.Get +) diff --git a/vendor/go.uber.org/zap/internal/color/color.go b/vendor/go.uber.org/zap/internal/color/color.go new file mode 100644 index 000000000..c4d5d02ab --- /dev/null +++ b/vendor/go.uber.org/zap/internal/color/color.go @@ -0,0 +1,44 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package color adds coloring functionality for TTY output. +package color + +import "fmt" + +// Foreground colors. +const ( + Black Color = iota + 30 + Red + Green + Yellow + Blue + Magenta + Cyan + White +) + +// Color represents a text color. +type Color uint8 + +// Add adds the coloring to the given string. +func (c Color) Add(s string) string { + return fmt.Sprintf("\x1b[%dm%s\x1b[0m", uint8(c), s) +} diff --git a/vendor/go.uber.org/zap/internal/exit/exit.go b/vendor/go.uber.org/zap/internal/exit/exit.go new file mode 100644 index 000000000..dfc5b05fe --- /dev/null +++ b/vendor/go.uber.org/zap/internal/exit/exit.go @@ -0,0 +1,64 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package exit provides stubs so that unit tests can exercise code that calls +// os.Exit(1). 
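+//
+// An illustrative sketch of a test exercising code that would exit:
+//  s := exit.WithStub(func() { exit.Exit() })
+//  // s.Exited is now true instead of the process having terminated.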
+package exit + +import "os" + +var real = func() { os.Exit(1) } + +// Exit normally terminates the process by calling os.Exit(1). If the package +// is stubbed, it instead records a call in the testing spy. +func Exit() { + real() +} + +// A StubbedExit is a testing fake for os.Exit. +type StubbedExit struct { + Exited bool + prev func() +} + +// Stub substitutes a fake for the call to os.Exit(1). +func Stub() *StubbedExit { + s := &StubbedExit{prev: real} + real = s.exit + return s +} + +// WithStub runs the supplied function with Exit stubbed. It returns the stub +// used, so that users can test whether the process would have crashed. +func WithStub(f func()) *StubbedExit { + s := Stub() + defer s.Unstub() + f() + return s +} + +// Unstub restores the previous exit function. +func (se *StubbedExit) Unstub() { + real = se.prev +} + +func (se *StubbedExit) exit() { + se.Exited = true +} diff --git a/vendor/go.uber.org/zap/level.go b/vendor/go.uber.org/zap/level.go new file mode 100644 index 000000000..3567a9a1e --- /dev/null +++ b/vendor/go.uber.org/zap/level.go @@ -0,0 +1,132 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "go.uber.org/atomic" + "go.uber.org/zap/zapcore" +) + +const ( + // DebugLevel logs are typically voluminous, and are usually disabled in + // production. + DebugLevel = zapcore.DebugLevel + // InfoLevel is the default logging priority. + InfoLevel = zapcore.InfoLevel + // WarnLevel logs are more important than Info, but don't need individual + // human review. + WarnLevel = zapcore.WarnLevel + // ErrorLevel logs are high-priority. If an application is running smoothly, + // it shouldn't generate any error-level logs. + ErrorLevel = zapcore.ErrorLevel + // DPanicLevel logs are particularly important errors. In development the + // logger panics after writing the message. + DPanicLevel = zapcore.DPanicLevel + // PanicLevel logs a message, then panics. + PanicLevel = zapcore.PanicLevel + // FatalLevel logs a message, then calls os.Exit(1). + FatalLevel = zapcore.FatalLevel +) + +// LevelEnablerFunc is a convenient way to implement zapcore.LevelEnabler with +// an anonymous function. +// +// It's particularly useful when splitting log output between different +// outputs (e.g., standard error and standard out). For sample code, see the +// package-level AdvancedConfiguration example. 
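+//
+// For instance, an illustrative sketch that enables only high-priority logs:
+//  errorsOnly := zap.LevelEnablerFunc(func(lvl zapcore.Level) bool {
+//  	return lvl >= zapcore.ErrorLevel
+//  })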
+type LevelEnablerFunc func(zapcore.Level) bool + +// Enabled calls the wrapped function. +func (f LevelEnablerFunc) Enabled(lvl zapcore.Level) bool { return f(lvl) } + +// An AtomicLevel is an atomically changeable, dynamic logging level. It lets +// you safely change the log level of a tree of loggers (the root logger and +// any children created by adding context) at runtime. +// +// The AtomicLevel itself is an http.Handler that serves a JSON endpoint to +// alter its level. +// +// AtomicLevels must be created with the NewAtomicLevel constructor to allocate +// their internal atomic pointer. +type AtomicLevel struct { + l *atomic.Int32 +} + +// NewAtomicLevel creates an AtomicLevel with InfoLevel and above logging +// enabled. +func NewAtomicLevel() AtomicLevel { + return AtomicLevel{ + l: atomic.NewInt32(int32(InfoLevel)), + } +} + +// NewAtomicLevelAt is a convenience function that creates an AtomicLevel +// and then calls SetLevel with the given level. +func NewAtomicLevelAt(l zapcore.Level) AtomicLevel { + a := NewAtomicLevel() + a.SetLevel(l) + return a +} + +// Enabled implements the zapcore.LevelEnabler interface, which allows the +// AtomicLevel to be used in place of traditional static levels. +func (lvl AtomicLevel) Enabled(l zapcore.Level) bool { + return lvl.Level().Enabled(l) +} + +// Level returns the minimum enabled log level. +func (lvl AtomicLevel) Level() zapcore.Level { + return zapcore.Level(int8(lvl.l.Load())) +} + +// SetLevel alters the logging level. +func (lvl AtomicLevel) SetLevel(l zapcore.Level) { + lvl.l.Store(int32(l)) +} + +// String returns the string representation of the underlying Level. +func (lvl AtomicLevel) String() string { + return lvl.Level().String() +} + +// UnmarshalText unmarshals the text to an AtomicLevel. It uses the same text +// representations as the static zapcore.Levels ("debug", "info", "warn", +// "error", "dpanic", "panic", and "fatal"). +func (lvl *AtomicLevel) UnmarshalText(text []byte) error { + if lvl.l == nil { + lvl.l = &atomic.Int32{} + } + + var l zapcore.Level + if err := l.UnmarshalText(text); err != nil { + return err + } + + lvl.SetLevel(l) + return nil +} + +// MarshalText marshals the AtomicLevel to a byte slice. It uses the same +// text representation as the static zapcore.Levels ("debug", "info", "warn", +// "error", "dpanic", "panic", and "fatal"). +func (lvl AtomicLevel) MarshalText() (text []byte, err error) { + return lvl.Level().MarshalText() +} diff --git a/vendor/go.uber.org/zap/logger.go b/vendor/go.uber.org/zap/logger.go new file mode 100644 index 000000000..dc8f6e3a4 --- /dev/null +++ b/vendor/go.uber.org/zap/logger.go @@ -0,0 +1,305 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "fmt" + "io/ioutil" + "os" + "runtime" + "strings" + "time" + + "go.uber.org/zap/zapcore" +) + +// A Logger provides fast, leveled, structured logging. All methods are safe +// for concurrent use. +// +// The Logger is designed for contexts in which every microsecond and every +// allocation matters, so its API intentionally favors performance and type +// safety over brevity. For most applications, the SugaredLogger strikes a +// better balance between performance and ergonomics. +type Logger struct { + core zapcore.Core + + development bool + name string + errorOutput zapcore.WriteSyncer + + addCaller bool + addStack zapcore.LevelEnabler + + callerSkip int +} + +// New constructs a new Logger from the provided zapcore.Core and Options. If +// the passed zapcore.Core is nil, it falls back to using a no-op +// implementation. +// +// This is the most flexible way to construct a Logger, but also the most +// verbose. For typical use cases, the highly-opinionated presets +// (NewProduction, NewDevelopment, and NewExample) or the Config struct are +// more convenient. +// +// For sample code, see the package-level AdvancedConfiguration example. +func New(core zapcore.Core, options ...Option) *Logger { + if core == nil { + return NewNop() + } + log := &Logger{ + core: core, + errorOutput: zapcore.Lock(os.Stderr), + addStack: zapcore.FatalLevel + 1, + } + return log.WithOptions(options...) +} + +// NewNop returns a no-op Logger. It never writes out logs or internal errors, +// and it never runs user-defined hooks. +// +// Using WithOptions to replace the Core or error output of a no-op Logger can +// re-enable logging. +func NewNop() *Logger { + return &Logger{ + core: zapcore.NewNopCore(), + errorOutput: zapcore.AddSync(ioutil.Discard), + addStack: zapcore.FatalLevel + 1, + } +} + +// NewProduction builds a sensible production Logger that writes InfoLevel and +// above logs to standard error as JSON. +// +// It's a shortcut for NewProductionConfig().Build(...Option). +func NewProduction(options ...Option) (*Logger, error) { + return NewProductionConfig().Build(options...) +} + +// NewDevelopment builds a development Logger that writes DebugLevel and above +// logs to standard error in a human-friendly format. +// +// It's a shortcut for NewDevelopmentConfig().Build(...Option). +func NewDevelopment(options ...Option) (*Logger, error) { + return NewDevelopmentConfig().Build(options...) +} + +// NewExample builds a Logger that's designed for use in zap's testable +// examples. It writes DebugLevel and above logs to standard out as JSON, but +// omits the timestamp and calling function to keep example output +// short and deterministic. +func NewExample(options ...Option) *Logger { + encoderCfg := zapcore.EncoderConfig{ + MessageKey: "msg", + LevelKey: "level", + NameKey: "logger", + EncodeLevel: zapcore.LowercaseLevelEncoder, + EncodeTime: zapcore.ISO8601TimeEncoder, + EncodeDuration: zapcore.StringDurationEncoder, + } + core := zapcore.NewCore(zapcore.NewJSONEncoder(encoderCfg), os.Stdout, DebugLevel) + return New(core).WithOptions(options...) +} + +// Sugar wraps the Logger to provide a more ergonomic, but slightly slower, +// API. 
Sugaring a Logger is quite inexpensive, so it's reasonable for a +// single application to use both Loggers and SugaredLoggers, converting +// between them on the boundaries of performance-sensitive code. +func (log *Logger) Sugar() *SugaredLogger { + core := log.clone() + core.callerSkip += 2 + return &SugaredLogger{core} +} + +// Named adds a new path segment to the logger's name. Segments are joined by +// periods. By default, Loggers are unnamed. +func (log *Logger) Named(s string) *Logger { + if s == "" { + return log + } + l := log.clone() + if log.name == "" { + l.name = s + } else { + l.name = strings.Join([]string{l.name, s}, ".") + } + return l +} + +// WithOptions clones the current Logger, applies the supplied Options, and +// returns the resulting Logger. It's safe to use concurrently. +func (log *Logger) WithOptions(opts ...Option) *Logger { + c := log.clone() + for _, opt := range opts { + opt.apply(c) + } + return c +} + +// With creates a child logger and adds structured context to it. Fields added +// to the child don't affect the parent, and vice versa. +func (log *Logger) With(fields ...Field) *Logger { + if len(fields) == 0 { + return log + } + l := log.clone() + l.core = l.core.With(fields) + return l +} + +// Check returns a CheckedEntry if logging a message at the specified level +// is enabled. It's a completely optional optimization; in high-performance +// applications, Check can help avoid allocating a slice to hold fields. +func (log *Logger) Check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry { + return log.check(lvl, msg) +} + +// Debug logs a message at DebugLevel. The message includes any fields passed +// at the log site, as well as any fields accumulated on the logger. +func (log *Logger) Debug(msg string, fields ...Field) { + if ce := log.check(DebugLevel, msg); ce != nil { + ce.Write(fields...) + } +} + +// Info logs a message at InfoLevel. The message includes any fields passed +// at the log site, as well as any fields accumulated on the logger. +func (log *Logger) Info(msg string, fields ...Field) { + if ce := log.check(InfoLevel, msg); ce != nil { + ce.Write(fields...) + } +} + +// Warn logs a message at WarnLevel. The message includes any fields passed +// at the log site, as well as any fields accumulated on the logger. +func (log *Logger) Warn(msg string, fields ...Field) { + if ce := log.check(WarnLevel, msg); ce != nil { + ce.Write(fields...) + } +} + +// Error logs a message at ErrorLevel. The message includes any fields passed +// at the log site, as well as any fields accumulated on the logger. +func (log *Logger) Error(msg string, fields ...Field) { + if ce := log.check(ErrorLevel, msg); ce != nil { + ce.Write(fields...) + } +} + +// DPanic logs a message at DPanicLevel. The message includes any fields +// passed at the log site, as well as any fields accumulated on the logger. +// +// If the logger is in development mode, it then panics (DPanic means +// "development panic"). This is useful for catching errors that are +// recoverable, but shouldn't ever happen. +func (log *Logger) DPanic(msg string, fields ...Field) { + if ce := log.check(DPanicLevel, msg); ce != nil { + ce.Write(fields...) + } +} + +// Panic logs a message at PanicLevel. The message includes any fields passed +// at the log site, as well as any fields accumulated on the logger. +// +// The logger then panics, even if logging at PanicLevel is disabled. 
+func (log *Logger) Panic(msg string, fields ...Field) {
+	if ce := log.check(PanicLevel, msg); ce != nil {
+		ce.Write(fields...)
+	}
+}
+
+// Fatal logs a message at FatalLevel. The message includes any fields passed
+// at the log site, as well as any fields accumulated on the logger.
+//
+// The logger then calls os.Exit(1), even if logging at FatalLevel is
+// disabled.
+func (log *Logger) Fatal(msg string, fields ...Field) {
+	if ce := log.check(FatalLevel, msg); ce != nil {
+		ce.Write(fields...)
+	}
+}
+
+// Sync calls the underlying Core's Sync method, flushing any buffered log
+// entries. Applications should take care to call Sync before exiting.
+func (log *Logger) Sync() error {
+	return log.core.Sync()
+}
+
+// Core returns the Logger's underlying zapcore.Core.
+func (log *Logger) Core() zapcore.Core {
+	return log.core
+}
+
+func (log *Logger) clone() *Logger {
+	copy := *log
+	return &copy
+}
+
+func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry {
+	// check must always be called directly by a method in the Logger interface
+	// (e.g., Check, Info, Fatal).
+	const callerSkipOffset = 2
+
+	// Create basic checked entry thru the core; this will be non-nil if the
+	// log message will actually be written somewhere.
+	ent := zapcore.Entry{
+		LoggerName: log.name,
+		Time:       time.Now(),
+		Level:      lvl,
+		Message:    msg,
+	}
+	ce := log.core.Check(ent, nil)
+	willWrite := ce != nil
+
+	// Set up any required terminal behavior.
+	switch ent.Level {
+	case zapcore.PanicLevel:
+		ce = ce.Should(ent, zapcore.WriteThenPanic)
+	case zapcore.FatalLevel:
+		ce = ce.Should(ent, zapcore.WriteThenFatal)
+	case zapcore.DPanicLevel:
+		if log.development {
+			ce = ce.Should(ent, zapcore.WriteThenPanic)
+		}
+	}
+
+	// Only do further annotation if we're going to write this message; checked
+	// entries that exist only for terminal behavior don't benefit from
+	// annotation.
+	if !willWrite {
+		return ce
+	}
+
+	// Thread the error output through to the CheckedEntry.
+	ce.ErrorOutput = log.errorOutput
+	if log.addCaller {
+		ce.Entry.Caller = zapcore.NewEntryCaller(runtime.Caller(log.callerSkip + callerSkipOffset))
+		if !ce.Entry.Caller.Defined {
+			fmt.Fprintf(log.errorOutput, "%v Logger.check error: failed to get caller\n", time.Now().UTC())
+			log.errorOutput.Sync()
+		}
+	}
+	if log.addStack.Enabled(ce.Entry.Level) {
+		ce.Entry.Stack = Stack("").String
+	}
+
+	return ce
+}
diff --git a/vendor/go.uber.org/zap/options.go b/vendor/go.uber.org/zap/options.go
new file mode 100644
index 000000000..7a6b0fca1
--- /dev/null
+++ b/vendor/go.uber.org/zap/options.go
@@ -0,0 +1,109 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
diff --git a/vendor/go.uber.org/zap/options.go b/vendor/go.uber.org/zap/options.go
new file mode 100644
index 000000000..7a6b0fca1
--- /dev/null
+++ b/vendor/go.uber.org/zap/options.go
@@ -0,0 +1,109 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zap
+
+import "go.uber.org/zap/zapcore"
+
+// An Option configures a Logger.
+type Option interface {
+	apply(*Logger)
+}
+
+// optionFunc wraps a func so it satisfies the Option interface.
+type optionFunc func(*Logger)
+
+func (f optionFunc) apply(log *Logger) {
+	f(log)
+}
+
+// WrapCore wraps or replaces the Logger's underlying zapcore.Core.
+func WrapCore(f func(zapcore.Core) zapcore.Core) Option {
+	return optionFunc(func(log *Logger) {
+		log.core = f(log.core)
+	})
+}
+
+// Hooks registers functions which will be called each time the Logger writes
+// out an Entry. Repeated use of Hooks is additive.
+//
+// Hooks are useful for simple side effects, like capturing metrics for the
+// number of emitted logs. More complex side effects, including anything that
+// requires access to the Entry's structured fields, should be implemented as
+// a zapcore.Core instead. See zapcore.RegisterHooks for details.
+func Hooks(hooks ...func(zapcore.Entry) error) Option {
+	return optionFunc(func(log *Logger) {
+		log.core = zapcore.RegisterHooks(log.core, hooks...)
+	})
+}
+
+// Fields adds fields to the Logger.
+func Fields(fs ...Field) Option {
+	return optionFunc(func(log *Logger) {
+		log.core = log.core.With(fs)
+	})
+}
+
+// ErrorOutput sets the destination for errors generated by the Logger. Note
+// that this option only affects internal errors; for sample code that sends
+// error-level logs to a different location from info- and debug-level logs,
+// see the package-level AdvancedConfiguration example.
+//
+// The supplied WriteSyncer must be safe for concurrent use. The Open and
+// zapcore.Lock functions are the simplest ways to protect files with a mutex.
+func ErrorOutput(w zapcore.WriteSyncer) Option {
+	return optionFunc(func(log *Logger) {
+		log.errorOutput = w
+	})
+}
+
+// Development puts the logger in development mode, which makes DPanic-level
+// logs panic instead of simply logging an error.
+func Development() Option {
+	return optionFunc(func(log *Logger) {
+		log.development = true
+	})
+}
+
+// AddCaller configures the Logger to annotate each message with the filename
+// and line number of zap's caller.
+func AddCaller() Option {
+	return optionFunc(func(log *Logger) {
+		log.addCaller = true
+	})
+}
+
+// AddCallerSkip increases the number of callers skipped by caller annotation
+// (as enabled by the AddCaller option). When building wrappers around the
+// Logger and SugaredLogger, supplying this Option prevents zap from always
+// reporting the wrapper code as the caller.
+func AddCallerSkip(skip int) Option {
+	return optionFunc(func(log *Logger) {
+		log.callerSkip += skip
+	})
+}
+
+// AddStacktrace configures the Logger to record a stack trace for all messages at
+// or above a given level.
+func AddStacktrace(lvl zapcore.LevelEnabler) Option {
+	return optionFunc(func(log *Logger) {
+		log.addStack = lvl
+	})
+}
diff --git a/vendor/go.uber.org/zap/sink.go b/vendor/go.uber.org/zap/sink.go
new file mode 100644
index 000000000..ff0becfe5
--- /dev/null
+++ b/vendor/go.uber.org/zap/sink.go
@@ -0,0 +1,161 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "errors" + "fmt" + "io" + "net/url" + "os" + "strings" + "sync" + + "go.uber.org/zap/zapcore" +) + +const schemeFile = "file" + +var ( + _sinkMutex sync.RWMutex + _sinkFactories map[string]func(*url.URL) (Sink, error) // keyed by scheme +) + +func init() { + resetSinkRegistry() +} + +func resetSinkRegistry() { + _sinkMutex.Lock() + defer _sinkMutex.Unlock() + + _sinkFactories = map[string]func(*url.URL) (Sink, error){ + schemeFile: newFileSink, + } +} + +// Sink defines the interface to write to and close logger destinations. +type Sink interface { + zapcore.WriteSyncer + io.Closer +} + +type nopCloserSink struct{ zapcore.WriteSyncer } + +func (nopCloserSink) Close() error { return nil } + +type errSinkNotFound struct { + scheme string +} + +func (e *errSinkNotFound) Error() string { + return fmt.Sprintf("no sink found for scheme %q", e.scheme) +} + +// RegisterSink registers a user-supplied factory for all sinks with a +// particular scheme. +// +// All schemes must be ASCII, valid under section 3.1 of RFC 3986 +// (https://tools.ietf.org/html/rfc3986#section-3.1), and must not already +// have a factory registered. Zap automatically registers a factory for the +// "file" scheme. 
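Before the implementation below, a consumer-side sketch of this registration contract; the "memory" scheme and the memSink type are hypothetical names invented for the example.

package main

import (
	"bytes"
	"net/url"

	"go.uber.org/zap"
)

// memSink buffers log output in memory; the embedded bytes.Buffer provides
// Write, and no-op Sync and Close complete the zap.Sink interface.
type memSink struct{ bytes.Buffer }

func (*memSink) Close() error { return nil }
func (*memSink) Sync() error  { return nil }

func init() {
	if err := zap.RegisterSink("memory", func(*url.URL) (zap.Sink, error) {
		return &memSink{}, nil
	}); err != nil {
		panic(err)
	}
}

func main() {
	// Paths like "memory://" now resolve through the registered factory.
	ws, cleanup, err := zap.Open("memory://")
	if err != nil {
		panic(err)
	}
	defer cleanup()
	ws.Write([]byte("routed through the in-memory sink\n"))
}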
+func RegisterSink(scheme string, factory func(*url.URL) (Sink, error)) error { + _sinkMutex.Lock() + defer _sinkMutex.Unlock() + + if scheme == "" { + return errors.New("can't register a sink factory for empty string") + } + normalized, err := normalizeScheme(scheme) + if err != nil { + return fmt.Errorf("%q is not a valid scheme: %v", scheme, err) + } + if _, ok := _sinkFactories[normalized]; ok { + return fmt.Errorf("sink factory already registered for scheme %q", normalized) + } + _sinkFactories[normalized] = factory + return nil +} + +func newSink(rawURL string) (Sink, error) { + u, err := url.Parse(rawURL) + if err != nil { + return nil, fmt.Errorf("can't parse %q as a URL: %v", rawURL, err) + } + if u.Scheme == "" { + u.Scheme = schemeFile + } + + _sinkMutex.RLock() + factory, ok := _sinkFactories[u.Scheme] + _sinkMutex.RUnlock() + if !ok { + return nil, &errSinkNotFound{u.Scheme} + } + return factory(u) +} + +func newFileSink(u *url.URL) (Sink, error) { + if u.User != nil { + return nil, fmt.Errorf("user and password not allowed with file URLs: got %v", u) + } + if u.Fragment != "" { + return nil, fmt.Errorf("fragments not allowed with file URLs: got %v", u) + } + if u.RawQuery != "" { + return nil, fmt.Errorf("query parameters not allowed with file URLs: got %v", u) + } + // Error messages are better if we check hostname and port separately. + if u.Port() != "" { + return nil, fmt.Errorf("ports not allowed with file URLs: got %v", u) + } + if hn := u.Hostname(); hn != "" && hn != "localhost" { + return nil, fmt.Errorf("file URLs must leave host empty or use localhost: got %v", u) + } + switch u.Path { + case "stdout": + return nopCloserSink{os.Stdout}, nil + case "stderr": + return nopCloserSink{os.Stderr}, nil + } + return os.OpenFile(u.Path, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0644) +} + +func normalizeScheme(s string) (string, error) { + // https://tools.ietf.org/html/rfc3986#section-3.1 + s = strings.ToLower(s) + if first := s[0]; 'a' > first || 'z' < first { + return "", errors.New("must start with a letter") + } + for i := 1; i < len(s); i++ { // iterate over bytes, not runes + c := s[i] + switch { + case 'a' <= c && c <= 'z': + continue + case '0' <= c && c <= '9': + continue + case c == '.' || c == '+' || c == '-': + continue + } + return "", fmt.Errorf("may not contain %q", c) + } + return s, nil +} diff --git a/vendor/go.uber.org/zap/stacktrace.go b/vendor/go.uber.org/zap/stacktrace.go new file mode 100644 index 000000000..100fac216 --- /dev/null +++ b/vendor/go.uber.org/zap/stacktrace.go @@ -0,0 +1,126 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zap
+
+import (
+	"runtime"
+	"strings"
+	"sync"
+
+	"go.uber.org/zap/internal/bufferpool"
+)
+
+const _zapPackage = "go.uber.org/zap"
+
+var (
+	_stacktracePool = sync.Pool{
+		New: func() interface{} {
+			return newProgramCounters(64)
+		},
+	}
+
+	// We add "." and "/" suffixes to the package name to ensure we only match
+	// the exact package and not any package with the same prefix.
+	_zapStacktracePrefixes       = addPrefix(_zapPackage, ".", "/")
+	_zapStacktraceVendorContains = addPrefix("/vendor/", _zapStacktracePrefixes...)
+)
+
+func takeStacktrace() string {
+	buffer := bufferpool.Get()
+	defer buffer.Free()
+	programCounters := _stacktracePool.Get().(*programCounters)
+	defer _stacktracePool.Put(programCounters)
+
+	var numFrames int
+	for {
+		// Skip the call to runtime.Callers and takeStacktrace so that the
+		// program counters start at the caller of takeStacktrace.
+		numFrames = runtime.Callers(2, programCounters.pcs)
+		if numFrames < len(programCounters.pcs) {
+			break
+		}
+		// Don't put the too-short counter slice back into the pool; this lets
+		// the pool adjust if we consistently take deep stacktraces.
+		programCounters = newProgramCounters(len(programCounters.pcs) * 2)
+	}
+
+	i := 0
+	skipZapFrames := true // skip all consecutive zap frames at the beginning.
+	frames := runtime.CallersFrames(programCounters.pcs[:numFrames])
+
+	// Note: On the last iteration, frames.Next() returns false, with a valid
+	// frame, but we ignore this frame. The last frame is a runtime frame which
+	// adds noise, since it's only either runtime.main or runtime.goexit.
+	for frame, more := frames.Next(); more; frame, more = frames.Next() {
+		if skipZapFrames && isZapFrame(frame.Function) {
+			continue
+		} else {
+			skipZapFrames = false
+		}
+
+		if i != 0 {
+			buffer.AppendByte('\n')
+		}
+		i++
+		buffer.AppendString(frame.Function)
+		buffer.AppendByte('\n')
+		buffer.AppendByte('\t')
+		buffer.AppendString(frame.File)
+		buffer.AppendByte(':')
+		buffer.AppendInt(int64(frame.Line))
+	}
+
+	return buffer.String()
+}
+
+func isZapFrame(function string) bool {
+	for _, prefix := range _zapStacktracePrefixes {
+		if strings.HasPrefix(function, prefix) {
+			return true
+		}
+	}
+
+	// We can't use a prefix match here since the location of the vendor
+	// directory affects the prefix. Instead we do a contains match.
+	for _, contains := range _zapStacktraceVendorContains {
+		if strings.Contains(function, contains) {
+			return true
+		}
+	}
+
+	return false
+}
+
+type programCounters struct {
+	pcs []uintptr
+}
+
+func newProgramCounters(size int) *programCounters {
+	return &programCounters{make([]uintptr, size)}
+}
+
+func addPrefix(prefix string, ss ...string) []string {
+	withPrefix := make([]string, len(ss))
+	for i, s := range ss {
+		withPrefix[i] = prefix + s
+	}
+	return withPrefix
+}
diff --git a/vendor/go.uber.org/zap/sugar.go b/vendor/go.uber.org/zap/sugar.go
new file mode 100644
index 000000000..77ca227f4
--- /dev/null
+++ b/vendor/go.uber.org/zap/sugar.go
@@ -0,0 +1,304 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "fmt" + + "go.uber.org/zap/zapcore" + + "go.uber.org/multierr" +) + +const ( + _oddNumberErrMsg = "Ignored key without a value." + _nonStringKeyErrMsg = "Ignored key-value pairs with non-string keys." +) + +// A SugaredLogger wraps the base Logger functionality in a slower, but less +// verbose, API. Any Logger can be converted to a SugaredLogger with its Sugar +// method. +// +// Unlike the Logger, the SugaredLogger doesn't insist on structured logging. +// For each log level, it exposes three methods: one for loosely-typed +// structured logging, one for println-style formatting, and one for +// printf-style formatting. For example, SugaredLoggers can produce InfoLevel +// output with Infow ("info with" structured context), Info, or Infof. +type SugaredLogger struct { + base *Logger +} + +// Desugar unwraps a SugaredLogger, exposing the original Logger. Desugaring +// is quite inexpensive, so it's reasonable for a single application to use +// both Loggers and SugaredLoggers, converting between them on the boundaries +// of performance-sensitive code. +func (s *SugaredLogger) Desugar() *Logger { + base := s.base.clone() + base.callerSkip -= 2 + return base +} + +// Named adds a sub-scope to the logger's name. See Logger.Named for details. +func (s *SugaredLogger) Named(name string) *SugaredLogger { + return &SugaredLogger{base: s.base.Named(name)} +} + +// With adds a variadic number of fields to the logging context. It accepts a +// mix of strongly-typed Field objects and loosely-typed key-value pairs. When +// processing pairs, the first element of the pair is used as the field key +// and the second as the field value. +// +// For example, +// sugaredLogger.With( +// "hello", "world", +// "failure", errors.New("oh no"), +// Stack(), +// "count", 42, +// "user", User{Name: "alice"}, +// ) +// is the equivalent of +// unsugared.With( +// String("hello", "world"), +// String("failure", "oh no"), +// Stack(), +// Int("count", 42), +// Object("user", User{Name: "alice"}), +// ) +// +// Note that the keys in key-value pairs should be strings. In development, +// passing a non-string key panics. In production, the logger is more +// forgiving: a separate error is logged, but the key-value pair is skipped +// and execution continues. Passing an orphaned key triggers similar behavior: +// panics in development and errors in production. 
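Before the implementation, a short runnable sketch of the sugared API in practice; the message and values are assumptions for illustration.

package main

import (
	"time"

	"go.uber.org/zap"
)

func main() {
	logger := zap.NewExample()
	defer logger.Sync()
	sugar := logger.Sugar()

	// Loosely-typed key-value pairs, swept into strongly-typed Fields by
	// sweetenFields below.
	sugar.Infow("failed to fetch URL",
		"url", "https://example.com",
		"attempt", 3,
		"backoff", time.Second,
	)

	// printf-style alternative for when a plain message suffices.
	sugar.Infof("failed to fetch URL: %s", "https://example.com")
}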
+func (s *SugaredLogger) With(args ...interface{}) *SugaredLogger { + return &SugaredLogger{base: s.base.With(s.sweetenFields(args)...)} +} + +// Debug uses fmt.Sprint to construct and log a message. +func (s *SugaredLogger) Debug(args ...interface{}) { + s.log(DebugLevel, "", args, nil) +} + +// Info uses fmt.Sprint to construct and log a message. +func (s *SugaredLogger) Info(args ...interface{}) { + s.log(InfoLevel, "", args, nil) +} + +// Warn uses fmt.Sprint to construct and log a message. +func (s *SugaredLogger) Warn(args ...interface{}) { + s.log(WarnLevel, "", args, nil) +} + +// Error uses fmt.Sprint to construct and log a message. +func (s *SugaredLogger) Error(args ...interface{}) { + s.log(ErrorLevel, "", args, nil) +} + +// DPanic uses fmt.Sprint to construct and log a message. In development, the +// logger then panics. (See DPanicLevel for details.) +func (s *SugaredLogger) DPanic(args ...interface{}) { + s.log(DPanicLevel, "", args, nil) +} + +// Panic uses fmt.Sprint to construct and log a message, then panics. +func (s *SugaredLogger) Panic(args ...interface{}) { + s.log(PanicLevel, "", args, nil) +} + +// Fatal uses fmt.Sprint to construct and log a message, then calls os.Exit. +func (s *SugaredLogger) Fatal(args ...interface{}) { + s.log(FatalLevel, "", args, nil) +} + +// Debugf uses fmt.Sprintf to log a templated message. +func (s *SugaredLogger) Debugf(template string, args ...interface{}) { + s.log(DebugLevel, template, args, nil) +} + +// Infof uses fmt.Sprintf to log a templated message. +func (s *SugaredLogger) Infof(template string, args ...interface{}) { + s.log(InfoLevel, template, args, nil) +} + +// Warnf uses fmt.Sprintf to log a templated message. +func (s *SugaredLogger) Warnf(template string, args ...interface{}) { + s.log(WarnLevel, template, args, nil) +} + +// Errorf uses fmt.Sprintf to log a templated message. +func (s *SugaredLogger) Errorf(template string, args ...interface{}) { + s.log(ErrorLevel, template, args, nil) +} + +// DPanicf uses fmt.Sprintf to log a templated message. In development, the +// logger then panics. (See DPanicLevel for details.) +func (s *SugaredLogger) DPanicf(template string, args ...interface{}) { + s.log(DPanicLevel, template, args, nil) +} + +// Panicf uses fmt.Sprintf to log a templated message, then panics. +func (s *SugaredLogger) Panicf(template string, args ...interface{}) { + s.log(PanicLevel, template, args, nil) +} + +// Fatalf uses fmt.Sprintf to log a templated message, then calls os.Exit. +func (s *SugaredLogger) Fatalf(template string, args ...interface{}) { + s.log(FatalLevel, template, args, nil) +} + +// Debugw logs a message with some additional context. The variadic key-value +// pairs are treated as they are in With. +// +// When debug-level logging is disabled, this is much faster than +// s.With(keysAndValues).Debug(msg) +func (s *SugaredLogger) Debugw(msg string, keysAndValues ...interface{}) { + s.log(DebugLevel, msg, nil, keysAndValues) +} + +// Infow logs a message with some additional context. The variadic key-value +// pairs are treated as they are in With. +func (s *SugaredLogger) Infow(msg string, keysAndValues ...interface{}) { + s.log(InfoLevel, msg, nil, keysAndValues) +} + +// Warnw logs a message with some additional context. The variadic key-value +// pairs are treated as they are in With. +func (s *SugaredLogger) Warnw(msg string, keysAndValues ...interface{}) { + s.log(WarnLevel, msg, nil, keysAndValues) +} + +// Errorw logs a message with some additional context. 
The variadic key-value +// pairs are treated as they are in With. +func (s *SugaredLogger) Errorw(msg string, keysAndValues ...interface{}) { + s.log(ErrorLevel, msg, nil, keysAndValues) +} + +// DPanicw logs a message with some additional context. In development, the +// logger then panics. (See DPanicLevel for details.) The variadic key-value +// pairs are treated as they are in With. +func (s *SugaredLogger) DPanicw(msg string, keysAndValues ...interface{}) { + s.log(DPanicLevel, msg, nil, keysAndValues) +} + +// Panicw logs a message with some additional context, then panics. The +// variadic key-value pairs are treated as they are in With. +func (s *SugaredLogger) Panicw(msg string, keysAndValues ...interface{}) { + s.log(PanicLevel, msg, nil, keysAndValues) +} + +// Fatalw logs a message with some additional context, then calls os.Exit. The +// variadic key-value pairs are treated as they are in With. +func (s *SugaredLogger) Fatalw(msg string, keysAndValues ...interface{}) { + s.log(FatalLevel, msg, nil, keysAndValues) +} + +// Sync flushes any buffered log entries. +func (s *SugaredLogger) Sync() error { + return s.base.Sync() +} + +func (s *SugaredLogger) log(lvl zapcore.Level, template string, fmtArgs []interface{}, context []interface{}) { + // If logging at this level is completely disabled, skip the overhead of + // string formatting. + if lvl < DPanicLevel && !s.base.Core().Enabled(lvl) { + return + } + + // Format with Sprint, Sprintf, or neither. + msg := template + if msg == "" && len(fmtArgs) > 0 { + msg = fmt.Sprint(fmtArgs...) + } else if msg != "" && len(fmtArgs) > 0 { + msg = fmt.Sprintf(template, fmtArgs...) + } + + if ce := s.base.Check(lvl, msg); ce != nil { + ce.Write(s.sweetenFields(context)...) + } +} + +func (s *SugaredLogger) sweetenFields(args []interface{}) []Field { + if len(args) == 0 { + return nil + } + + // Allocate enough space for the worst case; if users pass only structured + // fields, we shouldn't penalize them with extra allocations. + fields := make([]Field, 0, len(args)) + var invalid invalidPairs + + for i := 0; i < len(args); { + // This is a strongly-typed field. Consume it and move on. + if f, ok := args[i].(Field); ok { + fields = append(fields, f) + i++ + continue + } + + // Make sure this element isn't a dangling key. + if i == len(args)-1 { + s.base.DPanic(_oddNumberErrMsg, Any("ignored", args[i])) + break + } + + // Consume this value and the next, treating them as a key-value pair. If the + // key isn't a string, add this pair to the slice of invalid pairs. + key, val := args[i], args[i+1] + if keyStr, ok := key.(string); !ok { + // Subsequent errors are likely, so allocate once up front. + if cap(invalid) == 0 { + invalid = make(invalidPairs, 0, len(args)/2) + } + invalid = append(invalid, invalidPair{i, key, val}) + } else { + fields = append(fields, Any(keyStr, val)) + } + i += 2 + } + + // If we encountered any invalid key-value pairs, log an error. 
+ if len(invalid) > 0 { + s.base.DPanic(_nonStringKeyErrMsg, Array("invalid", invalid)) + } + return fields +} + +type invalidPair struct { + position int + key, value interface{} +} + +func (p invalidPair) MarshalLogObject(enc zapcore.ObjectEncoder) error { + enc.AddInt64("position", int64(p.position)) + Any("key", p.key).AddTo(enc) + Any("value", p.value).AddTo(enc) + return nil +} + +type invalidPairs []invalidPair + +func (ps invalidPairs) MarshalLogArray(enc zapcore.ArrayEncoder) error { + var err error + for i := range ps { + err = multierr.Append(err, enc.AppendObject(ps[i])) + } + return err +} diff --git a/vendor/go.uber.org/zap/time.go b/vendor/go.uber.org/zap/time.go new file mode 100644 index 000000000..c5a1f1622 --- /dev/null +++ b/vendor/go.uber.org/zap/time.go @@ -0,0 +1,27 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import "time" + +func timeToMillis(t time.Time) int64 { + return t.UnixNano() / int64(time.Millisecond) +} diff --git a/vendor/go.uber.org/zap/writer.go b/vendor/go.uber.org/zap/writer.go new file mode 100644 index 000000000..86a709ab0 --- /dev/null +++ b/vendor/go.uber.org/zap/writer.go @@ -0,0 +1,99 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
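The next file, writer.go, turns sink URLs like those handled in sink.go into WriteSyncers. A minimal consumer-side sketch, assuming the paths and the JSON encoder choice:

package main

import (
	"log"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	// Both paths go through newSink from sink.go; "stdout" maps to os.Stdout
	// and the schemeless path is opened as a local file.
	ws, cleanup, err := zap.Open("stdout", "/tmp/app.log")
	if err != nil {
		log.Fatal(err)
	}
	defer cleanup()

	enc := zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig())
	logger := zap.New(zapcore.NewCore(enc, ws, zap.InfoLevel))
	logger.Info("sinks wired up")
}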
+ +package zap + +import ( + "fmt" + "io" + "io/ioutil" + + "go.uber.org/zap/zapcore" + + "go.uber.org/multierr" +) + +// Open is a high-level wrapper that takes a variadic number of URLs, opens or +// creates each of the specified resources, and combines them into a locked +// WriteSyncer. It also returns any error encountered and a function to close +// any opened files. +// +// Passing no URLs returns a no-op WriteSyncer. Zap handles URLs without a +// scheme and URLs with the "file" scheme. Third-party code may register +// factories for other schemes using RegisterSink. +// +// URLs with the "file" scheme must use absolute paths on the local +// filesystem. No user, password, port, fragments, or query parameters are +// allowed, and the hostname must be empty or "localhost". +// +// Since it's common to write logs to the local filesystem, URLs without a +// scheme (e.g., "/var/log/foo.log") are treated as local file paths. Without +// a scheme, the special paths "stdout" and "stderr" are interpreted as +// os.Stdout and os.Stderr. When specified without a scheme, relative file +// paths also work. +func Open(paths ...string) (zapcore.WriteSyncer, func(), error) { + writers, close, err := open(paths) + if err != nil { + return nil, nil, err + } + + writer := CombineWriteSyncers(writers...) + return writer, close, nil +} + +func open(paths []string) ([]zapcore.WriteSyncer, func(), error) { + writers := make([]zapcore.WriteSyncer, 0, len(paths)) + closers := make([]io.Closer, 0, len(paths)) + close := func() { + for _, c := range closers { + c.Close() + } + } + + var openErr error + for _, path := range paths { + sink, err := newSink(path) + if err != nil { + openErr = multierr.Append(openErr, fmt.Errorf("couldn't open sink %q: %v", path, err)) + continue + } + writers = append(writers, sink) + closers = append(closers, sink) + } + if openErr != nil { + close() + return writers, nil, openErr + } + + return writers, close, nil +} + +// CombineWriteSyncers is a utility that combines multiple WriteSyncers into a +// single, locked WriteSyncer. If no inputs are supplied, it returns a no-op +// WriteSyncer. +// +// It's provided purely as a convenience; the result is no different from +// using zapcore.NewMultiWriteSyncer and zapcore.Lock individually. +func CombineWriteSyncers(writers ...zapcore.WriteSyncer) zapcore.WriteSyncer { + if len(writers) == 0 { + return zapcore.AddSync(ioutil.Discard) + } + return zapcore.Lock(zapcore.NewMultiWriteSyncer(writers...)) +} diff --git a/vendor/go.uber.org/zap/zapcore/console_encoder.go b/vendor/go.uber.org/zap/zapcore/console_encoder.go new file mode 100644 index 000000000..b7875966f --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/console_encoder.go @@ -0,0 +1,147 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import ( + "fmt" + "sync" + + "go.uber.org/zap/buffer" + "go.uber.org/zap/internal/bufferpool" +) + +var _sliceEncoderPool = sync.Pool{ + New: func() interface{} { + return &sliceArrayEncoder{elems: make([]interface{}, 0, 2)} + }, +} + +func getSliceEncoder() *sliceArrayEncoder { + return _sliceEncoderPool.Get().(*sliceArrayEncoder) +} + +func putSliceEncoder(e *sliceArrayEncoder) { + e.elems = e.elems[:0] + _sliceEncoderPool.Put(e) +} + +type consoleEncoder struct { + *jsonEncoder +} + +// NewConsoleEncoder creates an encoder whose output is designed for human - +// rather than machine - consumption. It serializes the core log entry data +// (message, level, timestamp, etc.) in a plain-text format and leaves the +// structured context as JSON. +// +// Note that although the console encoder doesn't use the keys specified in the +// encoder configuration, it will omit any element whose key is set to the empty +// string. +func NewConsoleEncoder(cfg EncoderConfig) Encoder { + return consoleEncoder{newJSONEncoder(cfg, true)} +} + +func (c consoleEncoder) Clone() Encoder { + return consoleEncoder{c.jsonEncoder.Clone().(*jsonEncoder)} +} + +func (c consoleEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer, error) { + line := bufferpool.Get() + + // We don't want the entry's metadata to be quoted and escaped (if it's + // encoded as strings), which means that we can't use the JSON encoder. The + // simplest option is to use the memory encoder and fmt.Fprint. + // + // If this ever becomes a performance bottleneck, we can implement + // ArrayEncoder for our plain-text format. + arr := getSliceEncoder() + if c.TimeKey != "" && c.EncodeTime != nil { + c.EncodeTime(ent.Time, arr) + } + if c.LevelKey != "" && c.EncodeLevel != nil { + c.EncodeLevel(ent.Level, arr) + } + if ent.LoggerName != "" && c.NameKey != "" { + nameEncoder := c.EncodeName + + if nameEncoder == nil { + // Fall back to FullNameEncoder for backward compatibility. + nameEncoder = FullNameEncoder + } + + nameEncoder(ent.LoggerName, arr) + } + if ent.Caller.Defined && c.CallerKey != "" && c.EncodeCaller != nil { + c.EncodeCaller(ent.Caller, arr) + } + for i := range arr.elems { + if i > 0 { + line.AppendByte('\t') + } + fmt.Fprint(line, arr.elems[i]) + } + putSliceEncoder(arr) + + // Add the message itself. + if c.MessageKey != "" { + c.addTabIfNecessary(line) + line.AppendString(ent.Message) + } + + // Add any structured context. + c.writeContext(line, fields) + + // If there's no stacktrace key, honor that; this allows users to force + // single-line output. 
+ if ent.Stack != "" && c.StacktraceKey != "" { + line.AppendByte('\n') + line.AppendString(ent.Stack) + } + + if c.LineEnding != "" { + line.AppendString(c.LineEnding) + } else { + line.AppendString(DefaultLineEnding) + } + return line, nil +} + +func (c consoleEncoder) writeContext(line *buffer.Buffer, extra []Field) { + context := c.jsonEncoder.Clone().(*jsonEncoder) + defer context.buf.Free() + + addFields(context, extra) + context.closeOpenNamespaces() + if context.buf.Len() == 0 { + return + } + + c.addTabIfNecessary(line) + line.AppendByte('{') + line.Write(context.buf.Bytes()) + line.AppendByte('}') +} + +func (c consoleEncoder) addTabIfNecessary(line *buffer.Buffer) { + if line.Len() > 0 { + line.AppendByte('\t') + } +} diff --git a/vendor/go.uber.org/zap/zapcore/core.go b/vendor/go.uber.org/zap/zapcore/core.go new file mode 100644 index 000000000..a1ef8b034 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/core.go @@ -0,0 +1,113 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +// Core is a minimal, fast logger interface. It's designed for library authors +// to wrap in a more user-friendly API. +type Core interface { + LevelEnabler + + // With adds structured context to the Core. + With([]Field) Core + // Check determines whether the supplied Entry should be logged (using the + // embedded LevelEnabler and possibly some extra logic). If the entry + // should be logged, the Core adds itself to the CheckedEntry and returns + // the result. + // + // Callers must use Check before calling Write. + Check(Entry, *CheckedEntry) *CheckedEntry + // Write serializes the Entry and any Fields supplied at the log site and + // writes them to their destination. + // + // If called, Write should always log the Entry and Fields; it should not + // replicate the logic of Check. + Write(Entry, []Field) error + // Sync flushes buffered logs (if any). + Sync() error +} + +type nopCore struct{} + +// NewNopCore returns a no-op Core. +func NewNopCore() Core { return nopCore{} } +func (nopCore) Enabled(Level) bool { return false } +func (n nopCore) With([]Field) Core { return n } +func (nopCore) Check(_ Entry, ce *CheckedEntry) *CheckedEntry { return ce } +func (nopCore) Write(Entry, []Field) error { return nil } +func (nopCore) Sync() error { return nil } + +// NewCore creates a Core that writes logs to a WriteSyncer. 
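Before the constructor below, a sketch of composing Cores by hand; the tee arrangement uses zapcore.NewTee, which lives elsewhere in this package and is an assumption of the example rather than part of this hunk.

package main

import (
	"os"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	enc := zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig())

	// Two ioCores behind one logger: everything to stdout, errors and above
	// also to stderr.
	core := zapcore.NewTee(
		zapcore.NewCore(enc, zapcore.Lock(os.Stdout), zapcore.DebugLevel),
		zapcore.NewCore(enc.Clone(), zapcore.Lock(os.Stderr), zapcore.ErrorLevel),
	)
	logger := zap.New(core)
	logger.Error("written to both stdout and stderr")
}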
+func NewCore(enc Encoder, ws WriteSyncer, enab LevelEnabler) Core { + return &ioCore{ + LevelEnabler: enab, + enc: enc, + out: ws, + } +} + +type ioCore struct { + LevelEnabler + enc Encoder + out WriteSyncer +} + +func (c *ioCore) With(fields []Field) Core { + clone := c.clone() + addFields(clone.enc, fields) + return clone +} + +func (c *ioCore) Check(ent Entry, ce *CheckedEntry) *CheckedEntry { + if c.Enabled(ent.Level) { + return ce.AddCore(ent, c) + } + return ce +} + +func (c *ioCore) Write(ent Entry, fields []Field) error { + buf, err := c.enc.EncodeEntry(ent, fields) + if err != nil { + return err + } + _, err = c.out.Write(buf.Bytes()) + buf.Free() + if err != nil { + return err + } + if ent.Level > ErrorLevel { + // Since we may be crashing the program, sync the output. Ignore Sync + // errors, pending a clean solution to issue #370. + c.Sync() + } + return nil +} + +func (c *ioCore) Sync() error { + return c.out.Sync() +} + +func (c *ioCore) clone() *ioCore { + return &ioCore{ + LevelEnabler: c.LevelEnabler, + enc: c.enc.Clone(), + out: c.out, + } +} diff --git a/vendor/go.uber.org/zap/zapcore/doc.go b/vendor/go.uber.org/zap/zapcore/doc.go new file mode 100644 index 000000000..31000e91f --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/doc.go @@ -0,0 +1,24 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package zapcore defines and implements the low-level interfaces upon which +// zap is built. By providing alternate implementations of these interfaces, +// external packages can extend zap's capabilities. +package zapcore // import "go.uber.org/zap/zapcore" diff --git a/vendor/go.uber.org/zap/zapcore/encoder.go b/vendor/go.uber.org/zap/zapcore/encoder.go new file mode 100644 index 000000000..f0509522b --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/encoder.go @@ -0,0 +1,348 @@ +// Copyright (c) 2016 Uber Technologies, Inc. 
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import (
+	"time"
+
+	"go.uber.org/zap/buffer"
+)
+
+// DefaultLineEnding defines the default line ending when writing logs.
+// Alternate line endings specified in EncoderConfig can override this
+// behavior.
+const DefaultLineEnding = "\n"
+
+// A LevelEncoder serializes a Level to a primitive type.
+type LevelEncoder func(Level, PrimitiveArrayEncoder)
+
+// LowercaseLevelEncoder serializes a Level to a lowercase string. For example,
+// InfoLevel is serialized to "info".
+func LowercaseLevelEncoder(l Level, enc PrimitiveArrayEncoder) {
+	enc.AppendString(l.String())
+}
+
+// LowercaseColorLevelEncoder serializes a Level to a lowercase string and adds coloring.
+// For example, InfoLevel is serialized to "info" and colored blue.
+func LowercaseColorLevelEncoder(l Level, enc PrimitiveArrayEncoder) {
+	s, ok := _levelToLowercaseColorString[l]
+	if !ok {
+		s = _unknownLevelColor.Add(l.String())
+	}
+	enc.AppendString(s)
+}
+
+// CapitalLevelEncoder serializes a Level to an all-caps string. For example,
+// InfoLevel is serialized to "INFO".
+func CapitalLevelEncoder(l Level, enc PrimitiveArrayEncoder) {
+	enc.AppendString(l.CapitalString())
+}
+
+// CapitalColorLevelEncoder serializes a Level to an all-caps string and adds color.
+// For example, InfoLevel is serialized to "INFO" and colored blue.
+func CapitalColorLevelEncoder(l Level, enc PrimitiveArrayEncoder) {
+	s, ok := _levelToCapitalColorString[l]
+	if !ok {
+		s = _unknownLevelColor.Add(l.CapitalString())
+	}
+	enc.AppendString(s)
+}
+
+// UnmarshalText unmarshals text to a LevelEncoder. "capital" is unmarshaled to
+// CapitalLevelEncoder, "capitalColor" is unmarshaled to CapitalColorLevelEncoder,
+// "color" is unmarshaled to LowercaseColorLevelEncoder, and anything else
+// is unmarshaled to LowercaseLevelEncoder.
+func (e *LevelEncoder) UnmarshalText(text []byte) error {
+	switch string(text) {
+	case "capital":
+		*e = CapitalLevelEncoder
+	case "capitalColor":
+		*e = CapitalColorLevelEncoder
+	case "color":
+		*e = LowercaseColorLevelEncoder
+	default:
+		*e = LowercaseLevelEncoder
+	}
+	return nil
+}
+
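These UnmarshalText hooks are what let a serialized zap.Config select encoders by name. A runnable sketch, with the JSON literal assumed for illustration:

package main

import (
	"encoding/json"

	"go.uber.org/zap"
)

func main() {
	raw := []byte(`{
		"level": "info",
		"encoding": "json",
		"outputPaths": ["stdout"],
		"encoderConfig": {
			"messageKey": "msg",
			"levelKey": "level",
			"levelEncoder": "capitalColor"
		}
	}`)

	var cfg zap.Config
	if err := json.Unmarshal(raw, &cfg); err != nil {
		panic(err)
	}
	logger, err := cfg.Build()
	if err != nil {
		panic(err)
	}
	// "capitalColor" selected CapitalColorLevelEncoder above.
	logger.Info("colored, capitalized level")
}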
+// A TimeEncoder serializes a time.Time to a primitive type.
+type TimeEncoder func(time.Time, PrimitiveArrayEncoder)
+
+// EpochTimeEncoder serializes a time.Time to a floating-point number of seconds
+// since the Unix epoch.
+func EpochTimeEncoder(t time.Time, enc PrimitiveArrayEncoder) {
+	nanos := t.UnixNano()
+	sec := float64(nanos) / float64(time.Second)
+	enc.AppendFloat64(sec)
+}
+
+// EpochMillisTimeEncoder serializes a time.Time to a floating-point number of
+// milliseconds since the Unix epoch.
+func EpochMillisTimeEncoder(t time.Time, enc PrimitiveArrayEncoder) {
+	nanos := t.UnixNano()
+	millis := float64(nanos) / float64(time.Millisecond)
+	enc.AppendFloat64(millis)
+}
+
+// EpochNanosTimeEncoder serializes a time.Time to an integer number of
+// nanoseconds since the Unix epoch.
+func EpochNanosTimeEncoder(t time.Time, enc PrimitiveArrayEncoder) {
+	enc.AppendInt64(t.UnixNano())
+}
+
+// ISO8601TimeEncoder serializes a time.Time to an ISO8601-formatted string
+// with millisecond precision.
+func ISO8601TimeEncoder(t time.Time, enc PrimitiveArrayEncoder) {
+	enc.AppendString(t.Format("2006-01-02T15:04:05.000Z0700"))
+}
+
+// UnmarshalText unmarshals text to a TimeEncoder. "iso8601" and "ISO8601" are
+// unmarshaled to ISO8601TimeEncoder, "millis" is unmarshaled to
+// EpochMillisTimeEncoder, "nanos" is unmarshaled to EpochNanosTimeEncoder, and
+// anything else is unmarshaled to EpochTimeEncoder.
+func (e *TimeEncoder) UnmarshalText(text []byte) error {
+	switch string(text) {
+	case "iso8601", "ISO8601":
+		*e = ISO8601TimeEncoder
+	case "millis":
+		*e = EpochMillisTimeEncoder
+	case "nanos":
+		*e = EpochNanosTimeEncoder
+	default:
+		*e = EpochTimeEncoder
+	}
+	return nil
+}
+
+// A DurationEncoder serializes a time.Duration to a primitive type.
+type DurationEncoder func(time.Duration, PrimitiveArrayEncoder)
+
+// SecondsDurationEncoder serializes a time.Duration to a floating-point number of seconds elapsed.
+func SecondsDurationEncoder(d time.Duration, enc PrimitiveArrayEncoder) {
+	enc.AppendFloat64(float64(d) / float64(time.Second))
+}
+
+// NanosDurationEncoder serializes a time.Duration to an integer number of
+// nanoseconds elapsed.
+func NanosDurationEncoder(d time.Duration, enc PrimitiveArrayEncoder) {
+	enc.AppendInt64(int64(d))
+}
+
+// StringDurationEncoder serializes a time.Duration using its built-in String
+// method.
+func StringDurationEncoder(d time.Duration, enc PrimitiveArrayEncoder) {
+	enc.AppendString(d.String())
+}
+
+// UnmarshalText unmarshals text to a DurationEncoder. "string" is unmarshaled
+// to StringDurationEncoder, "nanos" is unmarshaled to NanosDurationEncoder,
+// and anything else is unmarshaled to SecondsDurationEncoder.
+func (e *DurationEncoder) UnmarshalText(text []byte) error {
+	switch string(text) {
+	case "string":
+		*e = StringDurationEncoder
+	case "nanos":
+		*e = NanosDurationEncoder
+	default:
+		*e = SecondsDurationEncoder
+	}
+	return nil
+}
+
+// A CallerEncoder serializes an EntryCaller to a primitive type.
+type CallerEncoder func(EntryCaller, PrimitiveArrayEncoder)
+
+// FullCallerEncoder serializes a caller in /full/path/to/package/file:line
+// format.
+func FullCallerEncoder(caller EntryCaller, enc PrimitiveArrayEncoder) {
+	// TODO: consider using a byte-oriented API to save an allocation.
+	enc.AppendString(caller.String())
+}
+
+// ShortCallerEncoder serializes a caller in package/file:line format, trimming
+// all but the final directory from the full path.
+func ShortCallerEncoder(caller EntryCaller, enc PrimitiveArrayEncoder) {
+	// TODO: consider using a byte-oriented API to save an allocation.
+	enc.AppendString(caller.TrimmedPath())
+}
+
+// UnmarshalText unmarshals text to a CallerEncoder. "full" is unmarshaled to
+// FullCallerEncoder and anything else is unmarshaled to ShortCallerEncoder.
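The same selections can be made programmatically by assigning the encoder functions above into an EncoderConfig; a short sketch under standard zap usage:

package main

import (
	"os"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	encCfg := zap.NewProductionEncoderConfig()
	encCfg.EncodeTime = zapcore.ISO8601TimeEncoder        // e.g. 2019-07-17T09:34:13.000-0400
	encCfg.EncodeDuration = zapcore.StringDurationEncoder // e.g. "1.5s" rather than nanos
	encCfg.EncodeCaller = zapcore.ShortCallerEncoder      // e.g. pkg/file.go:42

	core := zapcore.NewCore(zapcore.NewConsoleEncoder(encCfg), zapcore.Lock(os.Stdout), zap.InfoLevel)
	logger := zap.New(core, zap.AddCaller())
	logger.Info("console output with custom encoders")
}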
+func (e *CallerEncoder) UnmarshalText(text []byte) error { + switch string(text) { + case "full": + *e = FullCallerEncoder + default: + *e = ShortCallerEncoder + } + return nil +} + +// A NameEncoder serializes a period-separated logger name to a primitive +// type. +type NameEncoder func(string, PrimitiveArrayEncoder) + +// FullNameEncoder serializes the logger name as-is. +func FullNameEncoder(loggerName string, enc PrimitiveArrayEncoder) { + enc.AppendString(loggerName) +} + +// UnmarshalText unmarshals text to a NameEncoder. Currently, everything is +// unmarshaled to FullNameEncoder. +func (e *NameEncoder) UnmarshalText(text []byte) error { + switch string(text) { + case "full": + *e = FullNameEncoder + default: + *e = FullNameEncoder + } + return nil +} + +// An EncoderConfig allows users to configure the concrete encoders supplied by +// zapcore. +type EncoderConfig struct { + // Set the keys used for each log entry. If any key is empty, that portion + // of the entry is omitted. + MessageKey string `json:"messageKey" yaml:"messageKey"` + LevelKey string `json:"levelKey" yaml:"levelKey"` + TimeKey string `json:"timeKey" yaml:"timeKey"` + NameKey string `json:"nameKey" yaml:"nameKey"` + CallerKey string `json:"callerKey" yaml:"callerKey"` + StacktraceKey string `json:"stacktraceKey" yaml:"stacktraceKey"` + LineEnding string `json:"lineEnding" yaml:"lineEnding"` + // Configure the primitive representations of common complex types. For + // example, some users may want all time.Times serialized as floating-point + // seconds since epoch, while others may prefer ISO8601 strings. + EncodeLevel LevelEncoder `json:"levelEncoder" yaml:"levelEncoder"` + EncodeTime TimeEncoder `json:"timeEncoder" yaml:"timeEncoder"` + EncodeDuration DurationEncoder `json:"durationEncoder" yaml:"durationEncoder"` + EncodeCaller CallerEncoder `json:"callerEncoder" yaml:"callerEncoder"` + // Unlike the other primitive type encoders, EncodeName is optional. The + // zero value falls back to FullNameEncoder. + EncodeName NameEncoder `json:"nameEncoder" yaml:"nameEncoder"` +} + +// ObjectEncoder is a strongly-typed, encoding-agnostic interface for adding a +// map- or struct-like object to the logging context. Like maps, ObjectEncoders +// aren't safe for concurrent use (though typical use shouldn't require locks). +type ObjectEncoder interface { + // Logging-specific marshalers. + AddArray(key string, marshaler ArrayMarshaler) error + AddObject(key string, marshaler ObjectMarshaler) error + + // Built-in types. + AddBinary(key string, value []byte) // for arbitrary bytes + AddByteString(key string, value []byte) // for UTF-8 encoded bytes + AddBool(key string, value bool) + AddComplex128(key string, value complex128) + AddComplex64(key string, value complex64) + AddDuration(key string, value time.Duration) + AddFloat64(key string, value float64) + AddFloat32(key string, value float32) + AddInt(key string, value int) + AddInt64(key string, value int64) + AddInt32(key string, value int32) + AddInt16(key string, value int16) + AddInt8(key string, value int8) + AddString(key, value string) + AddTime(key string, value time.Time) + AddUint(key string, value uint) + AddUint64(key string, value uint64) + AddUint32(key string, value uint32) + AddUint16(key string, value uint16) + AddUint8(key string, value uint8) + AddUintptr(key string, value uintptr) + + // AddReflected uses reflection to serialize arbitrary objects, so it's slow + // and allocation-heavy. 
+ AddReflected(key string, value interface{}) error + // OpenNamespace opens an isolated namespace where all subsequent fields will + // be added. Applications can use namespaces to prevent key collisions when + // injecting loggers into sub-components or third-party libraries. + OpenNamespace(key string) +} + +// ArrayEncoder is a strongly-typed, encoding-agnostic interface for adding +// array-like objects to the logging context. Of note, it supports mixed-type +// arrays even though they aren't typical in Go. Like slices, ArrayEncoders +// aren't safe for concurrent use (though typical use shouldn't require locks). +type ArrayEncoder interface { + // Built-in types. + PrimitiveArrayEncoder + + // Time-related types. + AppendDuration(time.Duration) + AppendTime(time.Time) + + // Logging-specific marshalers. + AppendArray(ArrayMarshaler) error + AppendObject(ObjectMarshaler) error + + // AppendReflected uses reflection to serialize arbitrary objects, so it's + // slow and allocation-heavy. + AppendReflected(value interface{}) error +} + +// PrimitiveArrayEncoder is the subset of the ArrayEncoder interface that deals +// only in Go's built-in types. It's included only so that Duration- and +// TimeEncoders cannot trigger infinite recursion. +type PrimitiveArrayEncoder interface { + // Built-in types. + AppendBool(bool) + AppendByteString([]byte) // for UTF-8 encoded bytes + AppendComplex128(complex128) + AppendComplex64(complex64) + AppendFloat64(float64) + AppendFloat32(float32) + AppendInt(int) + AppendInt64(int64) + AppendInt32(int32) + AppendInt16(int16) + AppendInt8(int8) + AppendString(string) + AppendUint(uint) + AppendUint64(uint64) + AppendUint32(uint32) + AppendUint16(uint16) + AppendUint8(uint8) + AppendUintptr(uintptr) +} + +// Encoder is a format-agnostic interface for all log entry marshalers. Since +// log encoders don't need to support the same wide range of use cases as +// general-purpose marshalers, it's possible to make them faster and +// lower-allocation. +// +// Implementations of the ObjectEncoder interface's methods can, of course, +// freely modify the receiver. However, the Clone and EncodeEntry methods will +// be called concurrently and shouldn't modify the receiver. +type Encoder interface { + ObjectEncoder + + // Clone copies the encoder, ensuring that adding fields to the copy doesn't + // affect the original. + Clone() Encoder + + // EncodeEntry encodes an entry and fields, along with any accumulated + // context, into a byte buffer and returns it. + EncodeEntry(Entry, []Field) (*buffer.Buffer, error) +} diff --git a/vendor/go.uber.org/zap/zapcore/entry.go b/vendor/go.uber.org/zap/zapcore/entry.go new file mode 100644 index 000000000..7d9893f33 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/entry.go @@ -0,0 +1,257 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
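User-defined types plug into the ObjectEncoder contract above by implementing zapcore.ObjectMarshaler; a sketch in which the User type is hypothetical:

package main

import (
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

type User struct {
	Name string
	Age  int
}

// MarshalLogObject writes User through the ObjectEncoder interface, letting
// zap.Object encode it without reflection.
func (u User) MarshalLogObject(enc zapcore.ObjectEncoder) error {
	enc.AddString("name", u.Name)
	enc.AddInt("age", u.Age)
	return nil
}

func main() {
	logger := zap.NewExample()
	logger.Info("new signup", zap.Object("user", User{Name: "alice", Age: 30}))
}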
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import ( + "fmt" + "strings" + "sync" + "time" + + "go.uber.org/zap/internal/bufferpool" + "go.uber.org/zap/internal/exit" + + "go.uber.org/multierr" +) + +var ( + _cePool = sync.Pool{New: func() interface{} { + // Pre-allocate some space for cores. + return &CheckedEntry{ + cores: make([]Core, 4), + } + }} +) + +func getCheckedEntry() *CheckedEntry { + ce := _cePool.Get().(*CheckedEntry) + ce.reset() + return ce +} + +func putCheckedEntry(ce *CheckedEntry) { + if ce == nil { + return + } + _cePool.Put(ce) +} + +// NewEntryCaller makes an EntryCaller from the return signature of +// runtime.Caller. +func NewEntryCaller(pc uintptr, file string, line int, ok bool) EntryCaller { + if !ok { + return EntryCaller{} + } + return EntryCaller{ + PC: pc, + File: file, + Line: line, + Defined: true, + } +} + +// EntryCaller represents the caller of a logging function. +type EntryCaller struct { + Defined bool + PC uintptr + File string + Line int +} + +// String returns the full path and line number of the caller. +func (ec EntryCaller) String() string { + return ec.FullPath() +} + +// FullPath returns a /full/path/to/package/file:line description of the +// caller. +func (ec EntryCaller) FullPath() string { + if !ec.Defined { + return "undefined" + } + buf := bufferpool.Get() + buf.AppendString(ec.File) + buf.AppendByte(':') + buf.AppendInt(int64(ec.Line)) + caller := buf.String() + buf.Free() + return caller +} + +// TrimmedPath returns a package/file:line description of the caller, +// preserving only the leaf directory name and file name. +func (ec EntryCaller) TrimmedPath() string { + if !ec.Defined { + return "undefined" + } + // nb. To make sure we trim the path correctly on Windows too, we + // counter-intuitively need to use '/' and *not* os.PathSeparator here, + // because the path given originates from Go stdlib, specifically + // runtime.Caller() which (as of Mar/17) returns forward slashes even on + // Windows. + // + // See https://github.com/golang/go/issues/3335 + // and https://github.com/golang/go/issues/18151 + // + // for discussion on the issue on Go side. + // + // Find the last separator. + // + idx := strings.LastIndexByte(ec.File, '/') + if idx == -1 { + return ec.FullPath() + } + // Find the penultimate separator. + idx = strings.LastIndexByte(ec.File[:idx], '/') + if idx == -1 { + return ec.FullPath() + } + buf := bufferpool.Get() + // Keep everything after the penultimate separator. + buf.AppendString(ec.File[idx+1:]) + buf.AppendByte(':') + buf.AppendInt(int64(ec.Line)) + caller := buf.String() + buf.Free() + return caller +} + +// An Entry represents a complete log message. The entry's structured context +// is already serialized, but the log level, time, message, and call site +// information are available for inspection and modification. +// +// Entries are pooled, so any functions that accept them MUST be careful not to +// retain references to them. 
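The pooling contract just described is enforced by the CheckedEntry machinery later in this file. A hedged sketch of a Core that participates in it via AddCore; countingCore is hypothetical, and a production wrapper would also need to forward With so the wrapper survives child loggers.

package main

import (
	"sync/atomic"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

// countingCore counts entries that pass the level check. It embeds another
// Core for With, Write, and Sync, and overrides only Check, registering
// itself on the CheckedEntry with AddCore.
type countingCore struct {
	zapcore.Core
	hits int64
}

func (c *countingCore) Check(ent zapcore.Entry, ce *zapcore.CheckedEntry) *zapcore.CheckedEntry {
	if c.Enabled(ent.Level) {
		atomic.AddInt64(&c.hits, 1)
		return ce.AddCore(ent, c)
	}
	return ce
}

func main() {
	core := &countingCore{Core: zap.NewExample().Core()}
	logger := zap.New(core)
	logger.Info("one")
	logger.Info("two")
	_ = atomic.LoadInt64(&core.hits) // 2
}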
+type Entry struct { + Level Level + Time time.Time + LoggerName string + Message string + Caller EntryCaller + Stack string +} + +// CheckWriteAction indicates what action to take after a log entry is +// processed. Actions are ordered in increasing severity. +type CheckWriteAction uint8 + +const ( + // WriteThenNoop indicates that nothing special needs to be done. It's the + // default behavior. + WriteThenNoop CheckWriteAction = iota + // WriteThenPanic causes a panic after Write. + WriteThenPanic + // WriteThenFatal causes a fatal os.Exit after Write. + WriteThenFatal +) + +// CheckedEntry is an Entry together with a collection of Cores that have +// already agreed to log it. +// +// CheckedEntry references should be created by calling AddCore or Should on a +// nil *CheckedEntry. References are returned to a pool after Write, and MUST +// NOT be retained after calling their Write method. +type CheckedEntry struct { + Entry + ErrorOutput WriteSyncer + dirty bool // best-effort detection of pool misuse + should CheckWriteAction + cores []Core +} + +func (ce *CheckedEntry) reset() { + ce.Entry = Entry{} + ce.ErrorOutput = nil + ce.dirty = false + ce.should = WriteThenNoop + for i := range ce.cores { + // don't keep references to cores + ce.cores[i] = nil + } + ce.cores = ce.cores[:0] +} + +// Write writes the entry to the stored Cores, returns any errors, and returns +// the CheckedEntry reference to a pool for immediate re-use. Finally, it +// executes any required CheckWriteAction. +func (ce *CheckedEntry) Write(fields ...Field) { + if ce == nil { + return + } + + if ce.dirty { + if ce.ErrorOutput != nil { + // Make a best effort to detect unsafe re-use of this CheckedEntry. + // If the entry is dirty, log an internal error; because the + // CheckedEntry is being used after it was returned to the pool, + // the message may be an amalgamation from multiple call sites. + fmt.Fprintf(ce.ErrorOutput, "%v Unsafe CheckedEntry re-use near Entry %+v.\n", time.Now(), ce.Entry) + ce.ErrorOutput.Sync() + } + return + } + ce.dirty = true + + var err error + for i := range ce.cores { + err = multierr.Append(err, ce.cores[i].Write(ce.Entry, fields)) + } + if ce.ErrorOutput != nil { + if err != nil { + fmt.Fprintf(ce.ErrorOutput, "%v write error: %v\n", time.Now(), err) + ce.ErrorOutput.Sync() + } + } + + should, msg := ce.should, ce.Message + putCheckedEntry(ce) + + switch should { + case WriteThenPanic: + panic(msg) + case WriteThenFatal: + exit.Exit() + } +} + +// AddCore adds a Core that has agreed to log this CheckedEntry. It's intended to be +// used by Core.Check implementations, and is safe to call on nil CheckedEntry +// references. +func (ce *CheckedEntry) AddCore(ent Entry, core Core) *CheckedEntry { + if ce == nil { + ce = getCheckedEntry() + ce.Entry = ent + } + ce.cores = append(ce.cores, core) + return ce +} + +// Should sets this CheckedEntry's CheckWriteAction, which controls whether a +// Core will panic or fatal after writing this log entry. Like AddCore, it's +// safe to call on nil CheckedEntry references. +func (ce *CheckedEntry) Should(ent Entry, should CheckWriteAction) *CheckedEntry { + if ce == nil { + ce = getCheckedEntry() + ce.Entry = ent + } + ce.should = should + return ce +} diff --git a/vendor/go.uber.org/zap/zapcore/error.go b/vendor/go.uber.org/zap/zapcore/error.go new file mode 100644 index 000000000..a67c7bacc --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/error.go @@ -0,0 +1,120 @@ +// Copyright (c) 2017 Uber Technologies, Inc. 
diff --git a/vendor/go.uber.org/zap/zapcore/error.go b/vendor/go.uber.org/zap/zapcore/error.go
new file mode 100644
index 000000000..a67c7bacc
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/error.go
@@ -0,0 +1,120 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import (
+	"fmt"
+	"sync"
+)
+
+// Encodes the given error into fields of an object. A field with the given
+// name is added for the error message.
+//
+// If the error implements fmt.Formatter, a field with the name ${key}Verbose
+// is also added with the full verbose error message.
+//
+// Finally, if the error implements errorGroup (from go.uber.org/multierr) or
+// causer (from github.com/pkg/errors), a ${key}Causes field is added with an
+// array of objects containing the errors this error was comprised of.
+//
+//  {
+//    "error": err.Error(),
+//    "errorVerbose": fmt.Sprintf("%+v", err),
+//    "errorCauses": [
+//      ...
+//    ],
+//  }
+func encodeError(key string, err error, enc ObjectEncoder) error {
+	basic := err.Error()
+	enc.AddString(key, basic)
+
+	switch e := err.(type) {
+	case errorGroup:
+		return enc.AddArray(key+"Causes", errArray(e.Errors()))
+	case fmt.Formatter:
+		verbose := fmt.Sprintf("%+v", e)
+		if verbose != basic {
+			// This is a rich error type, like those produced by
+			// github.com/pkg/errors.
+			enc.AddString(key+"Verbose", verbose)
+		}
+	}
+	return nil
+}
+
+type errorGroup interface {
+	// Provides read-only access to the underlying list of errors, preferably
+	// without causing any allocs.
+	Errors() []error
+}
+
+type causer interface {
+	// Provides access to the error that caused this error.
+	Cause() error
+}
+
+// Note that errArray and errArrayElem are very similar to the versions
+// implemented in the top-level error.go file. We can't re-use those because
+// that would require exporting errArray as part of the zapcore API.
+
+// Encodes a list of errors using the standard error encoding logic.
+type errArray []error
+
+func (errs errArray) MarshalLogArray(arr ArrayEncoder) error {
+	for i := range errs {
+		if errs[i] == nil {
+			continue
+		}
+
+		el := newErrArrayElem(errs[i])
+		arr.AppendObject(el)
+		el.Free()
+	}
+	return nil
+}
+
+var _errArrayElemPool = sync.Pool{New: func() interface{} {
+	return &errArrayElem{}
+}}
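This is the machinery behind zap's Error field. A sketch of the resulting output shape, assuming the top-level zap package and go.uber.org/multierr, whose combined errors implement the errorGroup interface above; the exact key order is encoder-dependent:

package main

import (
	"errors"

	"go.uber.org/multierr"
	"go.uber.org/zap"
)

func main() {
	logger := zap.NewExample()
	defer logger.Sync()

	err := multierr.Combine(
		errors.New("connect timeout"),
		errors.New("dial refused"),
	)

	// Emits roughly:
	// {"level":"error","msg":"request failed",
	//  "error":"connect timeout; dial refused",
	//  "errorCauses":[{"error":"connect timeout"},{"error":"dial refused"}]}
	logger.Error("request failed", zap.Error(err))
}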
+// Encodes any error into a {"error": ...} object, re-using the same
+// error-encoding logic.
+//
+// May be passed in place of an array to build a single-element array.
+type errArrayElem struct{ err error }
+
+func newErrArrayElem(err error) *errArrayElem {
+	e := _errArrayElemPool.Get().(*errArrayElem)
+	e.err = err
+	return e
+}
+
+func (e *errArrayElem) MarshalLogArray(arr ArrayEncoder) error {
+	return arr.AppendObject(e)
+}
+
+func (e *errArrayElem) MarshalLogObject(enc ObjectEncoder) error {
+	return encodeError("error", e.err, enc)
+}
+
+func (e *errArrayElem) Free() {
+	e.err = nil
+	_errArrayElemPool.Put(e)
+}
diff --git a/vendor/go.uber.org/zap/zapcore/field.go b/vendor/go.uber.org/zap/zapcore/field.go
new file mode 100644
index 000000000..6a5e33e2f
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/field.go
@@ -0,0 +1,201 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import (
+	"bytes"
+	"fmt"
+	"math"
+	"reflect"
+	"time"
+)
+
+// A FieldType indicates which member of the Field union struct should be used
+// and how it should be serialized.
+type FieldType uint8
+
+const (
+	// UnknownType is the default field type. Attempting to add it to an encoder will panic.
+	UnknownType FieldType = iota
+	// ArrayMarshalerType indicates that the field carries an ArrayMarshaler.
+	ArrayMarshalerType
+	// ObjectMarshalerType indicates that the field carries an ObjectMarshaler.
+	ObjectMarshalerType
+	// BinaryType indicates that the field carries an opaque binary blob.
+	BinaryType
+	// BoolType indicates that the field carries a bool.
+	BoolType
+	// ByteStringType indicates that the field carries UTF-8 encoded bytes.
+	ByteStringType
+	// Complex128Type indicates that the field carries a complex128.
+	Complex128Type
+	// Complex64Type indicates that the field carries a complex64.
+	Complex64Type
+	// DurationType indicates that the field carries a time.Duration.
+	DurationType
+	// Float64Type indicates that the field carries a float64.
+	Float64Type
+	// Float32Type indicates that the field carries a float32.
+	Float32Type
+	// Int64Type indicates that the field carries an int64.
+	Int64Type
+	// Int32Type indicates that the field carries an int32.
+	Int32Type
+	// Int16Type indicates that the field carries an int16.
+	Int16Type
+	// Int8Type indicates that the field carries an int8.
+	Int8Type
+	// StringType indicates that the field carries a string.
+	StringType
+	// TimeType indicates that the field carries a time.Time.
+	TimeType
+	// Uint64Type indicates that the field carries a uint64.
+ Uint64Type + // Uint32Type indicates that the field carries a uint32. + Uint32Type + // Uint16Type indicates that the field carries a uint16. + Uint16Type + // Uint8Type indicates that the field carries a uint8. + Uint8Type + // UintptrType indicates that the field carries a uintptr. + UintptrType + // ReflectType indicates that the field carries an interface{}, which should + // be serialized using reflection. + ReflectType + // NamespaceType signals the beginning of an isolated namespace. All + // subsequent fields should be added to the new namespace. + NamespaceType + // StringerType indicates that the field carries a fmt.Stringer. + StringerType + // ErrorType indicates that the field carries an error. + ErrorType + // SkipType indicates that the field is a no-op. + SkipType +) + +// A Field is a marshaling operation used to add a key-value pair to a logger's +// context. Most fields are lazily marshaled, so it's inexpensive to add fields +// to disabled debug-level log statements. +type Field struct { + Key string + Type FieldType + Integer int64 + String string + Interface interface{} +} + +// AddTo exports a field through the ObjectEncoder interface. It's primarily +// useful to library authors, and shouldn't be necessary in most applications. +func (f Field) AddTo(enc ObjectEncoder) { + var err error + + switch f.Type { + case ArrayMarshalerType: + err = enc.AddArray(f.Key, f.Interface.(ArrayMarshaler)) + case ObjectMarshalerType: + err = enc.AddObject(f.Key, f.Interface.(ObjectMarshaler)) + case BinaryType: + enc.AddBinary(f.Key, f.Interface.([]byte)) + case BoolType: + enc.AddBool(f.Key, f.Integer == 1) + case ByteStringType: + enc.AddByteString(f.Key, f.Interface.([]byte)) + case Complex128Type: + enc.AddComplex128(f.Key, f.Interface.(complex128)) + case Complex64Type: + enc.AddComplex64(f.Key, f.Interface.(complex64)) + case DurationType: + enc.AddDuration(f.Key, time.Duration(f.Integer)) + case Float64Type: + enc.AddFloat64(f.Key, math.Float64frombits(uint64(f.Integer))) + case Float32Type: + enc.AddFloat32(f.Key, math.Float32frombits(uint32(f.Integer))) + case Int64Type: + enc.AddInt64(f.Key, f.Integer) + case Int32Type: + enc.AddInt32(f.Key, int32(f.Integer)) + case Int16Type: + enc.AddInt16(f.Key, int16(f.Integer)) + case Int8Type: + enc.AddInt8(f.Key, int8(f.Integer)) + case StringType: + enc.AddString(f.Key, f.String) + case TimeType: + if f.Interface != nil { + enc.AddTime(f.Key, time.Unix(0, f.Integer).In(f.Interface.(*time.Location))) + } else { + // Fall back to UTC if location is nil. + enc.AddTime(f.Key, time.Unix(0, f.Integer)) + } + case Uint64Type: + enc.AddUint64(f.Key, uint64(f.Integer)) + case Uint32Type: + enc.AddUint32(f.Key, uint32(f.Integer)) + case Uint16Type: + enc.AddUint16(f.Key, uint16(f.Integer)) + case Uint8Type: + enc.AddUint8(f.Key, uint8(f.Integer)) + case UintptrType: + enc.AddUintptr(f.Key, uintptr(f.Integer)) + case ReflectType: + err = enc.AddReflected(f.Key, f.Interface) + case NamespaceType: + enc.OpenNamespace(f.Key) + case StringerType: + enc.AddString(f.Key, f.Interface.(fmt.Stringer).String()) + case ErrorType: + encodeError(f.Key, f.Interface.(error), enc) + case SkipType: + break + default: + panic(fmt.Sprintf("unknown field type: %v", f)) + } + + if err != nil { + enc.AddString(fmt.Sprintf("%sError", f.Key), err.Error()) + } +} + +// Equals returns whether two fields are equal. For non-primitive types such as +// errors, marshalers, or reflect types, it uses reflect.DeepEqual. 
+func (f Field) Equals(other Field) bool {
+	if f.Type != other.Type {
+		return false
+	}
+	if f.Key != other.Key {
+		return false
+	}
+
+	switch f.Type {
+	case BinaryType, ByteStringType:
+		return bytes.Equal(f.Interface.([]byte), other.Interface.([]byte))
+	case ArrayMarshalerType, ObjectMarshalerType, ErrorType, ReflectType:
+		return reflect.DeepEqual(f.Interface, other.Interface)
+	default:
+		return f == other
+	}
+}
+
+func addFields(enc ObjectEncoder, fields []Field) {
+	for i := range fields {
+		fields[i].AddTo(enc)
+	}
+}
diff --git a/vendor/go.uber.org/zap/zapcore/hook.go b/vendor/go.uber.org/zap/zapcore/hook.go
new file mode 100644
index 000000000..5db4afb30
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/hook.go
@@ -0,0 +1,68 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import "go.uber.org/multierr"
+
+type hooked struct {
+	Core
+	funcs []func(Entry) error
+}
+
+// RegisterHooks wraps a Core and runs a collection of user-defined callback
+// hooks each time a message is logged. Execution of the callbacks is blocking.
+//
+// This offers users an easy way to register simple callbacks (e.g., metrics
+// collection) without implementing the full Core interface.
+func RegisterHooks(core Core, hooks ...func(Entry) error) Core {
+	funcs := append([]func(Entry) error{}, hooks...)
+	return &hooked{
+		Core:  core,
+		funcs: funcs,
+	}
+}
+
+func (h *hooked) Check(ent Entry, ce *CheckedEntry) *CheckedEntry {
+	// Let the wrapped Core decide whether to log this message or not. This
+	// also gives the downstream a chance to register itself directly with the
+	// CheckedEntry.
+	if downstream := h.Core.Check(ent, ce); downstream != nil {
+		return downstream.AddCore(ent, h)
+	}
+	return ce
+}
+
+func (h *hooked) With(fields []Field) Core {
+	return &hooked{
+		Core:  h.Core.With(fields),
+		funcs: h.funcs,
+	}
+}
+
+func (h *hooked) Write(ent Entry, _ []Field) error {
+	// Since our downstream had a chance to register itself directly with the
+	// CheckedEntry, we don't need to call it here.
+	var err error
+	for i := range h.funcs {
+		err = multierr.Append(err, h.funcs[i](ent))
+	}
+	return err
+}
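A sketch of the metrics-collection use case the comment mentions, assuming the top-level zap constructors; the counter variable and message text are illustrative:

package main

import (
	"os"
	"sync/atomic"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	var errorCount int64

	base := zapcore.NewCore(
		zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()),
		zapcore.Lock(os.Stdout),
		zapcore.InfoLevel,
	)

	// Count error-or-worse entries as a side effect of logging.
	core := zapcore.RegisterHooks(base, func(ent zapcore.Entry) error {
		if ent.Level >= zapcore.ErrorLevel {
			atomic.AddInt64(&errorCount, 1)
		}
		return nil
	})

	logger := zap.New(core)
	logger.Error("boom")
	_ = atomic.LoadInt64(&errorCount) // == 1
}

diff --git a/vendor/go.uber.org/zap/zapcore/json_encoder.go b/vendor/go.uber.org/zap/zapcore/json_encoder.go
new file mode 100644
index 000000000..2dc67d81e
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/json_encoder.go
@@ -0,0 +1,502 @@
+// Copyright (c) 2016 Uber Technologies, Inc.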
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import ( + "encoding/base64" + "encoding/json" + "math" + "sync" + "time" + "unicode/utf8" + + "go.uber.org/zap/buffer" + "go.uber.org/zap/internal/bufferpool" +) + +// For JSON-escaping; see jsonEncoder.safeAddString below. +const _hex = "0123456789abcdef" + +var _jsonPool = sync.Pool{New: func() interface{} { + return &jsonEncoder{} +}} + +func getJSONEncoder() *jsonEncoder { + return _jsonPool.Get().(*jsonEncoder) +} + +func putJSONEncoder(enc *jsonEncoder) { + if enc.reflectBuf != nil { + enc.reflectBuf.Free() + } + enc.EncoderConfig = nil + enc.buf = nil + enc.spaced = false + enc.openNamespaces = 0 + enc.reflectBuf = nil + enc.reflectEnc = nil + _jsonPool.Put(enc) +} + +type jsonEncoder struct { + *EncoderConfig + buf *buffer.Buffer + spaced bool // include spaces after colons and commas + openNamespaces int + + // for encoding generic values by reflection + reflectBuf *buffer.Buffer + reflectEnc *json.Encoder +} + +// NewJSONEncoder creates a fast, low-allocation JSON encoder. The encoder +// appropriately escapes all field keys and values. +// +// Note that the encoder doesn't deduplicate keys, so it's possible to produce +// a message like +// {"foo":"bar","foo":"baz"} +// This is permitted by the JSON specification, but not encouraged. Many +// libraries will ignore duplicate key-value pairs (typically keeping the last +// pair) when unmarshaling, but users should attempt to avoid adding duplicate +// keys. 
+func NewJSONEncoder(cfg EncoderConfig) Encoder { + return newJSONEncoder(cfg, false) +} + +func newJSONEncoder(cfg EncoderConfig, spaced bool) *jsonEncoder { + return &jsonEncoder{ + EncoderConfig: &cfg, + buf: bufferpool.Get(), + spaced: spaced, + } +} + +func (enc *jsonEncoder) AddArray(key string, arr ArrayMarshaler) error { + enc.addKey(key) + return enc.AppendArray(arr) +} + +func (enc *jsonEncoder) AddObject(key string, obj ObjectMarshaler) error { + enc.addKey(key) + return enc.AppendObject(obj) +} + +func (enc *jsonEncoder) AddBinary(key string, val []byte) { + enc.AddString(key, base64.StdEncoding.EncodeToString(val)) +} + +func (enc *jsonEncoder) AddByteString(key string, val []byte) { + enc.addKey(key) + enc.AppendByteString(val) +} + +func (enc *jsonEncoder) AddBool(key string, val bool) { + enc.addKey(key) + enc.AppendBool(val) +} + +func (enc *jsonEncoder) AddComplex128(key string, val complex128) { + enc.addKey(key) + enc.AppendComplex128(val) +} + +func (enc *jsonEncoder) AddDuration(key string, val time.Duration) { + enc.addKey(key) + enc.AppendDuration(val) +} + +func (enc *jsonEncoder) AddFloat64(key string, val float64) { + enc.addKey(key) + enc.AppendFloat64(val) +} + +func (enc *jsonEncoder) AddInt64(key string, val int64) { + enc.addKey(key) + enc.AppendInt64(val) +} + +func (enc *jsonEncoder) resetReflectBuf() { + if enc.reflectBuf == nil { + enc.reflectBuf = bufferpool.Get() + enc.reflectEnc = json.NewEncoder(enc.reflectBuf) + } else { + enc.reflectBuf.Reset() + } +} + +func (enc *jsonEncoder) AddReflected(key string, obj interface{}) error { + enc.resetReflectBuf() + err := enc.reflectEnc.Encode(obj) + if err != nil { + return err + } + enc.reflectBuf.TrimNewline() + enc.addKey(key) + _, err = enc.buf.Write(enc.reflectBuf.Bytes()) + return err +} + +func (enc *jsonEncoder) OpenNamespace(key string) { + enc.addKey(key) + enc.buf.AppendByte('{') + enc.openNamespaces++ +} + +func (enc *jsonEncoder) AddString(key, val string) { + enc.addKey(key) + enc.AppendString(val) +} + +func (enc *jsonEncoder) AddTime(key string, val time.Time) { + enc.addKey(key) + enc.AppendTime(val) +} + +func (enc *jsonEncoder) AddUint64(key string, val uint64) { + enc.addKey(key) + enc.AppendUint64(val) +} + +func (enc *jsonEncoder) AppendArray(arr ArrayMarshaler) error { + enc.addElementSeparator() + enc.buf.AppendByte('[') + err := arr.MarshalLogArray(enc) + enc.buf.AppendByte(']') + return err +} + +func (enc *jsonEncoder) AppendObject(obj ObjectMarshaler) error { + enc.addElementSeparator() + enc.buf.AppendByte('{') + err := obj.MarshalLogObject(enc) + enc.buf.AppendByte('}') + return err +} + +func (enc *jsonEncoder) AppendBool(val bool) { + enc.addElementSeparator() + enc.buf.AppendBool(val) +} + +func (enc *jsonEncoder) AppendByteString(val []byte) { + enc.addElementSeparator() + enc.buf.AppendByte('"') + enc.safeAddByteString(val) + enc.buf.AppendByte('"') +} + +func (enc *jsonEncoder) AppendComplex128(val complex128) { + enc.addElementSeparator() + // Cast to a platform-independent, fixed-size type. + r, i := float64(real(val)), float64(imag(val)) + enc.buf.AppendByte('"') + // Because we're always in a quoted string, we can use strconv without + // special-casing NaN and +/-Inf. 
+ enc.buf.AppendFloat(r, 64) + enc.buf.AppendByte('+') + enc.buf.AppendFloat(i, 64) + enc.buf.AppendByte('i') + enc.buf.AppendByte('"') +} + +func (enc *jsonEncoder) AppendDuration(val time.Duration) { + cur := enc.buf.Len() + enc.EncodeDuration(val, enc) + if cur == enc.buf.Len() { + // User-supplied EncodeDuration is a no-op. Fall back to nanoseconds to keep + // JSON valid. + enc.AppendInt64(int64(val)) + } +} + +func (enc *jsonEncoder) AppendInt64(val int64) { + enc.addElementSeparator() + enc.buf.AppendInt(val) +} + +func (enc *jsonEncoder) AppendReflected(val interface{}) error { + enc.resetReflectBuf() + err := enc.reflectEnc.Encode(val) + if err != nil { + return err + } + enc.reflectBuf.TrimNewline() + enc.addElementSeparator() + _, err = enc.buf.Write(enc.reflectBuf.Bytes()) + return err +} + +func (enc *jsonEncoder) AppendString(val string) { + enc.addElementSeparator() + enc.buf.AppendByte('"') + enc.safeAddString(val) + enc.buf.AppendByte('"') +} + +func (enc *jsonEncoder) AppendTime(val time.Time) { + cur := enc.buf.Len() + enc.EncodeTime(val, enc) + if cur == enc.buf.Len() { + // User-supplied EncodeTime is a no-op. Fall back to nanos since epoch to keep + // output JSON valid. + enc.AppendInt64(val.UnixNano()) + } +} + +func (enc *jsonEncoder) AppendUint64(val uint64) { + enc.addElementSeparator() + enc.buf.AppendUint(val) +} + +func (enc *jsonEncoder) AddComplex64(k string, v complex64) { enc.AddComplex128(k, complex128(v)) } +func (enc *jsonEncoder) AddFloat32(k string, v float32) { enc.AddFloat64(k, float64(v)) } +func (enc *jsonEncoder) AddInt(k string, v int) { enc.AddInt64(k, int64(v)) } +func (enc *jsonEncoder) AddInt32(k string, v int32) { enc.AddInt64(k, int64(v)) } +func (enc *jsonEncoder) AddInt16(k string, v int16) { enc.AddInt64(k, int64(v)) } +func (enc *jsonEncoder) AddInt8(k string, v int8) { enc.AddInt64(k, int64(v)) } +func (enc *jsonEncoder) AddUint(k string, v uint) { enc.AddUint64(k, uint64(v)) } +func (enc *jsonEncoder) AddUint32(k string, v uint32) { enc.AddUint64(k, uint64(v)) } +func (enc *jsonEncoder) AddUint16(k string, v uint16) { enc.AddUint64(k, uint64(v)) } +func (enc *jsonEncoder) AddUint8(k string, v uint8) { enc.AddUint64(k, uint64(v)) } +func (enc *jsonEncoder) AddUintptr(k string, v uintptr) { enc.AddUint64(k, uint64(v)) } +func (enc *jsonEncoder) AppendComplex64(v complex64) { enc.AppendComplex128(complex128(v)) } +func (enc *jsonEncoder) AppendFloat64(v float64) { enc.appendFloat(v, 64) } +func (enc *jsonEncoder) AppendFloat32(v float32) { enc.appendFloat(float64(v), 32) } +func (enc *jsonEncoder) AppendInt(v int) { enc.AppendInt64(int64(v)) } +func (enc *jsonEncoder) AppendInt32(v int32) { enc.AppendInt64(int64(v)) } +func (enc *jsonEncoder) AppendInt16(v int16) { enc.AppendInt64(int64(v)) } +func (enc *jsonEncoder) AppendInt8(v int8) { enc.AppendInt64(int64(v)) } +func (enc *jsonEncoder) AppendUint(v uint) { enc.AppendUint64(uint64(v)) } +func (enc *jsonEncoder) AppendUint32(v uint32) { enc.AppendUint64(uint64(v)) } +func (enc *jsonEncoder) AppendUint16(v uint16) { enc.AppendUint64(uint64(v)) } +func (enc *jsonEncoder) AppendUint8(v uint8) { enc.AppendUint64(uint64(v)) } +func (enc *jsonEncoder) AppendUintptr(v uintptr) { enc.AppendUint64(uint64(v)) } + +func (enc *jsonEncoder) Clone() Encoder { + clone := enc.clone() + clone.buf.Write(enc.buf.Bytes()) + return clone +} + +func (enc *jsonEncoder) clone() *jsonEncoder { + clone := getJSONEncoder() + clone.EncoderConfig = enc.EncoderConfig + clone.spaced = enc.spaced + 
clone.openNamespaces = enc.openNamespaces + clone.buf = bufferpool.Get() + return clone +} + +func (enc *jsonEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer, error) { + final := enc.clone() + final.buf.AppendByte('{') + + if final.LevelKey != "" { + final.addKey(final.LevelKey) + cur := final.buf.Len() + final.EncodeLevel(ent.Level, final) + if cur == final.buf.Len() { + // User-supplied EncodeLevel was a no-op. Fall back to strings to keep + // output JSON valid. + final.AppendString(ent.Level.String()) + } + } + if final.TimeKey != "" { + final.AddTime(final.TimeKey, ent.Time) + } + if ent.LoggerName != "" && final.NameKey != "" { + final.addKey(final.NameKey) + cur := final.buf.Len() + nameEncoder := final.EncodeName + + // if no name encoder provided, fall back to FullNameEncoder for backwards + // compatibility + if nameEncoder == nil { + nameEncoder = FullNameEncoder + } + + nameEncoder(ent.LoggerName, final) + if cur == final.buf.Len() { + // User-supplied EncodeName was a no-op. Fall back to strings to + // keep output JSON valid. + final.AppendString(ent.LoggerName) + } + } + if ent.Caller.Defined && final.CallerKey != "" { + final.addKey(final.CallerKey) + cur := final.buf.Len() + final.EncodeCaller(ent.Caller, final) + if cur == final.buf.Len() { + // User-supplied EncodeCaller was a no-op. Fall back to strings to + // keep output JSON valid. + final.AppendString(ent.Caller.String()) + } + } + if final.MessageKey != "" { + final.addKey(enc.MessageKey) + final.AppendString(ent.Message) + } + if enc.buf.Len() > 0 { + final.addElementSeparator() + final.buf.Write(enc.buf.Bytes()) + } + addFields(final, fields) + final.closeOpenNamespaces() + if ent.Stack != "" && final.StacktraceKey != "" { + final.AddString(final.StacktraceKey, ent.Stack) + } + final.buf.AppendByte('}') + if final.LineEnding != "" { + final.buf.AppendString(final.LineEnding) + } else { + final.buf.AppendString(DefaultLineEnding) + } + + ret := final.buf + putJSONEncoder(final) + return ret, nil +} + +func (enc *jsonEncoder) truncate() { + enc.buf.Reset() +} + +func (enc *jsonEncoder) closeOpenNamespaces() { + for i := 0; i < enc.openNamespaces; i++ { + enc.buf.AppendByte('}') + } +} + +func (enc *jsonEncoder) addKey(key string) { + enc.addElementSeparator() + enc.buf.AppendByte('"') + enc.safeAddString(key) + enc.buf.AppendByte('"') + enc.buf.AppendByte(':') + if enc.spaced { + enc.buf.AppendByte(' ') + } +} + +func (enc *jsonEncoder) addElementSeparator() { + last := enc.buf.Len() - 1 + if last < 0 { + return + } + switch enc.buf.Bytes()[last] { + case '{', '[', ':', ',', ' ': + return + default: + enc.buf.AppendByte(',') + if enc.spaced { + enc.buf.AppendByte(' ') + } + } +} + +func (enc *jsonEncoder) appendFloat(val float64, bitSize int) { + enc.addElementSeparator() + switch { + case math.IsNaN(val): + enc.buf.AppendString(`"NaN"`) + case math.IsInf(val, 1): + enc.buf.AppendString(`"+Inf"`) + case math.IsInf(val, -1): + enc.buf.AppendString(`"-Inf"`) + default: + enc.buf.AppendFloat(val, bitSize) + } +} + +// safeAddString JSON-escapes a string and appends it to the internal buffer. +// Unlike the standard library's encoder, it doesn't attempt to protect the +// user from browser vulnerabilities or JSONP-related problems. 
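EncodeEntry can also be driven directly, which is the easiest way to see the encoder end to end. A minimal sketch, assuming zap.NewProductionEncoderConfig from the top-level package; the printed line is approximate:

package main

import (
	"fmt"
	"time"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	enc := zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig())
	buf, err := enc.EncodeEntry(zapcore.Entry{
		Level:   zapcore.InfoLevel,
		Time:    time.Unix(0, 0),
		Message: "hello",
	}, []zapcore.Field{zap.String("user", "alice")})
	if err != nil {
		panic(err)
	}
	defer buf.Free()
	fmt.Print(buf.String()) // {"level":"info","ts":0,"msg":"hello","user":"alice"}
}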
+func (enc *jsonEncoder) safeAddString(s string) {
+	for i := 0; i < len(s); {
+		if enc.tryAddRuneSelf(s[i]) {
+			i++
+			continue
+		}
+		r, size := utf8.DecodeRuneInString(s[i:])
+		if enc.tryAddRuneError(r, size) {
+			i++
+			continue
+		}
+		enc.buf.AppendString(s[i : i+size])
+		i += size
+	}
+}
+
+// safeAddByteString is a no-alloc equivalent of safeAddString(string(s)) for s []byte.
+func (enc *jsonEncoder) safeAddByteString(s []byte) {
+	for i := 0; i < len(s); {
+		if enc.tryAddRuneSelf(s[i]) {
+			i++
+			continue
+		}
+		r, size := utf8.DecodeRune(s[i:])
+		if enc.tryAddRuneError(r, size) {
+			i++
+			continue
+		}
+		enc.buf.Write(s[i : i+size])
+		i += size
+	}
+}
+
+// tryAddRuneSelf appends b if it is a valid UTF-8 character represented by a
+// single byte.
+func (enc *jsonEncoder) tryAddRuneSelf(b byte) bool {
+	if b >= utf8.RuneSelf {
+		return false
+	}
+	if 0x20 <= b && b != '\\' && b != '"' {
+		enc.buf.AppendByte(b)
+		return true
+	}
+	switch b {
+	case '\\', '"':
+		enc.buf.AppendByte('\\')
+		enc.buf.AppendByte(b)
+	case '\n':
+		enc.buf.AppendByte('\\')
+		enc.buf.AppendByte('n')
+	case '\r':
+		enc.buf.AppendByte('\\')
+		enc.buf.AppendByte('r')
+	case '\t':
+		enc.buf.AppendByte('\\')
+		enc.buf.AppendByte('t')
+	default:
+		// Encode bytes < 0x20, except for the escape sequences above.
+		enc.buf.AppendString(`\u00`)
+		enc.buf.AppendByte(_hex[b>>4])
+		enc.buf.AppendByte(_hex[b&0xF])
+	}
+	return true
+}
+
+func (enc *jsonEncoder) tryAddRuneError(r rune, size int) bool {
+	if r == utf8.RuneError && size == 1 {
+		enc.buf.AppendString(`\ufffd`)
+		return true
+	}
+	return false
+}
diff --git a/vendor/go.uber.org/zap/zapcore/level.go b/vendor/go.uber.org/zap/zapcore/level.go
new file mode 100644
index 000000000..e575c9f43
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/level.go
@@ -0,0 +1,175 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+)
+
+var errUnmarshalNilLevel = errors.New("can't unmarshal a nil *Level")
+
+// A Level is a logging priority. Higher levels are more important.
+type Level int8
+
+const (
+	// DebugLevel logs are typically voluminous, and are usually disabled in
+	// production.
+	DebugLevel Level = iota - 1
+	// InfoLevel is the default logging priority.
+	InfoLevel
+	// WarnLevel logs are more important than Info, but don't need individual
+	// human review.
+	WarnLevel
+	// ErrorLevel logs are high-priority.
If an application is running smoothly, + // it shouldn't generate any error-level logs. + ErrorLevel + // DPanicLevel logs are particularly important errors. In development the + // logger panics after writing the message. + DPanicLevel + // PanicLevel logs a message, then panics. + PanicLevel + // FatalLevel logs a message, then calls os.Exit(1). + FatalLevel + + _minLevel = DebugLevel + _maxLevel = FatalLevel +) + +// String returns a lower-case ASCII representation of the log level. +func (l Level) String() string { + switch l { + case DebugLevel: + return "debug" + case InfoLevel: + return "info" + case WarnLevel: + return "warn" + case ErrorLevel: + return "error" + case DPanicLevel: + return "dpanic" + case PanicLevel: + return "panic" + case FatalLevel: + return "fatal" + default: + return fmt.Sprintf("Level(%d)", l) + } +} + +// CapitalString returns an all-caps ASCII representation of the log level. +func (l Level) CapitalString() string { + // Printing levels in all-caps is common enough that we should export this + // functionality. + switch l { + case DebugLevel: + return "DEBUG" + case InfoLevel: + return "INFO" + case WarnLevel: + return "WARN" + case ErrorLevel: + return "ERROR" + case DPanicLevel: + return "DPANIC" + case PanicLevel: + return "PANIC" + case FatalLevel: + return "FATAL" + default: + return fmt.Sprintf("LEVEL(%d)", l) + } +} + +// MarshalText marshals the Level to text. Note that the text representation +// drops the -Level suffix (see example). +func (l Level) MarshalText() ([]byte, error) { + return []byte(l.String()), nil +} + +// UnmarshalText unmarshals text to a level. Like MarshalText, UnmarshalText +// expects the text representation of a Level to drop the -Level suffix (see +// example). +// +// In particular, this makes it easy to configure logging levels using YAML, +// TOML, or JSON files. +func (l *Level) UnmarshalText(text []byte) error { + if l == nil { + return errUnmarshalNilLevel + } + if !l.unmarshalText(text) && !l.unmarshalText(bytes.ToLower(text)) { + return fmt.Errorf("unrecognized level: %q", text) + } + return nil +} + +func (l *Level) unmarshalText(text []byte) bool { + switch string(text) { + case "debug", "DEBUG": + *l = DebugLevel + case "info", "INFO", "": // make the zero value useful + *l = InfoLevel + case "warn", "WARN": + *l = WarnLevel + case "error", "ERROR": + *l = ErrorLevel + case "dpanic", "DPANIC": + *l = DPanicLevel + case "panic", "PANIC": + *l = PanicLevel + case "fatal", "FATAL": + *l = FatalLevel + default: + return false + } + return true +} + +// Set sets the level for the flag.Value interface. +func (l *Level) Set(s string) error { + return l.UnmarshalText([]byte(s)) +} + +// Get gets the level for the flag.Getter interface. +func (l *Level) Get() interface{} { + return *l +} + +// Enabled returns true if the given level is at or above this level. +func (l Level) Enabled(lvl Level) bool { + return lvl >= l +} + +// LevelEnabler decides whether a given logging level is enabled when logging a +// message. +// +// Enablers are intended to be used to implement deterministic filters; +// concerns like sampling are better implemented as a Core. +// +// Each concrete Level value implements a static LevelEnabler which returns +// true for itself and all higher logging levels. For example WarnLevel.Enabled() +// will return true for WarnLevel, ErrorLevel, DPanicLevel, PanicLevel, and +// FatalLevel, but return false for InfoLevel and DebugLevel. 
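Because *Level implements flag.Value (Set, Get, String) and encoding.TextUnmarshaler, a level can be parsed straight out of flags or config files. A small sketch; the flag name is illustrative:

package main

import (
	"flag"
	"fmt"

	"go.uber.org/zap/zapcore"
)

func main() {
	// UnmarshalText accepts the lower- or upper-case level names.
	var lvl zapcore.Level
	if err := lvl.UnmarshalText([]byte("warn")); err != nil {
		panic(err)
	}
	fmt.Println(lvl.Enabled(zapcore.ErrorLevel)) // true
	fmt.Println(lvl.Enabled(zapcore.DebugLevel)) // false

	// The same type plugs directly into the flag package.
	level := zapcore.InfoLevel
	flag.Var(&level, "log-level", "minimum enabled logging level")
	flag.Parse()
}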
+type LevelEnabler interface { + Enabled(Level) bool +} diff --git a/vendor/go.uber.org/zap/zapcore/level_strings.go b/vendor/go.uber.org/zap/zapcore/level_strings.go new file mode 100644 index 000000000..7af8dadcb --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/level_strings.go @@ -0,0 +1,46 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import "go.uber.org/zap/internal/color" + +var ( + _levelToColor = map[Level]color.Color{ + DebugLevel: color.Magenta, + InfoLevel: color.Blue, + WarnLevel: color.Yellow, + ErrorLevel: color.Red, + DPanicLevel: color.Red, + PanicLevel: color.Red, + FatalLevel: color.Red, + } + _unknownLevelColor = color.Red + + _levelToLowercaseColorString = make(map[Level]string, len(_levelToColor)) + _levelToCapitalColorString = make(map[Level]string, len(_levelToColor)) +) + +func init() { + for level, color := range _levelToColor { + _levelToLowercaseColorString[level] = color.Add(level.String()) + _levelToCapitalColorString[level] = color.Add(level.CapitalString()) + } +} diff --git a/vendor/go.uber.org/zap/zapcore/marshaler.go b/vendor/go.uber.org/zap/zapcore/marshaler.go new file mode 100644 index 000000000..2627a653d --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/marshaler.go @@ -0,0 +1,53 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +package zapcore + +// ObjectMarshaler allows user-defined types to efficiently add themselves to the +// logging context, and to selectively omit information which shouldn't be +// included in logs (e.g., passwords). +type ObjectMarshaler interface { + MarshalLogObject(ObjectEncoder) error +} + +// ObjectMarshalerFunc is a type adapter that turns a function into an +// ObjectMarshaler. +type ObjectMarshalerFunc func(ObjectEncoder) error + +// MarshalLogObject calls the underlying function. +func (f ObjectMarshalerFunc) MarshalLogObject(enc ObjectEncoder) error { + return f(enc) +} + +// ArrayMarshaler allows user-defined types to efficiently add themselves to the +// logging context, and to selectively omit information which shouldn't be +// included in logs (e.g., passwords). +type ArrayMarshaler interface { + MarshalLogArray(ArrayEncoder) error +} + +// ArrayMarshalerFunc is a type adapter that turns a function into an +// ArrayMarshaler. +type ArrayMarshalerFunc func(ArrayEncoder) error + +// MarshalLogArray calls the underlying function. +func (f ArrayMarshalerFunc) MarshalLogArray(enc ArrayEncoder) error { + return f(enc) +} diff --git a/vendor/go.uber.org/zap/zapcore/memory_encoder.go b/vendor/go.uber.org/zap/zapcore/memory_encoder.go new file mode 100644 index 000000000..6ef85b09c --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/memory_encoder.go @@ -0,0 +1,179 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import "time" + +// MapObjectEncoder is an ObjectEncoder backed by a simple +// map[string]interface{}. It's not fast enough for production use, but it's +// helpful in tests. +type MapObjectEncoder struct { + // Fields contains the entire encoded log context. + Fields map[string]interface{} + // cur is a pointer to the namespace we're currently writing to. + cur map[string]interface{} +} + +// NewMapObjectEncoder creates a new map-backed ObjectEncoder. +func NewMapObjectEncoder() *MapObjectEncoder { + m := make(map[string]interface{}) + return &MapObjectEncoder{ + Fields: m, + cur: m, + } +} + +// AddArray implements ObjectEncoder. +func (m *MapObjectEncoder) AddArray(key string, v ArrayMarshaler) error { + arr := &sliceArrayEncoder{elems: make([]interface{}, 0)} + err := v.MarshalLogArray(arr) + m.cur[key] = arr.elems + return err +} + +// AddObject implements ObjectEncoder. 
+func (m *MapObjectEncoder) AddObject(k string, v ObjectMarshaler) error {
+	newMap := NewMapObjectEncoder()
+	m.cur[k] = newMap.Fields
+	return v.MarshalLogObject(newMap)
+}
+
+// AddBinary implements ObjectEncoder.
+func (m *MapObjectEncoder) AddBinary(k string, v []byte) { m.cur[k] = v }
+
+// AddByteString implements ObjectEncoder.
+func (m *MapObjectEncoder) AddByteString(k string, v []byte) { m.cur[k] = string(v) }
+
+// AddBool implements ObjectEncoder.
+func (m *MapObjectEncoder) AddBool(k string, v bool) { m.cur[k] = v }
+
+// AddDuration implements ObjectEncoder.
+func (m *MapObjectEncoder) AddDuration(k string, v time.Duration) { m.cur[k] = v }
+
+// AddComplex128 implements ObjectEncoder.
+func (m *MapObjectEncoder) AddComplex128(k string, v complex128) { m.cur[k] = v }
+
+// AddComplex64 implements ObjectEncoder.
+func (m *MapObjectEncoder) AddComplex64(k string, v complex64) { m.cur[k] = v }
+
+// AddFloat64 implements ObjectEncoder.
+func (m *MapObjectEncoder) AddFloat64(k string, v float64) { m.cur[k] = v }
+
+// AddFloat32 implements ObjectEncoder.
+func (m *MapObjectEncoder) AddFloat32(k string, v float32) { m.cur[k] = v }
+
+// AddInt implements ObjectEncoder.
+func (m *MapObjectEncoder) AddInt(k string, v int) { m.cur[k] = v }
+
+// AddInt64 implements ObjectEncoder.
+func (m *MapObjectEncoder) AddInt64(k string, v int64) { m.cur[k] = v }
+
+// AddInt32 implements ObjectEncoder.
+func (m *MapObjectEncoder) AddInt32(k string, v int32) { m.cur[k] = v }
+
+// AddInt16 implements ObjectEncoder.
+func (m *MapObjectEncoder) AddInt16(k string, v int16) { m.cur[k] = v }
+
+// AddInt8 implements ObjectEncoder.
+func (m *MapObjectEncoder) AddInt8(k string, v int8) { m.cur[k] = v }
+
+// AddString implements ObjectEncoder.
+func (m *MapObjectEncoder) AddString(k string, v string) { m.cur[k] = v }
+
+// AddTime implements ObjectEncoder.
+func (m *MapObjectEncoder) AddTime(k string, v time.Time) { m.cur[k] = v }
+
+// AddUint implements ObjectEncoder.
+func (m *MapObjectEncoder) AddUint(k string, v uint) { m.cur[k] = v }
+
+// AddUint64 implements ObjectEncoder.
+func (m *MapObjectEncoder) AddUint64(k string, v uint64) { m.cur[k] = v }
+
+// AddUint32 implements ObjectEncoder.
+func (m *MapObjectEncoder) AddUint32(k string, v uint32) { m.cur[k] = v }
+
+// AddUint16 implements ObjectEncoder.
+func (m *MapObjectEncoder) AddUint16(k string, v uint16) { m.cur[k] = v }
+
+// AddUint8 implements ObjectEncoder.
+func (m *MapObjectEncoder) AddUint8(k string, v uint8) { m.cur[k] = v }
+
+// AddUintptr implements ObjectEncoder.
+func (m *MapObjectEncoder) AddUintptr(k string, v uintptr) { m.cur[k] = v }
+
+// AddReflected implements ObjectEncoder.
+func (m *MapObjectEncoder) AddReflected(k string, v interface{}) error {
+	m.cur[k] = v
+	return nil
+}
+
+// OpenNamespace implements ObjectEncoder.
+func (m *MapObjectEncoder) OpenNamespace(k string) {
+	ns := make(map[string]interface{})
+	m.cur[k] = ns
+	m.cur = ns
+}
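The map-backed encoder makes ObjectMarshaler implementations easy to unit-test without standing up a logger. A sketch with a hypothetical user type that deliberately keeps a sensitive field out of its log representation:

package main

import (
	"fmt"

	"go.uber.org/zap/zapcore"
)

// user is a hypothetical type that controls its own log representation.
type user struct {
	Name  string
	Email string
}

func (u user) MarshalLogObject(enc zapcore.ObjectEncoder) error {
	enc.AddString("name", u.Name)
	// Email is intentionally omitted: ObjectMarshaler exists precisely so
	// types can keep sensitive data out of logs.
	return nil
}

func main() {
	enc := zapcore.NewMapObjectEncoder()
	if err := enc.AddObject("user", user{Name: "alice", Email: "a@example.com"}); err != nil {
		panic(err)
	}
	fmt.Println(enc.Fields["user"]) // map[name:alice]
}

+// sliceArrayEncoder is an ArrayEncoder backed by a simple []interface{}. Like
+// the MapObjectEncoder, it's not designed for production use.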
+type sliceArrayEncoder struct { + elems []interface{} +} + +func (s *sliceArrayEncoder) AppendArray(v ArrayMarshaler) error { + enc := &sliceArrayEncoder{} + err := v.MarshalLogArray(enc) + s.elems = append(s.elems, enc.elems) + return err +} + +func (s *sliceArrayEncoder) AppendObject(v ObjectMarshaler) error { + m := NewMapObjectEncoder() + err := v.MarshalLogObject(m) + s.elems = append(s.elems, m.Fields) + return err +} + +func (s *sliceArrayEncoder) AppendReflected(v interface{}) error { + s.elems = append(s.elems, v) + return nil +} + +func (s *sliceArrayEncoder) AppendBool(v bool) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendByteString(v []byte) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendComplex128(v complex128) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendComplex64(v complex64) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendDuration(v time.Duration) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendFloat64(v float64) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendFloat32(v float32) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendInt(v int) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendInt64(v int64) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendInt32(v int32) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendInt16(v int16) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendInt8(v int8) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendString(v string) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendTime(v time.Time) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendUint(v uint) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendUint64(v uint64) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendUint32(v uint32) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendUint16(v uint16) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendUint8(v uint8) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendUintptr(v uintptr) { s.elems = append(s.elems, v) } diff --git a/vendor/go.uber.org/zap/zapcore/sampler.go b/vendor/go.uber.org/zap/zapcore/sampler.go new file mode 100644 index 000000000..e31641863 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/sampler.go @@ -0,0 +1,134 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import (
+	"time"
+
+	"go.uber.org/atomic"
+)
+
+const (
+	_numLevels        = _maxLevel - _minLevel + 1
+	_countersPerLevel = 4096
+)
+
+type counter struct {
+	resetAt atomic.Int64
+	counter atomic.Uint64
+}
+
+type counters [_numLevels][_countersPerLevel]counter
+
+func newCounters() *counters {
+	return &counters{}
+}
+
+func (cs *counters) get(lvl Level, key string) *counter {
+	i := lvl - _minLevel
+	j := fnv32a(key) % _countersPerLevel
+	return &cs[i][j]
+}
+
+// fnv32a, adapted from "hash/fnv", but without a []byte(string) alloc
+func fnv32a(s string) uint32 {
+	const (
+		offset32 = 2166136261
+		prime32  = 16777619
+	)
+	hash := uint32(offset32)
+	for i := 0; i < len(s); i++ {
+		hash ^= uint32(s[i])
+		hash *= prime32
+	}
+	return hash
+}
+
+func (c *counter) IncCheckReset(t time.Time, tick time.Duration) uint64 {
+	tn := t.UnixNano()
+	resetAfter := c.resetAt.Load()
+	if resetAfter > tn {
+		return c.counter.Inc()
+	}
+
+	c.counter.Store(1)
+
+	newResetAfter := tn + tick.Nanoseconds()
+	if !c.resetAt.CAS(resetAfter, newResetAfter) {
+		// We raced with another goroutine trying to reset, and it also reset
+		// the counter to 1, so we need to reincrement the counter.
+		return c.counter.Inc()
+	}
+
+	return 1
+}
+
+type sampler struct {
+	Core
+
+	counts            *counters
+	tick              time.Duration
+	first, thereafter uint64
+}
+
+// NewSampler creates a Core that samples incoming entries, which caps the CPU
+// and I/O load of logging while attempting to preserve a representative subset
+// of your logs.
+//
+// Zap samples by logging the first N entries with a given level and message
+// each tick. If more entries with the same level and message are seen during
+// the same interval, every Mth message is logged and the rest are dropped.
+//
+// Keep in mind that zap's sampling implementation is optimized for speed over
+// absolute precision; under load, each tick may be slightly over- or
+// under-sampled.
+func NewSampler(core Core, tick time.Duration, first, thereafter int) Core {
+	return &sampler{
+		Core:       core,
+		tick:       tick,
+		counts:     newCounters(),
+		first:      uint64(first),
+		thereafter: uint64(thereafter),
+	}
+}
+
+func (s *sampler) With(fields []Field) Core {
+	return &sampler{
+		Core:       s.Core.With(fields),
+		tick:       s.tick,
+		counts:     s.counts,
+		first:      s.first,
+		thereafter: s.thereafter,
+	}
+}
+
+func (s *sampler) Check(ent Entry, ce *CheckedEntry) *CheckedEntry {
+	if !s.Enabled(ent.Level) {
+		return ce
+	}
+
+	counter := s.counts.get(ent.Level, ent.Message)
+	n := counter.IncCheckReset(ent.Time, s.tick)
+	if n > s.first && (n-s.first)%s.thereafter != 0 {
+		return ce
+	}
+	return s.Core.Check(ent, ce)
+}
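A sketch of wrapping a Core in the sampler, assuming the exported constructors; with tick of one second, first=100, and thereafter=100, each distinct message logs its first 100 occurrences per second and then every 100th:

package main

import (
	"os"
	"time"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	base := zapcore.NewCore(
		zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()),
		zapcore.Lock(os.Stdout),
		zapcore.InfoLevel,
	)
	sampled := zapcore.NewSampler(base, time.Second, 100, 100)

	logger := zap.New(sampled)
	for i := 0; i < 1000; i++ {
		// Roughly 109 of these reach base within one tick: the first 100,
		// plus every 100th occurrence after that.
		logger.Info("hot loop")
	}
}

diff --git a/vendor/go.uber.org/zap/zapcore/tee.go b/vendor/go.uber.org/zap/zapcore/tee.go
new file mode 100644
index 000000000..07a32eef9
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/tee.go
@@ -0,0 +1,81 @@
+// Copyright (c) 2016 Uber Technologies, Inc.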
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import "go.uber.org/multierr" + +type multiCore []Core + +// NewTee creates a Core that duplicates log entries into two or more +// underlying Cores. +// +// Calling it with a single Core returns the input unchanged, and calling +// it with no input returns a no-op Core. +func NewTee(cores ...Core) Core { + switch len(cores) { + case 0: + return NewNopCore() + case 1: + return cores[0] + default: + return multiCore(cores) + } +} + +func (mc multiCore) With(fields []Field) Core { + clone := make(multiCore, len(mc)) + for i := range mc { + clone[i] = mc[i].With(fields) + } + return clone +} + +func (mc multiCore) Enabled(lvl Level) bool { + for i := range mc { + if mc[i].Enabled(lvl) { + return true + } + } + return false +} + +func (mc multiCore) Check(ent Entry, ce *CheckedEntry) *CheckedEntry { + for i := range mc { + ce = mc[i].Check(ent, ce) + } + return ce +} + +func (mc multiCore) Write(ent Entry, fields []Field) error { + var err error + for i := range mc { + err = multierr.Append(err, mc[i].Write(ent, fields)) + } + return err +} + +func (mc multiCore) Sync() error { + var err error + for i := range mc { + err = multierr.Append(err, mc[i].Sync()) + } + return err +} diff --git a/vendor/go.uber.org/zap/zapcore/write_syncer.go b/vendor/go.uber.org/zap/zapcore/write_syncer.go new file mode 100644 index 000000000..209e25fe2 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/write_syncer.go @@ -0,0 +1,123 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import ( + "io" + "sync" + + "go.uber.org/multierr" +) + +// A WriteSyncer is an io.Writer that can also flush any buffered data. Note +// that *os.File (and thus, os.Stderr and os.Stdout) implement WriteSyncer. +type WriteSyncer interface { + io.Writer + Sync() error +} + +// AddSync converts an io.Writer to a WriteSyncer. It attempts to be +// intelligent: if the concrete type of the io.Writer implements WriteSyncer, +// we'll use the existing Sync method. If it doesn't, we'll add a no-op Sync. +func AddSync(w io.Writer) WriteSyncer { + switch w := w.(type) { + case WriteSyncer: + return w + default: + return writerWrapper{w} + } +} + +type lockedWriteSyncer struct { + sync.Mutex + ws WriteSyncer +} + +// Lock wraps a WriteSyncer in a mutex to make it safe for concurrent use. In +// particular, *os.Files must be locked before use. +func Lock(ws WriteSyncer) WriteSyncer { + if _, ok := ws.(*lockedWriteSyncer); ok { + // no need to layer on another lock + return ws + } + return &lockedWriteSyncer{ws: ws} +} + +func (s *lockedWriteSyncer) Write(bs []byte) (int, error) { + s.Lock() + n, err := s.ws.Write(bs) + s.Unlock() + return n, err +} + +func (s *lockedWriteSyncer) Sync() error { + s.Lock() + err := s.ws.Sync() + s.Unlock() + return err +} + +type writerWrapper struct { + io.Writer +} + +func (w writerWrapper) Sync() error { + return nil +} + +type multiWriteSyncer []WriteSyncer + +// NewMultiWriteSyncer creates a WriteSyncer that duplicates its writes +// and sync calls, much like io.MultiWriter. +func NewMultiWriteSyncer(ws ...WriteSyncer) WriteSyncer { + if len(ws) == 1 { + return ws[0] + } + // Copy to protect against https://github.com/golang/go/issues/7809 + return multiWriteSyncer(append([]WriteSyncer(nil), ws...)) +} + +// See https://golang.org/src/io/multi.go +// When not all underlying syncers write the same number of bytes, +// the smallest number is returned even though Write() is called on +// all of them. +func (ws multiWriteSyncer) Write(p []byte) (int, error) { + var writeErr error + nWritten := 0 + for _, w := range ws { + n, err := w.Write(p) + writeErr = multierr.Append(writeErr, err) + if nWritten == 0 && n != 0 { + nWritten = n + } else if n < nWritten { + nWritten = n + } + } + return nWritten, writeErr +} + +func (ws multiWriteSyncer) Sync() error { + var err error + for _, w := range ws { + err = multierr.Append(err, w.Sync()) + } + return err +} diff --git a/vendor/golang.org/x/net/internal/timeseries/timeseries.go b/vendor/golang.org/x/net/internal/timeseries/timeseries.go new file mode 100644 index 000000000..685f0e7ea --- /dev/null +++ b/vendor/golang.org/x/net/internal/timeseries/timeseries.go @@ -0,0 +1,525 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package timeseries implements a time series structure for stats collection. 
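These helpers combine naturally when a Core should log to several destinations at once. A sketch assuming the exported zapcore constructors; the file path is illustrative:

package main

import (
	"os"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	f, err := os.Create("/tmp/app.log")
	if err != nil {
		panic(err)
	}
	enc := zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig())

	// Lock makes the *os.File safe for concurrent writes, and
	// NewMultiWriteSyncer fans each write out to both destinations.
	ws := zapcore.NewMultiWriteSyncer(
		zapcore.Lock(os.Stderr),
		zapcore.AddSync(f),
	)
	core := zapcore.NewCore(enc, ws, zapcore.InfoLevel)

	logger := zap.New(core)
	defer logger.Sync()
	logger.Info("written to both sinks")
}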
+package timeseries // import "golang.org/x/net/internal/timeseries"
+
+import (
+	"fmt"
+	"log"
+	"time"
+)
+
+const (
+	timeSeriesNumBuckets       = 64
+	minuteHourSeriesNumBuckets = 60
+)
+
+var timeSeriesResolutions = []time.Duration{
+	1 * time.Second,
+	10 * time.Second,
+	1 * time.Minute,
+	10 * time.Minute,
+	1 * time.Hour,
+	6 * time.Hour,
+	24 * time.Hour,          // 1 day
+	7 * 24 * time.Hour,      // 1 week
+	4 * 7 * 24 * time.Hour,  // 4 weeks
+	16 * 7 * 24 * time.Hour, // 16 weeks
+}
+
+var minuteHourSeriesResolutions = []time.Duration{
+	1 * time.Second,
+	1 * time.Minute,
+}
+
+// An Observable is a kind of data that can be aggregated in a time series.
+type Observable interface {
+	Multiply(ratio float64)    // Multiplies the data in self by a given ratio
+	Add(other Observable)      // Adds the data from a different observation to self
+	Clear()                    // Clears the observation so it can be reused.
+	CopyFrom(other Observable) // Copies the contents of a given observation to self
+}
+
+// Float attaches the methods of Observable to a float64.
+type Float float64
+
+// NewFloat returns a Float.
+func NewFloat() Observable {
+	f := Float(0)
+	return &f
+}
+
+// String returns the float as a string.
+func (f *Float) String() string { return fmt.Sprintf("%g", f.Value()) }
+
+// Value returns the float's value.
+func (f *Float) Value() float64 { return float64(*f) }
+
+func (f *Float) Multiply(ratio float64) { *f *= Float(ratio) }
+
+func (f *Float) Add(other Observable) {
+	o := other.(*Float)
+	*f += *o
+}
+
+func (f *Float) Clear() { *f = 0 }
+
+func (f *Float) CopyFrom(other Observable) {
+	o := other.(*Float)
+	*f = *o
+}
+
+// A Clock tells the current time.
+type Clock interface {
+	Time() time.Time
+}
+
+type defaultClock int
+
+var defaultClockInstance defaultClock
+
+func (defaultClock) Time() time.Time { return time.Now() }
+
+// Information kept per level. Each level consists of a circular list of
+// observations. The start of the level may be derived from end and
+// len(buckets) * size.
+type tsLevel struct {
+	oldest   int               // index to oldest bucketed Observable
+	newest   int               // index to newest bucketed Observable
+	end      time.Time         // end timestamp for this level
+	size     time.Duration     // duration of the bucketed Observable
+	buckets  []Observable      // collections of observations
+	provider func() Observable // used for creating new Observable
+}
+
+func (l *tsLevel) Clear() {
+	l.oldest = 0
+	l.newest = len(l.buckets) - 1
+	l.end = time.Time{}
+	for i := range l.buckets {
+		if l.buckets[i] != nil {
+			l.buckets[i].Clear()
+			l.buckets[i] = nil
+		}
+	}
+}
+
+func (l *tsLevel) InitLevel(size time.Duration, numBuckets int, f func() Observable) {
+	l.size = size
+	l.provider = f
+	l.buckets = make([]Observable, numBuckets)
+}
+
+// Keeps a sequence of levels. Each level is responsible for storing data at
+// a given resolution. For example, the first level stores data at a one
+// minute resolution while the second level stores data at a one hour
+// resolution.
+//
+// Each level is represented by a sequence of buckets. Each bucket spans an
+// interval equal to the resolution of the level. New observations are added
+// to the last bucket.
+type timeSeries struct { + provider func() Observable // make more Observable + numBuckets int // number of buckets in each level + levels []*tsLevel // levels of bucketed Observable + lastAdd time.Time // time of last Observable tracked + total Observable // convenient aggregation of all Observable + clock Clock // Clock for getting current time + pending Observable // observations not yet bucketed + pendingTime time.Time // what time are we keeping in pending + dirty bool // if there are pending observations +} + +// init initializes a level according to the supplied criteria. +func (ts *timeSeries) init(resolutions []time.Duration, f func() Observable, numBuckets int, clock Clock) { + ts.provider = f + ts.numBuckets = numBuckets + ts.clock = clock + ts.levels = make([]*tsLevel, len(resolutions)) + + for i := range resolutions { + if i > 0 && resolutions[i-1] >= resolutions[i] { + log.Print("timeseries: resolutions must be monotonically increasing") + break + } + newLevel := new(tsLevel) + newLevel.InitLevel(resolutions[i], ts.numBuckets, ts.provider) + ts.levels[i] = newLevel + } + + ts.Clear() +} + +// Clear removes all observations from the time series. +func (ts *timeSeries) Clear() { + ts.lastAdd = time.Time{} + ts.total = ts.resetObservation(ts.total) + ts.pending = ts.resetObservation(ts.pending) + ts.pendingTime = time.Time{} + ts.dirty = false + + for i := range ts.levels { + ts.levels[i].Clear() + } +} + +// Add records an observation at the current time. +func (ts *timeSeries) Add(observation Observable) { + ts.AddWithTime(observation, ts.clock.Time()) +} + +// AddWithTime records an observation at the specified time. +func (ts *timeSeries) AddWithTime(observation Observable, t time.Time) { + + smallBucketDuration := ts.levels[0].size + + if t.After(ts.lastAdd) { + ts.lastAdd = t + } + + if t.After(ts.pendingTime) { + ts.advance(t) + ts.mergePendingUpdates() + ts.pendingTime = ts.levels[0].end + ts.pending.CopyFrom(observation) + ts.dirty = true + } else if t.After(ts.pendingTime.Add(-1 * smallBucketDuration)) { + // The observation is close enough to go into the pending bucket. + // This compensates for clock skewing and small scheduling delays + // by letting the update stay in the fast path. + ts.pending.Add(observation) + ts.dirty = true + } else { + ts.mergeValue(observation, t) + } +} + +// mergeValue inserts the observation at the specified time in the past into all levels. +func (ts *timeSeries) mergeValue(observation Observable, t time.Time) { + for _, level := range ts.levels { + index := (ts.numBuckets - 1) - int(level.end.Sub(t)/level.size) + if 0 <= index && index < ts.numBuckets { + bucketNumber := (level.oldest + index) % ts.numBuckets + if level.buckets[bucketNumber] == nil { + level.buckets[bucketNumber] = level.provider() + } + level.buckets[bucketNumber].Add(observation) + } + } + ts.total.Add(observation) +} + +// mergePendingUpdates applies the pending updates into all levels. +func (ts *timeSeries) mergePendingUpdates() { + if ts.dirty { + ts.mergeValue(ts.pending, ts.pendingTime) + ts.pending = ts.resetObservation(ts.pending) + ts.dirty = false + } +} + +// advance cycles the buckets at each level until the latest bucket in +// each level can hold the time specified. +func (ts *timeSeries) advance(t time.Time) { + if !t.After(ts.levels[0].end) { + return + } + for i := 0; i < len(ts.levels); i++ { + level := ts.levels[i] + if !level.end.Before(t) { + break + } + + // If the time is sufficiently far, just clear the level and advance + // directly. 
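+ // ("Sufficiently far" means t lies at least numBuckets*size past
+ // level.end, beyond anything the level's ring of buckets can still
+ // represent, so nothing is worth keeping; the new end is snapped
+ // down to a whole multiple of the bucket size.)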
+ if !t.Before(level.end.Add(level.size * time.Duration(ts.numBuckets))) { + for _, b := range level.buckets { + ts.resetObservation(b) + } + level.end = time.Unix(0, (t.UnixNano()/level.size.Nanoseconds())*level.size.Nanoseconds()) + } + + for t.After(level.end) { + level.end = level.end.Add(level.size) + level.newest = level.oldest + level.oldest = (level.oldest + 1) % ts.numBuckets + ts.resetObservation(level.buckets[level.newest]) + } + + t = level.end + } +} + +// Latest returns the sum of the num latest buckets from the level. +func (ts *timeSeries) Latest(level, num int) Observable { + now := ts.clock.Time() + if ts.levels[0].end.Before(now) { + ts.advance(now) + } + + ts.mergePendingUpdates() + + result := ts.provider() + l := ts.levels[level] + index := l.newest + + for i := 0; i < num; i++ { + if l.buckets[index] != nil { + result.Add(l.buckets[index]) + } + if index == 0 { + index = ts.numBuckets + } + index-- + } + + return result +} + +// LatestBuckets returns a copy of the num latest buckets from level. +func (ts *timeSeries) LatestBuckets(level, num int) []Observable { + if level < 0 || level > len(ts.levels) { + log.Print("timeseries: bad level argument: ", level) + return nil + } + if num < 0 || num >= ts.numBuckets { + log.Print("timeseries: bad num argument: ", num) + return nil + } + + results := make([]Observable, num) + now := ts.clock.Time() + if ts.levels[0].end.Before(now) { + ts.advance(now) + } + + ts.mergePendingUpdates() + + l := ts.levels[level] + index := l.newest + + for i := 0; i < num; i++ { + result := ts.provider() + results[i] = result + if l.buckets[index] != nil { + result.CopyFrom(l.buckets[index]) + } + + if index == 0 { + index = ts.numBuckets + } + index -= 1 + } + return results +} + +// ScaleBy updates observations by scaling by factor. +func (ts *timeSeries) ScaleBy(factor float64) { + for _, l := range ts.levels { + for i := 0; i < ts.numBuckets; i++ { + l.buckets[i].Multiply(factor) + } + } + + ts.total.Multiply(factor) + ts.pending.Multiply(factor) +} + +// Range returns the sum of observations added over the specified time range. +// If start or finish times don't fall on bucket boundaries of the same +// level, then return values are approximate answers. +func (ts *timeSeries) Range(start, finish time.Time) Observable { + return ts.ComputeRange(start, finish, 1)[0] +} + +// Recent returns the sum of observations from the last delta. +func (ts *timeSeries) Recent(delta time.Duration) Observable { + now := ts.clock.Time() + return ts.Range(now.Add(-delta), now) +} + +// Total returns the total of all observations. +func (ts *timeSeries) Total() Observable { + ts.mergePendingUpdates() + return ts.total +} + +// ComputeRange computes a specified number of values into a slice using +// the observations recorded over the specified time period. The return +// values are approximate if the start or finish times don't fall on the +// bucket boundaries at the same level or if the number of buckets spanning +// the range is not an integral multiple of num. 
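+//
+// For example (illustrative): num = 6 values over the last hour against a
+// level of 10-minute buckets map one bucket to one result, whereas num = 4
+// over the same hour makes each result take fractions of neighbouring
+// buckets, so those values are interpolated approximations.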
+func (ts *timeSeries) ComputeRange(start, finish time.Time, num int) []Observable { + if start.After(finish) { + log.Printf("timeseries: start > finish, %v>%v", start, finish) + return nil + } + + if num < 0 { + log.Printf("timeseries: num < 0, %v", num) + return nil + } + + results := make([]Observable, num) + + for _, l := range ts.levels { + if !start.Before(l.end.Add(-l.size * time.Duration(ts.numBuckets))) { + ts.extract(l, start, finish, num, results) + return results + } + } + + // Failed to find a level that covers the desired range. So just + // extract from the last level, even if it doesn't cover the entire + // desired range. + ts.extract(ts.levels[len(ts.levels)-1], start, finish, num, results) + + return results +} + +// RecentList returns the specified number of values in slice over the most +// recent time period of the specified range. +func (ts *timeSeries) RecentList(delta time.Duration, num int) []Observable { + if delta < 0 { + return nil + } + now := ts.clock.Time() + return ts.ComputeRange(now.Add(-delta), now, num) +} + +// extract returns a slice of specified number of observations from a given +// level over a given range. +func (ts *timeSeries) extract(l *tsLevel, start, finish time.Time, num int, results []Observable) { + ts.mergePendingUpdates() + + srcInterval := l.size + dstInterval := finish.Sub(start) / time.Duration(num) + dstStart := start + srcStart := l.end.Add(-srcInterval * time.Duration(ts.numBuckets)) + + srcIndex := 0 + + // Where should scanning start? + if dstStart.After(srcStart) { + advance := dstStart.Sub(srcStart) / srcInterval + srcIndex += int(advance) + srcStart = srcStart.Add(advance * srcInterval) + } + + // The i'th value is computed as show below. + // interval = (finish/start)/num + // i'th value = sum of observation in range + // [ start + i * interval, + // start + (i + 1) * interval ) + for i := 0; i < num; i++ { + results[i] = ts.resetObservation(results[i]) + dstEnd := dstStart.Add(dstInterval) + for srcIndex < ts.numBuckets && srcStart.Before(dstEnd) { + srcEnd := srcStart.Add(srcInterval) + if srcEnd.After(ts.lastAdd) { + srcEnd = ts.lastAdd + } + + if !srcEnd.Before(dstStart) { + srcValue := l.buckets[(srcIndex+l.oldest)%ts.numBuckets] + if !srcStart.Before(dstStart) && !srcEnd.After(dstEnd) { + // dst completely contains src. + if srcValue != nil { + results[i].Add(srcValue) + } + } else { + // dst partially overlaps src. + overlapStart := maxTime(srcStart, dstStart) + overlapEnd := minTime(srcEnd, dstEnd) + base := srcEnd.Sub(srcStart) + fraction := overlapEnd.Sub(overlapStart).Seconds() / base.Seconds() + + used := ts.provider() + if srcValue != nil { + used.CopyFrom(srcValue) + } + used.Multiply(fraction) + results[i].Add(used) + } + + if srcEnd.After(dstEnd) { + break + } + } + srcIndex++ + srcStart = srcStart.Add(srcInterval) + } + dstStart = dstStart.Add(dstInterval) + } +} + +// resetObservation clears the content so the struct may be reused. +func (ts *timeSeries) resetObservation(observation Observable) Observable { + if observation == nil { + observation = ts.provider() + } else { + observation.Clear() + } + return observation +} + +// TimeSeries tracks data at granularities from 1 second to 16 weeks. +type TimeSeries struct { + timeSeries +} + +// NewTimeSeries creates a new TimeSeries using the function provided for creating new Observable. 
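+// The provider is typically NewFloat, though any Observable constructor
+// works (the trace package passes one that returns a histogram).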
+func NewTimeSeries(f func() Observable) *TimeSeries { + return NewTimeSeriesWithClock(f, defaultClockInstance) +} + +// NewTimeSeriesWithClock creates a new TimeSeries using the function provided for creating new Observable and the clock for +// assigning timestamps. +func NewTimeSeriesWithClock(f func() Observable, clock Clock) *TimeSeries { + ts := new(TimeSeries) + ts.timeSeries.init(timeSeriesResolutions, f, timeSeriesNumBuckets, clock) + return ts +} + +// MinuteHourSeries tracks data at granularities of 1 minute and 1 hour. +type MinuteHourSeries struct { + timeSeries +} + +// NewMinuteHourSeries creates a new MinuteHourSeries using the function provided for creating new Observable. +func NewMinuteHourSeries(f func() Observable) *MinuteHourSeries { + return NewMinuteHourSeriesWithClock(f, defaultClockInstance) +} + +// NewMinuteHourSeriesWithClock creates a new MinuteHourSeries using the function provided for creating new Observable and the clock for +// assigning timestamps. +func NewMinuteHourSeriesWithClock(f func() Observable, clock Clock) *MinuteHourSeries { + ts := new(MinuteHourSeries) + ts.timeSeries.init(minuteHourSeriesResolutions, f, + minuteHourSeriesNumBuckets, clock) + return ts +} + +func (ts *MinuteHourSeries) Minute() Observable { + return ts.timeSeries.Latest(0, 60) +} + +func (ts *MinuteHourSeries) Hour() Observable { + return ts.timeSeries.Latest(1, 60) +} + +func minTime(a, b time.Time) time.Time { + if a.Before(b) { + return a + } + return b +} + +func maxTime(a, b time.Time) time.Time { + if a.After(b) { + return a + } + return b +} diff --git a/vendor/golang.org/x/net/trace/events.go b/vendor/golang.org/x/net/trace/events.go new file mode 100644 index 000000000..c646a6952 --- /dev/null +++ b/vendor/golang.org/x/net/trace/events.go @@ -0,0 +1,532 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package trace + +import ( + "bytes" + "fmt" + "html/template" + "io" + "log" + "net/http" + "runtime" + "sort" + "strconv" + "strings" + "sync" + "sync/atomic" + "text/tabwriter" + "time" +) + +const maxEventsPerLog = 100 + +type bucket struct { + MaxErrAge time.Duration + String string +} + +var buckets = []bucket{ + {0, "total"}, + {10 * time.Second, "errs<10s"}, + {1 * time.Minute, "errs<1m"}, + {10 * time.Minute, "errs<10m"}, + {1 * time.Hour, "errs<1h"}, + {10 * time.Hour, "errs<10h"}, + {24000 * time.Hour, "errors"}, +} + +// RenderEvents renders the HTML page typically served at /debug/events. +// It does not do any auth checking. The request may be nil. +// +// Most users will use the Events handler. +func RenderEvents(w http.ResponseWriter, req *http.Request, sensitive bool) { + now := time.Now() + data := &struct { + Families []string // family names + Buckets []bucket + Counts [][]int // eventLog count per family/bucket + + // Set when a bucket has been selected. + Family string + Bucket int + EventLogs eventLogs + Expanded bool + }{ + Buckets: buckets, + } + + data.Families = make([]string, 0, len(families)) + famMu.RLock() + for name := range families { + data.Families = append(data.Families, name) + } + famMu.RUnlock() + sort.Strings(data.Families) + + // Count the number of eventLogs in each family for each error age. + data.Counts = make([][]int, len(data.Families)) + for i, name := range data.Families { + // TODO(sameer): move this loop under the family lock. 
+ f := getEventFamily(name) + data.Counts[i] = make([]int, len(data.Buckets)) + for j, b := range data.Buckets { + data.Counts[i][j] = f.Count(now, b.MaxErrAge) + } + } + + if req != nil { + var ok bool + data.Family, data.Bucket, ok = parseEventsArgs(req) + if !ok { + // No-op + } else { + data.EventLogs = getEventFamily(data.Family).Copy(now, buckets[data.Bucket].MaxErrAge) + } + if data.EventLogs != nil { + defer data.EventLogs.Free() + sort.Sort(data.EventLogs) + } + if exp, err := strconv.ParseBool(req.FormValue("exp")); err == nil { + data.Expanded = exp + } + } + + famMu.RLock() + defer famMu.RUnlock() + if err := eventsTmpl().Execute(w, data); err != nil { + log.Printf("net/trace: Failed executing template: %v", err) + } +} + +func parseEventsArgs(req *http.Request) (fam string, b int, ok bool) { + fam, bStr := req.FormValue("fam"), req.FormValue("b") + if fam == "" || bStr == "" { + return "", 0, false + } + b, err := strconv.Atoi(bStr) + if err != nil || b < 0 || b >= len(buckets) { + return "", 0, false + } + return fam, b, true +} + +// An EventLog provides a log of events associated with a specific object. +type EventLog interface { + // Printf formats its arguments with fmt.Sprintf and adds the + // result to the event log. + Printf(format string, a ...interface{}) + + // Errorf is like Printf, but it marks this event as an error. + Errorf(format string, a ...interface{}) + + // Finish declares that this event log is complete. + // The event log should not be used after calling this method. + Finish() +} + +// NewEventLog returns a new EventLog with the specified family name +// and title. +func NewEventLog(family, title string) EventLog { + el := newEventLog() + el.ref() + el.Family, el.Title = family, title + el.Start = time.Now() + el.events = make([]logEntry, 0, maxEventsPerLog) + el.stack = make([]uintptr, 32) + n := runtime.Callers(2, el.stack) + el.stack = el.stack[:n] + + getEventFamily(family).add(el) + return el +} + +func (el *eventLog) Finish() { + getEventFamily(el.Family).remove(el) + el.unref() // matches ref in New +} + +var ( + famMu sync.RWMutex + families = make(map[string]*eventFamily) // family name => family +) + +func getEventFamily(fam string) *eventFamily { + famMu.Lock() + defer famMu.Unlock() + f := families[fam] + if f == nil { + f = &eventFamily{} + families[fam] = f + } + return f +} + +type eventFamily struct { + mu sync.RWMutex + eventLogs eventLogs +} + +func (f *eventFamily) add(el *eventLog) { + f.mu.Lock() + f.eventLogs = append(f.eventLogs, el) + f.mu.Unlock() +} + +func (f *eventFamily) remove(el *eventLog) { + f.mu.Lock() + defer f.mu.Unlock() + for i, el0 := range f.eventLogs { + if el == el0 { + copy(f.eventLogs[i:], f.eventLogs[i+1:]) + f.eventLogs = f.eventLogs[:len(f.eventLogs)-1] + return + } + } +} + +func (f *eventFamily) Count(now time.Time, maxErrAge time.Duration) (n int) { + f.mu.RLock() + defer f.mu.RUnlock() + for _, el := range f.eventLogs { + if el.hasRecentError(now, maxErrAge) { + n++ + } + } + return +} + +func (f *eventFamily) Copy(now time.Time, maxErrAge time.Duration) (els eventLogs) { + f.mu.RLock() + defer f.mu.RUnlock() + els = make(eventLogs, 0, len(f.eventLogs)) + for _, el := range f.eventLogs { + if el.hasRecentError(now, maxErrAge) { + el.ref() + els = append(els, el) + } + } + return +} + +type eventLogs []*eventLog + +// Free calls unref on each element of the list. +func (els eventLogs) Free() { + for _, el := range els { + el.unref() + } +} + +// eventLogs may be sorted in reverse chronological order. 
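+// They implement sort.Interface, with Less ordering by Start time, newest
+// first. An illustrative use, mirroring RenderEvents above ("mypkg.Fetcher"
+// is a placeholder family name):
+//
+//	els := getEventFamily("mypkg.Fetcher").Copy(time.Now(), 0) // all logs
+//	defer els.Free()
+//	sort.Sort(els) // newest first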
+func (els eventLogs) Len() int { return len(els) } +func (els eventLogs) Less(i, j int) bool { return els[i].Start.After(els[j].Start) } +func (els eventLogs) Swap(i, j int) { els[i], els[j] = els[j], els[i] } + +// A logEntry is a timestamped log entry in an event log. +type logEntry struct { + When time.Time + Elapsed time.Duration // since previous event in log + NewDay bool // whether this event is on a different day to the previous event + What string + IsErr bool +} + +// WhenString returns a string representation of the elapsed time of the event. +// It will include the date if midnight was crossed. +func (e logEntry) WhenString() string { + if e.NewDay { + return e.When.Format("2006/01/02 15:04:05.000000") + } + return e.When.Format("15:04:05.000000") +} + +// An eventLog represents an active event log. +type eventLog struct { + // Family is the top-level grouping of event logs to which this belongs. + Family string + + // Title is the title of this event log. + Title string + + // Timing information. + Start time.Time + + // Call stack where this event log was created. + stack []uintptr + + // Append-only sequence of events. + // + // TODO(sameer): change this to a ring buffer to avoid the array copy + // when we hit maxEventsPerLog. + mu sync.RWMutex + events []logEntry + LastErrorTime time.Time + discarded int + + refs int32 // how many buckets this is in +} + +func (el *eventLog) reset() { + // Clear all but the mutex. Mutexes may not be copied, even when unlocked. + el.Family = "" + el.Title = "" + el.Start = time.Time{} + el.stack = nil + el.events = nil + el.LastErrorTime = time.Time{} + el.discarded = 0 + el.refs = 0 +} + +func (el *eventLog) hasRecentError(now time.Time, maxErrAge time.Duration) bool { + if maxErrAge == 0 { + return true + } + el.mu.RLock() + defer el.mu.RUnlock() + return now.Sub(el.LastErrorTime) < maxErrAge +} + +// delta returns the elapsed time since the last event or the log start, +// and whether it spans midnight. +// L >= el.mu +func (el *eventLog) delta(t time.Time) (time.Duration, bool) { + if len(el.events) == 0 { + return t.Sub(el.Start), false + } + prev := el.events[len(el.events)-1].When + return t.Sub(prev), prev.Day() != t.Day() + +} + +func (el *eventLog) Printf(format string, a ...interface{}) { + el.printf(false, format, a...) +} + +func (el *eventLog) Errorf(format string, a ...interface{}) { + el.printf(true, format, a...) +} + +func (el *eventLog) printf(isErr bool, format string, a ...interface{}) { + e := logEntry{When: time.Now(), IsErr: isErr, What: fmt.Sprintf(format, a...)} + el.mu.Lock() + e.Elapsed, e.NewDay = el.delta(e.When) + if len(el.events) < maxEventsPerLog { + el.events = append(el.events, e) + } else { + // Discard the oldest event. + if el.discarded == 0 { + // el.discarded starts at two to count for the event it + // is replacing, plus the next one that we are about to + // drop. + el.discarded = 2 + } else { + el.discarded++ + } + // TODO(sameer): if this causes allocations on a critical path, + // change eventLog.What to be a fmt.Stringer, as in trace.go. + el.events[0].What = fmt.Sprintf("(%d events discarded)", el.discarded) + // The timestamp of the discarded meta-event should be + // the time of the last event it is representing. 
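+ // (With maxEventsPerLog = 100, the 101st event turns events[0] into
+ // a "(2 events discarded)" entry stamped with events[1]'s timestamp;
+ // later overflows only bump that count.)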
+ el.events[0].When = el.events[1].When + copy(el.events[1:], el.events[2:]) + el.events[maxEventsPerLog-1] = e + } + if e.IsErr { + el.LastErrorTime = e.When + } + el.mu.Unlock() +} + +func (el *eventLog) ref() { + atomic.AddInt32(&el.refs, 1) +} + +func (el *eventLog) unref() { + if atomic.AddInt32(&el.refs, -1) == 0 { + freeEventLog(el) + } +} + +func (el *eventLog) When() string { + return el.Start.Format("2006/01/02 15:04:05.000000") +} + +func (el *eventLog) ElapsedTime() string { + elapsed := time.Since(el.Start) + return fmt.Sprintf("%.6f", elapsed.Seconds()) +} + +func (el *eventLog) Stack() string { + buf := new(bytes.Buffer) + tw := tabwriter.NewWriter(buf, 1, 8, 1, '\t', 0) + printStackRecord(tw, el.stack) + tw.Flush() + return buf.String() +} + +// printStackRecord prints the function + source line information +// for a single stack trace. +// Adapted from runtime/pprof/pprof.go. +func printStackRecord(w io.Writer, stk []uintptr) { + for _, pc := range stk { + f := runtime.FuncForPC(pc) + if f == nil { + continue + } + file, line := f.FileLine(pc) + name := f.Name() + // Hide runtime.goexit and any runtime functions at the beginning. + if strings.HasPrefix(name, "runtime.") { + continue + } + fmt.Fprintf(w, "# %s\t%s:%d\n", name, file, line) + } +} + +func (el *eventLog) Events() []logEntry { + el.mu.RLock() + defer el.mu.RUnlock() + return el.events +} + +// freeEventLogs is a freelist of *eventLog +var freeEventLogs = make(chan *eventLog, 1000) + +// newEventLog returns a event log ready to use. +func newEventLog() *eventLog { + select { + case el := <-freeEventLogs: + return el + default: + return new(eventLog) + } +} + +// freeEventLog adds el to freeEventLogs if there's room. +// This is non-blocking. +func freeEventLog(el *eventLog) { + el.reset() + select { + case freeEventLogs <- el: + default: + } +} + +var eventsTmplCache *template.Template +var eventsTmplOnce sync.Once + +func eventsTmpl() *template.Template { + eventsTmplOnce.Do(func() { + eventsTmplCache = template.Must(template.New("events").Funcs(template.FuncMap{ + "elapsed": elapsed, + "trimSpace": strings.TrimSpace, + }).Parse(eventsHTML)) + }) + return eventsTmplCache +} + +const eventsHTML = ` + + + events + + + + +

+<h1>/debug/events</h1>

+ + + {{range $i, $fam := .Families}} + + + + {{range $j, $bucket := $.Buckets}} + {{$n := index $.Counts $i $j}} + + {{end}} + + {{end}} +
{{$fam}} + {{if $n}}{{end}} + [{{$n}} {{$bucket.String}}] + {{if $n}}{{end}} +
+ +{{if $.EventLogs}} +
+

+<h3>Family: {{$.Family}}</h3>

+ +{{if $.Expanded}}{{end}} +[Summary]{{if $.Expanded}}{{end}} + +{{if not $.Expanded}}{{end}} +[Expanded]{{if not $.Expanded}}{{end}} + + + + {{range $el := $.EventLogs}} + + + + + {{if $.Expanded}} + + + + + + {{range $el.Events}} + + + + + + {{end}} + {{end}} + {{end}} +
WhenElapsed
{{$el.When}}{{$el.ElapsedTime}}{{$el.Title}} +
{{$el.Stack|trimSpace}}
{{.WhenString}}{{elapsed .Elapsed}}.{{if .IsErr}}E{{else}}.{{end}}. {{.What}}
+{{end}} + + +` diff --git a/vendor/golang.org/x/net/trace/histogram.go b/vendor/golang.org/x/net/trace/histogram.go new file mode 100644 index 000000000..9bf4286c7 --- /dev/null +++ b/vendor/golang.org/x/net/trace/histogram.go @@ -0,0 +1,365 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package trace + +// This file implements histogramming for RPC statistics collection. + +import ( + "bytes" + "fmt" + "html/template" + "log" + "math" + "sync" + + "golang.org/x/net/internal/timeseries" +) + +const ( + bucketCount = 38 +) + +// histogram keeps counts of values in buckets that are spaced +// out in powers of 2: 0-1, 2-3, 4-7... +// histogram implements timeseries.Observable +type histogram struct { + sum int64 // running total of measurements + sumOfSquares float64 // square of running total + buckets []int64 // bucketed values for histogram + value int // holds a single value as an optimization + valueCount int64 // number of values recorded for single value +} + +// AddMeasurement records a value measurement observation to the histogram. +func (h *histogram) addMeasurement(value int64) { + // TODO: assert invariant + h.sum += value + h.sumOfSquares += float64(value) * float64(value) + + bucketIndex := getBucket(value) + + if h.valueCount == 0 || (h.valueCount > 0 && h.value == bucketIndex) { + h.value = bucketIndex + h.valueCount++ + } else { + h.allocateBuckets() + h.buckets[bucketIndex]++ + } +} + +func (h *histogram) allocateBuckets() { + if h.buckets == nil { + h.buckets = make([]int64, bucketCount) + h.buckets[h.value] = h.valueCount + h.value = 0 + h.valueCount = -1 + } +} + +func log2(i int64) int { + n := 0 + for ; i >= 0x100; i >>= 8 { + n += 8 + } + for ; i > 0; i >>= 1 { + n += 1 + } + return n +} + +func getBucket(i int64) (index int) { + index = log2(i) - 1 + if index < 0 { + index = 0 + } + if index >= bucketCount { + index = bucketCount - 1 + } + return +} + +// Total returns the number of recorded observations. +func (h *histogram) total() (total int64) { + if h.valueCount >= 0 { + total = h.valueCount + } + for _, val := range h.buckets { + total += int64(val) + } + return +} + +// Average returns the average value of recorded observations. +func (h *histogram) average() float64 { + t := h.total() + if t == 0 { + return 0 + } + return float64(h.sum) / float64(t) +} + +// Variance returns the variance of recorded observations. +func (h *histogram) variance() float64 { + t := float64(h.total()) + if t == 0 { + return 0 + } + s := float64(h.sum) / t + return h.sumOfSquares/t - s*s +} + +// StandardDeviation returns the standard deviation of recorded observations. +func (h *histogram) standardDeviation() float64 { + return math.Sqrt(h.variance()) +} + +// PercentileBoundary estimates the value that the given fraction of recorded +// observations are less than. +func (h *histogram) percentileBoundary(percentile float64) int64 { + total := h.total() + + // Corner cases (make sure result is strictly less than Total()) + if total == 0 { + return 0 + } else if total == 1 { + return int64(h.average()) + } + + percentOfTotal := round(float64(total) * percentile) + var runningTotal int64 + + for i := range h.buckets { + value := h.buckets[i] + runningTotal += value + if runningTotal == percentOfTotal { + // We hit an exact bucket boundary. If the next bucket has data, it is a + // good estimate of the value. 
If the bucket is empty, we interpolate the + // midpoint between the next bucket's boundary and the next non-zero + // bucket. If the remaining buckets are all empty, then we use the + // boundary for the next bucket as the estimate. + j := uint8(i + 1) + min := bucketBoundary(j) + if runningTotal < total { + for h.buckets[j] == 0 { + j++ + } + } + max := bucketBoundary(j) + return min + round(float64(max-min)/2) + } else if runningTotal > percentOfTotal { + // The value is in this bucket. Interpolate the value. + delta := runningTotal - percentOfTotal + percentBucket := float64(value-delta) / float64(value) + bucketMin := bucketBoundary(uint8(i)) + nextBucketMin := bucketBoundary(uint8(i + 1)) + bucketSize := nextBucketMin - bucketMin + return bucketMin + round(percentBucket*float64(bucketSize)) + } + } + return bucketBoundary(bucketCount - 1) +} + +// Median returns the estimated median of the observed values. +func (h *histogram) median() int64 { + return h.percentileBoundary(0.5) +} + +// Add adds other to h. +func (h *histogram) Add(other timeseries.Observable) { + o := other.(*histogram) + if o.valueCount == 0 { + // Other histogram is empty + } else if h.valueCount >= 0 && o.valueCount > 0 && h.value == o.value { + // Both have a single bucketed value, aggregate them + h.valueCount += o.valueCount + } else { + // Two different values necessitate buckets in this histogram + h.allocateBuckets() + if o.valueCount >= 0 { + h.buckets[o.value] += o.valueCount + } else { + for i := range h.buckets { + h.buckets[i] += o.buckets[i] + } + } + } + h.sumOfSquares += o.sumOfSquares + h.sum += o.sum +} + +// Clear resets the histogram to an empty state, removing all observed values. +func (h *histogram) Clear() { + h.buckets = nil + h.value = 0 + h.valueCount = 0 + h.sum = 0 + h.sumOfSquares = 0 +} + +// CopyFrom copies from other, which must be a *histogram, into h. +func (h *histogram) CopyFrom(other timeseries.Observable) { + o := other.(*histogram) + if o.valueCount == -1 { + h.allocateBuckets() + copy(h.buckets, o.buckets) + } + h.sum = o.sum + h.sumOfSquares = o.sumOfSquares + h.value = o.value + h.valueCount = o.valueCount +} + +// Multiply scales the histogram by the specified ratio. +func (h *histogram) Multiply(ratio float64) { + if h.valueCount == -1 { + for i := range h.buckets { + h.buckets[i] = int64(float64(h.buckets[i]) * ratio) + } + } else { + h.valueCount = int64(float64(h.valueCount) * ratio) + } + h.sum = int64(float64(h.sum) * ratio) + h.sumOfSquares = h.sumOfSquares * ratio +} + +// New creates a new histogram. +func (h *histogram) New() timeseries.Observable { + r := new(histogram) + r.Clear() + return r +} + +func (h *histogram) String() string { + return fmt.Sprintf("%d, %f, %d, %d, %v", + h.sum, h.sumOfSquares, h.value, h.valueCount, h.buckets) +} + +// round returns the closest int64 to the argument +func round(in float64) int64 { + return int64(math.Floor(in + 0.5)) +} + +// bucketBoundary returns the first value in the bucket. +func bucketBoundary(bucket uint8) int64 { + if bucket == 0 { + return 0 + } + return 1 << bucket +} + +// bucketData holds data about a specific bucket for use in distTmpl. +type bucketData struct { + Lower, Upper int64 + N int64 + Pct, CumulativePct float64 + GraphWidth int +} + +// data holds data about a Distribution for use in distTmpl. +type data struct { + Buckets []*bucketData + Count, Median int64 + Mean, StandardDeviation float64 +} + +// maxHTMLBarWidth is the maximum width of the HTML bar for visualizing buckets. 
+const maxHTMLBarWidth = 350.0 + +// newData returns data representing h for use in distTmpl. +func (h *histogram) newData() *data { + // Force the allocation of buckets to simplify the rendering implementation + h.allocateBuckets() + // We scale the bars on the right so that the largest bar is + // maxHTMLBarWidth pixels in width. + maxBucket := int64(0) + for _, n := range h.buckets { + if n > maxBucket { + maxBucket = n + } + } + total := h.total() + barsizeMult := maxHTMLBarWidth / float64(maxBucket) + var pctMult float64 + if total == 0 { + pctMult = 1.0 + } else { + pctMult = 100.0 / float64(total) + } + + buckets := make([]*bucketData, len(h.buckets)) + runningTotal := int64(0) + for i, n := range h.buckets { + if n == 0 { + continue + } + runningTotal += n + var upperBound int64 + if i < bucketCount-1 { + upperBound = bucketBoundary(uint8(i + 1)) + } else { + upperBound = math.MaxInt64 + } + buckets[i] = &bucketData{ + Lower: bucketBoundary(uint8(i)), + Upper: upperBound, + N: n, + Pct: float64(n) * pctMult, + CumulativePct: float64(runningTotal) * pctMult, + GraphWidth: int(float64(n) * barsizeMult), + } + } + return &data{ + Buckets: buckets, + Count: total, + Median: h.median(), + Mean: h.average(), + StandardDeviation: h.standardDeviation(), + } +} + +func (h *histogram) html() template.HTML { + buf := new(bytes.Buffer) + if err := distTmpl().Execute(buf, h.newData()); err != nil { + buf.Reset() + log.Printf("net/trace: couldn't execute template: %v", err) + } + return template.HTML(buf.String()) +} + +var distTmplCache *template.Template +var distTmplOnce sync.Once + +func distTmpl() *template.Template { + distTmplOnce.Do(func() { + // Input: data + distTmplCache = template.Must(template.New("distTmpl").Parse(` + + + + + + + +
Count: {{.Count}}Mean: {{printf "%.0f" .Mean}}StdDev: {{printf "%.0f" .StandardDeviation}}Median: {{.Median}}
+
+ +{{range $b := .Buckets}} +{{if $b}} + + + + + + + + + +{{end}} +{{end}} +
[{{.Lower}},{{.Upper}}){{.N}}{{printf "%#.3f" .Pct}}%{{printf "%#.3f" .CumulativePct}}%
+`)) + }) + return distTmplCache +} diff --git a/vendor/golang.org/x/net/trace/trace.go b/vendor/golang.org/x/net/trace/trace.go new file mode 100644 index 000000000..3ebf6f2da --- /dev/null +++ b/vendor/golang.org/x/net/trace/trace.go @@ -0,0 +1,1130 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package trace implements tracing of requests and long-lived objects. +It exports HTTP interfaces on /debug/requests and /debug/events. + +A trace.Trace provides tracing for short-lived objects, usually requests. +A request handler might be implemented like this: + + func fooHandler(w http.ResponseWriter, req *http.Request) { + tr := trace.New("mypkg.Foo", req.URL.Path) + defer tr.Finish() + ... + tr.LazyPrintf("some event %q happened", str) + ... + if err := somethingImportant(); err != nil { + tr.LazyPrintf("somethingImportant failed: %v", err) + tr.SetError() + } + } + +The /debug/requests HTTP endpoint organizes the traces by family, +errors, and duration. It also provides histogram of request duration +for each family. + +A trace.EventLog provides tracing for long-lived objects, such as RPC +connections. + + // A Fetcher fetches URL paths for a single domain. + type Fetcher struct { + domain string + events trace.EventLog + } + + func NewFetcher(domain string) *Fetcher { + return &Fetcher{ + domain, + trace.NewEventLog("mypkg.Fetcher", domain), + } + } + + func (f *Fetcher) Fetch(path string) (string, error) { + resp, err := http.Get("http://" + f.domain + "/" + path) + if err != nil { + f.events.Errorf("Get(%q) = %v", path, err) + return "", err + } + f.events.Printf("Get(%q) = %s", path, resp.Status) + ... + } + + func (f *Fetcher) Close() error { + f.events.Finish() + return nil + } + +The /debug/events HTTP endpoint organizes the event logs by family and +by time since the last error. The expanded view displays recent log +entries and the log's call stack. +*/ +package trace // import "golang.org/x/net/trace" + +import ( + "bytes" + "context" + "fmt" + "html/template" + "io" + "log" + "net" + "net/http" + "net/url" + "runtime" + "sort" + "strconv" + "sync" + "sync/atomic" + "time" + + "golang.org/x/net/internal/timeseries" +) + +// DebugUseAfterFinish controls whether to debug uses of Trace values after finishing. +// FOR DEBUGGING ONLY. This will slow down the program. +var DebugUseAfterFinish = false + +// HTTP ServeMux paths. +const ( + debugRequestsPath = "/debug/requests" + debugEventsPath = "/debug/events" +) + +// AuthRequest determines whether a specific request is permitted to load the +// /debug/requests or /debug/events pages. +// +// It returns two bools; the first indicates whether the page may be viewed at all, +// and the second indicates whether sensitive events will be shown. +// +// AuthRequest may be replaced by a program to customize its authorization requirements. +// +// The default AuthRequest function returns (true, true) if and only if the request +// comes from localhost/127.0.0.1/[::1]. +var AuthRequest = func(req *http.Request) (any, sensitive bool) { + // RemoteAddr is commonly in the form "IP" or "IP:port". + // If it is in the form "IP:port", split off the port. 
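+ // (net.SplitHostPort fails on a bare "IP" with no port, in which
+ // case RemoteAddr is taken as the host unchanged.)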
+ host, _, err := net.SplitHostPort(req.RemoteAddr) + if err != nil { + host = req.RemoteAddr + } + switch host { + case "localhost", "127.0.0.1", "::1": + return true, true + default: + return false, false + } +} + +func init() { + _, pat := http.DefaultServeMux.Handler(&http.Request{URL: &url.URL{Path: debugRequestsPath}}) + if pat == debugRequestsPath { + panic("/debug/requests is already registered. You may have two independent copies of " + + "golang.org/x/net/trace in your binary, trying to maintain separate state. This may " + + "involve a vendored copy of golang.org/x/net/trace.") + } + + // TODO(jbd): Serve Traces from /debug/traces in the future? + // There is no requirement for a request to be present to have traces. + http.HandleFunc(debugRequestsPath, Traces) + http.HandleFunc(debugEventsPath, Events) +} + +// NewContext returns a copy of the parent context +// and associates it with a Trace. +func NewContext(ctx context.Context, tr Trace) context.Context { + return context.WithValue(ctx, contextKey, tr) +} + +// FromContext returns the Trace bound to the context, if any. +func FromContext(ctx context.Context) (tr Trace, ok bool) { + tr, ok = ctx.Value(contextKey).(Trace) + return +} + +// Traces responds with traces from the program. +// The package initialization registers it in http.DefaultServeMux +// at /debug/requests. +// +// It performs authorization by running AuthRequest. +func Traces(w http.ResponseWriter, req *http.Request) { + any, sensitive := AuthRequest(req) + if !any { + http.Error(w, "not allowed", http.StatusUnauthorized) + return + } + w.Header().Set("Content-Type", "text/html; charset=utf-8") + Render(w, req, sensitive) +} + +// Events responds with a page of events collected by EventLogs. +// The package initialization registers it in http.DefaultServeMux +// at /debug/events. +// +// It performs authorization by running AuthRequest. +func Events(w http.ResponseWriter, req *http.Request) { + any, sensitive := AuthRequest(req) + if !any { + http.Error(w, "not allowed", http.StatusUnauthorized) + return + } + w.Header().Set("Content-Type", "text/html; charset=utf-8") + RenderEvents(w, req, sensitive) +} + +// Render renders the HTML page typically served at /debug/requests. +// It does not do any auth checking. The request may be nil. +// +// Most users will use the Traces handler. +func Render(w io.Writer, req *http.Request, sensitive bool) { + data := &struct { + Families []string + ActiveTraceCount map[string]int + CompletedTraces map[string]*family + + // Set when a bucket has been selected. + Traces traceList + Family string + Bucket int + Expanded bool + Traced bool + Active bool + ShowSensitive bool // whether to show sensitive events + + Histogram template.HTML + HistogramWindow string // e.g. "last minute", "last hour", "all time" + + // If non-zero, the set of traces is a partial set, + // and this is the total number. + Total int + }{ + CompletedTraces: completedTraces, + } + + data.ShowSensitive = sensitive + if req != nil { + // Allow show_sensitive=0 to force hiding of sensitive data for testing. + // This only goes one way; you can't use show_sensitive=1 to see things. 
+ if req.FormValue("show_sensitive") == "0" { + data.ShowSensitive = false + } + + if exp, err := strconv.ParseBool(req.FormValue("exp")); err == nil { + data.Expanded = exp + } + if exp, err := strconv.ParseBool(req.FormValue("rtraced")); err == nil { + data.Traced = exp + } + } + + completedMu.RLock() + data.Families = make([]string, 0, len(completedTraces)) + for fam := range completedTraces { + data.Families = append(data.Families, fam) + } + completedMu.RUnlock() + sort.Strings(data.Families) + + // We are careful here to minimize the time spent locking activeMu, + // since that lock is required every time an RPC starts and finishes. + data.ActiveTraceCount = make(map[string]int, len(data.Families)) + activeMu.RLock() + for fam, s := range activeTraces { + data.ActiveTraceCount[fam] = s.Len() + } + activeMu.RUnlock() + + var ok bool + data.Family, data.Bucket, ok = parseArgs(req) + switch { + case !ok: + // No-op + case data.Bucket == -1: + data.Active = true + n := data.ActiveTraceCount[data.Family] + data.Traces = getActiveTraces(data.Family) + if len(data.Traces) < n { + data.Total = n + } + case data.Bucket < bucketsPerFamily: + if b := lookupBucket(data.Family, data.Bucket); b != nil { + data.Traces = b.Copy(data.Traced) + } + default: + if f := getFamily(data.Family, false); f != nil { + var obs timeseries.Observable + f.LatencyMu.RLock() + switch o := data.Bucket - bucketsPerFamily; o { + case 0: + obs = f.Latency.Minute() + data.HistogramWindow = "last minute" + case 1: + obs = f.Latency.Hour() + data.HistogramWindow = "last hour" + case 2: + obs = f.Latency.Total() + data.HistogramWindow = "all time" + } + f.LatencyMu.RUnlock() + if obs != nil { + data.Histogram = obs.(*histogram).html() + } + } + } + + if data.Traces != nil { + defer data.Traces.Free() + sort.Sort(data.Traces) + } + + completedMu.RLock() + defer completedMu.RUnlock() + if err := pageTmpl().ExecuteTemplate(w, "Page", data); err != nil { + log.Printf("net/trace: Failed executing template: %v", err) + } +} + +func parseArgs(req *http.Request) (fam string, b int, ok bool) { + if req == nil { + return "", 0, false + } + fam, bStr := req.FormValue("fam"), req.FormValue("b") + if fam == "" || bStr == "" { + return "", 0, false + } + b, err := strconv.Atoi(bStr) + if err != nil || b < -1 { + return "", 0, false + } + + return fam, b, true +} + +func lookupBucket(fam string, b int) *traceBucket { + f := getFamily(fam, false) + if f == nil || b < 0 || b >= len(f.Buckets) { + return nil + } + return f.Buckets[b] +} + +type contextKeyT string + +var contextKey = contextKeyT("golang.org/x/net/trace.Trace") + +// Trace represents an active request. +type Trace interface { + // LazyLog adds x to the event log. It will be evaluated each time the + // /debug/requests page is rendered. Any memory referenced by x will be + // pinned until the trace is finished and later discarded. + LazyLog(x fmt.Stringer, sensitive bool) + + // LazyPrintf evaluates its arguments with fmt.Sprintf each time the + // /debug/requests page is rendered. Any memory referenced by a will be + // pinned until the trace is finished and later discarded. + LazyPrintf(format string, a ...interface{}) + + // SetError declares that this trace resulted in an error. + SetError() + + // SetRecycler sets a recycler for the trace. + // f will be called for each event passed to LazyLog at a time when + // it is no longer required, whether while the trace is still active + // and the event is discarded, or when a completed trace is discarded. 
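+	// An illustrative recycler (bufPool is a hypothetical sync.Pool
+	// holding *bytes.Buffer values that were logged via LazyLog):
+	//
+	//	tr.SetRecycler(func(x interface{}) {
+	//		if b, ok := x.(*bytes.Buffer); ok {
+	//			bufPool.Put(b)
+	//		}
+	//	})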
+ SetRecycler(f func(interface{})) + + // SetTraceInfo sets the trace info for the trace. + // This is currently unused. + SetTraceInfo(traceID, spanID uint64) + + // SetMaxEvents sets the maximum number of events that will be stored + // in the trace. This has no effect if any events have already been + // added to the trace. + SetMaxEvents(m int) + + // Finish declares that this trace is complete. + // The trace should not be used after calling this method. + Finish() +} + +type lazySprintf struct { + format string + a []interface{} +} + +func (l *lazySprintf) String() string { + return fmt.Sprintf(l.format, l.a...) +} + +// New returns a new Trace with the specified family and title. +func New(family, title string) Trace { + tr := newTrace() + tr.ref() + tr.Family, tr.Title = family, title + tr.Start = time.Now() + tr.maxEvents = maxEventsPerTrace + tr.events = tr.eventsBuf[:0] + + activeMu.RLock() + s := activeTraces[tr.Family] + activeMu.RUnlock() + if s == nil { + activeMu.Lock() + s = activeTraces[tr.Family] // check again + if s == nil { + s = new(traceSet) + activeTraces[tr.Family] = s + } + activeMu.Unlock() + } + s.Add(tr) + + // Trigger allocation of the completed trace structure for this family. + // This will cause the family to be present in the request page during + // the first trace of this family. We don't care about the return value, + // nor is there any need for this to run inline, so we execute it in its + // own goroutine, but only if the family isn't allocated yet. + completedMu.RLock() + if _, ok := completedTraces[tr.Family]; !ok { + go allocFamily(tr.Family) + } + completedMu.RUnlock() + + return tr +} + +func (tr *trace) Finish() { + elapsed := time.Now().Sub(tr.Start) + tr.mu.Lock() + tr.Elapsed = elapsed + tr.mu.Unlock() + + if DebugUseAfterFinish { + buf := make([]byte, 4<<10) // 4 KB should be enough + n := runtime.Stack(buf, false) + tr.finishStack = buf[:n] + } + + activeMu.RLock() + m := activeTraces[tr.Family] + activeMu.RUnlock() + m.Remove(tr) + + f := getFamily(tr.Family, true) + tr.mu.RLock() // protects tr fields in Cond.match calls + for _, b := range f.Buckets { + if b.Cond.match(tr) { + b.Add(tr) + } + } + tr.mu.RUnlock() + + // Add a sample of elapsed time as microseconds to the family's timeseries + h := new(histogram) + h.addMeasurement(elapsed.Nanoseconds() / 1e3) + f.LatencyMu.Lock() + f.Latency.Add(h) + f.LatencyMu.Unlock() + + tr.unref() // matches ref in New +} + +const ( + bucketsPerFamily = 9 + tracesPerBucket = 10 + maxActiveTraces = 20 // Maximum number of active traces to show. + maxEventsPerTrace = 10 + numHistogramBuckets = 38 +) + +var ( + // The active traces. + activeMu sync.RWMutex + activeTraces = make(map[string]*traceSet) // family -> traces + + // Families of completed traces. + completedMu sync.RWMutex + completedTraces = make(map[string]*family) // family -> traces +) + +type traceSet struct { + mu sync.RWMutex + m map[*trace]bool + + // We could avoid the entire map scan in FirstN by having a slice of all the traces + // ordered by start time, and an index into that from the trace struct, with a periodic + // repack of the slice after enough traces finish; we could also use a skip list or similar. + // However, that would shift some of the expense from /debug/requests time to RPC time, + // which is probably the wrong trade-off. 
+} + +func (ts *traceSet) Len() int { + ts.mu.RLock() + defer ts.mu.RUnlock() + return len(ts.m) +} + +func (ts *traceSet) Add(tr *trace) { + ts.mu.Lock() + if ts.m == nil { + ts.m = make(map[*trace]bool) + } + ts.m[tr] = true + ts.mu.Unlock() +} + +func (ts *traceSet) Remove(tr *trace) { + ts.mu.Lock() + delete(ts.m, tr) + ts.mu.Unlock() +} + +// FirstN returns the first n traces ordered by time. +func (ts *traceSet) FirstN(n int) traceList { + ts.mu.RLock() + defer ts.mu.RUnlock() + + if n > len(ts.m) { + n = len(ts.m) + } + trl := make(traceList, 0, n) + + // Fast path for when no selectivity is needed. + if n == len(ts.m) { + for tr := range ts.m { + tr.ref() + trl = append(trl, tr) + } + sort.Sort(trl) + return trl + } + + // Pick the oldest n traces. + // This is inefficient. See the comment in the traceSet struct. + for tr := range ts.m { + // Put the first n traces into trl in the order they occur. + // When we have n, sort trl, and thereafter maintain its order. + if len(trl) < n { + tr.ref() + trl = append(trl, tr) + if len(trl) == n { + // This is guaranteed to happen exactly once during this loop. + sort.Sort(trl) + } + continue + } + if tr.Start.After(trl[n-1].Start) { + continue + } + + // Find where to insert this one. + tr.ref() + i := sort.Search(n, func(i int) bool { return trl[i].Start.After(tr.Start) }) + trl[n-1].unref() + copy(trl[i+1:], trl[i:]) + trl[i] = tr + } + + return trl +} + +func getActiveTraces(fam string) traceList { + activeMu.RLock() + s := activeTraces[fam] + activeMu.RUnlock() + if s == nil { + return nil + } + return s.FirstN(maxActiveTraces) +} + +func getFamily(fam string, allocNew bool) *family { + completedMu.RLock() + f := completedTraces[fam] + completedMu.RUnlock() + if f == nil && allocNew { + f = allocFamily(fam) + } + return f +} + +func allocFamily(fam string) *family { + completedMu.Lock() + defer completedMu.Unlock() + f := completedTraces[fam] + if f == nil { + f = newFamily() + completedTraces[fam] = f + } + return f +} + +// family represents a set of trace buckets and associated latency information. +type family struct { + // traces may occur in multiple buckets. + Buckets [bucketsPerFamily]*traceBucket + + // latency time series + LatencyMu sync.RWMutex + Latency *timeseries.MinuteHourSeries +} + +func newFamily() *family { + return &family{ + Buckets: [bucketsPerFamily]*traceBucket{ + {Cond: minCond(0)}, + {Cond: minCond(50 * time.Millisecond)}, + {Cond: minCond(100 * time.Millisecond)}, + {Cond: minCond(200 * time.Millisecond)}, + {Cond: minCond(500 * time.Millisecond)}, + {Cond: minCond(1 * time.Second)}, + {Cond: minCond(10 * time.Second)}, + {Cond: minCond(100 * time.Second)}, + {Cond: errorCond{}}, + }, + Latency: timeseries.NewMinuteHourSeries(func() timeseries.Observable { return new(histogram) }), + } +} + +// traceBucket represents a size-capped bucket of historic traces, +// along with a condition for a trace to belong to the bucket. +type traceBucket struct { + Cond cond + + // Ring buffer implementation of a fixed-size FIFO queue. + mu sync.RWMutex + buf [tracesPerBucket]*trace + start int // < tracesPerBucket + length int // <= tracesPerBucket +} + +func (b *traceBucket) Add(tr *trace) { + b.mu.Lock() + defer b.mu.Unlock() + + i := b.start + b.length + if i >= tracesPerBucket { + i -= tracesPerBucket + } + if b.length == tracesPerBucket { + // "Remove" an element from the bucket. 
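+ // (The ring is full: slot i holds the oldest trace and is about to
+ // be overwritten, so release our reference to it and advance start
+ // past it.)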
+ b.buf[i].unref() + b.start++ + if b.start == tracesPerBucket { + b.start = 0 + } + } + b.buf[i] = tr + if b.length < tracesPerBucket { + b.length++ + } + tr.ref() +} + +// Copy returns a copy of the traces in the bucket. +// If tracedOnly is true, only the traces with trace information will be returned. +// The logs will be ref'd before returning; the caller should call +// the Free method when it is done with them. +// TODO(dsymonds): keep track of traced requests in separate buckets. +func (b *traceBucket) Copy(tracedOnly bool) traceList { + b.mu.RLock() + defer b.mu.RUnlock() + + trl := make(traceList, 0, b.length) + for i, x := 0, b.start; i < b.length; i++ { + tr := b.buf[x] + if !tracedOnly || tr.spanID != 0 { + tr.ref() + trl = append(trl, tr) + } + x++ + if x == b.length { + x = 0 + } + } + return trl +} + +func (b *traceBucket) Empty() bool { + b.mu.RLock() + defer b.mu.RUnlock() + return b.length == 0 +} + +// cond represents a condition on a trace. +type cond interface { + match(t *trace) bool + String() string +} + +type minCond time.Duration + +func (m minCond) match(t *trace) bool { return t.Elapsed >= time.Duration(m) } +func (m minCond) String() string { return fmt.Sprintf("≥%gs", time.Duration(m).Seconds()) } + +type errorCond struct{} + +func (e errorCond) match(t *trace) bool { return t.IsError } +func (e errorCond) String() string { return "errors" } + +type traceList []*trace + +// Free calls unref on each element of the list. +func (trl traceList) Free() { + for _, t := range trl { + t.unref() + } +} + +// traceList may be sorted in reverse chronological order. +func (trl traceList) Len() int { return len(trl) } +func (trl traceList) Less(i, j int) bool { return trl[i].Start.After(trl[j].Start) } +func (trl traceList) Swap(i, j int) { trl[i], trl[j] = trl[j], trl[i] } + +// An event is a timestamped log entry in a trace. +type event struct { + When time.Time + Elapsed time.Duration // since previous event in trace + NewDay bool // whether this event is on a different day to the previous event + Recyclable bool // whether this event was passed via LazyLog + Sensitive bool // whether this event contains sensitive information + What interface{} // string or fmt.Stringer +} + +// WhenString returns a string representation of the elapsed time of the event. +// It will include the date if midnight was crossed. +func (e event) WhenString() string { + if e.NewDay { + return e.When.Format("2006/01/02 15:04:05.000000") + } + return e.When.Format("15:04:05.000000") +} + +// discarded represents a number of discarded events. +// It is stored as *discarded to make it easier to update in-place. +type discarded int + +func (d *discarded) String() string { + return fmt.Sprintf("(%d events discarded)", int(*d)) +} + +// trace represents an active or complete request, +// either sent or received by this program. +type trace struct { + // Family is the top-level grouping of traces to which this belongs. + Family string + + // Title is the title of this trace. + Title string + + // Start time of the this trace. + Start time.Time + + mu sync.RWMutex + events []event // Append-only sequence of events (modulo discards). + maxEvents int + recycler func(interface{}) + IsError bool // Whether this trace resulted in an error. + Elapsed time.Duration // Elapsed time for this trace, zero while active. + traceID uint64 // Trace information if non-zero. 
+ spanID uint64 + + refs int32 // how many buckets this is in + disc discarded // scratch space to avoid allocation + + finishStack []byte // where finish was called, if DebugUseAfterFinish is set + + eventsBuf [4]event // preallocated buffer in case we only log a few events +} + +func (tr *trace) reset() { + // Clear all but the mutex. Mutexes may not be copied, even when unlocked. + tr.Family = "" + tr.Title = "" + tr.Start = time.Time{} + + tr.mu.Lock() + tr.Elapsed = 0 + tr.traceID = 0 + tr.spanID = 0 + tr.IsError = false + tr.maxEvents = 0 + tr.events = nil + tr.recycler = nil + tr.mu.Unlock() + + tr.refs = 0 + tr.disc = 0 + tr.finishStack = nil + for i := range tr.eventsBuf { + tr.eventsBuf[i] = event{} + } +} + +// delta returns the elapsed time since the last event or the trace start, +// and whether it spans midnight. +// L >= tr.mu +func (tr *trace) delta(t time.Time) (time.Duration, bool) { + if len(tr.events) == 0 { + return t.Sub(tr.Start), false + } + prev := tr.events[len(tr.events)-1].When + return t.Sub(prev), prev.Day() != t.Day() +} + +func (tr *trace) addEvent(x interface{}, recyclable, sensitive bool) { + if DebugUseAfterFinish && tr.finishStack != nil { + buf := make([]byte, 4<<10) // 4 KB should be enough + n := runtime.Stack(buf, false) + log.Printf("net/trace: trace used after finish:\nFinished at:\n%s\nUsed at:\n%s", tr.finishStack, buf[:n]) + } + + /* + NOTE TO DEBUGGERS + + If you are here because your program panicked in this code, + it is almost definitely the fault of code using this package, + and very unlikely to be the fault of this code. + + The most likely scenario is that some code elsewhere is using + a trace.Trace after its Finish method is called. + You can temporarily set the DebugUseAfterFinish var + to help discover where that is; do not leave that var set, + since it makes this package much less efficient. + */ + + e := event{When: time.Now(), What: x, Recyclable: recyclable, Sensitive: sensitive} + tr.mu.Lock() + e.Elapsed, e.NewDay = tr.delta(e.When) + if len(tr.events) < tr.maxEvents { + tr.events = append(tr.events, e) + } else { + // Discard the middle events. + di := int((tr.maxEvents - 1) / 2) + if d, ok := tr.events[di].What.(*discarded); ok { + (*d)++ + } else { + // disc starts at two to count for the event it is replacing, + // plus the next one that we are about to drop. + tr.disc = 2 + if tr.recycler != nil && tr.events[di].Recyclable { + go tr.recycler(tr.events[di].What) + } + tr.events[di].What = &tr.disc + } + // The timestamp of the discarded meta-event should be + // the time of the last event it is representing. + tr.events[di].When = tr.events[di+1].When + + if tr.recycler != nil && tr.events[di+1].Recyclable { + go tr.recycler(tr.events[di+1].What) + } + copy(tr.events[di+1:], tr.events[di+2:]) + tr.events[tr.maxEvents-1] = e + } + tr.mu.Unlock() +} + +func (tr *trace) LazyLog(x fmt.Stringer, sensitive bool) { + tr.addEvent(x, true, sensitive) +} + +func (tr *trace) LazyPrintf(format string, a ...interface{}) { + tr.addEvent(&lazySprintf{format, a}, false, false) +} + +func (tr *trace) SetError() { + tr.mu.Lock() + tr.IsError = true + tr.mu.Unlock() +} + +func (tr *trace) SetRecycler(f func(interface{})) { + tr.mu.Lock() + tr.recycler = f + tr.mu.Unlock() +} + +func (tr *trace) SetTraceInfo(traceID, spanID uint64) { + tr.mu.Lock() + tr.traceID, tr.spanID = traceID, spanID + tr.mu.Unlock() +} + +func (tr *trace) SetMaxEvents(m int) { + tr.mu.Lock() + // Always keep at least three events: first, discarded count, last. 
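+ // (Hence m must exceed 3, and the cap is only adjustable before the
+ // first event is recorded.)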
+	if len(tr.events) == 0 && m > 3 {
+		tr.maxEvents = m
+	}
+	tr.mu.Unlock()
+}
+
+func (tr *trace) ref() {
+	atomic.AddInt32(&tr.refs, 1)
+}
+
+func (tr *trace) unref() {
+	if atomic.AddInt32(&tr.refs, -1) == 0 {
+		tr.mu.RLock()
+		if tr.recycler != nil {
+			// freeTrace clears tr, so we hold tr.recycler and tr.events here.
+			go func(f func(interface{}), es []event) {
+				for _, e := range es {
+					if e.Recyclable {
+						f(e.What)
+					}
+				}
+			}(tr.recycler, tr.events)
+		}
+		tr.mu.RUnlock()
+
+		freeTrace(tr)
+	}
+}
+
+func (tr *trace) When() string {
+	return tr.Start.Format("2006/01/02 15:04:05.000000")
+}
+
+func (tr *trace) ElapsedTime() string {
+	tr.mu.RLock()
+	t := tr.Elapsed
+	tr.mu.RUnlock()
+
+	if t == 0 {
+		// Active trace.
+		t = time.Since(tr.Start)
+	}
+	return fmt.Sprintf("%.6f", t.Seconds())
+}
+
+func (tr *trace) Events() []event {
+	tr.mu.RLock()
+	defer tr.mu.RUnlock()
+	return tr.events
+}
+
+var traceFreeList = make(chan *trace, 1000) // TODO(dsymonds): Use sync.Pool?
+
+// newTrace returns a trace ready to use.
+func newTrace() *trace {
+	select {
+	case tr := <-traceFreeList:
+		return tr
+	default:
+		return new(trace)
+	}
+}
+
+// freeTrace adds tr to traceFreeList if there's room.
+// This is non-blocking.
+func freeTrace(tr *trace) {
+	if DebugUseAfterFinish {
+		return // never reuse
+	}
+	tr.reset()
+	select {
+	case traceFreeList <- tr:
+	default:
+	}
+}
+
+func elapsed(d time.Duration) string {
+	b := []byte(fmt.Sprintf("%.6f", d.Seconds()))
+
+	// For subsecond durations, blank all zeros before decimal point,
+	// and all zeros between the decimal point and the first non-zero digit.
+	if d < time.Second {
+		dot := bytes.IndexByte(b, '.')
+		for i := 0; i < dot; i++ {
+			b[i] = ' '
+		}
+		for i := dot + 1; i < len(b); i++ {
+			if b[i] == '0' {
+				b[i] = ' '
+			} else {
+				break
+			}
+		}
+	}
+
+	return string(b)
+}
+
+var pageTmplCache *template.Template
+var pageTmplOnce sync.Once
+
+func pageTmpl() *template.Template {
+	pageTmplOnce.Do(func() {
+		pageTmplCache = template.Must(template.New("Page").Funcs(template.FuncMap{
+			"elapsed": elapsed,
+			"add":     func(a, b int) int { return a + b },
+		}).Parse(pageHTML))
+	})
+	return pageTmplCache
+}
+
+const pageHTML = `
+{{template "Prolog" .}}
+{{template "StatusTable" .}}
+{{template "Epilog" .}}
+
+{{define "Prolog"}}
+<html>
+	<head>
+	<title>/debug/requests</title>
+	<style type="text/css">
+		body {
+			font-family: sans-serif;
+		}
+		table#tr-status td.family {
+			padding-right: 2em;
+		}
+		table#tr-status td.active {
+			padding-right: 1em;
+		}
+		table#tr-status td.empty {
+			color: #aaa;
+		}
+		table#reqs {
+			margin-top: 1em;
+		}
+		table#reqs tr.first {
+			{{if $.Expanded}}font-weight: bold;{{end}}
+		}
+		table#reqs td {
+			font-family: monospace;
+		}
+		table#reqs td.when {
+			text-align: right;
+			white-space: nowrap;
+		}
+		table#reqs td.elapsed {
+			padding: 0 0.5em;
+			text-align: right;
+			white-space: pre;
+			width: 10em;
+		}
+	</style>
+	</head>
+	<body>
+
+<h1>/debug/requests</h1>
+{{end}} {{/* end of Prolog */}}
+
+{{define "StatusTable"}}
+<table id="tr-status">
+	{{range $fam := .Families}}
+	<tr>
+		<td class="family">{{$fam}}</td>
+
+		{{$n := index $.ActiveTraceCount $fam}}
+		<td class="active {{if not $n}}empty{{end}}">
+			{{if $n}}<a href="?fam={{$fam}}&b=-1{{if $.Expanded}}&exp=1{{end}}">{{end}}
+			[{{$n}} active]
+			{{if $n}}</a>{{end}}
+		</td>
+
+		{{$f := index $.CompletedTraces $fam}}
+		{{range $i, $b := $f.Buckets}}
+		{{$empty := $b.Empty}}
+		<td {{if $empty}}class="empty"{{end}}>
+		{{if not $empty}}<a href="?fam={{$fam}}&b={{$i}}{{if $.Expanded}}&exp=1{{end}}">{{end}}
+		[{{.Cond}}]
+		{{if not $empty}}</a>{{end}}
+		</td>
+		{{end}}
+
+		{{$nb := len $f.Buckets}}
+		<td>
+			<a href="?fam={{$fam}}&b={{$nb}}">[minute]</a>
+		</td>
+		<td>
+			<a href="?fam={{$fam}}&b={{add $nb 1}}">[hour]</a>
+		</td>
+		<td>
+			<a href="?fam={{$fam}}&b=-2">[total]</a>
+		</td>
+
+	</tr>
+	{{end}}
+</table>
+{{end}} {{/* end of StatusTable */}}
+
+{{define "Epilog"}}
+{{if $.Traces}}
+<hr />
+<h3>Family: {{$.Family}}</h3>
+
+{{if or $.Expanded $.Traced}}
+	<a href="?fam={{$.Family}}&b={{$.Bucket}}">[Normal/Summary]</a>
+{{else}}
+	[Normal/Summary]
+{{end}}
+
+{{if or (not $.Expanded) $.Traced}}
+	<a href="?fam={{$.Family}}&b={{$.Bucket}}&exp=1">[Normal/Expanded]</a>
+{{else}}
+	[Normal/Expanded]
+{{end}}
+
+{{if not $.Active}}
+	{{if or $.Expanded (not $.Traced)}}
+	<a href="?fam={{$.Family}}&b={{$.Bucket}}&rtraced=1">[Traced/Summary]</a>
+	{{else}}
+	[Traced/Summary]
+	{{end}}
+	{{if or (not $.Expanded) (not $.Traced)}}
+	<a href="?fam={{$.Family}}&b={{$.Bucket}}&exp=1&rtraced=1">[Traced/Expanded]</a>
+	{{else}}
+	[Traced/Expanded]
+	{{end}}
+{{end}}
+
+{{if $.Total}}
+<p><em>Showing <b>{{len $.Traces}}</b> of <b>{{$.Total}}</b> traces.</em></p>
+{{end}}
+
+<table id="reqs">
+	<caption>
+		{{if $.Active}}Active{{else}}Completed{{end}} Requests
+	</caption>
+	<tr><th>When</th><th>Elapsed (s)</th></tr>
+	{{range $tr := $.Traces}}
+	<tr class="first">
+		<td class="when">{{$tr.When}}</td>
+		<td class="elapsed">{{$tr.ElapsedTime}}</td>
+		<td>{{$tr.Title}}</td>
+	</tr>
+	{{/* TODO: include traceID/spanID */}}
+	{{if $.Expanded}}
+	{{range $tr.Events}}
+	<tr>
+		<td class="when">{{.WhenString}}</td>
+		<td class="elapsed">{{elapsed .Elapsed}}</td>
+		<td>{{if or $.ShowSensitive (not .Sensitive)}}... {{.What}}{{else}}<em>[redacted]</em>{{end}}</td>
+	</tr>
+	{{end}}
+	{{end}}
+	{{end}}
+</table>
+{{end}} {{/* if $.Traces */}}
+
+{{if $.Histogram}}
+<h4>Latency (&micro;s) of {{$.Family}} over {{$.HistogramWindow}}</h4>
+{{$.Histogram}}
+{{end}} {{/* if $.Histogram */}}
+
+	</body>
+</html>
+{{end}} {{/* end of Epilog */}}
+`
diff --git a/vendor/golang.org/x/oauth2/google/appengine.go b/vendor/golang.org/x/oauth2/google/appengine.go
new file mode 100644
index 000000000..feb1157b1
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/google/appengine.go
@@ -0,0 +1,38 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package google
+
+import (
+	"context"
+	"time"
+
+	"golang.org/x/oauth2"
+)
+
+// Set at init time by appengine_gen1.go. If nil, we're not on App Engine standard first generation (<= Go 1.9) or App Engine flexible.
+var appengineTokenFunc func(c context.Context, scopes ...string) (token string, expiry time.Time, err error)
+
+// Set at init time by appengine_gen1.go. If nil, we're not on App Engine standard first generation (<= Go 1.9) or App Engine flexible.
+var appengineAppIDFunc func(c context.Context) string
+
+// AppEngineTokenSource returns a token source that fetches tokens from either
+// the current application's service account or from the metadata server,
+// depending on the App Engine environment. See below for environment-specific
+// details. If you are implementing a 3-legged OAuth 2.0 flow on App Engine that
+// involves user accounts, see oauth2.Config instead.
+//
+// First generation App Engine runtimes (<= Go 1.9):
+// AppEngineTokenSource returns a token source that fetches tokens issued to the
+// current App Engine application's service account. The provided context must have
+// come from appengine.NewContext.
+//
+// Second generation App Engine runtimes (>= Go 1.11) and App Engine flexible:
+// AppEngineTokenSource is DEPRECATED on second generation runtimes and on the
+// flexible environment. It delegates to ComputeTokenSource, and the provided
+// context and scopes are not used. Please use DefaultTokenSource (or ComputeTokenSource,
+// which DefaultTokenSource will use in this case) instead.
+func AppEngineTokenSource(ctx context.Context, scope ...string) oauth2.TokenSource {
+	return appEngineTokenSource(ctx, scope...)
+}
diff --git a/vendor/golang.org/x/oauth2/google/appengine_gen1.go b/vendor/golang.org/x/oauth2/google/appengine_gen1.go
new file mode 100644
index 000000000..83dacac32
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/google/appengine_gen1.go
@@ -0,0 +1,77 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build appengine
+
+// This file applies to App Engine first generation runtimes (<= Go 1.9).
+
+package google
+
+import (
+	"context"
+	"sort"
+	"strings"
+	"sync"
+
+	"golang.org/x/oauth2"
+	"google.golang.org/appengine"
+)
+
+func init() {
+	appengineTokenFunc = appengine.AccessToken
+	appengineAppIDFunc = appengine.AppID
+}
+
+// See comment on AppEngineTokenSource in appengine.go.
+func appEngineTokenSource(ctx context.Context, scope ...string) oauth2.TokenSource {
+	scopes := append([]string{}, scope...)
+	sort.Strings(scopes)
+	return &gaeTokenSource{
+		ctx:    ctx,
+		scopes: scopes,
+		key:    strings.Join(scopes, " "),
+	}
+}
+
+// aeTokens caches fetched tokens so they can be reused until they expire.
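+// (Editor's note, not in upstream: the variables below implement a small
+// two-level lock — aeTokensMu guards only the map lookup/insert, while each
+// tokenLock.mu serializes the fetch for one scope set, so a slow token
+// fetch for one key never blocks lookups for other keys.)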
+var ( + aeTokensMu sync.Mutex + aeTokens = make(map[string]*tokenLock) // key is space-separated scopes +) + +type tokenLock struct { + mu sync.Mutex // guards t; held while fetching or updating t + t *oauth2.Token +} + +type gaeTokenSource struct { + ctx context.Context + scopes []string + key string // to aeTokens map; space-separated scopes +} + +func (ts *gaeTokenSource) Token() (*oauth2.Token, error) { + aeTokensMu.Lock() + tok, ok := aeTokens[ts.key] + if !ok { + tok = &tokenLock{} + aeTokens[ts.key] = tok + } + aeTokensMu.Unlock() + + tok.mu.Lock() + defer tok.mu.Unlock() + if tok.t.Valid() { + return tok.t, nil + } + access, exp, err := appengineTokenFunc(ts.ctx, ts.scopes...) + if err != nil { + return nil, err + } + tok.t = &oauth2.Token{ + AccessToken: access, + Expiry: exp, + } + return tok.t, nil +} diff --git a/vendor/golang.org/x/oauth2/google/appengine_gen2_flex.go b/vendor/golang.org/x/oauth2/google/appengine_gen2_flex.go new file mode 100644 index 000000000..04c2c2216 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/appengine_gen2_flex.go @@ -0,0 +1,27 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine + +// This file applies to App Engine second generation runtimes (>= Go 1.11) and App Engine flexible. + +package google + +import ( + "context" + "log" + "sync" + + "golang.org/x/oauth2" +) + +var logOnce sync.Once // only spam about deprecation once + +// See comment on AppEngineTokenSource in appengine.go. +func appEngineTokenSource(ctx context.Context, scope ...string) oauth2.TokenSource { + logOnce.Do(func() { + log.Print("google: AppEngineTokenSource is deprecated on App Engine standard second generation runtimes (>= Go 1.11) and App Engine flexible. Please use DefaultTokenSource or ComputeTokenSource.") + }) + return ComputeTokenSource("") +} diff --git a/vendor/golang.org/x/oauth2/google/default.go b/vendor/golang.org/x/oauth2/google/default.go new file mode 100644 index 000000000..ad2c09236 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/default.go @@ -0,0 +1,154 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package google + +import ( + "context" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "os" + "path/filepath" + "runtime" + + "cloud.google.com/go/compute/metadata" + "golang.org/x/oauth2" +) + +// Credentials holds Google credentials, including "Application Default Credentials". +// For more details, see: +// https://developers.google.com/accounts/docs/application-default-credentials +type Credentials struct { + ProjectID string // may be empty + TokenSource oauth2.TokenSource + + // JSON contains the raw bytes from a JSON credentials file. + // This field may be nil if authentication is provided by the + // environment and not with a credentials file, e.g. when code is + // running on Google Cloud Platform. + JSON []byte +} + +// DefaultCredentials is the old name of Credentials. +// +// Deprecated: use Credentials instead. +type DefaultCredentials = Credentials + +// DefaultClient returns an HTTP Client that uses the +// DefaultTokenSource to obtain authentication credentials. +func DefaultClient(ctx context.Context, scope ...string) (*http.Client, error) { + ts, err := DefaultTokenSource(ctx, scope...) 
+ if err != nil { + return nil, err + } + return oauth2.NewClient(ctx, ts), nil +} + +// DefaultTokenSource returns the token source for +// "Application Default Credentials". +// It is a shortcut for FindDefaultCredentials(ctx, scope).TokenSource. +func DefaultTokenSource(ctx context.Context, scope ...string) (oauth2.TokenSource, error) { + creds, err := FindDefaultCredentials(ctx, scope...) + if err != nil { + return nil, err + } + return creds.TokenSource, nil +} + +// FindDefaultCredentials searches for "Application Default Credentials". +// +// It looks for credentials in the following places, +// preferring the first location found: +// +// 1. A JSON file whose path is specified by the +// GOOGLE_APPLICATION_CREDENTIALS environment variable. +// 2. A JSON file in a location known to the gcloud command-line tool. +// On Windows, this is %APPDATA%/gcloud/application_default_credentials.json. +// On other systems, $HOME/.config/gcloud/application_default_credentials.json. +// 3. On Google App Engine standard first generation runtimes (<= Go 1.9) it uses +// the appengine.AccessToken function. +// 4. On Google Compute Engine, Google App Engine standard second generation runtimes +// (>= Go 1.11), and Google App Engine flexible environment, it fetches +// credentials from the metadata server. +func FindDefaultCredentials(ctx context.Context, scopes ...string) (*Credentials, error) { + // First, try the environment variable. + const envVar = "GOOGLE_APPLICATION_CREDENTIALS" + if filename := os.Getenv(envVar); filename != "" { + creds, err := readCredentialsFile(ctx, filename, scopes) + if err != nil { + return nil, fmt.Errorf("google: error getting credentials using %v environment variable: %v", envVar, err) + } + return creds, nil + } + + // Second, try a well-known file. + filename := wellKnownFile() + if creds, err := readCredentialsFile(ctx, filename, scopes); err == nil { + return creds, nil + } else if !os.IsNotExist(err) { + return nil, fmt.Errorf("google: error getting credentials using well-known file (%v): %v", filename, err) + } + + // Third, if we're on a Google App Engine standard first generation runtime (<= Go 1.9) + // use those credentials. App Engine standard second generation runtimes (>= Go 1.11) + // and App Engine flexible use ComputeTokenSource and the metadata server. + if appengineTokenFunc != nil { + return &DefaultCredentials{ + ProjectID: appengineAppIDFunc(ctx), + TokenSource: AppEngineTokenSource(ctx, scopes...), + }, nil + } + + // Fourth, if we're on Google Compute Engine, an App Engine standard second generation runtime, + // or App Engine flexible, use the metadata server. + if metadata.OnGCE() { + id, _ := metadata.ProjectID() + return &DefaultCredentials{ + ProjectID: id, + TokenSource: ComputeTokenSource("", scopes...), + }, nil + } + + // None are found; return helpful error. + const url = "https://developers.google.com/accounts/docs/application-default-credentials" + return nil, fmt.Errorf("google: could not find default credentials. See %v for more information.", url) +} + +// CredentialsFromJSON obtains Google credentials from a JSON value. The JSON can +// represent either a Google Developers Console client_credentials.json file (as in +// ConfigFromJSON) or a Google Developers service account key file (as in +// JWTConfigFromJSON). 
+func CredentialsFromJSON(ctx context.Context, jsonData []byte, scopes ...string) (*Credentials, error) {
+	var f credentialsFile
+	if err := json.Unmarshal(jsonData, &f); err != nil {
+		return nil, err
+	}
+	ts, err := f.tokenSource(ctx, append([]string(nil), scopes...))
+	if err != nil {
+		return nil, err
+	}
+	return &DefaultCredentials{
+		ProjectID:   f.ProjectID,
+		TokenSource: ts,
+		JSON:        jsonData,
+	}, nil
+}
+
+func wellKnownFile() string {
+	const f = "application_default_credentials.json"
+	if runtime.GOOS == "windows" {
+		return filepath.Join(os.Getenv("APPDATA"), "gcloud", f)
+	}
+	return filepath.Join(guessUnixHomeDir(), ".config", "gcloud", f)
+}
+
+func readCredentialsFile(ctx context.Context, filename string, scopes []string) (*DefaultCredentials, error) {
+	b, err := ioutil.ReadFile(filename)
+	if err != nil {
+		return nil, err
+	}
+	return CredentialsFromJSON(ctx, b, scopes...)
+}
diff --git a/vendor/golang.org/x/oauth2/google/doc.go b/vendor/golang.org/x/oauth2/google/doc.go
new file mode 100644
index 000000000..73be62903
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/google/doc.go
@@ -0,0 +1,40 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package google provides support for making OAuth2 authorized and authenticated
+// HTTP requests to Google APIs. It supports the Web server flow, client-side
+// credentials, service accounts, Google Compute Engine service accounts, and Google
+// App Engine service accounts.
+//
+// A brief overview of the package follows. For more information, please read
+// https://developers.google.com/accounts/docs/OAuth2
+// and
+// https://developers.google.com/accounts/docs/application-default-credentials.
+//
+// OAuth2 Configs
+//
+// Two functions in this package return golang.org/x/oauth2.Config values from Google credential
+// data. Google supports two JSON formats for OAuth2 credentials: one is handled by ConfigFromJSON,
+// the other by JWTConfigFromJSON. The returned Config can be used to obtain a TokenSource or
+// create an http.Client.
+//
+//
+// Credentials
+//
+// The Credentials type represents Google credentials, including Application Default
+// Credentials.
+//
+// Use FindDefaultCredentials to obtain Application Default Credentials.
+// FindDefaultCredentials looks in some well-known places for a credentials file, and
+// will call AppEngineTokenSource or ComputeTokenSource as needed.
+//
+// DefaultClient and DefaultTokenSource are convenience methods. They first call FindDefaultCredentials,
+// then use the credentials to construct an http.Client or an oauth2.TokenSource.
+//
+// Use CredentialsFromJSON to obtain credentials from either of the two JSON formats
+// described in OAuth2 Configs, above. The TokenSource in the returned value is the
+// same as the one obtained from the oauth2.Config returned from ConfigFromJSON or
+// JWTConfigFromJSON, but the Credentials may contain additional information
+// that is useful in some circumstances.
+package google // import "golang.org/x/oauth2/google"
diff --git a/vendor/golang.org/x/oauth2/google/google.go b/vendor/golang.org/x/oauth2/google/google.go
new file mode 100644
index 000000000..81de32b36
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/google/google.go
@@ -0,0 +1,209 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
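Editor's note: a minimal, editor-supplied usage sketch (not part of this patch) for the Application Default Credentials lookup implemented in default.go and documented in doc.go above; the cloud-platform scope is only an example.

// Illustrative sketch by the editor; not part of the vendored patch.
package main

import (
	"context"
	"fmt"
	"log"

	"golang.org/x/oauth2/google"
)

const scope = "https://www.googleapis.com/auth/cloud-platform" // example scope

func main() {
	ctx := context.Background()

	// FindDefaultCredentials walks the chain described in default.go:
	// $GOOGLE_APPLICATION_CREDENTIALS, the gcloud well-known file,
	// App Engine (first generation), then the GCE metadata server.
	creds, err := google.FindDefaultCredentials(ctx, scope)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("project:", creds.ProjectID) // may be empty

	// DefaultClient wraps the same lookup in an *http.Client that
	// attaches Authorization headers to every request.
	client, err := google.DefaultClient(ctx, scope)
	if err != nil {
		log.Fatal(err)
	}
	_ = client // use client to call Google APIs
}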
+ +package google + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net/url" + "strings" + "time" + + "cloud.google.com/go/compute/metadata" + "golang.org/x/oauth2" + "golang.org/x/oauth2/jwt" +) + +// Endpoint is Google's OAuth 2.0 endpoint. +var Endpoint = oauth2.Endpoint{ + AuthURL: "https://accounts.google.com/o/oauth2/auth", + TokenURL: "https://oauth2.googleapis.com/token", + AuthStyle: oauth2.AuthStyleInParams, +} + +// JWTTokenURL is Google's OAuth 2.0 token URL to use with the JWT flow. +const JWTTokenURL = "https://oauth2.googleapis.com/token" + +// ConfigFromJSON uses a Google Developers Console client_credentials.json +// file to construct a config. +// client_credentials.json can be downloaded from +// https://console.developers.google.com, under "Credentials". Download the Web +// application credentials in the JSON format and provide the contents of the +// file as jsonKey. +func ConfigFromJSON(jsonKey []byte, scope ...string) (*oauth2.Config, error) { + type cred struct { + ClientID string `json:"client_id"` + ClientSecret string `json:"client_secret"` + RedirectURIs []string `json:"redirect_uris"` + AuthURI string `json:"auth_uri"` + TokenURI string `json:"token_uri"` + } + var j struct { + Web *cred `json:"web"` + Installed *cred `json:"installed"` + } + if err := json.Unmarshal(jsonKey, &j); err != nil { + return nil, err + } + var c *cred + switch { + case j.Web != nil: + c = j.Web + case j.Installed != nil: + c = j.Installed + default: + return nil, fmt.Errorf("oauth2/google: no credentials found") + } + if len(c.RedirectURIs) < 1 { + return nil, errors.New("oauth2/google: missing redirect URL in the client_credentials.json") + } + return &oauth2.Config{ + ClientID: c.ClientID, + ClientSecret: c.ClientSecret, + RedirectURL: c.RedirectURIs[0], + Scopes: scope, + Endpoint: oauth2.Endpoint{ + AuthURL: c.AuthURI, + TokenURL: c.TokenURI, + }, + }, nil +} + +// JWTConfigFromJSON uses a Google Developers service account JSON key file to read +// the credentials that authorize and authenticate the requests. +// Create a service account on "Credentials" for your project at +// https://console.developers.google.com to download a JSON key file. +func JWTConfigFromJSON(jsonKey []byte, scope ...string) (*jwt.Config, error) { + var f credentialsFile + if err := json.Unmarshal(jsonKey, &f); err != nil { + return nil, err + } + if f.Type != serviceAccountKey { + return nil, fmt.Errorf("google: read JWT from JSON credentials: 'type' field is %q (expected %q)", f.Type, serviceAccountKey) + } + scope = append([]string(nil), scope...) // copy + return f.jwtConfig(scope), nil +} + +// JSON key file types. +const ( + serviceAccountKey = "service_account" + userCredentialsKey = "authorized_user" +) + +// credentialsFile is the unmarshalled representation of a credentials file. +type credentialsFile struct { + Type string `json:"type"` // serviceAccountKey or userCredentialsKey + + // Service Account fields + ClientEmail string `json:"client_email"` + PrivateKeyID string `json:"private_key_id"` + PrivateKey string `json:"private_key"` + TokenURL string `json:"token_uri"` + ProjectID string `json:"project_id"` + + // User Credential fields + // (These typically come from gcloud auth.) 
+	ClientSecret string `json:"client_secret"`
+	ClientID     string `json:"client_id"`
+	RefreshToken string `json:"refresh_token"`
+}
+
+func (f *credentialsFile) jwtConfig(scopes []string) *jwt.Config {
+	cfg := &jwt.Config{
+		Email:        f.ClientEmail,
+		PrivateKey:   []byte(f.PrivateKey),
+		PrivateKeyID: f.PrivateKeyID,
+		Scopes:       scopes,
+		TokenURL:     f.TokenURL,
+	}
+	if cfg.TokenURL == "" {
+		cfg.TokenURL = JWTTokenURL
+	}
+	return cfg
+}
+
+func (f *credentialsFile) tokenSource(ctx context.Context, scopes []string) (oauth2.TokenSource, error) {
+	switch f.Type {
+	case serviceAccountKey:
+		cfg := f.jwtConfig(scopes)
+		return cfg.TokenSource(ctx), nil
+	case userCredentialsKey:
+		cfg := &oauth2.Config{
+			ClientID:     f.ClientID,
+			ClientSecret: f.ClientSecret,
+			Scopes:       scopes,
+			Endpoint:     Endpoint,
+		}
+		tok := &oauth2.Token{RefreshToken: f.RefreshToken}
+		return cfg.TokenSource(ctx, tok), nil
+	case "":
+		return nil, errors.New("missing 'type' field in credentials")
+	default:
+		return nil, fmt.Errorf("unknown credential type: %q", f.Type)
+	}
+}
+
+// ComputeTokenSource returns a token source that fetches access tokens
+// from Google Compute Engine (GCE)'s metadata server. It's only valid to use
+// this token source if your program is running on a GCE instance.
+// If no account is specified, "default" is used.
+// If no scopes are specified, a set of default scopes is automatically granted.
+// Further information about retrieving access tokens from the GCE metadata
+// server can be found at https://cloud.google.com/compute/docs/authentication.
+func ComputeTokenSource(account string, scope ...string) oauth2.TokenSource {
+	return oauth2.ReuseTokenSource(nil, computeSource{account: account, scopes: scope})
+}
+
+type computeSource struct {
+	account string
+	scopes  []string
+}
+
+func (cs computeSource) Token() (*oauth2.Token, error) {
+	if !metadata.OnGCE() {
+		return nil, errors.New("oauth2/google: can't get a token from the metadata service; not running on GCE")
+	}
+	acct := cs.account
+	if acct == "" {
+		acct = "default"
+	}
+	tokenURI := "instance/service-accounts/" + acct + "/token"
+	if len(cs.scopes) > 0 {
+		v := url.Values{}
+		v.Set("scopes", strings.Join(cs.scopes, ","))
+		tokenURI = tokenURI + "?" + v.Encode()
+	}
+	tokenJSON, err := metadata.Get(tokenURI)
+	if err != nil {
+		return nil, err
+	}
+	var res struct {
+		AccessToken  string `json:"access_token"`
+		ExpiresInSec int    `json:"expires_in"`
+		TokenType    string `json:"token_type"`
+	}
+	err = json.NewDecoder(strings.NewReader(tokenJSON)).Decode(&res)
+	if err != nil {
+		return nil, fmt.Errorf("oauth2/google: invalid token JSON from metadata: %v", err)
+	}
+	if res.ExpiresInSec == 0 || res.AccessToken == "" {
+		return nil, fmt.Errorf("oauth2/google: incomplete token received from metadata")
+	}
+	tok := &oauth2.Token{
+		AccessToken: res.AccessToken,
+		TokenType:   res.TokenType,
+		Expiry:      time.Now().Add(time.Duration(res.ExpiresInSec) * time.Second),
+	}
+	// NOTE(cbro): add hidden metadata about where the token is from.
+	// This is needed for detection by client libraries to know that credentials come from the metadata server.
+	// This may be removed in a future version of this library.
+ return tok.WithExtra(map[string]interface{}{ + "oauth2.google.tokenSource": "compute-metadata", + "oauth2.google.serviceAccount": acct, + }), nil +} diff --git a/vendor/golang.org/x/oauth2/google/jwt.go b/vendor/golang.org/x/oauth2/google/jwt.go new file mode 100644 index 000000000..b0fdb3a88 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/jwt.go @@ -0,0 +1,74 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package google + +import ( + "crypto/rsa" + "fmt" + "time" + + "golang.org/x/oauth2" + "golang.org/x/oauth2/internal" + "golang.org/x/oauth2/jws" +) + +// JWTAccessTokenSourceFromJSON uses a Google Developers service account JSON +// key file to read the credentials that authorize and authenticate the +// requests, and returns a TokenSource that does not use any OAuth2 flow but +// instead creates a JWT and sends that as the access token. +// The audience is typically a URL that specifies the scope of the credentials. +// +// Note that this is not a standard OAuth flow, but rather an +// optimization supported by a few Google services. +// Unless you know otherwise, you should use JWTConfigFromJSON instead. +func JWTAccessTokenSourceFromJSON(jsonKey []byte, audience string) (oauth2.TokenSource, error) { + cfg, err := JWTConfigFromJSON(jsonKey) + if err != nil { + return nil, fmt.Errorf("google: could not parse JSON key: %v", err) + } + pk, err := internal.ParseKey(cfg.PrivateKey) + if err != nil { + return nil, fmt.Errorf("google: could not parse key: %v", err) + } + ts := &jwtAccessTokenSource{ + email: cfg.Email, + audience: audience, + pk: pk, + pkID: cfg.PrivateKeyID, + } + tok, err := ts.Token() + if err != nil { + return nil, err + } + return oauth2.ReuseTokenSource(tok, ts), nil +} + +type jwtAccessTokenSource struct { + email, audience string + pk *rsa.PrivateKey + pkID string +} + +func (ts *jwtAccessTokenSource) Token() (*oauth2.Token, error) { + iat := time.Now() + exp := iat.Add(time.Hour) + cs := &jws.ClaimSet{ + Iss: ts.email, + Sub: ts.email, + Aud: ts.audience, + Iat: iat.Unix(), + Exp: exp.Unix(), + } + hdr := &jws.Header{ + Algorithm: "RS256", + Typ: "JWT", + KeyID: string(ts.pkID), + } + msg, err := jws.Encode(hdr, cs, ts.pk) + if err != nil { + return nil, fmt.Errorf("google: could not encode JWT: %v", err) + } + return &oauth2.Token{AccessToken: msg, TokenType: "Bearer", Expiry: exp}, nil +} diff --git a/vendor/golang.org/x/oauth2/google/sdk.go b/vendor/golang.org/x/oauth2/google/sdk.go new file mode 100644 index 000000000..456224bc7 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/sdk.go @@ -0,0 +1,201 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
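Editor's note: before sdk.go begins, a brief editor-supplied sketch (not part of this patch) of the two service-account entry points defined above — JWTConfigFromJSON in google.go and JWTAccessTokenSourceFromJSON in jwt.go; the key path and audience URL are placeholders.

// Illustrative sketch by the editor; not part of the vendored patch.
package main

import (
	"context"
	"io/ioutil"
	"log"

	"golang.org/x/oauth2/google"
)

func main() {
	// "service-account.json" is a hypothetical path to a downloaded key.
	keyJSON, err := ioutil.ReadFile("service-account.json")
	if err != nil {
		log.Fatal(err)
	}

	// Two-legged flow: sign a JWT and exchange it at cfg.TokenURL
	// (JWTTokenURL unless the key file supplies its own token_uri).
	cfg, err := google.JWTConfigFromJSON(keyJSON, "https://www.googleapis.com/auth/cloud-platform")
	if err != nil {
		log.Fatal(err)
	}
	_ = cfg.Client(context.Background()) // auto-refreshing client

	// JWT-access variant: send a self-signed JWT directly as the bearer
	// token, skipping the token-endpoint round trip. The audience URL
	// below is illustrative.
	ts, err := google.JWTAccessTokenSourceFromJSON(keyJSON, "https://example.googleapis.com/")
	if err != nil {
		log.Fatal(err)
	}
	_ = ts
}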
+ +package google + +import ( + "bufio" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "os" + "os/user" + "path/filepath" + "runtime" + "strings" + "time" + + "golang.org/x/oauth2" +) + +type sdkCredentials struct { + Data []struct { + Credential struct { + ClientID string `json:"client_id"` + ClientSecret string `json:"client_secret"` + AccessToken string `json:"access_token"` + RefreshToken string `json:"refresh_token"` + TokenExpiry *time.Time `json:"token_expiry"` + } `json:"credential"` + Key struct { + Account string `json:"account"` + Scope string `json:"scope"` + } `json:"key"` + } +} + +// An SDKConfig provides access to tokens from an account already +// authorized via the Google Cloud SDK. +type SDKConfig struct { + conf oauth2.Config + initialToken *oauth2.Token +} + +// NewSDKConfig creates an SDKConfig for the given Google Cloud SDK +// account. If account is empty, the account currently active in +// Google Cloud SDK properties is used. +// Google Cloud SDK credentials must be created by running `gcloud auth` +// before using this function. +// The Google Cloud SDK is available at https://cloud.google.com/sdk/. +func NewSDKConfig(account string) (*SDKConfig, error) { + configPath, err := sdkConfigPath() + if err != nil { + return nil, fmt.Errorf("oauth2/google: error getting SDK config path: %v", err) + } + credentialsPath := filepath.Join(configPath, "credentials") + f, err := os.Open(credentialsPath) + if err != nil { + return nil, fmt.Errorf("oauth2/google: failed to load SDK credentials: %v", err) + } + defer f.Close() + + var c sdkCredentials + if err := json.NewDecoder(f).Decode(&c); err != nil { + return nil, fmt.Errorf("oauth2/google: failed to decode SDK credentials from %q: %v", credentialsPath, err) + } + if len(c.Data) == 0 { + return nil, fmt.Errorf("oauth2/google: no credentials found in %q, run `gcloud auth login` to create one", credentialsPath) + } + if account == "" { + propertiesPath := filepath.Join(configPath, "properties") + f, err := os.Open(propertiesPath) + if err != nil { + return nil, fmt.Errorf("oauth2/google: failed to load SDK properties: %v", err) + } + defer f.Close() + ini, err := parseINI(f) + if err != nil { + return nil, fmt.Errorf("oauth2/google: failed to parse SDK properties %q: %v", propertiesPath, err) + } + core, ok := ini["core"] + if !ok { + return nil, fmt.Errorf("oauth2/google: failed to find [core] section in %v", ini) + } + active, ok := core["account"] + if !ok { + return nil, fmt.Errorf("oauth2/google: failed to find %q attribute in %v", "account", core) + } + account = active + } + + for _, d := range c.Data { + if account == "" || d.Key.Account == account { + if d.Credential.AccessToken == "" && d.Credential.RefreshToken == "" { + return nil, fmt.Errorf("oauth2/google: no token available for account %q", account) + } + var expiry time.Time + if d.Credential.TokenExpiry != nil { + expiry = *d.Credential.TokenExpiry + } + return &SDKConfig{ + conf: oauth2.Config{ + ClientID: d.Credential.ClientID, + ClientSecret: d.Credential.ClientSecret, + Scopes: strings.Split(d.Key.Scope, " "), + Endpoint: Endpoint, + RedirectURL: "oob", + }, + initialToken: &oauth2.Token{ + AccessToken: d.Credential.AccessToken, + RefreshToken: d.Credential.RefreshToken, + Expiry: expiry, + }, + }, nil + } + } + return nil, fmt.Errorf("oauth2/google: no such credentials for account %q", account) +} + +// Client returns an HTTP client using Google Cloud SDK credentials to +// authorize requests. 
The token will auto-refresh as necessary. The
+// underlying http.RoundTripper will be obtained using the provided
+// context. The returned client and its Transport should not be
+// modified.
+func (c *SDKConfig) Client(ctx context.Context) *http.Client {
+	return &http.Client{
+		Transport: &oauth2.Transport{
+			Source: c.TokenSource(ctx),
+		},
+	}
+}
+
+// TokenSource returns an oauth2.TokenSource that retrieves tokens from
+// Google Cloud SDK credentials using the provided context.
+// It returns the current access token stored in the credentials,
+// and refreshes it when it expires, but it won't update the credentials
+// with the new access token.
+func (c *SDKConfig) TokenSource(ctx context.Context) oauth2.TokenSource {
+	return c.conf.TokenSource(ctx, c.initialToken)
+}
+
+// Scopes are the OAuth 2.0 scopes the current account is authorized for.
+func (c *SDKConfig) Scopes() []string {
+	return c.conf.Scopes
+}
+
+func parseINI(ini io.Reader) (map[string]map[string]string, error) {
+	result := map[string]map[string]string{
+		"": {}, // root section
+	}
+	scanner := bufio.NewScanner(ini)
+	currentSection := ""
+	for scanner.Scan() {
+		line := strings.TrimSpace(scanner.Text())
+		if strings.HasPrefix(line, ";") {
+			// comment.
+			continue
+		}
+		if strings.HasPrefix(line, "[") && strings.HasSuffix(line, "]") {
+			currentSection = strings.TrimSpace(line[1 : len(line)-1])
+			result[currentSection] = map[string]string{}
+			continue
+		}
+		parts := strings.SplitN(line, "=", 2)
+		if len(parts) == 2 && parts[0] != "" {
+			result[currentSection][strings.TrimSpace(parts[0])] = strings.TrimSpace(parts[1])
+		}
+	}
+	if err := scanner.Err(); err != nil {
+		return nil, fmt.Errorf("error scanning ini: %v", err)
+	}
+	return result, nil
+}
+
+// sdkConfigPath tries to guess where the gcloud config is located.
+// It can be overridden during tests.
+var sdkConfigPath = func() (string, error) {
+	if runtime.GOOS == "windows" {
+		return filepath.Join(os.Getenv("APPDATA"), "gcloud"), nil
+	}
+	homeDir := guessUnixHomeDir()
+	if homeDir == "" {
+		return "", errors.New("unable to get current user home directory: os/user lookup failed; $HOME is empty")
+	}
+	return filepath.Join(homeDir, ".config", "gcloud"), nil
+}
+
+func guessUnixHomeDir() string {
+	// Prefer $HOME over user.Current due to glibc bug: golang.org/issue/13470
+	if v := os.Getenv("HOME"); v != "" {
+		return v
+	}
+	// Else, fall back to user.Current:
+	if u, err := user.Current(); err == nil {
+		return u.HomeDir
+	}
+	return ""
+}
diff --git a/vendor/golang.org/x/oauth2/jws/jws.go b/vendor/golang.org/x/oauth2/jws/jws.go
new file mode 100644
index 000000000..683d2d271
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/jws/jws.go
@@ -0,0 +1,182 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package jws provides a partial implementation
+// of JSON Web Signature encoding and decoding.
+// It exists to support the golang.org/x/oauth2 package.
+//
+// See RFC 7515.
+//
+// Deprecated: this package is not intended for public use and might be
+// removed in the future. It exists for internal use only.
+// Please switch to another JWS package or copy this package into your own
+// source tree.
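Editor's note: an editor-supplied sketch (not part of this patch) of the gcloud-credentials path implemented in sdk.go above; it assumes `gcloud auth login` has already been run on the machine.

// Illustrative sketch by the editor; not part of the vendored patch.
package main

import (
	"context"
	"fmt"
	"log"

	"golang.org/x/oauth2/google"
)

func main() {
	// An empty account name selects whatever account is active in the
	// gcloud properties file; `gcloud auth login` must have been run.
	cfg, err := google.NewSDKConfig("")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("authorized scopes:", cfg.Scopes())

	// Client returns an auto-refreshing *http.Client; note that refreshed
	// tokens are not written back to the gcloud credentials file.
	_ = cfg.Client(context.Background())
}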
+package jws // import "golang.org/x/oauth2/jws"
+
+import (
+	"bytes"
+	"crypto"
+	"crypto/rand"
+	"crypto/rsa"
+	"crypto/sha256"
+	"encoding/base64"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"strings"
+	"time"
+)
+
+// ClaimSet contains information about the JWT signature including the
+// permissions being requested (scopes), the target of the token, the issuer,
+// the time the token was issued, and the lifetime of the token.
+type ClaimSet struct {
+	Iss   string `json:"iss"`             // email address of the client_id of the application making the access token request
+	Scope string `json:"scope,omitempty"` // space-delimited list of the permissions the application requests
+	Aud   string `json:"aud"`             // descriptor of the intended target of the assertion (Optional).
+	Exp   int64  `json:"exp"`             // the expiration time of the assertion (seconds since Unix epoch)
+	Iat   int64  `json:"iat"`             // the time the assertion was issued (seconds since Unix epoch)
+	Typ   string `json:"typ,omitempty"`   // token type (Optional).
+
+	// Email for which the application is requesting delegated access (Optional).
+	Sub string `json:"sub,omitempty"`
+
+	// The old name of Sub. Client keeps setting Prn to be
+	// compliant with legacy OAuth 2.0 providers. (Optional)
+	Prn string `json:"prn,omitempty"`
+
+	// See http://tools.ietf.org/html/draft-jones-json-web-token-10#section-4.3
+	// This map is marshalled using custom code (see (c *ClaimSet) encode()).
+	PrivateClaims map[string]interface{} `json:"-"`
+}
+
+func (c *ClaimSet) encode() (string, error) {
+	// Reverting time back for machines whose time is not perfectly in sync.
+	// If client machine's time is in the future according
+	// to Google servers, an access token will not be issued.
+	now := time.Now().Add(-10 * time.Second)
+	if c.Iat == 0 {
+		c.Iat = now.Unix()
+	}
+	if c.Exp == 0 {
+		c.Exp = now.Add(time.Hour).Unix()
+	}
+	if c.Exp < c.Iat {
+		return "", fmt.Errorf("jws: invalid Exp = %v; must be later than Iat = %v", c.Exp, c.Iat)
+	}
+
+	b, err := json.Marshal(c)
+	if err != nil {
+		return "", err
+	}
+
+	if len(c.PrivateClaims) == 0 {
+		return base64.RawURLEncoding.EncodeToString(b), nil
+	}
+
+	// Marshal private claim set and then append it to b.
+	prv, err := json.Marshal(c.PrivateClaims)
+	if err != nil {
+		return "", fmt.Errorf("jws: invalid map of private claims %v", c.PrivateClaims)
+	}
+
+	// Concatenate public and private claim JSON objects.
+	if !bytes.HasSuffix(b, []byte{'}'}) {
+		return "", fmt.Errorf("jws: invalid JSON %s", b)
+	}
+	if !bytes.HasPrefix(prv, []byte{'{'}) {
+		return "", fmt.Errorf("jws: invalid JSON %s", prv)
+	}
+	b[len(b)-1] = ','         // Replace closing curly brace with a comma.
+	b = append(b, prv[1:]...) // Append private claims.
+	return base64.RawURLEncoding.EncodeToString(b), nil
+}
+
+// Header represents the header for the signed JWS payloads.
+type Header struct {
+	// The algorithm used for signature.
+	Algorithm string `json:"alg"`
+
+	// Represents the token type.
+	Typ string `json:"typ"`
+
+	// The optional hint of which key is being used.
+	KeyID string `json:"kid,omitempty"`
+}
+
+func (h *Header) encode() (string, error) {
+	b, err := json.Marshal(h)
+	if err != nil {
+		return "", err
+	}
+	return base64.RawURLEncoding.EncodeToString(b), nil
+}
+
+// Decode decodes a claim set from a JWS payload.
+func Decode(payload string) (*ClaimSet, error) {
+	// decode returned id token to get expiry
+	s := strings.Split(payload, ".")
+	if len(s) < 2 {
+		// TODO(jbd): Provide more context about the error.
+ return nil, errors.New("jws: invalid token received") + } + decoded, err := base64.RawURLEncoding.DecodeString(s[1]) + if err != nil { + return nil, err + } + c := &ClaimSet{} + err = json.NewDecoder(bytes.NewBuffer(decoded)).Decode(c) + return c, err +} + +// Signer returns a signature for the given data. +type Signer func(data []byte) (sig []byte, err error) + +// EncodeWithSigner encodes a header and claim set with the provided signer. +func EncodeWithSigner(header *Header, c *ClaimSet, sg Signer) (string, error) { + head, err := header.encode() + if err != nil { + return "", err + } + cs, err := c.encode() + if err != nil { + return "", err + } + ss := fmt.Sprintf("%s.%s", head, cs) + sig, err := sg([]byte(ss)) + if err != nil { + return "", err + } + return fmt.Sprintf("%s.%s", ss, base64.RawURLEncoding.EncodeToString(sig)), nil +} + +// Encode encodes a signed JWS with provided header and claim set. +// This invokes EncodeWithSigner using crypto/rsa.SignPKCS1v15 with the given RSA private key. +func Encode(header *Header, c *ClaimSet, key *rsa.PrivateKey) (string, error) { + sg := func(data []byte) (sig []byte, err error) { + h := sha256.New() + h.Write(data) + return rsa.SignPKCS1v15(rand.Reader, key, crypto.SHA256, h.Sum(nil)) + } + return EncodeWithSigner(header, c, sg) +} + +// Verify tests whether the provided JWT token's signature was produced by the private key +// associated with the supplied public key. +func Verify(token string, key *rsa.PublicKey) error { + parts := strings.Split(token, ".") + if len(parts) != 3 { + return errors.New("jws: invalid token received, token must have 3 parts") + } + + signedContent := parts[0] + "." + parts[1] + signatureString, err := base64.RawURLEncoding.DecodeString(parts[2]) + if err != nil { + return err + } + + h := sha256.New() + h.Write([]byte(signedContent)) + return rsa.VerifyPKCS1v15(key, crypto.SHA256, h.Sum(nil), []byte(signatureString)) +} diff --git a/vendor/golang.org/x/oauth2/jwt/jwt.go b/vendor/golang.org/x/oauth2/jwt/jwt.go new file mode 100644 index 000000000..b2bf18298 --- /dev/null +++ b/vendor/golang.org/x/oauth2/jwt/jwt.go @@ -0,0 +1,185 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package jwt implements the OAuth 2.0 JSON Web Token flow, commonly +// known as "two-legged OAuth 2.0". +// +// See: https://tools.ietf.org/html/draft-ietf-oauth-jwt-bearer-12 +package jwt + +import ( + "context" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "strings" + "time" + + "golang.org/x/oauth2" + "golang.org/x/oauth2/internal" + "golang.org/x/oauth2/jws" +) + +var ( + defaultGrantType = "urn:ietf:params:oauth:grant-type:jwt-bearer" + defaultHeader = &jws.Header{Algorithm: "RS256", Typ: "JWT"} +) + +// Config is the configuration for using JWT to fetch tokens, +// commonly known as "two-legged OAuth 2.0". +type Config struct { + // Email is the OAuth client identifier used when communicating with + // the configured OAuth provider. + Email string + + // PrivateKey contains the contents of an RSA private key or the + // contents of a PEM file that contains a private key. The provided + // private key is used to sign JWT payloads. + // PEM containers with a passphrase are not supported. + // Use the following command to convert a PKCS 12 file into a PEM. 
+ // + // $ openssl pkcs12 -in key.p12 -out key.pem -nodes + // + PrivateKey []byte + + // PrivateKeyID contains an optional hint indicating which key is being + // used. + PrivateKeyID string + + // Subject is the optional user to impersonate. + Subject string + + // Scopes optionally specifies a list of requested permission scopes. + Scopes []string + + // TokenURL is the endpoint required to complete the 2-legged JWT flow. + TokenURL string + + // Expires optionally specifies how long the token is valid for. + Expires time.Duration + + // Audience optionally specifies the intended audience of the + // request. If empty, the value of TokenURL is used as the + // intended audience. + Audience string + + // PrivateClaims optionally specifies custom private claims in the JWT. + // See http://tools.ietf.org/html/draft-jones-json-web-token-10#section-4.3 + PrivateClaims map[string]interface{} + + // UseIDToken optionally specifies whether ID token should be used instead + // of access token when the server returns both. + UseIDToken bool +} + +// TokenSource returns a JWT TokenSource using the configuration +// in c and the HTTP client from the provided context. +func (c *Config) TokenSource(ctx context.Context) oauth2.TokenSource { + return oauth2.ReuseTokenSource(nil, jwtSource{ctx, c}) +} + +// Client returns an HTTP client wrapping the context's +// HTTP transport and adding Authorization headers with tokens +// obtained from c. +// +// The returned client and its Transport should not be modified. +func (c *Config) Client(ctx context.Context) *http.Client { + return oauth2.NewClient(ctx, c.TokenSource(ctx)) +} + +// jwtSource is a source that always does a signed JWT request for a token. +// It should typically be wrapped with a reuseTokenSource. +type jwtSource struct { + ctx context.Context + conf *Config +} + +func (js jwtSource) Token() (*oauth2.Token, error) { + pk, err := internal.ParseKey(js.conf.PrivateKey) + if err != nil { + return nil, err + } + hc := oauth2.NewClient(js.ctx, nil) + claimSet := &jws.ClaimSet{ + Iss: js.conf.Email, + Scope: strings.Join(js.conf.Scopes, " "), + Aud: js.conf.TokenURL, + PrivateClaims: js.conf.PrivateClaims, + } + if subject := js.conf.Subject; subject != "" { + claimSet.Sub = subject + // prn is the old name of sub. Keep setting it + // to be compatible with legacy OAuth 2.0 providers. + claimSet.Prn = subject + } + if t := js.conf.Expires; t > 0 { + claimSet.Exp = time.Now().Add(t).Unix() + } + if aud := js.conf.Audience; aud != "" { + claimSet.Aud = aud + } + h := *defaultHeader + h.KeyID = js.conf.PrivateKeyID + payload, err := jws.Encode(&h, claimSet, pk) + if err != nil { + return nil, err + } + v := url.Values{} + v.Set("grant_type", defaultGrantType) + v.Set("assertion", payload) + resp, err := hc.PostForm(js.conf.TokenURL, v) + if err != nil { + return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) + } + defer resp.Body.Close() + body, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20)) + if err != nil { + return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) + } + if c := resp.StatusCode; c < 200 || c > 299 { + return nil, &oauth2.RetrieveError{ + Response: resp, + Body: body, + } + } + // tokenRes is the JSON response body. 
+ var tokenRes struct { + AccessToken string `json:"access_token"` + TokenType string `json:"token_type"` + IDToken string `json:"id_token"` + ExpiresIn int64 `json:"expires_in"` // relative seconds from now + } + if err := json.Unmarshal(body, &tokenRes); err != nil { + return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) + } + token := &oauth2.Token{ + AccessToken: tokenRes.AccessToken, + TokenType: tokenRes.TokenType, + } + raw := make(map[string]interface{}) + json.Unmarshal(body, &raw) // no error checks for optional fields + token = token.WithExtra(raw) + + if secs := tokenRes.ExpiresIn; secs > 0 { + token.Expiry = time.Now().Add(time.Duration(secs) * time.Second) + } + if v := tokenRes.IDToken; v != "" { + // decode returned id token to get expiry + claimSet, err := jws.Decode(v) + if err != nil { + return nil, fmt.Errorf("oauth2: error decoding JWT token: %v", err) + } + token.Expiry = time.Unix(claimSet.Exp, 0) + } + if js.conf.UseIDToken { + if tokenRes.IDToken == "" { + return nil, fmt.Errorf("oauth2: response doesn't have JWT token") + } + token.AccessToken = tokenRes.IDToken + } + return token, nil +} diff --git a/vendor/golang.org/x/sync/AUTHORS b/vendor/golang.org/x/sync/AUTHORS new file mode 100644 index 000000000..15167cd74 --- /dev/null +++ b/vendor/golang.org/x/sync/AUTHORS @@ -0,0 +1,3 @@ +# This source code refers to The Go Authors for copyright purposes. +# The master list of authors is in the main Go distribution, +# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/sync/CONTRIBUTORS b/vendor/golang.org/x/sync/CONTRIBUTORS new file mode 100644 index 000000000..1c4577e96 --- /dev/null +++ b/vendor/golang.org/x/sync/CONTRIBUTORS @@ -0,0 +1,3 @@ +# This source code was written by the Go contributors. +# The master list of contributors is in the main Go distribution, +# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/sync/LICENSE b/vendor/golang.org/x/sync/LICENSE new file mode 100644 index 000000000..6a66aea5e --- /dev/null +++ b/vendor/golang.org/x/sync/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/k8s.io/utils/third_party/forked/golang/PATENTS b/vendor/golang.org/x/sync/PATENTS similarity index 100% rename from vendor/k8s.io/utils/third_party/forked/golang/PATENTS rename to vendor/golang.org/x/sync/PATENTS diff --git a/vendor/golang.org/x/sync/semaphore/semaphore.go b/vendor/golang.org/x/sync/semaphore/semaphore.go new file mode 100644 index 000000000..7f096fef0 --- /dev/null +++ b/vendor/golang.org/x/sync/semaphore/semaphore.go @@ -0,0 +1,127 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package semaphore provides a weighted semaphore implementation. +package semaphore // import "golang.org/x/sync/semaphore" + +import ( + "container/list" + "context" + "sync" +) + +type waiter struct { + n int64 + ready chan<- struct{} // Closed when semaphore acquired. +} + +// NewWeighted creates a new weighted semaphore with the given +// maximum combined weight for concurrent access. +func NewWeighted(n int64) *Weighted { + w := &Weighted{size: n} + return w +} + +// Weighted provides a way to bound concurrent access to a resource. +// The callers can request access with a given weight. +type Weighted struct { + size int64 + cur int64 + mu sync.Mutex + waiters list.List +} + +// Acquire acquires the semaphore with a weight of n, blocking until resources +// are available or ctx is done. On success, returns nil. On failure, returns +// ctx.Err() and leaves the semaphore unchanged. +// +// If ctx is already done, Acquire may still succeed without blocking. +func (s *Weighted) Acquire(ctx context.Context, n int64) error { + s.mu.Lock() + if s.size-s.cur >= n && s.waiters.Len() == 0 { + s.cur += n + s.mu.Unlock() + return nil + } + + if n > s.size { + // Don't make other Acquire calls block on one that's doomed to fail. + s.mu.Unlock() + <-ctx.Done() + return ctx.Err() + } + + ready := make(chan struct{}) + w := waiter{n: n, ready: ready} + elem := s.waiters.PushBack(w) + s.mu.Unlock() + + select { + case <-ctx.Done(): + err := ctx.Err() + s.mu.Lock() + select { + case <-ready: + // Acquired the semaphore after we were canceled. Rather than trying to + // fix up the queue, just pretend we didn't notice the cancelation. + err = nil + default: + s.waiters.Remove(elem) + } + s.mu.Unlock() + return err + + case <-ready: + return nil + } +} + +// TryAcquire acquires the semaphore with a weight of n without blocking. +// On success, returns true. On failure, returns false and leaves the semaphore unchanged. +func (s *Weighted) TryAcquire(n int64) bool { + s.mu.Lock() + success := s.size-s.cur >= n && s.waiters.Len() == 0 + if success { + s.cur += n + } + s.mu.Unlock() + return success +} + +// Release releases the semaphore with a weight of n. 
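+// (Editor's note, not in upstream: Release wakes waiters strictly in FIFO
+// order and stops at the first waiter that does not fit; the comment inside
+// the loop below explains why skipping ahead to smaller requests would let
+// large Acquire calls starve.)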
+func (s *Weighted) Release(n int64) { + s.mu.Lock() + s.cur -= n + if s.cur < 0 { + s.mu.Unlock() + panic("semaphore: released more than held") + } + for { + next := s.waiters.Front() + if next == nil { + break // No more waiters blocked. + } + + w := next.Value.(waiter) + if s.size-s.cur < w.n { + // Not enough tokens for the next waiter. We could keep going (to try to + // find a waiter with a smaller request), but under load that could cause + // starvation for large requests; instead, we leave all remaining waiters + // blocked. + // + // Consider a semaphore used as a read-write lock, with N tokens, N + // readers, and one writer. Each reader can Acquire(1) to obtain a read + // lock. The writer can Acquire(N) to obtain a write lock, excluding all + // of the readers. If we allow the readers to jump ahead in the queue, + // the writer will starve — there is always one token available for every + // reader. + break + } + + s.cur += w.n + s.waiters.Remove(next) + close(w.ready) + } + s.mu.Unlock() +} diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index 1db2f00de..5213d820a 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -196,6 +196,8 @@ const ( BPF_A = 0x10 BPF_ABS = 0x20 BPF_ADD = 0x0 + BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff + BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 0x38 BPF_ALU = 0x4 BPF_ALU64 = 0x7 BPF_AND = 0x50 @@ -217,6 +219,11 @@ const ( BPF_FROM_BE = 0x8 BPF_FROM_LE = 0x0 BPF_FS_MAGIC = 0xcafe4a11 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = 0x2 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = 0x4 + BPF_F_ADJ_ROOM_ENCAP_L4_GRE = 0x8 + BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 0x10 + BPF_F_ADJ_ROOM_FIXED_GSO = 0x1 BPF_F_ALLOW_MULTI = 0x2 BPF_F_ALLOW_OVERRIDE = 0x1 BPF_F_ANY_ALIGNMENT = 0x2 @@ -238,16 +245,19 @@ const ( BPF_F_PSEUDO_HDR = 0x10 BPF_F_QUERY_EFFECTIVE = 0x1 BPF_F_RDONLY = 0x8 + BPF_F_RDONLY_PROG = 0x80 BPF_F_RECOMPUTE_CSUM = 0x1 BPF_F_REUSE_STACKID = 0x400 BPF_F_SEQ_NUMBER = 0x8 BPF_F_SKIP_FIELD_MASK = 0xff BPF_F_STACK_BUILD_ID = 0x20 BPF_F_STRICT_ALIGNMENT = 0x1 + BPF_F_SYSCTL_BASE_NAME = 0x1 BPF_F_TUNINFO_IPV6 = 0x1 BPF_F_USER_BUILD_ID = 0x800 BPF_F_USER_STACK = 0x100 BPF_F_WRONLY = 0x10 + BPF_F_WRONLY_PROG = 0x100 BPF_F_ZERO_CSUM_TX = 0x2 BPF_F_ZERO_SEED = 0x40 BPF_H = 0x8 @@ -290,8 +300,10 @@ const ( BPF_OR = 0x40 BPF_PSEUDO_CALL = 0x1 BPF_PSEUDO_MAP_FD = 0x1 + BPF_PSEUDO_MAP_VALUE = 0x2 BPF_RET = 0x6 BPF_RSH = 0x70 + BPF_SK_STORAGE_GET_F_CREATE = 0x1 BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 @@ -411,6 +423,7 @@ const ( CLONE_NEWUTS = 0x4000000 CLONE_PARENT = 0x8000 CLONE_PARENT_SETTID = 0x100000 + CLONE_PIDFD = 0x1000 CLONE_PTRACE = 0x2000 CLONE_SETTLS = 0x80000 CLONE_SIGHAND = 0x800 @@ -527,6 +540,7 @@ const ( ETH_P_DNA_RC = 0x6002 ETH_P_DNA_RT = 0x6003 ETH_P_DSA = 0x1b + ETH_P_DSA_8021Q = 0xdadb ETH_P_ECONET = 0x18 ETH_P_EDSA = 0xdada ETH_P_ERSPAN = 0x88be @@ -1135,6 +1149,20 @@ const ( LOCK_NB = 0x4 LOCK_SH = 0x1 LOCK_UN = 0x8 + LOOP_CLR_FD = 0x4c01 + LOOP_CTL_ADD = 0x4c80 + LOOP_CTL_GET_FREE = 0x4c82 + LOOP_CTL_REMOVE = 0x4c81 + LOOP_GET_STATUS = 0x4c03 + LOOP_GET_STATUS64 = 0x4c05 + LOOP_SET_BLOCK_SIZE = 0x4c09 + LOOP_SET_CAPACITY = 0x4c07 + LOOP_SET_DIRECT_IO = 0x4c08 + LOOP_SET_FD = 0x4c00 + LOOP_SET_STATUS = 0x4c02 + LOOP_SET_STATUS64 = 0x4c04 + LO_KEY_SIZE = 0x20 + LO_NAME_SIZE = 0x40 MADV_DODUMP = 0x11 MADV_DOFORK = 0xb MADV_DONTDUMP = 0x10 @@ -1997,6 +2025,10 @@ const ( SIOCGSKNS = 0x894c SIOCGSTAMP = 0x8906 
SIOCGSTAMPNS = 0x8907 + SIOCGSTAMPNS_NEW = 0x80108907 + SIOCGSTAMPNS_OLD = 0x8907 + SIOCGSTAMP_NEW = 0x80108906 + SIOCGSTAMP_OLD = 0x8906 SIOCINQ = 0x541b SIOCOUTQ = 0x5411 SIOCOUTQNSD = 0x894b @@ -2204,6 +2236,7 @@ const ( SYNC_FILE_RANGE_WAIT_AFTER = 0x4 SYNC_FILE_RANGE_WAIT_BEFORE = 0x1 SYNC_FILE_RANGE_WRITE = 0x2 + SYNC_FILE_RANGE_WRITE_AND_WAIT = 0x7 SYSFS_MAGIC = 0x62656572 S_BLKSIZE = 0x200 S_IEXEC = 0x40 @@ -2423,6 +2456,7 @@ const ( TS_COMM_LEN = 0x20 TUNATTACHFILTER = 0x400854d5 TUNDETACHFILTER = 0x400854d6 + TUNGETDEVNETNS = 0x54e3 TUNGETFEATURES = 0x800454cf TUNGETFILTER = 0x800854db TUNGETIFF = 0x800454d2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index 8a9d2eadf..39b630cc5 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -196,6 +196,8 @@ const ( BPF_A = 0x10 BPF_ABS = 0x20 BPF_ADD = 0x0 + BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff + BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 0x38 BPF_ALU = 0x4 BPF_ALU64 = 0x7 BPF_AND = 0x50 @@ -217,6 +219,11 @@ const ( BPF_FROM_BE = 0x8 BPF_FROM_LE = 0x0 BPF_FS_MAGIC = 0xcafe4a11 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = 0x2 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = 0x4 + BPF_F_ADJ_ROOM_ENCAP_L4_GRE = 0x8 + BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 0x10 + BPF_F_ADJ_ROOM_FIXED_GSO = 0x1 BPF_F_ALLOW_MULTI = 0x2 BPF_F_ALLOW_OVERRIDE = 0x1 BPF_F_ANY_ALIGNMENT = 0x2 @@ -238,16 +245,19 @@ const ( BPF_F_PSEUDO_HDR = 0x10 BPF_F_QUERY_EFFECTIVE = 0x1 BPF_F_RDONLY = 0x8 + BPF_F_RDONLY_PROG = 0x80 BPF_F_RECOMPUTE_CSUM = 0x1 BPF_F_REUSE_STACKID = 0x400 BPF_F_SEQ_NUMBER = 0x8 BPF_F_SKIP_FIELD_MASK = 0xff BPF_F_STACK_BUILD_ID = 0x20 BPF_F_STRICT_ALIGNMENT = 0x1 + BPF_F_SYSCTL_BASE_NAME = 0x1 BPF_F_TUNINFO_IPV6 = 0x1 BPF_F_USER_BUILD_ID = 0x800 BPF_F_USER_STACK = 0x100 BPF_F_WRONLY = 0x10 + BPF_F_WRONLY_PROG = 0x100 BPF_F_ZERO_CSUM_TX = 0x2 BPF_F_ZERO_SEED = 0x40 BPF_H = 0x8 @@ -290,8 +300,10 @@ const ( BPF_OR = 0x40 BPF_PSEUDO_CALL = 0x1 BPF_PSEUDO_MAP_FD = 0x1 + BPF_PSEUDO_MAP_VALUE = 0x2 BPF_RET = 0x6 BPF_RSH = 0x70 + BPF_SK_STORAGE_GET_F_CREATE = 0x1 BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 @@ -411,6 +423,7 @@ const ( CLONE_NEWUTS = 0x4000000 CLONE_PARENT = 0x8000 CLONE_PARENT_SETTID = 0x100000 + CLONE_PIDFD = 0x1000 CLONE_PTRACE = 0x2000 CLONE_SETTLS = 0x80000 CLONE_SIGHAND = 0x800 @@ -527,6 +540,7 @@ const ( ETH_P_DNA_RC = 0x6002 ETH_P_DNA_RT = 0x6003 ETH_P_DSA = 0x1b + ETH_P_DSA_8021Q = 0xdadb ETH_P_ECONET = 0x18 ETH_P_EDSA = 0xdada ETH_P_ERSPAN = 0x88be @@ -1135,6 +1149,20 @@ const ( LOCK_NB = 0x4 LOCK_SH = 0x1 LOCK_UN = 0x8 + LOOP_CLR_FD = 0x4c01 + LOOP_CTL_ADD = 0x4c80 + LOOP_CTL_GET_FREE = 0x4c82 + LOOP_CTL_REMOVE = 0x4c81 + LOOP_GET_STATUS = 0x4c03 + LOOP_GET_STATUS64 = 0x4c05 + LOOP_SET_BLOCK_SIZE = 0x4c09 + LOOP_SET_CAPACITY = 0x4c07 + LOOP_SET_DIRECT_IO = 0x4c08 + LOOP_SET_FD = 0x4c00 + LOOP_SET_STATUS = 0x4c02 + LOOP_SET_STATUS64 = 0x4c04 + LO_KEY_SIZE = 0x20 + LO_NAME_SIZE = 0x40 MADV_DODUMP = 0x11 MADV_DOFORK = 0xb MADV_DONTDUMP = 0x10 @@ -1998,6 +2026,10 @@ const ( SIOCGSKNS = 0x894c SIOCGSTAMP = 0x8906 SIOCGSTAMPNS = 0x8907 + SIOCGSTAMPNS_NEW = 0x80108907 + SIOCGSTAMPNS_OLD = 0x8907 + SIOCGSTAMP_NEW = 0x80108906 + SIOCGSTAMP_OLD = 0x8906 SIOCINQ = 0x541b SIOCOUTQ = 0x5411 SIOCOUTQNSD = 0x894b @@ -2205,6 +2237,7 @@ const ( SYNC_FILE_RANGE_WAIT_AFTER = 0x4 SYNC_FILE_RANGE_WAIT_BEFORE = 0x1 SYNC_FILE_RANGE_WRITE = 0x2 + SYNC_FILE_RANGE_WRITE_AND_WAIT = 0x7 SYSFS_MAGIC = 0x62656572 
S_BLKSIZE = 0x200 S_IEXEC = 0x40 @@ -2424,6 +2457,7 @@ const ( TS_COMM_LEN = 0x20 TUNATTACHFILTER = 0x401054d5 TUNDETACHFILTER = 0x401054d6 + TUNGETDEVNETNS = 0x54e3 TUNGETFEATURES = 0x800454cf TUNGETFILTER = 0x801054db TUNGETIFF = 0x800454d2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index 2e7455814..c59a1beb3 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -196,6 +196,8 @@ const ( BPF_A = 0x10 BPF_ABS = 0x20 BPF_ADD = 0x0 + BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff + BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 0x38 BPF_ALU = 0x4 BPF_ALU64 = 0x7 BPF_AND = 0x50 @@ -217,6 +219,11 @@ const ( BPF_FROM_BE = 0x8 BPF_FROM_LE = 0x0 BPF_FS_MAGIC = 0xcafe4a11 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = 0x2 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = 0x4 + BPF_F_ADJ_ROOM_ENCAP_L4_GRE = 0x8 + BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 0x10 + BPF_F_ADJ_ROOM_FIXED_GSO = 0x1 BPF_F_ALLOW_MULTI = 0x2 BPF_F_ALLOW_OVERRIDE = 0x1 BPF_F_ANY_ALIGNMENT = 0x2 @@ -238,16 +245,19 @@ const ( BPF_F_PSEUDO_HDR = 0x10 BPF_F_QUERY_EFFECTIVE = 0x1 BPF_F_RDONLY = 0x8 + BPF_F_RDONLY_PROG = 0x80 BPF_F_RECOMPUTE_CSUM = 0x1 BPF_F_REUSE_STACKID = 0x400 BPF_F_SEQ_NUMBER = 0x8 BPF_F_SKIP_FIELD_MASK = 0xff BPF_F_STACK_BUILD_ID = 0x20 BPF_F_STRICT_ALIGNMENT = 0x1 + BPF_F_SYSCTL_BASE_NAME = 0x1 BPF_F_TUNINFO_IPV6 = 0x1 BPF_F_USER_BUILD_ID = 0x800 BPF_F_USER_STACK = 0x100 BPF_F_WRONLY = 0x10 + BPF_F_WRONLY_PROG = 0x100 BPF_F_ZERO_CSUM_TX = 0x2 BPF_F_ZERO_SEED = 0x40 BPF_H = 0x8 @@ -290,8 +300,10 @@ const ( BPF_OR = 0x40 BPF_PSEUDO_CALL = 0x1 BPF_PSEUDO_MAP_FD = 0x1 + BPF_PSEUDO_MAP_VALUE = 0x2 BPF_RET = 0x6 BPF_RSH = 0x70 + BPF_SK_STORAGE_GET_F_CREATE = 0x1 BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 @@ -411,6 +423,7 @@ const ( CLONE_NEWUTS = 0x4000000 CLONE_PARENT = 0x8000 CLONE_PARENT_SETTID = 0x100000 + CLONE_PIDFD = 0x1000 CLONE_PTRACE = 0x2000 CLONE_SETTLS = 0x80000 CLONE_SIGHAND = 0x800 @@ -527,6 +540,7 @@ const ( ETH_P_DNA_RC = 0x6002 ETH_P_DNA_RT = 0x6003 ETH_P_DSA = 0x1b + ETH_P_DSA_8021Q = 0xdadb ETH_P_ECONET = 0x18 ETH_P_EDSA = 0xdada ETH_P_ERSPAN = 0x88be @@ -1134,6 +1148,20 @@ const ( LOCK_NB = 0x4 LOCK_SH = 0x1 LOCK_UN = 0x8 + LOOP_CLR_FD = 0x4c01 + LOOP_CTL_ADD = 0x4c80 + LOOP_CTL_GET_FREE = 0x4c82 + LOOP_CTL_REMOVE = 0x4c81 + LOOP_GET_STATUS = 0x4c03 + LOOP_GET_STATUS64 = 0x4c05 + LOOP_SET_BLOCK_SIZE = 0x4c09 + LOOP_SET_CAPACITY = 0x4c07 + LOOP_SET_DIRECT_IO = 0x4c08 + LOOP_SET_FD = 0x4c00 + LOOP_SET_STATUS = 0x4c02 + LOOP_SET_STATUS64 = 0x4c04 + LO_KEY_SIZE = 0x20 + LO_NAME_SIZE = 0x40 MADV_DODUMP = 0x11 MADV_DOFORK = 0xb MADV_DONTDUMP = 0x10 @@ -2004,6 +2032,10 @@ const ( SIOCGSKNS = 0x894c SIOCGSTAMP = 0x8906 SIOCGSTAMPNS = 0x8907 + SIOCGSTAMPNS_NEW = 0x80108907 + SIOCGSTAMPNS_OLD = 0x8907 + SIOCGSTAMP_NEW = 0x80108906 + SIOCGSTAMP_OLD = 0x8906 SIOCINQ = 0x541b SIOCOUTQ = 0x5411 SIOCOUTQNSD = 0x894b @@ -2211,6 +2243,7 @@ const ( SYNC_FILE_RANGE_WAIT_AFTER = 0x4 SYNC_FILE_RANGE_WAIT_BEFORE = 0x1 SYNC_FILE_RANGE_WRITE = 0x2 + SYNC_FILE_RANGE_WRITE_AND_WAIT = 0x7 SYSFS_MAGIC = 0x62656572 S_BLKSIZE = 0x200 S_IEXEC = 0x40 @@ -2430,6 +2463,7 @@ const ( TS_COMM_LEN = 0x20 TUNATTACHFILTER = 0x400854d5 TUNDETACHFILTER = 0x400854d6 + TUNGETDEVNETNS = 0x54e3 TUNGETFEATURES = 0x800454cf TUNGETFILTER = 0x800854db TUNGETIFF = 0x800454d2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index b1dc633a2..5f35c19d1 100644 
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -196,6 +196,8 @@ const ( BPF_A = 0x10 BPF_ABS = 0x20 BPF_ADD = 0x0 + BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff + BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 0x38 BPF_ALU = 0x4 BPF_ALU64 = 0x7 BPF_AND = 0x50 @@ -217,6 +219,11 @@ const ( BPF_FROM_BE = 0x8 BPF_FROM_LE = 0x0 BPF_FS_MAGIC = 0xcafe4a11 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = 0x2 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = 0x4 + BPF_F_ADJ_ROOM_ENCAP_L4_GRE = 0x8 + BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 0x10 + BPF_F_ADJ_ROOM_FIXED_GSO = 0x1 BPF_F_ALLOW_MULTI = 0x2 BPF_F_ALLOW_OVERRIDE = 0x1 BPF_F_ANY_ALIGNMENT = 0x2 @@ -238,16 +245,19 @@ const ( BPF_F_PSEUDO_HDR = 0x10 BPF_F_QUERY_EFFECTIVE = 0x1 BPF_F_RDONLY = 0x8 + BPF_F_RDONLY_PROG = 0x80 BPF_F_RECOMPUTE_CSUM = 0x1 BPF_F_REUSE_STACKID = 0x400 BPF_F_SEQ_NUMBER = 0x8 BPF_F_SKIP_FIELD_MASK = 0xff BPF_F_STACK_BUILD_ID = 0x20 BPF_F_STRICT_ALIGNMENT = 0x1 + BPF_F_SYSCTL_BASE_NAME = 0x1 BPF_F_TUNINFO_IPV6 = 0x1 BPF_F_USER_BUILD_ID = 0x800 BPF_F_USER_STACK = 0x100 BPF_F_WRONLY = 0x10 + BPF_F_WRONLY_PROG = 0x100 BPF_F_ZERO_CSUM_TX = 0x2 BPF_F_ZERO_SEED = 0x40 BPF_H = 0x8 @@ -290,8 +300,10 @@ const ( BPF_OR = 0x40 BPF_PSEUDO_CALL = 0x1 BPF_PSEUDO_MAP_FD = 0x1 + BPF_PSEUDO_MAP_VALUE = 0x2 BPF_RET = 0x6 BPF_RSH = 0x70 + BPF_SK_STORAGE_GET_F_CREATE = 0x1 BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 @@ -411,6 +423,7 @@ const ( CLONE_NEWUTS = 0x4000000 CLONE_PARENT = 0x8000 CLONE_PARENT_SETTID = 0x100000 + CLONE_PIDFD = 0x1000 CLONE_PTRACE = 0x2000 CLONE_SETTLS = 0x80000 CLONE_SIGHAND = 0x800 @@ -528,6 +541,7 @@ const ( ETH_P_DNA_RC = 0x6002 ETH_P_DNA_RT = 0x6003 ETH_P_DSA = 0x1b + ETH_P_DSA_8021Q = 0xdadb ETH_P_ECONET = 0x18 ETH_P_EDSA = 0xdada ETH_P_ERSPAN = 0x88be @@ -1137,6 +1151,20 @@ const ( LOCK_NB = 0x4 LOCK_SH = 0x1 LOCK_UN = 0x8 + LOOP_CLR_FD = 0x4c01 + LOOP_CTL_ADD = 0x4c80 + LOOP_CTL_GET_FREE = 0x4c82 + LOOP_CTL_REMOVE = 0x4c81 + LOOP_GET_STATUS = 0x4c03 + LOOP_GET_STATUS64 = 0x4c05 + LOOP_SET_BLOCK_SIZE = 0x4c09 + LOOP_SET_CAPACITY = 0x4c07 + LOOP_SET_DIRECT_IO = 0x4c08 + LOOP_SET_FD = 0x4c00 + LOOP_SET_STATUS = 0x4c02 + LOOP_SET_STATUS64 = 0x4c04 + LO_KEY_SIZE = 0x20 + LO_NAME_SIZE = 0x40 MADV_DODUMP = 0x11 MADV_DOFORK = 0xb MADV_DONTDUMP = 0x10 @@ -1988,6 +2016,10 @@ const ( SIOCGSKNS = 0x894c SIOCGSTAMP = 0x8906 SIOCGSTAMPNS = 0x8907 + SIOCGSTAMPNS_NEW = 0x80108907 + SIOCGSTAMPNS_OLD = 0x8907 + SIOCGSTAMP_NEW = 0x80108906 + SIOCGSTAMP_OLD = 0x8906 SIOCINQ = 0x541b SIOCOUTQ = 0x5411 SIOCOUTQNSD = 0x894b @@ -2196,6 +2228,7 @@ const ( SYNC_FILE_RANGE_WAIT_AFTER = 0x4 SYNC_FILE_RANGE_WAIT_BEFORE = 0x1 SYNC_FILE_RANGE_WRITE = 0x2 + SYNC_FILE_RANGE_WRITE_AND_WAIT = 0x7 SYSFS_MAGIC = 0x62656572 S_BLKSIZE = 0x200 S_IEXEC = 0x40 @@ -2415,6 +2448,7 @@ const ( TS_COMM_LEN = 0x20 TUNATTACHFILTER = 0x401054d5 TUNDETACHFILTER = 0x401054d6 + TUNGETDEVNETNS = 0x54e3 TUNGETFEATURES = 0x800454cf TUNGETFILTER = 0x801054db TUNGETIFF = 0x800454d2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index ad4d9afb6..7f1b7bef2 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -196,6 +196,8 @@ const ( BPF_A = 0x10 BPF_ABS = 0x20 BPF_ADD = 0x0 + BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff + BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 0x38 BPF_ALU = 0x4 BPF_ALU64 = 0x7 BPF_AND = 0x50 @@ -217,6 +219,11 @@ const ( BPF_FROM_BE = 0x8 BPF_FROM_LE = 0x0 BPF_FS_MAGIC = 
0xcafe4a11 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = 0x2 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = 0x4 + BPF_F_ADJ_ROOM_ENCAP_L4_GRE = 0x8 + BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 0x10 + BPF_F_ADJ_ROOM_FIXED_GSO = 0x1 BPF_F_ALLOW_MULTI = 0x2 BPF_F_ALLOW_OVERRIDE = 0x1 BPF_F_ANY_ALIGNMENT = 0x2 @@ -238,16 +245,19 @@ const ( BPF_F_PSEUDO_HDR = 0x10 BPF_F_QUERY_EFFECTIVE = 0x1 BPF_F_RDONLY = 0x8 + BPF_F_RDONLY_PROG = 0x80 BPF_F_RECOMPUTE_CSUM = 0x1 BPF_F_REUSE_STACKID = 0x400 BPF_F_SEQ_NUMBER = 0x8 BPF_F_SKIP_FIELD_MASK = 0xff BPF_F_STACK_BUILD_ID = 0x20 BPF_F_STRICT_ALIGNMENT = 0x1 + BPF_F_SYSCTL_BASE_NAME = 0x1 BPF_F_TUNINFO_IPV6 = 0x1 BPF_F_USER_BUILD_ID = 0x800 BPF_F_USER_STACK = 0x100 BPF_F_WRONLY = 0x10 + BPF_F_WRONLY_PROG = 0x100 BPF_F_ZERO_CSUM_TX = 0x2 BPF_F_ZERO_SEED = 0x40 BPF_H = 0x8 @@ -290,8 +300,10 @@ const ( BPF_OR = 0x40 BPF_PSEUDO_CALL = 0x1 BPF_PSEUDO_MAP_FD = 0x1 + BPF_PSEUDO_MAP_VALUE = 0x2 BPF_RET = 0x6 BPF_RSH = 0x70 + BPF_SK_STORAGE_GET_F_CREATE = 0x1 BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 @@ -411,6 +423,7 @@ const ( CLONE_NEWUTS = 0x4000000 CLONE_PARENT = 0x8000 CLONE_PARENT_SETTID = 0x100000 + CLONE_PIDFD = 0x1000 CLONE_PTRACE = 0x2000 CLONE_SETTLS = 0x80000 CLONE_SIGHAND = 0x800 @@ -527,6 +540,7 @@ const ( ETH_P_DNA_RC = 0x6002 ETH_P_DNA_RT = 0x6003 ETH_P_DSA = 0x1b + ETH_P_DSA_8021Q = 0xdadb ETH_P_ECONET = 0x18 ETH_P_EDSA = 0xdada ETH_P_ERSPAN = 0x88be @@ -1134,6 +1148,20 @@ const ( LOCK_NB = 0x4 LOCK_SH = 0x1 LOCK_UN = 0x8 + LOOP_CLR_FD = 0x4c01 + LOOP_CTL_ADD = 0x4c80 + LOOP_CTL_GET_FREE = 0x4c82 + LOOP_CTL_REMOVE = 0x4c81 + LOOP_GET_STATUS = 0x4c03 + LOOP_GET_STATUS64 = 0x4c05 + LOOP_SET_BLOCK_SIZE = 0x4c09 + LOOP_SET_CAPACITY = 0x4c07 + LOOP_SET_DIRECT_IO = 0x4c08 + LOOP_SET_FD = 0x4c00 + LOOP_SET_STATUS = 0x4c02 + LOOP_SET_STATUS64 = 0x4c04 + LO_KEY_SIZE = 0x20 + LO_NAME_SIZE = 0x40 MADV_DODUMP = 0x11 MADV_DOFORK = 0xb MADV_DONTDUMP = 0x10 @@ -1997,6 +2025,10 @@ const ( SIOCGSKNS = 0x894c SIOCGSTAMP = 0x8906 SIOCGSTAMPNS = 0x8907 + SIOCGSTAMPNS_NEW = 0x40108907 + SIOCGSTAMPNS_OLD = 0x8907 + SIOCGSTAMP_NEW = 0x40108906 + SIOCGSTAMP_OLD = 0x8906 SIOCINQ = 0x467f SIOCOUTQ = 0x7472 SIOCOUTQNSD = 0x894b @@ -2205,6 +2237,7 @@ const ( SYNC_FILE_RANGE_WAIT_AFTER = 0x4 SYNC_FILE_RANGE_WAIT_BEFORE = 0x1 SYNC_FILE_RANGE_WRITE = 0x2 + SYNC_FILE_RANGE_WRITE_AND_WAIT = 0x7 SYSFS_MAGIC = 0x62656572 S_BLKSIZE = 0x200 S_IEXEC = 0x40 @@ -2425,6 +2458,7 @@ const ( TS_COMM_LEN = 0x20 TUNATTACHFILTER = 0x800854d5 TUNDETACHFILTER = 0x800854d6 + TUNGETDEVNETNS = 0x200054e3 TUNGETFEATURES = 0x400454cf TUNGETFILTER = 0x400854db TUNGETIFF = 0x400454d2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index fe2965028..603d88b8b 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -196,6 +196,8 @@ const ( BPF_A = 0x10 BPF_ABS = 0x20 BPF_ADD = 0x0 + BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff + BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 0x38 BPF_ALU = 0x4 BPF_ALU64 = 0x7 BPF_AND = 0x50 @@ -217,6 +219,11 @@ const ( BPF_FROM_BE = 0x8 BPF_FROM_LE = 0x0 BPF_FS_MAGIC = 0xcafe4a11 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = 0x2 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = 0x4 + BPF_F_ADJ_ROOM_ENCAP_L4_GRE = 0x8 + BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 0x10 + BPF_F_ADJ_ROOM_FIXED_GSO = 0x1 BPF_F_ALLOW_MULTI = 0x2 BPF_F_ALLOW_OVERRIDE = 0x1 BPF_F_ANY_ALIGNMENT = 0x2 @@ -238,16 +245,19 @@ const ( BPF_F_PSEUDO_HDR = 0x10 BPF_F_QUERY_EFFECTIVE = 0x1 BPF_F_RDONLY = 0x8 + 
BPF_F_RDONLY_PROG = 0x80 BPF_F_RECOMPUTE_CSUM = 0x1 BPF_F_REUSE_STACKID = 0x400 BPF_F_SEQ_NUMBER = 0x8 BPF_F_SKIP_FIELD_MASK = 0xff BPF_F_STACK_BUILD_ID = 0x20 BPF_F_STRICT_ALIGNMENT = 0x1 + BPF_F_SYSCTL_BASE_NAME = 0x1 BPF_F_TUNINFO_IPV6 = 0x1 BPF_F_USER_BUILD_ID = 0x800 BPF_F_USER_STACK = 0x100 BPF_F_WRONLY = 0x10 + BPF_F_WRONLY_PROG = 0x100 BPF_F_ZERO_CSUM_TX = 0x2 BPF_F_ZERO_SEED = 0x40 BPF_H = 0x8 @@ -290,8 +300,10 @@ const ( BPF_OR = 0x40 BPF_PSEUDO_CALL = 0x1 BPF_PSEUDO_MAP_FD = 0x1 + BPF_PSEUDO_MAP_VALUE = 0x2 BPF_RET = 0x6 BPF_RSH = 0x70 + BPF_SK_STORAGE_GET_F_CREATE = 0x1 BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 @@ -411,6 +423,7 @@ const ( CLONE_NEWUTS = 0x4000000 CLONE_PARENT = 0x8000 CLONE_PARENT_SETTID = 0x100000 + CLONE_PIDFD = 0x1000 CLONE_PTRACE = 0x2000 CLONE_SETTLS = 0x80000 CLONE_SIGHAND = 0x800 @@ -527,6 +540,7 @@ const ( ETH_P_DNA_RC = 0x6002 ETH_P_DNA_RT = 0x6003 ETH_P_DSA = 0x1b + ETH_P_DSA_8021Q = 0xdadb ETH_P_ECONET = 0x18 ETH_P_EDSA = 0xdada ETH_P_ERSPAN = 0x88be @@ -1134,6 +1148,20 @@ const ( LOCK_NB = 0x4 LOCK_SH = 0x1 LOCK_UN = 0x8 + LOOP_CLR_FD = 0x4c01 + LOOP_CTL_ADD = 0x4c80 + LOOP_CTL_GET_FREE = 0x4c82 + LOOP_CTL_REMOVE = 0x4c81 + LOOP_GET_STATUS = 0x4c03 + LOOP_GET_STATUS64 = 0x4c05 + LOOP_SET_BLOCK_SIZE = 0x4c09 + LOOP_SET_CAPACITY = 0x4c07 + LOOP_SET_DIRECT_IO = 0x4c08 + LOOP_SET_FD = 0x4c00 + LOOP_SET_STATUS = 0x4c02 + LOOP_SET_STATUS64 = 0x4c04 + LO_KEY_SIZE = 0x20 + LO_NAME_SIZE = 0x40 MADV_DODUMP = 0x11 MADV_DOFORK = 0xb MADV_DONTDUMP = 0x10 @@ -1997,6 +2025,10 @@ const ( SIOCGSKNS = 0x894c SIOCGSTAMP = 0x8906 SIOCGSTAMPNS = 0x8907 + SIOCGSTAMPNS_NEW = 0x40108907 + SIOCGSTAMPNS_OLD = 0x8907 + SIOCGSTAMP_NEW = 0x40108906 + SIOCGSTAMP_OLD = 0x8906 SIOCINQ = 0x467f SIOCOUTQ = 0x7472 SIOCOUTQNSD = 0x894b @@ -2205,6 +2237,7 @@ const ( SYNC_FILE_RANGE_WAIT_AFTER = 0x4 SYNC_FILE_RANGE_WAIT_BEFORE = 0x1 SYNC_FILE_RANGE_WRITE = 0x2 + SYNC_FILE_RANGE_WRITE_AND_WAIT = 0x7 SYSFS_MAGIC = 0x62656572 S_BLKSIZE = 0x200 S_IEXEC = 0x40 @@ -2425,6 +2458,7 @@ const ( TS_COMM_LEN = 0x20 TUNATTACHFILTER = 0x801054d5 TUNDETACHFILTER = 0x801054d6 + TUNGETDEVNETNS = 0x200054e3 TUNGETFEATURES = 0x400454cf TUNGETFILTER = 0x401054db TUNGETIFF = 0x400454d2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index 608878303..ed178f8a7 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -196,6 +196,8 @@ const ( BPF_A = 0x10 BPF_ABS = 0x20 BPF_ADD = 0x0 + BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff + BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 0x38 BPF_ALU = 0x4 BPF_ALU64 = 0x7 BPF_AND = 0x50 @@ -217,6 +219,11 @@ const ( BPF_FROM_BE = 0x8 BPF_FROM_LE = 0x0 BPF_FS_MAGIC = 0xcafe4a11 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = 0x2 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = 0x4 + BPF_F_ADJ_ROOM_ENCAP_L4_GRE = 0x8 + BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 0x10 + BPF_F_ADJ_ROOM_FIXED_GSO = 0x1 BPF_F_ALLOW_MULTI = 0x2 BPF_F_ALLOW_OVERRIDE = 0x1 BPF_F_ANY_ALIGNMENT = 0x2 @@ -238,16 +245,19 @@ const ( BPF_F_PSEUDO_HDR = 0x10 BPF_F_QUERY_EFFECTIVE = 0x1 BPF_F_RDONLY = 0x8 + BPF_F_RDONLY_PROG = 0x80 BPF_F_RECOMPUTE_CSUM = 0x1 BPF_F_REUSE_STACKID = 0x400 BPF_F_SEQ_NUMBER = 0x8 BPF_F_SKIP_FIELD_MASK = 0xff BPF_F_STACK_BUILD_ID = 0x20 BPF_F_STRICT_ALIGNMENT = 0x1 + BPF_F_SYSCTL_BASE_NAME = 0x1 BPF_F_TUNINFO_IPV6 = 0x1 BPF_F_USER_BUILD_ID = 0x800 BPF_F_USER_STACK = 0x100 BPF_F_WRONLY = 0x10 + BPF_F_WRONLY_PROG = 0x100 BPF_F_ZERO_CSUM_TX = 0x2 
BPF_F_ZERO_SEED = 0x40 BPF_H = 0x8 @@ -290,8 +300,10 @@ const ( BPF_OR = 0x40 BPF_PSEUDO_CALL = 0x1 BPF_PSEUDO_MAP_FD = 0x1 + BPF_PSEUDO_MAP_VALUE = 0x2 BPF_RET = 0x6 BPF_RSH = 0x70 + BPF_SK_STORAGE_GET_F_CREATE = 0x1 BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 @@ -411,6 +423,7 @@ const ( CLONE_NEWUTS = 0x4000000 CLONE_PARENT = 0x8000 CLONE_PARENT_SETTID = 0x100000 + CLONE_PIDFD = 0x1000 CLONE_PTRACE = 0x2000 CLONE_SETTLS = 0x80000 CLONE_SIGHAND = 0x800 @@ -527,6 +540,7 @@ const ( ETH_P_DNA_RC = 0x6002 ETH_P_DNA_RT = 0x6003 ETH_P_DSA = 0x1b + ETH_P_DSA_8021Q = 0xdadb ETH_P_ECONET = 0x18 ETH_P_EDSA = 0xdada ETH_P_ERSPAN = 0x88be @@ -1134,6 +1148,20 @@ const ( LOCK_NB = 0x4 LOCK_SH = 0x1 LOCK_UN = 0x8 + LOOP_CLR_FD = 0x4c01 + LOOP_CTL_ADD = 0x4c80 + LOOP_CTL_GET_FREE = 0x4c82 + LOOP_CTL_REMOVE = 0x4c81 + LOOP_GET_STATUS = 0x4c03 + LOOP_GET_STATUS64 = 0x4c05 + LOOP_SET_BLOCK_SIZE = 0x4c09 + LOOP_SET_CAPACITY = 0x4c07 + LOOP_SET_DIRECT_IO = 0x4c08 + LOOP_SET_FD = 0x4c00 + LOOP_SET_STATUS = 0x4c02 + LOOP_SET_STATUS64 = 0x4c04 + LO_KEY_SIZE = 0x20 + LO_NAME_SIZE = 0x40 MADV_DODUMP = 0x11 MADV_DOFORK = 0xb MADV_DONTDUMP = 0x10 @@ -1997,6 +2025,10 @@ const ( SIOCGSKNS = 0x894c SIOCGSTAMP = 0x8906 SIOCGSTAMPNS = 0x8907 + SIOCGSTAMPNS_NEW = 0x40108907 + SIOCGSTAMPNS_OLD = 0x8907 + SIOCGSTAMP_NEW = 0x40108906 + SIOCGSTAMP_OLD = 0x8906 SIOCINQ = 0x467f SIOCOUTQ = 0x7472 SIOCOUTQNSD = 0x894b @@ -2205,6 +2237,7 @@ const ( SYNC_FILE_RANGE_WAIT_AFTER = 0x4 SYNC_FILE_RANGE_WAIT_BEFORE = 0x1 SYNC_FILE_RANGE_WRITE = 0x2 + SYNC_FILE_RANGE_WRITE_AND_WAIT = 0x7 SYSFS_MAGIC = 0x62656572 S_BLKSIZE = 0x200 S_IEXEC = 0x40 @@ -2425,6 +2458,7 @@ const ( TS_COMM_LEN = 0x20 TUNATTACHFILTER = 0x801054d5 TUNDETACHFILTER = 0x801054d6 + TUNGETDEVNETNS = 0x200054e3 TUNGETFEATURES = 0x400454cf TUNGETFILTER = 0x401054db TUNGETIFF = 0x400454d2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index 4cf9ddfad..080b78933 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -196,6 +196,8 @@ const ( BPF_A = 0x10 BPF_ABS = 0x20 BPF_ADD = 0x0 + BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff + BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 0x38 BPF_ALU = 0x4 BPF_ALU64 = 0x7 BPF_AND = 0x50 @@ -217,6 +219,11 @@ const ( BPF_FROM_BE = 0x8 BPF_FROM_LE = 0x0 BPF_FS_MAGIC = 0xcafe4a11 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = 0x2 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = 0x4 + BPF_F_ADJ_ROOM_ENCAP_L4_GRE = 0x8 + BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 0x10 + BPF_F_ADJ_ROOM_FIXED_GSO = 0x1 BPF_F_ALLOW_MULTI = 0x2 BPF_F_ALLOW_OVERRIDE = 0x1 BPF_F_ANY_ALIGNMENT = 0x2 @@ -238,16 +245,19 @@ const ( BPF_F_PSEUDO_HDR = 0x10 BPF_F_QUERY_EFFECTIVE = 0x1 BPF_F_RDONLY = 0x8 + BPF_F_RDONLY_PROG = 0x80 BPF_F_RECOMPUTE_CSUM = 0x1 BPF_F_REUSE_STACKID = 0x400 BPF_F_SEQ_NUMBER = 0x8 BPF_F_SKIP_FIELD_MASK = 0xff BPF_F_STACK_BUILD_ID = 0x20 BPF_F_STRICT_ALIGNMENT = 0x1 + BPF_F_SYSCTL_BASE_NAME = 0x1 BPF_F_TUNINFO_IPV6 = 0x1 BPF_F_USER_BUILD_ID = 0x800 BPF_F_USER_STACK = 0x100 BPF_F_WRONLY = 0x10 + BPF_F_WRONLY_PROG = 0x100 BPF_F_ZERO_CSUM_TX = 0x2 BPF_F_ZERO_SEED = 0x40 BPF_H = 0x8 @@ -290,8 +300,10 @@ const ( BPF_OR = 0x40 BPF_PSEUDO_CALL = 0x1 BPF_PSEUDO_MAP_FD = 0x1 + BPF_PSEUDO_MAP_VALUE = 0x2 BPF_RET = 0x6 BPF_RSH = 0x70 + BPF_SK_STORAGE_GET_F_CREATE = 0x1 BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 @@ -411,6 +423,7 @@ const ( CLONE_NEWUTS = 0x4000000 
CLONE_PARENT = 0x8000 CLONE_PARENT_SETTID = 0x100000 + CLONE_PIDFD = 0x1000 CLONE_PTRACE = 0x2000 CLONE_SETTLS = 0x80000 CLONE_SIGHAND = 0x800 @@ -527,6 +540,7 @@ const ( ETH_P_DNA_RC = 0x6002 ETH_P_DNA_RT = 0x6003 ETH_P_DSA = 0x1b + ETH_P_DSA_8021Q = 0xdadb ETH_P_ECONET = 0x18 ETH_P_EDSA = 0xdada ETH_P_ERSPAN = 0x88be @@ -1134,6 +1148,20 @@ const ( LOCK_NB = 0x4 LOCK_SH = 0x1 LOCK_UN = 0x8 + LOOP_CLR_FD = 0x4c01 + LOOP_CTL_ADD = 0x4c80 + LOOP_CTL_GET_FREE = 0x4c82 + LOOP_CTL_REMOVE = 0x4c81 + LOOP_GET_STATUS = 0x4c03 + LOOP_GET_STATUS64 = 0x4c05 + LOOP_SET_BLOCK_SIZE = 0x4c09 + LOOP_SET_CAPACITY = 0x4c07 + LOOP_SET_DIRECT_IO = 0x4c08 + LOOP_SET_FD = 0x4c00 + LOOP_SET_STATUS = 0x4c02 + LOOP_SET_STATUS64 = 0x4c04 + LO_KEY_SIZE = 0x20 + LO_NAME_SIZE = 0x40 MADV_DODUMP = 0x11 MADV_DOFORK = 0xb MADV_DONTDUMP = 0x10 @@ -1997,6 +2025,10 @@ const ( SIOCGSKNS = 0x894c SIOCGSTAMP = 0x8906 SIOCGSTAMPNS = 0x8907 + SIOCGSTAMPNS_NEW = 0x40108907 + SIOCGSTAMPNS_OLD = 0x8907 + SIOCGSTAMP_NEW = 0x40108906 + SIOCGSTAMP_OLD = 0x8906 SIOCINQ = 0x467f SIOCOUTQ = 0x7472 SIOCOUTQNSD = 0x894b @@ -2205,6 +2237,7 @@ const ( SYNC_FILE_RANGE_WAIT_AFTER = 0x4 SYNC_FILE_RANGE_WAIT_BEFORE = 0x1 SYNC_FILE_RANGE_WRITE = 0x2 + SYNC_FILE_RANGE_WRITE_AND_WAIT = 0x7 SYSFS_MAGIC = 0x62656572 S_BLKSIZE = 0x200 S_IEXEC = 0x40 @@ -2425,6 +2458,7 @@ const ( TS_COMM_LEN = 0x20 TUNATTACHFILTER = 0x800854d5 TUNDETACHFILTER = 0x800854d6 + TUNGETDEVNETNS = 0x200054e3 TUNGETFEATURES = 0x400454cf TUNGETFILTER = 0x400854db TUNGETIFF = 0x400454d2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index 374e3007f..961e8eabe 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -196,6 +196,8 @@ const ( BPF_A = 0x10 BPF_ABS = 0x20 BPF_ADD = 0x0 + BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff + BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 0x38 BPF_ALU = 0x4 BPF_ALU64 = 0x7 BPF_AND = 0x50 @@ -217,6 +219,11 @@ const ( BPF_FROM_BE = 0x8 BPF_FROM_LE = 0x0 BPF_FS_MAGIC = 0xcafe4a11 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = 0x2 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = 0x4 + BPF_F_ADJ_ROOM_ENCAP_L4_GRE = 0x8 + BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 0x10 + BPF_F_ADJ_ROOM_FIXED_GSO = 0x1 BPF_F_ALLOW_MULTI = 0x2 BPF_F_ALLOW_OVERRIDE = 0x1 BPF_F_ANY_ALIGNMENT = 0x2 @@ -238,16 +245,19 @@ const ( BPF_F_PSEUDO_HDR = 0x10 BPF_F_QUERY_EFFECTIVE = 0x1 BPF_F_RDONLY = 0x8 + BPF_F_RDONLY_PROG = 0x80 BPF_F_RECOMPUTE_CSUM = 0x1 BPF_F_REUSE_STACKID = 0x400 BPF_F_SEQ_NUMBER = 0x8 BPF_F_SKIP_FIELD_MASK = 0xff BPF_F_STACK_BUILD_ID = 0x20 BPF_F_STRICT_ALIGNMENT = 0x1 + BPF_F_SYSCTL_BASE_NAME = 0x1 BPF_F_TUNINFO_IPV6 = 0x1 BPF_F_USER_BUILD_ID = 0x800 BPF_F_USER_STACK = 0x100 BPF_F_WRONLY = 0x10 + BPF_F_WRONLY_PROG = 0x100 BPF_F_ZERO_CSUM_TX = 0x2 BPF_F_ZERO_SEED = 0x40 BPF_H = 0x8 @@ -290,8 +300,10 @@ const ( BPF_OR = 0x40 BPF_PSEUDO_CALL = 0x1 BPF_PSEUDO_MAP_FD = 0x1 + BPF_PSEUDO_MAP_VALUE = 0x2 BPF_RET = 0x6 BPF_RSH = 0x70 + BPF_SK_STORAGE_GET_F_CREATE = 0x1 BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 @@ -411,6 +423,7 @@ const ( CLONE_NEWUTS = 0x4000000 CLONE_PARENT = 0x8000 CLONE_PARENT_SETTID = 0x100000 + CLONE_PIDFD = 0x1000 CLONE_PTRACE = 0x2000 CLONE_SETTLS = 0x80000 CLONE_SIGHAND = 0x800 @@ -527,6 +540,7 @@ const ( ETH_P_DNA_RC = 0x6002 ETH_P_DNA_RT = 0x6003 ETH_P_DSA = 0x1b + ETH_P_DSA_8021Q = 0xdadb ETH_P_ECONET = 0x18 ETH_P_EDSA = 0xdada ETH_P_ERSPAN = 0x88be @@ -1134,6 +1148,20 @@ const ( LOCK_NB = 0x4 LOCK_SH = 0x1 
LOCK_UN = 0x8 + LOOP_CLR_FD = 0x4c01 + LOOP_CTL_ADD = 0x4c80 + LOOP_CTL_GET_FREE = 0x4c82 + LOOP_CTL_REMOVE = 0x4c81 + LOOP_GET_STATUS = 0x4c03 + LOOP_GET_STATUS64 = 0x4c05 + LOOP_SET_BLOCK_SIZE = 0x4c09 + LOOP_SET_CAPACITY = 0x4c07 + LOOP_SET_DIRECT_IO = 0x4c08 + LOOP_SET_FD = 0x4c00 + LOOP_SET_STATUS = 0x4c02 + LOOP_SET_STATUS64 = 0x4c04 + LO_KEY_SIZE = 0x20 + LO_NAME_SIZE = 0x40 MADV_DODUMP = 0x11 MADV_DOFORK = 0xb MADV_DONTDUMP = 0x10 @@ -2055,6 +2083,10 @@ const ( SIOCGSKNS = 0x894c SIOCGSTAMP = 0x8906 SIOCGSTAMPNS = 0x8907 + SIOCGSTAMPNS_NEW = 0x40108907 + SIOCGSTAMPNS_OLD = 0x8907 + SIOCGSTAMP_NEW = 0x40108906 + SIOCGSTAMP_OLD = 0x8906 SIOCINQ = 0x4004667f SIOCOUTQ = 0x40047473 SIOCOUTQNSD = 0x894b @@ -2262,6 +2294,7 @@ const ( SYNC_FILE_RANGE_WAIT_AFTER = 0x4 SYNC_FILE_RANGE_WAIT_BEFORE = 0x1 SYNC_FILE_RANGE_WRITE = 0x2 + SYNC_FILE_RANGE_WRITE_AND_WAIT = 0x7 SYSFS_MAGIC = 0x62656572 S_BLKSIZE = 0x200 S_IEXEC = 0x40 @@ -2485,6 +2518,7 @@ const ( TS_COMM_LEN = 0x20 TUNATTACHFILTER = 0x801054d5 TUNDETACHFILTER = 0x801054d6 + TUNGETDEVNETNS = 0x200054e3 TUNGETFEATURES = 0x400454cf TUNGETFILTER = 0x401054db TUNGETIFF = 0x400454d2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index badf14102..6e0538f22 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -196,6 +196,8 @@ const ( BPF_A = 0x10 BPF_ABS = 0x20 BPF_ADD = 0x0 + BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff + BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 0x38 BPF_ALU = 0x4 BPF_ALU64 = 0x7 BPF_AND = 0x50 @@ -217,6 +219,11 @@ const ( BPF_FROM_BE = 0x8 BPF_FROM_LE = 0x0 BPF_FS_MAGIC = 0xcafe4a11 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = 0x2 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = 0x4 + BPF_F_ADJ_ROOM_ENCAP_L4_GRE = 0x8 + BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 0x10 + BPF_F_ADJ_ROOM_FIXED_GSO = 0x1 BPF_F_ALLOW_MULTI = 0x2 BPF_F_ALLOW_OVERRIDE = 0x1 BPF_F_ANY_ALIGNMENT = 0x2 @@ -238,16 +245,19 @@ const ( BPF_F_PSEUDO_HDR = 0x10 BPF_F_QUERY_EFFECTIVE = 0x1 BPF_F_RDONLY = 0x8 + BPF_F_RDONLY_PROG = 0x80 BPF_F_RECOMPUTE_CSUM = 0x1 BPF_F_REUSE_STACKID = 0x400 BPF_F_SEQ_NUMBER = 0x8 BPF_F_SKIP_FIELD_MASK = 0xff BPF_F_STACK_BUILD_ID = 0x20 BPF_F_STRICT_ALIGNMENT = 0x1 + BPF_F_SYSCTL_BASE_NAME = 0x1 BPF_F_TUNINFO_IPV6 = 0x1 BPF_F_USER_BUILD_ID = 0x800 BPF_F_USER_STACK = 0x100 BPF_F_WRONLY = 0x10 + BPF_F_WRONLY_PROG = 0x100 BPF_F_ZERO_CSUM_TX = 0x2 BPF_F_ZERO_SEED = 0x40 BPF_H = 0x8 @@ -290,8 +300,10 @@ const ( BPF_OR = 0x40 BPF_PSEUDO_CALL = 0x1 BPF_PSEUDO_MAP_FD = 0x1 + BPF_PSEUDO_MAP_VALUE = 0x2 BPF_RET = 0x6 BPF_RSH = 0x70 + BPF_SK_STORAGE_GET_F_CREATE = 0x1 BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 @@ -411,6 +423,7 @@ const ( CLONE_NEWUTS = 0x4000000 CLONE_PARENT = 0x8000 CLONE_PARENT_SETTID = 0x100000 + CLONE_PIDFD = 0x1000 CLONE_PTRACE = 0x2000 CLONE_SETTLS = 0x80000 CLONE_SIGHAND = 0x800 @@ -527,6 +540,7 @@ const ( ETH_P_DNA_RC = 0x6002 ETH_P_DNA_RT = 0x6003 ETH_P_DSA = 0x1b + ETH_P_DSA_8021Q = 0xdadb ETH_P_ECONET = 0x18 ETH_P_EDSA = 0xdada ETH_P_ERSPAN = 0x88be @@ -1134,6 +1148,20 @@ const ( LOCK_NB = 0x4 LOCK_SH = 0x1 LOCK_UN = 0x8 + LOOP_CLR_FD = 0x4c01 + LOOP_CTL_ADD = 0x4c80 + LOOP_CTL_GET_FREE = 0x4c82 + LOOP_CTL_REMOVE = 0x4c81 + LOOP_GET_STATUS = 0x4c03 + LOOP_GET_STATUS64 = 0x4c05 + LOOP_SET_BLOCK_SIZE = 0x4c09 + LOOP_SET_CAPACITY = 0x4c07 + LOOP_SET_DIRECT_IO = 0x4c08 + LOOP_SET_FD = 0x4c00 + LOOP_SET_STATUS = 0x4c02 + LOOP_SET_STATUS64 = 0x4c04 + LO_KEY_SIZE = 0x20 + 
LO_NAME_SIZE = 0x40 MADV_DODUMP = 0x11 MADV_DOFORK = 0xb MADV_DONTDUMP = 0x10 @@ -2055,6 +2083,10 @@ const ( SIOCGSKNS = 0x894c SIOCGSTAMP = 0x8906 SIOCGSTAMPNS = 0x8907 + SIOCGSTAMPNS_NEW = 0x40108907 + SIOCGSTAMPNS_OLD = 0x8907 + SIOCGSTAMP_NEW = 0x40108906 + SIOCGSTAMP_OLD = 0x8906 SIOCINQ = 0x4004667f SIOCOUTQ = 0x40047473 SIOCOUTQNSD = 0x894b @@ -2262,6 +2294,7 @@ const ( SYNC_FILE_RANGE_WAIT_AFTER = 0x4 SYNC_FILE_RANGE_WAIT_BEFORE = 0x1 SYNC_FILE_RANGE_WRITE = 0x2 + SYNC_FILE_RANGE_WRITE_AND_WAIT = 0x7 SYSFS_MAGIC = 0x62656572 S_BLKSIZE = 0x200 S_IEXEC = 0x40 @@ -2485,6 +2518,7 @@ const ( TS_COMM_LEN = 0x20 TUNATTACHFILTER = 0x801054d5 TUNDETACHFILTER = 0x801054d6 + TUNGETDEVNETNS = 0x200054e3 TUNGETFEATURES = 0x400454cf TUNGETFILTER = 0x401054db TUNGETIFF = 0x400454d2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index 0ce8c7eff..06c0148c1 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -196,6 +196,8 @@ const ( BPF_A = 0x10 BPF_ABS = 0x20 BPF_ADD = 0x0 + BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff + BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 0x38 BPF_ALU = 0x4 BPF_ALU64 = 0x7 BPF_AND = 0x50 @@ -217,6 +219,11 @@ const ( BPF_FROM_BE = 0x8 BPF_FROM_LE = 0x0 BPF_FS_MAGIC = 0xcafe4a11 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = 0x2 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = 0x4 + BPF_F_ADJ_ROOM_ENCAP_L4_GRE = 0x8 + BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 0x10 + BPF_F_ADJ_ROOM_FIXED_GSO = 0x1 BPF_F_ALLOW_MULTI = 0x2 BPF_F_ALLOW_OVERRIDE = 0x1 BPF_F_ANY_ALIGNMENT = 0x2 @@ -238,16 +245,19 @@ const ( BPF_F_PSEUDO_HDR = 0x10 BPF_F_QUERY_EFFECTIVE = 0x1 BPF_F_RDONLY = 0x8 + BPF_F_RDONLY_PROG = 0x80 BPF_F_RECOMPUTE_CSUM = 0x1 BPF_F_REUSE_STACKID = 0x400 BPF_F_SEQ_NUMBER = 0x8 BPF_F_SKIP_FIELD_MASK = 0xff BPF_F_STACK_BUILD_ID = 0x20 BPF_F_STRICT_ALIGNMENT = 0x1 + BPF_F_SYSCTL_BASE_NAME = 0x1 BPF_F_TUNINFO_IPV6 = 0x1 BPF_F_USER_BUILD_ID = 0x800 BPF_F_USER_STACK = 0x100 BPF_F_WRONLY = 0x10 + BPF_F_WRONLY_PROG = 0x100 BPF_F_ZERO_CSUM_TX = 0x2 BPF_F_ZERO_SEED = 0x40 BPF_H = 0x8 @@ -290,8 +300,10 @@ const ( BPF_OR = 0x40 BPF_PSEUDO_CALL = 0x1 BPF_PSEUDO_MAP_FD = 0x1 + BPF_PSEUDO_MAP_VALUE = 0x2 BPF_RET = 0x6 BPF_RSH = 0x70 + BPF_SK_STORAGE_GET_F_CREATE = 0x1 BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 @@ -411,6 +423,7 @@ const ( CLONE_NEWUTS = 0x4000000 CLONE_PARENT = 0x8000 CLONE_PARENT_SETTID = 0x100000 + CLONE_PIDFD = 0x1000 CLONE_PTRACE = 0x2000 CLONE_SETTLS = 0x80000 CLONE_SIGHAND = 0x800 @@ -527,6 +540,7 @@ const ( ETH_P_DNA_RC = 0x6002 ETH_P_DNA_RT = 0x6003 ETH_P_DSA = 0x1b + ETH_P_DSA_8021Q = 0xdadb ETH_P_ECONET = 0x18 ETH_P_EDSA = 0xdada ETH_P_ERSPAN = 0x88be @@ -1134,6 +1148,20 @@ const ( LOCK_NB = 0x4 LOCK_SH = 0x1 LOCK_UN = 0x8 + LOOP_CLR_FD = 0x4c01 + LOOP_CTL_ADD = 0x4c80 + LOOP_CTL_GET_FREE = 0x4c82 + LOOP_CTL_REMOVE = 0x4c81 + LOOP_GET_STATUS = 0x4c03 + LOOP_GET_STATUS64 = 0x4c05 + LOOP_SET_BLOCK_SIZE = 0x4c09 + LOOP_SET_CAPACITY = 0x4c07 + LOOP_SET_DIRECT_IO = 0x4c08 + LOOP_SET_FD = 0x4c00 + LOOP_SET_STATUS = 0x4c02 + LOOP_SET_STATUS64 = 0x4c04 + LO_KEY_SIZE = 0x20 + LO_NAME_SIZE = 0x40 MADV_DODUMP = 0x11 MADV_DOFORK = 0xb MADV_DONTDUMP = 0x10 @@ -1985,6 +2013,10 @@ const ( SIOCGSKNS = 0x894c SIOCGSTAMP = 0x8906 SIOCGSTAMPNS = 0x8907 + SIOCGSTAMPNS_NEW = 0x80108907 + SIOCGSTAMPNS_OLD = 0x8907 + SIOCGSTAMP_NEW = 0x80108906 + SIOCGSTAMP_OLD = 0x8906 SIOCINQ = 0x541b SIOCOUTQ = 0x5411 SIOCOUTQNSD = 0x894b @@ -2192,6 +2224,7 @@ 
const ( SYNC_FILE_RANGE_WAIT_AFTER = 0x4 SYNC_FILE_RANGE_WAIT_BEFORE = 0x1 SYNC_FILE_RANGE_WRITE = 0x2 + SYNC_FILE_RANGE_WRITE_AND_WAIT = 0x7 SYSFS_MAGIC = 0x62656572 S_BLKSIZE = 0x200 S_IEXEC = 0x40 @@ -2411,6 +2444,7 @@ const ( TS_COMM_LEN = 0x20 TUNATTACHFILTER = 0x401054d5 TUNDETACHFILTER = 0x401054d6 + TUNGETDEVNETNS = 0x54e3 TUNGETFEATURES = 0x800454cf TUNGETFILTER = 0x801054db TUNGETIFF = 0x800454d2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index 47675125a..39875095c 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -196,6 +196,8 @@ const ( BPF_A = 0x10 BPF_ABS = 0x20 BPF_ADD = 0x0 + BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff + BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 0x38 BPF_ALU = 0x4 BPF_ALU64 = 0x7 BPF_AND = 0x50 @@ -217,6 +219,11 @@ const ( BPF_FROM_BE = 0x8 BPF_FROM_LE = 0x0 BPF_FS_MAGIC = 0xcafe4a11 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = 0x2 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = 0x4 + BPF_F_ADJ_ROOM_ENCAP_L4_GRE = 0x8 + BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 0x10 + BPF_F_ADJ_ROOM_FIXED_GSO = 0x1 BPF_F_ALLOW_MULTI = 0x2 BPF_F_ALLOW_OVERRIDE = 0x1 BPF_F_ANY_ALIGNMENT = 0x2 @@ -238,16 +245,19 @@ const ( BPF_F_PSEUDO_HDR = 0x10 BPF_F_QUERY_EFFECTIVE = 0x1 BPF_F_RDONLY = 0x8 + BPF_F_RDONLY_PROG = 0x80 BPF_F_RECOMPUTE_CSUM = 0x1 BPF_F_REUSE_STACKID = 0x400 BPF_F_SEQ_NUMBER = 0x8 BPF_F_SKIP_FIELD_MASK = 0xff BPF_F_STACK_BUILD_ID = 0x20 BPF_F_STRICT_ALIGNMENT = 0x1 + BPF_F_SYSCTL_BASE_NAME = 0x1 BPF_F_TUNINFO_IPV6 = 0x1 BPF_F_USER_BUILD_ID = 0x800 BPF_F_USER_STACK = 0x100 BPF_F_WRONLY = 0x10 + BPF_F_WRONLY_PROG = 0x100 BPF_F_ZERO_CSUM_TX = 0x2 BPF_F_ZERO_SEED = 0x40 BPF_H = 0x8 @@ -290,8 +300,10 @@ const ( BPF_OR = 0x40 BPF_PSEUDO_CALL = 0x1 BPF_PSEUDO_MAP_FD = 0x1 + BPF_PSEUDO_MAP_VALUE = 0x2 BPF_RET = 0x6 BPF_RSH = 0x70 + BPF_SK_STORAGE_GET_F_CREATE = 0x1 BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 @@ -411,6 +423,7 @@ const ( CLONE_NEWUTS = 0x4000000 CLONE_PARENT = 0x8000 CLONE_PARENT_SETTID = 0x100000 + CLONE_PIDFD = 0x1000 CLONE_PTRACE = 0x2000 CLONE_SETTLS = 0x80000 CLONE_SIGHAND = 0x800 @@ -527,6 +540,7 @@ const ( ETH_P_DNA_RC = 0x6002 ETH_P_DNA_RT = 0x6003 ETH_P_DSA = 0x1b + ETH_P_DSA_8021Q = 0xdadb ETH_P_ECONET = 0x18 ETH_P_EDSA = 0xdada ETH_P_ERSPAN = 0x88be @@ -1134,6 +1148,20 @@ const ( LOCK_NB = 0x4 LOCK_SH = 0x1 LOCK_UN = 0x8 + LOOP_CLR_FD = 0x4c01 + LOOP_CTL_ADD = 0x4c80 + LOOP_CTL_GET_FREE = 0x4c82 + LOOP_CTL_REMOVE = 0x4c81 + LOOP_GET_STATUS = 0x4c03 + LOOP_GET_STATUS64 = 0x4c05 + LOOP_SET_BLOCK_SIZE = 0x4c09 + LOOP_SET_CAPACITY = 0x4c07 + LOOP_SET_DIRECT_IO = 0x4c08 + LOOP_SET_FD = 0x4c00 + LOOP_SET_STATUS = 0x4c02 + LOOP_SET_STATUS64 = 0x4c04 + LO_KEY_SIZE = 0x20 + LO_NAME_SIZE = 0x40 MADV_DODUMP = 0x11 MADV_DOFORK = 0xb MADV_DONTDUMP = 0x10 @@ -2058,6 +2086,10 @@ const ( SIOCGSKNS = 0x894c SIOCGSTAMP = 0x8906 SIOCGSTAMPNS = 0x8907 + SIOCGSTAMPNS_NEW = 0x80108907 + SIOCGSTAMPNS_OLD = 0x8907 + SIOCGSTAMP_NEW = 0x80108906 + SIOCGSTAMP_OLD = 0x8906 SIOCINQ = 0x541b SIOCOUTQ = 0x5411 SIOCOUTQNSD = 0x894b @@ -2265,6 +2297,7 @@ const ( SYNC_FILE_RANGE_WAIT_AFTER = 0x4 SYNC_FILE_RANGE_WAIT_BEFORE = 0x1 SYNC_FILE_RANGE_WRITE = 0x2 + SYNC_FILE_RANGE_WRITE_AND_WAIT = 0x7 SYSFS_MAGIC = 0x62656572 S_BLKSIZE = 0x200 S_IEXEC = 0x40 @@ -2484,6 +2517,7 @@ const ( TS_COMM_LEN = 0x20 TUNATTACHFILTER = 0x401054d5 TUNDETACHFILTER = 0x401054d6 + TUNGETDEVNETNS = 0x54e3 TUNGETFEATURES = 0x800454cf TUNGETFILTER = 0x801054db 
TUNGETIFF = 0x800454d2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index a46fc9b43..8d80f99bc 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -199,6 +199,8 @@ const ( BPF_A = 0x10 BPF_ABS = 0x20 BPF_ADD = 0x0 + BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff + BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 0x38 BPF_ALU = 0x4 BPF_ALU64 = 0x7 BPF_AND = 0x50 @@ -220,6 +222,11 @@ const ( BPF_FROM_BE = 0x8 BPF_FROM_LE = 0x0 BPF_FS_MAGIC = 0xcafe4a11 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = 0x2 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = 0x4 + BPF_F_ADJ_ROOM_ENCAP_L4_GRE = 0x8 + BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 0x10 + BPF_F_ADJ_ROOM_FIXED_GSO = 0x1 BPF_F_ALLOW_MULTI = 0x2 BPF_F_ALLOW_OVERRIDE = 0x1 BPF_F_ANY_ALIGNMENT = 0x2 @@ -241,16 +248,19 @@ const ( BPF_F_PSEUDO_HDR = 0x10 BPF_F_QUERY_EFFECTIVE = 0x1 BPF_F_RDONLY = 0x8 + BPF_F_RDONLY_PROG = 0x80 BPF_F_RECOMPUTE_CSUM = 0x1 BPF_F_REUSE_STACKID = 0x400 BPF_F_SEQ_NUMBER = 0x8 BPF_F_SKIP_FIELD_MASK = 0xff BPF_F_STACK_BUILD_ID = 0x20 BPF_F_STRICT_ALIGNMENT = 0x1 + BPF_F_SYSCTL_BASE_NAME = 0x1 BPF_F_TUNINFO_IPV6 = 0x1 BPF_F_USER_BUILD_ID = 0x800 BPF_F_USER_STACK = 0x100 BPF_F_WRONLY = 0x10 + BPF_F_WRONLY_PROG = 0x100 BPF_F_ZERO_CSUM_TX = 0x2 BPF_F_ZERO_SEED = 0x40 BPF_H = 0x8 @@ -293,8 +303,10 @@ const ( BPF_OR = 0x40 BPF_PSEUDO_CALL = 0x1 BPF_PSEUDO_MAP_FD = 0x1 + BPF_PSEUDO_MAP_VALUE = 0x2 BPF_RET = 0x6 BPF_RSH = 0x70 + BPF_SK_STORAGE_GET_F_CREATE = 0x1 BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 @@ -414,6 +426,7 @@ const ( CLONE_NEWUTS = 0x4000000 CLONE_PARENT = 0x8000 CLONE_PARENT_SETTID = 0x100000 + CLONE_PIDFD = 0x1000 CLONE_PTRACE = 0x2000 CLONE_SETTLS = 0x80000 CLONE_SIGHAND = 0x800 @@ -531,6 +544,7 @@ const ( ETH_P_DNA_RC = 0x6002 ETH_P_DNA_RT = 0x6003 ETH_P_DSA = 0x1b + ETH_P_DSA_8021Q = 0xdadb ETH_P_ECONET = 0x18 ETH_P_EDSA = 0xdada ETH_P_ERSPAN = 0x88be @@ -1138,6 +1152,20 @@ const ( LOCK_NB = 0x4 LOCK_SH = 0x1 LOCK_UN = 0x8 + LOOP_CLR_FD = 0x4c01 + LOOP_CTL_ADD = 0x4c80 + LOOP_CTL_GET_FREE = 0x4c82 + LOOP_CTL_REMOVE = 0x4c81 + LOOP_GET_STATUS = 0x4c03 + LOOP_GET_STATUS64 = 0x4c05 + LOOP_SET_BLOCK_SIZE = 0x4c09 + LOOP_SET_CAPACITY = 0x4c07 + LOOP_SET_DIRECT_IO = 0x4c08 + LOOP_SET_FD = 0x4c00 + LOOP_SET_STATUS = 0x4c02 + LOOP_SET_STATUS64 = 0x4c04 + LO_KEY_SIZE = 0x20 + LO_NAME_SIZE = 0x40 MADV_DODUMP = 0x11 MADV_DOFORK = 0xb MADV_DONTDUMP = 0x10 @@ -2050,6 +2078,10 @@ const ( SIOCGSKNS = 0x894c SIOCGSTAMP = 0x8906 SIOCGSTAMPNS = 0x8907 + SIOCGSTAMPNS_NEW = 0x40108907 + SIOCGSTAMPNS_OLD = 0x8907 + SIOCGSTAMP_NEW = 0x40108906 + SIOCGSTAMP_OLD = 0x8906 SIOCINQ = 0x4004667f SIOCOUTQ = 0x40047473 SIOCOUTQNSD = 0x894b @@ -2257,6 +2289,7 @@ const ( SYNC_FILE_RANGE_WAIT_AFTER = 0x4 SYNC_FILE_RANGE_WAIT_BEFORE = 0x1 SYNC_FILE_RANGE_WRITE = 0x2 + SYNC_FILE_RANGE_WRITE_AND_WAIT = 0x7 SYSFS_MAGIC = 0x62656572 S_BLKSIZE = 0x200 S_IEXEC = 0x40 @@ -2473,6 +2506,7 @@ const ( TS_COMM_LEN = 0x20 TUNATTACHFILTER = 0x801054d5 TUNDETACHFILTER = 0x801054d6 + TUNGETDEVNETNS = 0x200054e3 TUNGETFEATURES = 0x400454cf TUNGETFILTER = 0x401054db TUNGETIFF = 0x400454d2 diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go index 33b6e4d1a..e869c0603 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go @@ -423,4 +423,10 @@ const ( SYS_IO_URING_SETUP = 425 SYS_IO_URING_ENTER = 426 
SYS_IO_URING_REGISTER = 427 + SYS_OPEN_TREE = 428 + SYS_MOVE_MOUNT = 429 + SYS_FSOPEN = 430 + SYS_FSCONFIG = 431 + SYS_FSMOUNT = 432 + SYS_FSPICK = 433 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go index 9ba207847..4917b8ab6 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go @@ -345,4 +345,10 @@ const ( SYS_IO_URING_SETUP = 425 SYS_IO_URING_ENTER = 426 SYS_IO_URING_REGISTER = 427 + SYS_OPEN_TREE = 428 + SYS_MOVE_MOUNT = 429 + SYS_FSOPEN = 430 + SYS_FSCONFIG = 431 + SYS_FSMOUNT = 432 + SYS_FSPICK = 433 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go index 94f68f101..f85fcb4f8 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go @@ -387,4 +387,10 @@ const ( SYS_IO_URING_SETUP = 425 SYS_IO_URING_ENTER = 426 SYS_IO_URING_REGISTER = 427 + SYS_OPEN_TREE = 428 + SYS_MOVE_MOUNT = 429 + SYS_FSOPEN = 430 + SYS_FSCONFIG = 431 + SYS_FSMOUNT = 432 + SYS_FSPICK = 433 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go index 15c413516..678a119bc 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go @@ -290,4 +290,10 @@ const ( SYS_IO_URING_SETUP = 425 SYS_IO_URING_ENTER = 426 SYS_IO_URING_REGISTER = 427 + SYS_OPEN_TREE = 428 + SYS_MOVE_MOUNT = 429 + SYS_FSOPEN = 430 + SYS_FSCONFIG = 431 + SYS_FSMOUNT = 432 + SYS_FSPICK = 433 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go index 638465b14..222c9f9a2 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go @@ -408,4 +408,10 @@ const ( SYS_IO_URING_SETUP = 4425 SYS_IO_URING_ENTER = 4426 SYS_IO_URING_REGISTER = 4427 + SYS_OPEN_TREE = 4428 + SYS_MOVE_MOUNT = 4429 + SYS_FSOPEN = 4430 + SYS_FSCONFIG = 4431 + SYS_FSMOUNT = 4432 + SYS_FSPICK = 4433 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go index 57ec82aac..28e6d0e9d 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go @@ -338,4 +338,10 @@ const ( SYS_IO_URING_SETUP = 5425 SYS_IO_URING_ENTER = 5426 SYS_IO_URING_REGISTER = 5427 + SYS_OPEN_TREE = 5428 + SYS_MOVE_MOUNT = 5429 + SYS_FSOPEN = 5430 + SYS_FSCONFIG = 5431 + SYS_FSMOUNT = 5432 + SYS_FSPICK = 5433 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go index 825a3e3b0..e643c6f63 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go @@ -338,4 +338,10 @@ const ( SYS_IO_URING_SETUP = 5425 SYS_IO_URING_ENTER = 5426 SYS_IO_URING_REGISTER = 5427 + SYS_OPEN_TREE = 5428 + SYS_MOVE_MOUNT = 5429 + SYS_FSOPEN = 5430 + SYS_FSCONFIG = 5431 + SYS_FSMOUNT = 5432 + SYS_FSPICK = 5433 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go index f152dfdd0..01d93c420 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go @@ -408,4 +408,10 @@ const ( SYS_IO_URING_SETUP = 4425 SYS_IO_URING_ENTER = 4426 
SYS_IO_URING_REGISTER = 4427 + SYS_OPEN_TREE = 4428 + SYS_MOVE_MOUNT = 4429 + SYS_FSOPEN = 4430 + SYS_FSCONFIG = 4431 + SYS_FSMOUNT = 4432 + SYS_FSPICK = 4433 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go index 7cbe78b19..5744149eb 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go @@ -387,4 +387,10 @@ const ( SYS_IO_URING_SETUP = 425 SYS_IO_URING_ENTER = 426 SYS_IO_URING_REGISTER = 427 + SYS_OPEN_TREE = 428 + SYS_MOVE_MOUNT = 429 + SYS_FSOPEN = 430 + SYS_FSCONFIG = 431 + SYS_FSMOUNT = 432 + SYS_FSPICK = 433 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go index 51a2f1236..21c832042 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go @@ -387,4 +387,10 @@ const ( SYS_IO_URING_SETUP = 425 SYS_IO_URING_ENTER = 426 SYS_IO_URING_REGISTER = 427 + SYS_OPEN_TREE = 428 + SYS_MOVE_MOUNT = 429 + SYS_FSOPEN = 430 + SYS_FSCONFIG = 431 + SYS_FSMOUNT = 432 + SYS_FSPICK = 433 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go index 323432ae3..c1bb6d8f2 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go @@ -289,4 +289,10 @@ const ( SYS_IO_URING_SETUP = 425 SYS_IO_URING_ENTER = 426 SYS_IO_URING_REGISTER = 427 + SYS_OPEN_TREE = 428 + SYS_MOVE_MOUNT = 429 + SYS_FSOPEN = 430 + SYS_FSCONFIG = 431 + SYS_FSMOUNT = 432 + SYS_FSPICK = 433 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go index 9dca97484..bc3cc6b5b 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go @@ -352,4 +352,10 @@ const ( SYS_IO_URING_SETUP = 425 SYS_IO_URING_ENTER = 426 SYS_IO_URING_REGISTER = 427 + SYS_OPEN_TREE = 428 + SYS_MOVE_MOUNT = 429 + SYS_FSOPEN = 430 + SYS_FSCONFIG = 431 + SYS_FSMOUNT = 432 + SYS_FSPICK = 433 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go index d3da46f0d..0a2841ba8 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go @@ -367,4 +367,10 @@ const ( SYS_IO_URING_SETUP = 425 SYS_IO_URING_ENTER = 426 SYS_IO_URING_REGISTER = 427 + SYS_OPEN_TREE = 428 + SYS_MOVE_MOUNT = 429 + SYS_FSOPEN = 430 + SYS_FSCONFIG = 431 + SYS_FSMOUNT = 432 + SYS_FSPICK = 433 ) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go index 5492b9666..50bc4128f 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go @@ -2484,3 +2484,40 @@ const ( LINUX_CAPABILITY_VERSION_2 = 0x20071026 LINUX_CAPABILITY_VERSION_3 = 0x20080522 ) + +const ( + LO_FLAGS_READ_ONLY = 0x1 + LO_FLAGS_AUTOCLEAR = 0x4 + LO_FLAGS_PARTSCAN = 0x8 + LO_FLAGS_DIRECT_IO = 0x10 +) + +type LoopInfo struct { + Number int32 + Device uint16 + Inode uint32 + Rdevice uint16 + Offset int32 + Encrypt_type int32 + Encrypt_key_size int32 + Flags int32 + Name [64]int8 + Encrypt_key [32]uint8 + Init [2]uint32 + Reserved [4]int8 +} +type LoopInfo64 struct { + Device uint64 + Inode uint64 + Rdevice uint64 + Offset uint64 + Sizelimit uint64 + Number uint32 
+ Encrypt_type uint32 + Encrypt_key_size uint32 + Flags uint32 + File_name [64]uint8 + Crypt_name [64]uint8 + Encrypt_key [32]uint8 + Init [2]uint64 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go index caf33b2c5..055eaa76a 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go @@ -2497,3 +2497,41 @@ const ( LINUX_CAPABILITY_VERSION_2 = 0x20071026 LINUX_CAPABILITY_VERSION_3 = 0x20080522 ) + +const ( + LO_FLAGS_READ_ONLY = 0x1 + LO_FLAGS_AUTOCLEAR = 0x4 + LO_FLAGS_PARTSCAN = 0x8 + LO_FLAGS_DIRECT_IO = 0x10 +) + +type LoopInfo struct { + Number int32 + Device uint64 + Inode uint64 + Rdevice uint64 + Offset int32 + Encrypt_type int32 + Encrypt_key_size int32 + Flags int32 + Name [64]int8 + Encrypt_key [32]uint8 + Init [2]uint64 + Reserved [4]int8 + _ [4]byte +} +type LoopInfo64 struct { + Device uint64 + Inode uint64 + Rdevice uint64 + Offset uint64 + Sizelimit uint64 + Number uint32 + Encrypt_type uint32 + Encrypt_key_size uint32 + Flags uint32 + File_name [64]uint8 + Crypt_name [64]uint8 + Encrypt_key [32]uint8 + Init [2]uint64 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go index 93aec7e22..66019c9cf 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go @@ -2475,3 +2475,40 @@ const ( LINUX_CAPABILITY_VERSION_2 = 0x20071026 LINUX_CAPABILITY_VERSION_3 = 0x20080522 ) + +const ( + LO_FLAGS_READ_ONLY = 0x1 + LO_FLAGS_AUTOCLEAR = 0x4 + LO_FLAGS_PARTSCAN = 0x8 + LO_FLAGS_DIRECT_IO = 0x10 +) + +type LoopInfo struct { + Number int32 + Device uint16 + Inode uint32 + Rdevice uint16 + Offset int32 + Encrypt_type int32 + Encrypt_key_size int32 + Flags int32 + Name [64]uint8 + Encrypt_key [32]uint8 + Init [2]uint32 + Reserved [4]uint8 +} +type LoopInfo64 struct { + Device uint64 + Inode uint64 + Rdevice uint64 + Offset uint64 + Sizelimit uint64 + Number uint32 + Encrypt_type uint32 + Encrypt_key_size uint32 + Flags uint32 + File_name [64]uint8 + Crypt_name [64]uint8 + Encrypt_key [32]uint8 + Init [2]uint64 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go index 0a038436d..3104798c4 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go @@ -2476,3 +2476,41 @@ const ( LINUX_CAPABILITY_VERSION_2 = 0x20071026 LINUX_CAPABILITY_VERSION_3 = 0x20080522 ) + +const ( + LO_FLAGS_READ_ONLY = 0x1 + LO_FLAGS_AUTOCLEAR = 0x4 + LO_FLAGS_PARTSCAN = 0x8 + LO_FLAGS_DIRECT_IO = 0x10 +) + +type LoopInfo struct { + Number int32 + Device uint32 + Inode uint64 + Rdevice uint32 + Offset int32 + Encrypt_type int32 + Encrypt_key_size int32 + Flags int32 + Name [64]int8 + Encrypt_key [32]uint8 + Init [2]uint64 + Reserved [4]int8 + _ [4]byte +} +type LoopInfo64 struct { + Device uint64 + Inode uint64 + Rdevice uint64 + Offset uint64 + Sizelimit uint64 + Number uint32 + Encrypt_type uint32 + Encrypt_key_size uint32 + Flags uint32 + File_name [64]uint8 + Crypt_name [64]uint8 + Encrypt_key [32]uint8 + Init [2]uint64 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go index 2de0e5800..46c86021b 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go @@ -2481,3 +2481,40 @@ const ( LINUX_CAPABILITY_VERSION_2 = 
0x20071026 LINUX_CAPABILITY_VERSION_3 = 0x20080522 ) + +const ( + LO_FLAGS_READ_ONLY = 0x1 + LO_FLAGS_AUTOCLEAR = 0x4 + LO_FLAGS_PARTSCAN = 0x8 + LO_FLAGS_DIRECT_IO = 0x10 +) + +type LoopInfo struct { + Number int32 + Device uint32 + Inode uint32 + Rdevice uint32 + Offset int32 + Encrypt_type int32 + Encrypt_key_size int32 + Flags int32 + Name [64]int8 + Encrypt_key [32]uint8 + Init [2]uint32 + Reserved [4]int8 +} +type LoopInfo64 struct { + Device uint64 + Inode uint64 + Rdevice uint64 + Offset uint64 + Sizelimit uint64 + Number uint32 + Encrypt_type uint32 + Encrypt_key_size uint32 + Flags uint32 + File_name [64]uint8 + Crypt_name [64]uint8 + Encrypt_key [32]uint8 + Init [2]uint64 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go index 3735eb42e..c2fe1a62a 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go @@ -2478,3 +2478,41 @@ const ( LINUX_CAPABILITY_VERSION_2 = 0x20071026 LINUX_CAPABILITY_VERSION_3 = 0x20080522 ) + +const ( + LO_FLAGS_READ_ONLY = 0x1 + LO_FLAGS_AUTOCLEAR = 0x4 + LO_FLAGS_PARTSCAN = 0x8 + LO_FLAGS_DIRECT_IO = 0x10 +) + +type LoopInfo struct { + Number int32 + Device uint32 + Inode uint64 + Rdevice uint32 + Offset int32 + Encrypt_type int32 + Encrypt_key_size int32 + Flags int32 + Name [64]int8 + Encrypt_key [32]uint8 + Init [2]uint64 + Reserved [4]int8 + _ [4]byte +} +type LoopInfo64 struct { + Device uint64 + Inode uint64 + Rdevice uint64 + Offset uint64 + Sizelimit uint64 + Number uint32 + Encrypt_type uint32 + Encrypt_key_size uint32 + Flags uint32 + File_name [64]uint8 + Crypt_name [64]uint8 + Encrypt_key [32]uint8 + Init [2]uint64 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go index 073c29939..f1eb0d397 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go @@ -2478,3 +2478,41 @@ const ( LINUX_CAPABILITY_VERSION_2 = 0x20071026 LINUX_CAPABILITY_VERSION_3 = 0x20080522 ) + +const ( + LO_FLAGS_READ_ONLY = 0x1 + LO_FLAGS_AUTOCLEAR = 0x4 + LO_FLAGS_PARTSCAN = 0x8 + LO_FLAGS_DIRECT_IO = 0x10 +) + +type LoopInfo struct { + Number int32 + Device uint32 + Inode uint64 + Rdevice uint32 + Offset int32 + Encrypt_type int32 + Encrypt_key_size int32 + Flags int32 + Name [64]int8 + Encrypt_key [32]uint8 + Init [2]uint64 + Reserved [4]int8 + _ [4]byte +} +type LoopInfo64 struct { + Device uint64 + Inode uint64 + Rdevice uint64 + Offset uint64 + Sizelimit uint64 + Number uint32 + Encrypt_type uint32 + Encrypt_key_size uint32 + Flags uint32 + File_name [64]uint8 + Crypt_name [64]uint8 + Encrypt_key [32]uint8 + Init [2]uint64 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go index 58d09f75e..8759bc36b 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go @@ -2481,3 +2481,40 @@ const ( LINUX_CAPABILITY_VERSION_2 = 0x20071026 LINUX_CAPABILITY_VERSION_3 = 0x20080522 ) + +const ( + LO_FLAGS_READ_ONLY = 0x1 + LO_FLAGS_AUTOCLEAR = 0x4 + LO_FLAGS_PARTSCAN = 0x8 + LO_FLAGS_DIRECT_IO = 0x10 +) + +type LoopInfo struct { + Number int32 + Device uint32 + Inode uint32 + Rdevice uint32 + Offset int32 + Encrypt_type int32 + Encrypt_key_size int32 + Flags int32 + Name [64]int8 + Encrypt_key [32]uint8 + Init [2]uint32 + Reserved [4]int8 +} +type LoopInfo64 struct { + 
Device uint64 + Inode uint64 + Rdevice uint64 + Offset uint64 + Sizelimit uint64 + Number uint32 + Encrypt_type uint32 + Encrypt_key_size uint32 + Flags uint32 + File_name [64]uint8 + Crypt_name [64]uint8 + Encrypt_key [32]uint8 + Init [2]uint64 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go index 3f1e62e03..a81200541 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go @@ -2486,3 +2486,41 @@ const ( LINUX_CAPABILITY_VERSION_2 = 0x20071026 LINUX_CAPABILITY_VERSION_3 = 0x20080522 ) + +const ( + LO_FLAGS_READ_ONLY = 0x1 + LO_FLAGS_AUTOCLEAR = 0x4 + LO_FLAGS_PARTSCAN = 0x8 + LO_FLAGS_DIRECT_IO = 0x10 +) + +type LoopInfo struct { + Number int32 + Device uint64 + Inode uint64 + Rdevice uint64 + Offset int32 + Encrypt_type int32 + Encrypt_key_size int32 + Flags int32 + Name [64]uint8 + Encrypt_key [32]uint8 + Init [2]uint64 + Reserved [4]uint8 + _ [4]byte +} +type LoopInfo64 struct { + Device uint64 + Inode uint64 + Rdevice uint64 + Offset uint64 + Sizelimit uint64 + Number uint32 + Encrypt_type uint32 + Encrypt_key_size uint32 + Flags uint32 + File_name [64]uint8 + Crypt_name [64]uint8 + Encrypt_key [32]uint8 + Init [2]uint64 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go index e67be11eb..74b7a9199 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go @@ -2486,3 +2486,41 @@ const ( LINUX_CAPABILITY_VERSION_2 = 0x20071026 LINUX_CAPABILITY_VERSION_3 = 0x20080522 ) + +const ( + LO_FLAGS_READ_ONLY = 0x1 + LO_FLAGS_AUTOCLEAR = 0x4 + LO_FLAGS_PARTSCAN = 0x8 + LO_FLAGS_DIRECT_IO = 0x10 +) + +type LoopInfo struct { + Number int32 + Device uint64 + Inode uint64 + Rdevice uint64 + Offset int32 + Encrypt_type int32 + Encrypt_key_size int32 + Flags int32 + Name [64]uint8 + Encrypt_key [32]uint8 + Init [2]uint64 + Reserved [4]uint8 + _ [4]byte +} +type LoopInfo64 struct { + Device uint64 + Inode uint64 + Rdevice uint64 + Offset uint64 + Sizelimit uint64 + Number uint32 + Encrypt_type uint32 + Encrypt_key_size uint32 + Flags uint32 + File_name [64]uint8 + Crypt_name [64]uint8 + Encrypt_key [32]uint8 + Init [2]uint64 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go index f44f29403..8344583e7 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go @@ -2503,3 +2503,41 @@ const ( LINUX_CAPABILITY_VERSION_2 = 0x20071026 LINUX_CAPABILITY_VERSION_3 = 0x20080522 ) + +const ( + LO_FLAGS_READ_ONLY = 0x1 + LO_FLAGS_AUTOCLEAR = 0x4 + LO_FLAGS_PARTSCAN = 0x8 + LO_FLAGS_DIRECT_IO = 0x10 +) + +type LoopInfo struct { + Number int32 + Device uint32 + Inode uint64 + Rdevice uint32 + Offset int32 + Encrypt_type int32 + Encrypt_key_size int32 + Flags int32 + Name [64]uint8 + Encrypt_key [32]uint8 + Init [2]uint64 + Reserved [4]uint8 + _ [4]byte +} +type LoopInfo64 struct { + Device uint64 + Inode uint64 + Rdevice uint64 + Offset uint64 + Sizelimit uint64 + Number uint32 + Encrypt_type uint32 + Encrypt_key_size uint32 + Flags uint32 + File_name [64]uint8 + Crypt_name [64]uint8 + Encrypt_key [32]uint8 + Init [2]uint64 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go index 90bf5dcc7..d8fc0bc1c 100644 --- 
a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go
@@ -2500,3 +2500,41 @@ const (
 	LINUX_CAPABILITY_VERSION_2 = 0x20071026
 	LINUX_CAPABILITY_VERSION_3 = 0x20080522
 )
+
+const (
+	LO_FLAGS_READ_ONLY = 0x1
+	LO_FLAGS_AUTOCLEAR = 0x4
+	LO_FLAGS_PARTSCAN  = 0x8
+	LO_FLAGS_DIRECT_IO = 0x10
+)
+
+type LoopInfo struct {
+	Number           int32
+	Device           uint16
+	Inode            uint64
+	Rdevice          uint16
+	Offset           int32
+	Encrypt_type     int32
+	Encrypt_key_size int32
+	Flags            int32
+	Name             [64]int8
+	Encrypt_key      [32]uint8
+	Init             [2]uint64
+	Reserved         [4]int8
+	_                [4]byte
+}
+type LoopInfo64 struct {
+	Device           uint64
+	Inode            uint64
+	Rdevice          uint64
+	Offset           uint64
+	Sizelimit        uint64
+	Number           uint32
+	Encrypt_type     uint32
+	Encrypt_key_size uint32
+	Flags            uint32
+	File_name        [64]uint8
+	Crypt_name       [64]uint8
+	Encrypt_key      [32]uint8
+	Init             [2]uint64
+}
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go
index 4f054dcbb..5e0ab9329 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go
@@ -2481,3 +2481,41 @@ const (
 	LINUX_CAPABILITY_VERSION_2 = 0x20071026
 	LINUX_CAPABILITY_VERSION_3 = 0x20080522
 )
+
+const (
+	LO_FLAGS_READ_ONLY = 0x1
+	LO_FLAGS_AUTOCLEAR = 0x4
+	LO_FLAGS_PARTSCAN  = 0x8
+	LO_FLAGS_DIRECT_IO = 0x10
+)
+
+type LoopInfo struct {
+	Number           int32
+	Device           uint32
+	Inode            uint64
+	Rdevice          uint32
+	Offset           int32
+	Encrypt_type     int32
+	Encrypt_key_size int32
+	Flags            int32
+	Name             [64]int8
+	Encrypt_key      [32]uint8
+	Init             [2]uint64
+	Reserved         [4]int8
+	_                [4]byte
+}
+type LoopInfo64 struct {
+	Device           uint64
+	Inode            uint64
+	Rdevice          uint64
+	Offset           uint64
+	Sizelimit        uint64
+	Number           uint32
+	Encrypt_type     uint32
+	Encrypt_key_size uint32
+	Flags            uint32
+	File_name        [64]uint8
+	Crypt_name       [64]uint8
+	Encrypt_key      [32]uint8
+	Init             [2]uint64
+}
diff --git a/vendor/golang.org/x/tools/go/packages/external.go b/vendor/golang.org/x/tools/go/packages/external.go
index 22ff769ef..b696b6870 100644
--- a/vendor/golang.org/x/tools/go/packages/external.go
+++ b/vendor/golang.org/x/tools/go/packages/external.go
@@ -16,14 +16,29 @@ import (
 	"strings"
 )
 
-// Driver
+// The Driver Protocol
+//
+// The driver, given the inputs to a call to Load, returns metadata about the packages specified.
+// This allows for different build systems to support go/packages by telling go/packages how the
+// packages' source is organized.
+// The driver is a binary, either specified by the GOPACKAGESDRIVER environment variable or in
+// the path as gopackagesdriver. It's given the inputs to load in its argv. See the package
+// documentation in doc.go for the full description of the patterns that need to be supported.
+// A driver receives as a JSON-serialized driverRequest struct in standard input and will
+// produce a JSON-serialized driverResponse (see definition in packages.go) in its standard output.
+
+// driverRequest is used to provide the portion of Load's Config that is needed by a driver.
 type driverRequest struct {
-	Command    string            `json:"command"`
-	Mode       LoadMode          `json:"mode"`
-	Env        []string          `json:"env"`
-	BuildFlags []string          `json:"build_flags"`
-	Tests      bool              `json:"tests"`
-	Overlay    map[string][]byte `json:"overlay"`
+	Mode LoadMode `json:"mode"`
+	// Env specifies the environment the underlying build system should be run in.
+	Env []string `json:"env"`
+	// BuildFlags are flags that should be passed to the underlying build system.
+	BuildFlags []string `json:"build_flags"`
+	// Tests specifies whether the patterns should also return test packages.
+	Tests bool `json:"tests"`
+	// Overlay maps file paths (relative to the driver's working directory) to the byte contents
+	// of overlay files.
+	Overlay map[string][]byte `json:"overlay"`
 }
 
 // findExternalDriver returns the file path of a tool that supplies
diff --git a/vendor/golang.org/x/tools/go/packages/golist.go b/vendor/golang.org/x/tools/go/packages/golist.go
index 00e21a755..44df8210d 100644
--- a/vendor/golang.org/x/tools/go/packages/golist.go
+++ b/vendor/golang.org/x/tools/go/packages/golist.go
@@ -316,9 +316,7 @@ func runNamedQueries(cfg *Config, driver driver, response *responseDeduper, quer
 	startWalk := time.Now()
 	gopathwalk.Walk(roots, add, gopathwalk.Options{ModulesEnabled: modRoot != "", Debug: debug})
-	if debug {
-		log.Printf("%v for walk", time.Since(startWalk))
-	}
+	cfg.Logf("%v for walk", time.Since(startWalk))
 
 	// Weird special case: the top-level package in a module will be in
 	// whatever directory the user checked the repository out into. It's
@@ -759,11 +757,9 @@ func invokeGo(cfg *Config, args ...string) (*bytes.Buffer, error) {
 	cmd.Dir = cfg.Dir
 	cmd.Stdout = stdout
 	cmd.Stderr = stderr
-	if debug {
-		defer func(start time.Time) {
-			log.Printf("%s for %v, stderr: <<%s>>\n", time.Since(start), cmdDebugStr(cmd, args...), stderr)
-		}(time.Now())
-	}
+	defer func(start time.Time) {
+		cfg.Logf("%s for %v, stderr: <<%s>>\n", time.Since(start), cmdDebugStr(cmd, args...), stderr)
+	}(time.Now())
 
 	if err := cmd.Run(); err != nil {
 		// Check for 'go' executable not being found.
diff --git a/vendor/golang.org/x/tools/go/packages/packages.go b/vendor/golang.org/x/tools/go/packages/packages.go
index f20e444f4..f1fe568f7 100644
--- a/vendor/golang.org/x/tools/go/packages/packages.go
+++ b/vendor/golang.org/x/tools/go/packages/packages.go
@@ -103,6 +103,12 @@ type Config struct {
 	// If Context is nil, the load cannot be cancelled.
 	Context context.Context
 
+	// Logf is the logger for the config.
+	// If the user provides a logger, debug logging is enabled.
+	// If the GOPACKAGESDEBUG environment variable is set to true,
+	// but the logger is nil, default to log.Printf.
+	Logf func(format string, args ...interface{})
+
 	// Dir is the directory in which to run the build system's query tool
 	// that provides information about the packages.
 	// If Dir is empty, the tool is run in the current directory.
@@ -429,6 +435,17 @@ func newLoader(cfg *Config) *loader {
 	}
 	if cfg != nil {
 		ld.Config = *cfg
+		// If the user has provided a logger, use it.
+		ld.Config.Logf = cfg.Logf
+	}
+	if ld.Config.Logf == nil {
+		// If the GOPACKAGESDEBUG environment variable is set to true,
+		// but the user has not provided a logger, default to log.Printf.
+		if debug {
+			ld.Config.Logf = log.Printf
+		} else {
+			ld.Config.Logf = func(format string, args ...interface{}) {}
+		}
 	}
 	if ld.Config.Mode == 0 {
 		ld.Config.Mode = NeedName | NeedFiles | NeedCompiledGoFiles // Preserve zero behavior of Mode for backwards compatibility.
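The Config.Logf plumbing above replaces the package-global debug checks in golist.go with a per-load logger. A minimal sketch of how a go/packages consumer could exercise the new field; the load pattern and the choice of log.Printf here are illustrative assumptions, not part of this patch:

	package main

	import (
		"fmt"
		"log"

		"golang.org/x/tools/go/packages"
	)

	func main() {
		cfg := &packages.Config{
			Mode: packages.NeedName | packages.NeedFiles,
			// Supplying Logf turns on debug logging for this load only.
			// If Logf is nil, logging stays off unless GOPACKAGESDEBUG=true,
			// in which case newLoader falls back to log.Printf (per the
			// hunk above).
			Logf: log.Printf,
		}
		pkgs, err := packages.Load(cfg, "golang.org/x/tools/go/packages")
		if err != nil {
			log.Fatal(err)
		}
		for _, p := range pkgs {
			fmt.Printf("%s: %d Go files\n", p.PkgPath, len(p.GoFiles))
		}
	}
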
diff --git a/vendor/golang.org/x/tools/internal/imports/fix.go b/vendor/golang.org/x/tools/internal/imports/fix.go
index 2c58d18be..72323f585 100644
--- a/vendor/golang.org/x/tools/internal/imports/fix.go
+++ b/vendor/golang.org/x/tools/internal/imports/fix.go
@@ -272,7 +272,7 @@ func (p *pass) loadPackageNames(imports []*importInfo) error {
 		unknown = append(unknown, imp.importPath)
 	}
 
-	names, err := p.env.getResolver().loadPackageNames(unknown, p.srcDir)
+	names, err := p.env.GetResolver().loadPackageNames(unknown, p.srcDir)
 	if err != nil {
 		return err
 	}
@@ -595,7 +595,7 @@ type ProcessEnv struct {
 	// Logf is the default logger for the ProcessEnv.
 	Logf func(format string, args ...interface{})
 
-	resolver resolver
+	resolver Resolver
 }
 
 func (e *ProcessEnv) env() []string {
@@ -617,7 +617,7 @@ func (e *ProcessEnv) env() []string {
 	return env
 }
 
-func (e *ProcessEnv) getResolver() resolver {
+func (e *ProcessEnv) GetResolver() Resolver {
 	if e.resolver != nil {
 		return e.resolver
 	}
@@ -631,7 +631,7 @@ func (e *ProcessEnv) getResolver() resolver {
 		e.resolver = &gopathResolver{env: e}
 		return e.resolver
 	}
-	e.resolver = &moduleResolver{env: e}
+	e.resolver = &ModuleResolver{env: e}
 	return e.resolver
 }
 
@@ -700,15 +700,15 @@ func addStdlibCandidates(pass *pass, refs references) {
 	}
 }
 
-// A resolver does the build-system-specific parts of goimports.
-type resolver interface {
+// A Resolver does the build-system-specific parts of goimports.
+type Resolver interface {
 	// loadPackageNames loads the package names in importPaths.
 	loadPackageNames(importPaths []string, srcDir string) (map[string]string, error)
 	// scan finds (at least) the packages satisfying refs. The returned slice is unordered.
 	scan(refs references) ([]*pkg, error)
 }
 
-// gopathResolver implements resolver for GOPATH and module workspaces using go/packages.
+// gopackagesResolver implements resolver for GOPATH and module workspaces using go/packages.
 type goPackagesResolver struct {
 	env *ProcessEnv
 }
@@ -758,7 +758,7 @@ func (r *goPackagesResolver) scan(refs references) ([]*pkg, error) {
 }
 
 func addExternalCandidates(pass *pass, refs references, filename string) error {
-	dirScan, err := pass.env.getResolver().scan(refs)
+	dirScan, err := pass.env.GetResolver().scan(refs)
 	if err != nil {
 		return err
 	}
diff --git a/vendor/golang.org/x/tools/internal/imports/mod.go b/vendor/golang.org/x/tools/internal/imports/mod.go
index a0ebd0737..3d685332d 100644
--- a/vendor/golang.org/x/tools/internal/imports/mod.go
+++ b/vendor/golang.org/x/tools/internal/imports/mod.go
@@ -18,37 +18,37 @@ import (
 	"golang.org/x/tools/internal/module"
 )
 
-// moduleResolver implements resolver for modules using the go command as little
+// ModuleResolver implements resolver for modules using the go command as little
 // as feasible.
-type moduleResolver struct {
+type ModuleResolver struct {
 	env *ProcessEnv
 
-	initialized bool
-	main *moduleJSON
-	modsByModPath []*moduleJSON // All modules, ordered by # of path components in module Path...
-	modsByDir []*moduleJSON // ...or Dir.
+	Initialized bool
+	Main *ModuleJSON
+	ModsByModPath []*ModuleJSON // All modules, ordered by # of path components in module Path...
+	ModsByDir []*ModuleJSON // ...or Dir.
 }
 
-type moduleJSON struct {
+type ModuleJSON struct {
 	Path string // module path
 	Version string // module version
 	Versions []string // available module versions (with -versions)
-	Replace *moduleJSON // replaced by this module
+	Replace *ModuleJSON // replaced by this module
 	Time *time.Time // time version was created
-	Update *moduleJSON // available update, if any (with -u)
+	Update *ModuleJSON // available update, if any (with -u)
 	Main bool // is this the main module?
 	Indirect bool // is this module only an indirect dependency of main module?
 	Dir string // directory holding files for this module, if any
 	GoMod string // path to go.mod file for this module, if any
-	Error *moduleErrorJSON // error loading module
+	Error *ModuleErrorJSON // error loading module
 }
 
-type moduleErrorJSON struct {
+type ModuleErrorJSON struct {
 	Err string // the error itself
 }
 
-func (r *moduleResolver) init() error {
-	if r.initialized {
+func (r *ModuleResolver) init() error {
+	if r.Initialized {
 		return nil
 	}
 	stdout, err := r.env.invokeGo("list", "-m", "-json", "...")
@@ -56,7 +56,7 @@ func (r *moduleResolver) init() error {
 		return err
 	}
 	for dec := json.NewDecoder(stdout); dec.More(); {
-		mod := &moduleJSON{}
+		mod := &ModuleJSON{}
 		if err := dec.Decode(mod); err != nil {
 			return err
 		}
@@ -67,34 +67,34 @@
 			// Can't do anything with a module that's not downloaded.
 			continue
 		}
-		r.modsByModPath = append(r.modsByModPath, mod)
-		r.modsByDir = append(r.modsByDir, mod)
+		r.ModsByModPath = append(r.ModsByModPath, mod)
+		r.ModsByDir = append(r.ModsByDir, mod)
 		if mod.Main {
-			r.main = mod
+			r.Main = mod
 		}
 	}
 
-	sort.Slice(r.modsByModPath, func(i, j int) bool {
+	sort.Slice(r.ModsByModPath, func(i, j int) bool {
 		count := func(x int) int {
-			return strings.Count(r.modsByModPath[x].Path, "/")
+			return strings.Count(r.ModsByModPath[x].Path, "/")
 		}
 		return count(j) < count(i) // descending order
 	})
-	sort.Slice(r.modsByDir, func(i, j int) bool {
+	sort.Slice(r.ModsByDir, func(i, j int) bool {
 		count := func(x int) int {
-			return strings.Count(r.modsByDir[x].Dir, "/")
+			return strings.Count(r.ModsByDir[x].Dir, "/")
 		}
 		return count(j) < count(i) // descending order
 	})
 
-	r.initialized = true
+	r.Initialized = true
 	return nil
 }
 
 // findPackage returns the module and directory that contains the package at
 // the given import path, or returns nil, "" if no module is in scope.
-func (r *moduleResolver) findPackage(importPath string) (*moduleJSON, string) {
-	for _, m := range r.modsByModPath {
+func (r *ModuleResolver) findPackage(importPath string) (*ModuleJSON, string) {
+	for _, m := range r.ModsByModPath {
 		if !strings.HasPrefix(importPath, m.Path) {
 			continue
 		}
@@ -123,7 +123,7 @@ func (r *moduleResolver) findPackage(importPath string) (*moduleJSON, string) {
 
 // findModuleByDir returns the module that contains dir, or nil if no such
 // module is in scope.
-func (r *moduleResolver) findModuleByDir(dir string) *moduleJSON {
+func (r *ModuleResolver) findModuleByDir(dir string) *ModuleJSON {
 	// This is quite tricky and may not be correct. dir could be:
 	// - a package in the main module.
 	// - a replace target underneath the main module's directory.
@@ -134,7 +134,7 @@ func (r *moduleResolver) findModuleByDir(dir string) *moduleJSON {
 	// - in /vendor/ in -mod=vendor mode.
 	// - nested module? Dunno.
 	// Rumor has it that replace targets cannot contain other replace targets.
-	for _, m := range r.modsByDir {
+	for _, m := range r.ModsByDir {
 		if !strings.HasPrefix(dir, m.Dir) {
 			continue
 		}
@@ -150,7 +150,7 @@ func (r *moduleResolver) findModuleByDir(dir string) *moduleJSON {
 
 // dirIsNestedModule reports if dir is contained in a nested module underneath
 // mod, not actually in mod.
-func dirIsNestedModule(dir string, mod *moduleJSON) bool {
+func dirIsNestedModule(dir string, mod *ModuleJSON) bool {
 	if !strings.HasPrefix(dir, mod.Dir) {
 		return false
 	}
@@ -176,7 +176,7 @@ func findModFile(dir string) string {
 	}
 }
 
-func (r *moduleResolver) loadPackageNames(importPaths []string, srcDir string) (map[string]string, error) {
+func (r *ModuleResolver) loadPackageNames(importPaths []string, srcDir string) (map[string]string, error) {
 	if err := r.init(); err != nil {
 		return nil, err
 	}
@@ -195,7 +195,7 @@ (
 	return names, nil
 }
 
-func (r *moduleResolver) scan(_ references) ([]*pkg, error) {
+func (r *ModuleResolver) scan(_ references) ([]*pkg, error) {
 	if err := r.init(); err != nil {
 		return nil, err
 	}
@@ -204,15 +204,15 @@
 	roots := []gopathwalk.Root{
 		{filepath.Join(r.env.GOROOT, "/src"), gopathwalk.RootGOROOT},
 	}
-	if r.main != nil {
-		roots = append(roots, gopathwalk.Root{r.main.Dir, gopathwalk.RootCurrentModule})
+	if r.Main != nil {
+		roots = append(roots, gopathwalk.Root{r.Main.Dir, gopathwalk.RootCurrentModule})
 	}
 	for _, p := range filepath.SplitList(r.env.GOPATH) {
 		roots = append(roots, gopathwalk.Root{filepath.Join(p, "/pkg/mod"), gopathwalk.RootModuleCache})
 	}
 
 	// Walk replace targets, just in case they're not in any of the above.
-	for _, mod := range r.modsByModPath {
+	for _, mod := range r.ModsByModPath {
 		if mod.Replace != nil {
 			roots = append(roots, gopathwalk.Root{mod.Dir, gopathwalk.RootOther})
 		}
@@ -247,7 +247,7 @@
 		}
 		switch root.Type {
 		case gopathwalk.RootCurrentModule:
-			importPath = path.Join(r.main.Path, filepath.ToSlash(subdir))
+			importPath = path.Join(r.Main.Path, filepath.ToSlash(subdir))
 		case gopathwalk.RootModuleCache:
 			matches := modCacheRegexp.FindStringSubmatch(subdir)
 			modPath, err := module.DecodePath(filepath.ToSlash(matches[1]))
diff --git a/vendor/gonum.org/v1/gonum/.travis/deps.d/linux/01-deps.sh b/vendor/gonum.org/v1/gonum/.travis/deps.d/linux/01-deps.sh
deleted file mode 120000
index 920836a78..000000000
--- a/vendor/gonum.org/v1/gonum/.travis/deps.d/linux/01-deps.sh
+++ /dev/null
@@ -1 +0,0 @@
-../../script.d/deps.sh
\ No newline at end of file
diff --git a/vendor/gonum.org/v1/gonum/.travis/deps.d/osx/nothing.sh b/vendor/gonum.org/v1/gonum/.travis/deps.d/osx/nothing.sh
deleted file mode 120000
index 18b172824..000000000
--- a/vendor/gonum.org/v1/gonum/.travis/deps.d/osx/nothing.sh
+++ /dev/null
@@ -1 +0,0 @@
-../../script.d/nothing.sh
\ No newline at end of file
diff --git a/vendor/gonum.org/v1/gonum/.travis/deps.d/windows/nothing.sh b/vendor/gonum.org/v1/gonum/.travis/deps.d/windows/nothing.sh
deleted file mode 120000
index 18b172824..000000000
--- a/vendor/gonum.org/v1/gonum/.travis/deps.d/windows/nothing.sh
+++ /dev/null
@@ -1 +0,0 @@
-../../script.d/nothing.sh
\ No newline at end of file
diff --git a/vendor/gonum.org/v1/gonum/.travis/run.d/linux/01-check-copyright.sh b/vendor/gonum.org/v1/gonum/.travis/run.d/linux/01-check-copyright.sh
deleted file mode 120000
index 31835de71..000000000
---
a/vendor/gonum.org/v1/gonum/.travis/run.d/linux/01-check-copyright.sh +++ /dev/null @@ -1 +0,0 @@ -../../script.d/check-copyright.sh \ No newline at end of file diff --git a/vendor/gonum.org/v1/gonum/.travis/run.d/linux/02-check-imports.sh b/vendor/gonum.org/v1/gonum/.travis/run.d/linux/02-check-imports.sh deleted file mode 120000 index 2eb0ad5f0..000000000 --- a/vendor/gonum.org/v1/gonum/.travis/run.d/linux/02-check-imports.sh +++ /dev/null @@ -1 +0,0 @@ -../../script.d/check-imports.sh \ No newline at end of file diff --git a/vendor/gonum.org/v1/gonum/.travis/run.d/linux/03-check-formatting.sh b/vendor/gonum.org/v1/gonum/.travis/run.d/linux/03-check-formatting.sh deleted file mode 120000 index e82d24d87..000000000 --- a/vendor/gonum.org/v1/gonum/.travis/run.d/linux/03-check-formatting.sh +++ /dev/null @@ -1 +0,0 @@ -../../script.d/check-formatting.sh \ No newline at end of file diff --git a/vendor/gonum.org/v1/gonum/.travis/run.d/linux/04-test.sh b/vendor/gonum.org/v1/gonum/.travis/run.d/linux/04-test.sh deleted file mode 120000 index b86e9405d..000000000 --- a/vendor/gonum.org/v1/gonum/.travis/run.d/linux/04-test.sh +++ /dev/null @@ -1 +0,0 @@ -../../script.d/test.sh \ No newline at end of file diff --git a/vendor/gonum.org/v1/gonum/.travis/run.d/linux/05-test-coverage.sh b/vendor/gonum.org/v1/gonum/.travis/run.d/linux/05-test-coverage.sh deleted file mode 120000 index 3557f725d..000000000 --- a/vendor/gonum.org/v1/gonum/.travis/run.d/linux/05-test-coverage.sh +++ /dev/null @@ -1 +0,0 @@ -../../script.d/test-coverage.sh \ No newline at end of file diff --git a/vendor/gonum.org/v1/gonum/.travis/run.d/linux/06-check-generate.sh b/vendor/gonum.org/v1/gonum/.travis/run.d/linux/06-check-generate.sh deleted file mode 120000 index 3131460b4..000000000 --- a/vendor/gonum.org/v1/gonum/.travis/run.d/linux/06-check-generate.sh +++ /dev/null @@ -1 +0,0 @@ -../../script.d/check-generate.sh \ No newline at end of file diff --git a/vendor/gonum.org/v1/gonum/.travis/run.d/osx/01-test.sh b/vendor/gonum.org/v1/gonum/.travis/run.d/osx/01-test.sh deleted file mode 120000 index b86e9405d..000000000 --- a/vendor/gonum.org/v1/gonum/.travis/run.d/osx/01-test.sh +++ /dev/null @@ -1 +0,0 @@ -../../script.d/test.sh \ No newline at end of file diff --git a/vendor/gonum.org/v1/gonum/AUTHORS b/vendor/gonum.org/v1/gonum/AUTHORS deleted file mode 100644 index 7a60a67c3..000000000 --- a/vendor/gonum.org/v1/gonum/AUTHORS +++ /dev/null @@ -1,92 +0,0 @@ -# This is the official list of gonum authors for copyright purposes. -# This file is distinct from the CONTRIBUTORS files. -# See the latter for an explanation. - -# Names should be added to this file as -# Name or Organization -# The email address is not required for organizations. - -# Please keep the list sorted. - -Alexander Egurnov -Bill Gray -Bill Noon -Brendan Tracey -Brent Pedersen -Chad Kunde -Chih-Wei Chang -Chris Tessum -Christophe Meessen -Clayton Northey -Dan Kortschak -Daniel Fireman -David Samborski -Davor Kapsa -DeepMind Technologies -Delaney Gillilan -Dezmond Goff -Dong-hee Na -Egon Elbre -Ekaterina Efimova -Ethan Burns -Evert Lammerts -Facundo Gaich -Fazlul Shahriar -Francesc Campoy -Google Inc -Gustaf Johansson -Iakov Davydov -Igor Mikushkin -Iskander Sharipov -Jalem Raj Rohit -James Bell -James Bowman -James Holmes <32bitkid@gmail.com> -Janne Snabb -Jeff Juozapaitis -Jeremy Atkinson -Jonas Kahler -Jonas Schulze -Jonathan J Lawlor -Jonathan Reiter -Jonathan Schroeder -Joseph Watson -Josh Wilson -Julien Roland -Kai Trukenmüller -Kent English -Kevin C. 
Zimmerman -Kirill Motkov -Konstantin Shaposhnikov -Leonid Kneller -Lyron Winderbaum -Martin Diz -Matthieu Di Mercurio -Max Halford -MinJae Kwon -Nick Potts -Olivier Wulveryck -Or Rikon -Pontus Melke -Renée French -Rishi Desai -Robin Eklind -Sam Zaydel -Samuel Kelemen -Saran Ahluwalia -Scott Holden -Sebastien Binet -Shawn Smith -source{d} -Spencer Lyon -Steve McCoy -Taesu Pyo -Takeshi Yoneda -The University of Adelaide -The University of Minnesota -The University of Washington -Thomas Berg -Tobin Harding -Vincent Thiery -Vladimír Chalupecký -Yevgeniy Vahlis diff --git a/vendor/gonum.org/v1/gonum/CONTRIBUTORS b/vendor/gonum.org/v1/gonum/CONTRIBUTORS deleted file mode 100644 index 1734ca4e9..000000000 --- a/vendor/gonum.org/v1/gonum/CONTRIBUTORS +++ /dev/null @@ -1,94 +0,0 @@ -# This is the official list of people who can contribute -# (and typically have contributed) code to the gonum -# repository. -# -# The AUTHORS file lists the copyright holders; this file -# lists people. For example, Google employees would be listed here -# but not in AUTHORS, because Google would hold the copyright. -# -# When adding J Random Contributor's name to this file, -# either J's name or J's organization's name should be -# added to the AUTHORS file. -# -# Names should be added to this file like so: -# Name -# -# Please keep the list sorted. - -Alexander Egurnov -Andrew Brampton -Bill Gray -Bill Noon -Brendan Tracey -Brent Pedersen -Chad Kunde -Chih-Wei Chang -Chris Tessum -Christophe Meessen -Clayton Northey -Dan Kortschak -Daniel Fireman -David Samborski -Davor Kapsa -Delaney Gillilan -Dezmond Goff -Dong-hee Na -Egon Elbre -Ekaterina Efimova -Ethan Burns -Evert Lammerts -Facundo Gaich -Fazlul Shahriar -Francesc Campoy -Gustaf Johansson -Iakov Davydov -Igor Mikushkin -Iskander Sharipov -Jalem Raj Rohit -James Bell -James Bowman -James Holmes <32bitkid@gmail.com> -Janne Snabb -Jeff Juozapaitis -Jeremy Atkinson -Jonas Kahler -Jonas Schulze -Jonathan J Lawlor -Jonathan Reiter -Jonathan Schroeder -Joseph Watson -Josh Wilson -Julien Roland -Kai Trukenmüller -Kent English -Kevin C. Zimmerman -Kirill Motkov -Konstantin Shaposhnikov -Leonid Kneller -Lyron Winderbaum -Martin Diz -Matthieu Di Mercurio -Max Halford -MinJae Kwon -Nick Potts -Olivier Wulveryck -Or Rikon -Pontus Melke -Renée French -Rishi Desai -Robin Eklind -Sam Zaydel -Samuel Kelemen -Saran Ahluwalia -Scott Holden -Sebastien Binet -Shawn Smith -Spencer Lyon -Steve McCoy -Taesu Pyo -Takeshi Yoneda -Thomas Berg -Tobin Harding -Vincent Thiery -Vladimír Chalupecký -Yevgeniy Vahlis diff --git a/vendor/gonum.org/v1/gonum/LICENSE b/vendor/gonum.org/v1/gonum/LICENSE deleted file mode 100644 index 5f1c3f9cc..000000000 --- a/vendor/gonum.org/v1/gonum/LICENSE +++ /dev/null @@ -1,23 +0,0 @@ -Copyright ©2013 The Gonum Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - * Neither the name of the gonum project nor the names of its authors and - contributors may be used to endorse or promote products derived from this - software without specific prior written permission. 
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/vendor/gonum.org/v1/gonum/blas/blas.go b/vendor/gonum.org/v1/gonum/blas/blas.go deleted file mode 100644 index 9b933e3fc..000000000 --- a/vendor/gonum.org/v1/gonum/blas/blas.go +++ /dev/null @@ -1,283 +0,0 @@ -// Copyright ©2013 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:generate ./conversions.bash - -package blas - -// Flag constants indicate Givens transformation H matrix state. -type Flag int - -const ( - Identity Flag = -2 // H is the identity matrix; no rotation is needed. - Rescaling Flag = -1 // H specifies rescaling. - OffDiagonal Flag = 0 // Off-diagonal elements of H are non-unit. - Diagonal Flag = 1 // Diagonal elements of H are non-unit. -) - -// SrotmParams contains Givens transformation parameters returned -// by the Float32 Srotm method. -type SrotmParams struct { - Flag - H [4]float32 // Column-major 2 by 2 matrix. -} - -// DrotmParams contains Givens transformation parameters returned -// by the Float64 Drotm method. -type DrotmParams struct { - Flag - H [4]float64 // Column-major 2 by 2 matrix. -} - -// Transpose specifies the transposition operation of a matrix. -type Transpose byte - -const ( - NoTrans Transpose = 'N' - Trans Transpose = 'T' - ConjTrans Transpose = 'C' -) - -// Uplo specifies whether a matrix is upper or lower triangular. -type Uplo byte - -const ( - Upper Uplo = 'U' - Lower Uplo = 'L' - All Uplo = 'A' -) - -// Diag specifies whether a matrix is unit triangular. -type Diag byte - -const ( - NonUnit Diag = 'N' - Unit Diag = 'U' -) - -// Side specifies from which side a multiplication operation is performed. -type Side byte - -const ( - Left Side = 'L' - Right Side = 'R' -) - -// Float32 implements the single precision real BLAS routines. -type Float32 interface { - Float32Level1 - Float32Level2 - Float32Level3 -} - -// Float32Level1 implements the single precision real BLAS Level 1 routines. 
-type Float32Level1 interface { - Sdsdot(n int, alpha float32, x []float32, incX int, y []float32, incY int) float32 - Dsdot(n int, x []float32, incX int, y []float32, incY int) float64 - Sdot(n int, x []float32, incX int, y []float32, incY int) float32 - Snrm2(n int, x []float32, incX int) float32 - Sasum(n int, x []float32, incX int) float32 - Isamax(n int, x []float32, incX int) int - Sswap(n int, x []float32, incX int, y []float32, incY int) - Scopy(n int, x []float32, incX int, y []float32, incY int) - Saxpy(n int, alpha float32, x []float32, incX int, y []float32, incY int) - Srotg(a, b float32) (c, s, r, z float32) - Srotmg(d1, d2, b1, b2 float32) (p SrotmParams, rd1, rd2, rb1 float32) - Srot(n int, x []float32, incX int, y []float32, incY int, c, s float32) - Srotm(n int, x []float32, incX int, y []float32, incY int, p SrotmParams) - Sscal(n int, alpha float32, x []float32, incX int) -} - -// Float32Level2 implements the single precision real BLAS Level 2 routines. -type Float32Level2 interface { - Sgemv(tA Transpose, m, n int, alpha float32, a []float32, lda int, x []float32, incX int, beta float32, y []float32, incY int) - Sgbmv(tA Transpose, m, n, kL, kU int, alpha float32, a []float32, lda int, x []float32, incX int, beta float32, y []float32, incY int) - Strmv(ul Uplo, tA Transpose, d Diag, n int, a []float32, lda int, x []float32, incX int) - Stbmv(ul Uplo, tA Transpose, d Diag, n, k int, a []float32, lda int, x []float32, incX int) - Stpmv(ul Uplo, tA Transpose, d Diag, n int, ap []float32, x []float32, incX int) - Strsv(ul Uplo, tA Transpose, d Diag, n int, a []float32, lda int, x []float32, incX int) - Stbsv(ul Uplo, tA Transpose, d Diag, n, k int, a []float32, lda int, x []float32, incX int) - Stpsv(ul Uplo, tA Transpose, d Diag, n int, ap []float32, x []float32, incX int) - Ssymv(ul Uplo, n int, alpha float32, a []float32, lda int, x []float32, incX int, beta float32, y []float32, incY int) - Ssbmv(ul Uplo, n, k int, alpha float32, a []float32, lda int, x []float32, incX int, beta float32, y []float32, incY int) - Sspmv(ul Uplo, n int, alpha float32, ap []float32, x []float32, incX int, beta float32, y []float32, incY int) - Sger(m, n int, alpha float32, x []float32, incX int, y []float32, incY int, a []float32, lda int) - Ssyr(ul Uplo, n int, alpha float32, x []float32, incX int, a []float32, lda int) - Sspr(ul Uplo, n int, alpha float32, x []float32, incX int, ap []float32) - Ssyr2(ul Uplo, n int, alpha float32, x []float32, incX int, y []float32, incY int, a []float32, lda int) - Sspr2(ul Uplo, n int, alpha float32, x []float32, incX int, y []float32, incY int, a []float32) -} - -// Float32Level3 implements the single precision real BLAS Level 3 routines. 
-type Float32Level3 interface { - Sgemm(tA, tB Transpose, m, n, k int, alpha float32, a []float32, lda int, b []float32, ldb int, beta float32, c []float32, ldc int) - Ssymm(s Side, ul Uplo, m, n int, alpha float32, a []float32, lda int, b []float32, ldb int, beta float32, c []float32, ldc int) - Ssyrk(ul Uplo, t Transpose, n, k int, alpha float32, a []float32, lda int, beta float32, c []float32, ldc int) - Ssyr2k(ul Uplo, t Transpose, n, k int, alpha float32, a []float32, lda int, b []float32, ldb int, beta float32, c []float32, ldc int) - Strmm(s Side, ul Uplo, tA Transpose, d Diag, m, n int, alpha float32, a []float32, lda int, b []float32, ldb int) - Strsm(s Side, ul Uplo, tA Transpose, d Diag, m, n int, alpha float32, a []float32, lda int, b []float32, ldb int) -} - -// Float64 implements the double precision real BLAS routines. -type Float64 interface { - Float64Level1 - Float64Level2 - Float64Level3 -} - -// Float64Level1 implements the double precision real BLAS Level 1 routines. -type Float64Level1 interface { - Ddot(n int, x []float64, incX int, y []float64, incY int) float64 - Dnrm2(n int, x []float64, incX int) float64 - Dasum(n int, x []float64, incX int) float64 - Idamax(n int, x []float64, incX int) int - Dswap(n int, x []float64, incX int, y []float64, incY int) - Dcopy(n int, x []float64, incX int, y []float64, incY int) - Daxpy(n int, alpha float64, x []float64, incX int, y []float64, incY int) - Drotg(a, b float64) (c, s, r, z float64) - Drotmg(d1, d2, b1, b2 float64) (p DrotmParams, rd1, rd2, rb1 float64) - Drot(n int, x []float64, incX int, y []float64, incY int, c float64, s float64) - Drotm(n int, x []float64, incX int, y []float64, incY int, p DrotmParams) - Dscal(n int, alpha float64, x []float64, incX int) -} - -// Float64Level2 implements the double precision real BLAS Level 2 routines. 
-type Float64Level2 interface { - Dgemv(tA Transpose, m, n int, alpha float64, a []float64, lda int, x []float64, incX int, beta float64, y []float64, incY int) - Dgbmv(tA Transpose, m, n, kL, kU int, alpha float64, a []float64, lda int, x []float64, incX int, beta float64, y []float64, incY int) - Dtrmv(ul Uplo, tA Transpose, d Diag, n int, a []float64, lda int, x []float64, incX int) - Dtbmv(ul Uplo, tA Transpose, d Diag, n, k int, a []float64, lda int, x []float64, incX int) - Dtpmv(ul Uplo, tA Transpose, d Diag, n int, ap []float64, x []float64, incX int) - Dtrsv(ul Uplo, tA Transpose, d Diag, n int, a []float64, lda int, x []float64, incX int) - Dtbsv(ul Uplo, tA Transpose, d Diag, n, k int, a []float64, lda int, x []float64, incX int) - Dtpsv(ul Uplo, tA Transpose, d Diag, n int, ap []float64, x []float64, incX int) - Dsymv(ul Uplo, n int, alpha float64, a []float64, lda int, x []float64, incX int, beta float64, y []float64, incY int) - Dsbmv(ul Uplo, n, k int, alpha float64, a []float64, lda int, x []float64, incX int, beta float64, y []float64, incY int) - Dspmv(ul Uplo, n int, alpha float64, ap []float64, x []float64, incX int, beta float64, y []float64, incY int) - Dger(m, n int, alpha float64, x []float64, incX int, y []float64, incY int, a []float64, lda int) - Dsyr(ul Uplo, n int, alpha float64, x []float64, incX int, a []float64, lda int) - Dspr(ul Uplo, n int, alpha float64, x []float64, incX int, ap []float64) - Dsyr2(ul Uplo, n int, alpha float64, x []float64, incX int, y []float64, incY int, a []float64, lda int) - Dspr2(ul Uplo, n int, alpha float64, x []float64, incX int, y []float64, incY int, a []float64) -} - -// Float64Level3 implements the double precision real BLAS Level 3 routines. -type Float64Level3 interface { - Dgemm(tA, tB Transpose, m, n, k int, alpha float64, a []float64, lda int, b []float64, ldb int, beta float64, c []float64, ldc int) - Dsymm(s Side, ul Uplo, m, n int, alpha float64, a []float64, lda int, b []float64, ldb int, beta float64, c []float64, ldc int) - Dsyrk(ul Uplo, t Transpose, n, k int, alpha float64, a []float64, lda int, beta float64, c []float64, ldc int) - Dsyr2k(ul Uplo, t Transpose, n, k int, alpha float64, a []float64, lda int, b []float64, ldb int, beta float64, c []float64, ldc int) - Dtrmm(s Side, ul Uplo, tA Transpose, d Diag, m, n int, alpha float64, a []float64, lda int, b []float64, ldb int) - Dtrsm(s Side, ul Uplo, tA Transpose, d Diag, m, n int, alpha float64, a []float64, lda int, b []float64, ldb int) -} - -// Complex64 implements the single precision complex BLAS routines. -type Complex64 interface { - Complex64Level1 - Complex64Level2 - Complex64Level3 -} - -// Complex64Level1 implements the single precision complex BLAS Level 1 routines. -type Complex64Level1 interface { - Cdotu(n int, x []complex64, incX int, y []complex64, incY int) (dotu complex64) - Cdotc(n int, x []complex64, incX int, y []complex64, incY int) (dotc complex64) - Scnrm2(n int, x []complex64, incX int) float32 - Scasum(n int, x []complex64, incX int) float32 - Icamax(n int, x []complex64, incX int) int - Cswap(n int, x []complex64, incX int, y []complex64, incY int) - Ccopy(n int, x []complex64, incX int, y []complex64, incY int) - Caxpy(n int, alpha complex64, x []complex64, incX int, y []complex64, incY int) - Cscal(n int, alpha complex64, x []complex64, incX int) - Csscal(n int, alpha float32, x []complex64, incX int) -} - -// Complex64Level2 implements the single precision complex BLAS Level 2 routines. 
-type Complex64Level2 interface { - Cgemv(tA Transpose, m, n int, alpha complex64, a []complex64, lda int, x []complex64, incX int, beta complex64, y []complex64, incY int) - Cgbmv(tA Transpose, m, n, kL, kU int, alpha complex64, a []complex64, lda int, x []complex64, incX int, beta complex64, y []complex64, incY int) - Ctrmv(ul Uplo, tA Transpose, d Diag, n int, a []complex64, lda int, x []complex64, incX int) - Ctbmv(ul Uplo, tA Transpose, d Diag, n, k int, a []complex64, lda int, x []complex64, incX int) - Ctpmv(ul Uplo, tA Transpose, d Diag, n int, ap []complex64, x []complex64, incX int) - Ctrsv(ul Uplo, tA Transpose, d Diag, n int, a []complex64, lda int, x []complex64, incX int) - Ctbsv(ul Uplo, tA Transpose, d Diag, n, k int, a []complex64, lda int, x []complex64, incX int) - Ctpsv(ul Uplo, tA Transpose, d Diag, n int, ap []complex64, x []complex64, incX int) - Chemv(ul Uplo, n int, alpha complex64, a []complex64, lda int, x []complex64, incX int, beta complex64, y []complex64, incY int) - Chbmv(ul Uplo, n, k int, alpha complex64, a []complex64, lda int, x []complex64, incX int, beta complex64, y []complex64, incY int) - Chpmv(ul Uplo, n int, alpha complex64, ap []complex64, x []complex64, incX int, beta complex64, y []complex64, incY int) - Cgeru(m, n int, alpha complex64, x []complex64, incX int, y []complex64, incY int, a []complex64, lda int) - Cgerc(m, n int, alpha complex64, x []complex64, incX int, y []complex64, incY int, a []complex64, lda int) - Cher(ul Uplo, n int, alpha float32, x []complex64, incX int, a []complex64, lda int) - Chpr(ul Uplo, n int, alpha float32, x []complex64, incX int, a []complex64) - Cher2(ul Uplo, n int, alpha complex64, x []complex64, incX int, y []complex64, incY int, a []complex64, lda int) - Chpr2(ul Uplo, n int, alpha complex64, x []complex64, incX int, y []complex64, incY int, ap []complex64) -} - -// Complex64Level3 implements the single precision complex BLAS Level 3 routines. -type Complex64Level3 interface { - Cgemm(tA, tB Transpose, m, n, k int, alpha complex64, a []complex64, lda int, b []complex64, ldb int, beta complex64, c []complex64, ldc int) - Csymm(s Side, ul Uplo, m, n int, alpha complex64, a []complex64, lda int, b []complex64, ldb int, beta complex64, c []complex64, ldc int) - Csyrk(ul Uplo, t Transpose, n, k int, alpha complex64, a []complex64, lda int, beta complex64, c []complex64, ldc int) - Csyr2k(ul Uplo, t Transpose, n, k int, alpha complex64, a []complex64, lda int, b []complex64, ldb int, beta complex64, c []complex64, ldc int) - Ctrmm(s Side, ul Uplo, tA Transpose, d Diag, m, n int, alpha complex64, a []complex64, lda int, b []complex64, ldb int) - Ctrsm(s Side, ul Uplo, tA Transpose, d Diag, m, n int, alpha complex64, a []complex64, lda int, b []complex64, ldb int) - Chemm(s Side, ul Uplo, m, n int, alpha complex64, a []complex64, lda int, b []complex64, ldb int, beta complex64, c []complex64, ldc int) - Cherk(ul Uplo, t Transpose, n, k int, alpha float32, a []complex64, lda int, beta float32, c []complex64, ldc int) - Cher2k(ul Uplo, t Transpose, n, k int, alpha complex64, a []complex64, lda int, b []complex64, ldb int, beta float32, c []complex64, ldc int) -} - -// Complex128 implements the double precision complex BLAS routines. -type Complex128 interface { - Complex128Level1 - Complex128Level2 - Complex128Level3 -} - -// Complex128Level1 implements the double precision complex BLAS Level 1 routines. 
-type Complex128Level1 interface { - Zdotu(n int, x []complex128, incX int, y []complex128, incY int) (dotu complex128) - Zdotc(n int, x []complex128, incX int, y []complex128, incY int) (dotc complex128) - Dznrm2(n int, x []complex128, incX int) float64 - Dzasum(n int, x []complex128, incX int) float64 - Izamax(n int, x []complex128, incX int) int - Zswap(n int, x []complex128, incX int, y []complex128, incY int) - Zcopy(n int, x []complex128, incX int, y []complex128, incY int) - Zaxpy(n int, alpha complex128, x []complex128, incX int, y []complex128, incY int) - Zscal(n int, alpha complex128, x []complex128, incX int) - Zdscal(n int, alpha float64, x []complex128, incX int) -} - -// Complex128Level2 implements the double precision complex BLAS Level 2 routines. -type Complex128Level2 interface { - Zgemv(tA Transpose, m, n int, alpha complex128, a []complex128, lda int, x []complex128, incX int, beta complex128, y []complex128, incY int) - Zgbmv(tA Transpose, m, n int, kL int, kU int, alpha complex128, a []complex128, lda int, x []complex128, incX int, beta complex128, y []complex128, incY int) - Ztrmv(ul Uplo, tA Transpose, d Diag, n int, a []complex128, lda int, x []complex128, incX int) - Ztbmv(ul Uplo, tA Transpose, d Diag, n, k int, a []complex128, lda int, x []complex128, incX int) - Ztpmv(ul Uplo, tA Transpose, d Diag, n int, ap []complex128, x []complex128, incX int) - Ztrsv(ul Uplo, tA Transpose, d Diag, n int, a []complex128, lda int, x []complex128, incX int) - Ztbsv(ul Uplo, tA Transpose, d Diag, n, k int, a []complex128, lda int, x []complex128, incX int) - Ztpsv(ul Uplo, tA Transpose, d Diag, n int, ap []complex128, x []complex128, incX int) - Zhemv(ul Uplo, n int, alpha complex128, a []complex128, lda int, x []complex128, incX int, beta complex128, y []complex128, incY int) - Zhbmv(ul Uplo, n, k int, alpha complex128, a []complex128, lda int, x []complex128, incX int, beta complex128, y []complex128, incY int) - Zhpmv(ul Uplo, n int, alpha complex128, ap []complex128, x []complex128, incX int, beta complex128, y []complex128, incY int) - Zgeru(m, n int, alpha complex128, x []complex128, incX int, y []complex128, incY int, a []complex128, lda int) - Zgerc(m, n int, alpha complex128, x []complex128, incX int, y []complex128, incY int, a []complex128, lda int) - Zher(ul Uplo, n int, alpha float64, x []complex128, incX int, a []complex128, lda int) - Zhpr(ul Uplo, n int, alpha float64, x []complex128, incX int, a []complex128) - Zher2(ul Uplo, n int, alpha complex128, x []complex128, incX int, y []complex128, incY int, a []complex128, lda int) - Zhpr2(ul Uplo, n int, alpha complex128, x []complex128, incX int, y []complex128, incY int, ap []complex128) -} - -// Complex128Level3 implements the double precision complex BLAS Level 3 routines. 
-type Complex128Level3 interface { - Zgemm(tA, tB Transpose, m, n, k int, alpha complex128, a []complex128, lda int, b []complex128, ldb int, beta complex128, c []complex128, ldc int) - Zsymm(s Side, ul Uplo, m, n int, alpha complex128, a []complex128, lda int, b []complex128, ldb int, beta complex128, c []complex128, ldc int) - Zsyrk(ul Uplo, t Transpose, n, k int, alpha complex128, a []complex128, lda int, beta complex128, c []complex128, ldc int) - Zsyr2k(ul Uplo, t Transpose, n, k int, alpha complex128, a []complex128, lda int, b []complex128, ldb int, beta complex128, c []complex128, ldc int) - Ztrmm(s Side, ul Uplo, tA Transpose, d Diag, m, n int, alpha complex128, a []complex128, lda int, b []complex128, ldb int) - Ztrsm(s Side, ul Uplo, tA Transpose, d Diag, m, n int, alpha complex128, a []complex128, lda int, b []complex128, ldb int) - Zhemm(s Side, ul Uplo, m, n int, alpha complex128, a []complex128, lda int, b []complex128, ldb int, beta complex128, c []complex128, ldc int) - Zherk(ul Uplo, t Transpose, n, k int, alpha float64, a []complex128, lda int, beta float64, c []complex128, ldc int) - Zher2k(ul Uplo, t Transpose, n, k int, alpha complex128, a []complex128, lda int, b []complex128, ldb int, beta float64, c []complex128, ldc int) -} diff --git a/vendor/gonum.org/v1/gonum/blas/blas64/blas64.go b/vendor/gonum.org/v1/gonum/blas/blas64/blas64.go deleted file mode 100644 index 551983836..000000000 --- a/vendor/gonum.org/v1/gonum/blas/blas64/blas64.go +++ /dev/null @@ -1,469 +0,0 @@ -// Copyright ©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package blas64 - -import ( - "gonum.org/v1/gonum/blas" - "gonum.org/v1/gonum/blas/gonum" -) - -var blas64 blas.Float64 = gonum.Implementation{} - -// Use sets the BLAS float64 implementation to be used by subsequent BLAS calls. -// The default implementation is -// gonum.org/v1/gonum/blas/gonum.Implementation. -func Use(b blas.Float64) { - blas64 = b -} - -// Implementation returns the current BLAS float64 implementation. -// -// Implementation allows direct calls to the current BLAS float64 implementation -// giving finer control of parameters. -func Implementation() blas.Float64 { - return blas64 -} - -// Vector represents a vector with an associated element increment. -type Vector struct { - N int - Data []float64 - Inc int -} - -// General represents a matrix using the conventional storage scheme. -type General struct { - Rows, Cols int - Data []float64 - Stride int -} - -// Band represents a band matrix using the band storage scheme. -type Band struct { - Rows, Cols int - KL, KU int - Data []float64 - Stride int -} - -// Triangular represents a triangular matrix using the conventional storage scheme. -type Triangular struct { - Uplo blas.Uplo - Diag blas.Diag - N int - Data []float64 - Stride int -} - -// TriangularBand represents a triangular matrix using the band storage scheme. -type TriangularBand struct { - Uplo blas.Uplo - Diag blas.Diag - N, K int - Data []float64 - Stride int -} - -// TriangularPacked represents a triangular matrix using the packed storage scheme. -type TriangularPacked struct { - Uplo blas.Uplo - Diag blas.Diag - N int - Data []float64 -} - -// Symmetric represents a symmetric matrix using the conventional storage scheme. -type Symmetric struct { - Uplo blas.Uplo - N int - Data []float64 - Stride int -} - -// SymmetricBand represents a symmetric matrix using the band storage scheme. 
-type SymmetricBand struct { - Uplo blas.Uplo - N, K int - Data []float64 - Stride int -} - -// SymmetricPacked represents a symmetric matrix using the packed storage scheme. -type SymmetricPacked struct { - Uplo blas.Uplo - N int - Data []float64 -} - -// Level 1 - -const ( - negInc = "blas64: negative vector increment" - badLength = "blas64: vector length mismatch" -) - -// Dot computes the dot product of the two vectors: -// \sum_i x[i]*y[i]. -func Dot(x, y Vector) float64 { - if x.N != y.N { - panic(badLength) - } - return blas64.Ddot(x.N, x.Data, x.Inc, y.Data, y.Inc) -} - -// Nrm2 computes the Euclidean norm of the vector x: -// sqrt(\sum_i x[i]*x[i]). -// -// Nrm2 will panic if the vector increment is negative. -func Nrm2(x Vector) float64 { - if x.Inc < 0 { - panic(negInc) - } - return blas64.Dnrm2(x.N, x.Data, x.Inc) -} - -// Asum computes the sum of the absolute values of the elements of x: -// \sum_i |x[i]|. -// -// Asum will panic if the vector increment is negative. -func Asum(x Vector) float64 { - if x.Inc < 0 { - panic(negInc) - } - return blas64.Dasum(x.N, x.Data, x.Inc) -} - -// Iamax returns the index of an element of x with the largest absolute value. -// If there are multiple such indices the earliest is returned. -// Iamax returns -1 if n == 0. -// -// Iamax will panic if the vector increment is negative. -func Iamax(x Vector) int { - if x.Inc < 0 { - panic(negInc) - } - return blas64.Idamax(x.N, x.Data, x.Inc) -} - -// Swap exchanges the elements of the two vectors: -// x[i], y[i] = y[i], x[i] for all i. -func Swap(x, y Vector) { - if x.N != y.N { - panic(badLength) - } - blas64.Dswap(x.N, x.Data, x.Inc, y.Data, y.Inc) -} - -// Copy copies the elements of x into the elements of y: -// y[i] = x[i] for all i. -// Copy requires that the lengths of x and y match and will panic otherwise. -func Copy(x, y Vector) { - if x.N != y.N { - panic(badLength) - } - blas64.Dcopy(x.N, x.Data, x.Inc, y.Data, y.Inc) -} - -// Axpy adds x scaled by alpha to y: -// y[i] += alpha*x[i] for all i. -func Axpy(alpha float64, x, y Vector) { - if x.N != y.N { - panic(badLength) - } - blas64.Daxpy(x.N, alpha, x.Data, x.Inc, y.Data, y.Inc) -} - -// Rotg computes the parameters of a Givens plane rotation so that -// ⎡ c s⎤ ⎡a⎤ ⎡r⎤ -// ⎣-s c⎦ * ⎣b⎦ = ⎣0⎦ -// where a and b are the Cartesian coordinates of a given point. -// c, s, and r are defined as -// r = ±Sqrt(a^2 + b^2), -// c = a/r, the cosine of the rotation angle, -// s = b/r, the sine of the rotation angle, -// and z is defined such that -// if |a| > |b|, z = s, -// otherwise if c != 0, z = 1/c, -// otherwise z = 1. -func Rotg(a, b float64) (c, s, r, z float64) { - return blas64.Drotg(a, b) -} - -// Rotmg computes the modified Givens rotation. See -// http://www.netlib.org/lapack/explore-html/df/deb/drotmg_8f.html -// for more details. -func Rotmg(d1, d2, b1, b2 float64) (p blas.DrotmParams, rd1, rd2, rb1 float64) { - return blas64.Drotmg(d1, d2, b1, b2) -} - -// Rot applies a plane transformation to n points represented by the vectors x -// and y: -// x[i] = c*x[i] + s*y[i], -// y[i] = -s*x[i] + c*y[i], for all i. -func Rot(x, y Vector, c, s float64) { - if x.N != y.N { - panic(badLength) - } - blas64.Drot(x.N, x.Data, x.Inc, y.Data, y.Inc, c, s) -} - -// Rotm applies the modified Givens rotation to n points represented by the -// vectors x and y. 
-func Rotm(x, y Vector, p blas.DrotmParams) { - if x.N != y.N { - panic(badLength) - } - blas64.Drotm(x.N, x.Data, x.Inc, y.Data, y.Inc, p) -} - -// Scal scales the vector x by alpha: -// x[i] *= alpha for all i. -// -// Scal will panic if the vector increment is negative. -func Scal(alpha float64, x Vector) { - if x.Inc < 0 { - panic(negInc) - } - blas64.Dscal(x.N, alpha, x.Data, x.Inc) -} - -// Level 2 - -// Gemv computes -// y = alpha * A * x + beta * y, if t == blas.NoTrans, -// y = alpha * A^T * x + beta * y, if t == blas.Trans or blas.ConjTrans, -// where A is an m×n dense matrix, x and y are vectors, and alpha and beta are scalars. -func Gemv(t blas.Transpose, alpha float64, a General, x Vector, beta float64, y Vector) { - blas64.Dgemv(t, a.Rows, a.Cols, alpha, a.Data, a.Stride, x.Data, x.Inc, beta, y.Data, y.Inc) -} - -// Gbmv computes -// y = alpha * A * x + beta * y, if t == blas.NoTrans, -// y = alpha * A^T * x + beta * y, if t == blas.Trans or blas.ConjTrans, -// where A is an m×n band matrix, x and y are vectors, and alpha and beta are scalars. -func Gbmv(t blas.Transpose, alpha float64, a Band, x Vector, beta float64, y Vector) { - blas64.Dgbmv(t, a.Rows, a.Cols, a.KL, a.KU, alpha, a.Data, a.Stride, x.Data, x.Inc, beta, y.Data, y.Inc) -} - -// Trmv computes -// x = A * x, if t == blas.NoTrans, -// x = A^T * x, if t == blas.Trans or blas.ConjTrans, -// where A is an n×n triangular matrix, and x is a vector. -func Trmv(t blas.Transpose, a Triangular, x Vector) { - blas64.Dtrmv(a.Uplo, t, a.Diag, a.N, a.Data, a.Stride, x.Data, x.Inc) -} - -// Tbmv computes -// x = A * x, if t == blas.NoTrans, -// x = A^T * x, if t == blas.Trans or blas.ConjTrans, -// where A is an n×n triangular band matrix, and x is a vector. -func Tbmv(t blas.Transpose, a TriangularBand, x Vector) { - blas64.Dtbmv(a.Uplo, t, a.Diag, a.N, a.K, a.Data, a.Stride, x.Data, x.Inc) -} - -// Tpmv computes -// x = A * x, if t == blas.NoTrans, -// x = A^T * x, if t == blas.Trans or blas.ConjTrans, -// where A is an n×n triangular matrix in packed format, and x is a vector. -func Tpmv(t blas.Transpose, a TriangularPacked, x Vector) { - blas64.Dtpmv(a.Uplo, t, a.Diag, a.N, a.Data, x.Data, x.Inc) -} - -// Trsv solves -// A * x = b, if t == blas.NoTrans, -// A^T * x = b, if t == blas.Trans or blas.ConjTrans, -// where A is an n×n triangular matrix, and x and b are vectors. -// -// At entry to the function, x contains the values of b, and the result is -// stored in-place into x. -// -// No test for singularity or near-singularity is included in this -// routine. Such tests must be performed before calling this routine. -func Trsv(t blas.Transpose, a Triangular, x Vector) { - blas64.Dtrsv(a.Uplo, t, a.Diag, a.N, a.Data, a.Stride, x.Data, x.Inc) -} - -// Tbsv solves -// A * x = b, if t == blas.NoTrans, -// A^T * x = b, if t == blas.Trans or blas.ConjTrans, -// where A is an n×n triangular band matrix, and x and b are vectors. -// -// At entry to the function, x contains the values of b, and the result is -// stored in place into x. -// -// No test for singularity or near-singularity is included in this -// routine. Such tests must be performed before calling this routine. -func Tbsv(t blas.Transpose, a TriangularBand, x Vector) { - blas64.Dtbsv(a.Uplo, t, a.Diag, a.N, a.K, a.Data, a.Stride, x.Data, x.Inc) -} - -// Tpsv solves -// A * x = b, if t == blas.NoTrans, -// A^T * x = b, if t == blas.Trans or blas.ConjTrans, -// where A is an n×n triangular matrix in packed format, and x and b are -// vectors. 
-// -// At entry to the function, x contains the values of b, and the result is -// stored in place into x. -// -// No test for singularity or near-singularity is included in this -// routine. Such tests must be performed before calling this routine. -func Tpsv(t blas.Transpose, a TriangularPacked, x Vector) { - blas64.Dtpsv(a.Uplo, t, a.Diag, a.N, a.Data, x.Data, x.Inc) -} - -// Symv computes -// y = alpha * A * x + beta * y, -// where A is an n×n symmetric matrix, x and y are vectors, and alpha and -// beta are scalars. -func Symv(alpha float64, a Symmetric, x Vector, beta float64, y Vector) { - blas64.Dsymv(a.Uplo, a.N, alpha, a.Data, a.Stride, x.Data, x.Inc, beta, y.Data, y.Inc) -} - -// Sbmv performs -// y = alpha * A * x + beta * y, -// where A is an n×n symmetric band matrix, x and y are vectors, and alpha -// and beta are scalars. -func Sbmv(alpha float64, a SymmetricBand, x Vector, beta float64, y Vector) { - blas64.Dsbmv(a.Uplo, a.N, a.K, alpha, a.Data, a.Stride, x.Data, x.Inc, beta, y.Data, y.Inc) -} - -// Spmv performs -// y = alpha * A * x + beta * y, -// where A is an n×n symmetric matrix in packed format, x and y are vectors, -// and alpha and beta are scalars. -func Spmv(alpha float64, a SymmetricPacked, x Vector, beta float64, y Vector) { - blas64.Dspmv(a.Uplo, a.N, alpha, a.Data, x.Data, x.Inc, beta, y.Data, y.Inc) -} - -// Ger performs a rank-1 update -// A += alpha * x * y^T, -// where A is an m×n dense matrix, x and y are vectors, and alpha is a scalar. -func Ger(alpha float64, x, y Vector, a General) { - blas64.Dger(a.Rows, a.Cols, alpha, x.Data, x.Inc, y.Data, y.Inc, a.Data, a.Stride) -} - -// Syr performs a rank-1 update -// A += alpha * x * x^T, -// where A is an n×n symmetric matrix, x is a vector, and alpha is a scalar. -func Syr(alpha float64, x Vector, a Symmetric) { - blas64.Dsyr(a.Uplo, a.N, alpha, x.Data, x.Inc, a.Data, a.Stride) -} - -// Spr performs the rank-1 update -// A += alpha * x * x^T, -// where A is an n×n symmetric matrix in packed format, x is a vector, and -// alpha is a scalar. -func Spr(alpha float64, x Vector, a SymmetricPacked) { - blas64.Dspr(a.Uplo, a.N, alpha, x.Data, x.Inc, a.Data) -} - -// Syr2 performs a rank-2 update -// A += alpha * x * y^T + alpha * y * x^T, -// where A is a symmetric n×n matrix, x and y are vectors, and alpha is a scalar. -func Syr2(alpha float64, x, y Vector, a Symmetric) { - blas64.Dsyr2(a.Uplo, a.N, alpha, x.Data, x.Inc, y.Data, y.Inc, a.Data, a.Stride) -} - -// Spr2 performs a rank-2 update -// A += alpha * x * y^T + alpha * y * x^T, -// where A is an n×n symmetric matrix in packed format, x and y are vectors, -// and alpha is a scalar. -func Spr2(alpha float64, x, y Vector, a SymmetricPacked) { - blas64.Dspr2(a.Uplo, a.N, alpha, x.Data, x.Inc, y.Data, y.Inc, a.Data) -} - -// Level 3 - -// Gemm computes -// C = alpha * A * B + beta * C, -// where A, B, and C are dense matrices, and alpha and beta are scalars. -// tA and tB specify whether A or B are transposed. 
-func Gemm(tA, tB blas.Transpose, alpha float64, a, b General, beta float64, c General) { - var m, n, k int - if tA == blas.NoTrans { - m, k = a.Rows, a.Cols - } else { - m, k = a.Cols, a.Rows - } - if tB == blas.NoTrans { - n = b.Cols - } else { - n = b.Rows - } - blas64.Dgemm(tA, tB, m, n, k, alpha, a.Data, a.Stride, b.Data, b.Stride, beta, c.Data, c.Stride) -} - -// Symm performs -// C = alpha * A * B + beta * C, if s == blas.Left, -// C = alpha * B * A + beta * C, if s == blas.Right, -// where A is an n×n or m×m symmetric matrix, B and C are m×n matrices, and -// alpha is a scalar. -func Symm(s blas.Side, alpha float64, a Symmetric, b General, beta float64, c General) { - var m, n int - if s == blas.Left { - m, n = a.N, b.Cols - } else { - m, n = b.Rows, a.N - } - blas64.Dsymm(s, a.Uplo, m, n, alpha, a.Data, a.Stride, b.Data, b.Stride, beta, c.Data, c.Stride) -} - -// Syrk performs a symmetric rank-k update -// C = alpha * A * A^T + beta * C, if t == blas.NoTrans, -// C = alpha * A^T * A + beta * C, if t == blas.Trans or blas.ConjTrans, -// where C is an n×n symmetric matrix, A is an n×k matrix if t == blas.NoTrans and -// a k×n matrix otherwise, and alpha and beta are scalars. -func Syrk(t blas.Transpose, alpha float64, a General, beta float64, c Symmetric) { - var n, k int - if t == blas.NoTrans { - n, k = a.Rows, a.Cols - } else { - n, k = a.Cols, a.Rows - } - blas64.Dsyrk(c.Uplo, t, n, k, alpha, a.Data, a.Stride, beta, c.Data, c.Stride) -} - -// Syr2k performs a symmetric rank-2k update -// C = alpha * A * B^T + alpha * B * A^T + beta * C, if t == blas.NoTrans, -// C = alpha * A^T * B + alpha * B^T * A + beta * C, if t == blas.Trans or blas.ConjTrans, -// where C is an n×n symmetric matrix, A and B are n×k matrices if t == NoTrans -// and k×n matrices otherwise, and alpha and beta are scalars. -func Syr2k(t blas.Transpose, alpha float64, a, b General, beta float64, c Symmetric) { - var n, k int - if t == blas.NoTrans { - n, k = a.Rows, a.Cols - } else { - n, k = a.Cols, a.Rows - } - blas64.Dsyr2k(c.Uplo, t, n, k, alpha, a.Data, a.Stride, b.Data, b.Stride, beta, c.Data, c.Stride) -} - -// Trmm performs -// B = alpha * A * B, if tA == blas.NoTrans and s == blas.Left, -// B = alpha * A^T * B, if tA == blas.Trans or blas.ConjTrans, and s == blas.Left, -// B = alpha * B * A, if tA == blas.NoTrans and s == blas.Right, -// B = alpha * B * A^T, if tA == blas.Trans or blas.ConjTrans, and s == blas.Right, -// where A is an n×n or m×m triangular matrix, B is an m×n matrix, and alpha is -// a scalar. -func Trmm(s blas.Side, tA blas.Transpose, alpha float64, a Triangular, b General) { - blas64.Dtrmm(s, a.Uplo, tA, a.Diag, b.Rows, b.Cols, alpha, a.Data, a.Stride, b.Data, b.Stride) -} - -// Trsm solves -// A * X = alpha * B, if tA == blas.NoTrans and s == blas.Left, -// A^T * X = alpha * B, if tA == blas.Trans or blas.ConjTrans, and s == blas.Left, -// X * A = alpha * B, if tA == blas.NoTrans and s == blas.Right, -// X * A^T = alpha * B, if tA == blas.Trans or blas.ConjTrans, and s == blas.Right, -// where A is an n×n or m×m triangular matrix, X and B are m×n matrices, and -// alpha is a scalar. -// -// At entry to the function, X contains the values of B, and the result is -// stored in-place into X. -// -// No check is made that A is invertible. 
-func Trsm(s blas.Side, tA blas.Transpose, alpha float64, a Triangular, b General) { - blas64.Dtrsm(s, a.Uplo, tA, a.Diag, b.Rows, b.Cols, alpha, a.Data, a.Stride, b.Data, b.Stride) -} diff --git a/vendor/gonum.org/v1/gonum/blas/blas64/conv.go b/vendor/gonum.org/v1/gonum/blas/blas64/conv.go deleted file mode 100644 index 882fd8a71..000000000 --- a/vendor/gonum.org/v1/gonum/blas/blas64/conv.go +++ /dev/null @@ -1,277 +0,0 @@ -// Copyright ©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package blas64 - -import "gonum.org/v1/gonum/blas" - -// GeneralCols represents a matrix using the conventional column-major storage scheme. -type GeneralCols General - -// From fills the receiver with elements from a. The receiver -// must have the same dimensions as a and have adequate backing -// data storage. -func (t GeneralCols) From(a General) { - if t.Rows != a.Rows || t.Cols != a.Cols { - panic("blas64: mismatched dimension") - } - if len(t.Data) < (t.Cols-1)*t.Stride+t.Rows { - panic("blas64: short data slice") - } - for i := 0; i < a.Rows; i++ { - for j, v := range a.Data[i*a.Stride : i*a.Stride+a.Cols] { - t.Data[i+j*t.Stride] = v - } - } -} - -// From fills the receiver with elements from a. The receiver -// must have the same dimensions as a and have adequate backing -// data storage. -func (t General) From(a GeneralCols) { - if t.Rows != a.Rows || t.Cols != a.Cols { - panic("blas64: mismatched dimension") - } - if len(t.Data) < (t.Rows-1)*t.Stride+t.Cols { - panic("blas64: short data slice") - } - for j := 0; j < a.Cols; j++ { - for i, v := range a.Data[j*a.Stride : j*a.Stride+a.Rows] { - t.Data[i*t.Stride+j] = v - } - } -} - -// TriangularCols represents a matrix using the conventional column-major storage scheme. -type TriangularCols Triangular - -// From fills the receiver with elements from a. The receiver -// must have the same dimensions, uplo and diag as a and have -// adequate backing data storage. -func (t TriangularCols) From(a Triangular) { - if t.N != a.N { - panic("blas64: mismatched dimension") - } - if t.Uplo != a.Uplo { - panic("blas64: mismatched BLAS uplo") - } - if t.Diag != a.Diag { - panic("blas64: mismatched BLAS diag") - } - switch a.Uplo { - default: - panic("blas64: bad BLAS uplo") - case blas.Upper: - for i := 0; i < a.N; i++ { - for j := i; j < a.N; j++ { - t.Data[i+j*t.Stride] = a.Data[i*a.Stride+j] - } - } - case blas.Lower: - for i := 0; i < a.N; i++ { - for j := 0; j <= i; j++ { - t.Data[i+j*t.Stride] = a.Data[i*a.Stride+j] - } - } - case blas.All: - for i := 0; i < a.N; i++ { - for j := 0; j < a.N; j++ { - t.Data[i+j*t.Stride] = a.Data[i*a.Stride+j] - } - } - } -} - -// From fills the receiver with elements from a. The receiver -// must have the same dimensions, uplo and diag as a and have -// adequate backing data storage. 
-func (t Triangular) From(a TriangularCols) { - if t.N != a.N { - panic("blas64: mismatched dimension") - } - if t.Uplo != a.Uplo { - panic("blas64: mismatched BLAS uplo") - } - if t.Diag != a.Diag { - panic("blas64: mismatched BLAS diag") - } - switch a.Uplo { - default: - panic("blas64: bad BLAS uplo") - case blas.Upper: - for i := 0; i < a.N; i++ { - for j := i; j < a.N; j++ { - t.Data[i*t.Stride+j] = a.Data[i+j*a.Stride] - } - } - case blas.Lower: - for i := 0; i < a.N; i++ { - for j := 0; j <= i; j++ { - t.Data[i*t.Stride+j] = a.Data[i+j*a.Stride] - } - } - case blas.All: - for i := 0; i < a.N; i++ { - for j := 0; j < a.N; j++ { - t.Data[i*t.Stride+j] = a.Data[i+j*a.Stride] - } - } - } -} - -// BandCols represents a matrix using the band column-major storage scheme. -type BandCols Band - -// From fills the receiver with elements from a. The receiver -// must have the same dimensions and bandwidth as a and have -// adequate backing data storage. -func (t BandCols) From(a Band) { - if t.Rows != a.Rows || t.Cols != a.Cols { - panic("blas64: mismatched dimension") - } - if t.KL != a.KL || t.KU != a.KU { - panic("blas64: mismatched bandwidth") - } - if a.Stride < a.KL+a.KU+1 { - panic("blas64: short stride for source") - } - if t.Stride < t.KL+t.KU+1 { - panic("blas64: short stride for destination") - } - for i := 0; i < a.Rows; i++ { - for j := max(0, i-a.KL); j < min(i+a.KU+1, a.Cols); j++ { - t.Data[i+t.KU-j+j*t.Stride] = a.Data[j+a.KL-i+i*a.Stride] - } - } -} - -// From fills the receiver with elements from a. The receiver -// must have the same dimensions and bandwidth as a and have -// adequate backing data storage. -func (t Band) From(a BandCols) { - if t.Rows != a.Rows || t.Cols != a.Cols { - panic("blas64: mismatched dimension") - } - if t.KL != a.KL || t.KU != a.KU { - panic("blas64: mismatched bandwidth") - } - if a.Stride < a.KL+a.KU+1 { - panic("blas64: short stride for source") - } - if t.Stride < t.KL+t.KU+1 { - panic("blas64: short stride for destination") - } - for j := 0; j < a.Cols; j++ { - for i := max(0, j-a.KU); i < min(j+a.KL+1, a.Rows); i++ { - t.Data[j+a.KL-i+i*a.Stride] = a.Data[i+t.KU-j+j*t.Stride] - } - } -} - -// TriangularBandCols represents a triangular matrix using the band column-major storage scheme. -type TriangularBandCols TriangularBand - -// From fills the receiver with elements from a. The receiver -// must have the same dimensions, bandwidth and uplo as a and -// have adequate backing data storage. -func (t TriangularBandCols) From(a TriangularBand) { - if t.N != a.N { - panic("blas64: mismatched dimension") - } - if t.K != a.K { - panic("blas64: mismatched bandwidth") - } - if a.Stride < a.K+1 { - panic("blas64: short stride for source") - } - if t.Stride < t.K+1 { - panic("blas64: short stride for destination") - } - if t.Uplo != a.Uplo { - panic("blas64: mismatched BLAS uplo") - } - if t.Diag != a.Diag { - panic("blas64: mismatched BLAS diag") - } - dst := BandCols{ - Rows: t.N, Cols: t.N, - Stride: t.Stride, - Data: t.Data, - } - src := Band{ - Rows: a.N, Cols: a.N, - Stride: a.Stride, - Data: a.Data, - } - switch a.Uplo { - default: - panic("blas64: bad BLAS uplo") - case blas.Upper: - dst.KU = t.K - src.KU = a.K - case blas.Lower: - dst.KL = t.K - src.KL = a.K - } - dst.From(src) -} - -// From fills the receiver with elements from a. The receiver -// must have the same dimensions, bandwidth and uplo as a and -// have adequate backing data storage. 
-func (t TriangularBand) From(a TriangularBandCols) { - if t.N != a.N { - panic("blas64: mismatched dimension") - } - if t.K != a.K { - panic("blas64: mismatched bandwidth") - } - if a.Stride < a.K+1 { - panic("blas64: short stride for source") - } - if t.Stride < t.K+1 { - panic("blas64: short stride for destination") - } - if t.Uplo != a.Uplo { - panic("blas64: mismatched BLAS uplo") - } - if t.Diag != a.Diag { - panic("blas64: mismatched BLAS diag") - } - dst := Band{ - Rows: t.N, Cols: t.N, - Stride: t.Stride, - Data: t.Data, - } - src := BandCols{ - Rows: a.N, Cols: a.N, - Stride: a.Stride, - Data: a.Data, - } - switch a.Uplo { - default: - panic("blas64: bad BLAS uplo") - case blas.Upper: - dst.KU = t.K - src.KU = a.K - case blas.Lower: - dst.KL = t.K - src.KL = a.K - } - dst.From(src) -} - -func min(a, b int) int { - if a < b { - return a - } - return b -} - -func max(a, b int) int { - if a > b { - return a - } - return b -} diff --git a/vendor/gonum.org/v1/gonum/blas/blas64/conv_symmetric.go b/vendor/gonum.org/v1/gonum/blas/blas64/conv_symmetric.go deleted file mode 100644 index 5146f1a1c..000000000 --- a/vendor/gonum.org/v1/gonum/blas/blas64/conv_symmetric.go +++ /dev/null @@ -1,153 +0,0 @@ -// Copyright ©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package blas64 - -import "gonum.org/v1/gonum/blas" - -// SymmetricCols represents a matrix using the conventional column-major storage scheme. -type SymmetricCols Symmetric - -// From fills the receiver with elements from a. The receiver -// must have the same dimensions and uplo as a and have adequate -// backing data storage. -func (t SymmetricCols) From(a Symmetric) { - if t.N != a.N { - panic("blas64: mismatched dimension") - } - if t.Uplo != a.Uplo { - panic("blas64: mismatched BLAS uplo") - } - switch a.Uplo { - default: - panic("blas64: bad BLAS uplo") - case blas.Upper: - for i := 0; i < a.N; i++ { - for j := i; j < a.N; j++ { - t.Data[i+j*t.Stride] = a.Data[i*a.Stride+j] - } - } - case blas.Lower: - for i := 0; i < a.N; i++ { - for j := 0; j <= i; j++ { - t.Data[i+j*t.Stride] = a.Data[i*a.Stride+j] - } - } - } -} - -// From fills the receiver with elements from a. The receiver -// must have the same dimensions and uplo as a and have adequate -// backing data storage. -func (t Symmetric) From(a SymmetricCols) { - if t.N != a.N { - panic("blas64: mismatched dimension") - } - if t.Uplo != a.Uplo { - panic("blas64: mismatched BLAS uplo") - } - switch a.Uplo { - default: - panic("blas64: bad BLAS uplo") - case blas.Upper: - for i := 0; i < a.N; i++ { - for j := i; j < a.N; j++ { - t.Data[i*t.Stride+j] = a.Data[i+j*a.Stride] - } - } - case blas.Lower: - for i := 0; i < a.N; i++ { - for j := 0; j <= i; j++ { - t.Data[i*t.Stride+j] = a.Data[i+j*a.Stride] - } - } - } -} - -// SymmetricBandCols represents a symmetric matrix using the band column-major storage scheme. -type SymmetricBandCols SymmetricBand - -// From fills the receiver with elements from a. The receiver -// must have the same dimensions, bandwidth and uplo as a and -// have adequate backing data storage. 
-func (t SymmetricBandCols) From(a SymmetricBand) { - if t.N != a.N { - panic("blas64: mismatched dimension") - } - if t.K != a.K { - panic("blas64: mismatched bandwidth") - } - if a.Stride < a.K+1 { - panic("blas64: short stride for source") - } - if t.Stride < t.K+1 { - panic("blas64: short stride for destination") - } - if t.Uplo != a.Uplo { - panic("blas64: mismatched BLAS uplo") - } - dst := BandCols{ - Rows: t.N, Cols: t.N, - Stride: t.Stride, - Data: t.Data, - } - src := Band{ - Rows: a.N, Cols: a.N, - Stride: a.Stride, - Data: a.Data, - } - switch a.Uplo { - default: - panic("blas64: bad BLAS uplo") - case blas.Upper: - dst.KU = t.K - src.KU = a.K - case blas.Lower: - dst.KL = t.K - src.KL = a.K - } - dst.From(src) -} - -// From fills the receiver with elements from a. The receiver -// must have the same dimensions, bandwidth and uplo as a and -// have adequate backing data storage. -func (t SymmetricBand) From(a SymmetricBandCols) { - if t.N != a.N { - panic("blas64: mismatched dimension") - } - if t.K != a.K { - panic("blas64: mismatched bandwidth") - } - if a.Stride < a.K+1 { - panic("blas64: short stride for source") - } - if t.Stride < t.K+1 { - panic("blas64: short stride for destination") - } - if t.Uplo != a.Uplo { - panic("blas64: mismatched BLAS uplo") - } - dst := Band{ - Rows: t.N, Cols: t.N, - Stride: t.Stride, - Data: t.Data, - } - src := BandCols{ - Rows: a.N, Cols: a.N, - Stride: a.Stride, - Data: a.Data, - } - switch a.Uplo { - default: - panic("blas64: bad BLAS uplo") - case blas.Upper: - dst.KU = t.K - src.KU = a.K - case blas.Lower: - dst.KL = t.K - src.KL = a.K - } - dst.From(src) -} diff --git a/vendor/gonum.org/v1/gonum/blas/blas64/doc.go b/vendor/gonum.org/v1/gonum/blas/blas64/doc.go deleted file mode 100644 index 7410cee48..000000000 --- a/vendor/gonum.org/v1/gonum/blas/blas64/doc.go +++ /dev/null @@ -1,6 +0,0 @@ -// Copyright ©2017 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package blas64 provides a simple interface to the float64 BLAS API. -package blas64 // import "gonum.org/v1/gonum/blas/blas64" diff --git a/vendor/gonum.org/v1/gonum/blas/cblas128/cblas128.go b/vendor/gonum.org/v1/gonum/blas/cblas128/cblas128.go deleted file mode 100644 index 1205da8af..000000000 --- a/vendor/gonum.org/v1/gonum/blas/cblas128/cblas128.go +++ /dev/null @@ -1,508 +0,0 @@ -// Copyright ©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cblas128 - -import ( - "gonum.org/v1/gonum/blas" - "gonum.org/v1/gonum/blas/gonum" -) - -var cblas128 blas.Complex128 = gonum.Implementation{} - -// Use sets the BLAS complex128 implementation to be used by subsequent BLAS calls. -// The default implementation is -// gonum.org/v1/gonum/blas/gonum.Implementation. -func Use(b blas.Complex128) { - cblas128 = b -} - -// Implementation returns the current BLAS complex128 implementation. -// -// Implementation allows direct calls to the current the BLAS complex128 implementation -// giving finer control of parameters. -func Implementation() blas.Complex128 { - return cblas128 -} - -// Vector represents a vector with an associated element increment. -type Vector struct { - Inc int - Data []complex128 -} - -// General represents a matrix using the conventional storage scheme. 
-type General struct { - Rows, Cols int - Stride int - Data []complex128 -} - -// Band represents a band matrix using the band storage scheme. -type Band struct { - Rows, Cols int - KL, KU int - Stride int - Data []complex128 -} - -// Triangular represents a triangular matrix using the conventional storage scheme. -type Triangular struct { - N int - Stride int - Data []complex128 - Uplo blas.Uplo - Diag blas.Diag -} - -// TriangularBand represents a triangular matrix using the band storage scheme. -type TriangularBand struct { - N, K int - Stride int - Data []complex128 - Uplo blas.Uplo - Diag blas.Diag -} - -// TriangularPacked represents a triangular matrix using the packed storage scheme. -type TriangularPacked struct { - N int - Data []complex128 - Uplo blas.Uplo - Diag blas.Diag -} - -// Symmetric represents a symmetric matrix using the conventional storage scheme. -type Symmetric struct { - N int - Stride int - Data []complex128 - Uplo blas.Uplo -} - -// SymmetricBand represents a symmetric matrix using the band storage scheme. -type SymmetricBand struct { - N, K int - Stride int - Data []complex128 - Uplo blas.Uplo -} - -// SymmetricPacked represents a symmetric matrix using the packed storage scheme. -type SymmetricPacked struct { - N int - Data []complex128 - Uplo blas.Uplo -} - -// Hermitian represents an Hermitian matrix using the conventional storage scheme. -type Hermitian Symmetric - -// HermitianBand represents an Hermitian matrix using the band storage scheme. -type HermitianBand SymmetricBand - -// HermitianPacked represents an Hermitian matrix using the packed storage scheme. -type HermitianPacked SymmetricPacked - -// Level 1 - -const negInc = "cblas128: negative vector increment" - -// Dotu computes the dot product of the two vectors without -// complex conjugation: -// x^T * y. -func Dotu(n int, x, y Vector) complex128 { - return cblas128.Zdotu(n, x.Data, x.Inc, y.Data, y.Inc) -} - -// Dotc computes the dot product of the two vectors with -// complex conjugation: -// x^H * y. -func Dotc(n int, x, y Vector) complex128 { - return cblas128.Zdotc(n, x.Data, x.Inc, y.Data, y.Inc) -} - -// Nrm2 computes the Euclidean norm of the vector x: -// sqrt(\sum_i x[i] * x[i]). -// -// Nrm2 will panic if the vector increment is negative. -func Nrm2(n int, x Vector) float64 { - if x.Inc < 0 { - panic(negInc) - } - return cblas128.Dznrm2(n, x.Data, x.Inc) -} - -// Asum computes the sum of magnitudes of the real and imaginary parts of -// elements of the vector x: -// \sum_i (|Re x[i]| + |Im x[i]|). -// -// Asum will panic if the vector increment is negative. -func Asum(n int, x Vector) float64 { - if x.Inc < 0 { - panic(negInc) - } - return cblas128.Dzasum(n, x.Data, x.Inc) -} - -// Iamax returns the index of an element of x with the largest sum of -// magnitudes of the real and imaginary parts (|Re x[i]|+|Im x[i]|). -// If there are multiple such indices, the earliest is returned. -// -// Iamax returns -1 if n == 0. -// -// Iamax will panic if the vector increment is negative. -func Iamax(n int, x Vector) int { - if x.Inc < 0 { - panic(negInc) - } - return cblas128.Izamax(n, x.Data, x.Inc) -} - -// Swap exchanges the elements of two vectors: -// x[i], y[i] = y[i], x[i] for all i. -func Swap(n int, x, y Vector) { - cblas128.Zswap(n, x.Data, x.Inc, y.Data, y.Inc) -} - -// Copy copies the elements of x into the elements of y: -// y[i] = x[i] for all i. 
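These Level 1 helpers only need the Vector convention above: a data slice plus an element increment. A small usage sketch, assuming the cblas128 package in this patch is importable:

package main

import (
	"fmt"

	"gonum.org/v1/gonum/blas/cblas128"
)

func main() {
	x := cblas128.Vector{Inc: 1, Data: []complex128{1 + 1i, 2, 3 - 2i}}
	y := cblas128.Vector{Inc: 1, Data: []complex128{1, 1i, 1}}
	// Dotc conjugates x: sum_i conj(x[i]) * y[i].
	fmt.Println(cblas128.Dotc(3, x, y)) // (4+3i)
}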
-func Copy(n int, x, y Vector) { - cblas128.Zcopy(n, x.Data, x.Inc, y.Data, y.Inc) -} - -// Axpy computes -// y = alpha * x + y, -// where x and y are vectors, and alpha is a scalar. -func Axpy(n int, alpha complex128, x, y Vector) { - cblas128.Zaxpy(n, alpha, x.Data, x.Inc, y.Data, y.Inc) -} - -// Scal computes -// x = alpha * x, -// where x is a vector, and alpha is a scalar. -// -// Scal will panic if the vector increment is negative. -func Scal(n int, alpha complex128, x Vector) { - if x.Inc < 0 { - panic(negInc) - } - cblas128.Zscal(n, alpha, x.Data, x.Inc) -} - -// Dscal computes -// x = alpha * x, -// where x is a vector, and alpha is a real scalar. -// -// Dscal will panic if the vector increment is negative. -func Dscal(n int, alpha float64, x Vector) { - if x.Inc < 0 { - panic(negInc) - } - cblas128.Zdscal(n, alpha, x.Data, x.Inc) -} - -// Level 2 - -// Gemv computes -// y = alpha * A * x + beta * y, if t == blas.NoTrans, -// y = alpha * A^T * x + beta * y, if t == blas.Trans, -// y = alpha * A^H * x + beta * y, if t == blas.ConjTrans, -// where A is an m×n dense matrix, x and y are vectors, and alpha and beta are -// scalars. -func Gemv(t blas.Transpose, alpha complex128, a General, x Vector, beta complex128, y Vector) { - cblas128.Zgemv(t, a.Rows, a.Cols, alpha, a.Data, a.Stride, x.Data, x.Inc, beta, y.Data, y.Inc) -} - -// Gbmv computes -// y = alpha * A * x + beta * y, if t == blas.NoTrans, -// y = alpha * A^T * x + beta * y, if t == blas.Trans, -// y = alpha * A^H * x + beta * y, if t == blas.ConjTrans, -// where A is an m×n band matrix, x and y are vectors, and alpha and beta are -// scalars. -func Gbmv(t blas.Transpose, alpha complex128, a Band, x Vector, beta complex128, y Vector) { - cblas128.Zgbmv(t, a.Rows, a.Cols, a.KL, a.KU, alpha, a.Data, a.Stride, x.Data, x.Inc, beta, y.Data, y.Inc) -} - -// Trmv computes -// x = A * x, if t == blas.NoTrans, -// x = A^T * x, if t == blas.Trans, -// x = A^H * x, if t == blas.ConjTrans, -// where A is an n×n triangular matrix, and x is a vector. -func Trmv(t blas.Transpose, a Triangular, x Vector) { - cblas128.Ztrmv(a.Uplo, t, a.Diag, a.N, a.Data, a.Stride, x.Data, x.Inc) -} - -// Tbmv computes -// x = A * x, if t == blas.NoTrans, -// x = A^T * x, if t == blas.Trans, -// x = A^H * x, if t == blas.ConjTrans, -// where A is an n×n triangular band matrix, and x is a vector. -func Tbmv(t blas.Transpose, a TriangularBand, x Vector) { - cblas128.Ztbmv(a.Uplo, t, a.Diag, a.N, a.K, a.Data, a.Stride, x.Data, x.Inc) -} - -// Tpmv computes -// x = A * x, if t == blas.NoTrans, -// x = A^T * x, if t == blas.Trans, -// x = A^H * x, if t == blas.ConjTrans, -// where A is an n×n triangular matrix in packed format, and x is a vector. -func Tpmv(t blas.Transpose, a TriangularPacked, x Vector) { - cblas128.Ztpmv(a.Uplo, t, a.Diag, a.N, a.Data, x.Data, x.Inc) -} - -// Trsv solves -// A * x = b, if t == blas.NoTrans, -// A^T * x = b, if t == blas.Trans, -// A^H * x = b, if t == blas.ConjTrans, -// where A is an n×n triangular matrix and x is a vector. -// -// At entry to the function, x contains the values of b, and the result is -// stored in-place into x. -// -// No test for singularity or near-singularity is included in this -// routine. Such tests must be performed before calling this routine. 
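As the Trsv documentation above says, the solve happens in place: x carries the right-hand side b in and the solution out. A short sketch, assuming the blas and cblas128 packages in this patch are importable:

package main

import (
	"fmt"

	"gonum.org/v1/gonum/blas"
	"gonum.org/v1/gonum/blas/cblas128"
)

func main() {
	// Upper triangular A = [2 1; 0 4], row-major with stride 2.
	a := cblas128.Triangular{
		N: 2, Stride: 2,
		Data: []complex128{2, 1, 0, 4},
		Uplo: blas.Upper, Diag: blas.NonUnit,
	}
	// On entry x holds b = (5, 8); on return it holds the solution.
	x := cblas128.Vector{Inc: 1, Data: []complex128{5, 8}}
	cblas128.Trsv(blas.NoTrans, a, x)
	fmt.Println(x.Data) // [(1.5+0i) (2+0i)]
}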
-func Trsv(t blas.Transpose, a Triangular, x Vector) { - cblas128.Ztrsv(a.Uplo, t, a.Diag, a.N, a.Data, a.Stride, x.Data, x.Inc) -} - -// Tbsv solves - A * x = b, if t == blas.NoTrans, - A^T * x = b, if t == blas.Trans, - A^H * x = b, if t == blas.ConjTrans, - where A is an n×n triangular band matrix, and x is a vector. - - At entry to the function, x contains the values of b, and the result is - stored in-place into x. - - No test for singularity or near-singularity is included in this - routine. Such tests must be performed before calling this routine. -func Tbsv(t blas.Transpose, a TriangularBand, x Vector) { - cblas128.Ztbsv(a.Uplo, t, a.Diag, a.N, a.K, a.Data, a.Stride, x.Data, x.Inc) -} - -// Tpsv solves - A * x = b, if t == blas.NoTrans, - A^T * x = b, if t == blas.Trans, - A^H * x = b, if t == blas.ConjTrans, - where A is an n×n triangular matrix in packed format and x is a vector. - - At entry to the function, x contains the values of b, and the result is - stored in-place into x. - - No test for singularity or near-singularity is included in this - routine. Such tests must be performed before calling this routine. -func Tpsv(t blas.Transpose, a TriangularPacked, x Vector) { - cblas128.Ztpsv(a.Uplo, t, a.Diag, a.N, a.Data, x.Data, x.Inc) -} - -// Hemv computes - y = alpha * A * x + beta * y, - where A is an n×n Hermitian matrix, x and y are vectors, and alpha and - beta are scalars. -func Hemv(alpha complex128, a Hermitian, x Vector, beta complex128, y Vector) { - cblas128.Zhemv(a.Uplo, a.N, alpha, a.Data, a.Stride, x.Data, x.Inc, beta, y.Data, y.Inc) -} - -// Hbmv performs - y = alpha * A * x + beta * y, - where A is an n×n Hermitian band matrix, x and y are vectors, and alpha - and beta are scalars. -func Hbmv(alpha complex128, a HermitianBand, x Vector, beta complex128, y Vector) { - cblas128.Zhbmv(a.Uplo, a.N, a.K, alpha, a.Data, a.Stride, x.Data, x.Inc, beta, y.Data, y.Inc) -} - -// Hpmv performs - y = alpha * A * x + beta * y, - where A is an n×n Hermitian matrix in packed format, x and y are vectors, - and alpha and beta are scalars. -func Hpmv(alpha complex128, a HermitianPacked, x Vector, beta complex128, y Vector) { - cblas128.Zhpmv(a.Uplo, a.N, alpha, a.Data, x.Data, x.Inc, beta, y.Data, y.Inc) -} - -// Geru performs a rank-1 update - A += alpha * x * y^T, - where A is an m×n dense matrix, x and y are vectors, and alpha is a scalar. -func Geru(alpha complex128, x, y Vector, a General) { - cblas128.Zgeru(a.Rows, a.Cols, alpha, x.Data, x.Inc, y.Data, y.Inc, a.Data, a.Stride) -} - -// Gerc performs a rank-1 update - A += alpha * x * y^H, - where A is an m×n dense matrix, x and y are vectors, and alpha is a scalar. -func Gerc(alpha complex128, x, y Vector, a General) { - cblas128.Zgerc(a.Rows, a.Cols, alpha, x.Data, x.Inc, y.Data, y.Inc, a.Data, a.Stride) -} - -// Her performs a rank-1 update - A += alpha * x * x^H, - where A is an n×n Hermitian matrix, x is a vector, and alpha is a scalar. -func Her(alpha float64, x Vector, a Hermitian) { - cblas128.Zher(a.Uplo, a.N, alpha, x.Data, x.Inc, a.Data, a.Stride) -} - -// Hpr performs a rank-1 update - A += alpha * x * x^H, - where A is an n×n Hermitian matrix in packed format, x is a vector, and - alpha is a scalar.
-func Hpr(alpha float64, x Vector, a HermitianPacked) { - cblas128.Zhpr(a.Uplo, a.N, alpha, x.Data, x.Inc, a.Data) -} - -// Her2 performs a rank-2 update -// A += alpha * x * y^H + conj(alpha) * y * x^H, -// where A is an n×n Hermitian matrix, x and y are vectors, and alpha is a scalar. -func Her2(alpha complex128, x, y Vector, a Hermitian) { - cblas128.Zher2(a.Uplo, a.N, alpha, x.Data, x.Inc, y.Data, y.Inc, a.Data, a.Stride) -} - -// Hpr2 performs a rank-2 update -// A += alpha * x * y^H + conj(alpha) * y * x^H, -// where A is an n×n Hermitian matrix in packed format, x and y are vectors, -// and alpha is a scalar. -func Hpr2(alpha complex128, x, y Vector, a HermitianPacked) { - cblas128.Zhpr2(a.Uplo, a.N, alpha, x.Data, x.Inc, y.Data, y.Inc, a.Data) -} - -// Level 3 - -// Gemm computes -// C = alpha * A * B + beta * C, -// where A, B, and C are dense matrices, and alpha and beta are scalars. -// tA and tB specify whether A or B are transposed or conjugated. -func Gemm(tA, tB blas.Transpose, alpha complex128, a, b General, beta complex128, c General) { - var m, n, k int - if tA == blas.NoTrans { - m, k = a.Rows, a.Cols - } else { - m, k = a.Cols, a.Rows - } - if tB == blas.NoTrans { - n = b.Cols - } else { - n = b.Rows - } - cblas128.Zgemm(tA, tB, m, n, k, alpha, a.Data, a.Stride, b.Data, b.Stride, beta, c.Data, c.Stride) -} - -// Symm performs -// C = alpha * A * B + beta * C, if s == blas.Left, -// C = alpha * B * A + beta * C, if s == blas.Right, -// where A is an n×n or m×m symmetric matrix, B and C are m×n matrices, and -// alpha and beta are scalars. -func Symm(s blas.Side, alpha complex128, a Symmetric, b General, beta complex128, c General) { - var m, n int - if s == blas.Left { - m, n = a.N, b.Cols - } else { - m, n = b.Rows, a.N - } - cblas128.Zsymm(s, a.Uplo, m, n, alpha, a.Data, a.Stride, b.Data, b.Stride, beta, c.Data, c.Stride) -} - -// Syrk performs a symmetric rank-k update -// C = alpha * A * A^T + beta * C, if t == blas.NoTrans, -// C = alpha * A^T * A + beta * C, if t == blas.Trans, -// where C is an n×n symmetric matrix, A is an n×k matrix if t == blas.NoTrans -// and a k×n matrix otherwise, and alpha and beta are scalars. -func Syrk(t blas.Transpose, alpha complex128, a General, beta complex128, c Symmetric) { - var n, k int - if t == blas.NoTrans { - n, k = a.Rows, a.Cols - } else { - n, k = a.Cols, a.Rows - } - cblas128.Zsyrk(c.Uplo, t, n, k, alpha, a.Data, a.Stride, beta, c.Data, c.Stride) -} - -// Syr2k performs a symmetric rank-2k update -// C = alpha * A * B^T + alpha * B * A^T + beta * C, if t == blas.NoTrans, -// C = alpha * A^T * B + alpha * B^T * A + beta * C, if t == blas.Trans, -// where C is an n×n symmetric matrix, A and B are n×k matrices if -// t == blas.NoTrans and k×n otherwise, and alpha and beta are scalars. 
-func Syr2k(t blas.Transpose, alpha complex128, a, b General, beta complex128, c Symmetric) { - var n, k int - if t == blas.NoTrans { - n, k = a.Rows, a.Cols - } else { - n, k = a.Cols, a.Rows - } - cblas128.Zsyr2k(c.Uplo, t, n, k, alpha, a.Data, a.Stride, b.Data, b.Stride, beta, c.Data, c.Stride) -} - -// Trmm performs -// B = alpha * A * B, if tA == blas.NoTrans and s == blas.Left, -// B = alpha * A^T * B, if tA == blas.Trans and s == blas.Left, -// B = alpha * A^H * B, if tA == blas.ConjTrans and s == blas.Left, -// B = alpha * B * A, if tA == blas.NoTrans and s == blas.Right, -// B = alpha * B * A^T, if tA == blas.Trans and s == blas.Right, -// B = alpha * B * A^H, if tA == blas.ConjTrans and s == blas.Right, -// where A is an n×n or m×m triangular matrix, B is an m×n matrix, and alpha is -// a scalar. -func Trmm(s blas.Side, tA blas.Transpose, alpha complex128, a Triangular, b General) { - cblas128.Ztrmm(s, a.Uplo, tA, a.Diag, b.Rows, b.Cols, alpha, a.Data, a.Stride, b.Data, b.Stride) -} - -// Trsm solves -// A * X = alpha * B, if tA == blas.NoTrans and s == blas.Left, -// A^T * X = alpha * B, if tA == blas.Trans and s == blas.Left, -// A^H * X = alpha * B, if tA == blas.ConjTrans and s == blas.Left, -// X * A = alpha * B, if tA == blas.NoTrans and s == blas.Right, -// X * A^T = alpha * B, if tA == blas.Trans and s == blas.Right, -// X * A^H = alpha * B, if tA == blas.ConjTrans and s == blas.Right, -// where A is an n×n or m×m triangular matrix, X and B are m×n matrices, and -// alpha is a scalar. -// -// At entry to the function, b contains the values of B, and the result is -// stored in-place into b. -// -// No check is made that A is invertible. -func Trsm(s blas.Side, tA blas.Transpose, alpha complex128, a Triangular, b General) { - cblas128.Ztrsm(s, a.Uplo, tA, a.Diag, b.Rows, b.Cols, alpha, a.Data, a.Stride, b.Data, b.Stride) -} - -// Hemm performs -// C = alpha * A * B + beta * C, if s == blas.Left, -// C = alpha * B * A + beta * C, if s == blas.Right, -// where A is an n×n or m×m Hermitian matrix, B and C are m×n matrices, and -// alpha and beta are scalars. -func Hemm(s blas.Side, alpha complex128, a Hermitian, b General, beta complex128, c General) { - var m, n int - if s == blas.Left { - m, n = a.N, b.Cols - } else { - m, n = b.Rows, a.N - } - cblas128.Zhemm(s, a.Uplo, m, n, alpha, a.Data, a.Stride, b.Data, b.Stride, beta, c.Data, c.Stride) -} - -// Herk performs the Hermitian rank-k update -// C = alpha * A * A^H + beta*C, if t == blas.NoTrans, -// C = alpha * A^H * A + beta*C, if t == blas.ConjTrans, -// where C is an n×n Hermitian matrix, A is an n×k matrix if t == blas.NoTrans -// and a k×n matrix otherwise, and alpha and beta are scalars. -func Herk(t blas.Transpose, alpha float64, a General, beta float64, c Hermitian) { - var n, k int - if t == blas.NoTrans { - n, k = a.Rows, a.Cols - } else { - n, k = a.Cols, a.Rows - } - cblas128.Zherk(c.Uplo, t, n, k, alpha, a.Data, a.Stride, beta, c.Data, c.Stride) -} - -// Her2k performs the Hermitian rank-2k update -// C = alpha * A * B^H + conj(alpha) * B * A^H + beta * C, if t == blas.NoTrans, -// C = alpha * A^H * B + conj(alpha) * B^H * A + beta * C, if t == blas.ConjTrans, -// where C is an n×n Hermitian matrix, A and B are n×k matrices if t == NoTrans -// and k×n matrices otherwise, and alpha and beta are scalars. 
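Unlike the raw BLAS call, the Gemm wrapper above takes no explicit m, n or k: it reads them from the operand shapes and the transpose flags. A sketch of that inference, assuming the blas and cblas128 packages in this patch are importable:

package main

import (
	"fmt"

	"gonum.org/v1/gonum/blas"
	"gonum.org/v1/gonum/blas/cblas128"
)

func main() {
	// A is 2×3, so with tA == blas.Trans the wrapper derives m = 3 and
	// k = 2 from a.Cols and a.Rows; n = 3 comes from b.Cols.
	a := cblas128.General{Rows: 2, Cols: 3, Stride: 3, Data: []complex128{1, 2, 3, 4, 5, 6}}
	b := cblas128.General{Rows: 2, Cols: 3, Stride: 3, Data: []complex128{1, 0, 0, 0, 1, 0}}
	c := cblas128.General{Rows: 3, Cols: 3, Stride: 3, Data: make([]complex128, 9)}
	cblas128.Gemm(blas.Trans, blas.NoTrans, 1, a, b, 0, c)
	// C = A^T * B = [[1 4 0] [2 5 0] [3 6 0]], stored row-major in c.Data.
	fmt.Println(c.Data)
}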
-func Her2k(t blas.Transpose, alpha complex128, a, b General, beta float64, c Hermitian) { - var n, k int - if t == blas.NoTrans { - n, k = a.Rows, a.Cols - } else { - n, k = a.Cols, a.Rows - } - cblas128.Zher2k(c.Uplo, t, n, k, alpha, a.Data, a.Stride, b.Data, b.Stride, beta, c.Data, c.Stride) -} diff --git a/vendor/gonum.org/v1/gonum/blas/cblas128/conv.go b/vendor/gonum.org/v1/gonum/blas/cblas128/conv.go deleted file mode 100644 index 93e3cd2f9..000000000 --- a/vendor/gonum.org/v1/gonum/blas/cblas128/conv.go +++ /dev/null @@ -1,279 +0,0 @@ -// Code generated by "go generate gonum.org/v1/gonum/blas”; DO NOT EDIT. - -// Copyright ©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cblas128 - -import "gonum.org/v1/gonum/blas" - -// GeneralCols represents a matrix using the conventional column-major storage scheme. -type GeneralCols General - -// From fills the receiver with elements from a. The receiver -// must have the same dimensions as a and have adequate backing -// data storage. -func (t GeneralCols) From(a General) { - if t.Rows != a.Rows || t.Cols != a.Cols { - panic("cblas128: mismatched dimension") - } - if len(t.Data) < (t.Cols-1)*t.Stride+t.Rows { - panic("cblas128: short data slice") - } - for i := 0; i < a.Rows; i++ { - for j, v := range a.Data[i*a.Stride : i*a.Stride+a.Cols] { - t.Data[i+j*t.Stride] = v - } - } -} - -// From fills the receiver with elements from a. The receiver -// must have the same dimensions as a and have adequate backing -// data storage. -func (t General) From(a GeneralCols) { - if t.Rows != a.Rows || t.Cols != a.Cols { - panic("cblas128: mismatched dimension") - } - if len(t.Data) < (t.Rows-1)*t.Stride+t.Cols { - panic("cblas128: short data slice") - } - for j := 0; j < a.Cols; j++ { - for i, v := range a.Data[j*a.Stride : j*a.Stride+a.Rows] { - t.Data[i*t.Stride+j] = v - } - } -} - -// TriangularCols represents a matrix using the conventional column-major storage scheme. -type TriangularCols Triangular - -// From fills the receiver with elements from a. The receiver -// must have the same dimensions, uplo and diag as a and have -// adequate backing data storage. -func (t TriangularCols) From(a Triangular) { - if t.N != a.N { - panic("cblas128: mismatched dimension") - } - if t.Uplo != a.Uplo { - panic("cblas128: mismatched BLAS uplo") - } - if t.Diag != a.Diag { - panic("cblas128: mismatched BLAS diag") - } - switch a.Uplo { - default: - panic("cblas128: bad BLAS uplo") - case blas.Upper: - for i := 0; i < a.N; i++ { - for j := i; j < a.N; j++ { - t.Data[i+j*t.Stride] = a.Data[i*a.Stride+j] - } - } - case blas.Lower: - for i := 0; i < a.N; i++ { - for j := 0; j <= i; j++ { - t.Data[i+j*t.Stride] = a.Data[i*a.Stride+j] - } - } - case blas.All: - for i := 0; i < a.N; i++ { - for j := 0; j < a.N; j++ { - t.Data[i+j*t.Stride] = a.Data[i*a.Stride+j] - } - } - } -} - -// From fills the receiver with elements from a. The receiver -// must have the same dimensions, uplo and diag as a and have -// adequate backing data storage. 
-func (t Triangular) From(a TriangularCols) { - if t.N != a.N { - panic("cblas128: mismatched dimension") - } - if t.Uplo != a.Uplo { - panic("cblas128: mismatched BLAS uplo") - } - if t.Diag != a.Diag { - panic("cblas128: mismatched BLAS diag") - } - switch a.Uplo { - default: - panic("cblas128: bad BLAS uplo") - case blas.Upper: - for i := 0; i < a.N; i++ { - for j := i; j < a.N; j++ { - t.Data[i*t.Stride+j] = a.Data[i+j*a.Stride] - } - } - case blas.Lower: - for i := 0; i < a.N; i++ { - for j := 0; j <= i; j++ { - t.Data[i*t.Stride+j] = a.Data[i+j*a.Stride] - } - } - case blas.All: - for i := 0; i < a.N; i++ { - for j := 0; j < a.N; j++ { - t.Data[i*t.Stride+j] = a.Data[i+j*a.Stride] - } - } - } -} - -// BandCols represents a matrix using the band column-major storage scheme. -type BandCols Band - -// From fills the receiver with elements from a. The receiver -// must have the same dimensions and bandwidth as a and have -// adequate backing data storage. -func (t BandCols) From(a Band) { - if t.Rows != a.Rows || t.Cols != a.Cols { - panic("cblas128: mismatched dimension") - } - if t.KL != a.KL || t.KU != a.KU { - panic("cblas128: mismatched bandwidth") - } - if a.Stride < a.KL+a.KU+1 { - panic("cblas128: short stride for source") - } - if t.Stride < t.KL+t.KU+1 { - panic("cblas128: short stride for destination") - } - for i := 0; i < a.Rows; i++ { - for j := max(0, i-a.KL); j < min(i+a.KU+1, a.Cols); j++ { - t.Data[i+t.KU-j+j*t.Stride] = a.Data[j+a.KL-i+i*a.Stride] - } - } -} - -// From fills the receiver with elements from a. The receiver -// must have the same dimensions and bandwidth as a and have -// adequate backing data storage. -func (t Band) From(a BandCols) { - if t.Rows != a.Rows || t.Cols != a.Cols { - panic("cblas128: mismatched dimension") - } - if t.KL != a.KL || t.KU != a.KU { - panic("cblas128: mismatched bandwidth") - } - if a.Stride < a.KL+a.KU+1 { - panic("cblas128: short stride for source") - } - if t.Stride < t.KL+t.KU+1 { - panic("cblas128: short stride for destination") - } - for j := 0; j < a.Cols; j++ { - for i := max(0, j-a.KU); i < min(j+a.KL+1, a.Rows); i++ { - t.Data[j+a.KL-i+i*a.Stride] = a.Data[i+t.KU-j+j*t.Stride] - } - } -} - -// TriangularBandCols represents a triangular matrix using the band column-major storage scheme. -type TriangularBandCols TriangularBand - -// From fills the receiver with elements from a. The receiver -// must have the same dimensions, bandwidth and uplo as a and -// have adequate backing data storage. -func (t TriangularBandCols) From(a TriangularBand) { - if t.N != a.N { - panic("cblas128: mismatched dimension") - } - if t.K != a.K { - panic("cblas128: mismatched bandwidth") - } - if a.Stride < a.K+1 { - panic("cblas128: short stride for source") - } - if t.Stride < t.K+1 { - panic("cblas128: short stride for destination") - } - if t.Uplo != a.Uplo { - panic("cblas128: mismatched BLAS uplo") - } - if t.Diag != a.Diag { - panic("cblas128: mismatched BLAS diag") - } - dst := BandCols{ - Rows: t.N, Cols: t.N, - Stride: t.Stride, - Data: t.Data, - } - src := Band{ - Rows: a.N, Cols: a.N, - Stride: a.Stride, - Data: a.Data, - } - switch a.Uplo { - default: - panic("cblas128: bad BLAS uplo") - case blas.Upper: - dst.KU = t.K - src.KU = a.K - case blas.Lower: - dst.KL = t.K - src.KL = a.K - } - dst.From(src) -} - -// From fills the receiver with elements from a. The receiver -// must have the same dimensions, bandwidth and uplo as a and -// have adequate backing data storage.
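The index expressions in these band conversions are easier to trust with concrete numbers. A dependency-free sketch of the row-major band mapping they read from, where element (i, j) of a matrix with kl sub- and ku super-diagonals lives at Data[j+kl-i + i*stride]; the min/max helpers mirror the ones defined in this file:

package main

import "fmt"

func main() {
	// 4×4 band matrix with kl = 1, ku = 1; stride must be >= kl+ku+1 = 3.
	const kl, ku, stride = 1, 1, 3
	for i := 0; i < 4; i++ {
		for j := max(0, i-kl); j < min(i+ku+1, 4); j++ {
			fmt.Printf("(%d,%d) -> Data[%d]\n", i, j, j+kl-i+i*stride)
		}
	}
}

func max(a, b int) int {
	if a > b {
		return a
	}
	return b
}

func min(a, b int) int {
	if a < b {
		return a
	}
	return b
}

Within a row the stored entries stay contiguous, which is why the conversions only need the min/max clamps at the matrix edges.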
-func (t TriangularBand) From(a TriangularBandCols) { - if t.N != a.N { - panic("cblas128: mismatched dimension") - } - if t.K != a.K { - panic("cblas128: mismatched bandwidth") - } - if a.Stride < a.K+1 { - panic("cblas128: short stride for source") - } - if t.Stride < t.K+1 { - panic("cblas128: short stride for destination") - } - if t.Uplo != a.Uplo { - panic("cblas128: mismatched BLAS uplo") - } - if t.Diag != a.Diag { - panic("cblas128: mismatched BLAS diag") - } - dst := Band{ - Rows: t.N, Cols: t.N, - Stride: t.Stride, - Data: t.Data, - } - src := BandCols{ - Rows: a.N, Cols: a.N, - Stride: a.Stride, - Data: a.Data, - } - switch a.Uplo { - default: - panic("cblas128: bad BLAS uplo") - case blas.Upper: - dst.KU = t.K - src.KU = a.K - case blas.Lower: - dst.KL = t.K - src.KL = a.K - } - dst.From(src) -} - -func min(a, b int) int { - if a < b { - return a - } - return b -} - -func max(a, b int) int { - if a > b { - return a - } - return b -} diff --git a/vendor/gonum.org/v1/gonum/blas/cblas128/conv_hermitian.go b/vendor/gonum.org/v1/gonum/blas/cblas128/conv_hermitian.go deleted file mode 100644 index 51c3a5777..000000000 --- a/vendor/gonum.org/v1/gonum/blas/cblas128/conv_hermitian.go +++ /dev/null @@ -1,155 +0,0 @@ -// Code generated by "go generate gonum.org/v1/gonum/blas”; DO NOT EDIT. - -// Copyright ©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cblas128 - -import "gonum.org/v1/gonum/blas" - -// HermitianCols represents a matrix using the conventional column-major storage scheme. -type HermitianCols Hermitian - -// From fills the receiver with elements from a. The receiver -// must have the same dimensions and uplo as a and have adequate -// backing data storage. -func (t HermitianCols) From(a Hermitian) { - if t.N != a.N { - panic("cblas128: mismatched dimension") - } - if t.Uplo != a.Uplo { - panic("cblas128: mismatched BLAS uplo") - } - switch a.Uplo { - default: - panic("cblas128: bad BLAS uplo") - case blas.Upper: - for i := 0; i < a.N; i++ { - for j := i; j < a.N; j++ { - t.Data[i+j*t.Stride] = a.Data[i*a.Stride+j] - } - } - case blas.Lower: - for i := 0; i < a.N; i++ { - for j := 0; j <= i; j++ { - t.Data[i+j*t.Stride] = a.Data[i*a.Stride+j] - } - } - } -} - -// From fills the receiver with elements from a. The receiver -// must have the same dimensions and uplo as a and have adequate -// backing data storage. -func (t Hermitian) From(a HermitianCols) { - if t.N != a.N { - panic("cblas128: mismatched dimension") - } - if t.Uplo != a.Uplo { - panic("cblas128: mismatched BLAS uplo") - } - switch a.Uplo { - default: - panic("cblas128: bad BLAS uplo") - case blas.Upper: - for i := 0; i < a.N; i++ { - for j := i; j < a.N; j++ { - t.Data[i*t.Stride+j] = a.Data[i+j*a.Stride] - } - } - case blas.Lower: - for i := 0; i < a.N; i++ { - for j := 0; j <= i; j++ { - t.Data[i*t.Stride+j] = a.Data[i+j*a.Stride] - } - } - } -} - -// HermitianBandCols represents an Hermitian matrix using the band column-major storage scheme. -type HermitianBandCols HermitianBand - -// From fills the receiver with elements from a. The receiver -// must have the same dimensions, bandwidth and uplo as a and -// have adequate backing data storage. 
-func (t HermitianBandCols) From(a HermitianBand) { - if t.N != a.N { - panic("cblas128: mismatched dimension") - } - if t.K != a.K { - panic("cblas128: mismatched bandwidth") - } - if a.Stride < a.K+1 { - panic("cblas128: short stride for source") - } - if t.Stride < t.K+1 { - panic("cblas128: short stride for destination") - } - if t.Uplo != a.Uplo { - panic("cblas128: mismatched BLAS uplo") - } - dst := BandCols{ - Rows: t.N, Cols: t.N, - Stride: t.Stride, - Data: t.Data, - } - src := Band{ - Rows: a.N, Cols: a.N, - Stride: a.Stride, - Data: a.Data, - } - switch a.Uplo { - default: - panic("cblas128: bad BLAS uplo") - case blas.Upper: - dst.KU = t.K - src.KU = a.K - case blas.Lower: - dst.KL = t.K - src.KL = a.K - } - dst.From(src) -} - -// From fills the receiver with elements from a. The receiver -// must have the same dimensions, bandwidth and uplo as a and -// have adequate backing data storage. -func (t HermitianBand) From(a HermitianBandCols) { - if t.N != a.N { - panic("cblas128: mismatched dimension") - } - if t.K != a.K { - panic("cblas128: mismatched bandwidth") - } - if a.Stride < a.K+1 { - panic("cblas128: short stride for source") - } - if t.Stride < t.K+1 { - panic("cblas128: short stride for destination") - } - if t.Uplo != a.Uplo { - panic("cblas128: mismatched BLAS uplo") - } - dst := Band{ - Rows: t.N, Cols: t.N, - Stride: t.Stride, - Data: t.Data, - } - src := BandCols{ - Rows: a.N, Cols: a.N, - Stride: a.Stride, - Data: a.Data, - } - switch a.Uplo { - default: - panic("cblas128: bad BLAS uplo") - case blas.Upper: - dst.KU = t.K - src.KU = a.K - case blas.Lower: - dst.KL = t.K - src.KL = a.K - } - dst.From(src) -} diff --git a/vendor/gonum.org/v1/gonum/blas/cblas128/conv_symmetric.go b/vendor/gonum.org/v1/gonum/blas/cblas128/conv_symmetric.go deleted file mode 100644 index f1bf40c20..000000000 --- a/vendor/gonum.org/v1/gonum/blas/cblas128/conv_symmetric.go +++ /dev/null @@ -1,155 +0,0 @@ -// Code generated by "go generate gonum.org/v1/gonum/blas”; DO NOT EDIT. - -// Copyright ©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cblas128 - -import "gonum.org/v1/gonum/blas" - -// SymmetricCols represents a matrix using the conventional column-major storage scheme. -type SymmetricCols Symmetric - -// From fills the receiver with elements from a. The receiver -// must have the same dimensions and uplo as a and have adequate -// backing data storage. -func (t SymmetricCols) From(a Symmetric) { - if t.N != a.N { - panic("cblas128: mismatched dimension") - } - if t.Uplo != a.Uplo { - panic("cblas128: mismatched BLAS uplo") - } - switch a.Uplo { - default: - panic("cblas128: bad BLAS uplo") - case blas.Upper: - for i := 0; i < a.N; i++ { - for j := i; j < a.N; j++ { - t.Data[i+j*t.Stride] = a.Data[i*a.Stride+j] - } - } - case blas.Lower: - for i := 0; i < a.N; i++ { - for j := 0; j <= i; j++ { - t.Data[i+j*t.Stride] = a.Data[i*a.Stride+j] - } - } - } -} - -// From fills the receiver with elements from a. The receiver -// must have the same dimensions and uplo as a and have adequate -// backing data storage. 
-func (t Symmetric) From(a SymmetricCols) { - if t.N != a.N { - panic("cblas128: mismatched dimension") - } - if t.Uplo != a.Uplo { - panic("cblas128: mismatched BLAS uplo") - } - switch a.Uplo { - default: - panic("cblas128: bad BLAS uplo") - case blas.Upper: - for i := 0; i < a.N; i++ { - for j := i; j < a.N; j++ { - t.Data[i*t.Stride+j] = a.Data[i+j*a.Stride] - } - } - case blas.Lower: - for i := 0; i < a.N; i++ { - for j := 0; j <= i; j++ { - t.Data[i*t.Stride+j] = a.Data[i+j*a.Stride] - } - } - } -} - -// SymmetricBandCols represents a symmetric matrix using the band column-major storage scheme. -type SymmetricBandCols SymmetricBand - -// From fills the receiver with elements from a. The receiver -// must have the same dimensions, bandwidth and uplo as a and -// have adequate backing data storage. -func (t SymmetricBandCols) From(a SymmetricBand) { - if t.N != a.N { - panic("cblas128: mismatched dimension") - } - if t.K != a.K { - panic("cblas128: mismatched bandwidth") - } - if a.Stride < a.K+1 { - panic("cblas128: short stride for source") - } - if t.Stride < t.K+1 { - panic("cblas128: short stride for destination") - } - if t.Uplo != a.Uplo { - panic("cblas128: mismatched BLAS uplo") - } - dst := BandCols{ - Rows: t.N, Cols: t.N, - Stride: t.Stride, - Data: t.Data, - } - src := Band{ - Rows: a.N, Cols: a.N, - Stride: a.Stride, - Data: a.Data, - } - switch a.Uplo { - default: - panic("cblas128: bad BLAS uplo") - case blas.Upper: - dst.KU = t.K - src.KU = a.K - case blas.Lower: - dst.KL = t.K - src.KL = a.K - } - dst.From(src) -} - -// From fills the receiver with elements from a. The receiver -// must have the same dimensions, bandwidth and uplo as a and -// have adequate backing data storage. -func (t SymmetricBand) From(a SymmetricBandCols) { - if t.N != a.N { - panic("cblas128: mismatched dimension") - } - if t.K != a.K { - panic("cblas128: mismatched bandwidth") - } - if a.Stride < a.K+1 { - panic("cblas128: short stride for source") - } - if t.Stride < t.K+1 { - panic("cblas128: short stride for destination") - } - if t.Uplo != a.Uplo { - panic("cblas128: mismatched BLAS uplo") - } - dst := Band{ - Rows: t.N, Cols: t.N, - Stride: t.Stride, - Data: t.Data, - } - src := BandCols{ - Rows: a.N, Cols: a.N, - Stride: a.Stride, - Data: a.Data, - } - switch a.Uplo { - default: - panic("cblas128: bad BLAS uplo") - case blas.Upper: - dst.KU = t.K - src.KU = a.K - case blas.Lower: - dst.KL = t.K - src.KL = a.K - } - dst.From(src) -} diff --git a/vendor/gonum.org/v1/gonum/blas/cblas128/doc.go b/vendor/gonum.org/v1/gonum/blas/cblas128/doc.go deleted file mode 100644 index 09719b19e..000000000 --- a/vendor/gonum.org/v1/gonum/blas/cblas128/doc.go +++ /dev/null @@ -1,6 +0,0 @@ -// Copyright ©2017 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package cblas128 provides a simple interface to the complex128 BLAS API. -package cblas128 // import "gonum.org/v1/gonum/blas/cblas128" diff --git a/vendor/gonum.org/v1/gonum/blas/doc.go b/vendor/gonum.org/v1/gonum/blas/doc.go deleted file mode 100644 index ea4b16c90..000000000 --- a/vendor/gonum.org/v1/gonum/blas/doc.go +++ /dev/null @@ -1,108 +0,0 @@ -// Copyright ©2017 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package blas provides interfaces for the BLAS linear algebra standard. 
- -All methods must perform appropriate parameter checking and panic if -provided parameters that do not conform to the requirements specified -by the BLAS standard. - -Quick Reference Guide to the BLAS from http://www.netlib.org/lapack/lug/node145.html - -This version is modified to remove the "order" option. All matrix operations are -on row-order matrices. - -Level 1 BLAS - - dim scalar vector vector scalars 5-element prefixes - struct - - _rotg ( a, b ) S, D - _rotmg( d1, d2, a, b ) S, D - _rot ( n, x, incX, y, incY, c, s ) S, D - _rotm ( n, x, incX, y, incY, param ) S, D - _swap ( n, x, incX, y, incY ) S, D, C, Z - _scal ( n, alpha, x, incX ) S, D, C, Z, Cs, Zd - _copy ( n, x, incX, y, incY ) S, D, C, Z - _axpy ( n, alpha, x, incX, y, incY ) S, D, C, Z - _dot ( n, x, incX, y, incY ) S, D, Ds - _dotu ( n, x, incX, y, incY ) C, Z - _dotc ( n, x, incX, y, incY ) C, Z - __dot ( n, alpha, x, incX, y, incY ) Sds - _nrm2 ( n, x, incX ) S, D, Sc, Dz - _asum ( n, x, incX ) S, D, Sc, Dz - I_amax( n, x, incX ) s, d, c, z - -Level 2 BLAS - - options dim b-width scalar matrix vector scalar vector prefixes - - _gemv ( trans, m, n, alpha, a, lda, x, incX, beta, y, incY ) S, D, C, Z - _gbmv ( trans, m, n, kL, kU, alpha, a, lda, x, incX, beta, y, incY ) S, D, C, Z - _hemv ( uplo, n, alpha, a, lda, x, incX, beta, y, incY ) C, Z - _hbmv ( uplo, n, k, alpha, a, lda, x, incX, beta, y, incY ) C, Z - _hpmv ( uplo, n, alpha, ap, x, incX, beta, y, incY ) C, Z - _symv ( uplo, n, alpha, a, lda, x, incX, beta, y, incY ) S, D - _sbmv ( uplo, n, k, alpha, a, lda, x, incX, beta, y, incY ) S, D - _spmv ( uplo, n, alpha, ap, x, incX, beta, y, incY ) S, D - _trmv ( uplo, trans, diag, n, a, lda, x, incX ) S, D, C, Z - _tbmv ( uplo, trans, diag, n, k, a, lda, x, incX ) S, D, C, Z - _tpmv ( uplo, trans, diag, n, ap, x, incX ) S, D, C, Z - _trsv ( uplo, trans, diag, n, a, lda, x, incX ) S, D, C, Z - _tbsv ( uplo, trans, diag, n, k, a, lda, x, incX ) S, D, C, Z - _tpsv ( uplo, trans, diag, n, ap, x, incX ) S, D, C, Z - - options dim scalar vector vector matrix prefixes - - _ger ( m, n, alpha, x, incX, y, incY, a, lda ) S, D - _geru ( m, n, alpha, x, incX, y, incY, a, lda ) C, Z - _gerc ( m, n, alpha, x, incX, y, incY, a, lda ) C, Z - _her ( uplo, n, alpha, x, incX, a, lda ) C, Z - _hpr ( uplo, n, alpha, x, incX, ap ) C, Z - _her2 ( uplo, n, alpha, x, incX, y, incY, a, lda ) C, Z - _hpr2 ( uplo, n, alpha, x, incX, y, incY, ap ) C, Z - _syr ( uplo, n, alpha, x, incX, a, lda ) S, D - _spr ( uplo, n, alpha, x, incX, ap ) S, D - _syr2 ( uplo, n, alpha, x, incX, y, incY, a, lda ) S, D - _spr2 ( uplo, n, alpha, x, incX, y, incY, ap ) S, D - -Level 3 BLAS - - options dim scalar matrix matrix scalar matrix prefixes - - _gemm ( transA, transB, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc ) S, D, C, Z - _symm ( side, uplo, m, n, alpha, a, lda, b, ldb, beta, c, ldc ) S, D, C, Z - _hemm ( side, uplo, m, n, alpha, a, lda, b, ldb, beta, c, ldc ) C, Z - _syrk ( uplo, trans, n, k, alpha, a, lda, beta, c, ldc ) S, D, C, Z - _herk ( uplo, trans, n, k, alpha, a, lda, beta, c, ldc ) C, Z - _syr2k( uplo, trans, n, k, alpha, a, lda, b, ldb, beta, c, ldc ) S, D, C, Z - _her2k( uplo, trans, n, k, alpha, a, lda, b, ldb, beta, c, ldc ) C, Z - _trmm ( side, uplo, transA, diag, m, n, alpha, a, lda, b, ldb ) S, D, C, Z - _trsm ( side, uplo, transA, diag, m, n, alpha, a, lda, b, ldb ) S, D, C, Z - -Meaning of prefixes - - S - float32 C - complex64 - D - float64 Z - complex128 - -Matrix types - - GE - GEneral GB - General Band - SY - SYmmetric SB - 
Symmetric Band SP - Symmetric Packed - HE - HErmitian HB - Hermitian Band HP - Hermitian Packed - TR - TRiangular TB - Triangular Band TP - Triangular Packed - -Options - - trans = NoTrans, Trans, ConjTrans - uplo = Upper, Lower - diag = Nonunit, Unit - side = Left, Right (A or op(A) on the left, or A or op(A) on the right) - -For real matrices, Trans and ConjTrans have the same meaning. -For Hermitian matrices, trans = Trans is not allowed. -For complex symmetric matrices, trans = ConjTrans is not allowed. -*/ -package blas // import "gonum.org/v1/gonum/blas" diff --git a/vendor/gonum.org/v1/gonum/blas/gonum/dgemm.go b/vendor/gonum.org/v1/gonum/blas/gonum/dgemm.go deleted file mode 100644 index ec3fcc61c..000000000 --- a/vendor/gonum.org/v1/gonum/blas/gonum/dgemm.go +++ /dev/null @@ -1,314 +0,0 @@ -// Copyright ©2014 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import ( - "runtime" - "sync" - - "gonum.org/v1/gonum/blas" - "gonum.org/v1/gonum/internal/asm/f64" -) - -// Dgemm performs one of the matrix-matrix operations -// C = alpha * A * B + beta * C -// C = alpha * A^T * B + beta * C -// C = alpha * A * B^T + beta * C -// C = alpha * A^T * B^T + beta * C -// where A is an m×k or k×m dense matrix, B is an n×k or k×n dense matrix, C is -// an m×n matrix, and alpha and beta are scalars. tA and tB specify whether A or -// B are transposed. -func (Implementation) Dgemm(tA, tB blas.Transpose, m, n, k int, alpha float64, a []float64, lda int, b []float64, ldb int, beta float64, c []float64, ldc int) { - switch tA { - default: - panic(badTranspose) - case blas.NoTrans, blas.Trans, blas.ConjTrans: - } - switch tB { - default: - panic(badTranspose) - case blas.NoTrans, blas.Trans, blas.ConjTrans: - } - if m < 0 { - panic(mLT0) - } - if n < 0 { - panic(nLT0) - } - if k < 0 { - panic(kLT0) - } - aTrans := tA == blas.Trans || tA == blas.ConjTrans - if aTrans { - if lda < max(1, m) { - panic(badLdA) - } - } else { - if lda < max(1, k) { - panic(badLdA) - } - } - bTrans := tB == blas.Trans || tB == blas.ConjTrans - if bTrans { - if ldb < max(1, k) { - panic(badLdB) - } - } else { - if ldb < max(1, n) { - panic(badLdB) - } - } - if ldc < max(1, n) { - panic(badLdC) - } - - // Quick return if possible. - if m == 0 || n == 0 { - return - } - - // For zero matrix size the following slice length checks are trivially satisfied. - if aTrans { - if len(a) < (k-1)*lda+m { - panic(shortA) - } - } else { - if len(a) < (m-1)*lda+k { - panic(shortA) - } - } - if bTrans { - if len(b) < (n-1)*ldb+k { - panic(shortB) - } - } else { - if len(b) < (k-1)*ldb+n { - panic(shortB) - } - } - if len(c) < (m-1)*ldc+n { - panic(shortC) - } - - // Quick return if possible. 
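- // When alpha == 0 or k == 0 the alpha*A*B term vanishes, and beta == 1
- // leaves C unchanged, so the existing contents of c are already the answer.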
- if (alpha == 0 || k == 0) && beta == 1 { - return - } - - // scale c - if beta != 1 { - if beta == 0 { - for i := 0; i < m; i++ { - ctmp := c[i*ldc : i*ldc+n] - for j := range ctmp { - ctmp[j] = 0 - } - } - } else { - for i := 0; i < m; i++ { - ctmp := c[i*ldc : i*ldc+n] - for j := range ctmp { - ctmp[j] *= beta - } - } - } - } - - dgemmParallel(aTrans, bTrans, m, n, k, a, lda, b, ldb, c, ldc, alpha) -} - -func dgemmParallel(aTrans, bTrans bool, m, n, k int, a []float64, lda int, b []float64, ldb int, c []float64, ldc int, alpha float64) { - // dgemmParallel computes a parallel matrix multiplication by partitioning - // a and b into sub-blocks, and updating c with the multiplication of the sub-blocks. - // In all cases, - // A = [ A_11 A_12 ... A_1j - // A_21 A_22 ... A_2j - // ... - // A_i1 A_i2 ... A_ij] - // - // and same for B. All of the submatrix sizes are blockSize×blockSize except - // at the edges. - // - // In all cases, there is one dimension for each matrix along which - // C must be updated sequentially. - // Cij = \sum_k Aik Bkj, (A * B) - // Cij = \sum_k Aki Bkj, (A^T * B) - // Cij = \sum_k Aik Bjk, (A * B^T) - // Cij = \sum_k Aki Bjk, (A^T * B^T) - // - // This code computes one {i, j} block sequentially along the k dimension, - // and computes all of the {i, j} blocks concurrently. This - // partitioning allows Cij to be updated in-place without race-conditions. - // Instead of launching a goroutine for each possible concurrent computation, - // a number of worker goroutines are created and channels are used to pass - // available and completed cases. - // - // http://alexkr.com/docs/matrixmult.pdf is a good reference on matrix-matrix - // multiplies, though this code does not copy matrices to attempt to eliminate - // cache misses. - - maxKLen := k - parBlocks := blocks(m, blockSize) * blocks(n, blockSize) - if parBlocks < minParBlock { - // The matrix multiplication is small in the dimensions where it can be - // computed concurrently. Just do it in serial. - dgemmSerial(aTrans, bTrans, m, n, k, a, lda, b, ldb, c, ldc, alpha) - return - } - - nWorkers := runtime.GOMAXPROCS(0) - if parBlocks < nWorkers { - nWorkers = parBlocks - } - // There is a tradeoff between the workers having to wait for work - // and a large buffer making operations slow. - buf := buffMul * nWorkers - if buf > parBlocks { - buf = parBlocks - } - - sendChan := make(chan subMul, buf) - - // Launch workers. A worker receives an {i, j} submatrix of c, and computes - // A_ik B_kj (or the transposed version) storing the result in c_ij. When the - // channel is finally closed, it signals to the waitgroup that it has finished - // computing.
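The worker/channel layout described in this comment is independent of the matrix arithmetic. A stripped-down, runnable sketch of the same pattern, with a hypothetical block type standing in for subMul:

package main

import (
	"fmt"
	"runtime"
	"sync"
)

// block is a stand-in for the subMul {i, j} work item used below.
type block struct{ i, j int }

func main() {
	work := make(chan block, 4) // bounded buffer, as with buffMul*nWorkers
	var wg sync.WaitGroup
	for w := 0; w < runtime.GOMAXPROCS(0); w++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for b := range work { // each worker drains the channel
				fmt.Println("processing block", b.i, b.j)
			}
		}()
	}
	for i := 0; i < 2; i++ {
		for j := 0; j < 2; j++ {
			work <- block{i, j}
		}
	}
	close(work) // closing the channel lets the workers exit
	wg.Wait()
}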
- var wg sync.WaitGroup - for i := 0; i < nWorkers; i++ { - wg.Add(1) - go func() { - defer wg.Done() - for sub := range sendChan { - i := sub.i - j := sub.j - leni := blockSize - if i+leni > m { - leni = m - i - } - lenj := blockSize - if j+lenj > n { - lenj = n - j - } - - cSub := sliceView64(c, ldc, i, j, leni, lenj) - - // Compute A_ik B_kj for all k - for k := 0; k < maxKLen; k += blockSize { - lenk := blockSize - if k+lenk > maxKLen { - lenk = maxKLen - k - } - var aSub, bSub []float64 - if aTrans { - aSub = sliceView64(a, lda, k, i, lenk, leni) - } else { - aSub = sliceView64(a, lda, i, k, leni, lenk) - } - if bTrans { - bSub = sliceView64(b, ldb, j, k, lenj, lenk) - } else { - bSub = sliceView64(b, ldb, k, j, lenk, lenj) - } - dgemmSerial(aTrans, bTrans, leni, lenj, lenk, aSub, lda, bSub, ldb, cSub, ldc, alpha) - } - } - }() - } - - // Send out all of the {i, j} subblocks for computation. - for i := 0; i < m; i += blockSize { - for j := 0; j < n; j += blockSize { - sendChan <- subMul{ - i: i, - j: j, - } - } - } - close(sendChan) - wg.Wait() -} - -// dgemmSerial is serial matrix multiply -func dgemmSerial(aTrans, bTrans bool, m, n, k int, a []float64, lda int, b []float64, ldb int, c []float64, ldc int, alpha float64) { - switch { - case !aTrans && !bTrans: - dgemmSerialNotNot(m, n, k, a, lda, b, ldb, c, ldc, alpha) - return - case aTrans && !bTrans: - dgemmSerialTransNot(m, n, k, a, lda, b, ldb, c, ldc, alpha) - return - case !aTrans && bTrans: - dgemmSerialNotTrans(m, n, k, a, lda, b, ldb, c, ldc, alpha) - return - case aTrans && bTrans: - dgemmSerialTransTrans(m, n, k, a, lda, b, ldb, c, ldc, alpha) - return - default: - panic("unreachable") - } -} - -// dgemmSerial where neither a nor b are transposed -func dgemmSerialNotNot(m, n, k int, a []float64, lda int, b []float64, ldb int, c []float64, ldc int, alpha float64) { - // This style is used instead of the literal a[i*stride+j] because it is - // approximately 5 times faster as of go 1.3. - for i := 0; i < m; i++ { - ctmp := c[i*ldc : i*ldc+n] - for l, v := range a[i*lda : i*lda+k] { - tmp := alpha * v - if tmp != 0 { - f64.AxpyUnitary(tmp, b[l*ldb:l*ldb+n], ctmp) - } - } - } -} - -// dgemmSerial where a is transposed and b is not -func dgemmSerialTransNot(m, n, k int, a []float64, lda int, b []float64, ldb int, c []float64, ldc int, alpha float64) { - // This style is used instead of the literal a[i*stride+j] because it is - // approximately 5 times faster as of go 1.3. - for l := 0; l < k; l++ { - btmp := b[l*ldb : l*ldb+n] - for i, v := range a[l*lda : l*lda+m] { - tmp := alpha * v - if tmp != 0 { - ctmp := c[i*ldc : i*ldc+n] - f64.AxpyUnitary(tmp, btmp, ctmp) - } - } - } -} - -// dgemmSerial where a is not transposed and b is -func dgemmSerialNotTrans(m, n, k int, a []float64, lda int, b []float64, ldb int, c []float64, ldc int, alpha float64) { - // This style is used instead of the literal a[i*stride+j] because it is - // approximately 5 times faster as of go 1.3. - for i := 0; i < m; i++ { - atmp := a[i*lda : i*lda+k] - ctmp := c[i*ldc : i*ldc+n] - for j := 0; j < n; j++ { - ctmp[j] += alpha * f64.DotUnitary(atmp, b[j*ldb:j*ldb+k]) - } - } -} - -// dgemmSerial where both are transposed -func dgemmSerialTransTrans(m, n, k int, a []float64, lda int, b []float64, ldb int, c []float64, ldc int, alpha float64) { - // This style is used instead of the literal a[i*stride+j] because it is - // approximately 5 times faster as of go 1.3.
- for l := 0; l < k; l++ { - for i, v := range a[l*lda : l*lda+m] { - tmp := alpha * v - if tmp != 0 { - ctmp := c[i*ldc : i*ldc+n] - f64.AxpyInc(tmp, b[l:], ctmp, uintptr(n), uintptr(ldb), 1, 0, 0) - } - } - } -} - -func sliceView64(a []float64, lda, i, j, r, c int) []float64 { - return a[i*lda+j : (i+r-1)*lda+j+c] -} diff --git a/vendor/gonum.org/v1/gonum/blas/gonum/doc.go b/vendor/gonum.org/v1/gonum/blas/gonum/doc.go deleted file mode 100644 index 3f4b6c1d0..000000000 --- a/vendor/gonum.org/v1/gonum/blas/gonum/doc.go +++ /dev/null @@ -1,88 +0,0 @@ -// Copyright ©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Ensure changes made to blas/native are reflected in blas/cgo where relevant. - -/* -Package gonum is a Go implementation of the BLAS API. This implementation -panics when the input arguments are invalid as per the standard, for example -if a vector increment is zero. Note that the treatment of NaN values -is not specified, and differs among the BLAS implementations. -gonum.org/v1/gonum/blas/blas64 provides helpful wrapper functions to the BLAS -interface. The rest of this text describes the layout of the data for the input types. - -Note that in the function documentation, x[i] refers to the i^th element -of the vector, which will be different from the i^th element of the slice if -incX != 1. - -See http://www.netlib.org/lapack/explore-html/d4/de1/_l_i_c_e_n_s_e_source.html -for more license information. - -Vector arguments are effectively strided slices. They have two input arguments, -a number of elements, n, and an increment, incX. The increment specifies the -distance between elements of the vector. The actual Go slice may be longer -than necessary. -The increment may be positive or negative, except in functions with only -a single vector argument where the increment may only be positive. If the increment -is negative, s[0] is the last element in the slice. Note that this is not the same -as counting backward from the end of the slice, as len(s) may be longer than -necessary. So, for example, if n = 5 and incX = 3, the elements of s are - [0 * * 1 * * 2 * * 3 * * 4 * * * ...] -where ∗ elements are never accessed. If incX = -3, the same elements are -accessed, just in reverse order (4, 3, 2, 1, 0). - -Dense matrices are specified by a number of rows, a number of columns, and a stride. -The stride specifies the number of entries in the slice between the first element -of successive rows. The stride must be at least as large as the number of columns -but may be longer. - [a00 ... a0n a0* ... a1stride-1 a21 ... amn am* ... amstride-1] -Thus, dense[i*ld + j] refers to the {i, j}th element of the matrix. - -Symmetric and triangular matrices (non-packed) are stored identically to Dense, -except that only elements in one triangle of the matrix are accessed. - -Packed symmetric and packed triangular matrices are laid out with the entries -condensed such that all of the unreferenced elements are removed. So, the upper triangular -matrix - [ - 1 2 3 - 0 4 5 - 0 0 6 - ] -and the lower-triangular matrix - [ - 1 0 0 - 2 3 0 - 4 5 6 - ] -will both be compacted as [1 2 3 4 5 6]. The (i, j) element of the original -dense matrix can be found at element i*n - (i-1)*i/2 + j for upper triangular, -and at element i * (i+1) /2 + j for lower triangular. - -Banded matrices are laid out in a compact format, constructed by removing the -zeros in the rows and aligning the diagonals. 
For example, the matrix - [ - 1 2 3 0 0 0 - 4 5 6 7 0 0 - 0 8 9 10 11 0 - 0 0 12 13 14 15 - 0 0 0 16 17 18 - 0 0 0 0 19 20 - ] - -implicitly becomes (∗ entries are never accessed) - [ - * 1 2 3 - 4 5 6 7 - 8 9 10 11 - 12 13 14 15 - 16 17 18 * - 19 20 * * - ] -which is given to the BLAS routine as [∗ 1 2 3 4 ...]. - -See http://www.crest.iu.edu/research/mtl/reference/html/banded.html -for more information -*/ -package gonum // import "gonum.org/v1/gonum/blas/gonum" diff --git a/vendor/gonum.org/v1/gonum/blas/gonum/errors.go b/vendor/gonum.org/v1/gonum/blas/gonum/errors.go deleted file mode 100644 index e98575d0f..000000000 --- a/vendor/gonum.org/v1/gonum/blas/gonum/errors.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright ©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -// Panic strings used during parameter checks. -// This list is duplicated in netlib/blas/netlib. Keep in sync. -const ( - zeroIncX = "blas: zero x index increment" - zeroIncY = "blas: zero y index increment" - - mLT0 = "blas: m < 0" - nLT0 = "blas: n < 0" - kLT0 = "blas: k < 0" - kLLT0 = "blas: kL < 0" - kULT0 = "blas: kU < 0" - - badUplo = "blas: illegal triangle" - badTranspose = "blas: illegal transpose" - badDiag = "blas: illegal diagonal" - badSide = "blas: illegal side" - badFlag = "blas: illegal rotm flag" - - badLdA = "blas: bad leading dimension of A" - badLdB = "blas: bad leading dimension of B" - badLdC = "blas: bad leading dimension of C" - - shortX = "blas: insufficient length of x" - shortY = "blas: insufficient length of y" - shortAP = "blas: insufficient length of ap" - shortA = "blas: insufficient length of a" - shortB = "blas: insufficient length of b" - shortC = "blas: insufficient length of c" -) diff --git a/vendor/gonum.org/v1/gonum/blas/gonum/gemv.go b/vendor/gonum.org/v1/gonum/blas/gonum/gemv.go deleted file mode 100644 index 9b9a1beb0..000000000 --- a/vendor/gonum.org/v1/gonum/blas/gonum/gemv.go +++ /dev/null @@ -1,190 +0,0 @@ -// Copyright ©2018 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import ( - "gonum.org/v1/gonum/blas" - "gonum.org/v1/gonum/internal/asm/f32" - "gonum.org/v1/gonum/internal/asm/f64" -) - -// TODO(Kunde21): Merge these methods back into level2double/level2single when Sgemv assembly kernels are merged into f32. - -// Dgemv computes -// y = alpha * A * x + beta * y if tA = blas.NoTrans -// y = alpha * A^T * x + beta * y if tA = blas.Trans or blas.ConjTrans -// where A is an m×n dense matrix, x and y are vectors, and alpha and beta are scalars. 
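A usage sketch for the Dgemv documented above, assuming the blas and blas/gonum packages in this patch are importable:

package main

import (
	"fmt"

	"gonum.org/v1/gonum/blas"
	"gonum.org/v1/gonum/blas/gonum"
)

func main() {
	var impl gonum.Implementation
	// y = 1 * A * x + 0 * y with A = [1 2; 3 4] stored row-major (lda = 2).
	a := []float64{1, 2, 3, 4}
	x := []float64{1, 1}
	y := make([]float64, 2)
	impl.Dgemv(blas.NoTrans, 2, 2, 1, a, 2, x, 1, 0, y, 1)
	fmt.Println(y) // [3 7]
}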
-func (Implementation) Dgemv(tA blas.Transpose, m, n int, alpha float64, a []float64, lda int, x []float64, incX int, beta float64, y []float64, incY int) { - if tA != blas.NoTrans && tA != blas.Trans && tA != blas.ConjTrans { - panic(badTranspose) - } - if m < 0 { - panic(mLT0) - } - if n < 0 { - panic(nLT0) - } - if lda < max(1, n) { - panic(badLdA) - } - if incX == 0 { - panic(zeroIncX) - } - if incY == 0 { - panic(zeroIncY) - } - // Set up indexes - lenX := m - lenY := n - if tA == blas.NoTrans { - lenX = n - lenY = m - } - - // Quick return if possible - if m == 0 || n == 0 { - return - } - - if (incX > 0 && (lenX-1)*incX >= len(x)) || (incX < 0 && (1-lenX)*incX >= len(x)) { - panic(shortX) - } - if (incY > 0 && (lenY-1)*incY >= len(y)) || (incY < 0 && (1-lenY)*incY >= len(y)) { - panic(shortY) - } - if len(a) < lda*(m-1)+n { - panic(shortA) - } - - // Quick return if possible - if alpha == 0 && beta == 1 { - return - } - - if alpha == 0 { - // First form y = beta * y - if incY > 0 { - Implementation{}.Dscal(lenY, beta, y, incY) - } else { - Implementation{}.Dscal(lenY, beta, y, -incY) - } - return - } - - // Form y = alpha * A * x + y - if tA == blas.NoTrans { - f64.GemvN(uintptr(m), uintptr(n), alpha, a, uintptr(lda), x, uintptr(incX), beta, y, uintptr(incY)) - return - } - // Cases where a is transposed. - f64.GemvT(uintptr(m), uintptr(n), alpha, a, uintptr(lda), x, uintptr(incX), beta, y, uintptr(incY)) -} - -// Sgemv computes -// y = alpha * A * x + beta * y if tA = blas.NoTrans -// y = alpha * A^T * x + beta * y if tA = blas.Trans or blas.ConjTrans -// where A is an m×n dense matrix, x and y are vectors, and alpha and beta are scalars. -// -// Float32 implementations are autogenerated and not directly tested. -func (Implementation) Sgemv(tA blas.Transpose, m, n int, alpha float32, a []float32, lda int, x []float32, incX int, beta float32, y []float32, incY int) { - if tA != blas.NoTrans && tA != blas.Trans && tA != blas.ConjTrans { - panic(badTranspose) - } - if m < 0 { - panic(mLT0) - } - if n < 0 { - panic(nLT0) - } - if lda < max(1, n) { - panic(badLdA) - } - if incX == 0 { - panic(zeroIncX) - } - if incY == 0 { - panic(zeroIncY) - } - - // Quick return if possible. - if m == 0 || n == 0 { - return - } - - // Set up indexes - lenX := m - lenY := n - if tA == blas.NoTrans { - lenX = n - lenY = m - } - if (incX > 0 && (lenX-1)*incX >= len(x)) || (incX < 0 && (1-lenX)*incX >= len(x)) { - panic(shortX) - } - if (incY > 0 && (lenY-1)*incY >= len(y)) || (incY < 0 && (1-lenY)*incY >= len(y)) { - panic(shortY) - } - if len(a) < lda*(m-1)+n { - panic(shortA) - } - - // Quick return if possible. - if alpha == 0 && beta == 1 { - return - } - - // First form y = beta * y - if incY > 0 { - Implementation{}.Sscal(lenY, beta, y, incY) - } else { - Implementation{}.Sscal(lenY, beta, y, -incY) - } - - if alpha == 0 { - return - } - - var kx, ky int - if incX < 0 { - kx = -(lenX - 1) * incX - } - if incY < 0 { - ky = -(lenY - 1) * incY - } - - // Form y = alpha * A * x + y - if tA == blas.NoTrans { - if incX == 1 && incY == 1 { - for i := 0; i < m; i++ { - y[i] += alpha * f32.DotUnitary(a[lda*i:lda*i+n], x[:n]) - } - return - } - iy := ky - for i := 0; i < m; i++ { - y[iy] += alpha * f32.DotInc(x, a[lda*i:lda*i+n], uintptr(n), uintptr(incX), 1, uintptr(kx), 0) - iy += incY - } - return - } - // Cases where a is transposed. 
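- // In the transposed cases each row i of a contributes alpha * x[i] times
- // that row to y, so the update is one axpy per row of a rather than one
- // dot product per element of y.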
- if incX == 1 && incY == 1 { - for i := 0; i < m; i++ { - tmp := alpha * x[i] - if tmp != 0 { - f32.AxpyUnitaryTo(y, tmp, a[lda*i:lda*i+n], y[:n]) - } - } - return - } - ix := kx - for i := 0; i < m; i++ { - tmp := alpha * x[ix] - if tmp != 0 { - f32.AxpyInc(tmp, a[lda*i:lda*i+n], y, uintptr(n), 1, uintptr(incY), 0, uintptr(ky)) - } - ix += incX - } -} diff --git a/vendor/gonum.org/v1/gonum/blas/gonum/gonum.go b/vendor/gonum.org/v1/gonum/blas/gonum/gonum.go deleted file mode 100644 index 8ab8d43e1..000000000 --- a/vendor/gonum.org/v1/gonum/blas/gonum/gonum.go +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright ©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:generate ./single_precision.bash - -package gonum - -import ( - "math" - - "gonum.org/v1/gonum/internal/math32" -) - -type Implementation struct{} - -// [SD]gemm behavior constants. These are kept here to keep them out of the -// way during single precision code genration. -const ( - blockSize = 64 // b x b matrix - minParBlock = 4 // minimum number of blocks needed to go parallel - buffMul = 4 // how big is the buffer relative to the number of workers -) - -// subMul is a common type shared by [SD]gemm. -type subMul struct { - i, j int // index of block -} - -func max(a, b int) int { - if a > b { - return a - } - return b -} - -func min(a, b int) int { - if a > b { - return b - } - return a -} - -// blocks returns the number of divisions of the dimension length with the given -// block size. -func blocks(dim, bsize int) int { - return (dim + bsize - 1) / bsize -} - -// dcabs1 returns |real(z)|+|imag(z)|. -func dcabs1(z complex128) float64 { - return math.Abs(real(z)) + math.Abs(imag(z)) -} - -// scabs1 returns |real(z)|+|imag(z)|. -func scabs1(z complex64) float32 { - return math32.Abs(real(z)) + math32.Abs(imag(z)) -} diff --git a/vendor/gonum.org/v1/gonum/blas/gonum/level1cmplx128.go b/vendor/gonum.org/v1/gonum/blas/gonum/level1cmplx128.go deleted file mode 100644 index e37bf44dd..000000000 --- a/vendor/gonum.org/v1/gonum/blas/gonum/level1cmplx128.go +++ /dev/null @@ -1,445 +0,0 @@ -// Copyright ©2017 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import ( - "math" - - "gonum.org/v1/gonum/blas" - "gonum.org/v1/gonum/internal/asm/c128" -) - -var _ blas.Complex128Level1 = Implementation{} - -// Dzasum returns the sum of the absolute values of the elements of x -// \sum_i |Re(x[i])| + |Im(x[i])| -// Dzasum returns 0 if incX is negative. -func (Implementation) Dzasum(n int, x []complex128, incX int) float64 { - if n < 0 { - panic(nLT0) - } - if incX < 1 { - if incX == 0 { - panic(zeroIncX) - } - return 0 - } - var sum float64 - if incX == 1 { - if len(x) < n { - panic(shortX) - } - for _, v := range x[:n] { - sum += dcabs1(v) - } - return sum - } - if (n-1)*incX >= len(x) { - panic(shortX) - } - for i := 0; i < n; i++ { - v := x[i*incX] - sum += dcabs1(v) - } - return sum -} - -// Dznrm2 computes the Euclidean norm of the complex vector x, -// ‖x‖_2 = sqrt(\sum_i x[i] * conj(x[i])). -// This function returns 0 if incX is negative. 
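// Editor's sketch (illustrative only, not part of the vendored diff):
// Dzasum uses the dcabs1 helper above, i.e. it sums |Re(x[i])|+|Im(x[i])|
// — the BLAS "absolute value" for complex data, not the modulus.
package main

import (
	"fmt"

	"gonum.org/v1/gonum/blas/gonum"
)

func main() {
	var impl gonum.Implementation
	x := []complex128{3 - 4i, -1 + 2i}
	fmt.Println(impl.Dzasum(2, x, 1)) // (3+4) + (1+2) = 10
}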
-func (Implementation) Dznrm2(n int, x []complex128, incX int) float64 { - if incX < 1 { - if incX == 0 { - panic(zeroIncX) - } - return 0 - } - if n < 1 { - if n == 0 { - return 0 - } - panic(nLT0) - } - if (n-1)*incX >= len(x) { - panic(shortX) - } - var ( - scale float64 - ssq float64 = 1 - ) - if incX == 1 { - for _, v := range x[:n] { - re, im := math.Abs(real(v)), math.Abs(imag(v)) - if re != 0 { - if re > scale { - ssq = 1 + ssq*(scale/re)*(scale/re) - scale = re - } else { - ssq += (re / scale) * (re / scale) - } - } - if im != 0 { - if im > scale { - ssq = 1 + ssq*(scale/im)*(scale/im) - scale = im - } else { - ssq += (im / scale) * (im / scale) - } - } - } - if math.IsInf(scale, 1) { - return math.Inf(1) - } - return scale * math.Sqrt(ssq) - } - for ix := 0; ix < n*incX; ix += incX { - re, im := math.Abs(real(x[ix])), math.Abs(imag(x[ix])) - if re != 0 { - if re > scale { - ssq = 1 + ssq*(scale/re)*(scale/re) - scale = re - } else { - ssq += (re / scale) * (re / scale) - } - } - if im != 0 { - if im > scale { - ssq = 1 + ssq*(scale/im)*(scale/im) - scale = im - } else { - ssq += (im / scale) * (im / scale) - } - } - } - if math.IsInf(scale, 1) { - return math.Inf(1) - } - return scale * math.Sqrt(ssq) -} - -// Izamax returns the index of the first element of x having largest |Re(·)|+|Im(·)|. -// Izamax returns -1 if n is 0 or incX is negative. -func (Implementation) Izamax(n int, x []complex128, incX int) int { - if incX < 1 { - if incX == 0 { - panic(zeroIncX) - } - // Return invalid index. - return -1 - } - if n < 1 { - if n == 0 { - // Return invalid index. - return -1 - } - panic(nLT0) - } - if len(x) <= (n-1)*incX { - panic(shortX) - } - idx := 0 - max := dcabs1(x[0]) - if incX == 1 { - for i, v := range x[1:n] { - absV := dcabs1(v) - if absV > max { - max = absV - idx = i + 1 - } - } - return idx - } - ix := incX - for i := 1; i < n; i++ { - absV := dcabs1(x[ix]) - if absV > max { - max = absV - idx = i - } - ix += incX - } - return idx -} - -// Zaxpy adds alpha times x to y: -// y[i] += alpha * x[i] for all i -func (Implementation) Zaxpy(n int, alpha complex128, x []complex128, incX int, y []complex128, incY int) { - if incX == 0 { - panic(zeroIncX) - } - if incY == 0 { - panic(zeroIncY) - } - if n < 1 { - if n == 0 { - return - } - panic(nLT0) - } - if (incX > 0 && (n-1)*incX >= len(x)) || (incX < 0 && (1-n)*incX >= len(x)) { - panic(shortX) - } - if (incY > 0 && (n-1)*incY >= len(y)) || (incY < 0 && (1-n)*incY >= len(y)) { - panic(shortY) - } - if alpha == 0 { - return - } - if incX == 1 && incY == 1 { - c128.AxpyUnitary(alpha, x[:n], y[:n]) - return - } - var ix, iy int - if incX < 0 { - ix = (1 - n) * incX - } - if incY < 0 { - iy = (1 - n) * incY - } - c128.AxpyInc(alpha, x, y, uintptr(n), uintptr(incX), uintptr(incY), uintptr(ix), uintptr(iy)) -} - -// Zcopy copies the vector x to vector y. 
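// Editor's sketch (illustrative only, not part of the vendored diff): the
// scale/ssq recurrence in Dznrm2 above keeps the running sum of squares
// representable, so the norm of very large components does not overflow
// even though squaring them directly would.
package main

import (
	"fmt"

	"gonum.org/v1/gonum/blas/gonum"
)

func main() {
	var impl gonum.Implementation
	x := []complex128{complex(3e300, 4e300)}
	// (3e300)^2 overflows float64, but the scaled recurrence still
	// recovers the exact norm 5e300.
	fmt.Println(impl.Dznrm2(1, x, 1)) // 5e+300
}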
-func (Implementation) Zcopy(n int, x []complex128, incX int, y []complex128, incY int) { - if incX == 0 { - panic(zeroIncX) - } - if incY == 0 { - panic(zeroIncY) - } - if n < 1 { - if n == 0 { - return - } - panic(nLT0) - } - if (incX > 0 && (n-1)*incX >= len(x)) || (incX < 0 && (1-n)*incX >= len(x)) { - panic(shortX) - } - if (incY > 0 && (n-1)*incY >= len(y)) || (incY < 0 && (1-n)*incY >= len(y)) { - panic(shortY) - } - if incX == 1 && incY == 1 { - copy(y[:n], x[:n]) - return - } - var ix, iy int - if incX < 0 { - ix = (-n + 1) * incX - } - if incY < 0 { - iy = (-n + 1) * incY - } - for i := 0; i < n; i++ { - y[iy] = x[ix] - ix += incX - iy += incY - } -} - -// Zdotc computes the dot product -// x^H · y -// of two complex vectors x and y. -func (Implementation) Zdotc(n int, x []complex128, incX int, y []complex128, incY int) complex128 { - if incX == 0 { - panic(zeroIncX) - } - if incY == 0 { - panic(zeroIncY) - } - if n <= 0 { - if n == 0 { - return 0 - } - panic(nLT0) - } - if incX == 1 && incY == 1 { - if len(x) < n { - panic(shortX) - } - if len(y) < n { - panic(shortY) - } - return c128.DotcUnitary(x[:n], y[:n]) - } - var ix, iy int - if incX < 0 { - ix = (-n + 1) * incX - } - if incY < 0 { - iy = (-n + 1) * incY - } - if ix >= len(x) || (n-1)*incX >= len(x) { - panic(shortX) - } - if iy >= len(y) || (n-1)*incY >= len(y) { - panic(shortY) - } - return c128.DotcInc(x, y, uintptr(n), uintptr(incX), uintptr(incY), uintptr(ix), uintptr(iy)) -} - -// Zdotu computes the dot product -// x^T · y -// of two complex vectors x and y. -func (Implementation) Zdotu(n int, x []complex128, incX int, y []complex128, incY int) complex128 { - if incX == 0 { - panic(zeroIncX) - } - if incY == 0 { - panic(zeroIncY) - } - if n <= 0 { - if n == 0 { - return 0 - } - panic(nLT0) - } - if incX == 1 && incY == 1 { - if len(x) < n { - panic(shortX) - } - if len(y) < n { - panic(shortY) - } - return c128.DotuUnitary(x[:n], y[:n]) - } - var ix, iy int - if incX < 0 { - ix = (-n + 1) * incX - } - if incY < 0 { - iy = (-n + 1) * incY - } - if ix >= len(x) || (n-1)*incX >= len(x) { - panic(shortX) - } - if iy >= len(y) || (n-1)*incY >= len(y) { - panic(shortY) - } - return c128.DotuInc(x, y, uintptr(n), uintptr(incX), uintptr(incY), uintptr(ix), uintptr(iy)) -} - -// Zdscal scales the vector x by a real scalar alpha. -// Zdscal has no effect if incX < 0. -func (Implementation) Zdscal(n int, alpha float64, x []complex128, incX int) { - if incX < 1 { - if incX == 0 { - panic(zeroIncX) - } - return - } - if (n-1)*incX >= len(x) { - panic(shortX) - } - if n < 1 { - if n == 0 { - return - } - panic(nLT0) - } - if alpha == 0 { - if incX == 1 { - x = x[:n] - for i := range x { - x[i] = 0 - } - return - } - for ix := 0; ix < n*incX; ix += incX { - x[ix] = 0 - } - return - } - if incX == 1 { - x = x[:n] - for i, v := range x { - x[i] = complex(alpha*real(v), alpha*imag(v)) - } - return - } - for ix := 0; ix < n*incX; ix += incX { - v := x[ix] - x[ix] = complex(alpha*real(v), alpha*imag(v)) - } -} - -// Zscal scales the vector x by a complex scalar alpha. -// Zscal has no effect if incX < 0. 
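// Editor's sketch (illustrative only, not part of the vendored diff):
// Zdotc conjugates its first argument (x^H·y) while Zdotu does not
// (x^T·y), mirroring the reference BLAS zdotc/zdotu split. For real data
// the two agree; for complex data they differ.
package main

import (
	"fmt"

	"gonum.org/v1/gonum/blas/gonum"
)

func main() {
	var impl gonum.Implementation
	x := []complex128{1 + 1i}
	y := []complex128{1 + 1i}
	fmt.Println(impl.Zdotc(1, x, 1, y, 1)) // conj(1+1i)*(1+1i) = (2+0i)
	fmt.Println(impl.Zdotu(1, x, 1, y, 1)) // (1+1i)*(1+1i)     = (0+2i)
}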
-func (Implementation) Zscal(n int, alpha complex128, x []complex128, incX int) { - if incX < 1 { - if incX == 0 { - panic(zeroIncX) - } - return - } - if (n-1)*incX >= len(x) { - panic(shortX) - } - if n < 1 { - if n == 0 { - return - } - panic(nLT0) - } - if alpha == 0 { - if incX == 1 { - x = x[:n] - for i := range x { - x[i] = 0 - } - return - } - for ix := 0; ix < n*incX; ix += incX { - x[ix] = 0 - } - return - } - if incX == 1 { - c128.ScalUnitary(alpha, x[:n]) - return - } - c128.ScalInc(alpha, x, uintptr(n), uintptr(incX)) -} - -// Zswap exchanges the elements of two complex vectors x and y. -func (Implementation) Zswap(n int, x []complex128, incX int, y []complex128, incY int) { - if incX == 0 { - panic(zeroIncX) - } - if incY == 0 { - panic(zeroIncY) - } - if n < 1 { - if n == 0 { - return - } - panic(nLT0) - } - if (incX > 0 && (n-1)*incX >= len(x)) || (incX < 0 && (1-n)*incX >= len(x)) { - panic(shortX) - } - if (incY > 0 && (n-1)*incY >= len(y)) || (incY < 0 && (1-n)*incY >= len(y)) { - panic(shortY) - } - if incX == 1 && incY == 1 { - x = x[:n] - for i, v := range x { - x[i], y[i] = y[i], v - } - return - } - var ix, iy int - if incX < 0 { - ix = (-n + 1) * incX - } - if incY < 0 { - iy = (-n + 1) * incY - } - for i := 0; i < n; i++ { - x[ix], y[iy] = y[iy], x[ix] - ix += incX - iy += incY - } -} diff --git a/vendor/gonum.org/v1/gonum/blas/gonum/level1cmplx64.go b/vendor/gonum.org/v1/gonum/blas/gonum/level1cmplx64.go deleted file mode 100644 index ba192ea59..000000000 --- a/vendor/gonum.org/v1/gonum/blas/gonum/level1cmplx64.go +++ /dev/null @@ -1,467 +0,0 @@ -// Code generated by "go generate gonum.org/v1/gonum/blas/gonum”; DO NOT EDIT. - -// Copyright ©2017 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import ( - math "gonum.org/v1/gonum/internal/math32" - - "gonum.org/v1/gonum/blas" - "gonum.org/v1/gonum/internal/asm/c64" -) - -var _ blas.Complex64Level1 = Implementation{} - -// Scasum returns the sum of the absolute values of the elements of x -// \sum_i |Re(x[i])| + |Im(x[i])| -// Scasum returns 0 if incX is negative. -// -// Complex64 implementations are autogenerated and not directly tested. -func (Implementation) Scasum(n int, x []complex64, incX int) float32 { - if n < 0 { - panic(nLT0) - } - if incX < 1 { - if incX == 0 { - panic(zeroIncX) - } - return 0 - } - var sum float32 - if incX == 1 { - if len(x) < n { - panic(shortX) - } - for _, v := range x[:n] { - sum += scabs1(v) - } - return sum - } - if (n-1)*incX >= len(x) { - panic(shortX) - } - for i := 0; i < n; i++ { - v := x[i*incX] - sum += scabs1(v) - } - return sum -} - -// Scnrm2 computes the Euclidean norm of the complex vector x, -// ‖x‖_2 = sqrt(\sum_i x[i] * conj(x[i])). -// This function returns 0 if incX is negative. -// -// Complex64 implementations are autogenerated and not directly tested. 
-func (Implementation) Scnrm2(n int, x []complex64, incX int) float32 { - if incX < 1 { - if incX == 0 { - panic(zeroIncX) - } - return 0 - } - if n < 1 { - if n == 0 { - return 0 - } - panic(nLT0) - } - if (n-1)*incX >= len(x) { - panic(shortX) - } - var ( - scale float32 - ssq float32 = 1 - ) - if incX == 1 { - for _, v := range x[:n] { - re, im := math.Abs(real(v)), math.Abs(imag(v)) - if re != 0 { - if re > scale { - ssq = 1 + ssq*(scale/re)*(scale/re) - scale = re - } else { - ssq += (re / scale) * (re / scale) - } - } - if im != 0 { - if im > scale { - ssq = 1 + ssq*(scale/im)*(scale/im) - scale = im - } else { - ssq += (im / scale) * (im / scale) - } - } - } - if math.IsInf(scale, 1) { - return math.Inf(1) - } - return scale * math.Sqrt(ssq) - } - for ix := 0; ix < n*incX; ix += incX { - re, im := math.Abs(real(x[ix])), math.Abs(imag(x[ix])) - if re != 0 { - if re > scale { - ssq = 1 + ssq*(scale/re)*(scale/re) - scale = re - } else { - ssq += (re / scale) * (re / scale) - } - } - if im != 0 { - if im > scale { - ssq = 1 + ssq*(scale/im)*(scale/im) - scale = im - } else { - ssq += (im / scale) * (im / scale) - } - } - } - if math.IsInf(scale, 1) { - return math.Inf(1) - } - return scale * math.Sqrt(ssq) -} - -// Icamax returns the index of the first element of x having largest |Re(·)|+|Im(·)|. -// Icamax returns -1 if n is 0 or incX is negative. -// -// Complex64 implementations are autogenerated and not directly tested. -func (Implementation) Icamax(n int, x []complex64, incX int) int { - if incX < 1 { - if incX == 0 { - panic(zeroIncX) - } - // Return invalid index. - return -1 - } - if n < 1 { - if n == 0 { - // Return invalid index. - return -1 - } - panic(nLT0) - } - if len(x) <= (n-1)*incX { - panic(shortX) - } - idx := 0 - max := scabs1(x[0]) - if incX == 1 { - for i, v := range x[1:n] { - absV := scabs1(v) - if absV > max { - max = absV - idx = i + 1 - } - } - return idx - } - ix := incX - for i := 1; i < n; i++ { - absV := scabs1(x[ix]) - if absV > max { - max = absV - idx = i - } - ix += incX - } - return idx -} - -// Caxpy adds alpha times x to y: -// y[i] += alpha * x[i] for all i -// -// Complex64 implementations are autogenerated and not directly tested. -func (Implementation) Caxpy(n int, alpha complex64, x []complex64, incX int, y []complex64, incY int) { - if incX == 0 { - panic(zeroIncX) - } - if incY == 0 { - panic(zeroIncY) - } - if n < 1 { - if n == 0 { - return - } - panic(nLT0) - } - if (incX > 0 && (n-1)*incX >= len(x)) || (incX < 0 && (1-n)*incX >= len(x)) { - panic(shortX) - } - if (incY > 0 && (n-1)*incY >= len(y)) || (incY < 0 && (1-n)*incY >= len(y)) { - panic(shortY) - } - if alpha == 0 { - return - } - if incX == 1 && incY == 1 { - c64.AxpyUnitary(alpha, x[:n], y[:n]) - return - } - var ix, iy int - if incX < 0 { - ix = (1 - n) * incX - } - if incY < 0 { - iy = (1 - n) * incY - } - c64.AxpyInc(alpha, x, y, uintptr(n), uintptr(incX), uintptr(incY), uintptr(ix), uintptr(iy)) -} - -// Ccopy copies the vector x to vector y. -// -// Complex64 implementations are autogenerated and not directly tested. 
-func (Implementation) Ccopy(n int, x []complex64, incX int, y []complex64, incY int) { - if incX == 0 { - panic(zeroIncX) - } - if incY == 0 { - panic(zeroIncY) - } - if n < 1 { - if n == 0 { - return - } - panic(nLT0) - } - if (incX > 0 && (n-1)*incX >= len(x)) || (incX < 0 && (1-n)*incX >= len(x)) { - panic(shortX) - } - if (incY > 0 && (n-1)*incY >= len(y)) || (incY < 0 && (1-n)*incY >= len(y)) { - panic(shortY) - } - if incX == 1 && incY == 1 { - copy(y[:n], x[:n]) - return - } - var ix, iy int - if incX < 0 { - ix = (-n + 1) * incX - } - if incY < 0 { - iy = (-n + 1) * incY - } - for i := 0; i < n; i++ { - y[iy] = x[ix] - ix += incX - iy += incY - } -} - -// Cdotc computes the dot product -// x^H · y -// of two complex vectors x and y. -// -// Complex64 implementations are autogenerated and not directly tested. -func (Implementation) Cdotc(n int, x []complex64, incX int, y []complex64, incY int) complex64 { - if incX == 0 { - panic(zeroIncX) - } - if incY == 0 { - panic(zeroIncY) - } - if n <= 0 { - if n == 0 { - return 0 - } - panic(nLT0) - } - if incX == 1 && incY == 1 { - if len(x) < n { - panic(shortX) - } - if len(y) < n { - panic(shortY) - } - return c64.DotcUnitary(x[:n], y[:n]) - } - var ix, iy int - if incX < 0 { - ix = (-n + 1) * incX - } - if incY < 0 { - iy = (-n + 1) * incY - } - if ix >= len(x) || (n-1)*incX >= len(x) { - panic(shortX) - } - if iy >= len(y) || (n-1)*incY >= len(y) { - panic(shortY) - } - return c64.DotcInc(x, y, uintptr(n), uintptr(incX), uintptr(incY), uintptr(ix), uintptr(iy)) -} - -// Cdotu computes the dot product -// x^T · y -// of two complex vectors x and y. -// -// Complex64 implementations are autogenerated and not directly tested. -func (Implementation) Cdotu(n int, x []complex64, incX int, y []complex64, incY int) complex64 { - if incX == 0 { - panic(zeroIncX) - } - if incY == 0 { - panic(zeroIncY) - } - if n <= 0 { - if n == 0 { - return 0 - } - panic(nLT0) - } - if incX == 1 && incY == 1 { - if len(x) < n { - panic(shortX) - } - if len(y) < n { - panic(shortY) - } - return c64.DotuUnitary(x[:n], y[:n]) - } - var ix, iy int - if incX < 0 { - ix = (-n + 1) * incX - } - if incY < 0 { - iy = (-n + 1) * incY - } - if ix >= len(x) || (n-1)*incX >= len(x) { - panic(shortX) - } - if iy >= len(y) || (n-1)*incY >= len(y) { - panic(shortY) - } - return c64.DotuInc(x, y, uintptr(n), uintptr(incX), uintptr(incY), uintptr(ix), uintptr(iy)) -} - -// Csscal scales the vector x by a real scalar alpha. -// Csscal has no effect if incX < 0. -// -// Complex64 implementations are autogenerated and not directly tested. -func (Implementation) Csscal(n int, alpha float32, x []complex64, incX int) { - if incX < 1 { - if incX == 0 { - panic(zeroIncX) - } - return - } - if (n-1)*incX >= len(x) { - panic(shortX) - } - if n < 1 { - if n == 0 { - return - } - panic(nLT0) - } - if alpha == 0 { - if incX == 1 { - x = x[:n] - for i := range x { - x[i] = 0 - } - return - } - for ix := 0; ix < n*incX; ix += incX { - x[ix] = 0 - } - return - } - if incX == 1 { - x = x[:n] - for i, v := range x { - x[i] = complex(alpha*real(v), alpha*imag(v)) - } - return - } - for ix := 0; ix < n*incX; ix += incX { - v := x[ix] - x[ix] = complex(alpha*real(v), alpha*imag(v)) - } -} - -// Cscal scales the vector x by a complex scalar alpha. -// Cscal has no effect if incX < 0. -// -// Complex64 implementations are autogenerated and not directly tested. 
-func (Implementation) Cscal(n int, alpha complex64, x []complex64, incX int) { - if incX < 1 { - if incX == 0 { - panic(zeroIncX) - } - return - } - if (n-1)*incX >= len(x) { - panic(shortX) - } - if n < 1 { - if n == 0 { - return - } - panic(nLT0) - } - if alpha == 0 { - if incX == 1 { - x = x[:n] - for i := range x { - x[i] = 0 - } - return - } - for ix := 0; ix < n*incX; ix += incX { - x[ix] = 0 - } - return - } - if incX == 1 { - c64.ScalUnitary(alpha, x[:n]) - return - } - c64.ScalInc(alpha, x, uintptr(n), uintptr(incX)) -} - -// Cswap exchanges the elements of two complex vectors x and y. -// -// Complex64 implementations are autogenerated and not directly tested. -func (Implementation) Cswap(n int, x []complex64, incX int, y []complex64, incY int) { - if incX == 0 { - panic(zeroIncX) - } - if incY == 0 { - panic(zeroIncY) - } - if n < 1 { - if n == 0 { - return - } - panic(nLT0) - } - if (incX > 0 && (n-1)*incX >= len(x)) || (incX < 0 && (1-n)*incX >= len(x)) { - panic(shortX) - } - if (incY > 0 && (n-1)*incY >= len(y)) || (incY < 0 && (1-n)*incY >= len(y)) { - panic(shortY) - } - if incX == 1 && incY == 1 { - x = x[:n] - for i, v := range x { - x[i], y[i] = y[i], v - } - return - } - var ix, iy int - if incX < 0 { - ix = (-n + 1) * incX - } - if incY < 0 { - iy = (-n + 1) * incY - } - for i := 0; i < n; i++ { - x[ix], y[iy] = y[iy], x[ix] - ix += incX - iy += incY - } -} diff --git a/vendor/gonum.org/v1/gonum/blas/gonum/level1float32.go b/vendor/gonum.org/v1/gonum/blas/gonum/level1float32.go deleted file mode 100644 index ee82083a6..000000000 --- a/vendor/gonum.org/v1/gonum/blas/gonum/level1float32.go +++ /dev/null @@ -1,644 +0,0 @@ -// Code generated by "go generate gonum.org/v1/gonum/blas/gonum”; DO NOT EDIT. - -// Copyright ©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import ( - math "gonum.org/v1/gonum/internal/math32" - - "gonum.org/v1/gonum/blas" - "gonum.org/v1/gonum/internal/asm/f32" -) - -var _ blas.Float32Level1 = Implementation{} - -// Snrm2 computes the Euclidean norm of a vector, -// sqrt(\sum_i x[i] * x[i]). -// This function returns 0 if incX is negative. -// -// Float32 implementations are autogenerated and not directly tested. 
-func (Implementation) Snrm2(n int, x []float32, incX int) float32 { - if incX < 1 { - if incX == 0 { - panic(zeroIncX) - } - return 0 - } - if len(x) <= (n-1)*incX { - panic(shortX) - } - if n < 2 { - if n == 1 { - return math.Abs(x[0]) - } - if n == 0 { - return 0 - } - panic(nLT0) - } - var ( - scale float32 = 0 - sumSquares float32 = 1 - ) - if incX == 1 { - x = x[:n] - for _, v := range x { - if v == 0 { - continue - } - absxi := math.Abs(v) - if math.IsNaN(absxi) { - return math.NaN() - } - if scale < absxi { - sumSquares = 1 + sumSquares*(scale/absxi)*(scale/absxi) - scale = absxi - } else { - sumSquares = sumSquares + (absxi/scale)*(absxi/scale) - } - } - if math.IsInf(scale, 1) { - return math.Inf(1) - } - return scale * math.Sqrt(sumSquares) - } - for ix := 0; ix < n*incX; ix += incX { - val := x[ix] - if val == 0 { - continue - } - absxi := math.Abs(val) - if math.IsNaN(absxi) { - return math.NaN() - } - if scale < absxi { - sumSquares = 1 + sumSquares*(scale/absxi)*(scale/absxi) - scale = absxi - } else { - sumSquares = sumSquares + (absxi/scale)*(absxi/scale) - } - } - if math.IsInf(scale, 1) { - return math.Inf(1) - } - return scale * math.Sqrt(sumSquares) -} - -// Sasum computes the sum of the absolute values of the elements of x. -// \sum_i |x[i]| -// Sasum returns 0 if incX is negative. -// -// Float32 implementations are autogenerated and not directly tested. -func (Implementation) Sasum(n int, x []float32, incX int) float32 { - var sum float32 - if n < 0 { - panic(nLT0) - } - if incX < 1 { - if incX == 0 { - panic(zeroIncX) - } - return 0 - } - if len(x) <= (n-1)*incX { - panic(shortX) - } - if incX == 1 { - x = x[:n] - for _, v := range x { - sum += math.Abs(v) - } - return sum - } - for i := 0; i < n; i++ { - sum += math.Abs(x[i*incX]) - } - return sum -} - -// Isamax returns the index of an element of x with the largest absolute value. -// If there are multiple such indices the earliest is returned. -// Isamax returns -1 if n == 0. -// -// Float32 implementations are autogenerated and not directly tested. -func (Implementation) Isamax(n int, x []float32, incX int) int { - if incX < 1 { - if incX == 0 { - panic(zeroIncX) - } - return -1 - } - if len(x) <= (n-1)*incX { - panic(shortX) - } - if n < 2 { - if n == 1 { - return 0 - } - if n == 0 { - return -1 // Netlib returns invalid index when n == 0. - } - panic(nLT0) - } - idx := 0 - max := math.Abs(x[0]) - if incX == 1 { - for i, v := range x[:n] { - absV := math.Abs(v) - if absV > max { - max = absV - idx = i - } - } - return idx - } - ix := incX - for i := 1; i < n; i++ { - v := x[ix] - absV := math.Abs(v) - if absV > max { - max = absV - idx = i - } - ix += incX - } - return idx -} - -// Sswap exchanges the elements of two vectors. -// x[i], y[i] = y[i], x[i] for all i -// -// Float32 implementations are autogenerated and not directly tested. 
-func (Implementation) Sswap(n int, x []float32, incX int, y []float32, incY int) { - if incX == 0 { - panic(zeroIncX) - } - if incY == 0 { - panic(zeroIncY) - } - if n < 1 { - if n == 0 { - return - } - panic(nLT0) - } - if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { - panic(shortX) - } - if (incY > 0 && len(y) <= (n-1)*incY) || (incY < 0 && len(y) <= (1-n)*incY) { - panic(shortY) - } - if incX == 1 && incY == 1 { - x = x[:n] - for i, v := range x { - x[i], y[i] = y[i], v - } - return - } - var ix, iy int - if incX < 0 { - ix = (-n + 1) * incX - } - if incY < 0 { - iy = (-n + 1) * incY - } - for i := 0; i < n; i++ { - x[ix], y[iy] = y[iy], x[ix] - ix += incX - iy += incY - } -} - -// Scopy copies the elements of x into the elements of y. -// y[i] = x[i] for all i -// -// Float32 implementations are autogenerated and not directly tested. -func (Implementation) Scopy(n int, x []float32, incX int, y []float32, incY int) { - if incX == 0 { - panic(zeroIncX) - } - if incY == 0 { - panic(zeroIncY) - } - if n < 1 { - if n == 0 { - return - } - panic(nLT0) - } - if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { - panic(shortX) - } - if (incY > 0 && len(y) <= (n-1)*incY) || (incY < 0 && len(y) <= (1-n)*incY) { - panic(shortY) - } - if incX == 1 && incY == 1 { - copy(y[:n], x[:n]) - return - } - var ix, iy int - if incX < 0 { - ix = (-n + 1) * incX - } - if incY < 0 { - iy = (-n + 1) * incY - } - for i := 0; i < n; i++ { - y[iy] = x[ix] - ix += incX - iy += incY - } -} - -// Saxpy adds alpha times x to y -// y[i] += alpha * x[i] for all i -// -// Float32 implementations are autogenerated and not directly tested. -func (Implementation) Saxpy(n int, alpha float32, x []float32, incX int, y []float32, incY int) { - if incX == 0 { - panic(zeroIncX) - } - if incY == 0 { - panic(zeroIncY) - } - if n < 1 { - if n == 0 { - return - } - panic(nLT0) - } - if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { - panic(shortX) - } - if (incY > 0 && len(y) <= (n-1)*incY) || (incY < 0 && len(y) <= (1-n)*incY) { - panic(shortY) - } - if alpha == 0 { - return - } - if incX == 1 && incY == 1 { - f32.AxpyUnitary(alpha, x[:n], y[:n]) - return - } - var ix, iy int - if incX < 0 { - ix = (-n + 1) * incX - } - if incY < 0 { - iy = (-n + 1) * incY - } - f32.AxpyInc(alpha, x, y, uintptr(n), uintptr(incX), uintptr(incY), uintptr(ix), uintptr(iy)) -} - -// Srotg computes the plane rotation -// _ _ _ _ _ _ -// | c s | | a | | r | -// | -s c | * | b | = | 0 | -// ‾ ‾ ‾ ‾ ‾ ‾ -// where -// r = ±√(a^2 + b^2) -// c = a/r, the cosine of the plane rotation -// s = b/r, the sine of the plane rotation -// -// NOTE: There is a discrepancy between the reference implementation and the BLAS -// technical manual regarding the sign for r when a or b are zero. -// Srotg agrees with the definition in the manual and other -// common BLAS implementations. -// -// Float32 implementations are autogenerated and not directly tested. -func (Implementation) Srotg(a, b float32) (c, s, r, z float32) { - if b == 0 && a == 0 { - return 1, 0, a, 0 - } - absA := math.Abs(a) - absB := math.Abs(b) - aGTb := absA > absB - r = math.Hypot(a, b) - if aGTb { - r = math.Copysign(r, a) - } else { - r = math.Copysign(r, b) - } - c = a / r - s = b / r - if aGTb { - z = s - } else if c != 0 { // r == 0 case handled above - z = 1 / c - } else { - z = 1 - } - return -} - -// Srotmg computes the modified Givens rotation. 
See -// http://www.netlib.org/lapack/explore-html/df/deb/drotmg_8f.html -// for more details. -// -// Float32 implementations are autogenerated and not directly tested. -func (Implementation) Srotmg(d1, d2, x1, y1 float32) (p blas.SrotmParams, rd1, rd2, rx1 float32) { - // The implementation of Drotmg used here is taken from Hopkins 1997 - // Appendix A: https://doi.org/10.1145/289251.289253 - // with the exception of the gam constants below. - - const ( - gam = 4096.0 - gamsq = gam * gam - rgamsq = 1.0 / gamsq - ) - - if d1 < 0 { - p.Flag = blas.Rescaling // Error state. - return p, 0, 0, 0 - } - - if d2 == 0 || y1 == 0 { - p.Flag = blas.Identity - return p, d1, d2, x1 - } - - var h11, h12, h21, h22 float32 - if (d1 == 0 || x1 == 0) && d2 > 0 { - p.Flag = blas.Diagonal - h12 = 1 - h21 = -1 - x1 = y1 - d1, d2 = d2, d1 - } else { - p2 := d2 * y1 - p1 := d1 * x1 - q2 := p2 * y1 - q1 := p1 * x1 - if math.Abs(q1) > math.Abs(q2) { - p.Flag = blas.OffDiagonal - h11 = 1 - h22 = 1 - h21 = -y1 / x1 - h12 = p2 / p1 - u := 1 - h12*h21 - if u <= 0 { - p.Flag = blas.Rescaling // Error state. - return p, 0, 0, 0 - } - - d1 /= u - d2 /= u - x1 *= u - } else { - if q2 < 0 { - p.Flag = blas.Rescaling // Error state. - return p, 0, 0, 0 - } - - p.Flag = blas.Diagonal - h21 = -1 - h12 = 1 - h11 = p1 / p2 - h22 = x1 / y1 - u := 1 + h11*h22 - d1, d2 = d2/u, d1/u - x1 = y1 * u - } - } - - for d1 <= rgamsq && d1 != 0 { - p.Flag = blas.Rescaling - d1 = (d1 * gam) * gam - x1 /= gam - h11 /= gam - h12 /= gam - } - for d1 > gamsq { - p.Flag = blas.Rescaling - d1 = (d1 / gam) / gam - x1 *= gam - h11 *= gam - h12 *= gam - } - - for math.Abs(d2) <= rgamsq && d2 != 0 { - p.Flag = blas.Rescaling - d2 = (d2 * gam) * gam - h21 /= gam - h22 /= gam - } - for math.Abs(d2) > gamsq { - p.Flag = blas.Rescaling - d2 = (d2 / gam) / gam - h21 *= gam - h22 *= gam - } - - switch p.Flag { - case blas.Diagonal: - p.H = [4]float32{0: h11, 3: h22} - case blas.OffDiagonal: - p.H = [4]float32{1: h21, 2: h12} - case blas.Rescaling: - p.H = [4]float32{h11, h21, h12, h22} - default: - panic(badFlag) - } - - return p, d1, d2, x1 -} - -// Srot applies a plane transformation. -// x[i] = c * x[i] + s * y[i] -// y[i] = c * y[i] - s * x[i] -// -// Float32 implementations are autogenerated and not directly tested. -func (Implementation) Srot(n int, x []float32, incX int, y []float32, incY int, c float32, s float32) { - if incX == 0 { - panic(zeroIncX) - } - if incY == 0 { - panic(zeroIncY) - } - if n < 1 { - if n == 0 { - return - } - panic(nLT0) - } - if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { - panic(shortX) - } - if (incY > 0 && len(y) <= (n-1)*incY) || (incY < 0 && len(y) <= (1-n)*incY) { - panic(shortY) - } - if incX == 1 && incY == 1 { - x = x[:n] - for i, vx := range x { - vy := y[i] - x[i], y[i] = c*vx+s*vy, c*vy-s*vx - } - return - } - var ix, iy int - if incX < 0 { - ix = (-n + 1) * incX - } - if incY < 0 { - iy = (-n + 1) * incY - } - for i := 0; i < n; i++ { - vx := x[ix] - vy := y[iy] - x[ix], y[iy] = c*vx+s*vy, c*vy-s*vx - ix += incX - iy += incY - } -} - -// Srotm applies the modified Givens rotation to the 2×n matrix. -// -// Float32 implementations are autogenerated and not directly tested. 
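// Editor's sketch (illustrative only, not part of the vendored diff): a
// Givens rotation from Srotg, applied with Srot, zeroes the second
// component of the pair (a, b) — the building block of QR-style
// eliminations.
package main

import (
	"fmt"

	"gonum.org/v1/gonum/blas/gonum"
)

func main() {
	var impl gonum.Implementation
	c, s, r, _ := impl.Srotg(3, 4) // c = 0.6, s = 0.8, r = 5
	x := []float32{3}
	y := []float32{4}
	impl.Srot(1, x, 1, y, 1, c, s)
	fmt.Println(r, x[0], y[0]) // r ≈ 5, x[0] ≈ 5, y[0] driven to (numerically) 0
}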
-func (Implementation) Srotm(n int, x []float32, incX int, y []float32, incY int, p blas.SrotmParams) { - if incX == 0 { - panic(zeroIncX) - } - if incY == 0 { - panic(zeroIncY) - } - if n <= 0 { - if n == 0 { - return - } - panic(nLT0) - } - if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { - panic(shortX) - } - if (incY > 0 && len(y) <= (n-1)*incY) || (incY < 0 && len(y) <= (1-n)*incY) { - panic(shortY) - } - - if p.Flag == blas.Identity { - return - } - - switch p.Flag { - case blas.Rescaling: - h11 := p.H[0] - h12 := p.H[2] - h21 := p.H[1] - h22 := p.H[3] - if incX == 1 && incY == 1 { - x = x[:n] - for i, vx := range x { - vy := y[i] - x[i], y[i] = vx*h11+vy*h12, vx*h21+vy*h22 - } - return - } - var ix, iy int - if incX < 0 { - ix = (-n + 1) * incX - } - if incY < 0 { - iy = (-n + 1) * incY - } - for i := 0; i < n; i++ { - vx := x[ix] - vy := y[iy] - x[ix], y[iy] = vx*h11+vy*h12, vx*h21+vy*h22 - ix += incX - iy += incY - } - case blas.OffDiagonal: - h12 := p.H[2] - h21 := p.H[1] - if incX == 1 && incY == 1 { - x = x[:n] - for i, vx := range x { - vy := y[i] - x[i], y[i] = vx+vy*h12, vx*h21+vy - } - return - } - var ix, iy int - if incX < 0 { - ix = (-n + 1) * incX - } - if incY < 0 { - iy = (-n + 1) * incY - } - for i := 0; i < n; i++ { - vx := x[ix] - vy := y[iy] - x[ix], y[iy] = vx+vy*h12, vx*h21+vy - ix += incX - iy += incY - } - case blas.Diagonal: - h11 := p.H[0] - h22 := p.H[3] - if incX == 1 && incY == 1 { - x = x[:n] - for i, vx := range x { - vy := y[i] - x[i], y[i] = vx*h11+vy, -vx+vy*h22 - } - return - } - var ix, iy int - if incX < 0 { - ix = (-n + 1) * incX - } - if incY < 0 { - iy = (-n + 1) * incY - } - for i := 0; i < n; i++ { - vx := x[ix] - vy := y[iy] - x[ix], y[iy] = vx*h11+vy, -vx+vy*h22 - ix += incX - iy += incY - } - } -} - -// Sscal scales x by alpha. -// x[i] *= alpha -// Sscal has no effect if incX < 0. -// -// Float32 implementations are autogenerated and not directly tested. -func (Implementation) Sscal(n int, alpha float32, x []float32, incX int) { - if incX < 1 { - if incX == 0 { - panic(zeroIncX) - } - return - } - if n < 1 { - if n == 0 { - return - } - panic(nLT0) - } - if (n-1)*incX >= len(x) { - panic(shortX) - } - if alpha == 0 { - if incX == 1 { - x = x[:n] - for i := range x { - x[i] = 0 - } - return - } - for ix := 0; ix < n*incX; ix += incX { - x[ix] = 0 - } - return - } - if incX == 1 { - f32.ScalUnitary(alpha, x[:n]) - return - } - f32.ScalInc(alpha, x, uintptr(n), uintptr(incX)) -} diff --git a/vendor/gonum.org/v1/gonum/blas/gonum/level1float32_dsdot.go b/vendor/gonum.org/v1/gonum/blas/gonum/level1float32_dsdot.go deleted file mode 100644 index 089e0d8f0..000000000 --- a/vendor/gonum.org/v1/gonum/blas/gonum/level1float32_dsdot.go +++ /dev/null @@ -1,53 +0,0 @@ -// Code generated by "go generate gonum.org/v1/gonum/blas/gonum”; DO NOT EDIT. - -// Copyright ©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import ( - "gonum.org/v1/gonum/internal/asm/f32" -) - -// Dsdot computes the dot product of the two vectors -// \sum_i x[i]*y[i] -// -// Float32 implementations are autogenerated and not directly tested. 
-func (Implementation) Dsdot(n int, x []float32, incX int, y []float32, incY int) float64 { - if incX == 0 { - panic(zeroIncX) - } - if incY == 0 { - panic(zeroIncY) - } - if n <= 0 { - if n == 0 { - return 0 - } - panic(nLT0) - } - if incX == 1 && incY == 1 { - if len(x) < n { - panic(shortX) - } - if len(y) < n { - panic(shortY) - } - return f32.DdotUnitary(x[:n], y[:n]) - } - var ix, iy int - if incX < 0 { - ix = (-n + 1) * incX - } - if incY < 0 { - iy = (-n + 1) * incY - } - if ix >= len(x) || ix+(n-1)*incX >= len(x) { - panic(shortX) - } - if iy >= len(y) || iy+(n-1)*incY >= len(y) { - panic(shortY) - } - return f32.DdotInc(x, y, uintptr(n), uintptr(incX), uintptr(incY), uintptr(ix), uintptr(iy)) -} diff --git a/vendor/gonum.org/v1/gonum/blas/gonum/level1float32_sdot.go b/vendor/gonum.org/v1/gonum/blas/gonum/level1float32_sdot.go deleted file mode 100644 index 41c3e7923..000000000 --- a/vendor/gonum.org/v1/gonum/blas/gonum/level1float32_sdot.go +++ /dev/null @@ -1,53 +0,0 @@ -// Code generated by "go generate gonum.org/v1/gonum/blas/gonum”; DO NOT EDIT. - -// Copyright ©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import ( - "gonum.org/v1/gonum/internal/asm/f32" -) - -// Sdot computes the dot product of the two vectors -// \sum_i x[i]*y[i] -// -// Float32 implementations are autogenerated and not directly tested. -func (Implementation) Sdot(n int, x []float32, incX int, y []float32, incY int) float32 { - if incX == 0 { - panic(zeroIncX) - } - if incY == 0 { - panic(zeroIncY) - } - if n <= 0 { - if n == 0 { - return 0 - } - panic(nLT0) - } - if incX == 1 && incY == 1 { - if len(x) < n { - panic(shortX) - } - if len(y) < n { - panic(shortY) - } - return f32.DotUnitary(x[:n], y[:n]) - } - var ix, iy int - if incX < 0 { - ix = (-n + 1) * incX - } - if incY < 0 { - iy = (-n + 1) * incY - } - if ix >= len(x) || ix+(n-1)*incX >= len(x) { - panic(shortX) - } - if iy >= len(y) || iy+(n-1)*incY >= len(y) { - panic(shortY) - } - return f32.DotInc(x, y, uintptr(n), uintptr(incX), uintptr(incY), uintptr(ix), uintptr(iy)) -} diff --git a/vendor/gonum.org/v1/gonum/blas/gonum/level1float32_sdsdot.go b/vendor/gonum.org/v1/gonum/blas/gonum/level1float32_sdsdot.go deleted file mode 100644 index 69dd8aa1f..000000000 --- a/vendor/gonum.org/v1/gonum/blas/gonum/level1float32_sdsdot.go +++ /dev/null @@ -1,53 +0,0 @@ -// Code generated by "go generate gonum.org/v1/gonum/blas/gonum”; DO NOT EDIT. - -// Copyright ©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import ( - "gonum.org/v1/gonum/internal/asm/f32" -) - -// Sdsdot computes the dot product of the two vectors plus a constant -// alpha + \sum_i x[i]*y[i] -// -// Float32 implementations are autogenerated and not directly tested. 
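// Editor's sketch (illustrative only, not part of the vendored diff): the
// three float32 dot products differ only in accumulation and return type —
// Sdot accumulates in float32, Dsdot in float64, and Sdsdot adds a float32
// constant to the float64 accumulation before rounding back down. The
// difference is visible as soon as the sum needs more than 24 bits.
package main

import (
	"fmt"

	"gonum.org/v1/gonum/blas/gonum"
)

func main() {
	var impl gonum.Implementation
	x := []float32{1e4, 1}
	y := []float32{1e4, 1}
	fmt.Println(impl.Sdot(2, x, 1, y, 1))  // 1e+08: the +1 is lost in float32
	fmt.Println(impl.Dsdot(2, x, 1, y, 1)) // 1.00000001e+08: kept in float64
}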
-func (Implementation) Sdsdot(n int, alpha float32, x []float32, incX int, y []float32, incY int) float32 { - if incX == 0 { - panic(zeroIncX) - } - if incY == 0 { - panic(zeroIncY) - } - if n <= 0 { - if n == 0 { - return 0 - } - panic(nLT0) - } - if incX == 1 && incY == 1 { - if len(x) < n { - panic(shortX) - } - if len(y) < n { - panic(shortY) - } - return alpha + float32(f32.DdotUnitary(x[:n], y[:n])) - } - var ix, iy int - if incX < 0 { - ix = (-n + 1) * incX - } - if incY < 0 { - iy = (-n + 1) * incY - } - if ix >= len(x) || ix+(n-1)*incX >= len(x) { - panic(shortX) - } - if iy >= len(y) || iy+(n-1)*incY >= len(y) { - panic(shortY) - } - return alpha + float32(f32.DdotInc(x, y, uintptr(n), uintptr(incX), uintptr(incY), uintptr(ix), uintptr(iy))) -} diff --git a/vendor/gonum.org/v1/gonum/blas/gonum/level1float64.go b/vendor/gonum.org/v1/gonum/blas/gonum/level1float64.go deleted file mode 100644 index 2e8ed543a..000000000 --- a/vendor/gonum.org/v1/gonum/blas/gonum/level1float64.go +++ /dev/null @@ -1,620 +0,0 @@ -// Copyright ©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import ( - "math" - - "gonum.org/v1/gonum/blas" - "gonum.org/v1/gonum/internal/asm/f64" -) - -var _ blas.Float64Level1 = Implementation{} - -// Dnrm2 computes the Euclidean norm of a vector, -// sqrt(\sum_i x[i] * x[i]). -// This function returns 0 if incX is negative. -func (Implementation) Dnrm2(n int, x []float64, incX int) float64 { - if incX < 1 { - if incX == 0 { - panic(zeroIncX) - } - return 0 - } - if len(x) <= (n-1)*incX { - panic(shortX) - } - if n < 2 { - if n == 1 { - return math.Abs(x[0]) - } - if n == 0 { - return 0 - } - panic(nLT0) - } - var ( - scale float64 = 0 - sumSquares float64 = 1 - ) - if incX == 1 { - x = x[:n] - for _, v := range x { - if v == 0 { - continue - } - absxi := math.Abs(v) - if math.IsNaN(absxi) { - return math.NaN() - } - if scale < absxi { - sumSquares = 1 + sumSquares*(scale/absxi)*(scale/absxi) - scale = absxi - } else { - sumSquares = sumSquares + (absxi/scale)*(absxi/scale) - } - } - if math.IsInf(scale, 1) { - return math.Inf(1) - } - return scale * math.Sqrt(sumSquares) - } - for ix := 0; ix < n*incX; ix += incX { - val := x[ix] - if val == 0 { - continue - } - absxi := math.Abs(val) - if math.IsNaN(absxi) { - return math.NaN() - } - if scale < absxi { - sumSquares = 1 + sumSquares*(scale/absxi)*(scale/absxi) - scale = absxi - } else { - sumSquares = sumSquares + (absxi/scale)*(absxi/scale) - } - } - if math.IsInf(scale, 1) { - return math.Inf(1) - } - return scale * math.Sqrt(sumSquares) -} - -// Dasum computes the sum of the absolute values of the elements of x. -// \sum_i |x[i]| -// Dasum returns 0 if incX is negative. -func (Implementation) Dasum(n int, x []float64, incX int) float64 { - var sum float64 - if n < 0 { - panic(nLT0) - } - if incX < 1 { - if incX == 0 { - panic(zeroIncX) - } - return 0 - } - if len(x) <= (n-1)*incX { - panic(shortX) - } - if incX == 1 { - x = x[:n] - for _, v := range x { - sum += math.Abs(v) - } - return sum - } - for i := 0; i < n; i++ { - sum += math.Abs(x[i*incX]) - } - return sum -} - -// Idamax returns the index of an element of x with the largest absolute value. -// If there are multiple such indices the earliest is returned. -// Idamax returns -1 if n == 0. 
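// Editor's sketch (illustrative only, not part of the vendored diff): the
// Level 1 norm and index routines above on a small vector. Note the
// conventions spelled out in the doc comments: a negative incX makes the
// norm/sum routines return 0, and Idamax returns -1 (an invalid index)
// when n == 0.
package main

import (
	"fmt"

	"gonum.org/v1/gonum/blas/gonum"
)

func main() {
	var impl gonum.Implementation
	x := []float64{3, -4, 4}
	fmt.Println(impl.Dnrm2(3, x, 1))  // sqrt(9+16+16) ≈ 6.4031
	fmt.Println(impl.Dasum(3, x, 1))  // 11
	fmt.Println(impl.Idamax(3, x, 1)) // 1: the first entry with |x[i]| == 4
}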
-func (Implementation) Idamax(n int, x []float64, incX int) int { - if incX < 1 { - if incX == 0 { - panic(zeroIncX) - } - return -1 - } - if len(x) <= (n-1)*incX { - panic(shortX) - } - if n < 2 { - if n == 1 { - return 0 - } - if n == 0 { - return -1 // Netlib returns invalid index when n == 0. - } - panic(nLT0) - } - idx := 0 - max := math.Abs(x[0]) - if incX == 1 { - for i, v := range x[:n] { - absV := math.Abs(v) - if absV > max { - max = absV - idx = i - } - } - return idx - } - ix := incX - for i := 1; i < n; i++ { - v := x[ix] - absV := math.Abs(v) - if absV > max { - max = absV - idx = i - } - ix += incX - } - return idx -} - -// Dswap exchanges the elements of two vectors. -// x[i], y[i] = y[i], x[i] for all i -func (Implementation) Dswap(n int, x []float64, incX int, y []float64, incY int) { - if incX == 0 { - panic(zeroIncX) - } - if incY == 0 { - panic(zeroIncY) - } - if n < 1 { - if n == 0 { - return - } - panic(nLT0) - } - if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { - panic(shortX) - } - if (incY > 0 && len(y) <= (n-1)*incY) || (incY < 0 && len(y) <= (1-n)*incY) { - panic(shortY) - } - if incX == 1 && incY == 1 { - x = x[:n] - for i, v := range x { - x[i], y[i] = y[i], v - } - return - } - var ix, iy int - if incX < 0 { - ix = (-n + 1) * incX - } - if incY < 0 { - iy = (-n + 1) * incY - } - for i := 0; i < n; i++ { - x[ix], y[iy] = y[iy], x[ix] - ix += incX - iy += incY - } -} - -// Dcopy copies the elements of x into the elements of y. -// y[i] = x[i] for all i -func (Implementation) Dcopy(n int, x []float64, incX int, y []float64, incY int) { - if incX == 0 { - panic(zeroIncX) - } - if incY == 0 { - panic(zeroIncY) - } - if n < 1 { - if n == 0 { - return - } - panic(nLT0) - } - if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { - panic(shortX) - } - if (incY > 0 && len(y) <= (n-1)*incY) || (incY < 0 && len(y) <= (1-n)*incY) { - panic(shortY) - } - if incX == 1 && incY == 1 { - copy(y[:n], x[:n]) - return - } - var ix, iy int - if incX < 0 { - ix = (-n + 1) * incX - } - if incY < 0 { - iy = (-n + 1) * incY - } - for i := 0; i < n; i++ { - y[iy] = x[ix] - ix += incX - iy += incY - } -} - -// Daxpy adds alpha times x to y -// y[i] += alpha * x[i] for all i -func (Implementation) Daxpy(n int, alpha float64, x []float64, incX int, y []float64, incY int) { - if incX == 0 { - panic(zeroIncX) - } - if incY == 0 { - panic(zeroIncY) - } - if n < 1 { - if n == 0 { - return - } - panic(nLT0) - } - if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { - panic(shortX) - } - if (incY > 0 && len(y) <= (n-1)*incY) || (incY < 0 && len(y) <= (1-n)*incY) { - panic(shortY) - } - if alpha == 0 { - return - } - if incX == 1 && incY == 1 { - f64.AxpyUnitary(alpha, x[:n], y[:n]) - return - } - var ix, iy int - if incX < 0 { - ix = (-n + 1) * incX - } - if incY < 0 { - iy = (-n + 1) * incY - } - f64.AxpyInc(alpha, x, y, uintptr(n), uintptr(incX), uintptr(incY), uintptr(ix), uintptr(iy)) -} - -// Drotg computes the plane rotation -// _ _ _ _ _ _ -// | c s | | a | | r | -// | -s c | * | b | = | 0 | -// ‾ ‾ ‾ ‾ ‾ ‾ -// where -// r = ±√(a^2 + b^2) -// c = a/r, the cosine of the plane rotation -// s = b/r, the sine of the plane rotation -// -// NOTE: There is a discrepancy between the reference implementation and the BLAS -// technical manual regarding the sign for r when a or b are zero. -// Drotg agrees with the definition in the manual and other -// common BLAS implementations. 
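// Editor's sketch (illustrative only, not part of the vendored diff): a
// negative increment walks a vector backwards; the (-n+1)*inc start index
// computed above makes the first logical element the last stored one, so
// here Daxpy adds x reversed onto y.
package main

import (
	"fmt"

	"gonum.org/v1/gonum/blas/gonum"
)

func main() {
	var impl gonum.Implementation
	x := []float64{1, 2, 3}
	y := []float64{0, 0, 0}
	impl.Daxpy(3, 1, x, -1, y, 1) // y[i] += x[n-1-i]
	fmt.Println(y) // [3 2 1]
}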
-func (Implementation) Drotg(a, b float64) (c, s, r, z float64) { - if b == 0 && a == 0 { - return 1, 0, a, 0 - } - absA := math.Abs(a) - absB := math.Abs(b) - aGTb := absA > absB - r = math.Hypot(a, b) - if aGTb { - r = math.Copysign(r, a) - } else { - r = math.Copysign(r, b) - } - c = a / r - s = b / r - if aGTb { - z = s - } else if c != 0 { // r == 0 case handled above - z = 1 / c - } else { - z = 1 - } - return -} - -// Drotmg computes the modified Givens rotation. See -// http://www.netlib.org/lapack/explore-html/df/deb/drotmg_8f.html -// for more details. -func (Implementation) Drotmg(d1, d2, x1, y1 float64) (p blas.DrotmParams, rd1, rd2, rx1 float64) { - // The implementation of Drotmg used here is taken from Hopkins 1997 - // Appendix A: https://doi.org/10.1145/289251.289253 - // with the exception of the gam constants below. - - const ( - gam = 4096.0 - gamsq = gam * gam - rgamsq = 1.0 / gamsq - ) - - if d1 < 0 { - p.Flag = blas.Rescaling // Error state. - return p, 0, 0, 0 - } - - if d2 == 0 || y1 == 0 { - p.Flag = blas.Identity - return p, d1, d2, x1 - } - - var h11, h12, h21, h22 float64 - if (d1 == 0 || x1 == 0) && d2 > 0 { - p.Flag = blas.Diagonal - h12 = 1 - h21 = -1 - x1 = y1 - d1, d2 = d2, d1 - } else { - p2 := d2 * y1 - p1 := d1 * x1 - q2 := p2 * y1 - q1 := p1 * x1 - if math.Abs(q1) > math.Abs(q2) { - p.Flag = blas.OffDiagonal - h11 = 1 - h22 = 1 - h21 = -y1 / x1 - h12 = p2 / p1 - u := 1 - h12*h21 - if u <= 0 { - p.Flag = blas.Rescaling // Error state. - return p, 0, 0, 0 - } - - d1 /= u - d2 /= u - x1 *= u - } else { - if q2 < 0 { - p.Flag = blas.Rescaling // Error state. - return p, 0, 0, 0 - } - - p.Flag = blas.Diagonal - h21 = -1 - h12 = 1 - h11 = p1 / p2 - h22 = x1 / y1 - u := 1 + h11*h22 - d1, d2 = d2/u, d1/u - x1 = y1 * u - } - } - - for d1 <= rgamsq && d1 != 0 { - p.Flag = blas.Rescaling - d1 = (d1 * gam) * gam - x1 /= gam - h11 /= gam - h12 /= gam - } - for d1 > gamsq { - p.Flag = blas.Rescaling - d1 = (d1 / gam) / gam - x1 *= gam - h11 *= gam - h12 *= gam - } - - for math.Abs(d2) <= rgamsq && d2 != 0 { - p.Flag = blas.Rescaling - d2 = (d2 * gam) * gam - h21 /= gam - h22 /= gam - } - for math.Abs(d2) > gamsq { - p.Flag = blas.Rescaling - d2 = (d2 / gam) / gam - h21 *= gam - h22 *= gam - } - - switch p.Flag { - case blas.Diagonal: - p.H = [4]float64{0: h11, 3: h22} - case blas.OffDiagonal: - p.H = [4]float64{1: h21, 2: h12} - case blas.Rescaling: - p.H = [4]float64{h11, h21, h12, h22} - default: - panic(badFlag) - } - - return p, d1, d2, x1 -} - -// Drot applies a plane transformation. -// x[i] = c * x[i] + s * y[i] -// y[i] = c * y[i] - s * x[i] -func (Implementation) Drot(n int, x []float64, incX int, y []float64, incY int, c float64, s float64) { - if incX == 0 { - panic(zeroIncX) - } - if incY == 0 { - panic(zeroIncY) - } - if n < 1 { - if n == 0 { - return - } - panic(nLT0) - } - if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { - panic(shortX) - } - if (incY > 0 && len(y) <= (n-1)*incY) || (incY < 0 && len(y) <= (1-n)*incY) { - panic(shortY) - } - if incX == 1 && incY == 1 { - x = x[:n] - for i, vx := range x { - vy := y[i] - x[i], y[i] = c*vx+s*vy, c*vy-s*vx - } - return - } - var ix, iy int - if incX < 0 { - ix = (-n + 1) * incX - } - if incY < 0 { - iy = (-n + 1) * incY - } - for i := 0; i < n; i++ { - vx := x[ix] - vy := y[iy] - x[ix], y[iy] = c*vx+s*vy, c*vy-s*vx - ix += incX - iy += incY - } -} - -// Drotm applies the modified Givens rotation to the 2×n matrix. 
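// Editor's sketch (illustrative only, not part of the vendored diff):
// Drotmg builds an H that zeroes the second component of the scaled pair
// (sqrt(d1)*x1, sqrt(d2)*y1); Drotm then applies that H across a pair of
// vectors. With d1 = d2 = 1 this reproduces the plain Givens result.
package main

import (
	"fmt"
	"math"

	"gonum.org/v1/gonum/blas/gonum"
)

func main() {
	var impl gonum.Implementation
	p, d1, _, x1 := impl.Drotmg(1, 1, 3, 4)
	x := []float64{3}
	y := []float64{4}
	impl.Drotm(1, x, 1, y, 1, p)
	fmt.Println(x[0], y[0])         // 6.25 0
	fmt.Println(math.Sqrt(d1) * x1) // ≈ 5, the same r as Drotg(3, 4)
}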
-func (Implementation) Drotm(n int, x []float64, incX int, y []float64, incY int, p blas.DrotmParams) { - if incX == 0 { - panic(zeroIncX) - } - if incY == 0 { - panic(zeroIncY) - } - if n <= 0 { - if n == 0 { - return - } - panic(nLT0) - } - if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { - panic(shortX) - } - if (incY > 0 && len(y) <= (n-1)*incY) || (incY < 0 && len(y) <= (1-n)*incY) { - panic(shortY) - } - - if p.Flag == blas.Identity { - return - } - - switch p.Flag { - case blas.Rescaling: - h11 := p.H[0] - h12 := p.H[2] - h21 := p.H[1] - h22 := p.H[3] - if incX == 1 && incY == 1 { - x = x[:n] - for i, vx := range x { - vy := y[i] - x[i], y[i] = vx*h11+vy*h12, vx*h21+vy*h22 - } - return - } - var ix, iy int - if incX < 0 { - ix = (-n + 1) * incX - } - if incY < 0 { - iy = (-n + 1) * incY - } - for i := 0; i < n; i++ { - vx := x[ix] - vy := y[iy] - x[ix], y[iy] = vx*h11+vy*h12, vx*h21+vy*h22 - ix += incX - iy += incY - } - case blas.OffDiagonal: - h12 := p.H[2] - h21 := p.H[1] - if incX == 1 && incY == 1 { - x = x[:n] - for i, vx := range x { - vy := y[i] - x[i], y[i] = vx+vy*h12, vx*h21+vy - } - return - } - var ix, iy int - if incX < 0 { - ix = (-n + 1) * incX - } - if incY < 0 { - iy = (-n + 1) * incY - } - for i := 0; i < n; i++ { - vx := x[ix] - vy := y[iy] - x[ix], y[iy] = vx+vy*h12, vx*h21+vy - ix += incX - iy += incY - } - case blas.Diagonal: - h11 := p.H[0] - h22 := p.H[3] - if incX == 1 && incY == 1 { - x = x[:n] - for i, vx := range x { - vy := y[i] - x[i], y[i] = vx*h11+vy, -vx+vy*h22 - } - return - } - var ix, iy int - if incX < 0 { - ix = (-n + 1) * incX - } - if incY < 0 { - iy = (-n + 1) * incY - } - for i := 0; i < n; i++ { - vx := x[ix] - vy := y[iy] - x[ix], y[iy] = vx*h11+vy, -vx+vy*h22 - ix += incX - iy += incY - } - } -} - -// Dscal scales x by alpha. -// x[i] *= alpha -// Dscal has no effect if incX < 0. -func (Implementation) Dscal(n int, alpha float64, x []float64, incX int) { - if incX < 1 { - if incX == 0 { - panic(zeroIncX) - } - return - } - if n < 1 { - if n == 0 { - return - } - panic(nLT0) - } - if (n-1)*incX >= len(x) { - panic(shortX) - } - if alpha == 0 { - if incX == 1 { - x = x[:n] - for i := range x { - x[i] = 0 - } - return - } - for ix := 0; ix < n*incX; ix += incX { - x[ix] = 0 - } - return - } - if incX == 1 { - f64.ScalUnitary(alpha, x[:n]) - return - } - f64.ScalInc(alpha, x, uintptr(n), uintptr(incX)) -} diff --git a/vendor/gonum.org/v1/gonum/blas/gonum/level1float64_ddot.go b/vendor/gonum.org/v1/gonum/blas/gonum/level1float64_ddot.go deleted file mode 100644 index be87ba13d..000000000 --- a/vendor/gonum.org/v1/gonum/blas/gonum/level1float64_ddot.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright ©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package gonum - -import ( - "gonum.org/v1/gonum/internal/asm/f64" -) - -// Ddot computes the dot product of the two vectors -// \sum_i x[i]*y[i] -func (Implementation) Ddot(n int, x []float64, incX int, y []float64, incY int) float64 { - if incX == 0 { - panic(zeroIncX) - } - if incY == 0 { - panic(zeroIncY) - } - if n <= 0 { - if n == 0 { - return 0 - } - panic(nLT0) - } - if incX == 1 && incY == 1 { - if len(x) < n { - panic(shortX) - } - if len(y) < n { - panic(shortY) - } - return f64.DotUnitary(x[:n], y[:n]) - } - var ix, iy int - if incX < 0 { - ix = (-n + 1) * incX - } - if incY < 0 { - iy = (-n + 1) * incY - } - if ix >= len(x) || ix+(n-1)*incX >= len(x) { - panic(shortX) - } - if iy >= len(y) || iy+(n-1)*incY >= len(y) { - panic(shortY) - } - return f64.DotInc(x, y, uintptr(n), uintptr(incX), uintptr(incY), uintptr(ix), uintptr(iy)) -} diff --git a/vendor/gonum.org/v1/gonum/blas/gonum/level2cmplx128.go b/vendor/gonum.org/v1/gonum/blas/gonum/level2cmplx128.go deleted file mode 100644 index 03ee328fd..000000000 --- a/vendor/gonum.org/v1/gonum/blas/gonum/level2cmplx128.go +++ /dev/null @@ -1,2906 +0,0 @@ -// Copyright ©2017 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import ( - "math/cmplx" - - "gonum.org/v1/gonum/blas" - "gonum.org/v1/gonum/internal/asm/c128" -) - -var _ blas.Complex128Level2 = Implementation{} - -// Zgbmv performs one of the matrix-vector operations -// y = alpha * A * x + beta * y if trans = blas.NoTrans -// y = alpha * A^T * x + beta * y if trans = blas.Trans -// y = alpha * A^H * x + beta * y if trans = blas.ConjTrans -// where alpha and beta are scalars, x and y are vectors, and A is an m×n band matrix -// with kL sub-diagonals and kU super-diagonals. -func (Implementation) Zgbmv(trans blas.Transpose, m, n, kL, kU int, alpha complex128, a []complex128, lda int, x []complex128, incX int, beta complex128, y []complex128, incY int) { - switch trans { - default: - panic(badTranspose) - case blas.NoTrans, blas.Trans, blas.ConjTrans: - } - if m < 0 { - panic(mLT0) - } - if n < 0 { - panic(nLT0) - } - if kL < 0 { - panic(kLLT0) - } - if kU < 0 { - panic(kULT0) - } - if lda < kL+kU+1 { - panic(badLdA) - } - if incX == 0 { - panic(zeroIncX) - } - if incY == 0 { - panic(zeroIncY) - } - - // Quick return if possible. - if m == 0 || n == 0 { - return - } - - // For zero matrix size the following slice length checks are trivially satisfied. - if len(a) < lda*(min(m, n+kL)-1)+kL+kU+1 { - panic(shortA) - } - var lenX, lenY int - if trans == blas.NoTrans { - lenX, lenY = n, m - } else { - lenX, lenY = m, n - } - if (incX > 0 && len(x) <= (lenX-1)*incX) || (incX < 0 && len(x) <= (1-lenX)*incX) { - panic(shortX) - } - if (incY > 0 && len(y) <= (lenY-1)*incY) || (incY < 0 && len(y) <= (1-lenY)*incY) { - panic(shortY) - } - - // Quick return if possible. - if alpha == 0 && beta == 1 { - return - } - - var kx int - if incX < 0 { - kx = (1 - lenX) * incX - } - var ky int - if incY < 0 { - ky = (1 - lenY) * incY - } - - // Form y = beta*y. 
- if beta != 1 { - if incY == 1 { - if beta == 0 { - for i := range y[:lenY] { - y[i] = 0 - } - } else { - c128.ScalUnitary(beta, y[:lenY]) - } - } else { - iy := ky - if beta == 0 { - for i := 0; i < lenY; i++ { - y[iy] = 0 - iy += incY - } - } else { - if incY > 0 { - c128.ScalInc(beta, y, uintptr(lenY), uintptr(incY)) - } else { - c128.ScalInc(beta, y, uintptr(lenY), uintptr(-incY)) - } - } - } - } - - nRow := min(m, n+kL) - nCol := kL + 1 + kU - switch trans { - case blas.NoTrans: - iy := ky - if incX == 1 { - for i := 0; i < nRow; i++ { - l := max(0, kL-i) - u := min(nCol, n+kL-i) - aRow := a[i*lda+l : i*lda+u] - off := max(0, i-kL) - xtmp := x[off : off+u-l] - var sum complex128 - for j, v := range aRow { - sum += xtmp[j] * v - } - y[iy] += alpha * sum - iy += incY - } - } else { - for i := 0; i < nRow; i++ { - l := max(0, kL-i) - u := min(nCol, n+kL-i) - aRow := a[i*lda+l : i*lda+u] - off := max(0, i-kL) * incX - jx := kx - var sum complex128 - for _, v := range aRow { - sum += x[off+jx] * v - jx += incX - } - y[iy] += alpha * sum - iy += incY - } - } - case blas.Trans: - if incX == 1 { - for i := 0; i < nRow; i++ { - l := max(0, kL-i) - u := min(nCol, n+kL-i) - aRow := a[i*lda+l : i*lda+u] - off := max(0, i-kL) * incY - alphaxi := alpha * x[i] - jy := ky - for _, v := range aRow { - y[off+jy] += alphaxi * v - jy += incY - } - } - } else { - ix := kx - for i := 0; i < nRow; i++ { - l := max(0, kL-i) - u := min(nCol, n+kL-i) - aRow := a[i*lda+l : i*lda+u] - off := max(0, i-kL) * incY - alphaxi := alpha * x[ix] - jy := ky - for _, v := range aRow { - y[off+jy] += alphaxi * v - jy += incY - } - ix += incX - } - } - case blas.ConjTrans: - if incX == 1 { - for i := 0; i < nRow; i++ { - l := max(0, kL-i) - u := min(nCol, n+kL-i) - aRow := a[i*lda+l : i*lda+u] - off := max(0, i-kL) * incY - alphaxi := alpha * x[i] - jy := ky - for _, v := range aRow { - y[off+jy] += alphaxi * cmplx.Conj(v) - jy += incY - } - } - } else { - ix := kx - for i := 0; i < nRow; i++ { - l := max(0, kL-i) - u := min(nCol, n+kL-i) - aRow := a[i*lda+l : i*lda+u] - off := max(0, i-kL) * incY - alphaxi := alpha * x[ix] - jy := ky - for _, v := range aRow { - y[off+jy] += alphaxi * cmplx.Conj(v) - jy += incY - } - ix += incX - } - } - } -} - -// Zgemv performs one of the matrix-vector operations -// y = alpha * A * x + beta * y if trans = blas.NoTrans -// y = alpha * A^T * x + beta * y if trans = blas.Trans -// y = alpha * A^H * x + beta * y if trans = blas.ConjTrans -// where alpha and beta are scalars, x and y are vectors, and A is an m×n dense matrix. -func (Implementation) Zgemv(trans blas.Transpose, m, n int, alpha complex128, a []complex128, lda int, x []complex128, incX int, beta complex128, y []complex128, incY int) { - switch trans { - default: - panic(badTranspose) - case blas.NoTrans, blas.Trans, blas.ConjTrans: - } - if m < 0 { - panic(mLT0) - } - if n < 0 { - panic(nLT0) - } - if lda < max(1, n) { - panic(badLdA) - } - if incX == 0 { - panic(zeroIncX) - } - if incY == 0 { - panic(zeroIncY) - } - - // Quick return if possible. - if m == 0 || n == 0 { - return - } - - // For zero matrix size the following slice length checks are trivially satisfied. 
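// Editor's sketch (illustrative only, not part of the vendored diff): the
// band layout consumed by Zgbmv above, for a 3×3 tridiagonal matrix
// (kL = kU = 1). Element A[i][j] lives at a[i*lda + j - i + kL]; the padded
// corner entries are never read, matching the ∗ entries in the package doc.
package main

import (
	"fmt"

	"gonum.org/v1/gonum/blas"
	"gonum.org/v1/gonum/blas/gonum"
)

func main() {
	var impl gonum.Implementation
	// A = [2 1 0; 1 2 1; 0 1 2] stored band-wise, lda = kL+kU+1 = 3.
	a := []complex128{
		0, 2, 1, // row 0: (pad) a00 a01
		1, 2, 1, // row 1: a10 a11 a12
		1, 2, 0, // row 2: a21 a22 (pad)
	}
	x := []complex128{1, 1, 1}
	y := make([]complex128, 3)
	impl.Zgbmv(blas.NoTrans, 3, 3, 1, 1, 1, a, 3, x, 1, 0, y, 1)
	fmt.Println(y) // [(3+0i) (4+0i) (3+0i)]
}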
- var lenX, lenY int - if trans == blas.NoTrans { - lenX = n - lenY = m - } else { - lenX = m - lenY = n - } - if len(a) < lda*(m-1)+n { - panic(shortA) - } - if (incX > 0 && len(x) <= (lenX-1)*incX) || (incX < 0 && len(x) <= (1-lenX)*incX) { - panic(shortX) - } - if (incY > 0 && len(y) <= (lenY-1)*incY) || (incY < 0 && len(y) <= (1-lenY)*incY) { - panic(shortY) - } - - // Quick return if possible. - if alpha == 0 && beta == 1 { - return - } - - var kx int - if incX < 0 { - kx = (1 - lenX) * incX - } - var ky int - if incY < 0 { - ky = (1 - lenY) * incY - } - - // Form y = beta*y. - if beta != 1 { - if incY == 1 { - if beta == 0 { - for i := range y[:lenY] { - y[i] = 0 - } - } else { - c128.ScalUnitary(beta, y[:lenY]) - } - } else { - iy := ky - if beta == 0 { - for i := 0; i < lenY; i++ { - y[iy] = 0 - iy += incY - } - } else { - if incY > 0 { - c128.ScalInc(beta, y, uintptr(lenY), uintptr(incY)) - } else { - c128.ScalInc(beta, y, uintptr(lenY), uintptr(-incY)) - } - } - } - } - - if alpha == 0 { - return - } - - switch trans { - default: - // Form y = alpha*A*x + y. - iy := ky - if incX == 1 { - for i := 0; i < m; i++ { - y[iy] += alpha * c128.DotuUnitary(a[i*lda:i*lda+n], x[:n]) - iy += incY - } - return - } - for i := 0; i < m; i++ { - y[iy] += alpha * c128.DotuInc(a[i*lda:i*lda+n], x, uintptr(n), 1, uintptr(incX), 0, uintptr(kx)) - iy += incY - } - return - - case blas.Trans: - // Form y = alpha*A^T*x + y. - ix := kx - if incY == 1 { - for i := 0; i < m; i++ { - c128.AxpyUnitary(alpha*x[ix], a[i*lda:i*lda+n], y[:n]) - ix += incX - } - return - } - for i := 0; i < m; i++ { - c128.AxpyInc(alpha*x[ix], a[i*lda:i*lda+n], y, uintptr(n), 1, uintptr(incY), 0, uintptr(ky)) - ix += incX - } - return - - case blas.ConjTrans: - // Form y = alpha*A^H*x + y. - ix := kx - if incY == 1 { - for i := 0; i < m; i++ { - tmp := alpha * x[ix] - for j := 0; j < n; j++ { - y[j] += tmp * cmplx.Conj(a[i*lda+j]) - } - ix += incX - } - return - } - for i := 0; i < m; i++ { - tmp := alpha * x[ix] - jy := ky - for j := 0; j < n; j++ { - y[jy] += tmp * cmplx.Conj(a[i*lda+j]) - jy += incY - } - ix += incX - } - return - } -} - -// Zgerc performs the rank-one operation -// A += alpha * x * y^H -// where A is an m×n dense matrix, alpha is a scalar, x is an m element vector, -// and y is an n element vector. -func (Implementation) Zgerc(m, n int, alpha complex128, x []complex128, incX int, y []complex128, incY int, a []complex128, lda int) { - if m < 0 { - panic(mLT0) - } - if n < 0 { - panic(nLT0) - } - if lda < max(1, n) { - panic(badLdA) - } - if incX == 0 { - panic(zeroIncX) - } - if incY == 0 { - panic(zeroIncY) - } - - // Quick return if possible. - if m == 0 || n == 0 { - return - } - - // For zero matrix size the following slice length checks are trivially satisfied. - if (incX > 0 && len(x) <= (m-1)*incX) || (incX < 0 && len(x) <= (1-m)*incX) { - panic(shortX) - } - if (incY > 0 && len(y) <= (n-1)*incY) || (incY < 0 && len(y) <= (1-n)*incY) { - panic(shortY) - } - if len(a) < lda*(m-1)+n { - panic(shortA) - } - - // Quick return if possible. 
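// Editor's sketch (illustrative only, not part of the vendored diff):
// Zgerc accumulates the conjugated outer product alpha*x*y^H into A, while
// Zgeru (below) uses y^T; with a 1×1 A the difference is just the
// conjugation of y.
package main

import (
	"fmt"

	"gonum.org/v1/gonum/blas/gonum"
)

func main() {
	var impl gonum.Implementation
	x := []complex128{2}
	y := []complex128{1 + 1i}
	a := []complex128{0}
	impl.Zgerc(1, 1, 1, x, 1, y, 1, a, 1)
	fmt.Println(a[0]) // 2 * conj(1+1i) = (2-2i)
	a[0] = 0
	impl.Zgeru(1, 1, 1, x, 1, y, 1, a, 1)
	fmt.Println(a[0]) // 2 * (1+1i) = (2+2i)
}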
- if alpha == 0 { - return - } - - var kx, jy int - if incX < 0 { - kx = (1 - m) * incX - } - if incY < 0 { - jy = (1 - n) * incY - } - for j := 0; j < n; j++ { - if y[jy] != 0 { - tmp := alpha * cmplx.Conj(y[jy]) - c128.AxpyInc(tmp, x, a[j:], uintptr(m), uintptr(incX), uintptr(lda), uintptr(kx), 0) - } - jy += incY - } -} - -// Zgeru performs the rank-one operation -// A += alpha * x * y^T -// where A is an m×n dense matrix, alpha is a scalar, x is an m element vector, -// and y is an n element vector. -func (Implementation) Zgeru(m, n int, alpha complex128, x []complex128, incX int, y []complex128, incY int, a []complex128, lda int) { - if m < 0 { - panic(mLT0) - } - if n < 0 { - panic(nLT0) - } - if lda < max(1, n) { - panic(badLdA) - } - if incX == 0 { - panic(zeroIncX) - } - if incY == 0 { - panic(zeroIncY) - } - - // Quick return if possible. - if m == 0 || n == 0 { - return - } - - // For zero matrix size the following slice length checks are trivially satisfied. - if (incX > 0 && len(x) <= (m-1)*incX) || (incX < 0 && len(x) <= (1-m)*incX) { - panic(shortX) - } - if (incY > 0 && len(y) <= (n-1)*incY) || (incY < 0 && len(y) <= (1-n)*incY) { - panic(shortY) - } - if len(a) < lda*(m-1)+n { - panic(shortA) - } - - // Quick return if possible. - if alpha == 0 { - return - } - - var kx int - if incX < 0 { - kx = (1 - m) * incX - } - if incY == 1 { - for i := 0; i < m; i++ { - if x[kx] != 0 { - tmp := alpha * x[kx] - c128.AxpyUnitary(tmp, y[:n], a[i*lda:i*lda+n]) - } - kx += incX - } - return - } - var jy int - if incY < 0 { - jy = (1 - n) * incY - } - for i := 0; i < m; i++ { - if x[kx] != 0 { - tmp := alpha * x[kx] - c128.AxpyInc(tmp, y, a[i*lda:i*lda+n], uintptr(n), uintptr(incY), 1, uintptr(jy), 0) - } - kx += incX - } -} - -// Zhbmv performs the matrix-vector operation -// y = alpha * A * x + beta * y -// where alpha and beta are scalars, x and y are vectors, and A is an n×n -// Hermitian band matrix with k super-diagonals. The imaginary parts of -// the diagonal elements of A are ignored and assumed to be zero. -func (Implementation) Zhbmv(uplo blas.Uplo, n, k int, alpha complex128, a []complex128, lda int, x []complex128, incX int, beta complex128, y []complex128, incY int) { - switch uplo { - default: - panic(badUplo) - case blas.Upper, blas.Lower: - } - if n < 0 { - panic(nLT0) - } - if k < 0 { - panic(kLT0) - } - if lda < k+1 { - panic(badLdA) - } - if incX == 0 { - panic(zeroIncX) - } - if incY == 0 { - panic(zeroIncY) - } - - // Quick return if possible. - if n == 0 { - return - } - - // For zero matrix size the following slice length checks are trivially satisfied. - if len(a) < lda*(n-1)+k+1 { - panic(shortA) - } - if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { - panic(shortX) - } - if (incY > 0 && len(y) <= (n-1)*incY) || (incY < 0 && len(y) <= (1-n)*incY) { - panic(shortY) - } - - // Quick return if possible. - if alpha == 0 && beta == 1 { - return - } - - // Set up the start indices in X and Y. - var kx int - if incX < 0 { - kx = (1 - n) * incX - } - var ky int - if incY < 0 { - ky = (1 - n) * incY - } - - // Form y = beta*y. 
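Zhbmv's band storage deserves a concrete picture: with uplo == blas.Upper, row i of a holds [A(i,i), A(i,i+1), ..., A(i,i+k)] starting at a[i*lda], with lda >= k+1 and the tails of the last k rows unused. A sketch under those assumptions (values invented):

package main

import (
	"fmt"

	"gonum.org/v1/gonum/blas"
	"gonum.org/v1/gonum/blas/gonum"
)

func main() {
	var impl gonum.Implementation

	// n = 3 Hermitian band matrix with k = 1 super-diagonal,
	//
	// | 2    1i  0 |
	// | -1i  3   1 |
	// | 0    1   4 |
	//
	// stored Upper, row-major, lda = k+1 = 2: row i holds [A(i,i), A(i,i+1)],
	// with the tail of the last row unused.
	a := []complex128{2, 1i, 3, 1, 4, 0}
	x := []complex128{1, 1, 1}
	y := make([]complex128, 3)

	// y = 1*A*x + 0*y.
	impl.Zhbmv(blas.Upper, 3, 1, 1, a, 2, x, 1, 0, y, 1)
	fmt.Println(y) // [(2+1i) (4-1i) (5+0i)]
}

The blas.Lower layout mirrors this, holding [A(i,i-k), ..., A(i,i)] per row with the leading entries of the first k rows unused.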
- if beta != 1 { - if incY == 1 { - if beta == 0 { - for i := range y[:n] { - y[i] = 0 - } - } else { - for i, v := range y[:n] { - y[i] = beta * v - } - } - } else { - iy := ky - if beta == 0 { - for i := 0; i < n; i++ { - y[iy] = 0 - iy += incY - } - } else { - for i := 0; i < n; i++ { - y[iy] = beta * y[iy] - iy += incY - } - } - } - } - - if alpha == 0 { - return - } - - // The elements of A are accessed sequentially with one pass through a. - switch uplo { - case blas.Upper: - iy := ky - if incX == 1 { - for i := 0; i < n; i++ { - aRow := a[i*lda:] - alphaxi := alpha * x[i] - sum := alphaxi * complex(real(aRow[0]), 0) - u := min(k+1, n-i) - jy := incY - for j := 1; j < u; j++ { - v := aRow[j] - sum += alpha * x[i+j] * v - y[iy+jy] += alphaxi * cmplx.Conj(v) - jy += incY - } - y[iy] += sum - iy += incY - } - } else { - ix := kx - for i := 0; i < n; i++ { - aRow := a[i*lda:] - alphaxi := alpha * x[ix] - sum := alphaxi * complex(real(aRow[0]), 0) - u := min(k+1, n-i) - jx := incX - jy := incY - for j := 1; j < u; j++ { - v := aRow[j] - sum += alpha * x[ix+jx] * v - y[iy+jy] += alphaxi * cmplx.Conj(v) - jx += incX - jy += incY - } - y[iy] += sum - ix += incX - iy += incY - } - } - case blas.Lower: - iy := ky - if incX == 1 { - for i := 0; i < n; i++ { - l := max(0, k-i) - alphaxi := alpha * x[i] - jy := l * incY - aRow := a[i*lda:] - for j := l; j < k; j++ { - v := aRow[j] - y[iy] += alpha * v * x[i-k+j] - y[iy-k*incY+jy] += alphaxi * cmplx.Conj(v) - jy += incY - } - y[iy] += alphaxi * complex(real(aRow[k]), 0) - iy += incY - } - } else { - ix := kx - for i := 0; i < n; i++ { - l := max(0, k-i) - alphaxi := alpha * x[ix] - jx := l * incX - jy := l * incY - aRow := a[i*lda:] - for j := l; j < k; j++ { - v := aRow[j] - y[iy] += alpha * v * x[ix-k*incX+jx] - y[iy-k*incY+jy] += alphaxi * cmplx.Conj(v) - jx += incX - jy += incY - } - y[iy] += alphaxi * complex(real(aRow[k]), 0) - ix += incX - iy += incY - } - } - } -} - -// Zhemv performs the matrix-vector operation -// y = alpha * A * x + beta * y -// where alpha and beta are scalars, x and y are vectors, and A is an n×n -// Hermitian matrix. The imaginary parts of the diagonal elements of A are -// ignored and assumed to be zero. -func (Implementation) Zhemv(uplo blas.Uplo, n int, alpha complex128, a []complex128, lda int, x []complex128, incX int, beta complex128, y []complex128, incY int) { - switch uplo { - default: - panic(badUplo) - case blas.Upper, blas.Lower: - } - if n < 0 { - panic(nLT0) - } - if lda < max(1, n) { - panic(badLdA) - } - if incX == 0 { - panic(zeroIncX) - } - if incY == 0 { - panic(zeroIncY) - } - - // Quick return if possible. - if n == 0 { - return - } - - // For zero matrix size the following slice length checks are trivially satisfied. - if len(a) < lda*(n-1)+n { - panic(shortA) - } - if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { - panic(shortX) - } - if (incY > 0 && len(y) <= (n-1)*incY) || (incY < 0 && len(y) <= (1-n)*incY) { - panic(shortY) - } - - // Quick return if possible. - if alpha == 0 && beta == 1 { - return - } - - // Set up the start indices in X and Y. - var kx int - if incX < 0 { - kx = (1 - n) * incX - } - var ky int - if incY < 0 { - ky = (1 - n) * incY - } - - // Form y = beta*y. 
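A property worth making explicit in Zhemv (and shared by the other Hermitian kernels here): only the triangle selected by uplo is ever read, and the imaginary parts of the diagonal are discarded via real(). The sketch below (illustrative only) poisons the unreferenced entries with NaN to make that visible:

package main

import (
	"fmt"
	"math"

	"gonum.org/v1/gonum/blas"
	"gonum.org/v1/gonum/blas/gonum"
)

func main() {
	var impl gonum.Implementation

	nan := math.NaN()
	// Hermitian matrix A = [[1, 2i], [-2i, 3]], stored Upper, row-major,
	// lda = 2. The strictly lower triangle and the imaginary parts of the
	// diagonal are never read, so NaN there cannot leak into the result.
	a := []complex128{
		complex(1, nan), 2i,
		complex(nan, nan), complex(3, nan),
	}
	x := []complex128{1, 1}
	y := make([]complex128, 2)

	impl.Zhemv(blas.Upper, 2, 1, a, 2, x, 1, 0, y, 1)
	fmt.Println(y) // [(1+2i) (3-2i)]
}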
- if beta != 1 { - if incY == 1 { - if beta == 0 { - for i := range y[:n] { - y[i] = 0 - } - } else { - for i, v := range y[:n] { - y[i] = beta * v - } - } - } else { - iy := ky - if beta == 0 { - for i := 0; i < n; i++ { - y[iy] = 0 - iy += incY - } - } else { - for i := 0; i < n; i++ { - y[iy] = beta * y[iy] - iy += incY - } - } - } - } - - if alpha == 0 { - return - } - - // The elements of A are accessed sequentially with one pass through - // the triangular part of A. - - if uplo == blas.Upper { - // Form y when A is stored in upper triangle. - if incX == 1 && incY == 1 { - for i := 0; i < n; i++ { - tmp1 := alpha * x[i] - var tmp2 complex128 - for j := i + 1; j < n; j++ { - y[j] += tmp1 * cmplx.Conj(a[i*lda+j]) - tmp2 += a[i*lda+j] * x[j] - } - aii := complex(real(a[i*lda+i]), 0) - y[i] += tmp1*aii + alpha*tmp2 - } - } else { - ix := kx - iy := ky - for i := 0; i < n; i++ { - tmp1 := alpha * x[ix] - var tmp2 complex128 - jx := ix - jy := iy - for j := i + 1; j < n; j++ { - jx += incX - jy += incY - y[jy] += tmp1 * cmplx.Conj(a[i*lda+j]) - tmp2 += a[i*lda+j] * x[jx] - } - aii := complex(real(a[i*lda+i]), 0) - y[iy] += tmp1*aii + alpha*tmp2 - ix += incX - iy += incY - } - } - return - } - - // Form y when A is stored in lower triangle. - if incX == 1 && incY == 1 { - for i := 0; i < n; i++ { - tmp1 := alpha * x[i] - var tmp2 complex128 - for j := 0; j < i; j++ { - y[j] += tmp1 * cmplx.Conj(a[i*lda+j]) - tmp2 += a[i*lda+j] * x[j] - } - aii := complex(real(a[i*lda+i]), 0) - y[i] += tmp1*aii + alpha*tmp2 - } - } else { - ix := kx - iy := ky - for i := 0; i < n; i++ { - tmp1 := alpha * x[ix] - var tmp2 complex128 - jx := kx - jy := ky - for j := 0; j < i; j++ { - y[jy] += tmp1 * cmplx.Conj(a[i*lda+j]) - tmp2 += a[i*lda+j] * x[jx] - jx += incX - jy += incY - } - aii := complex(real(a[i*lda+i]), 0) - y[iy] += tmp1*aii + alpha*tmp2 - ix += incX - iy += incY - } - } -} - -// Zher performs the Hermitian rank-one operation -// A += alpha * x * x^H -// where A is an n×n Hermitian matrix, alpha is a real scalar, and x is an n -// element vector. On entry, the imaginary parts of the diagonal elements of A -// are ignored and assumed to be zero, on return they will be set to zero. -func (Implementation) Zher(uplo blas.Uplo, n int, alpha float64, x []complex128, incX int, a []complex128, lda int) { - switch uplo { - default: - panic(badUplo) - case blas.Upper, blas.Lower: - } - if n < 0 { - panic(nLT0) - } - if lda < max(1, n) { - panic(badLdA) - } - if incX == 0 { - panic(zeroIncX) - } - - // Quick return if possible. - if n == 0 { - return - } - - // For zero matrix size the following slice length checks are trivially satisfied. - if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { - panic(shortX) - } - if len(a) < lda*(n-1)+n { - panic(shortA) - } - - // Quick return if possible. 
- if alpha == 0 { - return - } - - var kx int - if incX < 0 { - kx = (1 - n) * incX - } - if uplo == blas.Upper { - if incX == 1 { - for i := 0; i < n; i++ { - if x[i] != 0 { - tmp := complex(alpha*real(x[i]), alpha*imag(x[i])) - aii := real(a[i*lda+i]) - xtmp := real(tmp * cmplx.Conj(x[i])) - a[i*lda+i] = complex(aii+xtmp, 0) - for j := i + 1; j < n; j++ { - a[i*lda+j] += tmp * cmplx.Conj(x[j]) - } - } else { - aii := real(a[i*lda+i]) - a[i*lda+i] = complex(aii, 0) - } - } - return - } - - ix := kx - for i := 0; i < n; i++ { - if x[ix] != 0 { - tmp := complex(alpha*real(x[ix]), alpha*imag(x[ix])) - aii := real(a[i*lda+i]) - xtmp := real(tmp * cmplx.Conj(x[ix])) - a[i*lda+i] = complex(aii+xtmp, 0) - jx := ix + incX - for j := i + 1; j < n; j++ { - a[i*lda+j] += tmp * cmplx.Conj(x[jx]) - jx += incX - } - } else { - aii := real(a[i*lda+i]) - a[i*lda+i] = complex(aii, 0) - } - ix += incX - } - return - } - - if incX == 1 { - for i := 0; i < n; i++ { - if x[i] != 0 { - tmp := complex(alpha*real(x[i]), alpha*imag(x[i])) - for j := 0; j < i; j++ { - a[i*lda+j] += tmp * cmplx.Conj(x[j]) - } - aii := real(a[i*lda+i]) - xtmp := real(tmp * cmplx.Conj(x[i])) - a[i*lda+i] = complex(aii+xtmp, 0) - } else { - aii := real(a[i*lda+i]) - a[i*lda+i] = complex(aii, 0) - } - } - return - } - - ix := kx - for i := 0; i < n; i++ { - if x[ix] != 0 { - tmp := complex(alpha*real(x[ix]), alpha*imag(x[ix])) - jx := kx - for j := 0; j < i; j++ { - a[i*lda+j] += tmp * cmplx.Conj(x[jx]) - jx += incX - } - aii := real(a[i*lda+i]) - xtmp := real(tmp * cmplx.Conj(x[ix])) - a[i*lda+i] = complex(aii+xtmp, 0) - - } else { - aii := real(a[i*lda+i]) - a[i*lda+i] = complex(aii, 0) - } - ix += incX - } -} - -// Zher2 performs the Hermitian rank-two operation -// A += alpha * x * y^H + conj(alpha) * y * x^H -// where alpha is a scalar, x and y are n element vectors and A is an n×n -// Hermitian matrix. On entry, the imaginary parts of the diagonal elements are -// ignored and assumed to be zero. On return they will be set to zero. -func (Implementation) Zher2(uplo blas.Uplo, n int, alpha complex128, x []complex128, incX int, y []complex128, incY int, a []complex128, lda int) { - switch uplo { - default: - panic(badUplo) - case blas.Upper, blas.Lower: - } - if n < 0 { - panic(nLT0) - } - if lda < max(1, n) { - panic(badLdA) - } - if incX == 0 { - panic(zeroIncX) - } - if incY == 0 { - panic(zeroIncY) - } - - // Quick return if possible. - if n == 0 { - return - } - - // For zero matrix size the following slice length checks are trivially satisfied. - if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { - panic(shortX) - } - if (incY > 0 && len(y) <= (n-1)*incY) || (incY < 0 && len(y) <= (1-n)*incY) { - panic(shortY) - } - if len(a) < lda*(n-1)+n { - panic(shortA) - } - - // Quick return if possible. 
- if alpha == 0 { - return - } - - var kx, ky int - var ix, iy int - if incX != 1 || incY != 1 { - if incX < 0 { - kx = (1 - n) * incX - } - if incY < 0 { - ky = (1 - n) * incY - } - ix = kx - iy = ky - } - if uplo == blas.Upper { - if incX == 1 && incY == 1 { - for i := 0; i < n; i++ { - if x[i] != 0 || y[i] != 0 { - tmp1 := alpha * x[i] - tmp2 := cmplx.Conj(alpha) * y[i] - aii := real(a[i*lda+i]) + real(tmp1*cmplx.Conj(y[i])) + real(tmp2*cmplx.Conj(x[i])) - a[i*lda+i] = complex(aii, 0) - for j := i + 1; j < n; j++ { - a[i*lda+j] += tmp1*cmplx.Conj(y[j]) + tmp2*cmplx.Conj(x[j]) - } - } else { - aii := real(a[i*lda+i]) - a[i*lda+i] = complex(aii, 0) - } - } - return - } - for i := 0; i < n; i++ { - if x[ix] != 0 || y[iy] != 0 { - tmp1 := alpha * x[ix] - tmp2 := cmplx.Conj(alpha) * y[iy] - aii := real(a[i*lda+i]) + real(tmp1*cmplx.Conj(y[iy])) + real(tmp2*cmplx.Conj(x[ix])) - a[i*lda+i] = complex(aii, 0) - jx := ix + incX - jy := iy + incY - for j := i + 1; j < n; j++ { - a[i*lda+j] += tmp1*cmplx.Conj(y[jy]) + tmp2*cmplx.Conj(x[jx]) - jx += incX - jy += incY - } - } else { - aii := real(a[i*lda+i]) - a[i*lda+i] = complex(aii, 0) - } - ix += incX - iy += incY - } - return - } - - if incX == 1 && incY == 1 { - for i := 0; i < n; i++ { - if x[i] != 0 || y[i] != 0 { - tmp1 := alpha * x[i] - tmp2 := cmplx.Conj(alpha) * y[i] - for j := 0; j < i; j++ { - a[i*lda+j] += tmp1*cmplx.Conj(y[j]) + tmp2*cmplx.Conj(x[j]) - } - aii := real(a[i*lda+i]) + real(tmp1*cmplx.Conj(y[i])) + real(tmp2*cmplx.Conj(x[i])) - a[i*lda+i] = complex(aii, 0) - } else { - aii := real(a[i*lda+i]) - a[i*lda+i] = complex(aii, 0) - } - } - return - } - for i := 0; i < n; i++ { - if x[ix] != 0 || y[iy] != 0 { - tmp1 := alpha * x[ix] - tmp2 := cmplx.Conj(alpha) * y[iy] - jx := kx - jy := ky - for j := 0; j < i; j++ { - a[i*lda+j] += tmp1*cmplx.Conj(y[jy]) + tmp2*cmplx.Conj(x[jx]) - jx += incX - jy += incY - } - aii := real(a[i*lda+i]) + real(tmp1*cmplx.Conj(y[iy])) + real(tmp2*cmplx.Conj(x[ix])) - a[i*lda+i] = complex(aii, 0) - } else { - aii := real(a[i*lda+i]) - a[i*lda+i] = complex(aii, 0) - } - ix += incX - iy += incY - } -} - -// Zhpmv performs the matrix-vector operation -// y = alpha * A * x + beta * y -// where alpha and beta are scalars, x and y are vectors, and A is an n×n -// Hermitian matrix in packed form. The imaginary parts of the diagonal -// elements of A are ignored and assumed to be zero. -func (Implementation) Zhpmv(uplo blas.Uplo, n int, alpha complex128, ap []complex128, x []complex128, incX int, beta complex128, y []complex128, incY int) { - switch uplo { - default: - panic(badUplo) - case blas.Upper, blas.Lower: - } - if n < 0 { - panic(nLT0) - } - if incX == 0 { - panic(zeroIncX) - } - if incY == 0 { - panic(zeroIncY) - } - - // Quick return if possible. - if n == 0 { - return - } - - // For zero matrix size the following slice length checks are trivially satisfied. - if len(ap) < n*(n+1)/2 { - panic(shortAP) - } - if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { - panic(shortX) - } - if (incY > 0 && len(y) <= (n-1)*incY) || (incY < 0 && len(y) <= (1-n)*incY) { - panic(shortY) - } - - // Quick return if possible. - if alpha == 0 && beta == 1 { - return - } - - // Set up the start indices in X and Y. - var kx int - if incX < 0 { - kx = (1 - n) * incX - } - var ky int - if incY < 0 { - ky = (1 - n) * incY - } - - // Form y = beta*y. 
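The packed routines drop the unused triangle entirely. With uplo == blas.Upper the rows of the upper triangle are concatenated, so A(i,j) for j >= i lives at ap[i*n - i*(i-1)/2 + (j-i)] and len(ap) is n*(n+1)/2. A sketch of Zhpmv on that layout (values are illustrative):

package main

import (
	"fmt"

	"gonum.org/v1/gonum/blas"
	"gonum.org/v1/gonum/blas/gonum"
)

func main() {
	var impl gonum.Implementation

	// Upper-packed layout for the n = 3 Hermitian matrix
	//
	// | 1    2i  0 |
	// | -2i  3   1 |
	// | 0    1   4 |
	//
	// rows of the upper triangle concatenated: ap = [A00 A01 A02 A11 A12 A22].
	ap := []complex128{1, 2i, 0, 3, 1, 4}
	x := []complex128{1, 1, 1}
	y := make([]complex128, 3)

	impl.Zhpmv(blas.Upper, 3, 1, ap, x, 1, 0, y, 1)
	fmt.Println(y) // [(1+2i) (4-2i) (5+0i)]
}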
- if beta != 1 { - if incY == 1 { - if beta == 0 { - for i := range y[:n] { - y[i] = 0 - } - } else { - for i, v := range y[:n] { - y[i] = beta * v - } - } - } else { - iy := ky - if beta == 0 { - for i := 0; i < n; i++ { - y[iy] = 0 - iy += incY - } - } else { - for i := 0; i < n; i++ { - y[iy] *= beta - iy += incY - } - } - } - } - - if alpha == 0 { - return - } - - // The elements of A are accessed sequentially with one pass through ap. - - var kk int - if uplo == blas.Upper { - // Form y when ap contains the upper triangle. - // Here, kk points to the current diagonal element in ap. - if incX == 1 && incY == 1 { - for i := 0; i < n; i++ { - tmp1 := alpha * x[i] - y[i] += tmp1 * complex(real(ap[kk]), 0) - var tmp2 complex128 - k := kk + 1 - for j := i + 1; j < n; j++ { - y[j] += tmp1 * cmplx.Conj(ap[k]) - tmp2 += ap[k] * x[j] - k++ - } - y[i] += alpha * tmp2 - kk += n - i - } - } else { - ix := kx - iy := ky - for i := 0; i < n; i++ { - tmp1 := alpha * x[ix] - y[iy] += tmp1 * complex(real(ap[kk]), 0) - var tmp2 complex128 - jx := ix - jy := iy - for k := kk + 1; k < kk+n-i; k++ { - jx += incX - jy += incY - y[jy] += tmp1 * cmplx.Conj(ap[k]) - tmp2 += ap[k] * x[jx] - } - y[iy] += alpha * tmp2 - ix += incX - iy += incY - kk += n - i - } - } - return - } - - // Form y when ap contains the lower triangle. - // Here, kk points to the beginning of current row in ap. - if incX == 1 && incY == 1 { - for i := 0; i < n; i++ { - tmp1 := alpha * x[i] - var tmp2 complex128 - k := kk - for j := 0; j < i; j++ { - y[j] += tmp1 * cmplx.Conj(ap[k]) - tmp2 += ap[k] * x[j] - k++ - } - aii := complex(real(ap[kk+i]), 0) - y[i] += tmp1*aii + alpha*tmp2 - kk += i + 1 - } - } else { - ix := kx - iy := ky - for i := 0; i < n; i++ { - tmp1 := alpha * x[ix] - var tmp2 complex128 - jx := kx - jy := ky - for k := kk; k < kk+i; k++ { - y[jy] += tmp1 * cmplx.Conj(ap[k]) - tmp2 += ap[k] * x[jx] - jx += incX - jy += incY - } - aii := complex(real(ap[kk+i]), 0) - y[iy] += tmp1*aii + alpha*tmp2 - ix += incX - iy += incY - kk += i + 1 - } - } -} - -// Zhpr performs the Hermitian rank-1 operation -// A += alpha * x * x^H -// where alpha is a real scalar, x is a vector, and A is an n×n hermitian matrix -// in packed form. On entry, the imaginary parts of the diagonal elements are -// assumed to be zero, and on return they are set to zero. -func (Implementation) Zhpr(uplo blas.Uplo, n int, alpha float64, x []complex128, incX int, ap []complex128) { - switch uplo { - default: - panic(badUplo) - case blas.Upper, blas.Lower: - } - if n < 0 { - panic(nLT0) - } - if incX == 0 { - panic(zeroIncX) - } - - // Quick return if possible. - if n == 0 { - return - } - - // For zero matrix size the following slice length checks are trivially satisfied. - if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { - panic(shortX) - } - if len(ap) < n*(n+1)/2 { - panic(shortAP) - } - - // Quick return if possible. - if alpha == 0 { - return - } - - // Set up start index in X. - var kx int - if incX < 0 { - kx = (1 - n) * incX - } - - // The elements of A are accessed sequentially with one pass through ap. - - var kk int - if uplo == blas.Upper { - // Form A when upper triangle is stored in AP. - // Here, kk points to the current diagonal element in ap. 
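Zhpr is the packed counterpart of Zher: the same real-alpha rank-one update, written into the n*(n+1)/2-element packed triangle. A minimal sketch (invented values):

package main

import (
	"fmt"

	"gonum.org/v1/gonum/blas"
	"gonum.org/v1/gonum/blas/gonum"
)

func main() {
	var impl gonum.Implementation

	n := 2
	ap := make([]complex128, n*(n+1)/2) // packed upper triangle of the zero matrix
	x := []complex128{1i, 1}

	// A += 2 * x * x^H, writing only the packed upper triangle.
	impl.Zhpr(blas.Upper, n, 2, x, 1, ap)
	fmt.Println(ap) // [(2+0i) (0+2i) (2+0i)]
}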
- if incX == 1 { - for i := 0; i < n; i++ { - xi := x[i] - if xi != 0 { - aii := real(ap[kk]) + alpha*real(cmplx.Conj(xi)*xi) - ap[kk] = complex(aii, 0) - - tmp := complex(alpha, 0) * xi - a := ap[kk+1 : kk+n-i] - x := x[i+1 : n] - for j, v := range x { - a[j] += tmp * cmplx.Conj(v) - } - } else { - ap[kk] = complex(real(ap[kk]), 0) - } - kk += n - i - } - } else { - ix := kx - for i := 0; i < n; i++ { - xi := x[ix] - if xi != 0 { - aii := real(ap[kk]) + alpha*real(cmplx.Conj(xi)*xi) - ap[kk] = complex(aii, 0) - - tmp := complex(alpha, 0) * xi - jx := ix + incX - a := ap[kk+1 : kk+n-i] - for k := range a { - a[k] += tmp * cmplx.Conj(x[jx]) - jx += incX - } - } else { - ap[kk] = complex(real(ap[kk]), 0) - } - ix += incX - kk += n - i - } - } - return - } - - // Form A when lower triangle is stored in AP. - // Here, kk points to the beginning of current row in ap. - if incX == 1 { - for i := 0; i < n; i++ { - xi := x[i] - if xi != 0 { - tmp := complex(alpha, 0) * xi - a := ap[kk : kk+i] - for j, v := range x[:i] { - a[j] += tmp * cmplx.Conj(v) - } - - aii := real(ap[kk+i]) + alpha*real(cmplx.Conj(xi)*xi) - ap[kk+i] = complex(aii, 0) - } else { - ap[kk+i] = complex(real(ap[kk+i]), 0) - } - kk += i + 1 - } - } else { - ix := kx - for i := 0; i < n; i++ { - xi := x[ix] - if xi != 0 { - tmp := complex(alpha, 0) * xi - a := ap[kk : kk+i] - jx := kx - for k := range a { - a[k] += tmp * cmplx.Conj(x[jx]) - jx += incX - } - - aii := real(ap[kk+i]) + alpha*real(cmplx.Conj(xi)*xi) - ap[kk+i] = complex(aii, 0) - } else { - ap[kk+i] = complex(real(ap[kk+i]), 0) - } - ix += incX - kk += i + 1 - } - } -} - -// Zhpr2 performs the Hermitian rank-2 operation -// A += alpha * x * y^H + conj(alpha) * y * x^H -// where alpha is a complex scalar, x and y are n element vectors, and A is an -// n×n Hermitian matrix, supplied in packed form. On entry, the imaginary parts -// of the diagonal elements are assumed to be zero, and on return they are set to zero. -func (Implementation) Zhpr2(uplo blas.Uplo, n int, alpha complex128, x []complex128, incX int, y []complex128, incY int, ap []complex128) { - switch uplo { - default: - panic(badUplo) - case blas.Upper, blas.Lower: - } - if n < 0 { - panic(nLT0) - } - if incX == 0 { - panic(zeroIncX) - } - if incY == 0 { - panic(zeroIncY) - } - - // Quick return if possible. - if n == 0 { - return - } - - // For zero matrix size the following slice length checks are trivially satisfied. - if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { - panic(shortX) - } - if (incY > 0 && len(y) <= (n-1)*incY) || (incY < 0 && len(y) <= (1-n)*incY) { - panic(shortY) - } - if len(ap) < n*(n+1)/2 { - panic(shortAP) - } - - // Quick return if possible. - if alpha == 0 { - return - } - - // Set up start indices in X and Y. - var kx int - if incX < 0 { - kx = (1 - n) * incX - } - var ky int - if incY < 0 { - ky = (1 - n) * incY - } - - // The elements of A are accessed sequentially with one pass through ap. - - var kk int - if uplo == blas.Upper { - // Form A when upper triangle is stored in AP. - // Here, kk points to the current diagonal element in ap. 
- if incX == 1 && incY == 1 { - for i := 0; i < n; i++ { - if x[i] != 0 || y[i] != 0 { - tmp1 := alpha * x[i] - tmp2 := cmplx.Conj(alpha) * y[i] - aii := real(ap[kk]) + real(tmp1*cmplx.Conj(y[i])) + real(tmp2*cmplx.Conj(x[i])) - ap[kk] = complex(aii, 0) - k := kk + 1 - for j := i + 1; j < n; j++ { - ap[k] += tmp1*cmplx.Conj(y[j]) + tmp2*cmplx.Conj(x[j]) - k++ - } - } else { - ap[kk] = complex(real(ap[kk]), 0) - } - kk += n - i - } - } else { - ix := kx - iy := ky - for i := 0; i < n; i++ { - if x[ix] != 0 || y[iy] != 0 { - tmp1 := alpha * x[ix] - tmp2 := cmplx.Conj(alpha) * y[iy] - aii := real(ap[kk]) + real(tmp1*cmplx.Conj(y[iy])) + real(tmp2*cmplx.Conj(x[ix])) - ap[kk] = complex(aii, 0) - jx := ix + incX - jy := iy + incY - for k := kk + 1; k < kk+n-i; k++ { - ap[k] += tmp1*cmplx.Conj(y[jy]) + tmp2*cmplx.Conj(x[jx]) - jx += incX - jy += incY - } - } else { - ap[kk] = complex(real(ap[kk]), 0) - } - ix += incX - iy += incY - kk += n - i - } - } - return - } - - // Form A when lower triangle is stored in AP. - // Here, kk points to the beginning of current row in ap. - if incX == 1 && incY == 1 { - for i := 0; i < n; i++ { - if x[i] != 0 || y[i] != 0 { - tmp1 := alpha * x[i] - tmp2 := cmplx.Conj(alpha) * y[i] - k := kk - for j := 0; j < i; j++ { - ap[k] += tmp1*cmplx.Conj(y[j]) + tmp2*cmplx.Conj(x[j]) - k++ - } - aii := real(ap[kk+i]) + real(tmp1*cmplx.Conj(y[i])) + real(tmp2*cmplx.Conj(x[i])) - ap[kk+i] = complex(aii, 0) - } else { - ap[kk+i] = complex(real(ap[kk+i]), 0) - } - kk += i + 1 - } - } else { - ix := kx - iy := ky - for i := 0; i < n; i++ { - if x[ix] != 0 || y[iy] != 0 { - tmp1 := alpha * x[ix] - tmp2 := cmplx.Conj(alpha) * y[iy] - jx := kx - jy := ky - for k := kk; k < kk+i; k++ { - ap[k] += tmp1*cmplx.Conj(y[jy]) + tmp2*cmplx.Conj(x[jx]) - jx += incX - jy += incY - } - aii := real(ap[kk+i]) + real(tmp1*cmplx.Conj(y[iy])) + real(tmp2*cmplx.Conj(x[ix])) - ap[kk+i] = complex(aii, 0) - } else { - ap[kk+i] = complex(real(ap[kk+i]), 0) - } - ix += incX - iy += incY - kk += i + 1 - } - } -} - -// Ztbmv performs one of the matrix-vector operations -// x = A * x if trans = blas.NoTrans -// x = A^T * x if trans = blas.Trans -// x = A^H * x if trans = blas.ConjTrans -// where x is an n element vector and A is an n×n triangular band matrix, with -// (k+1) diagonals. -func (Implementation) Ztbmv(uplo blas.Uplo, trans blas.Transpose, diag blas.Diag, n, k int, a []complex128, lda int, x []complex128, incX int) { - switch trans { - default: - panic(badTranspose) - case blas.NoTrans, blas.Trans, blas.ConjTrans: - } - switch uplo { - default: - panic(badUplo) - case blas.Upper, blas.Lower: - } - switch diag { - default: - panic(badDiag) - case blas.NonUnit, blas.Unit: - } - if n < 0 { - panic(nLT0) - } - if k < 0 { - panic(kLT0) - } - if lda < k+1 { - panic(badLdA) - } - if incX == 0 { - panic(zeroIncX) - } - - // Quick return if possible. - if n == 0 { - return - } - - // For zero matrix size the following slice length checks are trivially satisfied. - if len(a) < lda*(n-1)+k+1 { - panic(shortA) - } - if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { - panic(shortX) - } - - // Set up start index in X. 
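Ztbmv multiplies in place by a triangular band matrix; the band storage is the same row-major scheme as Zhbmv, restricted to one triangle. A sketch with a 3×3 upper triangular matrix with one super-diagonal (values invented):

package main

import (
	"fmt"

	"gonum.org/v1/gonum/blas"
	"gonum.org/v1/gonum/blas/gonum"
)

func main() {
	var impl gonum.Implementation

	// n = 3 upper triangular band matrix with k = 1 super-diagonal,
	//
	// | 1  2  0 |
	// | 0  3  4 |
	// | 0  0  5 |
	//
	// stored row-major with lda = k+1 = 2: row i holds [A(i,i), A(i,i+1)].
	a := []complex128{1, 2, 3, 4, 5, 0}
	x := []complex128{1, 1, 1}

	// x = A * x, in place.
	impl.Ztbmv(blas.Upper, blas.NoTrans, blas.NonUnit, 3, 1, a, 2, x, 1)
	fmt.Println(x) // [(3+0i) (7+0i) (5+0i)]
}

With diag == blas.Unit the stored diagonal entries are ignored and taken to be one.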
- var kx int - if incX < 0 { - kx = (1 - n) * incX - } - - switch trans { - case blas.NoTrans: - if uplo == blas.Upper { - if incX == 1 { - for i := 0; i < n; i++ { - xi := x[i] - if diag == blas.NonUnit { - xi *= a[i*lda] - } - kk := min(k, n-i-1) - for j, aij := range a[i*lda+1 : i*lda+kk+1] { - xi += x[i+j+1] * aij - } - x[i] = xi - } - } else { - ix := kx - for i := 0; i < n; i++ { - xi := x[ix] - if diag == blas.NonUnit { - xi *= a[i*lda] - } - kk := min(k, n-i-1) - jx := ix + incX - for _, aij := range a[i*lda+1 : i*lda+kk+1] { - xi += x[jx] * aij - jx += incX - } - x[ix] = xi - ix += incX - } - } - } else { - if incX == 1 { - for i := n - 1; i >= 0; i-- { - xi := x[i] - if diag == blas.NonUnit { - xi *= a[i*lda+k] - } - kk := min(k, i) - for j, aij := range a[i*lda+k-kk : i*lda+k] { - xi += x[i-kk+j] * aij - } - x[i] = xi - } - } else { - ix := kx + (n-1)*incX - for i := n - 1; i >= 0; i-- { - xi := x[ix] - if diag == blas.NonUnit { - xi *= a[i*lda+k] - } - kk := min(k, i) - jx := ix - kk*incX - for _, aij := range a[i*lda+k-kk : i*lda+k] { - xi += x[jx] * aij - jx += incX - } - x[ix] = xi - ix -= incX - } - } - } - case blas.Trans: - if uplo == blas.Upper { - if incX == 1 { - for i := n - 1; i >= 0; i-- { - kk := min(k, n-i-1) - xi := x[i] - for j, aij := range a[i*lda+1 : i*lda+kk+1] { - x[i+j+1] += xi * aij - } - if diag == blas.NonUnit { - x[i] *= a[i*lda] - } - } - } else { - ix := kx + (n-1)*incX - for i := n - 1; i >= 0; i-- { - kk := min(k, n-i-1) - jx := ix + incX - xi := x[ix] - for _, aij := range a[i*lda+1 : i*lda+kk+1] { - x[jx] += xi * aij - jx += incX - } - if diag == blas.NonUnit { - x[ix] *= a[i*lda] - } - ix -= incX - } - } - } else { - if incX == 1 { - for i := 0; i < n; i++ { - kk := min(k, i) - xi := x[i] - for j, aij := range a[i*lda+k-kk : i*lda+k] { - x[i-kk+j] += xi * aij - } - if diag == blas.NonUnit { - x[i] *= a[i*lda+k] - } - } - } else { - ix := kx - for i := 0; i < n; i++ { - kk := min(k, i) - jx := ix - kk*incX - xi := x[ix] - for _, aij := range a[i*lda+k-kk : i*lda+k] { - x[jx] += xi * aij - jx += incX - } - if diag == blas.NonUnit { - x[ix] *= a[i*lda+k] - } - ix += incX - } - } - } - case blas.ConjTrans: - if uplo == blas.Upper { - if incX == 1 { - for i := n - 1; i >= 0; i-- { - kk := min(k, n-i-1) - xi := x[i] - for j, aij := range a[i*lda+1 : i*lda+kk+1] { - x[i+j+1] += xi * cmplx.Conj(aij) - } - if diag == blas.NonUnit { - x[i] *= cmplx.Conj(a[i*lda]) - } - } - } else { - ix := kx + (n-1)*incX - for i := n - 1; i >= 0; i-- { - kk := min(k, n-i-1) - jx := ix + incX - xi := x[ix] - for _, aij := range a[i*lda+1 : i*lda+kk+1] { - x[jx] += xi * cmplx.Conj(aij) - jx += incX - } - if diag == blas.NonUnit { - x[ix] *= cmplx.Conj(a[i*lda]) - } - ix -= incX - } - } - } else { - if incX == 1 { - for i := 0; i < n; i++ { - kk := min(k, i) - xi := x[i] - for j, aij := range a[i*lda+k-kk : i*lda+k] { - x[i-kk+j] += xi * cmplx.Conj(aij) - } - if diag == blas.NonUnit { - x[i] *= cmplx.Conj(a[i*lda+k]) - } - } - } else { - ix := kx - for i := 0; i < n; i++ { - kk := min(k, i) - jx := ix - kk*incX - xi := x[ix] - for _, aij := range a[i*lda+k-kk : i*lda+k] { - x[jx] += xi * cmplx.Conj(aij) - jx += incX - } - if diag == blas.NonUnit { - x[ix] *= cmplx.Conj(a[i*lda+k]) - } - ix += incX - } - } - } - } -} - -// Ztbsv solves one of the systems of equations -// A * x = b if trans == blas.NoTrans -// A^T * x = b if trans == blas.Trans -// A^H * x = b if trans == blas.ConjTrans -// where b and x are n element vectors and A is an n×n triangular band matrix -// with 
(k+1) diagonals. -// -// On entry, x contains the values of b, and the solution is -// stored in-place into x. -// -// No test for singularity or near-singularity is included in this -// routine. Such tests must be performed before calling this routine. -func (Implementation) Ztbsv(uplo blas.Uplo, trans blas.Transpose, diag blas.Diag, n, k int, a []complex128, lda int, x []complex128, incX int) { - switch trans { - default: - panic(badTranspose) - case blas.NoTrans, blas.Trans, blas.ConjTrans: - } - switch uplo { - default: - panic(badUplo) - case blas.Upper, blas.Lower: - } - switch diag { - default: - panic(badDiag) - case blas.NonUnit, blas.Unit: - } - if n < 0 { - panic(nLT0) - } - if k < 0 { - panic(kLT0) - } - if lda < k+1 { - panic(badLdA) - } - if incX == 0 { - panic(zeroIncX) - } - - // Quick return if possible. - if n == 0 { - return - } - - // For zero matrix size the following slice length checks are trivially satisfied. - if len(a) < lda*(n-1)+k+1 { - panic(shortA) - } - if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { - panic(shortX) - } - - // Set up start index in X. - var kx int - if incX < 0 { - kx = (1 - n) * incX - } - - switch trans { - case blas.NoTrans: - if uplo == blas.Upper { - if incX == 1 { - for i := n - 1; i >= 0; i-- { - kk := min(k, n-i-1) - var sum complex128 - for j, aij := range a[i*lda+1 : i*lda+kk+1] { - sum += x[i+1+j] * aij - } - x[i] -= sum - if diag == blas.NonUnit { - x[i] /= a[i*lda] - } - } - } else { - ix := kx + (n-1)*incX - for i := n - 1; i >= 0; i-- { - kk := min(k, n-i-1) - var sum complex128 - jx := ix + incX - for _, aij := range a[i*lda+1 : i*lda+kk+1] { - sum += x[jx] * aij - jx += incX - } - x[ix] -= sum - if diag == blas.NonUnit { - x[ix] /= a[i*lda] - } - ix -= incX - } - } - } else { - if incX == 1 { - for i := 0; i < n; i++ { - kk := min(k, i) - var sum complex128 - for j, aij := range a[i*lda+k-kk : i*lda+k] { - sum += x[i-kk+j] * aij - } - x[i] -= sum - if diag == blas.NonUnit { - x[i] /= a[i*lda+k] - } - } - } else { - ix := kx - for i := 0; i < n; i++ { - kk := min(k, i) - var sum complex128 - jx := ix - kk*incX - for _, aij := range a[i*lda+k-kk : i*lda+k] { - sum += x[jx] * aij - jx += incX - } - x[ix] -= sum - if diag == blas.NonUnit { - x[ix] /= a[i*lda+k] - } - ix += incX - } - } - } - case blas.Trans: - if uplo == blas.Upper { - if incX == 1 { - for i := 0; i < n; i++ { - if diag == blas.NonUnit { - x[i] /= a[i*lda] - } - kk := min(k, n-i-1) - xi := x[i] - for j, aij := range a[i*lda+1 : i*lda+kk+1] { - x[i+1+j] -= xi * aij - } - } - } else { - ix := kx - for i := 0; i < n; i++ { - if diag == blas.NonUnit { - x[ix] /= a[i*lda] - } - kk := min(k, n-i-1) - xi := x[ix] - jx := ix + incX - for _, aij := range a[i*lda+1 : i*lda+kk+1] { - x[jx] -= xi * aij - jx += incX - } - ix += incX - } - } - } else { - if incX == 1 { - for i := n - 1; i >= 0; i-- { - if diag == blas.NonUnit { - x[i] /= a[i*lda+k] - } - kk := min(k, i) - xi := x[i] - for j, aij := range a[i*lda+k-kk : i*lda+k] { - x[i-kk+j] -= xi * aij - } - } - } else { - ix := kx + (n-1)*incX - for i := n - 1; i >= 0; i-- { - if diag == blas.NonUnit { - x[ix] /= a[i*lda+k] - } - kk := min(k, i) - xi := x[ix] - jx := ix - kk*incX - for _, aij := range a[i*lda+k-kk : i*lda+k] { - x[jx] -= xi * aij - jx += incX - } - ix -= incX - } - } - } - case blas.ConjTrans: - if uplo == blas.Upper { - if incX == 1 { - for i := 0; i < n; i++ { - if diag == blas.NonUnit { - x[i] /= cmplx.Conj(a[i*lda]) - } - kk := min(k, n-i-1) - xi := x[i] - for j, aij := 
range a[i*lda+1 : i*lda+kk+1] { - x[i+1+j] -= xi * cmplx.Conj(aij) - } - } - } else { - ix := kx - for i := 0; i < n; i++ { - if diag == blas.NonUnit { - x[ix] /= cmplx.Conj(a[i*lda]) - } - kk := min(k, n-i-1) - xi := x[ix] - jx := ix + incX - for _, aij := range a[i*lda+1 : i*lda+kk+1] { - x[jx] -= xi * cmplx.Conj(aij) - jx += incX - } - ix += incX - } - } - } else { - if incX == 1 { - for i := n - 1; i >= 0; i-- { - if diag == blas.NonUnit { - x[i] /= cmplx.Conj(a[i*lda+k]) - } - kk := min(k, i) - xi := x[i] - for j, aij := range a[i*lda+k-kk : i*lda+k] { - x[i-kk+j] -= xi * cmplx.Conj(aij) - } - } - } else { - ix := kx + (n-1)*incX - for i := n - 1; i >= 0; i-- { - if diag == blas.NonUnit { - x[ix] /= cmplx.Conj(a[i*lda+k]) - } - kk := min(k, i) - xi := x[ix] - jx := ix - kk*incX - for _, aij := range a[i*lda+k-kk : i*lda+k] { - x[jx] -= xi * cmplx.Conj(aij) - jx += incX - } - ix -= incX - } - } - } - } -} - -// Ztpmv performs one of the matrix-vector operations -// x = A * x if trans = blas.NoTrans -// x = A^T * x if trans = blas.Trans -// x = A^H * x if trans = blas.ConjTrans -// where x is an n element vector and A is an n×n triangular matrix, supplied in -// packed form. -func (Implementation) Ztpmv(uplo blas.Uplo, trans blas.Transpose, diag blas.Diag, n int, ap []complex128, x []complex128, incX int) { - switch uplo { - default: - panic(badUplo) - case blas.Upper, blas.Lower: - } - switch trans { - default: - panic(badTranspose) - case blas.NoTrans, blas.Trans, blas.ConjTrans: - } - switch diag { - default: - panic(badDiag) - case blas.NonUnit, blas.Unit: - } - if n < 0 { - panic(nLT0) - } - if incX == 0 { - panic(zeroIncX) - } - - // Quick return if possible. - if n == 0 { - return - } - - // For zero matrix size the following slice length checks are trivially satisfied. - if len(ap) < n*(n+1)/2 { - panic(shortAP) - } - if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { - panic(shortX) - } - - // Set up start index in X. - var kx int - if incX < 0 { - kx = (1 - n) * incX - } - - // The elements of A are accessed sequentially with one pass through A. - - if trans == blas.NoTrans { - // Form x = A*x. - if uplo == blas.Upper { - // kk points to the current diagonal element in ap. - kk := 0 - if incX == 1 { - x = x[:n] - for i := range x { - if diag == blas.NonUnit { - x[i] *= ap[kk] - } - if n-i-1 > 0 { - x[i] += c128.DotuUnitary(ap[kk+1:kk+n-i], x[i+1:]) - } - kk += n - i - } - } else { - ix := kx - for i := 0; i < n; i++ { - if diag == blas.NonUnit { - x[ix] *= ap[kk] - } - if n-i-1 > 0 { - x[ix] += c128.DotuInc(ap[kk+1:kk+n-i], x, uintptr(n-i-1), 1, uintptr(incX), 0, uintptr(ix+incX)) - } - ix += incX - kk += n - i - } - } - } else { - // kk points to the beginning of current row in ap. - kk := n*(n+1)/2 - n - if incX == 1 { - for i := n - 1; i >= 0; i-- { - if diag == blas.NonUnit { - x[i] *= ap[kk+i] - } - if i > 0 { - x[i] += c128.DotuUnitary(ap[kk:kk+i], x[:i]) - } - kk -= i - } - } else { - ix := kx + (n-1)*incX - for i := n - 1; i >= 0; i-- { - if diag == blas.NonUnit { - x[ix] *= ap[kk+i] - } - if i > 0 { - x[ix] += c128.DotuInc(ap[kk:kk+i], x, uintptr(i), 1, uintptr(incX), 0, uintptr(kx)) - } - ix -= incX - kk -= i - } - } - } - return - } - - if trans == blas.Trans { - // Form x = A^T*x. - if uplo == blas.Upper { - // kk points to the current diagonal element in ap. 
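Ztbsv, completed above, is the in-place forward/back substitution matching Ztbmv, and as its comment says it performs no singularity check, so a zero on a non-unit diagonal will quietly produce non-finite values rather than an error. Continuing the Ztbmv sketch (same invented matrix), solving recovers the original vector:

package main

import (
	"fmt"

	"gonum.org/v1/gonum/blas"
	"gonum.org/v1/gonum/blas/gonum"
)

func main() {
	var impl gonum.Implementation

	// Same banded matrix as the Ztbmv sketch; solve A*x = b in place.
	a := []complex128{1, 2, 3, 4, 5, 0}
	x := []complex128{3, 7, 5} // b on entry, overwritten with the solution

	impl.Ztbsv(blas.Upper, blas.NoTrans, blas.NonUnit, 3, 1, a, 2, x, 1)
	fmt.Println(x) // [(1+0i) (1+0i) (1+0i)]  -- inverts the Ztbmv call above
}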
- kk := n*(n+1)/2 - 1 - if incX == 1 { - for i := n - 1; i >= 0; i-- { - xi := x[i] - if diag == blas.NonUnit { - x[i] *= ap[kk] - } - if n-i-1 > 0 { - c128.AxpyUnitary(xi, ap[kk+1:kk+n-i], x[i+1:n]) - } - kk -= n - i + 1 - } - } else { - ix := kx + (n-1)*incX - for i := n - 1; i >= 0; i-- { - xi := x[ix] - if diag == blas.NonUnit { - x[ix] *= ap[kk] - } - if n-i-1 > 0 { - c128.AxpyInc(xi, ap[kk+1:kk+n-i], x, uintptr(n-i-1), 1, uintptr(incX), 0, uintptr(ix+incX)) - } - ix -= incX - kk -= n - i + 1 - } - } - } else { - // kk points to the beginning of current row in ap. - kk := 0 - if incX == 1 { - x = x[:n] - for i := range x { - if i > 0 { - c128.AxpyUnitary(x[i], ap[kk:kk+i], x[:i]) - } - if diag == blas.NonUnit { - x[i] *= ap[kk+i] - } - kk += i + 1 - } - } else { - ix := kx - for i := 0; i < n; i++ { - if i > 0 { - c128.AxpyInc(x[ix], ap[kk:kk+i], x, uintptr(i), 1, uintptr(incX), 0, uintptr(kx)) - } - if diag == blas.NonUnit { - x[ix] *= ap[kk+i] - } - ix += incX - kk += i + 1 - } - } - } - return - } - - // Form x = A^H*x. - if uplo == blas.Upper { - // kk points to the current diagonal element in ap. - kk := n*(n+1)/2 - 1 - if incX == 1 { - for i := n - 1; i >= 0; i-- { - xi := x[i] - if diag == blas.NonUnit { - x[i] *= cmplx.Conj(ap[kk]) - } - k := kk + 1 - for j := i + 1; j < n; j++ { - x[j] += xi * cmplx.Conj(ap[k]) - k++ - } - kk -= n - i + 1 - } - } else { - ix := kx + (n-1)*incX - for i := n - 1; i >= 0; i-- { - xi := x[ix] - if diag == blas.NonUnit { - x[ix] *= cmplx.Conj(ap[kk]) - } - jx := ix + incX - k := kk + 1 - for j := i + 1; j < n; j++ { - x[jx] += xi * cmplx.Conj(ap[k]) - jx += incX - k++ - } - ix -= incX - kk -= n - i + 1 - } - } - } else { - // kk points to the beginning of current row in ap. - kk := 0 - if incX == 1 { - x = x[:n] - for i, xi := range x { - for j := 0; j < i; j++ { - x[j] += xi * cmplx.Conj(ap[kk+j]) - } - if diag == blas.NonUnit { - x[i] *= cmplx.Conj(ap[kk+i]) - } - kk += i + 1 - } - } else { - ix := kx - for i := 0; i < n; i++ { - xi := x[ix] - jx := kx - for j := 0; j < i; j++ { - x[jx] += xi * cmplx.Conj(ap[kk+j]) - jx += incX - } - if diag == blas.NonUnit { - x[ix] *= cmplx.Conj(ap[kk+i]) - } - ix += incX - kk += i + 1 - } - } - } -} - -// Ztpsv solves one of the systems of equations -// A * x = b if trans == blas.NoTrans -// A^T * x = b if trans == blas.Trans -// A^H * x = b if trans == blas.ConjTrans -// where b and x are n element vectors and A is an n×n triangular matrix in -// packed form. -// -// On entry, x contains the values of b, and the solution is -// stored in-place into x. -// -// No test for singularity or near-singularity is included in this -// routine. Such tests must be performed before calling this routine. -func (Implementation) Ztpsv(uplo blas.Uplo, trans blas.Transpose, diag blas.Diag, n int, ap []complex128, x []complex128, incX int) { - switch uplo { - default: - panic(badUplo) - case blas.Upper, blas.Lower: - } - switch trans { - default: - panic(badTranspose) - case blas.NoTrans, blas.Trans, blas.ConjTrans: - } - switch diag { - default: - panic(badDiag) - case blas.NonUnit, blas.Unit: - } - if n < 0 { - panic(nLT0) - } - if incX == 0 { - panic(zeroIncX) - } - - // Quick return if possible. - if n == 0 { - return - } - - // For zero matrix size the following slice length checks are trivially satisfied. - if len(ap) < n*(n+1)/2 { - panic(shortAP) - } - if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { - panic(shortX) - } - - // Set up start index in X. 
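Ztpmv and Ztpsv are the packed-triangular multiply/solve pair, using the same packed layout as Zhpmv. A round-trip sketch (illustrative values), multiplying in place and then solving back:

package main

import (
	"fmt"

	"gonum.org/v1/gonum/blas"
	"gonum.org/v1/gonum/blas/gonum"
)

func main() {
	var impl gonum.Implementation

	// Packed upper triangular n = 3 matrix, rows concatenated:
	//
	// | 1  1  1 |
	// | 0  2  1 |     ap = [A00 A01 A02 A11 A12 A22]
	// | 0  0  4 |
	ap := []complex128{1, 1, 1, 2, 1, 4}
	x := []complex128{1, 1, 1}

	impl.Ztpmv(blas.Upper, blas.NoTrans, blas.NonUnit, 3, ap, x, 1)
	fmt.Println(x) // [(3+0i) (3+0i) (4+0i)]

	impl.Ztpsv(blas.Upper, blas.NoTrans, blas.NonUnit, 3, ap, x, 1)
	fmt.Println(x) // [(1+0i) (1+0i) (1+0i)]
}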
- var kx int - if incX < 0 { - kx = (1 - n) * incX - } - - // The elements of A are accessed sequentially with one pass through ap. - - if trans == blas.NoTrans { - // Form x = inv(A)*x. - if uplo == blas.Upper { - kk := n*(n+1)/2 - 1 - if incX == 1 { - for i := n - 1; i >= 0; i-- { - aii := ap[kk] - if n-i-1 > 0 { - x[i] -= c128.DotuUnitary(x[i+1:n], ap[kk+1:kk+n-i]) - } - if diag == blas.NonUnit { - x[i] /= aii - } - kk -= n - i + 1 - } - } else { - ix := kx + (n-1)*incX - for i := n - 1; i >= 0; i-- { - aii := ap[kk] - if n-i-1 > 0 { - x[ix] -= c128.DotuInc(x, ap[kk+1:kk+n-i], uintptr(n-i-1), uintptr(incX), 1, uintptr(ix+incX), 0) - } - if diag == blas.NonUnit { - x[ix] /= aii - } - ix -= incX - kk -= n - i + 1 - } - } - } else { - kk := 0 - if incX == 1 { - for i := 0; i < n; i++ { - if i > 0 { - x[i] -= c128.DotuUnitary(x[:i], ap[kk:kk+i]) - } - if diag == blas.NonUnit { - x[i] /= ap[kk+i] - } - kk += i + 1 - } - } else { - ix := kx - for i := 0; i < n; i++ { - if i > 0 { - x[ix] -= c128.DotuInc(x, ap[kk:kk+i], uintptr(i), uintptr(incX), 1, uintptr(kx), 0) - } - if diag == blas.NonUnit { - x[ix] /= ap[kk+i] - } - ix += incX - kk += i + 1 - } - } - } - return - } - - if trans == blas.Trans { - // Form x = inv(A^T)*x. - if uplo == blas.Upper { - kk := 0 - if incX == 1 { - for j := 0; j < n; j++ { - if diag == blas.NonUnit { - x[j] /= ap[kk] - } - if n-j-1 > 0 { - c128.AxpyUnitary(-x[j], ap[kk+1:kk+n-j], x[j+1:n]) - } - kk += n - j - } - } else { - jx := kx - for j := 0; j < n; j++ { - if diag == blas.NonUnit { - x[jx] /= ap[kk] - } - if n-j-1 > 0 { - c128.AxpyInc(-x[jx], ap[kk+1:kk+n-j], x, uintptr(n-j-1), 1, uintptr(incX), 0, uintptr(jx+incX)) - } - jx += incX - kk += n - j - } - } - } else { - kk := n*(n+1)/2 - n - if incX == 1 { - for j := n - 1; j >= 0; j-- { - if diag == blas.NonUnit { - x[j] /= ap[kk+j] - } - if j > 0 { - c128.AxpyUnitary(-x[j], ap[kk:kk+j], x[:j]) - } - kk -= j - } - } else { - jx := kx + (n-1)*incX - for j := n - 1; j >= 0; j-- { - if diag == blas.NonUnit { - x[jx] /= ap[kk+j] - } - if j > 0 { - c128.AxpyInc(-x[jx], ap[kk:kk+j], x, uintptr(j), 1, uintptr(incX), 0, uintptr(kx)) - } - jx -= incX - kk -= j - } - } - } - return - } - - // Form x = inv(A^H)*x. - if uplo == blas.Upper { - kk := 0 - if incX == 1 { - for j := 0; j < n; j++ { - if diag == blas.NonUnit { - x[j] /= cmplx.Conj(ap[kk]) - } - xj := x[j] - k := kk + 1 - for i := j + 1; i < n; i++ { - x[i] -= xj * cmplx.Conj(ap[k]) - k++ - } - kk += n - j - } - } else { - jx := kx - for j := 0; j < n; j++ { - if diag == blas.NonUnit { - x[jx] /= cmplx.Conj(ap[kk]) - } - xj := x[jx] - ix := jx + incX - k := kk + 1 - for i := j + 1; i < n; i++ { - x[ix] -= xj * cmplx.Conj(ap[k]) - ix += incX - k++ - } - jx += incX - kk += n - j - } - } - } else { - kk := n*(n+1)/2 - n - if incX == 1 { - for j := n - 1; j >= 0; j-- { - if diag == blas.NonUnit { - x[j] /= cmplx.Conj(ap[kk+j]) - } - xj := x[j] - for i := 0; i < j; i++ { - x[i] -= xj * cmplx.Conj(ap[kk+i]) - } - kk -= j - } - } else { - jx := kx + (n-1)*incX - for j := n - 1; j >= 0; j-- { - if diag == blas.NonUnit { - x[jx] /= cmplx.Conj(ap[kk+j]) - } - xj := x[jx] - ix := kx - for i := 0; i < j; i++ { - x[ix] -= xj * cmplx.Conj(ap[kk+i]) - ix += incX - } - jx -= incX - kk -= j - } - } - } -} - -// Ztrmv performs one of the matrix-vector operations -// x = A * x if trans = blas.NoTrans -// x = A^T * x if trans = blas.Trans -// x = A^H * x if trans = blas.ConjTrans -// where x is a vector, and A is an n×n triangular matrix. 
-func (Implementation) Ztrmv(uplo blas.Uplo, trans blas.Transpose, diag blas.Diag, n int, a []complex128, lda int, x []complex128, incX int) { - switch trans { - default: - panic(badTranspose) - case blas.NoTrans, blas.Trans, blas.ConjTrans: - } - switch uplo { - default: - panic(badUplo) - case blas.Upper, blas.Lower: - } - switch diag { - default: - panic(badDiag) - case blas.NonUnit, blas.Unit: - } - if n < 0 { - panic(nLT0) - } - if lda < max(1, n) { - panic(badLdA) - } - if incX == 0 { - panic(zeroIncX) - } - - // Quick return if possible. - if n == 0 { - return - } - - // For zero matrix size the following slice length checks are trivially satisfied. - if len(a) < lda*(n-1)+n { - panic(shortA) - } - if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { - panic(shortX) - } - - // Set up start index in X. - var kx int - if incX < 0 { - kx = (1 - n) * incX - } - - // The elements of A are accessed sequentially with one pass through A. - - if trans == blas.NoTrans { - // Form x = A*x. - if uplo == blas.Upper { - if incX == 1 { - for i := 0; i < n; i++ { - if diag == blas.NonUnit { - x[i] *= a[i*lda+i] - } - if n-i-1 > 0 { - x[i] += c128.DotuUnitary(a[i*lda+i+1:i*lda+n], x[i+1:n]) - } - } - } else { - ix := kx - for i := 0; i < n; i++ { - if diag == blas.NonUnit { - x[ix] *= a[i*lda+i] - } - if n-i-1 > 0 { - x[ix] += c128.DotuInc(a[i*lda+i+1:i*lda+n], x, uintptr(n-i-1), 1, uintptr(incX), 0, uintptr(ix+incX)) - } - ix += incX - } - } - } else { - if incX == 1 { - for i := n - 1; i >= 0; i-- { - if diag == blas.NonUnit { - x[i] *= a[i*lda+i] - } - if i > 0 { - x[i] += c128.DotuUnitary(a[i*lda:i*lda+i], x[:i]) - } - } - } else { - ix := kx + (n-1)*incX - for i := n - 1; i >= 0; i-- { - if diag == blas.NonUnit { - x[ix] *= a[i*lda+i] - } - if i > 0 { - x[ix] += c128.DotuInc(a[i*lda:i*lda+i], x, uintptr(i), 1, uintptr(incX), 0, uintptr(kx)) - } - ix -= incX - } - } - } - return - } - - if trans == blas.Trans { - // Form x = A^T*x. - if uplo == blas.Upper { - if incX == 1 { - for i := n - 1; i >= 0; i-- { - xi := x[i] - if diag == blas.NonUnit { - x[i] *= a[i*lda+i] - } - if n-i-1 > 0 { - c128.AxpyUnitary(xi, a[i*lda+i+1:i*lda+n], x[i+1:n]) - } - } - } else { - ix := kx + (n-1)*incX - for i := n - 1; i >= 0; i-- { - xi := x[ix] - if diag == blas.NonUnit { - x[ix] *= a[i*lda+i] - } - if n-i-1 > 0 { - c128.AxpyInc(xi, a[i*lda+i+1:i*lda+n], x, uintptr(n-i-1), 1, uintptr(incX), 0, uintptr(ix+incX)) - } - ix -= incX - } - } - } else { - if incX == 1 { - for i := 0; i < n; i++ { - if i > 0 { - c128.AxpyUnitary(x[i], a[i*lda:i*lda+i], x[:i]) - } - if diag == blas.NonUnit { - x[i] *= a[i*lda+i] - } - } - } else { - ix := kx - for i := 0; i < n; i++ { - if i > 0 { - c128.AxpyInc(x[ix], a[i*lda:i*lda+i], x, uintptr(i), 1, uintptr(incX), 0, uintptr(kx)) - } - if diag == blas.NonUnit { - x[ix] *= a[i*lda+i] - } - ix += incX - } - } - } - return - } - - // Form x = A^H*x. 
- if uplo == blas.Upper { - if incX == 1 { - for i := n - 1; i >= 0; i-- { - xi := x[i] - if diag == blas.NonUnit { - x[i] *= cmplx.Conj(a[i*lda+i]) - } - for j := i + 1; j < n; j++ { - x[j] += xi * cmplx.Conj(a[i*lda+j]) - } - } - } else { - ix := kx + (n-1)*incX - for i := n - 1; i >= 0; i-- { - xi := x[ix] - if diag == blas.NonUnit { - x[ix] *= cmplx.Conj(a[i*lda+i]) - } - jx := ix + incX - for j := i + 1; j < n; j++ { - x[jx] += xi * cmplx.Conj(a[i*lda+j]) - jx += incX - } - ix -= incX - } - } - } else { - if incX == 1 { - for i := 0; i < n; i++ { - for j := 0; j < i; j++ { - x[j] += x[i] * cmplx.Conj(a[i*lda+j]) - } - if diag == blas.NonUnit { - x[i] *= cmplx.Conj(a[i*lda+i]) - } - } - } else { - ix := kx - for i := 0; i < n; i++ { - jx := kx - for j := 0; j < i; j++ { - x[jx] += x[ix] * cmplx.Conj(a[i*lda+j]) - jx += incX - } - if diag == blas.NonUnit { - x[ix] *= cmplx.Conj(a[i*lda+i]) - } - ix += incX - } - } - } -} - -// Ztrsv solves one of the systems of equations -// A * x = b if trans == blas.NoTrans -// A^T * x = b if trans == blas.Trans -// A^H * x = b if trans == blas.ConjTrans -// where b and x are n element vectors and A is an n×n triangular matrix. -// -// On entry, x contains the values of b, and the solution is -// stored in-place into x. -// -// No test for singularity or near-singularity is included in this -// routine. Such tests must be performed before calling this routine. -func (Implementation) Ztrsv(uplo blas.Uplo, trans blas.Transpose, diag blas.Diag, n int, a []complex128, lda int, x []complex128, incX int) { - switch trans { - default: - panic(badTranspose) - case blas.NoTrans, blas.Trans, blas.ConjTrans: - } - switch uplo { - default: - panic(badUplo) - case blas.Upper, blas.Lower: - } - switch diag { - default: - panic(badDiag) - case blas.NonUnit, blas.Unit: - } - if n < 0 { - panic(nLT0) - } - if lda < max(1, n) { - panic(badLdA) - } - if incX == 0 { - panic(zeroIncX) - } - - // Quick return if possible. - if n == 0 { - return - } - - // For zero matrix size the following slice length checks are trivially satisfied. - if len(a) < lda*(n-1)+n { - panic(shortA) - } - if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { - panic(shortX) - } - - // Set up start index in X. - var kx int - if incX < 0 { - kx = (1 - n) * incX - } - - // The elements of A are accessed sequentially with one pass through A. - - if trans == blas.NoTrans { - // Form x = inv(A)*x. - if uplo == blas.Upper { - if incX == 1 { - for i := n - 1; i >= 0; i-- { - aii := a[i*lda+i] - if n-i-1 > 0 { - x[i] -= c128.DotuUnitary(x[i+1:n], a[i*lda+i+1:i*lda+n]) - } - if diag == blas.NonUnit { - x[i] /= aii - } - } - } else { - ix := kx + (n-1)*incX - for i := n - 1; i >= 0; i-- { - aii := a[i*lda+i] - if n-i-1 > 0 { - x[ix] -= c128.DotuInc(x, a[i*lda+i+1:i*lda+n], uintptr(n-i-1), uintptr(incX), 1, uintptr(ix+incX), 0) - } - if diag == blas.NonUnit { - x[ix] /= aii - } - ix -= incX - } - } - } else { - if incX == 1 { - for i := 0; i < n; i++ { - if i > 0 { - x[i] -= c128.DotuUnitary(x[:i], a[i*lda:i*lda+i]) - } - if diag == blas.NonUnit { - x[i] /= a[i*lda+i] - } - } - } else { - ix := kx - for i := 0; i < n; i++ { - if i > 0 { - x[ix] -= c128.DotuInc(x, a[i*lda:i*lda+i], uintptr(i), uintptr(incX), 1, uintptr(kx), 0) - } - if diag == blas.NonUnit { - x[ix] /= a[i*lda+i] - } - ix += incX - } - } - } - return - } - - if trans == blas.Trans { - // Form x = inv(A^T)*x. 
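Ztrsv is the dense version of the same in-place solve; for blas.Upper with blas.NoTrans it is plain back substitution, and again no singularity test is made. A sketch with invented values:

package main

import (
	"fmt"

	"gonum.org/v1/gonum/blas"
	"gonum.org/v1/gonum/blas/gonum"
)

func main() {
	var impl gonum.Implementation

	// Dense 2×2 upper triangular system, row-major with lda = 2:
	// | 2  1i | * x = | 2+1i |
	// | 0  1  |       |  1   |
	a := []complex128{2, 1i, 0, 1}
	x := []complex128{2 + 1i, 1} // b on entry, overwritten with the solution

	impl.Ztrsv(blas.Upper, blas.NoTrans, blas.NonUnit, 2, a, 2, x, 1)
	fmt.Println(x) // [(1+0i) (1+0i)]
}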
- if uplo == blas.Upper { - if incX == 1 { - for j := 0; j < n; j++ { - if diag == blas.NonUnit { - x[j] /= a[j*lda+j] - } - if n-j-1 > 0 { - c128.AxpyUnitary(-x[j], a[j*lda+j+1:j*lda+n], x[j+1:n]) - } - } - } else { - jx := kx - for j := 0; j < n; j++ { - if diag == blas.NonUnit { - x[jx] /= a[j*lda+j] - } - if n-j-1 > 0 { - c128.AxpyInc(-x[jx], a[j*lda+j+1:j*lda+n], x, uintptr(n-j-1), 1, uintptr(incX), 0, uintptr(jx+incX)) - } - jx += incX - } - } - } else { - if incX == 1 { - for j := n - 1; j >= 0; j-- { - if diag == blas.NonUnit { - x[j] /= a[j*lda+j] - } - xj := x[j] - if j > 0 { - c128.AxpyUnitary(-xj, a[j*lda:j*lda+j], x[:j]) - } - } - } else { - jx := kx + (n-1)*incX - for j := n - 1; j >= 0; j-- { - if diag == blas.NonUnit { - x[jx] /= a[j*lda+j] - } - if j > 0 { - c128.AxpyInc(-x[jx], a[j*lda:j*lda+j], x, uintptr(j), 1, uintptr(incX), 0, uintptr(kx)) - } - jx -= incX - } - } - } - return - } - - // Form x = inv(A^H)*x. - if uplo == blas.Upper { - if incX == 1 { - for j := 0; j < n; j++ { - if diag == blas.NonUnit { - x[j] /= cmplx.Conj(a[j*lda+j]) - } - xj := x[j] - for i := j + 1; i < n; i++ { - x[i] -= xj * cmplx.Conj(a[j*lda+i]) - } - } - } else { - jx := kx - for j := 0; j < n; j++ { - if diag == blas.NonUnit { - x[jx] /= cmplx.Conj(a[j*lda+j]) - } - xj := x[jx] - ix := jx + incX - for i := j + 1; i < n; i++ { - x[ix] -= xj * cmplx.Conj(a[j*lda+i]) - ix += incX - } - jx += incX - } - } - } else { - if incX == 1 { - for j := n - 1; j >= 0; j-- { - if diag == blas.NonUnit { - x[j] /= cmplx.Conj(a[j*lda+j]) - } - xj := x[j] - for i := 0; i < j; i++ { - x[i] -= xj * cmplx.Conj(a[j*lda+i]) - } - } - } else { - jx := kx + (n-1)*incX - for j := n - 1; j >= 0; j-- { - if diag == blas.NonUnit { - x[jx] /= cmplx.Conj(a[j*lda+j]) - } - xj := x[jx] - ix := kx - for i := 0; i < j; i++ { - x[ix] -= xj * cmplx.Conj(a[j*lda+i]) - ix += incX - } - jx -= incX - } - } - } -} diff --git a/vendor/gonum.org/v1/gonum/blas/gonum/level2cmplx64.go b/vendor/gonum.org/v1/gonum/blas/gonum/level2cmplx64.go deleted file mode 100644 index 10faf8f7d..000000000 --- a/vendor/gonum.org/v1/gonum/blas/gonum/level2cmplx64.go +++ /dev/null @@ -1,2942 +0,0 @@ -// Code generated by "go generate gonum.org/v1/gonum/blas/gonum”; DO NOT EDIT. - -// Copyright ©2017 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import ( - cmplx "gonum.org/v1/gonum/internal/cmplx64" - - "gonum.org/v1/gonum/blas" - "gonum.org/v1/gonum/internal/asm/c64" -) - -var _ blas.Complex64Level2 = Implementation{} - -// Cgbmv performs one of the matrix-vector operations -// y = alpha * A * x + beta * y if trans = blas.NoTrans -// y = alpha * A^T * x + beta * y if trans = blas.Trans -// y = alpha * A^H * x + beta * y if trans = blas.ConjTrans -// where alpha and beta are scalars, x and y are vectors, and A is an m×n band matrix -// with kL sub-diagonals and kU super-diagonals. -// -// Complex64 implementations are autogenerated and not directly tested. 
-func (Implementation) Cgbmv(trans blas.Transpose, m, n, kL, kU int, alpha complex64, a []complex64, lda int, x []complex64, incX int, beta complex64, y []complex64, incY int) { - switch trans { - default: - panic(badTranspose) - case blas.NoTrans, blas.Trans, blas.ConjTrans: - } - if m < 0 { - panic(mLT0) - } - if n < 0 { - panic(nLT0) - } - if kL < 0 { - panic(kLLT0) - } - if kU < 0 { - panic(kULT0) - } - if lda < kL+kU+1 { - panic(badLdA) - } - if incX == 0 { - panic(zeroIncX) - } - if incY == 0 { - panic(zeroIncY) - } - - // Quick return if possible. - if m == 0 || n == 0 { - return - } - - // For zero matrix size the following slice length checks are trivially satisfied. - if len(a) < lda*(min(m, n+kL)-1)+kL+kU+1 { - panic(shortA) - } - var lenX, lenY int - if trans == blas.NoTrans { - lenX, lenY = n, m - } else { - lenX, lenY = m, n - } - if (incX > 0 && len(x) <= (lenX-1)*incX) || (incX < 0 && len(x) <= (1-lenX)*incX) { - panic(shortX) - } - if (incY > 0 && len(y) <= (lenY-1)*incY) || (incY < 0 && len(y) <= (1-lenY)*incY) { - panic(shortY) - } - - // Quick return if possible. - if alpha == 0 && beta == 1 { - return - } - - var kx int - if incX < 0 { - kx = (1 - lenX) * incX - } - var ky int - if incY < 0 { - ky = (1 - lenY) * incY - } - - // Form y = beta*y. - if beta != 1 { - if incY == 1 { - if beta == 0 { - for i := range y[:lenY] { - y[i] = 0 - } - } else { - c64.ScalUnitary(beta, y[:lenY]) - } - } else { - iy := ky - if beta == 0 { - for i := 0; i < lenY; i++ { - y[iy] = 0 - iy += incY - } - } else { - if incY > 0 { - c64.ScalInc(beta, y, uintptr(lenY), uintptr(incY)) - } else { - c64.ScalInc(beta, y, uintptr(lenY), uintptr(-incY)) - } - } - } - } - - nRow := min(m, n+kL) - nCol := kL + 1 + kU - switch trans { - case blas.NoTrans: - iy := ky - if incX == 1 { - for i := 0; i < nRow; i++ { - l := max(0, kL-i) - u := min(nCol, n+kL-i) - aRow := a[i*lda+l : i*lda+u] - off := max(0, i-kL) - xtmp := x[off : off+u-l] - var sum complex64 - for j, v := range aRow { - sum += xtmp[j] * v - } - y[iy] += alpha * sum - iy += incY - } - } else { - for i := 0; i < nRow; i++ { - l := max(0, kL-i) - u := min(nCol, n+kL-i) - aRow := a[i*lda+l : i*lda+u] - off := max(0, i-kL) * incX - jx := kx - var sum complex64 - for _, v := range aRow { - sum += x[off+jx] * v - jx += incX - } - y[iy] += alpha * sum - iy += incY - } - } - case blas.Trans: - if incX == 1 { - for i := 0; i < nRow; i++ { - l := max(0, kL-i) - u := min(nCol, n+kL-i) - aRow := a[i*lda+l : i*lda+u] - off := max(0, i-kL) * incY - alphaxi := alpha * x[i] - jy := ky - for _, v := range aRow { - y[off+jy] += alphaxi * v - jy += incY - } - } - } else { - ix := kx - for i := 0; i < nRow; i++ { - l := max(0, kL-i) - u := min(nCol, n+kL-i) - aRow := a[i*lda+l : i*lda+u] - off := max(0, i-kL) * incY - alphaxi := alpha * x[ix] - jy := ky - for _, v := range aRow { - y[off+jy] += alphaxi * v - jy += incY - } - ix += incX - } - } - case blas.ConjTrans: - if incX == 1 { - for i := 0; i < nRow; i++ { - l := max(0, kL-i) - u := min(nCol, n+kL-i) - aRow := a[i*lda+l : i*lda+u] - off := max(0, i-kL) * incY - alphaxi := alpha * x[i] - jy := ky - for _, v := range aRow { - y[off+jy] += alphaxi * cmplx.Conj(v) - jy += incY - } - } - } else { - ix := kx - for i := 0; i < nRow; i++ { - l := max(0, kL-i) - u := min(nCol, n+kL-i) - aRow := a[i*lda+l : i*lda+u] - off := max(0, i-kL) * incY - alphaxi := alpha * x[ix] - jy := ky - for _, v := range aRow { - y[off+jy] += alphaxi * cmplx.Conj(v) - jy += incY - } - ix += incX - } - } - } -} - -// Cgemv performs 
one of the matrix-vector operations -// y = alpha * A * x + beta * y if trans = blas.NoTrans -// y = alpha * A^T * x + beta * y if trans = blas.Trans -// y = alpha * A^H * x + beta * y if trans = blas.ConjTrans -// where alpha and beta are scalars, x and y are vectors, and A is an m×n dense matrix. -// -// Complex64 implementations are autogenerated and not directly tested. -func (Implementation) Cgemv(trans blas.Transpose, m, n int, alpha complex64, a []complex64, lda int, x []complex64, incX int, beta complex64, y []complex64, incY int) { - switch trans { - default: - panic(badTranspose) - case blas.NoTrans, blas.Trans, blas.ConjTrans: - } - if m < 0 { - panic(mLT0) - } - if n < 0 { - panic(nLT0) - } - if lda < max(1, n) { - panic(badLdA) - } - if incX == 0 { - panic(zeroIncX) - } - if incY == 0 { - panic(zeroIncY) - } - - // Quick return if possible. - if m == 0 || n == 0 { - return - } - - // For zero matrix size the following slice length checks are trivially satisfied. - var lenX, lenY int - if trans == blas.NoTrans { - lenX = n - lenY = m - } else { - lenX = m - lenY = n - } - if len(a) < lda*(m-1)+n { - panic(shortA) - } - if (incX > 0 && len(x) <= (lenX-1)*incX) || (incX < 0 && len(x) <= (1-lenX)*incX) { - panic(shortX) - } - if (incY > 0 && len(y) <= (lenY-1)*incY) || (incY < 0 && len(y) <= (1-lenY)*incY) { - panic(shortY) - } - - // Quick return if possible. - if alpha == 0 && beta == 1 { - return - } - - var kx int - if incX < 0 { - kx = (1 - lenX) * incX - } - var ky int - if incY < 0 { - ky = (1 - lenY) * incY - } - - // Form y = beta*y. - if beta != 1 { - if incY == 1 { - if beta == 0 { - for i := range y[:lenY] { - y[i] = 0 - } - } else { - c64.ScalUnitary(beta, y[:lenY]) - } - } else { - iy := ky - if beta == 0 { - for i := 0; i < lenY; i++ { - y[iy] = 0 - iy += incY - } - } else { - if incY > 0 { - c64.ScalInc(beta, y, uintptr(lenY), uintptr(incY)) - } else { - c64.ScalInc(beta, y, uintptr(lenY), uintptr(-incY)) - } - } - } - } - - if alpha == 0 { - return - } - - switch trans { - default: - // Form y = alpha*A*x + y. - iy := ky - if incX == 1 { - for i := 0; i < m; i++ { - y[iy] += alpha * c64.DotuUnitary(a[i*lda:i*lda+n], x[:n]) - iy += incY - } - return - } - for i := 0; i < m; i++ { - y[iy] += alpha * c64.DotuInc(a[i*lda:i*lda+n], x, uintptr(n), 1, uintptr(incX), 0, uintptr(kx)) - iy += incY - } - return - - case blas.Trans: - // Form y = alpha*A^T*x + y. - ix := kx - if incY == 1 { - for i := 0; i < m; i++ { - c64.AxpyUnitary(alpha*x[ix], a[i*lda:i*lda+n], y[:n]) - ix += incX - } - return - } - for i := 0; i < m; i++ { - c64.AxpyInc(alpha*x[ix], a[i*lda:i*lda+n], y, uintptr(n), 1, uintptr(incY), 0, uintptr(ky)) - ix += incX - } - return - - case blas.ConjTrans: - // Form y = alpha*A^H*x + y. - ix := kx - if incY == 1 { - for i := 0; i < m; i++ { - tmp := alpha * x[ix] - for j := 0; j < n; j++ { - y[j] += tmp * cmplx.Conj(a[i*lda+j]) - } - ix += incX - } - return - } - for i := 0; i < m; i++ { - tmp := alpha * x[ix] - jy := ky - for j := 0; j < n; j++ { - y[jy] += tmp * cmplx.Conj(a[i*lda+j]) - jy += incY - } - ix += incX - } - return - } -} - -// Cgerc performs the rank-one operation -// A += alpha * x * y^H -// where A is an m×n dense matrix, alpha is a scalar, x is an m element vector, -// and y is an n element vector. -// -// Complex64 implementations are autogenerated and not directly tested. 
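The deleted level2cmplx64.go mirrors the complex128 routines above one-for-one in complex64; as its comments note, the implementations are autogenerated and not directly tested. The call shape is unchanged apart from the element type, e.g. (sketch with invented values):

package main

import (
	"fmt"

	"gonum.org/v1/gonum/blas"
	"gonum.org/v1/gonum/blas/gonum"
)

func main() {
	var impl gonum.Implementation

	// Same contract as the Zgemv sketch earlier, in single precision.
	a := []complex64{1, 1i, -1i, 2}
	x := []complex64{1, 1}
	y := make([]complex64, 2)

	impl.Cgemv(blas.NoTrans, 2, 2, 1, a, 2, x, 1, 0, y, 1)
	fmt.Println(y) // [(1+1i) (2-1i)]
}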
-func (Implementation) Cgerc(m, n int, alpha complex64, x []complex64, incX int, y []complex64, incY int, a []complex64, lda int) { - if m < 0 { - panic(mLT0) - } - if n < 0 { - panic(nLT0) - } - if lda < max(1, n) { - panic(badLdA) - } - if incX == 0 { - panic(zeroIncX) - } - if incY == 0 { - panic(zeroIncY) - } - - // Quick return if possible. - if m == 0 || n == 0 { - return - } - - // For zero matrix size the following slice length checks are trivially satisfied. - if (incX > 0 && len(x) <= (m-1)*incX) || (incX < 0 && len(x) <= (1-m)*incX) { - panic(shortX) - } - if (incY > 0 && len(y) <= (n-1)*incY) || (incY < 0 && len(y) <= (1-n)*incY) { - panic(shortY) - } - if len(a) < lda*(m-1)+n { - panic(shortA) - } - - // Quick return if possible. - if alpha == 0 { - return - } - - var kx, jy int - if incX < 0 { - kx = (1 - m) * incX - } - if incY < 0 { - jy = (1 - n) * incY - } - for j := 0; j < n; j++ { - if y[jy] != 0 { - tmp := alpha * cmplx.Conj(y[jy]) - c64.AxpyInc(tmp, x, a[j:], uintptr(m), uintptr(incX), uintptr(lda), uintptr(kx), 0) - } - jy += incY - } -} - -// Cgeru performs the rank-one operation -// A += alpha * x * y^T -// where A is an m×n dense matrix, alpha is a scalar, x is an m element vector, -// and y is an n element vector. -// -// Complex64 implementations are autogenerated and not directly tested. -func (Implementation) Cgeru(m, n int, alpha complex64, x []complex64, incX int, y []complex64, incY int, a []complex64, lda int) { - if m < 0 { - panic(mLT0) - } - if n < 0 { - panic(nLT0) - } - if lda < max(1, n) { - panic(badLdA) - } - if incX == 0 { - panic(zeroIncX) - } - if incY == 0 { - panic(zeroIncY) - } - - // Quick return if possible. - if m == 0 || n == 0 { - return - } - - // For zero matrix size the following slice length checks are trivially satisfied. - if (incX > 0 && len(x) <= (m-1)*incX) || (incX < 0 && len(x) <= (1-m)*incX) { - panic(shortX) - } - if (incY > 0 && len(y) <= (n-1)*incY) || (incY < 0 && len(y) <= (1-n)*incY) { - panic(shortY) - } - if len(a) < lda*(m-1)+n { - panic(shortA) - } - - // Quick return if possible. - if alpha == 0 { - return - } - - var kx int - if incX < 0 { - kx = (1 - m) * incX - } - if incY == 1 { - for i := 0; i < m; i++ { - if x[kx] != 0 { - tmp := alpha * x[kx] - c64.AxpyUnitary(tmp, y[:n], a[i*lda:i*lda+n]) - } - kx += incX - } - return - } - var jy int - if incY < 0 { - jy = (1 - n) * incY - } - for i := 0; i < m; i++ { - if x[kx] != 0 { - tmp := alpha * x[kx] - c64.AxpyInc(tmp, y, a[i*lda:i*lda+n], uintptr(n), uintptr(incY), 1, uintptr(jy), 0) - } - kx += incX - } -} - -// Chbmv performs the matrix-vector operation -// y = alpha * A * x + beta * y -// where alpha and beta are scalars, x and y are vectors, and A is an n×n -// Hermitian band matrix with k super-diagonals. The imaginary parts of -// the diagonal elements of A are ignored and assumed to be zero. -// -// Complex64 implementations are autogenerated and not directly tested. -func (Implementation) Chbmv(uplo blas.Uplo, n, k int, alpha complex64, a []complex64, lda int, x []complex64, incX int, beta complex64, y []complex64, incY int) { - switch uplo { - default: - panic(badUplo) - case blas.Upper, blas.Lower: - } - if n < 0 { - panic(nLT0) - } - if k < 0 { - panic(kLT0) - } - if lda < k+1 { - panic(badLdA) - } - if incX == 0 { - panic(zeroIncX) - } - if incY == 0 { - panic(zeroIncY) - } - - // Quick return if possible. - if n == 0 { - return - } - - // For zero matrix size the following slice length checks are trivially satisfied. 
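Cgerc and Cgeru differ only in whether y is conjugated; a small sketch, reusing the setup from the Cgemv example:

    var impl gonum.Implementation
    a := make([]complex64, 4) // 2×2 zero matrix, lda = 2
    x := []complex64{1i, 2}
    y := []complex64{1i, 1}
    impl.Cgeru(2, 2, 1, x, 1, y, 1, a, 2) // a[0] += 1i * 1i    = -1
    impl.Cgerc(2, 2, 1, x, 1, y, 1, a, 2) // a[0] += 1i * (-1i) = +1
    // a[0] cancels back to 0; the other entries accumulate both updates.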
- if len(a) < lda*(n-1)+k+1 { - panic(shortA) - } - if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { - panic(shortX) - } - if (incY > 0 && len(y) <= (n-1)*incY) || (incY < 0 && len(y) <= (1-n)*incY) { - panic(shortY) - } - - // Quick return if possible. - if alpha == 0 && beta == 1 { - return - } - - // Set up the start indices in X and Y. - var kx int - if incX < 0 { - kx = (1 - n) * incX - } - var ky int - if incY < 0 { - ky = (1 - n) * incY - } - - // Form y = beta*y. - if beta != 1 { - if incY == 1 { - if beta == 0 { - for i := range y[:n] { - y[i] = 0 - } - } else { - for i, v := range y[:n] { - y[i] = beta * v - } - } - } else { - iy := ky - if beta == 0 { - for i := 0; i < n; i++ { - y[iy] = 0 - iy += incY - } - } else { - for i := 0; i < n; i++ { - y[iy] = beta * y[iy] - iy += incY - } - } - } - } - - if alpha == 0 { - return - } - - // The elements of A are accessed sequentially with one pass through a. - switch uplo { - case blas.Upper: - iy := ky - if incX == 1 { - for i := 0; i < n; i++ { - aRow := a[i*lda:] - alphaxi := alpha * x[i] - sum := alphaxi * complex(real(aRow[0]), 0) - u := min(k+1, n-i) - jy := incY - for j := 1; j < u; j++ { - v := aRow[j] - sum += alpha * x[i+j] * v - y[iy+jy] += alphaxi * cmplx.Conj(v) - jy += incY - } - y[iy] += sum - iy += incY - } - } else { - ix := kx - for i := 0; i < n; i++ { - aRow := a[i*lda:] - alphaxi := alpha * x[ix] - sum := alphaxi * complex(real(aRow[0]), 0) - u := min(k+1, n-i) - jx := incX - jy := incY - for j := 1; j < u; j++ { - v := aRow[j] - sum += alpha * x[ix+jx] * v - y[iy+jy] += alphaxi * cmplx.Conj(v) - jx += incX - jy += incY - } - y[iy] += sum - ix += incX - iy += incY - } - } - case blas.Lower: - iy := ky - if incX == 1 { - for i := 0; i < n; i++ { - l := max(0, k-i) - alphaxi := alpha * x[i] - jy := l * incY - aRow := a[i*lda:] - for j := l; j < k; j++ { - v := aRow[j] - y[iy] += alpha * v * x[i-k+j] - y[iy-k*incY+jy] += alphaxi * cmplx.Conj(v) - jy += incY - } - y[iy] += alphaxi * complex(real(aRow[k]), 0) - iy += incY - } - } else { - ix := kx - for i := 0; i < n; i++ { - l := max(0, k-i) - alphaxi := alpha * x[ix] - jx := l * incX - jy := l * incY - aRow := a[i*lda:] - for j := l; j < k; j++ { - v := aRow[j] - y[iy] += alpha * v * x[ix-k*incX+jx] - y[iy-k*incY+jy] += alphaxi * cmplx.Conj(v) - jx += incX - jy += incY - } - y[iy] += alphaxi * complex(real(aRow[k]), 0) - ix += incX - iy += incY - } - } - } -} - -// Chemv performs the matrix-vector operation -// y = alpha * A * x + beta * y -// where alpha and beta are scalars, x and y are vectors, and A is an n×n -// Hermitian matrix. The imaginary parts of the diagonal elements of A are -// ignored and assumed to be zero. -// -// Complex64 implementations are autogenerated and not directly tested. -func (Implementation) Chemv(uplo blas.Uplo, n int, alpha complex64, a []complex64, lda int, x []complex64, incX int, beta complex64, y []complex64, incY int) { - switch uplo { - default: - panic(badUplo) - case blas.Upper, blas.Lower: - } - if n < 0 { - panic(nLT0) - } - if lda < max(1, n) { - panic(badLdA) - } - if incX == 0 { - panic(zeroIncX) - } - if incY == 0 { - panic(zeroIncY) - } - - // Quick return if possible. - if n == 0 { - return - } - - // For zero matrix size the following slice length checks are trivially satisfied. 
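Reading the Chbmv loops above: with blas.Upper, row i of a holds the diagonal element at a[i*lda] followed by the superdiagonals. A sketch of how a 3×3 Hermitian band matrix with k = 1 packs (values arbitrary):

    // A = [ 1   2i  0  ]
    //     [-2i  3   4i ]
    //     [ 0  -4i  5  ]
    a := []complex64{
    	1, 2i, // row 0: diagonal, superdiagonal
    	3, 4i, // row 1: diagonal, superdiagonal
    	5, 0, // row 2: diagonal, unused padding
    } // lda = k+1 = 2; the conjugated lower triangle is implied, never stored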
- if len(a) < lda*(n-1)+n { - panic(shortA) - } - if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { - panic(shortX) - } - if (incY > 0 && len(y) <= (n-1)*incY) || (incY < 0 && len(y) <= (1-n)*incY) { - panic(shortY) - } - - // Quick return if possible. - if alpha == 0 && beta == 1 { - return - } - - // Set up the start indices in X and Y. - var kx int - if incX < 0 { - kx = (1 - n) * incX - } - var ky int - if incY < 0 { - ky = (1 - n) * incY - } - - // Form y = beta*y. - if beta != 1 { - if incY == 1 { - if beta == 0 { - for i := range y[:n] { - y[i] = 0 - } - } else { - for i, v := range y[:n] { - y[i] = beta * v - } - } - } else { - iy := ky - if beta == 0 { - for i := 0; i < n; i++ { - y[iy] = 0 - iy += incY - } - } else { - for i := 0; i < n; i++ { - y[iy] = beta * y[iy] - iy += incY - } - } - } - } - - if alpha == 0 { - return - } - - // The elements of A are accessed sequentially with one pass through - // the triangular part of A. - - if uplo == blas.Upper { - // Form y when A is stored in upper triangle. - if incX == 1 && incY == 1 { - for i := 0; i < n; i++ { - tmp1 := alpha * x[i] - var tmp2 complex64 - for j := i + 1; j < n; j++ { - y[j] += tmp1 * cmplx.Conj(a[i*lda+j]) - tmp2 += a[i*lda+j] * x[j] - } - aii := complex(real(a[i*lda+i]), 0) - y[i] += tmp1*aii + alpha*tmp2 - } - } else { - ix := kx - iy := ky - for i := 0; i < n; i++ { - tmp1 := alpha * x[ix] - var tmp2 complex64 - jx := ix - jy := iy - for j := i + 1; j < n; j++ { - jx += incX - jy += incY - y[jy] += tmp1 * cmplx.Conj(a[i*lda+j]) - tmp2 += a[i*lda+j] * x[jx] - } - aii := complex(real(a[i*lda+i]), 0) - y[iy] += tmp1*aii + alpha*tmp2 - ix += incX - iy += incY - } - } - return - } - - // Form y when A is stored in lower triangle. - if incX == 1 && incY == 1 { - for i := 0; i < n; i++ { - tmp1 := alpha * x[i] - var tmp2 complex64 - for j := 0; j < i; j++ { - y[j] += tmp1 * cmplx.Conj(a[i*lda+j]) - tmp2 += a[i*lda+j] * x[j] - } - aii := complex(real(a[i*lda+i]), 0) - y[i] += tmp1*aii + alpha*tmp2 - } - } else { - ix := kx - iy := ky - for i := 0; i < n; i++ { - tmp1 := alpha * x[ix] - var tmp2 complex64 - jx := kx - jy := ky - for j := 0; j < i; j++ { - y[jy] += tmp1 * cmplx.Conj(a[i*lda+j]) - tmp2 += a[i*lda+j] * x[jx] - jx += incX - jy += incY - } - aii := complex(real(a[i*lda+i]), 0) - y[iy] += tmp1*aii + alpha*tmp2 - ix += incX - iy += incY - } - } -} - -// Cher performs the Hermitian rank-one operation -// A += alpha * x * x^H -// where A is an n×n Hermitian matrix, alpha is a real scalar, and x is an n -// element vector. On entry, the imaginary parts of the diagonal elements of A -// are ignored and assumed to be zero, on return they will be set to zero. -// -// Complex64 implementations are autogenerated and not directly tested. -func (Implementation) Cher(uplo blas.Uplo, n int, alpha float32, x []complex64, incX int, a []complex64, lda int) { - switch uplo { - default: - panic(badUplo) - case blas.Upper, blas.Lower: - } - if n < 0 { - panic(nLT0) - } - if lda < max(1, n) { - panic(badLdA) - } - if incX == 0 { - panic(zeroIncX) - } - - // Quick return if possible. - if n == 0 { - return - } - - // For zero matrix size the following slice length checks are trivially satisfied. - if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { - panic(shortX) - } - if len(a) < lda*(n-1)+n { - panic(shortA) - } - - // Quick return if possible. 
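A short Chemv sketch in the same style; with blas.Upper only the upper triangle and the real part of the diagonal are read:

    var impl gonum.Implementation
    a := []complex64{2, 1i, 0, 3} // upper triangle of [[2, 1i], [-1i, 3]], lda = 2
    x := []complex64{1, 1}
    y := make([]complex64, 2)
    impl.Chemv(blas.Upper, 2, 1, a, 2, x, 1, 0, y, 1)
    // y is now [2+1i, 3-1i]; the a[2] slot is never read.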
- if alpha == 0 { - return - } - - var kx int - if incX < 0 { - kx = (1 - n) * incX - } - if uplo == blas.Upper { - if incX == 1 { - for i := 0; i < n; i++ { - if x[i] != 0 { - tmp := complex(alpha*real(x[i]), alpha*imag(x[i])) - aii := real(a[i*lda+i]) - xtmp := real(tmp * cmplx.Conj(x[i])) - a[i*lda+i] = complex(aii+xtmp, 0) - for j := i + 1; j < n; j++ { - a[i*lda+j] += tmp * cmplx.Conj(x[j]) - } - } else { - aii := real(a[i*lda+i]) - a[i*lda+i] = complex(aii, 0) - } - } - return - } - - ix := kx - for i := 0; i < n; i++ { - if x[ix] != 0 { - tmp := complex(alpha*real(x[ix]), alpha*imag(x[ix])) - aii := real(a[i*lda+i]) - xtmp := real(tmp * cmplx.Conj(x[ix])) - a[i*lda+i] = complex(aii+xtmp, 0) - jx := ix + incX - for j := i + 1; j < n; j++ { - a[i*lda+j] += tmp * cmplx.Conj(x[jx]) - jx += incX - } - } else { - aii := real(a[i*lda+i]) - a[i*lda+i] = complex(aii, 0) - } - ix += incX - } - return - } - - if incX == 1 { - for i := 0; i < n; i++ { - if x[i] != 0 { - tmp := complex(alpha*real(x[i]), alpha*imag(x[i])) - for j := 0; j < i; j++ { - a[i*lda+j] += tmp * cmplx.Conj(x[j]) - } - aii := real(a[i*lda+i]) - xtmp := real(tmp * cmplx.Conj(x[i])) - a[i*lda+i] = complex(aii+xtmp, 0) - } else { - aii := real(a[i*lda+i]) - a[i*lda+i] = complex(aii, 0) - } - } - return - } - - ix := kx - for i := 0; i < n; i++ { - if x[ix] != 0 { - tmp := complex(alpha*real(x[ix]), alpha*imag(x[ix])) - jx := kx - for j := 0; j < i; j++ { - a[i*lda+j] += tmp * cmplx.Conj(x[jx]) - jx += incX - } - aii := real(a[i*lda+i]) - xtmp := real(tmp * cmplx.Conj(x[ix])) - a[i*lda+i] = complex(aii+xtmp, 0) - - } else { - aii := real(a[i*lda+i]) - a[i*lda+i] = complex(aii, 0) - } - ix += incX - } -} - -// Cher2 performs the Hermitian rank-two operation -// A += alpha * x * y^H + conj(alpha) * y * x^H -// where alpha is a scalar, x and y are n element vectors and A is an n×n -// Hermitian matrix. On entry, the imaginary parts of the diagonal elements are -// ignored and assumed to be zero. On return they will be set to zero. -// -// Complex64 implementations are autogenerated and not directly tested. -func (Implementation) Cher2(uplo blas.Uplo, n int, alpha complex64, x []complex64, incX int, y []complex64, incY int, a []complex64, lda int) { - switch uplo { - default: - panic(badUplo) - case blas.Upper, blas.Lower: - } - if n < 0 { - panic(nLT0) - } - if lda < max(1, n) { - panic(badLdA) - } - if incX == 0 { - panic(zeroIncX) - } - if incY == 0 { - panic(zeroIncY) - } - - // Quick return if possible. - if n == 0 { - return - } - - // For zero matrix size the following slice length checks are trivially satisfied. - if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { - panic(shortX) - } - if (incY > 0 && len(y) <= (n-1)*incY) || (incY < 0 && len(y) <= (1-n)*incY) { - panic(shortY) - } - if len(a) < lda*(n-1)+n { - panic(shortA) - } - - // Quick return if possible. 
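Because alpha is real, Cher keeps A Hermitian by construction; a sketch:

    var impl gonum.Implementation
    a := make([]complex64, 4) // 2×2, lda = 2
    x := []complex64{1i, 2}
    impl.Cher(blas.Upper, 2, 1, x, 1, a, 2)
    // Upper triangle of x*x^H: a[0] = 1, a[1] = 2i, a[3] = 4.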
- if alpha == 0 { - return - } - - var kx, ky int - var ix, iy int - if incX != 1 || incY != 1 { - if incX < 0 { - kx = (1 - n) * incX - } - if incY < 0 { - ky = (1 - n) * incY - } - ix = kx - iy = ky - } - if uplo == blas.Upper { - if incX == 1 && incY == 1 { - for i := 0; i < n; i++ { - if x[i] != 0 || y[i] != 0 { - tmp1 := alpha * x[i] - tmp2 := cmplx.Conj(alpha) * y[i] - aii := real(a[i*lda+i]) + real(tmp1*cmplx.Conj(y[i])) + real(tmp2*cmplx.Conj(x[i])) - a[i*lda+i] = complex(aii, 0) - for j := i + 1; j < n; j++ { - a[i*lda+j] += tmp1*cmplx.Conj(y[j]) + tmp2*cmplx.Conj(x[j]) - } - } else { - aii := real(a[i*lda+i]) - a[i*lda+i] = complex(aii, 0) - } - } - return - } - for i := 0; i < n; i++ { - if x[ix] != 0 || y[iy] != 0 { - tmp1 := alpha * x[ix] - tmp2 := cmplx.Conj(alpha) * y[iy] - aii := real(a[i*lda+i]) + real(tmp1*cmplx.Conj(y[iy])) + real(tmp2*cmplx.Conj(x[ix])) - a[i*lda+i] = complex(aii, 0) - jx := ix + incX - jy := iy + incY - for j := i + 1; j < n; j++ { - a[i*lda+j] += tmp1*cmplx.Conj(y[jy]) + tmp2*cmplx.Conj(x[jx]) - jx += incX - jy += incY - } - } else { - aii := real(a[i*lda+i]) - a[i*lda+i] = complex(aii, 0) - } - ix += incX - iy += incY - } - return - } - - if incX == 1 && incY == 1 { - for i := 0; i < n; i++ { - if x[i] != 0 || y[i] != 0 { - tmp1 := alpha * x[i] - tmp2 := cmplx.Conj(alpha) * y[i] - for j := 0; j < i; j++ { - a[i*lda+j] += tmp1*cmplx.Conj(y[j]) + tmp2*cmplx.Conj(x[j]) - } - aii := real(a[i*lda+i]) + real(tmp1*cmplx.Conj(y[i])) + real(tmp2*cmplx.Conj(x[i])) - a[i*lda+i] = complex(aii, 0) - } else { - aii := real(a[i*lda+i]) - a[i*lda+i] = complex(aii, 0) - } - } - return - } - for i := 0; i < n; i++ { - if x[ix] != 0 || y[iy] != 0 { - tmp1 := alpha * x[ix] - tmp2 := cmplx.Conj(alpha) * y[iy] - jx := kx - jy := ky - for j := 0; j < i; j++ { - a[i*lda+j] += tmp1*cmplx.Conj(y[jy]) + tmp2*cmplx.Conj(x[jx]) - jx += incX - jy += incY - } - aii := real(a[i*lda+i]) + real(tmp1*cmplx.Conj(y[iy])) + real(tmp2*cmplx.Conj(x[ix])) - a[i*lda+i] = complex(aii, 0) - } else { - aii := real(a[i*lda+i]) - a[i*lda+i] = complex(aii, 0) - } - ix += incX - iy += incY - } -} - -// Chpmv performs the matrix-vector operation -// y = alpha * A * x + beta * y -// where alpha and beta are scalars, x and y are vectors, and A is an n×n -// Hermitian matrix in packed form. The imaginary parts of the diagonal -// elements of A are ignored and assumed to be zero. -// -// Complex64 implementations are autogenerated and not directly tested. -func (Implementation) Chpmv(uplo blas.Uplo, n int, alpha complex64, ap []complex64, x []complex64, incX int, beta complex64, y []complex64, incY int) { - switch uplo { - default: - panic(badUplo) - case blas.Upper, blas.Lower: - } - if n < 0 { - panic(nLT0) - } - if incX == 0 { - panic(zeroIncX) - } - if incY == 0 { - panic(zeroIncY) - } - - // Quick return if possible. - if n == 0 { - return - } - - // For zero matrix size the following slice length checks are trivially satisfied. - if len(ap) < n*(n+1)/2 { - panic(shortAP) - } - if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { - panic(shortX) - } - if (incY > 0 && len(y) <= (n-1)*incY) || (incY < 0 && len(y) <= (1-n)*incY) { - panic(shortY) - } - - // Quick return if possible. - if alpha == 0 && beta == 1 { - return - } - - // Set up the start indices in X and Y. - var kx int - if incX < 0 { - kx = (1 - n) * incX - } - var ky int - if incY < 0 { - ky = (1 - n) * incY - } - - // Form y = beta*y. 
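The packed layout walked by the kk bookkeeping below can be read off the loop structure: for blas.Upper, element (i, j) with j >= i sits at ap[i*(2*n-i-1)/2 + j]; for blas.Lower, element (i, j) with j <= i sits at ap[i*(i+1)/2 + j]. For n = 3, upper:

    // A  = [a00 a01 a02]      ap = {a00, a01, a02,
    //      [ .  a11 a12]            a11, a12,
    //      [ .   .  a22]            a22}   (n*(n+1)/2 = 6 entries)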
- if beta != 1 { - if incY == 1 { - if beta == 0 { - for i := range y[:n] { - y[i] = 0 - } - } else { - for i, v := range y[:n] { - y[i] = beta * v - } - } - } else { - iy := ky - if beta == 0 { - for i := 0; i < n; i++ { - y[iy] = 0 - iy += incY - } - } else { - for i := 0; i < n; i++ { - y[iy] *= beta - iy += incY - } - } - } - } - - if alpha == 0 { - return - } - - // The elements of A are accessed sequentially with one pass through ap. - - var kk int - if uplo == blas.Upper { - // Form y when ap contains the upper triangle. - // Here, kk points to the current diagonal element in ap. - if incX == 1 && incY == 1 { - for i := 0; i < n; i++ { - tmp1 := alpha * x[i] - y[i] += tmp1 * complex(real(ap[kk]), 0) - var tmp2 complex64 - k := kk + 1 - for j := i + 1; j < n; j++ { - y[j] += tmp1 * cmplx.Conj(ap[k]) - tmp2 += ap[k] * x[j] - k++ - } - y[i] += alpha * tmp2 - kk += n - i - } - } else { - ix := kx - iy := ky - for i := 0; i < n; i++ { - tmp1 := alpha * x[ix] - y[iy] += tmp1 * complex(real(ap[kk]), 0) - var tmp2 complex64 - jx := ix - jy := iy - for k := kk + 1; k < kk+n-i; k++ { - jx += incX - jy += incY - y[jy] += tmp1 * cmplx.Conj(ap[k]) - tmp2 += ap[k] * x[jx] - } - y[iy] += alpha * tmp2 - ix += incX - iy += incY - kk += n - i - } - } - return - } - - // Form y when ap contains the lower triangle. - // Here, kk points to the beginning of current row in ap. - if incX == 1 && incY == 1 { - for i := 0; i < n; i++ { - tmp1 := alpha * x[i] - var tmp2 complex64 - k := kk - for j := 0; j < i; j++ { - y[j] += tmp1 * cmplx.Conj(ap[k]) - tmp2 += ap[k] * x[j] - k++ - } - aii := complex(real(ap[kk+i]), 0) - y[i] += tmp1*aii + alpha*tmp2 - kk += i + 1 - } - } else { - ix := kx - iy := ky - for i := 0; i < n; i++ { - tmp1 := alpha * x[ix] - var tmp2 complex64 - jx := kx - jy := ky - for k := kk; k < kk+i; k++ { - y[jy] += tmp1 * cmplx.Conj(ap[k]) - tmp2 += ap[k] * x[jx] - jx += incX - jy += incY - } - aii := complex(real(ap[kk+i]), 0) - y[iy] += tmp1*aii + alpha*tmp2 - ix += incX - iy += incY - kk += i + 1 - } - } -} - -// Chpr performs the Hermitian rank-1 operation -// A += alpha * x * x^H -// where alpha is a real scalar, x is a vector, and A is an n×n hermitian matrix -// in packed form. On entry, the imaginary parts of the diagonal elements are -// assumed to be zero, and on return they are set to zero. -// -// Complex64 implementations are autogenerated and not directly tested. -func (Implementation) Chpr(uplo blas.Uplo, n int, alpha float32, x []complex64, incX int, ap []complex64) { - switch uplo { - default: - panic(badUplo) - case blas.Upper, blas.Lower: - } - if n < 0 { - panic(nLT0) - } - if incX == 0 { - panic(zeroIncX) - } - - // Quick return if possible. - if n == 0 { - return - } - - // For zero matrix size the following slice length checks are trivially satisfied. - if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { - panic(shortX) - } - if len(ap) < n*(n+1)/2 { - panic(shortAP) - } - - // Quick return if possible. - if alpha == 0 { - return - } - - // Set up start index in X. - var kx int - if incX < 0 { - kx = (1 - n) * incX - } - - // The elements of A are accessed sequentially with one pass through ap. - - var kk int - if uplo == blas.Upper { - // Form A when upper triangle is stored in AP. - // Here, kk points to the current diagonal element in ap. 
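A minimal Chpmv sketch against that layout, mirroring the dense Chemv example:

    var impl gonum.Implementation
    ap := []complex64{2, 1i, 3} // packed upper triangle of [[2, 1i], [-1i, 3]]
    x := []complex64{1, 1}
    y := make([]complex64, 2)
    impl.Chpmv(blas.Upper, 2, 1, ap, x, 1, 0, y, 1)
    // y is again [2+1i, 3-1i].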
- if incX == 1 { - for i := 0; i < n; i++ { - xi := x[i] - if xi != 0 { - aii := real(ap[kk]) + alpha*real(cmplx.Conj(xi)*xi) - ap[kk] = complex(aii, 0) - - tmp := complex(alpha, 0) * xi - a := ap[kk+1 : kk+n-i] - x := x[i+1 : n] - for j, v := range x { - a[j] += tmp * cmplx.Conj(v) - } - } else { - ap[kk] = complex(real(ap[kk]), 0) - } - kk += n - i - } - } else { - ix := kx - for i := 0; i < n; i++ { - xi := x[ix] - if xi != 0 { - aii := real(ap[kk]) + alpha*real(cmplx.Conj(xi)*xi) - ap[kk] = complex(aii, 0) - - tmp := complex(alpha, 0) * xi - jx := ix + incX - a := ap[kk+1 : kk+n-i] - for k := range a { - a[k] += tmp * cmplx.Conj(x[jx]) - jx += incX - } - } else { - ap[kk] = complex(real(ap[kk]), 0) - } - ix += incX - kk += n - i - } - } - return - } - - // Form A when lower triangle is stored in AP. - // Here, kk points to the beginning of current row in ap. - if incX == 1 { - for i := 0; i < n; i++ { - xi := x[i] - if xi != 0 { - tmp := complex(alpha, 0) * xi - a := ap[kk : kk+i] - for j, v := range x[:i] { - a[j] += tmp * cmplx.Conj(v) - } - - aii := real(ap[kk+i]) + alpha*real(cmplx.Conj(xi)*xi) - ap[kk+i] = complex(aii, 0) - } else { - ap[kk+i] = complex(real(ap[kk+i]), 0) - } - kk += i + 1 - } - } else { - ix := kx - for i := 0; i < n; i++ { - xi := x[ix] - if xi != 0 { - tmp := complex(alpha, 0) * xi - a := ap[kk : kk+i] - jx := kx - for k := range a { - a[k] += tmp * cmplx.Conj(x[jx]) - jx += incX - } - - aii := real(ap[kk+i]) + alpha*real(cmplx.Conj(xi)*xi) - ap[kk+i] = complex(aii, 0) - } else { - ap[kk+i] = complex(real(ap[kk+i]), 0) - } - ix += incX - kk += i + 1 - } - } -} - -// Chpr2 performs the Hermitian rank-2 operation -// A += alpha * x * y^H + conj(alpha) * y * x^H -// where alpha is a complex scalar, x and y are n element vectors, and A is an -// n×n Hermitian matrix, supplied in packed form. On entry, the imaginary parts -// of the diagonal elements are assumed to be zero, and on return they are set to zero. -// -// Complex64 implementations are autogenerated and not directly tested. -func (Implementation) Chpr2(uplo blas.Uplo, n int, alpha complex64, x []complex64, incX int, y []complex64, incY int, ap []complex64) { - switch uplo { - default: - panic(badUplo) - case blas.Upper, blas.Lower: - } - if n < 0 { - panic(nLT0) - } - if incX == 0 { - panic(zeroIncX) - } - if incY == 0 { - panic(zeroIncY) - } - - // Quick return if possible. - if n == 0 { - return - } - - // For zero matrix size the following slice length checks are trivially satisfied. - if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { - panic(shortX) - } - if (incY > 0 && len(y) <= (n-1)*incY) || (incY < 0 && len(y) <= (1-n)*incY) { - panic(shortY) - } - if len(ap) < n*(n+1)/2 { - panic(shortAP) - } - - // Quick return if possible. - if alpha == 0 { - return - } - - // Set up start indices in X and Y. - var kx int - if incX < 0 { - kx = (1 - n) * incX - } - var ky int - if incY < 0 { - ky = (1 - n) * incY - } - - // The elements of A are accessed sequentially with one pass through ap. - - var kk int - if uplo == blas.Upper { - // Form A when upper triangle is stored in AP. - // Here, kk points to the current diagonal element in ap. 
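And the packed counterpart of the Cher sketch above:

    var impl gonum.Implementation
    ap := make([]complex64, 3) // n*(n+1)/2 entries for n = 2
    x := []complex64{1i, 2}
    impl.Chpr(blas.Upper, 2, 1, x, 1, ap)
    // ap is now {1, 2i, 4}, the packed upper triangle of x*x^H.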
- if incX == 1 && incY == 1 { - for i := 0; i < n; i++ { - if x[i] != 0 || y[i] != 0 { - tmp1 := alpha * x[i] - tmp2 := cmplx.Conj(alpha) * y[i] - aii := real(ap[kk]) + real(tmp1*cmplx.Conj(y[i])) + real(tmp2*cmplx.Conj(x[i])) - ap[kk] = complex(aii, 0) - k := kk + 1 - for j := i + 1; j < n; j++ { - ap[k] += tmp1*cmplx.Conj(y[j]) + tmp2*cmplx.Conj(x[j]) - k++ - } - } else { - ap[kk] = complex(real(ap[kk]), 0) - } - kk += n - i - } - } else { - ix := kx - iy := ky - for i := 0; i < n; i++ { - if x[ix] != 0 || y[iy] != 0 { - tmp1 := alpha * x[ix] - tmp2 := cmplx.Conj(alpha) * y[iy] - aii := real(ap[kk]) + real(tmp1*cmplx.Conj(y[iy])) + real(tmp2*cmplx.Conj(x[ix])) - ap[kk] = complex(aii, 0) - jx := ix + incX - jy := iy + incY - for k := kk + 1; k < kk+n-i; k++ { - ap[k] += tmp1*cmplx.Conj(y[jy]) + tmp2*cmplx.Conj(x[jx]) - jx += incX - jy += incY - } - } else { - ap[kk] = complex(real(ap[kk]), 0) - } - ix += incX - iy += incY - kk += n - i - } - } - return - } - - // Form A when lower triangle is stored in AP. - // Here, kk points to the beginning of current row in ap. - if incX == 1 && incY == 1 { - for i := 0; i < n; i++ { - if x[i] != 0 || y[i] != 0 { - tmp1 := alpha * x[i] - tmp2 := cmplx.Conj(alpha) * y[i] - k := kk - for j := 0; j < i; j++ { - ap[k] += tmp1*cmplx.Conj(y[j]) + tmp2*cmplx.Conj(x[j]) - k++ - } - aii := real(ap[kk+i]) + real(tmp1*cmplx.Conj(y[i])) + real(tmp2*cmplx.Conj(x[i])) - ap[kk+i] = complex(aii, 0) - } else { - ap[kk+i] = complex(real(ap[kk+i]), 0) - } - kk += i + 1 - } - } else { - ix := kx - iy := ky - for i := 0; i < n; i++ { - if x[ix] != 0 || y[iy] != 0 { - tmp1 := alpha * x[ix] - tmp2 := cmplx.Conj(alpha) * y[iy] - jx := kx - jy := ky - for k := kk; k < kk+i; k++ { - ap[k] += tmp1*cmplx.Conj(y[jy]) + tmp2*cmplx.Conj(x[jx]) - jx += incX - jy += incY - } - aii := real(ap[kk+i]) + real(tmp1*cmplx.Conj(y[iy])) + real(tmp2*cmplx.Conj(x[ix])) - ap[kk+i] = complex(aii, 0) - } else { - ap[kk+i] = complex(real(ap[kk+i]), 0) - } - ix += incX - iy += incY - kk += i + 1 - } - } -} - -// Ctbmv performs one of the matrix-vector operations -// x = A * x if trans = blas.NoTrans -// x = A^T * x if trans = blas.Trans -// x = A^H * x if trans = blas.ConjTrans -// where x is an n element vector and A is an n×n triangular band matrix, with -// (k+1) diagonals. -// -// Complex64 implementations are autogenerated and not directly tested. -func (Implementation) Ctbmv(uplo blas.Uplo, trans blas.Transpose, diag blas.Diag, n, k int, a []complex64, lda int, x []complex64, incX int) { - switch trans { - default: - panic(badTranspose) - case blas.NoTrans, blas.Trans, blas.ConjTrans: - } - switch uplo { - default: - panic(badUplo) - case blas.Upper, blas.Lower: - } - switch diag { - default: - panic(badDiag) - case blas.NonUnit, blas.Unit: - } - if n < 0 { - panic(nLT0) - } - if k < 0 { - panic(kLT0) - } - if lda < k+1 { - panic(badLdA) - } - if incX == 0 { - panic(zeroIncX) - } - - // Quick return if possible. - if n == 0 { - return - } - - // For zero matrix size the following slice length checks are trivially satisfied. - if len(a) < lda*(n-1)+k+1 { - panic(shortA) - } - if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { - panic(shortX) - } - - // Set up start index in X. 
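A small Ctbmv sketch: an upper bidiagonal matrix is the k = 1 case, with the diagonal of row i stored at a[i*lda]:

    var impl gonum.Implementation
    // A = [1 2 0]
    //     [0 3 4]
    //     [0 0 5]
    a := []complex64{1, 2, 3, 4, 5, 0} // lda = 2; the final 0 is padding
    x := []complex64{1, 1, 1}
    impl.Ctbmv(blas.Upper, blas.NoTrans, blas.NonUnit, 3, 1, a, 2, x, 1)
    // x is now [3 7 5].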
- var kx int - if incX < 0 { - kx = (1 - n) * incX - } - - switch trans { - case blas.NoTrans: - if uplo == blas.Upper { - if incX == 1 { - for i := 0; i < n; i++ { - xi := x[i] - if diag == blas.NonUnit { - xi *= a[i*lda] - } - kk := min(k, n-i-1) - for j, aij := range a[i*lda+1 : i*lda+kk+1] { - xi += x[i+j+1] * aij - } - x[i] = xi - } - } else { - ix := kx - for i := 0; i < n; i++ { - xi := x[ix] - if diag == blas.NonUnit { - xi *= a[i*lda] - } - kk := min(k, n-i-1) - jx := ix + incX - for _, aij := range a[i*lda+1 : i*lda+kk+1] { - xi += x[jx] * aij - jx += incX - } - x[ix] = xi - ix += incX - } - } - } else { - if incX == 1 { - for i := n - 1; i >= 0; i-- { - xi := x[i] - if diag == blas.NonUnit { - xi *= a[i*lda+k] - } - kk := min(k, i) - for j, aij := range a[i*lda+k-kk : i*lda+k] { - xi += x[i-kk+j] * aij - } - x[i] = xi - } - } else { - ix := kx + (n-1)*incX - for i := n - 1; i >= 0; i-- { - xi := x[ix] - if diag == blas.NonUnit { - xi *= a[i*lda+k] - } - kk := min(k, i) - jx := ix - kk*incX - for _, aij := range a[i*lda+k-kk : i*lda+k] { - xi += x[jx] * aij - jx += incX - } - x[ix] = xi - ix -= incX - } - } - } - case blas.Trans: - if uplo == blas.Upper { - if incX == 1 { - for i := n - 1; i >= 0; i-- { - kk := min(k, n-i-1) - xi := x[i] - for j, aij := range a[i*lda+1 : i*lda+kk+1] { - x[i+j+1] += xi * aij - } - if diag == blas.NonUnit { - x[i] *= a[i*lda] - } - } - } else { - ix := kx + (n-1)*incX - for i := n - 1; i >= 0; i-- { - kk := min(k, n-i-1) - jx := ix + incX - xi := x[ix] - for _, aij := range a[i*lda+1 : i*lda+kk+1] { - x[jx] += xi * aij - jx += incX - } - if diag == blas.NonUnit { - x[ix] *= a[i*lda] - } - ix -= incX - } - } - } else { - if incX == 1 { - for i := 0; i < n; i++ { - kk := min(k, i) - xi := x[i] - for j, aij := range a[i*lda+k-kk : i*lda+k] { - x[i-kk+j] += xi * aij - } - if diag == blas.NonUnit { - x[i] *= a[i*lda+k] - } - } - } else { - ix := kx - for i := 0; i < n; i++ { - kk := min(k, i) - jx := ix - kk*incX - xi := x[ix] - for _, aij := range a[i*lda+k-kk : i*lda+k] { - x[jx] += xi * aij - jx += incX - } - if diag == blas.NonUnit { - x[ix] *= a[i*lda+k] - } - ix += incX - } - } - } - case blas.ConjTrans: - if uplo == blas.Upper { - if incX == 1 { - for i := n - 1; i >= 0; i-- { - kk := min(k, n-i-1) - xi := x[i] - for j, aij := range a[i*lda+1 : i*lda+kk+1] { - x[i+j+1] += xi * cmplx.Conj(aij) - } - if diag == blas.NonUnit { - x[i] *= cmplx.Conj(a[i*lda]) - } - } - } else { - ix := kx + (n-1)*incX - for i := n - 1; i >= 0; i-- { - kk := min(k, n-i-1) - jx := ix + incX - xi := x[ix] - for _, aij := range a[i*lda+1 : i*lda+kk+1] { - x[jx] += xi * cmplx.Conj(aij) - jx += incX - } - if diag == blas.NonUnit { - x[ix] *= cmplx.Conj(a[i*lda]) - } - ix -= incX - } - } - } else { - if incX == 1 { - for i := 0; i < n; i++ { - kk := min(k, i) - xi := x[i] - for j, aij := range a[i*lda+k-kk : i*lda+k] { - x[i-kk+j] += xi * cmplx.Conj(aij) - } - if diag == blas.NonUnit { - x[i] *= cmplx.Conj(a[i*lda+k]) - } - } - } else { - ix := kx - for i := 0; i < n; i++ { - kk := min(k, i) - jx := ix - kk*incX - xi := x[ix] - for _, aij := range a[i*lda+k-kk : i*lda+k] { - x[jx] += xi * cmplx.Conj(aij) - jx += incX - } - if diag == blas.NonUnit { - x[ix] *= cmplx.Conj(a[i*lda+k]) - } - ix += incX - } - } - } - } -} - -// Ctbsv solves one of the systems of equations -// A * x = b if trans == blas.NoTrans -// A^T * x = b if trans == blas.Trans -// A^H * x = b if trans == blas.ConjTrans -// where b and x are n element vectors and A is an n×n triangular band matrix -// with 
(k+1) diagonals. -// -// On entry, x contains the values of b, and the solution is -// stored in-place into x. -// -// No test for singularity or near-singularity is included in this -// routine. Such tests must be performed before calling this routine. -// -// Complex64 implementations are autogenerated and not directly tested. -func (Implementation) Ctbsv(uplo blas.Uplo, trans blas.Transpose, diag blas.Diag, n, k int, a []complex64, lda int, x []complex64, incX int) { - switch trans { - default: - panic(badTranspose) - case blas.NoTrans, blas.Trans, blas.ConjTrans: - } - switch uplo { - default: - panic(badUplo) - case blas.Upper, blas.Lower: - } - switch diag { - default: - panic(badDiag) - case blas.NonUnit, blas.Unit: - } - if n < 0 { - panic(nLT0) - } - if k < 0 { - panic(kLT0) - } - if lda < k+1 { - panic(badLdA) - } - if incX == 0 { - panic(zeroIncX) - } - - // Quick return if possible. - if n == 0 { - return - } - - // For zero matrix size the following slice length checks are trivially satisfied. - if len(a) < lda*(n-1)+k+1 { - panic(shortA) - } - if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { - panic(shortX) - } - - // Set up start index in X. - var kx int - if incX < 0 { - kx = (1 - n) * incX - } - - switch trans { - case blas.NoTrans: - if uplo == blas.Upper { - if incX == 1 { - for i := n - 1; i >= 0; i-- { - kk := min(k, n-i-1) - var sum complex64 - for j, aij := range a[i*lda+1 : i*lda+kk+1] { - sum += x[i+1+j] * aij - } - x[i] -= sum - if diag == blas.NonUnit { - x[i] /= a[i*lda] - } - } - } else { - ix := kx + (n-1)*incX - for i := n - 1; i >= 0; i-- { - kk := min(k, n-i-1) - var sum complex64 - jx := ix + incX - for _, aij := range a[i*lda+1 : i*lda+kk+1] { - sum += x[jx] * aij - jx += incX - } - x[ix] -= sum - if diag == blas.NonUnit { - x[ix] /= a[i*lda] - } - ix -= incX - } - } - } else { - if incX == 1 { - for i := 0; i < n; i++ { - kk := min(k, i) - var sum complex64 - for j, aij := range a[i*lda+k-kk : i*lda+k] { - sum += x[i-kk+j] * aij - } - x[i] -= sum - if diag == blas.NonUnit { - x[i] /= a[i*lda+k] - } - } - } else { - ix := kx - for i := 0; i < n; i++ { - kk := min(k, i) - var sum complex64 - jx := ix - kk*incX - for _, aij := range a[i*lda+k-kk : i*lda+k] { - sum += x[jx] * aij - jx += incX - } - x[ix] -= sum - if diag == blas.NonUnit { - x[ix] /= a[i*lda+k] - } - ix += incX - } - } - } - case blas.Trans: - if uplo == blas.Upper { - if incX == 1 { - for i := 0; i < n; i++ { - if diag == blas.NonUnit { - x[i] /= a[i*lda] - } - kk := min(k, n-i-1) - xi := x[i] - for j, aij := range a[i*lda+1 : i*lda+kk+1] { - x[i+1+j] -= xi * aij - } - } - } else { - ix := kx - for i := 0; i < n; i++ { - if diag == blas.NonUnit { - x[ix] /= a[i*lda] - } - kk := min(k, n-i-1) - xi := x[ix] - jx := ix + incX - for _, aij := range a[i*lda+1 : i*lda+kk+1] { - x[jx] -= xi * aij - jx += incX - } - ix += incX - } - } - } else { - if incX == 1 { - for i := n - 1; i >= 0; i-- { - if diag == blas.NonUnit { - x[i] /= a[i*lda+k] - } - kk := min(k, i) - xi := x[i] - for j, aij := range a[i*lda+k-kk : i*lda+k] { - x[i-kk+j] -= xi * aij - } - } - } else { - ix := kx + (n-1)*incX - for i := n - 1; i >= 0; i-- { - if diag == blas.NonUnit { - x[ix] /= a[i*lda+k] - } - kk := min(k, i) - xi := x[ix] - jx := ix - kk*incX - for _, aij := range a[i*lda+k-kk : i*lda+k] { - x[jx] -= xi * aij - jx += incX - } - ix -= incX - } - } - } - case blas.ConjTrans: - if uplo == blas.Upper { - if incX == 1 { - for i := 0; i < n; i++ { - if diag == blas.NonUnit { - x[i] /= 
cmplx.Conj(a[i*lda]) - } - kk := min(k, n-i-1) - xi := x[i] - for j, aij := range a[i*lda+1 : i*lda+kk+1] { - x[i+1+j] -= xi * cmplx.Conj(aij) - } - } - } else { - ix := kx - for i := 0; i < n; i++ { - if diag == blas.NonUnit { - x[ix] /= cmplx.Conj(a[i*lda]) - } - kk := min(k, n-i-1) - xi := x[ix] - jx := ix + incX - for _, aij := range a[i*lda+1 : i*lda+kk+1] { - x[jx] -= xi * cmplx.Conj(aij) - jx += incX - } - ix += incX - } - } - } else { - if incX == 1 { - for i := n - 1; i >= 0; i-- { - if diag == blas.NonUnit { - x[i] /= cmplx.Conj(a[i*lda+k]) - } - kk := min(k, i) - xi := x[i] - for j, aij := range a[i*lda+k-kk : i*lda+k] { - x[i-kk+j] -= xi * cmplx.Conj(aij) - } - } - } else { - ix := kx + (n-1)*incX - for i := n - 1; i >= 0; i-- { - if diag == blas.NonUnit { - x[ix] /= cmplx.Conj(a[i*lda+k]) - } - kk := min(k, i) - xi := x[ix] - jx := ix - kk*incX - for _, aij := range a[i*lda+k-kk : i*lda+k] { - x[jx] -= xi * cmplx.Conj(aij) - jx += incX - } - ix -= incX - } - } - } - } -} - -// Ctpmv performs one of the matrix-vector operations -// x = A * x if trans = blas.NoTrans -// x = A^T * x if trans = blas.Trans -// x = A^H * x if trans = blas.ConjTrans -// where x is an n element vector and A is an n×n triangular matrix, supplied in -// packed form. -// -// Complex64 implementations are autogenerated and not directly tested. -func (Implementation) Ctpmv(uplo blas.Uplo, trans blas.Transpose, diag blas.Diag, n int, ap []complex64, x []complex64, incX int) { - switch uplo { - default: - panic(badUplo) - case blas.Upper, blas.Lower: - } - switch trans { - default: - panic(badTranspose) - case blas.NoTrans, blas.Trans, blas.ConjTrans: - } - switch diag { - default: - panic(badDiag) - case blas.NonUnit, blas.Unit: - } - if n < 0 { - panic(nLT0) - } - if incX == 0 { - panic(zeroIncX) - } - - // Quick return if possible. - if n == 0 { - return - } - - // For zero matrix size the following slice length checks are trivially satisfied. - if len(ap) < n*(n+1)/2 { - panic(shortAP) - } - if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { - panic(shortX) - } - - // Set up start index in X. - var kx int - if incX < 0 { - kx = (1 - n) * incX - } - - // The elements of A are accessed sequentially with one pass through A. - - if trans == blas.NoTrans { - // Form x = A*x. - if uplo == blas.Upper { - // kk points to the current diagonal element in ap. - kk := 0 - if incX == 1 { - x = x[:n] - for i := range x { - if diag == blas.NonUnit { - x[i] *= ap[kk] - } - if n-i-1 > 0 { - x[i] += c64.DotuUnitary(ap[kk+1:kk+n-i], x[i+1:]) - } - kk += n - i - } - } else { - ix := kx - for i := 0; i < n; i++ { - if diag == blas.NonUnit { - x[ix] *= ap[kk] - } - if n-i-1 > 0 { - x[ix] += c64.DotuInc(ap[kk+1:kk+n-i], x, uintptr(n-i-1), 1, uintptr(incX), 0, uintptr(ix+incX)) - } - ix += incX - kk += n - i - } - } - } else { - // kk points to the beginning of current row in ap. - kk := n*(n+1)/2 - n - if incX == 1 { - for i := n - 1; i >= 0; i-- { - if diag == blas.NonUnit { - x[i] *= ap[kk+i] - } - if i > 0 { - x[i] += c64.DotuUnitary(ap[kk:kk+i], x[:i]) - } - kk -= i - } - } else { - ix := kx + (n-1)*incX - for i := n - 1; i >= 0; i-- { - if diag == blas.NonUnit { - x[ix] *= ap[kk+i] - } - if i > 0 { - x[ix] += c64.DotuInc(ap[kk:kk+i], x, uintptr(i), 1, uintptr(incX), 0, uintptr(kx)) - } - ix -= incX - kk -= i - } - } - } - return - } - - if trans == blas.Trans { - // Form x = A^T*x. - if uplo == blas.Upper { - // kk points to the current diagonal element in ap. 
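Ctbsv above undoes that product; continuing the Ctbmv sketch:

    var impl gonum.Implementation
    a := []complex64{1, 2, 3, 4, 5, 0} // same banded matrix, lda = 2
    b := []complex64{3, 7, 5}
    impl.Ctbsv(blas.Upper, blas.NoTrans, blas.NonUnit, 3, 1, a, 2, b, 1)
    // b now holds the solution [1 1 1].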
- kk := n*(n+1)/2 - 1 - if incX == 1 { - for i := n - 1; i >= 0; i-- { - xi := x[i] - if diag == blas.NonUnit { - x[i] *= ap[kk] - } - if n-i-1 > 0 { - c64.AxpyUnitary(xi, ap[kk+1:kk+n-i], x[i+1:n]) - } - kk -= n - i + 1 - } - } else { - ix := kx + (n-1)*incX - for i := n - 1; i >= 0; i-- { - xi := x[ix] - if diag == blas.NonUnit { - x[ix] *= ap[kk] - } - if n-i-1 > 0 { - c64.AxpyInc(xi, ap[kk+1:kk+n-i], x, uintptr(n-i-1), 1, uintptr(incX), 0, uintptr(ix+incX)) - } - ix -= incX - kk -= n - i + 1 - } - } - } else { - // kk points to the beginning of current row in ap. - kk := 0 - if incX == 1 { - x = x[:n] - for i := range x { - if i > 0 { - c64.AxpyUnitary(x[i], ap[kk:kk+i], x[:i]) - } - if diag == blas.NonUnit { - x[i] *= ap[kk+i] - } - kk += i + 1 - } - } else { - ix := kx - for i := 0; i < n; i++ { - if i > 0 { - c64.AxpyInc(x[ix], ap[kk:kk+i], x, uintptr(i), 1, uintptr(incX), 0, uintptr(kx)) - } - if diag == blas.NonUnit { - x[ix] *= ap[kk+i] - } - ix += incX - kk += i + 1 - } - } - } - return - } - - // Form x = A^H*x. - if uplo == blas.Upper { - // kk points to the current diagonal element in ap. - kk := n*(n+1)/2 - 1 - if incX == 1 { - for i := n - 1; i >= 0; i-- { - xi := x[i] - if diag == blas.NonUnit { - x[i] *= cmplx.Conj(ap[kk]) - } - k := kk + 1 - for j := i + 1; j < n; j++ { - x[j] += xi * cmplx.Conj(ap[k]) - k++ - } - kk -= n - i + 1 - } - } else { - ix := kx + (n-1)*incX - for i := n - 1; i >= 0; i-- { - xi := x[ix] - if diag == blas.NonUnit { - x[ix] *= cmplx.Conj(ap[kk]) - } - jx := ix + incX - k := kk + 1 - for j := i + 1; j < n; j++ { - x[jx] += xi * cmplx.Conj(ap[k]) - jx += incX - k++ - } - ix -= incX - kk -= n - i + 1 - } - } - } else { - // kk points to the beginning of current row in ap. - kk := 0 - if incX == 1 { - x = x[:n] - for i, xi := range x { - for j := 0; j < i; j++ { - x[j] += xi * cmplx.Conj(ap[kk+j]) - } - if diag == blas.NonUnit { - x[i] *= cmplx.Conj(ap[kk+i]) - } - kk += i + 1 - } - } else { - ix := kx - for i := 0; i < n; i++ { - xi := x[ix] - jx := kx - for j := 0; j < i; j++ { - x[jx] += xi * cmplx.Conj(ap[kk+j]) - jx += incX - } - if diag == blas.NonUnit { - x[ix] *= cmplx.Conj(ap[kk+i]) - } - ix += incX - kk += i + 1 - } - } - } -} - -// Ctpsv solves one of the systems of equations -// A * x = b if trans == blas.NoTrans -// A^T * x = b if trans == blas.Trans -// A^H * x = b if trans == blas.ConjTrans -// where b and x are n element vectors and A is an n×n triangular matrix in -// packed form. -// -// On entry, x contains the values of b, and the solution is -// stored in-place into x. -// -// No test for singularity or near-singularity is included in this -// routine. Such tests must be performed before calling this routine. -// -// Complex64 implementations are autogenerated and not directly tested. -func (Implementation) Ctpsv(uplo blas.Uplo, trans blas.Transpose, diag blas.Diag, n int, ap []complex64, x []complex64, incX int) { - switch uplo { - default: - panic(badUplo) - case blas.Upper, blas.Lower: - } - switch trans { - default: - panic(badTranspose) - case blas.NoTrans, blas.Trans, blas.ConjTrans: - } - switch diag { - default: - panic(badDiag) - case blas.NonUnit, blas.Unit: - } - if n < 0 { - panic(nLT0) - } - if incX == 0 { - panic(zeroIncX) - } - - // Quick return if possible. - if n == 0 { - return - } - - // For zero matrix size the following slice length checks are trivially satisfied. 
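A minimal Ctpmv sketch, reusing the packed upper layout spelled out for Chpmv:

    var impl gonum.Implementation
    ap := []complex64{1, 2, 3} // packed upper triangle of [[1, 2], [0, 3]]
    x := []complex64{1, 1}
    impl.Ctpmv(blas.Upper, blas.NoTrans, blas.NonUnit, 2, ap, x, 1)
    // x is now [3 3].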
- if len(ap) < n*(n+1)/2 { - panic(shortAP) - } - if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { - panic(shortX) - } - - // Set up start index in X. - var kx int - if incX < 0 { - kx = (1 - n) * incX - } - - // The elements of A are accessed sequentially with one pass through ap. - - if trans == blas.NoTrans { - // Form x = inv(A)*x. - if uplo == blas.Upper { - kk := n*(n+1)/2 - 1 - if incX == 1 { - for i := n - 1; i >= 0; i-- { - aii := ap[kk] - if n-i-1 > 0 { - x[i] -= c64.DotuUnitary(x[i+1:n], ap[kk+1:kk+n-i]) - } - if diag == blas.NonUnit { - x[i] /= aii - } - kk -= n - i + 1 - } - } else { - ix := kx + (n-1)*incX - for i := n - 1; i >= 0; i-- { - aii := ap[kk] - if n-i-1 > 0 { - x[ix] -= c64.DotuInc(x, ap[kk+1:kk+n-i], uintptr(n-i-1), uintptr(incX), 1, uintptr(ix+incX), 0) - } - if diag == blas.NonUnit { - x[ix] /= aii - } - ix -= incX - kk -= n - i + 1 - } - } - } else { - kk := 0 - if incX == 1 { - for i := 0; i < n; i++ { - if i > 0 { - x[i] -= c64.DotuUnitary(x[:i], ap[kk:kk+i]) - } - if diag == blas.NonUnit { - x[i] /= ap[kk+i] - } - kk += i + 1 - } - } else { - ix := kx - for i := 0; i < n; i++ { - if i > 0 { - x[ix] -= c64.DotuInc(x, ap[kk:kk+i], uintptr(i), uintptr(incX), 1, uintptr(kx), 0) - } - if diag == blas.NonUnit { - x[ix] /= ap[kk+i] - } - ix += incX - kk += i + 1 - } - } - } - return - } - - if trans == blas.Trans { - // Form x = inv(A^T)*x. - if uplo == blas.Upper { - kk := 0 - if incX == 1 { - for j := 0; j < n; j++ { - if diag == blas.NonUnit { - x[j] /= ap[kk] - } - if n-j-1 > 0 { - c64.AxpyUnitary(-x[j], ap[kk+1:kk+n-j], x[j+1:n]) - } - kk += n - j - } - } else { - jx := kx - for j := 0; j < n; j++ { - if diag == blas.NonUnit { - x[jx] /= ap[kk] - } - if n-j-1 > 0 { - c64.AxpyInc(-x[jx], ap[kk+1:kk+n-j], x, uintptr(n-j-1), 1, uintptr(incX), 0, uintptr(jx+incX)) - } - jx += incX - kk += n - j - } - } - } else { - kk := n*(n+1)/2 - n - if incX == 1 { - for j := n - 1; j >= 0; j-- { - if diag == blas.NonUnit { - x[j] /= ap[kk+j] - } - if j > 0 { - c64.AxpyUnitary(-x[j], ap[kk:kk+j], x[:j]) - } - kk -= j - } - } else { - jx := kx + (n-1)*incX - for j := n - 1; j >= 0; j-- { - if diag == blas.NonUnit { - x[jx] /= ap[kk+j] - } - if j > 0 { - c64.AxpyInc(-x[jx], ap[kk:kk+j], x, uintptr(j), 1, uintptr(incX), 0, uintptr(kx)) - } - jx -= incX - kk -= j - } - } - } - return - } - - // Form x = inv(A^H)*x. 
- if uplo == blas.Upper { - kk := 0 - if incX == 1 { - for j := 0; j < n; j++ { - if diag == blas.NonUnit { - x[j] /= cmplx.Conj(ap[kk]) - } - xj := x[j] - k := kk + 1 - for i := j + 1; i < n; i++ { - x[i] -= xj * cmplx.Conj(ap[k]) - k++ - } - kk += n - j - } - } else { - jx := kx - for j := 0; j < n; j++ { - if diag == blas.NonUnit { - x[jx] /= cmplx.Conj(ap[kk]) - } - xj := x[jx] - ix := jx + incX - k := kk + 1 - for i := j + 1; i < n; i++ { - x[ix] -= xj * cmplx.Conj(ap[k]) - ix += incX - k++ - } - jx += incX - kk += n - j - } - } - } else { - kk := n*(n+1)/2 - n - if incX == 1 { - for j := n - 1; j >= 0; j-- { - if diag == blas.NonUnit { - x[j] /= cmplx.Conj(ap[kk+j]) - } - xj := x[j] - for i := 0; i < j; i++ { - x[i] -= xj * cmplx.Conj(ap[kk+i]) - } - kk -= j - } - } else { - jx := kx + (n-1)*incX - for j := n - 1; j >= 0; j-- { - if diag == blas.NonUnit { - x[jx] /= cmplx.Conj(ap[kk+j]) - } - xj := x[jx] - ix := kx - for i := 0; i < j; i++ { - x[ix] -= xj * cmplx.Conj(ap[kk+i]) - ix += incX - } - jx -= incX - kk -= j - } - } - } -} - -// Ctrmv performs one of the matrix-vector operations -// x = A * x if trans = blas.NoTrans -// x = A^T * x if trans = blas.Trans -// x = A^H * x if trans = blas.ConjTrans -// where x is a vector, and A is an n×n triangular matrix. -// -// Complex64 implementations are autogenerated and not directly tested. -func (Implementation) Ctrmv(uplo blas.Uplo, trans blas.Transpose, diag blas.Diag, n int, a []complex64, lda int, x []complex64, incX int) { - switch trans { - default: - panic(badTranspose) - case blas.NoTrans, blas.Trans, blas.ConjTrans: - } - switch uplo { - default: - panic(badUplo) - case blas.Upper, blas.Lower: - } - switch diag { - default: - panic(badDiag) - case blas.NonUnit, blas.Unit: - } - if n < 0 { - panic(nLT0) - } - if lda < max(1, n) { - panic(badLdA) - } - if incX == 0 { - panic(zeroIncX) - } - - // Quick return if possible. - if n == 0 { - return - } - - // For zero matrix size the following slice length checks are trivially satisfied. - if len(a) < lda*(n-1)+n { - panic(shortA) - } - if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { - panic(shortX) - } - - // Set up start index in X. - var kx int - if incX < 0 { - kx = (1 - n) * incX - } - - // The elements of A are accessed sequentially with one pass through A. - - if trans == blas.NoTrans { - // Form x = A*x. - if uplo == blas.Upper { - if incX == 1 { - for i := 0; i < n; i++ { - if diag == blas.NonUnit { - x[i] *= a[i*lda+i] - } - if n-i-1 > 0 { - x[i] += c64.DotuUnitary(a[i*lda+i+1:i*lda+n], x[i+1:n]) - } - } - } else { - ix := kx - for i := 0; i < n; i++ { - if diag == blas.NonUnit { - x[ix] *= a[i*lda+i] - } - if n-i-1 > 0 { - x[ix] += c64.DotuInc(a[i*lda+i+1:i*lda+n], x, uintptr(n-i-1), 1, uintptr(incX), 0, uintptr(ix+incX)) - } - ix += incX - } - } - } else { - if incX == 1 { - for i := n - 1; i >= 0; i-- { - if diag == blas.NonUnit { - x[i] *= a[i*lda+i] - } - if i > 0 { - x[i] += c64.DotuUnitary(a[i*lda:i*lda+i], x[:i]) - } - } - } else { - ix := kx + (n-1)*incX - for i := n - 1; i >= 0; i-- { - if diag == blas.NonUnit { - x[ix] *= a[i*lda+i] - } - if i > 0 { - x[ix] += c64.DotuInc(a[i*lda:i*lda+i], x, uintptr(i), 1, uintptr(incX), 0, uintptr(kx)) - } - ix -= incX - } - } - } - return - } - - if trans == blas.Trans { - // Form x = A^T*x. 
- if uplo == blas.Upper { - if incX == 1 { - for i := n - 1; i >= 0; i-- { - xi := x[i] - if diag == blas.NonUnit { - x[i] *= a[i*lda+i] - } - if n-i-1 > 0 { - c64.AxpyUnitary(xi, a[i*lda+i+1:i*lda+n], x[i+1:n]) - } - } - } else { - ix := kx + (n-1)*incX - for i := n - 1; i >= 0; i-- { - xi := x[ix] - if diag == blas.NonUnit { - x[ix] *= a[i*lda+i] - } - if n-i-1 > 0 { - c64.AxpyInc(xi, a[i*lda+i+1:i*lda+n], x, uintptr(n-i-1), 1, uintptr(incX), 0, uintptr(ix+incX)) - } - ix -= incX - } - } - } else { - if incX == 1 { - for i := 0; i < n; i++ { - if i > 0 { - c64.AxpyUnitary(x[i], a[i*lda:i*lda+i], x[:i]) - } - if diag == blas.NonUnit { - x[i] *= a[i*lda+i] - } - } - } else { - ix := kx - for i := 0; i < n; i++ { - if i > 0 { - c64.AxpyInc(x[ix], a[i*lda:i*lda+i], x, uintptr(i), 1, uintptr(incX), 0, uintptr(kx)) - } - if diag == blas.NonUnit { - x[ix] *= a[i*lda+i] - } - ix += incX - } - } - } - return - } - - // Form x = A^H*x. - if uplo == blas.Upper { - if incX == 1 { - for i := n - 1; i >= 0; i-- { - xi := x[i] - if diag == blas.NonUnit { - x[i] *= cmplx.Conj(a[i*lda+i]) - } - for j := i + 1; j < n; j++ { - x[j] += xi * cmplx.Conj(a[i*lda+j]) - } - } - } else { - ix := kx + (n-1)*incX - for i := n - 1; i >= 0; i-- { - xi := x[ix] - if diag == blas.NonUnit { - x[ix] *= cmplx.Conj(a[i*lda+i]) - } - jx := ix + incX - for j := i + 1; j < n; j++ { - x[jx] += xi * cmplx.Conj(a[i*lda+j]) - jx += incX - } - ix -= incX - } - } - } else { - if incX == 1 { - for i := 0; i < n; i++ { - for j := 0; j < i; j++ { - x[j] += x[i] * cmplx.Conj(a[i*lda+j]) - } - if diag == blas.NonUnit { - x[i] *= cmplx.Conj(a[i*lda+i]) - } - } - } else { - ix := kx - for i := 0; i < n; i++ { - jx := kx - for j := 0; j < i; j++ { - x[jx] += x[ix] * cmplx.Conj(a[i*lda+j]) - jx += incX - } - if diag == blas.NonUnit { - x[ix] *= cmplx.Conj(a[i*lda+i]) - } - ix += incX - } - } - } -} - -// Ctrsv solves one of the systems of equations -// A * x = b if trans == blas.NoTrans -// A^T * x = b if trans == blas.Trans -// A^H * x = b if trans == blas.ConjTrans -// where b and x are n element vectors and A is an n×n triangular matrix. -// -// On entry, x contains the values of b, and the solution is -// stored in-place into x. -// -// No test for singularity or near-singularity is included in this -// routine. Such tests must be performed before calling this routine. -// -// Complex64 implementations are autogenerated and not directly tested. -func (Implementation) Ctrsv(uplo blas.Uplo, trans blas.Transpose, diag blas.Diag, n int, a []complex64, lda int, x []complex64, incX int) { - switch trans { - default: - panic(badTranspose) - case blas.NoTrans, blas.Trans, blas.ConjTrans: - } - switch uplo { - default: - panic(badUplo) - case blas.Upper, blas.Lower: - } - switch diag { - default: - panic(badDiag) - case blas.NonUnit, blas.Unit: - } - if n < 0 { - panic(nLT0) - } - if lda < max(1, n) { - panic(badLdA) - } - if incX == 0 { - panic(zeroIncX) - } - - // Quick return if possible. - if n == 0 { - return - } - - // For zero matrix size the following slice length checks are trivially satisfied. - if len(a) < lda*(n-1)+n { - panic(shortA) - } - if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { - panic(shortX) - } - - // Set up start index in X. - var kx int - if incX < 0 { - kx = (1 - n) * incX - } - - // The elements of A are accessed sequentially with one pass through A. - - if trans == blas.NoTrans { - // Form x = inv(A)*x. 
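The dense pair composes the same way; a round-trip sketch:

    var impl gonum.Implementation
    a := []complex64{1, 2, 0, 3} // upper triangle, lda = 2
    x := []complex64{1, 1}
    impl.Ctrmv(blas.Upper, blas.NoTrans, blas.NonUnit, 2, a, 2, x, 1) // x = [3 3]
    impl.Ctrsv(blas.Upper, blas.NoTrans, blas.NonUnit, 2, a, 2, x, 1) // x = [1 1] again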
- if uplo == blas.Upper { - if incX == 1 { - for i := n - 1; i >= 0; i-- { - aii := a[i*lda+i] - if n-i-1 > 0 { - x[i] -= c64.DotuUnitary(x[i+1:n], a[i*lda+i+1:i*lda+n]) - } - if diag == blas.NonUnit { - x[i] /= aii - } - } - } else { - ix := kx + (n-1)*incX - for i := n - 1; i >= 0; i-- { - aii := a[i*lda+i] - if n-i-1 > 0 { - x[ix] -= c64.DotuInc(x, a[i*lda+i+1:i*lda+n], uintptr(n-i-1), uintptr(incX), 1, uintptr(ix+incX), 0) - } - if diag == blas.NonUnit { - x[ix] /= aii - } - ix -= incX - } - } - } else { - if incX == 1 { - for i := 0; i < n; i++ { - if i > 0 { - x[i] -= c64.DotuUnitary(x[:i], a[i*lda:i*lda+i]) - } - if diag == blas.NonUnit { - x[i] /= a[i*lda+i] - } - } - } else { - ix := kx - for i := 0; i < n; i++ { - if i > 0 { - x[ix] -= c64.DotuInc(x, a[i*lda:i*lda+i], uintptr(i), uintptr(incX), 1, uintptr(kx), 0) - } - if diag == blas.NonUnit { - x[ix] /= a[i*lda+i] - } - ix += incX - } - } - } - return - } - - if trans == blas.Trans { - // Form x = inv(A^T)*x. - if uplo == blas.Upper { - if incX == 1 { - for j := 0; j < n; j++ { - if diag == blas.NonUnit { - x[j] /= a[j*lda+j] - } - if n-j-1 > 0 { - c64.AxpyUnitary(-x[j], a[j*lda+j+1:j*lda+n], x[j+1:n]) - } - } - } else { - jx := kx - for j := 0; j < n; j++ { - if diag == blas.NonUnit { - x[jx] /= a[j*lda+j] - } - if n-j-1 > 0 { - c64.AxpyInc(-x[jx], a[j*lda+j+1:j*lda+n], x, uintptr(n-j-1), 1, uintptr(incX), 0, uintptr(jx+incX)) - } - jx += incX - } - } - } else { - if incX == 1 { - for j := n - 1; j >= 0; j-- { - if diag == blas.NonUnit { - x[j] /= a[j*lda+j] - } - xj := x[j] - if j > 0 { - c64.AxpyUnitary(-xj, a[j*lda:j*lda+j], x[:j]) - } - } - } else { - jx := kx + (n-1)*incX - for j := n - 1; j >= 0; j-- { - if diag == blas.NonUnit { - x[jx] /= a[j*lda+j] - } - if j > 0 { - c64.AxpyInc(-x[jx], a[j*lda:j*lda+j], x, uintptr(j), 1, uintptr(incX), 0, uintptr(kx)) - } - jx -= incX - } - } - } - return - } - - // Form x = inv(A^H)*x. - if uplo == blas.Upper { - if incX == 1 { - for j := 0; j < n; j++ { - if diag == blas.NonUnit { - x[j] /= cmplx.Conj(a[j*lda+j]) - } - xj := x[j] - for i := j + 1; i < n; i++ { - x[i] -= xj * cmplx.Conj(a[j*lda+i]) - } - } - } else { - jx := kx - for j := 0; j < n; j++ { - if diag == blas.NonUnit { - x[jx] /= cmplx.Conj(a[j*lda+j]) - } - xj := x[jx] - ix := jx + incX - for i := j + 1; i < n; i++ { - x[ix] -= xj * cmplx.Conj(a[j*lda+i]) - ix += incX - } - jx += incX - } - } - } else { - if incX == 1 { - for j := n - 1; j >= 0; j-- { - if diag == blas.NonUnit { - x[j] /= cmplx.Conj(a[j*lda+j]) - } - xj := x[j] - for i := 0; i < j; i++ { - x[i] -= xj * cmplx.Conj(a[j*lda+i]) - } - } - } else { - jx := kx + (n-1)*incX - for j := n - 1; j >= 0; j-- { - if diag == blas.NonUnit { - x[jx] /= cmplx.Conj(a[j*lda+j]) - } - xj := x[jx] - ix := kx - for i := 0; i < j; i++ { - x[ix] -= xj * cmplx.Conj(a[j*lda+i]) - ix += incX - } - jx -= incX - } - } - } -} diff --git a/vendor/gonum.org/v1/gonum/blas/gonum/level2float32.go b/vendor/gonum.org/v1/gonum/blas/gonum/level2float32.go deleted file mode 100644 index 08e1927f7..000000000 --- a/vendor/gonum.org/v1/gonum/blas/gonum/level2float32.go +++ /dev/null @@ -1,2296 +0,0 @@ -// Code generated by "go generate gonum.org/v1/gonum/blas/gonum”; DO NOT EDIT. - -// Copyright ©2014 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package gonum - -import ( - "gonum.org/v1/gonum/blas" - "gonum.org/v1/gonum/internal/asm/f32" -) - -var _ blas.Float32Level2 = Implementation{} - -// Sger performs the rank-one operation -// A += alpha * x * y^T -// where A is an m×n dense matrix, x and y are vectors, and alpha is a scalar. -// -// Float32 implementations are autogenerated and not directly tested. -func (Implementation) Sger(m, n int, alpha float32, x []float32, incX int, y []float32, incY int, a []float32, lda int) { - if m < 0 { - panic(mLT0) - } - if n < 0 { - panic(nLT0) - } - if lda < max(1, n) { - panic(badLdA) - } - if incX == 0 { - panic(zeroIncX) - } - if incY == 0 { - panic(zeroIncY) - } - - // Quick return if possible. - if m == 0 || n == 0 { - return - } - - // For zero matrix size the following slice length checks are trivially satisfied. - if (incX > 0 && len(x) <= (m-1)*incX) || (incX < 0 && len(x) <= (1-m)*incX) { - panic(shortX) - } - if (incY > 0 && len(y) <= (n-1)*incY) || (incY < 0 && len(y) <= (1-n)*incY) { - panic(shortY) - } - if len(a) < lda*(m-1)+n { - panic(shortA) - } - - // Quick return if possible. - if alpha == 0 { - return - } - f32.Ger(uintptr(m), uintptr(n), - alpha, - x, uintptr(incX), - y, uintptr(incY), - a, uintptr(lda)) -} - -// Sgbmv performs one of the matrix-vector operations -// y = alpha * A * x + beta * y if tA == blas.NoTrans -// y = alpha * A^T * x + beta * y if tA == blas.Trans or blas.ConjTrans -// where A is an m×n band matrix with kL sub-diagonals and kU super-diagonals, -// x and y are vectors, and alpha and beta are scalars. -// -// Float32 implementations are autogenerated and not directly tested. -func (Implementation) Sgbmv(tA blas.Transpose, m, n, kL, kU int, alpha float32, a []float32, lda int, x []float32, incX int, beta float32, y []float32, incY int) { - if tA != blas.NoTrans && tA != blas.Trans && tA != blas.ConjTrans { - panic(badTranspose) - } - if m < 0 { - panic(mLT0) - } - if n < 0 { - panic(nLT0) - } - if kL < 0 { - panic(kLLT0) - } - if kU < 0 { - panic(kULT0) - } - if lda < kL+kU+1 { - panic(badLdA) - } - if incX == 0 { - panic(zeroIncX) - } - if incY == 0 { - panic(zeroIncY) - } - - // Quick return if possible. - if m == 0 || n == 0 { - return - } - - // For zero matrix size the following slice length checks are trivially satisfied. - if len(a) < lda*(min(m, n+kL)-1)+kL+kU+1 { - panic(shortA) - } - lenX := m - lenY := n - if tA == blas.NoTrans { - lenX = n - lenY = m - } - if (incX > 0 && len(x) <= (lenX-1)*incX) || (incX < 0 && len(x) <= (1-lenX)*incX) { - panic(shortX) - } - if (incY > 0 && len(y) <= (lenY-1)*incY) || (incY < 0 && len(y) <= (1-lenY)*incY) { - panic(shortY) - } - - // Quick return if possible. - if alpha == 0 && beta == 1 { - return - } - - var kx, ky int - if incX < 0 { - kx = -(lenX - 1) * incX - } - if incY < 0 { - ky = -(lenY - 1) * incY - } - - // Form y = beta * y. - if beta != 1 { - if incY == 1 { - if beta == 0 { - for i := range y[:lenY] { - y[i] = 0 - } - } else { - f32.ScalUnitary(beta, y[:lenY]) - } - } else { - iy := ky - if beta == 0 { - for i := 0; i < lenY; i++ { - y[iy] = 0 - iy += incY - } - } else { - if incY > 0 { - f32.ScalInc(beta, y, uintptr(lenY), uintptr(incY)) - } else { - f32.ScalInc(beta, y, uintptr(lenY), uintptr(-incY)) - } - } - } - } - - if alpha == 0 { - return - } - - // i and j are indices of the compacted banded matrix. 
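The offsets used here imply the usual row-major band layout: dense element (i, j) lives at a[i*lda + j - i + kL]. A float32 sketch for a 3×3 tridiagonal matrix (kL = kU = 1), with the same imports as before:

    var impl gonum.Implementation
    // A = [1 2 0]
    //     [3 4 5]
    //     [0 6 7]
    a := []float32{
    	0, 1, 2, // row 0: padding, diagonal, superdiagonal
    	3, 4, 5, // row 1: subdiagonal, diagonal, superdiagonal
    	6, 7, 0, // row 2: subdiagonal, diagonal, padding
    } // lda = kL + kU + 1 = 3
    x := []float32{1, 1, 1}
    y := make([]float32, 3)
    impl.Sgbmv(blas.NoTrans, 3, 3, 1, 1, 1, a, 3, x, 1, 0, y, 1)
    // y[i] is the i-th row sum of A: [3 12 13].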
- // off is the offset into the dense matrix (off + j = densej) - nCol := kU + 1 + kL - if tA == blas.NoTrans { - iy := ky - if incX == 1 { - for i := 0; i < min(m, n+kL); i++ { - l := max(0, kL-i) - u := min(nCol, n+kL-i) - off := max(0, i-kL) - atmp := a[i*lda+l : i*lda+u] - xtmp := x[off : off+u-l] - var sum float32 - for j, v := range atmp { - sum += xtmp[j] * v - } - y[iy] += sum * alpha - iy += incY - } - return - } - for i := 0; i < min(m, n+kL); i++ { - l := max(0, kL-i) - u := min(nCol, n+kL-i) - off := max(0, i-kL) - atmp := a[i*lda+l : i*lda+u] - jx := kx - var sum float32 - for _, v := range atmp { - sum += x[off*incX+jx] * v - jx += incX - } - y[iy] += sum * alpha - iy += incY - } - return - } - if incX == 1 { - for i := 0; i < min(m, n+kL); i++ { - l := max(0, kL-i) - u := min(nCol, n+kL-i) - off := max(0, i-kL) - atmp := a[i*lda+l : i*lda+u] - tmp := alpha * x[i] - jy := ky - for _, v := range atmp { - y[jy+off*incY] += tmp * v - jy += incY - } - } - return - } - ix := kx - for i := 0; i < min(m, n+kL); i++ { - l := max(0, kL-i) - u := min(nCol, n+kL-i) - off := max(0, i-kL) - atmp := a[i*lda+l : i*lda+u] - tmp := alpha * x[ix] - jy := ky - for _, v := range atmp { - y[jy+off*incY] += tmp * v - jy += incY - } - ix += incX - } -} - -// Strmv performs one of the matrix-vector operations -// x = A * x if tA == blas.NoTrans -// x = A^T * x if tA == blas.Trans or blas.ConjTrans -// where A is an n×n triangular matrix, and x is a vector. -// -// Float32 implementations are autogenerated and not directly tested. -func (Implementation) Strmv(ul blas.Uplo, tA blas.Transpose, d blas.Diag, n int, a []float32, lda int, x []float32, incX int) { - if ul != blas.Lower && ul != blas.Upper { - panic(badUplo) - } - if tA != blas.NoTrans && tA != blas.Trans && tA != blas.ConjTrans { - panic(badTranspose) - } - if d != blas.NonUnit && d != blas.Unit { - panic(badDiag) - } - if n < 0 { - panic(nLT0) - } - if lda < max(1, n) { - panic(badLdA) - } - if incX == 0 { - panic(zeroIncX) - } - - // Quick return if possible. - if n == 0 { - return - } - - // For zero matrix size the following slice length checks are trivially satisfied. - if len(a) < lda*(n-1)+n { - panic(shortA) - } - if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { - panic(shortX) - } - - nonUnit := d != blas.Unit - if n == 1 { - if nonUnit { - x[0] *= a[0] - } - return - } - var kx int - if incX <= 0 { - kx = -(n - 1) * incX - } - if tA == blas.NoTrans { - if ul == blas.Upper { - if incX == 1 { - for i := 0; i < n; i++ { - ilda := i * lda - var tmp float32 - if nonUnit { - tmp = a[ilda+i] * x[i] - } else { - tmp = x[i] - } - x[i] = tmp + f32.DotUnitary(a[ilda+i+1:ilda+n], x[i+1:n]) - } - return - } - ix := kx - for i := 0; i < n; i++ { - ilda := i * lda - var tmp float32 - if nonUnit { - tmp = a[ilda+i] * x[ix] - } else { - tmp = x[ix] - } - x[ix] = tmp + f32.DotInc(x, a[ilda+i+1:ilda+n], uintptr(n-i-1), uintptr(incX), 1, uintptr(ix+incX), 0) - ix += incX - } - return - } - if incX == 1 { - for i := n - 1; i >= 0; i-- { - ilda := i * lda - var tmp float32 - if nonUnit { - tmp += a[ilda+i] * x[i] - } else { - tmp = x[i] - } - x[i] = tmp + f32.DotUnitary(a[ilda:ilda+i], x[:i]) - } - return - } - ix := kx + (n-1)*incX - for i := n - 1; i >= 0; i-- { - ilda := i * lda - var tmp float32 - if nonUnit { - tmp = a[ilda+i] * x[ix] - } else { - tmp = x[ix] - } - x[ix] = tmp + f32.DotInc(x, a[ilda:ilda+i], uintptr(i), uintptr(incX), 1, uintptr(kx), 0) - ix -= incX - } - return - } - // Cases where a is transposed. 
- if ul == blas.Upper { - if incX == 1 { - for i := n - 1; i >= 0; i-- { - ilda := i * lda - xi := x[i] - f32.AxpyUnitary(xi, a[ilda+i+1:ilda+n], x[i+1:n]) - if nonUnit { - x[i] *= a[ilda+i] - } - } - return - } - ix := kx + (n-1)*incX - for i := n - 1; i >= 0; i-- { - ilda := i * lda - xi := x[ix] - f32.AxpyInc(xi, a[ilda+i+1:ilda+n], x, uintptr(n-i-1), 1, uintptr(incX), 0, uintptr(kx+(i+1)*incX)) - if nonUnit { - x[ix] *= a[ilda+i] - } - ix -= incX - } - return - } - if incX == 1 { - for i := 0; i < n; i++ { - ilda := i * lda - xi := x[i] - f32.AxpyUnitary(xi, a[ilda:ilda+i], x[:i]) - if nonUnit { - x[i] *= a[i*lda+i] - } - } - return - } - ix := kx - for i := 0; i < n; i++ { - ilda := i * lda - xi := x[ix] - f32.AxpyInc(xi, a[ilda:ilda+i], x, uintptr(i), 1, uintptr(incX), 0, uintptr(kx)) - if nonUnit { - x[ix] *= a[ilda+i] - } - ix += incX - } -} - -// Strsv solves one of the systems of equations -// A * x = b if tA == blas.NoTrans -// A^T * x = b if tA == blas.Trans or blas.ConjTrans -// where A is an n×n triangular matrix, and x and b are vectors. -// -// At entry to the function, x contains the values of b, and the result is -// stored in-place into x. -// -// No test for singularity or near-singularity is included in this -// routine. Such tests must be performed before calling this routine. -// -// Float32 implementations are autogenerated and not directly tested. -func (Implementation) Strsv(ul blas.Uplo, tA blas.Transpose, d blas.Diag, n int, a []float32, lda int, x []float32, incX int) { - if ul != blas.Lower && ul != blas.Upper { - panic(badUplo) - } - if tA != blas.NoTrans && tA != blas.Trans && tA != blas.ConjTrans { - panic(badTranspose) - } - if d != blas.NonUnit && d != blas.Unit { - panic(badDiag) - } - if n < 0 { - panic(nLT0) - } - if lda < max(1, n) { - panic(badLdA) - } - if incX == 0 { - panic(zeroIncX) - } - - // Quick return if possible. - if n == 0 { - return - } - - // For zero matrix size the following slice length checks are trivially satisfied. - if len(a) < lda*(n-1)+n { - panic(shortA) - } - if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { - panic(shortX) - } - - if n == 1 { - if d == blas.NonUnit { - x[0] /= a[0] - } - return - } - - var kx int - if incX < 0 { - kx = -(n - 1) * incX - } - nonUnit := d == blas.NonUnit - if tA == blas.NoTrans { - if ul == blas.Upper { - if incX == 1 { - for i := n - 1; i >= 0; i-- { - var sum float32 - atmp := a[i*lda+i+1 : i*lda+n] - for j, v := range atmp { - jv := i + j + 1 - sum += x[jv] * v - } - x[i] -= sum - if nonUnit { - x[i] /= a[i*lda+i] - } - } - return - } - ix := kx + (n-1)*incX - for i := n - 1; i >= 0; i-- { - var sum float32 - jx := ix + incX - atmp := a[i*lda+i+1 : i*lda+n] - for _, v := range atmp { - sum += x[jx] * v - jx += incX - } - x[ix] -= sum - if nonUnit { - x[ix] /= a[i*lda+i] - } - ix -= incX - } - return - } - if incX == 1 { - for i := 0; i < n; i++ { - var sum float32 - atmp := a[i*lda : i*lda+i] - for j, v := range atmp { - sum += x[j] * v - } - x[i] -= sum - if nonUnit { - x[i] /= a[i*lda+i] - } - } - return - } - ix := kx - for i := 0; i < n; i++ { - jx := kx - var sum float32 - atmp := a[i*lda : i*lda+i] - for _, v := range atmp { - sum += x[jx] * v - jx += incX - } - x[ix] -= sum - if nonUnit { - x[ix] /= a[i*lda+i] - } - ix += incX - } - return - } - // Cases where a is transposed. 
- if ul == blas.Upper { - if incX == 1 { - for i := 0; i < n; i++ { - if nonUnit { - x[i] /= a[i*lda+i] - } - xi := x[i] - atmp := a[i*lda+i+1 : i*lda+n] - for j, v := range atmp { - jv := j + i + 1 - x[jv] -= v * xi - } - } - return - } - ix := kx - for i := 0; i < n; i++ { - if nonUnit { - x[ix] /= a[i*lda+i] - } - xi := x[ix] - jx := kx + (i+1)*incX - atmp := a[i*lda+i+1 : i*lda+n] - for _, v := range atmp { - x[jx] -= v * xi - jx += incX - } - ix += incX - } - return - } - if incX == 1 { - for i := n - 1; i >= 0; i-- { - if nonUnit { - x[i] /= a[i*lda+i] - } - xi := x[i] - atmp := a[i*lda : i*lda+i] - for j, v := range atmp { - x[j] -= v * xi - } - } - return - } - ix := kx + (n-1)*incX - for i := n - 1; i >= 0; i-- { - if nonUnit { - x[ix] /= a[i*lda+i] - } - xi := x[ix] - jx := kx - atmp := a[i*lda : i*lda+i] - for _, v := range atmp { - x[jx] -= v * xi - jx += incX - } - ix -= incX - } -} - -// Ssymv performs the matrix-vector operation -// y = alpha * A * x + beta * y -// where A is an n×n symmetric matrix, x and y are vectors, and alpha and -// beta are scalars. -// -// Float32 implementations are autogenerated and not directly tested. -func (Implementation) Ssymv(ul blas.Uplo, n int, alpha float32, a []float32, lda int, x []float32, incX int, beta float32, y []float32, incY int) { - if ul != blas.Lower && ul != blas.Upper { - panic(badUplo) - } - if n < 0 { - panic(nLT0) - } - if lda < max(1, n) { - panic(badLdA) - } - if incX == 0 { - panic(zeroIncX) - } - if incY == 0 { - panic(zeroIncY) - } - - // Quick return if possible. - if n == 0 { - return - } - - // For zero matrix size the following slice length checks are trivially satisfied. - if len(a) < lda*(n-1)+n { - panic(shortA) - } - if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { - panic(shortX) - } - if (incY > 0 && len(y) <= (n-1)*incY) || (incY < 0 && len(y) <= (1-n)*incY) { - panic(shortY) - } - - // Quick return if possible. - if alpha == 0 && beta == 1 { - return - } - - // Set up start points - var kx, ky int - if incX < 0 { - kx = -(n - 1) * incX - } - if incY < 0 { - ky = -(n - 1) * incY - } - - // Form y = beta * y - if beta != 1 { - if incY == 1 { - if beta == 0 { - for i := range y[:n] { - y[i] = 0 - } - } else { - f32.ScalUnitary(beta, y[:n]) - } - } else { - iy := ky - if beta == 0 { - for i := 0; i < n; i++ { - y[iy] = 0 - iy += incY - } - } else { - if incY > 0 { - f32.ScalInc(beta, y, uintptr(n), uintptr(incY)) - } else { - f32.ScalInc(beta, y, uintptr(n), uintptr(-incY)) - } - } - } - } - - if alpha == 0 { - return - } - - if n == 1 { - y[0] += alpha * a[0] * x[0] - return - } - - if ul == blas.Upper { - if incX == 1 { - iy := ky - for i := 0; i < n; i++ { - xv := x[i] * alpha - sum := x[i] * a[i*lda+i] - jy := ky + (i+1)*incY - atmp := a[i*lda+i+1 : i*lda+n] - for j, v := range atmp { - jp := j + i + 1 - sum += x[jp] * v - y[jy] += xv * v - jy += incY - } - y[iy] += alpha * sum - iy += incY - } - return - } - ix := kx - iy := ky - for i := 0; i < n; i++ { - xv := x[ix] * alpha - sum := x[ix] * a[i*lda+i] - jx := kx + (i+1)*incX - jy := ky + (i+1)*incY - atmp := a[i*lda+i+1 : i*lda+n] - for _, v := range atmp { - sum += x[jx] * v - y[jy] += xv * v - jx += incX - jy += incY - } - y[iy] += alpha * sum - ix += incX - iy += incY - } - return - } - // Cases where a is lower triangular. 
- if incX == 1 { - iy := ky - for i := 0; i < n; i++ { - jy := ky - xv := alpha * x[i] - atmp := a[i*lda : i*lda+i] - var sum float32 - for j, v := range atmp { - sum += x[j] * v - y[jy] += xv * v - jy += incY - } - sum += x[i] * a[i*lda+i] - sum *= alpha - y[iy] += sum - iy += incY - } - return - } - ix := kx - iy := ky - for i := 0; i < n; i++ { - jx := kx - jy := ky - xv := alpha * x[ix] - atmp := a[i*lda : i*lda+i] - var sum float32 - for _, v := range atmp { - sum += x[jx] * v - y[jy] += xv * v - jx += incX - jy += incY - } - sum += x[ix] * a[i*lda+i] - sum *= alpha - y[iy] += sum - ix += incX - iy += incY - } -} - -// Stbmv performs one of the matrix-vector operations -// x = A * x if tA == blas.NoTrans -// x = A^T * x if tA == blas.Trans or blas.ConjTrans -// where A is an n×n triangular band matrix with k+1 diagonals, and x is a vector. -// -// Float32 implementations are autogenerated and not directly tested. -func (Implementation) Stbmv(ul blas.Uplo, tA blas.Transpose, d blas.Diag, n, k int, a []float32, lda int, x []float32, incX int) { - if ul != blas.Lower && ul != blas.Upper { - panic(badUplo) - } - if tA != blas.NoTrans && tA != blas.Trans && tA != blas.ConjTrans { - panic(badTranspose) - } - if d != blas.NonUnit && d != blas.Unit { - panic(badDiag) - } - if n < 0 { - panic(nLT0) - } - if k < 0 { - panic(kLT0) - } - if lda < k+1 { - panic(badLdA) - } - if incX == 0 { - panic(zeroIncX) - } - - // Quick return if possible. - if n == 0 { - return - } - - // For zero matrix size the following slice length checks are trivially satisfied. - if len(a) < lda*(n-1)+k+1 { - panic(shortA) - } - if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { - panic(shortX) - } - - var kx int - if incX < 0 { - kx = -(n - 1) * incX - } - - nonunit := d != blas.Unit - - if tA == blas.NoTrans { - if ul == blas.Upper { - if incX == 1 { - for i := 0; i < n; i++ { - u := min(1+k, n-i) - var sum float32 - atmp := a[i*lda:] - xtmp := x[i:] - for j := 1; j < u; j++ { - sum += xtmp[j] * atmp[j] - } - if nonunit { - sum += xtmp[0] * atmp[0] - } else { - sum += xtmp[0] - } - x[i] = sum - } - return - } - ix := kx - for i := 0; i < n; i++ { - u := min(1+k, n-i) - var sum float32 - atmp := a[i*lda:] - jx := incX - for j := 1; j < u; j++ { - sum += x[ix+jx] * atmp[j] - jx += incX - } - if nonunit { - sum += x[ix] * atmp[0] - } else { - sum += x[ix] - } - x[ix] = sum - ix += incX - } - return - } - if incX == 1 { - for i := n - 1; i >= 0; i-- { - l := max(0, k-i) - atmp := a[i*lda:] - var sum float32 - for j := l; j < k; j++ { - sum += x[i-k+j] * atmp[j] - } - if nonunit { - sum += x[i] * atmp[k] - } else { - sum += x[i] - } - x[i] = sum - } - return - } - ix := kx + (n-1)*incX - for i := n - 1; i >= 0; i-- { - l := max(0, k-i) - atmp := a[i*lda:] - var sum float32 - jx := l * incX - for j := l; j < k; j++ { - sum += x[ix-k*incX+jx] * atmp[j] - jx += incX - } - if nonunit { - sum += x[ix] * atmp[k] - } else { - sum += x[ix] - } - x[ix] = sum - ix -= incX - } - return - } - if ul == blas.Upper { - if incX == 1 { - for i := n - 1; i >= 0; i-- { - u := k + 1 - if i < u { - u = i + 1 - } - var sum float32 - for j := 1; j < u; j++ { - sum += x[i-j] * a[(i-j)*lda+j] - } - if nonunit { - sum += x[i] * a[i*lda] - } else { - sum += x[i] - } - x[i] = sum - } - return - } - ix := kx + (n-1)*incX - for i := n - 1; i >= 0; i-- { - u := k + 1 - if i < u { - u = i + 1 - } - var sum float32 - jx := incX - for j := 1; j < u; j++ { - sum += x[ix-jx] * a[(i-j)*lda+j] - jx += incX - } - if nonunit { - sum += 
x[ix] * a[i*lda] - } else { - sum += x[ix] - } - x[ix] = sum - ix -= incX - } - return - } - if incX == 1 { - for i := 0; i < n; i++ { - u := k - if i+k >= n { - u = n - i - 1 - } - var sum float32 - for j := 0; j < u; j++ { - sum += x[i+j+1] * a[(i+j+1)*lda+k-j-1] - } - if nonunit { - sum += x[i] * a[i*lda+k] - } else { - sum += x[i] - } - x[i] = sum - } - return - } - ix := kx - for i := 0; i < n; i++ { - u := k - if i+k >= n { - u = n - i - 1 - } - var ( - sum float32 - jx int - ) - for j := 0; j < u; j++ { - sum += x[ix+jx+incX] * a[(i+j+1)*lda+k-j-1] - jx += incX - } - if nonunit { - sum += x[ix] * a[i*lda+k] - } else { - sum += x[ix] - } - x[ix] = sum - ix += incX - } -} - -// Stpmv performs one of the matrix-vector operations -// x = A * x if tA == blas.NoTrans -// x = A^T * x if tA == blas.Trans or blas.ConjTrans -// where A is an n×n triangular matrix in packed format, and x is a vector. -// -// Float32 implementations are autogenerated and not directly tested. -func (Implementation) Stpmv(ul blas.Uplo, tA blas.Transpose, d blas.Diag, n int, ap []float32, x []float32, incX int) { - if ul != blas.Lower && ul != blas.Upper { - panic(badUplo) - } - if tA != blas.NoTrans && tA != blas.Trans && tA != blas.ConjTrans { - panic(badTranspose) - } - if d != blas.NonUnit && d != blas.Unit { - panic(badDiag) - } - if n < 0 { - panic(nLT0) - } - if incX == 0 { - panic(zeroIncX) - } - - // Quick return if possible. - if n == 0 { - return - } - - // For zero matrix size the following slice length checks are trivially satisfied. - if len(ap) < n*(n+1)/2 { - panic(shortAP) - } - if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { - panic(shortX) - } - - var kx int - if incX < 0 { - kx = -(n - 1) * incX - } - - nonUnit := d == blas.NonUnit - var offset int // Offset is the index of (i,i) - if tA == blas.NoTrans { - if ul == blas.Upper { - if incX == 1 { - for i := 0; i < n; i++ { - xi := x[i] - if nonUnit { - xi *= ap[offset] - } - atmp := ap[offset+1 : offset+n-i] - xtmp := x[i+1:] - for j, v := range atmp { - xi += v * xtmp[j] - } - x[i] = xi - offset += n - i - } - return - } - ix := kx - for i := 0; i < n; i++ { - xix := x[ix] - if nonUnit { - xix *= ap[offset] - } - atmp := ap[offset+1 : offset+n-i] - jx := kx + (i+1)*incX - for _, v := range atmp { - xix += v * x[jx] - jx += incX - } - x[ix] = xix - offset += n - i - ix += incX - } - return - } - if incX == 1 { - offset = n*(n+1)/2 - 1 - for i := n - 1; i >= 0; i-- { - xi := x[i] - if nonUnit { - xi *= ap[offset] - } - atmp := ap[offset-i : offset] - for j, v := range atmp { - xi += v * x[j] - } - x[i] = xi - offset -= i + 1 - } - return - } - ix := kx + (n-1)*incX - offset = n*(n+1)/2 - 1 - for i := n - 1; i >= 0; i-- { - xix := x[ix] - if nonUnit { - xix *= ap[offset] - } - atmp := ap[offset-i : offset] - jx := kx - for _, v := range atmp { - xix += v * x[jx] - jx += incX - } - x[ix] = xix - offset -= i + 1 - ix -= incX - } - return - } - // Cases where ap is transposed. 
- if ul == blas.Upper { - if incX == 1 { - offset = n*(n+1)/2 - 1 - for i := n - 1; i >= 0; i-- { - xi := x[i] - atmp := ap[offset+1 : offset+n-i] - xtmp := x[i+1:] - for j, v := range atmp { - xtmp[j] += v * xi - } - if nonUnit { - x[i] *= ap[offset] - } - offset -= n - i + 1 - } - return - } - ix := kx + (n-1)*incX - offset = n*(n+1)/2 - 1 - for i := n - 1; i >= 0; i-- { - xix := x[ix] - jx := kx + (i+1)*incX - atmp := ap[offset+1 : offset+n-i] - for _, v := range atmp { - x[jx] += v * xix - jx += incX - } - if nonUnit { - x[ix] *= ap[offset] - } - offset -= n - i + 1 - ix -= incX - } - return - } - if incX == 1 { - for i := 0; i < n; i++ { - xi := x[i] - atmp := ap[offset-i : offset] - for j, v := range atmp { - x[j] += v * xi - } - if nonUnit { - x[i] *= ap[offset] - } - offset += i + 2 - } - return - } - ix := kx - for i := 0; i < n; i++ { - xix := x[ix] - jx := kx - atmp := ap[offset-i : offset] - for _, v := range atmp { - x[jx] += v * xix - jx += incX - } - if nonUnit { - x[ix] *= ap[offset] - } - ix += incX - offset += i + 2 - } -} - -// Stbsv solves one of the systems of equations -// A * x = b if tA == blas.NoTrans -// A^T * x = b if tA == blas.Trans or tA == blas.ConjTrans -// where A is an n×n triangular band matrix with k+1 diagonals, -// and x and b are vectors. -// -// At entry to the function, x contains the values of b, and the result is -// stored in-place into x. -// -// No test for singularity or near-singularity is included in this -// routine. Such tests must be performed before calling this routine. -// -// Float32 implementations are autogenerated and not directly tested. -func (Implementation) Stbsv(ul blas.Uplo, tA blas.Transpose, d blas.Diag, n, k int, a []float32, lda int, x []float32, incX int) { - if ul != blas.Lower && ul != blas.Upper { - panic(badUplo) - } - if tA != blas.NoTrans && tA != blas.Trans && tA != blas.ConjTrans { - panic(badTranspose) - } - if d != blas.NonUnit && d != blas.Unit { - panic(badDiag) - } - if n < 0 { - panic(nLT0) - } - if k < 0 { - panic(kLT0) - } - if lda < k+1 { - panic(badLdA) - } - if incX == 0 { - panic(zeroIncX) - } - - // Quick return if possible. - if n == 0 { - return - } - - // For zero matrix size the following slice length checks are trivially satisfied. - if len(a) < lda*(n-1)+k+1 { - panic(shortA) - } - if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { - panic(shortX) - } - - var kx int - if incX < 0 { - kx = -(n - 1) * incX - } - nonUnit := d == blas.NonUnit - // Form x = A^-1 x. - // Several cases below use subslices for speed improvement. - // The incX != 1 cases usually do not because incX may be negative. 
- if tA == blas.NoTrans { - if ul == blas.Upper { - if incX == 1 { - for i := n - 1; i >= 0; i-- { - bands := k - if i+bands >= n { - bands = n - i - 1 - } - atmp := a[i*lda+1:] - xtmp := x[i+1 : i+bands+1] - var sum float32 - for j, v := range xtmp { - sum += v * atmp[j] - } - x[i] -= sum - if nonUnit { - x[i] /= a[i*lda] - } - } - return - } - ix := kx + (n-1)*incX - for i := n - 1; i >= 0; i-- { - max := k + 1 - if i+max > n { - max = n - i - } - atmp := a[i*lda:] - var ( - jx int - sum float32 - ) - for j := 1; j < max; j++ { - jx += incX - sum += x[ix+jx] * atmp[j] - } - x[ix] -= sum - if nonUnit { - x[ix] /= atmp[0] - } - ix -= incX - } - return - } - if incX == 1 { - for i := 0; i < n; i++ { - bands := k - if i-k < 0 { - bands = i - } - atmp := a[i*lda+k-bands:] - xtmp := x[i-bands : i] - var sum float32 - for j, v := range xtmp { - sum += v * atmp[j] - } - x[i] -= sum - if nonUnit { - x[i] /= atmp[bands] - } - } - return - } - ix := kx - for i := 0; i < n; i++ { - bands := k - if i-k < 0 { - bands = i - } - atmp := a[i*lda+k-bands:] - var ( - sum float32 - jx int - ) - for j := 0; j < bands; j++ { - sum += x[ix-bands*incX+jx] * atmp[j] - jx += incX - } - x[ix] -= sum - if nonUnit { - x[ix] /= atmp[bands] - } - ix += incX - } - return - } - // Cases where a is transposed. - if ul == blas.Upper { - if incX == 1 { - for i := 0; i < n; i++ { - bands := k - if i-k < 0 { - bands = i - } - var sum float32 - for j := 0; j < bands; j++ { - sum += x[i-bands+j] * a[(i-bands+j)*lda+bands-j] - } - x[i] -= sum - if nonUnit { - x[i] /= a[i*lda] - } - } - return - } - ix := kx - for i := 0; i < n; i++ { - bands := k - if i-k < 0 { - bands = i - } - var ( - sum float32 - jx int - ) - for j := 0; j < bands; j++ { - sum += x[ix-bands*incX+jx] * a[(i-bands+j)*lda+bands-j] - jx += incX - } - x[ix] -= sum - if nonUnit { - x[ix] /= a[i*lda] - } - ix += incX - } - return - } - if incX == 1 { - for i := n - 1; i >= 0; i-- { - bands := k - if i+bands >= n { - bands = n - i - 1 - } - var sum float32 - xtmp := x[i+1 : i+1+bands] - for j, v := range xtmp { - sum += v * a[(i+j+1)*lda+k-j-1] - } - x[i] -= sum - if nonUnit { - x[i] /= a[i*lda+k] - } - } - return - } - ix := kx + (n-1)*incX - for i := n - 1; i >= 0; i-- { - bands := k - if i+bands >= n { - bands = n - i - 1 - } - var ( - sum float32 - jx int - ) - for j := 0; j < bands; j++ { - sum += x[ix+jx+incX] * a[(i+j+1)*lda+k-j-1] - jx += incX - } - x[ix] -= sum - if nonUnit { - x[ix] /= a[i*lda+k] - } - ix -= incX - } -} - -// Ssbmv performs the matrix-vector operation -// y = alpha * A * x + beta * y -// where A is an n×n symmetric band matrix with k super-diagonals, x and y are -// vectors, and alpha and beta are scalars. -// -// Float32 implementations are autogenerated and not directly tested. -func (Implementation) Ssbmv(ul blas.Uplo, n, k int, alpha float32, a []float32, lda int, x []float32, incX int, beta float32, y []float32, incY int) { - if ul != blas.Lower && ul != blas.Upper { - panic(badUplo) - } - if n < 0 { - panic(nLT0) - } - if k < 0 { - panic(kLT0) - } - if lda < k+1 { - panic(badLdA) - } - if incX == 0 { - panic(zeroIncX) - } - if incY == 0 { - panic(zeroIncY) - } - - // Quick return if possible. - if n == 0 { - return - } - - // For zero matrix size the following slice length checks are trivially satisfied. 
- if len(a) < lda*(n-1)+k+1 {
- panic(shortA)
- }
- if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) {
- panic(shortX)
- }
- if (incY > 0 && len(y) <= (n-1)*incY) || (incY < 0 && len(y) <= (1-n)*incY) {
- panic(shortY)
- }
-
- // Quick return if possible.
- if alpha == 0 && beta == 1 {
- return
- }
-
- // Set up indexes
- lenX := n
- lenY := n
- var kx, ky int
- if incX < 0 {
- kx = -(lenX - 1) * incX
- }
- if incY < 0 {
- ky = -(lenY - 1) * incY
- }
-
- // Form y = beta * y.
- if beta != 1 {
- if incY == 1 {
- if beta == 0 {
- for i := range y[:n] {
- y[i] = 0
- }
- } else {
- f32.ScalUnitary(beta, y[:n])
- }
- } else {
- iy := ky
- if beta == 0 {
- for i := 0; i < n; i++ {
- y[iy] = 0
- iy += incY
- }
- } else {
- if incY > 0 {
- f32.ScalInc(beta, y, uintptr(n), uintptr(incY))
- } else {
- f32.ScalInc(beta, y, uintptr(n), uintptr(-incY))
- }
- }
- }
- }
-
- if alpha == 0 {
- return
- }
-
- if ul == blas.Upper {
- if incX == 1 {
- iy := ky
- for i := 0; i < n; i++ {
- atmp := a[i*lda:]
- tmp := alpha * x[i]
- sum := tmp * atmp[0]
- u := min(k, n-i-1)
- jy := incY
- for j := 1; j <= u; j++ {
- v := atmp[j]
- sum += alpha * x[i+j] * v
- y[iy+jy] += tmp * v
- jy += incY
- }
- y[iy] += sum
- iy += incY
- }
- return
- }
- ix := kx
- iy := ky
- for i := 0; i < n; i++ {
- atmp := a[i*lda:]
- tmp := alpha * x[ix]
- sum := tmp * atmp[0]
- u := min(k, n-i-1)
- jx := incX
- jy := incY
- for j := 1; j <= u; j++ {
- v := atmp[j]
- sum += alpha * x[ix+jx] * v
- y[iy+jy] += tmp * v
- jx += incX
- jy += incY
- }
- y[iy] += sum
- ix += incX
- iy += incY
- }
- return
- }
-
- // Cases where a has bands below the diagonal.
- if incX == 1 {
- iy := ky
- for i := 0; i < n; i++ {
- l := max(0, k-i)
- tmp := alpha * x[i]
- jy := l * incY
- atmp := a[i*lda:]
- for j := l; j < k; j++ {
- v := atmp[j]
- y[iy] += alpha * v * x[i-k+j]
- y[iy-k*incY+jy] += tmp * v
- jy += incY
- }
- y[iy] += tmp * atmp[k]
- iy += incY
- }
- return
- }
- ix := kx
- iy := ky
- for i := 0; i < n; i++ {
- l := max(0, k-i)
- tmp := alpha * x[ix]
- jx := l * incX
- jy := l * incY
- atmp := a[i*lda:]
- for j := l; j < k; j++ {
- v := atmp[j]
- y[iy] += alpha * v * x[ix-k*incX+jx]
- y[iy-k*incY+jy] += tmp * v
- jx += incX
- jy += incY
- }
- y[iy] += tmp * atmp[k]
- ix += incX
- iy += incY
- }
-}
-
-// Ssyr performs the symmetric rank-one update
-// A += alpha * x * x^T
-// where A is an n×n symmetric matrix, and x is a vector.
-//
-// Float32 implementations are autogenerated and not directly tested.
-func (Implementation) Ssyr(ul blas.Uplo, n int, alpha float32, x []float32, incX int, a []float32, lda int) {
- if ul != blas.Lower && ul != blas.Upper {
- panic(badUplo)
- }
- if n < 0 {
- panic(nLT0)
- }
- if lda < max(1, n) {
- panic(badLdA)
- }
- if incX == 0 {
- panic(zeroIncX)
- }
-
- // Quick return if possible.
- if n == 0 {
- return
- }
-
- // For zero matrix size the following slice length checks are trivially satisfied.
- if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) {
- panic(shortX)
- }
- if len(a) < lda*(n-1)+n {
- panic(shortA)
- }
-
- // Quick return if possible.
- if alpha == 0 { - return - } - - lenX := n - var kx int - if incX < 0 { - kx = -(lenX - 1) * incX - } - if ul == blas.Upper { - if incX == 1 { - for i := 0; i < n; i++ { - tmp := x[i] * alpha - if tmp != 0 { - atmp := a[i*lda+i : i*lda+n] - xtmp := x[i:n] - for j, v := range xtmp { - atmp[j] += v * tmp - } - } - } - return - } - ix := kx - for i := 0; i < n; i++ { - tmp := x[ix] * alpha - if tmp != 0 { - jx := ix - atmp := a[i*lda:] - for j := i; j < n; j++ { - atmp[j] += x[jx] * tmp - jx += incX - } - } - ix += incX - } - return - } - // Cases where a is lower triangular. - if incX == 1 { - for i := 0; i < n; i++ { - tmp := x[i] * alpha - if tmp != 0 { - atmp := a[i*lda:] - xtmp := x[:i+1] - for j, v := range xtmp { - atmp[j] += tmp * v - } - } - } - return - } - ix := kx - for i := 0; i < n; i++ { - tmp := x[ix] * alpha - if tmp != 0 { - atmp := a[i*lda:] - jx := kx - for j := 0; j < i+1; j++ { - atmp[j] += tmp * x[jx] - jx += incX - } - } - ix += incX - } -} - -// Ssyr2 performs the symmetric rank-two update -// A += alpha * x * y^T + alpha * y * x^T -// where A is an n×n symmetric matrix, x and y are vectors, and alpha is a scalar. -// -// Float32 implementations are autogenerated and not directly tested. -func (Implementation) Ssyr2(ul blas.Uplo, n int, alpha float32, x []float32, incX int, y []float32, incY int, a []float32, lda int) { - if ul != blas.Lower && ul != blas.Upper { - panic(badUplo) - } - if n < 0 { - panic(nLT0) - } - if lda < max(1, n) { - panic(badLdA) - } - if incX == 0 { - panic(zeroIncX) - } - if incY == 0 { - panic(zeroIncY) - } - - // Quick return if possible. - if n == 0 { - return - } - - // For zero matrix size the following slice length checks are trivially satisfied. - if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { - panic(shortX) - } - if (incY > 0 && len(y) <= (n-1)*incY) || (incY < 0 && len(y) <= (1-n)*incY) { - panic(shortY) - } - if len(a) < lda*(n-1)+n { - panic(shortA) - } - - // Quick return if possible. - if alpha == 0 { - return - } - - var ky, kx int - if incY < 0 { - ky = -(n - 1) * incY - } - if incX < 0 { - kx = -(n - 1) * incX - } - if ul == blas.Upper { - if incX == 1 && incY == 1 { - for i := 0; i < n; i++ { - xi := x[i] - yi := y[i] - atmp := a[i*lda:] - for j := i; j < n; j++ { - atmp[j] += alpha * (xi*y[j] + x[j]*yi) - } - } - return - } - ix := kx - iy := ky - for i := 0; i < n; i++ { - jx := kx + i*incX - jy := ky + i*incY - xi := x[ix] - yi := y[iy] - atmp := a[i*lda:] - for j := i; j < n; j++ { - atmp[j] += alpha * (xi*y[jy] + x[jx]*yi) - jx += incX - jy += incY - } - ix += incX - iy += incY - } - return - } - if incX == 1 && incY == 1 { - for i := 0; i < n; i++ { - xi := x[i] - yi := y[i] - atmp := a[i*lda:] - for j := 0; j <= i; j++ { - atmp[j] += alpha * (xi*y[j] + x[j]*yi) - } - } - return - } - ix := kx - iy := ky - for i := 0; i < n; i++ { - jx := kx - jy := ky - xi := x[ix] - yi := y[iy] - atmp := a[i*lda:] - for j := 0; j <= i; j++ { - atmp[j] += alpha * (xi*y[jy] + x[jx]*yi) - jx += incX - jy += incY - } - ix += incX - iy += incY - } -} - -// Stpsv solves one of the systems of equations -// A * x = b if tA == blas.NoTrans -// A^T * x = b if tA == blas.Trans or blas.ConjTrans -// where A is an n×n triangular matrix in packed format, and x and b are vectors. -// -// At entry to the function, x contains the values of b, and the result is -// stored in-place into x. -// -// No test for singularity or near-singularity is included in this -// routine. 
Such tests must be performed before calling this routine. -// -// Float32 implementations are autogenerated and not directly tested. -func (Implementation) Stpsv(ul blas.Uplo, tA blas.Transpose, d blas.Diag, n int, ap []float32, x []float32, incX int) { - if ul != blas.Lower && ul != blas.Upper { - panic(badUplo) - } - if tA != blas.NoTrans && tA != blas.Trans && tA != blas.ConjTrans { - panic(badTranspose) - } - if d != blas.NonUnit && d != blas.Unit { - panic(badDiag) - } - if n < 0 { - panic(nLT0) - } - if incX == 0 { - panic(zeroIncX) - } - - // Quick return if possible. - if n == 0 { - return - } - - // For zero matrix size the following slice length checks are trivially satisfied. - if len(ap) < n*(n+1)/2 { - panic(shortAP) - } - if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { - panic(shortX) - } - - var kx int - if incX < 0 { - kx = -(n - 1) * incX - } - - nonUnit := d == blas.NonUnit - var offset int // Offset is the index of (i,i) - if tA == blas.NoTrans { - if ul == blas.Upper { - offset = n*(n+1)/2 - 1 - if incX == 1 { - for i := n - 1; i >= 0; i-- { - atmp := ap[offset+1 : offset+n-i] - xtmp := x[i+1:] - var sum float32 - for j, v := range atmp { - sum += v * xtmp[j] - } - x[i] -= sum - if nonUnit { - x[i] /= ap[offset] - } - offset -= n - i + 1 - } - return - } - ix := kx + (n-1)*incX - for i := n - 1; i >= 0; i-- { - atmp := ap[offset+1 : offset+n-i] - jx := kx + (i+1)*incX - var sum float32 - for _, v := range atmp { - sum += v * x[jx] - jx += incX - } - x[ix] -= sum - if nonUnit { - x[ix] /= ap[offset] - } - ix -= incX - offset -= n - i + 1 - } - return - } - if incX == 1 { - for i := 0; i < n; i++ { - atmp := ap[offset-i : offset] - var sum float32 - for j, v := range atmp { - sum += v * x[j] - } - x[i] -= sum - if nonUnit { - x[i] /= ap[offset] - } - offset += i + 2 - } - return - } - ix := kx - for i := 0; i < n; i++ { - jx := kx - atmp := ap[offset-i : offset] - var sum float32 - for _, v := range atmp { - sum += v * x[jx] - jx += incX - } - x[ix] -= sum - if nonUnit { - x[ix] /= ap[offset] - } - ix += incX - offset += i + 2 - } - return - } - // Cases where ap is transposed. - if ul == blas.Upper { - if incX == 1 { - for i := 0; i < n; i++ { - if nonUnit { - x[i] /= ap[offset] - } - xi := x[i] - atmp := ap[offset+1 : offset+n-i] - xtmp := x[i+1:] - for j, v := range atmp { - xtmp[j] -= v * xi - } - offset += n - i - } - return - } - ix := kx - for i := 0; i < n; i++ { - if nonUnit { - x[ix] /= ap[offset] - } - xix := x[ix] - atmp := ap[offset+1 : offset+n-i] - jx := kx + (i+1)*incX - for _, v := range atmp { - x[jx] -= v * xix - jx += incX - } - ix += incX - offset += n - i - } - return - } - if incX == 1 { - offset = n*(n+1)/2 - 1 - for i := n - 1; i >= 0; i-- { - if nonUnit { - x[i] /= ap[offset] - } - xi := x[i] - atmp := ap[offset-i : offset] - for j, v := range atmp { - x[j] -= v * xi - } - offset -= i + 1 - } - return - } - ix := kx + (n-1)*incX - offset = n*(n+1)/2 - 1 - for i := n - 1; i >= 0; i-- { - if nonUnit { - x[ix] /= ap[offset] - } - xix := x[ix] - atmp := ap[offset-i : offset] - jx := kx - for _, v := range atmp { - x[jx] -= v * xix - jx += incX - } - ix -= incX - offset -= i + 1 - } -} - -// Sspmv performs the matrix-vector operation -// y = alpha * A * x + beta * y -// where A is an n×n symmetric matrix in packed format, x and y are vectors, -// and alpha and beta are scalars. -// -// Float32 implementations are autogenerated and not directly tested. 
-func (Implementation) Sspmv(ul blas.Uplo, n int, alpha float32, ap []float32, x []float32, incX int, beta float32, y []float32, incY int) { - if ul != blas.Lower && ul != blas.Upper { - panic(badUplo) - } - if n < 0 { - panic(nLT0) - } - if incX == 0 { - panic(zeroIncX) - } - if incY == 0 { - panic(zeroIncY) - } - - // Quick return if possible. - if n == 0 { - return - } - - // For zero matrix size the following slice length checks are trivially satisfied. - if len(ap) < n*(n+1)/2 { - panic(shortAP) - } - if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { - panic(shortX) - } - if (incY > 0 && len(y) <= (n-1)*incY) || (incY < 0 && len(y) <= (1-n)*incY) { - panic(shortY) - } - - // Quick return if possible. - if alpha == 0 && beta == 1 { - return - } - - // Set up start points - var kx, ky int - if incX < 0 { - kx = -(n - 1) * incX - } - if incY < 0 { - ky = -(n - 1) * incY - } - - // Form y = beta * y. - if beta != 1 { - if incY == 1 { - if beta == 0 { - for i := range y[:n] { - y[i] = 0 - } - } else { - f32.ScalUnitary(beta, y[:n]) - } - } else { - iy := ky - if beta == 0 { - for i := 0; i < n; i++ { - y[iy] = 0 - iy += incY - } - } else { - if incY > 0 { - f32.ScalInc(beta, y, uintptr(n), uintptr(incY)) - } else { - f32.ScalInc(beta, y, uintptr(n), uintptr(-incY)) - } - } - } - } - - if alpha == 0 { - return - } - - if n == 1 { - y[0] += alpha * ap[0] * x[0] - return - } - var offset int // Offset is the index of (i,i). - if ul == blas.Upper { - if incX == 1 { - iy := ky - for i := 0; i < n; i++ { - xv := x[i] * alpha - sum := ap[offset] * x[i] - atmp := ap[offset+1 : offset+n-i] - xtmp := x[i+1:] - jy := ky + (i+1)*incY - for j, v := range atmp { - sum += v * xtmp[j] - y[jy] += v * xv - jy += incY - } - y[iy] += alpha * sum - iy += incY - offset += n - i - } - return - } - ix := kx - iy := ky - for i := 0; i < n; i++ { - xv := x[ix] * alpha - sum := ap[offset] * x[ix] - atmp := ap[offset+1 : offset+n-i] - jx := kx + (i+1)*incX - jy := ky + (i+1)*incY - for _, v := range atmp { - sum += v * x[jx] - y[jy] += v * xv - jx += incX - jy += incY - } - y[iy] += alpha * sum - ix += incX - iy += incY - offset += n - i - } - return - } - if incX == 1 { - iy := ky - for i := 0; i < n; i++ { - xv := x[i] * alpha - atmp := ap[offset-i : offset] - jy := ky - var sum float32 - for j, v := range atmp { - sum += v * x[j] - y[jy] += v * xv - jy += incY - } - sum += ap[offset] * x[i] - y[iy] += alpha * sum - iy += incY - offset += i + 2 - } - return - } - ix := kx - iy := ky - for i := 0; i < n; i++ { - xv := x[ix] * alpha - atmp := ap[offset-i : offset] - jx := kx - jy := ky - var sum float32 - for _, v := range atmp { - sum += v * x[jx] - y[jy] += v * xv - jx += incX - jy += incY - } - - sum += ap[offset] * x[ix] - y[iy] += alpha * sum - ix += incX - iy += incY - offset += i + 2 - } -} - -// Sspr performs the symmetric rank-one operation -// A += alpha * x * x^T -// where A is an n×n symmetric matrix in packed format, x is a vector, and -// alpha is a scalar. -// -// Float32 implementations are autogenerated and not directly tested. -func (Implementation) Sspr(ul blas.Uplo, n int, alpha float32, x []float32, incX int, ap []float32) { - if ul != blas.Lower && ul != blas.Upper { - panic(badUplo) - } - if n < 0 { - panic(nLT0) - } - if incX == 0 { - panic(zeroIncX) - } - - // Quick return if possible. - if n == 0 { - return - } - - // For zero matrix size the following slice length checks are trivially satisfied. 
- if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { - panic(shortX) - } - if len(ap) < n*(n+1)/2 { - panic(shortAP) - } - - // Quick return if possible. - if alpha == 0 { - return - } - - lenX := n - var kx int - if incX < 0 { - kx = -(lenX - 1) * incX - } - var offset int // Offset is the index of (i,i). - if ul == blas.Upper { - if incX == 1 { - for i := 0; i < n; i++ { - atmp := ap[offset:] - xv := alpha * x[i] - xtmp := x[i:n] - for j, v := range xtmp { - atmp[j] += xv * v - } - offset += n - i - } - return - } - ix := kx - for i := 0; i < n; i++ { - jx := kx + i*incX - atmp := ap[offset:] - xv := alpha * x[ix] - for j := 0; j < n-i; j++ { - atmp[j] += xv * x[jx] - jx += incX - } - ix += incX - offset += n - i - } - return - } - if incX == 1 { - for i := 0; i < n; i++ { - atmp := ap[offset-i:] - xv := alpha * x[i] - xtmp := x[:i+1] - for j, v := range xtmp { - atmp[j] += xv * v - } - offset += i + 2 - } - return - } - ix := kx - for i := 0; i < n; i++ { - jx := kx - atmp := ap[offset-i:] - xv := alpha * x[ix] - for j := 0; j <= i; j++ { - atmp[j] += xv * x[jx] - jx += incX - } - ix += incX - offset += i + 2 - } -} - -// Sspr2 performs the symmetric rank-2 update -// A += alpha * x * y^T + alpha * y * x^T -// where A is an n×n symmetric matrix in packed format, x and y are vectors, -// and alpha is a scalar. -// -// Float32 implementations are autogenerated and not directly tested. -func (Implementation) Sspr2(ul blas.Uplo, n int, alpha float32, x []float32, incX int, y []float32, incY int, ap []float32) { - if ul != blas.Lower && ul != blas.Upper { - panic(badUplo) - } - if n < 0 { - panic(nLT0) - } - if incX == 0 { - panic(zeroIncX) - } - if incY == 0 { - panic(zeroIncY) - } - - // Quick return if possible. - if n == 0 { - return - } - - // For zero matrix size the following slice length checks are trivially satisfied. - if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { - panic(shortX) - } - if (incY > 0 && len(y) <= (n-1)*incY) || (incY < 0 && len(y) <= (1-n)*incY) { - panic(shortY) - } - if len(ap) < n*(n+1)/2 { - panic(shortAP) - } - - // Quick return if possible. - if alpha == 0 { - return - } - - var ky, kx int - if incY < 0 { - ky = -(n - 1) * incY - } - if incX < 0 { - kx = -(n - 1) * incX - } - var offset int // Offset is the index of (i,i). 
- if ul == blas.Upper { - if incX == 1 && incY == 1 { - for i := 0; i < n; i++ { - atmp := ap[offset:] - xi := x[i] - yi := y[i] - xtmp := x[i:n] - ytmp := y[i:n] - for j, v := range xtmp { - atmp[j] += alpha * (xi*ytmp[j] + v*yi) - } - offset += n - i - } - return - } - ix := kx - iy := ky - for i := 0; i < n; i++ { - jx := kx + i*incX - jy := ky + i*incY - atmp := ap[offset:] - xi := x[ix] - yi := y[iy] - for j := 0; j < n-i; j++ { - atmp[j] += alpha * (xi*y[jy] + x[jx]*yi) - jx += incX - jy += incY - } - ix += incX - iy += incY - offset += n - i - } - return - } - if incX == 1 && incY == 1 { - for i := 0; i < n; i++ { - atmp := ap[offset-i:] - xi := x[i] - yi := y[i] - xtmp := x[:i+1] - for j, v := range xtmp { - atmp[j] += alpha * (xi*y[j] + v*yi) - } - offset += i + 2 - } - return - } - ix := kx - iy := ky - for i := 0; i < n; i++ { - jx := kx - jy := ky - atmp := ap[offset-i:] - for j := 0; j <= i; j++ { - atmp[j] += alpha * (x[ix]*y[jy] + x[jx]*y[iy]) - jx += incX - jy += incY - } - ix += incX - iy += incY - offset += i + 2 - } -} diff --git a/vendor/gonum.org/v1/gonum/blas/gonum/level2float64.go b/vendor/gonum.org/v1/gonum/blas/gonum/level2float64.go deleted file mode 100644 index 261257888..000000000 --- a/vendor/gonum.org/v1/gonum/blas/gonum/level2float64.go +++ /dev/null @@ -1,2264 +0,0 @@ -// Copyright ©2014 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import ( - "gonum.org/v1/gonum/blas" - "gonum.org/v1/gonum/internal/asm/f64" -) - -var _ blas.Float64Level2 = Implementation{} - -// Dger performs the rank-one operation -// A += alpha * x * y^T -// where A is an m×n dense matrix, x and y are vectors, and alpha is a scalar. -func (Implementation) Dger(m, n int, alpha float64, x []float64, incX int, y []float64, incY int, a []float64, lda int) { - if m < 0 { - panic(mLT0) - } - if n < 0 { - panic(nLT0) - } - if lda < max(1, n) { - panic(badLdA) - } - if incX == 0 { - panic(zeroIncX) - } - if incY == 0 { - panic(zeroIncY) - } - - // Quick return if possible. - if m == 0 || n == 0 { - return - } - - // For zero matrix size the following slice length checks are trivially satisfied. - if (incX > 0 && len(x) <= (m-1)*incX) || (incX < 0 && len(x) <= (1-m)*incX) { - panic(shortX) - } - if (incY > 0 && len(y) <= (n-1)*incY) || (incY < 0 && len(y) <= (1-n)*incY) { - panic(shortY) - } - if len(a) < lda*(m-1)+n { - panic(shortA) - } - - // Quick return if possible. - if alpha == 0 { - return - } - f64.Ger(uintptr(m), uintptr(n), - alpha, - x, uintptr(incX), - y, uintptr(incY), - a, uintptr(lda)) -} - -// Dgbmv performs one of the matrix-vector operations -// y = alpha * A * x + beta * y if tA == blas.NoTrans -// y = alpha * A^T * x + beta * y if tA == blas.Trans or blas.ConjTrans -// where A is an m×n band matrix with kL sub-diagonals and kU super-diagonals, -// x and y are vectors, and alpha and beta are scalars. -func (Implementation) Dgbmv(tA blas.Transpose, m, n, kL, kU int, alpha float64, a []float64, lda int, x []float64, incX int, beta float64, y []float64, incY int) { - if tA != blas.NoTrans && tA != blas.Trans && tA != blas.ConjTrans { - panic(badTranspose) - } - if m < 0 { - panic(mLT0) - } - if n < 0 { - panic(nLT0) - } - if kL < 0 { - panic(kLLT0) - } - if kU < 0 { - panic(kULT0) - } - if lda < kL+kU+1 { - panic(badLdA) - } - if incX == 0 { - panic(zeroIncX) - } - if incY == 0 { - panic(zeroIncY) - } - - // Quick return if possible. 
- if m == 0 || n == 0 { - return - } - - // For zero matrix size the following slice length checks are trivially satisfied. - if len(a) < lda*(min(m, n+kL)-1)+kL+kU+1 { - panic(shortA) - } - lenX := m - lenY := n - if tA == blas.NoTrans { - lenX = n - lenY = m - } - if (incX > 0 && len(x) <= (lenX-1)*incX) || (incX < 0 && len(x) <= (1-lenX)*incX) { - panic(shortX) - } - if (incY > 0 && len(y) <= (lenY-1)*incY) || (incY < 0 && len(y) <= (1-lenY)*incY) { - panic(shortY) - } - - // Quick return if possible. - if alpha == 0 && beta == 1 { - return - } - - var kx, ky int - if incX < 0 { - kx = -(lenX - 1) * incX - } - if incY < 0 { - ky = -(lenY - 1) * incY - } - - // Form y = beta * y. - if beta != 1 { - if incY == 1 { - if beta == 0 { - for i := range y[:lenY] { - y[i] = 0 - } - } else { - f64.ScalUnitary(beta, y[:lenY]) - } - } else { - iy := ky - if beta == 0 { - for i := 0; i < lenY; i++ { - y[iy] = 0 - iy += incY - } - } else { - if incY > 0 { - f64.ScalInc(beta, y, uintptr(lenY), uintptr(incY)) - } else { - f64.ScalInc(beta, y, uintptr(lenY), uintptr(-incY)) - } - } - } - } - - if alpha == 0 { - return - } - - // i and j are indices of the compacted banded matrix. - // off is the offset into the dense matrix (off + j = densej) - nCol := kU + 1 + kL - if tA == blas.NoTrans { - iy := ky - if incX == 1 { - for i := 0; i < min(m, n+kL); i++ { - l := max(0, kL-i) - u := min(nCol, n+kL-i) - off := max(0, i-kL) - atmp := a[i*lda+l : i*lda+u] - xtmp := x[off : off+u-l] - var sum float64 - for j, v := range atmp { - sum += xtmp[j] * v - } - y[iy] += sum * alpha - iy += incY - } - return - } - for i := 0; i < min(m, n+kL); i++ { - l := max(0, kL-i) - u := min(nCol, n+kL-i) - off := max(0, i-kL) - atmp := a[i*lda+l : i*lda+u] - jx := kx - var sum float64 - for _, v := range atmp { - sum += x[off*incX+jx] * v - jx += incX - } - y[iy] += sum * alpha - iy += incY - } - return - } - if incX == 1 { - for i := 0; i < min(m, n+kL); i++ { - l := max(0, kL-i) - u := min(nCol, n+kL-i) - off := max(0, i-kL) - atmp := a[i*lda+l : i*lda+u] - tmp := alpha * x[i] - jy := ky - for _, v := range atmp { - y[jy+off*incY] += tmp * v - jy += incY - } - } - return - } - ix := kx - for i := 0; i < min(m, n+kL); i++ { - l := max(0, kL-i) - u := min(nCol, n+kL-i) - off := max(0, i-kL) - atmp := a[i*lda+l : i*lda+u] - tmp := alpha * x[ix] - jy := ky - for _, v := range atmp { - y[jy+off*incY] += tmp * v - jy += incY - } - ix += incX - } -} - -// Dtrmv performs one of the matrix-vector operations -// x = A * x if tA == blas.NoTrans -// x = A^T * x if tA == blas.Trans or blas.ConjTrans -// where A is an n×n triangular matrix, and x is a vector. -func (Implementation) Dtrmv(ul blas.Uplo, tA blas.Transpose, d blas.Diag, n int, a []float64, lda int, x []float64, incX int) { - if ul != blas.Lower && ul != blas.Upper { - panic(badUplo) - } - if tA != blas.NoTrans && tA != blas.Trans && tA != blas.ConjTrans { - panic(badTranspose) - } - if d != blas.NonUnit && d != blas.Unit { - panic(badDiag) - } - if n < 0 { - panic(nLT0) - } - if lda < max(1, n) { - panic(badLdA) - } - if incX == 0 { - panic(zeroIncX) - } - - // Quick return if possible. - if n == 0 { - return - } - - // For zero matrix size the following slice length checks are trivially satisfied. 
- if len(a) < lda*(n-1)+n { - panic(shortA) - } - if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { - panic(shortX) - } - - nonUnit := d != blas.Unit - if n == 1 { - if nonUnit { - x[0] *= a[0] - } - return - } - var kx int - if incX <= 0 { - kx = -(n - 1) * incX - } - if tA == blas.NoTrans { - if ul == blas.Upper { - if incX == 1 { - for i := 0; i < n; i++ { - ilda := i * lda - var tmp float64 - if nonUnit { - tmp = a[ilda+i] * x[i] - } else { - tmp = x[i] - } - x[i] = tmp + f64.DotUnitary(a[ilda+i+1:ilda+n], x[i+1:n]) - } - return - } - ix := kx - for i := 0; i < n; i++ { - ilda := i * lda - var tmp float64 - if nonUnit { - tmp = a[ilda+i] * x[ix] - } else { - tmp = x[ix] - } - x[ix] = tmp + f64.DotInc(x, a[ilda+i+1:ilda+n], uintptr(n-i-1), uintptr(incX), 1, uintptr(ix+incX), 0) - ix += incX - } - return - } - if incX == 1 { - for i := n - 1; i >= 0; i-- { - ilda := i * lda - var tmp float64 - if nonUnit { - tmp += a[ilda+i] * x[i] - } else { - tmp = x[i] - } - x[i] = tmp + f64.DotUnitary(a[ilda:ilda+i], x[:i]) - } - return - } - ix := kx + (n-1)*incX - for i := n - 1; i >= 0; i-- { - ilda := i * lda - var tmp float64 - if nonUnit { - tmp = a[ilda+i] * x[ix] - } else { - tmp = x[ix] - } - x[ix] = tmp + f64.DotInc(x, a[ilda:ilda+i], uintptr(i), uintptr(incX), 1, uintptr(kx), 0) - ix -= incX - } - return - } - // Cases where a is transposed. - if ul == blas.Upper { - if incX == 1 { - for i := n - 1; i >= 0; i-- { - ilda := i * lda - xi := x[i] - f64.AxpyUnitary(xi, a[ilda+i+1:ilda+n], x[i+1:n]) - if nonUnit { - x[i] *= a[ilda+i] - } - } - return - } - ix := kx + (n-1)*incX - for i := n - 1; i >= 0; i-- { - ilda := i * lda - xi := x[ix] - f64.AxpyInc(xi, a[ilda+i+1:ilda+n], x, uintptr(n-i-1), 1, uintptr(incX), 0, uintptr(kx+(i+1)*incX)) - if nonUnit { - x[ix] *= a[ilda+i] - } - ix -= incX - } - return - } - if incX == 1 { - for i := 0; i < n; i++ { - ilda := i * lda - xi := x[i] - f64.AxpyUnitary(xi, a[ilda:ilda+i], x[:i]) - if nonUnit { - x[i] *= a[i*lda+i] - } - } - return - } - ix := kx - for i := 0; i < n; i++ { - ilda := i * lda - xi := x[ix] - f64.AxpyInc(xi, a[ilda:ilda+i], x, uintptr(i), 1, uintptr(incX), 0, uintptr(kx)) - if nonUnit { - x[ix] *= a[ilda+i] - } - ix += incX - } -} - -// Dtrsv solves one of the systems of equations -// A * x = b if tA == blas.NoTrans -// A^T * x = b if tA == blas.Trans or blas.ConjTrans -// where A is an n×n triangular matrix, and x and b are vectors. -// -// At entry to the function, x contains the values of b, and the result is -// stored in-place into x. -// -// No test for singularity or near-singularity is included in this -// routine. Such tests must be performed before calling this routine. -func (Implementation) Dtrsv(ul blas.Uplo, tA blas.Transpose, d blas.Diag, n int, a []float64, lda int, x []float64, incX int) { - if ul != blas.Lower && ul != blas.Upper { - panic(badUplo) - } - if tA != blas.NoTrans && tA != blas.Trans && tA != blas.ConjTrans { - panic(badTranspose) - } - if d != blas.NonUnit && d != blas.Unit { - panic(badDiag) - } - if n < 0 { - panic(nLT0) - } - if lda < max(1, n) { - panic(badLdA) - } - if incX == 0 { - panic(zeroIncX) - } - - // Quick return if possible. - if n == 0 { - return - } - - // For zero matrix size the following slice length checks are trivially satisfied. 
- if len(a) < lda*(n-1)+n { - panic(shortA) - } - if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { - panic(shortX) - } - - if n == 1 { - if d == blas.NonUnit { - x[0] /= a[0] - } - return - } - - var kx int - if incX < 0 { - kx = -(n - 1) * incX - } - nonUnit := d == blas.NonUnit - if tA == blas.NoTrans { - if ul == blas.Upper { - if incX == 1 { - for i := n - 1; i >= 0; i-- { - var sum float64 - atmp := a[i*lda+i+1 : i*lda+n] - for j, v := range atmp { - jv := i + j + 1 - sum += x[jv] * v - } - x[i] -= sum - if nonUnit { - x[i] /= a[i*lda+i] - } - } - return - } - ix := kx + (n-1)*incX - for i := n - 1; i >= 0; i-- { - var sum float64 - jx := ix + incX - atmp := a[i*lda+i+1 : i*lda+n] - for _, v := range atmp { - sum += x[jx] * v - jx += incX - } - x[ix] -= sum - if nonUnit { - x[ix] /= a[i*lda+i] - } - ix -= incX - } - return - } - if incX == 1 { - for i := 0; i < n; i++ { - var sum float64 - atmp := a[i*lda : i*lda+i] - for j, v := range atmp { - sum += x[j] * v - } - x[i] -= sum - if nonUnit { - x[i] /= a[i*lda+i] - } - } - return - } - ix := kx - for i := 0; i < n; i++ { - jx := kx - var sum float64 - atmp := a[i*lda : i*lda+i] - for _, v := range atmp { - sum += x[jx] * v - jx += incX - } - x[ix] -= sum - if nonUnit { - x[ix] /= a[i*lda+i] - } - ix += incX - } - return - } - // Cases where a is transposed. - if ul == blas.Upper { - if incX == 1 { - for i := 0; i < n; i++ { - if nonUnit { - x[i] /= a[i*lda+i] - } - xi := x[i] - atmp := a[i*lda+i+1 : i*lda+n] - for j, v := range atmp { - jv := j + i + 1 - x[jv] -= v * xi - } - } - return - } - ix := kx - for i := 0; i < n; i++ { - if nonUnit { - x[ix] /= a[i*lda+i] - } - xi := x[ix] - jx := kx + (i+1)*incX - atmp := a[i*lda+i+1 : i*lda+n] - for _, v := range atmp { - x[jx] -= v * xi - jx += incX - } - ix += incX - } - return - } - if incX == 1 { - for i := n - 1; i >= 0; i-- { - if nonUnit { - x[i] /= a[i*lda+i] - } - xi := x[i] - atmp := a[i*lda : i*lda+i] - for j, v := range atmp { - x[j] -= v * xi - } - } - return - } - ix := kx + (n-1)*incX - for i := n - 1; i >= 0; i-- { - if nonUnit { - x[ix] /= a[i*lda+i] - } - xi := x[ix] - jx := kx - atmp := a[i*lda : i*lda+i] - for _, v := range atmp { - x[jx] -= v * xi - jx += incX - } - ix -= incX - } -} - -// Dsymv performs the matrix-vector operation -// y = alpha * A * x + beta * y -// where A is an n×n symmetric matrix, x and y are vectors, and alpha and -// beta are scalars. -func (Implementation) Dsymv(ul blas.Uplo, n int, alpha float64, a []float64, lda int, x []float64, incX int, beta float64, y []float64, incY int) { - if ul != blas.Lower && ul != blas.Upper { - panic(badUplo) - } - if n < 0 { - panic(nLT0) - } - if lda < max(1, n) { - panic(badLdA) - } - if incX == 0 { - panic(zeroIncX) - } - if incY == 0 { - panic(zeroIncY) - } - - // Quick return if possible. - if n == 0 { - return - } - - // For zero matrix size the following slice length checks are trivially satisfied. - if len(a) < lda*(n-1)+n { - panic(shortA) - } - if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { - panic(shortX) - } - if (incY > 0 && len(y) <= (n-1)*incY) || (incY < 0 && len(y) <= (1-n)*incY) { - panic(shortY) - } - - // Quick return if possible. 
- if alpha == 0 && beta == 1 { - return - } - - // Set up start points - var kx, ky int - if incX < 0 { - kx = -(n - 1) * incX - } - if incY < 0 { - ky = -(n - 1) * incY - } - - // Form y = beta * y - if beta != 1 { - if incY == 1 { - if beta == 0 { - for i := range y[:n] { - y[i] = 0 - } - } else { - f64.ScalUnitary(beta, y[:n]) - } - } else { - iy := ky - if beta == 0 { - for i := 0; i < n; i++ { - y[iy] = 0 - iy += incY - } - } else { - if incY > 0 { - f64.ScalInc(beta, y, uintptr(n), uintptr(incY)) - } else { - f64.ScalInc(beta, y, uintptr(n), uintptr(-incY)) - } - } - } - } - - if alpha == 0 { - return - } - - if n == 1 { - y[0] += alpha * a[0] * x[0] - return - } - - if ul == blas.Upper { - if incX == 1 { - iy := ky - for i := 0; i < n; i++ { - xv := x[i] * alpha - sum := x[i] * a[i*lda+i] - jy := ky + (i+1)*incY - atmp := a[i*lda+i+1 : i*lda+n] - for j, v := range atmp { - jp := j + i + 1 - sum += x[jp] * v - y[jy] += xv * v - jy += incY - } - y[iy] += alpha * sum - iy += incY - } - return - } - ix := kx - iy := ky - for i := 0; i < n; i++ { - xv := x[ix] * alpha - sum := x[ix] * a[i*lda+i] - jx := kx + (i+1)*incX - jy := ky + (i+1)*incY - atmp := a[i*lda+i+1 : i*lda+n] - for _, v := range atmp { - sum += x[jx] * v - y[jy] += xv * v - jx += incX - jy += incY - } - y[iy] += alpha * sum - ix += incX - iy += incY - } - return - } - // Cases where a is lower triangular. - if incX == 1 { - iy := ky - for i := 0; i < n; i++ { - jy := ky - xv := alpha * x[i] - atmp := a[i*lda : i*lda+i] - var sum float64 - for j, v := range atmp { - sum += x[j] * v - y[jy] += xv * v - jy += incY - } - sum += x[i] * a[i*lda+i] - sum *= alpha - y[iy] += sum - iy += incY - } - return - } - ix := kx - iy := ky - for i := 0; i < n; i++ { - jx := kx - jy := ky - xv := alpha * x[ix] - atmp := a[i*lda : i*lda+i] - var sum float64 - for _, v := range atmp { - sum += x[jx] * v - y[jy] += xv * v - jx += incX - jy += incY - } - sum += x[ix] * a[i*lda+i] - sum *= alpha - y[iy] += sum - ix += incX - iy += incY - } -} - -// Dtbmv performs one of the matrix-vector operations -// x = A * x if tA == blas.NoTrans -// x = A^T * x if tA == blas.Trans or blas.ConjTrans -// where A is an n×n triangular band matrix with k+1 diagonals, and x is a vector. -func (Implementation) Dtbmv(ul blas.Uplo, tA blas.Transpose, d blas.Diag, n, k int, a []float64, lda int, x []float64, incX int) { - if ul != blas.Lower && ul != blas.Upper { - panic(badUplo) - } - if tA != blas.NoTrans && tA != blas.Trans && tA != blas.ConjTrans { - panic(badTranspose) - } - if d != blas.NonUnit && d != blas.Unit { - panic(badDiag) - } - if n < 0 { - panic(nLT0) - } - if k < 0 { - panic(kLT0) - } - if lda < k+1 { - panic(badLdA) - } - if incX == 0 { - panic(zeroIncX) - } - - // Quick return if possible. - if n == 0 { - return - } - - // For zero matrix size the following slice length checks are trivially satisfied. 
- if len(a) < lda*(n-1)+k+1 { - panic(shortA) - } - if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { - panic(shortX) - } - - var kx int - if incX < 0 { - kx = -(n - 1) * incX - } - - nonunit := d != blas.Unit - - if tA == blas.NoTrans { - if ul == blas.Upper { - if incX == 1 { - for i := 0; i < n; i++ { - u := min(1+k, n-i) - var sum float64 - atmp := a[i*lda:] - xtmp := x[i:] - for j := 1; j < u; j++ { - sum += xtmp[j] * atmp[j] - } - if nonunit { - sum += xtmp[0] * atmp[0] - } else { - sum += xtmp[0] - } - x[i] = sum - } - return - } - ix := kx - for i := 0; i < n; i++ { - u := min(1+k, n-i) - var sum float64 - atmp := a[i*lda:] - jx := incX - for j := 1; j < u; j++ { - sum += x[ix+jx] * atmp[j] - jx += incX - } - if nonunit { - sum += x[ix] * atmp[0] - } else { - sum += x[ix] - } - x[ix] = sum - ix += incX - } - return - } - if incX == 1 { - for i := n - 1; i >= 0; i-- { - l := max(0, k-i) - atmp := a[i*lda:] - var sum float64 - for j := l; j < k; j++ { - sum += x[i-k+j] * atmp[j] - } - if nonunit { - sum += x[i] * atmp[k] - } else { - sum += x[i] - } - x[i] = sum - } - return - } - ix := kx + (n-1)*incX - for i := n - 1; i >= 0; i-- { - l := max(0, k-i) - atmp := a[i*lda:] - var sum float64 - jx := l * incX - for j := l; j < k; j++ { - sum += x[ix-k*incX+jx] * atmp[j] - jx += incX - } - if nonunit { - sum += x[ix] * atmp[k] - } else { - sum += x[ix] - } - x[ix] = sum - ix -= incX - } - return - } - if ul == blas.Upper { - if incX == 1 { - for i := n - 1; i >= 0; i-- { - u := k + 1 - if i < u { - u = i + 1 - } - var sum float64 - for j := 1; j < u; j++ { - sum += x[i-j] * a[(i-j)*lda+j] - } - if nonunit { - sum += x[i] * a[i*lda] - } else { - sum += x[i] - } - x[i] = sum - } - return - } - ix := kx + (n-1)*incX - for i := n - 1; i >= 0; i-- { - u := k + 1 - if i < u { - u = i + 1 - } - var sum float64 - jx := incX - for j := 1; j < u; j++ { - sum += x[ix-jx] * a[(i-j)*lda+j] - jx += incX - } - if nonunit { - sum += x[ix] * a[i*lda] - } else { - sum += x[ix] - } - x[ix] = sum - ix -= incX - } - return - } - if incX == 1 { - for i := 0; i < n; i++ { - u := k - if i+k >= n { - u = n - i - 1 - } - var sum float64 - for j := 0; j < u; j++ { - sum += x[i+j+1] * a[(i+j+1)*lda+k-j-1] - } - if nonunit { - sum += x[i] * a[i*lda+k] - } else { - sum += x[i] - } - x[i] = sum - } - return - } - ix := kx - for i := 0; i < n; i++ { - u := k - if i+k >= n { - u = n - i - 1 - } - var ( - sum float64 - jx int - ) - for j := 0; j < u; j++ { - sum += x[ix+jx+incX] * a[(i+j+1)*lda+k-j-1] - jx += incX - } - if nonunit { - sum += x[ix] * a[i*lda+k] - } else { - sum += x[ix] - } - x[ix] = sum - ix += incX - } -} - -// Dtpmv performs one of the matrix-vector operations -// x = A * x if tA == blas.NoTrans -// x = A^T * x if tA == blas.Trans or blas.ConjTrans -// where A is an n×n triangular matrix in packed format, and x is a vector. -func (Implementation) Dtpmv(ul blas.Uplo, tA blas.Transpose, d blas.Diag, n int, ap []float64, x []float64, incX int) { - if ul != blas.Lower && ul != blas.Upper { - panic(badUplo) - } - if tA != blas.NoTrans && tA != blas.Trans && tA != blas.ConjTrans { - panic(badTranspose) - } - if d != blas.NonUnit && d != blas.Unit { - panic(badDiag) - } - if n < 0 { - panic(nLT0) - } - if incX == 0 { - panic(zeroIncX) - } - - // Quick return if possible. - if n == 0 { - return - } - - // For zero matrix size the following slice length checks are trivially satisfied. 
- if len(ap) < n*(n+1)/2 { - panic(shortAP) - } - if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { - panic(shortX) - } - - var kx int - if incX < 0 { - kx = -(n - 1) * incX - } - - nonUnit := d == blas.NonUnit - var offset int // Offset is the index of (i,i) - if tA == blas.NoTrans { - if ul == blas.Upper { - if incX == 1 { - for i := 0; i < n; i++ { - xi := x[i] - if nonUnit { - xi *= ap[offset] - } - atmp := ap[offset+1 : offset+n-i] - xtmp := x[i+1:] - for j, v := range atmp { - xi += v * xtmp[j] - } - x[i] = xi - offset += n - i - } - return - } - ix := kx - for i := 0; i < n; i++ { - xix := x[ix] - if nonUnit { - xix *= ap[offset] - } - atmp := ap[offset+1 : offset+n-i] - jx := kx + (i+1)*incX - for _, v := range atmp { - xix += v * x[jx] - jx += incX - } - x[ix] = xix - offset += n - i - ix += incX - } - return - } - if incX == 1 { - offset = n*(n+1)/2 - 1 - for i := n - 1; i >= 0; i-- { - xi := x[i] - if nonUnit { - xi *= ap[offset] - } - atmp := ap[offset-i : offset] - for j, v := range atmp { - xi += v * x[j] - } - x[i] = xi - offset -= i + 1 - } - return - } - ix := kx + (n-1)*incX - offset = n*(n+1)/2 - 1 - for i := n - 1; i >= 0; i-- { - xix := x[ix] - if nonUnit { - xix *= ap[offset] - } - atmp := ap[offset-i : offset] - jx := kx - for _, v := range atmp { - xix += v * x[jx] - jx += incX - } - x[ix] = xix - offset -= i + 1 - ix -= incX - } - return - } - // Cases where ap is transposed. - if ul == blas.Upper { - if incX == 1 { - offset = n*(n+1)/2 - 1 - for i := n - 1; i >= 0; i-- { - xi := x[i] - atmp := ap[offset+1 : offset+n-i] - xtmp := x[i+1:] - for j, v := range atmp { - xtmp[j] += v * xi - } - if nonUnit { - x[i] *= ap[offset] - } - offset -= n - i + 1 - } - return - } - ix := kx + (n-1)*incX - offset = n*(n+1)/2 - 1 - for i := n - 1; i >= 0; i-- { - xix := x[ix] - jx := kx + (i+1)*incX - atmp := ap[offset+1 : offset+n-i] - for _, v := range atmp { - x[jx] += v * xix - jx += incX - } - if nonUnit { - x[ix] *= ap[offset] - } - offset -= n - i + 1 - ix -= incX - } - return - } - if incX == 1 { - for i := 0; i < n; i++ { - xi := x[i] - atmp := ap[offset-i : offset] - for j, v := range atmp { - x[j] += v * xi - } - if nonUnit { - x[i] *= ap[offset] - } - offset += i + 2 - } - return - } - ix := kx - for i := 0; i < n; i++ { - xix := x[ix] - jx := kx - atmp := ap[offset-i : offset] - for _, v := range atmp { - x[jx] += v * xix - jx += incX - } - if nonUnit { - x[ix] *= ap[offset] - } - ix += incX - offset += i + 2 - } -} - -// Dtbsv solves one of the systems of equations -// A * x = b if tA == blas.NoTrans -// A^T * x = b if tA == blas.Trans or tA == blas.ConjTrans -// where A is an n×n triangular band matrix with k+1 diagonals, -// and x and b are vectors. -// -// At entry to the function, x contains the values of b, and the result is -// stored in-place into x. -// -// No test for singularity or near-singularity is included in this -// routine. Such tests must be performed before calling this routine. -func (Implementation) Dtbsv(ul blas.Uplo, tA blas.Transpose, d blas.Diag, n, k int, a []float64, lda int, x []float64, incX int) { - if ul != blas.Lower && ul != blas.Upper { - panic(badUplo) - } - if tA != blas.NoTrans && tA != blas.Trans && tA != blas.ConjTrans { - panic(badTranspose) - } - if d != blas.NonUnit && d != blas.Unit { - panic(badDiag) - } - if n < 0 { - panic(nLT0) - } - if k < 0 { - panic(kLT0) - } - if lda < k+1 { - panic(badLdA) - } - if incX == 0 { - panic(zeroIncX) - } - - // Quick return if possible. 
- if n == 0 { - return - } - - // For zero matrix size the following slice length checks are trivially satisfied. - if len(a) < lda*(n-1)+k+1 { - panic(shortA) - } - if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { - panic(shortX) - } - - var kx int - if incX < 0 { - kx = -(n - 1) * incX - } - nonUnit := d == blas.NonUnit - // Form x = A^-1 x. - // Several cases below use subslices for speed improvement. - // The incX != 1 cases usually do not because incX may be negative. - if tA == blas.NoTrans { - if ul == blas.Upper { - if incX == 1 { - for i := n - 1; i >= 0; i-- { - bands := k - if i+bands >= n { - bands = n - i - 1 - } - atmp := a[i*lda+1:] - xtmp := x[i+1 : i+bands+1] - var sum float64 - for j, v := range xtmp { - sum += v * atmp[j] - } - x[i] -= sum - if nonUnit { - x[i] /= a[i*lda] - } - } - return - } - ix := kx + (n-1)*incX - for i := n - 1; i >= 0; i-- { - max := k + 1 - if i+max > n { - max = n - i - } - atmp := a[i*lda:] - var ( - jx int - sum float64 - ) - for j := 1; j < max; j++ { - jx += incX - sum += x[ix+jx] * atmp[j] - } - x[ix] -= sum - if nonUnit { - x[ix] /= atmp[0] - } - ix -= incX - } - return - } - if incX == 1 { - for i := 0; i < n; i++ { - bands := k - if i-k < 0 { - bands = i - } - atmp := a[i*lda+k-bands:] - xtmp := x[i-bands : i] - var sum float64 - for j, v := range xtmp { - sum += v * atmp[j] - } - x[i] -= sum - if nonUnit { - x[i] /= atmp[bands] - } - } - return - } - ix := kx - for i := 0; i < n; i++ { - bands := k - if i-k < 0 { - bands = i - } - atmp := a[i*lda+k-bands:] - var ( - sum float64 - jx int - ) - for j := 0; j < bands; j++ { - sum += x[ix-bands*incX+jx] * atmp[j] - jx += incX - } - x[ix] -= sum - if nonUnit { - x[ix] /= atmp[bands] - } - ix += incX - } - return - } - // Cases where a is transposed. - if ul == blas.Upper { - if incX == 1 { - for i := 0; i < n; i++ { - bands := k - if i-k < 0 { - bands = i - } - var sum float64 - for j := 0; j < bands; j++ { - sum += x[i-bands+j] * a[(i-bands+j)*lda+bands-j] - } - x[i] -= sum - if nonUnit { - x[i] /= a[i*lda] - } - } - return - } - ix := kx - for i := 0; i < n; i++ { - bands := k - if i-k < 0 { - bands = i - } - var ( - sum float64 - jx int - ) - for j := 0; j < bands; j++ { - sum += x[ix-bands*incX+jx] * a[(i-bands+j)*lda+bands-j] - jx += incX - } - x[ix] -= sum - if nonUnit { - x[ix] /= a[i*lda] - } - ix += incX - } - return - } - if incX == 1 { - for i := n - 1; i >= 0; i-- { - bands := k - if i+bands >= n { - bands = n - i - 1 - } - var sum float64 - xtmp := x[i+1 : i+1+bands] - for j, v := range xtmp { - sum += v * a[(i+j+1)*lda+k-j-1] - } - x[i] -= sum - if nonUnit { - x[i] /= a[i*lda+k] - } - } - return - } - ix := kx + (n-1)*incX - for i := n - 1; i >= 0; i-- { - bands := k - if i+bands >= n { - bands = n - i - 1 - } - var ( - sum float64 - jx int - ) - for j := 0; j < bands; j++ { - sum += x[ix+jx+incX] * a[(i+j+1)*lda+k-j-1] - jx += incX - } - x[ix] -= sum - if nonUnit { - x[ix] /= a[i*lda+k] - } - ix -= incX - } -} - -// Dsbmv performs the matrix-vector operation -// y = alpha * A * x + beta * y -// where A is an n×n symmetric band matrix with k super-diagonals, x and y are -// vectors, and alpha and beta are scalars. 
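
The band layout described above is the part callers most often get wrong, so a brief sketch may help. It is illustrative only, with made-up values, and assumes nothing beyond the Dsbmv signature below and the package's exported Implementation type: in row-major upper band storage, row i of a holds A[i,i], A[i,i+1], ..., A[i,min(i+k,n-1)], padded out to lda entries.

package main

import (
	"fmt"

	"gonum.org/v1/gonum/blas"
	"gonum.org/v1/gonum/blas/gonum"
)

func main() {
	// Illustrative only. A is the symmetric tridiagonal matrix with 2 on
	// the diagonal and 1 on the first super/sub-diagonal (n=4, k=1, lda=2).
	a := []float64{
		2, 1,
		2, 1,
		2, 1,
		2, 0, // trailing pad in the last row is never read
	}
	x := []float64{1, 1, 1, 1}
	y := make([]float64, 4)

	var impl gonum.Implementation
	impl.Dsbmv(blas.Upper, 4, 1, 1, a, 2, x, 1, 0, y, 1) // y = 1*A*x + 0*y
	fmt.Println(y)                                       // [3 4 4 3]
}

The same matrix could equally be passed with ul == blas.Lower, in which case row i stores the sub-diagonal entries first and the diagonal lands in column k.
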
-func (Implementation) Dsbmv(ul blas.Uplo, n, k int, alpha float64, a []float64, lda int, x []float64, incX int, beta float64, y []float64, incY int) {
-	if ul != blas.Lower && ul != blas.Upper {
-		panic(badUplo)
-	}
-	if n < 0 {
-		panic(nLT0)
-	}
-	if k < 0 {
-		panic(kLT0)
-	}
-	if lda < k+1 {
-		panic(badLdA)
-	}
-	if incX == 0 {
-		panic(zeroIncX)
-	}
-	if incY == 0 {
-		panic(zeroIncY)
-	}
-
-	// Quick return if possible.
-	if n == 0 {
-		return
-	}
-
-	// For zero matrix size the following slice length checks are trivially satisfied.
-	if len(a) < lda*(n-1)+k+1 {
-		panic(shortA)
-	}
-	if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) {
-		panic(shortX)
-	}
-	if (incY > 0 && len(y) <= (n-1)*incY) || (incY < 0 && len(y) <= (1-n)*incY) {
-		panic(shortY)
-	}
-
-	// Quick return if possible.
-	if alpha == 0 && beta == 1 {
-		return
-	}
-
-	// Set up indexes
-	lenX := n
-	lenY := n
-	var kx, ky int
-	if incX < 0 {
-		kx = -(lenX - 1) * incX
-	}
-	if incY < 0 {
-		ky = -(lenY - 1) * incY
-	}
-
-	// Form y = beta * y.
-	if beta != 1 {
-		if incY == 1 {
-			if beta == 0 {
-				for i := range y[:n] {
-					y[i] = 0
-				}
-			} else {
-				f64.ScalUnitary(beta, y[:n])
-			}
-		} else {
-			iy := ky
-			if beta == 0 {
-				for i := 0; i < n; i++ {
-					y[iy] = 0
-					iy += incY
-				}
-			} else {
-				if incY > 0 {
-					f64.ScalInc(beta, y, uintptr(n), uintptr(incY))
-				} else {
-					f64.ScalInc(beta, y, uintptr(n), uintptr(-incY))
-				}
-			}
-		}
-	}
-
-	if alpha == 0 {
-		return
-	}
-
-	if ul == blas.Upper {
-		if incX == 1 {
-			iy := ky
-			for i := 0; i < n; i++ {
-				atmp := a[i*lda:]
-				tmp := alpha * x[i]
-				sum := tmp * atmp[0]
-				u := min(k, n-i-1)
-				jy := incY
-				for j := 1; j <= u; j++ {
-					v := atmp[j]
-					sum += alpha * x[i+j] * v
-					y[iy+jy] += tmp * v
-					jy += incY
-				}
-				y[iy] += sum
-				iy += incY
-			}
-			return
-		}
-		ix := kx
-		iy := ky
-		for i := 0; i < n; i++ {
-			atmp := a[i*lda:]
-			tmp := alpha * x[ix]
-			sum := tmp * atmp[0]
-			u := min(k, n-i-1)
-			jx := incX
-			jy := incY
-			for j := 1; j <= u; j++ {
-				v := atmp[j]
-				sum += alpha * x[ix+jx] * v
-				y[iy+jy] += tmp * v
-				jx += incX
-				jy += incY
-			}
-			y[iy] += sum
-			ix += incX
-			iy += incY
-		}
-		return
-	}
-
-	// Cases where a has bands below the diagonal.
-	if incX == 1 {
-		iy := ky
-		for i := 0; i < n; i++ {
-			l := max(0, k-i)
-			tmp := alpha * x[i]
-			jy := l * incY
-			atmp := a[i*lda:]
-			for j := l; j < k; j++ {
-				v := atmp[j]
-				y[iy] += alpha * v * x[i-k+j]
-				y[iy-k*incY+jy] += tmp * v
-				jy += incY
-			}
-			y[iy] += tmp * atmp[k]
-			iy += incY
-		}
-		return
-	}
-	ix := kx
-	iy := ky
-	for i := 0; i < n; i++ {
-		l := max(0, k-i)
-		tmp := alpha * x[ix]
-		jx := l * incX
-		jy := l * incY
-		atmp := a[i*lda:]
-		for j := l; j < k; j++ {
-			v := atmp[j]
-			y[iy] += alpha * v * x[ix-k*incX+jx]
-			y[iy-k*incY+jy] += tmp * v
-			jx += incX
-			jy += incY
-		}
-		y[iy] += tmp * atmp[k]
-		ix += incX
-		iy += incY
-	}
-}
-
-// Dsyr performs the symmetric rank-one update
-//  A += alpha * x * x^T
-// where A is an n×n symmetric matrix, and x is a vector.
-func (Implementation) Dsyr(ul blas.Uplo, n int, alpha float64, x []float64, incX int, a []float64, lda int) {
-	if ul != blas.Lower && ul != blas.Upper {
-		panic(badUplo)
-	}
-	if n < 0 {
-		panic(nLT0)
-	}
-	if lda < max(1, n) {
-		panic(badLdA)
-	}
-	if incX == 0 {
-		panic(zeroIncX)
-	}
-
-	// Quick return if possible.
-	if n == 0 {
-		return
-	}
-
-	// For zero matrix size the following slice length checks are trivially satisfied.
- if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { - panic(shortX) - } - if len(a) < lda*(n-1)+n { - panic(shortA) - } - - // Quick return if possible. - if alpha == 0 { - return - } - - lenX := n - var kx int - if incX < 0 { - kx = -(lenX - 1) * incX - } - if ul == blas.Upper { - if incX == 1 { - for i := 0; i < n; i++ { - tmp := x[i] * alpha - if tmp != 0 { - atmp := a[i*lda+i : i*lda+n] - xtmp := x[i:n] - for j, v := range xtmp { - atmp[j] += v * tmp - } - } - } - return - } - ix := kx - for i := 0; i < n; i++ { - tmp := x[ix] * alpha - if tmp != 0 { - jx := ix - atmp := a[i*lda:] - for j := i; j < n; j++ { - atmp[j] += x[jx] * tmp - jx += incX - } - } - ix += incX - } - return - } - // Cases where a is lower triangular. - if incX == 1 { - for i := 0; i < n; i++ { - tmp := x[i] * alpha - if tmp != 0 { - atmp := a[i*lda:] - xtmp := x[:i+1] - for j, v := range xtmp { - atmp[j] += tmp * v - } - } - } - return - } - ix := kx - for i := 0; i < n; i++ { - tmp := x[ix] * alpha - if tmp != 0 { - atmp := a[i*lda:] - jx := kx - for j := 0; j < i+1; j++ { - atmp[j] += tmp * x[jx] - jx += incX - } - } - ix += incX - } -} - -// Dsyr2 performs the symmetric rank-two update -// A += alpha * x * y^T + alpha * y * x^T -// where A is an n×n symmetric matrix, x and y are vectors, and alpha is a scalar. -func (Implementation) Dsyr2(ul blas.Uplo, n int, alpha float64, x []float64, incX int, y []float64, incY int, a []float64, lda int) { - if ul != blas.Lower && ul != blas.Upper { - panic(badUplo) - } - if n < 0 { - panic(nLT0) - } - if lda < max(1, n) { - panic(badLdA) - } - if incX == 0 { - panic(zeroIncX) - } - if incY == 0 { - panic(zeroIncY) - } - - // Quick return if possible. - if n == 0 { - return - } - - // For zero matrix size the following slice length checks are trivially satisfied. - if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { - panic(shortX) - } - if (incY > 0 && len(y) <= (n-1)*incY) || (incY < 0 && len(y) <= (1-n)*incY) { - panic(shortY) - } - if len(a) < lda*(n-1)+n { - panic(shortA) - } - - // Quick return if possible. - if alpha == 0 { - return - } - - var ky, kx int - if incY < 0 { - ky = -(n - 1) * incY - } - if incX < 0 { - kx = -(n - 1) * incX - } - if ul == blas.Upper { - if incX == 1 && incY == 1 { - for i := 0; i < n; i++ { - xi := x[i] - yi := y[i] - atmp := a[i*lda:] - for j := i; j < n; j++ { - atmp[j] += alpha * (xi*y[j] + x[j]*yi) - } - } - return - } - ix := kx - iy := ky - for i := 0; i < n; i++ { - jx := kx + i*incX - jy := ky + i*incY - xi := x[ix] - yi := y[iy] - atmp := a[i*lda:] - for j := i; j < n; j++ { - atmp[j] += alpha * (xi*y[jy] + x[jx]*yi) - jx += incX - jy += incY - } - ix += incX - iy += incY - } - return - } - if incX == 1 && incY == 1 { - for i := 0; i < n; i++ { - xi := x[i] - yi := y[i] - atmp := a[i*lda:] - for j := 0; j <= i; j++ { - atmp[j] += alpha * (xi*y[j] + x[j]*yi) - } - } - return - } - ix := kx - iy := ky - for i := 0; i < n; i++ { - jx := kx - jy := ky - xi := x[ix] - yi := y[iy] - atmp := a[i*lda:] - for j := 0; j <= i; j++ { - atmp[j] += alpha * (xi*y[jy] + x[jx]*yi) - jx += incX - jy += incY - } - ix += incX - iy += incY - } -} - -// Dtpsv solves one of the systems of equations -// A * x = b if tA == blas.NoTrans -// A^T * x = b if tA == blas.Trans or blas.ConjTrans -// where A is an n×n triangular matrix in packed format, and x and b are vectors. -// -// At entry to the function, x contains the values of b, and the result is -// stored in-place into x. 
-// -// No test for singularity or near-singularity is included in this -// routine. Such tests must be performed before calling this routine. -func (Implementation) Dtpsv(ul blas.Uplo, tA blas.Transpose, d blas.Diag, n int, ap []float64, x []float64, incX int) { - if ul != blas.Lower && ul != blas.Upper { - panic(badUplo) - } - if tA != blas.NoTrans && tA != blas.Trans && tA != blas.ConjTrans { - panic(badTranspose) - } - if d != blas.NonUnit && d != blas.Unit { - panic(badDiag) - } - if n < 0 { - panic(nLT0) - } - if incX == 0 { - panic(zeroIncX) - } - - // Quick return if possible. - if n == 0 { - return - } - - // For zero matrix size the following slice length checks are trivially satisfied. - if len(ap) < n*(n+1)/2 { - panic(shortAP) - } - if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { - panic(shortX) - } - - var kx int - if incX < 0 { - kx = -(n - 1) * incX - } - - nonUnit := d == blas.NonUnit - var offset int // Offset is the index of (i,i) - if tA == blas.NoTrans { - if ul == blas.Upper { - offset = n*(n+1)/2 - 1 - if incX == 1 { - for i := n - 1; i >= 0; i-- { - atmp := ap[offset+1 : offset+n-i] - xtmp := x[i+1:] - var sum float64 - for j, v := range atmp { - sum += v * xtmp[j] - } - x[i] -= sum - if nonUnit { - x[i] /= ap[offset] - } - offset -= n - i + 1 - } - return - } - ix := kx + (n-1)*incX - for i := n - 1; i >= 0; i-- { - atmp := ap[offset+1 : offset+n-i] - jx := kx + (i+1)*incX - var sum float64 - for _, v := range atmp { - sum += v * x[jx] - jx += incX - } - x[ix] -= sum - if nonUnit { - x[ix] /= ap[offset] - } - ix -= incX - offset -= n - i + 1 - } - return - } - if incX == 1 { - for i := 0; i < n; i++ { - atmp := ap[offset-i : offset] - var sum float64 - for j, v := range atmp { - sum += v * x[j] - } - x[i] -= sum - if nonUnit { - x[i] /= ap[offset] - } - offset += i + 2 - } - return - } - ix := kx - for i := 0; i < n; i++ { - jx := kx - atmp := ap[offset-i : offset] - var sum float64 - for _, v := range atmp { - sum += v * x[jx] - jx += incX - } - x[ix] -= sum - if nonUnit { - x[ix] /= ap[offset] - } - ix += incX - offset += i + 2 - } - return - } - // Cases where ap is transposed. - if ul == blas.Upper { - if incX == 1 { - for i := 0; i < n; i++ { - if nonUnit { - x[i] /= ap[offset] - } - xi := x[i] - atmp := ap[offset+1 : offset+n-i] - xtmp := x[i+1:] - for j, v := range atmp { - xtmp[j] -= v * xi - } - offset += n - i - } - return - } - ix := kx - for i := 0; i < n; i++ { - if nonUnit { - x[ix] /= ap[offset] - } - xix := x[ix] - atmp := ap[offset+1 : offset+n-i] - jx := kx + (i+1)*incX - for _, v := range atmp { - x[jx] -= v * xix - jx += incX - } - ix += incX - offset += n - i - } - return - } - if incX == 1 { - offset = n*(n+1)/2 - 1 - for i := n - 1; i >= 0; i-- { - if nonUnit { - x[i] /= ap[offset] - } - xi := x[i] - atmp := ap[offset-i : offset] - for j, v := range atmp { - x[j] -= v * xi - } - offset -= i + 1 - } - return - } - ix := kx + (n-1)*incX - offset = n*(n+1)/2 - 1 - for i := n - 1; i >= 0; i-- { - if nonUnit { - x[ix] /= ap[offset] - } - xix := x[ix] - atmp := ap[offset-i : offset] - jx := kx - for _, v := range atmp { - x[jx] -= v * xix - jx += incX - } - ix -= incX - offset -= i + 1 - } -} - -// Dspmv performs the matrix-vector operation -// y = alpha * A * x + beta * y -// where A is an n×n symmetric matrix in packed format, x and y are vectors, -// and alpha and beta are scalars. 
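
The packed layout checked by shortAP is easy to misread, so here is a minimal, illustrative sketch with made-up values; only the Dspmv signature below is assumed. For ul == blas.Upper the slice ap lists the upper triangle row by row, so element (i,i) sits at offset i*n - i*(i-1)/2, matching the offset += n - i bookkeeping in the code.

package main

import (
	"fmt"

	"gonum.org/v1/gonum/blas"
	"gonum.org/v1/gonum/blas/gonum"
)

func main() {
	// Illustrative only. Upper packed storage of the symmetric 3×3 matrix
	//     [1 2 3]
	// A = [2 4 5]
	//     [3 5 6]
	// lists {A[0,0..2], A[1,1..2], A[2,2]} in one slice of length n(n+1)/2.
	ap := []float64{
		1, 2, 3,
		4, 5,
		6,
	}
	x := []float64{1, 0, 0}
	y := make([]float64, 3)

	var impl gonum.Implementation
	impl.Dspmv(blas.Upper, 3, 1, ap, x, 1, 0, y, 1) // y = 1*A*x + 0*y
	fmt.Println(y)                                  // [1 2 3], the first column of A
}
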
-func (Implementation) Dspmv(ul blas.Uplo, n int, alpha float64, ap []float64, x []float64, incX int, beta float64, y []float64, incY int) { - if ul != blas.Lower && ul != blas.Upper { - panic(badUplo) - } - if n < 0 { - panic(nLT0) - } - if incX == 0 { - panic(zeroIncX) - } - if incY == 0 { - panic(zeroIncY) - } - - // Quick return if possible. - if n == 0 { - return - } - - // For zero matrix size the following slice length checks are trivially satisfied. - if len(ap) < n*(n+1)/2 { - panic(shortAP) - } - if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { - panic(shortX) - } - if (incY > 0 && len(y) <= (n-1)*incY) || (incY < 0 && len(y) <= (1-n)*incY) { - panic(shortY) - } - - // Quick return if possible. - if alpha == 0 && beta == 1 { - return - } - - // Set up start points - var kx, ky int - if incX < 0 { - kx = -(n - 1) * incX - } - if incY < 0 { - ky = -(n - 1) * incY - } - - // Form y = beta * y. - if beta != 1 { - if incY == 1 { - if beta == 0 { - for i := range y[:n] { - y[i] = 0 - } - } else { - f64.ScalUnitary(beta, y[:n]) - } - } else { - iy := ky - if beta == 0 { - for i := 0; i < n; i++ { - y[iy] = 0 - iy += incY - } - } else { - if incY > 0 { - f64.ScalInc(beta, y, uintptr(n), uintptr(incY)) - } else { - f64.ScalInc(beta, y, uintptr(n), uintptr(-incY)) - } - } - } - } - - if alpha == 0 { - return - } - - if n == 1 { - y[0] += alpha * ap[0] * x[0] - return - } - var offset int // Offset is the index of (i,i). - if ul == blas.Upper { - if incX == 1 { - iy := ky - for i := 0; i < n; i++ { - xv := x[i] * alpha - sum := ap[offset] * x[i] - atmp := ap[offset+1 : offset+n-i] - xtmp := x[i+1:] - jy := ky + (i+1)*incY - for j, v := range atmp { - sum += v * xtmp[j] - y[jy] += v * xv - jy += incY - } - y[iy] += alpha * sum - iy += incY - offset += n - i - } - return - } - ix := kx - iy := ky - for i := 0; i < n; i++ { - xv := x[ix] * alpha - sum := ap[offset] * x[ix] - atmp := ap[offset+1 : offset+n-i] - jx := kx + (i+1)*incX - jy := ky + (i+1)*incY - for _, v := range atmp { - sum += v * x[jx] - y[jy] += v * xv - jx += incX - jy += incY - } - y[iy] += alpha * sum - ix += incX - iy += incY - offset += n - i - } - return - } - if incX == 1 { - iy := ky - for i := 0; i < n; i++ { - xv := x[i] * alpha - atmp := ap[offset-i : offset] - jy := ky - var sum float64 - for j, v := range atmp { - sum += v * x[j] - y[jy] += v * xv - jy += incY - } - sum += ap[offset] * x[i] - y[iy] += alpha * sum - iy += incY - offset += i + 2 - } - return - } - ix := kx - iy := ky - for i := 0; i < n; i++ { - xv := x[ix] * alpha - atmp := ap[offset-i : offset] - jx := kx - jy := ky - var sum float64 - for _, v := range atmp { - sum += v * x[jx] - y[jy] += v * xv - jx += incX - jy += incY - } - - sum += ap[offset] * x[ix] - y[iy] += alpha * sum - ix += incX - iy += incY - offset += i + 2 - } -} - -// Dspr performs the symmetric rank-one operation -// A += alpha * x * x^T -// where A is an n×n symmetric matrix in packed format, x is a vector, and -// alpha is a scalar. -func (Implementation) Dspr(ul blas.Uplo, n int, alpha float64, x []float64, incX int, ap []float64) { - if ul != blas.Lower && ul != blas.Upper { - panic(badUplo) - } - if n < 0 { - panic(nLT0) - } - if incX == 0 { - panic(zeroIncX) - } - - // Quick return if possible. - if n == 0 { - return - } - - // For zero matrix size the following slice length checks are trivially satisfied. 
- if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { - panic(shortX) - } - if len(ap) < n*(n+1)/2 { - panic(shortAP) - } - - // Quick return if possible. - if alpha == 0 { - return - } - - lenX := n - var kx int - if incX < 0 { - kx = -(lenX - 1) * incX - } - var offset int // Offset is the index of (i,i). - if ul == blas.Upper { - if incX == 1 { - for i := 0; i < n; i++ { - atmp := ap[offset:] - xv := alpha * x[i] - xtmp := x[i:n] - for j, v := range xtmp { - atmp[j] += xv * v - } - offset += n - i - } - return - } - ix := kx - for i := 0; i < n; i++ { - jx := kx + i*incX - atmp := ap[offset:] - xv := alpha * x[ix] - for j := 0; j < n-i; j++ { - atmp[j] += xv * x[jx] - jx += incX - } - ix += incX - offset += n - i - } - return - } - if incX == 1 { - for i := 0; i < n; i++ { - atmp := ap[offset-i:] - xv := alpha * x[i] - xtmp := x[:i+1] - for j, v := range xtmp { - atmp[j] += xv * v - } - offset += i + 2 - } - return - } - ix := kx - for i := 0; i < n; i++ { - jx := kx - atmp := ap[offset-i:] - xv := alpha * x[ix] - for j := 0; j <= i; j++ { - atmp[j] += xv * x[jx] - jx += incX - } - ix += incX - offset += i + 2 - } -} - -// Dspr2 performs the symmetric rank-2 update -// A += alpha * x * y^T + alpha * y * x^T -// where A is an n×n symmetric matrix in packed format, x and y are vectors, -// and alpha is a scalar. -func (Implementation) Dspr2(ul blas.Uplo, n int, alpha float64, x []float64, incX int, y []float64, incY int, ap []float64) { - if ul != blas.Lower && ul != blas.Upper { - panic(badUplo) - } - if n < 0 { - panic(nLT0) - } - if incX == 0 { - panic(zeroIncX) - } - if incY == 0 { - panic(zeroIncY) - } - - // Quick return if possible. - if n == 0 { - return - } - - // For zero matrix size the following slice length checks are trivially satisfied. - if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { - panic(shortX) - } - if (incY > 0 && len(y) <= (n-1)*incY) || (incY < 0 && len(y) <= (1-n)*incY) { - panic(shortY) - } - if len(ap) < n*(n+1)/2 { - panic(shortAP) - } - - // Quick return if possible. - if alpha == 0 { - return - } - - var ky, kx int - if incY < 0 { - ky = -(n - 1) * incY - } - if incX < 0 { - kx = -(n - 1) * incX - } - var offset int // Offset is the index of (i,i). 
- if ul == blas.Upper { - if incX == 1 && incY == 1 { - for i := 0; i < n; i++ { - atmp := ap[offset:] - xi := x[i] - yi := y[i] - xtmp := x[i:n] - ytmp := y[i:n] - for j, v := range xtmp { - atmp[j] += alpha * (xi*ytmp[j] + v*yi) - } - offset += n - i - } - return - } - ix := kx - iy := ky - for i := 0; i < n; i++ { - jx := kx + i*incX - jy := ky + i*incY - atmp := ap[offset:] - xi := x[ix] - yi := y[iy] - for j := 0; j < n-i; j++ { - atmp[j] += alpha * (xi*y[jy] + x[jx]*yi) - jx += incX - jy += incY - } - ix += incX - iy += incY - offset += n - i - } - return - } - if incX == 1 && incY == 1 { - for i := 0; i < n; i++ { - atmp := ap[offset-i:] - xi := x[i] - yi := y[i] - xtmp := x[:i+1] - for j, v := range xtmp { - atmp[j] += alpha * (xi*y[j] + v*yi) - } - offset += i + 2 - } - return - } - ix := kx - iy := ky - for i := 0; i < n; i++ { - jx := kx - jy := ky - atmp := ap[offset-i:] - for j := 0; j <= i; j++ { - atmp[j] += alpha * (x[ix]*y[jy] + x[jx]*y[iy]) - jx += incX - jy += incY - } - ix += incX - iy += incY - offset += i + 2 - } -} diff --git a/vendor/gonum.org/v1/gonum/blas/gonum/level3cmplx128.go b/vendor/gonum.org/v1/gonum/blas/gonum/level3cmplx128.go deleted file mode 100644 index e4a2bb5e9..000000000 --- a/vendor/gonum.org/v1/gonum/blas/gonum/level3cmplx128.go +++ /dev/null @@ -1,1715 +0,0 @@ -// Copyright ©2019 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import ( - "math/cmplx" - - "gonum.org/v1/gonum/blas" - "gonum.org/v1/gonum/internal/asm/c128" -) - -var _ blas.Complex128Level3 = Implementation{} - -// Zgemm performs one of the matrix-matrix operations -// C = alpha * op(A) * op(B) + beta * C -// where op(X) is one of -// op(X) = X or op(X) = X^T or op(X) = X^H, -// alpha and beta are scalars, and A, B and C are matrices, with op(A) an m×k matrix, -// op(B) a k×n matrix and C an m×n matrix. -func (Implementation) Zgemm(tA, tB blas.Transpose, m, n, k int, alpha complex128, a []complex128, lda int, b []complex128, ldb int, beta complex128, c []complex128, ldc int) { - switch tA { - default: - panic(badTranspose) - case blas.NoTrans, blas.Trans, blas.ConjTrans: - } - switch tB { - default: - panic(badTranspose) - case blas.NoTrans, blas.Trans, blas.ConjTrans: - } - switch { - case m < 0: - panic(mLT0) - case n < 0: - panic(nLT0) - case k < 0: - panic(kLT0) - } - rowA, colA := m, k - if tA != blas.NoTrans { - rowA, colA = k, m - } - if lda < max(1, colA) { - panic(badLdA) - } - rowB, colB := k, n - if tB != blas.NoTrans { - rowB, colB = n, k - } - if ldb < max(1, colB) { - panic(badLdB) - } - if ldc < max(1, n) { - panic(badLdC) - } - - // Quick return if possible. - if m == 0 || n == 0 { - return - } - - // For zero matrix size the following slice length checks are trivially satisfied. - if len(a) < (rowA-1)*lda+colA { - panic(shortA) - } - if len(b) < (rowB-1)*ldb+colB { - panic(shortB) - } - if len(c) < (m-1)*ldc+n { - panic(shortC) - } - - // Quick return if possible. - if (alpha == 0 || k == 0) && beta == 1 { - return - } - - if alpha == 0 { - if beta == 0 { - for i := 0; i < m; i++ { - for j := 0; j < n; j++ { - c[i*ldc+j] = 0 - } - } - } else { - for i := 0; i < m; i++ { - for j := 0; j < n; j++ { - c[i*ldc+j] *= beta - } - } - } - return - } - - switch tA { - case blas.NoTrans: - switch tB { - case blas.NoTrans: - // Form C = alpha * A * B + beta * C. 
- for i := 0; i < m; i++ { - switch { - case beta == 0: - for j := 0; j < n; j++ { - c[i*ldc+j] = 0 - } - case beta != 1: - for j := 0; j < n; j++ { - c[i*ldc+j] *= beta - } - } - for l := 0; l < k; l++ { - tmp := alpha * a[i*lda+l] - for j := 0; j < n; j++ { - c[i*ldc+j] += tmp * b[l*ldb+j] - } - } - } - case blas.Trans: - // Form C = alpha * A * B^T + beta * C. - for i := 0; i < m; i++ { - switch { - case beta == 0: - for j := 0; j < n; j++ { - c[i*ldc+j] = 0 - } - case beta != 1: - for j := 0; j < n; j++ { - c[i*ldc+j] *= beta - } - } - for l := 0; l < k; l++ { - tmp := alpha * a[i*lda+l] - for j := 0; j < n; j++ { - c[i*ldc+j] += tmp * b[j*ldb+l] - } - } - } - case blas.ConjTrans: - // Form C = alpha * A * B^H + beta * C. - for i := 0; i < m; i++ { - switch { - case beta == 0: - for j := 0; j < n; j++ { - c[i*ldc+j] = 0 - } - case beta != 1: - for j := 0; j < n; j++ { - c[i*ldc+j] *= beta - } - } - for l := 0; l < k; l++ { - tmp := alpha * a[i*lda+l] - for j := 0; j < n; j++ { - c[i*ldc+j] += tmp * cmplx.Conj(b[j*ldb+l]) - } - } - } - } - case blas.Trans: - switch tB { - case blas.NoTrans: - // Form C = alpha * A^T * B + beta * C. - for i := 0; i < m; i++ { - for j := 0; j < n; j++ { - var tmp complex128 - for l := 0; l < k; l++ { - tmp += a[l*lda+i] * b[l*ldb+j] - } - if beta == 0 { - c[i*ldc+j] = alpha * tmp - } else { - c[i*ldc+j] = alpha*tmp + beta*c[i*ldc+j] - } - } - } - case blas.Trans: - // Form C = alpha * A^T * B^T + beta * C. - for i := 0; i < m; i++ { - for j := 0; j < n; j++ { - var tmp complex128 - for l := 0; l < k; l++ { - tmp += a[l*lda+i] * b[j*ldb+l] - } - if beta == 0 { - c[i*ldc+j] = alpha * tmp - } else { - c[i*ldc+j] = alpha*tmp + beta*c[i*ldc+j] - } - } - } - case blas.ConjTrans: - // Form C = alpha * A^T * B^H + beta * C. - for i := 0; i < m; i++ { - for j := 0; j < n; j++ { - var tmp complex128 - for l := 0; l < k; l++ { - tmp += a[l*lda+i] * cmplx.Conj(b[j*ldb+l]) - } - if beta == 0 { - c[i*ldc+j] = alpha * tmp - } else { - c[i*ldc+j] = alpha*tmp + beta*c[i*ldc+j] - } - } - } - } - case blas.ConjTrans: - switch tB { - case blas.NoTrans: - // Form C = alpha * A^H * B + beta * C. - for i := 0; i < m; i++ { - for j := 0; j < n; j++ { - var tmp complex128 - for l := 0; l < k; l++ { - tmp += cmplx.Conj(a[l*lda+i]) * b[l*ldb+j] - } - if beta == 0 { - c[i*ldc+j] = alpha * tmp - } else { - c[i*ldc+j] = alpha*tmp + beta*c[i*ldc+j] - } - } - } - case blas.Trans: - // Form C = alpha * A^H * B^T + beta * C. - for i := 0; i < m; i++ { - for j := 0; j < n; j++ { - var tmp complex128 - for l := 0; l < k; l++ { - tmp += cmplx.Conj(a[l*lda+i]) * b[j*ldb+l] - } - if beta == 0 { - c[i*ldc+j] = alpha * tmp - } else { - c[i*ldc+j] = alpha*tmp + beta*c[i*ldc+j] - } - } - } - case blas.ConjTrans: - // Form C = alpha * A^H * B^H + beta * C. - for i := 0; i < m; i++ { - for j := 0; j < n; j++ { - var tmp complex128 - for l := 0; l < k; l++ { - tmp += cmplx.Conj(a[l*lda+i]) * cmplx.Conj(b[j*ldb+l]) - } - if beta == 0 { - c[i*ldc+j] = alpha * tmp - } else { - c[i*ldc+j] = alpha*tmp + beta*c[i*ldc+j] - } - } - } - } - } -} - -// Zhemm performs one of the matrix-matrix operations -// C = alpha*A*B + beta*C if side == blas.Left -// C = alpha*B*A + beta*C if side == blas.Right -// where alpha and beta are scalars, A is an m×m or n×n hermitian matrix and B -// and C are m×n matrices. The imaginary parts of the diagonal elements of A are -// assumed to be zero. 
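
Two properties from the doc comment above are worth seeing concretely: only the triangle selected by uplo is referenced, and the imaginary parts of A's diagonal are treated as zero. A minimal, illustrative sketch with made-up values, assuming only the Zhemm signature below:

package main

import (
	"fmt"

	"gonum.org/v1/gonum/blas"
	"gonum.org/v1/gonum/blas/gonum"
)

func main() {
	// Illustrative only. With uplo == blas.Upper the strictly lower entry
	// a[2] is never read, and the 9i on the diagonal is ignored, so the
	// effective Hermitian matrix is
	//     [ 2     1+1i ]
	// A = [ 1-1i  3    ]
	a := []complex128{
		2 + 9i, 1 + 1i,
		-999, 3,
	}
	b := []complex128{ // B = I, so C = 1*A*B recovers the Hermitian view of A
		1, 0,
		0, 1,
	}
	c := make([]complex128, 4)

	var impl gonum.Implementation
	impl.Zhemm(blas.Left, blas.Upper, 2, 2, 1, a, 2, b, 2, 0, c, 2)
	fmt.Println(c) // [(2+0i) (1+1i) (1-1i) (3+0i)]
}
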
-func (Implementation) Zhemm(side blas.Side, uplo blas.Uplo, m, n int, alpha complex128, a []complex128, lda int, b []complex128, ldb int, beta complex128, c []complex128, ldc int) { - na := m - if side == blas.Right { - na = n - } - switch { - case side != blas.Left && side != blas.Right: - panic(badSide) - case uplo != blas.Lower && uplo != blas.Upper: - panic(badUplo) - case m < 0: - panic(mLT0) - case n < 0: - panic(nLT0) - case lda < max(1, na): - panic(badLdA) - case ldb < max(1, n): - panic(badLdB) - case ldc < max(1, n): - panic(badLdC) - } - - // Quick return if possible. - if m == 0 || n == 0 { - return - } - - // For zero matrix size the following slice length checks are trivially satisfied. - if len(a) < lda*(na-1)+na { - panic(shortA) - } - if len(b) < ldb*(m-1)+n { - panic(shortB) - } - if len(c) < ldc*(m-1)+n { - panic(shortC) - } - - // Quick return if possible. - if alpha == 0 && beta == 1 { - return - } - - if alpha == 0 { - if beta == 0 { - for i := 0; i < m; i++ { - ci := c[i*ldc : i*ldc+n] - for j := range ci { - ci[j] = 0 - } - } - } else { - for i := 0; i < m; i++ { - ci := c[i*ldc : i*ldc+n] - c128.ScalUnitary(beta, ci) - } - } - return - } - - if side == blas.Left { - // Form C = alpha*A*B + beta*C. - for i := 0; i < m; i++ { - atmp := alpha * complex(real(a[i*lda+i]), 0) - bi := b[i*ldb : i*ldb+n] - ci := c[i*ldc : i*ldc+n] - if beta == 0 { - for j, bij := range bi { - ci[j] = atmp * bij - } - } else { - for j, bij := range bi { - ci[j] = atmp*bij + beta*ci[j] - } - } - if uplo == blas.Upper { - for k := 0; k < i; k++ { - atmp = alpha * cmplx.Conj(a[k*lda+i]) - c128.AxpyUnitary(atmp, b[k*ldb:k*ldb+n], ci) - } - for k := i + 1; k < m; k++ { - atmp = alpha * a[i*lda+k] - c128.AxpyUnitary(atmp, b[k*ldb:k*ldb+n], ci) - } - } else { - for k := 0; k < i; k++ { - atmp = alpha * a[i*lda+k] - c128.AxpyUnitary(atmp, b[k*ldb:k*ldb+n], ci) - } - for k := i + 1; k < m; k++ { - atmp = alpha * cmplx.Conj(a[k*lda+i]) - c128.AxpyUnitary(atmp, b[k*ldb:k*ldb+n], ci) - } - } - } - } else { - // Form C = alpha*B*A + beta*C. - if uplo == blas.Upper { - for i := 0; i < m; i++ { - for j := n - 1; j >= 0; j-- { - abij := alpha * b[i*ldb+j] - aj := a[j*lda+j+1 : j*lda+n] - bi := b[i*ldb+j+1 : i*ldb+n] - ci := c[i*ldc+j+1 : i*ldc+n] - var tmp complex128 - for k, ajk := range aj { - ci[k] += abij * ajk - tmp += bi[k] * cmplx.Conj(ajk) - } - ajj := complex(real(a[j*lda+j]), 0) - if beta == 0 { - c[i*ldc+j] = abij*ajj + alpha*tmp - } else { - c[i*ldc+j] = abij*ajj + alpha*tmp + beta*c[i*ldc+j] - } - } - } - } else { - for i := 0; i < m; i++ { - for j := 0; j < n; j++ { - abij := alpha * b[i*ldb+j] - aj := a[j*lda : j*lda+j] - bi := b[i*ldb : i*ldb+j] - ci := c[i*ldc : i*ldc+j] - var tmp complex128 - for k, ajk := range aj { - ci[k] += abij * ajk - tmp += bi[k] * cmplx.Conj(ajk) - } - ajj := complex(real(a[j*lda+j]), 0) - if beta == 0 { - c[i*ldc+j] = abij*ajj + alpha*tmp - } else { - c[i*ldc+j] = abij*ajj + alpha*tmp + beta*c[i*ldc+j] - } - } - } - } - } -} - -// Zherk performs one of the hermitian rank-k operations -// C = alpha*A*A^H + beta*C if trans == blas.NoTrans -// C = alpha*A^H*A + beta*C if trans == blas.ConjTrans -// where alpha and beta are real scalars, C is an n×n hermitian matrix and A is -// an n×k matrix in the first case and a k×n matrix in the second case. -// -// The imaginary parts of the diagonal elements of C are assumed to be zero, and -// on return they will be set to zero. 
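
Note that, unlike Zgemm, alpha and beta here are float64 rather than complex128, which is what keeps the updated C exactly Hermitian. A minimal, illustrative sketch of the trans == blas.NoTrans case with a single column (k = 1), using made-up values:

package main

import (
	"fmt"

	"gonum.org/v1/gonum/blas"
	"gonum.org/v1/gonum/blas/gonum"
)

func main() {
	// Illustrative only. A is n×k = 2×1, so C = 1*A*A^H + 0*C is the outer
	// product of the column (1+1i, 2) with its own conjugate.
	a := []complex128{
		1 + 1i,
		2,
	}
	c := make([]complex128, 4)

	var impl gonum.Implementation
	impl.Zherk(blas.Upper, blas.NoTrans, 2, 1, 1, a, 1, 0, c, 2)
	// Only the upper triangle of C is written; c[2] is left untouched.
	fmt.Println(c) // [(2+0i) (2+2i) (0+0i) (4+0i)]
}
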
-func (Implementation) Zherk(uplo blas.Uplo, trans blas.Transpose, n, k int, alpha float64, a []complex128, lda int, beta float64, c []complex128, ldc int) { - var rowA, colA int - switch trans { - default: - panic(badTranspose) - case blas.NoTrans: - rowA, colA = n, k - case blas.ConjTrans: - rowA, colA = k, n - } - switch { - case uplo != blas.Lower && uplo != blas.Upper: - panic(badUplo) - case n < 0: - panic(nLT0) - case k < 0: - panic(kLT0) - case lda < max(1, colA): - panic(badLdA) - case ldc < max(1, n): - panic(badLdC) - } - - // Quick return if possible. - if n == 0 { - return - } - - // For zero matrix size the following slice length checks are trivially satisfied. - if len(a) < (rowA-1)*lda+colA { - panic(shortA) - } - if len(c) < (n-1)*ldc+n { - panic(shortC) - } - - // Quick return if possible. - if (alpha == 0 || k == 0) && beta == 1 { - return - } - - if alpha == 0 { - if uplo == blas.Upper { - if beta == 0 { - for i := 0; i < n; i++ { - ci := c[i*ldc+i : i*ldc+n] - for j := range ci { - ci[j] = 0 - } - } - } else { - for i := 0; i < n; i++ { - ci := c[i*ldc+i : i*ldc+n] - ci[0] = complex(beta*real(ci[0]), 0) - if i != n-1 { - c128.DscalUnitary(beta, ci[1:]) - } - } - } - } else { - if beta == 0 { - for i := 0; i < n; i++ { - ci := c[i*ldc : i*ldc+i+1] - for j := range ci { - ci[j] = 0 - } - } - } else { - for i := 0; i < n; i++ { - ci := c[i*ldc : i*ldc+i+1] - if i != 0 { - c128.DscalUnitary(beta, ci[:i]) - } - ci[i] = complex(beta*real(ci[i]), 0) - } - } - } - return - } - - calpha := complex(alpha, 0) - if trans == blas.NoTrans { - // Form C = alpha*A*A^H + beta*C. - cbeta := complex(beta, 0) - if uplo == blas.Upper { - for i := 0; i < n; i++ { - ci := c[i*ldc+i : i*ldc+n] - ai := a[i*lda : i*lda+k] - switch { - case beta == 0: - // Handle the i-th diagonal element of C. - ci[0] = complex(alpha*real(c128.DotcUnitary(ai, ai)), 0) - // Handle the remaining elements on the i-th row of C. - for jc := range ci[1:] { - j := i + 1 + jc - ci[jc+1] = calpha * c128.DotcUnitary(a[j*lda:j*lda+k], ai) - } - case beta != 1: - cii := calpha*c128.DotcUnitary(ai, ai) + cbeta*ci[0] - ci[0] = complex(real(cii), 0) - for jc, cij := range ci[1:] { - j := i + 1 + jc - ci[jc+1] = calpha*c128.DotcUnitary(a[j*lda:j*lda+k], ai) + cbeta*cij - } - default: - cii := calpha*c128.DotcUnitary(ai, ai) + ci[0] - ci[0] = complex(real(cii), 0) - for jc, cij := range ci[1:] { - j := i + 1 + jc - ci[jc+1] = calpha*c128.DotcUnitary(a[j*lda:j*lda+k], ai) + cij - } - } - } - } else { - for i := 0; i < n; i++ { - ci := c[i*ldc : i*ldc+i+1] - ai := a[i*lda : i*lda+k] - switch { - case beta == 0: - // Handle the first i-1 elements on the i-th row of C. - for j := range ci[:i] { - ci[j] = calpha * c128.DotcUnitary(a[j*lda:j*lda+k], ai) - } - // Handle the i-th diagonal element of C. - ci[i] = complex(alpha*real(c128.DotcUnitary(ai, ai)), 0) - case beta != 1: - for j, cij := range ci[:i] { - ci[j] = calpha*c128.DotcUnitary(a[j*lda:j*lda+k], ai) + cbeta*cij - } - cii := calpha*c128.DotcUnitary(ai, ai) + cbeta*ci[i] - ci[i] = complex(real(cii), 0) - default: - for j, cij := range ci[:i] { - ci[j] = calpha*c128.DotcUnitary(a[j*lda:j*lda+k], ai) + cij - } - cii := calpha*c128.DotcUnitary(ai, ai) + ci[i] - ci[i] = complex(real(cii), 0) - } - } - } - } else { - // Form C = alpha*A^H*A + beta*C. 
- if uplo == blas.Upper { - for i := 0; i < n; i++ { - ci := c[i*ldc+i : i*ldc+n] - switch { - case beta == 0: - for jc := range ci { - ci[jc] = 0 - } - case beta != 1: - c128.DscalUnitary(beta, ci) - ci[0] = complex(real(ci[0]), 0) - default: - ci[0] = complex(real(ci[0]), 0) - } - for j := 0; j < k; j++ { - aji := cmplx.Conj(a[j*lda+i]) - if aji != 0 { - c128.AxpyUnitary(calpha*aji, a[j*lda+i:j*lda+n], ci) - } - } - c[i*ldc+i] = complex(real(c[i*ldc+i]), 0) - } - } else { - for i := 0; i < n; i++ { - ci := c[i*ldc : i*ldc+i+1] - switch { - case beta == 0: - for j := range ci { - ci[j] = 0 - } - case beta != 1: - c128.DscalUnitary(beta, ci) - ci[i] = complex(real(ci[i]), 0) - default: - ci[i] = complex(real(ci[i]), 0) - } - for j := 0; j < k; j++ { - aji := cmplx.Conj(a[j*lda+i]) - if aji != 0 { - c128.AxpyUnitary(calpha*aji, a[j*lda:j*lda+i+1], ci) - } - } - c[i*ldc+i] = complex(real(c[i*ldc+i]), 0) - } - } - } -} - -// Zher2k performs one of the hermitian rank-2k operations -// C = alpha*A*B^H + conj(alpha)*B*A^H + beta*C if trans == blas.NoTrans -// C = alpha*A^H*B + conj(alpha)*B^H*A + beta*C if trans == blas.ConjTrans -// where alpha and beta are scalars with beta real, C is an n×n hermitian matrix -// and A and B are n×k matrices in the first case and k×n matrices in the second case. -// -// The imaginary parts of the diagonal elements of C are assumed to be zero, and -// on return they will be set to zero. -func (Implementation) Zher2k(uplo blas.Uplo, trans blas.Transpose, n, k int, alpha complex128, a []complex128, lda int, b []complex128, ldb int, beta float64, c []complex128, ldc int) { - var row, col int - switch trans { - default: - panic(badTranspose) - case blas.NoTrans: - row, col = n, k - case blas.ConjTrans: - row, col = k, n - } - switch { - case uplo != blas.Lower && uplo != blas.Upper: - panic(badUplo) - case n < 0: - panic(nLT0) - case k < 0: - panic(kLT0) - case lda < max(1, col): - panic(badLdA) - case ldb < max(1, col): - panic(badLdB) - case ldc < max(1, n): - panic(badLdC) - } - - // Quick return if possible. - if n == 0 { - return - } - - // For zero matrix size the following slice length checks are trivially satisfied. - if len(a) < (row-1)*lda+col { - panic(shortA) - } - if len(b) < (row-1)*ldb+col { - panic(shortB) - } - if len(c) < (n-1)*ldc+n { - panic(shortC) - } - - // Quick return if possible. - if (alpha == 0 || k == 0) && beta == 1 { - return - } - - if alpha == 0 { - if uplo == blas.Upper { - if beta == 0 { - for i := 0; i < n; i++ { - ci := c[i*ldc+i : i*ldc+n] - for j := range ci { - ci[j] = 0 - } - } - } else { - for i := 0; i < n; i++ { - ci := c[i*ldc+i : i*ldc+n] - ci[0] = complex(beta*real(ci[0]), 0) - if i != n-1 { - c128.DscalUnitary(beta, ci[1:]) - } - } - } - } else { - if beta == 0 { - for i := 0; i < n; i++ { - ci := c[i*ldc : i*ldc+i+1] - for j := range ci { - ci[j] = 0 - } - } - } else { - for i := 0; i < n; i++ { - ci := c[i*ldc : i*ldc+i+1] - if i != 0 { - c128.DscalUnitary(beta, ci[:i]) - } - ci[i] = complex(beta*real(ci[i]), 0) - } - } - } - return - } - - conjalpha := cmplx.Conj(alpha) - cbeta := complex(beta, 0) - if trans == blas.NoTrans { - // Form C = alpha*A*B^H + conj(alpha)*B*A^H + beta*C. 
- if uplo == blas.Upper { - for i := 0; i < n; i++ { - ci := c[i*ldc+i+1 : i*ldc+n] - ai := a[i*lda : i*lda+k] - bi := b[i*ldb : i*ldb+k] - if beta == 0 { - cii := alpha*c128.DotcUnitary(bi, ai) + conjalpha*c128.DotcUnitary(ai, bi) - c[i*ldc+i] = complex(real(cii), 0) - for jc := range ci { - j := i + 1 + jc - ci[jc] = alpha*c128.DotcUnitary(b[j*ldb:j*ldb+k], ai) + conjalpha*c128.DotcUnitary(a[j*lda:j*lda+k], bi) - } - } else { - cii := alpha*c128.DotcUnitary(bi, ai) + conjalpha*c128.DotcUnitary(ai, bi) + cbeta*c[i*ldc+i] - c[i*ldc+i] = complex(real(cii), 0) - for jc, cij := range ci { - j := i + 1 + jc - ci[jc] = alpha*c128.DotcUnitary(b[j*ldb:j*ldb+k], ai) + conjalpha*c128.DotcUnitary(a[j*lda:j*lda+k], bi) + cbeta*cij - } - } - } - } else { - for i := 0; i < n; i++ { - ci := c[i*ldc : i*ldc+i] - ai := a[i*lda : i*lda+k] - bi := b[i*ldb : i*ldb+k] - if beta == 0 { - for j := range ci { - ci[j] = alpha*c128.DotcUnitary(b[j*ldb:j*ldb+k], ai) + conjalpha*c128.DotcUnitary(a[j*lda:j*lda+k], bi) - } - cii := alpha*c128.DotcUnitary(bi, ai) + conjalpha*c128.DotcUnitary(ai, bi) - c[i*ldc+i] = complex(real(cii), 0) - } else { - for j, cij := range ci { - ci[j] = alpha*c128.DotcUnitary(b[j*ldb:j*ldb+k], ai) + conjalpha*c128.DotcUnitary(a[j*lda:j*lda+k], bi) + cbeta*cij - } - cii := alpha*c128.DotcUnitary(bi, ai) + conjalpha*c128.DotcUnitary(ai, bi) + cbeta*c[i*ldc+i] - c[i*ldc+i] = complex(real(cii), 0) - } - } - } - } else { - // Form C = alpha*A^H*B + conj(alpha)*B^H*A + beta*C. - if uplo == blas.Upper { - for i := 0; i < n; i++ { - ci := c[i*ldc+i : i*ldc+n] - switch { - case beta == 0: - for jc := range ci { - ci[jc] = 0 - } - case beta != 1: - c128.DscalUnitary(beta, ci) - ci[0] = complex(real(ci[0]), 0) - default: - ci[0] = complex(real(ci[0]), 0) - } - for j := 0; j < k; j++ { - aji := a[j*lda+i] - bji := b[j*ldb+i] - if aji != 0 { - c128.AxpyUnitary(alpha*cmplx.Conj(aji), b[j*ldb+i:j*ldb+n], ci) - } - if bji != 0 { - c128.AxpyUnitary(conjalpha*cmplx.Conj(bji), a[j*lda+i:j*lda+n], ci) - } - } - ci[0] = complex(real(ci[0]), 0) - } - } else { - for i := 0; i < n; i++ { - ci := c[i*ldc : i*ldc+i+1] - switch { - case beta == 0: - for j := range ci { - ci[j] = 0 - } - case beta != 1: - c128.DscalUnitary(beta, ci) - ci[i] = complex(real(ci[i]), 0) - default: - ci[i] = complex(real(ci[i]), 0) - } - for j := 0; j < k; j++ { - aji := a[j*lda+i] - bji := b[j*ldb+i] - if aji != 0 { - c128.AxpyUnitary(alpha*cmplx.Conj(aji), b[j*ldb:j*ldb+i+1], ci) - } - if bji != 0 { - c128.AxpyUnitary(conjalpha*cmplx.Conj(bji), a[j*lda:j*lda+i+1], ci) - } - } - ci[i] = complex(real(ci[i]), 0) - } - } - } -} - -// Zsymm performs one of the matrix-matrix operations -// C = alpha*A*B + beta*C if side == blas.Left -// C = alpha*B*A + beta*C if side == blas.Right -// where alpha and beta are scalars, A is an m×m or n×n symmetric matrix and B -// and C are m×n matrices. -func (Implementation) Zsymm(side blas.Side, uplo blas.Uplo, m, n int, alpha complex128, a []complex128, lda int, b []complex128, ldb int, beta complex128, c []complex128, ldc int) { - na := m - if side == blas.Right { - na = n - } - switch { - case side != blas.Left && side != blas.Right: - panic(badSide) - case uplo != blas.Lower && uplo != blas.Upper: - panic(badUplo) - case m < 0: - panic(mLT0) - case n < 0: - panic(nLT0) - case lda < max(1, na): - panic(badLdA) - case ldb < max(1, n): - panic(badLdB) - case ldc < max(1, n): - panic(badLdC) - } - - // Quick return if possible. 
- if m == 0 || n == 0 { - return - } - - // For zero matrix size the following slice length checks are trivially satisfied. - if len(a) < lda*(na-1)+na { - panic(shortA) - } - if len(b) < ldb*(m-1)+n { - panic(shortB) - } - if len(c) < ldc*(m-1)+n { - panic(shortC) - } - - // Quick return if possible. - if alpha == 0 && beta == 1 { - return - } - - if alpha == 0 { - if beta == 0 { - for i := 0; i < m; i++ { - ci := c[i*ldc : i*ldc+n] - for j := range ci { - ci[j] = 0 - } - } - } else { - for i := 0; i < m; i++ { - ci := c[i*ldc : i*ldc+n] - c128.ScalUnitary(beta, ci) - } - } - return - } - - if side == blas.Left { - // Form C = alpha*A*B + beta*C. - for i := 0; i < m; i++ { - atmp := alpha * a[i*lda+i] - bi := b[i*ldb : i*ldb+n] - ci := c[i*ldc : i*ldc+n] - if beta == 0 { - for j, bij := range bi { - ci[j] = atmp * bij - } - } else { - for j, bij := range bi { - ci[j] = atmp*bij + beta*ci[j] - } - } - if uplo == blas.Upper { - for k := 0; k < i; k++ { - atmp = alpha * a[k*lda+i] - c128.AxpyUnitary(atmp, b[k*ldb:k*ldb+n], ci) - } - for k := i + 1; k < m; k++ { - atmp = alpha * a[i*lda+k] - c128.AxpyUnitary(atmp, b[k*ldb:k*ldb+n], ci) - } - } else { - for k := 0; k < i; k++ { - atmp = alpha * a[i*lda+k] - c128.AxpyUnitary(atmp, b[k*ldb:k*ldb+n], ci) - } - for k := i + 1; k < m; k++ { - atmp = alpha * a[k*lda+i] - c128.AxpyUnitary(atmp, b[k*ldb:k*ldb+n], ci) - } - } - } - } else { - // Form C = alpha*B*A + beta*C. - if uplo == blas.Upper { - for i := 0; i < m; i++ { - for j := n - 1; j >= 0; j-- { - abij := alpha * b[i*ldb+j] - aj := a[j*lda+j+1 : j*lda+n] - bi := b[i*ldb+j+1 : i*ldb+n] - ci := c[i*ldc+j+1 : i*ldc+n] - var tmp complex128 - for k, ajk := range aj { - ci[k] += abij * ajk - tmp += bi[k] * ajk - } - if beta == 0 { - c[i*ldc+j] = abij*a[j*lda+j] + alpha*tmp - } else { - c[i*ldc+j] = abij*a[j*lda+j] + alpha*tmp + beta*c[i*ldc+j] - } - } - } - } else { - for i := 0; i < m; i++ { - for j := 0; j < n; j++ { - abij := alpha * b[i*ldb+j] - aj := a[j*lda : j*lda+j] - bi := b[i*ldb : i*ldb+j] - ci := c[i*ldc : i*ldc+j] - var tmp complex128 - for k, ajk := range aj { - ci[k] += abij * ajk - tmp += bi[k] * ajk - } - if beta == 0 { - c[i*ldc+j] = abij*a[j*lda+j] + alpha*tmp - } else { - c[i*ldc+j] = abij*a[j*lda+j] + alpha*tmp + beta*c[i*ldc+j] - } - } - } - } - } -} - -// Zsyrk performs one of the symmetric rank-k operations -// C = alpha*A*A^T + beta*C if trans == blas.NoTrans -// C = alpha*A^T*A + beta*C if trans == blas.Trans -// where alpha and beta are scalars, C is an n×n symmetric matrix and A is -// an n×k matrix in the first case and a k×n matrix in the second case. -func (Implementation) Zsyrk(uplo blas.Uplo, trans blas.Transpose, n, k int, alpha complex128, a []complex128, lda int, beta complex128, c []complex128, ldc int) { - var rowA, colA int - switch trans { - default: - panic(badTranspose) - case blas.NoTrans: - rowA, colA = n, k - case blas.Trans: - rowA, colA = k, n - } - switch { - case uplo != blas.Lower && uplo != blas.Upper: - panic(badUplo) - case n < 0: - panic(nLT0) - case k < 0: - panic(kLT0) - case lda < max(1, colA): - panic(badLdA) - case ldc < max(1, n): - panic(badLdC) - } - - // Quick return if possible. - if n == 0 { - return - } - - // For zero matrix size the following slice length checks are trivially satisfied. - if len(a) < (rowA-1)*lda+colA { - panic(shortA) - } - if len(c) < (n-1)*ldc+n { - panic(shortC) - } - - // Quick return if possible. 
- if (alpha == 0 || k == 0) && beta == 1 { - return - } - - if alpha == 0 { - if uplo == blas.Upper { - if beta == 0 { - for i := 0; i < n; i++ { - ci := c[i*ldc+i : i*ldc+n] - for j := range ci { - ci[j] = 0 - } - } - } else { - for i := 0; i < n; i++ { - ci := c[i*ldc+i : i*ldc+n] - c128.ScalUnitary(beta, ci) - } - } - } else { - if beta == 0 { - for i := 0; i < n; i++ { - ci := c[i*ldc : i*ldc+i+1] - for j := range ci { - ci[j] = 0 - } - } - } else { - for i := 0; i < n; i++ { - ci := c[i*ldc : i*ldc+i+1] - c128.ScalUnitary(beta, ci) - } - } - } - return - } - - if trans == blas.NoTrans { - // Form C = alpha*A*A^T + beta*C. - if uplo == blas.Upper { - for i := 0; i < n; i++ { - ci := c[i*ldc+i : i*ldc+n] - ai := a[i*lda : i*lda+k] - for jc, cij := range ci { - j := i + jc - ci[jc] = beta*cij + alpha*c128.DotuUnitary(ai, a[j*lda:j*lda+k]) - } - } - } else { - for i := 0; i < n; i++ { - ci := c[i*ldc : i*ldc+i+1] - ai := a[i*lda : i*lda+k] - for j, cij := range ci { - ci[j] = beta*cij + alpha*c128.DotuUnitary(ai, a[j*lda:j*lda+k]) - } - } - } - } else { - // Form C = alpha*A^T*A + beta*C. - if uplo == blas.Upper { - for i := 0; i < n; i++ { - ci := c[i*ldc+i : i*ldc+n] - switch { - case beta == 0: - for jc := range ci { - ci[jc] = 0 - } - case beta != 1: - for jc := range ci { - ci[jc] *= beta - } - } - for j := 0; j < k; j++ { - aji := a[j*lda+i] - if aji != 0 { - c128.AxpyUnitary(alpha*aji, a[j*lda+i:j*lda+n], ci) - } - } - } - } else { - for i := 0; i < n; i++ { - ci := c[i*ldc : i*ldc+i+1] - switch { - case beta == 0: - for j := range ci { - ci[j] = 0 - } - case beta != 1: - for j := range ci { - ci[j] *= beta - } - } - for j := 0; j < k; j++ { - aji := a[j*lda+i] - if aji != 0 { - c128.AxpyUnitary(alpha*aji, a[j*lda:j*lda+i+1], ci) - } - } - } - } - } -} - -// Zsyr2k performs one of the symmetric rank-2k operations -// C = alpha*A*B^T + alpha*B*A^T + beta*C if trans == blas.NoTrans -// C = alpha*A^T*B + alpha*B^T*A + beta*C if trans == blas.Trans -// where alpha and beta are scalars, C is an n×n symmetric matrix and A and B -// are n×k matrices in the first case and k×n matrices in the second case. -func (Implementation) Zsyr2k(uplo blas.Uplo, trans blas.Transpose, n, k int, alpha complex128, a []complex128, lda int, b []complex128, ldb int, beta complex128, c []complex128, ldc int) { - var row, col int - switch trans { - default: - panic(badTranspose) - case blas.NoTrans: - row, col = n, k - case blas.Trans: - row, col = k, n - } - switch { - case uplo != blas.Lower && uplo != blas.Upper: - panic(badUplo) - case n < 0: - panic(nLT0) - case k < 0: - panic(kLT0) - case lda < max(1, col): - panic(badLdA) - case ldb < max(1, col): - panic(badLdB) - case ldc < max(1, n): - panic(badLdC) - } - - // Quick return if possible. - if n == 0 { - return - } - - // For zero matrix size the following slice length checks are trivially satisfied. - if len(a) < (row-1)*lda+col { - panic(shortA) - } - if len(b) < (row-1)*ldb+col { - panic(shortB) - } - if len(c) < (n-1)*ldc+n { - panic(shortC) - } - - // Quick return if possible. 
- if (alpha == 0 || k == 0) && beta == 1 { - return - } - - if alpha == 0 { - if uplo == blas.Upper { - if beta == 0 { - for i := 0; i < n; i++ { - ci := c[i*ldc+i : i*ldc+n] - for j := range ci { - ci[j] = 0 - } - } - } else { - for i := 0; i < n; i++ { - ci := c[i*ldc+i : i*ldc+n] - c128.ScalUnitary(beta, ci) - } - } - } else { - if beta == 0 { - for i := 0; i < n; i++ { - ci := c[i*ldc : i*ldc+i+1] - for j := range ci { - ci[j] = 0 - } - } - } else { - for i := 0; i < n; i++ { - ci := c[i*ldc : i*ldc+i+1] - c128.ScalUnitary(beta, ci) - } - } - } - return - } - - if trans == blas.NoTrans { - // Form C = alpha*A*B^T + alpha*B*A^T + beta*C. - if uplo == blas.Upper { - for i := 0; i < n; i++ { - ci := c[i*ldc+i : i*ldc+n] - ai := a[i*lda : i*lda+k] - bi := b[i*ldb : i*ldb+k] - if beta == 0 { - for jc := range ci { - j := i + jc - ci[jc] = alpha*c128.DotuUnitary(ai, b[j*ldb:j*ldb+k]) + alpha*c128.DotuUnitary(bi, a[j*lda:j*lda+k]) - } - } else { - for jc, cij := range ci { - j := i + jc - ci[jc] = alpha*c128.DotuUnitary(ai, b[j*ldb:j*ldb+k]) + alpha*c128.DotuUnitary(bi, a[j*lda:j*lda+k]) + beta*cij - } - } - } - } else { - for i := 0; i < n; i++ { - ci := c[i*ldc : i*ldc+i+1] - ai := a[i*lda : i*lda+k] - bi := b[i*ldb : i*ldb+k] - if beta == 0 { - for j := range ci { - ci[j] = alpha*c128.DotuUnitary(ai, b[j*ldb:j*ldb+k]) + alpha*c128.DotuUnitary(bi, a[j*lda:j*lda+k]) - } - } else { - for j, cij := range ci { - ci[j] = alpha*c128.DotuUnitary(ai, b[j*ldb:j*ldb+k]) + alpha*c128.DotuUnitary(bi, a[j*lda:j*lda+k]) + beta*cij - } - } - } - } - } else { - // Form C = alpha*A^T*B + alpha*B^T*A + beta*C. - if uplo == blas.Upper { - for i := 0; i < n; i++ { - ci := c[i*ldc+i : i*ldc+n] - switch { - case beta == 0: - for jc := range ci { - ci[jc] = 0 - } - case beta != 1: - for jc := range ci { - ci[jc] *= beta - } - } - for j := 0; j < k; j++ { - aji := a[j*lda+i] - bji := b[j*ldb+i] - if aji != 0 { - c128.AxpyUnitary(alpha*aji, b[j*ldb+i:j*ldb+n], ci) - } - if bji != 0 { - c128.AxpyUnitary(alpha*bji, a[j*lda+i:j*lda+n], ci) - } - } - } - } else { - for i := 0; i < n; i++ { - ci := c[i*ldc : i*ldc+i+1] - switch { - case beta == 0: - for j := range ci { - ci[j] = 0 - } - case beta != 1: - for j := range ci { - ci[j] *= beta - } - } - for j := 0; j < k; j++ { - aji := a[j*lda+i] - bji := b[j*ldb+i] - if aji != 0 { - c128.AxpyUnitary(alpha*aji, b[j*ldb:j*ldb+i+1], ci) - } - if bji != 0 { - c128.AxpyUnitary(alpha*bji, a[j*lda:j*lda+i+1], ci) - } - } - } - } - } -} - -// Ztrmm performs one of the matrix-matrix operations -// B = alpha * op(A) * B if side == blas.Left, -// B = alpha * B * op(A) if side == blas.Right, -// where alpha is a scalar, B is an m×n matrix, A is a unit, or non-unit, -// upper or lower triangular matrix and op(A) is one of -// op(A) = A if trans == blas.NoTrans, -// op(A) = A^T if trans == blas.Trans, -// op(A) = A^H if trans == blas.ConjTrans. 
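
Because the product above is formed in place in b, a short sketch may clarify the calling convention, and in particular what diag == blas.Unit means: the stored diagonal of A is ignored and taken to be all ones. Illustrative only, with made-up values:

package main

import (
	"fmt"

	"gonum.org/v1/gonum/blas"
	"gonum.org/v1/gonum/blas/gonum"
)

func main() {
	// Illustrative only. With uplo == blas.Upper and diag == blas.Unit the
	// stored diagonal (7, 9) and the strictly lower entry a[2] are never
	// read, so the effective matrix is [[1, 2], [0, 1]].
	a := []complex128{
		7, 2,
		-5, 9,
	}
	b := []complex128{ // overwritten with alpha * A * B
		1, 0,
		0, 1,
	}

	var impl gonum.Implementation
	impl.Ztrmm(blas.Left, blas.Upper, blas.NoTrans, blas.Unit, 2, 2, 1, a, 2, b, 2)
	fmt.Println(b) // [(1+0i) (2+0i) (0+0i) (1+0i)]
}
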
-func (Implementation) Ztrmm(side blas.Side, uplo blas.Uplo, trans blas.Transpose, diag blas.Diag, m, n int, alpha complex128, a []complex128, lda int, b []complex128, ldb int) { - na := m - if side == blas.Right { - na = n - } - switch { - case side != blas.Left && side != blas.Right: - panic(badSide) - case uplo != blas.Lower && uplo != blas.Upper: - panic(badUplo) - case trans != blas.NoTrans && trans != blas.Trans && trans != blas.ConjTrans: - panic(badTranspose) - case diag != blas.Unit && diag != blas.NonUnit: - panic(badDiag) - case m < 0: - panic(mLT0) - case n < 0: - panic(nLT0) - case lda < max(1, na): - panic(badLdA) - case ldb < max(1, n): - panic(badLdB) - } - - // Quick return if possible. - if m == 0 || n == 0 { - return - } - - // For zero matrix size the following slice length checks are trivially satisfied. - if len(a) < (na-1)*lda+na { - panic(shortA) - } - if len(b) < (m-1)*ldb+n { - panic(shortB) - } - - // Quick return if possible. - if alpha == 0 { - for i := 0; i < m; i++ { - bi := b[i*ldb : i*ldb+n] - for j := range bi { - bi[j] = 0 - } - } - return - } - - noConj := trans != blas.ConjTrans - noUnit := diag == blas.NonUnit - if side == blas.Left { - if trans == blas.NoTrans { - // Form B = alpha*A*B. - if uplo == blas.Upper { - for i := 0; i < m; i++ { - aii := alpha - if noUnit { - aii *= a[i*lda+i] - } - bi := b[i*ldb : i*ldb+n] - for j := range bi { - bi[j] *= aii - } - for ja, aij := range a[i*lda+i+1 : i*lda+m] { - j := ja + i + 1 - if aij != 0 { - c128.AxpyUnitary(alpha*aij, b[j*ldb:j*ldb+n], bi) - } - } - } - } else { - for i := m - 1; i >= 0; i-- { - aii := alpha - if noUnit { - aii *= a[i*lda+i] - } - bi := b[i*ldb : i*ldb+n] - for j := range bi { - bi[j] *= aii - } - for j, aij := range a[i*lda : i*lda+i] { - if aij != 0 { - c128.AxpyUnitary(alpha*aij, b[j*ldb:j*ldb+n], bi) - } - } - } - } - } else { - // Form B = alpha*A^T*B or B = alpha*A^H*B. - if uplo == blas.Upper { - for k := m - 1; k >= 0; k-- { - bk := b[k*ldb : k*ldb+n] - for ja, ajk := range a[k*lda+k+1 : k*lda+m] { - if ajk == 0 { - continue - } - j := k + 1 + ja - if noConj { - c128.AxpyUnitary(alpha*ajk, bk, b[j*ldb:j*ldb+n]) - } else { - c128.AxpyUnitary(alpha*cmplx.Conj(ajk), bk, b[j*ldb:j*ldb+n]) - } - } - akk := alpha - if noUnit { - if noConj { - akk *= a[k*lda+k] - } else { - akk *= cmplx.Conj(a[k*lda+k]) - } - } - if akk != 1 { - c128.ScalUnitary(akk, bk) - } - } - } else { - for k := 0; k < m; k++ { - bk := b[k*ldb : k*ldb+n] - for j, ajk := range a[k*lda : k*lda+k] { - if ajk == 0 { - continue - } - if noConj { - c128.AxpyUnitary(alpha*ajk, bk, b[j*ldb:j*ldb+n]) - } else { - c128.AxpyUnitary(alpha*cmplx.Conj(ajk), bk, b[j*ldb:j*ldb+n]) - } - } - akk := alpha - if noUnit { - if noConj { - akk *= a[k*lda+k] - } else { - akk *= cmplx.Conj(a[k*lda+k]) - } - } - if akk != 1 { - c128.ScalUnitary(akk, bk) - } - } - } - } - } else { - if trans == blas.NoTrans { - // Form B = alpha*B*A. - if uplo == blas.Upper { - for i := 0; i < m; i++ { - bi := b[i*ldb : i*ldb+n] - for k := n - 1; k >= 0; k-- { - abik := alpha * bi[k] - if abik == 0 { - continue - } - bi[k] = abik - if noUnit { - bi[k] *= a[k*lda+k] - } - c128.AxpyUnitary(abik, a[k*lda+k+1:k*lda+n], bi[k+1:]) - } - } - } else { - for i := 0; i < m; i++ { - bi := b[i*ldb : i*ldb+n] - for k := 0; k < n; k++ { - abik := alpha * bi[k] - if abik == 0 { - continue - } - bi[k] = abik - if noUnit { - bi[k] *= a[k*lda+k] - } - c128.AxpyUnitary(abik, a[k*lda:k*lda+k], bi[:k]) - } - } - } - } else { - // Form B = alpha*B*A^T or B = alpha*B*A^H. 
- if uplo == blas.Upper { - for i := 0; i < m; i++ { - bi := b[i*ldb : i*ldb+n] - for j, bij := range bi { - if noConj { - if noUnit { - bij *= a[j*lda+j] - } - bij += c128.DotuUnitary(a[j*lda+j+1:j*lda+n], bi[j+1:n]) - } else { - if noUnit { - bij *= cmplx.Conj(a[j*lda+j]) - } - bij += c128.DotcUnitary(a[j*lda+j+1:j*lda+n], bi[j+1:n]) - } - bi[j] = alpha * bij - } - } - } else { - for i := 0; i < m; i++ { - bi := b[i*ldb : i*ldb+n] - for j := n - 1; j >= 0; j-- { - bij := bi[j] - if noConj { - if noUnit { - bij *= a[j*lda+j] - } - bij += c128.DotuUnitary(a[j*lda:j*lda+j], bi[:j]) - } else { - if noUnit { - bij *= cmplx.Conj(a[j*lda+j]) - } - bij += c128.DotcUnitary(a[j*lda:j*lda+j], bi[:j]) - } - bi[j] = alpha * bij - } - } - } - } - } -} - -// Ztrsm solves one of the matrix equations -// op(A) * X = alpha * B if side == blas.Left, -// X * op(A) = alpha * B if side == blas.Right, -// where alpha is a scalar, X and B are m×n matrices, A is a unit or -// non-unit, upper or lower triangular matrix and op(A) is one of -// op(A) = A if transA == blas.NoTrans, -// op(A) = A^T if transA == blas.Trans, -// op(A) = A^H if transA == blas.ConjTrans. -// On return the matrix X is overwritten on B. -func (Implementation) Ztrsm(side blas.Side, uplo blas.Uplo, transA blas.Transpose, diag blas.Diag, m, n int, alpha complex128, a []complex128, lda int, b []complex128, ldb int) { - na := m - if side == blas.Right { - na = n - } - switch { - case side != blas.Left && side != blas.Right: - panic(badSide) - case uplo != blas.Lower && uplo != blas.Upper: - panic(badUplo) - case transA != blas.NoTrans && transA != blas.Trans && transA != blas.ConjTrans: - panic(badTranspose) - case diag != blas.Unit && diag != blas.NonUnit: - panic(badDiag) - case m < 0: - panic(mLT0) - case n < 0: - panic(nLT0) - case lda < max(1, na): - panic(badLdA) - case ldb < max(1, n): - panic(badLdB) - } - - // Quick return if possible. - if m == 0 || n == 0 { - return - } - - // For zero matrix size the following slice length checks are trivially satisfied. - if len(a) < (na-1)*lda+na { - panic(shortA) - } - if len(b) < (m-1)*ldb+n { - panic(shortB) - } - - if alpha == 0 { - for i := 0; i < m; i++ { - for j := 0; j < n; j++ { - b[i*ldb+j] = 0 - } - } - return - } - - noConj := transA != blas.ConjTrans - noUnit := diag == blas.NonUnit - if side == blas.Left { - if transA == blas.NoTrans { - // Form B = alpha*inv(A)*B. - if uplo == blas.Upper { - for i := m - 1; i >= 0; i-- { - bi := b[i*ldb : i*ldb+n] - if alpha != 1 { - c128.ScalUnitary(alpha, bi) - } - for ka, aik := range a[i*lda+i+1 : i*lda+m] { - k := i + 1 + ka - if aik != 0 { - c128.AxpyUnitary(-aik, b[k*ldb:k*ldb+n], bi) - } - } - if noUnit { - c128.ScalUnitary(1/a[i*lda+i], bi) - } - } - } else { - for i := 0; i < m; i++ { - bi := b[i*ldb : i*ldb+n] - if alpha != 1 { - c128.ScalUnitary(alpha, bi) - } - for j, aij := range a[i*lda : i*lda+i] { - if aij != 0 { - c128.AxpyUnitary(-aij, b[j*ldb:j*ldb+n], bi) - } - } - if noUnit { - c128.ScalUnitary(1/a[i*lda+i], bi) - } - } - } - } else { - // Form B = alpha*inv(A^T)*B or B = alpha*inv(A^H)*B. 
- if uplo == blas.Upper { - for i := 0; i < m; i++ { - bi := b[i*ldb : i*ldb+n] - if noUnit { - if noConj { - c128.ScalUnitary(1/a[i*lda+i], bi) - } else { - c128.ScalUnitary(1/cmplx.Conj(a[i*lda+i]), bi) - } - } - for ja, aij := range a[i*lda+i+1 : i*lda+m] { - if aij == 0 { - continue - } - j := i + 1 + ja - if noConj { - c128.AxpyUnitary(-aij, bi, b[j*ldb:j*ldb+n]) - } else { - c128.AxpyUnitary(-cmplx.Conj(aij), bi, b[j*ldb:j*ldb+n]) - } - } - if alpha != 1 { - c128.ScalUnitary(alpha, bi) - } - } - } else { - for i := m - 1; i >= 0; i-- { - bi := b[i*ldb : i*ldb+n] - if noUnit { - if noConj { - c128.ScalUnitary(1/a[i*lda+i], bi) - } else { - c128.ScalUnitary(1/cmplx.Conj(a[i*lda+i]), bi) - } - } - for j, aij := range a[i*lda : i*lda+i] { - if aij == 0 { - continue - } - if noConj { - c128.AxpyUnitary(-aij, bi, b[j*ldb:j*ldb+n]) - } else { - c128.AxpyUnitary(-cmplx.Conj(aij), bi, b[j*ldb:j*ldb+n]) - } - } - if alpha != 1 { - c128.ScalUnitary(alpha, bi) - } - } - } - } - } else { - if transA == blas.NoTrans { - // Form B = alpha*B*inv(A). - if uplo == blas.Upper { - for i := 0; i < m; i++ { - bi := b[i*ldb : i*ldb+n] - if alpha != 1 { - c128.ScalUnitary(alpha, bi) - } - for j, bij := range bi { - if bij == 0 { - continue - } - if noUnit { - bi[j] /= a[j*lda+j] - } - c128.AxpyUnitary(-bi[j], a[j*lda+j+1:j*lda+n], bi[j+1:n]) - } - } - } else { - for i := 0; i < m; i++ { - bi := b[i*ldb : i*ldb+n] - if alpha != 1 { - c128.ScalUnitary(alpha, bi) - } - for j := n - 1; j >= 0; j-- { - if bi[j] == 0 { - continue - } - if noUnit { - bi[j] /= a[j*lda+j] - } - c128.AxpyUnitary(-bi[j], a[j*lda:j*lda+j], bi[:j]) - } - } - } - } else { - // Form B = alpha*B*inv(A^T) or B = alpha*B*inv(A^H). - if uplo == blas.Upper { - for i := 0; i < m; i++ { - bi := b[i*ldb : i*ldb+n] - for j := n - 1; j >= 0; j-- { - bij := alpha * bi[j] - if noConj { - bij -= c128.DotuUnitary(a[j*lda+j+1:j*lda+n], bi[j+1:n]) - if noUnit { - bij /= a[j*lda+j] - } - } else { - bij -= c128.DotcUnitary(a[j*lda+j+1:j*lda+n], bi[j+1:n]) - if noUnit { - bij /= cmplx.Conj(a[j*lda+j]) - } - } - bi[j] = bij - } - } - } else { - for i := 0; i < m; i++ { - bi := b[i*ldb : i*ldb+n] - for j, bij := range bi { - bij *= alpha - if noConj { - bij -= c128.DotuUnitary(a[j*lda:j*lda+j], bi[:j]) - if noUnit { - bij /= a[j*lda+j] - } - } else { - bij -= c128.DotcUnitary(a[j*lda:j*lda+j], bi[:j]) - if noUnit { - bij /= cmplx.Conj(a[j*lda+j]) - } - } - bi[j] = bij - } - } - } - } - } -} diff --git a/vendor/gonum.org/v1/gonum/blas/gonum/level3cmplx64.go b/vendor/gonum.org/v1/gonum/blas/gonum/level3cmplx64.go deleted file mode 100644 index 436c54506..000000000 --- a/vendor/gonum.org/v1/gonum/blas/gonum/level3cmplx64.go +++ /dev/null @@ -1,1735 +0,0 @@ -// Code generated by "go generate gonum.org/v1/gonum/blas/gonum”; DO NOT EDIT. - -// Copyright ©2019 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import ( - cmplx "gonum.org/v1/gonum/internal/cmplx64" - - "gonum.org/v1/gonum/blas" - "gonum.org/v1/gonum/internal/asm/c64" -) - -var _ blas.Complex64Level3 = Implementation{} - -// Cgemm performs one of the matrix-matrix operations -// C = alpha * op(A) * op(B) + beta * C -// where op(X) is one of -// op(X) = X or op(X) = X^T or op(X) = X^H, -// alpha and beta are scalars, and A, B and C are matrices, with op(A) an m×k matrix, -// op(B) a k×n matrix and C an m×n matrix. -// -// Complex64 implementations are autogenerated and not directly tested. 
-func (Implementation) Cgemm(tA, tB blas.Transpose, m, n, k int, alpha complex64, a []complex64, lda int, b []complex64, ldb int, beta complex64, c []complex64, ldc int) { - switch tA { - default: - panic(badTranspose) - case blas.NoTrans, blas.Trans, blas.ConjTrans: - } - switch tB { - default: - panic(badTranspose) - case blas.NoTrans, blas.Trans, blas.ConjTrans: - } - switch { - case m < 0: - panic(mLT0) - case n < 0: - panic(nLT0) - case k < 0: - panic(kLT0) - } - rowA, colA := m, k - if tA != blas.NoTrans { - rowA, colA = k, m - } - if lda < max(1, colA) { - panic(badLdA) - } - rowB, colB := k, n - if tB != blas.NoTrans { - rowB, colB = n, k - } - if ldb < max(1, colB) { - panic(badLdB) - } - if ldc < max(1, n) { - panic(badLdC) - } - - // Quick return if possible. - if m == 0 || n == 0 { - return - } - - // For zero matrix size the following slice length checks are trivially satisfied. - if len(a) < (rowA-1)*lda+colA { - panic(shortA) - } - if len(b) < (rowB-1)*ldb+colB { - panic(shortB) - } - if len(c) < (m-1)*ldc+n { - panic(shortC) - } - - // Quick return if possible. - if (alpha == 0 || k == 0) && beta == 1 { - return - } - - if alpha == 0 { - if beta == 0 { - for i := 0; i < m; i++ { - for j := 0; j < n; j++ { - c[i*ldc+j] = 0 - } - } - } else { - for i := 0; i < m; i++ { - for j := 0; j < n; j++ { - c[i*ldc+j] *= beta - } - } - } - return - } - - switch tA { - case blas.NoTrans: - switch tB { - case blas.NoTrans: - // Form C = alpha * A * B + beta * C. - for i := 0; i < m; i++ { - switch { - case beta == 0: - for j := 0; j < n; j++ { - c[i*ldc+j] = 0 - } - case beta != 1: - for j := 0; j < n; j++ { - c[i*ldc+j] *= beta - } - } - for l := 0; l < k; l++ { - tmp := alpha * a[i*lda+l] - for j := 0; j < n; j++ { - c[i*ldc+j] += tmp * b[l*ldb+j] - } - } - } - case blas.Trans: - // Form C = alpha * A * B^T + beta * C. - for i := 0; i < m; i++ { - switch { - case beta == 0: - for j := 0; j < n; j++ { - c[i*ldc+j] = 0 - } - case beta != 1: - for j := 0; j < n; j++ { - c[i*ldc+j] *= beta - } - } - for l := 0; l < k; l++ { - tmp := alpha * a[i*lda+l] - for j := 0; j < n; j++ { - c[i*ldc+j] += tmp * b[j*ldb+l] - } - } - } - case blas.ConjTrans: - // Form C = alpha * A * B^H + beta * C. - for i := 0; i < m; i++ { - switch { - case beta == 0: - for j := 0; j < n; j++ { - c[i*ldc+j] = 0 - } - case beta != 1: - for j := 0; j < n; j++ { - c[i*ldc+j] *= beta - } - } - for l := 0; l < k; l++ { - tmp := alpha * a[i*lda+l] - for j := 0; j < n; j++ { - c[i*ldc+j] += tmp * cmplx.Conj(b[j*ldb+l]) - } - } - } - } - case blas.Trans: - switch tB { - case blas.NoTrans: - // Form C = alpha * A^T * B + beta * C. - for i := 0; i < m; i++ { - for j := 0; j < n; j++ { - var tmp complex64 - for l := 0; l < k; l++ { - tmp += a[l*lda+i] * b[l*ldb+j] - } - if beta == 0 { - c[i*ldc+j] = alpha * tmp - } else { - c[i*ldc+j] = alpha*tmp + beta*c[i*ldc+j] - } - } - } - case blas.Trans: - // Form C = alpha * A^T * B^T + beta * C. - for i := 0; i < m; i++ { - for j := 0; j < n; j++ { - var tmp complex64 - for l := 0; l < k; l++ { - tmp += a[l*lda+i] * b[j*ldb+l] - } - if beta == 0 { - c[i*ldc+j] = alpha * tmp - } else { - c[i*ldc+j] = alpha*tmp + beta*c[i*ldc+j] - } - } - } - case blas.ConjTrans: - // Form C = alpha * A^T * B^H + beta * C. 
- for i := 0; i < m; i++ { - for j := 0; j < n; j++ { - var tmp complex64 - for l := 0; l < k; l++ { - tmp += a[l*lda+i] * cmplx.Conj(b[j*ldb+l]) - } - if beta == 0 { - c[i*ldc+j] = alpha * tmp - } else { - c[i*ldc+j] = alpha*tmp + beta*c[i*ldc+j] - } - } - } - } - case blas.ConjTrans: - switch tB { - case blas.NoTrans: - // Form C = alpha * A^H * B + beta * C. - for i := 0; i < m; i++ { - for j := 0; j < n; j++ { - var tmp complex64 - for l := 0; l < k; l++ { - tmp += cmplx.Conj(a[l*lda+i]) * b[l*ldb+j] - } - if beta == 0 { - c[i*ldc+j] = alpha * tmp - } else { - c[i*ldc+j] = alpha*tmp + beta*c[i*ldc+j] - } - } - } - case blas.Trans: - // Form C = alpha * A^H * B^T + beta * C. - for i := 0; i < m; i++ { - for j := 0; j < n; j++ { - var tmp complex64 - for l := 0; l < k; l++ { - tmp += cmplx.Conj(a[l*lda+i]) * b[j*ldb+l] - } - if beta == 0 { - c[i*ldc+j] = alpha * tmp - } else { - c[i*ldc+j] = alpha*tmp + beta*c[i*ldc+j] - } - } - } - case blas.ConjTrans: - // Form C = alpha * A^H * B^H + beta * C. - for i := 0; i < m; i++ { - for j := 0; j < n; j++ { - var tmp complex64 - for l := 0; l < k; l++ { - tmp += cmplx.Conj(a[l*lda+i]) * cmplx.Conj(b[j*ldb+l]) - } - if beta == 0 { - c[i*ldc+j] = alpha * tmp - } else { - c[i*ldc+j] = alpha*tmp + beta*c[i*ldc+j] - } - } - } - } - } -} - -// Chemm performs one of the matrix-matrix operations -// C = alpha*A*B + beta*C if side == blas.Left -// C = alpha*B*A + beta*C if side == blas.Right -// where alpha and beta are scalars, A is an m×m or n×n hermitian matrix and B -// and C are m×n matrices. The imaginary parts of the diagonal elements of A are -// assumed to be zero. -// -// Complex64 implementations are autogenerated and not directly tested. -func (Implementation) Chemm(side blas.Side, uplo blas.Uplo, m, n int, alpha complex64, a []complex64, lda int, b []complex64, ldb int, beta complex64, c []complex64, ldc int) { - na := m - if side == blas.Right { - na = n - } - switch { - case side != blas.Left && side != blas.Right: - panic(badSide) - case uplo != blas.Lower && uplo != blas.Upper: - panic(badUplo) - case m < 0: - panic(mLT0) - case n < 0: - panic(nLT0) - case lda < max(1, na): - panic(badLdA) - case ldb < max(1, n): - panic(badLdB) - case ldc < max(1, n): - panic(badLdC) - } - - // Quick return if possible. - if m == 0 || n == 0 { - return - } - - // For zero matrix size the following slice length checks are trivially satisfied. - if len(a) < lda*(na-1)+na { - panic(shortA) - } - if len(b) < ldb*(m-1)+n { - panic(shortB) - } - if len(c) < ldc*(m-1)+n { - panic(shortC) - } - - // Quick return if possible. - if alpha == 0 && beta == 1 { - return - } - - if alpha == 0 { - if beta == 0 { - for i := 0; i < m; i++ { - ci := c[i*ldc : i*ldc+n] - for j := range ci { - ci[j] = 0 - } - } - } else { - for i := 0; i < m; i++ { - ci := c[i*ldc : i*ldc+n] - c64.ScalUnitary(beta, ci) - } - } - return - } - - if side == blas.Left { - // Form C = alpha*A*B + beta*C. 
- for i := 0; i < m; i++ { - atmp := alpha * complex(real(a[i*lda+i]), 0) - bi := b[i*ldb : i*ldb+n] - ci := c[i*ldc : i*ldc+n] - if beta == 0 { - for j, bij := range bi { - ci[j] = atmp * bij - } - } else { - for j, bij := range bi { - ci[j] = atmp*bij + beta*ci[j] - } - } - if uplo == blas.Upper { - for k := 0; k < i; k++ { - atmp = alpha * cmplx.Conj(a[k*lda+i]) - c64.AxpyUnitary(atmp, b[k*ldb:k*ldb+n], ci) - } - for k := i + 1; k < m; k++ { - atmp = alpha * a[i*lda+k] - c64.AxpyUnitary(atmp, b[k*ldb:k*ldb+n], ci) - } - } else { - for k := 0; k < i; k++ { - atmp = alpha * a[i*lda+k] - c64.AxpyUnitary(atmp, b[k*ldb:k*ldb+n], ci) - } - for k := i + 1; k < m; k++ { - atmp = alpha * cmplx.Conj(a[k*lda+i]) - c64.AxpyUnitary(atmp, b[k*ldb:k*ldb+n], ci) - } - } - } - } else { - // Form C = alpha*B*A + beta*C. - if uplo == blas.Upper { - for i := 0; i < m; i++ { - for j := n - 1; j >= 0; j-- { - abij := alpha * b[i*ldb+j] - aj := a[j*lda+j+1 : j*lda+n] - bi := b[i*ldb+j+1 : i*ldb+n] - ci := c[i*ldc+j+1 : i*ldc+n] - var tmp complex64 - for k, ajk := range aj { - ci[k] += abij * ajk - tmp += bi[k] * cmplx.Conj(ajk) - } - ajj := complex(real(a[j*lda+j]), 0) - if beta == 0 { - c[i*ldc+j] = abij*ajj + alpha*tmp - } else { - c[i*ldc+j] = abij*ajj + alpha*tmp + beta*c[i*ldc+j] - } - } - } - } else { - for i := 0; i < m; i++ { - for j := 0; j < n; j++ { - abij := alpha * b[i*ldb+j] - aj := a[j*lda : j*lda+j] - bi := b[i*ldb : i*ldb+j] - ci := c[i*ldc : i*ldc+j] - var tmp complex64 - for k, ajk := range aj { - ci[k] += abij * ajk - tmp += bi[k] * cmplx.Conj(ajk) - } - ajj := complex(real(a[j*lda+j]), 0) - if beta == 0 { - c[i*ldc+j] = abij*ajj + alpha*tmp - } else { - c[i*ldc+j] = abij*ajj + alpha*tmp + beta*c[i*ldc+j] - } - } - } - } - } -} - -// Cherk performs one of the hermitian rank-k operations -// C = alpha*A*A^H + beta*C if trans == blas.NoTrans -// C = alpha*A^H*A + beta*C if trans == blas.ConjTrans -// where alpha and beta are real scalars, C is an n×n hermitian matrix and A is -// an n×k matrix in the first case and a k×n matrix in the second case. -// -// The imaginary parts of the diagonal elements of C are assumed to be zero, and -// on return they will be set to zero. -// -// Complex64 implementations are autogenerated and not directly tested. -func (Implementation) Cherk(uplo blas.Uplo, trans blas.Transpose, n, k int, alpha float32, a []complex64, lda int, beta float32, c []complex64, ldc int) { - var rowA, colA int - switch trans { - default: - panic(badTranspose) - case blas.NoTrans: - rowA, colA = n, k - case blas.ConjTrans: - rowA, colA = k, n - } - switch { - case uplo != blas.Lower && uplo != blas.Upper: - panic(badUplo) - case n < 0: - panic(nLT0) - case k < 0: - panic(kLT0) - case lda < max(1, colA): - panic(badLdA) - case ldc < max(1, n): - panic(badLdC) - } - - // Quick return if possible. - if n == 0 { - return - } - - // For zero matrix size the following slice length checks are trivially satisfied. - if len(a) < (rowA-1)*lda+colA { - panic(shortA) - } - if len(c) < (n-1)*ldc+n { - panic(shortC) - } - - // Quick return if possible. 
- if (alpha == 0 || k == 0) && beta == 1 { - return - } - - if alpha == 0 { - if uplo == blas.Upper { - if beta == 0 { - for i := 0; i < n; i++ { - ci := c[i*ldc+i : i*ldc+n] - for j := range ci { - ci[j] = 0 - } - } - } else { - for i := 0; i < n; i++ { - ci := c[i*ldc+i : i*ldc+n] - ci[0] = complex(beta*real(ci[0]), 0) - if i != n-1 { - c64.SscalUnitary(beta, ci[1:]) - } - } - } - } else { - if beta == 0 { - for i := 0; i < n; i++ { - ci := c[i*ldc : i*ldc+i+1] - for j := range ci { - ci[j] = 0 - } - } - } else { - for i := 0; i < n; i++ { - ci := c[i*ldc : i*ldc+i+1] - if i != 0 { - c64.SscalUnitary(beta, ci[:i]) - } - ci[i] = complex(beta*real(ci[i]), 0) - } - } - } - return - } - - calpha := complex(alpha, 0) - if trans == blas.NoTrans { - // Form C = alpha*A*A^H + beta*C. - cbeta := complex(beta, 0) - if uplo == blas.Upper { - for i := 0; i < n; i++ { - ci := c[i*ldc+i : i*ldc+n] - ai := a[i*lda : i*lda+k] - switch { - case beta == 0: - // Handle the i-th diagonal element of C. - ci[0] = complex(alpha*real(c64.DotcUnitary(ai, ai)), 0) - // Handle the remaining elements on the i-th row of C. - for jc := range ci[1:] { - j := i + 1 + jc - ci[jc+1] = calpha * c64.DotcUnitary(a[j*lda:j*lda+k], ai) - } - case beta != 1: - cii := calpha*c64.DotcUnitary(ai, ai) + cbeta*ci[0] - ci[0] = complex(real(cii), 0) - for jc, cij := range ci[1:] { - j := i + 1 + jc - ci[jc+1] = calpha*c64.DotcUnitary(a[j*lda:j*lda+k], ai) + cbeta*cij - } - default: - cii := calpha*c64.DotcUnitary(ai, ai) + ci[0] - ci[0] = complex(real(cii), 0) - for jc, cij := range ci[1:] { - j := i + 1 + jc - ci[jc+1] = calpha*c64.DotcUnitary(a[j*lda:j*lda+k], ai) + cij - } - } - } - } else { - for i := 0; i < n; i++ { - ci := c[i*ldc : i*ldc+i+1] - ai := a[i*lda : i*lda+k] - switch { - case beta == 0: - // Handle the first i-1 elements on the i-th row of C. - for j := range ci[:i] { - ci[j] = calpha * c64.DotcUnitary(a[j*lda:j*lda+k], ai) - } - // Handle the i-th diagonal element of C. - ci[i] = complex(alpha*real(c64.DotcUnitary(ai, ai)), 0) - case beta != 1: - for j, cij := range ci[:i] { - ci[j] = calpha*c64.DotcUnitary(a[j*lda:j*lda+k], ai) + cbeta*cij - } - cii := calpha*c64.DotcUnitary(ai, ai) + cbeta*ci[i] - ci[i] = complex(real(cii), 0) - default: - for j, cij := range ci[:i] { - ci[j] = calpha*c64.DotcUnitary(a[j*lda:j*lda+k], ai) + cij - } - cii := calpha*c64.DotcUnitary(ai, ai) + ci[i] - ci[i] = complex(real(cii), 0) - } - } - } - } else { - // Form C = alpha*A^H*A + beta*C. 
- if uplo == blas.Upper { - for i := 0; i < n; i++ { - ci := c[i*ldc+i : i*ldc+n] - switch { - case beta == 0: - for jc := range ci { - ci[jc] = 0 - } - case beta != 1: - c64.SscalUnitary(beta, ci) - ci[0] = complex(real(ci[0]), 0) - default: - ci[0] = complex(real(ci[0]), 0) - } - for j := 0; j < k; j++ { - aji := cmplx.Conj(a[j*lda+i]) - if aji != 0 { - c64.AxpyUnitary(calpha*aji, a[j*lda+i:j*lda+n], ci) - } - } - c[i*ldc+i] = complex(real(c[i*ldc+i]), 0) - } - } else { - for i := 0; i < n; i++ { - ci := c[i*ldc : i*ldc+i+1] - switch { - case beta == 0: - for j := range ci { - ci[j] = 0 - } - case beta != 1: - c64.SscalUnitary(beta, ci) - ci[i] = complex(real(ci[i]), 0) - default: - ci[i] = complex(real(ci[i]), 0) - } - for j := 0; j < k; j++ { - aji := cmplx.Conj(a[j*lda+i]) - if aji != 0 { - c64.AxpyUnitary(calpha*aji, a[j*lda:j*lda+i+1], ci) - } - } - c[i*ldc+i] = complex(real(c[i*ldc+i]), 0) - } - } - } -} - -// Cher2k performs one of the hermitian rank-2k operations -// C = alpha*A*B^H + conj(alpha)*B*A^H + beta*C if trans == blas.NoTrans -// C = alpha*A^H*B + conj(alpha)*B^H*A + beta*C if trans == blas.ConjTrans -// where alpha and beta are scalars with beta real, C is an n×n hermitian matrix -// and A and B are n×k matrices in the first case and k×n matrices in the second case. -// -// The imaginary parts of the diagonal elements of C are assumed to be zero, and -// on return they will be set to zero. -// -// Complex64 implementations are autogenerated and not directly tested. -func (Implementation) Cher2k(uplo blas.Uplo, trans blas.Transpose, n, k int, alpha complex64, a []complex64, lda int, b []complex64, ldb int, beta float32, c []complex64, ldc int) { - var row, col int - switch trans { - default: - panic(badTranspose) - case blas.NoTrans: - row, col = n, k - case blas.ConjTrans: - row, col = k, n - } - switch { - case uplo != blas.Lower && uplo != blas.Upper: - panic(badUplo) - case n < 0: - panic(nLT0) - case k < 0: - panic(kLT0) - case lda < max(1, col): - panic(badLdA) - case ldb < max(1, col): - panic(badLdB) - case ldc < max(1, n): - panic(badLdC) - } - - // Quick return if possible. - if n == 0 { - return - } - - // For zero matrix size the following slice length checks are trivially satisfied. - if len(a) < (row-1)*lda+col { - panic(shortA) - } - if len(b) < (row-1)*ldb+col { - panic(shortB) - } - if len(c) < (n-1)*ldc+n { - panic(shortC) - } - - // Quick return if possible. - if (alpha == 0 || k == 0) && beta == 1 { - return - } - - if alpha == 0 { - if uplo == blas.Upper { - if beta == 0 { - for i := 0; i < n; i++ { - ci := c[i*ldc+i : i*ldc+n] - for j := range ci { - ci[j] = 0 - } - } - } else { - for i := 0; i < n; i++ { - ci := c[i*ldc+i : i*ldc+n] - ci[0] = complex(beta*real(ci[0]), 0) - if i != n-1 { - c64.SscalUnitary(beta, ci[1:]) - } - } - } - } else { - if beta == 0 { - for i := 0; i < n; i++ { - ci := c[i*ldc : i*ldc+i+1] - for j := range ci { - ci[j] = 0 - } - } - } else { - for i := 0; i < n; i++ { - ci := c[i*ldc : i*ldc+i+1] - if i != 0 { - c64.SscalUnitary(beta, ci[:i]) - } - ci[i] = complex(beta*real(ci[i]), 0) - } - } - } - return - } - - conjalpha := cmplx.Conj(alpha) - cbeta := complex(beta, 0) - if trans == blas.NoTrans { - // Form C = alpha*A*B^H + conj(alpha)*B*A^H + beta*C. 
- if uplo == blas.Upper { - for i := 0; i < n; i++ { - ci := c[i*ldc+i+1 : i*ldc+n] - ai := a[i*lda : i*lda+k] - bi := b[i*ldb : i*ldb+k] - if beta == 0 { - cii := alpha*c64.DotcUnitary(bi, ai) + conjalpha*c64.DotcUnitary(ai, bi) - c[i*ldc+i] = complex(real(cii), 0) - for jc := range ci { - j := i + 1 + jc - ci[jc] = alpha*c64.DotcUnitary(b[j*ldb:j*ldb+k], ai) + conjalpha*c64.DotcUnitary(a[j*lda:j*lda+k], bi) - } - } else { - cii := alpha*c64.DotcUnitary(bi, ai) + conjalpha*c64.DotcUnitary(ai, bi) + cbeta*c[i*ldc+i] - c[i*ldc+i] = complex(real(cii), 0) - for jc, cij := range ci { - j := i + 1 + jc - ci[jc] = alpha*c64.DotcUnitary(b[j*ldb:j*ldb+k], ai) + conjalpha*c64.DotcUnitary(a[j*lda:j*lda+k], bi) + cbeta*cij - } - } - } - } else { - for i := 0; i < n; i++ { - ci := c[i*ldc : i*ldc+i] - ai := a[i*lda : i*lda+k] - bi := b[i*ldb : i*ldb+k] - if beta == 0 { - for j := range ci { - ci[j] = alpha*c64.DotcUnitary(b[j*ldb:j*ldb+k], ai) + conjalpha*c64.DotcUnitary(a[j*lda:j*lda+k], bi) - } - cii := alpha*c64.DotcUnitary(bi, ai) + conjalpha*c64.DotcUnitary(ai, bi) - c[i*ldc+i] = complex(real(cii), 0) - } else { - for j, cij := range ci { - ci[j] = alpha*c64.DotcUnitary(b[j*ldb:j*ldb+k], ai) + conjalpha*c64.DotcUnitary(a[j*lda:j*lda+k], bi) + cbeta*cij - } - cii := alpha*c64.DotcUnitary(bi, ai) + conjalpha*c64.DotcUnitary(ai, bi) + cbeta*c[i*ldc+i] - c[i*ldc+i] = complex(real(cii), 0) - } - } - } - } else { - // Form C = alpha*A^H*B + conj(alpha)*B^H*A + beta*C. - if uplo == blas.Upper { - for i := 0; i < n; i++ { - ci := c[i*ldc+i : i*ldc+n] - switch { - case beta == 0: - for jc := range ci { - ci[jc] = 0 - } - case beta != 1: - c64.SscalUnitary(beta, ci) - ci[0] = complex(real(ci[0]), 0) - default: - ci[0] = complex(real(ci[0]), 0) - } - for j := 0; j < k; j++ { - aji := a[j*lda+i] - bji := b[j*ldb+i] - if aji != 0 { - c64.AxpyUnitary(alpha*cmplx.Conj(aji), b[j*ldb+i:j*ldb+n], ci) - } - if bji != 0 { - c64.AxpyUnitary(conjalpha*cmplx.Conj(bji), a[j*lda+i:j*lda+n], ci) - } - } - ci[0] = complex(real(ci[0]), 0) - } - } else { - for i := 0; i < n; i++ { - ci := c[i*ldc : i*ldc+i+1] - switch { - case beta == 0: - for j := range ci { - ci[j] = 0 - } - case beta != 1: - c64.SscalUnitary(beta, ci) - ci[i] = complex(real(ci[i]), 0) - default: - ci[i] = complex(real(ci[i]), 0) - } - for j := 0; j < k; j++ { - aji := a[j*lda+i] - bji := b[j*ldb+i] - if aji != 0 { - c64.AxpyUnitary(alpha*cmplx.Conj(aji), b[j*ldb:j*ldb+i+1], ci) - } - if bji != 0 { - c64.AxpyUnitary(conjalpha*cmplx.Conj(bji), a[j*lda:j*lda+i+1], ci) - } - } - ci[i] = complex(real(ci[i]), 0) - } - } - } -} - -// Csymm performs one of the matrix-matrix operations -// C = alpha*A*B + beta*C if side == blas.Left -// C = alpha*B*A + beta*C if side == blas.Right -// where alpha and beta are scalars, A is an m×m or n×n symmetric matrix and B -// and C are m×n matrices. -// -// Complex64 implementations are autogenerated and not directly tested. -func (Implementation) Csymm(side blas.Side, uplo blas.Uplo, m, n int, alpha complex64, a []complex64, lda int, b []complex64, ldb int, beta complex64, c []complex64, ldc int) { - na := m - if side == blas.Right { - na = n - } - switch { - case side != blas.Left && side != blas.Right: - panic(badSide) - case uplo != blas.Lower && uplo != blas.Upper: - panic(badUplo) - case m < 0: - panic(mLT0) - case n < 0: - panic(nLT0) - case lda < max(1, na): - panic(badLdA) - case ldb < max(1, n): - panic(badLdB) - case ldc < max(1, n): - panic(badLdC) - } - - // Quick return if possible. 
- if m == 0 || n == 0 { - return - } - - // For zero matrix size the following slice length checks are trivially satisfied. - if len(a) < lda*(na-1)+na { - panic(shortA) - } - if len(b) < ldb*(m-1)+n { - panic(shortB) - } - if len(c) < ldc*(m-1)+n { - panic(shortC) - } - - // Quick return if possible. - if alpha == 0 && beta == 1 { - return - } - - if alpha == 0 { - if beta == 0 { - for i := 0; i < m; i++ { - ci := c[i*ldc : i*ldc+n] - for j := range ci { - ci[j] = 0 - } - } - } else { - for i := 0; i < m; i++ { - ci := c[i*ldc : i*ldc+n] - c64.ScalUnitary(beta, ci) - } - } - return - } - - if side == blas.Left { - // Form C = alpha*A*B + beta*C. - for i := 0; i < m; i++ { - atmp := alpha * a[i*lda+i] - bi := b[i*ldb : i*ldb+n] - ci := c[i*ldc : i*ldc+n] - if beta == 0 { - for j, bij := range bi { - ci[j] = atmp * bij - } - } else { - for j, bij := range bi { - ci[j] = atmp*bij + beta*ci[j] - } - } - if uplo == blas.Upper { - for k := 0; k < i; k++ { - atmp = alpha * a[k*lda+i] - c64.AxpyUnitary(atmp, b[k*ldb:k*ldb+n], ci) - } - for k := i + 1; k < m; k++ { - atmp = alpha * a[i*lda+k] - c64.AxpyUnitary(atmp, b[k*ldb:k*ldb+n], ci) - } - } else { - for k := 0; k < i; k++ { - atmp = alpha * a[i*lda+k] - c64.AxpyUnitary(atmp, b[k*ldb:k*ldb+n], ci) - } - for k := i + 1; k < m; k++ { - atmp = alpha * a[k*lda+i] - c64.AxpyUnitary(atmp, b[k*ldb:k*ldb+n], ci) - } - } - } - } else { - // Form C = alpha*B*A + beta*C. - if uplo == blas.Upper { - for i := 0; i < m; i++ { - for j := n - 1; j >= 0; j-- { - abij := alpha * b[i*ldb+j] - aj := a[j*lda+j+1 : j*lda+n] - bi := b[i*ldb+j+1 : i*ldb+n] - ci := c[i*ldc+j+1 : i*ldc+n] - var tmp complex64 - for k, ajk := range aj { - ci[k] += abij * ajk - tmp += bi[k] * ajk - } - if beta == 0 { - c[i*ldc+j] = abij*a[j*lda+j] + alpha*tmp - } else { - c[i*ldc+j] = abij*a[j*lda+j] + alpha*tmp + beta*c[i*ldc+j] - } - } - } - } else { - for i := 0; i < m; i++ { - for j := 0; j < n; j++ { - abij := alpha * b[i*ldb+j] - aj := a[j*lda : j*lda+j] - bi := b[i*ldb : i*ldb+j] - ci := c[i*ldc : i*ldc+j] - var tmp complex64 - for k, ajk := range aj { - ci[k] += abij * ajk - tmp += bi[k] * ajk - } - if beta == 0 { - c[i*ldc+j] = abij*a[j*lda+j] + alpha*tmp - } else { - c[i*ldc+j] = abij*a[j*lda+j] + alpha*tmp + beta*c[i*ldc+j] - } - } - } - } - } -} - -// Csyrk performs one of the symmetric rank-k operations -// C = alpha*A*A^T + beta*C if trans == blas.NoTrans -// C = alpha*A^T*A + beta*C if trans == blas.Trans -// where alpha and beta are scalars, C is an n×n symmetric matrix and A is -// an n×k matrix in the first case and a k×n matrix in the second case. -// -// Complex64 implementations are autogenerated and not directly tested. -func (Implementation) Csyrk(uplo blas.Uplo, trans blas.Transpose, n, k int, alpha complex64, a []complex64, lda int, beta complex64, c []complex64, ldc int) { - var rowA, colA int - switch trans { - default: - panic(badTranspose) - case blas.NoTrans: - rowA, colA = n, k - case blas.Trans: - rowA, colA = k, n - } - switch { - case uplo != blas.Lower && uplo != blas.Upper: - panic(badUplo) - case n < 0: - panic(nLT0) - case k < 0: - panic(kLT0) - case lda < max(1, colA): - panic(badLdA) - case ldc < max(1, n): - panic(badLdC) - } - - // Quick return if possible. - if n == 0 { - return - } - - // For zero matrix size the following slice length checks are trivially satisfied. - if len(a) < (rowA-1)*lda+colA { - panic(shortA) - } - if len(c) < (n-1)*ldc+n { - panic(shortC) - } - - // Quick return if possible. 
- if (alpha == 0 || k == 0) && beta == 1 { - return - } - - if alpha == 0 { - if uplo == blas.Upper { - if beta == 0 { - for i := 0; i < n; i++ { - ci := c[i*ldc+i : i*ldc+n] - for j := range ci { - ci[j] = 0 - } - } - } else { - for i := 0; i < n; i++ { - ci := c[i*ldc+i : i*ldc+n] - c64.ScalUnitary(beta, ci) - } - } - } else { - if beta == 0 { - for i := 0; i < n; i++ { - ci := c[i*ldc : i*ldc+i+1] - for j := range ci { - ci[j] = 0 - } - } - } else { - for i := 0; i < n; i++ { - ci := c[i*ldc : i*ldc+i+1] - c64.ScalUnitary(beta, ci) - } - } - } - return - } - - if trans == blas.NoTrans { - // Form C = alpha*A*A^T + beta*C. - if uplo == blas.Upper { - for i := 0; i < n; i++ { - ci := c[i*ldc+i : i*ldc+n] - ai := a[i*lda : i*lda+k] - for jc, cij := range ci { - j := i + jc - ci[jc] = beta*cij + alpha*c64.DotuUnitary(ai, a[j*lda:j*lda+k]) - } - } - } else { - for i := 0; i < n; i++ { - ci := c[i*ldc : i*ldc+i+1] - ai := a[i*lda : i*lda+k] - for j, cij := range ci { - ci[j] = beta*cij + alpha*c64.DotuUnitary(ai, a[j*lda:j*lda+k]) - } - } - } - } else { - // Form C = alpha*A^T*A + beta*C. - if uplo == blas.Upper { - for i := 0; i < n; i++ { - ci := c[i*ldc+i : i*ldc+n] - switch { - case beta == 0: - for jc := range ci { - ci[jc] = 0 - } - case beta != 1: - for jc := range ci { - ci[jc] *= beta - } - } - for j := 0; j < k; j++ { - aji := a[j*lda+i] - if aji != 0 { - c64.AxpyUnitary(alpha*aji, a[j*lda+i:j*lda+n], ci) - } - } - } - } else { - for i := 0; i < n; i++ { - ci := c[i*ldc : i*ldc+i+1] - switch { - case beta == 0: - for j := range ci { - ci[j] = 0 - } - case beta != 1: - for j := range ci { - ci[j] *= beta - } - } - for j := 0; j < k; j++ { - aji := a[j*lda+i] - if aji != 0 { - c64.AxpyUnitary(alpha*aji, a[j*lda:j*lda+i+1], ci) - } - } - } - } - } -} - -// Csyr2k performs one of the symmetric rank-2k operations -// C = alpha*A*B^T + alpha*B*A^T + beta*C if trans == blas.NoTrans -// C = alpha*A^T*B + alpha*B^T*A + beta*C if trans == blas.Trans -// where alpha and beta are scalars, C is an n×n symmetric matrix and A and B -// are n×k matrices in the first case and k×n matrices in the second case. -// -// Complex64 implementations are autogenerated and not directly tested. -func (Implementation) Csyr2k(uplo blas.Uplo, trans blas.Transpose, n, k int, alpha complex64, a []complex64, lda int, b []complex64, ldb int, beta complex64, c []complex64, ldc int) { - var row, col int - switch trans { - default: - panic(badTranspose) - case blas.NoTrans: - row, col = n, k - case blas.Trans: - row, col = k, n - } - switch { - case uplo != blas.Lower && uplo != blas.Upper: - panic(badUplo) - case n < 0: - panic(nLT0) - case k < 0: - panic(kLT0) - case lda < max(1, col): - panic(badLdA) - case ldb < max(1, col): - panic(badLdB) - case ldc < max(1, n): - panic(badLdC) - } - - // Quick return if possible. - if n == 0 { - return - } - - // For zero matrix size the following slice length checks are trivially satisfied. - if len(a) < (row-1)*lda+col { - panic(shortA) - } - if len(b) < (row-1)*ldb+col { - panic(shortB) - } - if len(c) < (n-1)*ldc+n { - panic(shortC) - } - - // Quick return if possible. 
- if (alpha == 0 || k == 0) && beta == 1 { - return - } - - if alpha == 0 { - if uplo == blas.Upper { - if beta == 0 { - for i := 0; i < n; i++ { - ci := c[i*ldc+i : i*ldc+n] - for j := range ci { - ci[j] = 0 - } - } - } else { - for i := 0; i < n; i++ { - ci := c[i*ldc+i : i*ldc+n] - c64.ScalUnitary(beta, ci) - } - } - } else { - if beta == 0 { - for i := 0; i < n; i++ { - ci := c[i*ldc : i*ldc+i+1] - for j := range ci { - ci[j] = 0 - } - } - } else { - for i := 0; i < n; i++ { - ci := c[i*ldc : i*ldc+i+1] - c64.ScalUnitary(beta, ci) - } - } - } - return - } - - if trans == blas.NoTrans { - // Form C = alpha*A*B^T + alpha*B*A^T + beta*C. - if uplo == blas.Upper { - for i := 0; i < n; i++ { - ci := c[i*ldc+i : i*ldc+n] - ai := a[i*lda : i*lda+k] - bi := b[i*ldb : i*ldb+k] - if beta == 0 { - for jc := range ci { - j := i + jc - ci[jc] = alpha*c64.DotuUnitary(ai, b[j*ldb:j*ldb+k]) + alpha*c64.DotuUnitary(bi, a[j*lda:j*lda+k]) - } - } else { - for jc, cij := range ci { - j := i + jc - ci[jc] = alpha*c64.DotuUnitary(ai, b[j*ldb:j*ldb+k]) + alpha*c64.DotuUnitary(bi, a[j*lda:j*lda+k]) + beta*cij - } - } - } - } else { - for i := 0; i < n; i++ { - ci := c[i*ldc : i*ldc+i+1] - ai := a[i*lda : i*lda+k] - bi := b[i*ldb : i*ldb+k] - if beta == 0 { - for j := range ci { - ci[j] = alpha*c64.DotuUnitary(ai, b[j*ldb:j*ldb+k]) + alpha*c64.DotuUnitary(bi, a[j*lda:j*lda+k]) - } - } else { - for j, cij := range ci { - ci[j] = alpha*c64.DotuUnitary(ai, b[j*ldb:j*ldb+k]) + alpha*c64.DotuUnitary(bi, a[j*lda:j*lda+k]) + beta*cij - } - } - } - } - } else { - // Form C = alpha*A^T*B + alpha*B^T*A + beta*C. - if uplo == blas.Upper { - for i := 0; i < n; i++ { - ci := c[i*ldc+i : i*ldc+n] - switch { - case beta == 0: - for jc := range ci { - ci[jc] = 0 - } - case beta != 1: - for jc := range ci { - ci[jc] *= beta - } - } - for j := 0; j < k; j++ { - aji := a[j*lda+i] - bji := b[j*ldb+i] - if aji != 0 { - c64.AxpyUnitary(alpha*aji, b[j*ldb+i:j*ldb+n], ci) - } - if bji != 0 { - c64.AxpyUnitary(alpha*bji, a[j*lda+i:j*lda+n], ci) - } - } - } - } else { - for i := 0; i < n; i++ { - ci := c[i*ldc : i*ldc+i+1] - switch { - case beta == 0: - for j := range ci { - ci[j] = 0 - } - case beta != 1: - for j := range ci { - ci[j] *= beta - } - } - for j := 0; j < k; j++ { - aji := a[j*lda+i] - bji := b[j*ldb+i] - if aji != 0 { - c64.AxpyUnitary(alpha*aji, b[j*ldb:j*ldb+i+1], ci) - } - if bji != 0 { - c64.AxpyUnitary(alpha*bji, a[j*lda:j*lda+i+1], ci) - } - } - } - } - } -} - -// Ctrmm performs one of the matrix-matrix operations -// B = alpha * op(A) * B if side == blas.Left, -// B = alpha * B * op(A) if side == blas.Right, -// where alpha is a scalar, B is an m×n matrix, A is a unit, or non-unit, -// upper or lower triangular matrix and op(A) is one of -// op(A) = A if trans == blas.NoTrans, -// op(A) = A^T if trans == blas.Trans, -// op(A) = A^H if trans == blas.ConjTrans. -// -// Complex64 implementations are autogenerated and not directly tested. 
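A hedged sketch of the triangular multiply Ctrmm described above, again with illustrative data: B starts as the identity, so the in-place result is just alpha*A. Only the upper triangle of A is referenced here; the strictly lower entry is ignored.

package main

import (
	"fmt"

	"gonum.org/v1/gonum/blas"
	"gonum.org/v1/gonum/blas/gonum"
)

func main() {
	// Upper triangular 2x2 A in row-major order; a[2] is never read.
	a := []complex64{1, 2, 0, 3}
	// B is 2x2 and is overwritten with alpha * A * B.
	b := []complex64{1, 0, 0, 1}

	var impl gonum.Implementation
	// B = 1*A*B with A on the left, upper triangular, non-unit diagonal.
	impl.Ctrmm(blas.Left, blas.Upper, blas.NoTrans, blas.NonUnit, 2, 2, 1, a, 2, b, 2)
	fmt.Println(b) // [(1+0i) (2+0i) (0+0i) (3+0i)]
}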
-func (Implementation) Ctrmm(side blas.Side, uplo blas.Uplo, trans blas.Transpose, diag blas.Diag, m, n int, alpha complex64, a []complex64, lda int, b []complex64, ldb int) { - na := m - if side == blas.Right { - na = n - } - switch { - case side != blas.Left && side != blas.Right: - panic(badSide) - case uplo != blas.Lower && uplo != blas.Upper: - panic(badUplo) - case trans != blas.NoTrans && trans != blas.Trans && trans != blas.ConjTrans: - panic(badTranspose) - case diag != blas.Unit && diag != blas.NonUnit: - panic(badDiag) - case m < 0: - panic(mLT0) - case n < 0: - panic(nLT0) - case lda < max(1, na): - panic(badLdA) - case ldb < max(1, n): - panic(badLdB) - } - - // Quick return if possible. - if m == 0 || n == 0 { - return - } - - // For zero matrix size the following slice length checks are trivially satisfied. - if len(a) < (na-1)*lda+na { - panic(shortA) - } - if len(b) < (m-1)*ldb+n { - panic(shortB) - } - - // Quick return if possible. - if alpha == 0 { - for i := 0; i < m; i++ { - bi := b[i*ldb : i*ldb+n] - for j := range bi { - bi[j] = 0 - } - } - return - } - - noConj := trans != blas.ConjTrans - noUnit := diag == blas.NonUnit - if side == blas.Left { - if trans == blas.NoTrans { - // Form B = alpha*A*B. - if uplo == blas.Upper { - for i := 0; i < m; i++ { - aii := alpha - if noUnit { - aii *= a[i*lda+i] - } - bi := b[i*ldb : i*ldb+n] - for j := range bi { - bi[j] *= aii - } - for ja, aij := range a[i*lda+i+1 : i*lda+m] { - j := ja + i + 1 - if aij != 0 { - c64.AxpyUnitary(alpha*aij, b[j*ldb:j*ldb+n], bi) - } - } - } - } else { - for i := m - 1; i >= 0; i-- { - aii := alpha - if noUnit { - aii *= a[i*lda+i] - } - bi := b[i*ldb : i*ldb+n] - for j := range bi { - bi[j] *= aii - } - for j, aij := range a[i*lda : i*lda+i] { - if aij != 0 { - c64.AxpyUnitary(alpha*aij, b[j*ldb:j*ldb+n], bi) - } - } - } - } - } else { - // Form B = alpha*A^T*B or B = alpha*A^H*B. - if uplo == blas.Upper { - for k := m - 1; k >= 0; k-- { - bk := b[k*ldb : k*ldb+n] - for ja, ajk := range a[k*lda+k+1 : k*lda+m] { - if ajk == 0 { - continue - } - j := k + 1 + ja - if noConj { - c64.AxpyUnitary(alpha*ajk, bk, b[j*ldb:j*ldb+n]) - } else { - c64.AxpyUnitary(alpha*cmplx.Conj(ajk), bk, b[j*ldb:j*ldb+n]) - } - } - akk := alpha - if noUnit { - if noConj { - akk *= a[k*lda+k] - } else { - akk *= cmplx.Conj(a[k*lda+k]) - } - } - if akk != 1 { - c64.ScalUnitary(akk, bk) - } - } - } else { - for k := 0; k < m; k++ { - bk := b[k*ldb : k*ldb+n] - for j, ajk := range a[k*lda : k*lda+k] { - if ajk == 0 { - continue - } - if noConj { - c64.AxpyUnitary(alpha*ajk, bk, b[j*ldb:j*ldb+n]) - } else { - c64.AxpyUnitary(alpha*cmplx.Conj(ajk), bk, b[j*ldb:j*ldb+n]) - } - } - akk := alpha - if noUnit { - if noConj { - akk *= a[k*lda+k] - } else { - akk *= cmplx.Conj(a[k*lda+k]) - } - } - if akk != 1 { - c64.ScalUnitary(akk, bk) - } - } - } - } - } else { - if trans == blas.NoTrans { - // Form B = alpha*B*A. - if uplo == blas.Upper { - for i := 0; i < m; i++ { - bi := b[i*ldb : i*ldb+n] - for k := n - 1; k >= 0; k-- { - abik := alpha * bi[k] - if abik == 0 { - continue - } - bi[k] = abik - if noUnit { - bi[k] *= a[k*lda+k] - } - c64.AxpyUnitary(abik, a[k*lda+k+1:k*lda+n], bi[k+1:]) - } - } - } else { - for i := 0; i < m; i++ { - bi := b[i*ldb : i*ldb+n] - for k := 0; k < n; k++ { - abik := alpha * bi[k] - if abik == 0 { - continue - } - bi[k] = abik - if noUnit { - bi[k] *= a[k*lda+k] - } - c64.AxpyUnitary(abik, a[k*lda:k*lda+k], bi[:k]) - } - } - } - } else { - // Form B = alpha*B*A^T or B = alpha*B*A^H. 
- if uplo == blas.Upper { - for i := 0; i < m; i++ { - bi := b[i*ldb : i*ldb+n] - for j, bij := range bi { - if noConj { - if noUnit { - bij *= a[j*lda+j] - } - bij += c64.DotuUnitary(a[j*lda+j+1:j*lda+n], bi[j+1:n]) - } else { - if noUnit { - bij *= cmplx.Conj(a[j*lda+j]) - } - bij += c64.DotcUnitary(a[j*lda+j+1:j*lda+n], bi[j+1:n]) - } - bi[j] = alpha * bij - } - } - } else { - for i := 0; i < m; i++ { - bi := b[i*ldb : i*ldb+n] - for j := n - 1; j >= 0; j-- { - bij := bi[j] - if noConj { - if noUnit { - bij *= a[j*lda+j] - } - bij += c64.DotuUnitary(a[j*lda:j*lda+j], bi[:j]) - } else { - if noUnit { - bij *= cmplx.Conj(a[j*lda+j]) - } - bij += c64.DotcUnitary(a[j*lda:j*lda+j], bi[:j]) - } - bi[j] = alpha * bij - } - } - } - } - } -} - -// Ctrsm solves one of the matrix equations -// op(A) * X = alpha * B if side == blas.Left, -// X * op(A) = alpha * B if side == blas.Right, -// where alpha is a scalar, X and B are m×n matrices, A is a unit or -// non-unit, upper or lower triangular matrix and op(A) is one of -// op(A) = A if transA == blas.NoTrans, -// op(A) = A^T if transA == blas.Trans, -// op(A) = A^H if transA == blas.ConjTrans. -// On return the matrix X is overwritten on B. -// -// Complex64 implementations are autogenerated and not directly tested. -func (Implementation) Ctrsm(side blas.Side, uplo blas.Uplo, transA blas.Transpose, diag blas.Diag, m, n int, alpha complex64, a []complex64, lda int, b []complex64, ldb int) { - na := m - if side == blas.Right { - na = n - } - switch { - case side != blas.Left && side != blas.Right: - panic(badSide) - case uplo != blas.Lower && uplo != blas.Upper: - panic(badUplo) - case transA != blas.NoTrans && transA != blas.Trans && transA != blas.ConjTrans: - panic(badTranspose) - case diag != blas.Unit && diag != blas.NonUnit: - panic(badDiag) - case m < 0: - panic(mLT0) - case n < 0: - panic(nLT0) - case lda < max(1, na): - panic(badLdA) - case ldb < max(1, n): - panic(badLdB) - } - - // Quick return if possible. - if m == 0 || n == 0 { - return - } - - // For zero matrix size the following slice length checks are trivially satisfied. - if len(a) < (na-1)*lda+na { - panic(shortA) - } - if len(b) < (m-1)*ldb+n { - panic(shortB) - } - - if alpha == 0 { - for i := 0; i < m; i++ { - for j := 0; j < n; j++ { - b[i*ldb+j] = 0 - } - } - return - } - - noConj := transA != blas.ConjTrans - noUnit := diag == blas.NonUnit - if side == blas.Left { - if transA == blas.NoTrans { - // Form B = alpha*inv(A)*B. - if uplo == blas.Upper { - for i := m - 1; i >= 0; i-- { - bi := b[i*ldb : i*ldb+n] - if alpha != 1 { - c64.ScalUnitary(alpha, bi) - } - for ka, aik := range a[i*lda+i+1 : i*lda+m] { - k := i + 1 + ka - if aik != 0 { - c64.AxpyUnitary(-aik, b[k*ldb:k*ldb+n], bi) - } - } - if noUnit { - c64.ScalUnitary(1/a[i*lda+i], bi) - } - } - } else { - for i := 0; i < m; i++ { - bi := b[i*ldb : i*ldb+n] - if alpha != 1 { - c64.ScalUnitary(alpha, bi) - } - for j, aij := range a[i*lda : i*lda+i] { - if aij != 0 { - c64.AxpyUnitary(-aij, b[j*ldb:j*ldb+n], bi) - } - } - if noUnit { - c64.ScalUnitary(1/a[i*lda+i], bi) - } - } - } - } else { - // Form B = alpha*inv(A^T)*B or B = alpha*inv(A^H)*B. 
- if uplo == blas.Upper { - for i := 0; i < m; i++ { - bi := b[i*ldb : i*ldb+n] - if noUnit { - if noConj { - c64.ScalUnitary(1/a[i*lda+i], bi) - } else { - c64.ScalUnitary(1/cmplx.Conj(a[i*lda+i]), bi) - } - } - for ja, aij := range a[i*lda+i+1 : i*lda+m] { - if aij == 0 { - continue - } - j := i + 1 + ja - if noConj { - c64.AxpyUnitary(-aij, bi, b[j*ldb:j*ldb+n]) - } else { - c64.AxpyUnitary(-cmplx.Conj(aij), bi, b[j*ldb:j*ldb+n]) - } - } - if alpha != 1 { - c64.ScalUnitary(alpha, bi) - } - } - } else { - for i := m - 1; i >= 0; i-- { - bi := b[i*ldb : i*ldb+n] - if noUnit { - if noConj { - c64.ScalUnitary(1/a[i*lda+i], bi) - } else { - c64.ScalUnitary(1/cmplx.Conj(a[i*lda+i]), bi) - } - } - for j, aij := range a[i*lda : i*lda+i] { - if aij == 0 { - continue - } - if noConj { - c64.AxpyUnitary(-aij, bi, b[j*ldb:j*ldb+n]) - } else { - c64.AxpyUnitary(-cmplx.Conj(aij), bi, b[j*ldb:j*ldb+n]) - } - } - if alpha != 1 { - c64.ScalUnitary(alpha, bi) - } - } - } - } - } else { - if transA == blas.NoTrans { - // Form B = alpha*B*inv(A). - if uplo == blas.Upper { - for i := 0; i < m; i++ { - bi := b[i*ldb : i*ldb+n] - if alpha != 1 { - c64.ScalUnitary(alpha, bi) - } - for j, bij := range bi { - if bij == 0 { - continue - } - if noUnit { - bi[j] /= a[j*lda+j] - } - c64.AxpyUnitary(-bi[j], a[j*lda+j+1:j*lda+n], bi[j+1:n]) - } - } - } else { - for i := 0; i < m; i++ { - bi := b[i*ldb : i*ldb+n] - if alpha != 1 { - c64.ScalUnitary(alpha, bi) - } - for j := n - 1; j >= 0; j-- { - if bi[j] == 0 { - continue - } - if noUnit { - bi[j] /= a[j*lda+j] - } - c64.AxpyUnitary(-bi[j], a[j*lda:j*lda+j], bi[:j]) - } - } - } - } else { - // Form B = alpha*B*inv(A^T) or B = alpha*B*inv(A^H). - if uplo == blas.Upper { - for i := 0; i < m; i++ { - bi := b[i*ldb : i*ldb+n] - for j := n - 1; j >= 0; j-- { - bij := alpha * bi[j] - if noConj { - bij -= c64.DotuUnitary(a[j*lda+j+1:j*lda+n], bi[j+1:n]) - if noUnit { - bij /= a[j*lda+j] - } - } else { - bij -= c64.DotcUnitary(a[j*lda+j+1:j*lda+n], bi[j+1:n]) - if noUnit { - bij /= cmplx.Conj(a[j*lda+j]) - } - } - bi[j] = bij - } - } - } else { - for i := 0; i < m; i++ { - bi := b[i*ldb : i*ldb+n] - for j, bij := range bi { - bij *= alpha - if noConj { - bij -= c64.DotuUnitary(a[j*lda:j*lda+j], bi[:j]) - if noUnit { - bij /= a[j*lda+j] - } - } else { - bij -= c64.DotcUnitary(a[j*lda:j*lda+j], bi[:j]) - if noUnit { - bij /= cmplx.Conj(a[j*lda+j]) - } - } - bi[j] = bij - } - } - } - } - } -} diff --git a/vendor/gonum.org/v1/gonum/blas/gonum/level3float32.go b/vendor/gonum.org/v1/gonum/blas/gonum/level3float32.go deleted file mode 100644 index 13c4a792e..000000000 --- a/vendor/gonum.org/v1/gonum/blas/gonum/level3float32.go +++ /dev/null @@ -1,876 +0,0 @@ -// Code generated by "go generate gonum.org/v1/gonum/blas/gonum”; DO NOT EDIT. - -// Copyright ©2014 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
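One more orientation point for the complex64 level-3 routines deleted in this patch: a hedged sketch of the hermitian rank-k update Cherk defined above. The 2×1 input (n = 2, k = 1) and the impl value are illustrative; note alpha and beta are real, and only the requested triangle of C is written.

package main

import (
	"fmt"

	"gonum.org/v1/gonum/blas"
	"gonum.org/v1/gonum/blas/gonum"
)

func main() {
	// A is 2x1, so C = alpha*A*A^H + beta*C is a 2x2 hermitian matrix.
	a := []complex64{1i, 1}
	c := make([]complex64, 4)

	var impl gonum.Implementation
	// Only the upper triangle of C is referenced and written; c[2] stays zero.
	impl.Cherk(blas.Upper, blas.NoTrans, 2, 1, 1, a, 1, 0, c, 2)
	fmt.Println(c) // [(1+0i) (0+1i) (0+0i) (1+0i)]
}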
- -package gonum - -import ( - "gonum.org/v1/gonum/blas" - "gonum.org/v1/gonum/internal/asm/f32" -) - -var _ blas.Float32Level3 = Implementation{} - -// Strsm solves one of the matrix equations -// A * X = alpha * B if tA == blas.NoTrans and side == blas.Left -// A^T * X = alpha * B if tA == blas.Trans or blas.ConjTrans, and side == blas.Left -// X * A = alpha * B if tA == blas.NoTrans and side == blas.Right -// X * A^T = alpha * B if tA == blas.Trans or blas.ConjTrans, and side == blas.Right -// where A is an n×n or m×m triangular matrix, X and B are m×n matrices, and alpha is a -// scalar. -// -// At entry to the function, X contains the values of B, and the result is -// stored in-place into X. -// -// No check is made that A is invertible. -// -// Float32 implementations are autogenerated and not directly tested. -func (Implementation) Strsm(s blas.Side, ul blas.Uplo, tA blas.Transpose, d blas.Diag, m, n int, alpha float32, a []float32, lda int, b []float32, ldb int) { - if s != blas.Left && s != blas.Right { - panic(badSide) - } - if ul != blas.Lower && ul != blas.Upper { - panic(badUplo) - } - if tA != blas.NoTrans && tA != blas.Trans && tA != blas.ConjTrans { - panic(badTranspose) - } - if d != blas.NonUnit && d != blas.Unit { - panic(badDiag) - } - if m < 0 { - panic(mLT0) - } - if n < 0 { - panic(nLT0) - } - k := n - if s == blas.Left { - k = m - } - if lda < max(1, k) { - panic(badLdA) - } - if ldb < max(1, n) { - panic(badLdB) - } - - // Quick return if possible. - if m == 0 || n == 0 { - return - } - - // For zero matrix size the following slice length checks are trivially satisfied. - if len(a) < lda*(k-1)+k { - panic(shortA) - } - if len(b) < ldb*(m-1)+n { - panic(shortB) - } - - if alpha == 0 { - for i := 0; i < m; i++ { - btmp := b[i*ldb : i*ldb+n] - for j := range btmp { - btmp[j] = 0 - } - } - return - } - nonUnit := d == blas.NonUnit - if s == blas.Left { - if tA == blas.NoTrans { - if ul == blas.Upper { - for i := m - 1; i >= 0; i-- { - btmp := b[i*ldb : i*ldb+n] - if alpha != 1 { - f32.ScalUnitary(alpha, btmp) - } - for ka, va := range a[i*lda+i+1 : i*lda+m] { - if va != 0 { - k := ka + i + 1 - f32.AxpyUnitary(-va, b[k*ldb:k*ldb+n], btmp) - } - } - if nonUnit { - tmp := 1 / a[i*lda+i] - f32.ScalUnitary(tmp, btmp) - } - } - return - } - for i := 0; i < m; i++ { - btmp := b[i*ldb : i*ldb+n] - if alpha != 1 { - f32.ScalUnitary(alpha, btmp) - } - for k, va := range a[i*lda : i*lda+i] { - if va != 0 { - f32.AxpyUnitary(-va, b[k*ldb:k*ldb+n], btmp) - } - } - if nonUnit { - tmp := 1 / a[i*lda+i] - f32.ScalUnitary(tmp, btmp) - } - } - return - } - // Cases where a is transposed - if ul == blas.Upper { - for k := 0; k < m; k++ { - btmpk := b[k*ldb : k*ldb+n] - if nonUnit { - tmp := 1 / a[k*lda+k] - f32.ScalUnitary(tmp, btmpk) - } - for ia, va := range a[k*lda+k+1 : k*lda+m] { - if va != 0 { - i := ia + k + 1 - f32.AxpyUnitary(-va, btmpk, b[i*ldb:i*ldb+n]) - } - } - if alpha != 1 { - f32.ScalUnitary(alpha, btmpk) - } - } - return - } - for k := m - 1; k >= 0; k-- { - btmpk := b[k*ldb : k*ldb+n] - if nonUnit { - tmp := 1 / a[k*lda+k] - f32.ScalUnitary(tmp, btmpk) - } - for i, va := range a[k*lda : k*lda+k] { - if va != 0 { - f32.AxpyUnitary(-va, btmpk, b[i*ldb:i*ldb+n]) - } - } - if alpha != 1 { - f32.ScalUnitary(alpha, btmpk) - } - } - return - } - // Cases where a is to the right of X. 
- if tA == blas.NoTrans { - if ul == blas.Upper { - for i := 0; i < m; i++ { - btmp := b[i*ldb : i*ldb+n] - if alpha != 1 { - f32.ScalUnitary(alpha, btmp) - } - for k, vb := range btmp { - if vb == 0 { - continue - } - if nonUnit { - btmp[k] /= a[k*lda+k] - } - f32.AxpyUnitary(-btmp[k], a[k*lda+k+1:k*lda+n], btmp[k+1:n]) - } - } - return - } - for i := 0; i < m; i++ { - btmp := b[i*ldb : i*ldb+n] - if alpha != 1 { - f32.ScalUnitary(alpha, btmp) - } - for k := n - 1; k >= 0; k-- { - if btmp[k] == 0 { - continue - } - if nonUnit { - btmp[k] /= a[k*lda+k] - } - f32.AxpyUnitary(-btmp[k], a[k*lda:k*lda+k], btmp[:k]) - } - } - return - } - // Cases where a is transposed. - if ul == blas.Upper { - for i := 0; i < m; i++ { - btmp := b[i*ldb : i*ldb+n] - for j := n - 1; j >= 0; j-- { - tmp := alpha*btmp[j] - f32.DotUnitary(a[j*lda+j+1:j*lda+n], btmp[j+1:]) - if nonUnit { - tmp /= a[j*lda+j] - } - btmp[j] = tmp - } - } - return - } - for i := 0; i < m; i++ { - btmp := b[i*ldb : i*ldb+n] - for j := 0; j < n; j++ { - tmp := alpha*btmp[j] - f32.DotUnitary(a[j*lda:j*lda+j], btmp[:j]) - if nonUnit { - tmp /= a[j*lda+j] - } - btmp[j] = tmp - } - } -} - -// Ssymm performs one of the matrix-matrix operations -// C = alpha * A * B + beta * C if side == blas.Left -// C = alpha * B * A + beta * C if side == blas.Right -// where A is an n×n or m×m symmetric matrix, B and C are m×n matrices, and alpha -// is a scalar. -// -// Float32 implementations are autogenerated and not directly tested. -func (Implementation) Ssymm(s blas.Side, ul blas.Uplo, m, n int, alpha float32, a []float32, lda int, b []float32, ldb int, beta float32, c []float32, ldc int) { - if s != blas.Right && s != blas.Left { - panic(badSide) - } - if ul != blas.Lower && ul != blas.Upper { - panic(badUplo) - } - if m < 0 { - panic(mLT0) - } - if n < 0 { - panic(nLT0) - } - k := n - if s == blas.Left { - k = m - } - if lda < max(1, k) { - panic(badLdA) - } - if ldb < max(1, n) { - panic(badLdB) - } - if ldc < max(1, n) { - panic(badLdC) - } - - // Quick return if possible. - if m == 0 || n == 0 { - return - } - - // For zero matrix size the following slice length checks are trivially satisfied. - if len(a) < lda*(k-1)+k { - panic(shortA) - } - if len(b) < ldb*(m-1)+n { - panic(shortB) - } - if len(c) < ldc*(m-1)+n { - panic(shortC) - } - - // Quick return if possible. 
- if alpha == 0 && beta == 1 { - return - } - - if alpha == 0 { - if beta == 0 { - for i := 0; i < m; i++ { - ctmp := c[i*ldc : i*ldc+n] - for j := range ctmp { - ctmp[j] = 0 - } - } - return - } - for i := 0; i < m; i++ { - ctmp := c[i*ldc : i*ldc+n] - for j := 0; j < n; j++ { - ctmp[j] *= beta - } - } - return - } - - isUpper := ul == blas.Upper - if s == blas.Left { - for i := 0; i < m; i++ { - atmp := alpha * a[i*lda+i] - btmp := b[i*ldb : i*ldb+n] - ctmp := c[i*ldc : i*ldc+n] - for j, v := range btmp { - ctmp[j] *= beta - ctmp[j] += atmp * v - } - - for k := 0; k < i; k++ { - var atmp float32 - if isUpper { - atmp = a[k*lda+i] - } else { - atmp = a[i*lda+k] - } - atmp *= alpha - f32.AxpyUnitary(atmp, b[k*ldb:k*ldb+n], ctmp) - } - for k := i + 1; k < m; k++ { - var atmp float32 - if isUpper { - atmp = a[i*lda+k] - } else { - atmp = a[k*lda+i] - } - atmp *= alpha - f32.AxpyUnitary(atmp, b[k*ldb:k*ldb+n], ctmp) - } - } - return - } - if isUpper { - for i := 0; i < m; i++ { - for j := n - 1; j >= 0; j-- { - tmp := alpha * b[i*ldb+j] - var tmp2 float32 - atmp := a[j*lda+j+1 : j*lda+n] - btmp := b[i*ldb+j+1 : i*ldb+n] - ctmp := c[i*ldc+j+1 : i*ldc+n] - for k, v := range atmp { - ctmp[k] += tmp * v - tmp2 += btmp[k] * v - } - c[i*ldc+j] *= beta - c[i*ldc+j] += tmp*a[j*lda+j] + alpha*tmp2 - } - } - return - } - for i := 0; i < m; i++ { - for j := 0; j < n; j++ { - tmp := alpha * b[i*ldb+j] - var tmp2 float32 - atmp := a[j*lda : j*lda+j] - btmp := b[i*ldb : i*ldb+j] - ctmp := c[i*ldc : i*ldc+j] - for k, v := range atmp { - ctmp[k] += tmp * v - tmp2 += btmp[k] * v - } - c[i*ldc+j] *= beta - c[i*ldc+j] += tmp*a[j*lda+j] + alpha*tmp2 - } - } -} - -// Ssyrk performs one of the symmetric rank-k operations -// C = alpha * A * A^T + beta * C if tA == blas.NoTrans -// C = alpha * A^T * A + beta * C if tA == blas.Trans or tA == blas.ConjTrans -// where A is an n×k or k×n matrix, C is an n×n symmetric matrix, and alpha and -// beta are scalars. -// -// Float32 implementations are autogenerated and not directly tested. -func (Implementation) Ssyrk(ul blas.Uplo, tA blas.Transpose, n, k int, alpha float32, a []float32, lda int, beta float32, c []float32, ldc int) { - if ul != blas.Lower && ul != blas.Upper { - panic(badUplo) - } - if tA != blas.Trans && tA != blas.NoTrans && tA != blas.ConjTrans { - panic(badTranspose) - } - if n < 0 { - panic(nLT0) - } - if k < 0 { - panic(kLT0) - } - row, col := k, n - if tA == blas.NoTrans { - row, col = n, k - } - if lda < max(1, col) { - panic(badLdA) - } - if ldc < max(1, n) { - panic(badLdC) - } - - // Quick return if possible. - if n == 0 { - return - } - - // For zero matrix size the following slice length checks are trivially satisfied. 
- if len(a) < lda*(row-1)+col { - panic(shortA) - } - if len(c) < ldc*(n-1)+n { - panic(shortC) - } - - if alpha == 0 { - if beta == 0 { - if ul == blas.Upper { - for i := 0; i < n; i++ { - ctmp := c[i*ldc+i : i*ldc+n] - for j := range ctmp { - ctmp[j] = 0 - } - } - return - } - for i := 0; i < n; i++ { - ctmp := c[i*ldc : i*ldc+i+1] - for j := range ctmp { - ctmp[j] = 0 - } - } - return - } - if ul == blas.Upper { - for i := 0; i < n; i++ { - ctmp := c[i*ldc+i : i*ldc+n] - for j := range ctmp { - ctmp[j] *= beta - } - } - return - } - for i := 0; i < n; i++ { - ctmp := c[i*ldc : i*ldc+i+1] - for j := range ctmp { - ctmp[j] *= beta - } - } - return - } - if tA == blas.NoTrans { - if ul == blas.Upper { - for i := 0; i < n; i++ { - ctmp := c[i*ldc+i : i*ldc+n] - atmp := a[i*lda : i*lda+k] - if beta == 0 { - for jc := range ctmp { - j := jc + i - ctmp[jc] = alpha * f32.DotUnitary(atmp, a[j*lda:j*lda+k]) - } - } else { - for jc, vc := range ctmp { - j := jc + i - ctmp[jc] = vc*beta + alpha*f32.DotUnitary(atmp, a[j*lda:j*lda+k]) - } - } - } - return - } - for i := 0; i < n; i++ { - ctmp := c[i*ldc : i*ldc+i+1] - atmp := a[i*lda : i*lda+k] - if beta == 0 { - for j := range ctmp { - ctmp[j] = alpha * f32.DotUnitary(a[j*lda:j*lda+k], atmp) - } - } else { - for j, vc := range ctmp { - ctmp[j] = vc*beta + alpha*f32.DotUnitary(a[j*lda:j*lda+k], atmp) - } - } - } - return - } - // Cases where a is transposed. - if ul == blas.Upper { - for i := 0; i < n; i++ { - ctmp := c[i*ldc+i : i*ldc+n] - if beta == 0 { - for j := range ctmp { - ctmp[j] = 0 - } - } else if beta != 1 { - for j := range ctmp { - ctmp[j] *= beta - } - } - for l := 0; l < k; l++ { - tmp := alpha * a[l*lda+i] - if tmp != 0 { - f32.AxpyUnitary(tmp, a[l*lda+i:l*lda+n], ctmp) - } - } - } - return - } - for i := 0; i < n; i++ { - ctmp := c[i*ldc : i*ldc+i+1] - if beta != 1 { - for j := range ctmp { - ctmp[j] *= beta - } - } - for l := 0; l < k; l++ { - tmp := alpha * a[l*lda+i] - if tmp != 0 { - f32.AxpyUnitary(tmp, a[l*lda:l*lda+i+1], ctmp) - } - } - } -} - -// Ssyr2k performs one of the symmetric rank 2k operations -// C = alpha * A * B^T + alpha * B * A^T + beta * C if tA == blas.NoTrans -// C = alpha * A^T * B + alpha * B^T * A + beta * C if tA == blas.Trans or tA == blas.ConjTrans -// where A and B are n×k or k×n matrices, C is an n×n symmetric matrix, and -// alpha and beta are scalars. -// -// Float32 implementations are autogenerated and not directly tested. -func (Implementation) Ssyr2k(ul blas.Uplo, tA blas.Transpose, n, k int, alpha float32, a []float32, lda int, b []float32, ldb int, beta float32, c []float32, ldc int) { - if ul != blas.Lower && ul != blas.Upper { - panic(badUplo) - } - if tA != blas.Trans && tA != blas.NoTrans && tA != blas.ConjTrans { - panic(badTranspose) - } - if n < 0 { - panic(nLT0) - } - if k < 0 { - panic(kLT0) - } - row, col := k, n - if tA == blas.NoTrans { - row, col = n, k - } - if lda < max(1, col) { - panic(badLdA) - } - if ldb < max(1, col) { - panic(badLdB) - } - if ldc < max(1, n) { - panic(badLdC) - } - - // Quick return if possible. - if n == 0 { - return - } - - // For zero matrix size the following slice length checks are trivially satisfied. 
- if len(a) < lda*(row-1)+col { - panic(shortA) - } - if len(b) < ldb*(row-1)+col { - panic(shortB) - } - if len(c) < ldc*(n-1)+n { - panic(shortC) - } - - if alpha == 0 { - if beta == 0 { - if ul == blas.Upper { - for i := 0; i < n; i++ { - ctmp := c[i*ldc+i : i*ldc+n] - for j := range ctmp { - ctmp[j] = 0 - } - } - return - } - for i := 0; i < n; i++ { - ctmp := c[i*ldc : i*ldc+i+1] - for j := range ctmp { - ctmp[j] = 0 - } - } - return - } - if ul == blas.Upper { - for i := 0; i < n; i++ { - ctmp := c[i*ldc+i : i*ldc+n] - for j := range ctmp { - ctmp[j] *= beta - } - } - return - } - for i := 0; i < n; i++ { - ctmp := c[i*ldc : i*ldc+i+1] - for j := range ctmp { - ctmp[j] *= beta - } - } - return - } - if tA == blas.NoTrans { - if ul == blas.Upper { - for i := 0; i < n; i++ { - atmp := a[i*lda : i*lda+k] - btmp := b[i*ldb : i*ldb+k] - ctmp := c[i*ldc+i : i*ldc+n] - for jc := range ctmp { - j := i + jc - var tmp1, tmp2 float32 - binner := b[j*ldb : j*ldb+k] - for l, v := range a[j*lda : j*lda+k] { - tmp1 += v * btmp[l] - tmp2 += atmp[l] * binner[l] - } - ctmp[jc] *= beta - ctmp[jc] += alpha * (tmp1 + tmp2) - } - } - return - } - for i := 0; i < n; i++ { - atmp := a[i*lda : i*lda+k] - btmp := b[i*ldb : i*ldb+k] - ctmp := c[i*ldc : i*ldc+i+1] - for j := 0; j <= i; j++ { - var tmp1, tmp2 float32 - binner := b[j*ldb : j*ldb+k] - for l, v := range a[j*lda : j*lda+k] { - tmp1 += v * btmp[l] - tmp2 += atmp[l] * binner[l] - } - ctmp[j] *= beta - ctmp[j] += alpha * (tmp1 + tmp2) - } - } - return - } - if ul == blas.Upper { - for i := 0; i < n; i++ { - ctmp := c[i*ldc+i : i*ldc+n] - if beta != 1 { - for j := range ctmp { - ctmp[j] *= beta - } - } - for l := 0; l < k; l++ { - tmp1 := alpha * b[l*ldb+i] - tmp2 := alpha * a[l*lda+i] - btmp := b[l*ldb+i : l*ldb+n] - if tmp1 != 0 || tmp2 != 0 { - for j, v := range a[l*lda+i : l*lda+n] { - ctmp[j] += v*tmp1 + btmp[j]*tmp2 - } - } - } - } - return - } - for i := 0; i < n; i++ { - ctmp := c[i*ldc : i*ldc+i+1] - if beta != 1 { - for j := range ctmp { - ctmp[j] *= beta - } - } - for l := 0; l < k; l++ { - tmp1 := alpha * b[l*ldb+i] - tmp2 := alpha * a[l*lda+i] - btmp := b[l*ldb : l*ldb+i+1] - if tmp1 != 0 || tmp2 != 0 { - for j, v := range a[l*lda : l*lda+i+1] { - ctmp[j] += v*tmp1 + btmp[j]*tmp2 - } - } - } - } -} - -// Strmm performs one of the matrix-matrix operations -// B = alpha * A * B if tA == blas.NoTrans and side == blas.Left -// B = alpha * A^T * B if tA == blas.Trans or blas.ConjTrans, and side == blas.Left -// B = alpha * B * A if tA == blas.NoTrans and side == blas.Right -// B = alpha * B * A^T if tA == blas.Trans or blas.ConjTrans, and side == blas.Right -// where A is an n×n or m×m triangular matrix, B is an m×n matrix, and alpha is a scalar. -// -// Float32 implementations are autogenerated and not directly tested. -func (Implementation) Strmm(s blas.Side, ul blas.Uplo, tA blas.Transpose, d blas.Diag, m, n int, alpha float32, a []float32, lda int, b []float32, ldb int) { - if s != blas.Left && s != blas.Right { - panic(badSide) - } - if ul != blas.Lower && ul != blas.Upper { - panic(badUplo) - } - if tA != blas.NoTrans && tA != blas.Trans && tA != blas.ConjTrans { - panic(badTranspose) - } - if d != blas.NonUnit && d != blas.Unit { - panic(badDiag) - } - if m < 0 { - panic(mLT0) - } - if n < 0 { - panic(nLT0) - } - k := n - if s == blas.Left { - k = m - } - if lda < max(1, k) { - panic(badLdA) - } - if ldb < max(1, n) { - panic(badLdB) - } - - // Quick return if possible. 
- if m == 0 || n == 0 { - return - } - - // For zero matrix size the following slice length checks are trivially satisfied. - if len(a) < lda*(k-1)+k { - panic(shortA) - } - if len(b) < ldb*(m-1)+n { - panic(shortB) - } - - if alpha == 0 { - for i := 0; i < m; i++ { - btmp := b[i*ldb : i*ldb+n] - for j := range btmp { - btmp[j] = 0 - } - } - return - } - - nonUnit := d == blas.NonUnit - if s == blas.Left { - if tA == blas.NoTrans { - if ul == blas.Upper { - for i := 0; i < m; i++ { - tmp := alpha - if nonUnit { - tmp *= a[i*lda+i] - } - btmp := b[i*ldb : i*ldb+n] - f32.ScalUnitary(tmp, btmp) - for ka, va := range a[i*lda+i+1 : i*lda+m] { - k := ka + i + 1 - if va != 0 { - f32.AxpyUnitary(alpha*va, b[k*ldb:k*ldb+n], btmp) - } - } - } - return - } - for i := m - 1; i >= 0; i-- { - tmp := alpha - if nonUnit { - tmp *= a[i*lda+i] - } - btmp := b[i*ldb : i*ldb+n] - f32.ScalUnitary(tmp, btmp) - for k, va := range a[i*lda : i*lda+i] { - if va != 0 { - f32.AxpyUnitary(alpha*va, b[k*ldb:k*ldb+n], btmp) - } - } - } - return - } - // Cases where a is transposed. - if ul == blas.Upper { - for k := m - 1; k >= 0; k-- { - btmpk := b[k*ldb : k*ldb+n] - for ia, va := range a[k*lda+k+1 : k*lda+m] { - i := ia + k + 1 - btmp := b[i*ldb : i*ldb+n] - if va != 0 { - f32.AxpyUnitary(alpha*va, btmpk, btmp) - } - } - tmp := alpha - if nonUnit { - tmp *= a[k*lda+k] - } - if tmp != 1 { - f32.ScalUnitary(tmp, btmpk) - } - } - return - } - for k := 0; k < m; k++ { - btmpk := b[k*ldb : k*ldb+n] - for i, va := range a[k*lda : k*lda+k] { - btmp := b[i*ldb : i*ldb+n] - if va != 0 { - f32.AxpyUnitary(alpha*va, btmpk, btmp) - } - } - tmp := alpha - if nonUnit { - tmp *= a[k*lda+k] - } - if tmp != 1 { - f32.ScalUnitary(tmp, btmpk) - } - } - return - } - // Cases where a is on the right - if tA == blas.NoTrans { - if ul == blas.Upper { - for i := 0; i < m; i++ { - btmp := b[i*ldb : i*ldb+n] - for k := n - 1; k >= 0; k-- { - tmp := alpha * btmp[k] - if tmp == 0 { - continue - } - btmp[k] = tmp - if nonUnit { - btmp[k] *= a[k*lda+k] - } - f32.AxpyUnitary(tmp, a[k*lda+k+1:k*lda+n], btmp[k+1:n]) - } - } - return - } - for i := 0; i < m; i++ { - btmp := b[i*ldb : i*ldb+n] - for k := 0; k < n; k++ { - tmp := alpha * btmp[k] - if tmp == 0 { - continue - } - btmp[k] = tmp - if nonUnit { - btmp[k] *= a[k*lda+k] - } - f32.AxpyUnitary(tmp, a[k*lda:k*lda+k], btmp[:k]) - } - } - return - } - // Cases where a is transposed. - if ul == blas.Upper { - for i := 0; i < m; i++ { - btmp := b[i*ldb : i*ldb+n] - for j, vb := range btmp { - tmp := vb - if nonUnit { - tmp *= a[j*lda+j] - } - tmp += f32.DotUnitary(a[j*lda+j+1:j*lda+n], btmp[j+1:n]) - btmp[j] = alpha * tmp - } - } - return - } - for i := 0; i < m; i++ { - btmp := b[i*ldb : i*ldb+n] - for j := n - 1; j >= 0; j-- { - tmp := btmp[j] - if nonUnit { - tmp *= a[j*lda+j] - } - tmp += f32.DotUnitary(a[j*lda:j*lda+j], btmp[:j]) - btmp[j] = alpha * tmp - } - } -} diff --git a/vendor/gonum.org/v1/gonum/blas/gonum/level3float64.go b/vendor/gonum.org/v1/gonum/blas/gonum/level3float64.go deleted file mode 100644 index 9eebd9069..000000000 --- a/vendor/gonum.org/v1/gonum/blas/gonum/level3float64.go +++ /dev/null @@ -1,864 +0,0 @@ -// Copyright ©2014 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
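For the float32 triangular routines removed above, a hedged sketch of a Strsm call solving A * X = alpha * B in place, with illustrative 2×2 data and an impl value of my own naming. On return b holds X; as the doc comment notes, no check is made that A is invertible.

package main

import (
	"fmt"

	"gonum.org/v1/gonum/blas"
	"gonum.org/v1/gonum/blas/gonum"
)

func main() {
	// Upper triangular 2x2 A; b holds the right-hand sides of A * X = B
	// and is overwritten with the solution X.
	a := []float32{2, 1, 0, 4}
	b := []float32{5, 3, 8, 4}

	var impl gonum.Implementation
	impl.Strsm(blas.Left, blas.Upper, blas.NoTrans, blas.NonUnit, 2, 2, 1, a, 2, b, 2)
	fmt.Println(b) // [1.5 1 2 1]
}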
- -package gonum - -import ( - "gonum.org/v1/gonum/blas" - "gonum.org/v1/gonum/internal/asm/f64" -) - -var _ blas.Float64Level3 = Implementation{} - -// Dtrsm solves one of the matrix equations -// A * X = alpha * B if tA == blas.NoTrans and side == blas.Left -// A^T * X = alpha * B if tA == blas.Trans or blas.ConjTrans, and side == blas.Left -// X * A = alpha * B if tA == blas.NoTrans and side == blas.Right -// X * A^T = alpha * B if tA == blas.Trans or blas.ConjTrans, and side == blas.Right -// where A is an n×n or m×m triangular matrix, X and B are m×n matrices, and alpha is a -// scalar. -// -// At entry to the function, X contains the values of B, and the result is -// stored in-place into X. -// -// No check is made that A is invertible. -func (Implementation) Dtrsm(s blas.Side, ul blas.Uplo, tA blas.Transpose, d blas.Diag, m, n int, alpha float64, a []float64, lda int, b []float64, ldb int) { - if s != blas.Left && s != blas.Right { - panic(badSide) - } - if ul != blas.Lower && ul != blas.Upper { - panic(badUplo) - } - if tA != blas.NoTrans && tA != blas.Trans && tA != blas.ConjTrans { - panic(badTranspose) - } - if d != blas.NonUnit && d != blas.Unit { - panic(badDiag) - } - if m < 0 { - panic(mLT0) - } - if n < 0 { - panic(nLT0) - } - k := n - if s == blas.Left { - k = m - } - if lda < max(1, k) { - panic(badLdA) - } - if ldb < max(1, n) { - panic(badLdB) - } - - // Quick return if possible. - if m == 0 || n == 0 { - return - } - - // For zero matrix size the following slice length checks are trivially satisfied. - if len(a) < lda*(k-1)+k { - panic(shortA) - } - if len(b) < ldb*(m-1)+n { - panic(shortB) - } - - if alpha == 0 { - for i := 0; i < m; i++ { - btmp := b[i*ldb : i*ldb+n] - for j := range btmp { - btmp[j] = 0 - } - } - return - } - nonUnit := d == blas.NonUnit - if s == blas.Left { - if tA == blas.NoTrans { - if ul == blas.Upper { - for i := m - 1; i >= 0; i-- { - btmp := b[i*ldb : i*ldb+n] - if alpha != 1 { - f64.ScalUnitary(alpha, btmp) - } - for ka, va := range a[i*lda+i+1 : i*lda+m] { - if va != 0 { - k := ka + i + 1 - f64.AxpyUnitary(-va, b[k*ldb:k*ldb+n], btmp) - } - } - if nonUnit { - tmp := 1 / a[i*lda+i] - f64.ScalUnitary(tmp, btmp) - } - } - return - } - for i := 0; i < m; i++ { - btmp := b[i*ldb : i*ldb+n] - if alpha != 1 { - f64.ScalUnitary(alpha, btmp) - } - for k, va := range a[i*lda : i*lda+i] { - if va != 0 { - f64.AxpyUnitary(-va, b[k*ldb:k*ldb+n], btmp) - } - } - if nonUnit { - tmp := 1 / a[i*lda+i] - f64.ScalUnitary(tmp, btmp) - } - } - return - } - // Cases where a is transposed - if ul == blas.Upper { - for k := 0; k < m; k++ { - btmpk := b[k*ldb : k*ldb+n] - if nonUnit { - tmp := 1 / a[k*lda+k] - f64.ScalUnitary(tmp, btmpk) - } - for ia, va := range a[k*lda+k+1 : k*lda+m] { - if va != 0 { - i := ia + k + 1 - f64.AxpyUnitary(-va, btmpk, b[i*ldb:i*ldb+n]) - } - } - if alpha != 1 { - f64.ScalUnitary(alpha, btmpk) - } - } - return - } - for k := m - 1; k >= 0; k-- { - btmpk := b[k*ldb : k*ldb+n] - if nonUnit { - tmp := 1 / a[k*lda+k] - f64.ScalUnitary(tmp, btmpk) - } - for i, va := range a[k*lda : k*lda+k] { - if va != 0 { - f64.AxpyUnitary(-va, btmpk, b[i*ldb:i*ldb+n]) - } - } - if alpha != 1 { - f64.ScalUnitary(alpha, btmpk) - } - } - return - } - // Cases where a is to the right of X. 
- if tA == blas.NoTrans { - if ul == blas.Upper { - for i := 0; i < m; i++ { - btmp := b[i*ldb : i*ldb+n] - if alpha != 1 { - f64.ScalUnitary(alpha, btmp) - } - for k, vb := range btmp { - if vb == 0 { - continue - } - if nonUnit { - btmp[k] /= a[k*lda+k] - } - f64.AxpyUnitary(-btmp[k], a[k*lda+k+1:k*lda+n], btmp[k+1:n]) - } - } - return - } - for i := 0; i < m; i++ { - btmp := b[i*ldb : i*ldb+n] - if alpha != 1 { - f64.ScalUnitary(alpha, btmp) - } - for k := n - 1; k >= 0; k-- { - if btmp[k] == 0 { - continue - } - if nonUnit { - btmp[k] /= a[k*lda+k] - } - f64.AxpyUnitary(-btmp[k], a[k*lda:k*lda+k], btmp[:k]) - } - } - return - } - // Cases where a is transposed. - if ul == blas.Upper { - for i := 0; i < m; i++ { - btmp := b[i*ldb : i*ldb+n] - for j := n - 1; j >= 0; j-- { - tmp := alpha*btmp[j] - f64.DotUnitary(a[j*lda+j+1:j*lda+n], btmp[j+1:]) - if nonUnit { - tmp /= a[j*lda+j] - } - btmp[j] = tmp - } - } - return - } - for i := 0; i < m; i++ { - btmp := b[i*ldb : i*ldb+n] - for j := 0; j < n; j++ { - tmp := alpha*btmp[j] - f64.DotUnitary(a[j*lda:j*lda+j], btmp[:j]) - if nonUnit { - tmp /= a[j*lda+j] - } - btmp[j] = tmp - } - } -} - -// Dsymm performs one of the matrix-matrix operations -// C = alpha * A * B + beta * C if side == blas.Left -// C = alpha * B * A + beta * C if side == blas.Right -// where A is an n×n or m×m symmetric matrix, B and C are m×n matrices, and alpha -// is a scalar. -func (Implementation) Dsymm(s blas.Side, ul blas.Uplo, m, n int, alpha float64, a []float64, lda int, b []float64, ldb int, beta float64, c []float64, ldc int) { - if s != blas.Right && s != blas.Left { - panic(badSide) - } - if ul != blas.Lower && ul != blas.Upper { - panic(badUplo) - } - if m < 0 { - panic(mLT0) - } - if n < 0 { - panic(nLT0) - } - k := n - if s == blas.Left { - k = m - } - if lda < max(1, k) { - panic(badLdA) - } - if ldb < max(1, n) { - panic(badLdB) - } - if ldc < max(1, n) { - panic(badLdC) - } - - // Quick return if possible. - if m == 0 || n == 0 { - return - } - - // For zero matrix size the following slice length checks are trivially satisfied. - if len(a) < lda*(k-1)+k { - panic(shortA) - } - if len(b) < ldb*(m-1)+n { - panic(shortB) - } - if len(c) < ldc*(m-1)+n { - panic(shortC) - } - - // Quick return if possible. 
- if alpha == 0 && beta == 1 { - return - } - - if alpha == 0 { - if beta == 0 { - for i := 0; i < m; i++ { - ctmp := c[i*ldc : i*ldc+n] - for j := range ctmp { - ctmp[j] = 0 - } - } - return - } - for i := 0; i < m; i++ { - ctmp := c[i*ldc : i*ldc+n] - for j := 0; j < n; j++ { - ctmp[j] *= beta - } - } - return - } - - isUpper := ul == blas.Upper - if s == blas.Left { - for i := 0; i < m; i++ { - atmp := alpha * a[i*lda+i] - btmp := b[i*ldb : i*ldb+n] - ctmp := c[i*ldc : i*ldc+n] - for j, v := range btmp { - ctmp[j] *= beta - ctmp[j] += atmp * v - } - - for k := 0; k < i; k++ { - var atmp float64 - if isUpper { - atmp = a[k*lda+i] - } else { - atmp = a[i*lda+k] - } - atmp *= alpha - f64.AxpyUnitary(atmp, b[k*ldb:k*ldb+n], ctmp) - } - for k := i + 1; k < m; k++ { - var atmp float64 - if isUpper { - atmp = a[i*lda+k] - } else { - atmp = a[k*lda+i] - } - atmp *= alpha - f64.AxpyUnitary(atmp, b[k*ldb:k*ldb+n], ctmp) - } - } - return - } - if isUpper { - for i := 0; i < m; i++ { - for j := n - 1; j >= 0; j-- { - tmp := alpha * b[i*ldb+j] - var tmp2 float64 - atmp := a[j*lda+j+1 : j*lda+n] - btmp := b[i*ldb+j+1 : i*ldb+n] - ctmp := c[i*ldc+j+1 : i*ldc+n] - for k, v := range atmp { - ctmp[k] += tmp * v - tmp2 += btmp[k] * v - } - c[i*ldc+j] *= beta - c[i*ldc+j] += tmp*a[j*lda+j] + alpha*tmp2 - } - } - return - } - for i := 0; i < m; i++ { - for j := 0; j < n; j++ { - tmp := alpha * b[i*ldb+j] - var tmp2 float64 - atmp := a[j*lda : j*lda+j] - btmp := b[i*ldb : i*ldb+j] - ctmp := c[i*ldc : i*ldc+j] - for k, v := range atmp { - ctmp[k] += tmp * v - tmp2 += btmp[k] * v - } - c[i*ldc+j] *= beta - c[i*ldc+j] += tmp*a[j*lda+j] + alpha*tmp2 - } - } -} - -// Dsyrk performs one of the symmetric rank-k operations -// C = alpha * A * A^T + beta * C if tA == blas.NoTrans -// C = alpha * A^T * A + beta * C if tA == blas.Trans or tA == blas.ConjTrans -// where A is an n×k or k×n matrix, C is an n×n symmetric matrix, and alpha and -// beta are scalars. -func (Implementation) Dsyrk(ul blas.Uplo, tA blas.Transpose, n, k int, alpha float64, a []float64, lda int, beta float64, c []float64, ldc int) { - if ul != blas.Lower && ul != blas.Upper { - panic(badUplo) - } - if tA != blas.Trans && tA != blas.NoTrans && tA != blas.ConjTrans { - panic(badTranspose) - } - if n < 0 { - panic(nLT0) - } - if k < 0 { - panic(kLT0) - } - row, col := k, n - if tA == blas.NoTrans { - row, col = n, k - } - if lda < max(1, col) { - panic(badLdA) - } - if ldc < max(1, n) { - panic(badLdC) - } - - // Quick return if possible. - if n == 0 { - return - } - - // For zero matrix size the following slice length checks are trivially satisfied. 
- if len(a) < lda*(row-1)+col { - panic(shortA) - } - if len(c) < ldc*(n-1)+n { - panic(shortC) - } - - if alpha == 0 { - if beta == 0 { - if ul == blas.Upper { - for i := 0; i < n; i++ { - ctmp := c[i*ldc+i : i*ldc+n] - for j := range ctmp { - ctmp[j] = 0 - } - } - return - } - for i := 0; i < n; i++ { - ctmp := c[i*ldc : i*ldc+i+1] - for j := range ctmp { - ctmp[j] = 0 - } - } - return - } - if ul == blas.Upper { - for i := 0; i < n; i++ { - ctmp := c[i*ldc+i : i*ldc+n] - for j := range ctmp { - ctmp[j] *= beta - } - } - return - } - for i := 0; i < n; i++ { - ctmp := c[i*ldc : i*ldc+i+1] - for j := range ctmp { - ctmp[j] *= beta - } - } - return - } - if tA == blas.NoTrans { - if ul == blas.Upper { - for i := 0; i < n; i++ { - ctmp := c[i*ldc+i : i*ldc+n] - atmp := a[i*lda : i*lda+k] - if beta == 0 { - for jc := range ctmp { - j := jc + i - ctmp[jc] = alpha * f64.DotUnitary(atmp, a[j*lda:j*lda+k]) - } - } else { - for jc, vc := range ctmp { - j := jc + i - ctmp[jc] = vc*beta + alpha*f64.DotUnitary(atmp, a[j*lda:j*lda+k]) - } - } - } - return - } - for i := 0; i < n; i++ { - ctmp := c[i*ldc : i*ldc+i+1] - atmp := a[i*lda : i*lda+k] - if beta == 0 { - for j := range ctmp { - ctmp[j] = alpha * f64.DotUnitary(a[j*lda:j*lda+k], atmp) - } - } else { - for j, vc := range ctmp { - ctmp[j] = vc*beta + alpha*f64.DotUnitary(a[j*lda:j*lda+k], atmp) - } - } - } - return - } - // Cases where a is transposed. - if ul == blas.Upper { - for i := 0; i < n; i++ { - ctmp := c[i*ldc+i : i*ldc+n] - if beta == 0 { - for j := range ctmp { - ctmp[j] = 0 - } - } else if beta != 1 { - for j := range ctmp { - ctmp[j] *= beta - } - } - for l := 0; l < k; l++ { - tmp := alpha * a[l*lda+i] - if tmp != 0 { - f64.AxpyUnitary(tmp, a[l*lda+i:l*lda+n], ctmp) - } - } - } - return - } - for i := 0; i < n; i++ { - ctmp := c[i*ldc : i*ldc+i+1] - if beta != 1 { - for j := range ctmp { - ctmp[j] *= beta - } - } - for l := 0; l < k; l++ { - tmp := alpha * a[l*lda+i] - if tmp != 0 { - f64.AxpyUnitary(tmp, a[l*lda:l*lda+i+1], ctmp) - } - } - } -} - -// Dsyr2k performs one of the symmetric rank 2k operations -// C = alpha * A * B^T + alpha * B * A^T + beta * C if tA == blas.NoTrans -// C = alpha * A^T * B + alpha * B^T * A + beta * C if tA == blas.Trans or tA == blas.ConjTrans -// where A and B are n×k or k×n matrices, C is an n×n symmetric matrix, and -// alpha and beta are scalars. -func (Implementation) Dsyr2k(ul blas.Uplo, tA blas.Transpose, n, k int, alpha float64, a []float64, lda int, b []float64, ldb int, beta float64, c []float64, ldc int) { - if ul != blas.Lower && ul != blas.Upper { - panic(badUplo) - } - if tA != blas.Trans && tA != blas.NoTrans && tA != blas.ConjTrans { - panic(badTranspose) - } - if n < 0 { - panic(nLT0) - } - if k < 0 { - panic(kLT0) - } - row, col := k, n - if tA == blas.NoTrans { - row, col = n, k - } - if lda < max(1, col) { - panic(badLdA) - } - if ldb < max(1, col) { - panic(badLdB) - } - if ldc < max(1, n) { - panic(badLdC) - } - - // Quick return if possible. - if n == 0 { - return - } - - // For zero matrix size the following slice length checks are trivially satisfied. 
- if len(a) < lda*(row-1)+col { - panic(shortA) - } - if len(b) < ldb*(row-1)+col { - panic(shortB) - } - if len(c) < ldc*(n-1)+n { - panic(shortC) - } - - if alpha == 0 { - if beta == 0 { - if ul == blas.Upper { - for i := 0; i < n; i++ { - ctmp := c[i*ldc+i : i*ldc+n] - for j := range ctmp { - ctmp[j] = 0 - } - } - return - } - for i := 0; i < n; i++ { - ctmp := c[i*ldc : i*ldc+i+1] - for j := range ctmp { - ctmp[j] = 0 - } - } - return - } - if ul == blas.Upper { - for i := 0; i < n; i++ { - ctmp := c[i*ldc+i : i*ldc+n] - for j := range ctmp { - ctmp[j] *= beta - } - } - return - } - for i := 0; i < n; i++ { - ctmp := c[i*ldc : i*ldc+i+1] - for j := range ctmp { - ctmp[j] *= beta - } - } - return - } - if tA == blas.NoTrans { - if ul == blas.Upper { - for i := 0; i < n; i++ { - atmp := a[i*lda : i*lda+k] - btmp := b[i*ldb : i*ldb+k] - ctmp := c[i*ldc+i : i*ldc+n] - for jc := range ctmp { - j := i + jc - var tmp1, tmp2 float64 - binner := b[j*ldb : j*ldb+k] - for l, v := range a[j*lda : j*lda+k] { - tmp1 += v * btmp[l] - tmp2 += atmp[l] * binner[l] - } - ctmp[jc] *= beta - ctmp[jc] += alpha * (tmp1 + tmp2) - } - } - return - } - for i := 0; i < n; i++ { - atmp := a[i*lda : i*lda+k] - btmp := b[i*ldb : i*ldb+k] - ctmp := c[i*ldc : i*ldc+i+1] - for j := 0; j <= i; j++ { - var tmp1, tmp2 float64 - binner := b[j*ldb : j*ldb+k] - for l, v := range a[j*lda : j*lda+k] { - tmp1 += v * btmp[l] - tmp2 += atmp[l] * binner[l] - } - ctmp[j] *= beta - ctmp[j] += alpha * (tmp1 + tmp2) - } - } - return - } - if ul == blas.Upper { - for i := 0; i < n; i++ { - ctmp := c[i*ldc+i : i*ldc+n] - if beta != 1 { - for j := range ctmp { - ctmp[j] *= beta - } - } - for l := 0; l < k; l++ { - tmp1 := alpha * b[l*ldb+i] - tmp2 := alpha * a[l*lda+i] - btmp := b[l*ldb+i : l*ldb+n] - if tmp1 != 0 || tmp2 != 0 { - for j, v := range a[l*lda+i : l*lda+n] { - ctmp[j] += v*tmp1 + btmp[j]*tmp2 - } - } - } - } - return - } - for i := 0; i < n; i++ { - ctmp := c[i*ldc : i*ldc+i+1] - if beta != 1 { - for j := range ctmp { - ctmp[j] *= beta - } - } - for l := 0; l < k; l++ { - tmp1 := alpha * b[l*ldb+i] - tmp2 := alpha * a[l*lda+i] - btmp := b[l*ldb : l*ldb+i+1] - if tmp1 != 0 || tmp2 != 0 { - for j, v := range a[l*lda : l*lda+i+1] { - ctmp[j] += v*tmp1 + btmp[j]*tmp2 - } - } - } - } -} - -// Dtrmm performs one of the matrix-matrix operations -// B = alpha * A * B if tA == blas.NoTrans and side == blas.Left -// B = alpha * A^T * B if tA == blas.Trans or blas.ConjTrans, and side == blas.Left -// B = alpha * B * A if tA == blas.NoTrans and side == blas.Right -// B = alpha * B * A^T if tA == blas.Trans or blas.ConjTrans, and side == blas.Right -// where A is an n×n or m×m triangular matrix, B is an m×n matrix, and alpha is a scalar. -func (Implementation) Dtrmm(s blas.Side, ul blas.Uplo, tA blas.Transpose, d blas.Diag, m, n int, alpha float64, a []float64, lda int, b []float64, ldb int) { - if s != blas.Left && s != blas.Right { - panic(badSide) - } - if ul != blas.Lower && ul != blas.Upper { - panic(badUplo) - } - if tA != blas.NoTrans && tA != blas.Trans && tA != blas.ConjTrans { - panic(badTranspose) - } - if d != blas.NonUnit && d != blas.Unit { - panic(badDiag) - } - if m < 0 { - panic(mLT0) - } - if n < 0 { - panic(nLT0) - } - k := n - if s == blas.Left { - k = m - } - if lda < max(1, k) { - panic(badLdA) - } - if ldb < max(1, n) { - panic(badLdB) - } - - // Quick return if possible. - if m == 0 || n == 0 { - return - } - - // For zero matrix size the following slice length checks are trivially satisfied. 
- if len(a) < lda*(k-1)+k { - panic(shortA) - } - if len(b) < ldb*(m-1)+n { - panic(shortB) - } - - if alpha == 0 { - for i := 0; i < m; i++ { - btmp := b[i*ldb : i*ldb+n] - for j := range btmp { - btmp[j] = 0 - } - } - return - } - - nonUnit := d == blas.NonUnit - if s == blas.Left { - if tA == blas.NoTrans { - if ul == blas.Upper { - for i := 0; i < m; i++ { - tmp := alpha - if nonUnit { - tmp *= a[i*lda+i] - } - btmp := b[i*ldb : i*ldb+n] - f64.ScalUnitary(tmp, btmp) - for ka, va := range a[i*lda+i+1 : i*lda+m] { - k := ka + i + 1 - if va != 0 { - f64.AxpyUnitary(alpha*va, b[k*ldb:k*ldb+n], btmp) - } - } - } - return - } - for i := m - 1; i >= 0; i-- { - tmp := alpha - if nonUnit { - tmp *= a[i*lda+i] - } - btmp := b[i*ldb : i*ldb+n] - f64.ScalUnitary(tmp, btmp) - for k, va := range a[i*lda : i*lda+i] { - if va != 0 { - f64.AxpyUnitary(alpha*va, b[k*ldb:k*ldb+n], btmp) - } - } - } - return - } - // Cases where a is transposed. - if ul == blas.Upper { - for k := m - 1; k >= 0; k-- { - btmpk := b[k*ldb : k*ldb+n] - for ia, va := range a[k*lda+k+1 : k*lda+m] { - i := ia + k + 1 - btmp := b[i*ldb : i*ldb+n] - if va != 0 { - f64.AxpyUnitary(alpha*va, btmpk, btmp) - } - } - tmp := alpha - if nonUnit { - tmp *= a[k*lda+k] - } - if tmp != 1 { - f64.ScalUnitary(tmp, btmpk) - } - } - return - } - for k := 0; k < m; k++ { - btmpk := b[k*ldb : k*ldb+n] - for i, va := range a[k*lda : k*lda+k] { - btmp := b[i*ldb : i*ldb+n] - if va != 0 { - f64.AxpyUnitary(alpha*va, btmpk, btmp) - } - } - tmp := alpha - if nonUnit { - tmp *= a[k*lda+k] - } - if tmp != 1 { - f64.ScalUnitary(tmp, btmpk) - } - } - return - } - // Cases where a is on the right - if tA == blas.NoTrans { - if ul == blas.Upper { - for i := 0; i < m; i++ { - btmp := b[i*ldb : i*ldb+n] - for k := n - 1; k >= 0; k-- { - tmp := alpha * btmp[k] - if tmp == 0 { - continue - } - btmp[k] = tmp - if nonUnit { - btmp[k] *= a[k*lda+k] - } - f64.AxpyUnitary(tmp, a[k*lda+k+1:k*lda+n], btmp[k+1:n]) - } - } - return - } - for i := 0; i < m; i++ { - btmp := b[i*ldb : i*ldb+n] - for k := 0; k < n; k++ { - tmp := alpha * btmp[k] - if tmp == 0 { - continue - } - btmp[k] = tmp - if nonUnit { - btmp[k] *= a[k*lda+k] - } - f64.AxpyUnitary(tmp, a[k*lda:k*lda+k], btmp[:k]) - } - } - return - } - // Cases where a is transposed. - if ul == blas.Upper { - for i := 0; i < m; i++ { - btmp := b[i*ldb : i*ldb+n] - for j, vb := range btmp { - tmp := vb - if nonUnit { - tmp *= a[j*lda+j] - } - tmp += f64.DotUnitary(a[j*lda+j+1:j*lda+n], btmp[j+1:n]) - btmp[j] = alpha * tmp - } - } - return - } - for i := 0; i < m; i++ { - btmp := b[i*ldb : i*ldb+n] - for j := n - 1; j >= 0; j-- { - tmp := btmp[j] - if nonUnit { - tmp *= a[j*lda+j] - } - tmp += f64.DotUnitary(a[j*lda:j*lda+j], btmp[:j]) - btmp[j] = alpha * tmp - } - } -} diff --git a/vendor/gonum.org/v1/gonum/blas/gonum/sgemm.go b/vendor/gonum.org/v1/gonum/blas/gonum/sgemm.go deleted file mode 100644 index e868a1050..000000000 --- a/vendor/gonum.org/v1/gonum/blas/gonum/sgemm.go +++ /dev/null @@ -1,318 +0,0 @@ -// Code generated by "go generate gonum.org/v1/gonum/blas/gonum"; DO NOT EDIT. - -// Copyright ©2014 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file.
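For context on how callers drive the level-3 routines deleted above: they are methods on the exported Implementation type of gonum.org/v1/gonum/blas/gonum. A minimal sketch of a Dtrsm call follows; the 2×2 matrices, leading dimensions, and printed result are illustrative assumptions, not part of this patch.

package main

import (
	"fmt"

	"gonum.org/v1/gonum/blas"
	"gonum.org/v1/gonum/blas/gonum"
)

func main() {
	// Solve A * X = alpha * B in place, with A a 2×2 upper-triangular
	// matrix stored in row-major order with leading dimension 2.
	a := []float64{
		2, 1,
		0, 4,
	}
	// b holds B on entry and is overwritten with the solution X.
	b := []float64{
		4, 6,
		8, 12,
	}
	var impl gonum.Implementation
	impl.Dtrsm(blas.Left, blas.Upper, blas.NoTrans, blas.NonUnit, 2, 2, 1, a, 2, b, 2)
	fmt.Println(b) // [1 1.5 2 3], since A * X = B.
}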
- -package gonum - -import ( - "runtime" - "sync" - - "gonum.org/v1/gonum/blas" - "gonum.org/v1/gonum/internal/asm/f32" -) - -// Sgemm performs one of the matrix-matrix operations -// C = alpha * A * B + beta * C -// C = alpha * A^T * B + beta * C -// C = alpha * A * B^T + beta * C -// C = alpha * A^T * B^T + beta * C -// where A is an m×k or k×m dense matrix, B is an n×k or k×n dense matrix, C is -// an m×n matrix, and alpha and beta are scalars. tA and tB specify whether A or -// B are transposed. -// -// Float32 implementations are autogenerated and not directly tested. -func (Implementation) Sgemm(tA, tB blas.Transpose, m, n, k int, alpha float32, a []float32, lda int, b []float32, ldb int, beta float32, c []float32, ldc int) { - switch tA { - default: - panic(badTranspose) - case blas.NoTrans, blas.Trans, blas.ConjTrans: - } - switch tB { - default: - panic(badTranspose) - case blas.NoTrans, blas.Trans, blas.ConjTrans: - } - if m < 0 { - panic(mLT0) - } - if n < 0 { - panic(nLT0) - } - if k < 0 { - panic(kLT0) - } - aTrans := tA == blas.Trans || tA == blas.ConjTrans - if aTrans { - if lda < max(1, m) { - panic(badLdA) - } - } else { - if lda < max(1, k) { - panic(badLdA) - } - } - bTrans := tB == blas.Trans || tB == blas.ConjTrans - if bTrans { - if ldb < max(1, k) { - panic(badLdB) - } - } else { - if ldb < max(1, n) { - panic(badLdB) - } - } - if ldc < max(1, n) { - panic(badLdC) - } - - // Quick return if possible. - if m == 0 || n == 0 { - return - } - - // For zero matrix size the following slice length checks are trivially satisfied. - if aTrans { - if len(a) < (k-1)*lda+m { - panic(shortA) - } - } else { - if len(a) < (m-1)*lda+k { - panic(shortA) - } - } - if bTrans { - if len(b) < (n-1)*ldb+k { - panic(shortB) - } - } else { - if len(b) < (k-1)*ldb+n { - panic(shortB) - } - } - if len(c) < (m-1)*ldc+n { - panic(shortC) - } - - // Quick return if possible. - if (alpha == 0 || k == 0) && beta == 1 { - return - } - - // scale c - if beta != 1 { - if beta == 0 { - for i := 0; i < m; i++ { - ctmp := c[i*ldc : i*ldc+n] - for j := range ctmp { - ctmp[j] = 0 - } - } - } else { - for i := 0; i < m; i++ { - ctmp := c[i*ldc : i*ldc+n] - for j := range ctmp { - ctmp[j] *= beta - } - } - } - } - - sgemmParallel(aTrans, bTrans, m, n, k, a, lda, b, ldb, c, ldc, alpha) -} - -func sgemmParallel(aTrans, bTrans bool, m, n, k int, a []float32, lda int, b []float32, ldb int, c []float32, ldc int, alpha float32) { - // sgemmParallel computes a parallel matrix multiplication by partitioning - // a and b into sub-blocks, and updating c with the multiplication of the sub-blocks. - // In all cases, - // A = [ A_11 A_12 ... A_1j - // A_21 A_22 ... A_2j - // ... - // A_i1 A_i2 ... A_ij] - // - // and same for B. All of the submatrix sizes are blockSize×blockSize except - // at the edges. - // - // In all cases, there is one dimension for each matrix along which - // C must be updated sequentially. - // Cij = \sum_k Aik Bkj, (A * B) - // Cij = \sum_k Aki Bkj, (A^T * B) - // Cij = \sum_k Aik Bjk, (A * B^T) - // Cij = \sum_k Aki Bjk, (A^T * B^T) - // - // This code computes one {i, j} block sequentially along the k dimension, - // and computes all of the {i, j} blocks concurrently. This - // partitioning allows Cij to be updated in-place without race-conditions. - // Instead of launching a goroutine for each possible concurrent computation, - // a number of worker goroutines are created and channels are used to pass - // available and completed cases.
- // - // http://alexkr.com/docs/matrixmult.pdf is a good reference on matrix-matrix - // multiplies, though this code does not copy matrices to attempt to eliminate - // cache misses. - - maxKLen := k - parBlocks := blocks(m, blockSize) * blocks(n, blockSize) - if parBlocks < minParBlock { - // The matrix multiplication is small in the dimensions where it can be - // computed concurrently. Just do it in serial. - sgemmSerial(aTrans, bTrans, m, n, k, a, lda, b, ldb, c, ldc, alpha) - return - } - - nWorkers := runtime.GOMAXPROCS(0) - if parBlocks < nWorkers { - nWorkers = parBlocks - } - // There is a tradeoff between the workers having to wait for work - // and a large buffer making operations slow. - buf := buffMul * nWorkers - if buf > parBlocks { - buf = parBlocks - } - - sendChan := make(chan subMul, buf) - - // Launch workers. A worker receives an {i, j} submatrix of c, and computes - // A_ik B_kj (or the transposed version) storing the result in c_ij. When the - // channel is finally closed, it signals to the waitgroup that it has finished - // computing. - var wg sync.WaitGroup - for i := 0; i < nWorkers; i++ { - wg.Add(1) - go func() { - defer wg.Done() - for sub := range sendChan { - i := sub.i - j := sub.j - leni := blockSize - if i+leni > m { - leni = m - i - } - lenj := blockSize - if j+lenj > n { - lenj = n - j - } - - cSub := sliceView32(c, ldc, i, j, leni, lenj) - - // Compute A_ik B_kj for all k - for k := 0; k < maxKLen; k += blockSize { - lenk := blockSize - if k+lenk > maxKLen { - lenk = maxKLen - k - } - var aSub, bSub []float32 - if aTrans { - aSub = sliceView32(a, lda, k, i, lenk, leni) - } else { - aSub = sliceView32(a, lda, i, k, leni, lenk) - } - if bTrans { - bSub = sliceView32(b, ldb, j, k, lenj, lenk) - } else { - bSub = sliceView32(b, ldb, k, j, lenk, lenj) - } - sgemmSerial(aTrans, bTrans, leni, lenj, lenk, aSub, lda, bSub, ldb, cSub, ldc, alpha) - } - } - }() - } - - // Send out all of the {i, j} subblocks for computation. - for i := 0; i < m; i += blockSize { - for j := 0; j < n; j += blockSize { - sendChan <- subMul{ - i: i, - j: j, - } - } - } - close(sendChan) - wg.Wait() -} - -// sgemmSerial is a serial matrix multiply. -func sgemmSerial(aTrans, bTrans bool, m, n, k int, a []float32, lda int, b []float32, ldb int, c []float32, ldc int, alpha float32) { - switch { - case !aTrans && !bTrans: - sgemmSerialNotNot(m, n, k, a, lda, b, ldb, c, ldc, alpha) - return - case aTrans && !bTrans: - sgemmSerialTransNot(m, n, k, a, lda, b, ldb, c, ldc, alpha) - return - case !aTrans && bTrans: - sgemmSerialNotTrans(m, n, k, a, lda, b, ldb, c, ldc, alpha) - return - case aTrans && bTrans: - sgemmSerialTransTrans(m, n, k, a, lda, b, ldb, c, ldc, alpha) - return - default: - panic("unreachable") - } -} - -// sgemmSerial where neither a nor b are transposed -func sgemmSerialNotNot(m, n, k int, a []float32, lda int, b []float32, ldb int, c []float32, ldc int, alpha float32) { - // This style is used instead of the literal [i*stride+j] because it is - // approximately 5 times faster as of go 1.3.
- for i := 0; i < m; i++ { - ctmp := c[i*ldc : i*ldc+n] - for l, v := range a[i*lda : i*lda+k] { - tmp := alpha * v - if tmp != 0 { - f32.AxpyUnitary(tmp, b[l*ldb:l*ldb+n], ctmp) - } - } - } -} - -// sgemmSerial where a is transposed and b is not -func sgemmSerialTransNot(m, n, k int, a []float32, lda int, b []float32, ldb int, c []float32, ldc int, alpha float32) { - // This style is used instead of the literal [i*stride+j] because it is - // approximately 5 times faster as of go 1.3. - for l := 0; l < k; l++ { - btmp := b[l*ldb : l*ldb+n] - for i, v := range a[l*lda : l*lda+m] { - tmp := alpha * v - if tmp != 0 { - ctmp := c[i*ldc : i*ldc+n] - f32.AxpyUnitary(tmp, btmp, ctmp) - } - } - } -} - -// sgemmSerial where a is not transposed and b is -func sgemmSerialNotTrans(m, n, k int, a []float32, lda int, b []float32, ldb int, c []float32, ldc int, alpha float32) { - // This style is used instead of the literal [i*stride+j] because it is - // approximately 5 times faster as of go 1.3. - for i := 0; i < m; i++ { - atmp := a[i*lda : i*lda+k] - ctmp := c[i*ldc : i*ldc+n] - for j := 0; j < n; j++ { - ctmp[j] += alpha * f32.DotUnitary(atmp, b[j*ldb:j*ldb+k]) - } - } -} - -// sgemmSerial where both are transposed -func sgemmSerialTransTrans(m, n, k int, a []float32, lda int, b []float32, ldb int, c []float32, ldc int, alpha float32) { - // This style is used instead of the literal [i*stride+j] because it is - // approximately 5 times faster as of go 1.3. - for l := 0; l < k; l++ { - for i, v := range a[l*lda : l*lda+m] { - tmp := alpha * v - if tmp != 0 { - ctmp := c[i*ldc : i*ldc+n] - f32.AxpyInc(tmp, b[l:], ctmp, uintptr(n), uintptr(ldb), 1, 0, 0) - } - } - } -} - -func sliceView32(a []float32, lda, i, j, r, c int) []float32 { - return a[i*lda+j : (i+r-1)*lda+j+c] -} diff --git a/vendor/gonum.org/v1/gonum/floats/doc.go b/vendor/gonum.org/v1/gonum/floats/doc.go deleted file mode 100644 index bfe05c191..000000000 --- a/vendor/gonum.org/v1/gonum/floats/doc.go +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright ©2017 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package floats provides a set of helper routines for dealing with slices -// of float64. The functions avoid allocations to allow for use within tight -// loops without garbage collection overhead. -// -// The convention used is that when a slice is being modified in place, it has -// the name dst. -package floats // import "gonum.org/v1/gonum/floats" diff --git a/vendor/gonum.org/v1/gonum/floats/floats.go b/vendor/gonum.org/v1/gonum/floats/floats.go deleted file mode 100644 index ae004a621..000000000 --- a/vendor/gonum.org/v1/gonum/floats/floats.go +++ /dev/null @@ -1,933 +0,0 @@ -// Copyright ©2013 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package floats - -import ( - "errors" - "math" - "sort" - "strconv" - - "gonum.org/v1/gonum/internal/asm/f64" -) - -// Add adds, element-wise, the elements of s and dst, and stores the result in dst. -// Panics if the lengths of dst and s do not match. -func Add(dst, s []float64) { - if len(dst) != len(s) { - panic("floats: length of the slices do not match") - } - f64.AxpyUnitaryTo(dst, 1, s, dst) -} - -// AddTo adds, element-wise, the elements of s and t and -// stores the result in dst. Panics if the lengths of s, t and dst do not match.
-func AddTo(dst, s, t []float64) []float64 { - if len(s) != len(t) { - panic("floats: length of adders do not match") - } - if len(dst) != len(s) { - panic("floats: length of destination does not match length of adder") - } - f64.AxpyUnitaryTo(dst, 1, s, t) - return dst -} - -// AddConst adds the scalar c to all of the values in dst. -func AddConst(c float64, dst []float64) { - f64.AddConst(c, dst) -} - -// AddScaled performs dst = dst + alpha * s. -// It panics if the lengths of dst and s are not equal. -func AddScaled(dst []float64, alpha float64, s []float64) { - if len(dst) != len(s) { - panic("floats: lengths of destination and source do not match") - } - f64.AxpyUnitaryTo(dst, alpha, s, dst) -} - -// AddScaledTo performs dst = y + alpha * s, where alpha is a scalar, -// and dst, y and s are all slices. -// It panics if the lengths of dst, y, and s are not equal. -// -// At the return of the function, dst[i] = y[i] + alpha * s[i] -func AddScaledTo(dst, y []float64, alpha float64, s []float64) []float64 { - if len(dst) != len(s) || len(dst) != len(y) { - panic("floats: lengths of slices do not match") - } - f64.AxpyUnitaryTo(dst, alpha, s, y) - return dst -} - -// argsort is a helper that implements sort.Interface, as used by -// Argsort. -type argsort struct { - s []float64 - inds []int -} - -func (a argsort) Len() int { - return len(a.s) -} - -func (a argsort) Less(i, j int) bool { - return a.s[i] < a.s[j] -} - -func (a argsort) Swap(i, j int) { - a.s[i], a.s[j] = a.s[j], a.s[i] - a.inds[i], a.inds[j] = a.inds[j], a.inds[i] -} - -// Argsort sorts the elements of dst while tracking their original order. -// At the conclusion of Argsort, dst will contain the original elements of dst -// but sorted in increasing order, and inds will contain the original position -// of the elements in the slice such that dst[i] = origDst[inds[i]]. -// It panics if the lengths of dst and inds do not match. -func Argsort(dst []float64, inds []int) { - if len(dst) != len(inds) { - panic("floats: length of inds does not match length of slice") - } - for i := range dst { - inds[i] = i - } - - a := argsort{s: dst, inds: inds} - sort.Sort(a) -} - -// Count applies the function f to every element of s and returns the number -// of times the function returned true. -func Count(f func(float64) bool, s []float64) int { - var n int - for _, val := range s { - if f(val) { - n++ - } - } - return n -} - -// CumProd finds the cumulative product of the first i elements in -// s and puts them in place into the ith element of the -// destination dst. A panic will occur if the lengths of arguments -// do not match. -// -// At the return of the function, dst[i] = s[i] * s[i-1] * s[i-2] * ... -func CumProd(dst, s []float64) []float64 { - if len(dst) != len(s) { - panic("floats: length of destination does not match length of the source") - } - if len(dst) == 0 { - return dst - } - return f64.CumProd(dst, s) -} - -// CumSum finds the cumulative sum of the first i elements in -// s and puts them in place into the ith element of the -// destination dst. A panic will occur if the lengths of arguments -// do not match. -// -// At the return of the function, dst[i] = s[i] + s[i-1] + s[i-2] + ... -func CumSum(dst, s []float64) []float64 { - if len(dst) != len(s) { - panic("floats: length of destination does not match length of the source") - } - if len(dst) == 0 { - return dst - } - return f64.CumSum(dst, s) -} - -// Distance computes the L-norm of s - t. See Norm for special cases.
-// A panic will occur if the lengths of s and t do not match. -func Distance(s, t []float64, L float64) float64 { - if len(s) != len(t) { - panic("floats: slice lengths do not match") - } - if len(s) == 0 { - return 0 - } - var norm float64 - if L == 2 { - for i, v := range s { - diff := t[i] - v - norm = math.Hypot(norm, diff) - } - return norm - } - if L == 1 { - for i, v := range s { - norm += math.Abs(t[i] - v) - } - return norm - } - if math.IsInf(L, 1) { - for i, v := range s { - absDiff := math.Abs(t[i] - v) - if absDiff > norm { - norm = absDiff - } - } - return norm - } - for i, v := range s { - norm += math.Pow(math.Abs(t[i]-v), L) - } - return math.Pow(norm, 1/L) -} - -// Div performs element-wise division dst / s -// and stores the value in dst. It panics if the -// lengths of dst and s are not equal. -func Div(dst, s []float64) { - if len(dst) != len(s) { - panic("floats: slice lengths do not match") - } - f64.Div(dst, s) -} - -// DivTo performs element-wise division s / t -// and stores the value in dst. It panics if the -// lengths of s, t, and dst are not equal. -func DivTo(dst, s, t []float64) []float64 { - if len(s) != len(t) || len(dst) != len(t) { - panic("floats: slice lengths do not match") - } - return f64.DivTo(dst, s, t) -} - -// Dot computes the dot product of s1 and s2, i.e. -// sum_{i = 1}^N s1[i]*s2[i]. -// A panic will occur if lengths of arguments do not match. -func Dot(s1, s2 []float64) float64 { - if len(s1) != len(s2) { - panic("floats: lengths of the slices do not match") - } - return f64.DotUnitary(s1, s2) -} - -// Equal returns true if the slices have equal lengths and -// all elements are numerically identical. -func Equal(s1, s2 []float64) bool { - if len(s1) != len(s2) { - return false - } - for i, val := range s1 { - if s2[i] != val { - return false - } - } - return true -} - -// EqualApprox returns true if the slices have equal lengths and -// all element pairs have an absolute tolerance less than tol or a -// relative tolerance less than tol. -func EqualApprox(s1, s2 []float64, tol float64) bool { - if len(s1) != len(s2) { - return false - } - for i, a := range s1 { - if !EqualWithinAbsOrRel(a, s2[i], tol, tol) { - return false - } - } - return true -} - -// EqualFunc returns true if the slices have the same lengths -// and the function returns true for all element pairs. -func EqualFunc(s1, s2 []float64, f func(float64, float64) bool) bool { - if len(s1) != len(s2) { - return false - } - for i, val := range s1 { - if !f(val, s2[i]) { - return false - } - } - return true -} - -// EqualWithinAbs returns true if a and b have an absolute -// difference of less than tol. -func EqualWithinAbs(a, b, tol float64) bool { - return a == b || math.Abs(a-b) <= tol -} - -const minNormalFloat64 = 2.2250738585072014e-308 - -// EqualWithinRel returns true if the difference between a and b -// is not greater than tol times the greater value. -func EqualWithinRel(a, b, tol float64) bool { - if a == b { - return true - } - delta := math.Abs(a - b) - if delta <= minNormalFloat64 { - return delta <= tol*minNormalFloat64 - } - // We depend on the division in this relationship to identify - // infinities (we rely on the NaN to fail the test); otherwise - // we compare Infs of the same sign and evaluate Infs as equal - // independent of sign. - return delta/math.Max(math.Abs(a), math.Abs(b)) <= tol -} - -// EqualWithinAbsOrRel returns true if a and b are equal to within -// the absolute or relative tolerances.
-func EqualWithinAbsOrRel(a, b, absTol, relTol float64) bool { - if EqualWithinAbs(a, b, absTol) { - return true - } - return EqualWithinRel(a, b, relTol) -} - -// EqualWithinULP returns true if a and b are equal to within -// the specified number of floating point units in the last place. -func EqualWithinULP(a, b float64, ulp uint) bool { - if a == b { - return true - } - if math.IsNaN(a) || math.IsNaN(b) { - return false - } - if math.Signbit(a) != math.Signbit(b) { - return math.Float64bits(math.Abs(a))+math.Float64bits(math.Abs(b)) <= uint64(ulp) - } - return ulpDiff(math.Float64bits(a), math.Float64bits(b)) <= uint64(ulp) -} - -func ulpDiff(a, b uint64) uint64 { - if a > b { - return a - b - } - return b - a -} - -// EqualLengths returns true if all of the slices have equal length, -// and false otherwise. Returns true if there are no input slices. -func EqualLengths(slices ...[]float64) bool { - // This length check is needed: http://play.golang.org/p/sdty6YiLhM - if len(slices) == 0 { - return true - } - l := len(slices[0]) - for i := 1; i < len(slices); i++ { - if len(slices[i]) != l { - return false - } - } - return true -} - -// Find applies f to every element of s and returns the indices of the first -// k elements for which f returns true, or all such elements -// if k < 0. -// Find will reslice inds to have 0 length, and will append -// found indices to inds. -// If k > 0 and there are fewer than k elements in s satisfying f, -// all of the found elements will be returned along with an error. -// At the return of the function, the input inds will be in an undetermined state. -func Find(inds []int, f func(float64) bool, s []float64, k int) ([]int, error) { - // inds is also returned to allow for calling with nil - - // Reslice inds to have zero length - inds = inds[:0] - - // If zero elements requested, can just return - if k == 0 { - return inds, nil - } - - // If k < 0, return all of the found indices - if k < 0 { - for i, val := range s { - if f(val) { - inds = append(inds, i) - } - } - return inds, nil - } - - // Otherwise, find the first k elements - nFound := 0 - for i, val := range s { - if f(val) { - inds = append(inds, i) - nFound++ - if nFound == k { - return inds, nil - } - } - } - // Finished iterating over the loop, which means k elements were not found - return inds, errors.New("floats: insufficient elements found") -} - -// HasNaN returns true if the slice s has any values that are NaN and false -// otherwise. -func HasNaN(s []float64) bool { - for _, v := range s { - if math.IsNaN(v) { - return true - } - } - return false -} - -// LogSpan returns a set of n equally spaced points in log space between -// l and u, where n is equal to len(dst). The first element of the -// resulting dst will be l and the final element of dst will be u. -// Panics if len(dst) < 2. -// Note that this call will return NaNs if either l or u is negative, and -// will return all zeros if l or u is zero. -// Also returns the mutated slice dst, so that it can be used in range, like: -// -// for i, x := range LogSpan(dst, l, u) { ... } -func LogSpan(dst []float64, l, u float64) []float64 { - Span(dst, math.Log(l), math.Log(u)) - for i := range dst { - dst[i] = math.Exp(dst[i]) - } - return dst -} - -// LogSumExp returns the log of the sum of the exponentials of the values in s. -// Panics if s is an empty slice.
-func LogSumExp(s []float64) float64 { - // Want to do this in a numerically stable way which avoids - // overflow and underflow - // First, find the maximum value in the slice. - maxval := Max(s) - if math.IsInf(maxval, 0) { - // If it's infinity either way, the logsumexp will be infinity as well; - // returning now avoids NaNs - return maxval - } - var lse float64 - // Compute the sumexp part - for _, val := range s { - lse += math.Exp(val - maxval) - } - // Take the log and add back on the constant taken out - return math.Log(lse) + maxval -} - -// Max returns the maximum value in the input slice. If the slice is empty, Max will panic. -func Max(s []float64) float64 { - return s[MaxIdx(s)] -} - -// MaxIdx returns the index of the maximum value in the input slice. If several -// entries have the maximum value, the first such index is returned. If the slice -// is empty, MaxIdx will panic. -func MaxIdx(s []float64) int { - if len(s) == 0 { - panic("floats: zero slice length") - } - max := math.NaN() - var ind int - for i, v := range s { - if math.IsNaN(v) { - continue - } - if v > max || math.IsNaN(max) { - max = v - ind = i - } - } - return ind -} - -// Min returns the minimum value in the input slice. If the slice is empty, Min will panic. -func Min(s []float64) float64 { - return s[MinIdx(s)] -} - -// MinIdx returns the index of the minimum value in the input slice. If several -// entries have the minimum value, the first such index is returned. If the slice -// is empty, MinIdx will panic. -func MinIdx(s []float64) int { - if len(s) == 0 { - panic("floats: zero slice length") - } - min := math.NaN() - var ind int - for i, v := range s { - if math.IsNaN(v) { - continue - } - if v < min || math.IsNaN(min) { - min = v - ind = i - } - } - return ind -} - -// Mul performs element-wise multiplication between dst -// and s and stores the value in dst. Panics if the -// lengths of dst and s are not equal. -func Mul(dst, s []float64) { - if len(dst) != len(s) { - panic("floats: slice lengths do not match") - } - for i, val := range s { - dst[i] *= val - } -} - -// MulTo performs element-wise multiplication between s -// and t and stores the value in dst. Panics if the -// lengths of s, t, and dst are not equal. -func MulTo(dst, s, t []float64) []float64 { - if len(s) != len(t) || len(dst) != len(t) { - panic("floats: slice lengths do not match") - } - for i, val := range t { - dst[i] = val * s[i] - } - return dst -} - -const ( - nanBits = 0x7ff8000000000000 - nanMask = 0xfff8000000000000 -) - -// NaNWith returns an IEEE 754 "quiet not-a-number" value with the -// payload specified in the low 51 bits of payload. -// The NaN returned by math.NaN has a bit pattern equal to NaNWith(1). -func NaNWith(payload uint64) float64 { - return math.Float64frombits(nanBits | (payload &^ nanMask)) -} - -// NaNPayload returns the lowest 51 bits payload of an IEEE 754 "quiet -// not-a-number". For values of f other than quiet-NaN, NaNPayload -// returns zero and false. -func NaNPayload(f float64) (payload uint64, ok bool) { - b := math.Float64bits(f) - if b&nanBits != nanBits { - return 0, false - } - return b &^ nanMask, true -} - -// NearestIdx returns the index of the element in s -// whose value is nearest to v. If several such -// elements exist, the lowest index is returned. -// NearestIdx panics if len(s) == 0.
-func NearestIdx(s []float64, v float64) int { - if len(s) == 0 { - panic("floats: zero length slice") - } - switch { - case math.IsNaN(v): - return 0 - case math.IsInf(v, 1): - return MaxIdx(s) - case math.IsInf(v, -1): - return MinIdx(s) - } - var ind int - dist := math.NaN() - for i, val := range s { - newDist := math.Abs(v - val) - // A NaN distance will not be closer. - if math.IsNaN(newDist) { - continue - } - if newDist < dist || math.IsNaN(dist) { - dist = newDist - ind = i - } - } - return ind -} - -// NearestIdxForSpan returns the index of a hypothetical vector created -// by Span with length n and bounds l and u whose value is closest -// to v. That is, NearestIdxForSpan(n, l, u, v) is equivalent to -// NearestIdx(Span(make([]float64, n), l, u), v) without an allocation. -// NearestIdxForSpan panics if n is less than two. -func NearestIdxForSpan(n int, l, u float64, v float64) int { - if n <= 1 { - panic("floats: span must have length >1") - } - if math.IsNaN(v) { - return 0 - } - - // Special cases for Inf and NaN. - switch { - case math.IsNaN(l) && !math.IsNaN(u): - return n - 1 - case math.IsNaN(u): - return 0 - case math.IsInf(l, 0) && math.IsInf(u, 0): - if l == u { - return 0 - } - if n%2 == 1 { - if !math.IsInf(v, 0) { - return n / 2 - } - if math.Copysign(1, v) == math.Copysign(1, l) { - return 0 - } - return n/2 + 1 - } - if math.Copysign(1, v) == math.Copysign(1, l) { - return 0 - } - return n / 2 - case math.IsInf(l, 0): - if v == l { - return 0 - } - return n - 1 - case math.IsInf(u, 0): - if v == u { - return n - 1 - } - return 0 - case math.IsInf(v, -1): - if l <= u { - return 0 - } - return n - 1 - case math.IsInf(v, 1): - if u <= l { - return 0 - } - return n - 1 - } - - // Special cases for v outside (l, u) and (u, l). - switch { - case l < u: - if v <= l { - return 0 - } - if v >= u { - return n - 1 - } - case l > u: - if v >= l { - return 0 - } - if v <= u { - return n - 1 - } - default: - return 0 - } - - // Can't guarantee anything about exactly halfway between - // because of floating point weirdness. - return int((float64(n)-1)/(u-l)*(v-l) + 0.5) -} - -// Norm returns the L norm of the slice s, defined as -// (sum_{i=1}^N s[i]^L)^{1/L} -// Special cases: -// L = math.Inf(1) gives the maximum absolute value. -// Does not correctly compute the zero norm (use Count). -func Norm(s []float64, L float64) float64 { - // Should this complain if L is not positive? - // Should this be done in log space for better numerical stability? - // that would cost more - // maybe only if L is high? - if len(s) == 0 { - return 0 - } - if L == 2 { - twoNorm := math.Abs(s[0]) - for i := 1; i < len(s); i++ { - twoNorm = math.Hypot(twoNorm, s[i]) - } - return twoNorm - } - var norm float64 - if L == 1 { - for _, val := range s { - norm += math.Abs(val) - } - return norm - } - if math.IsInf(L, 1) { - for _, val := range s { - norm = math.Max(norm, math.Abs(val)) - } - return norm - } - for _, val := range s { - norm += math.Pow(math.Abs(val), L) - } - return math.Pow(norm, 1/L) -} - -// ParseWithNA converts the string s to a float64 in v. -// If s equals missing, w is returned as 0, otherwise 1. -func ParseWithNA(s, missing string) (v, w float64, err error) { - if s == missing { - return 0, 0, nil - } - v, err = strconv.ParseFloat(s, 64) - if err == nil { - w = 1 - } - return v, w, err -} - -// Prod returns the product of the elements of the slice. -// Returns 1 if len(s) = 0.
-func Prod(s []float64) float64 { - prod := 1.0 - for _, val := range s { - prod *= val - } - return prod -} - -// Reverse reverses the order of elements in the slice. -func Reverse(s []float64) { - for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 { - s[i], s[j] = s[j], s[i] - } -} - -// Round returns the half away from zero rounded value of x with prec precision. -// -// Special cases are: -// Round(±0) = +0 -// Round(±Inf) = ±Inf -// Round(NaN) = NaN -func Round(x float64, prec int) float64 { - if x == 0 { - // Make sure zero is returned - // without the negative bit set. - return 0 - } - // Fast path for positive precision on integers. - if prec >= 0 && x == math.Trunc(x) { - return x - } - pow := math.Pow10(prec) - intermed := x * pow - if math.IsInf(intermed, 0) { - return x - } - if x < 0 { - x = math.Ceil(intermed - 0.5) - } else { - x = math.Floor(intermed + 0.5) - } - - if x == 0 { - return 0 - } - - return x / pow -} - -// RoundEven returns the half even rounded value of x with prec precision. -// -// Special cases are: -// RoundEven(±0) = +0 -// RoundEven(±Inf) = ±Inf -// RoundEven(NaN) = NaN -func RoundEven(x float64, prec int) float64 { - if x == 0 { - // Make sure zero is returned - // without the negative bit set. - return 0 - } - // Fast path for positive precision on integers. - if prec >= 0 && x == math.Trunc(x) { - return x - } - pow := math.Pow10(prec) - intermed := x * pow - if math.IsInf(intermed, 0) { - return x - } - if isHalfway(intermed) { - correction, _ := math.Modf(math.Mod(intermed, 2)) - intermed += correction - if intermed > 0 { - x = math.Floor(intermed) - } else { - x = math.Ceil(intermed) - } - } else { - if x < 0 { - x = math.Ceil(intermed - 0.5) - } else { - x = math.Floor(intermed + 0.5) - } - } - - if x == 0 { - return 0 - } - - return x / pow -} - -func isHalfway(x float64) bool { - _, frac := math.Modf(x) - frac = math.Abs(frac) - return frac == 0.5 || (math.Nextafter(frac, math.Inf(-1)) < 0.5 && math.Nextafter(frac, math.Inf(1)) > 0.5) -} - -// Same returns true if the input slices have the same length and all elements -// have the same value with NaN treated as the same. -func Same(s, t []float64) bool { - if len(s) != len(t) { - return false - } - for i, v := range s { - w := t[i] - if v != w && !(math.IsNaN(v) && math.IsNaN(w)) { - return false - } - } - return true -} - -// Scale multiplies every element in dst by the scalar c. -func Scale(c float64, dst []float64) { - if len(dst) > 0 { - f64.ScalUnitary(c, dst) - } -} - -// ScaleTo multiplies the elements in s by c and stores the result in dst. -func ScaleTo(dst []float64, c float64, s []float64) []float64 { - if len(dst) != len(s) { - panic("floats: lengths of slices do not match") - } - if len(dst) > 0 { - f64.ScalUnitaryTo(dst, c, s) - } - return dst -} - -// Span returns a set of N equally spaced points between l and u, where N -// is equal to the length of the destination. The first element of the destination -// is l, the final element of the destination is u. -// -// Panics if len(dst) < 2. -// -// Span also returns the mutated slice dst, so that it can be used in range expressions, -// like: -// -// for i, x := range Span(dst, l, u) { ... } -func Span(dst []float64, l, u float64) []float64 { - n := len(dst) - if n < 2 { - panic("floats: destination must have length >1") - } - - // Special cases for Inf and NaN.
- switch { - case math.IsNaN(l): - for i := range dst[:len(dst)-1] { - dst[i] = math.NaN() - } - dst[len(dst)-1] = u - return dst - case math.IsNaN(u): - for i := range dst[1:] { - dst[i+1] = math.NaN() - } - dst[0] = l - return dst - case math.IsInf(l, 0) && math.IsInf(u, 0): - for i := range dst[:len(dst)/2] { - dst[i] = l - dst[len(dst)-i-1] = u - } - if len(dst)%2 == 1 { - if l != u { - dst[len(dst)/2] = 0 - } else { - dst[len(dst)/2] = l - } - } - return dst - case math.IsInf(l, 0): - for i := range dst[:len(dst)-1] { - dst[i] = l - } - dst[len(dst)-1] = u - return dst - case math.IsInf(u, 0): - for i := range dst[1:] { - dst[i+1] = u - } - dst[0] = l - return dst - } - - step := (u - l) / float64(n-1) - for i := range dst { - dst[i] = l + step*float64(i) - } - return dst -} - -// Sub subtracts, element-wise, the elements of s from dst. Panics if -// the lengths of dst and s do not match. -func Sub(dst, s []float64) { - if len(dst) != len(s) { - panic("floats: length of the slices do not match") - } - f64.AxpyUnitaryTo(dst, -1, s, dst) -} - -// SubTo subtracts, element-wise, the elements of t from s and -// stores the result in dst. Panics if the lengths of s, t and dst do not match. -func SubTo(dst, s, t []float64) []float64 { - if len(s) != len(t) { - panic("floats: length of subtractor and subtractee do not match") - } - if len(dst) != len(s) { - panic("floats: length of destination does not match length of subtractor") - } - f64.AxpyUnitaryTo(dst, -1, t, s) - return dst -} - -// Sum returns the sum of the elements of the slice. -func Sum(s []float64) float64 { - return f64.Sum(s) -} - -// Within returns the first index i where s[i] <= v < s[i+1]. Within panics if: -// - len(s) < 2 -// - s is not sorted -func Within(s []float64, v float64) int { - if len(s) < 2 { - panic("floats: slice length less than 2") - } - if !sort.Float64sAreSorted(s) { - panic("floats: input slice not sorted") - } - if v < s[0] || v >= s[len(s)-1] || math.IsNaN(v) { - return -1 - } - for i, f := range s[1:] { - if v < f { - return i - } - } - return -1 -} diff --git a/vendor/gonum.org/v1/gonum/graph/doc.go b/vendor/gonum.org/v1/gonum/graph/doc.go deleted file mode 100644 index 7eedd09ce..000000000 --- a/vendor/gonum.org/v1/gonum/graph/doc.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright ©2014 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package graph defines graph interfaces. -// -// Routines to test contract compliance by user implemented graph types -// are available in gonum.org/v1/gonum/graph/testgraph. -package graph // import "gonum.org/v1/gonum/graph" diff --git a/vendor/gonum.org/v1/gonum/graph/formats/cytoscapejs/testdata/LICENSE b/vendor/gonum.org/v1/gonum/graph/formats/cytoscapejs/testdata/LICENSE deleted file mode 100644 index 9e21a7e3e..000000000 --- a/vendor/gonum.org/v1/gonum/graph/formats/cytoscapejs/testdata/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ - - -Copyright (c) 2016-2018, The Cytoscape Consortium. 
- -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the “Software”), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -of the Software, and to permit persons to whom the Software is furnished to do -so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. \ No newline at end of file diff --git a/vendor/gonum.org/v1/gonum/graph/formats/sigmajs/testdata/LICENSE.txt b/vendor/gonum.org/v1/gonum/graph/formats/sigmajs/testdata/LICENSE.txt deleted file mode 100644 index 81739df1a..000000000 --- a/vendor/gonum.org/v1/gonum/graph/formats/sigmajs/testdata/LICENSE.txt +++ /dev/null @@ -1,12 +0,0 @@ -Copyright (C) 2013-2014, Alexis Jacomy, http://sigmajs.org - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), -to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, -and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS -IN THE SOFTWARE. diff --git a/vendor/gonum.org/v1/gonum/graph/graph.go b/vendor/gonum.org/v1/gonum/graph/graph.go deleted file mode 100644 index c973583d8..000000000 --- a/vendor/gonum.org/v1/gonum/graph/graph.go +++ /dev/null @@ -1,282 +0,0 @@ -// Copyright ©2014 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package graph - -// Node is a graph node. It returns a graph-unique integer ID. -type Node interface { - ID() int64 -} - -// Edge is a graph edge. In directed graphs, the direction of the -// edge is given from -> to, otherwise the edge is semantically -// unordered. -type Edge interface { - // From returns the from node of the edge. - From() Node - - // To returns the to node of the edge. - To() Node - - // ReversedEdge returns an edge that has - // the end points of the receiver swapped. - ReversedEdge() Edge -} - -// WeightedEdge is a weighted graph edge. 
In directed graphs, the direction -// of the edge is given from -> to, otherwise the edge is semantically -// unordered. -type WeightedEdge interface { - Edge - Weight() float64 -} - -// Graph is a generalized graph. -type Graph interface { - // Node returns the node with the given ID if it exists - // in the graph, and nil otherwise. - Node(id int64) Node - - // Nodes returns all the nodes in the graph. - // - // Nodes must not return nil. - Nodes() Nodes - - // From returns all nodes that can be reached directly - // from the node with the given ID. - // - // From must not return nil. - From(id int64) Nodes - - // HasEdgeBetween returns whether an edge exists between - // nodes with IDs xid and yid without considering direction. - HasEdgeBetween(xid, yid int64) bool - - // Edge returns the edge from u to v, with IDs uid and vid, - // if such an edge exists and nil otherwise. The node v - // must be directly reachable from u as defined by the - // From method. - Edge(uid, vid int64) Edge -} - -// Weighted is a weighted graph. -type Weighted interface { - Graph - - // WeightedEdge returns the weighted edge from u to v - // with IDs uid and vid if such an edge exists and - // nil otherwise. The node v must be directly - // reachable from u as defined by the From method. - WeightedEdge(uid, vid int64) WeightedEdge - - // Weight returns the weight for the edge between - // x and y with IDs xid and yid if Edge(xid, yid) - // returns a non-nil Edge. - // If x and y are the same node or there is no - // joining edge between the two nodes the weight - // value returned is implementation dependent. - // Weight returns true if an edge exists between - // x and y or if x and y have the same ID, false - // otherwise. - Weight(xid, yid int64) (w float64, ok bool) -} - -// Undirected is an undirected graph. -type Undirected interface { - Graph - - // EdgeBetween returns the edge between nodes x and y - // with IDs xid and yid. - EdgeBetween(xid, yid int64) Edge -} - -// WeightedUndirected is a weighted undirected graph. -type WeightedUndirected interface { - Weighted - - // WeightedEdgeBetween returns the edge between nodes - // x and y with IDs xid and yid. - WeightedEdgeBetween(xid, yid int64) WeightedEdge -} - -// Directed is a directed graph. -type Directed interface { - Graph - - // HasEdgeFromTo returns whether an edge exists - // in the graph from u to v with IDs uid and vid. - HasEdgeFromTo(uid, vid int64) bool - - // To returns all nodes that can reach directly - // to the node with the given ID. - // - // To must not return nil. - To(id int64) Nodes -} - -// WeightedDirected is a weighted directed graph. -type WeightedDirected interface { - Weighted - - // HasEdgeFromTo returns whether an edge exists - // in the graph from u to v with the IDs uid and - // vid. - HasEdgeFromTo(uid, vid int64) bool - - // To returns all nodes that can reach directly - // to the node with the given ID. - // - // To must not return nil. - To(id int64) Nodes -} - -// NodeAdder is an interface for adding arbitrary nodes to a graph. -type NodeAdder interface { - // NewNode returns a new Node with a unique - // arbitrary ID. - NewNode() Node - - // AddNode adds a node to the graph. AddNode panics if - // the added node ID matches an existing node ID. - AddNode(Node) -} - -// NodeRemover is an interface for removing nodes from a graph. -type NodeRemover interface { - // RemoveNode removes the node with the given ID - // from the graph, as well as any edges attached - // to it. 
If the node is not in the graph it is - // a no-op. - RemoveNode(id int64) -} - -// EdgeAdder is an interface for adding edges to a graph. -type EdgeAdder interface { - // NewEdge returns a new Edge from the source to the destination node. - NewEdge(from, to Node) Edge - - // SetEdge adds an edge from one node to another. - // If the graph supports node addition the nodes - // will be added if they do not exist, otherwise - // SetEdge will panic. - // The behavior of an EdgeAdder when the IDs - // returned by e.From() and e.To() are equal is - // implementation-dependent. - // Whether e, e.From() and e.To() are stored - // within the graph is implementation-dependent. - SetEdge(e Edge) -} - -// WeightedEdgeAdder is an interface for adding edges to a graph. -type WeightedEdgeAdder interface { - // NewWeightedEdge returns a new WeightedEdge from - // the source to the destination node. - NewWeightedEdge(from, to Node, weight float64) WeightedEdge - - // SetWeightedEdge adds an edge from one node to - // another. If the graph supports node addition - // the nodes will be added if they do not exist, - // otherwise SetWeightedEdge will panic. - // The behavior of a WeightedEdgeAdder when the IDs - // returned by e.From() and e.To() are equal is - // implementation-dependent. - // Whether e, e.From() and e.To() are stored - // within the graph is implementation-dependent. - SetWeightedEdge(e WeightedEdge) -} - -// EdgeRemover is an interface for removing edges from a graph. -type EdgeRemover interface { - // RemoveEdge removes the edge with the given end - // IDs, leaving the terminal nodes. If the edge - // does not exist it is a no-op. - RemoveEdge(fid, tid int64) -} - -// Builder is a graph that can have nodes and edges added. -type Builder interface { - NodeAdder - EdgeAdder -} - -// WeightedBuilder is a graph that can have nodes and weighted edges added. -type WeightedBuilder interface { - NodeAdder - WeightedEdgeAdder -} - -// UndirectedBuilder is an undirected graph builder. -type UndirectedBuilder interface { - Undirected - Builder -} - -// UndirectedWeightedBuilder is an undirected weighted graph builder. -type UndirectedWeightedBuilder interface { - Undirected - WeightedBuilder -} - -// DirectedBuilder is a directed graph builder. -type DirectedBuilder interface { - Directed - Builder -} - -// DirectedWeightedBuilder is a directed weighted graph builder. -type DirectedWeightedBuilder interface { - Directed - WeightedBuilder -} - -// Copy copies nodes and edges as undirected edges from the source to the destination -// without first clearing the destination. Copy will panic if a node ID in the source -// graph matches a node ID in the destination. -// -// If the source is undirected and the destination is directed, both directions will -// be present in the destination after the copy is complete. -func Copy(dst Builder, src Graph) { - nodes := src.Nodes() - for nodes.Next() { - dst.AddNode(nodes.Node()) - } - nodes.Reset() - for nodes.Next() { - u := nodes.Node() - uid := u.ID() - to := src.From(uid) - for to.Next() { - v := to.Node() - dst.SetEdge(src.Edge(uid, v.ID())) - } - } -} - -// CopyWeighted copies nodes and edges as undirected edges from the source to the destination -// without first clearing the destination. CopyWeighted will panic if a node ID in the source -// graph matches a node ID in the destination. -// -// If the source is undirected and the destination is directed, both directions will -// be present in the destination after the copy is complete.
-// -// If the source is a directed graph, the destination is undirected, and a fundamental -// cycle exists with two nodes where the edge weights differ, the resulting destination -// graph's edge weight between those nodes is undefined. If there is a defined function -// to resolve such conflicts, an UndirectWeighted may be used to do this. -func CopyWeighted(dst WeightedBuilder, src Weighted) { - nodes := src.Nodes() - for nodes.Next() { - dst.AddNode(nodes.Node()) - } - nodes.Reset() - for nodes.Next() { - u := nodes.Node() - uid := u.ID() - to := src.From(uid) - for to.Next() { - v := to.Node() - dst.SetWeightedEdge(src.WeightedEdge(uid, v.ID())) - } - } -} diff --git a/vendor/gonum.org/v1/gonum/graph/internal/linear/doc.go b/vendor/gonum.org/v1/gonum/graph/internal/linear/doc.go deleted file mode 100644 index 88d1cb80a..000000000 --- a/vendor/gonum.org/v1/gonum/graph/internal/linear/doc.go +++ /dev/null @@ -1,6 +0,0 @@ -// Copyright ©2017 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package linear provides common linear data structures. -package linear // import "gonum.org/v1/gonum/graph/internal/linear" diff --git a/vendor/gonum.org/v1/gonum/graph/internal/linear/linear.go b/vendor/gonum.org/v1/gonum/graph/internal/linear/linear.go deleted file mode 100644 index 62e19db6a..000000000 --- a/vendor/gonum.org/v1/gonum/graph/internal/linear/linear.go +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright ©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package linear - -import ( - "gonum.org/v1/gonum/graph" -) - -// NodeStack implements a LIFO stack of graph.Node. -type NodeStack []graph.Node - -// Len returns the number of graph.Nodes on the stack. -func (s *NodeStack) Len() int { return len(*s) } - -// Pop returns the last graph.Node on the stack and removes it -// from the stack. -func (s *NodeStack) Pop() graph.Node { - v := *s - v, n := v[:len(v)-1], v[len(v)-1] - *s = v - return n -} - -// Push adds the node n to the stack at the last position. -func (s *NodeStack) Push(n graph.Node) { *s = append(*s, n) } - -// NodeQueue implements a FIFO queue. -type NodeQueue struct { - head int - data []graph.Node -} - -// Len returns the number of graph.Nodes in the queue. -func (q *NodeQueue) Len() int { return len(q.data) - q.head } - -// Enqueue adds the node n to the back of the queue. -func (q *NodeQueue) Enqueue(n graph.Node) { - if len(q.data) == cap(q.data) && q.head > 0 { - l := q.Len() - copy(q.data, q.data[q.head:]) - q.head = 0 - q.data = append(q.data[:l], n) - } else { - q.data = append(q.data, n) - } -} - -// Dequeue returns the graph.Node at the front of the queue and -// removes it from the queue. -func (q *NodeQueue) Dequeue() graph.Node { - if q.Len() == 0 { - panic("queue: empty queue") - } - - var n graph.Node - n, q.data[q.head] = q.data[q.head], nil - q.head++ - - if q.Len() == 0 { - q.head = 0 - q.data = q.data[:0] - } - - return n -} - -// Reset clears the queue for reuse. -func (q *NodeQueue) Reset() { - q.head = 0 - q.data = q.data[:0] -} diff --git a/vendor/gonum.org/v1/gonum/graph/internal/ordered/doc.go b/vendor/gonum.org/v1/gonum/graph/internal/ordered/doc.go deleted file mode 100644 index 563df6f2e..000000000 --- a/vendor/gonum.org/v1/gonum/graph/internal/ordered/doc.go +++ /dev/null @@ -1,6 +0,0 @@ -// Copyright ©2017 The Gonum Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package ordered provides common sort ordering types. -package ordered // import "gonum.org/v1/gonum/graph/internal/ordered" diff --git a/vendor/gonum.org/v1/gonum/graph/internal/ordered/sort.go b/vendor/gonum.org/v1/gonum/graph/internal/ordered/sort.go deleted file mode 100644 index a7250d1f3..000000000 --- a/vendor/gonum.org/v1/gonum/graph/internal/ordered/sort.go +++ /dev/null @@ -1,93 +0,0 @@ -// Copyright ©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ordered - -import "gonum.org/v1/gonum/graph" - -// ByID implements the sort.Interface sorting a slice of graph.Node -// by ID. -type ByID []graph.Node - -func (n ByID) Len() int { return len(n) } -func (n ByID) Less(i, j int) bool { return n[i].ID() < n[j].ID() } -func (n ByID) Swap(i, j int) { n[i], n[j] = n[j], n[i] } - -// BySliceValues implements the sort.Interface sorting a slice of -// []int64 lexically by the values of the []int64. -type BySliceValues [][]int64 - -func (c BySliceValues) Len() int { return len(c) } -func (c BySliceValues) Less(i, j int) bool { - a, b := c[i], c[j] - l := len(a) - if len(b) < l { - l = len(b) - } - for k, v := range a[:l] { - if v < b[k] { - return true - } - if v > b[k] { - return false - } - } - return len(a) < len(b) -} -func (c BySliceValues) Swap(i, j int) { c[i], c[j] = c[j], c[i] } - -// BySliceIDs implements the sort.Interface sorting a slice of -// []graph.Node lexically by the IDs of the []graph.Node. -type BySliceIDs [][]graph.Node - -func (c BySliceIDs) Len() int { return len(c) } -func (c BySliceIDs) Less(i, j int) bool { - a, b := c[i], c[j] - l := len(a) - if len(b) < l { - l = len(b) - } - for k, v := range a[:l] { - if v.ID() < b[k].ID() { - return true - } - if v.ID() > b[k].ID() { - return false - } - } - return len(a) < len(b) -} -func (c BySliceIDs) Swap(i, j int) { c[i], c[j] = c[j], c[i] } - -// Int64s implements the sort.Interface sorting a slice of -// int64. -type Int64s []int64 - -func (s Int64s) Len() int { return len(s) } -func (s Int64s) Less(i, j int) bool { return s[i] < s[j] } -func (s Int64s) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - -// Reverse reverses the order of nodes. -func Reverse(nodes []graph.Node) { - for i, j := 0, len(nodes)-1; i < j; i, j = i+1, j-1 { - nodes[i], nodes[j] = nodes[j], nodes[i] - } -} - -// LinesByIDs implements the sort.Interface sorting a slice of graph.Line -// lexically by the From IDs, then by the To IDs, finally by the Line IDs. -type LinesByIDs []graph.Line - -func (n LinesByIDs) Len() int { return len(n) } -func (n LinesByIDs) Less(i, j int) bool { - a, b := n[i], n[j] - if a.From().ID() != b.From().ID() { - return a.From().ID() < b.From().ID() - } - if a.To().ID() != b.To().ID() { - return a.To().ID() < b.To().ID() - } - return n[i].ID() < n[j].ID() -} -func (n LinesByIDs) Swap(i, j int) { n[i], n[j] = n[j], n[i] } diff --git a/vendor/gonum.org/v1/gonum/graph/internal/set/doc.go b/vendor/gonum.org/v1/gonum/graph/internal/set/doc.go deleted file mode 100644 index 86f2afc4e..000000000 --- a/vendor/gonum.org/v1/gonum/graph/internal/set/doc.go +++ /dev/null @@ -1,6 +0,0 @@ -// Copyright ©2017 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package set provides integer and graph.Node sets.
-package set // import "gonum.org/v1/gonum/graph/internal/set" diff --git a/vendor/gonum.org/v1/gonum/graph/internal/set/same.go b/vendor/gonum.org/v1/gonum/graph/internal/set/same.go deleted file mode 100644 index f95a4e128..000000000 --- a/vendor/gonum.org/v1/gonum/graph/internal/set/same.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright ©2014 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !appengine,!safe - -package set - -import "unsafe" - -// same determines whether two sets are backed by the same store. In the -// current implementation using hash maps it makes use of the fact that -// hash maps are passed as a pointer to a runtime Hmap struct. A map is -// not seen by the runtime as a pointer though, so we use unsafe to get -// the maps' pointer values to compare. -func same(a, b Nodes) bool { - return *(*uintptr)(unsafe.Pointer(&a)) == *(*uintptr)(unsafe.Pointer(&b)) -} - -// intsSame determines whether two sets are backed by the same store. In the -// current implementation using hash maps it makes use of the fact that -// hash maps are passed as a pointer to a runtime Hmap struct. A map is -// not seen by the runtime as a pointer though, so we use unsafe to get -// the maps' pointer values to compare. -func intsSame(a, b Ints) bool { - return *(*uintptr)(unsafe.Pointer(&a)) == *(*uintptr)(unsafe.Pointer(&b)) -} - -// int64sSame determines whether two sets are backed by the same store. In the -// current implementation using hash maps it makes use of the fact that -// hash maps are passed as a pointer to a runtime Hmap struct. A map is -// not seen by the runtime as a pointer though, so we use unsafe to get -// the maps' pointer values to compare. -func int64sSame(a, b Int64s) bool { - return *(*uintptr)(unsafe.Pointer(&a)) == *(*uintptr)(unsafe.Pointer(&b)) -} diff --git a/vendor/gonum.org/v1/gonum/graph/internal/set/same_appengine.go b/vendor/gonum.org/v1/gonum/graph/internal/set/same_appengine.go deleted file mode 100644 index 4ff4f4ed2..000000000 --- a/vendor/gonum.org/v1/gonum/graph/internal/set/same_appengine.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright ©2014 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build appengine safe - -package set - -import "reflect" - -// same determines whether two sets are backed by the same store. In the -// current implementation using hash maps it makes use of the fact that -// hash maps are passed as a pointer to a runtime Hmap struct. A map is -// not seen by the runtime as a pointer though, so we use reflect to get -// the maps' pointer values to compare. -func same(a, b Nodes) bool { - return reflect.ValueOf(a).Pointer() == reflect.ValueOf(b).Pointer() -} - -// intsSame determines whether two sets are backed by the same store. In the -// current implementation using hash maps it makes use of the fact that -// hash maps are passed as a pointer to a runtime Hmap struct. A map is -// not seen by the runtime as a pointer though, so we use reflect to get -// the maps' pointer values to compare. -func intsSame(a, b Ints) bool { - return reflect.ValueOf(a).Pointer() == reflect.ValueOf(b).Pointer() -} - -// int64sSame determines whether two sets are backed by the same store. In the -// current implementation using hash maps it makes use of the fact that -// hash maps are passed as a pointer to a runtime Hmap struct. 
A map is -// not seen by the runtime as a pointer though, so we use reflect to get -// the maps' pointer values to compare. -func int64sSame(a, b Int64s) bool { - return reflect.ValueOf(a).Pointer() == reflect.ValueOf(b).Pointer() -} diff --git a/vendor/gonum.org/v1/gonum/graph/internal/set/set.go b/vendor/gonum.org/v1/gonum/graph/internal/set/set.go deleted file mode 100644 index 0506b8e97..000000000 --- a/vendor/gonum.org/v1/gonum/graph/internal/set/set.go +++ /dev/null @@ -1,228 +0,0 @@ -// Copyright ©2014 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package set - -import "gonum.org/v1/gonum/graph" - -// Ints is a set of int identifiers. -type Ints map[int]struct{} - -// The simple accessor methods for Ints are provided to allow ease of -// implementation change should the need arise. - -// Add inserts an element into the set. -func (s Ints) Add(e int) { - s[e] = struct{}{} -} - -// Has reports the existence of the element in the set. -func (s Ints) Has(e int) bool { - _, ok := s[e] - return ok -} - -// Remove deletes the specified element from the set. -func (s Ints) Remove(e int) { - delete(s, e) -} - -// Count reports the number of elements stored in the set. -func (s Ints) Count() int { - return len(s) -} - -// IntsEqual reports set equality between the parameters. Sets are equal if -// and only if they have the same elements. -func IntsEqual(a, b Ints) bool { - if intsSame(a, b) { - return true - } - - if len(a) != len(b) { - return false - } - - for e := range a { - if _, ok := b[e]; !ok { - return false - } - } - - return true -} - -// Int64s is a set of int64 identifiers. -type Int64s map[int64]struct{} - -// The simple accessor methods for Int64s are provided to allow ease of -// implementation change should the need arise. - -// Add inserts an element into the set. -func (s Int64s) Add(e int64) { - s[e] = struct{}{} -} - -// Has reports the existence of the element in the set. -func (s Int64s) Has(e int64) bool { - _, ok := s[e] - return ok -} - -// Remove deletes the specified element from the set. -func (s Int64s) Remove(e int64) { - delete(s, e) -} - -// Count reports the number of elements stored in the set. -func (s Int64s) Count() int { - return len(s) -} - -// Int64sEqual reports set equality between the parameters. Sets are equal if -// and only if they have the same elements. -func Int64sEqual(a, b Int64s) bool { - if int64sSame(a, b) { - return true - } - - if len(a) != len(b) { - return false - } - - for e := range a { - if _, ok := b[e]; !ok { - return false - } - } - - return true -} - -// Nodes is a set of nodes keyed in their integer identifiers. -type Nodes map[int64]graph.Node - -// NewNodes returns a new Nodes. -func NewNodes() Nodes { - return make(Nodes) -} - -// NewNodesSize returns a new Nodes with the given size hint, n. -func NewNodesSize(n int) Nodes { - return make(Nodes, n) -} - -// The simple accessor methods for Nodes are provided to allow ease of -// implementation change should the need arise. - -// Add inserts an element into the set. -func (s Nodes) Add(n graph.Node) { - s[n.ID()] = n -} - -// Remove deletes the specified element from the set. -func (s Nodes) Remove(e graph.Node) { - delete(s, e.ID()) -} - -// Count returns the number of elements in the set. -func (s Nodes) Count() int { - return len(s) -} - -// Has reports the existence of the element in the set.
-func (s Nodes) Has(n graph.Node) bool { - _, ok := s[n.ID()] - return ok -} - -// CloneNodes returns a clone of src. -func CloneNodes(src Nodes) Nodes { - dst := make(Nodes, len(src)) - for e, n := range src { - dst[e] = n - } - return dst -} - -// Equal reports set equality between the parameters. Sets are equal if -// and only if they have the same elements. -func Equal(a, b Nodes) bool { - if same(a, b) { - return true - } - - if len(a) != len(b) { - return false - } - - for e := range a { - if _, ok := b[e]; !ok { - return false - } - } - - return true -} - -// UnionOfNodes returns the union of a and b. -// -// The union of two sets, a and b, is the set containing all the -// elements of each, for instance: -// -// {a,b,c} UNION {d,e,f} = {a,b,c,d,e,f} -// -// Since sets may not have repetition, unions of two sets that overlap -// do not contain repeat elements, that is: -// -// {a,b,c} UNION {b,c,d} = {a,b,c,d} -// -func UnionOfNodes(a, b Nodes) Nodes { - if same(a, b) { - return CloneNodes(a) - } - - dst := make(Nodes) - for e, n := range a { - dst[e] = n - } - for e, n := range b { - dst[e] = n - } - - return dst -} - -// IntersectionOfNodes returns the intersection of a and b. -// -// The intersection of two sets, a and b, is the set containing all -// the elements shared between the two sets, for instance: -// -// {a,b,c} INTERSECT {b,c,d} = {b,c} -// -// The intersection between a set and itself is itself, and thus -// effectively a copy operation: -// -// {a,b,c} INTERSECT {a,b,c} = {a,b,c} -// -// The intersection between two sets that share no elements is the empty -// set: -// -// {a,b,c} INTERSECT {d,e,f} = {} -// -func IntersectionOfNodes(a, b Nodes) Nodes { - if same(a, b) { - return CloneNodes(a) - } - dst := make(Nodes) - if len(a) > len(b) { - a, b = b, a - } - for e, n := range a { - if _, ok := b[e]; ok { - dst[e] = n - } - } - return dst -} diff --git a/vendor/gonum.org/v1/gonum/graph/internal/uid/uid.go b/vendor/gonum.org/v1/gonum/graph/internal/uid/uid.go deleted file mode 100644 index 5f503c13d..000000000 --- a/vendor/gonum.org/v1/gonum/graph/internal/uid/uid.go +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright ©2014 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package uid implements unique ID provision for graphs. -package uid - -import "gonum.org/v1/gonum/graph/internal/set" - -// Max is the maximum value of int64. -const Max = int64(^uint64(0) >> 1) - -// Set implements available ID storage. -type Set struct { - maxID int64 - used, free set.Int64s -} - -// NewSet returns a new Set. The returned value should not be passed except by pointer. -func NewSet() Set { - return Set{maxID: -1, used: make(set.Int64s), free: make(set.Int64s)} -} - -// NewID returns a new unique ID. The ID returned is not considered used -// until passed in a call to use. -func (s *Set) NewID() int64 { - for id := range s.free { - return id - } - if s.maxID != Max { - return s.maxID + 1 - } - for id := int64(0); id <= s.maxID+1; id++ { - if !s.used.Has(id) { - return id - } - } - panic("unreachable") -} - -// Use adds the id to the used IDs in the Set. -func (s *Set) Use(id int64) { - s.used.Add(id) - s.free.Remove(id) - if id > s.maxID { - s.maxID = id - } -} - -// Release frees the id for reuse. 
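graph/internal/set is an internal package, so it cannot be imported from outside the gonum module; the following is only a behavioral sketch (editor's illustration, not vendored code) of the union and intersection semantics documented above, mirrored with a plain map:

package main

import "fmt"

type int64Set map[int64]struct{}

// union mirrors UnionOfNodes: every element of a and b, without repeats.
func union(a, b int64Set) int64Set {
	dst := make(int64Set, len(a)+len(b))
	for e := range a {
		dst[e] = struct{}{}
	}
	for e := range b {
		dst[e] = struct{}{}
	}
	return dst
}

// intersection mirrors IntersectionOfNodes: elements present in both,
// ranging over the smaller set as the vendored code does.
func intersection(a, b int64Set) int64Set {
	if len(a) > len(b) {
		a, b = b, a
	}
	dst := make(int64Set)
	for e := range a {
		if _, ok := b[e]; ok {
			dst[e] = struct{}{}
		}
	}
	return dst
}

func main() {
	a := int64Set{1: {}, 2: {}, 3: {}}
	b := int64Set{2: {}, 3: {}, 4: {}}
	fmt.Println(len(union(a, b)), len(intersection(a, b))) // 4 2
}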
-func (s *Set) Release(id int64) { - s.free.Add(id) - s.used.Remove(id) -} diff --git a/vendor/gonum.org/v1/gonum/graph/iterator/doc.go b/vendor/gonum.org/v1/gonum/graph/iterator/doc.go deleted file mode 100644 index 0983bc7c3..000000000 --- a/vendor/gonum.org/v1/gonum/graph/iterator/doc.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright ©2018 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package iterator provides node, edge and line iterators. -// -// The iterators provided satisfy the graph.Nodes, graph.Edges and -// graph.Lines interfaces. -package iterator diff --git a/vendor/gonum.org/v1/gonum/graph/iterator/edges.go b/vendor/gonum.org/v1/gonum/graph/iterator/edges.go deleted file mode 100644 index 21ef0433e..000000000 --- a/vendor/gonum.org/v1/gonum/graph/iterator/edges.go +++ /dev/null @@ -1,131 +0,0 @@ -// Copyright ©2018 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package iterator - -import "gonum.org/v1/gonum/graph" - -// OrderedEdges implements the graph.Edges and graph.EdgeSlicer interfaces. -// The iteration order of OrderedEdges is the order of edges passed to -// NewOrderedEdges. -type OrderedEdges struct { - idx int - edges []graph.Edge -} - -// NewOrderedEdges returns an OrderedEdges initialized with the provided edges. -func NewOrderedEdges(edges []graph.Edge) *OrderedEdges { - return &OrderedEdges{idx: -1, edges: edges} -} - -// Len returns the remaining number of edges to be iterated over. -func (e *OrderedEdges) Len() int { - if e.idx >= len(e.edges) { - return 0 - } - if e.idx <= 0 { - return len(e.edges) - } - return len(e.edges[e.idx:]) -} - -// Next returns whether the next call of Edge will return a valid edge. -func (e *OrderedEdges) Next() bool { - if uint(e.idx)+1 < uint(len(e.edges)) { - e.idx++ - return true - } - e.idx = len(e.edges) - return false -} - -// Edge returns the current edge of the iterator. Next must have been -// called prior to a call to Edge. -func (e *OrderedEdges) Edge() graph.Edge { - if e.idx >= len(e.edges) || e.idx < 0 { - return nil - } - return e.edges[e.idx] -} - -// EdgeSlice returns all the remaining edges in the iterator and advances -// the iterator. -func (e *OrderedEdges) EdgeSlice() []graph.Edge { - if e.idx >= len(e.edges) { - return nil - } - idx := e.idx - if idx == -1 { - idx = 0 - } - e.idx = len(e.edges) - return e.edges[idx:] -} - -// Reset returns the iterator to its initial state. -func (e *OrderedEdges) Reset() { - e.idx = -1 -} - -// OrderedWeightedEdges implements the graph.Edges and graph.EdgeSlicer interfaces. -// The iteration order of OrderedWeightedEdges is the order of edges passed to -// NewOrderedWeightedEdges. -type OrderedWeightedEdges struct { - idx int - edges []graph.WeightedEdge -} - -// NewOrderedWeightedEdges returns an OrderedWeightedEdges initialized with the provided edges. -func NewOrderedWeightedEdges(edges []graph.WeightedEdge) *OrderedWeightedEdges { - return &OrderedWeightedEdges{idx: -1, edges: edges} -} - -// Len returns the remaining number of edges to be iterated over. -func (e *OrderedWeightedEdges) Len() int { - if e.idx >= len(e.edges) { - return 0 - } - if e.idx <= 0 { - return len(e.edges) - } - return len(e.edges[e.idx:]) -} - -// Next returns whether the next call of WeightedEdge will return a valid edge.
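For orientation, a short sketch (editor's illustration, not part of the vendored file) of the Next/Edge/Reset protocol these iterators implement, assuming the upstream iterator and simple packages shown in this hunk:

package main

import (
	"fmt"

	"gonum.org/v1/gonum/graph"
	"gonum.org/v1/gonum/graph/iterator"
	"gonum.org/v1/gonum/graph/simple"
)

func main() {
	it := iterator.NewOrderedEdges([]graph.Edge{
		simple.Edge{F: simple.Node(0), T: simple.Node(1)},
		simple.Edge{F: simple.Node(1), T: simple.Node(2)},
	})
	// Next must be called before each retrieval; it reports whether a
	// valid edge is available.
	for it.Next() {
		e := it.Edge()
		fmt.Println(e.From().ID(), "->", e.To().ID())
	}
	it.Reset()            // rewind for reuse
	fmt.Println(it.Len()) // 2
}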
-func (e *OrderedWeightedEdges) Next() bool { - if uint(e.idx)+1 < uint(len(e.edges)) { - e.idx++ - return true - } - e.idx = len(e.edges) - return false -} - -// WeightedEdge returns the current edge of the iterator. Next must have been -// called prior to a call to WeightedEdge. -func (e *OrderedWeightedEdges) WeightedEdge() graph.WeightedEdge { - if e.idx >= len(e.edges) || e.idx < 0 { - return nil - } - return e.edges[e.idx] -} - -// WeightedEdgeSlice returns all the remaining edges in the iterator and advances -// the iterator. -func (e *OrderedWeightedEdges) WeightedEdgeSlice() []graph.WeightedEdge { - if e.idx >= len(e.edges) { - return nil - } - idx := e.idx - if idx == -1 { - idx = 0 - } - e.idx = len(e.edges) - return e.edges[idx:] -} - -// Reset returns the iterator to its initial state. -func (e *OrderedWeightedEdges) Reset() { - e.idx = -1 -} diff --git a/vendor/gonum.org/v1/gonum/graph/iterator/lines.go b/vendor/gonum.org/v1/gonum/graph/iterator/lines.go deleted file mode 100644 index ed655df01..000000000 --- a/vendor/gonum.org/v1/gonum/graph/iterator/lines.go +++ /dev/null @@ -1,131 +0,0 @@ -// Copyright ©2018 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package iterator - -import "gonum.org/v1/gonum/graph" - -// OrderedLines implements the graph.Lines and graph.LineSlicer interfaces. -// The iteration order of OrderedLines is the order of lines passed to -// NewOrderedLines. -type OrderedLines struct { - idx int - lines []graph.Line -} - -// NewOrderedLines returns an OrderedLines initialized with the provided lines. -func NewOrderedLines(lines []graph.Line) *OrderedLines { - return &OrderedLines{idx: -1, lines: lines} -} - -// Len returns the remaining number of lines to be iterated over. -func (e *OrderedLines) Len() int { - if e.idx >= len(e.lines) { - return 0 - } - if e.idx <= 0 { - return len(e.lines) - } - return len(e.lines[e.idx:]) -} - -// Next returns whether the next call of Line will return a valid line. -func (e *OrderedLines) Next() bool { - if uint(e.idx)+1 < uint(len(e.lines)) { - e.idx++ - return true - } - e.idx = len(e.lines) - return false -} - -// Line returns the current line of the iterator. Next must have been -// called prior to a call to Line. -func (e *OrderedLines) Line() graph.Line { - if e.idx >= len(e.lines) || e.idx < 0 { - return nil - } - return e.lines[e.idx] -} - -// LineSlice returns all the remaining lines in the iterator and advances -// the iterator. -func (e *OrderedLines) LineSlice() []graph.Line { - if e.idx >= len(e.lines) { - return nil - } - idx := e.idx - if idx == -1 { - idx = 0 - } - e.idx = len(e.lines) - return e.lines[idx:] -} - -// Reset returns the iterator to its initial state. -func (e *OrderedLines) Reset() { - e.idx = -1 -} - -// OrderedWeightedLines implements the graph.Lines and graph.LineSlicer interfaces. -// The iteration order of OrderedWeightedLines is the order of lines passed to -// NewOrderedWeightedLines. -type OrderedWeightedLines struct { - idx int - lines []graph.WeightedLine -} - -// NewOrderedWeightedLines returns an OrderedWeightedLines initialized with the provided lines. -func NewOrderedWeightedLines(lines []graph.WeightedLine) *OrderedWeightedLines { - return &OrderedWeightedLines{idx: -1, lines: lines} -} - -// Len returns the remaining number of lines to be iterated over.
-func (e *OrderedWeightedLines) Len() int { - if e.idx >= len(e.lines) { - return 0 - } - if e.idx <= 0 { - return len(e.lines) - } - return len(e.lines[e.idx:]) -} - -// Next returns whether the next call of WeightedLine will return a valid line. -func (e *OrderedWeightedLines) Next() bool { - if uint(e.idx)+1 < uint(len(e.lines)) { - e.idx++ - return true - } - e.idx = len(e.lines) - return false -} - -// WeightedLine returns the current line of the iterator. Next must have been -// called prior to a call to WeightedLine. -func (e *OrderedWeightedLines) WeightedLine() graph.WeightedLine { - if e.idx >= len(e.lines) || e.idx < 0 { - return nil - } - return e.lines[e.idx] -} - -// WeightedLineSlice returns all the remaining lines in the iterator and advances -// the iterator. -func (e *OrderedWeightedLines) WeightedLineSlice() []graph.WeightedLine { - if e.idx >= len(e.lines) { - return nil - } - idx := e.idx - if idx == -1 { - idx = 0 - } - e.idx = len(e.lines) - return e.lines[idx:] -} - -// Reset returns the iterator to its initial state. -func (e *OrderedWeightedLines) Reset() { - e.idx = -1 -} diff --git a/vendor/gonum.org/v1/gonum/graph/iterator/nodes.go b/vendor/gonum.org/v1/gonum/graph/iterator/nodes.go deleted file mode 100644 index 952dd770f..000000000 --- a/vendor/gonum.org/v1/gonum/graph/iterator/nodes.go +++ /dev/null @@ -1,125 +0,0 @@ -// Copyright ©2018 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package iterator - -import "gonum.org/v1/gonum/graph" - -// OrderedNodes implements the graph.Nodes and graph.NodeSlicer interfaces. -// The iteration order of OrderedNodes is the order of nodes passed to -// NewOrderedNodes. -type OrderedNodes struct { - idx int - nodes []graph.Node -} - -// NewOrderedNodes returns an OrderedNodes initialized with the provided nodes. -func NewOrderedNodes(nodes []graph.Node) *OrderedNodes { - return &OrderedNodes{idx: -1, nodes: nodes} -} - -// Len returns the remaining number of nodes to be iterated over. -func (n *OrderedNodes) Len() int { - if n.idx >= len(n.nodes) { - return 0 - } - if n.idx <= 0 { - return len(n.nodes) - } - return len(n.nodes[n.idx:]) -} - -// Next returns whether the next call of Node will return a valid node. -func (n *OrderedNodes) Next() bool { - if uint(n.idx)+1 < uint(len(n.nodes)) { - n.idx++ - return true - } - n.idx = len(n.nodes) - return false -} - -// Node returns the current node of the iterator. Next must have been -// called prior to a call to Node. -func (n *OrderedNodes) Node() graph.Node { - if n.idx >= len(n.nodes) || n.idx < 0 { - return nil - } - return n.nodes[n.idx] -} - -// NodeSlice returns all the remaining nodes in the iterator and advances -// the iterator. -func (n *OrderedNodes) NodeSlice() []graph.Node { - if n.idx >= len(n.nodes) { - return nil - } - idx := n.idx - if idx == -1 { - idx = 0 - } - n.idx = len(n.nodes) - return n.nodes[idx:] -} - -// Reset returns the iterator to its initial state. -func (n *OrderedNodes) Reset() { - n.idx = -1 -} - -// ImplicitNodes implements the graph.Nodes interface for a set of nodes over -// a contiguous ID range. -type ImplicitNodes struct { - beg, end int - curr int - newNode func(id int) graph.Node -} - -// NewImplicitNodes returns a new implicit node iterator spanning nodes in [beg,end). -// The provided new func maps the id to a graph.Node. NewImplicitNodes will panic -// if beg is greater than end.
-func NewImplicitNodes(beg, end int, new func(id int) graph.Node) *ImplicitNodes { - if beg > end { - panic("iterator: invalid range") - } - return &ImplicitNodes{beg: beg, end: end, curr: beg - 1, newNode: new} -} - -// Len returns the remaining number of nodes to be iterated over. -func (n *ImplicitNodes) Len() int { - return n.end - n.curr - 1 -} - -// Next returns whether the next call of Node will return a valid node. -func (n *ImplicitNodes) Next() bool { - if n.curr == n.end { - return false - } - n.curr++ - return n.curr < n.end -} - -// Node returns the current node of the iterator. Next must have been -// called prior to a call to Node. -func (n *ImplicitNodes) Node() graph.Node { - if n.Len() == -1 || n.curr < n.beg { - return nil - } - return n.newNode(n.curr) -} - -// Reset returns the iterator to its initial state. -func (n *ImplicitNodes) Reset() { - n.curr = n.beg - 1 -} - -// NodeSlice returns all the remaining nodes in the iterator and advances -// the iterator. -func (n *ImplicitNodes) NodeSlice() []graph.Node { - nodes := make([]graph.Node, 0, n.Len()) - for n.curr++; n.curr < n.end; n.curr++ { - nodes = append(nodes, n.newNode(n.curr)) - } - return nodes -} diff --git a/vendor/gonum.org/v1/gonum/graph/multigraph.go b/vendor/gonum.org/v1/gonum/graph/multigraph.go deleted file mode 100644 index 038a3d515..000000000 --- a/vendor/gonum.org/v1/gonum/graph/multigraph.go +++ /dev/null @@ -1,198 +0,0 @@ -// Copyright ©2014 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package graph - -// Line is an edge in a multigraph. A Line returns an ID that must -// distinguish Lines sharing Node end points. -type Line interface { - // From returns the from node of the edge. - From() Node - - // To returns the to node of the edge. - To() Node - - // ReversedLine returns a line that has the - // end points of the receiver swapped. - ReversedLine() Line - - // ID returns the unique ID for the Line. - ID() int64 -} - -// WeightedLine is a weighted multigraph edge. -type WeightedLine interface { - Line - Weight() float64 -} - -// Multigraph is a generalized multigraph. -type Multigraph interface { - // Node returns the node with the given ID if it exists - // in the multigraph, and nil otherwise. - Node(id int64) Node - - // Nodes returns all the nodes in the multigraph. - // - // Nodes must not return nil. - Nodes() Nodes - - // From returns all nodes that can be reached directly - // from the node with the given ID. - // - // From must not return nil. - From(id int64) Nodes - - // HasEdgeBetween returns whether an edge exists between - // nodes with IDs xid and yid without considering direction. - HasEdgeBetween(xid, yid int64) bool - - // Lines returns the lines from u to v, with IDs uid and - // vid, if any such lines exist and nil otherwise. The - // node v must be directly reachable from u as defined by - // the From method. - // - // Lines must not return nil. - Lines(uid, vid int64) Lines -} - -// WeightedMultigraph is a weighted multigraph. -type WeightedMultigraph interface { - Multigraph - - // WeightedLines returns the weighted lines from u to v - // with IDs uid and vid if any such lines exist and nil - // otherwise. The node v must be directly reachable - // from u as defined by the From method. - // - // WeightedLines must not return nil. - WeightedLines(uid, vid int64) WeightedLines -} - -// UndirectedMultigraph is an undirected multigraph. 
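A brief sketch (editor's illustration, not vendored code) of ImplicitNodes, which constructs nodes on demand for a contiguous ID range rather than materializing a slice; simple.Node here is assumed from the upstream simple package shown later in this hunk:

package main

import (
	"fmt"

	"gonum.org/v1/gonum/graph"
	"gonum.org/v1/gonum/graph/iterator"
	"gonum.org/v1/gonum/graph/simple"
)

func main() {
	// Nodes with IDs 0, 1 and 2, built lazily by the callback.
	it := iterator.NewImplicitNodes(0, 3, func(id int) graph.Node {
		return simple.Node(id)
	})
	for it.Next() {
		fmt.Println(it.Node().ID())
	}
}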
-type UndirectedMultigraph interface { - Multigraph - - // LinesBetween returns the lines between nodes x and y - // with IDs xid and yid. - // - // LinesBetween must not return nil. - LinesBetween(xid, yid int64) Lines -} - -// WeightedUndirectedMultigraph is a weighted undirected multigraph. -type WeightedUndirectedMultigraph interface { - WeightedMultigraph - - // WeightedLinesBetween returns the lines between nodes - // x and y with IDs xid and yid. - // - // WeightedLinesBetween must not return nil. - WeightedLinesBetween(xid, yid int64) WeightedLines -} - -// DirectedMultigraph is a directed multigraph. -type DirectedMultigraph interface { - Multigraph - - // HasEdgeFromTo returns whether an edge exists - // in the multigraph from u to v with IDs uid - // and vid. - HasEdgeFromTo(uid, vid int64) bool - - // To returns all nodes that can reach directly - // to the node with the given ID. - // - // To must not return nil. - To(id int64) Nodes -} - -// WeightedDirectedMultigraph is a weighted directed multigraph. -type WeightedDirectedMultigraph interface { - WeightedMultigraph - - // HasEdgeFromTo returns whether an edge exists - // in the multigraph from u to v with IDs uid - // and vid. - HasEdgeFromTo(uid, vid int64) bool - - // To returns all nodes that can reach directly - // to the node with the given ID. - // - // To must not return nil. - To(id int64) Nodes -} - -// LineAdder is an interface for adding lines to a multigraph. -type LineAdder interface { - // NewLine returns a new Line from the source to the destination node. - NewLine(from, to Node) Line - - // SetLine adds a Line from one node to another. - // If the multigraph supports node addition the nodes - // will be added if they do not exist, otherwise - // SetLine will panic. - // Whether l, l.From() and l.To() are stored - // within the graph is implementation dependent. - SetLine(l Line) -} - -// WeightedLineAdder is an interface for adding lines to a multigraph. -type WeightedLineAdder interface { - // NewWeightedLine returns a new WeightedLine from - // the source to the destination node. - NewWeightedLine(from, to Node, weight float64) WeightedLine - - // SetWeightedLine adds a weighted line from one node - // to another. If the multigraph supports node addition - // the nodes will be added if they do not exist, - // otherwise SetWeightedLine will panic. - // Whether l, l.From() and l.To() are stored - // within the graph is implementation dependent. - SetWeightedLine(l WeightedLine) -} - -// LineRemover is an interface for removing lines from a multigraph. -type LineRemover interface { - // RemoveLine removes the line with the given end - // and line IDs, leaving the terminal nodes. If - // the line does not exist it is a no-op. - RemoveLine(fid, tid, id int64) -} - -// MultigraphBuilder is a multigraph that can have nodes and lines added. -type MultigraphBuilder interface { - NodeAdder - LineAdder -} - -// WeightedMultigraphBuilder is a multigraph that can have nodes and weighted lines added. -type WeightedMultigraphBuilder interface { - NodeAdder - WeightedLineAdder -} - -// UndirectedMultigraphBuilder is an undirected multigraph builder. -type UndirectedMultigraphBuilder interface { - UndirectedMultigraph - MultigraphBuilder -} - -// UndirectedWeightedMultigraphBuilder is an undirected weighted multigraph builder. -type UndirectedWeightedMultigraphBuilder interface { - UndirectedMultigraph - WeightedMultigraphBuilder -} - -// DirectedMultigraphBuilder is a directed multigraph builder.
-type DirectedMultigraphBuilder interface { - DirectedMultigraph - MultigraphBuilder -} - -// DirectedWeightedMultigraphBuilder is a directed weighted multigraph builder. -type DirectedWeightedMultigraphBuilder interface { - DirectedMultigraph - WeightedMultigraphBuilder -} diff --git a/vendor/gonum.org/v1/gonum/graph/nodes_edges.go b/vendor/gonum.org/v1/gonum/graph/nodes_edges.go deleted file mode 100644 index 3d5dae1fa..000000000 --- a/vendor/gonum.org/v1/gonum/graph/nodes_edges.go +++ /dev/null @@ -1,300 +0,0 @@ -// Copyright ©2018 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package graph - -// Iterator is an item iterator. -type Iterator interface { - // Next advances the iterator and returns whether - // the next call to the item method will return a - // non-nil item. - // - // Next should be called prior to any call to the - // iterator's item retrieval method after the - // iterator has been obtained or reset. - // - // The order of iteration is implementation - // dependent. - Next() bool - - // Len returns the number of items remaining in the - // iterator. - // - // If the number of items in the iterator is unknown, - // too large to materialize or too costly to calculate - // then Len may return a negative value. - // In this case the consuming function must be able - // to operate on the items of the iterator directly - // without materializing the items into a slice. - // The magnitude of a negative length has - // implementation-dependent semantics. - Len() int - - // Reset returns the iterator to its start position. - Reset() -} - -// Nodes is a Node iterator. -type Nodes interface { - Iterator - - // Node returns the current Node from the iterator. - Node() Node -} - -// NodeSlicer wraps the NodeSlice method. -type NodeSlicer interface { - // NodeSlice returns the set of nodes remaining - // to be iterated by a Nodes iterator. - // The holder of the iterator may arbitrarily - // change elements in the returned slice, but - // those changes may be reflected to other - // iterators. - NodeSlice() []Node -} - -// NodesOf returns it.Len() nodes from it. If it is a NodeSlicer, the NodeSlice method -// is used to obtain the nodes. It is safe to pass a nil Nodes to NodesOf. -// -// If the Nodes has an indeterminate length, NodesOf will panic. -func NodesOf(it Nodes) []Node { - if it == nil { - return nil - } - len := it.Len() - switch { - case len == 0: - return nil - case len < 0: - panic("graph: called NodesOf on indeterminate iterator") - } - switch it := it.(type) { - case NodeSlicer: - return it.NodeSlice() - } - n := make([]Node, 0, len) - for it.Next() { - n = append(n, it.Node()) - } - return n -} - -// Edges is an Edge iterator. -type Edges interface { - Iterator - - // Edge returns the current Edge from the iterator. - Edge() Edge -} - -// EdgeSlicer wraps the EdgeSlice method. -type EdgeSlicer interface { - // EdgeSlice returns the set of edges remaining - // to be iterated by an Edges iterator. - // The holder of the iterator may arbitrarily - // change elements in the returned slice, but - // those changes may be reflected to other - // iterators. - EdgeSlice() []Edge -} - -// EdgesOf returns it.Len() edges from it. If it is an EdgeSlicer, the EdgeSlice method is used -// to obtain the edges. It is safe to pass a nil Edges to EdgesOf. -// -// If the Edges has an indeterminate length, EdgesOf will panic.
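A quick sketch (editor's illustration, not vendored code) of materializing an iterator with NodesOf, the usual route when a slice is genuinely needed; the simple package is assumed from upstream gonum:

package main

import (
	"fmt"

	"gonum.org/v1/gonum/graph"
	"gonum.org/v1/gonum/graph/simple"
)

func main() {
	g := simple.NewDirectedGraph()
	for i := 0; i < 3; i++ {
		g.AddNode(g.NewNode())
	}
	// NodesOf drains the iterator, using NodeSlice where available.
	nodes := graph.NodesOf(g.Nodes())
	fmt.Println(len(nodes)) // 3
}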
-func EdgesOf(it Edges) []Edge { - if it == nil { - return nil - } - len := it.Len() - switch { - case len == 0: - return nil - case len < 0: - panic("graph: called EdgesOf on indeterminate iterator") - } - switch it := it.(type) { - case EdgeSlicer: - return it.EdgeSlice() - } - e := make([]Edge, 0, len) - for it.Next() { - e = append(e, it.Edge()) - } - return e -} - -// WeightedEdges is a WeightedEdge iterator. -type WeightedEdges interface { - Iterator - - // WeightedEdge returns the current WeightedEdge from the iterator. - WeightedEdge() WeightedEdge -} - -// WeightedEdgeSlicer wraps the WeightedEdgeSlice method. -type WeightedEdgeSlicer interface { - // WeightedEdgeSlice returns the set of weighted edges remaining - // to be iterated by a WeightedEdges iterator. - // The holder of the iterator may arbitrarily - // change elements in the returned slice, but - // those changes may be reflected to other - // iterators. - WeightedEdgeSlice() []WeightedEdge -} - -// WeightedEdgesOf returns it.Len() weighted edges from it. If it is a WeightedEdgeSlicer, the -// WeightedEdgeSlice method is used to obtain the edges. It is safe to pass a nil WeightedEdges -// to WeightedEdgesOf. -// -// If the WeightedEdges has an indeterminate length, WeightedEdgesOf will panic. -func WeightedEdgesOf(it WeightedEdges) []WeightedEdge { - if it == nil { - return nil - } - len := it.Len() - switch { - case len == 0: - return nil - case len < 0: - panic("graph: called WeightedEdgesOf on indeterminate iterator") - } - switch it := it.(type) { - case WeightedEdgeSlicer: - return it.WeightedEdgeSlice() - } - e := make([]WeightedEdge, 0, len) - for it.Next() { - e = append(e, it.WeightedEdge()) - } - return e -} - -// Lines is a Line iterator. -type Lines interface { - Iterator - - // Line returns the current Line from the iterator. - Line() Line -} - -// LineSlicer wraps the LineSlice method. -type LineSlicer interface { - // LineSlice returns the set of lines remaining - // to be iterated by a Lines iterator. - // The holder of the iterator may arbitrarily - // change elements in the returned slice, but - // those changes may be reflected to other - // iterators. - LineSlice() []Line -} - -// LinesOf returns it.Len() lines from it. If it is a LineSlicer, the LineSlice method is used -// to obtain the lines. It is safe to pass a nil Lines to LinesOf. -// -// If the Lines has an indeterminate length, LinesOf will panic. -func LinesOf(it Lines) []Line { - if it == nil { - return nil - } - len := it.Len() - switch { - case len == 0: - return nil - case len < 0: - panic("graph: called LinesOf on indeterminate iterator") - } - switch it := it.(type) { - case LineSlicer: - return it.LineSlice() - } - l := make([]Line, 0, len) - for it.Next() { - l = append(l, it.Line()) - } - return l -} - -// WeightedLines is a WeightedLine iterator. -type WeightedLines interface { - Iterator - - // WeightedLine returns the current WeightedLine from the iterator. - WeightedLine() WeightedLine -} - -// WeightedLineSlicer wraps the WeightedLineSlice method. -type WeightedLineSlicer interface { - // WeightedLineSlice returns the set of weighted lines remaining - // to be iterated by a WeightedLines iterator. - // The holder of the iterator may arbitrarily - // change elements in the returned slice, but - // those changes may be reflected to other - // iterators. - WeightedLineSlice() []WeightedLine -} - -// WeightedLinesOf returns it.Len() weighted lines from it. If it is a WeightedLineSlicer, the -// WeightedLineSlice method is used to obtain the lines. It is safe to pass a nil WeightedLines -// to WeightedLinesOf.
-// -// If the WeightedLines has an indeterminate length, WeightedLinesOf will panic. -func WeightedLinesOf(it WeightedLines) []WeightedLine { - if it == nil { - return nil - } - len := it.Len() - switch { - case len == 0: - return nil - case len < 0: - panic("graph: called WeightedLinesOf on indeterminate iterator") - } - switch it := it.(type) { - case WeightedLineSlicer: - return it.WeightedLineSlice() - } - l := make([]WeightedLine, 0, len) - for it.Next() { - l = append(l, it.WeightedLine()) - } - return l -} - -// Empty is an empty set of nodes, edges or lines. It should be used when -// a graph returns a zero-length Iterator. Empty implements the slicer -// interfaces for nodes, edges and lines, returning nil for each of these. -const Empty = nothing - -var ( - _ Iterator = Empty - _ Nodes = Empty - _ NodeSlicer = Empty - _ Edges = Empty - _ EdgeSlicer = Empty - _ WeightedEdges = Empty - _ WeightedEdgeSlicer = Empty - _ Lines = Empty - _ LineSlicer = Empty - _ WeightedLines = Empty - _ WeightedLineSlicer = Empty -) - -const nothing = empty(true) - -type empty bool - -func (empty) Next() bool { return false } -func (empty) Len() int { return 0 } -func (empty) Reset() {} -func (empty) Node() Node { return nil } -func (empty) NodeSlice() []Node { return nil } -func (empty) Edge() Edge { return nil } -func (empty) EdgeSlice() []Edge { return nil } -func (empty) WeightedEdge() WeightedEdge { return nil } -func (empty) WeightedEdgeSlice() []WeightedEdge { return nil } -func (empty) Line() Line { return nil } -func (empty) LineSlice() []Line { return nil } -func (empty) WeightedLine() WeightedLine { return nil } -func (empty) WeightedLineSlice() []WeightedLine { return nil } diff --git a/vendor/gonum.org/v1/gonum/graph/simple/dense_directed_matrix.go b/vendor/gonum.org/v1/gonum/graph/simple/dense_directed_matrix.go deleted file mode 100644 index 3daca9ade..000000000 --- a/vendor/gonum.org/v1/gonum/graph/simple/dense_directed_matrix.go +++ /dev/null @@ -1,301 +0,0 @@ -// Copyright ©2014 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package simple - -import ( - "sort" - - "gonum.org/v1/gonum/graph" - "gonum.org/v1/gonum/graph/internal/ordered" - "gonum.org/v1/gonum/graph/iterator" - "gonum.org/v1/gonum/mat" -) - -var ( - dm *DirectedMatrix - - _ graph.Graph = dm - _ graph.Directed = dm - _ edgeSetter = dm - _ weightedEdgeSetter = dm -) - -// DirectedMatrix represents a directed graph using an adjacency -// matrix such that all IDs are in a contiguous block from 0 to n-1. -// Edges are stored implicitly as an edge weight, so edges stored in -// the graph are not recoverable. -type DirectedMatrix struct { - mat *mat.Dense - nodes []graph.Node - - self float64 - absent float64 -} - -// NewDirectedMatrix creates a directed dense graph with n nodes. -// All edges are initialized with the weight given by init. The self parameter -// specifies the cost of self connection, and absent specifies the weight -// returned for absent edges. -func NewDirectedMatrix(n int, init, self, absent float64) *DirectedMatrix { - matrix := make([]float64, n*n) - if init != 0 { - for i := range matrix { - matrix[i] = init - } - } - for i := 0; i < len(matrix); i += n + 1 { - matrix[i] = self - } - return &DirectedMatrix{ - mat: mat.NewDense(n, n, matrix), - self: self, - absent: absent, - } -} - -// NewDirectedMatrixFrom creates a directed dense graph with the given nodes. 
-// The IDs of the nodes must be contiguous from 0 to len(nodes)-1, but may -// be in any order. If IDs are not contiguous NewDirectedMatrixFrom will panic. -// All edges are initialized with the weight given by init. The self parameter -// specifies the cost of self connection, and absent specifies the weight -// returned for absent edges. -func NewDirectedMatrixFrom(nodes []graph.Node, init, self, absent float64) *DirectedMatrix { - sort.Sort(ordered.ByID(nodes)) - for i, n := range nodes { - if int64(i) != n.ID() { - panic("simple: non-contiguous node IDs") - } - } - g := NewDirectedMatrix(len(nodes), init, self, absent) - g.nodes = nodes - return g -} - -// Edge returns the edge from u to v if such an edge exists and nil otherwise. -// The node v must be directly reachable from u as defined by the From method. -func (g *DirectedMatrix) Edge(uid, vid int64) graph.Edge { - return g.WeightedEdge(uid, vid) -} - -// Edges returns all the edges in the graph. -func (g *DirectedMatrix) Edges() graph.Edges { - var edges []graph.Edge - r, _ := g.mat.Dims() - for i := 0; i < r; i++ { - for j := 0; j < r; j++ { - if i == j { - continue - } - if w := g.mat.At(i, j); !isSame(w, g.absent) { - edges = append(edges, WeightedEdge{F: g.Node(int64(i)), T: g.Node(int64(j)), W: w}) - } - } - } - if len(edges) == 0 { - return graph.Empty - } - return iterator.NewOrderedEdges(edges) -} - -// From returns all nodes in g that can be reached directly from n. -func (g *DirectedMatrix) From(id int64) graph.Nodes { - if !g.has(id) { - return graph.Empty - } - var nodes []graph.Node - _, c := g.mat.Dims() - for j := 0; j < c; j++ { - if int64(j) == id { - continue - } - // id is not greater than maximum int by this point. - if !isSame(g.mat.At(int(id), j), g.absent) { - nodes = append(nodes, g.Node(int64(j))) - } - } - if len(nodes) == 0 { - return graph.Empty - } - return iterator.NewOrderedNodes(nodes) -} - -// HasEdgeBetween returns whether an edge exists between nodes x and y without -// considering direction. -func (g *DirectedMatrix) HasEdgeBetween(xid, yid int64) bool { - if !g.has(xid) { - return false - } - if !g.has(yid) { - return false - } - // xid and yid are not greater than maximum int by this point. - return xid != yid && (!isSame(g.mat.At(int(xid), int(yid)), g.absent) || !isSame(g.mat.At(int(yid), int(xid)), g.absent)) -} - -// HasEdgeFromTo returns whether an edge exists in the graph from u to v. -func (g *DirectedMatrix) HasEdgeFromTo(uid, vid int64) bool { - if !g.has(uid) { - return false - } - if !g.has(vid) { - return false - } - // uid and vid are not greater than maximum int by this point. - return uid != vid && !isSame(g.mat.At(int(uid), int(vid)), g.absent) -} - -// Matrix returns the mat.Matrix representation of the graph. The orientation -// of the matrix is such that the matrix entry at G_{ij} is the weight of the edge -// from node i to node j. -func (g *DirectedMatrix) Matrix() mat.Matrix { - // Prevent alteration of dimensions of the returned matrix. - m := *g.mat - return &m -} - -// Node returns the node with the given ID if it exists in the graph, -// and nil otherwise. -func (g *DirectedMatrix) Node(id int64) graph.Node { - if !g.has(id) { - return nil - } - if g.nodes == nil { - return Node(id) - } - return g.nodes[id] -} - -// Nodes returns all the nodes in the graph. 
-func (g *DirectedMatrix) Nodes() graph.Nodes { - if g.nodes != nil { - nodes := make([]graph.Node, len(g.nodes)) - copy(nodes, g.nodes) - return iterator.NewOrderedNodes(nodes) - } - r, _ := g.mat.Dims() - // Matrix graphs must have at least one node. - return iterator.NewImplicitNodes(0, r, newSimpleNode) -} - -// RemoveEdge removes the edge with the given end point nodes from the graph, leaving the terminal -// nodes. If the edge does not exist it is a no-op. -func (g *DirectedMatrix) RemoveEdge(fid, tid int64) { - if !g.has(fid) { - return - } - if !g.has(tid) { - return - } - // fid and tid are not greater than maximum int by this point. - g.mat.Set(int(fid), int(tid), g.absent) -} - -// SetEdge sets e, an edge from one node to another with unit weight. If the ends of the edge -// are not in g or the edge is a self loop, SetEdge panics. SetEdge will store the nodes of -// e in the graph if it was initialized with NewDirectedMatrixFrom. -func (g *DirectedMatrix) SetEdge(e graph.Edge) { - g.setWeightedEdge(e, 1) -} - -// SetWeightedEdge sets e, an edge from one node to another. If the ends of the edge are not in g -// or the edge is a self loop, SetWeightedEdge panics. SetWeightedEdge will store the nodes of -// e in the graph if it was initialized with NewDirectedMatrixFrom. -func (g *DirectedMatrix) SetWeightedEdge(e graph.WeightedEdge) { - g.setWeightedEdge(e, e.Weight()) -} - -func (g *DirectedMatrix) setWeightedEdge(e graph.Edge, weight float64) { - from := e.From() - fid := from.ID() - to := e.To() - tid := to.ID() - if fid == tid { - panic("simple: set illegal edge") - } - if int64(int(fid)) != fid { - panic("simple: unavailable from node ID for dense graph") - } - if int64(int(tid)) != tid { - panic("simple: unavailable to node ID for dense graph") - } - if g.nodes != nil { - g.nodes[fid] = from - g.nodes[tid] = to - } - // fid and tid are not greater than maximum int by this point. - g.mat.Set(int(fid), int(tid), weight) -} - -// To returns all nodes in g that can reach directly to n. -func (g *DirectedMatrix) To(id int64) graph.Nodes { - if !g.has(id) { - return graph.Empty - } - var nodes []graph.Node - r, _ := g.mat.Dims() - for i := 0; i < r; i++ { - if int64(i) == id { - continue - } - // id is not greater than maximum int by this point. - if !isSame(g.mat.At(i, int(id)), g.absent) { - nodes = append(nodes, g.Node(int64(i))) - } - } - if len(nodes) == 0 { - return graph.Empty - } - return iterator.NewOrderedNodes(nodes) -} - -// Weight returns the weight for the edge between x and y if Edge(x, y) returns a non-nil Edge. -// If x and y are the same node or there is no joining edge between the two nodes the weight -// value returned is either the graph's absent or self value. Weight returns true if an edge -// exists between x and y or if x and y have the same ID, false otherwise. -func (g *DirectedMatrix) Weight(xid, yid int64) (w float64, ok bool) { - if xid == yid { - return g.self, true - } - if g.HasEdgeFromTo(xid, yid) { - // xid and yid are not greater than maximum int by this point. - return g.mat.At(int(xid), int(yid)), true - } - return g.absent, false -} - -// WeightedEdge returns the weighted edge from u to v if such an edge exists and nil otherwise. -// The node v must be directly reachable from u as defined by the From method. -func (g *DirectedMatrix) WeightedEdge(uid, vid int64) graph.WeightedEdge { - if g.HasEdgeFromTo(uid, vid) { - // uid and vid are not greater than maximum int by this point.
- return WeightedEdge{F: g.Node(uid), T: g.Node(vid), W: g.mat.At(int(uid), int(vid))} - } - return nil -} - -// WeightedEdges returns all the edges in the graph. -func (g *DirectedMatrix) WeightedEdges() graph.WeightedEdges { - var edges []graph.WeightedEdge - r, _ := g.mat.Dims() - for i := 0; i < r; i++ { - for j := 0; j < r; j++ { - if i == j { - continue - } - if w := g.mat.At(i, j); !isSame(w, g.absent) { - edges = append(edges, WeightedEdge{F: g.Node(int64(i)), T: g.Node(int64(j)), W: w}) - } - } - } - if len(edges) == 0 { - return graph.Empty - } - return iterator.NewOrderedWeightedEdges(edges) -} - -func (g *DirectedMatrix) has(id int64) bool { - r, _ := g.mat.Dims() - return 0 <= id && id < int64(r) -} diff --git a/vendor/gonum.org/v1/gonum/graph/simple/dense_undirected_matrix.go b/vendor/gonum.org/v1/gonum/graph/simple/dense_undirected_matrix.go deleted file mode 100644 index f51debb4f..000000000 --- a/vendor/gonum.org/v1/gonum/graph/simple/dense_undirected_matrix.go +++ /dev/null @@ -1,268 +0,0 @@ -// Copyright ©2014 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package simple - -import ( - "sort" - - "gonum.org/v1/gonum/graph" - "gonum.org/v1/gonum/graph/internal/ordered" - "gonum.org/v1/gonum/graph/iterator" - "gonum.org/v1/gonum/mat" -) - -var ( - um *UndirectedMatrix - - _ graph.Graph = um - _ graph.Undirected = um - _ edgeSetter = um - _ weightedEdgeSetter = um -) - -// UndirectedMatrix represents an undirected graph using an adjacency -// matrix such that all IDs are in a contiguous block from 0 to n-1. -// Edges are stored implicitly as an edge weight, so edges stored in -// the graph are not recoverable. -type UndirectedMatrix struct { - mat *mat.SymDense - nodes []graph.Node - - self float64 - absent float64 -} - -// NewUndirectedMatrix creates an undirected dense graph with n nodes. -// All edges are initialized with the weight given by init. The self parameter -// specifies the cost of self connection, and absent specifies the weight -// returned for absent edges. -func NewUndirectedMatrix(n int, init, self, absent float64) *UndirectedMatrix { - matrix := make([]float64, n*n) - if init != 0 { - for i := range matrix { - matrix[i] = init - } - } - for i := 0; i < len(matrix); i += n + 1 { - matrix[i] = self - } - return &UndirectedMatrix{ - mat: mat.NewSymDense(n, matrix), - self: self, - absent: absent, - } -} - -// NewUndirectedMatrixFrom creates an undirected dense graph with the given nodes. -// The IDs of the nodes must be contiguous from 0 to len(nodes)-1, but may -// be in any order. If IDs are not contiguous NewUndirectedMatrixFrom will panic. -// All edges are initialized with the weight given by init. The self parameter -// specifies the cost of self connection, and absent specifies the weight -// returned for absent edges. -func NewUndirectedMatrixFrom(nodes []graph.Node, init, self, absent float64) *UndirectedMatrix { - sort.Sort(ordered.ByID(nodes)) - for i, n := range nodes { - if int64(i) != n.ID() { - panic("simple: non-contiguous node IDs") - } - } - g := NewUndirectedMatrix(len(nodes), init, self, absent) - g.nodes = nodes - return g -} - -// Edge returns the edge from u to v if such an edge exists and nil otherwise. -// The node v must be directly reachable from u as defined by the From method. 
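A usage sketch (editor's illustration, not vendored code) for DirectedMatrix: IDs are fixed to the contiguous range 0..n-1 and edges live only as weights. Passing the absent value as init so the graph starts with no edges is an assumed idiom here, not taken from the patch:

package main

import (
	"fmt"
	"math"

	"gonum.org/v1/gonum/graph/simple"
)

func main() {
	// +Inf as both the initial and the absent weight means all edges
	// start absent; self loops cost 0.
	g := simple.NewDirectedMatrix(4, math.Inf(1), 0, math.Inf(1))
	g.SetEdge(simple.Edge{F: simple.Node(0), T: simple.Node(1)})

	fmt.Println(g.HasEdgeFromTo(0, 1)) // true
	fmt.Println(g.HasEdgeFromTo(1, 0)) // false: direction matters
	w, ok := g.Weight(0, 1)
	fmt.Println(w, ok) // 1 true: SetEdge stores unit weight
}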
-func (g *UndirectedMatrix) Edge(uid, vid int64) graph.Edge { - return g.WeightedEdgeBetween(uid, vid) -} - -// EdgeBetween returns the edge between nodes x and y. -func (g *UndirectedMatrix) EdgeBetween(uid, vid int64) graph.Edge { - return g.WeightedEdgeBetween(uid, vid) -} - -// Edges returns all the edges in the graph. -func (g *UndirectedMatrix) Edges() graph.Edges { - var edges []graph.Edge - r, _ := g.mat.Dims() - for i := 0; i < r; i++ { - for j := i + 1; j < r; j++ { - if w := g.mat.At(i, j); !isSame(w, g.absent) { - edges = append(edges, WeightedEdge{F: g.Node(int64(i)), T: g.Node(int64(j)), W: w}) - } - } - } - if len(edges) == 0 { - return graph.Empty - } - return iterator.NewOrderedEdges(edges) -} - -// From returns all nodes in g that can be reached directly from n. -func (g *UndirectedMatrix) From(id int64) graph.Nodes { - if !g.has(id) { - return graph.Empty - } - var nodes []graph.Node - r := g.mat.Symmetric() - for i := 0; i < r; i++ { - if int64(i) == id { - continue - } - // id is not greater than maximum int by this point. - if !isSame(g.mat.At(int(id), i), g.absent) { - nodes = append(nodes, g.Node(int64(i))) - } - } - if len(nodes) == 0 { - return graph.Empty - } - return iterator.NewOrderedNodes(nodes) -} - -// HasEdgeBetween returns whether an edge exists between nodes x and y. -func (g *UndirectedMatrix) HasEdgeBetween(uid, vid int64) bool { - if !g.has(uid) { - return false - } - if !g.has(vid) { - return false - } - // uid and vid are not greater than maximum int by this point. - return uid != vid && !isSame(g.mat.At(int(uid), int(vid)), g.absent) -} - -// Matrix returns the mat.Matrix representation of the graph. -func (g *UndirectedMatrix) Matrix() mat.Matrix { - // Prevent alteration of dimensions of the returned matrix. - m := *g.mat - return &m -} - -// Node returns the node with the given ID if it exists in the graph, -// and nil otherwise. -func (g *UndirectedMatrix) Node(id int64) graph.Node { - if !g.has(id) { - return nil - } - if g.nodes == nil { - return Node(id) - } - return g.nodes[id] -} - -// Nodes returns all the nodes in the graph. -func (g *UndirectedMatrix) Nodes() graph.Nodes { - if g.nodes != nil { - nodes := make([]graph.Node, len(g.nodes)) - copy(nodes, g.nodes) - return iterator.NewOrderedNodes(nodes) - } - r := g.mat.Symmetric() - // Matrix graphs must have at least one node. - return iterator.NewImplicitNodes(0, r, newSimpleNode) -} - -// RemoveEdge removes the edge with the given end point IDs from the graph, leaving the terminal -// nodes. If the edge does not exist it is a no-op. -func (g *UndirectedMatrix) RemoveEdge(fid, tid int64) { - if !g.has(fid) { - return - } - if !g.has(tid) { - return - } - // fid and tid are not greater than maximum int by this point. - g.mat.SetSym(int(fid), int(tid), g.absent) -} - -// SetEdge sets e, an edge from one node to another with unit weight. If the ends of the edge are -// not in g or the edge is a self loop, SetEdge panics. SetEdge will store the nodes of -// e in the graph if it was initialized with NewUndirectedMatrixFrom. -func (g *UndirectedMatrix) SetEdge(e graph.Edge) { - g.setWeightedEdge(e, 1) -} - -// SetWeightedEdge sets e, an edge from one node to another. If the ends of the edge are not in g -// or the edge is a self loop, SetWeightedEdge panics. SetWeightedEdge will store the nodes of -// e in the graph if it was initialized with NewUndirectedMatrixFrom. 
-func (g *UndirectedMatrix) SetWeightedEdge(e graph.WeightedEdge) { - g.setWeightedEdge(e, e.Weight()) -} - -func (g *UndirectedMatrix) setWeightedEdge(e graph.Edge, weight float64) { - from := e.From() - fid := from.ID() - to := e.To() - tid := to.ID() - if fid == tid { - panic("simple: set illegal edge") - } - if int64(int(fid)) != fid { - panic("simple: unavailable from node ID for dense graph") - } - if int64(int(tid)) != tid { - panic("simple: unavailable to node ID for dense graph") - } - if g.nodes != nil { - g.nodes[fid] = from - g.nodes[tid] = to - } - // fid and tid are not greater than maximum int by this point. - g.mat.SetSym(int(fid), int(tid), weight) -} - -// Weight returns the weight for the edge between x and y if Edge(x, y) returns a non-nil Edge. -// If x and y are the same node or there is no joining edge between the two nodes the weight -// value returned is either the graph's absent or self value. Weight returns true if an edge -// exists between x and y or if x and y have the same ID, false otherwise. -func (g *UndirectedMatrix) Weight(xid, yid int64) (w float64, ok bool) { - if xid == yid { - return g.self, true - } - if g.HasEdgeBetween(xid, yid) { - // xid and yid are not greater than maximum int by this point. - return g.mat.At(int(xid), int(yid)), true - } - return g.absent, false -} - -// WeightedEdge returns the weighted edge from u to v if such an edge exists and nil otherwise. -// The node v must be directly reachable from u as defined by the From method. -func (g *UndirectedMatrix) WeightedEdge(uid, vid int64) graph.WeightedEdge { - return g.WeightedEdgeBetween(uid, vid) -} - -// WeightedEdgeBetween returns the weighted edge between nodes x and y. -func (g *UndirectedMatrix) WeightedEdgeBetween(uid, vid int64) graph.WeightedEdge { - if g.HasEdgeBetween(uid, vid) { - // uid and vid are not greater than maximum int by this point. - return WeightedEdge{F: g.Node(uid), T: g.Node(vid), W: g.mat.At(int(uid), int(vid))} - } - return nil -} - -// WeightedEdges returns all the edges in the graph. -func (g *UndirectedMatrix) WeightedEdges() graph.WeightedEdges { - var edges []graph.WeightedEdge - r, _ := g.mat.Dims() - for i := 0; i < r; i++ { - for j := i + 1; j < r; j++ { - if w := g.mat.At(i, j); !isSame(w, g.absent) { - edges = append(edges, WeightedEdge{F: g.Node(int64(i)), T: g.Node(int64(j)), W: w}) - } - } - } - if len(edges) == 0 { - return graph.Empty - } - return iterator.NewOrderedWeightedEdges(edges) -} - -func (g *UndirectedMatrix) has(id int64) bool { - r := g.mat.Symmetric() - return 0 <= id && id < int64(r) -} diff --git a/vendor/gonum.org/v1/gonum/graph/simple/directed.go b/vendor/gonum.org/v1/gonum/graph/simple/directed.go deleted file mode 100644 index f19efbd0a..000000000 --- a/vendor/gonum.org/v1/gonum/graph/simple/directed.go +++ /dev/null @@ -1,235 +0,0 @@ -// Copyright ©2014 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package simple - -import ( - "fmt" - - "gonum.org/v1/gonum/graph" - "gonum.org/v1/gonum/graph/internal/uid" - "gonum.org/v1/gonum/graph/iterator" -) - -var ( - dg *DirectedGraph - - _ graph.Graph = dg - _ graph.Directed = dg - _ graph.NodeAdder = dg - _ graph.NodeRemover = dg - _ graph.EdgeAdder = dg - _ graph.EdgeRemover = dg -) - -// DirectedGraph implements a generalized directed graph. 
-type DirectedGraph struct { - nodes map[int64]graph.Node - from map[int64]map[int64]graph.Edge - to map[int64]map[int64]graph.Edge - - nodeIDs uid.Set -} - -// NewDirectedGraph returns a DirectedGraph. -func NewDirectedGraph() *DirectedGraph { - return &DirectedGraph{ - nodes: make(map[int64]graph.Node), - from: make(map[int64]map[int64]graph.Edge), - to: make(map[int64]map[int64]graph.Edge), - - nodeIDs: uid.NewSet(), - } -} - -// AddNode adds n to the graph. It panics if the added node ID matches an existing node ID. -func (g *DirectedGraph) AddNode(n graph.Node) { - if _, exists := g.nodes[n.ID()]; exists { - panic(fmt.Sprintf("simple: node ID collision: %d", n.ID())) - } - g.nodes[n.ID()] = n - g.from[n.ID()] = make(map[int64]graph.Edge) - g.to[n.ID()] = make(map[int64]graph.Edge) - g.nodeIDs.Use(n.ID()) -} - -// Edge returns the edge from u to v if such an edge exists and nil otherwise. -// The node v must be directly reachable from u as defined by the From method. -func (g *DirectedGraph) Edge(uid, vid int64) graph.Edge { - edge, ok := g.from[uid][vid] - if !ok { - return nil - } - return edge -} - -// Edges returns all the edges in the graph. -func (g *DirectedGraph) Edges() graph.Edges { - var edges []graph.Edge - for _, u := range g.nodes { - for _, e := range g.from[u.ID()] { - edges = append(edges, e) - } - } - if len(edges) == 0 { - return graph.Empty - } - return iterator.NewOrderedEdges(edges) -} - -// From returns all nodes in g that can be reached directly from n. -func (g *DirectedGraph) From(id int64) graph.Nodes { - if _, ok := g.from[id]; !ok { - return graph.Empty - } - - from := make([]graph.Node, len(g.from[id])) - i := 0 - for vid := range g.from[id] { - from[i] = g.nodes[vid] - i++ - } - if len(from) == 0 { - return graph.Empty - } - return iterator.NewOrderedNodes(from) -} - -// HasEdgeBetween returns whether an edge exists between nodes x and y without -// considering direction. -func (g *DirectedGraph) HasEdgeBetween(xid, yid int64) bool { - if _, ok := g.from[xid][yid]; ok { - return true - } - _, ok := g.from[yid][xid] - return ok -} - -// HasEdgeFromTo returns whether an edge exists in the graph from u to v. -func (g *DirectedGraph) HasEdgeFromTo(uid, vid int64) bool { - if _, ok := g.from[uid][vid]; !ok { - return false - } - return true -} - -// NewEdge returns a new Edge from the source to the destination node. -func (g *DirectedGraph) NewEdge(from, to graph.Node) graph.Edge { - return &Edge{F: from, T: to} -} - -// NewNode returns a new unique Node to be added to g. The Node's ID does -// not become valid in g until the Node is added to g. -func (g *DirectedGraph) NewNode() graph.Node { - if len(g.nodes) == 0 { - return Node(0) - } - if int64(len(g.nodes)) == uid.Max { - panic("simple: cannot allocate node: no slot") - } - return Node(g.nodeIDs.NewID()) -} - -// Node returns the node with the given ID if it exists in the graph, -// and nil otherwise. -func (g *DirectedGraph) Node(id int64) graph.Node { - return g.nodes[id] -} - -// Nodes returns all the nodes in the graph. -func (g *DirectedGraph) Nodes() graph.Nodes { - if len(g.nodes) == 0 { - return graph.Empty - } - nodes := make([]graph.Node, len(g.nodes)) - i := 0 - for _, n := range g.nodes { - nodes[i] = n - i++ - } - return iterator.NewOrderedNodes(nodes) -} - -// RemoveEdge removes the edge with the given end point IDs from the graph, leaving the terminal -// nodes. If the edge does not exist it is a no-op. 
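// An illustrative round trip with the DirectedGraph above (a sketch,
// not vendored code; the node IDs are arbitrary):
//
//	g := simple.NewDirectedGraph()
//	g.SetEdge(g.NewEdge(simple.Node(1), simple.Node(2))) // missing end nodes are added implicitly
//	ok := g.HasEdgeFromTo(1, 2)                          // true; reversing the IDs yields false
//	g.RemoveEdge(1, 2)                                   // removing an absent edge is a no-op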
-func (g *DirectedGraph) RemoveEdge(fid, tid int64) { - if _, ok := g.nodes[fid]; !ok { - return - } - if _, ok := g.nodes[tid]; !ok { - return - } - - delete(g.from[fid], tid) - delete(g.to[tid], fid) -} - -// RemoveNode removes the node with the given ID from the graph, as well as any edges attached -// to it. If the node is not in the graph it is a no-op. -func (g *DirectedGraph) RemoveNode(id int64) { - if _, ok := g.nodes[id]; !ok { - return - } - delete(g.nodes, id) - - for from := range g.from[id] { - delete(g.to[from], id) - } - delete(g.from, id) - - for to := range g.to[id] { - delete(g.from[to], id) - } - delete(g.to, id) - - g.nodeIDs.Release(id) -} - -// SetEdge adds e, an edge from one node to another. If the nodes do not exist, they are added -// and are set to the nodes of the edge otherwise. -// It will panic if the IDs of the e.From and e.To are equal. -func (g *DirectedGraph) SetEdge(e graph.Edge) { - var ( - from = e.From() - fid = from.ID() - to = e.To() - tid = to.ID() - ) - - if fid == tid { - panic("simple: adding self edge") - } - - if _, ok := g.nodes[fid]; !ok { - g.AddNode(from) - } else { - g.nodes[fid] = from - } - if _, ok := g.nodes[tid]; !ok { - g.AddNode(to) - } else { - g.nodes[tid] = to - } - - g.from[fid][tid] = e - g.to[tid][fid] = e -} - -// To returns all nodes in g that can reach directly to n. -func (g *DirectedGraph) To(id int64) graph.Nodes { - if _, ok := g.from[id]; !ok { - return graph.Empty - } - - to := make([]graph.Node, len(g.to[id])) - i := 0 - for uid := range g.to[id] { - to[i] = g.nodes[uid] - i++ - } - if len(to) == 0 { - return graph.Empty - } - return iterator.NewOrderedNodes(to) -} diff --git a/vendor/gonum.org/v1/gonum/graph/simple/doc.go b/vendor/gonum.org/v1/gonum/graph/simple/doc.go deleted file mode 100644 index dc3f24c54..000000000 --- a/vendor/gonum.org/v1/gonum/graph/simple/doc.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright ©2017 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package simple provides a suite of simple graph implementations satisfying -// the gonum/graph interfaces. -// -// All types in simple return the graph.Empty value for empty iterators. -package simple // import "gonum.org/v1/gonum/graph/simple" diff --git a/vendor/gonum.org/v1/gonum/graph/simple/simple.go b/vendor/gonum.org/v1/gonum/graph/simple/simple.go deleted file mode 100644 index 3b4576587..000000000 --- a/vendor/gonum.org/v1/gonum/graph/simple/simple.go +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright ©2014 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package simple - -import ( - "math" - - "gonum.org/v1/gonum/graph" -) - -// Node is a simple graph node. -type Node int64 - -// ID returns the ID number of the node. -func (n Node) ID() int64 { - return int64(n) -} - -func newSimpleNode(id int) graph.Node { - return Node(id) -} - -// Edge is a simple graph edge. -type Edge struct { - F, T graph.Node -} - -// From returns the from-node of the edge. -func (e Edge) From() graph.Node { return e.F } - -// To returns the to-node of the edge. -func (e Edge) To() graph.Node { return e.T } - -// ReversedLine returns a new Edge with the F and T fields -// swapped. -func (e Edge) ReversedEdge() graph.Edge { return Edge{F: e.T, T: e.F} } - -// WeightedEdge is a simple weighted graph edge. 
-type WeightedEdge struct { - F, T graph.Node - W float64 -} - -// From returns the from-node of the edge. -func (e WeightedEdge) From() graph.Node { return e.F } - -// To returns the to-node of the edge. -func (e WeightedEdge) To() graph.Node { return e.T } - -// ReversedLine returns a new Edge with the F and T fields -// swapped. The weight of the new Edge is the same as -// the weight of the receiver. -func (e WeightedEdge) ReversedEdge() graph.Edge { return WeightedEdge{F: e.T, T: e.F, W: e.W} } - -// Weight returns the weight of the edge. -func (e WeightedEdge) Weight() float64 { return e.W } - -// isSame returns whether two float64 values are the same where NaN values -// are equalable. -func isSame(a, b float64) bool { - return a == b || (math.IsNaN(a) && math.IsNaN(b)) -} - -type edgeSetter interface { - SetEdge(e graph.Edge) -} - -type weightedEdgeSetter interface { - SetWeightedEdge(e graph.WeightedEdge) -} diff --git a/vendor/gonum.org/v1/gonum/graph/simple/undirected.go b/vendor/gonum.org/v1/gonum/graph/simple/undirected.go deleted file mode 100644 index 841a8e380..000000000 --- a/vendor/gonum.org/v1/gonum/graph/simple/undirected.go +++ /dev/null @@ -1,216 +0,0 @@ -// Copyright ©2014 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package simple - -import ( - "fmt" - - "gonum.org/v1/gonum/graph" - "gonum.org/v1/gonum/graph/internal/uid" - "gonum.org/v1/gonum/graph/iterator" -) - -var ( - ug *UndirectedGraph - - _ graph.Graph = ug - _ graph.Undirected = ug - _ graph.NodeAdder = ug - _ graph.NodeRemover = ug - _ graph.EdgeAdder = ug - _ graph.EdgeRemover = ug -) - -// UndirectedGraph implements a generalized undirected graph. -type UndirectedGraph struct { - nodes map[int64]graph.Node - edges map[int64]map[int64]graph.Edge - - nodeIDs uid.Set -} - -// NewUndirectedGraph returns an UndirectedGraph. -func NewUndirectedGraph() *UndirectedGraph { - return &UndirectedGraph{ - nodes: make(map[int64]graph.Node), - edges: make(map[int64]map[int64]graph.Edge), - - nodeIDs: uid.NewSet(), - } -} - -// AddNode adds n to the graph. It panics if the added node ID matches an existing node ID. -func (g *UndirectedGraph) AddNode(n graph.Node) { - if _, exists := g.nodes[n.ID()]; exists { - panic(fmt.Sprintf("simple: node ID collision: %d", n.ID())) - } - g.nodes[n.ID()] = n - g.edges[n.ID()] = make(map[int64]graph.Edge) - g.nodeIDs.Use(n.ID()) -} - -// Edge returns the edge from u to v if such an edge exists and nil otherwise. -// The node v must be directly reachable from u as defined by the From method. -func (g *UndirectedGraph) Edge(uid, vid int64) graph.Edge { - return g.EdgeBetween(uid, vid) -} - -// EdgeBetween returns the edge between nodes x and y. -func (g *UndirectedGraph) EdgeBetween(xid, yid int64) graph.Edge { - edge, ok := g.edges[xid][yid] - if !ok { - return nil - } - if edge.From().ID() == xid { - return edge - } - return edge.ReversedEdge() -} - -// Edges returns all the edges in the graph. 
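// The corresponding sketch for the map-backed UndirectedGraph above
// (illustrative only):
//
//	g := simple.NewUndirectedGraph()
//	g.SetEdge(g.NewEdge(simple.Node(1), simple.Node(2)))
//	ok := g.HasEdgeBetween(2, 1) // true; an undirected edge is queryable from either end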
-func (g *UndirectedGraph) Edges() graph.Edges { - if len(g.edges) == 0 { - return graph.Empty - } - var edges []graph.Edge - seen := make(map[[2]int64]struct{}) - for _, u := range g.edges { - for _, e := range u { - uid := e.From().ID() - vid := e.To().ID() - if _, ok := seen[[2]int64{uid, vid}]; ok { - continue - } - seen[[2]int64{uid, vid}] = struct{}{} - seen[[2]int64{vid, uid}] = struct{}{} - edges = append(edges, e) - } - } - if len(edges) == 0 { - return graph.Empty - } - return iterator.NewOrderedEdges(edges) -} - -// From returns all nodes in g that can be reached directly from n. -func (g *UndirectedGraph) From(id int64) graph.Nodes { - if _, ok := g.nodes[id]; !ok { - return graph.Empty - } - - nodes := make([]graph.Node, len(g.edges[id])) - i := 0 - for from := range g.edges[id] { - nodes[i] = g.nodes[from] - i++ - } - if len(nodes) == 0 { - return graph.Empty - } - return iterator.NewOrderedNodes(nodes) -} - -// HasEdgeBetween returns whether an edge exists between nodes x and y. -func (g *UndirectedGraph) HasEdgeBetween(xid, yid int64) bool { - _, ok := g.edges[xid][yid] - return ok -} - -// NewEdge returns a new Edge from the source to the destination node. -func (g *UndirectedGraph) NewEdge(from, to graph.Node) graph.Edge { - return &Edge{F: from, T: to} -} - -// NewNode returns a new unique Node to be added to g. The Node's ID does -// not become valid in g until the Node is added to g. -func (g *UndirectedGraph) NewNode() graph.Node { - if len(g.nodes) == 0 { - return Node(0) - } - if int64(len(g.nodes)) == uid.Max { - panic("simple: cannot allocate node: no slot") - } - return Node(g.nodeIDs.NewID()) -} - -// Node returns the node with the given ID if it exists in the graph, -// and nil otherwise. -func (g *UndirectedGraph) Node(id int64) graph.Node { - return g.nodes[id] -} - -// Nodes returns all the nodes in the graph. -func (g *UndirectedGraph) Nodes() graph.Nodes { - if len(g.nodes) == 0 { - return graph.Empty - } - nodes := make([]graph.Node, len(g.nodes)) - i := 0 - for _, n := range g.nodes { - nodes[i] = n - i++ - } - return iterator.NewOrderedNodes(nodes) -} - -// RemoveEdge removes the edge with the given end IDs from the graph, leaving the terminal nodes. -// If the edge does not exist it is a no-op. -func (g *UndirectedGraph) RemoveEdge(fid, tid int64) { - if _, ok := g.nodes[fid]; !ok { - return - } - if _, ok := g.nodes[tid]; !ok { - return - } - - delete(g.edges[fid], tid) - delete(g.edges[tid], fid) -} - -// RemoveNode removes the node with the given ID from the graph, as well as any edges attached -// to it. If the node is not in the graph it is a no-op. -func (g *UndirectedGraph) RemoveNode(id int64) { - if _, ok := g.nodes[id]; !ok { - return - } - delete(g.nodes, id) - - for from := range g.edges[id] { - delete(g.edges[from], id) - } - delete(g.edges, id) - - g.nodeIDs.Release(id) -} - -// SetEdge adds e, an edge from one node to another. If the nodes do not exist, they are added -// and are set to the nodes of the edge otherwise. -// It will panic if the IDs of the e.From and e.To are equal. 
-func (g *UndirectedGraph) SetEdge(e graph.Edge) { - var ( - from = e.From() - fid = from.ID() - to = e.To() - tid = to.ID() - ) - - if fid == tid { - panic("simple: adding self edge") - } - - if _, ok := g.nodes[fid]; !ok { - g.AddNode(from) - } else { - g.nodes[fid] = from - } - if _, ok := g.nodes[tid]; !ok { - g.AddNode(to) - } else { - g.nodes[tid] = to - } - - g.edges[fid][tid] = e - g.edges[tid][fid] = e -} diff --git a/vendor/gonum.org/v1/gonum/graph/simple/weighted_directed.go b/vendor/gonum.org/v1/gonum/graph/simple/weighted_directed.go deleted file mode 100644 index 92bd2842f..000000000 --- a/vendor/gonum.org/v1/gonum/graph/simple/weighted_directed.go +++ /dev/null @@ -1,279 +0,0 @@ -// Copyright ©2014 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package simple - -import ( - "fmt" - - "gonum.org/v1/gonum/graph" - "gonum.org/v1/gonum/graph/internal/uid" - "gonum.org/v1/gonum/graph/iterator" -) - -var ( - wdg *WeightedDirectedGraph - - _ graph.Graph = wdg - _ graph.Weighted = wdg - _ graph.Directed = wdg - _ graph.WeightedDirected = wdg - _ graph.NodeAdder = wdg - _ graph.NodeRemover = wdg - _ graph.WeightedEdgeAdder = wdg - _ graph.EdgeRemover = wdg -) - -// WeightedDirectedGraph implements a generalized weighted directed graph. -type WeightedDirectedGraph struct { - nodes map[int64]graph.Node - from map[int64]map[int64]graph.WeightedEdge - to map[int64]map[int64]graph.WeightedEdge - - self, absent float64 - - nodeIDs uid.Set -} - -// NewWeightedDirectedGraph returns a WeightedDirectedGraph with the specified self and absent -// edge weight values. -func NewWeightedDirectedGraph(self, absent float64) *WeightedDirectedGraph { - return &WeightedDirectedGraph{ - nodes: make(map[int64]graph.Node), - from: make(map[int64]map[int64]graph.WeightedEdge), - to: make(map[int64]map[int64]graph.WeightedEdge), - - self: self, - absent: absent, - - nodeIDs: uid.NewSet(), - } -} - -// AddNode adds n to the graph. It panics if the added node ID matches an existing node ID. -func (g *WeightedDirectedGraph) AddNode(n graph.Node) { - if _, exists := g.nodes[n.ID()]; exists { - panic(fmt.Sprintf("simple: node ID collision: %d", n.ID())) - } - g.nodes[n.ID()] = n - g.from[n.ID()] = make(map[int64]graph.WeightedEdge) - g.to[n.ID()] = make(map[int64]graph.WeightedEdge) - g.nodeIDs.Use(n.ID()) -} - -// Edge returns the edge from u to v if such an edge exists and nil otherwise. -// The node v must be directly reachable from u as defined by the From method. -func (g *WeightedDirectedGraph) Edge(uid, vid int64) graph.Edge { - return g.WeightedEdge(uid, vid) -} - -// Edges returns all the edges in the graph. -func (g *WeightedDirectedGraph) Edges() graph.Edges { - var edges []graph.Edge - for _, u := range g.nodes { - for _, e := range g.from[u.ID()] { - edges = append(edges, e) - } - } - if len(edges) == 0 { - return graph.Empty - } - return iterator.NewOrderedEdges(edges) -} - -// From returns all nodes in g that can be reached directly from n. -func (g *WeightedDirectedGraph) From(id int64) graph.Nodes { - if _, ok := g.from[id]; !ok { - return graph.Empty - } - - from := make([]graph.Node, len(g.from[id])) - i := 0 - for vid := range g.from[id] { - from[i] = g.nodes[vid] - i++ - } - if len(from) == 0 { - return graph.Empty - } - return iterator.NewOrderedNodes(from) -} - -// HasEdgeBetween returns whether an edge exists between nodes x and y without -// considering direction. 
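// A hypothetical use of the WeightedDirectedGraph above (sketch only;
// the weight is arbitrary):
//
//	g := simple.NewWeightedDirectedGraph(0, math.Inf(1)) // self weight 0, absent weight +Inf
//	g.SetWeightedEdge(g.NewWeightedEdge(simple.Node(1), simple.Node(2), 3.5))
//	w, ok := g.Weight(1, 2) // 3.5, true
//	w, ok = g.Weight(2, 1)  // +Inf, false; direction matters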
-func (g *WeightedDirectedGraph) HasEdgeBetween(xid, yid int64) bool { - if _, ok := g.from[xid][yid]; ok { - return true - } - _, ok := g.from[yid][xid] - return ok -} - -// HasEdgeFromTo returns whether an edge exists in the graph from u to v. -func (g *WeightedDirectedGraph) HasEdgeFromTo(uid, vid int64) bool { - if _, ok := g.from[uid][vid]; !ok { - return false - } - return true -} - -// NewNode returns a new unique Node to be added to g. The Node's ID does -// not become valid in g until the Node is added to g. -func (g *WeightedDirectedGraph) NewNode() graph.Node { - if len(g.nodes) == 0 { - return Node(0) - } - if int64(len(g.nodes)) == uid.Max { - panic("simple: cannot allocate node: no slot") - } - return Node(g.nodeIDs.NewID()) -} - -// NewWeightedEdge returns a new weighted edge from the source to the destination node. -func (g *WeightedDirectedGraph) NewWeightedEdge(from, to graph.Node, weight float64) graph.WeightedEdge { - return &WeightedEdge{F: from, T: to, W: weight} -} - -// Node returns the node with the given ID if it exists in the graph, -// and nil otherwise. -func (g *WeightedDirectedGraph) Node(id int64) graph.Node { - return g.nodes[id] -} - -// Nodes returns all the nodes in the graph. -func (g *WeightedDirectedGraph) Nodes() graph.Nodes { - if len(g.from) == 0 { - return graph.Empty - } - nodes := make([]graph.Node, len(g.nodes)) - i := 0 - for _, n := range g.nodes { - nodes[i] = n - i++ - } - return iterator.NewOrderedNodes(nodes) -} - -// RemoveEdge removes the edge with the given end point IDs from the graph, leaving the terminal -// nodes. If the edge does not exist it is a no-op. -func (g *WeightedDirectedGraph) RemoveEdge(fid, tid int64) { - if _, ok := g.nodes[fid]; !ok { - return - } - if _, ok := g.nodes[tid]; !ok { - return - } - - delete(g.from[fid], tid) - delete(g.to[tid], fid) -} - -// RemoveNode removes the node with the given ID from the graph, as well as any edges attached -// to it. If the node is not in the graph it is a no-op. -func (g *WeightedDirectedGraph) RemoveNode(id int64) { - if _, ok := g.nodes[id]; !ok { - return - } - delete(g.nodes, id) - - for from := range g.from[id] { - delete(g.to[from], id) - } - delete(g.from, id) - - for to := range g.to[id] { - delete(g.from[to], id) - } - delete(g.to, id) - - g.nodeIDs.Release(id) -} - -// SetWeightedEdge adds a weighted edge from one node to another. If the nodes do not exist, they are added -// and are set to the nodes of the edge otherwise. -// It will panic if the IDs of the e.From and e.To are equal. -func (g *WeightedDirectedGraph) SetWeightedEdge(e graph.WeightedEdge) { - var ( - from = e.From() - fid = from.ID() - to = e.To() - tid = to.ID() - ) - - if fid == tid { - panic("simple: adding self edge") - } - - if _, ok := g.nodes[fid]; !ok { - g.AddNode(from) - } else { - g.nodes[fid] = from - } - if _, ok := g.nodes[tid]; !ok { - g.AddNode(to) - } else { - g.nodes[tid] = to - } - - g.from[fid][tid] = e - g.to[tid][fid] = e -} - -// To returns all nodes in g that can reach directly to n. -func (g *WeightedDirectedGraph) To(id int64) graph.Nodes { - if _, ok := g.from[id]; !ok { - return graph.Empty - } - - to := make([]graph.Node, len(g.to[id])) - i := 0 - for uid := range g.to[id] { - to[i] = g.nodes[uid] - i++ - } - if len(to) == 0 { - return graph.Empty - } - return iterator.NewOrderedNodes(to) -} - -// Weight returns the weight for the edge between x and y if Edge(x, y) returns a non-nil Edge. 
-// If x and y are the same node or there is no joining edge between the two nodes the weight -// value returned is either the graph's absent or self value. Weight returns true if an edge -// exists between x and y or if x and y have the same ID, false otherwise. -func (g *WeightedDirectedGraph) Weight(xid, yid int64) (w float64, ok bool) { - if xid == yid { - return g.self, true - } - if to, ok := g.from[xid]; ok { - if e, ok := to[yid]; ok { - return e.Weight(), true - } - } - return g.absent, false -} - -// WeightedEdge returns the weighted edge from u to v if such an edge exists and nil otherwise. -// The node v must be directly reachable from u as defined by the From method. -func (g *WeightedDirectedGraph) WeightedEdge(uid, vid int64) graph.WeightedEdge { - edge, ok := g.from[uid][vid] - if !ok { - return nil - } - return edge -} - -// WeightedEdges returns all the weighted edges in the graph. -func (g *WeightedDirectedGraph) WeightedEdges() graph.WeightedEdges { - var edges []graph.WeightedEdge - for _, u := range g.nodes { - for _, e := range g.from[u.ID()] { - edges = append(edges, e) - } - } - if len(edges) == 0 { - return graph.Empty - } - return iterator.NewOrderedWeightedEdges(edges) -} diff --git a/vendor/gonum.org/v1/gonum/graph/simple/weighted_undirected.go b/vendor/gonum.org/v1/gonum/graph/simple/weighted_undirected.go deleted file mode 100644 index 593257683..000000000 --- a/vendor/gonum.org/v1/gonum/graph/simple/weighted_undirected.go +++ /dev/null @@ -1,273 +0,0 @@ -// Copyright ©2014 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package simple - -import ( - "fmt" - - "gonum.org/v1/gonum/graph" - "gonum.org/v1/gonum/graph/internal/uid" - "gonum.org/v1/gonum/graph/iterator" -) - -var ( - wug *WeightedUndirectedGraph - - _ graph.Graph = wug - _ graph.Weighted = wug - _ graph.Undirected = wug - _ graph.WeightedUndirected = wug - _ graph.NodeAdder = wug - _ graph.NodeRemover = wug - _ graph.WeightedEdgeAdder = wug - _ graph.EdgeRemover = wug -) - -// WeightedUndirectedGraph implements a generalized weighted undirected graph. -type WeightedUndirectedGraph struct { - nodes map[int64]graph.Node - edges map[int64]map[int64]graph.WeightedEdge - - self, absent float64 - - nodeIDs uid.Set -} - -// NewWeightedUndirectedGraph returns an WeightedUndirectedGraph with the specified self and absent -// edge weight values. -func NewWeightedUndirectedGraph(self, absent float64) *WeightedUndirectedGraph { - return &WeightedUndirectedGraph{ - nodes: make(map[int64]graph.Node), - edges: make(map[int64]map[int64]graph.WeightedEdge), - - self: self, - absent: absent, - - nodeIDs: uid.NewSet(), - } -} - -// AddNode adds n to the graph. It panics if the added node ID matches an existing node ID. -func (g *WeightedUndirectedGraph) AddNode(n graph.Node) { - if _, exists := g.nodes[n.ID()]; exists { - panic(fmt.Sprintf("simple: node ID collision: %d", n.ID())) - } - g.nodes[n.ID()] = n - g.edges[n.ID()] = make(map[int64]graph.WeightedEdge) - g.nodeIDs.Use(n.ID()) -} - -// Edge returns the edge from u to v if such an edge exists and nil otherwise. -// The node v must be directly reachable from u as defined by the From method. -func (g *WeightedUndirectedGraph) Edge(uid, vid int64) graph.Edge { - return g.WeightedEdgeBetween(uid, vid) -} - -// EdgeBetween returns the edge between nodes x and y. 
-func (g *WeightedUndirectedGraph) EdgeBetween(xid, yid int64) graph.Edge { - return g.WeightedEdgeBetween(xid, yid) -} - -// Edges returns all the edges in the graph. -func (g *WeightedUndirectedGraph) Edges() graph.Edges { - if len(g.edges) == 0 { - return graph.Empty - } - var edges []graph.Edge - seen := make(map[[2]int64]struct{}) - for _, u := range g.edges { - for _, e := range u { - uid := e.From().ID() - vid := e.To().ID() - if _, ok := seen[[2]int64{uid, vid}]; ok { - continue - } - seen[[2]int64{uid, vid}] = struct{}{} - seen[[2]int64{vid, uid}] = struct{}{} - edges = append(edges, e) - } - } - if len(edges) == 0 { - return graph.Empty - } - return iterator.NewOrderedEdges(edges) -} - -// From returns all nodes in g that can be reached directly from n. -func (g *WeightedUndirectedGraph) From(id int64) graph.Nodes { - if _, ok := g.nodes[id]; !ok { - return graph.Empty - } - - nodes := make([]graph.Node, len(g.edges[id])) - i := 0 - for from := range g.edges[id] { - nodes[i] = g.nodes[from] - i++ - } - if len(nodes) == 0 { - return graph.Empty - } - return iterator.NewOrderedNodes(nodes) -} - -// HasEdgeBetween returns whether an edge exists between nodes x and y. -func (g *WeightedUndirectedGraph) HasEdgeBetween(xid, yid int64) bool { - _, ok := g.edges[xid][yid] - return ok -} - -// NewNode returns a new unique Node to be added to g. The Node's ID does -// not become valid in g until the Node is added to g. -func (g *WeightedUndirectedGraph) NewNode() graph.Node { - if len(g.nodes) == 0 { - return Node(0) - } - if int64(len(g.nodes)) == uid.Max { - panic("simple: cannot allocate node: no slot") - } - return Node(g.nodeIDs.NewID()) -} - -// NewWeightedEdge returns a new weighted edge from the source to the destination node. -func (g *WeightedUndirectedGraph) NewWeightedEdge(from, to graph.Node, weight float64) graph.WeightedEdge { - return &WeightedEdge{F: from, T: to, W: weight} -} - -// Node returns the node with the given ID if it exists in the graph, -// and nil otherwise. -func (g *WeightedUndirectedGraph) Node(id int64) graph.Node { - return g.nodes[id] -} - -// Nodes returns all the nodes in the graph. -func (g *WeightedUndirectedGraph) Nodes() graph.Nodes { - if len(g.nodes) == 0 { - return graph.Empty - } - nodes := make([]graph.Node, len(g.nodes)) - i := 0 - for _, n := range g.nodes { - nodes[i] = n - i++ - } - return iterator.NewOrderedNodes(nodes) -} - -// RemoveEdge removes the edge with the given end point IDs from the graph, leaving the terminal -// nodes. If the edge does not exist it is a no-op. -func (g *WeightedUndirectedGraph) RemoveEdge(fid, tid int64) { - if _, ok := g.nodes[fid]; !ok { - return - } - if _, ok := g.nodes[tid]; !ok { - return - } - - delete(g.edges[fid], tid) - delete(g.edges[tid], fid) -} - -// RemoveNode removes the node with the given ID from the graph, as well as any edges attached -// to it. If the node is not in the graph it is a no-op. -func (g *WeightedUndirectedGraph) RemoveNode(id int64) { - if _, ok := g.nodes[id]; !ok { - return - } - delete(g.nodes, id) - - for from := range g.edges[id] { - delete(g.edges[from], id) - } - delete(g.edges, id) - - g.nodeIDs.Release(id) -} - -// SetWeightedEdge adds a weighted edge from one node to another. If the nodes do not exist, they are added -// and are set to the nodes of the edge otherwise. -// It will panic if the IDs of the e.From and e.To are equal. 
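// The self/absent semantics of this type's Weight method, sketched
// with NaN as the absent sentinel; isSame earlier in the package
// treats NaN values as equal for exactly this use. Illustrative only:
//
//	g := simple.NewWeightedUndirectedGraph(0, math.NaN())
//	w, ok := g.Weight(3, 3) // 0, true: self queries return the self value
//	w, ok = g.Weight(1, 2)  // NaN, false: no joining edge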
-func (g *WeightedUndirectedGraph) SetWeightedEdge(e graph.WeightedEdge) { - var ( - from = e.From() - fid = from.ID() - to = e.To() - tid = to.ID() - ) - - if fid == tid { - panic("simple: adding self edge") - } - - if _, ok := g.nodes[fid]; !ok { - g.AddNode(from) - } else { - g.nodes[fid] = from - } - if _, ok := g.nodes[tid]; !ok { - g.AddNode(to) - } else { - g.nodes[tid] = to - } - - g.edges[fid][tid] = e - g.edges[tid][fid] = e -} - -// Weight returns the weight for the edge between x and y if Edge(x, y) returns a non-nil Edge. -// If x and y are the same node or there is no joining edge between the two nodes the weight -// value returned is either the graph's absent or self value. Weight returns true if an edge -// exists between x and y or if x and y have the same ID, false otherwise. -func (g *WeightedUndirectedGraph) Weight(xid, yid int64) (w float64, ok bool) { - if xid == yid { - return g.self, true - } - if n, ok := g.edges[xid]; ok { - if e, ok := n[yid]; ok { - return e.Weight(), true - } - } - return g.absent, false -} - -// WeightedEdge returns the weighted edge from u to v if such an edge exists and nil otherwise. -// The node v must be directly reachable from u as defined by the From method. -func (g *WeightedUndirectedGraph) WeightedEdge(uid, vid int64) graph.WeightedEdge { - return g.WeightedEdgeBetween(uid, vid) -} - -// WeightedEdgeBetween returns the weighted edge between nodes x and y. -func (g *WeightedUndirectedGraph) WeightedEdgeBetween(xid, yid int64) graph.WeightedEdge { - edge, ok := g.edges[xid][yid] - if !ok { - return nil - } - if edge.From().ID() == xid { - return edge - } - return edge.ReversedEdge().(graph.WeightedEdge) -} - -// WeightedEdges returns all the weighted edges in the graph. -func (g *WeightedUndirectedGraph) WeightedEdges() graph.WeightedEdges { - var edges []graph.WeightedEdge - seen := make(map[[2]int64]struct{}) - for _, u := range g.edges { - for _, e := range u { - uid := e.From().ID() - vid := e.To().ID() - if _, ok := seen[[2]int64{uid, vid}]; ok { - continue - } - seen[[2]int64{uid, vid}] = struct{}{} - seen[[2]int64{vid, uid}] = struct{}{} - edges = append(edges, e) - } - } - if len(edges) == 0 { - return graph.Empty - } - return iterator.NewOrderedWeightedEdges(edges) -} diff --git a/vendor/gonum.org/v1/gonum/graph/topo/bron_kerbosch.go b/vendor/gonum.org/v1/gonum/graph/topo/bron_kerbosch.go deleted file mode 100644 index 83fdb5bdf..000000000 --- a/vendor/gonum.org/v1/gonum/graph/topo/bron_kerbosch.go +++ /dev/null @@ -1,250 +0,0 @@ -// Copyright ©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package topo - -import ( - "gonum.org/v1/gonum/graph" - "gonum.org/v1/gonum/graph/internal/ordered" - "gonum.org/v1/gonum/graph/internal/set" -) - -// DegeneracyOrdering returns the degeneracy ordering and the k-cores of -// the undirected graph g. -func DegeneracyOrdering(g graph.Undirected) (order []graph.Node, cores [][]graph.Node) { - order, offsets := degeneracyOrdering(g) - - ordered.Reverse(order) - cores = make([][]graph.Node, len(offsets)) - offset := len(order) - for i, n := range offsets { - cores[i] = order[offset-n : offset] - offset -= n - } - return order, cores -} - -// KCore returns the k-core of the undirected graph g with nodes in an -// optimal ordering for the coloring number. 
-func KCore(k int, g graph.Undirected) []graph.Node { - order, offsets := degeneracyOrdering(g) - - var offset int - for _, n := range offsets[:k] { - offset += n - } - core := make([]graph.Node, len(order)-offset) - copy(core, order[offset:]) - return core -} - -// degeneracyOrdering is the common code for DegeneracyOrdering and KCore. It -// returns l, the nodes of g in optimal ordering for coloring number and -// s, a set of relative offsets into l for each k-core, where k is an index -// into s. -func degeneracyOrdering(g graph.Undirected) (l []graph.Node, s []int) { - nodes := graph.NodesOf(g.Nodes()) - - // The algorithm used here is essentially as described at - // http://en.wikipedia.org/w/index.php?title=Degeneracy_%28graph_theory%29&oldid=640308710 - - // Initialize an output list L in return parameters. - - // Compute a number d_v for each vertex v in G, - // the number of neighbors of v that are not already in L. - // Initially, these numbers are just the degrees of the vertices. - dv := make(map[int64]int, len(nodes)) - var ( - maxDegree int - neighbours = make(map[int64][]graph.Node) - ) - for _, n := range nodes { - id := n.ID() - adj := graph.NodesOf(g.From(id)) - neighbours[id] = adj - dv[id] = len(adj) - if len(adj) > maxDegree { - maxDegree = len(adj) - } - } - - // Initialize an array D such that D[i] contains a list of the - // vertices v that are not already in L for which d_v = i. - d := make([][]graph.Node, maxDegree+1) - for _, n := range nodes { - deg := dv[n.ID()] - d[deg] = append(d[deg], n) - } - - // Initialize k to 0. - k := 0 - // Repeat n times: - s = []int{0} - for range nodes { - // Scan the array cells D[0], D[1], ... until - // finding an i for which D[i] is nonempty. - var ( - i int - di []graph.Node - ) - for i, di = range d { - if len(di) != 0 { - break - } - } - - // Set k to max(k,i). - if i > k { - k = i - s = append(s, make([]int, k-len(s)+1)...) - } - - // Select a vertex v from D[i]. Add v to the - // beginning of L and remove it from D[i]. - var v graph.Node - v, d[i] = di[len(di)-1], di[:len(di)-1] - l = append(l, v) - s[k]++ - delete(dv, v.ID()) - - // For each neighbor w of v not already in L, - // subtract one from d_w and move w to the - // cell of D corresponding to the new value of d_w. - for _, w := range neighbours[v.ID()] { - dw, ok := dv[w.ID()] - if !ok { - continue - } - for i, n := range d[dw] { - if n.ID() == w.ID() { - d[dw][i], d[dw] = d[dw][len(d[dw])-1], d[dw][:len(d[dw])-1] - dw-- - d[dw] = append(d[dw], w) - break - } - } - dv[w.ID()] = dw - } - } - - return l, s -} - -// BronKerbosch returns the set of maximal cliques of the undirected graph g. 
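// Hypothetical calls tying together the entry points above and below
// (g is any graph.Undirected; sketch only):
//
//	order, cores := topo.DegeneracyOrdering(g) // degeneracy ordering plus the k-cores
//	twoCore := topo.KCore(2, g)                // nodes of the 2-core
//	cliques := topo.BronKerbosch(g)            // all maximal cliques
//	cg := simple.NewUndirectedGraph()
//	topo.CliqueGraph(cg, g)                    // clique graph of g, per clique_graph.go below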
-func BronKerbosch(g graph.Undirected) [][]graph.Node { - nodes := graph.NodesOf(g.Nodes()) - - // The algorithm used here is essentially BronKerbosch3 as described at - // http://en.wikipedia.org/w/index.php?title=Bron%E2%80%93Kerbosch_algorithm&oldid=656805858 - - p := set.NewNodesSize(len(nodes)) - for _, n := range nodes { - p.Add(n) - } - x := set.NewNodes() - var bk bronKerbosch - order, _ := degeneracyOrdering(g) - ordered.Reverse(order) - for _, v := range order { - neighbours := graph.NodesOf(g.From(v.ID())) - nv := set.NewNodesSize(len(neighbours)) - for _, n := range neighbours { - nv.Add(n) - } - bk.maximalCliquePivot(g, []graph.Node{v}, set.IntersectionOfNodes(p, nv), set.IntersectionOfNodes(x, nv)) - p.Remove(v) - x.Add(v) - } - return bk -} - -type bronKerbosch [][]graph.Node - -func (bk *bronKerbosch) maximalCliquePivot(g graph.Undirected, r []graph.Node, p, x set.Nodes) { - if len(p) == 0 && len(x) == 0 { - *bk = append(*bk, r) - return - } - - neighbours := bk.choosePivotFrom(g, p, x) - nu := set.NewNodesSize(len(neighbours)) - for _, n := range neighbours { - nu.Add(n) - } - for _, v := range p { - if nu.Has(v) { - continue - } - vid := v.ID() - neighbours := graph.NodesOf(g.From(vid)) - nv := set.NewNodesSize(len(neighbours)) - for _, n := range neighbours { - nv.Add(n) - } - - var found bool - for _, n := range r { - if n.ID() == vid { - found = true - break - } - } - var sr []graph.Node - if !found { - sr = append(r[:len(r):len(r)], v) - } - - bk.maximalCliquePivot(g, sr, set.IntersectionOfNodes(p, nv), set.IntersectionOfNodes(x, nv)) - p.Remove(v) - x.Add(v) - } -} - -func (*bronKerbosch) choosePivotFrom(g graph.Undirected, p, x set.Nodes) (neighbors []graph.Node) { - // TODO(kortschak): Investigate the impact of pivot choice that maximises - // |p ⋂ neighbours(u)| as a function of input size. Until then, leave as - // compile time option. - if !tomitaTanakaTakahashi { - for _, n := range p { - return graph.NodesOf(g.From(n.ID())) - } - for _, n := range x { - return graph.NodesOf(g.From(n.ID())) - } - panic("bronKerbosch: empty set") - } - - var ( - max = -1 - pivot graph.Node - ) - maxNeighbors := func(s set.Nodes) { - outer: - for _, u := range s { - nb := graph.NodesOf(g.From(u.ID())) - c := len(nb) - if c <= max { - continue - } - for n := range nb { - if _, ok := p[int64(n)]; ok { - continue - } - c-- - if c <= max { - continue outer - } - } - max = c - pivot = u - neighbors = nb - } - } - maxNeighbors(p) - maxNeighbors(x) - if pivot == nil { - panic("bronKerbosch: empty set") - } - return neighbors -} diff --git a/vendor/gonum.org/v1/gonum/graph/topo/clique_graph.go b/vendor/gonum.org/v1/gonum/graph/topo/clique_graph.go deleted file mode 100644 index 28f1b96ee..000000000 --- a/vendor/gonum.org/v1/gonum/graph/topo/clique_graph.go +++ /dev/null @@ -1,111 +0,0 @@ -// Copyright ©2017 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package topo - -import ( - "sort" - - "gonum.org/v1/gonum/graph" - "gonum.org/v1/gonum/graph/internal/ordered" - "gonum.org/v1/gonum/graph/internal/set" -) - -// Builder is a pure topological graph construction type. -type Builder interface { - AddNode(graph.Node) - SetEdge(graph.Edge) -} - -// CliqueGraph builds the clique graph of g in dst using Clique and CliqueGraphEdge -// nodes and edges. 
The nodes returned by calls to Nodes on the nodes and edges of -// the constructed graph are the cliques and the common nodes between cliques -// respectively. The dst graph is not cleared. -func CliqueGraph(dst Builder, g graph.Undirected) { - cliques := BronKerbosch(g) - - // Construct a consistent view of cliques in g. Sorting costs - // us a little, but not as much as the cliques themselves. - for _, c := range cliques { - sort.Sort(ordered.ByID(c)) - } - sort.Sort(ordered.BySliceIDs(cliques)) - - cliqueNodes := make(cliqueNodeSets, len(cliques)) - for id, c := range cliques { - s := set.NewNodesSize(len(c)) - for _, n := range c { - s.Add(n) - } - ns := &nodeSet{Clique: Clique{id: int64(id), nodes: c}, nodes: s} - dst.AddNode(ns.Clique) - for _, n := range c { - nid := n.ID() - cliqueNodes[nid] = append(cliqueNodes[nid], ns) - } - } - - for _, cliques := range cliqueNodes { - for i, uc := range cliques { - for _, vc := range cliques[i+1:] { - // Retain the nodes that contribute to the - // edge between the cliques. - var edgeNodes []graph.Node - switch 1 { - case len(uc.Clique.nodes): - edgeNodes = []graph.Node{uc.Clique.nodes[0]} - case len(vc.Clique.nodes): - edgeNodes = []graph.Node{vc.Clique.nodes[0]} - default: - for _, n := range set.IntersectionOfNodes(uc.nodes, vc.nodes) { - edgeNodes = append(edgeNodes, n) - } - sort.Sort(ordered.ByID(edgeNodes)) - } - - dst.SetEdge(CliqueGraphEdge{from: uc.Clique, to: vc.Clique, nodes: edgeNodes}) - } - } - } -} - -type cliqueNodeSets map[int64][]*nodeSet - -type nodeSet struct { - Clique - nodes set.Nodes -} - -// Clique is a node in a clique graph. -type Clique struct { - id int64 - nodes []graph.Node -} - -// ID returns the node ID. -func (n Clique) ID() int64 { return n.id } - -// Nodes returns the nodes in the clique. -func (n Clique) Nodes() []graph.Node { return n.nodes } - -// CliqueGraphEdge is an edge in a clique graph. -type CliqueGraphEdge struct { - from, to Clique - nodes []graph.Node -} - -// From returns the from node of the edge. -func (e CliqueGraphEdge) From() graph.Node { return e.from } - -// To returns the to node of the edge. -func (e CliqueGraphEdge) To() graph.Node { return e.to } - -// ReversedEdge returns a new CliqueGraphEdge with -// the edge end points swapped. The nodes of the -// new edge are shared with the receiver. -func (e CliqueGraphEdge) ReversedEdge() graph.Edge { e.from, e.to = e.to, e.from; return e } - -// Nodes returns the common nodes in the cliques of the underlying graph -// corresponding to the from and to nodes in the clique graph. -func (e CliqueGraphEdge) Nodes() []graph.Node { return e.nodes } diff --git a/vendor/gonum.org/v1/gonum/graph/topo/doc.go b/vendor/gonum.org/v1/gonum/graph/topo/doc.go deleted file mode 100644 index cbcdff1e7..000000000 --- a/vendor/gonum.org/v1/gonum/graph/topo/doc.go +++ /dev/null @@ -1,6 +0,0 @@ -// Copyright ©2017 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package topo provides graph topology analysis functions. -package topo // import "gonum.org/v1/gonum/graph/topo" diff --git a/vendor/gonum.org/v1/gonum/graph/topo/johnson_cycles.go b/vendor/gonum.org/v1/gonum/graph/topo/johnson_cycles.go deleted file mode 100644 index 8a78ba2f3..000000000 --- a/vendor/gonum.org/v1/gonum/graph/topo/johnson_cycles.go +++ /dev/null @@ -1,285 +0,0 @@ -// Copyright ©2015 The Gonum Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package topo - -import ( - "sort" - - "gonum.org/v1/gonum/graph" - "gonum.org/v1/gonum/graph/internal/ordered" - "gonum.org/v1/gonum/graph/internal/set" - "gonum.org/v1/gonum/graph/iterator" -) - -// johnson implements Johnson's "Finding all the elementary -// circuits of a directed graph" algorithm. SIAM J. Comput. 4(1):1975. -// -// Comments in the johnson methods are kept in sync with the comments -// and labels from the paper. -type johnson struct { - adjacent johnsonGraph // SCC adjacency list. - b []set.Ints // Johnson's "B-list". - blocked []bool - s int - - stack []graph.Node - - result [][]graph.Node -} - -// DirectedCyclesIn returns the set of elementary cycles in the graph g. -func DirectedCyclesIn(g graph.Directed) [][]graph.Node { - jg := johnsonGraphFrom(g) - j := johnson{ - adjacent: jg, - b: make([]set.Ints, len(jg.orig)), - blocked: make([]bool, len(jg.orig)), - } - - // len(j.nodes) is the order of g. - for j.s < len(j.adjacent.orig)-1 { - // We use the previous SCC adjacency to reduce the work needed. - sccs := TarjanSCC(j.adjacent.subgraph(j.s)) - // A_k = adjacency structure of strong component K with least - // vertex in subgraph of G induced by {s, s+1, ... ,n}. - j.adjacent = j.adjacent.sccSubGraph(sccs, 2) // Only allow SCCs with >= 2 vertices. - if j.adjacent.order() == 0 { - break - } - - // s = least vertex in V_k - if s := j.adjacent.leastVertexIndex(); s < j.s { - j.s = s - } - for i, v := range j.adjacent.orig { - if !j.adjacent.nodes.Has(v.ID()) { - continue - } - if len(j.adjacent.succ[v.ID()]) > 0 { - j.blocked[i] = false - j.b[i] = make(set.Ints) - } - } - //L3: - _ = j.circuit(j.s) - j.s++ - } - - return j.result -} - -// circuit is the CIRCUIT sub-procedure in the paper. -func (j *johnson) circuit(v int) bool { - f := false - n := j.adjacent.orig[v] - j.stack = append(j.stack, n) - j.blocked[v] = true - - //L1: - for w := range j.adjacent.succ[n.ID()] { - w := j.adjacent.indexOf(w) - if w == j.s { - // Output circuit composed of stack followed by s. - r := make([]graph.Node, len(j.stack)+1) - copy(r, j.stack) - r[len(r)-1] = j.adjacent.orig[j.s] - j.result = append(j.result, r) - f = true - } else if !j.blocked[w] { - if j.circuit(w) { - f = true - } - } - } - - //L2: - if f { - j.unblock(v) - } else { - for w := range j.adjacent.succ[n.ID()] { - j.b[j.adjacent.indexOf(w)].Add(v) - } - } - j.stack = j.stack[:len(j.stack)-1] - - return f -} - -// unblock is the UNBLOCK sub-procedure in the paper. -func (j *johnson) unblock(u int) { - j.blocked[u] = false - for w := range j.b[u] { - j.b[u].Remove(w) - if j.blocked[w] { - j.unblock(w) - } - } -} - -// johnsonGraph is an edge list representation of a graph with helpers -// necessary for Johnson's algorithm -type johnsonGraph struct { - // Keep the original graph nodes and a - // look-up to into the non-sparse - // collection of potentially sparse IDs. - orig []graph.Node - index map[int64]int - - nodes set.Int64s - succ map[int64]set.Int64s -} - -// johnsonGraphFrom returns a deep copy of the graph g. 
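// A sketch of the exported entry point of this file (g is any
// graph.Directed; illustrative only):
//
//	cycles := topo.DirectedCyclesIn(g)
//	// Each elementary cycle is returned with its starting node
//	// repeated at the end, as constructed in circuit above.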
-func johnsonGraphFrom(g graph.Directed) johnsonGraph { - nodes := graph.NodesOf(g.Nodes()) - sort.Sort(ordered.ByID(nodes)) - c := johnsonGraph{ - orig: nodes, - index: make(map[int64]int, len(nodes)), - - nodes: make(set.Int64s, len(nodes)), - succ: make(map[int64]set.Int64s), - } - for i, u := range nodes { - uid := u.ID() - c.index[uid] = i - for _, v := range graph.NodesOf(g.From(uid)) { - if c.succ[uid] == nil { - c.succ[uid] = make(set.Int64s) - c.nodes.Add(uid) - } - c.nodes.Add(v.ID()) - c.succ[uid].Add(v.ID()) - } - } - return c -} - -// order returns the order of the graph. -func (g johnsonGraph) order() int { return g.nodes.Count() } - -// indexOf returns the index of the retained node for the given node ID. -func (g johnsonGraph) indexOf(id int64) int { - return g.index[id] -} - -// leastVertexIndex returns the index into orig of the least vertex. -func (g johnsonGraph) leastVertexIndex() int { - for _, v := range g.orig { - if g.nodes.Has(v.ID()) { - return g.indexOf(v.ID()) - } - } - panic("johnsonCycles: empty set") -} - -// subgraph returns a subgraph of g induced by {s, s+1, ... , n}. The -// subgraph is destructively generated in g. -func (g johnsonGraph) subgraph(s int) johnsonGraph { - sn := g.orig[s].ID() - for u, e := range g.succ { - if u < sn { - g.nodes.Remove(u) - delete(g.succ, u) - continue - } - for v := range e { - if v < sn { - g.succ[u].Remove(v) - } - } - } - return g -} - -// sccSubGraph returns the graph of the tarjan's strongly connected -// components with each SCC containing at least min vertices. -// sccSubGraph returns nil if there is no SCC with at least min -// members. -func (g johnsonGraph) sccSubGraph(sccs [][]graph.Node, min int) johnsonGraph { - if len(g.nodes) == 0 { - g.nodes = nil - g.succ = nil - return g - } - sub := johnsonGraph{ - orig: g.orig, - index: g.index, - nodes: make(set.Int64s), - succ: make(map[int64]set.Int64s), - } - - var n int - for _, scc := range sccs { - if len(scc) < min { - continue - } - n++ - for _, u := range scc { - for _, v := range scc { - if _, ok := g.succ[u.ID()][v.ID()]; ok { - if sub.succ[u.ID()] == nil { - sub.succ[u.ID()] = make(set.Int64s) - sub.nodes.Add(u.ID()) - } - sub.nodes.Add(v.ID()) - sub.succ[u.ID()].Add(v.ID()) - } - } - } - } - if n == 0 { - g.nodes = nil - g.succ = nil - return g - } - - return sub -} - -// Nodes is required to satisfy Tarjan. -func (g johnsonGraph) Nodes() graph.Nodes { - n := make([]graph.Node, 0, len(g.nodes)) - for id := range g.nodes { - n = append(n, johnsonGraphNode(id)) - } - return iterator.NewOrderedNodes(n) -} - -// Successors is required to satisfy Tarjan. 
-func (g johnsonGraph) From(id int64) graph.Nodes { - adj := g.succ[id] - if len(adj) == 0 { - return graph.Empty - } - succ := make([]graph.Node, 0, len(adj)) - for id := range adj { - succ = append(succ, johnsonGraphNode(id)) - } - return iterator.NewOrderedNodes(succ) -} - -func (johnsonGraph) Has(int64) bool { - panic("topo: unintended use of johnsonGraph") -} -func (johnsonGraph) Node(int64) graph.Node { - panic("topo: unintended use of johnsonGraph") -} -func (johnsonGraph) HasEdgeBetween(_, _ int64) bool { - panic("topo: unintended use of johnsonGraph") -} -func (johnsonGraph) Edge(_, _ int64) graph.Edge { - panic("topo: unintended use of johnsonGraph") -} -func (johnsonGraph) HasEdgeFromTo(_, _ int64) bool { - panic("topo: unintended use of johnsonGraph") -} -func (johnsonGraph) To(int64) graph.Nodes { - panic("topo: unintended use of johnsonGraph") -} - -type johnsonGraphNode int64 - -func (n johnsonGraphNode) ID() int64 { return int64(n) } diff --git a/vendor/gonum.org/v1/gonum/graph/topo/non_tomita_choice.go b/vendor/gonum.org/v1/gonum/graph/topo/non_tomita_choice.go deleted file mode 100644 index 36171d6fe..000000000 --- a/vendor/gonum.org/v1/gonum/graph/topo/non_tomita_choice.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright ©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !tomita - -package topo - -const tomitaTanakaTakahashi = false diff --git a/vendor/gonum.org/v1/gonum/graph/topo/paton_cycles.go b/vendor/gonum.org/v1/gonum/graph/topo/paton_cycles.go deleted file mode 100644 index 44b362a6f..000000000 --- a/vendor/gonum.org/v1/gonum/graph/topo/paton_cycles.go +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright ©2017 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package topo - -import ( - "gonum.org/v1/gonum/graph" - "gonum.org/v1/gonum/graph/internal/linear" - "gonum.org/v1/gonum/graph/internal/set" -) - -// UndirectedCyclesIn returns a set of cycles that forms a cycle basis in the graph g. -// Any cycle in g can be constructed as a symmetric difference of its elements. 
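// A sketch of the undirected counterpart described above (g is any
// graph.Undirected; illustrative only):
//
//	basis := topo.UndirectedCyclesIn(g)
//	// Any cycle in g is a symmetric difference of members of basis.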
-func UndirectedCyclesIn(g graph.Undirected) [][]graph.Node { - // From "An algorithm for finding a fundamental set of cycles of a graph" - // https://doi.org/10.1145/363219.363232 - - var cycles [][]graph.Node - done := make(set.Int64s) - var tree linear.NodeStack - nodes := g.Nodes() - for nodes.Next() { - n := nodes.Node() - id := n.ID() - if done.Has(id) { - continue - } - done.Add(id) - - tree = tree[:0] - tree.Push(n) - from := sets{id: set.Int64s{}} - to := map[int64]graph.Node{id: n} - - for tree.Len() != 0 { - u := tree.Pop() - uid := u.ID() - adj := from[uid] - for _, v := range graph.NodesOf(g.From(uid)) { - vid := v.ID() - switch { - case uid == vid: - cycles = append(cycles, []graph.Node{u}) - case !from.has(vid): - done.Add(vid) - to[vid] = u - tree.Push(v) - from.add(uid, vid) - case !adj.Has(vid): - c := []graph.Node{v, u} - adj := from[vid] - p := to[uid] - for !adj.Has(p.ID()) { - c = append(c, p) - p = to[p.ID()] - } - c = append(c, p, c[0]) - cycles = append(cycles, c) - adj.Add(uid) - } - } - } - } - - return cycles -} - -type sets map[int64]set.Int64s - -func (s sets) add(uid, vid int64) { - e, ok := s[vid] - if !ok { - e = make(set.Int64s) - s[vid] = e - } - e.Add(uid) -} - -func (s sets) has(uid int64) bool { - _, ok := s[uid] - return ok -} diff --git a/vendor/gonum.org/v1/gonum/graph/topo/tarjan.go b/vendor/gonum.org/v1/gonum/graph/topo/tarjan.go deleted file mode 100644 index 647129275..000000000 --- a/vendor/gonum.org/v1/gonum/graph/topo/tarjan.go +++ /dev/null @@ -1,199 +0,0 @@ -// Copyright ©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package topo - -import ( - "fmt" - "sort" - - "gonum.org/v1/gonum/graph" - "gonum.org/v1/gonum/graph/internal/ordered" - "gonum.org/v1/gonum/graph/internal/set" -) - -// Unorderable is an error containing sets of unorderable graph.Nodes. -type Unorderable [][]graph.Node - -// Error satisfies the error interface. -func (e Unorderable) Error() string { - const maxNodes = 10 - var n int - for _, c := range e { - n += len(c) - } - if n > maxNodes { - // Don't return errors that are too long. - return fmt.Sprintf("topo: no topological ordering: %d nodes in %d cyclic components", n, len(e)) - } - return fmt.Sprintf("topo: no topological ordering: cyclic components: %v", [][]graph.Node(e)) -} - -func lexical(nodes []graph.Node) { sort.Sort(ordered.ByID(nodes)) } - -// Sort performs a topological sort of the directed graph g returning the 'from' to 'to' -// sort order. If a topological ordering is not possible, an Unorderable error is returned -// listing cyclic components in g with each cyclic component's members sorted by ID. When -// an Unorderable error is returned, each cyclic component's topological position within -// the sorted nodes is marked with a nil graph.Node. -func Sort(g graph.Directed) (sorted []graph.Node, err error) { - sccs := TarjanSCC(g) - return sortedFrom(sccs, lexical) -} - -// SortStabilized performs a topological sort of the directed graph g returning the 'from' -// to 'to' sort order, or the order defined by the in place order sort function where there -// is no unambiguous topological ordering. If a topological ordering is not possible, an -// Unorderable error is returned listing cyclic components in g with each cyclic component's -// members sorted by the provided order function. If order is nil, nodes are ordered lexically -// by node ID. 
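// Hypothetical use of the sorting entry points of this file (g is any
// graph.Directed; sketch only):
//
//	sorted, err := topo.Sort(g)
//	if err != nil {
//		// err is an Unorderable; nil entries in sorted mark where
//		// each cyclic component sits in the ordering.
//	}
//	sccs := topo.TarjanSCC(g) // strongly connected components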
When an Unorderable error is returned, each cyclic component's topological -// position within the sorted nodes is marked with a nil graph.Node. -func SortStabilized(g graph.Directed, order func([]graph.Node)) (sorted []graph.Node, err error) { - if order == nil { - order = lexical - } - sccs := tarjanSCCstabilized(g, order) - return sortedFrom(sccs, order) -} - -func sortedFrom(sccs [][]graph.Node, order func([]graph.Node)) ([]graph.Node, error) { - sorted := make([]graph.Node, 0, len(sccs)) - var sc Unorderable - for _, s := range sccs { - if len(s) != 1 { - order(s) - sc = append(sc, s) - sorted = append(sorted, nil) - continue - } - sorted = append(sorted, s[0]) - } - var err error - if sc != nil { - for i, j := 0, len(sc)-1; i < j; i, j = i+1, j-1 { - sc[i], sc[j] = sc[j], sc[i] - } - err = sc - } - ordered.Reverse(sorted) - return sorted, err -} - -// TarjanSCC returns the strongly connected components of the graph g using Tarjan's algorithm. -// -// A strongly connected component of a graph is a set of vertices where it's possible to reach any -// vertex in the set from any other (meaning there's a cycle between them.) -// -// Generally speaking, a directed graph where the number of strongly connected components is equal -// to the number of nodes is acyclic, unless you count reflexive edges as a cycle (which requires -// only a little extra testing.) -// -func TarjanSCC(g graph.Directed) [][]graph.Node { - return tarjanSCCstabilized(g, nil) -} - -func tarjanSCCstabilized(g graph.Directed, order func([]graph.Node)) [][]graph.Node { - nodes := graph.NodesOf(g.Nodes()) - var succ func(id int64) []graph.Node - if order == nil { - succ = func(id int64) []graph.Node { - return graph.NodesOf(g.From(id)) - } - } else { - order(nodes) - ordered.Reverse(nodes) - - succ = func(id int64) []graph.Node { - to := graph.NodesOf(g.From(id)) - order(to) - ordered.Reverse(to) - return to - } - } - - t := tarjan{ - succ: succ, - - indexTable: make(map[int64]int, len(nodes)), - lowLink: make(map[int64]int, len(nodes)), - onStack: make(set.Int64s), - } - for _, v := range nodes { - if t.indexTable[v.ID()] == 0 { - t.strongconnect(v) - } - } - return t.sccs -} - -// tarjan implements Tarjan's strongly connected component finding -// algorithm. The implementation is from the pseudocode at -// -// http://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm?oldid=642744644 -// -type tarjan struct { - succ func(id int64) []graph.Node - - index int - indexTable map[int64]int - lowLink map[int64]int - onStack set.Int64s - - stack []graph.Node - - sccs [][]graph.Node -} - -// strongconnect is the strongconnect function described in the -// wikipedia article. -func (t *tarjan) strongconnect(v graph.Node) { - vID := v.ID() - - // Set the depth index for v to the smallest unused index. - t.index++ - t.indexTable[vID] = t.index - t.lowLink[vID] = t.index - t.stack = append(t.stack, v) - t.onStack.Add(vID) - - // Consider successors of v. - for _, w := range t.succ(vID) { - wID := w.ID() - if t.indexTable[wID] == 0 { - // Successor w has not yet been visited; recur on it. - t.strongconnect(w) - t.lowLink[vID] = min(t.lowLink[vID], t.lowLink[wID]) - } else if t.onStack.Has(wID) { - // Successor w is in stack s and hence in the current SCC. - t.lowLink[vID] = min(t.lowLink[vID], t.indexTable[wID]) - } - } - - // If v is a root node, pop the stack and generate an SCC. - if t.lowLink[vID] == t.indexTable[vID] { - // Start a new strongly connected component. 
- var ( - scc []graph.Node - w graph.Node - ) - for { - w, t.stack = t.stack[len(t.stack)-1], t.stack[:len(t.stack)-1] - t.onStack.Remove(w.ID()) - // Add w to current strongly connected component. - scc = append(scc, w) - if w.ID() == vID { - break - } - } - // Output the current strongly connected component. - t.sccs = append(t.sccs, scc) - } -} - -func min(a, b int) int { - if a < b { - return a - } - return b -} diff --git a/vendor/gonum.org/v1/gonum/graph/topo/tomita_choice.go b/vendor/gonum.org/v1/gonum/graph/topo/tomita_choice.go deleted file mode 100644 index f85a0d6c0..000000000 --- a/vendor/gonum.org/v1/gonum/graph/topo/tomita_choice.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright ©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build tomita - -package topo - -const tomitaTanakaTakahashi = true diff --git a/vendor/gonum.org/v1/gonum/graph/topo/topo.go b/vendor/gonum.org/v1/gonum/graph/topo/topo.go deleted file mode 100644 index bece61a6c..000000000 --- a/vendor/gonum.org/v1/gonum/graph/topo/topo.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright ©2014 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package topo - -import ( - "gonum.org/v1/gonum/graph" - "gonum.org/v1/gonum/graph/traverse" -) - -// IsPathIn returns whether path is a path in g. -// -// As special cases, IsPathIn returns true for a zero length path or for -// a path of length 1 when the node in path exists in the graph. -func IsPathIn(g graph.Graph, path []graph.Node) bool { - switch len(path) { - case 0: - return true - case 1: - return g.Node(path[0].ID()) != nil - default: - var canReach func(uid, vid int64) bool - switch g := g.(type) { - case graph.Directed: - canReach = g.HasEdgeFromTo - default: - canReach = g.HasEdgeBetween - } - - for i, u := range path[:len(path)-1] { - if !canReach(u.ID(), path[i+1].ID()) { - return false - } - } - return true - } -} - -// PathExistsIn returns whether there is a path in g starting at from extending -// to to. -// -// PathExistsIn exists as a helper function. If many tests for path existence -// are being performed, other approaches will be more efficient. -func PathExistsIn(g graph.Graph, from, to graph.Node) bool { - var t traverse.BreadthFirst - return t.Walk(g, from, func(n graph.Node, _ int) bool { return n.ID() == to.ID() }) != nil -} - -// ConnectedComponents returns the connected components of the undirected graph g. -func ConnectedComponents(g graph.Undirected) [][]graph.Node { - var ( - w traverse.DepthFirst - c []graph.Node - cc [][]graph.Node - ) - during := func(n graph.Node) { - c = append(c, n) - } - after := func() { - cc = append(cc, []graph.Node(nil)) - cc[len(cc)-1] = append(cc[len(cc)-1], c...) - c = c[:0] - } - w.WalkAll(g, nil, after, during) - - return cc -} diff --git a/vendor/gonum.org/v1/gonum/graph/traverse/doc.go b/vendor/gonum.org/v1/gonum/graph/traverse/doc.go deleted file mode 100644 index dc98bbf43..000000000 --- a/vendor/gonum.org/v1/gonum/graph/traverse/doc.go +++ /dev/null @@ -1,6 +0,0 @@ -// Copyright ©2017 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package traverse provides basic graph traversal primitives. 
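Reviewer note: the tarjan.go and topo.go hunks above drop gonum's strongly-connected-component and topological-sort helpers from the vendor tree. As a reference for what is being removed, here is a minimal sketch of driving that API, assuming the upstream gonum.org/v1/gonum module and its graph/simple implementation (the graph shape is illustrative):

package main

import (
	"fmt"

	"gonum.org/v1/gonum/graph/simple"
	"gonum.org/v1/gonum/graph/topo"
)

func main() {
	// 1 -> 2 -> 3 -> 1 is a cycle; 3 -> 4 is not part of one.
	g := simple.NewDirectedGraph()
	for _, e := range [][2]int64{{1, 2}, {2, 3}, {3, 1}, {3, 4}} {
		g.SetEdge(simple.Edge{F: simple.Node(e[0]), T: simple.Node(e[1])})
	}

	// TarjanSCC groups {1, 2, 3} into one component and {4} into its own.
	for _, scc := range topo.TarjanSCC(g) {
		fmt.Println(scc)
	}

	// Sort fails with an Unorderable error because of the cycle; the cyclic
	// component's position in the returned order is marked with a nil node.
	if _, err := topo.Sort(g); err != nil {
		fmt.Println(err)
	}
}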
-package traverse // import "gonum.org/v1/gonum/graph/traverse" diff --git a/vendor/gonum.org/v1/gonum/graph/traverse/traverse.go b/vendor/gonum.org/v1/gonum/graph/traverse/traverse.go deleted file mode 100644 index 125b16114..000000000 --- a/vendor/gonum.org/v1/gonum/graph/traverse/traverse.go +++ /dev/null @@ -1,231 +0,0 @@ -// Copyright ©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package traverse - -import ( - "gonum.org/v1/gonum/graph" - "gonum.org/v1/gonum/graph/internal/linear" - "gonum.org/v1/gonum/graph/internal/set" -) - -var _ Graph = graph.Graph(nil) - -// Graph is the subset of graph.Graph necessary for graph traversal. -type Graph interface { - // From returns all nodes that can be reached directly - // from the node with the given ID. - From(id int64) graph.Nodes - - // Edge returns the edge from u to v, with IDs uid and vid, - // if such an edge exists and nil otherwise. The node v - // must be directly reachable from u as defined by - // the From method. - Edge(uid, vid int64) graph.Edge -} - -// BreadthFirst implements stateful breadth-first graph traversal. -type BreadthFirst struct { - // Visit is called on all nodes on their first visit. - Visit func(graph.Node) - - // Traverse is called on all edges that may be traversed - // during the walk. This includes edges that would hop to - // an already visited node. - // - // The value returned by Traverse determines whether - // an edge can be traversed during the walk. - Traverse func(graph.Edge) bool - - queue linear.NodeQueue - visited set.Int64s -} - -// Walk performs a breadth-first traversal of the graph g starting from the given node, -// depending on the Traverse field and the until parameter if they are non-nil. -// The traversal follows edges for which Traverse(edge) is true and returns the first node -// for which until(node, depth) is true. During the traversal, if the Visit field is -// non-nil, it is called with each node the first time it is visited. -func (b *BreadthFirst) Walk(g Graph, from graph.Node, until func(n graph.Node, d int) bool) graph.Node { - if b.visited == nil { - b.visited = make(set.Int64s) - } - b.queue.Enqueue(from) - if b.Visit != nil && !b.visited.Has(from.ID()) { - b.Visit(from) - } - b.visited.Add(from.ID()) - - var ( - depth int - children int - untilNext = 1 - ) - for b.queue.Len() > 0 { - t := b.queue.Dequeue() - if until != nil && until(t, depth) { - return t - } - tid := t.ID() - to := g.From(tid) - for to.Next() { - n := to.Node() - nid := n.ID() - if b.Traverse != nil && !b.Traverse(g.Edge(tid, nid)) { - continue - } - if b.visited.Has(nid) { - continue - } - if b.Visit != nil { - b.Visit(n) - } - b.visited.Add(nid) - children++ - b.queue.Enqueue(n) - } - if untilNext--; untilNext == 0 { - depth++ - untilNext = children - children = 0 - } - } - - return nil -} - -// WalkAll calls Walk for each unvisited node of the graph g using edges independent -// of their direction. The functions before and after are called prior to commencing -// and after completing each walk if they are non-nil respectively. The function -// during is called on each node as it is traversed. 
-func (b *BreadthFirst) WalkAll(g graph.Undirected, before, after func(), during func(graph.Node)) {
-	b.Reset()
-	nodes := g.Nodes()
-	for nodes.Next() {
-		from := nodes.Node()
-		if b.Visited(from) {
-			continue
-		}
-		if before != nil {
-			before()
-		}
-		b.Walk(g, from, func(n graph.Node, _ int) bool {
-			if during != nil {
-				during(n)
-			}
-			return false
-		})
-		if after != nil {
-			after()
-		}
-	}
-}
-
-// Visited returns whether the node n was visited during a traverse.
-func (b *BreadthFirst) Visited(n graph.Node) bool {
-	return b.visited.Has(n.ID())
-}
-
-// Reset resets the state of the traverser for reuse.
-func (b *BreadthFirst) Reset() {
-	b.queue.Reset()
-	b.visited = nil
-}
-
-// DepthFirst implements stateful depth-first graph traversal.
-type DepthFirst struct {
-	// Visit is called on all nodes on their first visit.
-	Visit func(graph.Node)
-
-	// Traverse is called on all edges that may be traversed
-	// during the walk. This includes edges that would hop to
-	// an already visited node.
-	//
-	// The value returned by Traverse determines whether an
-	// edge can be traversed during the walk.
-	Traverse func(graph.Edge) bool
-
-	stack   linear.NodeStack
-	visited set.Int64s
-}
-
-// Walk performs a depth-first traversal of the graph g starting from the given node,
-// depending on the Traverse field and the until parameter if they are non-nil.
-// The traversal follows edges for which Traverse(edge) is true and returns the first node
-// for which until(node) is true. During the traversal, if the Visit field is non-nil, it
-// is called with each node the first time it is visited.
-func (d *DepthFirst) Walk(g Graph, from graph.Node, until func(graph.Node) bool) graph.Node {
-	if d.visited == nil {
-		d.visited = make(set.Int64s)
-	}
-	d.stack.Push(from)
-	if d.Visit != nil && !d.visited.Has(from.ID()) {
-		d.Visit(from)
-	}
-	d.visited.Add(from.ID())
-
-	for d.stack.Len() > 0 {
-		t := d.stack.Pop()
-		if until != nil && until(t) {
-			return t
-		}
-		tid := t.ID()
-		to := g.From(tid)
-		for to.Next() {
-			n := to.Node()
-			nid := n.ID()
-			if d.Traverse != nil && !d.Traverse(g.Edge(tid, nid)) {
-				continue
-			}
-			if d.visited.Has(nid) {
-				continue
-			}
-			if d.Visit != nil {
-				d.Visit(n)
-			}
-			d.visited.Add(nid)
-			d.stack.Push(n)
-		}
-	}
-
-	return nil
-}
-
-// WalkAll calls Walk for each unvisited node of the graph g using edges independent
-// of their direction. The functions before and after are called prior to commencing
-// and after completing each walk if they are non-nil respectively. The function
-// during is called on each node as it is traversed.
-func (d *DepthFirst) WalkAll(g graph.Undirected, before, after func(), during func(graph.Node)) {
-	d.Reset()
-	nodes := g.Nodes()
-	for nodes.Next() {
-		from := nodes.Node()
-		if d.Visited(from) {
-			continue
-		}
-		if before != nil {
-			before()
-		}
-		d.Walk(g, from, func(n graph.Node) bool {
-			if during != nil {
-				during(n)
-			}
-			return false
-		})
-		if after != nil {
-			after()
-		}
-	}
-}
-
-// Visited returns whether the node n was visited during a traverse.
-func (d *DepthFirst) Visited(n graph.Node) bool {
-	return d.visited.Has(n.ID())
-}
-
-// Reset resets the state of the traverser for reuse.
-func (d *DepthFirst) Reset() { - d.stack = d.stack[:0] - d.visited = nil -} diff --git a/vendor/gonum.org/v1/gonum/graph/undirect.go b/vendor/gonum.org/v1/gonum/graph/undirect.go deleted file mode 100644 index 07ce64a06..000000000 --- a/vendor/gonum.org/v1/gonum/graph/undirect.go +++ /dev/null @@ -1,270 +0,0 @@ -// Copyright ©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package graph - -// Undirect converts a directed graph to an undirected graph. -type Undirect struct { - G Directed -} - -var _ Undirected = Undirect{} - -// Node returns the node with the given ID if it exists in the graph, -// and nil otherwise. -func (g Undirect) Node(id int64) Node { return g.G.Node(id) } - -// Nodes returns all the nodes in the graph. -func (g Undirect) Nodes() Nodes { return g.G.Nodes() } - -// From returns all nodes in g that can be reached directly from u. -func (g Undirect) From(uid int64) Nodes { - return newNodeFilterIterator(g.G.From(uid), g.G.To(uid)) -} - -// HasEdgeBetween returns whether an edge exists between nodes x and y. -func (g Undirect) HasEdgeBetween(xid, yid int64) bool { return g.G.HasEdgeBetween(xid, yid) } - -// Edge returns the edge from u to v if such an edge exists and nil otherwise. -// The node v must be directly reachable from u as defined by the From method. -// If an edge exists, the Edge returned is an EdgePair. The weight of -// the edge is determined by applying the Merge func to the weights of the -// edges between u and v. -func (g Undirect) Edge(uid, vid int64) Edge { return g.EdgeBetween(uid, vid) } - -// EdgeBetween returns the edge between nodes x and y. If an edge exists, the -// Edge returned is an EdgePair. The weight of the edge is determined by -// applying the Merge func to the weights of edges between x and y. -func (g Undirect) EdgeBetween(xid, yid int64) Edge { - fe := g.G.Edge(xid, yid) - re := g.G.Edge(yid, xid) - if fe == nil && re == nil { - return nil - } - - return EdgePair{fe, re} -} - -// UndirectWeighted converts a directed weighted graph to an undirected weighted graph, -// resolving edge weight conflicts. -type UndirectWeighted struct { - G WeightedDirected - - // Absent is the value used to - // represent absent edge weights - // passed to Merge if the reverse - // edge is present. - Absent float64 - - // Merge defines how discordant edge - // weights in G are resolved. A merge - // is performed if at least one edge - // exists between the nodes being - // considered. The edges corresponding - // to the two weights are also passed, - // in the same order. - // The order of weight parameters - // passed to Merge is not defined, so - // the function should be commutative. - // If Merge is nil, the arithmetic - // mean is used to merge weights. - Merge func(x, y float64, xe, ye Edge) float64 -} - -var ( - _ Undirected = UndirectWeighted{} - _ WeightedUndirected = UndirectWeighted{} -) - -// Node returns the node with the given ID if it exists in the graph, -// and nil otherwise. -func (g UndirectWeighted) Node(id int64) Node { return g.G.Node(id) } - -// Nodes returns all the nodes in the graph. -func (g UndirectWeighted) Nodes() Nodes { return g.G.Nodes() } - -// From returns all nodes in g that can be reached directly from u. -func (g UndirectWeighted) From(uid int64) Nodes { - return newNodeFilterIterator(g.G.From(uid), g.G.To(uid)) -} - -// HasEdgeBetween returns whether an edge exists between nodes x and y. 
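Reviewer note: the traverse.go hunk above removes the stateful BFS/DFS walkers. A minimal sketch of the BreadthFirst walker, with field signatures exactly as in the deleted hunk (graph construction via gonum's graph/simple package is assumed):

package main

import (
	"fmt"

	"gonum.org/v1/gonum/graph"
	"gonum.org/v1/gonum/graph/simple"
	"gonum.org/v1/gonum/graph/traverse"
)

func main() {
	g := simple.NewUndirectedGraph()
	for _, e := range [][2]int64{{1, 2}, {2, 3}, {3, 4}} {
		g.SetEdge(simple.Edge{F: simple.Node(e[0]), T: simple.Node(e[1])})
	}

	// Visit fires once per node, in breadth-first order from the start node.
	bfs := traverse.BreadthFirst{
		Visit: func(n graph.Node) { fmt.Println("visited", n.ID()) },
	}
	// until stops the walk at the first node two hops from node 1 (node 3).
	found := bfs.Walk(g, simple.Node(1), func(_ graph.Node, depth int) bool { return depth == 2 })
	fmt.Println("stopped at", found.ID())
}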
-func (g UndirectWeighted) HasEdgeBetween(xid, yid int64) bool { return g.G.HasEdgeBetween(xid, yid) } - -// Edge returns the edge from u to v if such an edge exists and nil otherwise. -// The node v must be directly reachable from u as defined by the From method. -// If an edge exists, the Edge returned is an EdgePair. The weight of -// the edge is determined by applying the Merge func to the weights of the -// edges between u and v. -func (g UndirectWeighted) Edge(uid, vid int64) Edge { return g.WeightedEdgeBetween(uid, vid) } - -// WeightedEdge returns the weighted edge from u to v if such an edge exists and nil otherwise. -// The node v must be directly reachable from u as defined by the From method. -// If an edge exists, the Edge returned is an EdgePair. The weight of -// the edge is determined by applying the Merge func to the weights of the -// edges between u and v. -func (g UndirectWeighted) WeightedEdge(uid, vid int64) WeightedEdge { - return g.WeightedEdgeBetween(uid, vid) -} - -// EdgeBetween returns the edge between nodes x and y. If an edge exists, the -// Edge returned is an EdgePair. The weight of the edge is determined by -// applying the Merge func to the weights of edges between x and y. -func (g UndirectWeighted) EdgeBetween(xid, yid int64) Edge { - return g.WeightedEdgeBetween(xid, yid) -} - -// WeightedEdgeBetween returns the weighted edge between nodes x and y. If an edge exists, the -// Edge returned is an EdgePair. The weight of the edge is determined by -// applying the Merge func to the weights of edges between x and y. -func (g UndirectWeighted) WeightedEdgeBetween(xid, yid int64) WeightedEdge { - fe := g.G.Edge(xid, yid) - re := g.G.Edge(yid, xid) - if fe == nil && re == nil { - return nil - } - - f, ok := g.G.Weight(xid, yid) - if !ok { - f = g.Absent - } - r, ok := g.G.Weight(yid, xid) - if !ok { - r = g.Absent - } - - var w float64 - if g.Merge == nil { - w = (f + r) / 2 - } else { - w = g.Merge(f, r, fe, re) - } - return WeightedEdgePair{EdgePair: [2]Edge{fe, re}, W: w} -} - -// Weight returns the weight for the edge between x and y if Edge(x, y) returns a non-nil Edge. -// If x and y are the same node the internal node weight is returned. If there is no joining -// edge between the two nodes the weight value returned is zero. Weight returns true if an edge -// exists between x and y or if x and y have the same ID, false otherwise. -func (g UndirectWeighted) Weight(xid, yid int64) (w float64, ok bool) { - fe := g.G.Edge(xid, yid) - re := g.G.Edge(yid, xid) - - f, fOk := g.G.Weight(xid, yid) - if !fOk { - f = g.Absent - } - r, rOK := g.G.Weight(yid, xid) - if !rOK { - r = g.Absent - } - ok = fOk || rOK - - if g.Merge == nil { - return (f + r) / 2, ok - } - return g.Merge(f, r, fe, re), ok -} - -// EdgePair is an opposed pair of directed edges. -type EdgePair [2]Edge - -// From returns the from node of the first non-nil edge, or nil. -func (e EdgePair) From() Node { - if e[0] != nil { - return e[0].From() - } else if e[1] != nil { - return e[1].From() - } - return nil -} - -// To returns the to node of the first non-nil edge, or nil. -func (e EdgePair) To() Node { - if e[0] != nil { - return e[0].To() - } else if e[1] != nil { - return e[1].To() - } - return nil -} - -// ReversedEdge returns a new Edge with the end point of the -// edges in the pair swapped. 
-func (e EdgePair) ReversedEdge() Edge { - if e[0] != nil { - e[0] = e[0].ReversedEdge() - } - if e[1] != nil { - e[1] = e[1].ReversedEdge() - } - return e -} - -// WeightedEdgePair is an opposed pair of directed edges. -type WeightedEdgePair struct { - EdgePair - W float64 -} - -// ReversedEdge returns a new Edge with the end point of the -// edges in the pair swapped. -func (e WeightedEdgePair) ReversedEdge() Edge { - e.EdgePair = e.EdgePair.ReversedEdge().(EdgePair) - return e -} - -// Weight returns the merged edge weights of the two edges. -func (e WeightedEdgePair) Weight() float64 { return e.W } - -// nodeFilterIterator combines two Nodes to produce a single stream of -// unique nodes. -type nodeFilterIterator struct { - a, b Nodes - - // unique indicates the node in b with the key ID is unique. - unique map[int64]bool -} - -func newNodeFilterIterator(a, b Nodes) *nodeFilterIterator { - n := nodeFilterIterator{a: a, b: b, unique: make(map[int64]bool)} - for n.b.Next() { - n.unique[n.b.Node().ID()] = true - } - n.b.Reset() - for n.a.Next() { - n.unique[n.a.Node().ID()] = false - } - n.a.Reset() - return &n -} - -func (n *nodeFilterIterator) Len() int { - return len(n.unique) -} - -func (n *nodeFilterIterator) Next() bool { - n.Len() - if n.a.Next() { - return true - } - for n.b.Next() { - if n.unique[n.b.Node().ID()] { - return true - } - } - return false -} - -func (n *nodeFilterIterator) Node() Node { - if n.a.Len() != 0 { - return n.a.Node() - } - return n.b.Node() -} - -func (n *nodeFilterIterator) Reset() { - n.a.Reset() - n.b.Reset() -} diff --git a/vendor/gonum.org/v1/gonum/internal/asm/c128/axpyinc_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/c128/axpyinc_amd64.s deleted file mode 100644 index 68490e51a..000000000 --- a/vendor/gonum.org/v1/gonum/internal/asm/c128/axpyinc_amd64.s +++ /dev/null @@ -1,134 +0,0 @@ -// Copyright ©2016 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
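Reviewer note: undirect.go above removes the directed-to-undirected adapters. The Merge behaviour described in the deleted doc comments amounts to the following, shown as a sketch against the upstream package (edge weights are illustrative):

package main

import (
	"fmt"

	"gonum.org/v1/gonum/graph"
	"gonum.org/v1/gonum/graph/simple"
)

func main() {
	d := simple.NewWeightedDirectedGraph(0, 0)
	// Opposed directed edges between the same pair, with different weights.
	d.SetWeightedEdge(simple.WeightedEdge{F: simple.Node(1), T: simple.Node(2), W: 1})
	d.SetWeightedEdge(simple.WeightedEdge{F: simple.Node(2), T: simple.Node(1), W: 3})

	// With Merge nil, the adapter reports the arithmetic mean of the pair.
	u := graph.UndirectWeighted{G: d}
	w, ok := u.Weight(1, 2)
	fmt.Println(w, ok) // 2 true
}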
- -// +build !noasm,!appengine,!safe - -#include "textflag.h" - -// MOVDDUP X2, X3 -#define MOVDDUP_X2_X3 BYTE $0xF2; BYTE $0x0F; BYTE $0x12; BYTE $0xDA -// MOVDDUP X4, X5 -#define MOVDDUP_X4_X5 BYTE $0xF2; BYTE $0x0F; BYTE $0x12; BYTE $0xEC -// MOVDDUP X6, X7 -#define MOVDDUP_X6_X7 BYTE $0xF2; BYTE $0x0F; BYTE $0x12; BYTE $0xFE -// MOVDDUP X8, X9 -#define MOVDDUP_X8_X9 BYTE $0xF2; BYTE $0x45; BYTE $0x0F; BYTE $0x12; BYTE $0xC8 - -// ADDSUBPD X2, X3 -#define ADDSUBPD_X2_X3 BYTE $0x66; BYTE $0x0F; BYTE $0xD0; BYTE $0xDA -// ADDSUBPD X4, X5 -#define ADDSUBPD_X4_X5 BYTE $0x66; BYTE $0x0F; BYTE $0xD0; BYTE $0xEC -// ADDSUBPD X6, X7 -#define ADDSUBPD_X6_X7 BYTE $0x66; BYTE $0x0F; BYTE $0xD0; BYTE $0xFE -// ADDSUBPD X8, X9 -#define ADDSUBPD_X8_X9 BYTE $0x66; BYTE $0x45; BYTE $0x0F; BYTE $0xD0; BYTE $0xC8 - -// func AxpyInc(alpha complex128, x, y []complex128, n, incX, incY, ix, iy uintptr) -TEXT ·AxpyInc(SB), NOSPLIT, $0 - MOVQ x_base+16(FP), SI // SI = &x - MOVQ y_base+40(FP), DI // DI = &y - MOVQ n+64(FP), CX // CX = n - CMPQ CX, $0 // if n==0 { return } - JE axpyi_end - MOVQ ix+88(FP), R8 // R8 = ix // Load the first index - SHLQ $4, R8 // R8 *= sizeof(complex128) - MOVQ iy+96(FP), R9 // R9 = iy - SHLQ $4, R9 // R9 *= sizeof(complex128) - LEAQ (SI)(R8*1), SI // SI = &(x[ix]) - LEAQ (DI)(R9*1), DI // DI = &(y[iy]) - MOVQ DI, DX // DX = DI // Separate Read/Write pointers - MOVQ incX+72(FP), R8 // R8 = incX - SHLQ $4, R8 // R8 *= sizeof(complex128) - MOVQ incY+80(FP), R9 // R9 = iy - SHLQ $4, R9 // R9 *= sizeof(complex128) - MOVUPS alpha+0(FP), X0 // X0 = { imag(a), real(a) } - MOVAPS X0, X1 - SHUFPD $0x1, X1, X1 // X1 = { real(a), imag(a) } - MOVAPS X0, X10 // Copy X0 and X1 for pipelining - MOVAPS X1, X11 - MOVQ CX, BX - ANDQ $3, CX // CX = n % 4 - SHRQ $2, BX // BX = floor( n / 4 ) - JZ axpyi_tail // if BX == 0 { goto axpyi_tail } - -axpyi_loop: // do { - MOVUPS (SI), X2 // X_i = { imag(x[i]), real(x[i]) } - MOVUPS (SI)(R8*1), X4 - LEAQ (SI)(R8*2), SI // SI = &(SI[incX*2]) - MOVUPS (SI), X6 - MOVUPS (SI)(R8*1), X8 - - // X_(i+1) = { real(x[i], real(x[i]) } - MOVDDUP_X2_X3 - MOVDDUP_X4_X5 - MOVDDUP_X6_X7 - MOVDDUP_X8_X9 - - // X_i = { imag(x[i]), imag(x[i]) } - SHUFPD $0x3, X2, X2 - SHUFPD $0x3, X4, X4 - SHUFPD $0x3, X6, X6 - SHUFPD $0x3, X8, X8 - - // X_i = { real(a) * imag(x[i]), imag(a) * imag(x[i]) } - // X_(i+1) = { imag(a) * real(x[i]), real(a) * real(x[i]) } - MULPD X1, X2 - MULPD X0, X3 - MULPD X11, X4 - MULPD X10, X5 - MULPD X1, X6 - MULPD X0, X7 - MULPD X11, X8 - MULPD X10, X9 - - // X_(i+1) = { - // imag(result[i]): imag(a)*real(x[i]) + real(a)*imag(x[i]), - // real(result[i]): real(a)*real(x[i]) - imag(a)*imag(x[i]) - // } - ADDSUBPD_X2_X3 - ADDSUBPD_X4_X5 - ADDSUBPD_X6_X7 - ADDSUBPD_X8_X9 - - // X_(i+1) = { imag(result[i]) + imag(y[i]), real(result[i]) + real(y[i]) } - ADDPD (DX), X3 - ADDPD (DX)(R9*1), X5 - LEAQ (DX)(R9*2), DX // DX = &(DX[incY*2]) - ADDPD (DX), X7 - ADDPD (DX)(R9*1), X9 - MOVUPS X3, (DI) // dst[i] = X_(i+1) - MOVUPS X5, (DI)(R9*1) - LEAQ (DI)(R9*2), DI - MOVUPS X7, (DI) - MOVUPS X9, (DI)(R9*1) - LEAQ (SI)(R8*2), SI // SI = &(SI[incX*2]) - LEAQ (DX)(R9*2), DX // DX = &(DX[incY*2]) - LEAQ (DI)(R9*2), DI // DI = &(DI[incY*2]) - DECQ BX - JNZ axpyi_loop // } while --BX > 0 - CMPQ CX, $0 // if CX == 0 { return } - JE axpyi_end - -axpyi_tail: // do { - MOVUPS (SI), X2 // X_i = { imag(x[i]), real(x[i]) } - MOVDDUP_X2_X3 // X_(i+1) = { real(x[i], real(x[i]) } - SHUFPD $0x3, X2, X2 // X_i = { imag(x[i]), imag(x[i]) } - MULPD X1, X2 // X_i = { real(a) * imag(x[i]), 
imag(a) * imag(x[i]) } - MULPD X0, X3 // X_(i+1) = { imag(a) * real(x[i]), real(a) * real(x[i]) } - - // X_(i+1) = { - // imag(result[i]): imag(a)*real(x[i]) + real(a)*imag(x[i]), - // real(result[i]): real(a)*real(x[i]) - imag(a)*imag(x[i]) - // } - ADDSUBPD_X2_X3 - - // X_(i+1) = { imag(result[i]) + imag(y[i]), real(result[i]) + real(y[i]) } - ADDPD (DI), X3 - MOVUPS X3, (DI) // y[i] = X_i - ADDQ R8, SI // SI = &(SI[incX]) - ADDQ R9, DI // DI = &(DI[incY]) - LOOP axpyi_tail // } while --CX > 0 - -axpyi_end: - RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/c128/axpyincto_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/c128/axpyincto_amd64.s deleted file mode 100644 index 50d21f2cb..000000000 --- a/vendor/gonum.org/v1/gonum/internal/asm/c128/axpyincto_amd64.s +++ /dev/null @@ -1,141 +0,0 @@ -// Copyright ©2016 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !noasm,!appengine,!safe - -#include "textflag.h" - -// MOVDDUP X2, X3 -#define MOVDDUP_X2_X3 BYTE $0xF2; BYTE $0x0F; BYTE $0x12; BYTE $0xDA -// MOVDDUP X4, X5 -#define MOVDDUP_X4_X5 BYTE $0xF2; BYTE $0x0F; BYTE $0x12; BYTE $0xEC -// MOVDDUP X6, X7 -#define MOVDDUP_X6_X7 BYTE $0xF2; BYTE $0x0F; BYTE $0x12; BYTE $0xFE -// MOVDDUP X8, X9 -#define MOVDDUP_X8_X9 BYTE $0xF2; BYTE $0x45; BYTE $0x0F; BYTE $0x12; BYTE $0xC8 - -// ADDSUBPD X2, X3 -#define ADDSUBPD_X2_X3 BYTE $0x66; BYTE $0x0F; BYTE $0xD0; BYTE $0xDA -// ADDSUBPD X4, X5 -#define ADDSUBPD_X4_X5 BYTE $0x66; BYTE $0x0F; BYTE $0xD0; BYTE $0xEC -// ADDSUBPD X6, X7 -#define ADDSUBPD_X6_X7 BYTE $0x66; BYTE $0x0F; BYTE $0xD0; BYTE $0xFE -// ADDSUBPD X8, X9 -#define ADDSUBPD_X8_X9 BYTE $0x66; BYTE $0x45; BYTE $0x0F; BYTE $0xD0; BYTE $0xC8 - -// func AxpyIncTo(dst []complex128, incDst, idst uintptr, alpha complex128, x, y []complex128, n, incX, incY, ix, iy uintptr) -TEXT ·AxpyIncTo(SB), NOSPLIT, $0 - MOVQ dst_base+0(FP), DI // DI = &dst - MOVQ x_base+56(FP), SI // SI = &x - MOVQ y_base+80(FP), DX // DX = &y - MOVQ n+104(FP), CX // CX = n - CMPQ CX, $0 // if n==0 { return } - JE axpyi_end - MOVQ ix+128(FP), R8 // R8 = ix // Load the first index - SHLQ $4, R8 // R8 *= sizeof(complex128) - MOVQ iy+136(FP), R9 // R9 = iy - SHLQ $4, R9 // R9 *= sizeof(complex128) - MOVQ idst+32(FP), R10 // R10 = idst - SHLQ $4, R10 // R10 *= sizeof(complex128) - LEAQ (SI)(R8*1), SI // SI = &(x[ix]) - LEAQ (DX)(R9*1), DX // DX = &(y[iy]) - LEAQ (DI)(R10*1), DI // DI = &(dst[idst]) - MOVQ incX+112(FP), R8 // R8 = incX - SHLQ $4, R8 // R8 *= sizeof(complex128) - MOVQ incY+120(FP), R9 // R9 = incY - SHLQ $4, R9 // R9 *= sizeof(complex128) - MOVQ incDst+24(FP), R10 // R10 = incDst - SHLQ $4, R10 // R10 *= sizeof(complex128) - MOVUPS alpha+40(FP), X0 // X0 = { imag(a), real(a) } - MOVAPS X0, X1 - SHUFPD $0x1, X1, X1 // X1 = { real(a), imag(a) } - MOVAPS X0, X10 // Copy X0 and X1 for pipelining - MOVAPS X1, X11 - MOVQ CX, BX - ANDQ $3, CX // CX = n % 4 - SHRQ $2, BX // BX = floor( n / 4 ) - JZ axpyi_tail // if BX == 0 { goto axpyi_tail } - -axpyi_loop: // do { - MOVUPS (SI), X2 // X_i = { imag(x[i]), real(x[i]) } - MOVUPS (SI)(R8*1), X4 - LEAQ (SI)(R8*2), SI // SI = &(SI[incX*2]) - - MOVUPS (SI), X6 - MOVUPS (SI)(R8*1), X8 - - // X_(i+1) = { real(x[i], real(x[i]) } - MOVDDUP_X2_X3 - MOVDDUP_X4_X5 - MOVDDUP_X6_X7 - MOVDDUP_X8_X9 - - // X_i = { imag(x[i]), imag(x[i]) } - SHUFPD $0x3, X2, X2 - SHUFPD $0x3, X4, X4 - SHUFPD $0x3, X6, X6 - SHUFPD $0x3, X8, X8 - - // X_i = { real(a) * imag(x[i]), imag(a) * 
imag(x[i]) } - // X_(i+1) = { imag(a) * real(x[i]), real(a) * real(x[i]) } - MULPD X1, X2 - MULPD X0, X3 - MULPD X11, X4 - MULPD X10, X5 - MULPD X1, X6 - MULPD X0, X7 - MULPD X11, X8 - MULPD X10, X9 - - // X_(i+1) = { - // imag(result[i]): imag(a)*real(x[i]) + real(a)*imag(x[i]), - // real(result[i]): real(a)*real(x[i]) - imag(a)*imag(x[i]) - // } - ADDSUBPD_X2_X3 - ADDSUBPD_X4_X5 - ADDSUBPD_X6_X7 - ADDSUBPD_X8_X9 - - // X_(i+1) = { imag(result[i]) + imag(y[i]), real(result[i]) + real(y[i]) } - ADDPD (DX), X3 - ADDPD (DX)(R9*1), X5 - LEAQ (DX)(R9*2), DX // DX = &(DX[incY*2]) - ADDPD (DX), X7 - ADDPD (DX)(R9*1), X9 - MOVUPS X3, (DI) // dst[i] = X_(i+1) - MOVUPS X5, (DI)(R10*1) - LEAQ (DI)(R10*2), DI - MOVUPS X7, (DI) - MOVUPS X9, (DI)(R10*1) - LEAQ (SI)(R8*2), SI // SI = &(SI[incX*2]) - LEAQ (DX)(R9*2), DX // DX = &(DX[incY*2]) - LEAQ (DI)(R10*2), DI // DI = &(DI[incDst*2]) - DECQ BX - JNZ axpyi_loop // } while --BX > 0 - CMPQ CX, $0 // if CX == 0 { return } - JE axpyi_end - -axpyi_tail: // do { - MOVUPS (SI), X2 // X_i = { imag(x[i]), real(x[i]) } - MOVDDUP_X2_X3 // X_(i+1) = { real(x[i], real(x[i]) } - SHUFPD $0x3, X2, X2 // X_i = { imag(x[i]), imag(x[i]) } - MULPD X1, X2 // X_i = { real(a) * imag(x[i]), imag(a) * imag(x[i]) } - MULPD X0, X3 // X_(i+1) = { imag(a) * real(x[i]), real(a) * real(x[i]) } - - // X_(i+1) = { - // imag(result[i]): imag(a)*real(x[i]) + real(a)*imag(x[i]), - // real(result[i]): real(a)*real(x[i]) - imag(a)*imag(x[i]) - // } - ADDSUBPD_X2_X3 - - // X_(i+1) = { imag(result[i]) + imag(y[i]), real(result[i]) + real(y[i]) } - ADDPD (DX), X3 - MOVUPS X3, (DI) // y[i] X_(i+1) - ADDQ R8, SI // SI += incX - ADDQ R9, DX // DX += incY - ADDQ R10, DI // DI += incDst - LOOP axpyi_tail // } while --CX > 0 - -axpyi_end: - RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/c128/axpyunitary_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/c128/axpyunitary_amd64.s deleted file mode 100644 index ccf82896f..000000000 --- a/vendor/gonum.org/v1/gonum/internal/asm/c128/axpyunitary_amd64.s +++ /dev/null @@ -1,122 +0,0 @@ -// Copyright ©2016 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
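Reviewer note: the axpyinc/axpyincto hunks above, and the axpyunitary/axpyunitaryto hunks that follow (the unitary variants are the incX = incY = 1 case), are four-way-unrolled SSE3 implementations of complex AXPY. Their scalar contract is the loop below; the helper name axpyIncRef is illustrative, not the vendored Go fallback itself:

// axpyIncRef mirrors the semantics of the deleted AxpyInc kernel:
// y[iy+i*incY] += alpha * x[ix+i*incX] for i in [0, n).
func axpyIncRef(alpha complex128, x, y []complex128, n, incX, incY, ix, iy uintptr) {
	for i := 0; i < int(n); i++ {
		y[iy] += alpha * x[ix]
		ix += incX
		iy += incY
	}
}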
- -// +build !noasm,!appengine,!safe - -#include "textflag.h" - -// MOVDDUP X2, X3 -#define MOVDDUP_X2_X3 BYTE $0xF2; BYTE $0x0F; BYTE $0x12; BYTE $0xDA -// MOVDDUP X4, X5 -#define MOVDDUP_X4_X5 BYTE $0xF2; BYTE $0x0F; BYTE $0x12; BYTE $0xEC -// MOVDDUP X6, X7 -#define MOVDDUP_X6_X7 BYTE $0xF2; BYTE $0x0F; BYTE $0x12; BYTE $0xFE -// MOVDDUP X8, X9 -#define MOVDDUP_X8_X9 BYTE $0xF2; BYTE $0x45; BYTE $0x0F; BYTE $0x12; BYTE $0xC8 - -// ADDSUBPD X2, X3 -#define ADDSUBPD_X2_X3 BYTE $0x66; BYTE $0x0F; BYTE $0xD0; BYTE $0xDA -// ADDSUBPD X4, X5 -#define ADDSUBPD_X4_X5 BYTE $0x66; BYTE $0x0F; BYTE $0xD0; BYTE $0xEC -// ADDSUBPD X6, X7 -#define ADDSUBPD_X6_X7 BYTE $0x66; BYTE $0x0F; BYTE $0xD0; BYTE $0xFE -// ADDSUBPD X8, X9 -#define ADDSUBPD_X8_X9 BYTE $0x66; BYTE $0x45; BYTE $0x0F; BYTE $0xD0; BYTE $0xC8 - -// func AxpyUnitary(alpha complex128, x, y []complex128) -TEXT ·AxpyUnitary(SB), NOSPLIT, $0 - MOVQ x_base+16(FP), SI // SI = &x - MOVQ y_base+40(FP), DI // DI = &y - MOVQ x_len+24(FP), CX // CX = min( len(x), len(y) ) - CMPQ y_len+48(FP), CX - CMOVQLE y_len+48(FP), CX - CMPQ CX, $0 // if CX == 0 { return } - JE caxy_end - PXOR X0, X0 // Clear work registers and cache-align loop - PXOR X1, X1 - MOVUPS alpha+0(FP), X0 // X0 = { imag(a), real(a) } - MOVAPS X0, X1 - SHUFPD $0x1, X1, X1 // X1 = { real(a), imag(a) } - XORQ AX, AX // i = 0 - MOVAPS X0, X10 // Copy X0 and X1 for pipelining - MOVAPS X1, X11 - MOVQ CX, BX - ANDQ $3, CX // CX = n % 4 - SHRQ $2, BX // BX = floor( n / 4 ) - JZ caxy_tail // if BX == 0 { goto caxy_tail } - -caxy_loop: // do { - MOVUPS (SI)(AX*8), X2 // X_i = { imag(x[i]), real(x[i]) } - MOVUPS 16(SI)(AX*8), X4 - MOVUPS 32(SI)(AX*8), X6 - MOVUPS 48(SI)(AX*8), X8 - - // X_(i+1) = { real(x[i], real(x[i]) } - MOVDDUP_X2_X3 - MOVDDUP_X4_X5 - MOVDDUP_X6_X7 - MOVDDUP_X8_X9 - - // X_i = { imag(x[i]), imag(x[i]) } - SHUFPD $0x3, X2, X2 - SHUFPD $0x3, X4, X4 - SHUFPD $0x3, X6, X6 - SHUFPD $0x3, X8, X8 - - // X_i = { real(a) * imag(x[i]), imag(a) * imag(x[i]) } - // X_(i+1) = { imag(a) * real(x[i]), real(a) * real(x[i]) } - MULPD X1, X2 - MULPD X0, X3 - MULPD X11, X4 - MULPD X10, X5 - MULPD X1, X6 - MULPD X0, X7 - MULPD X11, X8 - MULPD X10, X9 - - // X_(i+1) = { - // imag(result[i]): imag(a)*real(x[i]) + real(a)*imag(x[i]), - // real(result[i]): real(a)*real(x[i]) - imag(a)*imag(x[i]) - // } - ADDSUBPD_X2_X3 - ADDSUBPD_X4_X5 - ADDSUBPD_X6_X7 - ADDSUBPD_X8_X9 - - // X_(i+1) = { imag(result[i]) + imag(y[i]), real(result[i]) + real(y[i]) } - ADDPD (DI)(AX*8), X3 - ADDPD 16(DI)(AX*8), X5 - ADDPD 32(DI)(AX*8), X7 - ADDPD 48(DI)(AX*8), X9 - MOVUPS X3, (DI)(AX*8) // y[i] = X_(i+1) - MOVUPS X5, 16(DI)(AX*8) - MOVUPS X7, 32(DI)(AX*8) - MOVUPS X9, 48(DI)(AX*8) - ADDQ $8, AX // i += 8 - DECQ BX - JNZ caxy_loop // } while --BX > 0 - CMPQ CX, $0 // if CX == 0 { return } - JE caxy_end - -caxy_tail: // do { - MOVUPS (SI)(AX*8), X2 // X_i = { imag(x[i]), real(x[i]) } - MOVDDUP_X2_X3 // X_(i+1) = { real(x[i], real(x[i]) } - SHUFPD $0x3, X2, X2 // X_i = { imag(x[i]), imag(x[i]) } - MULPD X1, X2 // X_i = { real(a) * imag(x[i]), imag(a) * imag(x[i]) } - MULPD X0, X3 // X_(i+1) = { imag(a) * real(x[i]), real(a) * real(x[i]) } - - // X_(i+1) = { - // imag(result[i]): imag(a)*real(x[i]) + real(a)*imag(x[i]), - // real(result[i]): real(a)*real(x[i]) - imag(a)*imag(x[i]) - // } - ADDSUBPD_X2_X3 - - // X_(i+1) = { imag(result[i]) + imag(y[i]), real(result[i]) + real(y[i]) } - ADDPD (DI)(AX*8), X3 - MOVUPS X3, (DI)(AX*8) // y[i] = X_(i+1) - ADDQ $2, AX // i += 2 - LOOP caxy_tail // } while --CX > 0 - 
-caxy_end: - RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/c128/axpyunitaryto_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/c128/axpyunitaryto_amd64.s deleted file mode 100644 index 07ceabca9..000000000 --- a/vendor/gonum.org/v1/gonum/internal/asm/c128/axpyunitaryto_amd64.s +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright ©2016 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !noasm,!appengine,!safe - -#include "textflag.h" - -// MOVDDUP X2, X3 -#define MOVDDUP_X2_X3 BYTE $0xF2; BYTE $0x0F; BYTE $0x12; BYTE $0xDA -// MOVDDUP X4, X5 -#define MOVDDUP_X4_X5 BYTE $0xF2; BYTE $0x0F; BYTE $0x12; BYTE $0xEC -// MOVDDUP X6, X7 -#define MOVDDUP_X6_X7 BYTE $0xF2; BYTE $0x0F; BYTE $0x12; BYTE $0xFE -// MOVDDUP X8, X9 -#define MOVDDUP_X8_X9 BYTE $0xF2; BYTE $0x45; BYTE $0x0F; BYTE $0x12; BYTE $0xC8 - -// ADDSUBPD X2, X3 -#define ADDSUBPD_X2_X3 BYTE $0x66; BYTE $0x0F; BYTE $0xD0; BYTE $0xDA -// ADDSUBPD X4, X5 -#define ADDSUBPD_X4_X5 BYTE $0x66; BYTE $0x0F; BYTE $0xD0; BYTE $0xEC -// ADDSUBPD X6, X7 -#define ADDSUBPD_X6_X7 BYTE $0x66; BYTE $0x0F; BYTE $0xD0; BYTE $0xFE -// ADDSUBPD X8, X9 -#define ADDSUBPD_X8_X9 BYTE $0x66; BYTE $0x45; BYTE $0x0F; BYTE $0xD0; BYTE $0xC8 - -// func AxpyUnitaryTo(dst []complex128, alpha complex64, x, y []complex128) -TEXT ·AxpyUnitaryTo(SB), NOSPLIT, $0 - MOVQ dst_base+0(FP), DI // DI = &dst - MOVQ x_base+40(FP), SI // SI = &x - MOVQ y_base+64(FP), DX // DX = &y - MOVQ x_len+48(FP), CX // CX = min( len(x), len(y), len(dst) ) - CMPQ y_len+72(FP), CX - CMOVQLE y_len+72(FP), CX - CMPQ dst_len+8(FP), CX - CMOVQLE dst_len+8(FP), CX - CMPQ CX, $0 // if CX == 0 { return } - JE caxy_end - MOVUPS alpha+24(FP), X0 // X0 = { imag(a), real(a) } - MOVAPS X0, X1 - SHUFPD $0x1, X1, X1 // X1 = { real(a), imag(a) } - XORQ AX, AX // i = 0 - MOVAPS X0, X10 // Copy X0 and X1 for pipelining - MOVAPS X1, X11 - MOVQ CX, BX - ANDQ $3, CX // CX = n % 4 - SHRQ $2, BX // BX = floor( n / 4 ) - JZ caxy_tail // if BX == 0 { goto caxy_tail } - -caxy_loop: // do { - MOVUPS (SI)(AX*8), X2 // X_i = { imag(x[i]), real(x[i]) } - MOVUPS 16(SI)(AX*8), X4 - MOVUPS 32(SI)(AX*8), X6 - MOVUPS 48(SI)(AX*8), X8 - - // X_(i+1) = { real(x[i], real(x[i]) } - MOVDDUP_X2_X3 // Load and duplicate imag elements (xi, xi) - MOVDDUP_X4_X5 - MOVDDUP_X6_X7 - MOVDDUP_X8_X9 - - // X_i = { imag(x[i]), imag(x[i]) } - SHUFPD $0x3, X2, X2 // duplicate real elements (xr, xr) - SHUFPD $0x3, X4, X4 - SHUFPD $0x3, X6, X6 - SHUFPD $0x3, X8, X8 - - // X_i = { real(a) * imag(x[i]), imag(a) * imag(x[i]) } - // X_(i+1) = { imag(a) * real(x[i]), real(a) * real(x[i]) } - MULPD X1, X2 - MULPD X0, X3 - MULPD X11, X4 - MULPD X10, X5 - MULPD X1, X6 - MULPD X0, X7 - MULPD X11, X8 - MULPD X10, X9 - - // X_(i+1) = { - // imag(result[i]): imag(a)*real(x[i]) + real(a)*imag(x[i]), - // real(result[i]): real(a)*real(x[i]) - imag(a)*imag(x[i]) - // } - ADDSUBPD_X2_X3 - ADDSUBPD_X4_X5 - ADDSUBPD_X6_X7 - ADDSUBPD_X8_X9 - - // X_(i+1) = { imag(result[i]) + imag(y[i]), real(result[i]) + real(y[i]) } - ADDPD (DX)(AX*8), X3 - ADDPD 16(DX)(AX*8), X5 - ADDPD 32(DX)(AX*8), X7 - ADDPD 48(DX)(AX*8), X9 - MOVUPS X3, (DI)(AX*8) // y[i] = X_(i+1) - MOVUPS X5, 16(DI)(AX*8) - MOVUPS X7, 32(DI)(AX*8) - MOVUPS X9, 48(DI)(AX*8) - ADDQ $8, AX // i += 8 - DECQ BX - JNZ caxy_loop // } while --BX > 0 - CMPQ CX, $0 // if CX == 0 { return } - JE caxy_end - -caxy_tail: // Same calculation, but read in values to avoid trampling memory - MOVUPS (SI)(AX*8), X2 // 
X_i = { imag(x[i]), real(x[i]) } - MOVDDUP_X2_X3 // X_(i+1) = { real(x[i], real(x[i]) } - SHUFPD $0x3, X2, X2 // X_i = { imag(x[i]), imag(x[i]) } - MULPD X1, X2 // X_i = { real(a) * imag(x[i]), imag(a) * imag(x[i]) } - MULPD X0, X3 // X_(i+1) = { imag(a) * real(x[i]), real(a) * real(x[i]) } - - // X_(i+1) = { - // imag(result[i]): imag(a)*real(x[i]) + real(a)*imag(x[i]), - // real(result[i]): real(a)*real(x[i]) - imag(a)*imag(x[i]) - // } - ADDSUBPD_X2_X3 - - // X_(i+1) = { imag(result[i]) + imag(y[i]), real(result[i]) + real(y[i]) } - ADDPD (DX)(AX*8), X3 - MOVUPS X3, (DI)(AX*8) // y[i] = X_(i+1) - ADDQ $2, AX // i += 2 - LOOP caxy_tail // } while --CX > 0 - -caxy_end: - RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/c128/doc.go b/vendor/gonum.org/v1/gonum/internal/asm/c128/doc.go deleted file mode 100644 index 8802ff138..000000000 --- a/vendor/gonum.org/v1/gonum/internal/asm/c128/doc.go +++ /dev/null @@ -1,6 +0,0 @@ -// Copyright ©2017 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package c128 provides complex128 vector primitives. -package c128 // import "gonum.org/v1/gonum/internal/asm/c128" diff --git a/vendor/gonum.org/v1/gonum/internal/asm/c128/dotcinc_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/c128/dotcinc_amd64.s deleted file mode 100644 index 03c07db9f..000000000 --- a/vendor/gonum.org/v1/gonum/internal/asm/c128/dotcinc_amd64.s +++ /dev/null @@ -1,153 +0,0 @@ -// Copyright ©2016 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !noasm,!appengine,!safe - -#include "textflag.h" - -#define MOVDDUP_XPTR__X3 LONG $0x1E120FF2 // MOVDDUP (SI), X3 -#define MOVDDUP_XPTR_INCX__X5 LONG $0x120F42F2; WORD $0x062C // MOVDDUP (SI)(R8*1), X5 -#define MOVDDUP_XPTR_INCX_2__X7 LONG $0x120F42F2; WORD $0x463C // MOVDDUP (SI)(R8*2), X7 -#define MOVDDUP_XPTR_INCx3X__X9 LONG $0x120F46F2; WORD $0x0E0C // MOVDDUP (SI)(R9*1), X9 - -#define MOVDDUP_8_XPTR__X2 LONG $0x56120FF2; BYTE $0x08 // MOVDDUP 8(SI), X2 -#define MOVDDUP_8_XPTR_INCX__X4 LONG $0x120F42F2; WORD $0x0664; BYTE $0x08 // MOVDDUP 8(SI)(R8*1), X4 -#define MOVDDUP_8_XPTR_INCX_2__X6 LONG $0x120F42F2; WORD $0x4674; BYTE $0x08 // MOVDDUP 8(SI)(R8*2), X6 -#define MOVDDUP_8_XPTR_INCx3X__X8 LONG $0x120F46F2; WORD $0x0E44; BYTE $0x08 // MOVDDUP 8(SI)(R9*1), X8 - -#define ADDSUBPD_X2_X3 LONG $0xDAD00F66 // ADDSUBPD X2, X3 -#define ADDSUBPD_X4_X5 LONG $0xECD00F66 // ADDSUBPD X4, X5 -#define ADDSUBPD_X6_X7 LONG $0xFED00F66 // ADDSUBPD X6, X7 -#define ADDSUBPD_X8_X9 LONG $0xD00F4566; BYTE $0xC8 // ADDSUBPD X8, X9 - -#define X_PTR SI -#define Y_PTR DI -#define LEN CX -#define TAIL BX -#define SUM X0 -#define P_SUM X1 -#define INC_X R8 -#define INCx3_X R9 -#define INC_Y R10 -#define INCx3_Y R11 -#define NEG1 X15 -#define P_NEG1 X14 - -// func DotcInc(x, y []complex128, n, incX, incY, ix, iy uintptr) (sum complex128) -TEXT ·DotcInc(SB), NOSPLIT, $0 - MOVQ x_base+0(FP), X_PTR // X_PTR = &x - MOVQ y_base+24(FP), Y_PTR // Y_PTR = &y - MOVQ n+48(FP), LEN // LEN = n - PXOR SUM, SUM // SUM = 0 - CMPQ LEN, $0 // if LEN == 0 { return } - JE dot_end - PXOR P_SUM, P_SUM // P_SUM = 0 - MOVQ ix+72(FP), INC_X // INC_X = ix * sizeof(complex128) - SHLQ $4, INC_X - MOVQ iy+80(FP), INC_Y // INC_Y = iy * sizeof(complex128) - SHLQ $4, INC_Y - LEAQ (X_PTR)(INC_X*1), X_PTR // X_PTR = &(X_PTR[ix]) - LEAQ (Y_PTR)(INC_Y*1), Y_PTR // Y_PTR = &(Y_PTR[iy]) - 
MOVQ incX+56(FP), INC_X // INC_X = incX - SHLQ $4, INC_X // INC_X *= sizeof(complex128) - MOVQ incY+64(FP), INC_Y // INC_Y = incY - SHLQ $4, INC_Y // INC_Y *= sizeof(complex128) - MOVSD $(-1.0), NEG1 - SHUFPD $0, NEG1, NEG1 // { -1, -1 } - MOVQ LEN, TAIL - ANDQ $3, TAIL // TAIL = n % 4 - SHRQ $2, LEN // LEN = floor( n / 4 ) - JZ dot_tail // if n <= 4 { goto dot_tail } - MOVAPS NEG1, P_NEG1 // Copy NEG1 to P_NEG1 for pipelining - LEAQ (INC_X)(INC_X*2), INCx3_X // INCx3_X = 3 * incX * sizeof(complex128) - LEAQ (INC_Y)(INC_Y*2), INCx3_Y // INCx3_Y = 3 * incY * sizeof(complex128) - -dot_loop: // do { - MOVDDUP_XPTR__X3 // X_(i+1) = { real(x[i], real(x[i]) } - MOVDDUP_XPTR_INCX__X5 - MOVDDUP_XPTR_INCX_2__X7 - MOVDDUP_XPTR_INCx3X__X9 - - MOVDDUP_8_XPTR__X2 // X_i = { imag(x[i]), imag(x[i]) } - MOVDDUP_8_XPTR_INCX__X4 - MOVDDUP_8_XPTR_INCX_2__X6 - MOVDDUP_8_XPTR_INCx3X__X8 - - // X_i = { -imag(x[i]), -imag(x[i]) } - MULPD NEG1, X2 - MULPD P_NEG1, X4 - MULPD NEG1, X6 - MULPD P_NEG1, X8 - - // X_j = { imag(y[i]), real(y[i]) } - MOVUPS (Y_PTR), X10 - MOVUPS (Y_PTR)(INC_Y*1), X11 - MOVUPS (Y_PTR)(INC_Y*2), X12 - MOVUPS (Y_PTR)(INCx3_Y*1), X13 - - // X_(i+1) = { imag(a) * real(x[i]), real(a) * real(x[i]) } - MULPD X10, X3 - MULPD X11, X5 - MULPD X12, X7 - MULPD X13, X9 - - // X_j = { real(y[i]), imag(y[i]) } - SHUFPD $0x1, X10, X10 - SHUFPD $0x1, X11, X11 - SHUFPD $0x1, X12, X12 - SHUFPD $0x1, X13, X13 - - // X_i = { real(a) * imag(x[i]), imag(a) * imag(x[i]) } - MULPD X10, X2 - MULPD X11, X4 - MULPD X12, X6 - MULPD X13, X8 - - // X_(i+1) = { - // imag(result[i]): imag(a)*real(x[i]) + real(a)*imag(x[i]), - // real(result[i]): real(a)*real(x[i]) - imag(a)*imag(x[i]) - // } - ADDSUBPD_X2_X3 - ADDSUBPD_X4_X5 - ADDSUBPD_X6_X7 - ADDSUBPD_X8_X9 - - // psum += result[i] - ADDPD X3, SUM - ADDPD X5, P_SUM - ADDPD X7, SUM - ADDPD X9, P_SUM - - LEAQ (X_PTR)(INC_X*4), X_PTR // X_PTR = &(X_PTR[incX*4]) - LEAQ (Y_PTR)(INC_Y*4), Y_PTR // Y_PTR = &(Y_PTR[incY*4]) - - DECQ LEN - JNZ dot_loop // } while --LEN > 0 - ADDPD P_SUM, SUM // sum += psum - CMPQ TAIL, $0 // if TAIL == 0 { return } - JE dot_end - -dot_tail: // do { - MOVDDUP_XPTR__X3 // X_(i+1) = { real(x[i], real(x[i]) } - MOVDDUP_8_XPTR__X2 // X_i = { imag(x[i]), imag(x[i]) } - MULPD NEG1, X2 // X_i = { -imag(x[i]) , -imag(x[i]) } - MOVUPS (Y_PTR), X10 // X_j = { imag(y[i]) , real(y[i]) } - MULPD X10, X3 // X_(i+1) = { imag(a) * real(x[i]), real(a) * real(x[i]) } - SHUFPD $0x1, X10, X10 // X_j = { real(y[i]) , imag(y[i]) } - MULPD X10, X2 // X_i = { real(a) * imag(x[i]), imag(a) * imag(x[i]) } - - // X_(i+1) = { - // imag(result[i]): imag(a)*real(x[i]) + real(a)*imag(x[i]), - // real(result[i]): real(a)*real(x[i]) - imag(a)*imag(x[i]) - // } - ADDSUBPD_X2_X3 - ADDPD X3, SUM // sum += result[i] - ADDQ INC_X, X_PTR // X_PTR += incX - ADDQ INC_Y, Y_PTR // Y_PTR += incY - DECQ TAIL - JNZ dot_tail // } while --TAIL > 0 - -dot_end: - MOVUPS SUM, sum+88(FP) - RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/c128/dotcunitary_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/c128/dotcunitary_amd64.s deleted file mode 100644 index adce85e1d..000000000 --- a/vendor/gonum.org/v1/gonum/internal/asm/c128/dotcunitary_amd64.s +++ /dev/null @@ -1,143 +0,0 @@ -// Copyright ©2016 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
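Reviewer note: DotcInc above is the conjugated dot product; the MULPD against NEG1 on the imaginary lanes is what conjugates x before the multiply. Its scalar contract, as a sketch (dotcIncRef is an illustrative name):

// dotcIncRef mirrors the deleted DotcInc kernel:
// sum of conj(x[ix+i*incX]) * y[iy+i*incY] for i in [0, n).
func dotcIncRef(x, y []complex128, n, incX, incY, ix, iy uintptr) complex128 {
	var sum complex128
	for i := 0; i < int(n); i++ {
		// conj(x[ix]) * y[iy], with the conjugation written out explicitly.
		sum += complex(real(x[ix]), -imag(x[ix])) * y[iy]
		ix += incX
		iy += incY
	}
	return sum
}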
- -// +build !noasm,!appengine,!safe - -#include "textflag.h" - -#define MOVDDUP_XPTR_IDX_8__X3 LONG $0x1C120FF2; BYTE $0xC6 // MOVDDUP (SI)(AX*8), X3 -#define MOVDDUP_16_XPTR_IDX_8__X5 LONG $0x6C120FF2; WORD $0x10C6 // MOVDDUP 16(SI)(AX*8), X5 -#define MOVDDUP_32_XPTR_IDX_8__X7 LONG $0x7C120FF2; WORD $0x20C6 // MOVDDUP 32(SI)(AX*8), X7 -#define MOVDDUP_48_XPTR_IDX_8__X9 LONG $0x120F44F2; WORD $0xC64C; BYTE $0x30 // MOVDDUP 48(SI)(AX*8), X9 - -#define MOVDDUP_XPTR_IIDX_8__X2 LONG $0x14120FF2; BYTE $0xD6 // MOVDDUP (SI)(DX*8), X2 -#define MOVDDUP_16_XPTR_IIDX_8__X4 LONG $0x64120FF2; WORD $0x10D6 // MOVDDUP 16(SI)(DX*8), X4 -#define MOVDDUP_32_XPTR_IIDX_8__X6 LONG $0x74120FF2; WORD $0x20D6 // MOVDDUP 32(SI)(DX*8), X6 -#define MOVDDUP_48_XPTR_IIDX_8__X8 LONG $0x120F44F2; WORD $0xD644; BYTE $0x30 // MOVDDUP 48(SI)(DX*8), X8 - -#define ADDSUBPD_X2_X3 LONG $0xDAD00F66 // ADDSUBPD X2, X3 -#define ADDSUBPD_X4_X5 LONG $0xECD00F66 // ADDSUBPD X4, X5 -#define ADDSUBPD_X6_X7 LONG $0xFED00F66 // ADDSUBPD X6, X7 -#define ADDSUBPD_X8_X9 LONG $0xD00F4566; BYTE $0xC8 // ADDSUBPD X8, X9 - -#define X_PTR SI -#define Y_PTR DI -#define LEN CX -#define TAIL BX -#define SUM X0 -#define P_SUM X1 -#define IDX AX -#define I_IDX DX -#define NEG1 X15 -#define P_NEG1 X14 - -// func DotcUnitary(x, y []complex128) (sum complex128) -TEXT ·DotcUnitary(SB), NOSPLIT, $0 - MOVQ x_base+0(FP), X_PTR // X_PTR = &x - MOVQ y_base+24(FP), Y_PTR // Y_PTR = &y - MOVQ x_len+8(FP), LEN // LEN = min( len(x), len(y) ) - CMPQ y_len+32(FP), LEN - CMOVQLE y_len+32(FP), LEN - PXOR SUM, SUM // sum = 0 - CMPQ LEN, $0 // if LEN == 0 { return } - JE dot_end - XORPS P_SUM, P_SUM // psum = 0 - MOVSD $(-1.0), NEG1 - SHUFPD $0, NEG1, NEG1 // { -1, -1 } - XORQ IDX, IDX // i := 0 - MOVQ $1, I_IDX // j := 1 - MOVQ LEN, TAIL - ANDQ $3, TAIL // TAIL = floor( TAIL / 4 ) - SHRQ $2, LEN // LEN = TAIL % 4 - JZ dot_tail // if LEN == 0 { goto dot_tail } - - MOVAPS NEG1, P_NEG1 // Copy NEG1 to P_NEG1 for pipelining - -dot_loop: // do { - MOVDDUP_XPTR_IDX_8__X3 // X_(i+1) = { real(x[i], real(x[i]) } - MOVDDUP_16_XPTR_IDX_8__X5 - MOVDDUP_32_XPTR_IDX_8__X7 - MOVDDUP_48_XPTR_IDX_8__X9 - - MOVDDUP_XPTR_IIDX_8__X2 // X_i = { imag(x[i]), imag(x[i]) } - MOVDDUP_16_XPTR_IIDX_8__X4 - MOVDDUP_32_XPTR_IIDX_8__X6 - MOVDDUP_48_XPTR_IIDX_8__X8 - - // X_i = { -imag(x[i]), -imag(x[i]) } - MULPD NEG1, X2 - MULPD P_NEG1, X4 - MULPD NEG1, X6 - MULPD P_NEG1, X8 - - // X_j = { imag(y[i]), real(y[i]) } - MOVUPS (Y_PTR)(IDX*8), X10 - MOVUPS 16(Y_PTR)(IDX*8), X11 - MOVUPS 32(Y_PTR)(IDX*8), X12 - MOVUPS 48(Y_PTR)(IDX*8), X13 - - // X_(i+1) = { imag(a) * real(x[i]), real(a) * real(x[i]) } - MULPD X10, X3 - MULPD X11, X5 - MULPD X12, X7 - MULPD X13, X9 - - // X_j = { real(y[i]), imag(y[i]) } - SHUFPD $0x1, X10, X10 - SHUFPD $0x1, X11, X11 - SHUFPD $0x1, X12, X12 - SHUFPD $0x1, X13, X13 - - // X_i = { real(a) * imag(x[i]), imag(a) * imag(x[i]) } - MULPD X10, X2 - MULPD X11, X4 - MULPD X12, X6 - MULPD X13, X8 - - // X_(i+1) = { - // imag(result[i]): imag(a)*real(x[i]) + real(a)*imag(x[i]), - // real(result[i]): real(a)*real(x[i]) - imag(a)*imag(x[i]) - // } - ADDSUBPD_X2_X3 - ADDSUBPD_X4_X5 - ADDSUBPD_X6_X7 - ADDSUBPD_X8_X9 - - // psum += result[i] - ADDPD X3, SUM - ADDPD X5, P_SUM - ADDPD X7, SUM - ADDPD X9, P_SUM - - ADDQ $8, IDX // IDX += 8 - ADDQ $8, I_IDX // I_IDX += 8 - DECQ LEN - JNZ dot_loop // } while --LEN > 0 - ADDPD P_SUM, SUM // sum += psum - CMPQ TAIL, $0 // if TAIL == 0 { return } - JE dot_end - -dot_tail: // do { - MOVDDUP_XPTR_IDX_8__X3 // X_(i+1) = { real(x[i]) , 
real(x[i]) } - MOVDDUP_XPTR_IIDX_8__X2 // X_i = { imag(x[i]) , imag(x[i]) } - MULPD NEG1, X2 // X_i = { -imag(x[i]) , -imag(x[i]) } - MOVUPS (Y_PTR)(IDX*8), X10 // X_j = { imag(y[i]) , real(y[i]) } - MULPD X10, X3 // X_(i+1) = { imag(a) * real(x[i]), real(a) * real(x[i]) } - SHUFPD $0x1, X10, X10 // X_j = { real(y[i]) , imag(y[i]) } - MULPD X10, X2 // X_i = { real(a) * imag(x[i]), imag(a) * imag(x[i]) } - - // X_(i+1) = { - // imag(result[i]): imag(a)*real(x[i]) + real(a)*imag(x[i]), - // real(result[i]): real(a)*real(x[i]) - imag(a)*imag(x[i]) - // } - ADDSUBPD_X2_X3 - ADDPD X3, SUM // SUM += result[i] - ADDQ $2, IDX // IDX += 2 - ADDQ $2, I_IDX // I_IDX += 2 - DECQ TAIL - JNZ dot_tail // } while --TAIL > 0 - -dot_end: - MOVUPS SUM, sum+48(FP) - RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/c128/dotuinc_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/c128/dotuinc_amd64.s deleted file mode 100644 index 5b15444d8..000000000 --- a/vendor/gonum.org/v1/gonum/internal/asm/c128/dotuinc_amd64.s +++ /dev/null @@ -1,141 +0,0 @@ -// Copyright ©2016 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !noasm,!appengine,!safe - -#include "textflag.h" - -#define MOVDDUP_XPTR__X3 LONG $0x1E120FF2 // MOVDDUP (SI), X3 -#define MOVDDUP_XPTR_INCX__X5 LONG $0x120F42F2; WORD $0x062C // MOVDDUP (SI)(R8*1), X5 -#define MOVDDUP_XPTR_INCX_2__X7 LONG $0x120F42F2; WORD $0x463C // MOVDDUP (SI)(R8*2), X7 -#define MOVDDUP_XPTR_INCx3X__X9 LONG $0x120F46F2; WORD $0x0E0C // MOVDDUP (SI)(R9*1), X9 - -#define MOVDDUP_8_XPTR__X2 LONG $0x56120FF2; BYTE $0x08 // MOVDDUP 8(SI), X2 -#define MOVDDUP_8_XPTR_INCX__X4 LONG $0x120F42F2; WORD $0x0664; BYTE $0x08 // MOVDDUP 8(SI)(R8*1), X4 -#define MOVDDUP_8_XPTR_INCX_2__X6 LONG $0x120F42F2; WORD $0x4674; BYTE $0x08 // MOVDDUP 8(SI)(R8*2), X6 -#define MOVDDUP_8_XPTR_INCx3X__X8 LONG $0x120F46F2; WORD $0x0E44; BYTE $0x08 // MOVDDUP 8(SI)(R9*1), X8 - -#define ADDSUBPD_X2_X3 LONG $0xDAD00F66 // ADDSUBPD X2, X3 -#define ADDSUBPD_X4_X5 LONG $0xECD00F66 // ADDSUBPD X4, X5 -#define ADDSUBPD_X6_X7 LONG $0xFED00F66 // ADDSUBPD X6, X7 -#define ADDSUBPD_X8_X9 LONG $0xD00F4566; BYTE $0xC8 // ADDSUBPD X8, X9 - -#define X_PTR SI -#define Y_PTR DI -#define LEN CX -#define TAIL BX -#define SUM X0 -#define P_SUM X1 -#define INC_X R8 -#define INCx3_X R9 -#define INC_Y R10 -#define INCx3_Y R11 - -// func DotuInc(x, y []complex128, n, incX, incY, ix, iy uintptr) (sum complex128) -TEXT ·DotuInc(SB), NOSPLIT, $0 - MOVQ x_base+0(FP), X_PTR // X_PTR = &x - MOVQ y_base+24(FP), Y_PTR // Y_PTR = &y - MOVQ n+48(FP), LEN // LEN = n - PXOR SUM, SUM // sum = 0 - CMPQ LEN, $0 // if LEN == 0 { return } - JE dot_end - MOVQ ix+72(FP), INC_X // INC_X = ix * sizeof(complex128) - SHLQ $4, INC_X - MOVQ iy+80(FP), INC_Y // INC_Y = iy * sizeof(complex128) - SHLQ $4, INC_Y - LEAQ (X_PTR)(INC_X*1), X_PTR // X_PTR = &(X_PTR[ix]) - LEAQ (Y_PTR)(INC_Y*1), Y_PTR // Y_PTR = &(Y_PTR[iy]) - MOVQ incX+56(FP), INC_X // INC_X = incX - SHLQ $4, INC_X // INC_X *= sizeof(complex128) - MOVQ incY+64(FP), INC_Y // INC_Y = incY - SHLQ $4, INC_Y // INC_Y *= sizeof(complex128) - MOVQ LEN, TAIL - ANDQ $3, TAIL // LEN = LEN % 4 - SHRQ $2, LEN // LEN = floor( LEN / 4 ) - JZ dot_tail // if LEN <= 4 { goto dot_tail } - PXOR P_SUM, P_SUM // psum = 0 - LEAQ (INC_X)(INC_X*2), INCx3_X // INCx3_X = 3 * incX * sizeof(complex128) - LEAQ (INC_Y)(INC_Y*2), INCx3_Y // INCx3_Y = 3 * incY * sizeof(complex128) - -dot_loop: // do { - MOVDDUP_XPTR__X3 
// X_(i+1) = { real(x[i], real(x[i]) } - MOVDDUP_XPTR_INCX__X5 - MOVDDUP_XPTR_INCX_2__X7 - MOVDDUP_XPTR_INCx3X__X9 - - MOVDDUP_8_XPTR__X2 // X_i = { imag(x[i]), imag(x[i]) } - MOVDDUP_8_XPTR_INCX__X4 - MOVDDUP_8_XPTR_INCX_2__X6 - MOVDDUP_8_XPTR_INCx3X__X8 - - // X_j = { imag(y[i]), real(y[i]) } - MOVUPS (Y_PTR), X10 - MOVUPS (Y_PTR)(INC_Y*1), X11 - MOVUPS (Y_PTR)(INC_Y*2), X12 - MOVUPS (Y_PTR)(INCx3_Y*1), X13 - - // X_(i+1) = { imag(a) * real(x[i]), real(a) * real(x[i]) } - MULPD X10, X3 - MULPD X11, X5 - MULPD X12, X7 - MULPD X13, X9 - - // X_j = { real(y[i]), imag(y[i]) } - SHUFPD $0x1, X10, X10 - SHUFPD $0x1, X11, X11 - SHUFPD $0x1, X12, X12 - SHUFPD $0x1, X13, X13 - - // X_i = { real(a) * imag(x[i]), imag(a) * imag(x[i]) } - MULPD X10, X2 - MULPD X11, X4 - MULPD X12, X6 - MULPD X13, X8 - - // X_(i+1) = { - // imag(result[i]): imag(a)*real(x[i]) + real(a)*imag(x[i]), - // real(result[i]): real(a)*real(x[i]) - imag(a)*imag(x[i]) - // } - ADDSUBPD_X2_X3 - ADDSUBPD_X4_X5 - ADDSUBPD_X6_X7 - ADDSUBPD_X8_X9 - - // psum += result[i] - ADDPD X3, SUM - ADDPD X5, P_SUM - ADDPD X7, SUM - ADDPD X9, P_SUM - - LEAQ (X_PTR)(INC_X*4), X_PTR // X_PTR = &(X_PTR[incX*4]) - LEAQ (Y_PTR)(INC_Y*4), Y_PTR // Y_PTR = &(Y_PTR[incY*4]) - - DECQ LEN - JNZ dot_loop // } while --BX > 0 - ADDPD P_SUM, SUM // sum += psum - CMPQ TAIL, $0 // if TAIL == 0 { return } - JE dot_end - -dot_tail: // do { - MOVDDUP_XPTR__X3 // X_(i+1) = { real(x[i], real(x[i]) } - MOVDDUP_8_XPTR__X2 // X_i = { imag(x[i]), imag(x[i]) } - MOVUPS (Y_PTR), X10 // X_j = { imag(y[i]) , real(y[i]) } - MULPD X10, X3 // X_(i+1) = { imag(a) * real(x[i]), real(a) * real(x[i]) } - SHUFPD $0x1, X10, X10 // X_j = { real(y[i]) , imag(y[i]) } - MULPD X10, X2 // X_i = { real(a) * imag(x[i]), imag(a) * imag(x[i]) } - - // X_(i+1) = { - // imag(result[i]): imag(a)*real(x[i]) + real(a)*imag(x[i]), - // real(result[i]): real(a)*real(x[i]) - imag(a)*imag(x[i]) - // } - ADDSUBPD_X2_X3 - ADDPD X3, SUM // sum += result[i] - ADDQ INC_X, X_PTR // X_PTR += incX - ADDQ INC_Y, Y_PTR // Y_PTR += incY - DECQ TAIL // --TAIL - JNZ dot_tail // } while TAIL > 0 - -dot_end: - MOVUPS SUM, sum+88(FP) - RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/c128/dotuunitary_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/c128/dotuunitary_amd64.s deleted file mode 100644 index a45f31e9e..000000000 --- a/vendor/gonum.org/v1/gonum/internal/asm/c128/dotuunitary_amd64.s +++ /dev/null @@ -1,130 +0,0 @@ -// Copyright ©2016 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
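Reviewer note: DotuInc above is the unconjugated variant, structurally identical to DotcInc minus the NEG1 sign flip on the imaginary lanes. Scalar sketch (dotuIncRef is an illustrative name):

// dotuIncRef mirrors the deleted DotuInc kernel:
// sum of x[ix+i*incX] * y[iy+i*incY] for i in [0, n).
func dotuIncRef(x, y []complex128, n, incX, incY, ix, iy uintptr) complex128 {
	var sum complex128
	for i := 0; i < int(n); i++ {
		sum += x[ix] * y[iy]
		ix += incX
		iy += incY
	}
	return sum
}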
- -// +build !noasm,!appengine,!safe - -#include "textflag.h" - -#define MOVDDUP_XPTR_IDX_8__X3 LONG $0x1C120FF2; BYTE $0xC6 // MOVDDUP (SI)(AX*8), X3 -#define MOVDDUP_16_XPTR_IDX_8__X5 LONG $0x6C120FF2; WORD $0x10C6 // MOVDDUP 16(SI)(AX*8), X5 -#define MOVDDUP_32_XPTR_IDX_8__X7 LONG $0x7C120FF2; WORD $0x20C6 // MOVDDUP 32(SI)(AX*8), X7 -#define MOVDDUP_48_XPTR_IDX_8__X9 LONG $0x120F44F2; WORD $0xC64C; BYTE $0x30 // MOVDDUP 48(SI)(AX*8), X9 - -#define MOVDDUP_XPTR_IIDX_8__X2 LONG $0x14120FF2; BYTE $0xD6 // MOVDDUP (SI)(DX*8), X2 -#define MOVDDUP_16_XPTR_IIDX_8__X4 LONG $0x64120FF2; WORD $0x10D6 // MOVDDUP 16(SI)(DX*8), X4 -#define MOVDDUP_32_XPTR_IIDX_8__X6 LONG $0x74120FF2; WORD $0x20D6 // MOVDDUP 32(SI)(DX*8), X6 -#define MOVDDUP_48_XPTR_IIDX_8__X8 LONG $0x120F44F2; WORD $0xD644; BYTE $0x30 // MOVDDUP 48(SI)(DX*8), X8 - -#define ADDSUBPD_X2_X3 LONG $0xDAD00F66 // ADDSUBPD X2, X3 -#define ADDSUBPD_X4_X5 LONG $0xECD00F66 // ADDSUBPD X4, X5 -#define ADDSUBPD_X6_X7 LONG $0xFED00F66 // ADDSUBPD X6, X7 -#define ADDSUBPD_X8_X9 LONG $0xD00F4566; BYTE $0xC8 // ADDSUBPD X8, X9 - -#define X_PTR SI -#define Y_PTR DI -#define LEN CX -#define TAIL BX -#define SUM X0 -#define P_SUM X1 -#define IDX AX -#define I_IDX DX - -// func DotuUnitary(x, y []complex128) (sum complex128) -TEXT ·DotuUnitary(SB), NOSPLIT, $0 - MOVQ x_base+0(FP), X_PTR // X_PTR = &x - MOVQ y_base+24(FP), Y_PTR // Y_PTR = &y - MOVQ x_len+8(FP), LEN // LEN = min( len(x), len(y) ) - CMPQ y_len+32(FP), LEN - CMOVQLE y_len+32(FP), LEN - PXOR SUM, SUM // SUM = 0 - CMPQ LEN, $0 // if LEN == 0 { return } - JE dot_end - PXOR P_SUM, P_SUM // P_SUM = 0 - XORQ IDX, IDX // IDX = 0 - MOVQ $1, DX // j = 1 - MOVQ LEN, TAIL - ANDQ $3, TAIL // TAIL = floor( LEN / 4 ) - SHRQ $2, LEN // LEN = LEN % 4 - JZ dot_tail // if LEN == 0 { goto dot_tail } - -dot_loop: // do { - MOVDDUP_XPTR_IDX_8__X3 // X_(i+1) = { real(x[i], real(x[i]) } - MOVDDUP_16_XPTR_IDX_8__X5 - MOVDDUP_32_XPTR_IDX_8__X7 - MOVDDUP_48_XPTR_IDX_8__X9 - - MOVDDUP_XPTR_IIDX_8__X2 // X_i = { imag(x[i]), imag(x[i]) } - MOVDDUP_16_XPTR_IIDX_8__X4 - MOVDDUP_32_XPTR_IIDX_8__X6 - MOVDDUP_48_XPTR_IIDX_8__X8 - - // X_j = { imag(y[i]), real(y[i]) } - MOVUPS (Y_PTR)(IDX*8), X10 - MOVUPS 16(Y_PTR)(IDX*8), X11 - MOVUPS 32(Y_PTR)(IDX*8), X12 - MOVUPS 48(Y_PTR)(IDX*8), X13 - - // X_(i+1) = { imag(a) * real(x[i]), real(a) * real(x[i]) } - MULPD X10, X3 - MULPD X11, X5 - MULPD X12, X7 - MULPD X13, X9 - - // X_j = { real(y[i]), imag(y[i]) } - SHUFPD $0x1, X10, X10 - SHUFPD $0x1, X11, X11 - SHUFPD $0x1, X12, X12 - SHUFPD $0x1, X13, X13 - - // X_i = { real(a) * imag(x[i]), imag(a) * imag(x[i]) } - MULPD X10, X2 - MULPD X11, X4 - MULPD X12, X6 - MULPD X13, X8 - - // X_(i+1) = { - // imag(result[i]): imag(a)*real(x[i]) + real(a)*imag(x[i]), - // real(result[i]): real(a)*real(x[i]) - imag(a)*imag(x[i]) - // } - ADDSUBPD_X2_X3 - ADDSUBPD_X4_X5 - ADDSUBPD_X6_X7 - ADDSUBPD_X8_X9 - - // psum += result[i] - ADDPD X3, SUM - ADDPD X5, P_SUM - ADDPD X7, SUM - ADDPD X9, P_SUM - - ADDQ $8, IDX // IDX += 8 - ADDQ $8, I_IDX // I_IDX += 8 - DECQ LEN - JNZ dot_loop // } while --LEN > 0 - ADDPD P_SUM, SUM // SUM += P_SUM - CMPQ TAIL, $0 // if TAIL == 0 { return } - JE dot_end - -dot_tail: // do { - MOVDDUP_XPTR_IDX_8__X3 // X_(i+1) = { real(x[i] , real(x[i]) } - MOVDDUP_XPTR_IIDX_8__X2 // X_i = { imag(x[i]) , imag(x[i]) } - MOVUPS (Y_PTR)(IDX*8), X10 // X_j = { imag(y[i]) , real(y[i]) } - MULPD X10, X3 // X_(i+1) = { imag(a) * real(x[i]), real(a) * real(x[i]) } - SHUFPD $0x1, X10, X10 // X_j = { real(y[i]) , imag(y[i]) } - MULPD 
X10, X2 // X_i = { real(a) * imag(x[i]), imag(a) * imag(x[i]) } - - // X_(i+1) = { - // imag(result[i]): imag(a)*real(x[i]) + real(a)*imag(x[i]), - // real(result[i]): real(a)*real(x[i]) - imag(a)*imag(x[i]) - // } - ADDSUBPD_X2_X3 - ADDPD X3, SUM // psum += result[i] - ADDQ $2, IDX // IDX += 2 - ADDQ $2, I_IDX // I_IDX += 2 - DECQ TAIL // --TAIL - JNZ dot_tail // } while TAIL > 0 - -dot_end: - MOVUPS SUM, sum+48(FP) - RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/c128/dscalinc_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/c128/dscalinc_amd64.s deleted file mode 100644 index d8fd54d22..000000000 --- a/vendor/gonum.org/v1/gonum/internal/asm/c128/dscalinc_amd64.s +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright ©2017 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !noasm,!appengine,!safe - -#include "textflag.h" - -#define SRC SI -#define DST SI -#define LEN CX -#define TAIL BX -#define INC R9 -#define INC3 R10 -#define ALPHA X0 -#define ALPHA_2 X1 - -#define MOVDDUP_ALPHA LONG $0x44120FF2; WORD $0x0824 // MOVDDUP 8(SP), X0 - -// func DscalInc(alpha float64, x []complex128, n, inc uintptr) -TEXT ·DscalInc(SB), NOSPLIT, $0 - MOVQ x_base+8(FP), SRC // SRC = &x - MOVQ n+32(FP), LEN // LEN = n - CMPQ LEN, $0 // if LEN == 0 { return } - JE dscal_end - - MOVDDUP_ALPHA // ALPHA = alpha - MOVQ inc+40(FP), INC // INC = inc - SHLQ $4, INC // INC = INC * sizeof(complex128) - LEAQ (INC)(INC*2), INC3 // INC3 = 3 * INC - MOVUPS ALPHA, ALPHA_2 // Copy ALPHA and ALPHA_2 for pipelining - MOVQ LEN, TAIL // TAIL = LEN - SHRQ $2, LEN // LEN = floor( n / 4 ) - JZ dscal_tail // if LEN == 0 { goto dscal_tail } - -dscal_loop: // do { - MOVUPS (SRC), X2 // X_i = x[i] - MOVUPS (SRC)(INC*1), X3 - MOVUPS (SRC)(INC*2), X4 - MOVUPS (SRC)(INC3*1), X5 - - MULPD ALPHA, X2 // X_i *= ALPHA - MULPD ALPHA_2, X3 - MULPD ALPHA, X4 - MULPD ALPHA_2, X5 - - MOVUPS X2, (DST) // x[i] = X_i - MOVUPS X3, (DST)(INC*1) - MOVUPS X4, (DST)(INC*2) - MOVUPS X5, (DST)(INC3*1) - - LEAQ (SRC)(INC*4), SRC // SRC += INC*4 - DECQ LEN - JNZ dscal_loop // } while --LEN > 0 - -dscal_tail: - ANDQ $3, TAIL // TAIL = TAIL % 4 - JE dscal_end // if TAIL == 0 { return } - -dscal_tail_loop: // do { - MOVUPS (SRC), X2 // X_i = x[i] - MULPD ALPHA, X2 // X_i *= ALPHA - MOVUPS X2, (DST) // x[i] = X_i - ADDQ INC, SRC // SRC += INC - DECQ TAIL - JNZ dscal_tail_loop // } while --TAIL > 0 - -dscal_end: - RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/c128/dscalunitary_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/c128/dscalunitary_amd64.s deleted file mode 100644 index 6ed900a66..000000000 --- a/vendor/gonum.org/v1/gonum/internal/asm/c128/dscalunitary_amd64.s +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright ©2017 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
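Reviewer note: the dscalinc hunk above and the dscalunitary hunk beginning here scale a complex vector by a real alpha, so a single MULPD per element suffices and no ADDSUBPD shuffle is needed. Scalar sketch (dscalUnitaryRef is an illustrative name):

// dscalUnitaryRef mirrors the deleted DscalUnitary kernel:
// x[i] *= alpha, with the real alpha applied to both components.
func dscalUnitaryRef(alpha float64, x []complex128) {
	for i, v := range x {
		x[i] = complex(alpha*real(v), alpha*imag(v))
	}
}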
- -// +build !noasm,!appengine,!safe - -#include "textflag.h" - -#define SRC SI -#define DST SI -#define LEN CX -#define IDX AX -#define TAIL BX -#define ALPHA X0 -#define ALPHA_2 X1 - -#define MOVDDUP_ALPHA LONG $0x44120FF2; WORD $0x0824 // MOVDDUP 8(SP), X0 - -// func DscalUnitary(alpha float64, x []complex128) -TEXT ·DscalUnitary(SB), NOSPLIT, $0 - MOVQ x_base+8(FP), SRC // SRC = &x - MOVQ x_len+16(FP), LEN // LEN = len(x) - CMPQ LEN, $0 // if LEN == 0 { return } - JE dscal_end - - MOVDDUP_ALPHA // ALPHA = alpha - XORQ IDX, IDX // IDX = 0 - MOVUPS ALPHA, ALPHA_2 // Copy ALPHA to ALPHA_2 for pipelining - MOVQ LEN, TAIL // TAIL = LEN - SHRQ $2, LEN // LEN = floor( n / 4 ) - JZ dscal_tail // if LEN == 0 { goto dscal_tail } - -dscal_loop: // do { - MOVUPS (SRC)(IDX*8), X2 // X_i = x[i] - MOVUPS 16(SRC)(IDX*8), X3 - MOVUPS 32(SRC)(IDX*8), X4 - MOVUPS 48(SRC)(IDX*8), X5 - - MULPD ALPHA, X2 // X_i *= ALPHA - MULPD ALPHA_2, X3 - MULPD ALPHA, X4 - MULPD ALPHA_2, X5 - - MOVUPS X2, (DST)(IDX*8) // x[i] = X_i - MOVUPS X3, 16(DST)(IDX*8) - MOVUPS X4, 32(DST)(IDX*8) - MOVUPS X5, 48(DST)(IDX*8) - - ADDQ $8, IDX // IDX += 8 - DECQ LEN - JNZ dscal_loop // } while --LEN > 0 - -dscal_tail: - ANDQ $3, TAIL // TAIL = TAIL % 4 - JZ dscal_end // if TAIL == 0 { return } - -dscal_tail_loop: // do { - MOVUPS (SRC)(IDX*8), X2 // X_i = x[i] - MULPD ALPHA, X2 // X_i *= ALPHA - MOVUPS X2, (DST)(IDX*8) // x[i] = X_i - ADDQ $2, IDX // IDX += 2 - DECQ TAIL - JNZ dscal_tail_loop // } while --TAIL > 0 - -dscal_end: - RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/c128/scal.go b/vendor/gonum.org/v1/gonum/internal/asm/c128/scal.go deleted file mode 100644 index 47a80e50c..000000000 --- a/vendor/gonum.org/v1/gonum/internal/asm/c128/scal.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright ©2016 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package c128 - -// ScalUnitaryTo is -// for i, v := range x { -// dst[i] = alpha * v -// } -func ScalUnitaryTo(dst []complex128, alpha complex128, x []complex128) { - for i, v := range x { - dst[i] = alpha * v - } -} - -// ScalIncTo is -// var idst, ix uintptr -// for i := 0; i < int(n); i++ { -// dst[idst] = alpha * x[ix] -// ix += incX -// idst += incDst -// } -func ScalIncTo(dst []complex128, incDst uintptr, alpha complex128, x []complex128, n, incX uintptr) { - var idst, ix uintptr - for i := 0; i < int(n); i++ { - dst[idst] = alpha * x[ix] - ix += incX - idst += incDst - } -} diff --git a/vendor/gonum.org/v1/gonum/internal/asm/c128/scalUnitary_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/c128/scalUnitary_amd64.s deleted file mode 100644 index f08590e1b..000000000 --- a/vendor/gonum.org/v1/gonum/internal/asm/c128/scalUnitary_amd64.s +++ /dev/null @@ -1,116 +0,0 @@ -// Copyright ©2017 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
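The scal kernels above all share one loop shape: an unrolled main loop covering floor(n/4)*4 elements, then a scalar tail for the n%4 leftovers. A hypothetical Go sketch of that structure (scaleUnrolled is an illustrative name only):

package c128ref

// scaleUnrolled shows the unroll-by-four-plus-tail structure used by
// DscalUnitary and DscalInc above.
func scaleUnrolled(alpha float64, x []complex128) {
	i, n := 0, len(x)
	for ; i+4 <= n; i += 4 { // dscal_loop: SHRQ $2, LEN iterations
		for j := i; j < i+4; j++ {
			v := x[j]
			x[j] = complex(real(v)*alpha, imag(v)*alpha)
		}
	}
	for ; i < n; i++ { // dscal_tail_loop: ANDQ $3, TAIL leftovers
		v := x[i]
		x[i] = complex(real(v)*alpha, imag(v)*alpha)
	}
}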
- -// +build !noasm,!appengine,!safe - -#include "textflag.h" - -#define SRC SI -#define DST SI -#define LEN CX -#define IDX AX -#define TAIL BX -#define ALPHA X0 -#define ALPHA_C X1 -#define ALPHA2 X10 -#define ALPHA_C2 X11 - -#define MOVDDUP_X2_X3 LONG $0xDA120FF2 // MOVDDUP X2, X3 -#define MOVDDUP_X4_X5 LONG $0xEC120FF2 // MOVDDUP X4, X5 -#define MOVDDUP_X6_X7 LONG $0xFE120FF2 // MOVDDUP X6, X7 -#define MOVDDUP_X8_X9 LONG $0x120F45F2; BYTE $0xC8 // MOVDDUP X8, X9 - -#define ADDSUBPD_X2_X3 LONG $0xDAD00F66 // ADDSUBPD X2, X3 -#define ADDSUBPD_X4_X5 LONG $0xECD00F66 // ADDSUBPD X4, X5 -#define ADDSUBPD_X6_X7 LONG $0xFED00F66 // ADDSUBPD X6, X7 -#define ADDSUBPD_X8_X9 LONG $0xD00F4566; BYTE $0xC8 // ADDSUBPD X8, X9 - -// func ScalUnitary(alpha complex128, x []complex128) -TEXT ·ScalUnitary(SB), NOSPLIT, $0 - MOVQ x_base+16(FP), SRC // SRC = &x - MOVQ x_len+24(FP), LEN // LEN = len(x) - CMPQ LEN, $0 // if LEN == 0 { return } - JE scal_end - - MOVUPS alpha+0(FP), ALPHA // ALPHA = { imag(alpha), real(alpha) } - MOVAPS ALPHA, ALPHA_C - SHUFPD $0x1, ALPHA_C, ALPHA_C // ALPHA_C = { real(alpha), imag(alpha) } - - XORQ IDX, IDX // IDX = 0 - MOVAPS ALPHA, ALPHA2 // Copy ALPHA and ALPHA_C for pipelining - MOVAPS ALPHA_C, ALPHA_C2 - MOVQ LEN, TAIL - SHRQ $2, LEN // LEN = floor( n / 4 ) - JZ scal_tail // if BX == 0 { goto scal_tail } - -scal_loop: // do { - MOVUPS (SRC)(IDX*8), X2 // X_i = { imag(x[i]), real(x[i]) } - MOVUPS 16(SRC)(IDX*8), X4 - MOVUPS 32(SRC)(IDX*8), X6 - MOVUPS 48(SRC)(IDX*8), X8 - - // X_(i+1) = { real(x[i], real(x[i]) } - MOVDDUP_X2_X3 - MOVDDUP_X4_X5 - MOVDDUP_X6_X7 - MOVDDUP_X8_X9 - - // X_i = { imag(x[i]), imag(x[i]) } - SHUFPD $0x3, X2, X2 - SHUFPD $0x3, X4, X4 - SHUFPD $0x3, X6, X6 - SHUFPD $0x3, X8, X8 - - // X_i = { real(ALPHA) * imag(x[i]), imag(ALPHA) * imag(x[i]) } - // X_(i+1) = { imag(ALPHA) * real(x[i]), real(ALPHA) * real(x[i]) } - MULPD ALPHA_C, X2 - MULPD ALPHA, X3 - MULPD ALPHA_C2, X4 - MULPD ALPHA2, X5 - MULPD ALPHA_C, X6 - MULPD ALPHA, X7 - MULPD ALPHA_C2, X8 - MULPD ALPHA2, X9 - - // X_(i+1) = { - // imag(result[i]): imag(ALPHA)*real(x[i]) + real(ALPHA)*imag(x[i]), - // real(result[i]): real(ALPHA)*real(x[i]) - imag(ALPHA)*imag(x[i]) - // } - ADDSUBPD_X2_X3 - ADDSUBPD_X4_X5 - ADDSUBPD_X6_X7 - ADDSUBPD_X8_X9 - - MOVUPS X3, (DST)(IDX*8) // x[i] = X_(i+1) - MOVUPS X5, 16(DST)(IDX*8) - MOVUPS X7, 32(DST)(IDX*8) - MOVUPS X9, 48(DST)(IDX*8) - ADDQ $8, IDX // IDX += 8 - DECQ LEN - JNZ scal_loop // } while --LEN > 0 - -scal_tail: - ANDQ $3, TAIL // TAIL = TAIL % 4 - JZ scal_end // if TAIL == 0 { return } - -scal_tail_loop: // do { - MOVUPS (SRC)(IDX*8), X2 // X_i = { imag(x[i]), real(x[i]) } - MOVDDUP_X2_X3 // X_(i+1) = { real(x[i], real(x[i]) } - SHUFPD $0x3, X2, X2 // X_i = { imag(x[i]), imag(x[i]) } - MULPD ALPHA_C, X2 // X_i = { real(ALPHA) * imag(x[i]), imag(ALPHA) * imag(x[i]) } - MULPD ALPHA, X3 // X_(i+1) = { imag(ALPHA) * real(x[i]), real(ALPHA) * real(x[i]) } - - // X_(i+1) = { - // imag(result[i]): imag(ALPHA)*real(x[i]) + real(ALPHA)*imag(x[i]), - // real(result[i]): real(ALPHA)*real(x[i]) - imag(ALPHA)*imag(x[i]) - // } - ADDSUBPD_X2_X3 - - MOVUPS X3, (DST)(IDX*8) // x[i] = X_(i+1) - ADDQ $2, IDX // IDX += 2 - DECQ TAIL - JNZ scal_tail_loop // } while --LEN > 0 - -scal_end: - RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/c128/scalinc_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/c128/scalinc_amd64.s deleted file mode 100644 index 5829ee54b..000000000 --- a/vendor/gonum.org/v1/gonum/internal/asm/c128/scalinc_amd64.s +++ /dev/null @@ -1,121 +0,0 
@@ -// Copyright ©2016 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !noasm,!appengine,!safe - -#include "textflag.h" - -#define SRC SI -#define DST SI -#define LEN CX -#define TAIL BX -#define INC R9 -#define INC3 R10 -#define ALPHA X0 -#define ALPHA_C X1 -#define ALPHA2 X10 -#define ALPHA_C2 X11 - -#define MOVDDUP_X2_X3 LONG $0xDA120FF2 // MOVDDUP X2, X3 -#define MOVDDUP_X4_X5 LONG $0xEC120FF2 // MOVDDUP X4, X5 -#define MOVDDUP_X6_X7 LONG $0xFE120FF2 // MOVDDUP X6, X7 -#define MOVDDUP_X8_X9 LONG $0x120F45F2; BYTE $0xC8 // MOVDDUP X8, X9 - -#define ADDSUBPD_X2_X3 LONG $0xDAD00F66 // ADDSUBPD X2, X3 -#define ADDSUBPD_X4_X5 LONG $0xECD00F66 // ADDSUBPD X4, X5 -#define ADDSUBPD_X6_X7 LONG $0xFED00F66 // ADDSUBPD X6, X7 -#define ADDSUBPD_X8_X9 LONG $0xD00F4566; BYTE $0xC8 // ADDSUBPD X8, X9 - -// func ScalInc(alpha complex128, x []complex128, n, inc uintptr) -TEXT ·ScalInc(SB), NOSPLIT, $0 - MOVQ x_base+16(FP), SRC // SRC = &x - MOVQ n+40(FP), LEN // LEN = len(x) - CMPQ LEN, $0 - JE scal_end // if LEN == 0 { return } - - MOVQ inc+48(FP), INC // INC = inc - SHLQ $4, INC // INC = INC * sizeof(complex128) - LEAQ (INC)(INC*2), INC3 // INC3 = 3 * INC - - MOVUPS alpha+0(FP), ALPHA // ALPHA = { imag(alpha), real(alpha) } - MOVAPS ALPHA, ALPHA_C - SHUFPD $0x1, ALPHA_C, ALPHA_C // ALPHA_C = { real(alpha), imag(alpha) } - - MOVAPS ALPHA, ALPHA2 // Copy ALPHA and ALPHA_C for pipelining - MOVAPS ALPHA_C, ALPHA_C2 - MOVQ LEN, TAIL - SHRQ $2, LEN // LEN = floor( n / 4 ) - JZ scal_tail // if BX == 0 { goto scal_tail } - -scal_loop: // do { - MOVUPS (SRC), X2 // X_i = { imag(x[i]), real(x[i]) } - MOVUPS (SRC)(INC*1), X4 - MOVUPS (SRC)(INC*2), X6 - MOVUPS (SRC)(INC3*1), X8 - - // X_(i+1) = { real(x[i], real(x[i]) } - MOVDDUP_X2_X3 - MOVDDUP_X4_X5 - MOVDDUP_X6_X7 - MOVDDUP_X8_X9 - - // X_i = { imag(x[i]), imag(x[i]) } - SHUFPD $0x3, X2, X2 - SHUFPD $0x3, X4, X4 - SHUFPD $0x3, X6, X6 - SHUFPD $0x3, X8, X8 - - // X_i = { real(ALPHA) * imag(x[i]), imag(ALPHA) * imag(x[i]) } - // X_(i+1) = { imag(ALPHA) * real(x[i]), real(ALPHA) * real(x[i]) } - MULPD ALPHA_C, X2 - MULPD ALPHA, X3 - MULPD ALPHA_C2, X4 - MULPD ALPHA2, X5 - MULPD ALPHA_C, X6 - MULPD ALPHA, X7 - MULPD ALPHA_C2, X8 - MULPD ALPHA2, X9 - - // X_(i+1) = { - // imag(result[i]): imag(ALPHA)*real(x[i]) + real(ALPHA)*imag(x[i]), - // real(result[i]): real(ALPHA)*real(x[i]) - imag(ALPHA)*imag(x[i]) - // } - ADDSUBPD_X2_X3 - ADDSUBPD_X4_X5 - ADDSUBPD_X6_X7 - ADDSUBPD_X8_X9 - - MOVUPS X3, (DST) // x[i] = X_(i+1) - MOVUPS X5, (DST)(INC*1) - MOVUPS X7, (DST)(INC*2) - MOVUPS X9, (DST)(INC3*1) - - LEAQ (SRC)(INC*4), SRC // SRC = &(SRC[inc*4]) - DECQ LEN - JNZ scal_loop // } while --BX > 0 - -scal_tail: - ANDQ $3, TAIL // TAIL = TAIL % 4 - JE scal_end // if TAIL == 0 { return } - -scal_tail_loop: // do { - MOVUPS (SRC), X2 // X_i = { imag(x[i]), real(x[i]) } - MOVDDUP_X2_X3 // X_(i+1) = { real(x[i], real(x[i]) } - SHUFPD $0x3, X2, X2 // X_i = { imag(x[i]), imag(x[i]) } - MULPD ALPHA_C, X2 // X_i = { real(ALPHA) * imag(x[i]), imag(ALPHA) * imag(x[i]) } - MULPD ALPHA, X3 // X_(i+1) = { imag(ALPHA) * real(x[i]), real(ALPHA) * real(x[i]) } - - // X_(i+1) = { - // imag(result[i]): imag(ALPHA)*real(x[i]) + real(ALPHA)*imag(x[i]), - // real(result[i]): real(ALPHA)*real(x[i]) - imag(ALPHA)*imag(x[i]) - // } - ADDSUBPD_X2_X3 - - MOVUPS X3, (DST) // x[i] = X_i - ADDQ INC, SRC // SRC = &(SRC[incX]) - DECQ TAIL - JNZ scal_tail_loop // } while --TAIL > 0 - -scal_end: 
- RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/c128/stubs_amd64.go b/vendor/gonum.org/v1/gonum/internal/asm/c128/stubs_amd64.go deleted file mode 100644 index ad6b23ca4..000000000 --- a/vendor/gonum.org/v1/gonum/internal/asm/c128/stubs_amd64.go +++ /dev/null @@ -1,96 +0,0 @@ -// Copyright ©2016 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !noasm,!appengine,!safe - -package c128 - -// AxpyUnitary is -// for i, v := range x { -// y[i] += alpha * v -// } -func AxpyUnitary(alpha complex128, x, y []complex128) - -// AxpyUnitaryTo is -// for i, v := range x { -// dst[i] = alpha*v + y[i] -// } -func AxpyUnitaryTo(dst []complex128, alpha complex128, x, y []complex128) - -// AxpyInc is -// for i := 0; i < int(n); i++ { -// y[iy] += alpha * x[ix] -// ix += incX -// iy += incY -// } -func AxpyInc(alpha complex128, x, y []complex128, n, incX, incY, ix, iy uintptr) - -// AxpyIncTo is -// for i := 0; i < int(n); i++ { -// dst[idst] = alpha*x[ix] + y[iy] -// ix += incX -// iy += incY -// idst += incDst -// } -func AxpyIncTo(dst []complex128, incDst, idst uintptr, alpha complex128, x, y []complex128, n, incX, incY, ix, iy uintptr) - -// DscalUnitary is -// for i, v := range x { -// x[i] = complex(real(v)*alpha, imag(v)*alpha) -// } -func DscalUnitary(alpha float64, x []complex128) - -// DscalInc is -// var ix uintptr -// for i := 0; i < int(n); i++ { -// x[ix] = complex(real(x[ix])*alpha, imag(x[ix])*alpha) -// ix += inc -// } -func DscalInc(alpha float64, x []complex128, n, inc uintptr) - -// ScalInc is -// var ix uintptr -// for i := 0; i < int(n); i++ { -// x[ix] *= alpha -// ix += incX -// } -func ScalInc(alpha complex128, x []complex128, n, inc uintptr) - -// ScalUnitary is -// for i := range x { -// x[i] *= alpha -// } -func ScalUnitary(alpha complex128, x []complex128) - -// DotcUnitary is -// for i, v := range x { -// sum += y[i] * cmplx.Conj(v) -// } -// return sum -func DotcUnitary(x, y []complex128) (sum complex128) - -// DotcInc is -// for i := 0; i < int(n); i++ { -// sum += y[iy] * cmplx.Conj(x[ix]) -// ix += incX -// iy += incY -// } -// return sum -func DotcInc(x, y []complex128, n, incX, incY, ix, iy uintptr) (sum complex128) - -// DotuUnitary is -// for i, v := range x { -// sum += y[i] * v -// } -// return sum -func DotuUnitary(x, y []complex128) (sum complex128) - -// DotuInc is -// for i := 0; i < int(n); i++ { -// sum += y[iy] * x[ix] -// ix += incX -// iy += incY -// } -// return sum -func DotuInc(x, y []complex128, n, incX, incY, ix, iy uintptr) (sum complex128) diff --git a/vendor/gonum.org/v1/gonum/internal/asm/c128/stubs_noasm.go b/vendor/gonum.org/v1/gonum/internal/asm/c128/stubs_noasm.go deleted file mode 100644 index 6313e571c..000000000 --- a/vendor/gonum.org/v1/gonum/internal/asm/c128/stubs_noasm.go +++ /dev/null @@ -1,163 +0,0 @@ -// Copyright ©2016 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
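The stubs_amd64.go declarations above pair with the stubs_noasm.go fallbacks below through complementary build tags: the bodiless Go declarations are satisfied by the .s files under !noasm,!appengine,!safe, while non-amd64 targets or a build with -tags safe get the pure-Go versions. A minimal two-file sketch of the pattern, with a hypothetical package and function:

// stubs_amd64.go: body provided by sum_amd64.s (hypothetical).
// +build !noasm,!appengine,!safe

package fastmath

// Sum is implemented in sum_amd64.s.
func Sum(x []float64) float64

// stubs_noasm.go: pure-Go fallback, selected on other platforms or -tags safe.
// +build !amd64 noasm appengine safe

package fastmath

// Sum adds the elements of x.
func Sum(x []float64) (sum float64) {
	for _, v := range x {
		sum += v
	}
	return sum
}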
- -// +build !amd64 noasm appengine safe - -package c128 - -import "math/cmplx" - -// AxpyUnitary is -// for i, v := range x { -// y[i] += alpha * v -// } -func AxpyUnitary(alpha complex128, x, y []complex128) { - for i, v := range x { - y[i] += alpha * v - } -} - -// AxpyUnitaryTo is -// for i, v := range x { -// dst[i] = alpha*v + y[i] -// } -func AxpyUnitaryTo(dst []complex128, alpha complex128, x, y []complex128) { - for i, v := range x { - dst[i] = alpha*v + y[i] - } -} - -// AxpyInc is -// for i := 0; i < int(n); i++ { -// y[iy] += alpha * x[ix] -// ix += incX -// iy += incY -// } -func AxpyInc(alpha complex128, x, y []complex128, n, incX, incY, ix, iy uintptr) { - for i := 0; i < int(n); i++ { - y[iy] += alpha * x[ix] - ix += incX - iy += incY - } -} - -// AxpyIncTo is -// for i := 0; i < int(n); i++ { -// dst[idst] = alpha*x[ix] + y[iy] -// ix += incX -// iy += incY -// idst += incDst -// } -func AxpyIncTo(dst []complex128, incDst, idst uintptr, alpha complex128, x, y []complex128, n, incX, incY, ix, iy uintptr) { - for i := 0; i < int(n); i++ { - dst[idst] = alpha*x[ix] + y[iy] - ix += incX - iy += incY - idst += incDst - } -} - -// DscalUnitary is -// for i, v := range x { -// x[i] = complex(real(v)*alpha, imag(v)*alpha) -// } -func DscalUnitary(alpha float64, x []complex128) { - for i, v := range x { - x[i] = complex(real(v)*alpha, imag(v)*alpha) - } -} - -// DscalInc is -// var ix uintptr -// for i := 0; i < int(n); i++ { -// x[ix] = complex(real(x[ix])*alpha, imag(x[ix])*alpha) -// ix += inc -// } -func DscalInc(alpha float64, x []complex128, n, inc uintptr) { - var ix uintptr - for i := 0; i < int(n); i++ { - x[ix] = complex(real(x[ix])*alpha, imag(x[ix])*alpha) - ix += inc - } -} - -// ScalInc is -// var ix uintptr -// for i := 0; i < int(n); i++ { -// x[ix] *= alpha -// ix += incX -// } -func ScalInc(alpha complex128, x []complex128, n, inc uintptr) { - var ix uintptr - for i := 0; i < int(n); i++ { - x[ix] *= alpha - ix += inc - } -} - -// ScalUnitary is -// for i := range x { -// x[i] *= alpha -// } -func ScalUnitary(alpha complex128, x []complex128) { - for i := range x { - x[i] *= alpha - } -} - -// DotcUnitary is -// for i, v := range x { -// sum += y[i] * cmplx.Conj(v) -// } -// return sum -func DotcUnitary(x, y []complex128) (sum complex128) { - for i, v := range x { - sum += y[i] * cmplx.Conj(v) - } - return sum -} - -// DotcInc is -// for i := 0; i < int(n); i++ { -// sum += y[iy] * cmplx.Conj(x[ix]) -// ix += incX -// iy += incY -// } -// return sum -func DotcInc(x, y []complex128, n, incX, incY, ix, iy uintptr) (sum complex128) { - for i := 0; i < int(n); i++ { - sum += y[iy] * cmplx.Conj(x[ix]) - ix += incX - iy += incY - } - return sum -} - -// DotuUnitary is -// for i, v := range x { -// sum += y[i] * v -// } -// return sum -func DotuUnitary(x, y []complex128) (sum complex128) { - for i, v := range x { - sum += y[i] * v - } - return sum -} - -// DotuInc is -// for i := 0; i < int(n); i++ { -// sum += y[iy] * x[ix] -// ix += incX -// iy += incY -// } -// return sum -func DotuInc(x, y []complex128, n, incX, incY, ix, iy uintptr) (sum complex128) { - for i := 0; i < int(n); i++ { - sum += y[iy] * x[ix] - ix += incX - iy += incY - } - return sum -} diff --git a/vendor/gonum.org/v1/gonum/internal/asm/c64/axpyinc_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/c64/axpyinc_amd64.s deleted file mode 100644 index 5f1051164..000000000 --- a/vendor/gonum.org/v1/gonum/internal/asm/c64/axpyinc_amd64.s +++ /dev/null @@ -1,151 +0,0 @@ -// Copyright ©2016 The Gonum Authors. 
All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !noasm,!appengine,!safe - -#include "textflag.h" - -// MOVSHDUP X3, X2 -#define MOVSHDUP_X3_X2 BYTE $0xF3; BYTE $0x0F; BYTE $0x16; BYTE $0xD3 -// MOVSLDUP X3, X3 -#define MOVSLDUP_X3_X3 BYTE $0xF3; BYTE $0x0F; BYTE $0x12; BYTE $0xDB -// ADDSUBPS X2, X3 -#define ADDSUBPS_X2_X3 BYTE $0xF2; BYTE $0x0F; BYTE $0xD0; BYTE $0xDA - -// MOVSHDUP X5, X4 -#define MOVSHDUP_X5_X4 BYTE $0xF3; BYTE $0x0F; BYTE $0x16; BYTE $0xE5 -// MOVSLDUP X5, X5 -#define MOVSLDUP_X5_X5 BYTE $0xF3; BYTE $0x0F; BYTE $0x12; BYTE $0xED -// ADDSUBPS X4, X5 -#define ADDSUBPS_X4_X5 BYTE $0xF2; BYTE $0x0F; BYTE $0xD0; BYTE $0xEC - -// MOVSHDUP X7, X6 -#define MOVSHDUP_X7_X6 BYTE $0xF3; BYTE $0x0F; BYTE $0x16; BYTE $0xF7 -// MOVSLDUP X7, X7 -#define MOVSLDUP_X7_X7 BYTE $0xF3; BYTE $0x0F; BYTE $0x12; BYTE $0xFF -// ADDSUBPS X6, X7 -#define ADDSUBPS_X6_X7 BYTE $0xF2; BYTE $0x0F; BYTE $0xD0; BYTE $0xFE - -// MOVSHDUP X9, X8 -#define MOVSHDUP_X9_X8 BYTE $0xF3; BYTE $0x45; BYTE $0x0F; BYTE $0x16; BYTE $0xC1 -// MOVSLDUP X9, X9 -#define MOVSLDUP_X9_X9 BYTE $0xF3; BYTE $0x45; BYTE $0x0F; BYTE $0x12; BYTE $0xC9 -// ADDSUBPS X8, X9 -#define ADDSUBPS_X8_X9 BYTE $0xF2; BYTE $0x45; BYTE $0x0F; BYTE $0xD0; BYTE $0xC8 - -// func AxpyInc(alpha complex64, x, y []complex64, n, incX, incY, ix, iy uintptr) -TEXT ·AxpyInc(SB), NOSPLIT, $0 - MOVQ x_base+8(FP), SI // SI = &x - MOVQ y_base+32(FP), DI // DI = &y - MOVQ n+56(FP), CX // CX = n - CMPQ CX, $0 // if n==0 { return } - JE axpyi_end - MOVQ ix+80(FP), R8 // R8 = ix - MOVQ iy+88(FP), R9 // R9 = iy - LEAQ (SI)(R8*8), SI // SI = &(x[ix]) - LEAQ (DI)(R9*8), DI // DI = &(y[iy]) - MOVQ DI, DX // DX = DI // Read/Write pointers - MOVQ incX+64(FP), R8 // R8 = incX - SHLQ $3, R8 // R8 *= sizeof(complex64) - MOVQ incY+72(FP), R9 // R9 = incY - SHLQ $3, R9 // R9 *= sizeof(complex64) - MOVSD alpha+0(FP), X0 // X0 = { 0, 0, imag(a), real(a) } - MOVAPS X0, X1 - SHUFPS $0x11, X1, X1 // X1 = { 0, 0, real(a), imag(a) } - MOVAPS X0, X10 // Copy X0 and X1 for pipelining - MOVAPS X1, X11 - MOVQ CX, BX - ANDQ $3, CX // CX = n % 4 - SHRQ $2, BX // BX = floor( n / 4 ) - JZ axpyi_tail // if BX == 0 { goto axpyi_tail } - -axpyi_loop: // do { - MOVSD (SI), X3 // X_i = { imag(x[i+1]), real(x[i+1]) } - MOVSD (SI)(R8*1), X5 - LEAQ (SI)(R8*2), SI // SI = &(SI[incX*2]) - MOVSD (SI), X7 - MOVSD (SI)(R8*1), X9 - - // X_(i-1) = { imag(x[i]), imag(x[i]) } - MOVSHDUP_X3_X2 - MOVSHDUP_X5_X4 - MOVSHDUP_X7_X6 - MOVSHDUP_X9_X8 - - // X_i = { real(x[i]), real(x[i]) } - MOVSLDUP_X3_X3 - MOVSLDUP_X5_X5 - MOVSLDUP_X7_X7 - MOVSLDUP_X9_X9 - - // X_(i-1) = { real(a) * imag(x[i]), imag(a) * imag(x[i]) } - // X_i = { imag(a) * real(x[i]), real(a) * real(x[i]) } - MULPS X1, X2 - MULPS X0, X3 - MULPS X11, X4 - MULPS X10, X5 - MULPS X1, X6 - MULPS X0, X7 - MULPS X11, X8 - MULPS X10, X9 - - // X_i = { - // imag(result[i]): imag(a)*real(x[i]) + real(a)*imag(x[i]), - // real(result[i]): real(a)*real(x[i]) - imag(a)*imag(x[i]), - // } - ADDSUBPS_X2_X3 - ADDSUBPS_X4_X5 - ADDSUBPS_X6_X7 - ADDSUBPS_X8_X9 - - // X_i = { imag(result[i]) + imag(y[i]), real(result[i]) + real(y[i]) } - MOVSD (DX), X2 - MOVSD (DX)(R9*1), X4 - LEAQ (DX)(R9*2), DX // DX = &(DX[incY*2]) - MOVSD (DX), X6 - MOVSD (DX)(R9*1), X8 - ADDPS X2, X3 - ADDPS X4, X5 - ADDPS X6, X7 - ADDPS X8, X9 - - MOVSD X3, (DI) // y[i] = X_i - MOVSD X5, (DI)(R9*1) - LEAQ (DI)(R9*2), DI // DI = &(DI[incDst]) - MOVSD X7, (DI) - MOVSD X9, (DI)(R9*1) - LEAQ (SI)(R8*2), 
SI // SI = &(SI[incX*2]) - LEAQ (DX)(R9*2), DX // DX = &(DX[incY*2]) - LEAQ (DI)(R9*2), DI // DI = &(DI[incDst]) - DECQ BX - JNZ axpyi_loop // } while --BX > 0 - CMPQ CX, $0 // if CX == 0 { return } - JE axpyi_end - -axpyi_tail: // do { - MOVSD (SI), X3 // X_i = { imag(x[i+1]), real(x[i+1]) } - MOVSHDUP_X3_X2 // X_(i-1) = { real(x[i]), real(x[i]) } - MOVSLDUP_X3_X3 // X_i = { imag(x[i]), imag(x[i]) } - - // X_i = { imag(a) * real(x[i]), real(a) * real(x[i]) } - // X_(i-1) = { real(a) * imag(x[i]), imag(a) * imag(x[i]) } - MULPS X1, X2 - MULPS X0, X3 - - // X_i = { - // imag(result[i]): imag(a)*real(x[i]) + real(a)*imag(x[i]), - // real(result[i]): real(a)*real(x[i]) - imag(a)*imag(x[i]), - // } - ADDSUBPS_X2_X3 // (ai*x1r+ar*x1i, ar*x1r-ai*x1i) - - // X_i = { imag(result[i]) + imag(y[i]), real(result[i]) + real(y[i]) } - MOVSD (DI), X4 - ADDPS X4, X3 - MOVSD X3, (DI) // y[i] = X_i - ADDQ R8, SI // SI += incX - ADDQ R9, DI // DI += incY - LOOP axpyi_tail // } while --CX > 0 - -axpyi_end: - RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/c64/axpyincto_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/c64/axpyincto_amd64.s deleted file mode 100644 index 5b0e2848a..000000000 --- a/vendor/gonum.org/v1/gonum/internal/asm/c64/axpyincto_amd64.s +++ /dev/null @@ -1,156 +0,0 @@ -// Copyright ©2016 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !noasm,!appengine,!safe - -#include "textflag.h" - -// MOVSHDUP X3, X2 -#define MOVSHDUP_X3_X2 BYTE $0xF3; BYTE $0x0F; BYTE $0x16; BYTE $0xD3 -// MOVSLDUP X3, X3 -#define MOVSLDUP_X3_X3 BYTE $0xF3; BYTE $0x0F; BYTE $0x12; BYTE $0xDB -// ADDSUBPS X2, X3 -#define ADDSUBPS_X2_X3 BYTE $0xF2; BYTE $0x0F; BYTE $0xD0; BYTE $0xDA - -// MOVSHDUP X5, X4 -#define MOVSHDUP_X5_X4 BYTE $0xF3; BYTE $0x0F; BYTE $0x16; BYTE $0xE5 -// MOVSLDUP X5, X5 -#define MOVSLDUP_X5_X5 BYTE $0xF3; BYTE $0x0F; BYTE $0x12; BYTE $0xED -// ADDSUBPS X4, X5 -#define ADDSUBPS_X4_X5 BYTE $0xF2; BYTE $0x0F; BYTE $0xD0; BYTE $0xEC - -// MOVSHDUP X7, X6 -#define MOVSHDUP_X7_X6 BYTE $0xF3; BYTE $0x0F; BYTE $0x16; BYTE $0xF7 -// MOVSLDUP X7, X7 -#define MOVSLDUP_X7_X7 BYTE $0xF3; BYTE $0x0F; BYTE $0x12; BYTE $0xFF -// ADDSUBPS X6, X7 -#define ADDSUBPS_X6_X7 BYTE $0xF2; BYTE $0x0F; BYTE $0xD0; BYTE $0xFE - -// MOVSHDUP X9, X8 -#define MOVSHDUP_X9_X8 BYTE $0xF3; BYTE $0x45; BYTE $0x0F; BYTE $0x16; BYTE $0xC1 -// MOVSLDUP X9, X9 -#define MOVSLDUP_X9_X9 BYTE $0xF3; BYTE $0x45; BYTE $0x0F; BYTE $0x12; BYTE $0xC9 -// ADDSUBPS X8, X9 -#define ADDSUBPS_X8_X9 BYTE $0xF2; BYTE $0x45; BYTE $0x0F; BYTE $0xD0; BYTE $0xC8 - -// func AxpyIncTo(dst []complex64, incDst, idst uintptr, alpha complex64, x, y []complex64, n, incX, incY, ix, iy uintptr) -TEXT ·AxpyIncTo(SB), NOSPLIT, $0 - MOVQ dst_base+0(FP), DI // DI = &dst - MOVQ x_base+48(FP), SI // SI = &x - MOVQ y_base+72(FP), DX // DX = &y - MOVQ n+96(FP), CX // CX = n - CMPQ CX, $0 // if n==0 { return } - JE axpyi_end - MOVQ ix+120(FP), R8 // Load the first index - MOVQ iy+128(FP), R9 - MOVQ idst+32(FP), R10 - LEAQ (SI)(R8*8), SI // SI = &(x[ix]) - LEAQ (DX)(R9*8), DX // DX = &(y[iy]) - LEAQ (DI)(R10*8), DI // DI = &(dst[idst]) - MOVQ incX+104(FP), R8 // Incrementors*8 for easy iteration (ADDQ) - SHLQ $3, R8 - MOVQ incY+112(FP), R9 - SHLQ $3, R9 - MOVQ incDst+24(FP), R10 - SHLQ $3, R10 - MOVSD alpha+40(FP), X0 // X0 = { 0, 0, imag(a), real(a) } - MOVAPS X0, X1 - SHUFPS $0x11, X1, X1 // X1 = { 0, 0, real(a), imag(a) } - MOVAPS X0, X10 // Copy X0 and 
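The AxpyInc kernel deleted above has the reference semantics given in its stub doc comment: a strided y[iy] += alpha * x[ix]. A hypothetical pure-Go rendering (axpyIncRef is an illustrative name):

package c64ref

// axpyIncRef matches the doc comment on the assembly stub. The assembly
// pre-scales incX and incY to byte strides with SHLQ $3, since
// sizeof(complex64) == 8.
func axpyIncRef(alpha complex64, x, y []complex64, n, incX, incY, ix, iy uintptr) {
	for i := 0; i < int(n); i++ {
		y[iy] += alpha * x[ix]
		ix += incX
		iy += incY
	}
}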
X1 for pipelining - MOVAPS X1, X11 - MOVQ CX, BX - ANDQ $3, CX // CX = n % 4 - SHRQ $2, BX // BX = floor( n / 4 ) - JZ axpyi_tail // if BX == 0 { goto axpyi_tail } - -axpyi_loop: // do { - MOVSD (SI), X3 // X_i = { imag(x[i]), real(x[i]) } - MOVSD (SI)(R8*1), X5 - LEAQ (SI)(R8*2), SI // SI = &(SI[incX*2]) - MOVSD (SI), X7 - MOVSD (SI)(R8*1), X9 - - // X_(i-1) = { imag(x[i]), imag(x[i]) } - MOVSHDUP_X3_X2 - MOVSHDUP_X5_X4 - MOVSHDUP_X7_X6 - MOVSHDUP_X9_X8 - - // X_i = { real(x[i]), real(x[i]) } - MOVSLDUP_X3_X3 - MOVSLDUP_X5_X5 - MOVSLDUP_X7_X7 - MOVSLDUP_X9_X9 - - // X_(i-1) = { real(a) * imag(x[i]), imag(a) * imag(x[i]) } - // X_i = { imag(a) * real(x[i]), real(a) * real(x[i]) } - MULPS X1, X2 - MULPS X0, X3 - MULPS X11, X4 - MULPS X10, X5 - MULPS X1, X6 - MULPS X0, X7 - MULPS X11, X8 - MULPS X10, X9 - - // X_i = { - // imag(result[i]): imag(a)*real(x[i]) + real(a)*imag(x[i]), - // real(result[i]): real(a)*real(x[i]) - imag(a)*imag(x[i]), - // } - ADDSUBPS_X2_X3 - ADDSUBPS_X4_X5 - ADDSUBPS_X6_X7 - ADDSUBPS_X8_X9 - - // X_i = { imag(result[i]) + imag(y[i]), real(result[i]) + real(y[i]) } - MOVSD (DX), X2 - MOVSD (DX)(R9*1), X4 - LEAQ (DX)(R9*2), DX // DX = &(DX[incY*2]) - MOVSD (DX), X6 - MOVSD (DX)(R9*1), X8 - ADDPS X2, X3 - ADDPS X4, X5 - ADDPS X6, X7 - ADDPS X8, X9 - - MOVSD X3, (DI) // y[i] = X_i - MOVSD X5, (DI)(R10*1) - LEAQ (DI)(R10*2), DI // DI = &(DI[incDst]) - MOVSD X7, (DI) - MOVSD X9, (DI)(R10*1) - LEAQ (SI)(R8*2), SI // SI = &(SI[incX*2]) - LEAQ (DX)(R9*2), DX // DX = &(DX[incY*2]) - LEAQ (DI)(R10*2), DI // DI = &(DI[incDst]) - DECQ BX - JNZ axpyi_loop // } while --BX > 0 - CMPQ CX, $0 // if CX == 0 { return } - JE axpyi_end - -axpyi_tail: - MOVSD (SI), X3 // X_i = { imag(x[i]), real(x[i]) } - MOVSHDUP_X3_X2 // X_(i-1) = { imag(x[i]), imag(x[i]) } - MOVSLDUP_X3_X3 // X_i = { real(x[i]), real(x[i]) } - - // X_i = { imag(a) * real(x[i]), real(a) * real(x[i]) } - // X_(i-1) = { real(a) * imag(x[i]), imag(a) * imag(x[i]) } - MULPS X1, X2 - MULPS X0, X3 - - // X_i = { - // imag(result[i]): imag(a)*real(x[i]) + real(a)*imag(x[i]), - // real(result[i]): real(a)*real(x[i]) - imag(a)*imag(x[i]), - // } - ADDSUBPS_X2_X3 - - // X_i = { imag(result[i]) + imag(y[i]), real(result[i]) + real(y[i]) } - MOVSD (DX), X4 - ADDPS X4, X3 - MOVSD X3, (DI) // y[i] = X_i - ADDQ R8, SI // SI += incX - ADDQ R9, DX // DX += incY - ADDQ R10, DI // DI += incDst - LOOP axpyi_tail // } while --CX > 0 - -axpyi_end: - RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/c64/axpyunitary_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/c64/axpyunitary_amd64.s deleted file mode 100644 index c38cb3c50..000000000 --- a/vendor/gonum.org/v1/gonum/internal/asm/c64/axpyunitary_amd64.s +++ /dev/null @@ -1,160 +0,0 @@ -// Copyright ©2016 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
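Both axpy kernels above form alpha*x[i] from two MULPS products, one against alpha as loaded and one against a SHUFPS-swapped copy, folded with a single add-subtract. A scalar Go spelling of that sequence (cmulRef is a hypothetical helper, not part of this patch):

package c64ref

// cmulRef spells out the MULPS/ADDSUBPS sequence used to compute alpha*x.
func cmulRef(alpha, x complex64) complex64 {
	ar, ai := real(alpha), imag(alpha)
	xr, xi := real(x), imag(x)
	return complex(
		ar*xr-ai*xi, // ADDSUBPS subtract slot: real(result)
		ai*xr+ar*xi, // ADDSUBPS add slot: imag(result)
	)
}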
- -// +build !noasm,!appengine,!safe - -#include "textflag.h" - -// MOVSHDUP X3, X2 -#define MOVSHDUP_X3_X2 BYTE $0xF3; BYTE $0x0F; BYTE $0x16; BYTE $0xD3 -// MOVSLDUP X3, X3 -#define MOVSLDUP_X3_X3 BYTE $0xF3; BYTE $0x0F; BYTE $0x12; BYTE $0xDB -// ADDSUBPS X2, X3 -#define ADDSUBPS_X2_X3 BYTE $0xF2; BYTE $0x0F; BYTE $0xD0; BYTE $0xDA - -// MOVSHDUP X5, X4 -#define MOVSHDUP_X5_X4 BYTE $0xF3; BYTE $0x0F; BYTE $0x16; BYTE $0xE5 -// MOVSLDUP X5, X5 -#define MOVSLDUP_X5_X5 BYTE $0xF3; BYTE $0x0F; BYTE $0x12; BYTE $0xED -// ADDSUBPS X4, X5 -#define ADDSUBPS_X4_X5 BYTE $0xF2; BYTE $0x0F; BYTE $0xD0; BYTE $0xEC - -// MOVSHDUP X7, X6 -#define MOVSHDUP_X7_X6 BYTE $0xF3; BYTE $0x0F; BYTE $0x16; BYTE $0xF7 -// MOVSLDUP X7, X7 -#define MOVSLDUP_X7_X7 BYTE $0xF3; BYTE $0x0F; BYTE $0x12; BYTE $0xFF -// ADDSUBPS X6, X7 -#define ADDSUBPS_X6_X7 BYTE $0xF2; BYTE $0x0F; BYTE $0xD0; BYTE $0xFE - -// MOVSHDUP X9, X8 -#define MOVSHDUP_X9_X8 BYTE $0xF3; BYTE $0x45; BYTE $0x0F; BYTE $0x16; BYTE $0xC1 -// MOVSLDUP X9, X9 -#define MOVSLDUP_X9_X9 BYTE $0xF3; BYTE $0x45; BYTE $0x0F; BYTE $0x12; BYTE $0xC9 -// ADDSUBPS X8, X9 -#define ADDSUBPS_X8_X9 BYTE $0xF2; BYTE $0x45; BYTE $0x0F; BYTE $0xD0; BYTE $0xC8 - -// func AxpyUnitary(alpha complex64, x, y []complex64) -TEXT ·AxpyUnitary(SB), NOSPLIT, $0 - MOVQ x_base+8(FP), SI // SI = &x - MOVQ y_base+32(FP), DI // DI = &y - MOVQ x_len+16(FP), CX // CX = min( len(x), len(y) ) - CMPQ y_len+40(FP), CX - CMOVQLE y_len+40(FP), CX - CMPQ CX, $0 // if CX == 0 { return } - JE caxy_end - PXOR X0, X0 // Clear work registers and cache-align loop - PXOR X1, X1 - MOVSD alpha+0(FP), X0 // X0 = { 0, 0, imag(a), real(a) } - SHUFPD $0, X0, X0 // X0 = { imag(a), real(a), imag(a), real(a) } - MOVAPS X0, X1 - SHUFPS $0x11, X1, X1 // X1 = { real(a), imag(a), real(a), imag(a) } - XORQ AX, AX // i = 0 - MOVQ DI, BX // Align on 16-byte boundary for ADDPS - ANDQ $15, BX // BX = &y & 15 - JZ caxy_no_trim // if BX == 0 { goto caxy_no_trim } - - // Trim first value in unaligned buffer - XORPS X2, X2 // Clear work registers and cache-align loop - XORPS X3, X3 - XORPS X4, X4 - MOVSD (SI)(AX*8), X3 // X3 = { imag(x[i]), real(x[i]) } - MOVSHDUP_X3_X2 // X2 = { imag(x[i]), imag(x[i]) } - MOVSLDUP_X3_X3 // X3 = { real(x[i]), real(x[i]) } - MULPS X1, X2 // X2 = { real(a) * imag(x[i]), imag(a) * imag(x[i]) } - MULPS X0, X3 // X3 = { imag(a) * real(x[i]), real(a) * real(x[i]) } - - // X3 = { imag(a)*real(x[i]) + real(a)*imag(x[i]), real(a)*real(x[i]) - imag(a)*imag(x[i]) } - ADDSUBPS_X2_X3 - MOVSD (DI)(AX*8), X4 // X3 += y[i] - ADDPS X4, X3 - MOVSD X3, (DI)(AX*8) // y[i] = X3 - INCQ AX // i++ - DECQ CX // --CX - JZ caxy_end // if CX == 0 { return } - -caxy_no_trim: - MOVAPS X0, X10 // Copy X0 and X1 for pipelineing - MOVAPS X1, X11 - MOVQ CX, BX - ANDQ $7, CX // CX = n % 8 - SHRQ $3, BX // BX = floor( n / 8 ) - JZ caxy_tail // if BX == 0 { goto caxy_tail } - -caxy_loop: // do { - // X_i = { imag(x[i]), real(x[i]), imag(x[i+1]), real(x[i+1]) } - MOVUPS (SI)(AX*8), X3 - MOVUPS 16(SI)(AX*8), X5 - MOVUPS 32(SI)(AX*8), X7 - MOVUPS 48(SI)(AX*8), X9 - - // X_(i-1) = { imag(x[i]), imag(x[i]), imag(x[i]+1), imag(x[i]+1) } - MOVSHDUP_X3_X2 - MOVSHDUP_X5_X4 - MOVSHDUP_X7_X6 - MOVSHDUP_X9_X8 - - // X_i = { real(x[i]), real(x[i]), real(x[i+1]), real(x[i+1]) } - MOVSLDUP_X3_X3 - MOVSLDUP_X5_X5 - MOVSLDUP_X7_X7 - MOVSLDUP_X9_X9 - - // X_i = { imag(a) * real(x[i]), real(a) * real(x[i]), - // imag(a) * real(x[i+1]), real(a) * real(x[i+1]) } - // X_(i-1) = { real(a) * imag(x[i]), imag(a) * imag(x[i]), - // real(a) * 
imag(x[i+1]), imag(a) * imag(x[i+1]) } - MULPS X1, X2 - MULPS X0, X3 - MULPS X11, X4 - MULPS X10, X5 - MULPS X1, X6 - MULPS X0, X7 - MULPS X11, X8 - MULPS X10, X9 - - // X_i = { - // imag(result[i]): imag(a)*real(x[i]) + real(a)*imag(x[i]), - // real(result[i]): real(a)*real(x[i]) - imag(a)*imag(x[i]), - // imag(result[i+1]): imag(a)*real(x[i+1]) + real(a)*imag(x[i+1]), - // real(result[i+1]): real(a)*real(x[i+1]) - imag(a)*imag(x[i+1]), - // } - ADDSUBPS_X2_X3 - ADDSUBPS_X4_X5 - ADDSUBPS_X6_X7 - ADDSUBPS_X8_X9 - - // X_i = { imag(result[i]) + imag(y[i]), real(result[i]) + real(y[i]), - // imag(result[i+1]) + imag(y[i+1]), real(result[i+1]) + real(y[i+1]) } - ADDPS (DI)(AX*8), X3 - ADDPS 16(DI)(AX*8), X5 - ADDPS 32(DI)(AX*8), X7 - ADDPS 48(DI)(AX*8), X9 - MOVUPS X3, (DI)(AX*8) // y[i:i+1] = X_i - MOVUPS X5, 16(DI)(AX*8) - MOVUPS X7, 32(DI)(AX*8) - MOVUPS X9, 48(DI)(AX*8) - ADDQ $8, AX // i += 8 - DECQ BX // --BX - JNZ caxy_loop // } while BX > 0 - CMPQ CX, $0 // if CX == 0 { return } - JE caxy_end - -caxy_tail: // do { - MOVSD (SI)(AX*8), X3 // X3 = { imag(x[i]), real(x[i]) } - MOVSHDUP_X3_X2 // X2 = { imag(x[i]), imag(x[i]) } - MOVSLDUP_X3_X3 // X3 = { real(x[i]), real(x[i]) } - MULPS X1, X2 // X2 = { real(a) * imag(x[i]), imag(a) * imag(x[i]) } - MULPS X0, X3 // X3 = { imag(a) * real(x[i]), real(a) * real(x[i]) } - - // X3 = { imag(a)*real(x[i]) + real(a)*imag(x[i]), - // real(a)*real(x[i]) - imag(a)*imag(x[i]) } - ADDSUBPS_X2_X3 - MOVSD (DI)(AX*8), X4 // X3 += y[i] - ADDPS X4, X3 - MOVSD X3, (DI)(AX*8) // y[i] = X3 - INCQ AX // ++i - LOOP caxy_tail // } while --CX > 0 - -caxy_end: - RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/c64/axpyunitaryto_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/c64/axpyunitaryto_amd64.s deleted file mode 100644 index fee4bb94f..000000000 --- a/vendor/gonum.org/v1/gonum/internal/asm/c64/axpyunitaryto_amd64.s +++ /dev/null @@ -1,157 +0,0 @@ -// Copyright ©2016 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
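The "trim first value in unaligned buffer" step in AxpyUnitary above exists because the unrolled loop adds y with memory-operand ADDPS, which requires 16-byte-aligned addresses; one element is peeled whenever &y is misaligned. A hypothetical Go helper showing the check behind the ANDQ $15 test:

package c64ref

import "unsafe"

// misalignment reports how far &y[0] is from a 16-byte boundary.
func misalignment(y []complex64) uintptr {
	if len(y) == 0 {
		return 0
	}
	return uintptr(unsafe.Pointer(&y[0])) & 15 // ANDQ $15, BX in the kernel
}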
- -// +build !noasm,!appengine,!safe - -#include "textflag.h" - -// MOVSHDUP X3, X2 -#define MOVSHDUP_X3_X2 BYTE $0xF3; BYTE $0x0F; BYTE $0x16; BYTE $0xD3 -// MOVSLDUP X3, X3 -#define MOVSLDUP_X3_X3 BYTE $0xF3; BYTE $0x0F; BYTE $0x12; BYTE $0xDB -// ADDSUBPS X2, X3 -#define ADDSUBPS_X2_X3 BYTE $0xF2; BYTE $0x0F; BYTE $0xD0; BYTE $0xDA - -// MOVSHDUP X5, X4 -#define MOVSHDUP_X5_X4 BYTE $0xF3; BYTE $0x0F; BYTE $0x16; BYTE $0xE5 -// MOVSLDUP X5, X5 -#define MOVSLDUP_X5_X5 BYTE $0xF3; BYTE $0x0F; BYTE $0x12; BYTE $0xED -// ADDSUBPS X4, X5 -#define ADDSUBPS_X4_X5 BYTE $0xF2; BYTE $0x0F; BYTE $0xD0; BYTE $0xEC - -// MOVSHDUP X7, X6 -#define MOVSHDUP_X7_X6 BYTE $0xF3; BYTE $0x0F; BYTE $0x16; BYTE $0xF7 -// MOVSLDUP X7, X7 -#define MOVSLDUP_X7_X7 BYTE $0xF3; BYTE $0x0F; BYTE $0x12; BYTE $0xFF -// ADDSUBPS X6, X7 -#define ADDSUBPS_X6_X7 BYTE $0xF2; BYTE $0x0F; BYTE $0xD0; BYTE $0xFE - -// MOVSHDUP X9, X8 -#define MOVSHDUP_X9_X8 BYTE $0xF3; BYTE $0x45; BYTE $0x0F; BYTE $0x16; BYTE $0xC1 -// MOVSLDUP X9, X9 -#define MOVSLDUP_X9_X9 BYTE $0xF3; BYTE $0x45; BYTE $0x0F; BYTE $0x12; BYTE $0xC9 -// ADDSUBPS X8, X9 -#define ADDSUBPS_X8_X9 BYTE $0xF2; BYTE $0x45; BYTE $0x0F; BYTE $0xD0; BYTE $0xC8 - -// func AxpyUnitaryTo(dst []complex64, alpha complex64, x, y []complex64) -TEXT ·AxpyUnitaryTo(SB), NOSPLIT, $0 - MOVQ dst_base+0(FP), DI // DI = &dst - MOVQ x_base+32(FP), SI // SI = &x - MOVQ y_base+56(FP), DX // DX = &y - MOVQ x_len+40(FP), CX - CMPQ y_len+64(FP), CX // CX = min( len(x), len(y), len(dst) ) - CMOVQLE y_len+64(FP), CX - CMPQ dst_len+8(FP), CX - CMOVQLE dst_len+8(FP), CX - CMPQ CX, $0 // if CX == 0 { return } - JE caxy_end - MOVSD alpha+24(FP), X0 // X0 = { 0, 0, imag(a), real(a) } - SHUFPD $0, X0, X0 // X0 = { imag(a), real(a), imag(a), real(a) } - MOVAPS X0, X1 - SHUFPS $0x11, X1, X1 // X1 = { real(a), imag(a), real(a), imag(a) } - XORQ AX, AX // i = 0 - MOVQ DX, BX // Align on 16-byte boundary for ADDPS - ANDQ $15, BX // BX = &y & 15 - JZ caxy_no_trim // if BX == 0 { goto caxy_no_trim } - - MOVSD (SI)(AX*8), X3 // X3 = { imag(x[i]), real(x[i]) } - MOVSHDUP_X3_X2 // X2 = { imag(x[i]), imag(x[i]) } - MOVSLDUP_X3_X3 // X3 = { real(x[i]), real(x[i]) } - MULPS X1, X2 // X2 = { real(a) * imag(x[i]), imag(a) * imag(x[i]) } - MULPS X0, X3 // X3 = { imag(a) * real(x[i]), real(a) * real(x[i]) } - - // X3 = { imag(a)*real(x[i]) + real(a)*imag(x[i]), real(a)*real(x[i]) - imag(a)*imag(x[i]) } - ADDSUBPS_X2_X3 - MOVSD (DX)(AX*8), X4 // X3 += y[i] - ADDPS X4, X3 - MOVSD X3, (DI)(AX*8) // dst[i] = X3 - INCQ AX // i++ - DECQ CX // --CX - JZ caxy_tail // if BX == 0 { goto caxy_tail } - -caxy_no_trim: - MOVAPS X0, X10 // Copy X0 and X1 for pipelineing - MOVAPS X1, X11 - MOVQ CX, BX - ANDQ $7, CX // CX = n % 8 - SHRQ $3, BX // BX = floor( n / 8 ) - JZ caxy_tail // if BX == 0 { goto caxy_tail } - -caxy_loop: - // X_i = { imag(x[i]), real(x[i]), imag(x[i+1]), real(x[i+1]) } - MOVUPS (SI)(AX*8), X3 - MOVUPS 16(SI)(AX*8), X5 - MOVUPS 32(SI)(AX*8), X7 - MOVUPS 48(SI)(AX*8), X9 - - // X_(i-1) = { imag(x[i]), imag(x[i]), imag(x[i]+1), imag(x[i]+1) } - MOVSHDUP_X3_X2 - MOVSHDUP_X5_X4 - MOVSHDUP_X7_X6 - MOVSHDUP_X9_X8 - - // X_i = { real(x[i]), real(x[i]), real(x[i+1]), real(x[i+1]) } - MOVSLDUP_X3_X3 - MOVSLDUP_X5_X5 - MOVSLDUP_X7_X7 - MOVSLDUP_X9_X9 - - // X_i = { imag(a) * real(x[i]), real(a) * real(x[i]), - // imag(a) * real(x[i+1]), real(a) * real(x[i+1]) } - // X_(i-1) = { real(a) * imag(x[i]), imag(a) * imag(x[i]), - // real(a) * imag(x[i+1]), imag(a) * imag(x[i+1]) } - MULPS X1, X2 - MULPS X0, X3 - MULPS X11, X4 
- MULPS X10, X5 - MULPS X1, X6 - MULPS X0, X7 - MULPS X11, X8 - MULPS X10, X9 - - // X_i = { - // imag(result[i]): imag(a)*real(x[i]) + real(a)*imag(x[i]), - // real(result[i]): real(a)*real(x[i]) - imag(a)*imag(x[i]), - // imag(result[i+1]): imag(a)*real(x[i+1]) + real(a)*imag(x[i+1]), - // real(result[i+1]): real(a)*real(x[i+1]) - imag(a)*imag(x[i+1]), - // } - ADDSUBPS_X2_X3 - ADDSUBPS_X4_X5 - ADDSUBPS_X6_X7 - ADDSUBPS_X8_X9 - - // X_i = { imag(result[i]) + imag(y[i]), real(result[i]) + real(y[i]), - // imag(result[i+1]) + imag(y[i+1]), real(result[i+1]) + real(y[i+1]) } - ADDPS (DX)(AX*8), X3 - ADDPS 16(DX)(AX*8), X5 - ADDPS 32(DX)(AX*8), X7 - ADDPS 48(DX)(AX*8), X9 - MOVUPS X3, (DI)(AX*8) // y[i:i+1] = X_i - MOVUPS X5, 16(DI)(AX*8) - MOVUPS X7, 32(DI)(AX*8) - MOVUPS X9, 48(DI)(AX*8) - ADDQ $8, AX // i += 8 - DECQ BX // --BX - JNZ caxy_loop // } while BX > 0 - CMPQ CX, $0 // if CX == 0 { return } - JE caxy_end - -caxy_tail: // do { - MOVSD (SI)(AX*8), X3 // X3 = { imag(x[i]), real(x[i]) } - MOVSHDUP_X3_X2 // X2 = { imag(x[i]), imag(x[i]) } - MOVSLDUP_X3_X3 // X3 = { real(x[i]), real(x[i]) } - MULPS X1, X2 // X2 = { real(a) * imag(x[i]), imag(a) * imag(x[i]) } - MULPS X0, X3 // X3 = { imag(a) * real(x[i]), real(a) * real(x[i]) } - - // X3 = { imag(a)*real(x[i]) + real(a)*imag(x[i]), - // real(a)*real(x[i]) - imag(a)*imag(x[i]) } - ADDSUBPS_X2_X3 - MOVSD (DX)(AX*8), X4 // X3 += y[i] - ADDPS X4, X3 - MOVSD X3, (DI)(AX*8) // y[i] = X3 - INCQ AX // ++i - LOOP caxy_tail // } while --CX > 0 - -caxy_end: - RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/c64/conj.go b/vendor/gonum.org/v1/gonum/internal/asm/c64/conj.go deleted file mode 100644 index 910e1e5c7..000000000 --- a/vendor/gonum.org/v1/gonum/internal/asm/c64/conj.go +++ /dev/null @@ -1,7 +0,0 @@ -// Copyright ©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package c64 - -func conj(c complex64) complex64 { return complex(real(c), -imag(c)) } diff --git a/vendor/gonum.org/v1/gonum/internal/asm/c64/doc.go b/vendor/gonum.org/v1/gonum/internal/asm/c64/doc.go deleted file mode 100644 index 35f1b2a26..000000000 --- a/vendor/gonum.org/v1/gonum/internal/asm/c64/doc.go +++ /dev/null @@ -1,6 +0,0 @@ -// Copyright ©2017 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package c64 provides complex64 vector primitives. -package c64 // import "gonum.org/v1/gonum/internal/asm/c64" diff --git a/vendor/gonum.org/v1/gonum/internal/asm/c64/dotcinc_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/c64/dotcinc_amd64.s deleted file mode 100644 index 2161643cd..000000000 --- a/vendor/gonum.org/v1/gonum/internal/asm/c64/dotcinc_amd64.s +++ /dev/null @@ -1,160 +0,0 @@ -// Copyright ©2016 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
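AxpyUnitaryTo above clamps its trip count to the shortest of the three slices with a CMOVQLE chain before touching memory. The equivalent Go guard (minLen is a hypothetical helper name):

package c64ref

// minLen mirrors the CMOVQLE prologue of AxpyUnitaryTo.
func minLen(dst, x, y []complex64) int {
	n := len(x)
	if len(y) < n {
		n = len(y)
	}
	if len(dst) < n {
		n = len(dst)
	}
	return n
}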
- -// +build !noasm,!appengine,!safe - -#include "textflag.h" - -#define MOVSHDUP_X3_X2 LONG $0xD3160FF3 // MOVSHDUP X3, X2 -#define MOVSHDUP_X5_X4 LONG $0xE5160FF3 // MOVSHDUP X5, X4 -#define MOVSHDUP_X7_X6 LONG $0xF7160FF3 // MOVSHDUP X7, X6 -#define MOVSHDUP_X9_X8 LONG $0x160F45F3; BYTE $0xC1 // MOVSHDUP X9, X8 - -#define MOVSLDUP_X3_X3 LONG $0xDB120FF3 // MOVSLDUP X3, X3 -#define MOVSLDUP_X5_X5 LONG $0xED120FF3 // MOVSLDUP X5, X5 -#define MOVSLDUP_X7_X7 LONG $0xFF120FF3 // MOVSLDUP X7, X7 -#define MOVSLDUP_X9_X9 LONG $0x120F45F3; BYTE $0xC9 // MOVSLDUP X9, X9 - -#define ADDSUBPS_X2_X3 LONG $0xDAD00FF2 // ADDSUBPS X2, X3 -#define ADDSUBPS_X4_X5 LONG $0xECD00FF2 // ADDSUBPS X4, X5 -#define ADDSUBPS_X6_X7 LONG $0xFED00FF2 // ADDSUBPS X6, X7 -#define ADDSUBPS_X8_X9 LONG $0xD00F45F2; BYTE $0xC8 // ADDSUBPS X8, X9 - -#define X_PTR SI -#define Y_PTR DI -#define LEN CX -#define TAIL BX -#define SUM X0 -#define P_SUM X1 -#define INC_X R8 -#define INCx3_X R9 -#define INC_Y R10 -#define INCx3_Y R11 -#define NEG1 X15 -#define P_NEG1 X14 - -// func DotcInc(x, y []complex64, n, incX, incY, ix, iy uintptr) (sum complex64) -TEXT ·DotcInc(SB), NOSPLIT, $0 - MOVQ x_base+0(FP), X_PTR // X_PTR = &x - MOVQ y_base+24(FP), Y_PTR // Y_PTR = &y - PXOR SUM, SUM // SUM = 0 - PXOR P_SUM, P_SUM // P_SUM = 0 - MOVQ n+48(FP), LEN // LEN = n - CMPQ LEN, $0 // if LEN == 0 { return } - JE dotc_end - MOVQ ix+72(FP), INC_X - MOVQ iy+80(FP), INC_Y - LEAQ (X_PTR)(INC_X*8), X_PTR // X_PTR = &(X_PTR[ix]) - LEAQ (Y_PTR)(INC_Y*8), Y_PTR // Y_PTR = &(Y_PTR[iy]) - MOVQ incX+56(FP), INC_X // INC_X = incX * sizeof(complex64) - SHLQ $3, INC_X - MOVQ incY+64(FP), INC_Y // INC_Y = incY * sizeof(complex64) - SHLQ $3, INC_Y - MOVSS $(-1.0), NEG1 - SHUFPS $0, NEG1, NEG1 // { -1, -1, -1, -1 } - - MOVQ LEN, TAIL - ANDQ $3, TAIL // TAIL = LEN % 4 - SHRQ $2, LEN // LEN = floor( LEN / 4 ) - JZ dotc_tail // if LEN == 0 { goto dotc_tail } - - MOVUPS NEG1, P_NEG1 // Copy NEG1 for pipelining - LEAQ (INC_X)(INC_X*2), INCx3_X // INCx3_X = INC_X * 3 - LEAQ (INC_Y)(INC_Y*2), INCx3_Y // INCx3_Y = INC_Y * 3 - -dotc_loop: // do { - MOVSD (X_PTR), X3 // X_i = { imag(x[i]), real(x[i]) } - MOVSD (X_PTR)(INC_X*1), X5 - MOVSD (X_PTR)(INC_X*2), X7 - MOVSD (X_PTR)(INCx3_X*1), X9 - - // X_(i-1) = { imag(x[i]), imag(x[i]) } - MOVSHDUP_X3_X2 - MOVSHDUP_X5_X4 - MOVSHDUP_X7_X6 - MOVSHDUP_X9_X8 - - // X_i = { real(x[i]), real(x[i]) } - MOVSLDUP_X3_X3 - MOVSLDUP_X5_X5 - MOVSLDUP_X7_X7 - MOVSLDUP_X9_X9 - - // X_(i-1) = { -imag(x[i]), -imag(x[i]) } - MULPS NEG1, X2 - MULPS P_NEG1, X4 - MULPS NEG1, X6 - MULPS P_NEG1, X8 - - // X_j = { imag(y[i]), real(y[i]) } - MOVSD (Y_PTR), X10 - MOVSD (Y_PTR)(INC_Y*1), X11 - MOVSD (Y_PTR)(INC_Y*2), X12 - MOVSD (Y_PTR)(INCx3_Y*1), X13 - - // X_i = { imag(y[i]) * real(x[i]), real(y[i]) * real(x[i]) } - MULPS X10, X3 - MULPS X11, X5 - MULPS X12, X7 - MULPS X13, X9 - - // X_j = { real(y[i]), imag(y[i]) } - SHUFPS $0xB1, X10, X10 - SHUFPS $0xB1, X11, X11 - SHUFPS $0xB1, X12, X12 - SHUFPS $0xB1, X13, X13 - - // X_(i-1) = { real(y[i]) * imag(x[i]), imag(y[i]) * imag(x[i]) } - MULPS X10, X2 - MULPS X11, X4 - MULPS X12, X6 - MULPS X13, X8 - - // X_i = { - // imag(result[i]): imag(y[i]) * real(x[i]) + real(y[i]) * imag(x[i]), - // real(result[i]): real(y[i]) * real(x[i]) - imag(y[i]) * imag(x[i]) } - ADDSUBPS_X2_X3 - ADDSUBPS_X4_X5 - ADDSUBPS_X6_X7 - ADDSUBPS_X8_X9 - - // SUM += X_i - ADDPS X3, SUM - ADDPS X5, P_SUM - ADDPS X7, SUM - ADDPS X9, P_SUM - - LEAQ (X_PTR)(INC_X*4), X_PTR // X_PTR = &(X_PTR[INC_X*4]) - LEAQ (Y_PTR)(INC_Y*4), Y_PTR 
// Y_PTR = &(Y_PTR[INC_Y*4]) - - DECQ LEN - JNZ dotc_loop // } while --LEN > 0 - - ADDPS P_SUM, SUM // SUM = { P_SUM + SUM } - CMPQ TAIL, $0 // if TAIL == 0 { return } - JE dotc_end - -dotc_tail: // do { - MOVSD (X_PTR), X3 // X_i = { imag(x[i]), real(x[i]) } - MOVSHDUP_X3_X2 // X_(i-1) = { imag(x[i]), imag(x[i]) } - MOVSLDUP_X3_X3 // X_i = { real(x[i]), real(x[i]) } - MULPS NEG1, X2 // X_(i-1) = { -imag(x[i]), imag(x[i]) } - MOVUPS (Y_PTR), X10 // X_j = { imag(y[i]), real(y[i]) } - MULPS X10, X3 // X_i = { imag(y[i]) * real(x[i]), real(y[i]) * real(x[i]) } - SHUFPS $0x1, X10, X10 // X_j = { real(y[i]), imag(y[i]) } - MULPS X10, X2 // X_(i-1) = { real(y[i]) * imag(x[i]), imag(y[i]) * imag(x[i]) } - - // X_i = { - // imag(result[i]): imag(y[i])*real(x[i]) + real(y[i])*imag(x[i]), - // real(result[i]): real(y[i])*real(x[i]) - imag(y[i])*imag(x[i]) } - ADDSUBPS_X2_X3 - ADDPS X3, SUM // SUM += X_i - ADDQ INC_X, X_PTR // X_PTR += INC_X - ADDQ INC_Y, Y_PTR // Y_PTR += INC_Y - DECQ TAIL - JNZ dotc_tail // } while --TAIL > 0 - -dotc_end: - MOVSD SUM, sum+88(FP) // return SUM - RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/c64/dotcunitary_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/c64/dotcunitary_amd64.s deleted file mode 100644 index 4efc52b1a..000000000 --- a/vendor/gonum.org/v1/gonum/internal/asm/c64/dotcunitary_amd64.s +++ /dev/null @@ -1,208 +0,0 @@ -// Copyright ©2017 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !noasm,!appengine,!safe - -#include "textflag.h" - -#define MOVSLDUP_XPTR_IDX_8__X3 LONG $0x1C120FF3; BYTE $0xC6 // MOVSLDUP (SI)(AX*8), X3 -#define MOVSLDUP_16_XPTR_IDX_8__X5 LONG $0x6C120FF3; WORD $0x10C6 // MOVSLDUP 16(SI)(AX*8), X5 -#define MOVSLDUP_32_XPTR_IDX_8__X7 LONG $0x7C120FF3; WORD $0x20C6 // MOVSLDUP 32(SI)(AX*8), X7 -#define MOVSLDUP_48_XPTR_IDX_8__X9 LONG $0x120F44F3; WORD $0xC64C; BYTE $0x30 // MOVSLDUP 48(SI)(AX*8), X9 - -#define MOVSHDUP_XPTR_IDX_8__X2 LONG $0x14160FF3; BYTE $0xC6 // MOVSHDUP (SI)(AX*8), X2 -#define MOVSHDUP_16_XPTR_IDX_8__X4 LONG $0x64160FF3; WORD $0x10C6 // MOVSHDUP 16(SI)(AX*8), X4 -#define MOVSHDUP_32_XPTR_IDX_8__X6 LONG $0x74160FF3; WORD $0x20C6 // MOVSHDUP 32(SI)(AX*8), X6 -#define MOVSHDUP_48_XPTR_IDX_8__X8 LONG $0x160F44F3; WORD $0xC644; BYTE $0x30 // MOVSHDUP 48(SI)(AX*8), X8 - -#define MOVSHDUP_X3_X2 LONG $0xD3160FF3 // MOVSHDUP X3, X2 -#define MOVSLDUP_X3_X3 LONG $0xDB120FF3 // MOVSLDUP X3, X3 - -#define ADDSUBPS_X2_X3 LONG $0xDAD00FF2 // ADDSUBPS X2, X3 -#define ADDSUBPS_X4_X5 LONG $0xECD00FF2 // ADDSUBPS X4, X5 -#define ADDSUBPS_X6_X7 LONG $0xFED00FF2 // ADDSUBPS X6, X7 -#define ADDSUBPS_X8_X9 LONG $0xD00F45F2; BYTE $0xC8 // ADDSUBPS X8, X9 - -#define X_PTR SI -#define Y_PTR DI -#define LEN CX -#define TAIL BX -#define SUM X0 -#define P_SUM X1 -#define IDX AX -#define I_IDX DX -#define NEG1 X15 -#define P_NEG1 X14 - -// func DotcUnitary(x, y []complex64) (sum complex64) -TEXT ·DotcUnitary(SB), NOSPLIT, $0 - MOVQ x_base+0(FP), X_PTR // X_PTR = &x - MOVQ y_base+24(FP), Y_PTR // Y_PTR = &y - PXOR SUM, SUM // SUM = 0 - PXOR P_SUM, P_SUM // P_SUM = 0 - MOVQ x_len+8(FP), LEN // LEN = min( len(x), len(y) ) - CMPQ y_len+32(FP), LEN - CMOVQLE y_len+32(FP), LEN - CMPQ LEN, $0 // if LEN == 0 { return } - JE dotc_end - XORQ IDX, IDX // i = 0 - MOVSS $(-1.0), NEG1 - SHUFPS $0, NEG1, NEG1 // { -1, -1, -1, -1 } - - MOVQ X_PTR, DX - ANDQ $15, DX // DX = &x & 15 - JZ dotc_aligned // if DX == 0 { goto dotc_aligned } - - MOVSD 
(X_PTR)(IDX*8), X3 // X_i = { imag(x[i]), real(x[i]) } - MOVSHDUP_X3_X2 // X_(i-1) = { imag(x[i]), imag(x[i]) } - MOVSLDUP_X3_X3 // X_i = { real(x[i]), real(x[i]) } - MOVSD (Y_PTR)(IDX*8), X10 // X_j = { imag(y[i]), real(y[i]) } - MULPS NEG1, X2 // X_(i-1) = { -imag(x[i]), imag(x[i]) } - MULPS X10, X3 // X_i = { imag(y[i]) * real(x[i]), real(y[i]) * real(x[i]) } - SHUFPS $0x1, X10, X10 // X_j = { real(y[i]), imag(y[i]) } - MULPS X10, X2 // X_(i-1) = { real(y[i]) * imag(x[i]), imag(y[i]) * imag(x[i]) } - - // X_i = { - // imag(result[i]): imag(y[i])*real(x[i]) + real(y[i])*imag(x[i]), - // real(result[i]): real(y[i])*real(x[i]) - imag(y[i])*imag(x[i]) } - ADDSUBPS_X2_X3 - - MOVAPS X3, SUM // SUM = X_i - INCQ IDX // IDX++ - DECQ LEN // LEN-- - JZ dotc_ret // if LEN == 0 { goto dotc_ret } - -dotc_aligned: - MOVQ LEN, TAIL - ANDQ $7, TAIL // TAIL = LEN % 8 - SHRQ $3, LEN // LEN = floor( LEN / 8 ) - JZ dotc_tail // if LEN == 0 { return } - MOVUPS NEG1, P_NEG1 // Copy NEG1 for pipelining - -dotc_loop: // do { - MOVSLDUP_XPTR_IDX_8__X3 // X_i = { real(x[i]), real(x[i]), real(x[i+1]), real(x[i+1]) } - MOVSLDUP_16_XPTR_IDX_8__X5 - MOVSLDUP_32_XPTR_IDX_8__X7 - MOVSLDUP_48_XPTR_IDX_8__X9 - - MOVSHDUP_XPTR_IDX_8__X2 // X_(i-1) = { imag(x[i]), imag(x[i]), imag(x[i+1]), imag(x[i+1]) } - MOVSHDUP_16_XPTR_IDX_8__X4 - MOVSHDUP_32_XPTR_IDX_8__X6 - MOVSHDUP_48_XPTR_IDX_8__X8 - - // X_j = { imag(y[i]), real(y[i]), imag(y[i+1]), real(y[i+1]) } - MOVUPS (Y_PTR)(IDX*8), X10 - MOVUPS 16(Y_PTR)(IDX*8), X11 - MOVUPS 32(Y_PTR)(IDX*8), X12 - MOVUPS 48(Y_PTR)(IDX*8), X13 - - // X_(i-1) = { -imag(x[i]), -imag(x[i]), -imag(x[i]+1), -imag(x[i]+1) } - MULPS NEG1, X2 - MULPS P_NEG1, X4 - MULPS NEG1, X6 - MULPS P_NEG1, X8 - - // X_i = { imag(y[i]) * real(x[i]), real(y[i]) * real(x[i]), - // imag(y[i+1]) * real(x[i+1]), real(y[i+1]) * real(x[i+1]) } - MULPS X10, X3 - MULPS X11, X5 - MULPS X12, X7 - MULPS X13, X9 - - // X_j = { real(y[i]), imag(y[i]), real(y[i+1]), imag(y[i+1]) } - SHUFPS $0xB1, X10, X10 - SHUFPS $0xB1, X11, X11 - SHUFPS $0xB1, X12, X12 - SHUFPS $0xB1, X13, X13 - - // X_(i-1) = { real(y[i]) * imag(x[i]), imag(y[i]) * imag(x[i]), - // real(y[i+1]) * imag(x[i+1]), imag(y[i+1]) * imag(x[i+1]) } - MULPS X10, X2 - MULPS X11, X4 - MULPS X12, X6 - MULPS X13, X8 - - // X_i = { - // imag(result[i]): imag(y[i]) * real(x[i]) + real(y[i]) * imag(x[i]), - // real(result[i]): real(y[i]) * real(x[i]) - imag(y[i]) * imag(x[i]), - // imag(result[i+1]): imag(y[i+1]) * real(x[i+1]) + real(y[i+1]) * imag(x[i+1]), - // real(result[i+1]): real(y[i+1]) * real(x[i+1]) - imag(y[i+1]) * imag(x[i+1]), - // } - ADDSUBPS_X2_X3 - ADDSUBPS_X4_X5 - ADDSUBPS_X6_X7 - ADDSUBPS_X8_X9 - - // SUM += X_i - ADDPS X3, SUM - ADDPS X5, P_SUM - ADDPS X7, SUM - ADDPS X9, P_SUM - - ADDQ $8, IDX // IDX += 8 - DECQ LEN - JNZ dotc_loop // } while --LEN > 0 - - ADDPS SUM, P_SUM // P_SUM = { P_SUM[1] + SUM[1], P_SUM[0] + SUM[0] } - XORPS SUM, SUM // SUM = 0 - - CMPQ TAIL, $0 // if TAIL == 0 { return } - JE dotc_end - -dotc_tail: - MOVQ TAIL, LEN - SHRQ $1, LEN // LEN = floor( LEN / 2 ) - JZ dotc_tail_one // if LEN == 0 { goto dotc_tail_one } - -dotc_tail_two: // do { - MOVSLDUP_XPTR_IDX_8__X3 // X_i = { real(x[i]), real(x[i]), real(x[i+1]), real(x[i+1]) } - MOVSHDUP_XPTR_IDX_8__X2 // X_(i-1) = { imag(x[i]), imag(x[i]), imag(x[i]+1), imag(x[i]+1) } - MOVUPS (Y_PTR)(IDX*8), X10 // X_j = { imag(y[i]), real(y[i]) } - MULPS NEG1, X2 // X_(i-1) = { -imag(x[i]), imag(x[i]) } - MULPS X10, X3 // X_i = { imag(y[i]) * real(x[i]), real(y[i]) * real(x[i]) } - SHUFPS 
$0xB1, X10, X10 // X_j = { real(y[i]), imag(y[i]) } - MULPS X10, X2 // X_(i-1) = { real(y[i]) * imag(x[i]), imag(y[i]) * imag(x[i]) } - - // X_i = { - // imag(result[i]): imag(y[i])*real(x[i]) + real(y[i])*imag(x[i]), - // real(result[i]): real(y[i])*real(x[i]) - imag(y[i])*imag(x[i]) } - ADDSUBPS_X2_X3 - - ADDPS X3, SUM // SUM += X_i - - ADDQ $2, IDX // IDX += 2 - DECQ LEN - JNZ dotc_tail_two // } while --LEN > 0 - - ADDPS SUM, P_SUM // P_SUM = { P_SUM[1] + SUM[1], P_SUM[0] + SUM[0] } - XORPS SUM, SUM // SUM = 0 - - ANDQ $1, TAIL - JZ dotc_end - -dotc_tail_one: - MOVSD (X_PTR)(IDX*8), X3 // X_i = { imag(x[i]), real(x[i]) } - MOVSHDUP_X3_X2 // X_(i-1) = { imag(x[i]), imag(x[i]) } - MOVSLDUP_X3_X3 // X_i = { real(x[i]), real(x[i]) } - MOVSD (Y_PTR)(IDX*8), X10 // X_j = { imag(y[i]), real(y[i]) } - MULPS NEG1, X2 // X_(i-1) = { -imag(x[i]), imag(x[i]) } - MULPS X10, X3 // X_i = { imag(y[i]) * real(x[i]), real(y[i]) * real(x[i]) } - SHUFPS $0x1, X10, X10 // X_j = { real(y[i]), imag(y[i]) } - MULPS X10, X2 // X_(i-1) = { real(y[i]) * imag(x[i]), imag(y[i]) * imag(x[i]) } - - // X_i = { - // imag(result[i]): imag(y[i])*real(x[i]) + real(y[i])*imag(x[i]), - // real(result[i]): real(y[i])*real(x[i]) - imag(y[i])*imag(x[i]) } - ADDSUBPS_X2_X3 - - ADDPS X3, SUM // SUM += X_i - -dotc_end: - ADDPS P_SUM, SUM // SUM = { P_SUM[0] + SUM[0] } - MOVHLPS P_SUM, P_SUM // P_SUM = { P_SUM[1], P_SUM[1] } - ADDPS P_SUM, SUM // SUM = { P_SUM[1] + SUM[0] } - -dotc_ret: - MOVSD SUM, sum+48(FP) // return SUM - RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/c64/dotuinc_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/c64/dotuinc_amd64.s deleted file mode 100644 index 6b26c5ab7..000000000 --- a/vendor/gonum.org/v1/gonum/internal/asm/c64/dotuinc_amd64.s +++ /dev/null @@ -1,148 +0,0 @@ -// Copyright ©2016 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
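The DotcUnitary kernel above reuses the unconjugated-product machinery for the conjugated dot by negating imag(x) up front (the MULPS NEG1 step). A hypothetical pure-Go reference making that trick explicit:

package c64ref

// dotcRef accumulates sum += y[i] * conj(x[i]), mirroring the deleted
// DotcUnitary assembly: conjugating x reduces the conjugated product to
// the same add-subtract form as the plain product.
func dotcRef(x, y []complex64) (sum complex64) {
	n := len(x)
	if len(y) < n {
		n = len(y)
	}
	for i := 0; i < n; i++ {
		xr, xi := real(x[i]), -imag(x[i]) // conj(x[i]), via MULPS NEG1
		yr, yi := real(y[i]), imag(y[i])
		sum += complex(yr*xr-yi*xi, yi*xr+yr*xi)
	}
	return sum
}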
- -// +build !noasm,!appengine,!safe - -#include "textflag.h" - -#define MOVSHDUP_X3_X2 LONG $0xD3160FF3 // MOVSHDUP X3, X2 -#define MOVSHDUP_X5_X4 LONG $0xE5160FF3 // MOVSHDUP X5, X4 -#define MOVSHDUP_X7_X6 LONG $0xF7160FF3 // MOVSHDUP X7, X6 -#define MOVSHDUP_X9_X8 LONG $0x160F45F3; BYTE $0xC1 // MOVSHDUP X9, X8 - -#define MOVSLDUP_X3_X3 LONG $0xDB120FF3 // MOVSLDUP X3, X3 -#define MOVSLDUP_X5_X5 LONG $0xED120FF3 // MOVSLDUP X5, X5 -#define MOVSLDUP_X7_X7 LONG $0xFF120FF3 // MOVSLDUP X7, X7 -#define MOVSLDUP_X9_X9 LONG $0x120F45F3; BYTE $0xC9 // MOVSLDUP X9, X9 - -#define ADDSUBPS_X2_X3 LONG $0xDAD00FF2 // ADDSUBPS X2, X3 -#define ADDSUBPS_X4_X5 LONG $0xECD00FF2 // ADDSUBPS X4, X5 -#define ADDSUBPS_X6_X7 LONG $0xFED00FF2 // ADDSUBPS X6, X7 -#define ADDSUBPS_X8_X9 LONG $0xD00F45F2; BYTE $0xC8 // ADDSUBPS X8, X9 - -#define X_PTR SI -#define Y_PTR DI -#define LEN CX -#define TAIL BX -#define SUM X0 -#define P_SUM X1 -#define INC_X R8 -#define INCx3_X R9 -#define INC_Y R10 -#define INCx3_Y R11 - -// func DotuInc(x, y []complex64, n, incX, incY, ix, iy uintptr) (sum complex64) -TEXT ·DotuInc(SB), NOSPLIT, $0 - MOVQ x_base+0(FP), X_PTR // X_PTR = &x - MOVQ y_base+24(FP), Y_PTR // Y_PTR = &y - PXOR SUM, SUM // SUM = 0 - PXOR P_SUM, P_SUM // P_SUM = 0 - MOVQ n+48(FP), LEN // LEN = n - CMPQ LEN, $0 // if LEN == 0 { return } - JE dotu_end - MOVQ ix+72(FP), INC_X - MOVQ iy+80(FP), INC_Y - LEAQ (X_PTR)(INC_X*8), X_PTR // X_PTR = &(X_PTR[ix]) - LEAQ (Y_PTR)(INC_Y*8), Y_PTR // Y_PTR = &(Y_PTR[iy]) - MOVQ incX+56(FP), INC_X // INC_X = incX * sizeof(complex64) - SHLQ $3, INC_X - MOVQ incY+64(FP), INC_Y // INC_Y = incY * sizeof(complex64) - SHLQ $3, INC_Y - - MOVQ LEN, TAIL - ANDQ $3, TAIL // TAIL = LEN % 4 - SHRQ $2, LEN // LEN = floor( LEN / 4 ) - JZ dotu_tail // if TAIL == 0 { goto dotu_tail } - - LEAQ (INC_X)(INC_X*2), INCx3_X // INCx3_X = INC_X * 3 - LEAQ (INC_Y)(INC_Y*2), INCx3_Y // INCx3_Y = INC_Y * 3 - -dotu_loop: // do { - MOVSD (X_PTR), X3 // X_i = { imag(x[i]), real(x[i]) } - MOVSD (X_PTR)(INC_X*1), X5 - MOVSD (X_PTR)(INC_X*2), X7 - MOVSD (X_PTR)(INCx3_X*1), X9 - - // X_(i-1) = { imag(x[i]), imag(x[i]) } - MOVSHDUP_X3_X2 - MOVSHDUP_X5_X4 - MOVSHDUP_X7_X6 - MOVSHDUP_X9_X8 - - // X_i = { real(x[i]), real(x[i]) } - MOVSLDUP_X3_X3 - MOVSLDUP_X5_X5 - MOVSLDUP_X7_X7 - MOVSLDUP_X9_X9 - - // X_j = { imag(y[i]), real(y[i]) } - MOVSD (Y_PTR), X10 - MOVSD (Y_PTR)(INC_Y*1), X11 - MOVSD (Y_PTR)(INC_Y*2), X12 - MOVSD (Y_PTR)(INCx3_Y*1), X13 - - // X_i = { imag(y[i]) * real(x[i]), real(y[i]) * real(x[i]) } - MULPS X10, X3 - MULPS X11, X5 - MULPS X12, X7 - MULPS X13, X9 - - // X_j = { real(y[i]), imag(y[i]) } - SHUFPS $0xB1, X10, X10 - SHUFPS $0xB1, X11, X11 - SHUFPS $0xB1, X12, X12 - SHUFPS $0xB1, X13, X13 - - // X_(i-1) = { real(y[i]) * imag(x[i]), imag(y[i]) * imag(x[i]) } - MULPS X10, X2 - MULPS X11, X4 - MULPS X12, X6 - MULPS X13, X8 - - // X_i = { - // imag(result[i]): imag(y[i]) * real(x[i]) + real(y[i]) * imag(x[i]), - // real(result[i]): real(y[i]) * real(x[i]) - imag(y[i]) * imag(x[i]) } - ADDSUBPS_X2_X3 - ADDSUBPS_X4_X5 - ADDSUBPS_X6_X7 - ADDSUBPS_X8_X9 - - // SUM += X_i - ADDPS X3, SUM - ADDPS X5, P_SUM - ADDPS X7, SUM - ADDPS X9, P_SUM - - LEAQ (X_PTR)(INC_X*4), X_PTR // X_PTR = &(X_PTR[INC_X*4]) - LEAQ (Y_PTR)(INC_Y*4), Y_PTR // Y_PTR = &(Y_PTR[INC_Y*4]) - - DECQ LEN - JNZ dotu_loop // } while --LEN > 0 - - ADDPS P_SUM, SUM // SUM = { P_SUM + SUM } - CMPQ TAIL, $0 // if TAIL == 0 { return } - JE dotu_end - -dotu_tail: // do { - MOVSD (X_PTR), X3 // X_i = { imag(x[i]), real(x[i]) } - 
MOVSHDUP_X3_X2 // X_(i-1) = { imag(x[i]), imag(x[i]) } - MOVSLDUP_X3_X3 // X_i = { real(x[i]), real(x[i]) } - MOVUPS (Y_PTR), X10 // X_j = { imag(y[i]), real(y[i]) } - MULPS X10, X3 // X_i = { imag(y[i]) * real(x[i]), real(y[i]) * real(x[i]) } - SHUFPS $0x1, X10, X10 // X_j = { real(y[i]), imag(y[i]) } - MULPS X10, X2 // X_(i-1) = { real(y[i]) * imag(x[i]), imag(y[i]) * imag(x[i]) } - - // X_i = { - // imag(result[i]): imag(y[i])*real(x[i]) + real(y[i])*imag(x[i]), - // real(result[i]): real(y[i])*real(x[i]) - imag(y[i])*imag(x[i]) } - ADDSUBPS_X2_X3 - ADDPS X3, SUM // SUM += X_i - ADDQ INC_X, X_PTR // X_PTR += INC_X - ADDQ INC_Y, Y_PTR // Y_PTR += INC_Y - DECQ TAIL - JNZ dotu_tail // } while --TAIL > 0 - -dotu_end: - MOVSD SUM, sum+88(FP) // return SUM - RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/c64/dotuunitary_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/c64/dotuunitary_amd64.s deleted file mode 100644 index 07a115b33..000000000 --- a/vendor/gonum.org/v1/gonum/internal/asm/c64/dotuunitary_amd64.s +++ /dev/null @@ -1,197 +0,0 @@ -// Copyright ©2017 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !noasm,!appengine,!safe - -#include "textflag.h" - -#define MOVSLDUP_XPTR_IDX_8__X3 LONG $0x1C120FF3; BYTE $0xC6 // MOVSLDUP (SI)(AX*8), X3 -#define MOVSLDUP_16_XPTR_IDX_8__X5 LONG $0x6C120FF3; WORD $0x10C6 // MOVSLDUP 16(SI)(AX*8), X5 -#define MOVSLDUP_32_XPTR_IDX_8__X7 LONG $0x7C120FF3; WORD $0x20C6 // MOVSLDUP 32(SI)(AX*8), X7 -#define MOVSLDUP_48_XPTR_IDX_8__X9 LONG $0x120F44F3; WORD $0xC64C; BYTE $0x30 // MOVSLDUP 48(SI)(AX*8), X9 - -#define MOVSHDUP_XPTR_IDX_8__X2 LONG $0x14160FF3; BYTE $0xC6 // MOVSHDUP (SI)(AX*8), X2 -#define MOVSHDUP_16_XPTR_IDX_8__X4 LONG $0x64160FF3; WORD $0x10C6 // MOVSHDUP 16(SI)(AX*8), X4 -#define MOVSHDUP_32_XPTR_IDX_8__X6 LONG $0x74160FF3; WORD $0x20C6 // MOVSHDUP 32(SI)(AX*8), X6 -#define MOVSHDUP_48_XPTR_IDX_8__X8 LONG $0x160F44F3; WORD $0xC644; BYTE $0x30 // MOVSHDUP 48(SI)(AX*8), X8 - -#define MOVSHDUP_X3_X2 LONG $0xD3160FF3 // MOVSHDUP X3, X2 -#define MOVSLDUP_X3_X3 LONG $0xDB120FF3 // MOVSLDUP X3, X3 - -#define ADDSUBPS_X2_X3 LONG $0xDAD00FF2 // ADDSUBPS X2, X3 -#define ADDSUBPS_X4_X5 LONG $0xECD00FF2 // ADDSUBPS X4, X5 -#define ADDSUBPS_X6_X7 LONG $0xFED00FF2 // ADDSUBPS X6, X7 -#define ADDSUBPS_X8_X9 LONG $0xD00F45F2; BYTE $0xC8 // ADDSUBPS X8, X9 - -#define X_PTR SI -#define Y_PTR DI -#define LEN CX -#define TAIL BX -#define SUM X0 -#define P_SUM X1 -#define IDX AX -#define I_IDX DX -#define NEG1 X15 -#define P_NEG1 X14 - -// func DotuUnitary(x, y []complex64) (sum complex64) -TEXT ·DotuUnitary(SB), NOSPLIT, $0 - MOVQ x_base+0(FP), X_PTR // X_PTR = &x - MOVQ y_base+24(FP), Y_PTR // Y_PTR = &y - PXOR SUM, SUM // SUM = 0 - PXOR P_SUM, P_SUM // P_SUM = 0 - MOVQ x_len+8(FP), LEN // LEN = min( len(x), len(y) ) - CMPQ y_len+32(FP), LEN - CMOVQLE y_len+32(FP), LEN - CMPQ LEN, $0 // if LEN == 0 { return } - JE dotu_end - XORQ IDX, IDX // IDX = 0 - - MOVQ X_PTR, DX - ANDQ $15, DX // DX = &x & 15 - JZ dotu_aligned // if DX == 0 { goto dotu_aligned } - - MOVSD (X_PTR)(IDX*8), X3 // X_i = { imag(x[i]), real(x[i]) } - MOVSHDUP_X3_X2 // X_(i-1) = { imag(x[i]), imag(x[i]) } - MOVSLDUP_X3_X3 // X_i = { real(x[i]), real(x[i]) } - MOVSD (Y_PTR)(IDX*8), X10 // X_j = { imag(y[i]), real(y[i]) } - MULPS X10, X3 // X_i = { imag(y[i]) * real(x[i]), real(y[i]) * real(x[i]) } - SHUFPS $0x1, X10, X10 // X_j = { real(y[i]), imag(y[i]) } - MULPS X10, X2 // 
X_(i-1) = { real(y[i]) * imag(x[i]), imag(y[i]) * imag(x[i]) } - - // X_i = { - // imag(result[i]): imag(y[i])*real(x[i]) + real(y[i])*imag(x[i]), - // real(result[i]): real(y[i])*real(x[i]) - imag(y[i])*imag(x[i]) } - ADDSUBPS_X2_X3 - - MOVAPS X3, SUM // SUM = X_i - INCQ IDX // IDX++ - DECQ LEN // LEN-- - JZ dotu_end // if LEN == 0 { goto dotu_end } - -dotu_aligned: - MOVQ LEN, TAIL - ANDQ $7, TAIL // TAIL = LEN % 8 - SHRQ $3, LEN // LEN = floor( LEN / 8 ) - JZ dotu_tail // if LEN == 0 { goto dotu_tail } - PXOR P_SUM, P_SUM - -dotu_loop: // do { - MOVSLDUP_XPTR_IDX_8__X3 // X_i = { real(x[i]), real(x[i]), real(x[i+1]), real(x[i+1]) } - MOVSLDUP_16_XPTR_IDX_8__X5 - MOVSLDUP_32_XPTR_IDX_8__X7 - MOVSLDUP_48_XPTR_IDX_8__X9 - - MOVSHDUP_XPTR_IDX_8__X2 // X_(i-1) = { imag(x[i]), imag(x[i]), imag(x[i]+1), imag(x[i]+1) } - MOVSHDUP_16_XPTR_IDX_8__X4 - MOVSHDUP_32_XPTR_IDX_8__X6 - MOVSHDUP_48_XPTR_IDX_8__X8 - - // X_j = { imag(y[i]), real(y[i]), imag(y[i+1]), real(y[i+1]) } - MOVUPS (Y_PTR)(IDX*8), X10 - MOVUPS 16(Y_PTR)(IDX*8), X11 - MOVUPS 32(Y_PTR)(IDX*8), X12 - MOVUPS 48(Y_PTR)(IDX*8), X13 - - // X_i = { imag(y[i]) * real(x[i]), real(y[i]) * real(x[i]), - // imag(y[i+1]) * real(x[i+1]), real(y[i+1]) * real(x[i+1]) } - MULPS X10, X3 - MULPS X11, X5 - MULPS X12, X7 - MULPS X13, X9 - - // X_j = { real(y[i]), imag(y[i]), real(y[i+1]), imag(y[i+1]) } - SHUFPS $0xB1, X10, X10 - SHUFPS $0xB1, X11, X11 - SHUFPS $0xB1, X12, X12 - SHUFPS $0xB1, X13, X13 - - // X_(i-1) = { real(y[i]) * imag(x[i]), imag(y[i]) * imag(x[i]), - // real(y[i+1]) * imag(x[i+1]), imag(y[i+1]) * imag(x[i+1]) } - MULPS X10, X2 - MULPS X11, X4 - MULPS X12, X6 - MULPS X13, X8 - - // X_i = { - // imag(result[i]): imag(y[i]) * real(x[i]) + real(y[i]) * imag(x[i]), - // real(result[i]): real(y[i]) * real(x[i]) - imag(y[i]) * imag(x[i]), - // imag(result[i+1]): imag(y[i+1]) * real(x[i+1]) + real(y[i+1]) * imag(x[i+1]), - // real(result[i+1]): real(y[i+1]) * real(x[i+1]) - imag(y[i+1]) * imag(x[i+1]), - // } - ADDSUBPS_X2_X3 - ADDSUBPS_X4_X5 - ADDSUBPS_X6_X7 - ADDSUBPS_X8_X9 - - // SUM += X_i - ADDPS X3, SUM - ADDPS X5, P_SUM - ADDPS X7, SUM - ADDPS X9, P_SUM - - ADDQ $8, IDX // IDX += 8 - DECQ LEN - JNZ dotu_loop // } while --LEN > 0 - - ADDPS SUM, P_SUM // P_SUM = { P_SUM[1] + SUM[1], P_SUM[0] + SUM[0] } - XORPS SUM, SUM // SUM = 0 - - CMPQ TAIL, $0 // if TAIL == 0 { return } - JE dotu_end - -dotu_tail: - MOVQ TAIL, LEN - SHRQ $1, LEN // LEN = floor( LEN / 2 ) - JZ dotu_tail_one // if LEN == 0 { goto dotc_tail_one } - -dotu_tail_two: // do { - MOVSLDUP_XPTR_IDX_8__X3 // X_i = { real(x[i]), real(x[i]), real(x[i+1]), real(x[i+1]) } - MOVSHDUP_XPTR_IDX_8__X2 // X_(i-1) = { imag(x[i]), imag(x[i]), imag(x[i]+1), imag(x[i]+1) } - MOVUPS (Y_PTR)(IDX*8), X10 // X_j = { imag(y[i]), real(y[i]) } - MULPS X10, X3 // X_i = { imag(y[i]) * real(x[i]), real(y[i]) * real(x[i]) } - SHUFPS $0xB1, X10, X10 // X_j = { real(y[i]), imag(y[i]) } - MULPS X10, X2 // X_(i-1) = { real(y[i]) * imag(x[i]), imag(y[i]) * imag(x[i]) } - - // X_i = { - // imag(result[i]): imag(y[i])*real(x[i]) + real(y[i])*imag(x[i]), - // real(result[i]): real(y[i])*real(x[i]) - imag(y[i])*imag(x[i]) } - ADDSUBPS_X2_X3 - - ADDPS X3, SUM // SUM += X_i - - ADDQ $2, IDX // IDX += 2 - DECQ LEN - JNZ dotu_tail_two // } while --LEN > 0 - - ADDPS SUM, P_SUM // P_SUM = { P_SUM[1] + SUM[1], P_SUM[0] + SUM[0] } - XORPS SUM, SUM // SUM = 0 - - ANDQ $1, TAIL - JZ dotu_end - -dotu_tail_one: - MOVSD (X_PTR)(IDX*8), X3 // X_i = { imag(x[i]), real(x[i]) } - MOVSHDUP_X3_X2 // X_(i-1) = { imag(x[i]), 
imag(x[i]) } - MOVSLDUP_X3_X3 // X_i = { real(x[i]), real(x[i]) } - MOVSD (Y_PTR)(IDX*8), X10 // X_j = { imag(y[i]), real(y[i]) } - MULPS X10, X3 // X_i = { imag(y[i]) * real(x[i]), real(y[i]) * real(x[i]) } - SHUFPS $0x1, X10, X10 // X_j = { real(y[i]), imag(y[i]) } - MULPS X10, X2 // X_(i-1) = { real(y[i]) * imag(x[i]), imag(y[i]) * imag(x[i]) } - - // X_i = { - // imag(result[i]): imag(y[i])*real(x[i]) + real(y[i])*imag(x[i]), - // real(result[i]): real(y[i])*real(x[i]) - imag(y[i])*imag(x[i]) } - ADDSUBPS_X2_X3 - - ADDPS X3, SUM // SUM += X_i - -dotu_end: - ADDPS P_SUM, SUM // SUM = { P_SUM[0] + SUM[0] } - MOVHLPS P_SUM, P_SUM // P_SUM = { P_SUM[1], P_SUM[1] } - ADDPS P_SUM, SUM // SUM = { P_SUM[1] + SUM[0] } - -dotu_ret: - MOVSD SUM, sum+48(FP) // return SUM - RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/c64/scal.go b/vendor/gonum.org/v1/gonum/internal/asm/c64/scal.go deleted file mode 100644 index a84def876..000000000 --- a/vendor/gonum.org/v1/gonum/internal/asm/c64/scal.go +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright ©2016 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package c64 - -// ScalUnitary is -// for i := range x { -// x[i] *= alpha -// } -func ScalUnitary(alpha complex64, x []complex64) { - for i := range x { - x[i] *= alpha - } -} - -// ScalUnitaryTo is -// for i, v := range x { -// dst[i] = alpha * v -// } -func ScalUnitaryTo(dst []complex64, alpha complex64, x []complex64) { - for i, v := range x { - dst[i] = alpha * v - } -} - -// ScalInc is -// var ix uintptr -// for i := 0; i < int(n); i++ { -// x[ix] *= alpha -// ix += incX -// } -func ScalInc(alpha complex64, x []complex64, n, incX uintptr) { - var ix uintptr - for i := 0; i < int(n); i++ { - x[ix] *= alpha - ix += incX - } -} - -// ScalIncTo is -// var idst, ix uintptr -// for i := 0; i < int(n); i++ { -// dst[idst] = alpha * x[ix] -// ix += incX -// idst += incDst -// } -func ScalIncTo(dst []complex64, incDst uintptr, alpha complex64, x []complex64, n, incX uintptr) { - var idst, ix uintptr - for i := 0; i < int(n); i++ { - dst[idst] = alpha * x[ix] - ix += incX - idst += incDst - } -} - -// SscalUnitary is -// for i, v := range x { -// x[i] = complex(real(v)*alpha, imag(v)*alpha) -// } -func SscalUnitary(alpha float32, x []complex64) { - for i, v := range x { - x[i] = complex(real(v)*alpha, imag(v)*alpha) - } -} - -// SscalInc is -// var ix uintptr -// for i := 0; i < int(n); i++ { -// x[ix] = complex(real(x[ix])*alpha, imag(x[ix])*alpha) -// ix += inc -// } -func SscalInc(alpha float32, x []complex64, n, inc uintptr) { - var ix uintptr - for i := 0; i < int(n); i++ { - x[ix] = complex(real(x[ix])*alpha, imag(x[ix])*alpha) - ix += inc - } -} diff --git a/vendor/gonum.org/v1/gonum/internal/asm/c64/stubs_amd64.go b/vendor/gonum.org/v1/gonum/internal/asm/c64/stubs_amd64.go deleted file mode 100644 index 3e12d6bcd..000000000 --- a/vendor/gonum.org/v1/gonum/internal/asm/c64/stubs_amd64.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright ©2016 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build !noasm,!appengine,!safe - -package c64 - -// AxpyUnitary is -// for i, v := range x { -// y[i] += alpha * v -// } -func AxpyUnitary(alpha complex64, x, y []complex64) - -// AxpyUnitaryTo is -// for i, v := range x { -// dst[i] = alpha*v + y[i] -// } -func AxpyUnitaryTo(dst []complex64, alpha complex64, x, y []complex64) - -// AxpyInc is -// for i := 0; i < int(n); i++ { -// y[iy] += alpha * x[ix] -// ix += incX -// iy += incY -// } -func AxpyInc(alpha complex64, x, y []complex64, n, incX, incY, ix, iy uintptr) - -// AxpyIncTo is -// for i := 0; i < int(n); i++ { -// dst[idst] = alpha*x[ix] + y[iy] -// ix += incX -// iy += incY -// idst += incDst -// } -func AxpyIncTo(dst []complex64, incDst, idst uintptr, alpha complex64, x, y []complex64, n, incX, incY, ix, iy uintptr) - -// DotcUnitary is -// for i, v := range x { -// sum += y[i] * conj(v) -// } -// return sum -func DotcUnitary(x, y []complex64) (sum complex64) - -// DotcInc is -// for i := 0; i < int(n); i++ { -// sum += y[iy] * conj(x[ix]) -// ix += incX -// iy += incY -// } -// return sum -func DotcInc(x, y []complex64, n, incX, incY, ix, iy uintptr) (sum complex64) - -// DotuUnitary is -// for i, v := range x { -// sum += y[i] * v -// } -// return sum -func DotuUnitary(x, y []complex64) (sum complex64) - -// DotuInc is -// for i := 0; i < int(n); i++ { -// sum += y[iy] * x[ix] -// ix += incX -// iy += incY -// } -// return sum -func DotuInc(x, y []complex64, n, incX, incY, ix, iy uintptr) (sum complex64) diff --git a/vendor/gonum.org/v1/gonum/internal/asm/c64/stubs_noasm.go b/vendor/gonum.org/v1/gonum/internal/asm/c64/stubs_noasm.go deleted file mode 100644 index 411afcb2a..000000000 --- a/vendor/gonum.org/v1/gonum/internal/asm/c64/stubs_noasm.go +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright ©2016 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build !amd64 noasm appengine safe - -package c64 - -// AxpyUnitary is -// for i, v := range x { -// y[i] += alpha * v -// } -func AxpyUnitary(alpha complex64, x, y []complex64) { - for i, v := range x { - y[i] += alpha * v - } -} - -// AxpyUnitaryTo is -// for i, v := range x { -// dst[i] = alpha*v + y[i] -// } -func AxpyUnitaryTo(dst []complex64, alpha complex64, x, y []complex64) { - for i, v := range x { - dst[i] = alpha*v + y[i] - } -} - -// AxpyInc is -// for i := 0; i < int(n); i++ { -// y[iy] += alpha * x[ix] -// ix += incX -// iy += incY -// } -func AxpyInc(alpha complex64, x, y []complex64, n, incX, incY, ix, iy uintptr) { - for i := 0; i < int(n); i++ { - y[iy] += alpha * x[ix] - ix += incX - iy += incY - } -} - -// AxpyIncTo is -// for i := 0; i < int(n); i++ { -// dst[idst] = alpha*x[ix] + y[iy] -// ix += incX -// iy += incY -// idst += incDst -// } -func AxpyIncTo(dst []complex64, incDst, idst uintptr, alpha complex64, x, y []complex64, n, incX, incY, ix, iy uintptr) { - for i := 0; i < int(n); i++ { - dst[idst] = alpha*x[ix] + y[iy] - ix += incX - iy += incY - idst += incDst - } -} - -// DotcUnitary is -// for i, v := range x { -// sum += y[i] * conj(v) -// } -// return sum -func DotcUnitary(x, y []complex64) (sum complex64) { - for i, v := range x { - sum += y[i] * conj(v) - } - return sum -} - -// DotcInc is -// for i := 0; i < int(n); i++ { -// sum += y[iy] * conj(x[ix]) -// ix += incX -// iy += incY -// } -// return sum -func DotcInc(x, y []complex64, n, incX, incY, ix, iy uintptr) (sum complex64) { - for i := 0; i < int(n); i++ { - sum += y[iy] * conj(x[ix]) - ix += incX - iy += incY - } - return sum -} - -// DotuUnitary is -// for i, v := range x { -// sum += y[i] * v -// } -// return sum -func DotuUnitary(x, y []complex64) (sum complex64) { - for i, v := range x { - sum += y[i] * v - } - return sum -} - -// DotuInc is -// for i := 0; i < int(n); i++ { -// sum += y[iy] * x[ix] -// ix += incX -// iy += incY -// } -// return sum -func DotuInc(x, y []complex64, n, incX, incY, ix, iy uintptr) (sum complex64) { - for i := 0; i < int(n); i++ { - sum += y[iy] * x[ix] - ix += incX - iy += incY - } - return sum -} diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f32/axpyinc_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f32/axpyinc_amd64.s deleted file mode 100644 index ebf360ff7..000000000 --- a/vendor/gonum.org/v1/gonum/internal/asm/f32/axpyinc_amd64.s +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright ©2016 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build !noasm,!appengine,!safe - -#include "textflag.h" - -// func AxpyInc(alpha float32, x, y []float32, n, incX, incY, ix, iy uintptr) -TEXT ·AxpyInc(SB), NOSPLIT, $0 - MOVQ n+56(FP), CX // CX = n - CMPQ CX, $0 // if n==0 { return } - JLE axpyi_end - MOVQ x_base+8(FP), SI // SI = &x - MOVQ y_base+32(FP), DI // DI = &y - MOVQ ix+80(FP), R8 // R8 = ix - MOVQ iy+88(FP), R9 // R9 = iy - LEAQ (SI)(R8*4), SI // SI = &(x[ix]) - LEAQ (DI)(R9*4), DI // DI = &(y[iy]) - MOVQ DI, DX // DX = DI Read Pointer for y - MOVQ incX+64(FP), R8 // R8 = incX - SHLQ $2, R8 // R8 *= sizeof(float32) - MOVQ incY+72(FP), R9 // R9 = incY - SHLQ $2, R9 // R9 *= sizeof(float32) - MOVSS alpha+0(FP), X0 // X0 = alpha - MOVSS X0, X1 // X1 = X0 // for pipelining - MOVQ CX, BX - ANDQ $3, BX // BX = n % 4 - SHRQ $2, CX // CX = floor( n / 4 ) - JZ axpyi_tail_start // if CX == 0 { goto axpyi_tail_start } - -axpyi_loop: // Loop unrolled 4x do { - MOVSS (SI), X2 // X_i = x[i] - MOVSS (SI)(R8*1), X3 - LEAQ (SI)(R8*2), SI // SI = &(SI[incX*2]) - MOVSS (SI), X4 - MOVSS (SI)(R8*1), X5 - MULSS X1, X2 // X_i *= a - MULSS X0, X3 - MULSS X1, X4 - MULSS X0, X5 - ADDSS (DX), X2 // X_i += y[i] - ADDSS (DX)(R9*1), X3 - LEAQ (DX)(R9*2), DX // DX = &(DX[incY*2]) - ADDSS (DX), X4 - ADDSS (DX)(R9*1), X5 - MOVSS X2, (DI) // y[i] = X_i - MOVSS X3, (DI)(R9*1) - LEAQ (DI)(R9*2), DI // DI = &(DI[incY*2]) - MOVSS X4, (DI) - MOVSS X5, (DI)(R9*1) - LEAQ (SI)(R8*2), SI // SI = &(SI[incX*2]) // Increment addresses - LEAQ (DX)(R9*2), DX // DX = &(DX[incY*2]) - LEAQ (DI)(R9*2), DI // DI = &(DI[incY*2]) - LOOP axpyi_loop // } while --CX > 0 - CMPQ BX, $0 // if BX == 0 { return } - JE axpyi_end - -axpyi_tail_start: // Reset loop registers - MOVQ BX, CX // Loop counter: CX = BX - -axpyi_tail: // do { - MOVSS (SI), X2 // X2 = x[i] - MULSS X1, X2 // X2 *= a - ADDSS (DI), X2 // X2 += y[i] - MOVSS X2, (DI) // y[i] = X2 - ADDQ R8, SI // SI = &(SI[incX]) - ADDQ R9, DI // DI = &(DI[incY]) - LOOP axpyi_tail // } while --CX > 0 - -axpyi_end: - RET - diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f32/axpyincto_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f32/axpyincto_amd64.s deleted file mode 100644 index 4e9020e4f..000000000 --- a/vendor/gonum.org/v1/gonum/internal/asm/f32/axpyincto_amd64.s +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright ©2016 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build !noasm,!appengine,!safe - -#include "textflag.h" - -// func AxpyIncTo(dst []float32, incDst, idst uintptr, alpha float32, x, y []float32, n, incX, incY, ix, iy uintptr) -TEXT ·AxpyIncTo(SB), NOSPLIT, $0 - MOVQ n+96(FP), CX // CX = n - CMPQ CX, $0 // if n==0 { return } - JLE axpyi_end - MOVQ dst_base+0(FP), DI // DI = &dst - MOVQ x_base+48(FP), SI // SI = &x - MOVQ y_base+72(FP), DX // DX = &y - MOVQ ix+120(FP), R8 // R8 = ix // Load the first index - MOVQ iy+128(FP), R9 // R9 = iy - MOVQ idst+32(FP), R10 // R10 = idst - LEAQ (SI)(R8*4), SI // SI = &(x[ix]) - LEAQ (DX)(R9*4), DX // DX = &(y[iy]) - LEAQ (DI)(R10*4), DI // DI = &(dst[idst]) - MOVQ incX+104(FP), R8 // R8 = incX - SHLQ $2, R8 // R8 *= sizeof(float32) - MOVQ incY+112(FP), R9 // R9 = incY - SHLQ $2, R9 // R9 *= sizeof(float32) - MOVQ incDst+24(FP), R10 // R10 = incDst - SHLQ $2, R10 // R10 *= sizeof(float32) - MOVSS alpha+40(FP), X0 // X0 = alpha - MOVSS X0, X1 // X1 = X0 // for pipelining - MOVQ CX, BX - ANDQ $3, BX // BX = n % 4 - SHRQ $2, CX // CX = floor( n / 4 ) - JZ axpyi_tail_start // if CX == 0 { goto axpyi_tail_start } - -axpyi_loop: // Loop unrolled 4x do { - MOVSS (SI), X2 // X_i = x[i] - MOVSS (SI)(R8*1), X3 - LEAQ (SI)(R8*2), SI // SI = &(SI[incX*2]) - MOVSS (SI), X4 - MOVSS (SI)(R8*1), X5 - MULSS X1, X2 // X_i *= a - MULSS X0, X3 - MULSS X1, X4 - MULSS X0, X5 - ADDSS (DX), X2 // X_i += y[i] - ADDSS (DX)(R9*1), X3 - LEAQ (DX)(R9*2), DX // DX = &(DX[incY*2]) - ADDSS (DX), X4 - ADDSS (DX)(R9*1), X5 - MOVSS X2, (DI) // dst[i] = X_i - MOVSS X3, (DI)(R10*1) - LEAQ (DI)(R10*2), DI // DI = &(DI[incDst*2]) - MOVSS X4, (DI) - MOVSS X5, (DI)(R10*1) - LEAQ (SI)(R8*2), SI // SI = &(SI[incX*2]) // Increment addresses - LEAQ (DX)(R9*2), DX // DX = &(DX[incY*2]) - LEAQ (DI)(R10*2), DI // DI = &(DI[incDst*2]) - LOOP axpyi_loop // } while --CX > 0 - CMPQ BX, $0 // if BX == 0 { return } - JE axpyi_end - -axpyi_tail_start: // Reset loop registers - MOVQ BX, CX // Loop counter: CX = BX - -axpyi_tail: // do { - MOVSS (SI), X2 // X2 = x[i] - MULSS X1, X2 // X2 *= a - ADDSS (DX), X2 // X2 += y[i] - MOVSS X2, (DI) // dst[i] = X2 - ADDQ R8, SI // SI = &(SI[incX]) - ADDQ R9, DX // DX = &(DX[incY]) - ADDQ R10, DI // DI = &(DI[incY]) - LOOP axpyi_tail // } while --CX > 0 - -axpyi_end: - RET - diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f32/axpyunitary_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f32/axpyunitary_amd64.s deleted file mode 100644 index 224b84255..000000000 --- a/vendor/gonum.org/v1/gonum/internal/asm/f32/axpyunitary_amd64.s +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright ©2016 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build !noasm,!appengine,!safe - -#include "textflag.h" - -// func AxpyUnitary(alpha float32, x, y []float32) -TEXT ·AxpyUnitary(SB), NOSPLIT, $0 - MOVQ x_base+8(FP), SI // SI = &x - MOVQ y_base+32(FP), DI // DI = &y - MOVQ x_len+16(FP), BX // BX = min( len(x), len(y) ) - CMPQ y_len+40(FP), BX - CMOVQLE y_len+40(FP), BX - CMPQ BX, $0 // if BX == 0 { return } - JE axpy_end - MOVSS alpha+0(FP), X0 - SHUFPS $0, X0, X0 // X0 = { a, a, a, a } - XORQ AX, AX // i = 0 - PXOR X2, X2 // 2 NOP instructions (PXOR) to align - PXOR X3, X3 // loop to cache line - MOVQ DI, CX - ANDQ $0xF, CX // Align on 16-byte boundary for ADDPS - JZ axpy_no_trim // if CX == 0 { goto axpy_no_trim } - - XORQ $0xF, CX // CX = 4 - floor( BX % 16 / 4 ) - INCQ CX - SHRQ $2, CX - -axpy_align: // Trim first value(s) in unaligned buffer do { - MOVSS (SI)(AX*4), X2 // X2 = x[i] - MULSS X0, X2 // X2 *= a - ADDSS (DI)(AX*4), X2 // X2 += y[i] - MOVSS X2, (DI)(AX*4) // y[i] = X2 - INCQ AX // i++ - DECQ BX - JZ axpy_end // if --BX == 0 { return } - LOOP axpy_align // } while --CX > 0 - -axpy_no_trim: - MOVUPS X0, X1 // Copy X0 to X1 for pipelining - MOVQ BX, CX - ANDQ $0xF, BX // BX = len % 16 - SHRQ $4, CX // CX = int( len / 16 ) - JZ axpy_tail4_start // if CX == 0 { return } - -axpy_loop: // Loop unrolled 16x do { - MOVUPS (SI)(AX*4), X2 // X2 = x[i:i+4] - MOVUPS 16(SI)(AX*4), X3 - MOVUPS 32(SI)(AX*4), X4 - MOVUPS 48(SI)(AX*4), X5 - MULPS X0, X2 // X2 *= a - MULPS X1, X3 - MULPS X0, X4 - MULPS X1, X5 - ADDPS (DI)(AX*4), X2 // X2 += y[i:i+4] - ADDPS 16(DI)(AX*4), X3 - ADDPS 32(DI)(AX*4), X4 - ADDPS 48(DI)(AX*4), X5 - MOVUPS X2, (DI)(AX*4) // dst[i:i+4] = X2 - MOVUPS X3, 16(DI)(AX*4) - MOVUPS X4, 32(DI)(AX*4) - MOVUPS X5, 48(DI)(AX*4) - ADDQ $16, AX // i += 16 - LOOP axpy_loop // while (--CX) > 0 - CMPQ BX, $0 // if BX == 0 { return } - JE axpy_end - -axpy_tail4_start: // Reset loop counter for 4-wide tail loop - MOVQ BX, CX // CX = floor( BX / 4 ) - SHRQ $2, CX - JZ axpy_tail_start // if CX == 0 { goto axpy_tail_start } - -axpy_tail4: // Loop unrolled 4x do { - MOVUPS (SI)(AX*4), X2 // X2 = x[i] - MULPS X0, X2 // X2 *= a - ADDPS (DI)(AX*4), X2 // X2 += y[i] - MOVUPS X2, (DI)(AX*4) // y[i] = X2 - ADDQ $4, AX // i += 4 - LOOP axpy_tail4 // } while --CX > 0 - -axpy_tail_start: // Reset loop counter for 1-wide tail loop - MOVQ BX, CX // CX = BX % 4 - ANDQ $3, CX - JZ axpy_end // if CX == 0 { return } - -axpy_tail: - MOVSS (SI)(AX*4), X1 // X1 = x[i] - MULSS X0, X1 // X1 *= a - ADDSS (DI)(AX*4), X1 // X1 += y[i] - MOVSS X1, (DI)(AX*4) // y[i] = X1 - INCQ AX // i++ - LOOP axpy_tail // } while --CX > 0 - -axpy_end: - RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f32/axpyunitaryto_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f32/axpyunitaryto_amd64.s deleted file mode 100644 index e26ccff35..000000000 --- a/vendor/gonum.org/v1/gonum/internal/asm/f32/axpyunitaryto_amd64.s +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright ©2016 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build !noasm,!appengine,!safe - -#include "textflag.h" - -// func AxpyUnitaryTo(dst []float32, alpha float32, x, y []float32) -TEXT ·AxpyUnitaryTo(SB), NOSPLIT, $0 - MOVQ dst_base+0(FP), DI // DI = &dst - MOVQ x_base+32(FP), SI // SI = &x - MOVQ y_base+56(FP), DX // DX = &y - MOVQ x_len+40(FP), BX // BX = min( len(x), len(y), len(dst) ) - CMPQ y_len+64(FP), BX - CMOVQLE y_len+64(FP), BX - CMPQ dst_len+8(FP), BX - CMOVQLE dst_len+8(FP), BX - CMPQ BX, $0 // if BX == 0 { return } - JE axpy_end - MOVSS alpha+24(FP), X0 - SHUFPS $0, X0, X0 // X0 = { a, a, a, a, } - XORQ AX, AX // i = 0 - MOVQ DX, CX - ANDQ $0xF, CX // Align on 16-byte boundary for ADDPS - JZ axpy_no_trim // if CX == 0 { goto axpy_no_trim } - - XORQ $0xF, CX // CX = 4 - floor ( B % 16 / 4 ) - INCQ CX - SHRQ $2, CX - -axpy_align: // Trim first value(s) in unaligned buffer do { - MOVSS (SI)(AX*4), X2 // X2 = x[i] - MULSS X0, X2 // X2 *= a - ADDSS (DX)(AX*4), X2 // X2 += y[i] - MOVSS X2, (DI)(AX*4) // y[i] = X2 - INCQ AX // i++ - DECQ BX - JZ axpy_end // if --BX == 0 { return } - LOOP axpy_align // } while --CX > 0 - -axpy_no_trim: - MOVUPS X0, X1 // Copy X0 to X1 for pipelining - MOVQ BX, CX - ANDQ $0xF, BX // BX = len % 16 - SHRQ $4, CX // CX = floor( len / 16 ) - JZ axpy_tail4_start // if CX == 0 { return } - -axpy_loop: // Loop unrolled 16x do { - MOVUPS (SI)(AX*4), X2 // X2 = x[i:i+4] - MOVUPS 16(SI)(AX*4), X3 - MOVUPS 32(SI)(AX*4), X4 - MOVUPS 48(SI)(AX*4), X5 - MULPS X0, X2 // X2 *= a - MULPS X1, X3 - MULPS X0, X4 - MULPS X1, X5 - ADDPS (DX)(AX*4), X2 // X2 += y[i:i+4] - ADDPS 16(DX)(AX*4), X3 - ADDPS 32(DX)(AX*4), X4 - ADDPS 48(DX)(AX*4), X5 - MOVUPS X2, (DI)(AX*4) // dst[i:i+4] = X2 - MOVUPS X3, 16(DI)(AX*4) - MOVUPS X4, 32(DI)(AX*4) - MOVUPS X5, 48(DI)(AX*4) - ADDQ $16, AX // i += 16 - LOOP axpy_loop // while (--CX) > 0 - CMPQ BX, $0 // if BX == 0 { return } - JE axpy_end - -axpy_tail4_start: // Reset loop counter for 4-wide tail loop - MOVQ BX, CX // CX = floor( BX / 4 ) - SHRQ $2, CX - JZ axpy_tail_start // if CX == 0 { goto axpy_tail_start } - -axpy_tail4: // Loop unrolled 4x do { - MOVUPS (SI)(AX*4), X2 // X2 = x[i] - MULPS X0, X2 // X2 *= a - ADDPS (DX)(AX*4), X2 // X2 += y[i] - MOVUPS X2, (DI)(AX*4) // y[i] = X2 - ADDQ $4, AX // i += 4 - LOOP axpy_tail4 // } while --CX > 0 - -axpy_tail_start: // Reset loop counter for 1-wide tail loop - MOVQ BX, CX // CX = BX % 4 - ANDQ $3, CX - JZ axpy_end // if CX == 0 { return } - -axpy_tail: - MOVSS (SI)(AX*4), X1 // X1 = x[i] - MULSS X0, X1 // X1 *= a - ADDSS (DX)(AX*4), X1 // X1 += y[i] - MOVSS X1, (DI)(AX*4) // y[i] = X1 - INCQ AX // i++ - LOOP axpy_tail // } while --CX > 0 - -axpy_end: - RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f32/ddotinc_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f32/ddotinc_amd64.s deleted file mode 100644 index de9e31292..000000000 --- a/vendor/gonum.org/v1/gonum/internal/asm/f32/ddotinc_amd64.s +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright ©2017 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build !noasm,!appengine,!safe - -#include "textflag.h" - -#define X_PTR SI -#define Y_PTR DI -#define LEN CX -#define TAIL BX -#define INC_X R8 -#define INCx3_X R10 -#define INC_Y R9 -#define INCx3_Y R11 -#define SUM X0 -#define P_SUM X1 - -// func DdotInc(x, y []float32, n, incX, incY, ix, iy uintptr) (sum float64) -TEXT ·DdotInc(SB), NOSPLIT, $0 - MOVQ x_base+0(FP), X_PTR // X_PTR = &x - MOVQ y_base+24(FP), Y_PTR // Y_PTR = &y - MOVQ n+48(FP), LEN // LEN = n - PXOR SUM, SUM // SUM = 0 - CMPQ LEN, $0 - JE dot_end - - MOVQ ix+72(FP), INC_X // INC_X = ix - MOVQ iy+80(FP), INC_Y // INC_Y = iy - LEAQ (X_PTR)(INC_X*4), X_PTR // X_PTR = &(x[ix]) - LEAQ (Y_PTR)(INC_Y*4), Y_PTR // Y_PTR = &(y[iy]) - - MOVQ incX+56(FP), INC_X // INC_X = incX * sizeof(float32) - SHLQ $2, INC_X - MOVQ incY+64(FP), INC_Y // INC_Y = incY * sizeof(float32) - SHLQ $2, INC_Y - - MOVQ LEN, TAIL - ANDQ $3, TAIL // TAIL = LEN % 4 - SHRQ $2, LEN // LEN = floor( LEN / 4 ) - JZ dot_tail // if LEN == 0 { goto dot_tail } - - PXOR P_SUM, P_SUM // P_SUM = 0 for pipelining - LEAQ (INC_X)(INC_X*2), INCx3_X // INCx3_X = INC_X * 3 - LEAQ (INC_Y)(INC_Y*2), INCx3_Y // INCx3_Y = INC_Y * 3 - -dot_loop: // Loop unrolled 4x do { - CVTSS2SD (X_PTR), X2 // X_i = x[i:i+1] - CVTSS2SD (X_PTR)(INC_X*1), X3 - CVTSS2SD (X_PTR)(INC_X*2), X4 - CVTSS2SD (X_PTR)(INCx3_X*1), X5 - - CVTSS2SD (Y_PTR), X6 // X_j = y[i:i+1] - CVTSS2SD (Y_PTR)(INC_Y*1), X7 - CVTSS2SD (Y_PTR)(INC_Y*2), X8 - CVTSS2SD (Y_PTR)(INCx3_Y*1), X9 - - MULSD X6, X2 // X_i *= X_j - MULSD X7, X3 - MULSD X8, X4 - MULSD X9, X5 - - ADDSD X2, SUM // SUM += X_i - ADDSD X3, P_SUM - ADDSD X4, SUM - ADDSD X5, P_SUM - - LEAQ (X_PTR)(INC_X*4), X_PTR // X_PTR = &(X_PTR[INC_X * 4]) - LEAQ (Y_PTR)(INC_Y*4), Y_PTR // Y_PTR = &(Y_PTR[INC_Y * 4]) - - DECQ LEN - JNZ dot_loop // } while --LEN > 0 - - ADDSD P_SUM, SUM // SUM += P_SUM - CMPQ TAIL, $0 // if TAIL == 0 { return } - JE dot_end - -dot_tail: // do { - CVTSS2SD (X_PTR), X2 // X2 = x[i] - CVTSS2SD (Y_PTR), X3 // X2 *= y[i] - MULSD X3, X2 - ADDSD X2, SUM // SUM += X2 - ADDQ INC_X, X_PTR // X_PTR += INC_X - ADDQ INC_Y, Y_PTR // Y_PTR += INC_Y - DECQ TAIL - JNZ dot_tail // } while --TAIL > 0 - -dot_end: - MOVSD SUM, sum+88(FP) // return SUM - RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f32/ddotunitary_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f32/ddotunitary_amd64.s deleted file mode 100644 index d39ab7860..000000000 --- a/vendor/gonum.org/v1/gonum/internal/asm/f32/ddotunitary_amd64.s +++ /dev/null @@ -1,110 +0,0 @@ -// Copyright ©2017 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build !noasm,!appengine,!safe - -#include "textflag.h" - -#define HADDPD_SUM_SUM LONG $0xC07C0F66 // @ HADDPD X0, X0 - -#define X_PTR SI -#define Y_PTR DI -#define LEN CX -#define TAIL BX -#define IDX AX -#define SUM X0 -#define P_SUM X1 - -// func DdotUnitary(x, y []float32) (sum float32) -TEXT ·DdotUnitary(SB), NOSPLIT, $0 - MOVQ x_base+0(FP), X_PTR // X_PTR = &x - MOVQ y_base+24(FP), Y_PTR // Y_PTR = &y - MOVQ x_len+8(FP), LEN // LEN = min( len(x), len(y) ) - CMPQ y_len+32(FP), LEN - CMOVQLE y_len+32(FP), LEN - PXOR SUM, SUM // psum = 0 - CMPQ LEN, $0 - JE dot_end - - XORQ IDX, IDX - MOVQ Y_PTR, DX - ANDQ $0xF, DX // Align on 16-byte boundary for ADDPS - JZ dot_no_trim // if DX == 0 { goto dot_no_trim } - - SUBQ $16, DX - -dot_align: // Trim first value(s) in unaligned buffer do { - CVTSS2SD (X_PTR)(IDX*4), X2 // X2 = float64(x[i]) - CVTSS2SD (Y_PTR)(IDX*4), X3 // X3 = float64(y[i]) - MULSD X3, X2 - ADDSD X2, SUM // SUM += X2 - INCQ IDX // IDX++ - DECQ LEN - JZ dot_end // if --TAIL == 0 { return } - ADDQ $4, DX - JNZ dot_align // } while --LEN > 0 - -dot_no_trim: - PXOR P_SUM, P_SUM // P_SUM = 0 for pipelining - MOVQ LEN, TAIL - ANDQ $0x7, TAIL // TAIL = LEN % 8 - SHRQ $3, LEN // LEN = floor( LEN / 8 ) - JZ dot_tail_start // if LEN == 0 { goto dot_tail_start } - -dot_loop: // Loop unrolled 8x do { - CVTPS2PD (X_PTR)(IDX*4), X2 // X_i = x[i:i+1] - CVTPS2PD 8(X_PTR)(IDX*4), X3 - CVTPS2PD 16(X_PTR)(IDX*4), X4 - CVTPS2PD 24(X_PTR)(IDX*4), X5 - - CVTPS2PD (Y_PTR)(IDX*4), X6 // X_j = y[i:i+1] - CVTPS2PD 8(Y_PTR)(IDX*4), X7 - CVTPS2PD 16(Y_PTR)(IDX*4), X8 - CVTPS2PD 24(Y_PTR)(IDX*4), X9 - - MULPD X6, X2 // X_i *= X_j - MULPD X7, X3 - MULPD X8, X4 - MULPD X9, X5 - - ADDPD X2, SUM // SUM += X_i - ADDPD X3, P_SUM - ADDPD X4, SUM - ADDPD X5, P_SUM - - ADDQ $8, IDX // IDX += 8 - DECQ LEN - JNZ dot_loop // } while --LEN > 0 - - ADDPD P_SUM, SUM // SUM += P_SUM - CMPQ TAIL, $0 // if TAIL == 0 { return } - JE dot_end - -dot_tail_start: - MOVQ TAIL, LEN - SHRQ $1, LEN - JZ dot_tail_one - -dot_tail_two: - CVTPS2PD (X_PTR)(IDX*4), X2 // X_i = x[i:i+1] - CVTPS2PD (Y_PTR)(IDX*4), X6 // X_j = y[i:i+1] - MULPD X6, X2 // X_i *= X_j - ADDPD X2, SUM // SUM += X_i - ADDQ $2, IDX // IDX += 2 - DECQ LEN - JNZ dot_tail_two // } while --LEN > 0 - - ANDQ $1, TAIL - JZ dot_end - -dot_tail_one: - CVTSS2SD (X_PTR)(IDX*4), X2 // X2 = float64(x[i]) - CVTSS2SD (Y_PTR)(IDX*4), X3 // X3 = float64(y[i]) - MULSD X3, X2 // X2 *= X3 - ADDSD X2, SUM // SUM += X2 - -dot_end: - HADDPD_SUM_SUM // SUM = \sum{ SUM[i] } - MOVSD SUM, sum+48(FP) // return SUM - RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f32/doc.go b/vendor/gonum.org/v1/gonum/internal/asm/f32/doc.go deleted file mode 100644 index 408847a69..000000000 --- a/vendor/gonum.org/v1/gonum/internal/asm/f32/doc.go +++ /dev/null @@ -1,6 +0,0 @@ -// Copyright ©2017 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package f32 provides float32 vector primitives. -package f32 // import "gonum.org/v1/gonum/internal/asm/f32" diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f32/dotinc_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f32/dotinc_amd64.s deleted file mode 100644 index b6f40210c..000000000 --- a/vendor/gonum.org/v1/gonum/internal/asm/f32/dotinc_amd64.s +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright ©2017 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build !noasm,!appengine,!safe - -#include "textflag.h" - -#define X_PTR SI -#define Y_PTR DI -#define LEN CX -#define TAIL BX -#define INC_X R8 -#define INCx3_X R10 -#define INC_Y R9 -#define INCx3_Y R11 -#define SUM X0 -#define P_SUM X1 - -// func DotInc(x, y []float32, n, incX, incY, ix, iy uintptr) (sum float32) -TEXT ·DotInc(SB), NOSPLIT, $0 - MOVQ x_base+0(FP), X_PTR // X_PTR = &x - MOVQ y_base+24(FP), Y_PTR // Y_PTR = &y - PXOR SUM, SUM // SUM = 0 - MOVQ n+48(FP), LEN // LEN = n - CMPQ LEN, $0 - JE dot_end - - MOVQ ix+72(FP), INC_X // INC_X = ix - MOVQ iy+80(FP), INC_Y // INC_Y = iy - LEAQ (X_PTR)(INC_X*4), X_PTR // X_PTR = &(x[ix]) - LEAQ (Y_PTR)(INC_Y*4), Y_PTR // Y_PTR = &(y[iy]) - - MOVQ incX+56(FP), INC_X // INC_X := incX * sizeof(float32) - SHLQ $2, INC_X - MOVQ incY+64(FP), INC_Y // INC_Y := incY * sizeof(float32) - SHLQ $2, INC_Y - - MOVQ LEN, TAIL - ANDQ $0x3, TAIL // TAIL = LEN % 4 - SHRQ $2, LEN // LEN = floor( LEN / 4 ) - JZ dot_tail // if LEN == 0 { goto dot_tail } - - PXOR P_SUM, P_SUM // P_SUM = 0 for pipelining - LEAQ (INC_X)(INC_X*2), INCx3_X // INCx3_X = INC_X * 3 - LEAQ (INC_Y)(INC_Y*2), INCx3_Y // INCx3_Y = INC_Y * 3 - -dot_loop: // Loop unrolled 4x do { - MOVSS (X_PTR), X2 // X_i = x[i:i+1] - MOVSS (X_PTR)(INC_X*1), X3 - MOVSS (X_PTR)(INC_X*2), X4 - MOVSS (X_PTR)(INCx3_X*1), X5 - - MULSS (Y_PTR), X2 // X_i *= y[i:i+1] - MULSS (Y_PTR)(INC_Y*1), X3 - MULSS (Y_PTR)(INC_Y*2), X4 - MULSS (Y_PTR)(INCx3_Y*1), X5 - - ADDSS X2, SUM // SUM += X_i - ADDSS X3, P_SUM - ADDSS X4, SUM - ADDSS X5, P_SUM - - LEAQ (X_PTR)(INC_X*4), X_PTR // X_PTR = &(X_PTR[INC_X * 4]) - LEAQ (Y_PTR)(INC_Y*4), Y_PTR // Y_PTR = &(Y_PTR[INC_Y * 4]) - - DECQ LEN - JNZ dot_loop // } while --LEN > 0 - - ADDSS P_SUM, SUM // P_SUM += SUM - CMPQ TAIL, $0 // if TAIL == 0 { return } - JE dot_end - -dot_tail: // do { - MOVSS (X_PTR), X2 // X2 = x[i] - MULSS (Y_PTR), X2 // X2 *= y[i] - ADDSS X2, SUM // SUM += X2 - ADDQ INC_X, X_PTR // X_PTR += INC_X - ADDQ INC_Y, Y_PTR // Y_PTR += INC_Y - DECQ TAIL - JNZ dot_tail // } while --TAIL > 0 - -dot_end: - MOVSS SUM, sum+88(FP) // return SUM - RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f32/dotunitary_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f32/dotunitary_amd64.s deleted file mode 100644 index fd4f7b4e0..000000000 --- a/vendor/gonum.org/v1/gonum/internal/asm/f32/dotunitary_amd64.s +++ /dev/null @@ -1,106 +0,0 @@ -// Copyright ©2017 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build !noasm,!appengine,!safe - -#include "textflag.h" - -#define HADDPS_SUM_SUM LONG $0xC07C0FF2 // @ HADDPS X0, X0 - -#define X_PTR SI -#define Y_PTR DI -#define LEN CX -#define TAIL BX -#define IDX AX -#define SUM X0 -#define P_SUM X1 - -// func DotUnitary(x, y []float32) (sum float32) -TEXT ·DotUnitary(SB), NOSPLIT, $0 - MOVQ x_base+0(FP), X_PTR // X_PTR = &x - MOVQ y_base+24(FP), Y_PTR // Y_PTR = &y - PXOR SUM, SUM // SUM = 0 - MOVQ x_len+8(FP), LEN // LEN = min( len(x), len(y) ) - CMPQ y_len+32(FP), LEN - CMOVQLE y_len+32(FP), LEN - CMPQ LEN, $0 - JE dot_end - - XORQ IDX, IDX - MOVQ Y_PTR, DX - ANDQ $0xF, DX // Align on 16-byte boundary for MULPS - JZ dot_no_trim // if DX == 0 { goto dot_no_trim } - SUBQ $16, DX - -dot_align: // Trim first value(s) in unaligned buffer do { - MOVSS (X_PTR)(IDX*4), X2 // X2 = x[i] - MULSS (Y_PTR)(IDX*4), X2 // X2 *= y[i] - ADDSS X2, SUM // SUM += X2 - INCQ IDX // IDX++ - DECQ LEN - JZ dot_end // if --TAIL == 0 { return } - ADDQ $4, DX - JNZ dot_align // } while --DX > 0 - -dot_no_trim: - PXOR P_SUM, P_SUM // P_SUM = 0 for pipelining - MOVQ LEN, TAIL - ANDQ $0xF, TAIL // TAIL = LEN % 16 - SHRQ $4, LEN // LEN = floor( LEN / 16 ) - JZ dot_tail4_start // if LEN == 0 { goto dot_tail4_start } - -dot_loop: // Loop unrolled 16x do { - MOVUPS (X_PTR)(IDX*4), X2 // X_i = x[i:i+1] - MOVUPS 16(X_PTR)(IDX*4), X3 - MOVUPS 32(X_PTR)(IDX*4), X4 - MOVUPS 48(X_PTR)(IDX*4), X5 - - MULPS (Y_PTR)(IDX*4), X2 // X_i *= y[i:i+1] - MULPS 16(Y_PTR)(IDX*4), X3 - MULPS 32(Y_PTR)(IDX*4), X4 - MULPS 48(Y_PTR)(IDX*4), X5 - - ADDPS X2, SUM // SUM += X_i - ADDPS X3, P_SUM - ADDPS X4, SUM - ADDPS X5, P_SUM - - ADDQ $16, IDX // IDX += 16 - DECQ LEN - JNZ dot_loop // } while --LEN > 0 - - ADDPS P_SUM, SUM // SUM += P_SUM - CMPQ TAIL, $0 // if TAIL == 0 { return } - JE dot_end - -dot_tail4_start: // Reset loop counter for 4-wide tail loop - MOVQ TAIL, LEN // LEN = floor( TAIL / 4 ) - SHRQ $2, LEN - JZ dot_tail_start // if LEN == 0 { goto dot_tail_start } - -dot_tail4_loop: // Loop unrolled 4x do { - MOVUPS (X_PTR)(IDX*4), X2 // X_i = x[i:i+1] - MULPS (Y_PTR)(IDX*4), X2 // X_i *= y[i:i+1] - ADDPS X2, SUM // SUM += X_i - ADDQ $4, IDX // i += 4 - DECQ LEN - JNZ dot_tail4_loop // } while --LEN > 0 - -dot_tail_start: // Reset loop counter for 1-wide tail loop - ANDQ $3, TAIL // TAIL = TAIL % 4 - JZ dot_end // if TAIL == 0 { return } - -dot_tail: // do { - MOVSS (X_PTR)(IDX*4), X2 // X2 = x[i] - MULSS (Y_PTR)(IDX*4), X2 // X2 *= y[i] - ADDSS X2, SUM // psum += X2 - INCQ IDX // IDX++ - DECQ TAIL - JNZ dot_tail // } while --TAIL > 0 - -dot_end: - HADDPS_SUM_SUM // SUM = \sum{ SUM[i] } - HADDPS_SUM_SUM - MOVSS SUM, sum+48(FP) // return SUM - RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f32/ge_amd64.go b/vendor/gonum.org/v1/gonum/internal/asm/f32/ge_amd64.go deleted file mode 100644 index 2b336a2af..000000000 --- a/vendor/gonum.org/v1/gonum/internal/asm/f32/ge_amd64.go +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright ©2017 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !noasm,!appengine,!safe - -package f32 - -// Ger performs the rank-one operation -// A += alpha * x * y^T -// where A is an m×n dense matrix, x and y are vectors, and alpha is a scalar. 
-func Ger(m, n uintptr, alpha float32, - x []float32, incX uintptr, - y []float32, incY uintptr, - a []float32, lda uintptr) diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f32/ge_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f32/ge_amd64.s deleted file mode 100644 index ecb2641a9..000000000 --- a/vendor/gonum.org/v1/gonum/internal/asm/f32/ge_amd64.s +++ /dev/null @@ -1,757 +0,0 @@ -// Copyright ©2017 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !noasm,!appengine,!safe - -#include "textflag.h" - -#define SIZE 4 -#define BITSIZE 2 -#define KERNELSIZE 3 - -#define M_DIM m+0(FP) -#define M CX -#define N_DIM n+8(FP) -#define N BX - -#define TMP1 R14 -#define TMP2 R15 - -#define X_PTR SI -#define Y y_base+56(FP) -#define Y_PTR DX -#define A_ROW AX -#define A_PTR DI - -#define INC_X R8 -#define INC3_X R9 - -#define INC_Y R10 -#define INC3_Y R11 - -#define LDA R12 -#define LDA3 R13 - -#define ALPHA X0 -#define ALPHA_SPILL al-16(SP) - -#define LOAD_ALPHA \ - MOVSS alpha+16(FP), ALPHA \ - SHUFPS $0, ALPHA, ALPHA - -#define LOAD_SCALED4 \ - PREFETCHNTA 16*SIZE(X_PTR) \ - MOVDDUP (X_PTR), X1 \ - MOVDDUP 2*SIZE(X_PTR), X3 \ - MOVSHDUP X1, X2 \ - MOVSHDUP X3, X4 \ - MOVSLDUP X1, X1 \ - MOVSLDUP X3, X3 \ - MULPS ALPHA, X1 \ - MULPS ALPHA, X2 \ - MULPS ALPHA, X3 \ - MULPS ALPHA, X4 - -#define LOAD_SCALED2 \ - MOVDDUP (X_PTR), X1 \ - MOVSHDUP X1, X2 \ - MOVSLDUP X1, X1 \ - MULPS ALPHA, X1 \ - MULPS ALPHA, X2 - -#define LOAD_SCALED1 \ - MOVSS (X_PTR), X1 \ - SHUFPS $0, X1, X1 \ - MULPS ALPHA, X1 - -#define LOAD_SCALED4_INC \ - PREFETCHNTA (X_PTR)(INC_X*8) \ - MOVSS (X_PTR), X1 \ - MOVSS (X_PTR)(INC_X*1), X2 \ - MOVSS (X_PTR)(INC_X*2), X3 \ - MOVSS (X_PTR)(INC3_X*1), X4 \ - SHUFPS $0, X1, X1 \ - SHUFPS $0, X2, X2 \ - SHUFPS $0, X3, X3 \ - SHUFPS $0, X4, X4 \ - MULPS ALPHA, X1 \ - MULPS ALPHA, X2 \ - MULPS ALPHA, X3 \ - MULPS ALPHA, X4 - -#define LOAD_SCALED2_INC \ - MOVSS (X_PTR), X1 \ - MOVSS (X_PTR)(INC_X*1), X2 \ - SHUFPS $0, X1, X1 \ - SHUFPS $0, X2, X2 \ - MULPS ALPHA, X1 \ - MULPS ALPHA, X2 - -#define KERNEL_LOAD8 \ - MOVUPS (Y_PTR), X5 \ - MOVUPS 4*SIZE(Y_PTR), X6 - -#define KERNEL_LOAD8_INC \ - MOVSS (Y_PTR), X5 \ - MOVSS (Y_PTR)(INC_Y*1), X6 \ - MOVSS (Y_PTR)(INC_Y*2), X7 \ - MOVSS (Y_PTR)(INC3_Y*1), X8 \ - UNPCKLPS X6, X5 \ - UNPCKLPS X8, X7 \ - MOVLHPS X7, X5 \ - LEAQ (Y_PTR)(INC_Y*4), Y_PTR \ - MOVSS (Y_PTR), X6 \ - MOVSS (Y_PTR)(INC_Y*1), X7 \ - MOVSS (Y_PTR)(INC_Y*2), X8 \ - MOVSS (Y_PTR)(INC3_Y*1), X9 \ - UNPCKLPS X7, X6 \ - UNPCKLPS X9, X8 \ - MOVLHPS X8, X6 - -#define KERNEL_LOAD4 \ - MOVUPS (Y_PTR), X5 - -#define KERNEL_LOAD4_INC \ - MOVSS (Y_PTR), X5 \ - MOVSS (Y_PTR)(INC_Y*1), X6 \ - MOVSS (Y_PTR)(INC_Y*2), X7 \ - MOVSS (Y_PTR)(INC3_Y*1), X8 \ - UNPCKLPS X6, X5 \ - UNPCKLPS X8, X7 \ - MOVLHPS X7, X5 - -#define KERNEL_LOAD2 \ - MOVSD (Y_PTR), X5 - -#define KERNEL_LOAD2_INC \ - MOVSS (Y_PTR), X5 \ - MOVSS (Y_PTR)(INC_Y*1), X6 \ - UNPCKLPS X6, X5 - -#define KERNEL_4x8 \ - MOVUPS X5, X7 \ - MOVUPS X6, X8 \ - MOVUPS X5, X9 \ - MOVUPS X6, X10 \ - MOVUPS X5, X11 \ - MOVUPS X6, X12 \ - MULPS X1, X5 \ - MULPS X1, X6 \ - MULPS X2, X7 \ - MULPS X2, X8 \ - MULPS X3, X9 \ - MULPS X3, X10 \ - MULPS X4, X11 \ - MULPS X4, X12 - -#define STORE_4x8 \ - MOVUPS ALPHA, ALPHA_SPILL \ - MOVUPS (A_PTR), X13 \ - ADDPS X13, X5 \ - MOVUPS 4*SIZE(A_PTR), X14 \ - ADDPS X14, X6 \ - MOVUPS (A_PTR)(LDA*1), X15 \ - ADDPS X15, X7 \ - MOVUPS 4*SIZE(A_PTR)(LDA*1), X0 \ - ADDPS X0, X8 \ - MOVUPS 
(A_PTR)(LDA*2), X13 \ - ADDPS X13, X9 \ - MOVUPS 4*SIZE(A_PTR)(LDA*2), X14 \ - ADDPS X14, X10 \ - MOVUPS (A_PTR)(LDA3*1), X15 \ - ADDPS X15, X11 \ - MOVUPS 4*SIZE(A_PTR)(LDA3*1), X0 \ - ADDPS X0, X12 \ - MOVUPS X5, (A_PTR) \ - MOVUPS X6, 4*SIZE(A_PTR) \ - MOVUPS X7, (A_PTR)(LDA*1) \ - MOVUPS X8, 4*SIZE(A_PTR)(LDA*1) \ - MOVUPS X9, (A_PTR)(LDA*2) \ - MOVUPS X10, 4*SIZE(A_PTR)(LDA*2) \ - MOVUPS X11, (A_PTR)(LDA3*1) \ - MOVUPS X12, 4*SIZE(A_PTR)(LDA3*1) \ - MOVUPS ALPHA_SPILL, ALPHA \ - ADDQ $8*SIZE, A_PTR - -#define KERNEL_4x4 \ - MOVUPS X5, X6 \ - MOVUPS X5, X7 \ - MOVUPS X5, X8 \ - MULPS X1, X5 \ - MULPS X2, X6 \ - MULPS X3, X7 \ - MULPS X4, X8 - -#define STORE_4x4 \ - MOVUPS (A_PTR), X13 \ - ADDPS X13, X5 \ - MOVUPS (A_PTR)(LDA*1), X14 \ - ADDPS X14, X6 \ - MOVUPS (A_PTR)(LDA*2), X15 \ - ADDPS X15, X7 \ - MOVUPS (A_PTR)(LDA3*1), X13 \ - ADDPS X13, X8 \ - MOVUPS X5, (A_PTR) \ - MOVUPS X6, (A_PTR)(LDA*1) \ - MOVUPS X7, (A_PTR)(LDA*2) \ - MOVUPS X8, (A_PTR)(LDA3*1) \ - ADDQ $4*SIZE, A_PTR - -#define KERNEL_4x2 \ - MOVUPS X5, X6 \ - MOVUPS X5, X7 \ - MOVUPS X5, X8 \ - MULPS X1, X5 \ - MULPS X2, X6 \ - MULPS X3, X7 \ - MULPS X4, X8 - -#define STORE_4x2 \ - MOVSD (A_PTR), X9 \ - ADDPS X9, X5 \ - MOVSD (A_PTR)(LDA*1), X10 \ - ADDPS X10, X6 \ - MOVSD (A_PTR)(LDA*2), X11 \ - ADDPS X11, X7 \ - MOVSD (A_PTR)(LDA3*1), X12 \ - ADDPS X12, X8 \ - MOVSD X5, (A_PTR) \ - MOVSD X6, (A_PTR)(LDA*1) \ - MOVSD X7, (A_PTR)(LDA*2) \ - MOVSD X8, (A_PTR)(LDA3*1) \ - ADDQ $2*SIZE, A_PTR - -#define KERNEL_4x1 \ - MOVSS (Y_PTR), X5 \ - MOVSS X5, X6 \ - MOVSS X5, X7 \ - MOVSS X5, X8 \ - MULSS X1, X5 \ - MULSS X2, X6 \ - MULSS X3, X7 \ - MULSS X4, X8 - -#define STORE_4x1 \ - ADDSS (A_PTR), X5 \ - ADDSS (A_PTR)(LDA*1), X6 \ - ADDSS (A_PTR)(LDA*2), X7 \ - ADDSS (A_PTR)(LDA3*1), X8 \ - MOVSS X5, (A_PTR) \ - MOVSS X6, (A_PTR)(LDA*1) \ - MOVSS X7, (A_PTR)(LDA*2) \ - MOVSS X8, (A_PTR)(LDA3*1) \ - ADDQ $SIZE, A_PTR - -#define KERNEL_2x8 \ - MOVUPS X5, X7 \ - MOVUPS X6, X8 \ - MULPS X1, X5 \ - MULPS X1, X6 \ - MULPS X2, X7 \ - MULPS X2, X8 - -#define STORE_2x8 \ - MOVUPS (A_PTR), X9 \ - ADDPS X9, X5 \ - MOVUPS 4*SIZE(A_PTR), X10 \ - ADDPS X10, X6 \ - MOVUPS (A_PTR)(LDA*1), X11 \ - ADDPS X11, X7 \ - MOVUPS 4*SIZE(A_PTR)(LDA*1), X12 \ - ADDPS X12, X8 \ - MOVUPS X5, (A_PTR) \ - MOVUPS X6, 4*SIZE(A_PTR) \ - MOVUPS X7, (A_PTR)(LDA*1) \ - MOVUPS X8, 4*SIZE(A_PTR)(LDA*1) \ - ADDQ $8*SIZE, A_PTR - -#define KERNEL_2x4 \ - MOVUPS X5, X6 \ - MULPS X1, X5 \ - MULPS X2, X6 - -#define STORE_2x4 \ - MOVUPS (A_PTR), X9 \ - ADDPS X9, X5 \ - MOVUPS (A_PTR)(LDA*1), X11 \ - ADDPS X11, X6 \ - MOVUPS X5, (A_PTR) \ - MOVUPS X6, (A_PTR)(LDA*1) \ - ADDQ $4*SIZE, A_PTR - -#define KERNEL_2x2 \ - MOVSD X5, X6 \ - MULPS X1, X5 \ - MULPS X2, X6 - -#define STORE_2x2 \ - MOVSD (A_PTR), X7 \ - ADDPS X7, X5 \ - MOVSD (A_PTR)(LDA*1), X8 \ - ADDPS X8, X6 \ - MOVSD X5, (A_PTR) \ - MOVSD X6, (A_PTR)(LDA*1) \ - ADDQ $2*SIZE, A_PTR - -#define KERNEL_2x1 \ - MOVSS (Y_PTR), X5 \ - MOVSS X5, X6 \ - MULSS X1, X5 \ - MULSS X2, X6 - -#define STORE_2x1 \ - ADDSS (A_PTR), X5 \ - ADDSS (A_PTR)(LDA*1), X6 \ - MOVSS X5, (A_PTR) \ - MOVSS X6, (A_PTR)(LDA*1) \ - ADDQ $SIZE, A_PTR - -#define KERNEL_1x8 \ - MULPS X1, X5 \ - MULPS X1, X6 - -#define STORE_1x8 \ - MOVUPS (A_PTR), X7 \ - ADDPS X7, X5 \ - MOVUPS 4*SIZE(A_PTR), X8 \ - ADDPS X8, X6 \ - MOVUPS X5, (A_PTR) \ - MOVUPS X6, 4*SIZE(A_PTR) \ - ADDQ $8*SIZE, A_PTR - -#define KERNEL_1x4 \ - MULPS X1, X5 \ - MULPS X1, X6 - -#define STORE_1x4 \ - MOVUPS (A_PTR), X7 \ - ADDPS X7, X5 \ - MOVUPS X5, (A_PTR) \ - ADDQ $4*SIZE, A_PTR - 
-#define KERNEL_1x2 \ - MULPS X1, X5 - -#define STORE_1x2 \ - MOVSD (A_PTR), X6 \ - ADDPS X6, X5 \ - MOVSD X5, (A_PTR) \ - ADDQ $2*SIZE, A_PTR - -#define KERNEL_1x1 \ - MOVSS (Y_PTR), X5 \ - MULSS X1, X5 - -#define STORE_1x1 \ - ADDSS (A_PTR), X5 \ - MOVSS X5, (A_PTR) \ - ADDQ $SIZE, A_PTR - -// func Ger(m, n uintptr, alpha float32, -// x []float32, incX uintptr, -// y []float32, incY uintptr, -// a []float32, lda uintptr) -TEXT ·Ger(SB), 0, $16-120 - MOVQ M_DIM, M - MOVQ N_DIM, N - CMPQ M, $0 - JE end - CMPQ N, $0 - JE end - - LOAD_ALPHA - - MOVQ x_base+24(FP), X_PTR - MOVQ y_base+56(FP), Y_PTR - MOVQ a_base+88(FP), A_ROW - MOVQ A_ROW, A_PTR - MOVQ lda+112(FP), LDA // LDA = LDA * sizeof(float32) - SHLQ $BITSIZE, LDA - LEAQ (LDA)(LDA*2), LDA3 // LDA3 = LDA * 3 - - CMPQ incY+80(FP), $1 // Check for dense vector Y (fast-path) - JNE inc - CMPQ incX+48(FP), $1 // Check for dense vector X (fast-path) - JNE inc - - SHRQ $2, M - JZ r2 - -r4: - - // LOAD 4 - LOAD_SCALED4 - - MOVQ N_DIM, N - SHRQ $KERNELSIZE, N - JZ r4c4 - -r4c8: - // 4x8 KERNEL - KERNEL_LOAD8 - KERNEL_4x8 - STORE_4x8 - - ADDQ $8*SIZE, Y_PTR - - DECQ N - JNZ r4c8 - -r4c4: - TESTQ $4, N_DIM - JZ r4c2 - - // 4x4 KERNEL - KERNEL_LOAD4 - KERNEL_4x4 - STORE_4x4 - - ADDQ $4*SIZE, Y_PTR - -r4c2: - TESTQ $2, N_DIM - JZ r4c1 - - // 4x2 KERNEL - KERNEL_LOAD2 - KERNEL_4x2 - STORE_4x2 - - ADDQ $2*SIZE, Y_PTR - -r4c1: - TESTQ $1, N_DIM - JZ r4end - - // 4x1 KERNEL - KERNEL_4x1 - STORE_4x1 - - ADDQ $SIZE, Y_PTR - -r4end: - ADDQ $4*SIZE, X_PTR - MOVQ Y, Y_PTR - LEAQ (A_ROW)(LDA*4), A_ROW - MOVQ A_ROW, A_PTR - - DECQ M - JNZ r4 - -r2: - TESTQ $2, M_DIM - JZ r1 - - // LOAD 2 - LOAD_SCALED2 - - MOVQ N_DIM, N - SHRQ $KERNELSIZE, N - JZ r2c4 - -r2c8: - // 2x8 KERNEL - KERNEL_LOAD8 - KERNEL_2x8 - STORE_2x8 - - ADDQ $8*SIZE, Y_PTR - - DECQ N - JNZ r2c8 - -r2c4: - TESTQ $4, N_DIM - JZ r2c2 - - // 2x4 KERNEL - KERNEL_LOAD4 - KERNEL_2x4 - STORE_2x4 - - ADDQ $4*SIZE, Y_PTR - -r2c2: - TESTQ $2, N_DIM - JZ r2c1 - - // 2x2 KERNEL - KERNEL_LOAD2 - KERNEL_2x2 - STORE_2x2 - - ADDQ $2*SIZE, Y_PTR - -r2c1: - TESTQ $1, N_DIM - JZ r2end - - // 2x1 KERNEL - KERNEL_2x1 - STORE_2x1 - - ADDQ $SIZE, Y_PTR - -r2end: - ADDQ $2*SIZE, X_PTR - MOVQ Y, Y_PTR - LEAQ (A_ROW)(LDA*2), A_ROW - MOVQ A_ROW, A_PTR - -r1: - TESTQ $1, M_DIM - JZ end - - // LOAD 1 - LOAD_SCALED1 - - MOVQ N_DIM, N - SHRQ $KERNELSIZE, N - JZ r1c4 - -r1c8: - // 1x8 KERNEL - KERNEL_LOAD8 - KERNEL_1x8 - STORE_1x8 - - ADDQ $8*SIZE, Y_PTR - - DECQ N - JNZ r1c8 - -r1c4: - TESTQ $4, N_DIM - JZ r1c2 - - // 1x4 KERNEL - KERNEL_LOAD4 - KERNEL_1x4 - STORE_1x4 - - ADDQ $4*SIZE, Y_PTR - -r1c2: - TESTQ $2, N_DIM - JZ r1c1 - - // 1x2 KERNEL - KERNEL_LOAD2 - KERNEL_1x2 - STORE_1x2 - - ADDQ $2*SIZE, Y_PTR - -r1c1: - TESTQ $1, N_DIM - JZ end - - // 1x1 KERNEL - KERNEL_1x1 - STORE_1x1 - -end: - RET - -inc: // Algorithm for incY != 0 ( split loads in kernel ) - - MOVQ incX+48(FP), INC_X // INC_X = incX * sizeof(float32) - SHLQ $BITSIZE, INC_X - MOVQ incY+80(FP), INC_Y // INC_Y = incY * sizeof(float32) - SHLQ $BITSIZE, INC_Y - LEAQ (INC_X)(INC_X*2), INC3_X // INC3_X = INC_X * 3 - LEAQ (INC_Y)(INC_Y*2), INC3_Y // INC3_Y = INC_Y * 3 - - XORQ TMP2, TMP2 - MOVQ M, TMP1 - SUBQ $1, TMP1 - IMULQ INC_X, TMP1 - NEGQ TMP1 - CMPQ INC_X, $0 - CMOVQLT TMP1, TMP2 - LEAQ (X_PTR)(TMP2*SIZE), X_PTR - - XORQ TMP2, TMP2 - MOVQ N, TMP1 - SUBQ $1, TMP1 - IMULQ INC_Y, TMP1 - NEGQ TMP1 - CMPQ INC_Y, $0 - CMOVQLT TMP1, TMP2 - LEAQ (Y_PTR)(TMP2*SIZE), Y_PTR - - SHRQ $2, M - JZ inc_r2 - -inc_r4: - // LOAD 4 - LOAD_SCALED4_INC - - MOVQ N_DIM, N - SHRQ 
$KERNELSIZE, N - JZ inc_r4c4 - -inc_r4c8: - // 4x4 KERNEL - KERNEL_LOAD8_INC - KERNEL_4x8 - STORE_4x8 - - LEAQ (Y_PTR)(INC_Y*4), Y_PTR - DECQ N - JNZ inc_r4c8 - -inc_r4c4: - TESTQ $4, N_DIM - JZ inc_r4c2 - - // 4x4 KERNEL - KERNEL_LOAD4_INC - KERNEL_4x4 - STORE_4x4 - - LEAQ (Y_PTR)(INC_Y*4), Y_PTR - -inc_r4c2: - TESTQ $2, N_DIM - JZ inc_r4c1 - - // 4x2 KERNEL - KERNEL_LOAD2_INC - KERNEL_4x2 - STORE_4x2 - - LEAQ (Y_PTR)(INC_Y*2), Y_PTR - -inc_r4c1: - TESTQ $1, N_DIM - JZ inc_r4end - - // 4x1 KERNEL - KERNEL_4x1 - STORE_4x1 - - ADDQ INC_Y, Y_PTR - -inc_r4end: - LEAQ (X_PTR)(INC_X*4), X_PTR - MOVQ Y, Y_PTR - LEAQ (A_ROW)(LDA*4), A_ROW - MOVQ A_ROW, A_PTR - - DECQ M - JNZ inc_r4 - -inc_r2: - TESTQ $2, M_DIM - JZ inc_r1 - - // LOAD 2 - LOAD_SCALED2_INC - - MOVQ N_DIM, N - SHRQ $KERNELSIZE, N - JZ inc_r2c4 - -inc_r2c8: - // 2x8 KERNEL - KERNEL_LOAD8_INC - KERNEL_2x8 - STORE_2x8 - - LEAQ (Y_PTR)(INC_Y*4), Y_PTR - DECQ N - JNZ inc_r2c8 - -inc_r2c4: - TESTQ $4, N_DIM - JZ inc_r2c2 - - // 2x4 KERNEL - KERNEL_LOAD4_INC - KERNEL_2x4 - STORE_2x4 - - LEAQ (Y_PTR)(INC_Y*4), Y_PTR - -inc_r2c2: - TESTQ $2, N_DIM - JZ inc_r2c1 - - // 2x2 KERNEL - KERNEL_LOAD2_INC - KERNEL_2x2 - STORE_2x2 - - LEAQ (Y_PTR)(INC_Y*2), Y_PTR - -inc_r2c1: - TESTQ $1, N_DIM - JZ inc_r2end - - // 2x1 KERNEL - KERNEL_2x1 - STORE_2x1 - - ADDQ INC_Y, Y_PTR - -inc_r2end: - LEAQ (X_PTR)(INC_X*2), X_PTR - MOVQ Y, Y_PTR - LEAQ (A_ROW)(LDA*2), A_ROW - MOVQ A_ROW, A_PTR - -inc_r1: - TESTQ $1, M_DIM - JZ end - - // LOAD 1 - LOAD_SCALED1 - - MOVQ N_DIM, N - SHRQ $KERNELSIZE, N - JZ inc_r1c4 - -inc_r1c8: - // 1x8 KERNEL - KERNEL_LOAD8_INC - KERNEL_1x8 - STORE_1x8 - - LEAQ (Y_PTR)(INC_Y*4), Y_PTR - DECQ N - JNZ inc_r1c8 - -inc_r1c4: - TESTQ $4, N_DIM - JZ inc_r1c2 - - // 1x4 KERNEL - KERNEL_LOAD4_INC - KERNEL_1x4 - STORE_1x4 - - LEAQ (Y_PTR)(INC_Y*4), Y_PTR - -inc_r1c2: - TESTQ $2, N_DIM - JZ inc_r1c1 - - // 1x2 KERNEL - KERNEL_LOAD2_INC - KERNEL_1x2 - STORE_1x2 - - LEAQ (Y_PTR)(INC_Y*2), Y_PTR - -inc_r1c1: - TESTQ $1, N_DIM - JZ inc_end - - // 1x1 KERNEL - KERNEL_1x1 - STORE_1x1 - -inc_end: - RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f32/ge_noasm.go b/vendor/gonum.org/v1/gonum/internal/asm/f32/ge_noasm.go deleted file mode 100644 index d92f9968d..000000000 --- a/vendor/gonum.org/v1/gonum/internal/asm/f32/ge_noasm.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright ©2017 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !amd64 noasm appengine safe - -package f32 - -// Ger performs the rank-one operation -// A += alpha * x * y^T -// where A is an m×n dense matrix, x and y are vectors, and alpha is a scalar. 
-func Ger(m, n uintptr, alpha float32, x []float32, incX uintptr, y []float32, incY uintptr, a []float32, lda uintptr) { - - if incX == 1 && incY == 1 { - x = x[:m] - y = y[:n] - for i, xv := range x { - AxpyUnitary(alpha*xv, y, a[uintptr(i)*lda:uintptr(i)*lda+n]) - } - return - } - - var ky, kx uintptr - if int(incY) < 0 { - ky = uintptr(-int(n-1) * int(incY)) - } - if int(incX) < 0 { - kx = uintptr(-int(m-1) * int(incX)) - } - - ix := kx - for i := 0; i < int(m); i++ { - AxpyInc(alpha*x[ix], y, a[uintptr(i)*lda:uintptr(i)*lda+n], uintptr(n), uintptr(incY), 1, uintptr(ky), 0) - ix += incX - } -} diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f32/scal.go b/vendor/gonum.org/v1/gonum/internal/asm/f32/scal.go deleted file mode 100644 index d0867a460..000000000 --- a/vendor/gonum.org/v1/gonum/internal/asm/f32/scal.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright ©2016 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package f32 - -// ScalUnitary is -// for i := range x { -// x[i] *= alpha -// } -func ScalUnitary(alpha float32, x []float32) { - for i := range x { - x[i] *= alpha - } -} - -// ScalUnitaryTo is -// for i, v := range x { -// dst[i] = alpha * v -// } -func ScalUnitaryTo(dst []float32, alpha float32, x []float32) { - for i, v := range x { - dst[i] = alpha * v - } -} - -// ScalInc is -// var ix uintptr -// for i := 0; i < int(n); i++ { -// x[ix] *= alpha -// ix += incX -// } -func ScalInc(alpha float32, x []float32, n, incX uintptr) { - var ix uintptr - for i := 0; i < int(n); i++ { - x[ix] *= alpha - ix += incX - } -} - -// ScalIncTo is -// var idst, ix uintptr -// for i := 0; i < int(n); i++ { -// dst[idst] = alpha * x[ix] -// ix += incX -// idst += incDst -// } -func ScalIncTo(dst []float32, incDst uintptr, alpha float32, x []float32, n, incX uintptr) { - var idst, ix uintptr - for i := 0; i < int(n); i++ { - dst[idst] = alpha * x[ix] - ix += incX - idst += incDst - } -} diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f32/stubs_amd64.go b/vendor/gonum.org/v1/gonum/internal/asm/f32/stubs_amd64.go deleted file mode 100644 index fcbce09e7..000000000 --- a/vendor/gonum.org/v1/gonum/internal/asm/f32/stubs_amd64.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright ©2016 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build !noasm,!appengine,!safe - -package f32 - -// AxpyUnitary is -// for i, v := range x { -// y[i] += alpha * v -// } -func AxpyUnitary(alpha float32, x, y []float32) - -// AxpyUnitaryTo is -// for i, v := range x { -// dst[i] = alpha*v + y[i] -// } -func AxpyUnitaryTo(dst []float32, alpha float32, x, y []float32) - -// AxpyInc is -// for i := 0; i < int(n); i++ { -// y[iy] += alpha * x[ix] -// ix += incX -// iy += incY -// } -func AxpyInc(alpha float32, x, y []float32, n, incX, incY, ix, iy uintptr) - -// AxpyIncTo is -// for i := 0; i < int(n); i++ { -// dst[idst] = alpha*x[ix] + y[iy] -// ix += incX -// iy += incY -// idst += incDst -// } -func AxpyIncTo(dst []float32, incDst, idst uintptr, alpha float32, x, y []float32, n, incX, incY, ix, iy uintptr) - -// DdotUnitary is -// for i, v := range x { -// sum += float64(y[i]) * float64(v) -// } -// return -func DdotUnitary(x, y []float32) (sum float64) - -// DdotInc is -// for i := 0; i < int(n); i++ { -// sum += float64(y[iy]) * float64(x[ix]) -// ix += incX -// iy += incY -// } -// return -func DdotInc(x, y []float32, n, incX, incY, ix, iy uintptr) (sum float64) - -// DotUnitary is -// for i, v := range x { -// sum += y[i] * v -// } -// return sum -func DotUnitary(x, y []float32) (sum float32) - -// DotInc is -// for i := 0; i < int(n); i++ { -// sum += y[iy] * x[ix] -// ix += incX -// iy += incY -// } -// return sum -func DotInc(x, y []float32, n, incX, incY, ix, iy uintptr) (sum float32) diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f32/stubs_noasm.go b/vendor/gonum.org/v1/gonum/internal/asm/f32/stubs_noasm.go deleted file mode 100644 index 3b5b09702..000000000 --- a/vendor/gonum.org/v1/gonum/internal/asm/f32/stubs_noasm.go +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright ©2016 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build !amd64 noasm appengine safe - -package f32 - -// AxpyUnitary is -// for i, v := range x { -// y[i] += alpha * v -// } -func AxpyUnitary(alpha float32, x, y []float32) { - for i, v := range x { - y[i] += alpha * v - } -} - -// AxpyUnitaryTo is -// for i, v := range x { -// dst[i] = alpha*v + y[i] -// } -func AxpyUnitaryTo(dst []float32, alpha float32, x, y []float32) { - for i, v := range x { - dst[i] = alpha*v + y[i] - } -} - -// AxpyInc is -// for i := 0; i < int(n); i++ { -// y[iy] += alpha * x[ix] -// ix += incX -// iy += incY -// } -func AxpyInc(alpha float32, x, y []float32, n, incX, incY, ix, iy uintptr) { - for i := 0; i < int(n); i++ { - y[iy] += alpha * x[ix] - ix += incX - iy += incY - } -} - -// AxpyIncTo is -// for i := 0; i < int(n); i++ { -// dst[idst] = alpha*x[ix] + y[iy] -// ix += incX -// iy += incY -// idst += incDst -// } -func AxpyIncTo(dst []float32, incDst, idst uintptr, alpha float32, x, y []float32, n, incX, incY, ix, iy uintptr) { - for i := 0; i < int(n); i++ { - dst[idst] = alpha*x[ix] + y[iy] - ix += incX - iy += incY - idst += incDst - } -} - -// DotUnitary is -// for i, v := range x { -// sum += y[i] * v -// } -// return sum -func DotUnitary(x, y []float32) (sum float32) { - for i, v := range x { - sum += y[i] * v - } - return sum -} - -// DotInc is -// for i := 0; i < int(n); i++ { -// sum += y[iy] * x[ix] -// ix += incX -// iy += incY -// } -// return sum -func DotInc(x, y []float32, n, incX, incY, ix, iy uintptr) (sum float32) { - for i := 0; i < int(n); i++ { - sum += y[iy] * x[ix] - ix += incX - iy += incY - } - return sum -} - -// DdotUnitary is -// for i, v := range x { -// sum += float64(y[i]) * float64(v) -// } -// return -func DdotUnitary(x, y []float32) (sum float64) { - for i, v := range x { - sum += float64(y[i]) * float64(v) - } - return -} - -// DdotInc is -// for i := 0; i < int(n); i++ { -// sum += float64(y[iy]) * float64(x[ix]) -// ix += incX -// iy += incY -// } -// return -func DdotInc(x, y []float32, n, incX, incY, ix, iy uintptr) (sum float64) { - for i := 0; i < int(n); i++ { - sum += float64(y[iy]) * float64(x[ix]) - ix += incX - iy += incY - } - return -} diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/abssum_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f64/abssum_amd64.s deleted file mode 100644 index d9d61bb7b..000000000 --- a/vendor/gonum.org/v1/gonum/internal/asm/f64/abssum_amd64.s +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright ©2016 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build !noasm,!appengine,!safe - -#include "textflag.h" - -// func L1Norm(x []float64) float64 -TEXT ·L1Norm(SB), NOSPLIT, $0 - MOVQ x_base+0(FP), SI // SI = &x - MOVQ x_len+8(FP), CX // CX = len(x) - XORQ AX, AX // i = 0 - PXOR X0, X0 // p_sum_i = 0 - PXOR X1, X1 - PXOR X2, X2 - PXOR X3, X3 - PXOR X4, X4 - PXOR X5, X5 - PXOR X6, X6 - PXOR X7, X7 - CMPQ CX, $0 // if CX == 0 { return 0 } - JE absum_end - MOVQ CX, BX - ANDQ $7, BX // BX = len(x) % 8 - SHRQ $3, CX // CX = floor( len(x) / 8 ) - JZ absum_tail_start // if CX == 0 { goto absum_tail_start } - -absum_loop: // do { - // p_sum += max( p_sum + x[i], p_sum - x[i] ) - MOVUPS (SI)(AX*8), X8 // X_i = x[i:i+1] - MOVUPS 16(SI)(AX*8), X9 - MOVUPS 32(SI)(AX*8), X10 - MOVUPS 48(SI)(AX*8), X11 - ADDPD X8, X0 // p_sum_i += X_i ( positive values ) - ADDPD X9, X2 - ADDPD X10, X4 - ADDPD X11, X6 - SUBPD X8, X1 // p_sum_(i+1) -= X_i ( negative values ) - SUBPD X9, X3 - SUBPD X10, X5 - SUBPD X11, X7 - MAXPD X1, X0 // p_sum_i = max( p_sum_i, p_sum_(i+1) ) - MAXPD X3, X2 - MAXPD X5, X4 - MAXPD X7, X6 - MOVAPS X0, X1 // p_sum_(i+1) = p_sum_i - MOVAPS X2, X3 - MOVAPS X4, X5 - MOVAPS X6, X7 - ADDQ $8, AX // i += 8 - LOOP absum_loop // } while --CX > 0 - - // p_sum_0 = \sum_{i=1}^{3}( p_sum_(i*2) ) - ADDPD X3, X0 - ADDPD X5, X7 - ADDPD X7, X0 - - // p_sum_0[0] = p_sum_0[0] + p_sum_0[1] - MOVAPS X0, X1 - SHUFPD $0x3, X0, X0 // lower( p_sum_0 ) = upper( p_sum_0 ) - ADDSD X1, X0 - CMPQ BX, $0 - JE absum_end // if BX == 0 { goto absum_end } - -absum_tail_start: // Reset loop registers - MOVQ BX, CX // Loop counter: CX = BX - XORPS X8, X8 // X_8 = 0 - -absum_tail: // do { - // p_sum += max( p_sum + x[i], p_sum - x[i] ) - MOVSD (SI)(AX*8), X8 // X_8 = x[i] - MOVSD X0, X1 // p_sum_1 = p_sum_0 - ADDSD X8, X0 // p_sum_0 += X_8 - SUBSD X8, X1 // p_sum_1 -= X_8 - MAXSD X1, X0 // p_sum_0 = max( p_sum_0, p_sum_1 ) - INCQ AX // i++ - LOOP absum_tail // } while --CX > 0 - -absum_end: // return p_sum_0 - MOVSD X0, sum+24(FP) - RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/abssuminc_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f64/abssuminc_amd64.s deleted file mode 100644 index cac19aa64..000000000 --- a/vendor/gonum.org/v1/gonum/internal/asm/f64/abssuminc_amd64.s +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright ©2016 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build !noasm,!appengine,!safe - -#include "textflag.h" - -// func L1NormInc(x []float64, n, incX int) (sum float64) -TEXT ·L1NormInc(SB), NOSPLIT, $0 - MOVQ x_base+0(FP), SI // SI = &x - MOVQ n+24(FP), CX // CX = n - MOVQ incX+32(FP), AX // AX = increment * sizeof( float64 ) - SHLQ $3, AX - MOVQ AX, DX // DX = AX * 3 - IMULQ $3, DX - PXOR X0, X0 // p_sum_i = 0 - PXOR X1, X1 - PXOR X2, X2 - PXOR X3, X3 - PXOR X4, X4 - PXOR X5, X5 - PXOR X6, X6 - PXOR X7, X7 - CMPQ CX, $0 // if CX == 0 { return 0 } - JE absum_end - MOVQ CX, BX - ANDQ $7, BX // BX = n % 8 - SHRQ $3, CX // CX = floor( n / 8 ) - JZ absum_tail_start // if CX == 0 { goto absum_tail_start } - -absum_loop: // do { - // p_sum = max( p_sum + x[i], p_sum - x[i] ) - MOVSD (SI), X8 // X_i[0] = x[i] - MOVSD (SI)(AX*1), X9 - MOVSD (SI)(AX*2), X10 - MOVSD (SI)(DX*1), X11 - LEAQ (SI)(AX*4), SI // SI = SI + 4 - MOVHPD (SI), X8 // X_i[1] = x[i+4] - MOVHPD (SI)(AX*1), X9 - MOVHPD (SI)(AX*2), X10 - MOVHPD (SI)(DX*1), X11 - ADDPD X8, X0 // p_sum_i += X_i ( positive values ) - ADDPD X9, X2 - ADDPD X10, X4 - ADDPD X11, X6 - SUBPD X8, X1 // p_sum_(i+1) -= X_i ( negative values ) - SUBPD X9, X3 - SUBPD X10, X5 - SUBPD X11, X7 - MAXPD X1, X0 // p_sum_i = max( p_sum_i, p_sum_(i+1) ) - MAXPD X3, X2 - MAXPD X5, X4 - MAXPD X7, X6 - MOVAPS X0, X1 // p_sum_(i+1) = p_sum_i - MOVAPS X2, X3 - MOVAPS X4, X5 - MOVAPS X6, X7 - LEAQ (SI)(AX*4), SI // SI = SI + 4 - LOOP absum_loop // } while --CX > 0 - - // p_sum_0 = \sum_{i=1}^{3}( p_sum_(i*2) ) - ADDPD X3, X0 - ADDPD X5, X7 - ADDPD X7, X0 - - // p_sum_0[0] = p_sum_0[0] + p_sum_0[1] - MOVAPS X0, X1 - SHUFPD $0x3, X0, X0 // lower( p_sum_0 ) = upper( p_sum_0 ) - ADDSD X1, X0 - CMPQ BX, $0 - JE absum_end // if BX == 0 { goto absum_end } - -absum_tail_start: // Reset loop registers - MOVQ BX, CX // Loop counter: CX = BX - XORPS X8, X8 // X_8 = 0 - -absum_tail: // do { - // p_sum += max( p_sum + x[i], p_sum - x[i] ) - MOVSD (SI), X8 // X_8 = x[i] - MOVSD X0, X1 // p_sum_1 = p_sum_0 - ADDSD X8, X0 // p_sum_0 += X_8 - SUBSD X8, X1 // p_sum_1 -= X_8 - MAXSD X1, X0 // p_sum_0 = max( p_sum_0, p_sum_1 ) - ADDQ AX, SI // i++ - LOOP absum_tail // } while --CX > 0 - -absum_end: // return p_sum_0 - MOVSD X0, sum+40(FP) - RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/add_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f64/add_amd64.s deleted file mode 100644 index bc0ea6a40..000000000 --- a/vendor/gonum.org/v1/gonum/internal/asm/f64/add_amd64.s +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright ©2016 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build !noasm,!appengine,!safe - -#include "textflag.h" - -// func Add(dst, s []float64) -TEXT ·Add(SB), NOSPLIT, $0 - MOVQ dst_base+0(FP), DI // DI = &dst - MOVQ dst_len+8(FP), CX // CX = len(dst) - MOVQ s_base+24(FP), SI // SI = &s - CMPQ s_len+32(FP), CX // CX = min( CX, len(s) ) - CMOVQLE s_len+32(FP), CX - CMPQ CX, $0 // if CX == 0 { return } - JE add_end - XORQ AX, AX - MOVQ DI, BX - ANDQ $0x0F, BX // BX = &dst & 15 - JZ add_no_trim // if BX == 0 { goto add_no_trim } - - // Align on 16-byte boundary - MOVSD (SI)(AX*8), X0 // X0 = s[i] - ADDSD (DI)(AX*8), X0 // X0 += dst[i] - MOVSD X0, (DI)(AX*8) // dst[i] = X0 - INCQ AX // i++ - DECQ CX // --CX - JE add_end // if CX == 0 { return } - -add_no_trim: - MOVQ CX, BX - ANDQ $7, BX // BX = len(dst) % 8 - SHRQ $3, CX // CX = floor( len(dst) / 8 ) - JZ add_tail_start // if CX == 0 { goto add_tail_start } - -add_loop: // Loop unrolled 8x do { - MOVUPS (SI)(AX*8), X0 // X_i = s[i:i+1] - MOVUPS 16(SI)(AX*8), X1 - MOVUPS 32(SI)(AX*8), X2 - MOVUPS 48(SI)(AX*8), X3 - ADDPD (DI)(AX*8), X0 // X_i += dst[i:i+1] - ADDPD 16(DI)(AX*8), X1 - ADDPD 32(DI)(AX*8), X2 - ADDPD 48(DI)(AX*8), X3 - MOVUPS X0, (DI)(AX*8) // dst[i:i+1] = X_i - MOVUPS X1, 16(DI)(AX*8) - MOVUPS X2, 32(DI)(AX*8) - MOVUPS X3, 48(DI)(AX*8) - ADDQ $8, AX // i += 8 - LOOP add_loop // } while --CX > 0 - CMPQ BX, $0 // if BX == 0 { return } - JE add_end - -add_tail_start: // Reset loop registers - MOVQ BX, CX // Loop counter: CX = BX - -add_tail: // do { - MOVSD (SI)(AX*8), X0 // X0 = s[i] - ADDSD (DI)(AX*8), X0 // X0 += dst[i] - MOVSD X0, (DI)(AX*8) // dst[i] = X0 - INCQ AX // ++i - LOOP add_tail // } while --CX > 0 - -add_end: - RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/addconst_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f64/addconst_amd64.s deleted file mode 100644 index 7cc68c78c..000000000 --- a/vendor/gonum.org/v1/gonum/internal/asm/f64/addconst_amd64.s +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright ©2016 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file.
- -// +build !noasm,!appengine,!safe - -#include "textflag.h" - -// func AddConst(alpha float64, x []float64) -TEXT ·AddConst(SB), NOSPLIT, $0 - MOVQ x_base+8(FP), SI // SI = &x - MOVQ x_len+16(FP), CX // CX = len(x) - CMPQ CX, $0 // if len(x) == 0 { return } - JE ac_end - MOVSD alpha+0(FP), X4 // X4 = { a, a } - SHUFPD $0, X4, X4 - MOVUPS X4, X5 // X5 = X4 - XORQ AX, AX // i = 0 - MOVQ CX, BX - ANDQ $7, BX // BX = len(x) % 8 - SHRQ $3, CX // CX = floor( len(x) / 8 ) - JZ ac_tail_start // if CX == 0 { goto ac_tail_start } - -ac_loop: // Loop unrolled 8x do { - MOVUPS (SI)(AX*8), X0 // X_i = s[i:i+1] - MOVUPS 16(SI)(AX*8), X1 - MOVUPS 32(SI)(AX*8), X2 - MOVUPS 48(SI)(AX*8), X3 - ADDPD X4, X0 // X_i += a - ADDPD X5, X1 - ADDPD X4, X2 - ADDPD X5, X3 - MOVUPS X0, (SI)(AX*8) // s[i:i+1] = X_i - MOVUPS X1, 16(SI)(AX*8) - MOVUPS X2, 32(SI)(AX*8) - MOVUPS X3, 48(SI)(AX*8) - ADDQ $8, AX // i += 8 - LOOP ac_loop // } while --CX > 0 - CMPQ BX, $0 // if BX == 0 { return } - JE ac_end - -ac_tail_start: // Reset loop counters - MOVQ BX, CX // Loop counter: CX = BX - -ac_tail: // do { - MOVSD (SI)(AX*8), X0 // X0 = s[i] - ADDSD X4, X0 // X0 += a - MOVSD X0, (SI)(AX*8) // s[i] = X0 - INCQ AX // ++i - LOOP ac_tail // } while --CX > 0 - -ac_end: - RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/axpy.go b/vendor/gonum.org/v1/gonum/internal/asm/f64/axpy.go deleted file mode 100644 index b83221398..000000000 --- a/vendor/gonum.org/v1/gonum/internal/asm/f64/axpy.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright ©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !amd64 noasm appengine safe - -package f64 - -// AxpyUnitary is -// for i, v := range x { -// y[i] += alpha * v -// } -func AxpyUnitary(alpha float64, x, y []float64) { - for i, v := range x { - y[i] += alpha * v - } -} - -// AxpyUnitaryTo is -// for i, v := range x { -// dst[i] = alpha*v + y[i] -// } -func AxpyUnitaryTo(dst []float64, alpha float64, x, y []float64) { - for i, v := range x { - dst[i] = alpha*v + y[i] - } -} - -// AxpyInc is -// for i := 0; i < int(n); i++ { -// y[iy] += alpha * x[ix] -// ix += incX -// iy += incY -// } -func AxpyInc(alpha float64, x, y []float64, n, incX, incY, ix, iy uintptr) { - for i := 0; i < int(n); i++ { - y[iy] += alpha * x[ix] - ix += incX - iy += incY - } -} - -// AxpyIncTo is -// for i := 0; i < int(n); i++ { -// dst[idst] = alpha*x[ix] + y[iy] -// ix += incX -// iy += incY -// idst += incDst -// } -func AxpyIncTo(dst []float64, incDst, idst uintptr, alpha float64, x, y []float64, n, incX, incY, ix, iy uintptr) { - for i := 0; i < int(n); i++ { - dst[idst] = alpha*x[ix] + y[iy] - ix += incX - iy += incY - idst += incDst - } -} diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/axpyinc_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f64/axpyinc_amd64.s deleted file mode 100644 index aab22e35a..000000000 --- a/vendor/gonum.org/v1/gonum/internal/asm/f64/axpyinc_amd64.s +++ /dev/null @@ -1,142 +0,0 @@ -// Copyright ©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// -// Some of the loop unrolling code is copied from: -// http://golang.org/src/math/big/arith_amd64.s -// which is distributed under these terms: -// -// Copyright (c) 2012 The Go Authors. All rights reserved.
-// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build !noasm,!appengine,!safe - -#include "textflag.h" - -#define X_PTR SI -#define Y_PTR DI -#define DST_PTR DI -#define IDX AX -#define LEN CX -#define TAIL BX -#define INC_X R8 -#define INCx3_X R11 -#define INC_Y R9 -#define INCx3_Y R12 -#define INC_DST R9 -#define INCx3_DST R12 -#define ALPHA X0 -#define ALPHA_2 X1 - -// func AxpyInc(alpha float64, x, y []float64, n, incX, incY, ix, iy uintptr) -TEXT ·AxpyInc(SB), NOSPLIT, $0 - MOVQ x_base+8(FP), X_PTR // X_PTR = &x - MOVQ y_base+32(FP), Y_PTR // Y_PTR = &y - MOVQ n+56(FP), LEN // LEN = n - CMPQ LEN, $0 // if LEN == 0 { return } - JE end - - MOVQ ix+80(FP), INC_X - MOVQ iy+88(FP), INC_Y - LEAQ (X_PTR)(INC_X*8), X_PTR // X_PTR = &(x[ix]) - LEAQ (Y_PTR)(INC_Y*8), Y_PTR // Y_PTR = &(y[iy]) - MOVQ Y_PTR, DST_PTR // DST_PTR = Y_PTR // Write pointer - - MOVQ incX+64(FP), INC_X // INC_X = incX * sizeof(float64) - SHLQ $3, INC_X - MOVQ incY+72(FP), INC_Y // INC_Y = incY * sizeof(float64) - SHLQ $3, INC_Y - - MOVSD alpha+0(FP), ALPHA // ALPHA = alpha - MOVQ LEN, TAIL - ANDQ $3, TAIL // TAIL = n % 4 - SHRQ $2, LEN // LEN = floor( n / 4 ) - JZ tail_start // if LEN == 0 { goto tail_start } - - MOVAPS ALPHA, ALPHA_2 // ALPHA_2 = ALPHA for pipelining - LEAQ (INC_X)(INC_X*2), INCx3_X // INCx3_X = INC_X * 3 - LEAQ (INC_Y)(INC_Y*2), INCx3_Y // INCx3_Y = INC_Y * 3 - -loop: // do { // y[i] += alpha * x[i] unrolled 4x. 
- MOVSD (X_PTR), X2 // X_i = x[i] - MOVSD (X_PTR)(INC_X*1), X3 - MOVSD (X_PTR)(INC_X*2), X4 - MOVSD (X_PTR)(INCx3_X*1), X5 - - MULSD ALPHA, X2 // X_i *= a - MULSD ALPHA_2, X3 - MULSD ALPHA, X4 - MULSD ALPHA_2, X5 - - ADDSD (Y_PTR), X2 // X_i += y[i] - ADDSD (Y_PTR)(INC_Y*1), X3 - ADDSD (Y_PTR)(INC_Y*2), X4 - ADDSD (Y_PTR)(INCx3_Y*1), X5 - - MOVSD X2, (DST_PTR) // y[i] = X_i - MOVSD X3, (DST_PTR)(INC_DST*1) - MOVSD X4, (DST_PTR)(INC_DST*2) - MOVSD X5, (DST_PTR)(INCx3_DST*1) - - LEAQ (X_PTR)(INC_X*4), X_PTR // X_PTR = &(X_PTR[incX*4]) - LEAQ (Y_PTR)(INC_Y*4), Y_PTR // Y_PTR = &(Y_PTR[incY*4]) - DECQ LEN - JNZ loop // } while --LEN > 0 - CMPQ TAIL, $0 // if TAIL == 0 { return } - JE end - -tail_start: // Reset Loop registers - MOVQ TAIL, LEN // Loop counter: LEN = TAIL - SHRQ $1, LEN // LEN = floor( LEN / 2 ) - JZ tail_one - -tail_two: - MOVSD (X_PTR), X2 // X_i = x[i] - MOVSD (X_PTR)(INC_X*1), X3 - MULSD ALPHA, X2 // X_i *= a - MULSD ALPHA, X3 - ADDSD (Y_PTR), X2 // X_i += y[i] - ADDSD (Y_PTR)(INC_Y*1), X3 - MOVSD X2, (DST_PTR) // y[i] = X_i - MOVSD X3, (DST_PTR)(INC_DST*1) - - LEAQ (X_PTR)(INC_X*2), X_PTR // X_PTR = &(X_PTR[incX*2]) - LEAQ (Y_PTR)(INC_Y*2), Y_PTR // Y_PTR = &(Y_PTR[incY*2]) - - ANDQ $1, TAIL - JZ end // if TAIL == 0 { goto end } - -tail_one: - // y[i] += alpha * x[i] for the last n % 4 iterations. - MOVSD (X_PTR), X2 // X2 = x[i] - MULSD ALPHA, X2 // X2 *= a - ADDSD (Y_PTR), X2 // X2 += y[i] - MOVSD X2, (DST_PTR) // y[i] = X2 - -end: - RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/axpyincto_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f64/axpyincto_amd64.s deleted file mode 100644 index f2fb97715..000000000 --- a/vendor/gonum.org/v1/gonum/internal/asm/f64/axpyincto_amd64.s +++ /dev/null @@ -1,148 +0,0 @@ -// Copyright ©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// -// Some of the loop unrolling code is copied from: -// http://golang.org/src/math/big/arith_amd64.s -// which is distributed under these terms: -// -// Copyright (c) 2012 The Go Authors. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build !noasm,!appengine,!safe - -#include "textflag.h" - -#define X_PTR SI -#define Y_PTR DI -#define DST_PTR DX -#define IDX AX -#define LEN CX -#define TAIL BX -#define INC_X R8 -#define INCx3_X R11 -#define INC_Y R9 -#define INCx3_Y R12 -#define INC_DST R10 -#define INCx3_DST R13 -#define ALPHA X0 -#define ALPHA_2 X1 - -// func AxpyIncTo(dst []float64, incDst, idst uintptr, alpha float64, x, y []float64, n, incX, incY, ix, iy uintptr) -TEXT ·AxpyIncTo(SB), NOSPLIT, $0 - MOVQ dst_base+0(FP), DST_PTR // DST_PTR := &dst - MOVQ x_base+48(FP), X_PTR // X_PTR := &x - MOVQ y_base+72(FP), Y_PTR // Y_PTR := &y - MOVQ n+96(FP), LEN // LEN := n - CMPQ LEN, $0 // if LEN == 0 { return } - JE end - - MOVQ ix+120(FP), INC_X - LEAQ (X_PTR)(INC_X*8), X_PTR // X_PTR = &(x[ix]) - MOVQ iy+128(FP), INC_Y - LEAQ (Y_PTR)(INC_Y*8), Y_PTR // Y_PTR = &(y[iy]) - MOVQ idst+32(FP), INC_DST - LEAQ (DST_PTR)(INC_DST*8), DST_PTR // DST_PTR = &(dst[idst]) - - MOVQ incX+104(FP), INC_X // INC_X = incX * sizeof(float64) - SHLQ $3, INC_X - MOVQ incY+112(FP), INC_Y // INC_Y = incY * sizeof(float64) - SHLQ $3, INC_Y - MOVQ incDst+24(FP), INC_DST // INC_DST = incDst * sizeof(float64) - SHLQ $3, INC_DST - MOVSD alpha+40(FP), ALPHA - - MOVQ LEN, TAIL - ANDQ $3, TAIL // TAIL = n % 4 - SHRQ $2, LEN // LEN = floor( n / 4 ) - JZ tail_start // if LEN == 0 { goto tail_start } - - MOVSD ALPHA, ALPHA_2 // ALPHA_2 = ALPHA for pipelining - LEAQ (INC_X)(INC_X*2), INCx3_X // INCx3_X = INC_X * 3 - LEAQ (INC_Y)(INC_Y*2), INCx3_Y // INCx3_Y = INC_Y * 3 - LEAQ (INC_DST)(INC_DST*2), INCx3_DST // INCx3_DST = INC_DST * 3 - -loop: // do { // y[i] += alpha * x[i] unrolled 4x.
- MOVSD (X_PTR), X2 // X_i = x[i] - MOVSD (X_PTR)(INC_X*1), X3 - MOVSD (X_PTR)(INC_X*2), X4 - MOVSD (X_PTR)(INCx3_X*1), X5 - - MULSD ALPHA, X2 // X_i *= a - MULSD ALPHA_2, X3 - MULSD ALPHA, X4 - MULSD ALPHA_2, X5 - - ADDSD (Y_PTR), X2 // X_i += y[i] - ADDSD (Y_PTR)(INC_Y*1), X3 - ADDSD (Y_PTR)(INC_Y*2), X4 - ADDSD (Y_PTR)(INCx3_Y*1), X5 - - MOVSD X2, (DST_PTR) // y[i] = X_i - MOVSD X3, (DST_PTR)(INC_DST*1) - MOVSD X4, (DST_PTR)(INC_DST*2) - MOVSD X5, (DST_PTR)(INCx3_DST*1) - - LEAQ (X_PTR)(INC_X*4), X_PTR // X_PTR = &(X_PTR[incX*4]) - LEAQ (Y_PTR)(INC_Y*4), Y_PTR // Y_PTR = &(Y_PTR[incY*4]) - LEAQ (DST_PTR)(INC_DST*4), DST_PTR // DST_PTR = &(DST_PTR[incDst*4]) - DECQ LEN - JNZ loop // } while --LEN > 0 - CMPQ TAIL, $0 // if TAIL == 0 { return } - JE end - -tail_start: // Reset Loop registers - MOVQ TAIL, LEN // Loop counter: LEN = TAIL - SHRQ $1, LEN // LEN = floor( LEN / 2 ) - JZ tail_one - -tail_two: - MOVSD (X_PTR), X2 // X_i = x[i] - MOVSD (X_PTR)(INC_X*1), X3 - MULSD ALPHA, X2 // X_i *= a - MULSD ALPHA, X3 - ADDSD (Y_PTR), X2 // X_i += y[i] - ADDSD (Y_PTR)(INC_Y*1), X3 - MOVSD X2, (DST_PTR) // y[i] = X_i - MOVSD X3, (DST_PTR)(INC_DST*1) - - LEAQ (X_PTR)(INC_X*2), X_PTR // X_PTR = &(X_PTR[incX*2]) - LEAQ (Y_PTR)(INC_Y*2), Y_PTR // Y_PTR = &(Y_PTR[incY*2]) - LEAQ (DST_PTR)(INC_DST*2), DST_PTR // DST_PTR = &(DST_PTR[incDst*2]) - - ANDQ $1, TAIL - JZ end // if TAIL == 0 { goto end } - -tail_one: - MOVSD (X_PTR), X2 // X2 = x[i] - MULSD ALPHA, X2 // X2 *= a - ADDSD (Y_PTR), X2 // X2 += y[i] - MOVSD X2, (DST_PTR) // y[i] = X2 - -end: - RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/axpyunitary_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f64/axpyunitary_amd64.s deleted file mode 100644 index cc519cfd4..000000000 --- a/vendor/gonum.org/v1/gonum/internal/asm/f64/axpyunitary_amd64.s +++ /dev/null @@ -1,134 +0,0 @@ -// Copyright ©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// -// Some of the loop unrolling code is copied from: -// http://golang.org/src/math/big/arith_amd64.s -// which is distributed under these terms: -// -// Copyright (c) 2012 The Go Authors. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build !noasm,!appengine,!safe - -#include "textflag.h" - -#define X_PTR SI -#define Y_PTR DI -#define DST_PTR DI -#define IDX AX -#define LEN CX -#define TAIL BX -#define ALPHA X0 -#define ALPHA_2 X1 - -// func AxpyUnitary(alpha float64, x, y []float64) -TEXT ·AxpyUnitary(SB), NOSPLIT, $0 - MOVQ x_base+8(FP), X_PTR // X_PTR := &x - MOVQ y_base+32(FP), Y_PTR // Y_PTR := &y - MOVQ x_len+16(FP), LEN // LEN = min( len(x), len(y) ) - CMPQ y_len+40(FP), LEN - CMOVQLE y_len+40(FP), LEN - CMPQ LEN, $0 // if LEN == 0 { return } - JE end - XORQ IDX, IDX - MOVSD alpha+0(FP), ALPHA // ALPHA := { alpha, alpha } - SHUFPD $0, ALPHA, ALPHA - MOVUPS ALPHA, ALPHA_2 // ALPHA_2 := ALPHA for pipelining - MOVQ Y_PTR, TAIL // Check memory alignment - ANDQ $15, TAIL // TAIL = &y % 16 - JZ no_trim // if TAIL == 0 { goto no_trim } - - // Align on 16-byte boundary - MOVSD (X_PTR), X2 // X2 := x[0] - MULSD ALPHA, X2 // X2 *= a - ADDSD (Y_PTR), X2 // X2 += y[0] - MOVSD X2, (DST_PTR) // y[0] = X2 - INCQ IDX // i++ - DECQ LEN // LEN-- - JZ end // if LEN == 0 { return } - -no_trim: - MOVQ LEN, TAIL - ANDQ $7, TAIL // TAIL := n % 8 - SHRQ $3, LEN // LEN = floor( n / 8 ) - JZ tail_start // if LEN == 0 { goto tail_start } - -loop: // do { - // y[i] += alpha * x[i] unrolled 8x. - MOVUPS (X_PTR)(IDX*8), X2 // X_i = x[i] - MOVUPS 16(X_PTR)(IDX*8), X3 - MOVUPS 32(X_PTR)(IDX*8), X4 - MOVUPS 48(X_PTR)(IDX*8), X5 - - MULPD ALPHA, X2 // X_i *= a - MULPD ALPHA_2, X3 - MULPD ALPHA, X4 - MULPD ALPHA_2, X5 - - ADDPD (Y_PTR)(IDX*8), X2 // X_i += y[i] - ADDPD 16(Y_PTR)(IDX*8), X3 - ADDPD 32(Y_PTR)(IDX*8), X4 - ADDPD 48(Y_PTR)(IDX*8), X5 - - MOVUPS X2, (DST_PTR)(IDX*8) // y[i] = X_i - MOVUPS X3, 16(DST_PTR)(IDX*8) - MOVUPS X4, 32(DST_PTR)(IDX*8) - MOVUPS X5, 48(DST_PTR)(IDX*8) - - ADDQ $8, IDX // i += 8 - DECQ LEN - JNZ loop // } while --LEN > 0 - CMPQ TAIL, $0 // if TAIL == 0 { return } - JE end - -tail_start: // Reset loop registers - MOVQ TAIL, LEN // Loop counter: LEN = TAIL - SHRQ $1, LEN // LEN = floor( TAIL / 2 ) - JZ tail_one // if LEN == 0 { goto tail_one } - -tail_two: // do { - MOVUPS (X_PTR)(IDX*8), X2 // X2 = x[i] - MULPD ALPHA, X2 // X2 *= a - ADDPD (Y_PTR)(IDX*8), X2 // X2 += y[i] - MOVUPS X2, (DST_PTR)(IDX*8) // y[i] = X2 - ADDQ $2, IDX // i += 2 - DECQ LEN - JNZ tail_two // } while --LEN > 0 - - ANDQ $1, TAIL - JZ end // if TAIL == 0 { goto end } - -tail_one: - MOVSD (X_PTR)(IDX*8), X2 // X2 = x[i] - MULSD ALPHA, X2 // X2 *= a - ADDSD (Y_PTR)(IDX*8), X2 // X2 += y[i] - MOVSD X2, (DST_PTR)(IDX*8) // y[i] = X2 - -end: - RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/axpyunitaryto_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f64/axpyunitaryto_amd64.s deleted file mode 100644 index 3918092f5..000000000 --- a/vendor/gonum.org/v1/gonum/internal/asm/f64/axpyunitaryto_amd64.s +++ /dev/null @@ -1,140 +0,0 @@ -// Copyright ©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file.
-// -// Some of the loop unrolling code is copied from: -// http://golang.org/src/math/big/arith_amd64.s -// which is distributed under these terms: -// -// Copyright (c) 2012 The Go Authors. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build !noasm,!appengine,!safe - -#include "textflag.h" - -#define X_PTR SI -#define Y_PTR DX -#define DST_PTR DI -#define IDX AX -#define LEN CX -#define TAIL BX -#define ALPHA X0 -#define ALPHA_2 X1 - -// func AxpyUnitaryTo(dst []float64, alpha float64, x, y []float64) -TEXT ·AxpyUnitaryTo(SB), NOSPLIT, $0 - MOVQ dst_base+0(FP), DST_PTR // DST_PTR := &dst - MOVQ x_base+32(FP), X_PTR // X_PTR := &x - MOVQ y_base+56(FP), Y_PTR // Y_PTR := &y - MOVQ x_len+40(FP), LEN // LEN = min( len(x), len(y), len(dst) ) - CMPQ y_len+64(FP), LEN - CMOVQLE y_len+64(FP), LEN - CMPQ dst_len+8(FP), LEN - CMOVQLE dst_len+8(FP), LEN - - CMPQ LEN, $0 - JE end // if LEN == 0 { return } - - XORQ IDX, IDX // IDX = 0 - MOVSD alpha+24(FP), ALPHA - SHUFPD $0, ALPHA, ALPHA // ALPHA := { alpha, alpha } - MOVQ Y_PTR, TAIL // Check memory alignment - ANDQ $15, TAIL // TAIL = &y % 16 - JZ no_trim // if TAIL == 0 { goto no_trim } - - // Align on 16-byte boundary - MOVSD (X_PTR), X2 // X2 := x[0] - MULSD ALPHA, X2 // X2 *= a - ADDSD (Y_PTR), X2 // X2 += y[0] - MOVSD X2, (DST_PTR) // y[0] = X2 - INCQ IDX // i++ - DECQ LEN // LEN-- - JZ end // if LEN == 0 { return } - -no_trim: - MOVQ LEN, TAIL - ANDQ $7, TAIL // TAIL := n % 8 - SHRQ $3, LEN // LEN = floor( n / 8 ) - JZ tail_start // if LEN == 0 { goto tail_start } - - MOVUPS ALPHA, ALPHA_2 // ALPHA_2 := ALPHA for pipelining - -loop: // do { - // y[i] += alpha * x[i] unrolled 8x. 
- MOVUPS (X_PTR)(IDX*8), X2 // X_i = x[i] - MOVUPS 16(X_PTR)(IDX*8), X3 - MOVUPS 32(X_PTR)(IDX*8), X4 - MOVUPS 48(X_PTR)(IDX*8), X5 - - MULPD ALPHA, X2 // X_i *= alpha - MULPD ALPHA_2, X3 - MULPD ALPHA, X4 - MULPD ALPHA_2, X5 - - ADDPD (Y_PTR)(IDX*8), X2 // X_i += y[i] - ADDPD 16(Y_PTR)(IDX*8), X3 - ADDPD 32(Y_PTR)(IDX*8), X4 - ADDPD 48(Y_PTR)(IDX*8), X5 - - MOVUPS X2, (DST_PTR)(IDX*8) // y[i] = X_i - MOVUPS X3, 16(DST_PTR)(IDX*8) - MOVUPS X4, 32(DST_PTR)(IDX*8) - MOVUPS X5, 48(DST_PTR)(IDX*8) - - ADDQ $8, IDX // i += 8 - DECQ LEN - JNZ loop // } while --LEN > 0 - CMPQ TAIL, $0 // if TAIL == 0 { return } - JE end - -tail_start: // Reset loop registers - MOVQ TAIL, LEN // Loop counter: LEN = TAIL - SHRQ $1, LEN // LEN = floor( TAIL / 2 ) - JZ tail_one // if LEN == 0 { goto tail_one } - -tail_two: // do { - MOVUPS (X_PTR)(IDX*8), X2 // X2 = x[i] - MULPD ALPHA, X2 // X2 *= alpha - ADDPD (Y_PTR)(IDX*8), X2 // X2 += y[i] - MOVUPS X2, (DST_PTR)(IDX*8) // y[i] = X2 - ADDQ $2, IDX // i += 2 - DECQ LEN - JNZ tail_two // } while --LEN > 0 - - ANDQ $1, TAIL - JZ end // if TAIL == 0 { goto end } - -tail_one: - MOVSD (X_PTR)(IDX*8), X2 // X2 = x[i] - MULSD ALPHA, X2 // X2 *= a - ADDSD (Y_PTR)(IDX*8), X2 // X2 += y[i] - MOVSD X2, (DST_PTR)(IDX*8) // y[i] = X2 - -end: - RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/cumprod_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f64/cumprod_amd64.s deleted file mode 100644 index 32bd1572b..000000000 --- a/vendor/gonum.org/v1/gonum/internal/asm/f64/cumprod_amd64.s +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright ©2016 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !noasm,!appengine,!safe - -#include "textflag.h" - -TEXT ·CumProd(SB), NOSPLIT, $0 - MOVQ dst_base+0(FP), DI // DI = &dst - MOVQ dst_len+8(FP), CX // CX = len(dst) - MOVQ s_base+24(FP), SI // SI = &s - CMPQ s_len+32(FP), CX // CX = min( CX, len(s) ) - CMOVQLE s_len+32(FP), CX - MOVQ CX, ret_len+56(FP) // len(ret) = CX - CMPQ CX, $0 // if CX == 0 { return } - JE cp_end - XORQ AX, AX // i = 0 - - MOVSD (SI), X5 // p_prod = { s[0], s[0] } - SHUFPD $0, X5, X5 - MOVSD X5, (DI) // dst[0] = s[0] - INCQ AX // ++i - DECQ CX // -- CX - JZ cp_end // if CX == 0 { return } - - MOVQ CX, BX - ANDQ $3, BX // BX = CX % 4 - SHRQ $2, CX // CX = floor( CX / 4 ) - JZ cp_tail_start // if CX == 0 { goto cp_tail_start } - -cp_loop: // Loop unrolled 4x do { - MOVUPS (SI)(AX*8), X0 // X0 = s[i:i+1] - MOVUPS 16(SI)(AX*8), X2 - MOVAPS X0, X1 // X1 = X0 - MOVAPS X2, X3 - SHUFPD $1, X1, X1 // { X1[0], X1[1] } = { X1[1], X1[0] } - SHUFPD $1, X3, X3 - MULPD X0, X1 // X1 *= X0 - MULPD X2, X3 - SHUFPD $2, X1, X0 // { X0[0], X0[1] } = { X0[0], X1[1] } - SHUFPD $3, X1, X1 // { X1[0], X1[1] } = { X1[1], X1[1] } - SHUFPD $2, X3, X2 - SHUFPD $3, X3, X3 - MULPD X5, X0 // X0 *= p_prod - MULPD X1, X5 // p_prod *= X1 - MULPD X5, X2 - MOVUPS X0, (DI)(AX*8) // dst[i] = X0 - MOVUPS X2, 16(DI)(AX*8) - MULPD X3, X5 - ADDQ $4, AX // i += 4 - LOOP cp_loop // } while --CX > 0 - - // if BX == 0 { return } - CMPQ BX, $0 - JE cp_end - -cp_tail_start: // Reset loop registers - MOVQ BX, CX // Loop counter: CX = BX - -cp_tail: // do { - MULSD (SI)(AX*8), X5 // p_prod *= s[i] - MOVSD X5, (DI)(AX*8) // dst[i] = p_prod - INCQ AX // ++i - LOOP cp_tail // } while --CX > 0 - -cp_end: - MOVQ DI, ret_base+48(FP) // &ret = &dst - MOVQ dst_cap+16(FP), SI // cap(ret) = cap(dst) - MOVQ SI, ret_cap+64(FP) - RET diff --git
a/vendor/gonum.org/v1/gonum/internal/asm/f64/cumsum_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f64/cumsum_amd64.s deleted file mode 100644 index 10d7fdab9..000000000 --- a/vendor/gonum.org/v1/gonum/internal/asm/f64/cumsum_amd64.s +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright ©2016 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !noasm,!appengine,!safe - -#include "textflag.h" - -TEXT ·CumSum(SB), NOSPLIT, $0 - MOVQ dst_base+0(FP), DI // DI = &dst - MOVQ dst_len+8(FP), CX // CX = len(dst) - MOVQ s_base+24(FP), SI // SI = &s - CMPQ s_len+32(FP), CX // CX = min( CX, len(s) ) - CMOVQLE s_len+32(FP), CX - MOVQ CX, ret_len+56(FP) // len(ret) = CX - CMPQ CX, $0 // if CX == 0 { return } - JE cs_end - XORQ AX, AX // i = 0 - PXOR X5, X5 // p_sum = 0 - MOVQ CX, BX - ANDQ $3, BX // BX = CX % 4 - SHRQ $2, CX // CX = floor( CX / 4 ) - JZ cs_tail_start // if CX == 0 { goto cs_tail_start } - -cs_loop: // Loop unrolled 4x do { - MOVUPS (SI)(AX*8), X0 // X0 = s[i:i+1] - MOVUPS 16(SI)(AX*8), X2 - MOVAPS X0, X1 // X1 = X0 - MOVAPS X2, X3 - SHUFPD $1, X1, X1 // { X1[0], X1[1] } = { X1[1], X1[0] } - SHUFPD $1, X3, X3 - ADDPD X0, X1 // X1 += X0 - ADDPD X2, X3 - SHUFPD $2, X1, X0 // { X0[0], X0[1] } = { X0[0], X1[1] } - SHUFPD $3, X1, X1 // { X1[0], X1[1] } = { X1[1], X1[1] } - SHUFPD $2, X3, X2 - SHUFPD $3, X3, X3 - ADDPD X5, X0 // X0 += p_sum - ADDPD X1, X5 // p_sum += X1 - ADDPD X5, X2 - MOVUPS X0, (DI)(AX*8) // dst[i] = X0 - MOVUPS X2, 16(DI)(AX*8) - ADDPD X3, X5 - ADDQ $4, AX // i += 4 - LOOP cs_loop // } while --CX > 0 - - // if BX == 0 { return } - CMPQ BX, $0 - JE cs_end - -cs_tail_start: // Reset loop registers - MOVQ BX, CX // Loop counter: CX = BX - -cs_tail: // do { - ADDSD (SI)(AX*8), X5 // p_sum += s[i] - MOVSD X5, (DI)(AX*8) // dst[i] = p_sum - INCQ AX // ++i - LOOP cs_tail // } while --CX > 0 - -cs_end: - MOVQ DI, ret_base+48(FP) // &ret = &dst - MOVQ dst_cap+16(FP), SI // cap(ret) = cap(dst) - MOVQ SI, ret_cap+64(FP) - RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/div_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f64/div_amd64.s deleted file mode 100644 index 1a4e9eec9..000000000 --- a/vendor/gonum.org/v1/gonum/internal/asm/f64/div_amd64.s +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright ©2016 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file.
- -// +build !noasm,!appengine,!safe - -#include "textflag.h" - -// func Div(dst, s []float64) -TEXT ·Div(SB), NOSPLIT, $0 - MOVQ dst_base+0(FP), DI // DI = &dst - MOVQ dst_len+8(FP), CX // CX = len(dst) - MOVQ s_base+24(FP), SI // SI = &s - CMPQ s_len+32(FP), CX // CX = min( CX, len(s) ) - CMOVQLE s_len+32(FP), CX - CMPQ CX, $0 // if CX == 0 { return } - JE div_end - XORQ AX, AX // i = 0 - MOVQ SI, BX - ANDQ $15, BX // BX = &s & 15 - JZ div_no_trim // if BX == 0 { goto div_no_trim } - - // Align on 16-byte boundary - MOVSD (DI)(AX*8), X0 // X0 = dst[i] - DIVSD (SI)(AX*8), X0 // X0 /= s[i] - MOVSD X0, (DI)(AX*8) // dst[i] = X0 - INCQ AX // ++i - DECQ CX // --CX - JZ div_end // if CX == 0 { return } - -div_no_trim: - MOVQ CX, BX - ANDQ $7, BX // BX = len(dst) % 8 - SHRQ $3, CX // CX = floor( len(dst) / 8 ) - JZ div_tail_start // if CX == 0 { goto div_tail_start } - -div_loop: // Loop unrolled 8x do { - MOVUPS (DI)(AX*8), X0 // X0 = dst[i:i+1] - MOVUPS 16(DI)(AX*8), X1 - MOVUPS 32(DI)(AX*8), X2 - MOVUPS 48(DI)(AX*8), X3 - DIVPD (SI)(AX*8), X0 // X0 /= s[i:i+1] - DIVPD 16(SI)(AX*8), X1 - DIVPD 32(SI)(AX*8), X2 - DIVPD 48(SI)(AX*8), X3 - MOVUPS X0, (DI)(AX*8) // dst[i] = X0 - MOVUPS X1, 16(DI)(AX*8) - MOVUPS X2, 32(DI)(AX*8) - MOVUPS X3, 48(DI)(AX*8) - ADDQ $8, AX // i += 8 - LOOP div_loop // } while --CX > 0 - CMPQ BX, $0 // if BX == 0 { return } - JE div_end - -div_tail_start: // Reset loop registers - MOVQ BX, CX // Loop counter: CX = BX - -div_tail: // do { - MOVSD (DI)(AX*8), X0 // X0 = dst[i] - DIVSD (SI)(AX*8), X0 // X0 /= s[i] - MOVSD X0, (DI)(AX*8) // dst[i] = X0 - INCQ AX // ++i - LOOP div_tail // } while --CX > 0 - -div_end: - RET - diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/divto_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f64/divto_amd64.s deleted file mode 100644 index 16ab9b7ec..000000000 --- a/vendor/gonum.org/v1/gonum/internal/asm/f64/divto_amd64.s +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright ©2016 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file.
- -// +build !noasm,!appengine,!safe - -#include "textflag.h" - -// func DivTo(dst, x, y []float64) -TEXT ·DivTo(SB), NOSPLIT, $0 - MOVQ dst_base+0(FP), DI // DI = &dst - MOVQ dst_len+8(FP), CX // CX = len(dst) - MOVQ x_base+24(FP), SI // SI = &x - MOVQ y_base+48(FP), DX // DX = &y - CMPQ x_len+32(FP), CX // CX = min( len(dst), len(x), len(y) ) - CMOVQLE x_len+32(FP), CX - CMPQ y_len+56(FP), CX - CMOVQLE y_len+56(FP), CX - MOVQ CX, ret_len+80(FP) // len(ret) = CX - CMPQ CX, $0 // if CX == 0 { return } - JE div_end - XORQ AX, AX // i = 0 - MOVQ DX, BX - ANDQ $15, BX // BX = &y & 0xF - JZ div_no_trim // if BX == 0 { goto div_no_trim } - - // Align on 16-byte boundary - MOVSD (SI)(AX*8), X0 // X0 = x[i] - DIVSD (DX)(AX*8), X0 // X0 /= y[i] - MOVSD X0, (DI)(AX*8) // dst[i] = X0 - INCQ AX // ++i - DECQ CX // --CX - JZ div_end // if CX == 0 { return } - -div_no_trim: - MOVQ CX, BX - ANDQ $7, BX // BX = len(dst) % 8 - SHRQ $3, CX // CX = floor( len(dst) / 8 ) - JZ div_tail_start // if CX == 0 { goto div_tail_start } - -div_loop: // Loop unrolled 8x do { - MOVUPS (SI)(AX*8), X0 // X0 = x[i:i+1] - MOVUPS 16(SI)(AX*8), X1 - MOVUPS 32(SI)(AX*8), X2 - MOVUPS 48(SI)(AX*8), X3 - DIVPD (DX)(AX*8), X0 // X0 /= y[i:i+1] - DIVPD 16(DX)(AX*8), X1 - DIVPD 32(DX)(AX*8), X2 - DIVPD 48(DX)(AX*8), X3 - MOVUPS X0, (DI)(AX*8) // dst[i:i+1] = X0 - MOVUPS X1, 16(DI)(AX*8) - MOVUPS X2, 32(DI)(AX*8) - MOVUPS X3, 48(DI)(AX*8) - ADDQ $8, AX // i += 8 - LOOP div_loop // } while --CX > 0 - CMPQ BX, $0 // if BX == 0 { return } - JE div_end - -div_tail_start: // Reset loop registers - MOVQ BX, CX // Loop counter: CX = BX - -div_tail: // do { - MOVSD (SI)(AX*8), X0 // X0 = x[i] - DIVSD (DX)(AX*8), X0 // X0 /= y[i] - MOVSD X0, (DI)(AX*8) - INCQ AX // ++i - LOOP div_tail // } while --CX > 0 - -div_end: - MOVQ DI, ret_base+72(FP) // &ret = &dst - MOVQ dst_cap+16(FP), DI // cap(ret) = cap(dst) - MOVQ DI, ret_cap+88(FP) - RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/doc.go b/vendor/gonum.org/v1/gonum/internal/asm/f64/doc.go deleted file mode 100644 index 33c76c1e0..000000000 --- a/vendor/gonum.org/v1/gonum/internal/asm/f64/doc.go +++ /dev/null @@ -1,6 +0,0 @@ -// Copyright ©2017 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package f64 provides float64 vector primitives. -package f64 // import "gonum.org/v1/gonum/internal/asm/f64" diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/dot.go b/vendor/gonum.org/v1/gonum/internal/asm/f64/dot.go deleted file mode 100644 index b77138d1a..000000000 --- a/vendor/gonum.org/v1/gonum/internal/asm/f64/dot.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright ©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file.
- -// +build !amd64 noasm appengine safe - -package f64 - -// DotUnitary is -// for i, v := range x { -// sum += y[i] * v -// } -// return sum -func DotUnitary(x, y []float64) (sum float64) { - for i, v := range x { - sum += y[i] * v - } - return sum -} - -// DotInc is -// for i := 0; i < int(n); i++ { -// sum += y[iy] * x[ix] -// ix += incX -// iy += incY -// } -// return sum -func DotInc(x, y []float64, n, incX, incY, ix, iy uintptr) (sum float64) { - for i := 0; i < int(n); i++ { - sum += y[iy] * x[ix] - ix += incX - iy += incY - } - return sum -} diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/dot_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f64/dot_amd64.s deleted file mode 100644 index 6daba1bae..000000000 --- a/vendor/gonum.org/v1/gonum/internal/asm/f64/dot_amd64.s +++ /dev/null @@ -1,145 +0,0 @@ -// Copyright ©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// -// Some of the loop unrolling code is copied from: -// http://golang.org/src/math/big/arith_amd64.s -// which is distributed under these terms: -// -// Copyright (c) 2012 The Go Authors. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build !noasm,!appengine,!safe - -#include "textflag.h" - -// func DotUnitary(x, y []float64) (sum float64) -// This function assumes len(y) >= len(x). -TEXT ·DotUnitary(SB), NOSPLIT, $0 - MOVQ x+0(FP), R8 - MOVQ x_len+8(FP), DI // n = len(x) - MOVQ y+24(FP), R9 - - MOVSD $(0.0), X7 // sum = 0 - MOVSD $(0.0), X8 // sum = 0 - - MOVQ $0, SI // i = 0 - SUBQ $4, DI // n -= 4 - JL tail_uni // if n < 0 goto tail_uni - -loop_uni: - // sum += x[i] * y[i] unrolled 4x.
- MOVUPD 0(R8)(SI*8), X0 - MOVUPD 0(R9)(SI*8), X1 - MOVUPD 16(R8)(SI*8), X2 - MOVUPD 16(R9)(SI*8), X3 - MULPD X1, X0 - MULPD X3, X2 - ADDPD X0, X7 - ADDPD X2, X8 - - ADDQ $4, SI // i += 4 - SUBQ $4, DI // n -= 4 - JGE loop_uni // if n >= 0 goto loop_uni - -tail_uni: - ADDQ $4, DI // n += 4 - JLE end_uni // if n <= 0 goto end_uni - -onemore_uni: - // sum += x[i] * y[i] for the remaining 1-3 elements. - MOVSD 0(R8)(SI*8), X0 - MOVSD 0(R9)(SI*8), X1 - MULSD X1, X0 - ADDSD X0, X7 - - ADDQ $1, SI // i++ - SUBQ $1, DI // n-- - JNZ onemore_uni // if n != 0 goto onemore_uni - -end_uni: - // Add the four sums together. - ADDPD X8, X7 - MOVSD X7, X0 - UNPCKHPD X7, X7 - ADDSD X0, X7 - MOVSD X7, sum+48(FP) // Return final sum. - RET - -// func DotInc(x, y []float64, n, incX, incY, ix, iy uintptr) (sum float64) -TEXT ·DotInc(SB), NOSPLIT, $0 - MOVQ x+0(FP), R8 - MOVQ y+24(FP), R9 - MOVQ n+48(FP), CX - MOVQ incX+56(FP), R11 - MOVQ incY+64(FP), R12 - MOVQ ix+72(FP), R13 - MOVQ iy+80(FP), R14 - - MOVSD $(0.0), X7 // sum = 0 - LEAQ (R8)(R13*8), SI // p = &x[ix] - LEAQ (R9)(R14*8), DI // q = &y[iy] - SHLQ $3, R11 // incX *= sizeof(float64) - SHLQ $3, R12 // incY *= sizeof(float64) - - SUBQ $2, CX // n -= 2 - JL tail_inc // if n < 0 goto tail_inc - -loop_inc: - // sum += *p * *q unrolled 2x. - MOVHPD (SI), X0 - MOVHPD (DI), X1 - ADDQ R11, SI // p += incX - ADDQ R12, DI // q += incY - MOVLPD (SI), X0 - MOVLPD (DI), X1 - ADDQ R11, SI // p += incX - ADDQ R12, DI // q += incY - - MULPD X1, X0 - ADDPD X0, X7 - - SUBQ $2, CX // n -= 2 - JGE loop_inc // if n >= 0 goto loop_inc - -tail_inc: - ADDQ $2, CX // n += 2 - JLE end_inc // if n <= 0 goto end_inc - - // sum += *p * *q for the last iteration if n is odd. - MOVSD (SI), X0 - MULSD (DI), X0 - ADDSD X0, X7 - -end_inc: - // Add the two sums together. - MOVSD X7, X0 - UNPCKHPD X7, X7 - ADDSD X0, X7 - MOVSD X7, sum+88(FP) // Return final sum. - RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/ge_amd64.go b/vendor/gonum.org/v1/gonum/internal/asm/f64/ge_amd64.go deleted file mode 100644 index 00c99e932..000000000 --- a/vendor/gonum.org/v1/gonum/internal/asm/f64/ge_amd64.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright ©2017 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !noasm,!appengine,!safe - -package f64 - -// Ger performs the rank-one operation -// A += alpha * x * y^T -// where A is an m×n dense matrix, x and y are vectors, and alpha is a scalar. -func Ger(m, n uintptr, alpha float64, x []float64, incX uintptr, y []float64, incY uintptr, a []float64, lda uintptr) - -// GemvN computes -// y = alpha * A * x + beta * y -// where A is an m×n dense matrix, x and y are vectors, and alpha and beta are scalars. -func GemvN(m, n uintptr, alpha float64, a []float64, lda uintptr, x []float64, incX uintptr, beta float64, y []float64, incY uintptr) - -// GemvT computes -// y = alpha * A^T * x + beta * y -// where A is an m×n dense matrix, x and y are vectors, and alpha and beta are scalars. -func GemvT(m, n uintptr, alpha float64, a []float64, lda uintptr, x []float64, incX uintptr, beta float64, y []float64, incY uintptr) diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/ge_noasm.go b/vendor/gonum.org/v1/gonum/internal/asm/f64/ge_noasm.go deleted file mode 100644 index 2a1cfd5cd..000000000 --- a/vendor/gonum.org/v1/gonum/internal/asm/f64/ge_noasm.go +++ /dev/null @@ -1,118 +0,0 @@ -// Copyright ©2017 The Gonum Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !amd64 noasm appengine safe - -package f64 - -// Ger performs the rank-one operation -// A += alpha * x * y^T -// where A is an m×n dense matrix, x and y are vectors, and alpha is a scalar. -func Ger(m, n uintptr, alpha float64, x []float64, incX uintptr, y []float64, incY uintptr, a []float64, lda uintptr) { - if incX == 1 && incY == 1 { - x = x[:m] - y = y[:n] - for i, xv := range x { - AxpyUnitary(alpha*xv, y, a[uintptr(i)*lda:uintptr(i)*lda+n]) - } - return - } - - var ky, kx uintptr - if int(incY) < 0 { - ky = uintptr(-int(n-1) * int(incY)) - } - if int(incX) < 0 { - kx = uintptr(-int(m-1) * int(incX)) - } - - ix := kx - for i := 0; i < int(m); i++ { - AxpyInc(alpha*x[ix], y, a[uintptr(i)*lda:uintptr(i)*lda+n], n, incY, 1, ky, 0) - ix += incX - } -} - -// GemvN computes -// y = alpha * A * x + beta * y -// where A is an m×n dense matrix, x and y are vectors, and alpha and beta are scalars. -func GemvN(m, n uintptr, alpha float64, a []float64, lda uintptr, x []float64, incX uintptr, beta float64, y []float64, incY uintptr) { - var kx, ky, i uintptr - if int(incX) < 0 { - kx = uintptr(-int(n-1) * int(incX)) - } - if int(incY) < 0 { - ky = uintptr(-int(m-1) * int(incY)) - } - - if incX == 1 && incY == 1 { - if beta == 0 { - for i = 0; i < m; i++ { - y[i] = alpha * DotUnitary(a[lda*i:lda*i+n], x) - } - return - } - for i = 0; i < m; i++ { - y[i] = y[i]*beta + alpha*DotUnitary(a[lda*i:lda*i+n], x) - } - return - } - iy := ky - if beta == 0 { - for i = 0; i < m; i++ { - y[iy] = alpha * DotInc(x, a[lda*i:lda*i+n], n, incX, 1, kx, 0) - iy += incY - } - return - } - for i = 0; i < m; i++ { - y[iy] = y[iy]*beta + alpha*DotInc(x, a[lda*i:lda*i+n], n, incX, 1, kx, 0) - iy += incY - } -} - -// GemvT computes -// y = alpha * A^T * x + beta * y -// where A is an m×n dense matrix, x and y are vectors, and alpha and beta are scalars. -func GemvT(m, n uintptr, alpha float64, a []float64, lda uintptr, x []float64, incX uintptr, beta float64, y []float64, incY uintptr) { - var kx, ky, i uintptr - if int(incX) < 0 { - kx = uintptr(-int(m-1) * int(incX)) - } - if int(incY) < 0 { - ky = uintptr(-int(n-1) * int(incY)) - } - switch { - case beta == 0: // beta == 0 is special-cased to memclear - if incY == 1 { - for i := range y { - y[i] = 0 - } - } else { - iy := ky - for i := 0; i < int(n); i++ { - y[iy] = 0 - iy += incY - } - } - case int(incY) < 0: - ScalInc(beta, y, n, uintptr(int(-incY))) - case incY == 1: - ScalUnitary(beta, y[:n]) - default: - ScalInc(beta, y, n, incY) - } - - if incX == 1 && incY == 1 { - for i = 0; i < m; i++ { - AxpyUnitaryTo(y, alpha*x[i], a[lda*i:lda*i+n], y) - } - return - } - ix := kx - for i = 0; i < m; i++ { - AxpyInc(alpha*x[ix], a[lda*i:lda*i+n], y, n, 1, incY, 0, ky) - ix += incX - } -} diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/gemvN_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f64/gemvN_amd64.s deleted file mode 100644 index f0a98f0f8..000000000 --- a/vendor/gonum.org/v1/gonum/internal/asm/f64/gemvN_amd64.s +++ /dev/null @@ -1,685 +0,0 @@ -// Copyright ©2017 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build !noasm,!appengine,!safe - -#include "textflag.h" - -#define SIZE 8 - -#define M_DIM m+0(FP) -#define M CX -#define N_DIM n+8(FP) -#define N BX - -#define TMP1 R14 -#define TMP2 R15 - -#define X_PTR SI -#define X x_base+56(FP) -#define INC_X R8 -#define INC3_X R9 - -#define Y_PTR DX -#define Y y_base+96(FP) -#define INC_Y R10 -#define INC3_Y R11 - -#define A_ROW AX -#define A_PTR DI -#define LDA R12 -#define LDA3 R13 - -#define ALPHA X15 -#define BETA X14 - -#define INIT4 \ - XORPS X0, X0 \ - XORPS X1, X1 \ - XORPS X2, X2 \ - XORPS X3, X3 - -#define INIT2 \ - XORPS X0, X0 \ - XORPS X1, X1 - -#define INIT1 \ - XORPS X0, X0 - -#define KERNEL_LOAD4 \ - MOVUPS (X_PTR), X12 \ - MOVUPS 2*SIZE(X_PTR), X13 - -#define KERNEL_LOAD2 \ - MOVUPS (X_PTR), X12 - -#define KERNEL_LOAD4_INC \ - MOVSD (X_PTR), X12 \ - MOVHPD (X_PTR)(INC_X*1), X12 \ - MOVSD (X_PTR)(INC_X*2), X13 \ - MOVHPD (X_PTR)(INC3_X*1), X13 - -#define KERNEL_LOAD2_INC \ - MOVSD (X_PTR), X12 \ - MOVHPD (X_PTR)(INC_X*1), X12 - -#define KERNEL_4x4 \ - MOVUPS (A_PTR), X4 \ - MOVUPS 2*SIZE(A_PTR), X5 \ - MOVUPS (A_PTR)(LDA*1), X6 \ - MOVUPS 2*SIZE(A_PTR)(LDA*1), X7 \ - MOVUPS (A_PTR)(LDA*2), X8 \ - MOVUPS 2*SIZE(A_PTR)(LDA*2), X9 \ - MOVUPS (A_PTR)(LDA3*1), X10 \ - MOVUPS 2*SIZE(A_PTR)(LDA3*1), X11 \ - MULPD X12, X4 \ - MULPD X13, X5 \ - MULPD X12, X6 \ - MULPD X13, X7 \ - MULPD X12, X8 \ - MULPD X13, X9 \ - MULPD X12, X10 \ - MULPD X13, X11 \ - ADDPD X4, X0 \ - ADDPD X5, X0 \ - ADDPD X6, X1 \ - ADDPD X7, X1 \ - ADDPD X8, X2 \ - ADDPD X9, X2 \ - ADDPD X10, X3 \ - ADDPD X11, X3 \ - ADDQ $4*SIZE, A_PTR - -#define KERNEL_4x2 \ - MOVUPS (A_PTR), X4 \ - MOVUPS (A_PTR)(LDA*1), X5 \ - MOVUPS (A_PTR)(LDA*2), X6 \ - MOVUPS (A_PTR)(LDA3*1), X7 \ - MULPD X12, X4 \ - MULPD X12, X5 \ - MULPD X12, X6 \ - MULPD X12, X7 \ - ADDPD X4, X0 \ - ADDPD X5, X1 \ - ADDPD X6, X2 \ - ADDPD X7, X3 \ - ADDQ $2*SIZE, A_PTR - -#define KERNEL_4x1 \ - MOVDDUP (X_PTR), X12 \ - MOVSD (A_PTR), X4 \ - MOVHPD (A_PTR)(LDA*1), X4 \ - MOVSD (A_PTR)(LDA*2), X5 \ - MOVHPD (A_PTR)(LDA3*1), X5 \ - MULPD X12, X4 \ - MULPD X12, X5 \ - ADDPD X4, X0 \ - ADDPD X5, X2 \ - ADDQ $SIZE, A_PTR - -#define STORE4 \ - MOVUPS (Y_PTR), X4 \ - MOVUPS 2*SIZE(Y_PTR), X5 \ - MULPD ALPHA, X0 \ - MULPD ALPHA, X2 \ - MULPD BETA, X4 \ - MULPD BETA, X5 \ - ADDPD X0, X4 \ - ADDPD X2, X5 \ - MOVUPS X4, (Y_PTR) \ - MOVUPS X5, 2*SIZE(Y_PTR) - -#define STORE4_INC \ - MOVSD (Y_PTR), X4 \ - MOVHPD (Y_PTR)(INC_Y*1), X4 \ - MOVSD (Y_PTR)(INC_Y*2), X5 \ - MOVHPD (Y_PTR)(INC3_Y*1), X5 \ - MULPD ALPHA, X0 \ - MULPD ALPHA, X2 \ - MULPD BETA, X4 \ - MULPD BETA, X5 \ - ADDPD X0, X4 \ - ADDPD X2, X5 \ - MOVLPD X4, (Y_PTR) \ - MOVHPD X4, (Y_PTR)(INC_Y*1) \ - MOVLPD X5, (Y_PTR)(INC_Y*2) \ - MOVHPD X5, (Y_PTR)(INC3_Y*1) - -#define KERNEL_2x4 \ - MOVUPS (A_PTR), X8 \ - MOVUPS 2*SIZE(A_PTR), X9 \ - MOVUPS (A_PTR)(LDA*1), X10 \ - MOVUPS 2*SIZE(A_PTR)(LDA*1), X11 \ - MULPD X12, X8 \ - MULPD X13, X9 \ - MULPD X12, X10 \ - MULPD X13, X11 \ - ADDPD X8, X0 \ - ADDPD X10, X1 \ - ADDPD X9, X0 \ - ADDPD X11, X1 \ - ADDQ $4*SIZE, A_PTR - -#define KERNEL_2x2 \ - MOVUPS (A_PTR), X8 \ - MOVUPS (A_PTR)(LDA*1), X9 \ - MULPD X12, X8 \ - MULPD X12, X9 \ - ADDPD X8, X0 \ - ADDPD X9, X1 \ - ADDQ $2*SIZE, A_PTR - -#define KERNEL_2x1 \ - MOVDDUP (X_PTR), X12 \ - MOVSD (A_PTR), X8 \ - MOVHPD (A_PTR)(LDA*1), X8 \ - MULPD X12, X8 \ - ADDPD X8, X0 \ - ADDQ $SIZE, A_PTR - -#define STORE2 \ - MOVUPS (Y_PTR), X4 \ - MULPD ALPHA, X0 \ - MULPD BETA, X4 \ - ADDPD X0, X4 \ - MOVUPS X4, (Y_PTR) - -#define STORE2_INC \ - MOVSD (Y_PTR), X4 \ - MOVHPD 
(Y_PTR)(INC_Y*1), X4 \ - MULPD ALPHA, X0 \ - MULPD BETA, X4 \ - ADDPD X0, X4 \ - MOVSD X4, (Y_PTR) \ - MOVHPD X4, (Y_PTR)(INC_Y*1) - -#define KERNEL_1x4 \ - MOVUPS (A_PTR), X8 \ - MOVUPS 2*SIZE(A_PTR), X9 \ - MULPD X12, X8 \ - MULPD X13, X9 \ - ADDPD X8, X0 \ - ADDPD X9, X0 \ - ADDQ $4*SIZE, A_PTR - -#define KERNEL_1x2 \ - MOVUPS (A_PTR), X8 \ - MULPD X12, X8 \ - ADDPD X8, X0 \ - ADDQ $2*SIZE, A_PTR - -#define KERNEL_1x1 \ - MOVSD (X_PTR), X12 \ - MOVSD (A_PTR), X8 \ - MULSD X12, X8 \ - ADDSD X8, X0 \ - ADDQ $SIZE, A_PTR - -#define STORE1 \ - HADDPD X0, X0 \ - MOVSD (Y_PTR), X4 \ - MULSD ALPHA, X0 \ - MULSD BETA, X4 \ - ADDSD X0, X4 \ - MOVSD X4, (Y_PTR) - -// func GemvN(m, n uintptr, -// alpha float64, -// a []float64, lda uintptr, -// x []float64, incX uintptr, -// beta float64, -// y []float64, incY uintptr) -TEXT ·GemvN(SB), NOSPLIT, $32-128 - MOVQ M_DIM, M - MOVQ N_DIM, N - CMPQ M, $0 - JE end - CMPQ N, $0 - JE end - - MOVDDUP alpha+16(FP), ALPHA - MOVDDUP beta+88(FP), BETA - - MOVQ x_base+56(FP), X_PTR - MOVQ y_base+96(FP), Y_PTR - MOVQ a_base+24(FP), A_ROW - MOVQ incY+120(FP), INC_Y - MOVQ lda+48(FP), LDA // LDA = LDA * sizeof(float64) - SHLQ $3, LDA - LEAQ (LDA)(LDA*2), LDA3 // LDA3 = LDA * 3 - MOVQ A_ROW, A_PTR - - XORQ TMP2, TMP2 - MOVQ M, TMP1 - SUBQ $1, TMP1 - IMULQ INC_Y, TMP1 - NEGQ TMP1 - CMPQ INC_Y, $0 - CMOVQLT TMP1, TMP2 - LEAQ (Y_PTR)(TMP2*SIZE), Y_PTR - MOVQ Y_PTR, Y - - SHLQ $3, INC_Y // INC_Y = incY * sizeof(float64) - LEAQ (INC_Y)(INC_Y*2), INC3_Y // INC3_Y = INC_Y * 3 - - MOVSD $0.0, X0 - COMISD BETA, X0 - JNE gemv_start // if beta != 0 { goto gemv_start } - -gemv_clear: // beta == 0 is special cased to clear memory (no nan handling) - XORPS X0, X0 - XORPS X1, X1 - XORPS X2, X2 - XORPS X3, X3 - - CMPQ incY+120(FP), $1 // Check for dense vector Y (fast-path) - JNE inc_clear - - SHRQ $3, M - JZ clear4 - -clear8: - MOVUPS X0, (Y_PTR) - MOVUPS X1, 16(Y_PTR) - MOVUPS X2, 32(Y_PTR) - MOVUPS X3, 48(Y_PTR) - ADDQ $8*SIZE, Y_PTR - DECQ M - JNZ clear8 - -clear4: - TESTQ $4, M_DIM - JZ clear2 - MOVUPS X0, (Y_PTR) - MOVUPS X1, 16(Y_PTR) - ADDQ $4*SIZE, Y_PTR - -clear2: - TESTQ $2, M_DIM - JZ clear1 - MOVUPS X0, (Y_PTR) - ADDQ $2*SIZE, Y_PTR - -clear1: - TESTQ $1, M_DIM - JZ prep_end - MOVSD X0, (Y_PTR) - - JMP prep_end - -inc_clear: - SHRQ $2, M - JZ inc_clear2 - -inc_clear4: - MOVSD X0, (Y_PTR) - MOVSD X1, (Y_PTR)(INC_Y*1) - MOVSD X2, (Y_PTR)(INC_Y*2) - MOVSD X3, (Y_PTR)(INC3_Y*1) - LEAQ (Y_PTR)(INC_Y*4), Y_PTR - DECQ M - JNZ inc_clear4 - -inc_clear2: - TESTQ $2, M_DIM - JZ inc_clear1 - MOVSD X0, (Y_PTR) - MOVSD X1, (Y_PTR)(INC_Y*1) - LEAQ (Y_PTR)(INC_Y*2), Y_PTR - -inc_clear1: - TESTQ $1, M_DIM - JZ prep_end - MOVSD X0, (Y_PTR) - -prep_end: - MOVQ Y, Y_PTR - MOVQ M_DIM, M - -gemv_start: - CMPQ incX+80(FP), $1 // Check for dense vector X (fast-path) - JNE inc - - SHRQ $2, M - JZ r2 - -r4: - // LOAD 4 - INIT4 - - MOVQ N_DIM, N - SHRQ $2, N - JZ r4c2 - -r4c4: - // 4x4 KERNEL - KERNEL_LOAD4 - KERNEL_4x4 - - ADDQ $4*SIZE, X_PTR - - DECQ N - JNZ r4c4 - -r4c2: - TESTQ $2, N_DIM - JZ r4c1 - - // 4x2 KERNEL - KERNEL_LOAD2 - KERNEL_4x2 - - ADDQ $2*SIZE, X_PTR - -r4c1: - HADDPD X1, X0 - HADDPD X3, X2 - TESTQ $1, N_DIM - JZ r4end - - // 4x1 KERNEL - KERNEL_4x1 - - ADDQ $SIZE, X_PTR - -r4end: - CMPQ INC_Y, $SIZE - JNZ r4st_inc - - STORE4 - ADDQ $4*SIZE, Y_PTR - JMP r4inc - -r4st_inc: - STORE4_INC - LEAQ (Y_PTR)(INC_Y*4), Y_PTR - -r4inc: - MOVQ X, X_PTR - LEAQ (A_ROW)(LDA*4), A_ROW - MOVQ A_ROW, A_PTR - - DECQ M - JNZ r4 - -r2: - TESTQ $2, M_DIM - JZ r1 - - // LOAD 2 - INIT2 - - MOVQ N_DIM, N - SHRQ $2, N
- JZ r2c2 - -r2c4: - // 2x4 KERNEL - KERNEL_LOAD4 - KERNEL_2x4 - - ADDQ $4*SIZE, X_PTR - - DECQ N - JNZ r2c4 - -r2c2: - TESTQ $2, N_DIM - JZ r2c1 - - // 2x2 KERNEL - KERNEL_LOAD2 - KERNEL_2x2 - - ADDQ $2*SIZE, X_PTR - -r2c1: - HADDPD X1, X0 - TESTQ $1, N_DIM - JZ r2end - - // 2x1 KERNEL - KERNEL_2x1 - - ADDQ $SIZE, X_PTR - -r2end: - CMPQ INC_Y, $SIZE - JNE r2st_inc - - STORE2 - ADDQ $2*SIZE, Y_PTR - JMP r2inc - -r2st_inc: - STORE2_INC - LEAQ (Y_PTR)(INC_Y*2), Y_PTR - -r2inc: - MOVQ X, X_PTR - LEAQ (A_ROW)(LDA*2), A_ROW - MOVQ A_ROW, A_PTR - -r1: - TESTQ $1, M_DIM - JZ end - - // LOAD 1 - INIT1 - - MOVQ N_DIM, N - SHRQ $2, N - JZ r1c2 - -r1c4: - // 1x4 KERNEL - KERNEL_LOAD4 - KERNEL_1x4 - - ADDQ $4*SIZE, X_PTR - - DECQ N - JNZ r1c4 - -r1c2: - TESTQ $2, N_DIM - JZ r1c1 - - // 1x2 KERNEL - KERNEL_LOAD2 - KERNEL_1x2 - - ADDQ $2*SIZE, X_PTR - -r1c1: - - TESTQ $1, N_DIM - JZ r1end - - // 1x1 KERNEL - KERNEL_1x1 - -r1end: - STORE1 - -end: - RET - -inc: // Algorithm for incX != 1 ( split loads in kernel ) - MOVQ incX+80(FP), INC_X // INC_X = incX - - XORQ TMP2, TMP2 // TMP2 = 0 - MOVQ N, TMP1 // TMP1 = N - SUBQ $1, TMP1 // TMP1 -= 1 - NEGQ TMP1 // TMP1 = -TMP1 - IMULQ INC_X, TMP1 // TMP1 *= INC_X - CMPQ INC_X, $0 // if INC_X < 0 { TMP2 = TMP1 } - CMOVQLT TMP1, TMP2 - LEAQ (X_PTR)(TMP2*SIZE), X_PTR // X_PTR = X_PTR[TMP2] - MOVQ X_PTR, X // X = X_PTR - - SHLQ $3, INC_X - LEAQ (INC_X)(INC_X*2), INC3_X // INC3_X = INC_X * 3 - - SHRQ $2, M - JZ inc_r2 - -inc_r4: - // LOAD 4 - INIT4 - - MOVQ N_DIM, N - SHRQ $2, N - JZ inc_r4c2 - -inc_r4c4: - // 4x4 KERNEL - KERNEL_LOAD4_INC - KERNEL_4x4 - - LEAQ (X_PTR)(INC_X*4), X_PTR - - DECQ N - JNZ inc_r4c4 - -inc_r4c2: - TESTQ $2, N_DIM - JZ inc_r4c1 - - // 4x2 KERNEL - KERNEL_LOAD2_INC - KERNEL_4x2 - - LEAQ (X_PTR)(INC_X*2), X_PTR - -inc_r4c1: - HADDPD X1, X0 - HADDPD X3, X2 - TESTQ $1, N_DIM - JZ inc_r4end - - // 4x1 KERNEL - KERNEL_4x1 - - ADDQ INC_X, X_PTR - -inc_r4end: - CMPQ INC_Y, $SIZE - JNE inc_r4st_inc - - STORE4 - ADDQ $4*SIZE, Y_PTR - JMP inc_r4inc - -inc_r4st_inc: - STORE4_INC - LEAQ (Y_PTR)(INC_Y*4), Y_PTR - -inc_r4inc: - MOVQ X, X_PTR - LEAQ (A_ROW)(LDA*4), A_ROW - MOVQ A_ROW, A_PTR - - DECQ M - JNZ inc_r4 - -inc_r2: - TESTQ $2, M_DIM - JZ inc_r1 - - // LOAD 2 - INIT2 - - MOVQ N_DIM, N - SHRQ $2, N - JZ inc_r2c2 - -inc_r2c4: - // 2x4 KERNEL - KERNEL_LOAD4_INC - KERNEL_2x4 - - LEAQ (X_PTR)(INC_X*4), X_PTR - DECQ N - JNZ inc_r2c4 - -inc_r2c2: - TESTQ $2, N_DIM - JZ inc_r2c1 - - // 2x2 KERNEL - KERNEL_LOAD2_INC - KERNEL_2x2 - - LEAQ (X_PTR)(INC_X*2), X_PTR - -inc_r2c1: - HADDPD X1, X0 - TESTQ $1, N_DIM - JZ inc_r2end - - // 2x1 KERNEL - KERNEL_2x1 - - ADDQ INC_X, X_PTR - -inc_r2end: - CMPQ INC_Y, $SIZE - JNE inc_r2st_inc - - STORE2 - ADDQ $2*SIZE, Y_PTR - JMP inc_r2inc - -inc_r2st_inc: - STORE2_INC - LEAQ (Y_PTR)(INC_Y*2), Y_PTR - -inc_r2inc: - MOVQ X, X_PTR - LEAQ (A_ROW)(LDA*2), A_ROW - MOVQ A_ROW, A_PTR - -inc_r1: - TESTQ $1, M_DIM - JZ inc_end - - // LOAD 1 - INIT1 - - MOVQ N_DIM, N - SHRQ $2, N - JZ inc_r1c2 - -inc_r1c4: - // 1x4 KERNEL - KERNEL_LOAD4_INC - KERNEL_1x4 - - LEAQ (X_PTR)(INC_X*4), X_PTR - DECQ N - JNZ inc_r1c4 - -inc_r1c2: - TESTQ $2, N_DIM - JZ inc_r1c1 - - // 1x2 KERNEL - KERNEL_LOAD2_INC - KERNEL_1x2 - - LEAQ (X_PTR)(INC_X*2), X_PTR - -inc_r1c1: - TESTQ $1, N_DIM - JZ inc_r1end - - // 1x1 KERNEL - KERNEL_1x1 - -inc_r1end: - STORE1 - -inc_end: - RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/gemvT_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f64/gemvT_amd64.s deleted file mode 100644 index 87a9445e9..000000000 --- 
a/vendor/gonum.org/v1/gonum/internal/asm/f64/gemvT_amd64.s +++ /dev/null @@ -1,745 +0,0 @@ -// Copyright ©2017 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !noasm,!appengine,!safe - -#include "textflag.h" - -#define SIZE 8 - -#define M_DIM n+8(FP) -#define M CX -#define N_DIM m+0(FP) -#define N BX - -#define TMP1 R14 -#define TMP2 R15 - -#define X_PTR SI -#define X x_base+56(FP) -#define Y_PTR DX -#define Y y_base+96(FP) -#define A_ROW AX -#define A_PTR DI - -#define INC_X R8 -#define INC3_X R9 - -#define INC_Y R10 -#define INC3_Y R11 - -#define LDA R12 -#define LDA3 R13 - -#define ALPHA X15 -#define BETA X14 - -#define INIT4 \ - MOVDDUP (X_PTR), X8 \ - MOVDDUP (X_PTR)(INC_X*1), X9 \ - MOVDDUP (X_PTR)(INC_X*2), X10 \ - MOVDDUP (X_PTR)(INC3_X*1), X11 \ - MULPD ALPHA, X8 \ - MULPD ALPHA, X9 \ - MULPD ALPHA, X10 \ - MULPD ALPHA, X11 - -#define INIT2 \ - MOVDDUP (X_PTR), X8 \ - MOVDDUP (X_PTR)(INC_X*1), X9 \ - MULPD ALPHA, X8 \ - MULPD ALPHA, X9 - -#define INIT1 \ - MOVDDUP (X_PTR), X8 \ - MULPD ALPHA, X8 - -#define KERNEL_LOAD4 \ - MOVUPS (Y_PTR), X0 \ - MOVUPS 2*SIZE(Y_PTR), X1 - -#define KERNEL_LOAD2 \ - MOVUPS (Y_PTR), X0 - -#define KERNEL_LOAD4_INC \ - MOVSD (Y_PTR), X0 \ - MOVHPD (Y_PTR)(INC_Y*1), X0 \ - MOVSD (Y_PTR)(INC_Y*2), X1 \ - MOVHPD (Y_PTR)(INC3_Y*1), X1 - -#define KERNEL_LOAD2_INC \ - MOVSD (Y_PTR), X0 \ - MOVHPD (Y_PTR)(INC_Y*1), X0 - -#define KERNEL_4x4 \ - MOVUPS (A_PTR), X4 \ - MOVUPS 2*SIZE(A_PTR), X5 \ - MOVUPS (A_PTR)(LDA*1), X6 \ - MOVUPS 2*SIZE(A_PTR)(LDA*1), X7 \ - MULPD X8, X4 \ - MULPD X8, X5 \ - MULPD X9, X6 \ - MULPD X9, X7 \ - ADDPD X4, X0 \ - ADDPD X5, X1 \ - ADDPD X6, X0 \ - ADDPD X7, X1 \ - MOVUPS (A_PTR)(LDA*2), X4 \ - MOVUPS 2*SIZE(A_PTR)(LDA*2), X5 \ - MOVUPS (A_PTR)(LDA3*1), X6 \ - MOVUPS 2*SIZE(A_PTR)(LDA3*1), X7 \ - MULPD X10, X4 \ - MULPD X10, X5 \ - MULPD X11, X6 \ - MULPD X11, X7 \ - ADDPD X4, X0 \ - ADDPD X5, X1 \ - ADDPD X6, X0 \ - ADDPD X7, X1 \ - ADDQ $4*SIZE, A_PTR - -#define KERNEL_4x2 \ - MOVUPS (A_PTR), X4 \ - MOVUPS 2*SIZE(A_PTR), X5 \ - MOVUPS (A_PTR)(LDA*1), X6 \ - MOVUPS 2*SIZE(A_PTR)(LDA*1), X7 \ - MULPD X8, X4 \ - MULPD X8, X5 \ - MULPD X9, X6 \ - MULPD X9, X7 \ - ADDPD X4, X0 \ - ADDPD X5, X1 \ - ADDPD X6, X0 \ - ADDPD X7, X1 \ - ADDQ $4*SIZE, A_PTR - -#define KERNEL_4x1 \ - MOVUPS (A_PTR), X4 \ - MOVUPS 2*SIZE(A_PTR), X5 \ - MULPD X8, X4 \ - MULPD X8, X5 \ - ADDPD X4, X0 \ - ADDPD X5, X1 \ - ADDQ $4*SIZE, A_PTR - -#define STORE4 \ - MOVUPS X0, (Y_PTR) \ - MOVUPS X1, 2*SIZE(Y_PTR) - -#define STORE4_INC \ - MOVLPD X0, (Y_PTR) \ - MOVHPD X0, (Y_PTR)(INC_Y*1) \ - MOVLPD X1, (Y_PTR)(INC_Y*2) \ - MOVHPD X1, (Y_PTR)(INC3_Y*1) - -#define KERNEL_2x4 \ - MOVUPS (A_PTR), X4 \ - MOVUPS (A_PTR)(LDA*1), X5 \ - MOVUPS (A_PTR)(LDA*2), X6 \ - MOVUPS (A_PTR)(LDA3*1), X7 \ - MULPD X8, X4 \ - MULPD X9, X5 \ - MULPD X10, X6 \ - MULPD X11, X7 \ - ADDPD X4, X0 \ - ADDPD X5, X0 \ - ADDPD X6, X0 \ - ADDPD X7, X0 \ - ADDQ $2*SIZE, A_PTR - -#define KERNEL_2x2 \ - MOVUPS (A_PTR), X4 \ - MOVUPS (A_PTR)(LDA*1), X5 \ - MULPD X8, X4 \ - MULPD X9, X5 \ - ADDPD X4, X0 \ - ADDPD X5, X0 \ - ADDQ $2*SIZE, A_PTR - -#define KERNEL_2x1 \ - MOVUPS (A_PTR), X4 \ - MULPD X8, X4 \ - ADDPD X4, X0 \ - ADDQ $2*SIZE, A_PTR - -#define STORE2 \ - MOVUPS X0, (Y_PTR) - -#define STORE2_INC \ - MOVLPD X0, (Y_PTR) \ - MOVHPD X0, (Y_PTR)(INC_Y*1) - -#define KERNEL_1x4 \ - MOVSD (Y_PTR), X0 \ - MOVSD (A_PTR), X4 \ - MOVSD (A_PTR)(LDA*1), X5 \ - MOVSD (A_PTR)(LDA*2), X6 \ - MOVSD 
(A_PTR)(LDA3*1), X7 \ - MULSD X8, X4 \ - MULSD X9, X5 \ - MULSD X10, X6 \ - MULSD X11, X7 \ - ADDSD X4, X0 \ - ADDSD X5, X0 \ - ADDSD X6, X0 \ - ADDSD X7, X0 \ - MOVSD X0, (Y_PTR) \ - ADDQ $SIZE, A_PTR - -#define KERNEL_1x2 \ - MOVSD (Y_PTR), X0 \ - MOVSD (A_PTR), X4 \ - MOVSD (A_PTR)(LDA*1), X5 \ - MULSD X8, X4 \ - MULSD X9, X5 \ - ADDSD X4, X0 \ - ADDSD X5, X0 \ - MOVSD X0, (Y_PTR) \ - ADDQ $SIZE, A_PTR - -#define KERNEL_1x1 \ - MOVSD (Y_PTR), X0 \ - MOVSD (A_PTR), X4 \ - MULSD X8, X4 \ - ADDSD X4, X0 \ - MOVSD X0, (Y_PTR) \ - ADDQ $SIZE, A_PTR - -#define SCALE_8(PTR, SCAL) \ - MOVUPS (PTR), X0 \ - MOVUPS 16(PTR), X1 \ - MOVUPS 32(PTR), X2 \ - MOVUPS 48(PTR), X3 \ - MULPD SCAL, X0 \ - MULPD SCAL, X1 \ - MULPD SCAL, X2 \ - MULPD SCAL, X3 \ - MOVUPS X0, (PTR) \ - MOVUPS X1, 16(PTR) \ - MOVUPS X2, 32(PTR) \ - MOVUPS X3, 48(PTR) - -#define SCALE_4(PTR, SCAL) \ - MOVUPS (PTR), X0 \ - MOVUPS 16(PTR), X1 \ - MULPD SCAL, X0 \ - MULPD SCAL, X1 \ - MOVUPS X0, (PTR) \ - MOVUPS X1, 16(PTR) \ - -#define SCALE_2(PTR, SCAL) \ - MOVUPS (PTR), X0 \ - MULPD SCAL, X0 \ - MOVUPS X0, (PTR) \ - -#define SCALE_1(PTR, SCAL) \ - MOVSD (PTR), X0 \ - MULSD SCAL, X0 \ - MOVSD X0, (PTR) \ - -#define SCALEINC_4(PTR, INC, INC3, SCAL) \ - MOVSD (PTR), X0 \ - MOVSD (PTR)(INC*1), X1 \ - MOVSD (PTR)(INC*2), X2 \ - MOVSD (PTR)(INC3*1), X3 \ - MULSD SCAL, X0 \ - MULSD SCAL, X1 \ - MULSD SCAL, X2 \ - MULSD SCAL, X3 \ - MOVSD X0, (PTR) \ - MOVSD X1, (PTR)(INC*1) \ - MOVSD X2, (PTR)(INC*2) \ - MOVSD X3, (PTR)(INC3*1) - -#define SCALEINC_2(PTR, INC, SCAL) \ - MOVSD (PTR), X0 \ - MOVSD (PTR)(INC*1), X1 \ - MULSD SCAL, X0 \ - MULSD SCAL, X1 \ - MOVSD X0, (PTR) \ - MOVSD X1, (PTR)(INC*1) - -// func GemvT(m, n int, -// alpha float64, -// a []float64, lda int, -// x []float64, incX int, -// beta float64, -// y []float64, incY int) -TEXT ·GemvT(SB), NOSPLIT, $32-128 - MOVQ M_DIM, M - MOVQ N_DIM, N - CMPQ M, $0 - JE end - CMPQ N, $0 - JE end - - MOVDDUP alpha+16(FP), ALPHA - - MOVQ x_base+56(FP), X_PTR - MOVQ y_base+96(FP), Y_PTR - MOVQ a_base+24(FP), A_ROW - MOVQ incY+120(FP), INC_Y // INC_Y = incY * sizeof(float64) - MOVQ lda+48(FP), LDA // LDA = LDA * sizeof(float64) - SHLQ $3, LDA - LEAQ (LDA)(LDA*2), LDA3 // LDA3 = LDA * 3 - MOVQ A_ROW, A_PTR - - MOVQ incX+80(FP), INC_X // INC_X = incX * sizeof(float64) - - XORQ TMP2, TMP2 - MOVQ N, TMP1 - SUBQ $1, TMP1 - NEGQ TMP1 - IMULQ INC_X, TMP1 - CMPQ INC_X, $0 - CMOVQLT TMP1, TMP2 - LEAQ (X_PTR)(TMP2*SIZE), X_PTR - MOVQ X_PTR, X - - SHLQ $3, INC_X - LEAQ (INC_X)(INC_X*2), INC3_X // INC3_X = INC_X * 3 - - CMPQ incY+120(FP), $1 // Check for dense vector Y (fast-path) - JNE inc - - MOVSD $1.0, X0 - COMISD beta+88(FP), X0 - JE gemv_start - - MOVSD $0.0, X0 - COMISD beta+88(FP), X0 - JE gemv_clear - - MOVDDUP beta+88(FP), BETA - SHRQ $3, M - JZ scal4 - -scal8: - SCALE_8(Y_PTR, BETA) - ADDQ $8*SIZE, Y_PTR - DECQ M - JNZ scal8 - -scal4: - TESTQ $4, M_DIM - JZ scal2 - SCALE_4(Y_PTR, BETA) - ADDQ $4*SIZE, Y_PTR - -scal2: - TESTQ $2, M_DIM - JZ scal1 - SCALE_2(Y_PTR, BETA) - ADDQ $2*SIZE, Y_PTR - -scal1: - TESTQ $1, M_DIM - JZ prep_end - SCALE_1(Y_PTR, BETA) - - JMP prep_end - -gemv_clear: // beta == 0 is special cased to clear memory (no nan handling) - XORPS X0, X0 - XORPS X1, X1 - XORPS X2, X2 - XORPS X3, X3 - - SHRQ $3, M - JZ clear4 - -clear8: - MOVUPS X0, (Y_PTR) - MOVUPS X1, 16(Y_PTR) - MOVUPS X2, 32(Y_PTR) - MOVUPS X3, 48(Y_PTR) - ADDQ $8*SIZE, Y_PTR - DECQ M - JNZ clear8 - -clear4: - TESTQ $4, M_DIM - JZ clear2 - MOVUPS X0, (Y_PTR) - MOVUPS X1, 16(Y_PTR) - ADDQ $4*SIZE, Y_PTR - -clear2: - 
TESTQ $2, M_DIM - JZ clear1 - MOVUPS X0, (Y_PTR) - ADDQ $2*SIZE, Y_PTR - -clear1: - TESTQ $1, M_DIM - JZ prep_end - MOVSD X0, (Y_PTR) - -prep_end: - MOVQ Y, Y_PTR - MOVQ M_DIM, M - -gemv_start: - SHRQ $2, N - JZ c2 - -c4: - // LOAD 4 - INIT4 - - MOVQ M_DIM, M - SHRQ $2, M - JZ c4r2 - -c4r4: - // 4x4 KERNEL - KERNEL_LOAD4 - KERNEL_4x4 - STORE4 - - ADDQ $4*SIZE, Y_PTR - - DECQ M - JNZ c4r4 - -c4r2: - TESTQ $2, M_DIM - JZ c4r1 - - // 4x2 KERNEL - KERNEL_LOAD2 - KERNEL_2x4 - STORE2 - - ADDQ $2*SIZE, Y_PTR - -c4r1: - TESTQ $1, M_DIM - JZ c4end - - // 4x1 KERNEL - KERNEL_1x4 - - ADDQ $SIZE, Y_PTR - -c4end: - LEAQ (X_PTR)(INC_X*4), X_PTR - MOVQ Y, Y_PTR - LEAQ (A_ROW)(LDA*4), A_ROW - MOVQ A_ROW, A_PTR - - DECQ N - JNZ c4 - -c2: - TESTQ $2, N_DIM - JZ c1 - - // LOAD 2 - INIT2 - - MOVQ M_DIM, M - SHRQ $2, M - JZ c2r2 - -c2r4: - // 2x4 KERNEL - KERNEL_LOAD4 - KERNEL_4x2 - STORE4 - - ADDQ $4*SIZE, Y_PTR - - DECQ M - JNZ c2r4 - -c2r2: - TESTQ $2, M_DIM - JZ c2r1 - - // 2x2 KERNEL - KERNEL_LOAD2 - KERNEL_2x2 - STORE2 - - ADDQ $2*SIZE, Y_PTR - -c2r1: - TESTQ $1, M_DIM - JZ c2end - - // 2x1 KERNEL - KERNEL_1x2 - - ADDQ $SIZE, Y_PTR - -c2end: - LEAQ (X_PTR)(INC_X*2), X_PTR - MOVQ Y, Y_PTR - LEAQ (A_ROW)(LDA*2), A_ROW - MOVQ A_ROW, A_PTR - -c1: - TESTQ $1, N_DIM - JZ end - - // LOAD 1 - INIT1 - - MOVQ M_DIM, M - SHRQ $2, M - JZ c1r2 - -c1r4: - // 1x4 KERNEL - KERNEL_LOAD4 - KERNEL_4x1 - STORE4 - - ADDQ $4*SIZE, Y_PTR - - DECQ M - JNZ c1r4 - -c1r2: - TESTQ $2, M_DIM - JZ c1r1 - - // 1x2 KERNEL - KERNEL_LOAD2 - KERNEL_2x1 - STORE2 - - ADDQ $2*SIZE, Y_PTR - -c1r1: - TESTQ $1, M_DIM - JZ end - - // 1x1 KERNEL - KERNEL_1x1 - -end: - RET - -inc: // Algorithm for incX != 0 ( split loads in kernel ) - XORQ TMP2, TMP2 - MOVQ M, TMP1 - SUBQ $1, TMP1 - IMULQ INC_Y, TMP1 - NEGQ TMP1 - CMPQ INC_Y, $0 - CMOVQLT TMP1, TMP2 - LEAQ (Y_PTR)(TMP2*SIZE), Y_PTR - MOVQ Y_PTR, Y - - SHLQ $3, INC_Y - LEAQ (INC_Y)(INC_Y*2), INC3_Y // INC3_Y = INC_Y * 3 - - MOVSD $1.0, X0 - COMISD beta+88(FP), X0 - JE inc_gemv_start - - MOVSD $0.0, X0 - COMISD beta+88(FP), X0 - JE inc_gemv_clear - - MOVDDUP beta+88(FP), BETA - SHRQ $2, M - JZ inc_scal2 - -inc_scal4: - SCALEINC_4(Y_PTR, INC_Y, INC3_Y, BETA) - LEAQ (Y_PTR)(INC_Y*4), Y_PTR - DECQ M - JNZ inc_scal4 - -inc_scal2: - TESTQ $2, M_DIM - JZ inc_scal1 - - SCALEINC_2(Y_PTR, INC_Y, BETA) - LEAQ (Y_PTR)(INC_Y*2), Y_PTR - -inc_scal1: - TESTQ $1, M_DIM - JZ inc_prep_end - SCALE_1(Y_PTR, BETA) - - JMP inc_prep_end - -inc_gemv_clear: // beta == 0 is special-cased to clear memory (no nan handling) - XORPS X0, X0 - XORPS X1, X1 - XORPS X2, X2 - XORPS X3, X3 - - SHRQ $2, M - JZ inc_clear2 - -inc_clear4: - MOVSD X0, (Y_PTR) - MOVSD X1, (Y_PTR)(INC_Y*1) - MOVSD X2, (Y_PTR)(INC_Y*2) - MOVSD X3, (Y_PTR)(INC3_Y*1) - LEAQ (Y_PTR)(INC_Y*4), Y_PTR - DECQ M - JNZ inc_clear4 - -inc_clear2: - TESTQ $2, M_DIM - JZ inc_clear1 - MOVSD X0, (Y_PTR) - MOVSD X1, (Y_PTR)(INC_Y*1) - LEAQ (Y_PTR)(INC_Y*2), Y_PTR - -inc_clear1: - TESTQ $1, M_DIM - JZ inc_prep_end - MOVSD X0, (Y_PTR) - -inc_prep_end: - MOVQ Y, Y_PTR - MOVQ M_DIM, M - -inc_gemv_start: - SHRQ $2, N - JZ inc_c2 - -inc_c4: - // LOAD 4 - INIT4 - - MOVQ M_DIM, M - SHRQ $2, M - JZ inc_c4r2 - -inc_c4r4: - // 4x4 KERNEL - KERNEL_LOAD4_INC - KERNEL_4x4 - STORE4_INC - - LEAQ (Y_PTR)(INC_Y*4), Y_PTR - - DECQ M - JNZ inc_c4r4 - -inc_c4r2: - TESTQ $2, M_DIM - JZ inc_c4r1 - - // 4x2 KERNEL - KERNEL_LOAD2_INC - KERNEL_2x4 - STORE2_INC - - LEAQ (Y_PTR)(INC_Y*2), Y_PTR - -inc_c4r1: - TESTQ $1, M_DIM - JZ inc_c4end - - // 4x1 KERNEL - KERNEL_1x4 - - ADDQ INC_Y, Y_PTR - 
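The GemvT listing (continuing below) computes y = alpha*Aᵀ*x + beta*y for a row-major m×n A, so x has length m and y length n; note the file swaps the frame offsets, defining M_DIM as n+8(FP) and N_DIM as m+0(FP). Unlike GemvN it pre-scales y by beta, with a skipped pass for beta == 1 and a memory-clearing pass for beta == 0, before accumulating row contributions. Also worth flagging: the `inc:` label's comment reads "Algorithm for incX != 0", but the branch reaching it tests incY != 1; incX is handled unconditionally in the prologue. A hedged pure-Go sketch, unit increments assumed, illustrative names:

package f64sketch

// gemvT sketches the deleted gemvT_amd64.s: y = alpha*Aᵀ*x + beta*y.
func gemvT(m, n int, alpha float64, a []float64, lda int, x []float64, beta float64, y []float64) {
	for j := 0; j < n; j++ {
		// The asm clears y outright when beta == 0 (no NaN handling)
		// and skips this pass when beta == 1; the sketch just multiplies.
		y[j] *= beta
	}
	for i := 0; i < m; i++ {
		ax := alpha * x[i]
		for j := 0; j < n; j++ {
			y[j] += ax * a[i*lda+j]
		}
	}
}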
-inc_c4end: - LEAQ (X_PTR)(INC_X*4), X_PTR - MOVQ Y, Y_PTR - LEAQ (A_ROW)(LDA*4), A_ROW - MOVQ A_ROW, A_PTR - - DECQ N - JNZ inc_c4 - -inc_c2: - TESTQ $2, N_DIM - JZ inc_c1 - - // LOAD 2 - INIT2 - - MOVQ M_DIM, M - SHRQ $2, M - JZ inc_c2r2 - -inc_c2r4: - // 2x4 KERNEL - KERNEL_LOAD4_INC - KERNEL_4x2 - STORE4_INC - - LEAQ (Y_PTR)(INC_Y*4), Y_PTR - DECQ M - JNZ inc_c2r4 - -inc_c2r2: - TESTQ $2, M_DIM - JZ inc_c2r1 - - // 2x2 KERNEL - KERNEL_LOAD2_INC - KERNEL_2x2 - STORE2_INC - - LEAQ (Y_PTR)(INC_Y*2), Y_PTR - -inc_c2r1: - TESTQ $1, M_DIM - JZ inc_c2end - - // 2x1 KERNEL - KERNEL_1x2 - - ADDQ INC_Y, Y_PTR - -inc_c2end: - LEAQ (X_PTR)(INC_X*2), X_PTR - MOVQ Y, Y_PTR - LEAQ (A_ROW)(LDA*2), A_ROW - MOVQ A_ROW, A_PTR - -inc_c1: - TESTQ $1, N_DIM - JZ inc_end - - // LOAD 1 - INIT1 - - MOVQ M_DIM, M - SHRQ $2, M - JZ inc_c1r2 - -inc_c1r4: - // 1x4 KERNEL - KERNEL_LOAD4_INC - KERNEL_4x1 - STORE4_INC - - LEAQ (Y_PTR)(INC_Y*4), Y_PTR - DECQ M - JNZ inc_c1r4 - -inc_c1r2: - TESTQ $2, M_DIM - JZ inc_c1r1 - - // 1x2 KERNEL - KERNEL_LOAD2_INC - KERNEL_2x1 - STORE2_INC - - LEAQ (Y_PTR)(INC_Y*2), Y_PTR - -inc_c1r1: - TESTQ $1, M_DIM - JZ inc_end - - // 1x1 KERNEL - KERNEL_1x1 - -inc_end: - RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/ger_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f64/ger_amd64.s deleted file mode 100644 index 7ae5cf7c4..000000000 --- a/vendor/gonum.org/v1/gonum/internal/asm/f64/ger_amd64.s +++ /dev/null @@ -1,591 +0,0 @@ -// Copyright ©2017 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !noasm,!appengine,!safe - -#include "textflag.h" - -#define SIZE 8 - -#define M_DIM m+0(FP) -#define M CX -#define N_DIM n+8(FP) -#define N BX - -#define TMP1 R14 -#define TMP2 R15 - -#define X_PTR SI -#define Y y_base+56(FP) -#define Y_PTR DX -#define A_ROW AX -#define A_PTR DI - -#define INC_X R8 -#define INC3_X R9 - -#define INC_Y R10 -#define INC3_Y R11 - -#define LDA R12 -#define LDA3 R13 - -#define ALPHA X0 - -#define LOAD4 \ - PREFETCHNTA (X_PTR )(INC_X*8) \ - MOVDDUP (X_PTR), X1 \ - MOVDDUP (X_PTR)(INC_X*1), X2 \ - MOVDDUP (X_PTR)(INC_X*2), X3 \ - MOVDDUP (X_PTR)(INC3_X*1), X4 \ - MULPD ALPHA, X1 \ - MULPD ALPHA, X2 \ - MULPD ALPHA, X3 \ - MULPD ALPHA, X4 - -#define LOAD2 \ - MOVDDUP (X_PTR), X1 \ - MOVDDUP (X_PTR)(INC_X*1), X2 \ - MULPD ALPHA, X1 \ - MULPD ALPHA, X2 - -#define LOAD1 \ - MOVDDUP (X_PTR), X1 \ - MULPD ALPHA, X1 - -#define KERNEL_LOAD4 \ - MOVUPS (Y_PTR), X5 \ - MOVUPS 2*SIZE(Y_PTR), X6 - -#define KERNEL_LOAD4_INC \ - MOVLPD (Y_PTR), X5 \ - MOVHPD (Y_PTR)(INC_Y*1), X5 \ - MOVLPD (Y_PTR)(INC_Y*2), X6 \ - MOVHPD (Y_PTR)(INC3_Y*1), X6 - -#define KERNEL_LOAD2 \ - MOVUPS (Y_PTR), X5 - -#define KERNEL_LOAD2_INC \ - MOVLPD (Y_PTR), X5 \ - MOVHPD (Y_PTR)(INC_Y*1), X5 - -#define KERNEL_4x4 \ - MOVUPS X5, X7 \ - MOVUPS X6, X8 \ - MOVUPS X5, X9 \ - MOVUPS X6, X10 \ - MOVUPS X5, X11 \ - MOVUPS X6, X12 \ - MULPD X1, X5 \ - MULPD X1, X6 \ - MULPD X2, X7 \ - MULPD X2, X8 \ - MULPD X3, X9 \ - MULPD X3, X10 \ - MULPD X4, X11 \ - MULPD X4, X12 - -#define STORE_4x4 \ - MOVUPS (A_PTR), X13 \ - ADDPD X13, X5 \ - MOVUPS 2*SIZE(A_PTR), X14 \ - ADDPD X14, X6 \ - MOVUPS (A_PTR)(LDA*1), X15 \ - ADDPD X15, X7 \ - MOVUPS 2*SIZE(A_PTR)(LDA*1), X0 \ - ADDPD X0, X8 \ - MOVUPS (A_PTR)(LDA*2), X13 \ - ADDPD X13, X9 \ - MOVUPS 2*SIZE(A_PTR)(LDA*2), X14 \ - ADDPD X14, X10 \ - MOVUPS (A_PTR)(LDA3*1), X15 \ - ADDPD X15, X11 \ - MOVUPS 2*SIZE(A_PTR)(LDA3*1), X0 \ - ADDPD X0, X12 \ - MOVUPS 
X5, (A_PTR) \ - MOVUPS X6, 2*SIZE(A_PTR) \ - MOVUPS X7, (A_PTR)(LDA*1) \ - MOVUPS X8, 2*SIZE(A_PTR)(LDA*1) \ - MOVUPS X9, (A_PTR)(LDA*2) \ - MOVUPS X10, 2*SIZE(A_PTR)(LDA*2) \ - MOVUPS X11, (A_PTR)(LDA3*1) \ - MOVUPS X12, 2*SIZE(A_PTR)(LDA3*1) \ - ADDQ $4*SIZE, A_PTR - -#define KERNEL_4x2 \ - MOVUPS X5, X6 \ - MOVUPS X5, X7 \ - MOVUPS X5, X8 \ - MULPD X1, X5 \ - MULPD X2, X6 \ - MULPD X3, X7 \ - MULPD X4, X8 - -#define STORE_4x2 \ - MOVUPS (A_PTR), X9 \ - ADDPD X9, X5 \ - MOVUPS (A_PTR)(LDA*1), X10 \ - ADDPD X10, X6 \ - MOVUPS (A_PTR)(LDA*2), X11 \ - ADDPD X11, X7 \ - MOVUPS (A_PTR)(LDA3*1), X12 \ - ADDPD X12, X8 \ - MOVUPS X5, (A_PTR) \ - MOVUPS X6, (A_PTR)(LDA*1) \ - MOVUPS X7, (A_PTR)(LDA*2) \ - MOVUPS X8, (A_PTR)(LDA3*1) \ - ADDQ $2*SIZE, A_PTR - -#define KERNEL_4x1 \ - MOVSD (Y_PTR), X5 \ - MOVSD X5, X6 \ - MOVSD X5, X7 \ - MOVSD X5, X8 \ - MULSD X1, X5 \ - MULSD X2, X6 \ - MULSD X3, X7 \ - MULSD X4, X8 - -#define STORE_4x1 \ - ADDSD (A_PTR), X5 \ - ADDSD (A_PTR)(LDA*1), X6 \ - ADDSD (A_PTR)(LDA*2), X7 \ - ADDSD (A_PTR)(LDA3*1), X8 \ - MOVSD X5, (A_PTR) \ - MOVSD X6, (A_PTR)(LDA*1) \ - MOVSD X7, (A_PTR)(LDA*2) \ - MOVSD X8, (A_PTR)(LDA3*1) \ - ADDQ $SIZE, A_PTR - -#define KERNEL_2x4 \ - MOVUPS X5, X7 \ - MOVUPS X6, X8 \ - MULPD X1, X5 \ - MULPD X1, X6 \ - MULPD X2, X7 \ - MULPD X2, X8 - -#define STORE_2x4 \ - MOVUPS (A_PTR), X9 \ - ADDPD X9, X5 \ - MOVUPS 2*SIZE(A_PTR), X10 \ - ADDPD X10, X6 \ - MOVUPS (A_PTR)(LDA*1), X11 \ - ADDPD X11, X7 \ - MOVUPS 2*SIZE(A_PTR)(LDA*1), X12 \ - ADDPD X12, X8 \ - MOVUPS X5, (A_PTR) \ - MOVUPS X6, 2*SIZE(A_PTR) \ - MOVUPS X7, (A_PTR)(LDA*1) \ - MOVUPS X8, 2*SIZE(A_PTR)(LDA*1) \ - ADDQ $4*SIZE, A_PTR - -#define KERNEL_2x2 \ - MOVUPS X5, X6 \ - MULPD X1, X5 \ - MULPD X2, X6 - -#define STORE_2x2 \ - MOVUPS (A_PTR), X7 \ - ADDPD X7, X5 \ - MOVUPS (A_PTR)(LDA*1), X8 \ - ADDPD X8, X6 \ - MOVUPS X5, (A_PTR) \ - MOVUPS X6, (A_PTR)(LDA*1) \ - ADDQ $2*SIZE, A_PTR - -#define KERNEL_2x1 \ - MOVSD (Y_PTR), X5 \ - MOVSD X5, X6 \ - MULSD X1, X5 \ - MULSD X2, X6 - -#define STORE_2x1 \ - ADDSD (A_PTR), X5 \ - ADDSD (A_PTR)(LDA*1), X6 \ - MOVSD X5, (A_PTR) \ - MOVSD X6, (A_PTR)(LDA*1) \ - ADDQ $SIZE, A_PTR - -#define KERNEL_1x4 \ - MULPD X1, X5 \ - MULPD X1, X6 - -#define STORE_1x4 \ - MOVUPS (A_PTR), X7 \ - ADDPD X7, X5 \ - MOVUPS 2*SIZE(A_PTR), X8 \ - ADDPD X8, X6 \ - MOVUPS X5, (A_PTR) \ - MOVUPS X6, 2*SIZE(A_PTR) \ - ADDQ $4*SIZE, A_PTR - -#define KERNEL_1x2 \ - MULPD X1, X5 - -#define STORE_1x2 \ - MOVUPS (A_PTR), X6 \ - ADDPD X6, X5 \ - MOVUPS X5, (A_PTR) \ - ADDQ $2*SIZE, A_PTR - -#define KERNEL_1x1 \ - MOVSD (Y_PTR), X5 \ - MULSD X1, X5 - -#define STORE_1x1 \ - ADDSD (A_PTR), X5 \ - MOVSD X5, (A_PTR) \ - ADDQ $SIZE, A_PTR - -// func Ger(m, n uintptr, alpha float64, -// x []float64, incX uintptr, -// y []float64, incY uintptr, -// a []float64, lda uintptr) -TEXT ·Ger(SB), NOSPLIT, $0 - MOVQ M_DIM, M - MOVQ N_DIM, N - CMPQ M, $0 - JE end - CMPQ N, $0 - JE end - - MOVDDUP alpha+16(FP), ALPHA - - MOVQ x_base+24(FP), X_PTR - MOVQ y_base+56(FP), Y_PTR - MOVQ a_base+88(FP), A_ROW - MOVQ incX+48(FP), INC_X // INC_X = incX * sizeof(float64) - SHLQ $3, INC_X - MOVQ lda+112(FP), LDA // LDA = LDA * sizeof(float64) - SHLQ $3, LDA - LEAQ (LDA)(LDA*2), LDA3 // LDA3 = LDA * 3 - LEAQ (INC_X)(INC_X*2), INC3_X // INC3_X = INC_X * 3 - MOVQ A_ROW, A_PTR - - XORQ TMP2, TMP2 - MOVQ M, TMP1 - SUBQ $1, TMP1 - IMULQ INC_X, TMP1 - NEGQ TMP1 - CMPQ INC_X, $0 - CMOVQLT TMP1, TMP2 - LEAQ (X_PTR)(TMP2*SIZE), X_PTR - - CMPQ incY+80(FP), $1 // Check for dense vector Y (fast-path) - JG inc - JL 
end - - SHRQ $2, M - JZ r2 - -r4: - // LOAD 4 - LOAD4 - - MOVQ N_DIM, N - SHRQ $2, N - JZ r4c2 - -r4c4: - // 4x4 KERNEL - KERNEL_LOAD4 - KERNEL_4x4 - STORE_4x4 - - ADDQ $4*SIZE, Y_PTR - - DECQ N - JNZ r4c4 - - // Reload ALPHA after it's clobbered by STORE_4x4 - MOVDDUP alpha+16(FP), ALPHA - -r4c2: - TESTQ $2, N_DIM - JZ r4c1 - - // 4x2 KERNEL - KERNEL_LOAD2 - KERNEL_4x2 - STORE_4x2 - - ADDQ $2*SIZE, Y_PTR - -r4c1: - TESTQ $1, N_DIM - JZ r4end - - // 4x1 KERNEL - KERNEL_4x1 - STORE_4x1 - - ADDQ $SIZE, Y_PTR - -r4end: - LEAQ (X_PTR)(INC_X*4), X_PTR - MOVQ Y, Y_PTR - LEAQ (A_ROW)(LDA*4), A_ROW - MOVQ A_ROW, A_PTR - - DECQ M - JNZ r4 - -r2: - TESTQ $2, M_DIM - JZ r1 - - // LOAD 2 - LOAD2 - - MOVQ N_DIM, N - SHRQ $2, N - JZ r2c2 - -r2c4: - // 2x4 KERNEL - KERNEL_LOAD4 - KERNEL_2x4 - STORE_2x4 - - ADDQ $4*SIZE, Y_PTR - - DECQ N - JNZ r2c4 - -r2c2: - TESTQ $2, N_DIM - JZ r2c1 - - // 2x2 KERNEL - KERNEL_LOAD2 - KERNEL_2x2 - STORE_2x2 - - ADDQ $2*SIZE, Y_PTR - -r2c1: - TESTQ $1, N_DIM - JZ r2end - - // 2x1 KERNEL - KERNEL_2x1 - STORE_2x1 - - ADDQ $SIZE, Y_PTR - -r2end: - LEAQ (X_PTR)(INC_X*2), X_PTR - MOVQ Y, Y_PTR - LEAQ (A_ROW)(LDA*2), A_ROW - MOVQ A_ROW, A_PTR - -r1: - TESTQ $1, M_DIM - JZ end - - // LOAD 1 - LOAD1 - - MOVQ N_DIM, N - SHRQ $2, N - JZ r1c2 - -r1c4: - // 1x4 KERNEL - KERNEL_LOAD4 - KERNEL_1x4 - STORE_1x4 - - ADDQ $4*SIZE, Y_PTR - - DECQ N - JNZ r1c4 - -r1c2: - TESTQ $2, N_DIM - JZ r1c1 - - // 1x2 KERNEL - KERNEL_LOAD2 - KERNEL_1x2 - STORE_1x2 - - ADDQ $2*SIZE, Y_PTR - -r1c1: - TESTQ $1, N_DIM - JZ end - - // 1x1 KERNEL - KERNEL_1x1 - STORE_1x1 - - ADDQ $SIZE, Y_PTR - -end: - RET - -inc: // Algorithm for incY != 1 ( split loads in kernel ) - - MOVQ incY+80(FP), INC_Y // INC_Y = incY * sizeof(float64) - SHLQ $3, INC_Y - LEAQ (INC_Y)(INC_Y*2), INC3_Y // INC3_Y = INC_Y * 3 - - XORQ TMP2, TMP2 - MOVQ N, TMP1 - SUBQ $1, TMP1 - IMULQ INC_Y, TMP1 - NEGQ TMP1 - CMPQ INC_Y, $0 - CMOVQLT TMP1, TMP2 - LEAQ (Y_PTR)(TMP2*SIZE), Y_PTR - - SHRQ $2, M - JZ inc_r2 - -inc_r4: - // LOAD 4 - LOAD4 - - MOVQ N_DIM, N - SHRQ $2, N - JZ inc_r4c2 - -inc_r4c4: - // 4x4 KERNEL - KERNEL_LOAD4_INC - KERNEL_4x4 - STORE_4x4 - - LEAQ (Y_PTR)(INC_Y*4), Y_PTR - DECQ N - JNZ inc_r4c4 - - // Reload ALPHA after it's clobbered by STORE_4x4 - MOVDDUP alpha+16(FP), ALPHA - -inc_r4c2: - TESTQ $2, N_DIM - JZ inc_r4c1 - - // 4x2 KERNEL - KERNEL_LOAD2_INC - KERNEL_4x2 - STORE_4x2 - - LEAQ (Y_PTR)(INC_Y*2), Y_PTR - -inc_r4c1: - TESTQ $1, N_DIM - JZ inc_r4end - - // 4x1 KERNEL - KERNEL_4x1 - STORE_4x1 - - ADDQ INC_Y, Y_PTR - -inc_r4end: - LEAQ (X_PTR)(INC_X*4), X_PTR - MOVQ Y, Y_PTR - LEAQ (A_ROW)(LDA*4), A_ROW - MOVQ A_ROW, A_PTR - - DECQ M - JNZ inc_r4 - -inc_r2: - TESTQ $2, M_DIM - JZ inc_r1 - - // LOAD 2 - LOAD2 - - MOVQ N_DIM, N - SHRQ $2, N - JZ inc_r2c2 - -inc_r2c4: - // 2x4 KERNEL - KERNEL_LOAD4_INC - KERNEL_2x4 - STORE_2x4 - - LEAQ (Y_PTR)(INC_Y*4), Y_PTR - DECQ N - JNZ inc_r2c4 - -inc_r2c2: - TESTQ $2, N_DIM - JZ inc_r2c1 - - // 2x2 KERNEL - KERNEL_LOAD2_INC - KERNEL_2x2 - STORE_2x2 - - LEAQ (Y_PTR)(INC_Y*2), Y_PTR - -inc_r2c1: - TESTQ $1, N_DIM - JZ inc_r2end - - // 2x1 KERNEL - KERNEL_2x1 - STORE_2x1 - - ADDQ INC_Y, Y_PTR - -inc_r2end: - LEAQ (X_PTR)(INC_X*2), X_PTR - MOVQ Y, Y_PTR - LEAQ (A_ROW)(LDA*2), A_ROW - MOVQ A_ROW, A_PTR - -inc_r1: - TESTQ $1, M_DIM - JZ end - - // LOAD 1 - LOAD1 - - MOVQ N_DIM, N - SHRQ $2, N - JZ inc_r1c2 - -inc_r1c4: - // 1x4 KERNEL - KERNEL_LOAD4_INC - KERNEL_1x4 - STORE_1x4 - - LEAQ (Y_PTR)(INC_Y*4), Y_PTR - DECQ N - JNZ inc_r1c4 - -inc_r1c2: - TESTQ $2, N_DIM - JZ inc_r1c1 - - // 1x2 
KERNEL - KERNEL_LOAD2_INC - KERNEL_1x2 - STORE_1x2 - - LEAQ (Y_PTR)(INC_Y*2), Y_PTR - -inc_r1c1: - TESTQ $1, N_DIM - JZ end - - // 1x1 KERNEL - KERNEL_1x1 - STORE_1x1 - - ADDQ INC_Y, Y_PTR - -inc_end: - RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/l1norm_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f64/l1norm_amd64.s deleted file mode 100644 index f87f856ca..000000000 --- a/vendor/gonum.org/v1/gonum/internal/asm/f64/l1norm_amd64.s +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright ©2016 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !noasm,!appengine,!safe - -#include "textflag.h" - -// func L1Dist(s, t []float64) float64 -TEXT ·L1Dist(SB), NOSPLIT, $0 - MOVQ s_base+0(FP), DI // DI = &s - MOVQ t_base+24(FP), SI // SI = &t - MOVQ s_len+8(FP), CX // CX = len(s) - CMPQ t_len+32(FP), CX // CX = max( CX, len(t) ) - CMOVQLE t_len+32(FP), CX - PXOR X3, X3 // norm = 0 - CMPQ CX, $0 // if CX == 0 { return 0 } - JE l1_end - XORQ AX, AX // i = 0 - MOVQ CX, BX - ANDQ $1, BX // BX = CX % 2 - SHRQ $1, CX // CX = floor( CX / 2 ) - JZ l1_tail_start // if CX == 0 { return 0 } - -l1_loop: // Loop unrolled 2x do { - MOVUPS (SI)(AX*8), X0 // X0 = t[i:i+1] - MOVUPS (DI)(AX*8), X1 // X1 = s[i:i+1] - MOVAPS X0, X2 - SUBPD X1, X0 - SUBPD X2, X1 - MAXPD X1, X0 // X0 = max( X0 - X1, X1 - X0 ) - ADDPD X0, X3 // norm += X0 - ADDQ $2, AX // i += 2 - LOOP l1_loop // } while --CX > 0 - CMPQ BX, $0 // if BX == 0 { return } - JE l1_end - -l1_tail_start: // Reset loop registers - MOVQ BX, CX // Loop counter: CX = BX - PXOR X0, X0 // reset X0, X1 to break dependencies - PXOR X1, X1 - -l1_tail: - MOVSD (SI)(AX*8), X0 // X0 = t[i] - MOVSD (DI)(AX*8), X1 // x1 = s[i] - MOVAPD X0, X2 - SUBSD X1, X0 - SUBSD X2, X1 - MAXSD X1, X0 // X0 = max( X0 - X1, X1 - X0 ) - ADDSD X0, X3 // norm += X0 - -l1_end: - MOVAPS X3, X2 - SHUFPD $1, X2, X2 - ADDSD X3, X2 // X2 = X3[1] + X3[0] - MOVSD X2, ret+48(FP) // return X2 - RET - diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/linfnorm_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f64/linfnorm_amd64.s deleted file mode 100644 index b06259280..000000000 --- a/vendor/gonum.org/v1/gonum/internal/asm/f64/linfnorm_amd64.s +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright ©2016 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
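Stepping back to ger_amd64.s, removed above: it implements the BLAS rank-1 update A += alpha * x * yᵀ. Two details in that listing are easy to miss. First, alpha is folded into x at load time (LOAD4 multiplies each broadcast x element by ALPHA), and because STORE_4x4 reuses X0, the register aliased to ALPHA, the code reloads alpha with MOVDDUP after each 4x4 column loop, as its comments note. Second, since the increments are uintptr here, the `JG inc` / `JL end` pair after `CMPQ incY+80(FP), $1` makes any incY comparing below 1 a no-op return rather than an error. A minimal sketch of the update, unit increments, illustrative names:

package f64sketch

// ger sketches the deleted ger_amd64.s: A += alpha * x * yᵀ
// for a row-major m×n matrix A with stride lda.
func ger(m, n int, alpha float64, x, y []float64, a []float64, lda int) {
	for i := 0; i < m; i++ {
		ax := alpha * x[i] // the asm performs this fold once per row block, at load
		for j := 0; j < n; j++ {
			a[i*lda+j] += ax * y[j]
		}
	}
}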
- -// +build !noasm,!appengine,!safe - -#include "textflag.h" - -// func LinfDist(s, t []float64) float64 -TEXT ·LinfDist(SB), NOSPLIT, $0 - MOVQ s_base+0(FP), DI // DI = &s - MOVQ t_base+24(FP), SI // SI = &t - MOVQ s_len+8(FP), CX // CX = len(s) - CMPQ t_len+32(FP), CX // CX = max( CX, len(t) ) - CMOVQLE t_len+32(FP), CX - PXOR X3, X3 // norm = 0 - CMPQ CX, $0 // if CX == 0 { return 0 } - JE l1_end - XORQ AX, AX // i = 0 - MOVQ CX, BX - ANDQ $1, BX // BX = CX % 2 - SHRQ $1, CX // CX = floor( CX / 2 ) - JZ l1_tail_start // if CX == 0 { return 0 } - -l1_loop: // Loop unrolled 2x do { - MOVUPS (SI)(AX*8), X0 // X0 = t[i:i+1] - MOVUPS (DI)(AX*8), X1 // X1 = s[i:i+1] - MOVAPS X0, X2 - SUBPD X1, X0 - SUBPD X2, X1 - MAXPD X1, X0 // X0 = max( X0 - X1, X1 - X0 ) - MAXPD X0, X3 // norm = max( norm, X0 ) - ADDQ $2, AX // i += 2 - LOOP l1_loop // } while --CX > 0 - CMPQ BX, $0 // if BX == 0 { return } - JE l1_end - -l1_tail_start: // Reset loop registers - MOVQ BX, CX // Loop counter: CX = BX - PXOR X0, X0 // reset X0, X1 to break dependencies - PXOR X1, X1 - -l1_tail: - MOVSD (SI)(AX*8), X0 // X0 = t[i] - MOVSD (DI)(AX*8), X1 // X1 = s[i] - MOVAPD X0, X2 - SUBSD X1, X0 - SUBSD X2, X1 - MAXSD X1, X0 // X0 = max( X0 - X1, X1 - X0 ) - MAXSD X0, X3 // norm = max( norm, X0 ) - -l1_end: - MOVAPS X3, X2 - SHUFPD $1, X2, X2 - MAXSD X3, X2 // X2 = max( X3[1], X3[0] ) - MOVSD X2, ret+48(FP) // return X2 - RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/scal.go b/vendor/gonum.org/v1/gonum/internal/asm/f64/scal.go deleted file mode 100644 index 3cc7aca69..000000000 --- a/vendor/gonum.org/v1/gonum/internal/asm/f64/scal.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright ©2016 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !amd64 noasm appengine safe - -package f64 - -// ScalUnitary is -// for i := range x { -// x[i] *= alpha -// } -func ScalUnitary(alpha float64, x []float64) { - for i := range x { - x[i] *= alpha - } -} - -// ScalUnitaryTo is -// for i, v := range x { -// dst[i] = alpha * v -// } -func ScalUnitaryTo(dst []float64, alpha float64, x []float64) { - for i, v := range x { - dst[i] = alpha * v - } -} - -// ScalInc is -// var ix uintptr -// for i := 0; i < int(n); i++ { -// x[ix] *= alpha -// ix += incX -// } -func ScalInc(alpha float64, x []float64, n, incX uintptr) { - var ix uintptr - for i := 0; i < int(n); i++ { - x[ix] *= alpha - ix += incX - } -} - -// ScalIncTo is -// var idst, ix uintptr -// for i := 0; i < int(n); i++ { -// dst[idst] = alpha * x[ix] -// ix += incX -// idst += incDst -// } -func ScalIncTo(dst []float64, incDst uintptr, alpha float64, x []float64, n, incX uintptr) { - var idst, ix uintptr - for i := 0; i < int(n); i++ { - dst[idst] = alpha * x[ix] - ix += incX - idst += incDst - } -} diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/scalinc_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f64/scalinc_amd64.s deleted file mode 100644 index cf185fc0d..000000000 --- a/vendor/gonum.org/v1/gonum/internal/asm/f64/scalinc_amd64.s +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright ©2016 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// -// Some of the loop unrolling code is copied from: -// http://golang.org/src/math/big/arith_amd64.s -// which is distributed under these terms: -// -// Copyright (c) 2012 The Go Authors. All rights reserved. 
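The two distance kernels removed above, L1Dist and LinfDist, share one instruction-level idiom: instead of an absolute-value operation, each iteration computes both a-b and b-a and takes MAXPD/MAXSD of the pair, since |a-b| == max(a-b, b-a). (A tiny typo rides along in l1norm_amd64.s: the tail comment reads "x1 = s[i]" for the load into X1.) The equivalent scalar trick in Go, as a sketch:

package f64sketch

// absDiff mirrors the SUBPD/SUBPD/MAXPD sequence in the deleted
// l1norm/linfnorm kernels: |a-b| computed as max(a-b, b-a),
// with no abs instruction needed.
func absDiff(a, b float64) float64 {
	d, e := a-b, b-a
	if d > e {
		return d
	}
	return e
}

Unlike math.Abs(a-b) this keeps the SSE loop branch-free; the pure-Go fallbacks later in this patch (stubs_noasm.go) simply call math.Abs instead.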
-// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build !noasm,!appengine,!safe - -#include "textflag.h" - -#define X_PTR SI -#define LEN CX -#define TAIL BX -#define INC_X R8 -#define INCx3_X R9 -#define ALPHA X0 -#define ALPHA_2 X1 - -// func ScalInc(alpha float64, x []float64, n, incX uintptr) -TEXT ·ScalInc(SB), NOSPLIT, $0 - MOVSD alpha+0(FP), ALPHA // ALPHA = alpha - MOVQ x_base+8(FP), X_PTR // X_PTR = &x - MOVQ incX+40(FP), INC_X // INC_X = incX - SHLQ $3, INC_X // INC_X *= sizeof(float64) - MOVQ n+32(FP), LEN // LEN = n - CMPQ LEN, $0 - JE end // if LEN == 0 { return } - - MOVQ LEN, TAIL - ANDQ $3, TAIL // TAIL = LEN % 4 - SHRQ $2, LEN // LEN = floor( LEN / 4 ) - JZ tail_start // if LEN == 0 { goto tail_start } - - MOVUPS ALPHA, ALPHA_2 // ALPHA_2 = ALPHA for pipelining - LEAQ (INC_X)(INC_X*2), INCx3_X // INCx3_X = INC_X * 3 - -loop: // do { // x[i] *= alpha unrolled 4x. 
- MOVSD (X_PTR), X2 // X_i = x[i] - MOVSD (X_PTR)(INC_X*1), X3 - MOVSD (X_PTR)(INC_X*2), X4 - MOVSD (X_PTR)(INCx3_X*1), X5 - - MULSD ALPHA, X2 // X_i *= a - MULSD ALPHA_2, X3 - MULSD ALPHA, X4 - MULSD ALPHA_2, X5 - - MOVSD X2, (X_PTR) // x[i] = X_i - MOVSD X3, (X_PTR)(INC_X*1) - MOVSD X4, (X_PTR)(INC_X*2) - MOVSD X5, (X_PTR)(INCx3_X*1) - - LEAQ (X_PTR)(INC_X*4), X_PTR // X_PTR = &(X_PTR[incX*4]) - DECQ LEN - JNZ loop // } while --LEN > 0 - CMPQ TAIL, $0 - JE end // if TAIL == 0 { return } - -tail_start: // Reset loop registers - MOVQ TAIL, LEN // Loop counter: LEN = TAIL - SHRQ $1, LEN // LEN = floor( LEN / 2 ) - JZ tail_one - -tail_two: // do { - MOVSD (X_PTR), X2 // X_i = x[i] - MOVSD (X_PTR)(INC_X*1), X3 - MULSD ALPHA, X2 // X_i *= a - MULSD ALPHA, X3 - MOVSD X2, (X_PTR) // x[i] = X_i - MOVSD X3, (X_PTR)(INC_X*1) - - LEAQ (X_PTR)(INC_X*2), X_PTR // X_PTR = &(X_PTR[incX*2]) - - ANDQ $1, TAIL - JZ end - -tail_one: - MOVSD (X_PTR), X2 // X_i = x[i] - MULSD ALPHA, X2 // X_i *= ALPHA - MOVSD X2, (X_PTR) // x[i] = X_i - -end: - RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/scalincto_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f64/scalincto_amd64.s deleted file mode 100644 index cf359ac1e..000000000 --- a/vendor/gonum.org/v1/gonum/internal/asm/f64/scalincto_amd64.s +++ /dev/null @@ -1,122 +0,0 @@ -// Copyright ©2016 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// -// Some of the loop unrolling code is copied from: -// http://golang.org/src/math/big/arith_amd64.s -// which is distributed under these terms: -// -// Copyright (c) 2012 The Go Authors. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
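ScalInc, removed above, scales every incX-th element of x by alpha. The kernel keeps two copies of alpha (ALPHA and ALPHA_2, "for pipelining") so consecutive MULSDs alternate source registers, unrolls 4x using a precomputed 3*INC_X index register, then finishes with a 2x-then-1x tail. The shape of that loop in Go, as a sketch; the vendored pure-Go equivalent appears in scal.go earlier in this patch:

package f64sketch

// scalInc sketches the unroll structure of the deleted scalinc_amd64.s:
// x[0], x[incX], x[2*incX], ... scaled by alpha, four elements per trip,
// then a scalar tail. Bounds are the caller's responsibility, as in BLAS.
func scalInc(alpha float64, x []float64, n, incX int) {
	i, ix := 0, 0
	for ; i+4 <= n; i += 4 {
		x[ix] *= alpha
		x[ix+incX] *= alpha
		x[ix+2*incX] *= alpha
		x[ix+3*incX] *= alpha
		ix += 4 * incX
	}
	for ; i < n; i++ {
		x[ix] *= alpha
		ix += incX
	}
}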
- -// +build !noasm,!appengine,!safe - -#include "textflag.h" - -#define X_PTR SI -#define DST_PTR DI -#define LEN CX -#define TAIL BX -#define INC_X R8 -#define INCx3_X R9 -#define INC_DST R10 -#define INCx3_DST R11 -#define ALPHA X0 -#define ALPHA_2 X1 - -// func ScalIncTo(dst []float64, incDst uintptr, alpha float64, x []float64, n, incX uintptr) -TEXT ·ScalIncTo(SB), NOSPLIT, $0 - MOVQ dst_base+0(FP), DST_PTR // DST_PTR = &dst - MOVQ incDst+24(FP), INC_DST // INC_DST = incDst - SHLQ $3, INC_DST // INC_DST *= sizeof(float64) - MOVSD alpha+32(FP), ALPHA // ALPHA = alpha - MOVQ x_base+40(FP), X_PTR // X_PTR = &x - MOVQ n+64(FP), LEN // LEN = n - MOVQ incX+72(FP), INC_X // INC_X = incX - SHLQ $3, INC_X // INC_X *= sizeof(float64) - CMPQ LEN, $0 - JE end // if LEN == 0 { return } - - MOVQ LEN, TAIL - ANDQ $3, TAIL // TAIL = LEN % 4 - SHRQ $2, LEN // LEN = floor( LEN / 4 ) - JZ tail_start // if LEN == 0 { goto tail_start } - - MOVUPS ALPHA, ALPHA_2 // ALPHA_2 = ALPHA for pipelining - LEAQ (INC_X)(INC_X*2), INCx3_X // INCx3_X = INC_X * 3 - LEAQ (INC_DST)(INC_DST*2), INCx3_DST // INCx3_DST = INC_DST * 3 - -loop: // do { // x[i] *= alpha unrolled 4x. - MOVSD (X_PTR), X2 // X_i = x[i] - MOVSD (X_PTR)(INC_X*1), X3 - MOVSD (X_PTR)(INC_X*2), X4 - MOVSD (X_PTR)(INCx3_X*1), X5 - - MULSD ALPHA, X2 // X_i *= a - MULSD ALPHA_2, X3 - MULSD ALPHA, X4 - MULSD ALPHA_2, X5 - - MOVSD X2, (DST_PTR) // dst[i] = X_i - MOVSD X3, (DST_PTR)(INC_DST*1) - MOVSD X4, (DST_PTR)(INC_DST*2) - MOVSD X5, (DST_PTR)(INCx3_DST*1) - - LEAQ (X_PTR)(INC_X*4), X_PTR // X_PTR = &(X_PTR[incX*4]) - LEAQ (DST_PTR)(INC_DST*4), DST_PTR // DST_PTR = &(DST_PTR[incDst*4]) - DECQ LEN - JNZ loop // } while --LEN > 0 - CMPQ TAIL, $0 - JE end // if TAIL == 0 { return } - -tail_start: // Reset loop registers - MOVQ TAIL, LEN // Loop counter: LEN = TAIL - SHRQ $1, LEN // LEN = floor( LEN / 2 ) - JZ tail_one - -tail_two: - MOVSD (X_PTR), X2 // X_i = x[i] - MOVSD (X_PTR)(INC_X*1), X3 - MULSD ALPHA, X2 // X_i *= a - MULSD ALPHA, X3 - MOVSD X2, (DST_PTR) // dst[i] = X_i - MOVSD X3, (DST_PTR)(INC_DST*1) - - LEAQ (X_PTR)(INC_X*2), X_PTR // X_PTR = &(X_PTR[incX*2]) - LEAQ (DST_PTR)(INC_DST*2), DST_PTR // DST_PTR = &(DST_PTR[incDst*2]) - - ANDQ $1, TAIL - JZ end - -tail_one: - MOVSD (X_PTR), X2 // X_i = x[i] - MULSD ALPHA, X2 // X_i *= ALPHA - MOVSD X2, (DST_PTR) // x[i] = X_i - -end: - RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/scalunitary_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f64/scalunitary_amd64.s deleted file mode 100644 index 560aef2a3..000000000 --- a/vendor/gonum.org/v1/gonum/internal/asm/f64/scalunitary_amd64.s +++ /dev/null @@ -1,112 +0,0 @@ -// Copyright ©2016 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// -// Some of the loop unrolling code is copied from: -// http://golang.org/src/math/big/arith_amd64.s -// which is distributed under these terms: -// -// Copyright (c) 2012 The Go Authors. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build !noasm,!appengine,!safe - -#include "textflag.h" - -#define MOVDDUP_ALPHA LONG $0x44120FF2; WORD $0x0824 // @ MOVDDUP XMM0, 8[RSP] - -#define X_PTR SI -#define DST_PTR DI -#define IDX AX -#define LEN CX -#define TAIL BX -#define ALPHA X0 -#define ALPHA_2 X1 - -// func ScalUnitary(alpha float64, x []float64) -TEXT ·ScalUnitary(SB), NOSPLIT, $0 - MOVDDUP_ALPHA // ALPHA = { alpha, alpha } - MOVQ x_base+8(FP), X_PTR // X_PTR = &x - MOVQ x_len+16(FP), LEN // LEN = len(x) - CMPQ LEN, $0 - JE end // if LEN == 0 { return } - XORQ IDX, IDX // IDX = 0 - - MOVQ LEN, TAIL - ANDQ $7, TAIL // TAIL = LEN % 8 - SHRQ $3, LEN // LEN = floor( LEN / 8 ) - JZ tail_start // if LEN == 0 { goto tail_start } - - MOVUPS ALPHA, ALPHA_2 - -loop: // do { // x[i] *= alpha unrolled 8x. - MOVUPS (X_PTR)(IDX*8), X2 // X_i = x[i] - MOVUPS 16(X_PTR)(IDX*8), X3 - MOVUPS 32(X_PTR)(IDX*8), X4 - MOVUPS 48(X_PTR)(IDX*8), X5 - - MULPD ALPHA, X2 // X_i *= ALPHA - MULPD ALPHA_2, X3 - MULPD ALPHA, X4 - MULPD ALPHA_2, X5 - - MOVUPS X2, (X_PTR)(IDX*8) // x[i] = X_i - MOVUPS X3, 16(X_PTR)(IDX*8) - MOVUPS X4, 32(X_PTR)(IDX*8) - MOVUPS X5, 48(X_PTR)(IDX*8) - - ADDQ $8, IDX // i += 8 - DECQ LEN - JNZ loop // while --LEN > 0 - CMPQ TAIL, $0 - JE end // if TAIL == 0 { return } - -tail_start: // Reset loop registers - MOVQ TAIL, LEN // Loop counter: LEN = TAIL - SHRQ $1, LEN // LEN = floor( TAIL / 2 ) - JZ tail_one // if n == 0 goto end - -tail_two: // do { - MOVUPS (X_PTR)(IDX*8), X2 // X_i = x[i] - MULPD ALPHA, X2 // X_i *= ALPHA - MOVUPS X2, (X_PTR)(IDX*8) // x[i] = X_i - ADDQ $2, IDX // i += 2 - DECQ LEN - JNZ tail_two // while --LEN > 0 - - ANDQ $1, TAIL - JZ end // if TAIL == 0 { return } - -tail_one: - // x[i] *= alpha for the remaining element. - MOVSD (X_PTR)(IDX*8), X2 - MULSD ALPHA, X2 - MOVSD X2, (X_PTR)(IDX*8) - -end: - RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/scalunitaryto_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f64/scalunitaryto_amd64.s deleted file mode 100644 index a5b2b018f..000000000 --- a/vendor/gonum.org/v1/gonum/internal/asm/f64/scalunitaryto_amd64.s +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright ©2016 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
-// -// Some of the loop unrolling code is copied from: -// http://golang.org/src/math/big/arith_amd64.s -// which is distributed under these terms: -// -// Copyright (c) 2012 The Go Authors. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build !noasm,!appengine,!safe - -#include "textflag.h" - -#define MOVDDUP_ALPHA LONG $0x44120FF2; WORD $0x2024 // @ MOVDDUP 32(SP), X0 /*XMM0, 32[RSP]*/ - -#define X_PTR SI -#define DST_PTR DI -#define IDX AX -#define LEN CX -#define TAIL BX -#define ALPHA X0 -#define ALPHA_2 X1 - -// func ScalUnitaryTo(dst []float64, alpha float64, x []float64) -// This function assumes len(dst) >= len(x). -TEXT ·ScalUnitaryTo(SB), NOSPLIT, $0 - MOVQ x_base+32(FP), X_PTR // X_PTR = &x - MOVQ dst_base+0(FP), DST_PTR // DST_PTR = &dst - MOVDDUP_ALPHA // ALPHA = { alpha, alpha } - MOVQ x_len+40(FP), LEN // LEN = len(x) - CMPQ LEN, $0 - JE end // if LEN == 0 { return } - - XORQ IDX, IDX // IDX = 0 - MOVQ LEN, TAIL - ANDQ $7, TAIL // TAIL = LEN % 8 - SHRQ $3, LEN // LEN = floor( LEN / 8 ) - JZ tail_start // if LEN == 0 { goto tail_start } - - MOVUPS ALPHA, ALPHA_2 // ALPHA_2 = ALPHA for pipelining - -loop: // do { // dst[i] = alpha * x[i] unrolled 8x. 
- MOVUPS (X_PTR)(IDX*8), X2 // X_i = x[i] - MOVUPS 16(X_PTR)(IDX*8), X3 - MOVUPS 32(X_PTR)(IDX*8), X4 - MOVUPS 48(X_PTR)(IDX*8), X5 - - MULPD ALPHA, X2 // X_i *= ALPHA - MULPD ALPHA_2, X3 - MULPD ALPHA, X4 - MULPD ALPHA_2, X5 - - MOVUPS X2, (DST_PTR)(IDX*8) // dst[i] = X_i - MOVUPS X3, 16(DST_PTR)(IDX*8) - MOVUPS X4, 32(DST_PTR)(IDX*8) - MOVUPS X5, 48(DST_PTR)(IDX*8) - - ADDQ $8, IDX // i += 8 - DECQ LEN - JNZ loop // while --LEN > 0 - CMPQ TAIL, $0 - JE end // if TAIL == 0 { return } - -tail_start: // Reset loop counters - MOVQ TAIL, LEN // Loop counter: LEN = TAIL - SHRQ $1, LEN // LEN = floor( TAIL / 2 ) - JZ tail_one // if LEN == 0 { goto tail_one } - -tail_two: // do { - MOVUPS (X_PTR)(IDX*8), X2 // X_i = x[i] - MULPD ALPHA, X2 // X_i *= ALPHA - MOVUPS X2, (DST_PTR)(IDX*8) // dst[i] = X_i - ADDQ $2, IDX // i += 2 - DECQ LEN - JNZ tail_two // while --LEN > 0 - - ANDQ $1, TAIL - JZ end // if TAIL == 0 { return } - -tail_one: - MOVSD (X_PTR)(IDX*8), X2 // X_i = x[i] - MULSD ALPHA, X2 // X_i *= ALPHA - MOVSD X2, (DST_PTR)(IDX*8) // dst[i] = X_i - -end: - RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/stubs_amd64.go b/vendor/gonum.org/v1/gonum/internal/asm/f64/stubs_amd64.go deleted file mode 100644 index a51b94514..000000000 --- a/vendor/gonum.org/v1/gonum/internal/asm/f64/stubs_amd64.go +++ /dev/null @@ -1,172 +0,0 @@ -// Copyright ©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !noasm,!appengine,!safe - -package f64 - -// L1Norm is -// for _, v := range x { -// sum += math.Abs(v) -// } -// return sum -func L1Norm(x []float64) (sum float64) - -// L1NormInc is -// for i := 0; i < n*incX; i += incX { -// sum += math.Abs(x[i]) -// } -// return sum -func L1NormInc(x []float64, n, incX int) (sum float64) - -// AddConst is -// for i := range x { -// x[i] += alpha -// } -func AddConst(alpha float64, x []float64) - -// Add is -// for i, v := range s { -// dst[i] += v -// } -func Add(dst, s []float64) - -// AxpyUnitary is -// for i, v := range x { -// y[i] += alpha * v -// } -func AxpyUnitary(alpha float64, x, y []float64) - -// AxpyUnitaryTo is -// for i, v := range x { -// dst[i] = alpha*v + y[i] -// } -func AxpyUnitaryTo(dst []float64, alpha float64, x, y []float64) - -// AxpyInc is -// for i := 0; i < int(n); i++ { -// y[iy] += alpha * x[ix] -// ix += incX -// iy += incY -// } -func AxpyInc(alpha float64, x, y []float64, n, incX, incY, ix, iy uintptr) - -// AxpyIncTo is -// for i := 0; i < int(n); i++ { -// dst[idst] = alpha*x[ix] + y[iy] -// ix += incX -// iy += incY -// idst += incDst -// } -func AxpyIncTo(dst []float64, incDst, idst uintptr, alpha float64, x, y []float64, n, incX, incY, ix, iy uintptr) - -// CumSum is -// if len(s) == 0 { -// return dst -// } -// dst[0] = s[0] -// for i, v := range s[1:] { -// dst[i+1] = dst[i] + v -// } -// return dst -func CumSum(dst, s []float64) []float64 - -// CumProd is -// if len(s) == 0 { -// return dst -// } -// dst[0] = s[0] -// for i, v := range s[1:] { -// dst[i+1] = dst[i] * v -// } -// return dst -func CumProd(dst, s []float64) []float64 - -// Div is -// for i, v := range s { -// dst[i] /= v -// } -func Div(dst, s []float64) - -// DivTo is -// for i, v := range s { -// dst[i] = v / t[i] -// } -// return dst -func DivTo(dst, x, y []float64) []float64 - -// DotUnitary is -// for i, v := range x { -// sum += y[i] * v -// } -// return sum -func DotUnitary(x, y []float64) (sum float64) - -// DotInc is -// for i := 0; i 
< int(n); i++ { -// sum += y[iy] * x[ix] -// ix += incX -// iy += incY -// } -// return sum -func DotInc(x, y []float64, n, incX, incY, ix, iy uintptr) (sum float64) - -// L1Dist is -// var norm float64 -// for i, v := range s { -// norm += math.Abs(t[i] - v) -// } -// return norm -func L1Dist(s, t []float64) float64 - -// LinfDist is -// var norm float64 -// if len(s) == 0 { -// return 0 -// } -// norm = math.Abs(t[0] - s[0]) -// for i, v := range s[1:] { -// absDiff := math.Abs(t[i+1] - v) -// if absDiff > norm || math.IsNaN(norm) { -// norm = absDiff -// } -// } -// return norm -func LinfDist(s, t []float64) float64 - -// ScalUnitary is -// for i := range x { -// x[i] *= alpha -// } -func ScalUnitary(alpha float64, x []float64) - -// ScalUnitaryTo is -// for i, v := range x { -// dst[i] = alpha * v -// } -func ScalUnitaryTo(dst []float64, alpha float64, x []float64) - -// ScalInc is -// var ix uintptr -// for i := 0; i < int(n); i++ { -// x[ix] *= alpha -// ix += incX -// } -func ScalInc(alpha float64, x []float64, n, incX uintptr) - -// ScalIncTo is -// var idst, ix uintptr -// for i := 0; i < int(n); i++ { -// dst[idst] = alpha * x[ix] -// ix += incX -// idst += incDst -// } -func ScalIncTo(dst []float64, incDst uintptr, alpha float64, x []float64, n, incX uintptr) - -// Sum is -// var sum float64 -// for i := range x { -// sum += x[i] -// } -func Sum(x []float64) float64 diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/stubs_noasm.go b/vendor/gonum.org/v1/gonum/internal/asm/f64/stubs_noasm.go deleted file mode 100644 index 670978aa4..000000000 --- a/vendor/gonum.org/v1/gonum/internal/asm/f64/stubs_noasm.go +++ /dev/null @@ -1,170 +0,0 @@ -// Copyright ©2016 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
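stubs_amd64.go above is the Go half of the assembly pairing: under the `!noasm,!appengine,!safe` build tags it declares each function without a body, and the linker resolves the name against the matching TEXT ·Name(SB) symbol in the .s files. Its counterpart stubs_noasm.go, whose body follows, carries the inverse tag set (`!amd64 noasm appengine safe`) and supplies pure-Go implementations, so exactly one definition exists per build. The pattern in miniature, as a layout sketch only (hypothetical file names; the declaration side compiles only when paired with the assembly):

// asm_decl.go — built when the assembly is in play
// +build !noasm,!appengine,!safe

package f64

// Sum has no Go body here; the linker binds the name to the
// TEXT ·Sum(SB) symbol in sum_amd64.s.
func Sum(x []float64) float64

// The fallback file (see stubs_noasm.go below) repeats the signature
// under the inverse tags and provides an ordinary Go body.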
- -// +build !amd64 noasm appengine safe - -package f64 - -import "math" - -// L1Norm is -// for _, v := range x { -// sum += math.Abs(v) -// } -// return sum -func L1Norm(x []float64) (sum float64) { - for _, v := range x { - sum += math.Abs(v) - } - return sum -} - -// L1NormInc is -// for i := 0; i < n*incX; i += incX { -// sum += math.Abs(x[i]) -// } -// return sum -func L1NormInc(x []float64, n, incX int) (sum float64) { - for i := 0; i < n*incX; i += incX { - sum += math.Abs(x[i]) - } - return sum -} - -// Add is -// for i, v := range s { -// dst[i] += v -// } -func Add(dst, s []float64) { - for i, v := range s { - dst[i] += v - } -} - -// AddConst is -// for i := range x { -// x[i] += alpha -// } -func AddConst(alpha float64, x []float64) { - for i := range x { - x[i] += alpha - } -} - -// CumSum is -// if len(s) == 0 { -// return dst -// } -// dst[0] = s[0] -// for i, v := range s[1:] { -// dst[i+1] = dst[i] + v -// } -// return dst -func CumSum(dst, s []float64) []float64 { - if len(s) == 0 { - return dst - } - dst[0] = s[0] - for i, v := range s[1:] { - dst[i+1] = dst[i] + v - } - return dst -} - -// CumProd is -// if len(s) == 0 { -// return dst -// } -// dst[0] = s[0] -// for i, v := range s[1:] { -// dst[i+1] = dst[i] * v -// } -// return dst -func CumProd(dst, s []float64) []float64 { - if len(s) == 0 { - return dst - } - dst[0] = s[0] - for i, v := range s[1:] { - dst[i+1] = dst[i] * v - } - return dst -} - -// Div is -// for i, v := range s { -// dst[i] /= v -// } -func Div(dst, s []float64) { - for i, v := range s { - dst[i] /= v - } -} - -// DivTo is -// for i, v := range s { -// dst[i] = v / t[i] -// } -// return dst -func DivTo(dst, s, t []float64) []float64 { - for i, v := range s { - dst[i] = v / t[i] - } - return dst -} - -// L1Dist is -// var norm float64 -// for i, v := range s { -// norm += math.Abs(t[i] - v) -// } -// return norm -func L1Dist(s, t []float64) float64 { - var norm float64 - for i, v := range s { - norm += math.Abs(t[i] - v) - } - return norm -} - -// LinfDist is -// var norm float64 -// if len(s) == 0 { -// return 0 -// } -// norm = math.Abs(t[0] - s[0]) -// for i, v := range s[1:] { -// absDiff := math.Abs(t[i+1] - v) -// if absDiff > norm || math.IsNaN(norm) { -// norm = absDiff -// } -// } -// return norm -func LinfDist(s, t []float64) float64 { - var norm float64 - if len(s) == 0 { - return 0 - } - norm = math.Abs(t[0] - s[0]) - for i, v := range s[1:] { - absDiff := math.Abs(t[i+1] - v) - if absDiff > norm || math.IsNaN(norm) { - norm = absDiff - } - } - return norm -} - -// Sum is -// var sum float64 -// for i := range x { -// sum += x[i] -// } -func Sum(x []float64) float64 { - var sum float64 - for _, v := range x { - sum += v - } - return sum -} diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/sum_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f64/sum_amd64.s deleted file mode 100644 index 22eede6e1..000000000 --- a/vendor/gonum.org/v1/gonum/internal/asm/f64/sum_amd64.s +++ /dev/null @@ -1,100 +0,0 @@ -// Copyright ©2018 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
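The Sum kernel in the hunk that follows peels at most one leading element to reach 16-byte alignment, then accumulates into four XMM partial sums (SUM through SUM_3), sixteen elements per trip, folding the partials together only in the tails and finishing with HADDPD. Splitting one running sum into four breaks the serial ADDPD dependency chain. A sketch of the same idea in Go; note that, like the asm, it can differ from a naive left-to-right sum in the last ulp, since float addition is not associative:

package f64sketch

// sum4 sketches the multi-accumulator reduction in the deleted
// sum_amd64.s: four independent partial sums, folded at the end.
func sum4(x []float64) float64 {
	var s0, s1, s2, s3 float64
	i := 0
	for ; i+4 <= len(x); i += 4 {
		s0 += x[i]
		s1 += x[i+1]
		s2 += x[i+2]
		s3 += x[i+3]
	}
	for ; i < len(x); i++ {
		s0 += x[i] // scalar tail
	}
	return (s0 + s2) + (s1 + s3)
}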
- -// +build !noasm,!appengine,!safe - -#include "textflag.h" - -#define X_PTR SI -#define IDX AX -#define LEN CX -#define TAIL BX -#define SUM X0 -#define SUM_1 X1 -#define SUM_2 X2 -#define SUM_3 X3 - -// func Sum(x []float64) float64 -TEXT ·Sum(SB), NOSPLIT, $0 - MOVQ x_base+0(FP), X_PTR // X_PTR = &x - MOVQ x_len+8(FP), LEN // LEN = len(x) - XORQ IDX, IDX // i = 0 - PXOR SUM, SUM // p_sum_i = 0 - CMPQ LEN, $0 // if LEN == 0 { return 0 } - JE sum_end - - PXOR SUM_1, SUM_1 - PXOR SUM_2, SUM_2 - PXOR SUM_3, SUM_3 - - MOVQ X_PTR, TAIL // Check memory alignment - ANDQ $15, TAIL // TAIL = &y % 16 - JZ no_trim // if TAIL == 0 { goto no_trim } - - // Align on 16-byte boundary - ADDSD (X_PTR), X0 // X0 += x[0] - INCQ IDX // i++ - DECQ LEN // LEN-- - DECQ TAIL // TAIL-- - JZ sum_end // if TAIL == 0 { return } - -no_trim: - MOVQ LEN, TAIL - SHRQ $4, LEN // LEN = floor( n / 16 ) - JZ sum_tail8 // if LEN == 0 { goto sum_tail8 } - -sum_loop: // sum 16x wide do { - ADDPD (SI)(AX*8), SUM // sum_i += x[i:i+2] - ADDPD 16(SI)(AX*8), SUM_1 - ADDPD 32(SI)(AX*8), SUM_2 - ADDPD 48(SI)(AX*8), SUM_3 - ADDPD 64(SI)(AX*8), SUM - ADDPD 80(SI)(AX*8), SUM_1 - ADDPD 96(SI)(AX*8), SUM_2 - ADDPD 112(SI)(AX*8), SUM_3 - ADDQ $16, IDX // i += 16 - DECQ LEN - JNZ sum_loop // } while --CX > 0 - -sum_tail8: - TESTQ $8, TAIL - JZ sum_tail4 - - ADDPD (SI)(AX*8), SUM // sum_i += x[i:i+2] - ADDPD 16(SI)(AX*8), SUM_1 - ADDPD 32(SI)(AX*8), SUM_2 - ADDPD 48(SI)(AX*8), SUM_3 - ADDQ $8, IDX - -sum_tail4: - ADDPD SUM_3, SUM - ADDPD SUM_2, SUM_1 - - TESTQ $4, TAIL - JZ sum_tail2 - - ADDPD (SI)(AX*8), SUM // sum_i += x[i:i+2] - ADDPD 16(SI)(AX*8), SUM_1 - ADDQ $4, IDX - -sum_tail2: - ADDPD SUM_1, SUM - - TESTQ $2, TAIL - JZ sum_tail1 - - ADDPD (SI)(AX*8), SUM // sum_i += x[i:i+2] - ADDQ $2, IDX - -sum_tail1: - HADDPD SUM, SUM // sum_i[0] += sum_i[1] - - TESTQ $1, TAIL - JZ sum_end - - ADDSD (SI)(IDX*8), SUM - -sum_end: // return sum - MOVSD SUM, sum+24(FP) - RET diff --git a/vendor/gonum.org/v1/gonum/internal/cmplx64/abs.go b/vendor/gonum.org/v1/gonum/internal/cmplx64/abs.go deleted file mode 100644 index ac6eb81c0..000000000 --- a/vendor/gonum.org/v1/gonum/internal/cmplx64/abs.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Copyright ©2017 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cmplx64 - -import math "gonum.org/v1/gonum/internal/math32" - -// Abs returns the absolute value (also called the modulus) of x. -func Abs(x complex64) float32 { return math.Hypot(real(x), imag(x)) } diff --git a/vendor/gonum.org/v1/gonum/internal/cmplx64/conj.go b/vendor/gonum.org/v1/gonum/internal/cmplx64/conj.go deleted file mode 100644 index 705262f2f..000000000 --- a/vendor/gonum.org/v1/gonum/internal/cmplx64/conj.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Copyright ©2017 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cmplx64 - -// Conj returns the complex conjugate of x. 
-func Conj(x complex64) complex64 { return complex(real(x), -imag(x)) } diff --git a/vendor/gonum.org/v1/gonum/internal/cmplx64/doc.go b/vendor/gonum.org/v1/gonum/internal/cmplx64/doc.go deleted file mode 100644 index 5424ea099..000000000 --- a/vendor/gonum.org/v1/gonum/internal/cmplx64/doc.go +++ /dev/null @@ -1,7 +0,0 @@ -// Copyright ©2017 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package cmplx64 provides complex64 versions of standard library math/cmplx -// package routines used by gonum/blas. -package cmplx64 // import "gonum.org/v1/gonum/internal/cmplx64" diff --git a/vendor/gonum.org/v1/gonum/internal/cmplx64/isinf.go b/vendor/gonum.org/v1/gonum/internal/cmplx64/isinf.go deleted file mode 100644 index 21d3d180e..000000000 --- a/vendor/gonum.org/v1/gonum/internal/cmplx64/isinf.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Copyright ©2017 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cmplx64 - -import math "gonum.org/v1/gonum/internal/math32" - -// IsInf returns true if either real(x) or imag(x) is an infinity. -func IsInf(x complex64) bool { - if math.IsInf(real(x), 0) || math.IsInf(imag(x), 0) { - return true - } - return false -} - -// Inf returns a complex infinity, complex(+Inf, +Inf). -func Inf() complex64 { - inf := math.Inf(1) - return complex(inf, inf) -} diff --git a/vendor/gonum.org/v1/gonum/internal/cmplx64/isnan.go b/vendor/gonum.org/v1/gonum/internal/cmplx64/isnan.go deleted file mode 100644 index 7e0bf788f..000000000 --- a/vendor/gonum.org/v1/gonum/internal/cmplx64/isnan.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Copyright ©2017 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cmplx64 - -import math "gonum.org/v1/gonum/internal/math32" - -// IsNaN returns true if either real(x) or imag(x) is NaN -// and neither is an infinity. -func IsNaN(x complex64) bool { - switch { - case math.IsInf(real(x), 0) || math.IsInf(imag(x), 0): - return false - case math.IsNaN(real(x)) || math.IsNaN(imag(x)): - return true - } - return false -} - -// NaN returns a complex ``not-a-number'' value. -func NaN() complex64 { - nan := math.NaN() - return complex(nan, nan) -} diff --git a/vendor/gonum.org/v1/gonum/internal/cmplx64/sqrt.go b/vendor/gonum.org/v1/gonum/internal/cmplx64/sqrt.go deleted file mode 100644 index 439987b4b..000000000 --- a/vendor/gonum.org/v1/gonum/internal/cmplx64/sqrt.go +++ /dev/null @@ -1,108 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Copyright ©2017 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cmplx64 - -import math "gonum.org/v1/gonum/internal/math32" - -// The original C code, the long comment, and the constants -// below are from http://netlib.sandia.gov/cephes/c9x-complex/clog.c. 
-// The go code is a simplified version of the original C. -// -// Cephes Math Library Release 2.8: June, 2000 -// Copyright 1984, 1987, 1989, 1992, 2000 by Stephen L. Moshier -// -// The readme file at http://netlib.sandia.gov/cephes/ says: -// Some software in this archive may be from the book _Methods and -// Programs for Mathematical Functions_ (Prentice-Hall or Simon & Schuster -// International, 1989) or from the Cephes Mathematical Library, a -// commercial product. In either event, it is copyrighted by the author. -// What you see here may be used freely but it comes with no support or -// guarantee. -// -// The two known misprints in the book are repaired here in the -// source listings for the gamma function and the incomplete beta -// integral. -// -// Stephen L. Moshier -// moshier@na-net.ornl.gov - -// Complex square root -// -// DESCRIPTION: -// -// If z = x + iy, r = |z|, then -// -// 1/2 -// Re w = [ (r + x)/2 ] , -// -// 1/2 -// Im w = [ (r - x)/2 ] . -// -// Cancelation error in r-x or r+x is avoided by using the -// identity 2 Re w Im w = y. -// -// Note that -w is also a square root of z. The root chosen -// is always in the right half plane and Im w has the same sign as y. -// -// ACCURACY: -// -// Relative error: -// arithmetic domain # trials peak rms -// DEC -10,+10 25000 3.2e-17 9.6e-18 -// IEEE -10,+10 1,000,000 2.9e-16 6.1e-17 - -// Sqrt returns the square root of x. -// The result r is chosen so that real(r) ≥ 0 and imag(r) has the same sign as imag(x). -func Sqrt(x complex64) complex64 { - if imag(x) == 0 { - if real(x) == 0 { - return complex(0, 0) - } - if real(x) < 0 { - return complex(0, math.Sqrt(-real(x))) - } - return complex(math.Sqrt(real(x)), 0) - } - if real(x) == 0 { - if imag(x) < 0 { - r := math.Sqrt(-0.5 * imag(x)) - return complex(r, -r) - } - r := math.Sqrt(0.5 * imag(x)) - return complex(r, r) - } - a := real(x) - b := imag(x) - var scale float32 - // Rescale to avoid internal overflow or underflow. - if math.Abs(a) > 4 || math.Abs(b) > 4 { - a *= 0.25 - b *= 0.25 - scale = 2 - } else { - a *= 1.8014398509481984e16 // 2**54 - b *= 1.8014398509481984e16 - scale = 7.450580596923828125e-9 // 2**-27 - } - r := math.Hypot(a, b) - var t float32 - if a > 0 { - t = math.Sqrt(0.5*r + 0.5*a) - r = scale * math.Abs((0.5*b)/t) - t *= scale - } else { - r = math.Sqrt(0.5*r - 0.5*a) - t = scale * math.Abs((0.5*b)/r) - r *= scale - } - if b < 0 { - return complex(t, -r) - } - return complex(t, r) -} diff --git a/vendor/gonum.org/v1/gonum/internal/math32/doc.go b/vendor/gonum.org/v1/gonum/internal/math32/doc.go deleted file mode 100644 index 68917c64e..000000000 --- a/vendor/gonum.org/v1/gonum/internal/math32/doc.go +++ /dev/null @@ -1,7 +0,0 @@ -// Copyright ©2017 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package math32 provides float32 versions of standard library math package -// routines used by gonum/blas/native. -package math32 // import "gonum.org/v1/gonum/internal/math32" diff --git a/vendor/gonum.org/v1/gonum/internal/math32/math.go b/vendor/gonum.org/v1/gonum/internal/math32/math.go deleted file mode 100644 index 56c90be02..000000000 --- a/vendor/gonum.org/v1/gonum/internal/math32/math.go +++ /dev/null @@ -1,111 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Copyright ©2015 The Gonum Authors. All rights reserved. 
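The Cephes identities quoted above are quickest to see in action with a float64 check against the standard library (the vendored cmplx64 package is internal to gonum and cannot be imported directly, so math/cmplx stands in for it here):

package main

import (
	"fmt"
	"math"
	"math/cmplx"
)

func main() {
	z := complex(3.0, 4.0) // |z| = 5, Sqrt(z) = 2 + 1i
	w := cmplx.Sqrt(z)

	// Re w = sqrt((r+x)/2), Im w = sqrt((r-x)/2) with the sign of y.
	r := cmplx.Abs(z)
	re := math.Sqrt((r + real(z)) / 2)
	im := math.Copysign(math.Sqrt((r-real(z))/2), imag(z))

	fmt.Println(w)      // (2+1i)
	fmt.Println(re, im) // 2 1
	// The cancellation-avoiding identity: 2 Re(w) Im(w) == imag(z).
	fmt.Println(2 * real(w) * imag(w)) // 4
}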
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package math32 - -import ( - "math" -) - -const ( - unan = 0x7fc00000 - uinf = 0x7f800000 - uneginf = 0xff800000 - mask = 0x7f8 >> 3 - shift = 32 - 8 - 1 - bias = 127 -) - -// Abs returns the absolute value of x. -// -// Special cases are: -// Abs(±Inf) = +Inf -// Abs(NaN) = NaN -func Abs(x float32) float32 { - switch { - case x < 0: - return -x - case x == 0: - return 0 // return correctly abs(-0) - } - return x -} - -// Copysign returns a value with the magnitude -// of x and the sign of y. -func Copysign(x, y float32) float32 { - const sign = 1 << 31 - return math.Float32frombits(math.Float32bits(x)&^sign | math.Float32bits(y)&sign) -} - -// Hypot returns Sqrt(p*p + q*q), taking care to avoid -// unnecessary overflow and underflow. -// -// Special cases are: -// Hypot(±Inf, q) = +Inf -// Hypot(p, ±Inf) = +Inf -// Hypot(NaN, q) = NaN -// Hypot(p, NaN) = NaN -func Hypot(p, q float32) float32 { - // special cases - switch { - case IsInf(p, 0) || IsInf(q, 0): - return Inf(1) - case IsNaN(p) || IsNaN(q): - return NaN() - } - if p < 0 { - p = -p - } - if q < 0 { - q = -q - } - if p < q { - p, q = q, p - } - if p == 0 { - return 0 - } - q = q / p - return p * Sqrt(1+q*q) -} - -// Inf returns positive infinity if sign >= 0, negative infinity if sign < 0. -func Inf(sign int) float32 { - var v uint32 - if sign >= 0 { - v = uinf - } else { - v = uneginf - } - return math.Float32frombits(v) -} - -// IsInf reports whether f is an infinity, according to sign. -// If sign > 0, IsInf reports whether f is positive infinity. -// If sign < 0, IsInf reports whether f is negative infinity. -// If sign == 0, IsInf reports whether f is either infinity. -func IsInf(f float32, sign int) bool { - // Test for infinity by comparing against maximum float. - // To avoid the floating-point hardware, could use: - // x := math.Float32bits(f); - // return sign >= 0 && x == uinf || sign <= 0 && x == uneginf; - return sign >= 0 && f > math.MaxFloat32 || sign <= 0 && f < -math.MaxFloat32 -} - -// IsNaN reports whether f is an IEEE 754 ``not-a-number'' value. -func IsNaN(f float32) (is bool) { - // IEEE 754 says that only NaNs satisfy f != f. - // To avoid the floating-point hardware, could use: - // x := math.Float32bits(f); - // return uint32(x>>shift)&mask == mask && x != uinf && x != uneginf - return f != f -} - -// NaN returns an IEEE 754 ``not-a-number'' value. -func NaN() float32 { return math.Float32frombits(unan) } diff --git a/vendor/gonum.org/v1/gonum/internal/math32/signbit.go b/vendor/gonum.org/v1/gonum/internal/math32/signbit.go deleted file mode 100644 index 3e9f0bb41..000000000 --- a/vendor/gonum.org/v1/gonum/internal/math32/signbit.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Copyright ©2017 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package math32 - -import "math" - -// Signbit returns true if x is negative or negative zero. 
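The Hypot above is the textbook scale-before-square trick. A float64 restatement, minus the Inf/NaN special cases of the vendored routine, shows why the naive formula fails where this one succeeds (a sketch, not the vendored code):

package main

import (
	"fmt"
	"math"
)

// hypot divides by the larger magnitude first, so q/p lies in [0, 1]
// and squaring can neither overflow nor underflow prematurely.
func hypot(p, q float64) float64 {
	p, q = math.Abs(p), math.Abs(q)
	if p < q {
		p, q = q, p
	}
	if p == 0 {
		return 0
	}
	q = q / p
	return p * math.Sqrt(1+q*q)
}

func main() {
	fmt.Println(hypot(3e300, 4e300))                // 5e+300
	fmt.Println(math.Sqrt(3e300*3e300 + 4e300*4e300)) // +Inf: naive form overflows
}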
-func Signbit(x float32) bool { - return math.Float32bits(x)&(1<<31) != 0 -} diff --git a/vendor/gonum.org/v1/gonum/internal/math32/sqrt.go b/vendor/gonum.org/v1/gonum/internal/math32/sqrt.go deleted file mode 100644 index bf630de99..000000000 --- a/vendor/gonum.org/v1/gonum/internal/math32/sqrt.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright ©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !amd64 noasm appengine safe - -package math32 - -import ( - "math" -) - -// Sqrt returns the square root of x. -// -// Special cases are: -// Sqrt(+Inf) = +Inf -// Sqrt(±0) = ±0 -// Sqrt(x < 0) = NaN -// Sqrt(NaN) = NaN -func Sqrt(x float32) float32 { - // FIXME(kortschak): Direct translation of the math package - // asm code for 386 fails to build. No test hardware is available - // for arm, so using conversion instead. - return float32(math.Sqrt(float64(x))) -} diff --git a/vendor/gonum.org/v1/gonum/internal/math32/sqrt_amd64.go b/vendor/gonum.org/v1/gonum/internal/math32/sqrt_amd64.go deleted file mode 100644 index 905ae5c68..000000000 --- a/vendor/gonum.org/v1/gonum/internal/math32/sqrt_amd64.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Copyright ©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !noasm,!appengine,!safe - -package math32 - -// Sqrt returns the square root of x. -// -// Special cases are: -// Sqrt(+Inf) = +Inf -// Sqrt(±0) = ±0 -// Sqrt(x < 0) = NaN -// Sqrt(NaN) = NaN -func Sqrt(x float32) float32 diff --git a/vendor/gonum.org/v1/gonum/internal/math32/sqrt_amd64.s b/vendor/gonum.org/v1/gonum/internal/math32/sqrt_amd64.s deleted file mode 100644 index fa2b8696e..000000000 --- a/vendor/gonum.org/v1/gonum/internal/math32/sqrt_amd64.s +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Copyright ©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//+build !noasm,!appengine,!safe - -// TODO(kortschak): use textflag.h after we drop Go 1.3 support -//#include "textflag.h" -// Don't insert stack check preamble. -#define NOSPLIT 4 - -// func Sqrt(x float32) float32 -TEXT ·Sqrt(SB),NOSPLIT,$0 - SQRTSS x+0(FP), X0 - MOVSS X0, ret+8(FP) - RET diff --git a/vendor/gonum.org/v1/gonum/lapack/doc.go b/vendor/gonum.org/v1/gonum/lapack/doc.go deleted file mode 100644 index 2475cb4aa..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/doc.go +++ /dev/null @@ -1,6 +0,0 @@ -// Copyright ©2018 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package lapack provides interfaces for the LAPACK linear algebra standard. -package lapack // import "gonum.org/v1/gonum/lapack" diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dbdsqr.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dbdsqr.go deleted file mode 100644 index 5f3833fd9..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/dbdsqr.go +++ /dev/null @@ -1,505 +0,0 @@ -// Copyright ©2015 The Gonum Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import ( - "math" - - "gonum.org/v1/gonum/blas" - "gonum.org/v1/gonum/blas/blas64" - "gonum.org/v1/gonum/lapack" -) - -// Dbdsqr performs a singular value decomposition of a real n×n bidiagonal matrix. -// -// The SVD of the bidiagonal matrix B is -// B = Q * S * P^T -// where S is a diagonal matrix of singular values, Q is an orthogonal matrix of -// left singular vectors, and P is an orthogonal matrix of right singular vectors. -// -// Q and P are only computed if requested. If left singular vectors are requested, -// this routine returns U * Q instead of Q, and if right singular vectors are -// requested P^T * VT is returned instead of P^T. -// -// Frequently Dbdsqr is used in conjunction with Dgebrd which reduces a general -// matrix A into bidiagonal form. In this case, the SVD of A is -// A = (U * Q) * S * (P^T * VT) -// -// This routine may also compute Q^T * C. -// -// d and e contain the elements of the bidiagonal matrix b. d must have length at -// least n, and e must have length at least n-1. Dbdsqr will panic if there is -// insufficient length. On exit, D contains the singular values of B in decreasing -// order. -// -// VT is a matrix of size n×ncvt whose elements are stored in vt. The elements -// of vt are modified to contain P^T * VT on exit. VT is not used if ncvt == 0. -// -// U is a matrix of size nru×n whose elements are stored in u. The elements -// of u are modified to contain U * Q on exit. U is not used if nru == 0. -// -// C is a matrix of size n×ncc whose elements are stored in c. The elements -// of c are modified to contain Q^T * C on exit. C is not used if ncc == 0. -// -// work contains temporary storage and must have length at least 4*(n-1). Dbdsqr -// will panic if there is insufficient working memory. -// -// Dbdsqr returns whether the decomposition was successful. -// -// Dbdsqr is an internal routine. It is exported for testing purposes. -func (impl Implementation) Dbdsqr(uplo blas.Uplo, n, ncvt, nru, ncc int, d, e, vt []float64, ldvt int, u []float64, ldu int, c []float64, ldc int, work []float64) (ok bool) { - switch { - case uplo != blas.Upper && uplo != blas.Lower: - panic(badUplo) - case n < 0: - panic(nLT0) - case ncvt < 0: - panic(ncvtLT0) - case nru < 0: - panic(nruLT0) - case ncc < 0: - panic(nccLT0) - case ldvt < max(1, ncvt): - panic(badLdVT) - case (ldu < max(1, n) && nru > 0) || (ldu < 1 && nru == 0): - panic(badLdU) - case ldc < max(1, ncc): - panic(badLdC) - } - - // Quick return if possible. - if n == 0 { - return true - } - - if len(vt) < (n-1)*ldvt+ncvt && ncvt != 0 { - panic(shortVT) - } - if len(u) < (nru-1)*ldu+n && nru != 0 { - panic(shortU) - } - if len(c) < (n-1)*ldc+ncc && ncc != 0 { - panic(shortC) - } - if len(d) < n { - panic(shortD) - } - if len(e) < n-1 { - panic(shortE) - } - if len(work) < 4*(n-1) { - panic(shortWork) - } - - var info int - bi := blas64.Implementation() - const maxIter = 6 - - if n != 1 { - // If the singular vectors do not need to be computed, use qd algorithm. - if !(ncvt > 0 || nru > 0 || ncc > 0) { - info = impl.Dlasq1(n, d, e, work) - // If info is 2 dqds didn't finish, and so try to. 
- if info != 2 { - return info == 0 - } - } - nm1 := n - 1 - nm12 := nm1 + nm1 - nm13 := nm12 + nm1 - idir := 0 - - eps := dlamchE - unfl := dlamchS - lower := uplo == blas.Lower - var cs, sn, r float64 - if lower { - for i := 0; i < n-1; i++ { - cs, sn, r = impl.Dlartg(d[i], e[i]) - d[i] = r - e[i] = sn * d[i+1] - d[i+1] *= cs - work[i] = cs - work[nm1+i] = sn - } - if nru > 0 { - impl.Dlasr(blas.Right, lapack.Variable, lapack.Forward, nru, n, work, work[n-1:], u, ldu) - } - if ncc > 0 { - impl.Dlasr(blas.Left, lapack.Variable, lapack.Forward, n, ncc, work, work[n-1:], c, ldc) - } - } - // Compute singular values to a relative accuracy of tol. If tol is negative - // the values will be computed to an absolute accuracy of math.Abs(tol) * norm(b) - tolmul := math.Max(10, math.Min(100, math.Pow(eps, -1.0/8))) - tol := tolmul * eps - var smax float64 - for i := 0; i < n; i++ { - smax = math.Max(smax, math.Abs(d[i])) - } - for i := 0; i < n-1; i++ { - smax = math.Max(smax, math.Abs(e[i])) - } - - var sminl float64 - var thresh float64 - if tol >= 0 { - sminoa := math.Abs(d[0]) - if sminoa != 0 { - mu := sminoa - for i := 1; i < n; i++ { - mu = math.Abs(d[i]) * (mu / (mu + math.Abs(e[i-1]))) - sminoa = math.Min(sminoa, mu) - if sminoa == 0 { - break - } - } - } - sminoa = sminoa / math.Sqrt(float64(n)) - thresh = math.Max(tol*sminoa, float64(maxIter*n*n)*unfl) - } else { - thresh = math.Max(math.Abs(tol)*smax, float64(maxIter*n*n)*unfl) - } - // Prepare for the main iteration loop for the singular values. - maxIt := maxIter * n * n - iter := 0 - oldl2 := -1 - oldm := -1 - // m points to the last element of unconverged part of matrix. - m := n - - Outer: - for m > 1 { - if iter > maxIt { - info = 0 - for i := 0; i < n-1; i++ { - if e[i] != 0 { - info++ - } - } - return info == 0 - } - // Find diagonal block of matrix to work on. - if tol < 0 && math.Abs(d[m-1]) <= thresh { - d[m-1] = 0 - } - smax = math.Abs(d[m-1]) - smin := smax - var l2 int - var broke bool - for l3 := 0; l3 < m-1; l3++ { - l2 = m - l3 - 2 - abss := math.Abs(d[l2]) - abse := math.Abs(e[l2]) - if tol < 0 && abss <= thresh { - d[l2] = 0 - } - if abse <= thresh { - broke = true - break - } - smin = math.Min(smin, abss) - smax = math.Max(math.Max(smax, abss), abse) - } - if broke { - e[l2] = 0 - if l2 == m-2 { - // Convergence of bottom singular value, return to top. - m-- - continue - } - l2++ - } else { - l2 = 0 - } - // e[ll] through e[m-2] are nonzero, e[ll-1] is zero - if l2 == m-2 { - // Handle 2×2 block separately. - var sinr, cosr, sinl, cosl float64 - d[m-1], d[m-2], sinr, cosr, sinl, cosl = impl.Dlasv2(d[m-2], e[m-2], d[m-1]) - e[m-2] = 0 - if ncvt > 0 { - bi.Drot(ncvt, vt[(m-2)*ldvt:], 1, vt[(m-1)*ldvt:], 1, cosr, sinr) - } - if nru > 0 { - bi.Drot(nru, u[m-2:], ldu, u[m-1:], ldu, cosl, sinl) - } - if ncc > 0 { - bi.Drot(ncc, c[(m-2)*ldc:], 1, c[(m-1)*ldc:], 1, cosl, sinl) - } - m -= 2 - continue - } - // If working on a new submatrix, choose shift direction from larger end - // diagonal element toward smaller. - if l2 > oldm-1 || m-1 < oldl2 { - if math.Abs(d[l2]) >= math.Abs(d[m-1]) { - idir = 1 - } else { - idir = 2 - } - } - // Apply convergence tests. - // TODO(btracey): There is a lot of similar looking code here. See - // if there is a better way to de-duplicate. - if idir == 1 { - // Run convergence test in forward direction. - // First apply standard test to bottom of matrix. 
- if math.Abs(e[m-2]) <= math.Abs(tol)*math.Abs(d[m-1]) || (tol < 0 && math.Abs(e[m-2]) <= thresh) { - e[m-2] = 0 - continue - } - if tol >= 0 { - // If relative accuracy desired, apply convergence criterion forward. - mu := math.Abs(d[l2]) - sminl = mu - for l3 := l2; l3 < m-1; l3++ { - if math.Abs(e[l3]) <= tol*mu { - e[l3] = 0 - continue Outer - } - mu = math.Abs(d[l3+1]) * (mu / (mu + math.Abs(e[l3]))) - sminl = math.Min(sminl, mu) - } - } - } else { - // Run convergence test in backward direction. - // First apply standard test to top of matrix. - if math.Abs(e[l2]) <= math.Abs(tol)*math.Abs(d[l2]) || (tol < 0 && math.Abs(e[l2]) <= thresh) { - e[l2] = 0 - continue - } - if tol >= 0 { - // If relative accuracy desired, apply convergence criterion backward. - mu := math.Abs(d[m-1]) - sminl = mu - for l3 := m - 2; l3 >= l2; l3-- { - if math.Abs(e[l3]) <= tol*mu { - e[l3] = 0 - continue Outer - } - mu = math.Abs(d[l3]) * (mu / (mu + math.Abs(e[l3]))) - sminl = math.Min(sminl, mu) - } - } - } - oldl2 = l2 - oldm = m - // Compute shift. First, test if shifting would ruin relative accuracy, - // and if so set the shift to zero. - var shift float64 - if tol >= 0 && float64(n)*tol*(sminl/smax) <= math.Max(eps, (1.0/100)*tol) { - shift = 0 - } else { - var sl2 float64 - if idir == 1 { - sl2 = math.Abs(d[l2]) - shift, _ = impl.Dlas2(d[m-2], e[m-2], d[m-1]) - } else { - sl2 = math.Abs(d[m-1]) - shift, _ = impl.Dlas2(d[l2], e[l2], d[l2+1]) - } - // Test if shift is negligible - if sl2 > 0 { - if (shift/sl2)*(shift/sl2) < eps { - shift = 0 - } - } - } - iter += m - l2 + 1 - // If no shift, do simplified QR iteration. - if shift == 0 { - if idir == 1 { - cs := 1.0 - oldcs := 1.0 - var sn, r, oldsn float64 - for i := l2; i < m-1; i++ { - cs, sn, r = impl.Dlartg(d[i]*cs, e[i]) - if i > l2 { - e[i-1] = oldsn * r - } - oldcs, oldsn, d[i] = impl.Dlartg(oldcs*r, d[i+1]*sn) - work[i-l2] = cs - work[i-l2+nm1] = sn - work[i-l2+nm12] = oldcs - work[i-l2+nm13] = oldsn - } - h := d[m-1] * cs - d[m-1] = h * oldcs - e[m-2] = h * oldsn - if ncvt > 0 { - impl.Dlasr(blas.Left, lapack.Variable, lapack.Forward, m-l2, ncvt, work, work[n-1:], vt[l2*ldvt:], ldvt) - } - if nru > 0 { - impl.Dlasr(blas.Right, lapack.Variable, lapack.Forward, nru, m-l2, work[nm12:], work[nm13:], u[l2:], ldu) - } - if ncc > 0 { - impl.Dlasr(blas.Left, lapack.Variable, lapack.Forward, m-l2, ncc, work[nm12:], work[nm13:], c[l2*ldc:], ldc) - } - if math.Abs(e[m-2]) < thresh { - e[m-2] = 0 - } - } else { - cs := 1.0 - oldcs := 1.0 - var sn, r, oldsn float64 - for i := m - 1; i >= l2+1; i-- { - cs, sn, r = impl.Dlartg(d[i]*cs, e[i-1]) - if i < m-1 { - e[i] = oldsn * r - } - oldcs, oldsn, d[i] = impl.Dlartg(oldcs*r, d[i-1]*sn) - work[i-l2-1] = cs - work[i-l2+nm1-1] = -sn - work[i-l2+nm12-1] = oldcs - work[i-l2+nm13-1] = -oldsn - } - h := d[l2] * cs - d[l2] = h * oldcs - e[l2] = h * oldsn - if ncvt > 0 { - impl.Dlasr(blas.Left, lapack.Variable, lapack.Backward, m-l2, ncvt, work[nm12:], work[nm13:], vt[l2*ldvt:], ldvt) - } - if nru > 0 { - impl.Dlasr(blas.Right, lapack.Variable, lapack.Backward, nru, m-l2, work, work[n-1:], u[l2:], ldu) - } - if ncc > 0 { - impl.Dlasr(blas.Left, lapack.Variable, lapack.Backward, m-l2, ncc, work, work[n-1:], c[l2*ldc:], ldc) - } - if math.Abs(e[l2]) <= thresh { - e[l2] = 0 - } - } - } else { - // Use nonzero shift. - if idir == 1 { - // Chase bulge from top to bottom. Save cosines and sines for - // later singular vector updates. 
- f := (math.Abs(d[l2]) - shift) * (math.Copysign(1, d[l2]) + shift/d[l2]) - g := e[l2] - var cosl, sinl float64 - for i := l2; i < m-1; i++ { - cosr, sinr, r := impl.Dlartg(f, g) - if i > l2 { - e[i-1] = r - } - f = cosr*d[i] + sinr*e[i] - e[i] = cosr*e[i] - sinr*d[i] - g = sinr * d[i+1] - d[i+1] *= cosr - cosl, sinl, r = impl.Dlartg(f, g) - d[i] = r - f = cosl*e[i] + sinl*d[i+1] - d[i+1] = cosl*d[i+1] - sinl*e[i] - if i < m-2 { - g = sinl * e[i+1] - e[i+1] = cosl * e[i+1] - } - work[i-l2] = cosr - work[i-l2+nm1] = sinr - work[i-l2+nm12] = cosl - work[i-l2+nm13] = sinl - } - e[m-2] = f - if ncvt > 0 { - impl.Dlasr(blas.Left, lapack.Variable, lapack.Forward, m-l2, ncvt, work, work[n-1:], vt[l2*ldvt:], ldvt) - } - if nru > 0 { - impl.Dlasr(blas.Right, lapack.Variable, lapack.Forward, nru, m-l2, work[nm12:], work[nm13:], u[l2:], ldu) - } - if ncc > 0 { - impl.Dlasr(blas.Left, lapack.Variable, lapack.Forward, m-l2, ncc, work[nm12:], work[nm13:], c[l2*ldc:], ldc) - } - if math.Abs(e[m-2]) <= thresh { - e[m-2] = 0 - } - } else { - // Chase bulge from top to bottom. Save cosines and sines for - // later singular vector updates. - f := (math.Abs(d[m-1]) - shift) * (math.Copysign(1, d[m-1]) + shift/d[m-1]) - g := e[m-2] - for i := m - 1; i > l2; i-- { - cosr, sinr, r := impl.Dlartg(f, g) - if i < m-1 { - e[i] = r - } - f = cosr*d[i] + sinr*e[i-1] - e[i-1] = cosr*e[i-1] - sinr*d[i] - g = sinr * d[i-1] - d[i-1] *= cosr - cosl, sinl, r := impl.Dlartg(f, g) - d[i] = r - f = cosl*e[i-1] + sinl*d[i-1] - d[i-1] = cosl*d[i-1] - sinl*e[i-1] - if i > l2+1 { - g = sinl * e[i-2] - e[i-2] *= cosl - } - work[i-l2-1] = cosr - work[i-l2+nm1-1] = -sinr - work[i-l2+nm12-1] = cosl - work[i-l2+nm13-1] = -sinl - } - e[l2] = f - if math.Abs(e[l2]) <= thresh { - e[l2] = 0 - } - if ncvt > 0 { - impl.Dlasr(blas.Left, lapack.Variable, lapack.Backward, m-l2, ncvt, work[nm12:], work[nm13:], vt[l2*ldvt:], ldvt) - } - if nru > 0 { - impl.Dlasr(blas.Right, lapack.Variable, lapack.Backward, nru, m-l2, work, work[n-1:], u[l2:], ldu) - } - if ncc > 0 { - impl.Dlasr(blas.Left, lapack.Variable, lapack.Backward, m-l2, ncc, work, work[n-1:], c[l2*ldc:], ldc) - } - } - } - } - } - - // All singular values converged, make them positive. - for i := 0; i < n; i++ { - if d[i] < 0 { - d[i] *= -1 - if ncvt > 0 { - bi.Dscal(ncvt, -1, vt[i*ldvt:], 1) - } - } - } - - // Sort the singular values in decreasing order. - for i := 0; i < n-1; i++ { - isub := 0 - smin := d[0] - for j := 1; j < n-i; j++ { - if d[j] <= smin { - isub = j - smin = d[j] - } - } - if isub != n-i { - // Swap singular values and vectors. - d[isub] = d[n-i-1] - d[n-i-1] = smin - if ncvt > 0 { - bi.Dswap(ncvt, vt[isub*ldvt:], 1, vt[(n-i-1)*ldvt:], 1) - } - if nru > 0 { - bi.Dswap(nru, u[isub:], ldu, u[n-i-1:], ldu) - } - if ncc > 0 { - bi.Dswap(ncc, c[isub*ldc:], 1, c[(n-i-1)*ldc:], 1) - } - } - } - info = 0 - for i := 0; i < n-1; i++ { - if e[i] != 0 { - info++ - } - } - return info == 0 -} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dgebak.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dgebak.go deleted file mode 100644 index 7caa0b173..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/dgebak.go +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright ©2016 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
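Dbdsqr's calling convention, as documented above, is easiest to see on a matrix with known singular values. A minimal sketch against the exported-for-testing API (singular values only, so ncvt = nru = ncc = 0 and the vector arguments may be nil; ordinary callers would go through gonum's higher-level mat package instead):

package main

import (
	"fmt"

	"gonum.org/v1/gonum/blas"
	"gonum.org/v1/gonum/lapack/gonum"
)

func main() {
	impl := gonum.Implementation{}
	// Upper-bidiagonal B = [3 4; 0 0] has singular values 5 and 0.
	n := 2
	d := []float64{3, 0} // diagonal
	e := []float64{4}    // superdiagonal
	work := make([]float64, 4*(n-1))
	ok := impl.Dbdsqr(blas.Upper, n, 0, 0, 0, d, e, nil, 1, nil, 1, nil, 1, work)
	fmt.Println(ok, d) // true [5 0], in decreasing order
}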
- -package gonum - -import ( - "gonum.org/v1/gonum/blas/blas64" - "gonum.org/v1/gonum/lapack" -) - -// Dgebak updates an n×m matrix V as -// V = P D V, if side == lapack.EVRight, -// V = P D^{-1} V, if side == lapack.EVLeft, -// where P and D are n×n permutation and scaling matrices, respectively, -// implicitly represented by job, scale, ilo and ihi as returned by Dgebal. -// -// Typically, columns of the matrix V contain the right or left (determined by -// side) eigenvectors of the balanced matrix output by Dgebal, and Dgebak forms -// the eigenvectors of the original matrix. -// -// Dgebak is an internal routine. It is exported for testing purposes. -func (impl Implementation) Dgebak(job lapack.BalanceJob, side lapack.EVSide, n, ilo, ihi int, scale []float64, m int, v []float64, ldv int) { - switch { - case job != lapack.BalanceNone && job != lapack.Permute && job != lapack.Scale && job != lapack.PermuteScale: - panic(badBalanceJob) - case side != lapack.EVLeft && side != lapack.EVRight: - panic(badEVSide) - case n < 0: - panic(nLT0) - case ilo < 0 || max(0, n-1) < ilo: - panic(badIlo) - case ihi < min(ilo, n-1) || n <= ihi: - panic(badIhi) - case m < 0: - panic(mLT0) - case ldv < max(1, m): - panic(badLdV) - } - - // Quick return if possible. - if n == 0 || m == 0 { - return - } - - if len(scale) < n { - panic(shortScale) - } - if len(v) < (n-1)*ldv+m { - panic(shortV) - } - - // Quick return if possible. - if job == lapack.BalanceNone { - return - } - - bi := blas64.Implementation() - if ilo != ihi && job != lapack.Permute { - // Backward balance. - if side == lapack.EVRight { - for i := ilo; i <= ihi; i++ { - bi.Dscal(m, scale[i], v[i*ldv:], 1) - } - } else { - for i := ilo; i <= ihi; i++ { - bi.Dscal(m, 1/scale[i], v[i*ldv:], 1) - } - } - } - if job == lapack.Scale { - return - } - // Backward permutation. - for i := ilo - 1; i >= 0; i-- { - k := int(scale[i]) - if k == i { - continue - } - bi.Dswap(m, v[i*ldv:], 1, v[k*ldv:], 1) - } - for i := ihi + 1; i < n; i++ { - k := int(scale[i]) - if k == i { - continue - } - bi.Dswap(m, v[i*ldv:], 1, v[k*ldv:], 1) - } -} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dgebal.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dgebal.go deleted file mode 100644 index 6fb5170cd..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/dgebal.go +++ /dev/null @@ -1,239 +0,0 @@ -// Copyright ©2016 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import ( - "math" - - "gonum.org/v1/gonum/blas/blas64" - "gonum.org/v1/gonum/lapack" -) - -// Dgebal balances an n×n matrix A. Balancing consists of two stages, permuting -// and scaling. Both steps are optional and depend on the value of job. -// -// Permuting consists of applying a permutation matrix P such that the matrix -// that results from P^T*A*P takes the upper block triangular form -// [ T1 X Y ] -// P^T A P = [ 0 B Z ], -// [ 0 0 T2 ] -// where T1 and T2 are upper triangular matrices and B contains at least one -// nonzero off-diagonal element in each row and column. The indices ilo and ihi -// mark the starting and ending columns of the submatrix B. The eigenvalues of A -// isolated in the first 0 to ilo-1 and last ihi+1 to n-1 elements on the -// diagonal can be read off without any roundoff error. -// -// Scaling consists of applying a diagonal similarity transformation D such that -// D^{-1}*B*D has the 1-norm of each row and its corresponding column nearly -// equal. 
The output matrix is -// [ T1 X*D Y ] -// [ 0 inv(D)*B*D inv(D)*Z ]. -// [ 0 0 T2 ] -// Scaling may reduce the 1-norm of the matrix, and improve the accuracy of -// the computed eigenvalues and/or eigenvectors. -// -// job specifies the operations that will be performed on A. -// If job is lapack.BalanceNone, Dgebal sets scale[i] = 1 for all i and returns ilo=0, ihi=n-1. -// If job is lapack.Permute, only permuting will be done. -// If job is lapack.Scale, only scaling will be done. -// If job is lapack.PermuteScale, both permuting and scaling will be done. -// -// On return, if job is lapack.Permute or lapack.PermuteScale, it will hold that -// A[i,j] == 0, for i > j and j ∈ {0, ..., ilo-1, ihi+1, ..., n-1}. -// If job is lapack.BalanceNone or lapack.Scale, or if n == 0, it will hold that -// ilo == 0 and ihi == n-1. -// -// On return, scale will contain information about the permutations and scaling -// factors applied to A. If π(j) denotes the index of the column interchanged -// with column j, and D[j,j] denotes the scaling factor applied to column j, -// then -// scale[j] == π(j), for j ∈ {0, ..., ilo-1, ihi+1, ..., n-1}, -// == D[j,j], for j ∈ {ilo, ..., ihi}. -// scale must have length equal to n, otherwise Dgebal will panic. -// -// Dgebal is an internal routine. It is exported for testing purposes. -func (impl Implementation) Dgebal(job lapack.BalanceJob, n int, a []float64, lda int, scale []float64) (ilo, ihi int) { - switch { - case job != lapack.BalanceNone && job != lapack.Permute && job != lapack.Scale && job != lapack.PermuteScale: - panic(badBalanceJob) - case n < 0: - panic(nLT0) - case lda < max(1, n): - panic(badLdA) - } - - ilo = 0 - ihi = n - 1 - - if n == 0 { - return ilo, ihi - } - - if len(scale) != n { - panic(shortScale) - } - - if job == lapack.BalanceNone { - for i := range scale { - scale[i] = 1 - } - return ilo, ihi - } - - if len(a) < (n-1)*lda+n { - panic(shortA) - } - - bi := blas64.Implementation() - swapped := true - - if job == lapack.Scale { - goto scaling - } - - // Permutation to isolate eigenvalues if possible. - // - // Search for rows isolating an eigenvalue and push them down. - for swapped { - swapped = false - rows: - for i := ihi; i >= 0; i-- { - for j := 0; j <= ihi; j++ { - if i == j { - continue - } - if a[i*lda+j] != 0 { - continue rows - } - } - // Row i has only zero off-diagonal elements in the - // block A[ilo:ihi+1,ilo:ihi+1]. - scale[ihi] = float64(i) - if i != ihi { - bi.Dswap(ihi+1, a[i:], lda, a[ihi:], lda) - bi.Dswap(n, a[i*lda:], 1, a[ihi*lda:], 1) - } - if ihi == 0 { - scale[0] = 1 - return ilo, ihi - } - ihi-- - swapped = true - break - } - } - // Search for columns isolating an eigenvalue and push them left. - swapped = true - for swapped { - swapped = false - columns: - for j := ilo; j <= ihi; j++ { - for i := ilo; i <= ihi; i++ { - if i == j { - continue - } - if a[i*lda+j] != 0 { - continue columns - } - } - // Column j has only zero off-diagonal elements in the - // block A[ilo:ihi+1,ilo:ihi+1]. - scale[ilo] = float64(j) - if j != ilo { - bi.Dswap(ihi+1, a[j:], lda, a[ilo:], lda) - bi.Dswap(n-ilo, a[j*lda+ilo:], 1, a[ilo*lda+ilo:], 1) - } - swapped = true - ilo++ - break - } - } - -scaling: - for i := ilo; i <= ihi; i++ { - scale[i] = 1 - } - - if job == lapack.Permute { - return ilo, ihi - } - - // Balance the submatrix in rows ilo to ihi. - - const ( - // sclfac should be a power of 2 to avoid roundoff errors. 
- // Elements of scale are restricted to powers of sclfac, - // therefore the matrix will be only nearly balanced. - sclfac = 2 - // factor determines the minimum reduction of the row and column - // norms that is considered non-negligible. It must be less than 1. - factor = 0.95 - ) - sfmin1 := dlamchS / dlamchP - sfmax1 := 1 / sfmin1 - sfmin2 := sfmin1 * sclfac - sfmax2 := 1 / sfmin2 - - // Iterative loop for norm reduction. - var conv bool - for !conv { - conv = true - for i := ilo; i <= ihi; i++ { - c := bi.Dnrm2(ihi-ilo+1, a[ilo*lda+i:], lda) - r := bi.Dnrm2(ihi-ilo+1, a[i*lda+ilo:], 1) - ica := bi.Idamax(ihi+1, a[i:], lda) - ca := math.Abs(a[ica*lda+i]) - ira := bi.Idamax(n-ilo, a[i*lda+ilo:], 1) - ra := math.Abs(a[i*lda+ilo+ira]) - - // Guard against zero c or r due to underflow. - if c == 0 || r == 0 { - continue - } - g := r / sclfac - f := 1.0 - s := c + r - for c < g && math.Max(f, math.Max(c, ca)) < sfmax2 && math.Min(r, math.Min(g, ra)) > sfmin2 { - if math.IsNaN(c + f + ca + r + g + ra) { - // Panic if NaN to avoid infinite loop. - panic("lapack: NaN") - } - f *= sclfac - c *= sclfac - ca *= sclfac - g /= sclfac - r /= sclfac - ra /= sclfac - } - g = c / sclfac - for r <= g && math.Max(r, ra) < sfmax2 && math.Min(math.Min(f, c), math.Min(g, ca)) > sfmin2 { - f /= sclfac - c /= sclfac - ca /= sclfac - g /= sclfac - r *= sclfac - ra *= sclfac - } - - if c+r >= factor*s { - // Reduction would be negligible. - continue - } - if f < 1 && scale[i] < 1 && f*scale[i] <= sfmin1 { - continue - } - if f > 1 && scale[i] > 1 && scale[i] >= sfmax1/f { - continue - } - - // Now balance. - scale[i] *= f - bi.Dscal(n-ilo, 1/f, a[i*lda+ilo:], 1) - bi.Dscal(ihi+1, f, a[i:], lda) - conv = false - } - } - return ilo, ihi -} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dgebd2.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dgebd2.go deleted file mode 100644 index cf951a120..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/dgebd2.go +++ /dev/null @@ -1,86 +0,0 @@ -// Copyright ©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import "gonum.org/v1/gonum/blas" - -// Dgebd2 reduces an m×n matrix A to upper or lower bidiagonal form by an orthogonal -// transformation. -// Q^T * A * P = B -// if m >= n, B is upper diagonal, otherwise B is lower bidiagonal. -// d is the diagonal, len = min(m,n) -// e is the off-diagonal len = min(m,n)-1 -// -// Dgebd2 is an internal routine. It is exported for testing purposes. -func (impl Implementation) Dgebd2(m, n int, a []float64, lda int, d, e, tauQ, tauP, work []float64) { - switch { - case m < 0: - panic(mLT0) - case n < 0: - panic(nLT0) - case lda < max(1, n): - panic(badLdA) - } - - // Quick return if possible. - minmn := min(m, n) - if minmn == 0 { - return - } - - switch { - case len(d) < minmn: - panic(shortD) - case len(e) < minmn-1: - panic(shortE) - case len(tauQ) < minmn: - panic(shortTauQ) - case len(tauP) < minmn: - panic(shortTauP) - case len(work) < max(m, n): - panic(shortWork) - } - - if m >= n { - for i := 0; i < n; i++ { - a[i*lda+i], tauQ[i] = impl.Dlarfg(m-i, a[i*lda+i], a[min(i+1, m-1)*lda+i:], lda) - d[i] = a[i*lda+i] - a[i*lda+i] = 1 - // Apply H_i to A[i:m, i+1:n] from the left. 
- if i < n-1 { - impl.Dlarf(blas.Left, m-i, n-i-1, a[i*lda+i:], lda, tauQ[i], a[i*lda+i+1:], lda, work) - } - a[i*lda+i] = d[i] - if i < n-1 { - a[i*lda+i+1], tauP[i] = impl.Dlarfg(n-i-1, a[i*lda+i+1], a[i*lda+min(i+2, n-1):], 1) - e[i] = a[i*lda+i+1] - a[i*lda+i+1] = 1 - impl.Dlarf(blas.Right, m-i-1, n-i-1, a[i*lda+i+1:], 1, tauP[i], a[(i+1)*lda+i+1:], lda, work) - a[i*lda+i+1] = e[i] - } else { - tauP[i] = 0 - } - } - return - } - for i := 0; i < m; i++ { - a[i*lda+i], tauP[i] = impl.Dlarfg(n-i, a[i*lda+i], a[i*lda+min(i+1, n-1):], 1) - d[i] = a[i*lda+i] - a[i*lda+i] = 1 - if i < m-1 { - impl.Dlarf(blas.Right, m-i-1, n-i, a[i*lda+i:], 1, tauP[i], a[(i+1)*lda+i:], lda, work) - } - a[i*lda+i] = d[i] - if i < m-1 { - a[(i+1)*lda+i], tauQ[i] = impl.Dlarfg(m-i-1, a[(i+1)*lda+i], a[min(i+2, m-1)*lda+i:], lda) - e[i] = a[(i+1)*lda+i] - a[(i+1)*lda+i] = 1 - impl.Dlarf(blas.Left, m-i-1, n-i-1, a[(i+1)*lda+i:], lda, tauQ[i], a[(i+1)*lda+i+1:], lda, work) - a[(i+1)*lda+i] = e[i] - } else { - tauQ[i] = 0 - } - } -} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dgebrd.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dgebrd.go deleted file mode 100644 index f03bf8d93..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/dgebrd.go +++ /dev/null @@ -1,161 +0,0 @@ -// Copyright ©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import ( - "gonum.org/v1/gonum/blas" - "gonum.org/v1/gonum/blas/blas64" -) - -// Dgebrd reduces a general m×n matrix A to upper or lower bidiagonal form B by -// an orthogonal transformation: -// Q^T * A * P = B. -// The diagonal elements of B are stored in d and the off-diagonal elements are stored -// in e. These are additionally stored along the diagonal of A and the off-diagonal -// of A. If m >= n B is an upper-bidiagonal matrix, and if m < n B is a -// lower-bidiagonal matrix. -// -// The remaining elements of A store the data needed to construct Q and P. -// The matrices Q and P are products of elementary reflectors -// if m >= n, Q = H_0 * H_1 * ... * H_{n-1}, -// P = G_0 * G_1 * ... * G_{n-2}, -// if m < n, Q = H_0 * H_1 * ... * H_{m-2}, -// P = G_0 * G_1 * ... * G_{m-1}, -// where -// H_i = I - tauQ[i] * v_i * v_i^T, -// G_i = I - tauP[i] * u_i * u_i^T. -// -// As an example, on exit the entries of A when m = 6, and n = 5 -// [ d e u1 u1 u1] -// [v1 d e u2 u2] -// [v1 v2 d e u3] -// [v1 v2 v3 d e] -// [v1 v2 v3 v4 d] -// [v1 v2 v3 v4 v5] -// and when m = 5, n = 6 -// [ d u1 u1 u1 u1 u1] -// [ e d u2 u2 u2 u2] -// [v1 e d u3 u3 u3] -// [v1 v2 e d u4 u4] -// [v1 v2 v3 e d u5] -// -// d, tauQ, and tauP must all have length at least min(m,n), and e must have -// length min(m,n) - 1, unless lwork is -1 when there is no check except for -// work which must have a length of at least one. -// -// work is temporary storage, and lwork specifies the usable memory length. -// At minimum, lwork >= max(1,m,n) or be -1 and this function will panic otherwise. -// Dgebrd is blocked decomposition, but the block size is limited -// by the temporary space available. If lwork == -1, instead of performing Dgebrd, -// the optimal work length will be stored into work[0]. -// -// Dgebrd is an internal routine. It is exported for testing purposes. 
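The lwork == -1 convention described above follows the usual two-call pattern: query, allocate, compute. A sketch using only the signature shown in this file, with an arbitrary 4×3 input:

package main

import (
	"fmt"

	"gonum.org/v1/gonum/lapack/gonum"
)

func main() {
	impl := gonum.Implementation{}
	m, n := 4, 3
	a := []float64{
		4, 1, 0,
		2, 5, 1,
		0, 3, 6,
		1, 0, 2,
	}
	minmn := 3 // min(m, n)
	d := make([]float64, minmn)
	e := make([]float64, minmn-1)
	tauQ := make([]float64, minmn)
	tauP := make([]float64, minmn)

	// Workspace query: with lwork == -1 only work[0] is written.
	query := make([]float64, 1)
	impl.Dgebrd(m, n, a, n, d, e, tauQ, tauP, query, -1)
	work := make([]float64, int(query[0]))

	impl.Dgebrd(m, n, a, n, d, e, tauQ, tauP, work, len(work))
	fmt.Println("diagonal d:", d, "off-diagonal e:", e)
}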
-func (impl Implementation) Dgebrd(m, n int, a []float64, lda int, d, e, tauQ, tauP, work []float64, lwork int) { - switch { - case m < 0: - panic(mLT0) - case n < 0: - panic(nLT0) - case lda < max(1, n): - panic(badLdA) - case lwork < max(1, max(m, n)) && lwork != -1: - panic(badLWork) - case len(work) < max(1, lwork): - panic(shortWork) - } - - // Quick return if possible. - minmn := min(m, n) - if minmn == 0 { - work[0] = 1 - return - } - - nb := impl.Ilaenv(1, "DGEBRD", " ", m, n, -1, -1) - lwkopt := (m + n) * nb - if lwork == -1 { - work[0] = float64(lwkopt) - return - } - - switch { - case len(a) < (m-1)*lda+n: - panic(shortA) - case len(d) < minmn: - panic(shortD) - case len(e) < minmn-1: - panic(shortE) - case len(tauQ) < minmn: - panic(shortTauQ) - case len(tauP) < minmn: - panic(shortTauP) - } - - nx := minmn - ws := max(m, n) - if 1 < nb && nb < minmn { - // At least one blocked operation can be done. - // Get the crossover point nx. - nx = max(nb, impl.Ilaenv(3, "DGEBRD", " ", m, n, -1, -1)) - // Determine when to switch from blocked to unblocked code. - if nx < minmn { - // At least one blocked operation will be done. - ws = (m + n) * nb - if lwork < ws { - // Not enough work space for the optimal nb, - // consider using a smaller block size. - nbmin := impl.Ilaenv(2, "DGEBRD", " ", m, n, -1, -1) - if lwork >= (m+n)*nbmin { - // Enough work space for minimum block size. - nb = lwork / (m + n) - } else { - nb = minmn - nx = minmn - } - } - } - } - bi := blas64.Implementation() - ldworkx := nb - ldworky := nb - var i int - for i = 0; i < minmn-nx; i += nb { - // Reduce rows and columns i:i+nb to bidiagonal form and return - // the matrices X and Y which are needed to update the unreduced - // part of the matrix. - // X is stored in the first m rows of work, y in the next rows. - x := work[:m*ldworkx] - y := work[m*ldworkx:] - impl.Dlabrd(m-i, n-i, nb, a[i*lda+i:], lda, - d[i:], e[i:], tauQ[i:], tauP[i:], - x, ldworkx, y, ldworky) - - // Update the trailing submatrix A[i+nb:m,i+nb:n], using an update - // of the form A := A - V*Y**T - X*U**T - bi.Dgemm(blas.NoTrans, blas.Trans, m-i-nb, n-i-nb, nb, - -1, a[(i+nb)*lda+i:], lda, y[nb*ldworky:], ldworky, - 1, a[(i+nb)*lda+i+nb:], lda) - - bi.Dgemm(blas.NoTrans, blas.NoTrans, m-i-nb, n-i-nb, nb, - -1, x[nb*ldworkx:], ldworkx, a[i*lda+i+nb:], lda, - 1, a[(i+nb)*lda+i+nb:], lda) - - // Copy diagonal and off-diagonal elements of B back into A. - if m >= n { - for j := i; j < i+nb; j++ { - a[j*lda+j] = d[j] - a[j*lda+j+1] = e[j] - } - } else { - for j := i; j < i+nb; j++ { - a[j*lda+j] = d[j] - a[(j+1)*lda+j] = e[j] - } - } - } - // Use unblocked code to reduce the remainder of the matrix. - impl.Dgebd2(m-i, n-i, a[i*lda+i:], lda, d[i:], e[i:], tauQ[i:], tauP[i:], work) - work[0] = float64(ws) -} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dgecon.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dgecon.go deleted file mode 100644 index 1d1ca586b..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/dgecon.go +++ /dev/null @@ -1,92 +0,0 @@ -// Copyright ©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import ( - "math" - - "gonum.org/v1/gonum/blas" - "gonum.org/v1/gonum/blas/blas64" - "gonum.org/v1/gonum/lapack" -) - -// Dgecon estimates the reciprocal of the condition number of the n×n matrix A -// given the LU decomposition of the matrix. The condition number computed may -// be based on the 1-norm or the ∞-norm. 
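Dgecon's documentation continues below; the intended call sequence is norm first, factorization second, estimate third, since the factorization overwrites A. A sketch of that sequence, in which Dgetrf's signature is assumed from the same package (it is not part of this diff):

package main

import (
	"fmt"

	"gonum.org/v1/gonum/lapack"
	"gonum.org/v1/gonum/lapack/gonum"
)

func main() {
	impl := gonum.Implementation{}
	n := 2
	a := []float64{
		4, 1,
		2, 3,
	}
	// 1-norm of A before it is overwritten by the factorization.
	anorm := impl.Dlange(lapack.MaxColumnSum, n, n, a, n, make([]float64, n))
	ipiv := make([]int, n)
	impl.Dgetrf(n, n, a, n, ipiv) // a now holds the LU factors (assumed API)
	work := make([]float64, 4*n)
	iwork := make([]int, n)
	rcond := impl.Dgecon(lapack.MaxColumnSum, n, a, n, anorm, work, iwork)
	fmt.Println(rcond) // ≈ 1/3: cond_1(A) = ‖A‖₁·‖A⁻¹‖₁ = 6 · 0.5
}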
-// -// The slice a contains the result of the LU decomposition of A as computed by Dgetrf. -// -// anorm is the corresponding 1-norm or ∞-norm of the original matrix A. -// -// work is a temporary data slice of length at least 4*n and Dgecon will panic otherwise. -// -// iwork is a temporary data slice of length at least n and Dgecon will panic otherwise. -func (impl Implementation) Dgecon(norm lapack.MatrixNorm, n int, a []float64, lda int, anorm float64, work []float64, iwork []int) float64 { - switch { - case norm != lapack.MaxColumnSum && norm != lapack.MaxRowSum: - panic(badNorm) - case n < 0: - panic(nLT0) - case lda < max(1, n): - panic(badLdA) - } - - // Quick return if possible. - if n == 0 { - return 1 - } - - switch { - case len(a) < (n-1)*lda+n: - panic(shortA) - case len(work) < 4*n: - panic(shortWork) - case len(iwork) < n: - panic(shortIWork) - } - - // Quick return if possible. - if anorm == 0 { - return 0 - } - - bi := blas64.Implementation() - var rcond, ainvnm float64 - var kase int - var normin bool - isave := new([3]int) - onenrm := norm == lapack.MaxColumnSum - smlnum := dlamchS - kase1 := 2 - if onenrm { - kase1 = 1 - } - for { - ainvnm, kase = impl.Dlacn2(n, work[n:], work, iwork, ainvnm, kase, isave) - if kase == 0 { - if ainvnm != 0 { - rcond = (1 / ainvnm) / anorm - } - return rcond - } - var sl, su float64 - if kase == kase1 { - sl = impl.Dlatrs(blas.Lower, blas.NoTrans, blas.Unit, normin, n, a, lda, work, work[2*n:]) - su = impl.Dlatrs(blas.Upper, blas.NoTrans, blas.NonUnit, normin, n, a, lda, work, work[3*n:]) - } else { - su = impl.Dlatrs(blas.Upper, blas.Trans, blas.NonUnit, normin, n, a, lda, work, work[3*n:]) - sl = impl.Dlatrs(blas.Lower, blas.Trans, blas.Unit, normin, n, a, lda, work, work[2*n:]) - } - scale := sl * su - normin = true - if scale != 1 { - ix := bi.Idamax(n, work, 1) - if scale == 0 || scale < math.Abs(work[ix])*smlnum { - return rcond - } - impl.Drscl(n, scale, work, 1) - } - } -} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dgeev.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dgeev.go deleted file mode 100644 index 0da4e609c..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/dgeev.go +++ /dev/null @@ -1,279 +0,0 @@ -// Copyright ©2016 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import ( - "math" - - "gonum.org/v1/gonum/blas" - "gonum.org/v1/gonum/blas/blas64" - "gonum.org/v1/gonum/lapack" -) - -// Dgeev computes the eigenvalues and, optionally, the left and/or right -// eigenvectors for an n×n real nonsymmetric matrix A. -// -// The right eigenvector v_j of A corresponding to an eigenvalue λ_j -// is defined by -// A v_j = λ_j v_j, -// and the left eigenvector u_j corresponding to an eigenvalue λ_j is defined by -// u_j^H A = λ_j u_j^H, -// where u_j^H is the conjugate transpose of u_j. -// -// On return, A will be overwritten and the left and right eigenvectors will be -// stored, respectively, in the columns of the n×n matrices VL and VR in the -// same order as their eigenvalues. If the j-th eigenvalue is real, then -// u_j = VL[:,j], -// v_j = VR[:,j], -// and if it is not real, then j and j+1 form a complex conjugate pair and the -// eigenvectors can be recovered as -// u_j = VL[:,j] + i*VL[:,j+1], -// u_{j+1} = VL[:,j] - i*VL[:,j+1], -// v_j = VR[:,j] + i*VR[:,j+1], -// v_{j+1} = VR[:,j] - i*VR[:,j+1], -// where i is the imaginary unit. 
The computed eigenvectors are normalized to -// have Euclidean norm equal to 1 and largest component real. -// -// Left eigenvectors will be computed only if jobvl == lapack.LeftEVCompute, -// otherwise jobvl must be lapack.LeftEVNone. -// Right eigenvectors will be computed only if jobvr == lapack.RightEVCompute, -// otherwise jobvr must be lapack.RightEVNone. -// For other values of jobvl and jobvr Dgeev will panic. -// -// wr and wi contain the real and imaginary parts, respectively, of the computed -// eigenvalues. Complex conjugate pairs of eigenvalues appear consecutively with -// the eigenvalue having the positive imaginary part first. -// wr and wi must have length n, and Dgeev will panic otherwise. -// -// work must have length at least lwork and lwork must be at least max(1,4*n) if -// the left or right eigenvectors are computed, and at least max(1,3*n) if no -// eigenvectors are computed. For good performance, lwork must generally be -// larger. On return, optimal value of lwork will be stored in work[0]. -// -// If lwork == -1, instead of performing Dgeev, the function only calculates the -// optimal vaule of lwork and stores it into work[0]. -// -// On return, first is the index of the first valid eigenvalue. If first == 0, -// all eigenvalues and eigenvectors have been computed. If first is positive, -// Dgeev failed to compute all the eigenvalues, no eigenvectors have been -// computed and wr[first:] and wi[first:] contain those eigenvalues which have -// converged. -func (impl Implementation) Dgeev(jobvl lapack.LeftEVJob, jobvr lapack.RightEVJob, n int, a []float64, lda int, wr, wi []float64, vl []float64, ldvl int, vr []float64, ldvr int, work []float64, lwork int) (first int) { - wantvl := jobvl == lapack.LeftEVCompute - wantvr := jobvr == lapack.RightEVCompute - var minwrk int - if wantvl || wantvr { - minwrk = max(1, 4*n) - } else { - minwrk = max(1, 3*n) - } - switch { - case jobvl != lapack.LeftEVCompute && jobvl != lapack.LeftEVNone: - panic(badLeftEVJob) - case jobvr != lapack.RightEVCompute && jobvr != lapack.RightEVNone: - panic(badRightEVJob) - case n < 0: - panic(nLT0) - case lda < max(1, n): - panic(badLdA) - case ldvl < 1 || (ldvl < n && wantvl): - panic(badLdVL) - case ldvr < 1 || (ldvr < n && wantvr): - panic(badLdVR) - case lwork < minwrk && lwork != -1: - panic(badLWork) - case len(work) < lwork: - panic(shortWork) - } - - // Quick return if possible. 
- if n == 0 { - work[0] = 1 - return 0 - } - - maxwrk := 2*n + n*impl.Ilaenv(1, "DGEHRD", " ", n, 1, n, 0) - if wantvl || wantvr { - maxwrk = max(maxwrk, 2*n+(n-1)*impl.Ilaenv(1, "DORGHR", " ", n, 1, n, -1)) - impl.Dhseqr(lapack.EigenvaluesAndSchur, lapack.SchurOrig, n, 0, n-1, - a, lda, wr, wi, nil, n, work, -1) - maxwrk = max(maxwrk, max(n+1, n+int(work[0]))) - side := lapack.EVLeft - if wantvr { - side = lapack.EVRight - } - impl.Dtrevc3(side, lapack.EVAllMulQ, nil, n, a, lda, vl, ldvl, vr, ldvr, - n, work, -1) - maxwrk = max(maxwrk, n+int(work[0])) - maxwrk = max(maxwrk, 4*n) - } else { - impl.Dhseqr(lapack.EigenvaluesOnly, lapack.SchurNone, n, 0, n-1, - a, lda, wr, wi, vr, ldvr, work, -1) - maxwrk = max(maxwrk, max(n+1, n+int(work[0]))) - } - maxwrk = max(maxwrk, minwrk) - - if lwork == -1 { - work[0] = float64(maxwrk) - return 0 - } - - switch { - case len(a) < (n-1)*lda+n: - panic(shortA) - case len(wr) != n: - panic(badLenWr) - case len(wi) != n: - panic(badLenWi) - case len(vl) < (n-1)*ldvl+n && wantvl: - panic(shortVL) - case len(vr) < (n-1)*ldvr+n && wantvr: - panic(shortVR) - } - - // Get machine constants. - smlnum := math.Sqrt(dlamchS) / dlamchP - bignum := 1 / smlnum - - // Scale A if max element outside range [smlnum,bignum]. - anrm := impl.Dlange(lapack.MaxAbs, n, n, a, lda, nil) - var scalea bool - var cscale float64 - if 0 < anrm && anrm < smlnum { - scalea = true - cscale = smlnum - } else if anrm > bignum { - scalea = true - cscale = bignum - } - if scalea { - impl.Dlascl(lapack.General, 0, 0, anrm, cscale, n, n, a, lda) - } - - // Balance the matrix. - workbal := work[:n] - ilo, ihi := impl.Dgebal(lapack.PermuteScale, n, a, lda, workbal) - - // Reduce to upper Hessenberg form. - iwrk := 2 * n - tau := work[n : iwrk-1] - impl.Dgehrd(n, ilo, ihi, a, lda, tau, work[iwrk:], lwork-iwrk) - - var side lapack.EVSide - if wantvl { - side = lapack.EVLeft - // Copy Householder vectors to VL. - impl.Dlacpy(blas.Lower, n, n, a, lda, vl, ldvl) - // Generate orthogonal matrix in VL. - impl.Dorghr(n, ilo, ihi, vl, ldvl, tau, work[iwrk:], lwork-iwrk) - // Perform QR iteration, accumulating Schur vectors in VL. - iwrk = n - first = impl.Dhseqr(lapack.EigenvaluesAndSchur, lapack.SchurOrig, n, ilo, ihi, - a, lda, wr, wi, vl, ldvl, work[iwrk:], lwork-iwrk) - if wantvr { - // Want left and right eigenvectors. - // Copy Schur vectors to VR. - side = lapack.EVBoth - impl.Dlacpy(blas.All, n, n, vl, ldvl, vr, ldvr) - } - } else if wantvr { - side = lapack.EVRight - // Copy Householder vectors to VR. - impl.Dlacpy(blas.Lower, n, n, a, lda, vr, ldvr) - // Generate orthogonal matrix in VR. - impl.Dorghr(n, ilo, ihi, vr, ldvr, tau, work[iwrk:], lwork-iwrk) - // Perform QR iteration, accumulating Schur vectors in VR. - iwrk = n - first = impl.Dhseqr(lapack.EigenvaluesAndSchur, lapack.SchurOrig, n, ilo, ihi, - a, lda, wr, wi, vr, ldvr, work[iwrk:], lwork-iwrk) - } else { - // Compute eigenvalues only. - iwrk = n - first = impl.Dhseqr(lapack.EigenvaluesOnly, lapack.SchurNone, n, ilo, ihi, - a, lda, wr, wi, nil, 1, work[iwrk:], lwork-iwrk) - } - - if first > 0 { - if scalea { - // Undo scaling. - impl.Dlascl(lapack.General, 0, 0, cscale, anrm, n-first, 1, wr[first:], 1) - impl.Dlascl(lapack.General, 0, 0, cscale, anrm, n-first, 1, wi[first:], 1) - impl.Dlascl(lapack.General, 0, 0, cscale, anrm, ilo, 1, wr, 1) - impl.Dlascl(lapack.General, 0, 0, cscale, anrm, ilo, 1, wi, 1) - } - work[0] = float64(maxwrk) - return first - } - - if wantvl || wantvr { - // Compute left and/or right eigenvectors. 
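Stepping back from the internals, the calling sequence documented above looks like this from the outside; balancing (Dgebal/Dgebak), the Hessenberg reduction, and the QR iteration all happen inside the routine. A sketch with a 2×2 matrix whose eigenvalues are -1 and -2, workspace query included:

package main

import (
	"fmt"

	"gonum.org/v1/gonum/lapack"
	"gonum.org/v1/gonum/lapack/gonum"
)

func main() {
	impl := gonum.Implementation{}
	n := 2
	a := []float64{
		0, 1,
		-2, -3,
	}
	wr := make([]float64, n)
	wi := make([]float64, n)
	vr := make([]float64, n*n)

	// Workspace query: only work[0] is written.
	query := make([]float64, 1)
	impl.Dgeev(lapack.LeftEVNone, lapack.RightEVCompute, n, a, n,
		wr, wi, nil, 1, vr, n, query, -1)
	work := make([]float64, int(query[0]))

	first := impl.Dgeev(lapack.LeftEVNone, lapack.RightEVCompute, n, a, n,
		wr, wi, nil, 1, vr, n, work, len(work))
	fmt.Println(first, wr, wi) // 0 [-1 -2] [0 0] (eigenvalue order may differ)
}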
- impl.Dtrevc3(side, lapack.EVAllMulQ, nil, n, - a, lda, vl, ldvl, vr, ldvr, n, work[iwrk:], lwork-iwrk) - } - bi := blas64.Implementation() - if wantvl { - // Undo balancing of left eigenvectors. - impl.Dgebak(lapack.PermuteScale, lapack.EVLeft, n, ilo, ihi, workbal, n, vl, ldvl) - // Normalize left eigenvectors and make largest component real. - for i, wii := range wi { - if wii < 0 { - continue - } - if wii == 0 { - scl := 1 / bi.Dnrm2(n, vl[i:], ldvl) - bi.Dscal(n, scl, vl[i:], ldvl) - continue - } - scl := 1 / impl.Dlapy2(bi.Dnrm2(n, vl[i:], ldvl), bi.Dnrm2(n, vl[i+1:], ldvl)) - bi.Dscal(n, scl, vl[i:], ldvl) - bi.Dscal(n, scl, vl[i+1:], ldvl) - for k := 0; k < n; k++ { - vi := vl[k*ldvl+i] - vi1 := vl[k*ldvl+i+1] - work[iwrk+k] = vi*vi + vi1*vi1 - } - k := bi.Idamax(n, work[iwrk:iwrk+n], 1) - cs, sn, _ := impl.Dlartg(vl[k*ldvl+i], vl[k*ldvl+i+1]) - bi.Drot(n, vl[i:], ldvl, vl[i+1:], ldvl, cs, sn) - vl[k*ldvl+i+1] = 0 - } - } - if wantvr { - // Undo balancing of right eigenvectors. - impl.Dgebak(lapack.PermuteScale, lapack.EVRight, n, ilo, ihi, workbal, n, vr, ldvr) - // Normalize right eigenvectors and make largest component real. - for i, wii := range wi { - if wii < 0 { - continue - } - if wii == 0 { - scl := 1 / bi.Dnrm2(n, vr[i:], ldvr) - bi.Dscal(n, scl, vr[i:], ldvr) - continue - } - scl := 1 / impl.Dlapy2(bi.Dnrm2(n, vr[i:], ldvr), bi.Dnrm2(n, vr[i+1:], ldvr)) - bi.Dscal(n, scl, vr[i:], ldvr) - bi.Dscal(n, scl, vr[i+1:], ldvr) - for k := 0; k < n; k++ { - vi := vr[k*ldvr+i] - vi1 := vr[k*ldvr+i+1] - work[iwrk+k] = vi*vi + vi1*vi1 - } - k := bi.Idamax(n, work[iwrk:iwrk+n], 1) - cs, sn, _ := impl.Dlartg(vr[k*ldvr+i], vr[k*ldvr+i+1]) - bi.Drot(n, vr[i:], ldvr, vr[i+1:], ldvr, cs, sn) - vr[k*ldvr+i+1] = 0 - } - } - - if scalea { - // Undo scaling. - impl.Dlascl(lapack.General, 0, 0, cscale, anrm, n-first, 1, wr[first:], 1) - impl.Dlascl(lapack.General, 0, 0, cscale, anrm, n-first, 1, wi[first:], 1) - } - - work[0] = float64(maxwrk) - return first -} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dgehd2.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dgehd2.go deleted file mode 100644 index 261f21b98..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/dgehd2.go +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright ©2016 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import "gonum.org/v1/gonum/blas" - -// Dgehd2 reduces a block of a general n×n matrix A to upper Hessenberg form H -// by an orthogonal similarity transformation Q^T * A * Q = H. -// -// The matrix Q is represented as a product of (ihi-ilo) elementary -// reflectors -// Q = H_{ilo} H_{ilo+1} ... H_{ihi-1}. -// Each H_i has the form -// H_i = I - tau[i] * v * v^T -// where v is a real vector with v[0:i+1] = 0, v[i+1] = 1 and v[ihi+1:n] = 0. -// v[i+2:ihi+1] is stored on exit in A[i+2:ihi+1,i]. -// -// On entry, a contains the n×n general matrix to be reduced. On return, the -// upper triangle and the first subdiagonal of A are overwritten with the upper -// Hessenberg matrix H, and the elements below the first subdiagonal, with the -// slice tau, represent the orthogonal matrix Q as a product of elementary -// reflectors. -// -// The contents of A are illustrated by the following example, with n = 7, ilo = -// 1 and ihi = 5. 
-// On entry, -// [ a a a a a a a ] -// [ a a a a a a ] -// [ a a a a a a ] -// [ a a a a a a ] -// [ a a a a a a ] -// [ a a a a a a ] -// [ a ] -// on return, -// [ a a h h h h a ] -// [ a h h h h a ] -// [ h h h h h h ] -// [ v1 h h h h h ] -// [ v1 v2 h h h h ] -// [ v1 v2 v3 h h h ] -// [ a ] -// where a denotes an element of the original matrix A, h denotes a -// modified element of the upper Hessenberg matrix H, and vi denotes an -// element of the vector defining H_i. -// -// ilo and ihi determine the block of A that will be reduced to upper Hessenberg -// form. It must hold that 0 <= ilo <= ihi <= max(0, n-1), otherwise Dgehd2 will -// panic. -// -// On return, tau will contain the scalar factors of the elementary reflectors. -// It must have length equal to n-1, otherwise Dgehd2 will panic. -// -// work must have length at least n, otherwise Dgehd2 will panic. -// -// Dgehd2 is an internal routine. It is exported for testing purposes. -func (impl Implementation) Dgehd2(n, ilo, ihi int, a []float64, lda int, tau, work []float64) { - switch { - case n < 0: - panic(nLT0) - case ilo < 0 || max(0, n-1) < ilo: - panic(badIlo) - case ihi < min(ilo, n-1) || n <= ihi: - panic(badIhi) - case lda < max(1, n): - panic(badLdA) - } - - // Quick return if possible. - if n == 0 { - return - } - - switch { - case len(a) < (n-1)*lda+n: - panic(shortA) - case len(tau) != n-1: - panic(badLenTau) - case len(work) < n: - panic(shortWork) - } - - for i := ilo; i < ihi; i++ { - // Compute elementary reflector H_i to annihilate A[i+2:ihi+1,i]. - var aii float64 - aii, tau[i] = impl.Dlarfg(ihi-i, a[(i+1)*lda+i], a[min(i+2, n-1)*lda+i:], lda) - a[(i+1)*lda+i] = 1 - - // Apply H_i to A[0:ihi+1,i+1:ihi+1] from the right. - impl.Dlarf(blas.Right, ihi+1, ihi-i, a[(i+1)*lda+i:], lda, tau[i], a[i+1:], lda, work) - - // Apply H_i to A[i+1:ihi+1,i+1:n] from the left. - impl.Dlarf(blas.Left, ihi-i, n-i-1, a[(i+1)*lda+i:], lda, tau[i], a[(i+1)*lda+i+1:], lda, work) - a[(i+1)*lda+i] = aii - } -} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dgehrd.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dgehrd.go deleted file mode 100644 index 89b73cef9..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/dgehrd.go +++ /dev/null @@ -1,194 +0,0 @@ -// Copyright ©2016 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import ( - "gonum.org/v1/gonum/blas" - "gonum.org/v1/gonum/blas/blas64" - "gonum.org/v1/gonum/lapack" -) - -// Dgehrd reduces a block of a real n×n general matrix A to upper Hessenberg -// form H by an orthogonal similarity transformation Q^T * A * Q = H. -// -// The matrix Q is represented as a product of (ihi-ilo) elementary -// reflectors -// Q = H_{ilo} H_{ilo+1} ... H_{ihi-1}. -// Each H_i has the form -// H_i = I - tau[i] * v * v^T -// where v is a real vector with v[0:i+1] = 0, v[i+1] = 1 and v[ihi+1:n] = 0. -// v[i+2:ihi+1] is stored on exit in A[i+2:ihi+1,i]. -// -// On entry, a contains the n×n general matrix to be reduced. On return, the -// upper triangle and the first subdiagonal of A will be overwritten with the -// upper Hessenberg matrix H, and the elements below the first subdiagonal, with -// the slice tau, represent the orthogonal matrix Q as a product of elementary -// reflectors. -// -// The contents of a are illustrated by the following example, with n = 7, ilo = -// 1 and ihi = 5. 
-// On entry, -// [ a a a a a a a ] -// [ a a a a a a ] -// [ a a a a a a ] -// [ a a a a a a ] -// [ a a a a a a ] -// [ a a a a a a ] -// [ a ] -// on return, -// [ a a h h h h a ] -// [ a h h h h a ] -// [ h h h h h h ] -// [ v1 h h h h h ] -// [ v1 v2 h h h h ] -// [ v1 v2 v3 h h h ] -// [ a ] -// where a denotes an element of the original matrix A, h denotes a -// modified element of the upper Hessenberg matrix H, and vi denotes an -// element of the vector defining H_i. -// -// ilo and ihi determine the block of A that will be reduced to upper Hessenberg -// form. It must hold that 0 <= ilo <= ihi < n if n > 0, and ilo == 0 and ihi == -// -1 if n == 0, otherwise Dgehrd will panic. -// -// On return, tau will contain the scalar factors of the elementary reflectors. -// Elements tau[:ilo] and tau[ihi:] will be set to zero. tau must have length -// equal to n-1 if n > 0, otherwise Dgehrd will panic. -// -// work must have length at least lwork and lwork must be at least max(1,n), -// otherwise Dgehrd will panic. On return, work[0] contains the optimal value of -// lwork. -// -// If lwork == -1, instead of performing Dgehrd, only the optimal value of lwork -// will be stored in work[0]. -// -// Dgehrd is an internal routine. It is exported for testing purposes. -func (impl Implementation) Dgehrd(n, ilo, ihi int, a []float64, lda int, tau, work []float64, lwork int) { - switch { - case n < 0: - panic(nLT0) - case ilo < 0 || max(0, n-1) < ilo: - panic(badIlo) - case ihi < min(ilo, n-1) || n <= ihi: - panic(badIhi) - case lda < max(1, n): - panic(badLdA) - case lwork < max(1, n) && lwork != -1: - panic(badLWork) - case len(work) < lwork: - panic(shortWork) - } - - // Quick return if possible. - if n == 0 { - work[0] = 1 - return - } - - const ( - nbmax = 64 - ldt = nbmax + 1 - tsize = ldt * nbmax - ) - // Compute the workspace requirements. - nb := min(nbmax, impl.Ilaenv(1, "DGEHRD", " ", n, ilo, ihi, -1)) - lwkopt := n*nb + tsize - if lwork == -1 { - work[0] = float64(lwkopt) - return - } - - if len(a) < (n-1)*lda+n { - panic(shortA) - } - if len(tau) != n-1 { - panic(badLenTau) - } - - // Set tau[:ilo] and tau[ihi:] to zero. - for i := 0; i < ilo; i++ { - tau[i] = 0 - } - for i := ihi; i < n-1; i++ { - tau[i] = 0 - } - - // Quick return if possible. - nh := ihi - ilo + 1 - if nh <= 1 { - work[0] = 1 - return - } - - // Determine the block size. - nbmin := 2 - var nx int - if 1 < nb && nb < nh { - // Determine when to cross over from blocked to unblocked code - // (last block is always handled by unblocked code). - nx = max(nb, impl.Ilaenv(3, "DGEHRD", " ", n, ilo, ihi, -1)) - if nx < nh { - // Determine if workspace is large enough for blocked code. - if lwork < n*nb+tsize { - // Not enough workspace to use optimal nb: - // determine the minimum value of nb, and reduce - // nb or force use of unblocked code. - nbmin = max(2, impl.Ilaenv(2, "DGEHRD", " ", n, ilo, ihi, -1)) - if lwork >= n*nbmin+tsize { - nb = (lwork - tsize) / n - } else { - nb = 1 - } - } - } - } - ldwork := nb // work is used as an n×nb matrix. - - var i int - if nb < nbmin || nh <= nb { - // Use unblocked code below. - i = ilo - } else { - // Use blocked code. - bi := blas64.Implementation() - iwt := n * nb // Size of the matrix Y and index where the matrix T starts in work. 
- for i = ilo; i < ihi-nx; i += nb { - ib := min(nb, ihi-i) - - // Reduce columns [i:i+ib] to Hessenberg form, returning the - // matrices V and T of the block reflector H = I - V*T*V^T - // which performs the reduction, and also the matrix Y = A*V*T. - impl.Dlahr2(ihi+1, i+1, ib, a[i:], lda, tau[i:], work[iwt:], ldt, work, ldwork) - - // Apply the block reflector H to A[:ihi+1,i+ib:ihi+1] from the - // right, computing A := A - Y * V^T. V[i+ib,i+ib-1] must be set - // to 1. - ei := a[(i+ib)*lda+i+ib-1] - a[(i+ib)*lda+i+ib-1] = 1 - bi.Dgemm(blas.NoTrans, blas.Trans, ihi+1, ihi-i-ib+1, ib, - -1, work, ldwork, - a[(i+ib)*lda+i:], lda, - 1, a[i+ib:], lda) - a[(i+ib)*lda+i+ib-1] = ei - - // Apply the block reflector H to A[0:i+1,i+1:i+ib-1] from the - // right. - bi.Dtrmm(blas.Right, blas.Lower, blas.Trans, blas.Unit, i+1, ib-1, - 1, a[(i+1)*lda+i:], lda, work, ldwork) - for j := 0; j <= ib-2; j++ { - bi.Daxpy(i+1, -1, work[j:], ldwork, a[i+j+1:], lda) - } - - // Apply the block reflector H to A[i+1:ihi+1,i+ib:n] from the - // left. - impl.Dlarfb(blas.Left, blas.Trans, lapack.Forward, lapack.ColumnWise, - ihi-i, n-i-ib, ib, - a[(i+1)*lda+i:], lda, work[iwt:], ldt, a[(i+1)*lda+i+ib:], lda, work, ldwork) - } - } - // Use unblocked code to reduce the rest of the matrix. - impl.Dgehd2(n, i, ihi, a, lda, tau, work) - work[0] = float64(lwkopt) -} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dgelq2.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dgelq2.go deleted file mode 100644 index abc96f7d2..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/dgelq2.go +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright ©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import "gonum.org/v1/gonum/blas" - -// Dgelq2 computes the LQ factorization of the m×n matrix A. -// -// In an LQ factorization, L is a lower triangular m×n matrix, and Q is an n×n -// orthonormal matrix. -// -// a is modified to contain the information to construct L and Q. -// The lower triangle of a contains the matrix L. The upper triangular elements -// (not including the diagonal) contain the elementary reflectors. tau is modified -// to contain the reflector scales. tau must have length of at least k = min(m,n) -// and this function will panic otherwise. -// -// See Dgeqr2 for a description of the elementary reflectors and orthonormal -// matrix Q. Q is constructed as a product of these elementary reflectors, -// Q = H_{k-1} * ... * H_1 * H_0. -// -// work is temporary storage of length at least m and this function will panic otherwise. -// -// Dgelq2 is an internal routine. It is exported for testing purposes. -func (impl Implementation) Dgelq2(m, n int, a []float64, lda int, tau, work []float64) { - switch { - case m < 0: - panic(mLT0) - case n < 0: - panic(nLT0) - case lda < max(1, n): - panic(badLdA) - } - - // Quick return if possible. 
- k := min(m, n) - if k == 0 { - return - } - - switch { - case len(a) < (m-1)*lda+n: - panic(shortA) - case len(tau) < k: - panic(shortTau) - case len(work) < m: - panic(shortWork) - } - - for i := 0; i < k; i++ { - a[i*lda+i], tau[i] = impl.Dlarfg(n-i, a[i*lda+i], a[i*lda+min(i+1, n-1):], 1) - if i < m-1 { - aii := a[i*lda+i] - a[i*lda+i] = 1 - impl.Dlarf(blas.Right, m-i-1, n-i, - a[i*lda+i:], 1, - tau[i], - a[(i+1)*lda+i:], lda, - work) - a[i*lda+i] = aii - } - } -} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dgelqf.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dgelqf.go deleted file mode 100644 index f1fd13a01..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/dgelqf.go +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright ©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import ( - "gonum.org/v1/gonum/blas" - "gonum.org/v1/gonum/lapack" -) - -// Dgelqf computes the LQ factorization of the m×n matrix A using a blocked -// algorithm. See the documentation for Dgelq2 for a description of the -// parameters at entry and exit. -// -// work is temporary storage, and lwork specifies the usable memory length. -// At minimum, lwork >= m, and this function will panic otherwise. -// Dgelqf is a blocked LQ factorization, but the block size is limited -// by the temporary space available. If lwork == -1, instead of performing Dgelqf, -// the optimal work length will be stored into work[0]. -// -// tau must have length at least min(m,n), and this function will panic otherwise. -func (impl Implementation) Dgelqf(m, n int, a []float64, lda int, tau, work []float64, lwork int) { - switch { - case m < 0: - panic(mLT0) - case n < 0: - panic(nLT0) - case lda < max(1, n): - panic(badLdA) - case lwork < max(1, m) && lwork != -1: - panic(badLWork) - case len(work) < max(1, lwork): - panic(shortWork) - } - - k := min(m, n) - if k == 0 { - work[0] = 1 - return - } - - nb := impl.Ilaenv(1, "DGELQF", " ", m, n, -1, -1) - if lwork == -1 { - work[0] = float64(m * nb) - return - } - - if len(a) < (m-1)*lda+n { - panic(shortA) - } - if len(tau) < k { - panic(shortTau) - } - - // Find the optimal blocking size based on the size of available memory - // and optimal machine parameters. - nbmin := 2 - var nx int - iws := m - if 1 < nb && nb < k { - nx = max(0, impl.Ilaenv(3, "DGELQF", " ", m, n, -1, -1)) - if nx < k { - iws = m * nb - if lwork < iws { - nb = lwork / m - nbmin = max(2, impl.Ilaenv(2, "DGELQF", " ", m, n, -1, -1)) - } - } - } - ldwork := nb - // Computed blocked LQ factorization. - var i int - if nbmin <= nb && nb < k && nx < k { - for i = 0; i < k-nx; i += nb { - ib := min(k-i, nb) - impl.Dgelq2(ib, n-i, a[i*lda+i:], lda, tau[i:], work) - if i+ib < m { - impl.Dlarft(lapack.Forward, lapack.RowWise, n-i, ib, - a[i*lda+i:], lda, - tau[i:], - work, ldwork) - impl.Dlarfb(blas.Right, blas.NoTrans, lapack.Forward, lapack.RowWise, - m-i-ib, n-i, ib, - a[i*lda+i:], lda, - work, ldwork, - a[(i+ib)*lda+i:], lda, - work[ib*ldwork:], ldwork) - } - } - } - // Perform unblocked LQ factorization on the remainder. - if i < k { - impl.Dgelq2(m-i, n-i, a[i*lda+i:], lda, tau[i:], work) - } - work[0] = float64(iws) -} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dgels.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dgels.go deleted file mode 100644 index a3894b6a0..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/dgels.go +++ /dev/null @@ -1,219 +0,0 @@ -// Copyright ©2015 The Gonum Authors. 
All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import ( - "gonum.org/v1/gonum/blas" - "gonum.org/v1/gonum/lapack" -) - -// Dgels finds a minimum-norm solution based on the matrices A and B using the -// QR or LQ factorization. Dgels returns false if the matrix -// A is singular, and true if this solution was successfully found. -// -// The minimization problem solved depends on the input parameters. -// -// 1. If m >= n and trans == blas.NoTrans, Dgels finds X such that || A*X - B||_2 -// is minimized. -// 2. If m < n and trans == blas.NoTrans, Dgels finds the minimum norm solution of -// A * X = B. -// 3. If m >= n and trans == blas.Trans, Dgels finds the minimum norm solution of -// A^T * X = B. -// 4. If m < n and trans == blas.Trans, Dgels finds X such that || A*X - B||_2 -// is minimized. -// Note that the least-squares solutions (cases 1 and 3) perform the minimization -// per column of B. This is not the same as finding the minimum-norm matrix. -// -// The matrix A is a general matrix of size m×n and is modified during this call. -// The input matrix B is of size max(m,n)×nrhs, and serves two purposes. On entry, -// the elements of b specify the input matrix B. B has size m×nrhs if -// trans == blas.NoTrans, and n×nrhs if trans == blas.Trans. On exit, the -// leading submatrix of b contains the solution vectors X. If trans == blas.NoTrans, -// this submatrix is of size n×nrhs, and of size m×nrhs otherwise. -// -// work is temporary storage, and lwork specifies the usable memory length. -// At minimum, lwork >= max(m,n) + max(m,n,nrhs), and this function will panic -// otherwise. A longer work will enable blocked algorithms to be called. -// In the special case that lwork == -1, work[0] will be set to the optimal working -// length. -func (impl Implementation) Dgels(trans blas.Transpose, m, n, nrhs int, a []float64, lda int, b []float64, ldb int, work []float64, lwork int) bool { - mn := min(m, n) - minwrk := mn + max(mn, nrhs) - switch { - case trans != blas.NoTrans && trans != blas.Trans && trans != blas.ConjTrans: - panic(badTrans) - case m < 0: - panic(mLT0) - case n < 0: - panic(nLT0) - case nrhs < 0: - panic(nrhsLT0) - case lda < max(1, n): - panic(badLdA) - case ldb < max(1, nrhs): - panic(badLdB) - case lwork < max(1, minwrk) && lwork != -1: - panic(badLWork) - case len(work) < max(1, lwork): - panic(shortWork) - } - - // Quick return if possible. - if mn == 0 || nrhs == 0 { - impl.Dlaset(blas.All, max(m, n), nrhs, 0, 0, b, ldb) - work[0] = 1 - return true - } - - // Find optimal block size. - var nb int - if m >= n { - nb = impl.Ilaenv(1, "DGEQRF", " ", m, n, -1, -1) - if trans != blas.NoTrans { - nb = max(nb, impl.Ilaenv(1, "DORMQR", "LN", m, nrhs, n, -1)) - } else { - nb = max(nb, impl.Ilaenv(1, "DORMQR", "LT", m, nrhs, n, -1)) - } - } else { - nb = impl.Ilaenv(1, "DGELQF", " ", m, n, -1, -1) - if trans != blas.NoTrans { - nb = max(nb, impl.Ilaenv(1, "DORMLQ", "LT", n, nrhs, m, -1)) - } else { - nb = max(nb, impl.Ilaenv(1, "DORMLQ", "LN", n, nrhs, m, -1)) - } - } - wsize := max(1, mn+max(mn, nrhs)*nb) - work[0] = float64(wsize) - - if lwork == -1 { - return true - } - - switch { - case len(a) < (m-1)*lda+n: - panic(shortA) - case len(b) < (max(m, n)-1)*ldb+nrhs: - panic(shortB) - } - - // Scale the input matrices if they contain extreme values. 
- smlnum := dlamchS / dlamchP - bignum := 1 / smlnum - anrm := impl.Dlange(lapack.MaxAbs, m, n, a, lda, nil) - var iascl int - if anrm > 0 && anrm < smlnum { - impl.Dlascl(lapack.General, 0, 0, anrm, smlnum, m, n, a, lda) - iascl = 1 - } else if anrm > bignum { - impl.Dlascl(lapack.General, 0, 0, anrm, bignum, m, n, a, lda) - } else if anrm == 0 { - // Matrix is all zeros. - impl.Dlaset(blas.All, max(m, n), nrhs, 0, 0, b, ldb) - return true - } - brow := m - if trans != blas.NoTrans { - brow = n - } - bnrm := impl.Dlange(lapack.MaxAbs, brow, nrhs, b, ldb, nil) - ibscl := 0 - if bnrm > 0 && bnrm < smlnum { - impl.Dlascl(lapack.General, 0, 0, bnrm, smlnum, brow, nrhs, b, ldb) - ibscl = 1 - } else if bnrm > bignum { - impl.Dlascl(lapack.General, 0, 0, bnrm, bignum, brow, nrhs, b, ldb) - ibscl = 2 - } - - // Solve the minimization problem using a QR or an LQ decomposition. - var scllen int - if m >= n { - impl.Dgeqrf(m, n, a, lda, work, work[mn:], lwork-mn) - if trans == blas.NoTrans { - impl.Dormqr(blas.Left, blas.Trans, m, nrhs, n, - a, lda, - work[:n], - b, ldb, - work[mn:], lwork-mn) - ok := impl.Dtrtrs(blas.Upper, blas.NoTrans, blas.NonUnit, n, nrhs, - a, lda, - b, ldb) - if !ok { - return false - } - scllen = n - } else { - ok := impl.Dtrtrs(blas.Upper, blas.Trans, blas.NonUnit, n, nrhs, - a, lda, - b, ldb) - if !ok { - return false - } - for i := n; i < m; i++ { - for j := 0; j < nrhs; j++ { - b[i*ldb+j] = 0 - } - } - impl.Dormqr(blas.Left, blas.NoTrans, m, nrhs, n, - a, lda, - work[:n], - b, ldb, - work[mn:], lwork-mn) - scllen = m - } - } else { - impl.Dgelqf(m, n, a, lda, work, work[mn:], lwork-mn) - if trans == blas.NoTrans { - ok := impl.Dtrtrs(blas.Lower, blas.NoTrans, blas.NonUnit, - m, nrhs, - a, lda, - b, ldb) - if !ok { - return false - } - for i := m; i < n; i++ { - for j := 0; j < nrhs; j++ { - b[i*ldb+j] = 0 - } - } - impl.Dormlq(blas.Left, blas.Trans, n, nrhs, m, - a, lda, - work, - b, ldb, - work[mn:], lwork-mn) - scllen = n - } else { - impl.Dormlq(blas.Left, blas.NoTrans, n, nrhs, m, - a, lda, - work, - b, ldb, - work[mn:], lwork-mn) - ok := impl.Dtrtrs(blas.Lower, blas.Trans, blas.NonUnit, - m, nrhs, - a, lda, - b, ldb) - if !ok { - return false - } - } - } - - // Adjust answer vector based on scaling. - if iascl == 1 { - impl.Dlascl(lapack.General, 0, 0, anrm, smlnum, scllen, nrhs, b, ldb) - } - if iascl == 2 { - impl.Dlascl(lapack.General, 0, 0, anrm, bignum, scllen, nrhs, b, ldb) - } - if ibscl == 1 { - impl.Dlascl(lapack.General, 0, 0, smlnum, bnrm, scllen, nrhs, b, ldb) - } - if ibscl == 2 { - impl.Dlascl(lapack.General, 0, 0, bignum, bnrm, scllen, nrhs, b, ldb) - } - - work[0] = float64(wsize) - return true -} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dgeql2.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dgeql2.go deleted file mode 100644 index 3f3ddb163..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/dgeql2.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright ©2016 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import "gonum.org/v1/gonum/blas" - -// Dgeql2 computes the QL factorization of the m×n matrix A. That is, Dgeql2 -// computes Q and L such that -// A = Q * L -// where Q is an m×m orthonormal matrix and L is a lower trapezoidal matrix. -// -// Q is represented as a product of elementary reflectors, -// Q = H_{k-1} * ... 
* H_1 * H_0 -// where k = min(m,n) and each H_i has the form -// H_i = I - tau[i] * v_i * v_i^T -// Vector v_i has v[m-k+i+1:m] = 0, v[m-k+i] = 1, and v[:m-k+i+1] is stored on -// exit in A[0:m-k+i-1, n-k+i]. -// -// tau must have length at least min(m,n), and Dgeql2 will panic otherwise. -// -// work is temporary memory storage and must have length at least n. -// -// Dgeql2 is an internal routine. It is exported for testing purposes. -func (impl Implementation) Dgeql2(m, n int, a []float64, lda int, tau, work []float64) { - switch { - case m < 0: - panic(mLT0) - case n < 0: - panic(nLT0) - case lda < max(1, n): - panic(badLdA) - } - - // Quick return if possible. - k := min(m, n) - if k == 0 { - return - } - - switch { - case len(a) < (m-1)*lda+n: - panic(shortA) - case len(tau) < k: - panic(shortTau) - case len(work) < n: - panic(shortWork) - } - - var aii float64 - for i := k - 1; i >= 0; i-- { - // Generate elementary reflector H_i to annihilate A[0:m-k+i-1, n-k+i]. - aii, tau[i] = impl.Dlarfg(m-k+i+1, a[(m-k+i)*lda+n-k+i], a[n-k+i:], lda) - - // Apply H_i to A[0:m-k+i, 0:n-k+i-1] from the left. - a[(m-k+i)*lda+n-k+i] = 1 - impl.Dlarf(blas.Left, m-k+i+1, n-k+i, a[n-k+i:], lda, tau[i], a, lda, work) - a[(m-k+i)*lda+n-k+i] = aii - } -} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dgeqp3.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dgeqp3.go deleted file mode 100644 index 6949da967..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/dgeqp3.go +++ /dev/null @@ -1,186 +0,0 @@ -// Copyright ©2017 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import ( - "gonum.org/v1/gonum/blas" - "gonum.org/v1/gonum/blas/blas64" -) - -// Dgeqp3 computes a QR factorization with column pivoting of the -// m×n matrix A: A*P = Q*R using Level 3 BLAS. -// -// The matrix Q is represented as a product of elementary reflectors -// Q = H_0 H_1 . . . H_{k-1}, where k = min(m,n). -// Each H_i has the form -// H_i = I - tau * v * v^T -// where tau and v are real vectors with v[0:i-1] = 0 and v[i] = 1; -// v[i:m] is stored on exit in A[i:m, i], and tau in tau[i]. -// -// jpvt specifies a column pivot to be applied to A. If -// jpvt[j] is at least zero, the jth column of A is permuted -// to the front of A*P (a leading column), if jpvt[j] is -1 -// the jth column of A is a free column. If jpvt[j] < -1, Dgeqp3 -// will panic. On return, jpvt holds the permutation that was -// applied; the jth column of A*P was the jpvt[j] column of A. -// jpvt must have length n or Dgeqp3 will panic. -// -// tau holds the scalar factors of the elementary reflectors. -// It must have length min(m, n), otherwise Dgeqp3 will panic. -// -// work must have length at least max(1,lwork), and lwork must be at least -// 3*n+1, otherwise Dgeqp3 will panic. For optimal performance lwork must -// be at least 2*n+(n+1)*nb, where nb is the optimal blocksize. On return, -// work[0] will contain the optimal value of lwork. -// -// If lwork == -1, instead of performing Dgeqp3, only the optimal value of lwork -// will be stored in work[0]. -// -// Dgeqp3 is an internal routine. It is exported for testing purposes. 
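Editorial aside: the doc comment above describes the lwork == -1 workspace-query convention that Dgeqp3 shares with the other blocked drivers deleted in this hunk, and it is easiest to see from the caller's side. The following is a minimal sketch only; the 4×3 matrix, its values, and the leading dimensions are illustrative assumptions, not anything taken from this change.

```go
// Sketch of the query-then-call idiom for the vendored Dgeqp3.
// All concrete sizes and values here are assumed for illustration.
package main

import (
	"fmt"

	"gonum.org/v1/gonum/lapack/gonum"
)

func main() {
	impl := gonum.Implementation{}
	m, n := 4, 3
	lda := n
	a := []float64{
		4, 1, 2,
		2, 3, 0,
		1, 2, 5,
		0, 1, 1,
	}
	// jpvt[j] == -1 marks column j as a free column.
	jpvt := make([]int, n)
	for j := range jpvt {
		jpvt[j] = -1
	}
	tau := make([]float64, n) // min(m,n) == n here.

	// First call with lwork == -1 only writes the optimal size to work[0].
	work := make([]float64, 1)
	impl.Dgeqp3(m, n, a, lda, jpvt, tau, work, -1)

	// Second call performs the pivoted QR factorization A*P = Q*R.
	work = make([]float64, int(work[0]))
	impl.Dgeqp3(m, n, a, lda, jpvt, tau, work, len(work))
	fmt.Println("column permutation:", jpvt)
}
```

On return, jpvt records which original column landed in each position of A*P, which is the property the rank-revealing callers of this routine depend on.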
-func (impl Implementation) Dgeqp3(m, n int, a []float64, lda int, jpvt []int, tau, work []float64, lwork int) { - const ( - inb = 1 - inbmin = 2 - ixover = 3 - ) - - minmn := min(m, n) - iws := 3*n + 1 - if minmn == 0 { - iws = 1 - } - switch { - case m < 0: - panic(mLT0) - case n < 0: - panic(nLT0) - case lda < max(1, n): - panic(badLdA) - case lwork < iws && lwork != -1: - panic(badLWork) - case len(work) < max(1, lwork): - panic(shortWork) - } - - // Quick return if possible. - if minmn == 0 { - work[0] = 1 - return - } - - nb := impl.Ilaenv(inb, "DGEQRF", " ", m, n, -1, -1) - if lwork == -1 { - work[0] = float64(2*n + (n+1)*nb) - return - } - - switch { - case len(a) < (m-1)*lda+n: - panic(shortA) - case len(jpvt) != n: - panic(badLenJpvt) - case len(tau) < minmn: - panic(shortTau) - } - - for _, v := range jpvt { - if v < -1 || n <= v { - panic(badJpvt) - } - } - - bi := blas64.Implementation() - - // Move initial columns up front. - var nfxd int - for j := 0; j < n; j++ { - if jpvt[j] == -1 { - jpvt[j] = j - continue - } - if j != nfxd { - bi.Dswap(m, a[j:], lda, a[nfxd:], lda) - jpvt[j], jpvt[nfxd] = jpvt[nfxd], j - } else { - jpvt[j] = j - } - nfxd++ - } - - // Factorize nfxd columns. - // - // Compute the QR factorization of nfxd columns and update remaining columns. - if nfxd > 0 { - na := min(m, nfxd) - impl.Dgeqrf(m, na, a, lda, tau, work, lwork) - iws = max(iws, int(work[0])) - if na < n { - impl.Dormqr(blas.Left, blas.Trans, m, n-na, na, a, lda, tau[:na], a[na:], lda, - work, lwork) - iws = max(iws, int(work[0])) - } - } - - if nfxd >= minmn { - work[0] = float64(iws) - return - } - - // Factorize free columns. - sm := m - nfxd - sn := n - nfxd - sminmn := minmn - nfxd - - // Determine the block size. - nb = impl.Ilaenv(inb, "DGEQRF", " ", sm, sn, -1, -1) - nbmin := 2 - nx := 0 - - if 1 < nb && nb < sminmn { - // Determine when to cross over from blocked to unblocked code. - nx = max(0, impl.Ilaenv(ixover, "DGEQRF", " ", sm, sn, -1, -1)) - - if nx < sminmn { - // Determine if workspace is large enough for blocked code. - minws := 2*sn + (sn+1)*nb - iws = max(iws, minws) - if lwork < minws { - // Not enough workspace to use optimal nb. Reduce - // nb and determine the minimum value of nb. - nb = (lwork - 2*sn) / (sn + 1) - nbmin = max(2, impl.Ilaenv(inbmin, "DGEQRF", " ", sm, sn, -1, -1)) - } - } - } - - // Initialize partial column norms. - // The first n elements of work store the exact column norms. - for j := nfxd; j < n; j++ { - work[j] = bi.Dnrm2(sm, a[nfxd*lda+j:], lda) - work[n+j] = work[j] - } - j := nfxd - if nbmin <= nb && nb < sminmn && nx < sminmn { - // Use blocked code initially. - - // Compute factorization. - var fjb int - for topbmn := minmn - nx; j < topbmn; j += fjb { - jb := min(nb, topbmn-j) - - // Factorize jb columns among columns j:n. - fjb = impl.Dlaqps(m, n-j, j, jb, a[j:], lda, jpvt[j:], tau[j:], - work[j:n], work[j+n:2*n], work[2*n:2*n+jb], work[2*n+jb:], jb) - } - } - - // Use unblocked code to factor the last or only block. - if j < minmn { - impl.Dlaqp2(m, n-j, j, a[j:], lda, jpvt[j:], tau[j:], - work[j:n], work[j+n:2*n], work[2*n:]) - } - - work[0] = float64(iws) -} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dgeqr2.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dgeqr2.go deleted file mode 100644 index 3e35d7e2f..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/dgeqr2.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright ©2015 The Gonum Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import "gonum.org/v1/gonum/blas" - -// Dgeqr2 computes a QR factorization of the m×n matrix A. -// -// In a QR factorization, Q is an m×m orthonormal matrix, and R is an -// upper triangular m×n matrix. -// -// A is modified to contain the information to construct Q and R. -// The upper triangle of a contains the matrix R. The lower triangular elements -// (not including the diagonal) contain the elementary reflectors. tau is modified -// to contain the reflector scales. tau must have length at least min(m,n), and -// this function will panic otherwise. -// -// The ith elementary reflector can be explicitly constructed by first extracting -// the -// v[j] = 0 j < i -// v[j] = 1 j == i -// v[j] = a[j*lda+i] j > i -// and computing H_i = I - tau[i] * v * v^T. -// -// The orthonormal matrix Q can be constructed from a product of these elementary -// reflectors, Q = H_0 * H_1 * ... * H_{k-1}, where k = min(m,n). -// -// work is temporary storage of length at least n and this function will panic otherwise. -// -// Dgeqr2 is an internal routine. It is exported for testing purposes. -func (impl Implementation) Dgeqr2(m, n int, a []float64, lda int, tau, work []float64) { - // TODO(btracey): This is oriented such that columns of a are eliminated. - // This likely could be re-arranged to take better advantage of row-major - // storage. - - switch { - case m < 0: - panic(mLT0) - case n < 0: - panic(nLT0) - case lda < max(1, n): - panic(badLdA) - case len(work) < n: - panic(shortWork) - } - - // Quick return if possible. - k := min(m, n) - if k == 0 { - return - } - - switch { - case len(a) < (m-1)*lda+n: - panic(shortA) - case len(tau) < k: - panic(shortTau) - } - - for i := 0; i < k; i++ { - // Generate elementary reflector H_i. - a[i*lda+i], tau[i] = impl.Dlarfg(m-i, a[i*lda+i], a[min((i+1), m-1)*lda+i:], lda) - if i < n-1 { - aii := a[i*lda+i] - a[i*lda+i] = 1 - impl.Dlarf(blas.Left, m-i, n-i-1, - a[i*lda+i:], lda, - tau[i], - a[i*lda+i+1:], lda, - work) - a[i*lda+i] = aii - } - } -} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dgeqrf.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dgeqrf.go deleted file mode 100644 index 300f8eea4..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/dgeqrf.go +++ /dev/null @@ -1,108 +0,0 @@ -// Copyright ©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import ( - "gonum.org/v1/gonum/blas" - "gonum.org/v1/gonum/lapack" -) - -// Dgeqrf computes the QR factorization of the m×n matrix A using a blocked -// algorithm. See the documentation for Dgeqr2 for a description of the -// parameters at entry and exit. -// -// work is temporary storage, and lwork specifies the usable memory length. -// The length of work must be at least max(1, lwork) and lwork must be -1 -// or at least n, otherwise this function will panic. -// Dgeqrf is a blocked QR factorization, but the block size is limited -// by the temporary space available. If lwork == -1, instead of performing Dgeqrf, -// the optimal work length will be stored into work[0]. -// -// tau must have length at least min(m,n), and this function will panic otherwise. 
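A related aside: Dgeqrf below leaves Q only in implicit form, as elementary reflectors stored beneath the diagonal of R. A hedged sketch of pairing it with Dorgqr (a routine outside this hunk) to materialize Q explicitly; the 3×3 matrix is an assumed example, not data from this change.

```go
// Sketch: factor A = Q*R with the vendored Dgeqrf, then overwrite the
// stored reflectors with an explicit Q using Dorgqr. Sizes are assumptions.
package main

import (
	"fmt"

	"gonum.org/v1/gonum/lapack/gonum"
)

func main() {
	impl := gonum.Implementation{}
	m, n := 3, 3
	lda := n
	a := []float64{
		2, -1, 0,
		-1, 2, -1,
		0, -1, 2,
	}
	k := n // min(m,n) reflectors.
	tau := make([]float64, k)

	// Workspace query, then the real factorization.
	work := make([]float64, 1)
	impl.Dgeqrf(m, n, a, lda, tau, work, -1)
	work = make([]float64, int(work[0]))
	impl.Dgeqrf(m, n, a, lda, tau, work, len(work))

	// a now holds R in its upper triangle and the reflectors below it;
	// Dorgqr replaces the reflector part with the explicit m×n Q.
	impl.Dorgqr(m, n, k, a, lda, tau, work, len(work))
	fmt.Println("Q (row-major):", a)
}
```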
-func (impl Implementation) Dgeqrf(m, n int, a []float64, lda int, tau, work []float64, lwork int) { - switch { - case m < 0: - panic(mLT0) - case n < 0: - panic(nLT0) - case lda < max(1, n): - panic(badLdA) - case lwork < max(1, n) && lwork != -1: - panic(badLWork) - case len(work) < max(1, lwork): - panic(shortWork) - } - - // Quick return if possible. - k := min(m, n) - if k == 0 { - work[0] = 1 - return - } - - // nb is the optimal blocksize, i.e. the number of columns transformed at a time. - nb := impl.Ilaenv(1, "DGEQRF", " ", m, n, -1, -1) - if lwork == -1 { - work[0] = float64(n * nb) - return - } - - if len(a) < (m-1)*lda+n { - panic(shortA) - } - if len(tau) < k { - panic(shortTau) - } - - nbmin := 2 // Minimal block size. - var nx int // Use unblocked (unless changed in the next for loop) - iws := n - // Only consider blocked if the suggested block size is > 1 and the - // number of rows or columns is sufficiently large. - if 1 < nb && nb < k { - // nx is the block size at which the code switches from blocked - // to unblocked. - nx = max(0, impl.Ilaenv(3, "DGEQRF", " ", m, n, -1, -1)) - if k > nx { - iws = n * nb - if lwork < iws { - // Not enough workspace to use the optimal block - // size. Get the minimum block size instead. - nb = lwork / n - nbmin = max(2, impl.Ilaenv(2, "DGEQRF", " ", m, n, -1, -1)) - } - } - } - - // Compute QR using a blocked algorithm. - var i int - if nbmin <= nb && nb < k && nx < k { - ldwork := nb - for i = 0; i < k-nx; i += nb { - ib := min(k-i, nb) - // Compute the QR factorization of the current block. - impl.Dgeqr2(m-i, ib, a[i*lda+i:], lda, tau[i:], work) - if i+ib < n { - // Form the triangular factor of the block reflector and apply H^T - // In Dlarft, work becomes the T matrix. - impl.Dlarft(lapack.Forward, lapack.ColumnWise, m-i, ib, - a[i*lda+i:], lda, - tau[i:], - work, ldwork) - impl.Dlarfb(blas.Left, blas.Trans, lapack.Forward, lapack.ColumnWise, - m-i, n-i-ib, ib, - a[i*lda+i:], lda, - work, ldwork, - a[i*lda+i+ib:], lda, - work[ib*ldwork:], ldwork) - } - } - } - // Call unblocked code on the remaining columns. - if i < k { - impl.Dgeqr2(m-i, n-i, a[i*lda+i:], lda, tau[i:], work) - } - work[0] = float64(iws) -} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dgerq2.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dgerq2.go deleted file mode 100644 index 60dac973a..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/dgerq2.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright ©2017 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import "gonum.org/v1/gonum/blas" - -// Dgerq2 computes an RQ factorization of the m×n matrix A, -// A = R * Q. -// On exit, if m <= n, the upper triangle of the subarray -// A[0:m, n-m:n] contains the m×m upper triangular matrix R. -// If m >= n, the elements on and above the (m-n)-th subdiagonal -// contain the m×n upper trapezoidal matrix R. -// The remaining elements, with tau, represent the -// orthogonal matrix Q as a product of min(m,n) elementary -// reflectors. -// -// The matrix Q is represented as a product of elementary reflectors -// Q = H_0 H_1 . . . H_{min(m,n)-1}. -// Each H(i) has the form -// H_i = I - tau_i * v * v^T -// where v is a vector with v[0:n-k+i-1] stored in A[m-k+i, 0:n-k+i-1], -// v[n-k+i:n] = 0 and v[n-k+i] = 1. -// -// tau must have length min(m,n) and work must have length m, otherwise -// Dgerq2 will panic. -// -// Dgerq2 is an internal routine. 
It is exported for testing purposes. -func (impl Implementation) Dgerq2(m, n int, a []float64, lda int, tau, work []float64) { - switch { - case m < 0: - panic(mLT0) - case n < 0: - panic(nLT0) - case lda < max(1, n): - panic(badLdA) - case len(work) < m: - panic(shortWork) - } - - // Quick return if possible. - k := min(m, n) - if k == 0 { - return - } - - switch { - case len(a) < (m-1)*lda+n: - panic(shortA) - case len(tau) < k: - panic(shortTau) - } - - for i := k - 1; i >= 0; i-- { - // Generate elementary reflector H[i] to annihilate - // A[m-k+i, 0:n-k+i-1]. - mki := m - k + i - nki := n - k + i - var aii float64 - aii, tau[i] = impl.Dlarfg(nki+1, a[mki*lda+nki], a[mki*lda:], 1) - - // Apply H[i] to A[0:m-k+i-1, 0:n-k+i] from the right. - a[mki*lda+nki] = 1 - impl.Dlarf(blas.Right, mki, nki+1, a[mki*lda:], 1, tau[i], a, lda, work) - a[mki*lda+nki] = aii - } -} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dgerqf.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dgerqf.go deleted file mode 100644 index 9b4aa050e..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/dgerqf.go +++ /dev/null @@ -1,129 +0,0 @@ -// Copyright ©2017 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import ( - "gonum.org/v1/gonum/blas" - "gonum.org/v1/gonum/lapack" -) - -// Dgerqf computes an RQ factorization of the m×n matrix A, -// A = R * Q. -// On exit, if m <= n, the upper triangle of the subarray -// A[0:m, n-m:n] contains the m×m upper triangular matrix R. -// If m >= n, the elements on and above the (m-n)-th subdiagonal -// contain the m×n upper trapezoidal matrix R. -// The remaining elements, with tau, represent the -// orthogonal matrix Q as a product of min(m,n) elementary -// reflectors. -// -// The matrix Q is represented as a product of elementary reflectors -// Q = H_0 H_1 . . . H_{min(m,n)-1}. -// Each H(i) has the form -// H_i = I - tau_i * v * v^T -// where v is a vector with v[0:n-k+i-1] stored in A[m-k+i, 0:n-k+i-1], -// v[n-k+i:n] = 0 and v[n-k+i] = 1. -// -// tau must have length min(m,n), work must have length max(1, lwork), -// and lwork must be -1 or at least max(1, m), otherwise Dgerqf will panic. -// On exit, work[0] will contain the optimal length for work. -// -// Dgerqf is an internal routine. It is exported for testing purposes. -func (impl Implementation) Dgerqf(m, n int, a []float64, lda int, tau, work []float64, lwork int) { - switch { - case m < 0: - panic(mLT0) - case n < 0: - panic(nLT0) - case lda < max(1, n): - panic(badLdA) - case lwork < max(1, m) && lwork != -1: - panic(badLWork) - case len(work) < max(1, lwork): - panic(shortWork) - } - - // Quick return if possible. - k := min(m, n) - if k == 0 { - work[0] = 1 - return - } - - nb := impl.Ilaenv(1, "DGERQF", " ", m, n, -1, -1) - if lwork == -1 { - work[0] = float64(m * nb) - return - } - - if len(a) < (m-1)*lda+n { - panic(shortA) - } - if len(tau) != k { - panic(badLenTau) - } - - nbmin := 2 - nx := 1 - iws := m - var ldwork int - if 1 < nb && nb < k { - // Determine when to cross over from blocked to unblocked code. - nx = max(0, impl.Ilaenv(3, "DGERQF", " ", m, n, -1, -1)) - if nx < k { - // Determine whether workspace is large enough for blocked code. - iws = m * nb - if lwork < iws { - // Not enough workspace to use optimal nb. Reduce - // nb and determine the minimum value of nb. 
- nb = lwork / m
- nbmin = max(2, impl.Ilaenv(2, "DGERQF", " ", m, n, -1, -1))
- }
- ldwork = nb
- }
- }
-
- var mu, nu int
- if nbmin <= nb && nb < k && nx < k {
- // Use blocked code initially.
- // The last kk rows are handled by the block method.
- ki := ((k - nx - 1) / nb) * nb
- kk := min(k, ki+nb)
-
- var i int
- for i = k - kk + ki; i >= k-kk; i -= nb {
- ib := min(k-i, nb)
-
- // Compute the RQ factorization of the current block
- // A[m-k+i:m-k+i+ib-1, 0:n-k+i+ib-1].
- impl.Dgerq2(ib, n-k+i+ib, a[(m-k+i)*lda:], lda, tau[i:], work)
- if m-k+i > 0 {
- // Form the triangular factor of the block reflector
- // H = H_{i+ib-1} . . . H_{i+1} H_i.
- impl.Dlarft(lapack.Backward, lapack.RowWise,
- n-k+i+ib, ib, a[(m-k+i)*lda:], lda, tau[i:],
- work, ldwork)
-
- // Apply H to A[0:m-k+i-1, 0:n-k+i+ib-1] from the right.
- impl.Dlarfb(blas.Right, blas.NoTrans, lapack.Backward, lapack.RowWise,
- m-k+i, n-k+i+ib, ib, a[(m-k+i)*lda:], lda,
- work, ldwork,
- a, lda,
- work[ib*ldwork:], ldwork)
- }
- }
- mu = m - k + i + nb
- nu = n - k + i + nb
- } else {
- mu = m
- nu = n
- }
-
- // Use unblocked code to factor the last or only block.
- if mu > 0 && nu > 0 {
- impl.Dgerq2(mu, nu, a, lda, tau, work)
- }
- work[0] = float64(iws)
-}
diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dgesvd.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dgesvd.go deleted file mode 100644 index 136f683e4..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/dgesvd.go +++ /dev/null @@ -1,1374 +0,0 @@
-// Copyright ©2015 The Gonum Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gonum
-
-import (
- "math"
-
- "gonum.org/v1/gonum/blas"
- "gonum.org/v1/gonum/blas/blas64"
- "gonum.org/v1/gonum/lapack"
-)
-
-const noSVDO = "dgesvd: not coded for overwrite"
-
-// Dgesvd computes the singular value decomposition of the input matrix A.
-//
-// The singular value decomposition is
-// A = U * Sigma * V^T
-// where Sigma is an m×n diagonal matrix containing the singular values of A,
-// U is an m×m orthogonal matrix and V is an n×n orthogonal matrix. The first
-// min(m,n) columns of U and V are the left and right singular vectors of A
-// respectively.
-//
-// jobU and jobVT are options for computing the singular vectors. The behavior
-// is as follows
-// jobU == lapack.SVDAll All m columns of U are returned in u
-// jobU == lapack.SVDStore The first min(m,n) columns are returned in u
-// jobU == lapack.SVDOverwrite The first min(m,n) columns of U are written into a
-// jobU == lapack.SVDNone The columns of U are not computed.
-// The behavior is the same for jobVT and the rows of V^T. At most one of jobU
-// and jobVT can equal lapack.SVDOverwrite, and Dgesvd will panic otherwise.
-//
-// On entry, a contains the data for the m×n matrix A. During the call to Dgesvd
-// the data is overwritten. On exit, A contains the appropriate singular vectors
-// if either job is lapack.SVDOverwrite.
-//
-// s is a slice of length at least min(m,n) and on exit contains the singular
-// values in decreasing order.
-//
-// u contains the left singular vectors on exit, stored column-wise. If
-// jobU == lapack.SVDAll, u is of size m×m. If jobU == lapack.SVDStore u is
-// of size m×min(m,n). If jobU == lapack.SVDOverwrite or lapack.SVDNone, u is
-// not used.
-//
-// vt contains the right singular vectors on exit, stored row-wise. If
-// jobVT == lapack.SVDAll, vt is of size n×n. If jobVT == lapack.SVDStore vt is
-// of size min(m,n)×n.
If jobVT == lapack.SVDOverwrite or lapack.SVDNone, vt is -// not used. -// -// work is a slice for storing temporary memory, and lwork is the usable size of -// the slice. lwork must be at least max(5*min(m,n), 3*min(m,n)+max(m,n)). -// If lwork == -1, instead of performing Dgesvd, the optimal work length will be -// stored into work[0]. Dgesvd will panic if the working memory has insufficient -// storage. -// -// Dgesvd returns whether the decomposition successfully completed. -func (impl Implementation) Dgesvd(jobU, jobVT lapack.SVDJob, m, n int, a []float64, lda int, s, u []float64, ldu int, vt []float64, ldvt int, work []float64, lwork int) (ok bool) { - if jobU == lapack.SVDOverwrite || jobVT == lapack.SVDOverwrite { - panic(noSVDO) - } - - wantua := jobU == lapack.SVDAll - wantus := jobU == lapack.SVDStore - wantuas := wantua || wantus - wantuo := jobU == lapack.SVDOverwrite - wantun := jobU == lapack.SVDNone - if !(wantua || wantus || wantuo || wantun) { - panic(badSVDJob) - } - - wantva := jobVT == lapack.SVDAll - wantvs := jobVT == lapack.SVDStore - wantvas := wantva || wantvs - wantvo := jobVT == lapack.SVDOverwrite - wantvn := jobVT == lapack.SVDNone - if !(wantva || wantvs || wantvo || wantvn) { - panic(badSVDJob) - } - - if wantuo && wantvo { - panic(bothSVDOver) - } - - minmn := min(m, n) - minwork := 1 - if minmn > 0 { - minwork = max(3*minmn+max(m, n), 5*minmn) - } - switch { - case m < 0: - panic(mLT0) - case n < 0: - panic(nLT0) - case lda < max(1, n): - panic(badLdA) - case ldu < 1, wantua && ldu < m, wantus && ldu < minmn: - panic(badLdU) - case ldvt < 1 || (wantvas && ldvt < n): - panic(badLdVT) - case lwork < minwork && lwork != -1: - panic(badLWork) - case len(work) < max(1, lwork): - panic(shortWork) - } - - // Quick return if possible. - if minmn == 0 { - work[0] = 1 - return true - } - - // Compute optimal workspace size for subroutines. 
- opts := string(jobU) + string(jobVT) - mnthr := impl.Ilaenv(6, "DGESVD", opts, m, n, 0, 0) - maxwrk := 1 - var wrkbl, bdspac int - if m >= n { - bdspac = 5 * n - impl.Dgeqrf(m, n, a, lda, nil, work, -1) - lwork_dgeqrf := int(work[0]) - - impl.Dorgqr(m, n, n, a, lda, nil, work, -1) - lwork_dorgqr_n := int(work[0]) - impl.Dorgqr(m, m, n, a, lda, nil, work, -1) - lwork_dorgqr_m := int(work[0]) - - impl.Dgebrd(n, n, a, lda, s, nil, nil, nil, work, -1) - lwork_dgebrd := int(work[0]) - - impl.Dorgbr(lapack.GeneratePT, n, n, n, a, lda, nil, work, -1) - lwork_dorgbr_p := int(work[0]) - - impl.Dorgbr(lapack.GenerateQ, n, n, n, a, lda, nil, work, -1) - lwork_dorgbr_q := int(work[0]) - - if m >= mnthr { - if wantun { - // Path 1 (m much larger than n, jobU == None) - maxwrk = n + lwork_dgeqrf - maxwrk = max(maxwrk, 3*n+lwork_dgebrd) - if wantvo || wantvas { - maxwrk = max(maxwrk, 3*n+lwork_dorgbr_p) - } - maxwrk = max(maxwrk, bdspac) - } else if wantuo && wantvn { - // Path 2 (m much larger than n, jobU == Overwrite, jobVT == None) - wrkbl = n + lwork_dgeqrf - wrkbl = max(wrkbl, n+lwork_dorgqr_n) - wrkbl = max(wrkbl, 3*n+lwork_dgebrd) - wrkbl = max(wrkbl, 3*n+lwork_dorgbr_q) - wrkbl = max(wrkbl, bdspac) - maxwrk = max(n*n+wrkbl, n*n+m*n+n) - } else if wantuo && wantvas { - // Path 3 (m much larger than n, jobU == Overwrite, jobVT == Store or All) - wrkbl = n + lwork_dgeqrf - wrkbl = max(wrkbl, n+lwork_dorgqr_n) - wrkbl = max(wrkbl, 3*n+lwork_dgebrd) - wrkbl = max(wrkbl, 3*n+lwork_dorgbr_q) - wrkbl = max(wrkbl, 3*n+lwork_dorgbr_p) - wrkbl = max(wrkbl, bdspac) - maxwrk = max(n*n+wrkbl, n*n+m*n+n) - } else if wantus && wantvn { - // Path 4 (m much larger than n, jobU == Store, jobVT == None) - wrkbl = n + lwork_dgeqrf - wrkbl = max(wrkbl, n+lwork_dorgqr_n) - wrkbl = max(wrkbl, 3*n+lwork_dgebrd) - wrkbl = max(wrkbl, 3*n+lwork_dorgbr_q) - wrkbl = max(wrkbl, bdspac) - maxwrk = n*n + wrkbl - } else if wantus && wantvo { - // Path 5 (m much larger than n, jobU == Store, jobVT == Overwrite) - wrkbl = n + lwork_dgeqrf - wrkbl = max(wrkbl, n+lwork_dorgqr_n) - wrkbl = max(wrkbl, 3*n+lwork_dgebrd) - wrkbl = max(wrkbl, 3*n+lwork_dorgbr_q) - wrkbl = max(wrkbl, 3*n+lwork_dorgbr_p) - wrkbl = max(wrkbl, bdspac) - maxwrk = 2*n*n + wrkbl - } else if wantus && wantvas { - // Path 6 (m much larger than n, jobU == Store, jobVT == Store or All) - wrkbl = n + lwork_dgeqrf - wrkbl = max(wrkbl, n+lwork_dorgqr_n) - wrkbl = max(wrkbl, 3*n+lwork_dgebrd) - wrkbl = max(wrkbl, 3*n+lwork_dorgbr_q) - wrkbl = max(wrkbl, 3*n+lwork_dorgbr_p) - wrkbl = max(wrkbl, bdspac) - maxwrk = n*n + wrkbl - } else if wantua && wantvn { - // Path 7 (m much larger than n, jobU == All, jobVT == None) - wrkbl = n + lwork_dgeqrf - wrkbl = max(wrkbl, n+lwork_dorgqr_m) - wrkbl = max(wrkbl, 3*n+lwork_dgebrd) - wrkbl = max(wrkbl, 3*n+lwork_dorgbr_q) - wrkbl = max(wrkbl, bdspac) - maxwrk = n*n + wrkbl - } else if wantua && wantvo { - // Path 8 (m much larger than n, jobU == All, jobVT == Overwrite) - wrkbl = n + lwork_dgeqrf - wrkbl = max(wrkbl, n+lwork_dorgqr_m) - wrkbl = max(wrkbl, 3*n+lwork_dgebrd) - wrkbl = max(wrkbl, 3*n+lwork_dorgbr_q) - wrkbl = max(wrkbl, 3*n+lwork_dorgbr_p) - wrkbl = max(wrkbl, bdspac) - maxwrk = 2*n*n + wrkbl - } else if wantua && wantvas { - // Path 9 (m much larger than n, jobU == All, jobVT == Store or All) - wrkbl = n + lwork_dgeqrf - wrkbl = max(wrkbl, n+lwork_dorgqr_m) - wrkbl = max(wrkbl, 3*n+lwork_dgebrd) - wrkbl = max(wrkbl, 3*n+lwork_dorgbr_q) - wrkbl = max(wrkbl, 3*n+lwork_dorgbr_p) - wrkbl = max(wrkbl, bdspac) - maxwrk = 
n*n + wrkbl - } - } else { - // Path 10 (m at least n, but not much larger) - impl.Dgebrd(m, n, a, lda, s, nil, nil, nil, work, -1) - lwork_dgebrd := int(work[0]) - maxwrk = 3*n + lwork_dgebrd - if wantus || wantuo { - impl.Dorgbr(lapack.GenerateQ, m, n, n, a, lda, nil, work, -1) - lwork_dorgbr_q = int(work[0]) - maxwrk = max(maxwrk, 3*n+lwork_dorgbr_q) - } - if wantua { - impl.Dorgbr(lapack.GenerateQ, m, m, n, a, lda, nil, work, -1) - lwork_dorgbr_q := int(work[0]) - maxwrk = max(maxwrk, 3*n+lwork_dorgbr_q) - } - if !wantvn { - maxwrk = max(maxwrk, 3*n+lwork_dorgbr_p) - } - maxwrk = max(maxwrk, bdspac) - } - } else { - bdspac = 5 * m - - impl.Dgelqf(m, n, a, lda, nil, work, -1) - lwork_dgelqf := int(work[0]) - - impl.Dorglq(n, n, m, nil, n, nil, work, -1) - lwork_dorglq_n := int(work[0]) - impl.Dorglq(m, n, m, a, lda, nil, work, -1) - lwork_dorglq_m := int(work[0]) - - impl.Dgebrd(m, m, a, lda, s, nil, nil, nil, work, -1) - lwork_dgebrd := int(work[0]) - - impl.Dorgbr(lapack.GeneratePT, m, m, m, a, n, nil, work, -1) - lwork_dorgbr_p := int(work[0]) - - impl.Dorgbr(lapack.GenerateQ, m, m, m, a, n, nil, work, -1) - lwork_dorgbr_q := int(work[0]) - - if n >= mnthr { - if wantvn { - // Path 1t (n much larger than m, jobVT == None) - maxwrk = m + lwork_dgelqf - maxwrk = max(maxwrk, 3*m+lwork_dgebrd) - if wantuo || wantuas { - maxwrk = max(maxwrk, 3*m+lwork_dorgbr_q) - } - maxwrk = max(maxwrk, bdspac) - } else if wantvo && wantun { - // Path 2t (n much larger than m, jobU == None, jobVT == Overwrite) - wrkbl = m + lwork_dgelqf - wrkbl = max(wrkbl, m+lwork_dorglq_m) - wrkbl = max(wrkbl, 3*m+lwork_dgebrd) - wrkbl = max(wrkbl, 3*m+lwork_dorgbr_p) - wrkbl = max(wrkbl, bdspac) - maxwrk = max(m*m+wrkbl, m*m+m*n+m) - } else if wantvo && wantuas { - // Path 3t (n much larger than m, jobU == Store or All, jobVT == Overwrite) - wrkbl = m + lwork_dgelqf - wrkbl = max(wrkbl, m+lwork_dorglq_m) - wrkbl = max(wrkbl, 3*m+lwork_dgebrd) - wrkbl = max(wrkbl, 3*m+lwork_dorgbr_p) - wrkbl = max(wrkbl, 3*m+lwork_dorgbr_q) - wrkbl = max(wrkbl, bdspac) - maxwrk = max(m*m+wrkbl, m*m+m*n+m) - } else if wantvs && wantun { - // Path 4t (n much larger than m, jobU == None, jobVT == Store) - wrkbl = m + lwork_dgelqf - wrkbl = max(wrkbl, m+lwork_dorglq_m) - wrkbl = max(wrkbl, 3*m+lwork_dgebrd) - wrkbl = max(wrkbl, 3*m+lwork_dorgbr_p) - wrkbl = max(wrkbl, bdspac) - maxwrk = m*m + wrkbl - } else if wantvs && wantuo { - // Path 5t (n much larger than m, jobU == Overwrite, jobVT == Store) - wrkbl = m + lwork_dgelqf - wrkbl = max(wrkbl, m+lwork_dorglq_m) - wrkbl = max(wrkbl, 3*m+lwork_dgebrd) - wrkbl = max(wrkbl, 3*m+lwork_dorgbr_p) - wrkbl = max(wrkbl, 3*m+lwork_dorgbr_q) - wrkbl = max(wrkbl, bdspac) - maxwrk = 2*m*m + wrkbl - } else if wantvs && wantuas { - // Path 6t (n much larger than m, jobU == Store or All, jobVT == Store) - wrkbl = m + lwork_dgelqf - wrkbl = max(wrkbl, m+lwork_dorglq_m) - wrkbl = max(wrkbl, 3*m+lwork_dgebrd) - wrkbl = max(wrkbl, 3*m+lwork_dorgbr_p) - wrkbl = max(wrkbl, 3*m+lwork_dorgbr_q) - wrkbl = max(wrkbl, bdspac) - maxwrk = m*m + wrkbl - } else if wantva && wantun { - // Path 7t (n much larger than m, jobU== None, jobVT == All) - wrkbl = m + lwork_dgelqf - wrkbl = max(wrkbl, m+lwork_dorglq_n) - wrkbl = max(wrkbl, 3*m+lwork_dgebrd) - wrkbl = max(wrkbl, 3*m+lwork_dorgbr_p) - wrkbl = max(wrkbl, bdspac) - maxwrk = m*m + wrkbl - } else if wantva && wantuo { - // Path 8t (n much larger than m, jobU == Overwrite, jobVT == All) - wrkbl = m + lwork_dgelqf - wrkbl = max(wrkbl, m+lwork_dorglq_n) - wrkbl = 
max(wrkbl, 3*m+lwork_dgebrd) - wrkbl = max(wrkbl, 3*m+lwork_dorgbr_p) - wrkbl = max(wrkbl, 3*m+lwork_dorgbr_q) - wrkbl = max(wrkbl, bdspac) - maxwrk = 2*m*m + wrkbl - } else if wantva && wantuas { - // Path 9t (n much larger than m, jobU == Store or All, jobVT == All) - wrkbl = m + lwork_dgelqf - wrkbl = max(wrkbl, m+lwork_dorglq_n) - wrkbl = max(wrkbl, 3*m+lwork_dgebrd) - wrkbl = max(wrkbl, 3*m+lwork_dorgbr_p) - wrkbl = max(wrkbl, 3*m+lwork_dorgbr_q) - wrkbl = max(wrkbl, bdspac) - maxwrk = m*m + wrkbl - } - } else { - // Path 10t (n greater than m, but not much larger) - impl.Dgebrd(m, n, a, lda, s, nil, nil, nil, work, -1) - lwork_dgebrd = int(work[0]) - maxwrk = 3*m + lwork_dgebrd - if wantvs || wantvo { - impl.Dorgbr(lapack.GeneratePT, m, n, m, a, n, nil, work, -1) - lwork_dorgbr_p = int(work[0]) - maxwrk = max(maxwrk, 3*m+lwork_dorgbr_p) - } - if wantva { - impl.Dorgbr(lapack.GeneratePT, n, n, m, a, n, nil, work, -1) - lwork_dorgbr_p = int(work[0]) - maxwrk = max(maxwrk, 3*m+lwork_dorgbr_p) - } - if !wantun { - maxwrk = max(maxwrk, 3*m+lwork_dorgbr_q) - } - maxwrk = max(maxwrk, bdspac) - } - } - - maxwrk = max(maxwrk, minwork) - if lwork == -1 { - work[0] = float64(maxwrk) - return true - } - - if len(a) < (m-1)*lda+n { - panic(shortA) - } - if len(s) < minmn { - panic(shortS) - } - if (len(u) < (m-1)*ldu+m && wantua) || (len(u) < (m-1)*ldu+minmn && wantus) { - panic(shortU) - } - if (len(vt) < (n-1)*ldvt+n && wantva) || (len(vt) < (minmn-1)*ldvt+n && wantvs) { - panic(shortVT) - } - - // Perform decomposition. - eps := dlamchE - smlnum := math.Sqrt(dlamchS) / eps - bignum := 1 / smlnum - - // Scale A if max element outside range [smlnum, bignum]. - anrm := impl.Dlange(lapack.MaxAbs, m, n, a, lda, nil) - var iscl bool - if anrm > 0 && anrm < smlnum { - iscl = true - impl.Dlascl(lapack.General, 0, 0, anrm, smlnum, m, n, a, lda) - } else if anrm > bignum { - iscl = true - impl.Dlascl(lapack.General, 0, 0, anrm, bignum, m, n, a, lda) - } - - bi := blas64.Implementation() - var ie int - if m >= n { - // If A has sufficiently more rows than columns, use the QR decomposition. - if m >= mnthr { - // m >> n - if wantun { - // Path 1. - itau := 0 - iwork := itau + n - - // Compute A = Q * R. - impl.Dgeqrf(m, n, a, lda, work[itau:], work[iwork:], lwork-iwork) - - // Zero out below R. - impl.Dlaset(blas.Lower, n-1, n-1, 0, 0, a[lda:], lda) - ie = 0 - itauq := ie + n - itaup := itauq + n - iwork = itaup + n - // Bidiagonalize R in A. - impl.Dgebrd(n, n, a, lda, s, work[ie:], work[itauq:], - work[itaup:], work[iwork:], lwork-iwork) - ncvt := 0 - if wantvo || wantvas { - impl.Dorgbr(lapack.GeneratePT, n, n, n, a, lda, work[itaup:], - work[iwork:], lwork-iwork) - ncvt = n - } - iwork = ie + n - - // Perform bidiagonal QR iteration computing right singular vectors - // of A in A if desired. - ok = impl.Dbdsqr(blas.Upper, n, ncvt, 0, 0, s, work[ie:], - a, lda, work, 1, work, 1, work[iwork:]) - - // If right singular vectors desired in VT, copy them there. - if wantvas { - impl.Dlacpy(blas.All, n, n, a, lda, vt, ldvt) - } - } else if wantuo && wantvn { - // Path 2 - panic(noSVDO) - } else if wantuo && wantvas { - // Path 3 - panic(noSVDO) - } else if wantus { - if wantvn { - // Path 4 - if lwork >= n*n+max(4*n, bdspac) { - // Sufficient workspace for a fast algorithm. - ir := 0 - var ldworkr int - if lwork >= wrkbl+lda*n { - ldworkr = lda - } else { - ldworkr = n - } - itau := ir + ldworkr*n - iwork := itau + n - // Compute A = Q * R. 
- impl.Dgeqrf(m, n, a, lda, work[itau:], work[iwork:], lwork-iwork)
-
- // Copy R to work[ir:], zeroing out below it.
- impl.Dlacpy(blas.Upper, n, n, a, lda, work[ir:], ldworkr)
- impl.Dlaset(blas.Lower, n-1, n-1, 0, 0, work[ir+ldworkr:], ldworkr)
-
- // Generate Q in A.
- impl.Dorgqr(m, n, n, a, lda, work[itau:], work[iwork:], lwork-iwork)
- ie := itau
- itauq := ie + n
- itaup := itauq + n
- iwork = itaup + n
-
- // Bidiagonalize R in work[ir:].
- impl.Dgebrd(n, n, work[ir:], ldworkr, s, work[ie:],
- work[itauq:], work[itaup:], work[iwork:], lwork-iwork)
-
- // Generate left vectors bidiagonalizing R in work[ir:].
- impl.Dorgbr(lapack.GenerateQ, n, n, n, work[ir:], ldworkr,
- work[itauq:], work[iwork:], lwork-iwork)
- iwork = ie + n
-
- // Perform bidiagonal QR iteration, computing left singular
- // vectors of R in work[ir:].
- ok = impl.Dbdsqr(blas.Upper, n, 0, n, 0, s, work[ie:], work, 1,
- work[ir:], ldworkr, work, 1, work[iwork:])
-
- // Multiply Q in A by left singular vectors of R in
- // work[ir:], storing result in U.
- bi.Dgemm(blas.NoTrans, blas.NoTrans, m, n, n, 1, a, lda,
- work[ir:], ldworkr, 0, u, ldu)
- } else {
- // Insufficient workspace for a fast algorithm.
- itau := 0
- iwork := itau + n
-
- // Compute A = Q*R, copying result to U.
- impl.Dgeqrf(m, n, a, lda, work[itau:], work[iwork:], lwork-iwork)
- impl.Dlacpy(blas.Lower, m, n, a, lda, u, ldu)
-
- // Generate Q in U.
- impl.Dorgqr(m, n, n, u, ldu, work[itau:], work[iwork:], lwork-iwork)
- ie := itau
- itauq := ie + n
- itaup := itauq + n
- iwork = itaup + n
-
- // Zero out below R in A.
- impl.Dlaset(blas.Lower, n-1, n-1, 0, 0, a[lda:], lda)
-
- // Bidiagonalize R in A.
- impl.Dgebrd(n, n, a, lda, s, work[ie:],
- work[itauq:], work[itaup:], work[iwork:], lwork-iwork)
-
- // Multiply Q in U by left vectors bidiagonalizing R.
- impl.Dormbr(lapack.ApplyQ, blas.Right, blas.NoTrans, m, n, n,
- a, lda, work[itauq:], u, ldu, work[iwork:], lwork-iwork)
- iwork = ie + n
-
- // Perform bidiagonal QR iteration, computing left
- // singular vectors of A in U.
- ok = impl.Dbdsqr(blas.Upper, n, 0, m, 0, s, work[ie:], work, 1,
- u, ldu, work, 1, work[iwork:])
- }
- } else if wantvo {
- // Path 5
- panic(noSVDO)
- } else if wantvas {
- // Path 6
- if lwork >= n*n+max(4*n, bdspac) {
- // Sufficient workspace for a fast algorithm.
- iu := 0
- var ldworku int
- if lwork >= wrkbl+lda*n {
- ldworku = lda
- } else {
- ldworku = n
- }
- itau := iu + ldworku*n
- iwork := itau + n
-
- // Compute A = Q * R.
- impl.Dgeqrf(m, n, a, lda, work[itau:], work[iwork:], lwork-iwork)
- // Copy R to work[iu:], zeroing out below it.
- impl.Dlacpy(blas.Upper, n, n, a, lda, work[iu:], ldworku)
- impl.Dlaset(blas.Lower, n-1, n-1, 0, 0, work[iu+ldworku:], ldworku)
-
- // Generate Q in A.
- impl.Dorgqr(m, n, n, a, lda, work[itau:], work[iwork:], lwork-iwork)
-
- ie := itau
- itauq := ie + n
- itaup := itauq + n
- iwork = itaup + n
-
- // Bidiagonalize R in work[iu:], copying result to VT.
- impl.Dgebrd(n, n, work[iu:], ldworku, s, work[ie:],
- work[itauq:], work[itaup:], work[iwork:], lwork-iwork)
- impl.Dlacpy(blas.Upper, n, n, work[iu:], ldworku, vt, ldvt)
-
- // Generate left bidiagonalizing vectors in work[iu:].
- impl.Dorgbr(lapack.GenerateQ, n, n, n, work[iu:], ldworku,
- work[itauq:], work[iwork:], lwork-iwork)
-
- // Generate right bidiagonalizing vectors in VT.
- impl.Dorgbr(lapack.GeneratePT, n, n, n, vt, ldvt, - work[itaup:], work[iwork:], lwork-iwork) - iwork = ie + n - - // Perform bidiagonal QR iteration, computing left singular - // vectors of R in work[iu:], and computing right singular - // vectors of R in VT. - ok = impl.Dbdsqr(blas.Upper, n, n, n, 0, s, work[ie:], - vt, ldvt, work[iu:], ldworku, work, 1, work[iwork:]) - - // Multiply Q in A by left singular vectors of R in - // work[iu:], storing result in U. - bi.Dgemm(blas.NoTrans, blas.NoTrans, m, n, n, 1, a, lda, - work[iu:], ldworku, 0, u, ldu) - } else { - // Insufficient workspace for a fast algorithm. - itau := 0 - iwork := itau + n - - // Compute A = Q * R, copying result to U. - impl.Dgeqrf(m, n, a, lda, work[itau:], work[iwork:], lwork-iwork) - impl.Dlacpy(blas.Lower, m, n, a, lda, u, ldu) - - // Generate Q in U. - impl.Dorgqr(m, n, n, u, ldu, work[itau:], work[iwork:], lwork-iwork) - - // Copy R to VT, zeroing out below it. - impl.Dlacpy(blas.Upper, n, n, a, lda, vt, ldvt) - impl.Dlaset(blas.Lower, n-1, n-1, 0, 0, vt[ldvt:], ldvt) - - ie := itau - itauq := ie + n - itaup := itauq + n - iwork = itaup + n - - // Bidiagonalize R in VT. - impl.Dgebrd(n, n, vt, ldvt, s, work[ie:], - work[itauq:], work[itaup:], work[iwork:], lwork-iwork) - - // Multiply Q in U by left bidiagonalizing vectors in VT. - impl.Dormbr(lapack.ApplyQ, blas.Right, blas.NoTrans, m, n, n, - vt, ldvt, work[itauq:], u, ldu, work[iwork:], lwork-iwork) - - // Generate right bidiagonalizing vectors in VT. - impl.Dorgbr(lapack.GeneratePT, n, n, n, vt, ldvt, - work[itaup:], work[iwork:], lwork-iwork) - iwork = ie + n - - // Perform bidiagonal QR iteration, computing left singular - // vectors of A in U and computing right singular vectors - // of A in VT. - ok = impl.Dbdsqr(blas.Upper, n, n, m, 0, s, work[ie:], - vt, ldvt, u, ldu, work, 1, work[iwork:]) - } - } - } else if wantua { - if wantvn { - // Path 7 - if lwork >= n*n+max(max(n+m, 4*n), bdspac) { - // Sufficient workspace for a fast algorithm. - ir := 0 - var ldworkr int - if lwork >= wrkbl+lda*n { - ldworkr = lda - } else { - ldworkr = n - } - itau := ir + ldworkr*n - iwork := itau + n - - // Compute A = Q*R, copying result to U. - impl.Dgeqrf(m, n, a, lda, work[itau:], work[iwork:], lwork-iwork) - impl.Dlacpy(blas.Lower, m, n, a, lda, u, ldu) - - // Copy R to work[ir:], zeroing out below it. - impl.Dlacpy(blas.Upper, n, n, a, lda, work[ir:], ldworkr) - impl.Dlaset(blas.Lower, n-1, n-1, 0, 0, work[ir+ldworkr:], ldworkr) - - // Generate Q in U. - impl.Dorgqr(m, m, n, u, ldu, work[itau:], work[iwork:], lwork-iwork) - ie := itau - itauq := ie + n - itaup := itauq + n - iwork = itaup + n - - // Bidiagonalize R in work[ir:]. - impl.Dgebrd(n, n, work[ir:], ldworkr, s, work[ie:], - work[itauq:], work[itaup:], work[iwork:], lwork-iwork) - - // Generate left bidiagonalizing vectors in work[ir:]. - impl.Dorgbr(lapack.GenerateQ, n, n, n, work[ir:], ldworkr, - work[itauq:], work[iwork:], lwork-iwork) - iwork = ie + n - - // Perform bidiagonal QR iteration, computing left singular - // vectors of R in work[ir:]. - ok = impl.Dbdsqr(blas.Upper, n, 0, n, 0, s, work[ie:], work, 1, - work[ir:], ldworkr, work, 1, work[iwork:]) - - // Multiply Q in U by left singular vectors of R in - // work[ir:], storing result in A. - bi.Dgemm(blas.NoTrans, blas.NoTrans, m, n, n, 1, u, ldu, - work[ir:], ldworkr, 0, a, lda) - - // Copy left singular vectors of A from A to U. - impl.Dlacpy(blas.All, m, n, a, lda, u, ldu) - } else { - // Insufficient workspace for a fast algorithm. 
- itau := 0 - iwork := itau + n - - // Compute A = Q*R, copying result to U. - impl.Dgeqrf(m, n, a, lda, work[itau:], work[iwork:], lwork-iwork) - impl.Dlacpy(blas.Lower, m, n, a, lda, u, ldu) - - // Generate Q in U. - impl.Dorgqr(m, m, n, u, ldu, work[itau:], work[iwork:], lwork-iwork) - ie := itau - itauq := ie + n - itaup := itauq + n - iwork = itaup + n - - // Zero out below R in A. - impl.Dlaset(blas.Lower, n-1, n-1, 0, 0, a[lda:], lda) - - // Bidiagonalize R in A. - impl.Dgebrd(n, n, a, lda, s, work[ie:], - work[itauq:], work[itaup:], work[iwork:], lwork-iwork) - - // Multiply Q in U by left bidiagonalizing vectors in A. - impl.Dormbr(lapack.ApplyQ, blas.Right, blas.NoTrans, m, n, n, - a, lda, work[itauq:], u, ldu, work[iwork:], lwork-iwork) - iwork = ie + n - - // Perform bidiagonal QR iteration, computing left - // singular vectors of A in U. - ok = impl.Dbdsqr(blas.Upper, n, 0, m, 0, s, work[ie:], - work, 1, u, ldu, work, 1, work[iwork:]) - } - } else if wantvo { - // Path 8. - panic(noSVDO) - } else if wantvas { - // Path 9. - if lwork >= n*n+max(max(n+m, 4*n), bdspac) { - // Sufficient workspace for a fast algorithm. - iu := 0 - var ldworku int - if lwork >= wrkbl+lda*n { - ldworku = lda - } else { - ldworku = n - } - itau := iu + ldworku*n - iwork := itau + n - - // Compute A = Q * R, copying result to U. - impl.Dgeqrf(m, n, a, lda, work[itau:], work[iwork:], lwork-iwork) - impl.Dlacpy(blas.Lower, m, n, a, lda, u, ldu) - - // Generate Q in U. - impl.Dorgqr(m, m, n, u, ldu, work[itau:], work[iwork:], lwork-iwork) - - // Copy R to work[iu:], zeroing out below it. - impl.Dlacpy(blas.Upper, n, n, a, lda, work[iu:], ldworku) - impl.Dlaset(blas.Lower, n-1, n-1, 0, 0, work[iu+ldworku:], ldworku) - - ie = itau - itauq := ie + n - itaup := itauq + n - iwork = itaup + n - - // Bidiagonalize R in work[iu:], copying result to VT. - impl.Dgebrd(n, n, work[iu:], ldworku, s, work[ie:], - work[itauq:], work[itaup:], work[iwork:], lwork-iwork) - impl.Dlacpy(blas.Upper, n, n, work[iu:], ldworku, vt, ldvt) - - // Generate left bidiagonalizing vectors in work[iu:]. - impl.Dorgbr(lapack.GenerateQ, n, n, n, work[iu:], ldworku, - work[itauq:], work[iwork:], lwork-iwork) - - // Generate right bidiagonalizing vectors in VT. - impl.Dorgbr(lapack.GeneratePT, n, n, n, vt, ldvt, - work[itaup:], work[iwork:], lwork-iwork) - iwork = ie + n - - // Perform bidiagonal QR iteration, computing left singular - // vectors of R in work[iu:] and computing right - // singular vectors of R in VT. - ok = impl.Dbdsqr(blas.Upper, n, n, n, 0, s, work[ie:], - vt, ldvt, work[iu:], ldworku, work, 1, work[iwork:]) - - // Multiply Q in U by left singular vectors of R in - // work[iu:], storing result in A. - bi.Dgemm(blas.NoTrans, blas.NoTrans, m, n, n, 1, - u, ldu, work[iu:], ldworku, 0, a, lda) - - // Copy left singular vectors of A from A to U. - impl.Dlacpy(blas.All, m, n, a, lda, u, ldu) - - /* - // Bidiagonalize R in VT. - impl.Dgebrd(n, n, vt, ldvt, s, work[ie:], - work[itauq:], work[itaup:], work[iwork:], lwork-iwork) - - // Multiply Q in U by left bidiagonalizing vectors in VT. - impl.Dormbr(lapack.ApplyQ, blas.Right, blas.NoTrans, - m, n, n, vt, ldvt, work[itauq:], u, ldu, work[iwork:], lwork-iwork) - - // Generate right bidiagonalizing vectors in VT. - impl.Dorgbr(lapack.GeneratePT, n, n, n, vt, ldvt, - work[itaup:], work[iwork:], lwork-iwork) - iwork = ie + n - - // Perform bidiagonal QR iteration, computing left singular - // vectors of A in U and computing right singular vectors - // of A in VT. 
-					ok = impl.Dbdsqr(blas.Upper, n, n, m, 0, s, work[ie:],
-						vt, ldvt, u, ldu, work, 1, work[iwork:])
-					*/
-				} else {
-					// Insufficient workspace for a fast algorithm.
-					itau := 0
-					iwork := itau + n
-
-					// Compute A = Q*R, copying result to U.
-					impl.Dgeqrf(m, n, a, lda, work[itau:], work[iwork:], lwork-iwork)
-					impl.Dlacpy(blas.Lower, m, n, a, lda, u, ldu)
-
-					// Generate Q in U.
-					impl.Dorgqr(m, m, n, u, ldu, work[itau:], work[iwork:], lwork-iwork)
-
-					// Copy R from A to VT, zeroing out below it.
-					impl.Dlacpy(blas.Upper, n, n, a, lda, vt, ldvt)
-					if n > 1 {
-						impl.Dlaset(blas.Lower, n-1, n-1, 0, 0, vt[ldvt:], ldvt)
-					}
-
-					ie := itau
-					itauq := ie + n
-					itaup := itauq + n
-					iwork = itaup + n
-
-					// Bidiagonalize R in VT.
-					impl.Dgebrd(n, n, vt, ldvt, s, work[ie:],
-						work[itauq:], work[itaup:], work[iwork:], lwork-iwork)
-
-					// Multiply Q in U by left bidiagonalizing vectors in VT.
-					impl.Dormbr(lapack.ApplyQ, blas.Right, blas.NoTrans,
-						m, n, n, vt, ldvt, work[itauq:], u, ldu, work[iwork:], lwork-iwork)
-
-					// Generate right bidiagonalizing vectors in VT.
-					impl.Dorgbr(lapack.GeneratePT, n, n, n, vt, ldvt,
-						work[itaup:], work[iwork:], lwork-iwork)
-					iwork = ie + n
-
-					// Perform bidiagonal QR iteration, computing left singular
-					// vectors of A in U and computing right singular vectors
-					// of A in VT.
-					ok = impl.Dbdsqr(blas.Upper, n, n, m, 0, s, work[ie:],
-						vt, ldvt, u, ldu, work, 1, work[iwork:])
-				}
-			}
-		}
-	} else {
-		// Path 10.
-		// M at least N, but not much larger.
-		ie = 0
-		itauq := ie + n
-		itaup := itauq + n
-		iwork := itaup + n
-
-		// Bidiagonalize A.
-		impl.Dgebrd(m, n, a, lda, s, work[ie:], work[itauq:],
-			work[itaup:], work[iwork:], lwork-iwork)
-		if wantuas {
-			// Left singular vectors are desired in U. Copy result to U and
-			// generate left bidiagonalizing vectors in U.
-			impl.Dlacpy(blas.Lower, m, n, a, lda, u, ldu)
-			var ncu int
-			if wantus {
-				ncu = n
-			}
-			if wantua {
-				ncu = m
-			}
-			impl.Dorgbr(lapack.GenerateQ, m, ncu, n, u, ldu, work[itauq:], work[iwork:], lwork-iwork)
-		}
-		if wantvas {
-			// Right singular vectors are desired in VT. Copy result to VT and
-			// generate right bidiagonalizing vectors in VT.
-			impl.Dlacpy(blas.Upper, n, n, a, lda, vt, ldvt)
-			impl.Dorgbr(lapack.GeneratePT, n, n, n, vt, ldvt, work[itaup:], work[iwork:], lwork-iwork)
-		}
-		if wantuo {
-			panic(noSVDO)
-		}
-		if wantvo {
-			panic(noSVDO)
-		}
-		iwork = ie + n
-		var nru, ncvt int
-		if wantuas || wantuo {
-			nru = m
-		}
-		if wantun {
-			nru = 0
-		}
-		if wantvas || wantvo {
-			ncvt = n
-		}
-		if wantvn {
-			ncvt = 0
-		}
-		if !wantuo && !wantvo {
-			// Perform bidiagonal QR iteration, if desired, computing left
-			// singular vectors in U and right singular vectors in VT.
-			ok = impl.Dbdsqr(blas.Upper, n, ncvt, nru, 0, s, work[ie:],
-				vt, ldvt, u, ldu, work, 1, work[iwork:])
-		} else {
-			// There will be two branches when the implementation is complete.
-			panic(noSVDO)
-		}
-	}
-} else {
-	// A has more columns than rows. If A has sufficiently more columns than
-	// rows, first reduce using the LQ decomposition.
-	if n >= mnthr {
-		// n >> m.
-		if wantvn {
-			// Path 1t.
-			itau := 0
-			iwork := itau + m
-
-			// Compute A = L*Q.
-			impl.Dgelqf(m, n, a, lda, work[itau:], work[iwork:], lwork-iwork)
-
-			// Zero out above L.
-			impl.Dlaset(blas.Upper, m-1, m-1, 0, 0, a[1:], lda)
-			ie := 0
-			itauq := ie + m
-			itaup := itauq + m
-			iwork = itaup + m
-
-			// Bidiagonalize L in A.
- impl.Dgebrd(m, m, a, lda, s, work[ie:itauq], - work[itauq:itaup], work[itaup:iwork], work[iwork:], lwork-iwork) - if wantuo || wantuas { - impl.Dorgbr(lapack.GenerateQ, m, m, m, a, lda, - work[itauq:], work[iwork:], lwork-iwork) - } - iwork = ie + m - nru := 0 - if wantuo || wantuas { - nru = m - } - - // Perform bidiagonal QR iteration, computing left singular vectors - // of A in A if desired. - ok = impl.Dbdsqr(blas.Upper, m, 0, nru, 0, s, work[ie:], - work, 1, a, lda, work, 1, work[iwork:]) - - // If left singular vectors desired in U, copy them there. - if wantuas { - impl.Dlacpy(blas.All, m, m, a, lda, u, ldu) - } - } else if wantvo && wantun { - // Path 2t. - panic(noSVDO) - } else if wantvo && wantuas { - // Path 3t. - panic(noSVDO) - } else if wantvs { - if wantun { - // Path 4t. - if lwork >= m*m+max(4*m, bdspac) { - // Sufficient workspace for a fast algorithm. - ir := 0 - var ldworkr int - if lwork >= wrkbl+lda*m { - ldworkr = lda - } else { - ldworkr = m - } - itau := ir + ldworkr*m - iwork := itau + m - - // Compute A = L*Q. - impl.Dgelqf(m, n, a, lda, work[itau:], work[iwork:], lwork-iwork) - - // Copy L to work[ir:], zeroing out above it. - impl.Dlacpy(blas.Lower, m, m, a, lda, work[ir:], ldworkr) - impl.Dlaset(blas.Upper, m-1, m-1, 0, 0, work[ir+1:], ldworkr) - - // Generate Q in A. - impl.Dorglq(m, n, m, a, lda, work[itau:], work[iwork:], lwork-iwork) - ie := itau - itauq := ie + m - itaup := itauq + m - iwork = itaup + m - - // Bidiagonalize L in work[ir:]. - impl.Dgebrd(m, m, work[ir:], ldworkr, s, work[ie:], - work[itauq:], work[itaup:], work[iwork:], lwork-iwork) - - // Generate right vectors bidiagonalizing L in work[ir:]. - impl.Dorgbr(lapack.GeneratePT, m, m, m, work[ir:], ldworkr, - work[itaup:], work[iwork:], lwork-iwork) - iwork = ie + m - - // Perform bidiagonal QR iteration, computing right singular - // vectors of L in work[ir:]. - ok = impl.Dbdsqr(blas.Upper, m, m, 0, 0, s, work[ie:], - work[ir:], ldworkr, work, 1, work, 1, work[iwork:]) - - // Multiply right singular vectors of L in work[ir:] by - // Q in A, storing result in VT. - bi.Dgemm(blas.NoTrans, blas.NoTrans, m, n, m, 1, - work[ir:], ldworkr, a, lda, 0, vt, ldvt) - } else { - // Insufficient workspace for a fast algorithm. - itau := 0 - iwork := itau + m - - // Compute A = L*Q. - impl.Dgelqf(m, n, a, lda, work[itau:], work[iwork:], lwork-iwork) - - // Copy result to VT. - impl.Dlacpy(blas.Upper, m, n, a, lda, vt, ldvt) - - // Generate Q in VT. - impl.Dorglq(m, n, m, vt, ldvt, work[itau:], work[iwork:], lwork-iwork) - ie := itau - itauq := ie + m - itaup := itauq + m - iwork = itaup + m - - // Zero out above L in A. - impl.Dlaset(blas.Upper, m-1, m-1, 0, 0, a[1:], lda) - - // Bidiagonalize L in A. - impl.Dgebrd(m, m, a, lda, s, work[ie:], - work[itauq:], work[itaup:], work[iwork:], lwork-iwork) - - // Multiply right vectors bidiagonalizing L by Q in VT. - impl.Dormbr(lapack.ApplyP, blas.Left, blas.Trans, m, n, m, - a, lda, work[itaup:], vt, ldvt, work[iwork:], lwork-iwork) - iwork = ie + m - - // Perform bidiagonal QR iteration, computing right - // singular vectors of A in VT. - ok = impl.Dbdsqr(blas.Upper, m, n, 0, 0, s, work[ie:], - vt, ldvt, work, 1, work, 1, work[iwork:]) - } - } else if wantuo { - // Path 5t. - panic(noSVDO) - } else if wantuas { - // Path 6t. - if lwork >= m*m+max(4*m, bdspac) { - // Sufficient workspace for a fast algorithm. 
-					iu := 0
-					var ldworku int
-					if lwork >= wrkbl+lda*m {
-						ldworku = lda
-					} else {
-						ldworku = m
-					}
-					itau := iu + ldworku*m
-					iwork := itau + m
-
-					// Compute A = L*Q.
-					impl.Dgelqf(m, n, a, lda, work[itau:], work[iwork:], lwork-iwork)
-
-					// Copy L to work[iu:], zeroing out above it.
-					impl.Dlacpy(blas.Lower, m, m, a, lda, work[iu:], ldworku)
-					impl.Dlaset(blas.Upper, m-1, m-1, 0, 0, work[iu+1:], ldworku)
-
-					// Generate Q in A.
-					impl.Dorglq(m, n, m, a, lda, work[itau:], work[iwork:], lwork-iwork)
-					ie := itau
-					itauq := ie + m
-					itaup := itauq + m
-					iwork = itaup + m
-
-					// Bidiagonalize L in work[iu:], copying result to U.
-					impl.Dgebrd(m, m, work[iu:], ldworku, s, work[ie:],
-						work[itauq:], work[itaup:], work[iwork:], lwork-iwork)
-					impl.Dlacpy(blas.Lower, m, m, work[iu:], ldworku, u, ldu)
-
-					// Generate right bidiagonalizing vectors in work[iu:].
-					impl.Dorgbr(lapack.GeneratePT, m, m, m, work[iu:], ldworku,
-						work[itaup:], work[iwork:], lwork-iwork)
-
-					// Generate left bidiagonalizing vectors in U.
-					impl.Dorgbr(lapack.GenerateQ, m, m, m, u, ldu, work[itauq:], work[iwork:], lwork-iwork)
-					iwork = ie + m
-
-					// Perform bidiagonal QR iteration, computing left singular
-					// vectors of L in U and computing right singular vectors of
-					// L in work[iu:].
-					ok = impl.Dbdsqr(blas.Upper, m, m, m, 0, s, work[ie:],
-						work[iu:], ldworku, u, ldu, work, 1, work[iwork:])
-
-					// Multiply right singular vectors of L in work[iu:] by
-					// Q in A, storing result in VT.
-					bi.Dgemm(blas.NoTrans, blas.NoTrans, m, n, m, 1,
-						work[iu:], ldworku, a, lda, 0, vt, ldvt)
-				} else {
-					// Insufficient workspace for a fast algorithm.
-					itau := 0
-					iwork := itau + m
-
-					// Compute A = L*Q, copying result to VT.
-					impl.Dgelqf(m, n, a, lda, work[itau:], work[iwork:], lwork-iwork)
-					impl.Dlacpy(blas.Upper, m, n, a, lda, vt, ldvt)
-
-					// Generate Q in VT.
-					impl.Dorglq(m, n, m, vt, ldvt, work[itau:], work[iwork:], lwork-iwork)
-
-					// Copy L to U, zeroing out above it.
-					impl.Dlacpy(blas.Lower, m, m, a, lda, u, ldu)
-					impl.Dlaset(blas.Upper, m-1, m-1, 0, 0, u[1:], ldu)
-
-					ie := itau
-					itauq := ie + m
-					itaup := itauq + m
-					iwork = itaup + m
-
-					// Bidiagonalize L in U.
-					impl.Dgebrd(m, m, u, ldu, s, work[ie:],
-						work[itauq:], work[itaup:], work[iwork:], lwork-iwork)
-
-					// Multiply right bidiagonalizing vectors in U by Q in VT.
-					impl.Dormbr(lapack.ApplyP, blas.Left, blas.Trans, m, n, m,
-						u, ldu, work[itaup:], vt, ldvt, work[iwork:], lwork-iwork)
-
-					// Generate left bidiagonalizing vectors in U.
-					impl.Dorgbr(lapack.GenerateQ, m, m, m, u, ldu, work[itauq:], work[iwork:], lwork-iwork)
-					iwork = ie + m
-
-					// Perform bidiagonal QR iteration, computing left singular
-					// vectors of A in U and computing right singular vectors
-					// of A in VT.
-					ok = impl.Dbdsqr(blas.Upper, m, n, m, 0, s, work[ie:], vt, ldvt,
-						u, ldu, work, 1, work[iwork:])
-				}
-			}
-		} else if wantva {
-			if wantun {
-				// Path 7t.
-				if lwork >= m*m+max(max(n+m, 4*m), bdspac) {
-					// Sufficient workspace for a fast algorithm.
-					ir := 0
-					var ldworkr int
-					if lwork >= wrkbl+lda*m {
-						ldworkr = lda
-					} else {
-						ldworkr = m
-					}
-					itau := ir + ldworkr*m
-					iwork := itau + m
-
-					// Compute A = L*Q, copying result to VT.
-					impl.Dgelqf(m, n, a, lda, work[itau:], work[iwork:], lwork-iwork)
-					impl.Dlacpy(blas.Upper, m, n, a, lda, vt, ldvt)
-
-					// Copy L to work[ir:], zeroing out above it.
-					impl.Dlacpy(blas.Lower, m, m, a, lda, work[ir:], ldworkr)
-					impl.Dlaset(blas.Upper, m-1, m-1, 0, 0, work[ir+1:], ldworkr)
-
-					// Generate Q in VT.
-					impl.Dorglq(n, n, m, vt, ldvt, work[itau:], work[iwork:], lwork-iwork)
-
-					ie := itau
-					itauq := ie + m
-					itaup := itauq + m
-					iwork = itaup + m
-
-					// Bidiagonalize L in work[ir:].
-					impl.Dgebrd(m, m, work[ir:], ldworkr, s, work[ie:],
-						work[itauq:], work[itaup:], work[iwork:], lwork-iwork)
-
-					// Generate right bidiagonalizing vectors in work[ir:].
-					impl.Dorgbr(lapack.GeneratePT, m, m, m, work[ir:], ldworkr,
-						work[itaup:], work[iwork:], lwork-iwork)
-					iwork = ie + m
-
-					// Perform bidiagonal QR iteration, computing right
-					// singular vectors of L in work[ir:].
-					ok = impl.Dbdsqr(blas.Upper, m, m, 0, 0, s, work[ie:],
-						work[ir:], ldworkr, work, 1, work, 1, work[iwork:])
-
-					// Multiply right singular vectors of L in work[ir:] by
-					// Q in VT, storing result in A.
-					bi.Dgemm(blas.NoTrans, blas.NoTrans, m, n, m, 1,
-						work[ir:], ldworkr, vt, ldvt, 0, a, lda)
-
-					// Copy right singular vectors of A from A to VT.
-					impl.Dlacpy(blas.All, m, n, a, lda, vt, ldvt)
-				} else {
-					// Insufficient workspace for a fast algorithm.
-					itau := 0
-					iwork := itau + m
-					// Compute A = L * Q, copying result to VT.
-					impl.Dgelqf(m, n, a, lda, work[itau:], work[iwork:], lwork-iwork)
-					impl.Dlacpy(blas.Upper, m, n, a, lda, vt, ldvt)
-
-					// Generate Q in VT.
-					impl.Dorglq(n, n, m, vt, ldvt, work[itau:], work[iwork:], lwork-iwork)
-
-					ie := itau
-					itauq := ie + m
-					itaup := itauq + m
-					iwork = itaup + m
-
-					// Zero out above L in A.
-					impl.Dlaset(blas.Upper, m-1, m-1, 0, 0, a[1:], lda)
-
-					// Bidiagonalize L in A.
-					impl.Dgebrd(m, m, a, lda, s, work[ie:], work[itauq:],
-						work[itaup:], work[iwork:], lwork-iwork)
-
-					// Multiply right bidiagonalizing vectors in A by Q in VT.
-					impl.Dormbr(lapack.ApplyP, blas.Left, blas.Trans, m, n, m,
-						a, lda, work[itaup:], vt, ldvt, work[iwork:], lwork-iwork)
-					iwork = ie + m
-
-					// Perform bidiagonal QR iteration, computing right singular
-					// vectors of A in VT.
-					ok = impl.Dbdsqr(blas.Upper, m, n, 0, 0, s, work[ie:],
-						vt, ldvt, work, 1, work, 1, work[iwork:])
-				}
-			} else if wantuo {
-				// Path 8t.
-				panic(noSVDO)
-			} else if wantuas {
-				// Path 9t.
-				if lwork >= m*m+max(max(m+n, 4*m), bdspac) {
-					// Sufficient workspace for a fast algorithm.
-					iu := 0
-
-					var ldworku int
-					if lwork >= wrkbl+lda*m {
-						ldworku = lda
-					} else {
-						ldworku = m
-					}
-					itau := iu + ldworku*m
-					iwork := itau + m
-
-					// Compute A = L*Q, copying result to VT.
-					impl.Dgelqf(m, n, a, lda, work[itau:], work[iwork:], lwork-iwork)
-					impl.Dlacpy(blas.Upper, m, n, a, lda, vt, ldvt)
-
-					// Generate Q in VT.
-					impl.Dorglq(n, n, m, vt, ldvt, work[itau:], work[iwork:], lwork-iwork)
-
-					// Copy L to work[iu:], zeroing out above it.
-					impl.Dlacpy(blas.Lower, m, m, a, lda, work[iu:], ldworku)
-					impl.Dlaset(blas.Upper, m-1, m-1, 0, 0, work[iu+1:], ldworku)
-					ie = itau
-					itauq := ie + m
-					itaup := itauq + m
-					iwork = itaup + m
-
-					// Bidiagonalize L in work[iu:], copying result to U.
-					impl.Dgebrd(m, m, work[iu:], ldworku, s, work[ie:],
-						work[itauq:], work[itaup:], work[iwork:], lwork-iwork)
-					impl.Dlacpy(blas.Lower, m, m, work[iu:], ldworku, u, ldu)
-
-					// Generate right bidiagonalizing vectors in work[iu:].
-					impl.Dorgbr(lapack.GeneratePT, m, m, m, work[iu:], ldworku,
-						work[itaup:], work[iwork:], lwork-iwork)
-
-					// Generate left bidiagonalizing vectors in U.
-					impl.Dorgbr(lapack.GenerateQ, m, m, m, u, ldu, work[itauq:], work[iwork:], lwork-iwork)
-					iwork = ie + m
-
-					// Perform bidiagonal QR iteration, computing left singular
-					// vectors of L in U and computing right singular vectors
-					// of L in work[iu:].
-					ok = impl.Dbdsqr(blas.Upper, m, m, m, 0, s, work[ie:],
-						work[iu:], ldworku, u, ldu, work, 1, work[iwork:])
-
-					// Multiply right singular vectors of L in work[iu:] by
-					// Q in VT, storing result in A.
-					bi.Dgemm(blas.NoTrans, blas.NoTrans, m, n, m, 1,
-						work[iu:], ldworku, vt, ldvt, 0, a, lda)
-
-					// Copy right singular vectors of A from A to VT.
-					impl.Dlacpy(blas.All, m, n, a, lda, vt, ldvt)
-				} else {
-					// Insufficient workspace for a fast algorithm.
-					itau := 0
-					iwork := itau + m
-
-					// Compute A = L * Q, copying result to VT.
-					impl.Dgelqf(m, n, a, lda, work[itau:], work[iwork:], lwork-iwork)
-					impl.Dlacpy(blas.Upper, m, n, a, lda, vt, ldvt)
-
-					// Generate Q in VT.
-					impl.Dorglq(n, n, m, vt, ldvt, work[itau:], work[iwork:], lwork-iwork)
-
-					// Copy L to U, zeroing out above it.
-					impl.Dlacpy(blas.Lower, m, m, a, lda, u, ldu)
-					impl.Dlaset(blas.Upper, m-1, m-1, 0, 0, u[1:], ldu)
-
-					ie = itau
-					itauq := ie + m
-					itaup := itauq + m
-					iwork = itaup + m
-
-					// Bidiagonalize L in U.
-					impl.Dgebrd(m, m, u, ldu, s, work[ie:], work[itauq:],
-						work[itaup:], work[iwork:], lwork-iwork)
-
-					// Multiply right bidiagonalizing vectors in U by Q in VT.
-					impl.Dormbr(lapack.ApplyP, blas.Left, blas.Trans, m, n, m,
-						u, ldu, work[itaup:], vt, ldvt, work[iwork:], lwork-iwork)
-
-					// Generate left bidiagonalizing vectors in U.
-					impl.Dorgbr(lapack.GenerateQ, m, m, m, u, ldu, work[itauq:], work[iwork:], lwork-iwork)
-					iwork = ie + m
-
-					// Perform bidiagonal QR iteration, computing left singular
-					// vectors of A in U and computing right singular vectors
-					// of A in VT.
-					ok = impl.Dbdsqr(blas.Upper, m, n, m, 0, s, work[ie:],
-						vt, ldvt, u, ldu, work, 1, work[iwork:])
-				}
-			}
-		}
-	} else {
-		// Path 10t.
-		// N at least M, but not much larger.
-		ie = 0
-		itauq := ie + m
-		itaup := itauq + m
-		iwork := itaup + m
-
-		// Bidiagonalize A.
-		impl.Dgebrd(m, n, a, lda, s, work[ie:], work[itauq:], work[itaup:], work[iwork:], lwork-iwork)
-		if wantuas {
-			// If left singular vectors desired in U, copy result to U and
-			// generate left bidiagonalizing vectors in U.
-			impl.Dlacpy(blas.Lower, m, m, a, lda, u, ldu)
-			impl.Dorgbr(lapack.GenerateQ, m, m, n, u, ldu, work[itauq:], work[iwork:], lwork-iwork)
-		}
-		if wantvas {
-			// If right singular vectors desired in VT, copy result to VT
-			// and generate right bidiagonalizing vectors in VT.
-			impl.Dlacpy(blas.Upper, m, n, a, lda, vt, ldvt)
-			var nrvt int
-			if wantva {
-				nrvt = n
-			} else {
-				nrvt = m
-			}
-			impl.Dorgbr(lapack.GeneratePT, nrvt, n, m, vt, ldvt, work[itaup:], work[iwork:], lwork-iwork)
-		}
-		if wantuo {
-			panic(noSVDO)
-		}
-		if wantvo {
-			panic(noSVDO)
-		}
-		iwork = ie + m
-		var nru, ncvt int
-		if wantuas || wantuo {
-			nru = m
-		}
-		if wantvas || wantvo {
-			ncvt = n
-		}
-		if !wantuo && !wantvo {
-			// Perform bidiagonal QR iteration, if desired, computing left
-			// singular vectors in U and computing right singular vectors in
-			// VT.
-			ok = impl.Dbdsqr(blas.Lower, m, ncvt, nru, 0, s, work[ie:],
-				vt, ldvt, u, ldu, work, 1, work[iwork:])
-		} else {
-			// There will be two branches when the implementation is complete.
-			panic(noSVDO)
-		}
-	}
-}
-if !ok {
-	if ie > 1 {
-		for i := 0; i < minmn-1; i++ {
-			work[i+1] = work[i+ie]
-		}
-	}
-	if ie < 1 {
-		for i := minmn - 2; i >= 0; i-- {
-			work[i+1] = work[i+ie]
-		}
-	}
-}
-// Undo scaling if necessary.
-	if iscl {
-		if anrm > bignum {
-			impl.Dlascl(lapack.General, 0, 0, bignum, anrm, 1, minmn, s, minmn)
-		}
-		if !ok && anrm > bignum {
-			impl.Dlascl(lapack.General, 0, 0, bignum, anrm, 1, minmn-1, work[1:], minmn)
-		}
-		if anrm < smlnum {
-			impl.Dlascl(lapack.General, 0, 0, smlnum, anrm, 1, minmn, s, minmn)
-		}
-		if !ok && anrm < smlnum {
-			impl.Dlascl(lapack.General, 0, 0, smlnum, anrm, 1, minmn-1, work[1:], minmn)
-		}
-	}
-	work[0] = float64(maxwrk)
-	return ok
-}
diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dgetf2.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dgetf2.go
deleted file mode 100644
index 63ad72e99..000000000
--- a/vendor/gonum.org/v1/gonum/lapack/gonum/dgetf2.go
+++ /dev/null
@@ -1,84 +0,0 @@
-// Copyright ©2015 The Gonum Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gonum
-
-import (
-	"math"
-
-	"gonum.org/v1/gonum/blas/blas64"
-)
-
-// Dgetf2 computes the LU decomposition of the m×n matrix A.
-// The LU decomposition is a factorization of A into
-//  A = P * L * U
-// where P is a permutation matrix, L is a unit lower triangular matrix, and
-// U is a (usually) non-unit upper triangular matrix. On exit, L and U are stored
-// in place into a.
-//
-// ipiv is a permutation vector. It indicates that row i of the matrix was
-// interchanged with ipiv[i]. ipiv must have length at least min(m,n), and
-// Dgetf2 will panic otherwise. ipiv is zero-indexed.
-//
-// Dgetf2 returns whether the matrix A is singular. The LU decomposition will
-// be computed regardless of the singularity of A, but division by zero
-// will occur if false is returned and the result is used to solve a
-// system of equations.
-//
-// Dgetf2 is an internal routine. It is exported for testing purposes.
-func (Implementation) Dgetf2(m, n int, a []float64, lda int, ipiv []int) (ok bool) {
-	mn := min(m, n)
-	switch {
-	case m < 0:
-		panic(mLT0)
-	case n < 0:
-		panic(nLT0)
-	case lda < max(1, n):
-		panic(badLdA)
-	}
-
-	// Quick return if possible.
-	if mn == 0 {
-		return true
-	}
-
-	switch {
-	case len(a) < (m-1)*lda+n:
-		panic(shortA)
-	case len(ipiv) != mn:
-		panic(badLenIpiv)
-	}
-
-	bi := blas64.Implementation()
-
-	sfmin := dlamchS
-	ok = true
-	for j := 0; j < mn; j++ {
-		// Find a pivot and test for singularity.
-		jp := j + bi.Idamax(m-j, a[j*lda+j:], lda)
-		ipiv[j] = jp
-		if a[jp*lda+j] == 0 {
-			ok = false
-		} else {
-			// Swap the rows if necessary.
-			if jp != j {
-				bi.Dswap(n, a[j*lda:], 1, a[jp*lda:], 1)
-			}
-			if j < m-1 {
-				aj := a[j*lda+j]
-				if math.Abs(aj) >= sfmin {
-					bi.Dscal(m-j-1, 1/aj, a[(j+1)*lda+j:], lda)
-				} else {
-					for i := 0; i < m-j-1; i++ {
-						a[(j+1+i)*lda+j] = a[(j+1+i)*lda+j] / a[lda*j+j]
-					}
-				}
-			}
-		}
-		if j < mn-1 {
-			bi.Dger(m-j-1, n-j-1, -1, a[(j+1)*lda+j:], lda, a[j*lda+j+1:], 1, a[(j+1)*lda+j+1:], lda)
-		}
-	}
-	return ok
-}
diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dgetrf.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dgetrf.go
deleted file mode 100644
index ad01e71e4..000000000
--- a/vendor/gonum.org/v1/gonum/lapack/gonum/dgetrf.go
+++ /dev/null
@@ -1,85 +0,0 @@
-// Copyright ©2015 The Gonum Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gonum
-
-import (
-	"gonum.org/v1/gonum/blas"
-	"gonum.org/v1/gonum/blas/blas64"
-)
-
-// Dgetrf computes the LU decomposition of the m×n matrix A.
-// The LU decomposition is a factorization of A into
-//  A = P * L * U
-// where P is a permutation matrix, L is a unit lower triangular matrix, and
-// U is a (usually) non-unit upper triangular matrix. On exit, L and U are stored
-// in place into a.
-//
-// ipiv is a permutation vector. It indicates that row i of the matrix was
-// interchanged with ipiv[i]. ipiv must have length at least min(m,n), and
-// Dgetrf will panic otherwise. ipiv is zero-indexed.
-//
-// Dgetrf is the blocked version of the algorithm.
-//
-// Dgetrf returns whether the matrix A is singular. The LU decomposition will
-// be computed regardless of the singularity of A, but division by zero
-// will occur if false is returned and the result is used to solve a
-// system of equations.
-func (impl Implementation) Dgetrf(m, n int, a []float64, lda int, ipiv []int) (ok bool) {
-	mn := min(m, n)
-	switch {
-	case m < 0:
-		panic(mLT0)
-	case n < 0:
-		panic(nLT0)
-	case lda < max(1, n):
-		panic(badLdA)
-	}
-
-	// Quick return if possible.
-	if mn == 0 {
-		return true
-	}
-
-	switch {
-	case len(a) < (m-1)*lda+n:
-		panic(shortA)
-	case len(ipiv) != mn:
-		panic(badLenIpiv)
-	}
-
-	bi := blas64.Implementation()
-
-	nb := impl.Ilaenv(1, "DGETRF", " ", m, n, -1, -1)
-	if nb <= 1 || mn <= nb {
-		// Use the unblocked algorithm.
-		return impl.Dgetf2(m, n, a, lda, ipiv)
-	}
-	ok = true
-	for j := 0; j < mn; j += nb {
-		jb := min(mn-j, nb)
-		blockOk := impl.Dgetf2(m-j, jb, a[j*lda+j:], lda, ipiv[j:j+jb])
-		if !blockOk {
-			ok = false
-		}
-		for i := j; i <= min(m-1, j+jb-1); i++ {
-			ipiv[i] = j + ipiv[i]
-		}
-		impl.Dlaswp(j, a, lda, j, j+jb-1, ipiv[:j+jb], 1)
-		if j+jb < n {
-			impl.Dlaswp(n-j-jb, a[j+jb:], lda, j, j+jb-1, ipiv[:j+jb], 1)
-			bi.Dtrsm(blas.Left, blas.Lower, blas.NoTrans, blas.Unit,
-				jb, n-j-jb, 1,
-				a[j*lda+j:], lda,
-				a[j*lda+j+jb:], lda)
-			if j+jb < m {
-				bi.Dgemm(blas.NoTrans, blas.NoTrans, m-j-jb, n-j-jb, jb, -1,
-					a[(j+jb)*lda+j:], lda,
-					a[j*lda+j+jb:], lda,
-					1, a[(j+jb)*lda+j+jb:], lda)
-			}
-		}
-	}
-	return ok
-}
diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dgetri.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dgetri.go
deleted file mode 100644
index b2f2ae46b..000000000
--- a/vendor/gonum.org/v1/gonum/lapack/gonum/dgetri.go
+++ /dev/null
@@ -1,116 +0,0 @@
-// Copyright ©2015 The Gonum Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gonum
-
-import (
-	"gonum.org/v1/gonum/blas"
-	"gonum.org/v1/gonum/blas/blas64"
-)
-
-// Dgetri computes the inverse of the matrix A using the LU factorization computed
-// by Dgetrf. On entry, a contains the PLU decomposition of A as computed by
-// Dgetrf and on exit contains the inverse of the original matrix.
-//
-// Dgetri will not perform the inversion if the matrix is singular, and returns
-// a boolean indicating whether the inversion was successful.
-//
-// work is temporary storage, and lwork specifies the usable memory length.
-// At minimum, lwork >= n and this function will panic otherwise.
-// Dgetri is a blocked inversion, but the block size is limited
-// by the temporary space available. If lwork == -1, instead of performing Dgetri,
-// the optimal work length will be stored into work[0].
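The lwork == -1 query convention above is shared by all blocked routines in this package. A minimal sketch of driving it, assuming a, lda, and ipiv already hold the output of a successful Dgetrf call:

	// Query the optimal workspace size; no inversion is performed.
	query := make([]float64, 1)
	impl.Dgetri(n, a, lda, ipiv, query, -1)
	lwork := int(query[0])
	// Allocate the workspace and invert A in place in a.
	work := make([]float64, lwork)
	if ok := impl.Dgetri(n, a, lda, ipiv, work, lwork); !ok {
		// A is singular; no inverse was computed.
	}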
-func (impl Implementation) Dgetri(n int, a []float64, lda int, ipiv []int, work []float64, lwork int) (ok bool) {
-	iws := max(1, n)
-	switch {
-	case n < 0:
-		panic(nLT0)
-	case lda < max(1, n):
-		panic(badLdA)
-	case lwork < iws && lwork != -1:
-		panic(badLWork)
-	case len(work) < max(1, lwork):
-		panic(shortWork)
-	}
-
-	if n == 0 {
-		work[0] = 1
-		return true
-	}
-
-	nb := impl.Ilaenv(1, "DGETRI", " ", n, -1, -1, -1)
-	if lwork == -1 {
-		work[0] = float64(n * nb)
-		return true
-	}
-
-	switch {
-	case len(a) < (n-1)*lda+n:
-		panic(shortA)
-	case len(ipiv) != n:
-		panic(badLenIpiv)
-	}
-
-	// Form inv(U).
-	ok = impl.Dtrtri(blas.Upper, blas.NonUnit, n, a, lda)
-	if !ok {
-		return false
-	}
-
-	nbmin := 2
-	if 1 < nb && nb < n {
-		iws = max(n*nb, 1)
-		if lwork < iws {
-			nb = lwork / n
-			nbmin = max(2, impl.Ilaenv(2, "DGETRI", " ", n, -1, -1, -1))
-		}
-	}
-	ldwork := nb
-
-	bi := blas64.Implementation()
-	// Solve the equation inv(A)*L = inv(U) for inv(A).
-	// TODO(btracey): Replace this with a more row-major oriented algorithm.
-	if nb < nbmin || n <= nb {
-		// Unblocked code.
-		for j := n - 1; j >= 0; j-- {
-			for i := j + 1; i < n; i++ {
-				// Copy current column of L to work and replace with zeros.
-				work[i] = a[i*lda+j]
-				a[i*lda+j] = 0
-			}
-			// Compute current column of inv(A).
-			if j < n-1 {
-				bi.Dgemv(blas.NoTrans, n, n-j-1, -1, a[(j+1):], lda, work[(j+1):], 1, 1, a[j:], lda)
-			}
-		}
-	} else {
-		// Blocked code.
-		nn := ((n - 1) / nb) * nb
-		for j := nn; j >= 0; j -= nb {
-			jb := min(nb, n-j)
-			// Copy current block column of L to work and replace
-			// with zeros.
-			for jj := j; jj < j+jb; jj++ {
-				for i := jj + 1; i < n; i++ {
-					work[i*ldwork+(jj-j)] = a[i*lda+jj]
-					a[i*lda+jj] = 0
-				}
-			}
-			// Compute current block column of inv(A).
-			if j+jb < n {
-				bi.Dgemm(blas.NoTrans, blas.NoTrans, n, jb, n-j-jb, -1, a[(j+jb):], lda, work[(j+jb)*ldwork:], ldwork, 1, a[j:], lda)
-			}
-			bi.Dtrsm(blas.Right, blas.Lower, blas.NoTrans, blas.Unit, n, jb, 1, work[j*ldwork:], ldwork, a[j:], lda)
-		}
-	}
-	// Apply column interchanges.
-	for j := n - 2; j >= 0; j-- {
-		jp := ipiv[j]
-		if jp != j {
-			bi.Dswap(n, a[j:], lda, a[jp:], lda)
-		}
-	}
-	work[0] = float64(iws)
-	return true
-}
diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dgetrs.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dgetrs.go
deleted file mode 100644
index ecc20d7c9..000000000
--- a/vendor/gonum.org/v1/gonum/lapack/gonum/dgetrs.go
+++ /dev/null
@@ -1,72 +0,0 @@
-// Copyright ©2015 The Gonum Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gonum
-
-import (
-	"gonum.org/v1/gonum/blas"
-	"gonum.org/v1/gonum/blas/blas64"
-)
-
-// Dgetrs solves a system of equations using an LU factorization.
-// The system of equations solved is
-//  A * X = B   if trans == blas.NoTrans
-//  A^T * X = B if trans == blas.Trans
-// A is a general n×n matrix with stride lda. B is a general matrix of size n×nrhs.
-//
-// On entry b contains the elements of the matrix B. On exit, b contains the
-// elements of X, the solution to the system of equations.
-//
-// a and ipiv contain the LU factorization of A and the permutation indices as
-// computed by Dgetrf. ipiv is zero-indexed.
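Together with Dgetrf above, this gives the standard two-step dense solve. A minimal sketch, assuming row-major A (n×n, stride lda) and B (n×nrhs, stride ldb); only calls shown in this file are used:

	// Factorize A = P*L*U in place; ipiv records the row interchanges.
	ipiv := make([]int, n)
	if ok := impl.Dgetrf(n, n, a, lda, ipiv); !ok {
		// A is singular to working precision; the solve below would divide by zero.
	}
	// Overwrite b with X, the solution of A * X = B.
	impl.Dgetrs(blas.NoTrans, n, nrhs, a, lda, ipiv, b, ldb)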
-func (impl Implementation) Dgetrs(trans blas.Transpose, n, nrhs int, a []float64, lda int, ipiv []int, b []float64, ldb int) { - switch { - case trans != blas.NoTrans && trans != blas.Trans && trans != blas.ConjTrans: - panic(badTrans) - case n < 0: - panic(nLT0) - case nrhs < 0: - panic(nrhsLT0) - case lda < max(1, n): - panic(badLdA) - case ldb < max(1, nrhs): - panic(badLdB) - } - - // Quick return if possible. - if n == 0 || nrhs == 0 { - return - } - - switch { - case len(a) < (n-1)*lda+n: - panic(shortA) - case len(b) < (n-1)*ldb+nrhs: - panic(shortB) - case len(ipiv) != n: - panic(badLenIpiv) - } - - bi := blas64.Implementation() - - if trans == blas.NoTrans { - // Solve A * X = B. - impl.Dlaswp(nrhs, b, ldb, 0, n-1, ipiv, 1) - // Solve L * X = B, updating b. - bi.Dtrsm(blas.Left, blas.Lower, blas.NoTrans, blas.Unit, - n, nrhs, 1, a, lda, b, ldb) - // Solve U * X = B, updating b. - bi.Dtrsm(blas.Left, blas.Upper, blas.NoTrans, blas.NonUnit, - n, nrhs, 1, a, lda, b, ldb) - return - } - // Solve A^T * X = B. - // Solve U^T * X = B, updating b. - bi.Dtrsm(blas.Left, blas.Upper, blas.Trans, blas.NonUnit, - n, nrhs, 1, a, lda, b, ldb) - // Solve L^T * X = B, updating b. - bi.Dtrsm(blas.Left, blas.Lower, blas.Trans, blas.Unit, - n, nrhs, 1, a, lda, b, ldb) - impl.Dlaswp(nrhs, b, ldb, 0, n-1, ipiv, -1) -} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dggsvd3.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dggsvd3.go deleted file mode 100644 index ac234dce3..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/dggsvd3.go +++ /dev/null @@ -1,242 +0,0 @@ -// Copyright ©2017 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import ( - "math" - - "gonum.org/v1/gonum/blas/blas64" - "gonum.org/v1/gonum/lapack" -) - -// Dggsvd3 computes the generalized singular value decomposition (GSVD) -// of an m×n matrix A and p×n matrix B: -// U^T*A*Q = D1*[ 0 R ] -// -// V^T*B*Q = D2*[ 0 R ] -// where U, V and Q are orthogonal matrices. -// -// Dggsvd3 returns k and l, the dimensions of the sub-blocks. k+l -// is the effective numerical rank of the (m+p)×n matrix [ A^T B^T ]^T. -// R is a (k+l)×(k+l) nonsingular upper triangular matrix, D1 and -// D2 are m×(k+l) and p×(k+l) diagonal matrices and of the following -// structures, respectively: -// -// If m-k-l >= 0, -// -// k l -// D1 = k [ I 0 ] -// l [ 0 C ] -// m-k-l [ 0 0 ] -// -// k l -// D2 = l [ 0 S ] -// p-l [ 0 0 ] -// -// n-k-l k l -// [ 0 R ] = k [ 0 R11 R12 ] k -// l [ 0 0 R22 ] l -// -// where -// -// C = diag( alpha_k, ... , alpha_{k+l} ), -// S = diag( beta_k, ... , beta_{k+l} ), -// C^2 + S^2 = I. -// -// R is stored in -// A[0:k+l, n-k-l:n] -// on exit. -// -// If m-k-l < 0, -// -// k m-k k+l-m -// D1 = k [ I 0 0 ] -// m-k [ 0 C 0 ] -// -// k m-k k+l-m -// D2 = m-k [ 0 S 0 ] -// k+l-m [ 0 0 I ] -// p-l [ 0 0 0 ] -// -// n-k-l k m-k k+l-m -// [ 0 R ] = k [ 0 R11 R12 R13 ] -// m-k [ 0 0 R22 R23 ] -// k+l-m [ 0 0 0 R33 ] -// -// where -// C = diag( alpha_k, ... , alpha_m ), -// S = diag( beta_k, ... , beta_m ), -// C^2 + S^2 = I. -// -// R = [ R11 R12 R13 ] is stored in A[1:m, n-k-l+1:n] -// [ 0 R22 R23 ] -// and R33 is stored in -// B[m-k:l, n+m-k-l:n] on exit. -// -// Dggsvd3 computes C, S, R, and optionally the orthogonal transformation -// matrices U, V and Q. -// -// jobU, jobV and jobQ are options for computing the orthogonal matrices. 
The behavior -// is as follows -// jobU == lapack.GSVDU Compute orthogonal matrix U -// jobU == lapack.GSVDNone Do not compute orthogonal matrix. -// The behavior is the same for jobV and jobQ with the exception that instead of -// lapack.GSVDU these accept lapack.GSVDV and lapack.GSVDQ respectively. -// The matrices U, V and Q must be m×m, p×p and n×n respectively unless the -// relevant job parameter is lapack.GSVDNone. -// -// alpha and beta must have length n or Dggsvd3 will panic. On exit, alpha and -// beta contain the generalized singular value pairs of A and B -// alpha[0:k] = 1, -// beta[0:k] = 0, -// if m-k-l >= 0, -// alpha[k:k+l] = diag(C), -// beta[k:k+l] = diag(S), -// if m-k-l < 0, -// alpha[k:m]= C, alpha[m:k+l]= 0 -// beta[k:m] = S, beta[m:k+l] = 1. -// if k+l < n, -// alpha[k+l:n] = 0 and -// beta[k+l:n] = 0. -// -// On exit, iwork contains the permutation required to sort alpha descending. -// -// iwork must have length n, work must have length at least max(1, lwork), and -// lwork must be -1 or greater than n, otherwise Dggsvd3 will panic. If -// lwork is -1, work[0] holds the optimal lwork on return, but Dggsvd3 does -// not perform the GSVD. -func (impl Implementation) Dggsvd3(jobU, jobV, jobQ lapack.GSVDJob, m, n, p int, a []float64, lda int, b []float64, ldb int, alpha, beta, u []float64, ldu int, v []float64, ldv int, q []float64, ldq int, work []float64, lwork int, iwork []int) (k, l int, ok bool) { - wantu := jobU == lapack.GSVDU - wantv := jobV == lapack.GSVDV - wantq := jobQ == lapack.GSVDQ - switch { - case !wantu && jobU != lapack.GSVDNone: - panic(badGSVDJob + "U") - case !wantv && jobV != lapack.GSVDNone: - panic(badGSVDJob + "V") - case !wantq && jobQ != lapack.GSVDNone: - panic(badGSVDJob + "Q") - case m < 0: - panic(mLT0) - case n < 0: - panic(nLT0) - case p < 0: - panic(pLT0) - case lda < max(1, n): - panic(badLdA) - case ldb < max(1, n): - panic(badLdB) - case ldu < 1, wantu && ldu < m: - panic(badLdU) - case ldv < 1, wantv && ldv < p: - panic(badLdV) - case ldq < 1, wantq && ldq < n: - panic(badLdQ) - case len(iwork) < n: - panic(shortWork) - case lwork < 1 && lwork != -1: - panic(badLWork) - case len(work) < max(1, lwork): - panic(shortWork) - } - - // Determine optimal work length. - impl.Dggsvp3(jobU, jobV, jobQ, - m, p, n, - a, lda, - b, ldb, - 0, 0, - u, ldu, - v, ldv, - q, ldq, - iwork, - work, work, -1) - lwkopt := n + int(work[0]) - lwkopt = max(lwkopt, 2*n) - lwkopt = max(lwkopt, 1) - work[0] = float64(lwkopt) - if lwork == -1 { - return 0, 0, true - } - - switch { - case len(a) < (m-1)*lda+n: - panic(shortA) - case len(b) < (p-1)*ldb+n: - panic(shortB) - case wantu && len(u) < (m-1)*ldu+m: - panic(shortU) - case wantv && len(v) < (p-1)*ldv+p: - panic(shortV) - case wantq && len(q) < (n-1)*ldq+n: - panic(shortQ) - case len(alpha) != n: - panic(badLenAlpha) - case len(beta) != n: - panic(badLenBeta) - } - - // Compute the Frobenius norm of matrices A and B. - anorm := impl.Dlange(lapack.Frobenius, m, n, a, lda, nil) - bnorm := impl.Dlange(lapack.Frobenius, p, n, b, ldb, nil) - - // Get machine precision and set up threshold for determining - // the effective numerical rank of the matrices A and B. - tola := float64(max(m, n)) * math.Max(anorm, dlamchS) * dlamchP - tolb := float64(max(p, n)) * math.Max(bnorm, dlamchS) * dlamchP - - // Preprocessing. 
- k, l = impl.Dggsvp3(jobU, jobV, jobQ, - m, p, n, - a, lda, - b, ldb, - tola, tolb, - u, ldu, - v, ldv, - q, ldq, - iwork, - work[:n], work[n:], lwork-n) - - // Compute the GSVD of two upper "triangular" matrices. - _, ok = impl.Dtgsja(jobU, jobV, jobQ, - m, p, n, - k, l, - a, lda, - b, ldb, - tola, tolb, - alpha, beta, - u, ldu, - v, ldv, - q, ldq, - work) - - // Sort the singular values and store the pivot indices in iwork - // Copy alpha to work, then sort alpha in work. - bi := blas64.Implementation() - bi.Dcopy(n, alpha, 1, work[:n], 1) - ibnd := min(l, m-k) - for i := 0; i < ibnd; i++ { - // Scan for largest alpha_{k+i}. - isub := i - smax := work[k+i] - for j := i + 1; j < ibnd; j++ { - if v := work[k+j]; v > smax { - isub = j - smax = v - } - } - if isub != i { - work[k+isub] = work[k+i] - work[k+i] = smax - iwork[k+i] = k + isub - } else { - iwork[k+i] = k + i - } - } - - work[0] = float64(lwkopt) - - return k, l, ok -} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dggsvp3.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dggsvp3.go deleted file mode 100644 index 7a9ad9fbf..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/dggsvp3.go +++ /dev/null @@ -1,281 +0,0 @@ -// Copyright ©2017 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import ( - "math" - - "gonum.org/v1/gonum/blas" - "gonum.org/v1/gonum/lapack" -) - -// Dggsvp3 computes orthogonal matrices U, V and Q such that -// -// n-k-l k l -// U^T*A*Q = k [ 0 A12 A13 ] if m-k-l >= 0; -// l [ 0 0 A23 ] -// m-k-l [ 0 0 0 ] -// -// n-k-l k l -// U^T*A*Q = k [ 0 A12 A13 ] if m-k-l < 0; -// m-k [ 0 0 A23 ] -// -// n-k-l k l -// V^T*B*Q = l [ 0 0 B13 ] -// p-l [ 0 0 0 ] -// -// where the k×k matrix A12 and l×l matrix B13 are non-singular -// upper triangular. A23 is l×l upper triangular if m-k-l >= 0, -// otherwise A23 is (m-k)×l upper trapezoidal. -// -// Dggsvp3 returns k and l, the dimensions of the sub-blocks. k+l -// is the effective numerical rank of the (m+p)×n matrix [ A^T B^T ]^T. -// -// jobU, jobV and jobQ are options for computing the orthogonal matrices. The behavior -// is as follows -// jobU == lapack.GSVDU Compute orthogonal matrix U -// jobU == lapack.GSVDNone Do not compute orthogonal matrix. -// The behavior is the same for jobV and jobQ with the exception that instead of -// lapack.GSVDU these accept lapack.GSVDV and lapack.GSVDQ respectively. -// The matrices U, V and Q must be m×m, p×p and n×n respectively unless the -// relevant job parameter is lapack.GSVDNone. -// -// tola and tolb are the convergence criteria for the Jacobi-Kogbetliantz -// iteration procedure. Generally, they are the same as used in the preprocessing -// step, for example, -// tola = max(m, n)*norm(A)*eps, -// tolb = max(p, n)*norm(B)*eps. -// Where eps is the machine epsilon. -// -// iwork must have length n, work must have length at least max(1, lwork), and -// lwork must be -1 or greater than zero, otherwise Dggsvp3 will panic. -// -// Dggsvp3 is an internal routine. It is exported for testing purposes. 
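The tola and tolb thresholds described above are exactly what the Dggsvd3 body earlier in this file computes before its preprocessing call; a short sketch of that computation (dlamchS and dlamchP are the machine constants used throughout this package):

	// Rank-determination thresholds, as computed in the Dggsvd3 body above.
	anorm := impl.Dlange(lapack.Frobenius, m, n, a, lda, nil)
	bnorm := impl.Dlange(lapack.Frobenius, p, n, b, ldb, nil)
	tola := float64(max(m, n)) * math.Max(anorm, dlamchS) * dlamchP
	tolb := float64(max(p, n)) * math.Max(bnorm, dlamchS) * dlamchP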
-func (impl Implementation) Dggsvp3(jobU, jobV, jobQ lapack.GSVDJob, m, p, n int, a []float64, lda int, b []float64, ldb int, tola, tolb float64, u []float64, ldu int, v []float64, ldv int, q []float64, ldq int, iwork []int, tau, work []float64, lwork int) (k, l int) { - wantu := jobU == lapack.GSVDU - wantv := jobV == lapack.GSVDV - wantq := jobQ == lapack.GSVDQ - switch { - case !wantu && jobU != lapack.GSVDNone: - panic(badGSVDJob + "U") - case !wantv && jobV != lapack.GSVDNone: - panic(badGSVDJob + "V") - case !wantq && jobQ != lapack.GSVDNone: - panic(badGSVDJob + "Q") - case m < 0: - panic(mLT0) - case p < 0: - panic(pLT0) - case n < 0: - panic(nLT0) - case lda < max(1, n): - panic(badLdA) - case ldb < max(1, n): - panic(badLdB) - case ldu < 1, wantu && ldu < m: - panic(badLdU) - case ldv < 1, wantv && ldv < p: - panic(badLdV) - case ldq < 1, wantq && ldq < n: - panic(badLdQ) - case len(iwork) != n: - panic(shortWork) - case lwork < 1 && lwork != -1: - panic(badLWork) - case len(work) < max(1, lwork): - panic(shortWork) - } - - var lwkopt int - impl.Dgeqp3(p, n, b, ldb, iwork, tau, work, -1) - lwkopt = int(work[0]) - if wantv { - lwkopt = max(lwkopt, p) - } - lwkopt = max(lwkopt, min(n, p)) - lwkopt = max(lwkopt, m) - if wantq { - lwkopt = max(lwkopt, n) - } - impl.Dgeqp3(m, n, a, lda, iwork, tau, work, -1) - lwkopt = max(lwkopt, int(work[0])) - lwkopt = max(1, lwkopt) - if lwork == -1 { - work[0] = float64(lwkopt) - return 0, 0 - } - - switch { - case len(a) < (m-1)*lda+n: - panic(shortA) - case len(b) < (p-1)*ldb+n: - panic(shortB) - case wantu && len(u) < (m-1)*ldu+m: - panic(shortU) - case wantv && len(v) < (p-1)*ldv+p: - panic(shortV) - case wantq && len(q) < (n-1)*ldq+n: - panic(shortQ) - case len(tau) < n: - // tau check must come after lwkopt query since - // the Dggsvd3 call for lwkopt query may have - // lwork == -1, and tau is provided by work. - panic(shortTau) - } - - const forward = true - - // QR with column pivoting of B: B*P = V*[ S11 S12 ]. - // [ 0 0 ] - for i := range iwork[:n] { - iwork[i] = 0 - } - impl.Dgeqp3(p, n, b, ldb, iwork, tau, work, lwork) - - // Update A := A*P. - impl.Dlapmt(forward, m, n, a, lda, iwork) - - // Determine the effective rank of matrix B. - for i := 0; i < min(p, n); i++ { - if math.Abs(b[i*ldb+i]) > tolb { - l++ - } - } - - if wantv { - // Copy the details of V, and form V. - impl.Dlaset(blas.All, p, p, 0, 0, v, ldv) - if p > 1 { - impl.Dlacpy(blas.Lower, p-1, min(p, n), b[ldb:], ldb, v[ldv:], ldv) - } - impl.Dorg2r(p, p, min(p, n), v, ldv, tau, work) - } - - // Clean up B. - for i := 1; i < l; i++ { - r := b[i*ldb : i*ldb+i] - for j := range r { - r[j] = 0 - } - } - if p > l { - impl.Dlaset(blas.All, p-l, n, 0, 0, b[l*ldb:], ldb) - } - - if wantq { - // Set Q = I and update Q := Q*P. - impl.Dlaset(blas.All, n, n, 0, 1, q, ldq) - impl.Dlapmt(forward, n, n, q, ldq, iwork) - } - - if p >= l && n != l { - // RQ factorization of [ S11 S12 ]: [ S11 S12 ] = [ 0 S12 ]*Z. - impl.Dgerq2(l, n, b, ldb, tau, work) - - // Update A := A*Z^T. - impl.Dormr2(blas.Right, blas.Trans, m, n, l, b, ldb, tau, a, lda, work) - - if wantq { - // Update Q := Q*Z^T. - impl.Dormr2(blas.Right, blas.Trans, n, n, l, b, ldb, tau, q, ldq, work) - } - - // Clean up B. - impl.Dlaset(blas.All, l, n-l, 0, 0, b, ldb) - for i := 1; i < l; i++ { - r := b[i*ldb+n-l : i*ldb+i+n-l] - for j := range r { - r[j] = 0 - } - } - } - - // Let N-L L - // A = [ A11 A12 ] M, - // - // then the following does the complete QR decomposition of A11: - // - // A11 = U*[ 0 T12 ]*P1^T. 
-	//                  [ 0   0  ]
-	for i := range iwork[:n-l] {
-		iwork[i] = 0
-	}
-	impl.Dgeqp3(m, n-l, a, lda, iwork[:n-l], tau, work, lwork)
-
-	// Determine the effective rank of A11.
-	for i := 0; i < min(m, n-l); i++ {
-		if math.Abs(a[i*lda+i]) > tola {
-			k++
-		}
-	}
-
-	// Update A12 := U^T*A12, where A12 = A[0:m, n-l:n].
-	impl.Dorm2r(blas.Left, blas.Trans, m, l, min(m, n-l), a, lda, tau, a[n-l:], lda, work)
-
-	if wantu {
-		// Copy the details of U, and form U.
-		impl.Dlaset(blas.All, m, m, 0, 0, u, ldu)
-		if m > 1 {
-			impl.Dlacpy(blas.Lower, m-1, min(m, n-l), a[lda:], lda, u[ldu:], ldu)
-		}
-		impl.Dorg2r(m, m, min(m, n-l), u, ldu, tau, work)
-	}
-
-	if wantq {
-		// Update Q[0:n, 0:n-l] := Q[0:n, 0:n-l]*P1.
-		impl.Dlapmt(forward, n, n-l, q, ldq, iwork[:n-l])
-	}
-
-	// Clean up A: set the strictly lower triangular part of
-	// A[0:k, 0:k] = 0, and A[k:m, 0:n-l] = 0.
-	for i := 1; i < k; i++ {
-		r := a[i*lda : i*lda+i]
-		for j := range r {
-			r[j] = 0
-		}
-	}
-	if m > k {
-		impl.Dlaset(blas.All, m-k, n-l, 0, 0, a[k*lda:], lda)
-	}
-
-	if n-l > k {
-		// RQ factorization of [ T11 T12 ] = [ 0 T12 ]*Z1.
-		impl.Dgerq2(k, n-l, a, lda, tau, work)
-
-		if wantq {
-			// Update Q[0:n, 0:n-l] := Q[0:n, 0:n-l]*Z1^T.
-			impl.Dormr2(blas.Right, blas.Trans, n, n-l, k, a, lda, tau, q, ldq, work)
-		}
-
-		// Clean up A.
-		impl.Dlaset(blas.All, k, n-l-k, 0, 0, a, lda)
-		for i := 1; i < k; i++ {
-			r := a[i*lda+n-k-l : i*lda+i+n-k-l]
-			for j := range r {
-				r[j] = 0
-			}
-		}
-	}
-
-	if m > k {
-		// QR factorization of A[k:m, n-l:n].
-		impl.Dgeqr2(m-k, l, a[k*lda+n-l:], lda, tau, work)
-		if wantu {
-			// Update U[:, k:m] := U[:, k:m]*U1.
-			impl.Dorm2r(blas.Right, blas.NoTrans, m, m-k, min(m-k, l), a[k*lda+n-l:], lda, tau, u[k:], ldu, work)
-		}
-
-		// Clean up A.
-		for i := k + 1; i < m; i++ {
-			r := a[i*lda+n-l : i*lda+min(n-l+i-k, n)]
-			for j := range r {
-				r[j] = 0
-			}
-		}
-	}
-
-	work[0] = float64(lwkopt)
-	return k, l
-}
diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dhseqr.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dhseqr.go
deleted file mode 100644
index ed3fbca85..000000000
--- a/vendor/gonum.org/v1/gonum/lapack/gonum/dhseqr.go
+++ /dev/null
@@ -1,252 +0,0 @@
-// Copyright ©2016 The Gonum Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gonum
-
-import (
-	"math"
-
-	"gonum.org/v1/gonum/blas"
-	"gonum.org/v1/gonum/lapack"
-)
-
-// Dhseqr computes the eigenvalues of an n×n Hessenberg matrix H and,
-// optionally, the matrices T and Z from the Schur decomposition
-//  H = Z T Z^T,
-// where T is an n×n upper quasi-triangular matrix (the Schur form), and Z is
-// the n×n orthogonal matrix of Schur vectors.
-//
-// Optionally Z may be postmultiplied into an input orthogonal matrix Q so that
-// this routine can give the Schur factorization of a matrix A which has been
-// reduced to the Hessenberg form H by the orthogonal matrix Q:
-//  A = Q H Q^T = (QZ) T (QZ)^T.
-//
-// If job == lapack.EigenvaluesOnly, only the eigenvalues will be computed.
-// If job == lapack.EigenvaluesAndSchur, the eigenvalues and the Schur form T will
-// be computed.
-// For other values of job Dhseqr will panic.
-//
-// If compz == lapack.SchurNone, no Schur vectors will be computed and Z will not be
-// referenced.
-// If compz == lapack.SchurHess, on return Z will contain the matrix of Schur
-// vectors of H.
-// If compz == lapack.SchurOrig, on entry z is assumed to contain the orthogonal -// matrix Q that is the identity except for the submatrix -// Q[ilo:ihi+1,ilo:ihi+1]. On return z will be updated to the product Q*Z. -// -// ilo and ihi determine the block of H on which Dhseqr operates. It is assumed -// that H is already upper triangular in rows and columns [0:ilo] and [ihi+1:n], -// although it will be only checked that the block is isolated, that is, -// ilo == 0 or H[ilo,ilo-1] == 0, -// ihi == n-1 or H[ihi+1,ihi] == 0, -// and Dhseqr will panic otherwise. ilo and ihi are typically set by a previous -// call to Dgebal, otherwise they should be set to 0 and n-1, respectively. It -// must hold that -// 0 <= ilo <= ihi < n, if n > 0, -// ilo == 0 and ihi == -1, if n == 0. -// -// wr and wi must have length n. -// -// work must have length at least lwork and lwork must be at least max(1,n) -// otherwise Dhseqr will panic. The minimum lwork delivers very good and -// sometimes optimal performance, although lwork as large as 11*n may be -// required. On return, work[0] will contain the optimal value of lwork. -// -// If lwork is -1, instead of performing Dhseqr, the function only estimates the -// optimal workspace size and stores it into work[0]. Neither h nor z are -// accessed. -// -// unconverged indicates whether Dhseqr computed all the eigenvalues. -// -// If unconverged == 0, all the eigenvalues have been computed and their real -// and imaginary parts will be stored on return in wr and wi, respectively. If -// two eigenvalues are computed as a complex conjugate pair, they are stored in -// consecutive elements of wr and wi, say the i-th and (i+1)th, with wi[i] > 0 -// and wi[i+1] < 0. -// -// If unconverged == 0 and job == lapack.EigenvaluesAndSchur, on return H will -// contain the upper quasi-triangular matrix T from the Schur decomposition (the -// Schur form). 2×2 diagonal blocks (corresponding to complex conjugate pairs of -// eigenvalues) will be returned in standard form, with -// H[i,i] == H[i+1,i+1], -// and -// H[i+1,i]*H[i,i+1] < 0. -// The eigenvalues will be stored in wr and wi in the same order as on the -// diagonal of the Schur form returned in H, with -// wr[i] = H[i,i], -// and, if H[i:i+2,i:i+2] is a 2×2 diagonal block, -// wi[i] = sqrt(-H[i+1,i]*H[i,i+1]), -// wi[i+1] = -wi[i]. -// -// If unconverged == 0 and job == lapack.EigenvaluesOnly, the contents of h -// on return is unspecified. -// -// If unconverged > 0, some eigenvalues have not converged, and the blocks -// [0:ilo] and [unconverged:n] of wr and wi will contain those eigenvalues which -// have been successfully computed. Failures are rare. -// -// If unconverged > 0 and job == lapack.EigenvaluesOnly, on return the -// remaining unconverged eigenvalues are the eigenvalues of the upper Hessenberg -// matrix H[ilo:unconverged,ilo:unconverged]. -// -// If unconverged > 0 and job == lapack.EigenvaluesAndSchur, then on -// return -// (initial H) U = U (final H), (*) -// where U is an orthogonal matrix. The final H is upper Hessenberg and -// H[unconverged:ihi+1,unconverged:ihi+1] is upper quasi-triangular. -// -// If unconverged > 0 and compz == lapack.SchurOrig, then on return -// (final Z) = (initial Z) U, -// where U is the orthogonal matrix in (*) regardless of the value of job. -// -// If unconverged > 0 and compz == lapack.SchurHess, then on return -// (final Z) = U, -// where U is the orthogonal matrix in (*) regardless of the value of job. -// -// References: -// [1] R. Byers. 
LAPACK 3.1 xHSEQR: Tuning and Implementation Notes on the -// Small Bulge Multi-Shift QR Algorithm with Aggressive Early Deflation. -// LAPACK Working Note 187 (2007) -// URL: http://www.netlib.org/lapack/lawnspdf/lawn187.pdf -// [2] K. Braman, R. Byers, R. Mathias. The Multishift QR Algorithm. Part I: -// Maintaining Well-Focused Shifts and Level 3 Performance. SIAM J. Matrix -// Anal. Appl. 23(4) (2002), pp. 929—947 -// URL: http://dx.doi.org/10.1137/S0895479801384573 -// [3] K. Braman, R. Byers, R. Mathias. The Multishift QR Algorithm. Part II: -// Aggressive Early Deflation. SIAM J. Matrix Anal. Appl. 23(4) (2002), pp. 948—973 -// URL: http://dx.doi.org/10.1137/S0895479801384585 -// -// Dhseqr is an internal routine. It is exported for testing purposes. -func (impl Implementation) Dhseqr(job lapack.SchurJob, compz lapack.SchurComp, n, ilo, ihi int, h []float64, ldh int, wr, wi []float64, z []float64, ldz int, work []float64, lwork int) (unconverged int) { - wantt := job == lapack.EigenvaluesAndSchur - wantz := compz == lapack.SchurHess || compz == lapack.SchurOrig - - switch { - case job != lapack.EigenvaluesOnly && job != lapack.EigenvaluesAndSchur: - panic(badSchurJob) - case compz != lapack.SchurNone && compz != lapack.SchurHess && compz != lapack.SchurOrig: - panic(badSchurComp) - case n < 0: - panic(nLT0) - case ilo < 0 || max(0, n-1) < ilo: - panic(badIlo) - case ihi < min(ilo, n-1) || n <= ihi: - panic(badIhi) - case ldh < max(1, n): - panic(badLdH) - case ldz < 1, wantz && ldz < n: - panic(badLdZ) - case lwork < max(1, n) && lwork != -1: - panic(badLWork) - case len(work) < max(1, lwork): - panic(shortWork) - } - - // Quick return if possible. - if n == 0 { - work[0] = 1 - return 0 - } - - // Quick return in case of a workspace query. - if lwork == -1 { - impl.Dlaqr04(wantt, wantz, n, ilo, ihi, h, ldh, wr, wi, ilo, ihi, z, ldz, work, -1, 1) - work[0] = math.Max(float64(n), work[0]) - return 0 - } - - switch { - case len(h) < (n-1)*ldh+n: - panic(shortH) - case wantz && len(z) < (n-1)*ldz+n: - panic(shortZ) - case len(wr) < n: - panic(shortWr) - case len(wi) < n: - panic(shortWi) - } - - const ( - // Matrices of order ntiny or smaller must be processed by - // Dlahqr because of insufficient subdiagonal scratch space. - // This is a hard limit. - ntiny = 11 - - // nl is the size of a local workspace to help small matrices - // through a rare Dlahqr failure. nl > ntiny is required and - // nl <= nmin = Ilaenv(ispec=12,...) is recommended (the default - // value of nmin is 75). Using nl = 49 allows up to six - // simultaneous shifts and a 16×16 deflation window. - nl = 49 - ) - - // Copy eigenvalues isolated by Dgebal. - for i := 0; i < ilo; i++ { - wr[i] = h[i*ldh+i] - wi[i] = 0 - } - for i := ihi + 1; i < n; i++ { - wr[i] = h[i*ldh+i] - wi[i] = 0 - } - - // Initialize Z to identity matrix if requested. - if compz == lapack.SchurHess { - impl.Dlaset(blas.All, n, n, 0, 1, z, ldz) - } - - // Quick return if possible. - if ilo == ihi { - wr[ilo] = h[ilo*ldh+ilo] - wi[ilo] = 0 - return 0 - } - - // Dlahqr/Dlaqr04 crossover point. - nmin := impl.Ilaenv(12, "DHSEQR", string(job)+string(compz), n, ilo, ihi, lwork) - nmin = max(ntiny, nmin) - - if n > nmin { - // Dlaqr0 for big matrices. - unconverged = impl.Dlaqr04(wantt, wantz, n, ilo, ihi, h, ldh, wr[:ihi+1], wi[:ihi+1], - ilo, ihi, z, ldz, work, lwork, 1) - } else { - // Dlahqr for small matrices. 
-		unconverged = impl.Dlahqr(wantt, wantz, n, ilo, ihi, h, ldh, wr[:ihi+1], wi[:ihi+1],
-			ilo, ihi, z, ldz)
-		if unconverged > 0 {
-			// A rare Dlahqr failure! Dlaqr04 sometimes succeeds
-			// when Dlahqr fails.
-			kbot := unconverged
-			if n >= nl {
-				// Larger matrices have enough subdiagonal
-				// scratch space to call Dlaqr04 directly.
-				unconverged = impl.Dlaqr04(wantt, wantz, n, ilo, kbot, h, ldh,
-					wr[:ihi+1], wi[:ihi+1], ilo, ihi, z, ldz, work, lwork, 1)
-			} else {
-				// Tiny matrices don't have enough subdiagonal
-				// scratch space to benefit from Dlaqr04. Hence,
-				// tiny matrices must be copied into a larger
-				// array before calling Dlaqr04.
-				var hl [nl * nl]float64
-				impl.Dlacpy(blas.All, n, n, h, ldh, hl[:], nl)
-				impl.Dlaset(blas.All, nl, nl-n, 0, 0, hl[n:], nl)
-				var workl [nl]float64
-				unconverged = impl.Dlaqr04(wantt, wantz, nl, ilo, kbot, hl[:], nl,
-					wr[:ihi+1], wi[:ihi+1], ilo, ihi, z, ldz, workl[:], nl, 1)
-				work[0] = workl[0]
-				if wantt || unconverged > 0 {
-					impl.Dlacpy(blas.All, n, n, hl[:], nl, h, ldh)
-				}
-			}
-		}
-	}
-	// Zero out under the first subdiagonal, if necessary.
-	if (wantt || unconverged > 0) && n > 2 {
-		impl.Dlaset(blas.Lower, n-2, n-2, 0, 0, h[2*ldh:], ldh)
-	}
-
-	work[0] = math.Max(float64(n), work[0])
-	return unconverged
-}
diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlabrd.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlabrd.go
deleted file mode 100644
index babc0b7c0..000000000
--- a/vendor/gonum.org/v1/gonum/lapack/gonum/dlabrd.go
+++ /dev/null
@@ -1,173 +0,0 @@
-// Copyright ©2015 The Gonum Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gonum
-
-import (
-	"gonum.org/v1/gonum/blas"
-	"gonum.org/v1/gonum/blas/blas64"
-)
-
-// Dlabrd reduces the first NB rows and columns of a real general m×n matrix
-// A to upper or lower bidiagonal form by an orthogonal transformation
-//  Q^T * A * P
-// If m >= n, A is reduced to upper bidiagonal form and upon exit the elements
-// on and below the diagonal in the first nb columns represent the elementary
-// reflectors, and the elements above the diagonal in the first nb rows represent
-// the matrix P. If m < n, A is reduced to lower bidiagonal form and the elements
-// of P are instead stored above the diagonal.
-//
-// The reduction to bidiagonal form is stored in d and e, where d are the diagonal
-// elements, and e are the off-diagonal elements.
-//
-// The matrices Q and P are products of elementary reflectors
-//  Q = H_0 * H_1 * ... * H_{nb-1}
-//  P = G_0 * G_1 * ... * G_{nb-1}
-// where
-//  H_i = I - tauQ[i] * v_i * v_i^T
-//  G_i = I - tauP[i] * u_i * u_i^T
-//
-// As an example, on exit the entries of A when m = 6, n = 5, and nb = 2
-//  [ 1   1  u1  u1  u1]
-//  [v1   1   1  u2  u2]
-//  [v1  v2   a   a   a]
-//  [v1  v2   a   a   a]
-//  [v1  v2   a   a   a]
-//  [v1  v2   a   a   a]
-// and when m = 5, n = 6, and nb = 2
-//  [ 1  u1  u1  u1  u1  u1]
-//  [ 1   1  u2  u2  u2  u2]
-//  [v1   1   a   a   a   a]
-//  [v1  v2   a   a   a   a]
-//  [v1  v2   a   a   a   a]
-//
-// Dlabrd also returns the matrices X and Y which are needed to apply the
-// transformation to the unreduced part of the matrix
-//  A := A - V*Y^T - X*U^T
-//
-// X is an m×nb matrix, Y is an n×nb matrix. d, e, tauP, and tauQ must all have
-// length at least nb. Dlabrd will panic if these size constraints are violated.
-//
-// Dlabrd is an internal routine. It is exported for testing purposes.
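The update A := A - V*Y^T - X*U^T is what makes the blocked reduction pay off: a Dgebrd-style caller reduces an nb-wide panel with Dlabrd and then applies two rank-nb updates to the trailing submatrix with level-3 BLAS. A sketch for the m >= n case with the panel at the top-left corner (bi is blas64.Implementation(); a real caller offsets every index by the panel position):

	impl.Dlabrd(m, n, nb, a, lda, d, e, tauQ, tauP, x, ldx, y, ldy)
	// A[nb:m, nb:n] -= V * Y^T, with V held in the first nb columns of A.
	bi.Dgemm(blas.NoTrans, blas.Trans, m-nb, n-nb, nb,
		-1, a[nb*lda:], lda, y[nb*ldy:], ldy, 1, a[nb*lda+nb:], lda)
	// A[nb:m, nb:n] -= X * U^T, with U^T held in the first nb rows of A.
	bi.Dgemm(blas.NoTrans, blas.NoTrans, m-nb, n-nb, nb,
		-1, x[nb*ldx:], ldx, a[nb:], lda, 1, a[nb*lda+nb:], lda)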
-func (impl Implementation) Dlabrd(m, n, nb int, a []float64, lda int, d, e, tauQ, tauP, x []float64, ldx int, y []float64, ldy int) { - switch { - case m < 0: - panic(mLT0) - case n < 0: - panic(nLT0) - case nb < 0: - panic(nbLT0) - case nb > n: - panic(nbGTN) - case nb > m: - panic(nbGTM) - case lda < max(1, n): - panic(badLdA) - case ldx < max(1, nb): - panic(badLdX) - case ldy < max(1, nb): - panic(badLdY) - } - - if m == 0 || n == 0 || nb == 0 { - return - } - - switch { - case len(a) < (m-1)*lda+n: - panic(shortA) - case len(d) < nb: - panic(shortD) - case len(e) < nb: - panic(shortE) - case len(tauQ) < nb: - panic(shortTauQ) - case len(tauP) < nb: - panic(shortTauP) - case len(x) < (m-1)*ldx+nb: - panic(shortX) - case len(y) < (n-1)*ldy+nb: - panic(shortY) - } - - bi := blas64.Implementation() - - if m >= n { - // Reduce to upper bidiagonal form. - for i := 0; i < nb; i++ { - bi.Dgemv(blas.NoTrans, m-i, i, -1, a[i*lda:], lda, y[i*ldy:], 1, 1, a[i*lda+i:], lda) - bi.Dgemv(blas.NoTrans, m-i, i, -1, x[i*ldx:], ldx, a[i:], lda, 1, a[i*lda+i:], lda) - - a[i*lda+i], tauQ[i] = impl.Dlarfg(m-i, a[i*lda+i], a[min(i+1, m-1)*lda+i:], lda) - d[i] = a[i*lda+i] - if i < n-1 { - // Compute Y[i+1:n, i]. - a[i*lda+i] = 1 - bi.Dgemv(blas.Trans, m-i, n-i-1, 1, a[i*lda+i+1:], lda, a[i*lda+i:], lda, 0, y[(i+1)*ldy+i:], ldy) - bi.Dgemv(blas.Trans, m-i, i, 1, a[i*lda:], lda, a[i*lda+i:], lda, 0, y[i:], ldy) - bi.Dgemv(blas.NoTrans, n-i-1, i, -1, y[(i+1)*ldy:], ldy, y[i:], ldy, 1, y[(i+1)*ldy+i:], ldy) - bi.Dgemv(blas.Trans, m-i, i, 1, x[i*ldx:], ldx, a[i*lda+i:], lda, 0, y[i:], ldy) - bi.Dgemv(blas.Trans, i, n-i-1, -1, a[i+1:], lda, y[i:], ldy, 1, y[(i+1)*ldy+i:], ldy) - bi.Dscal(n-i-1, tauQ[i], y[(i+1)*ldy+i:], ldy) - - // Update A[i, i+1:n]. - bi.Dgemv(blas.NoTrans, n-i-1, i+1, -1, y[(i+1)*ldy:], ldy, a[i*lda:], 1, 1, a[i*lda+i+1:], 1) - bi.Dgemv(blas.Trans, i, n-i-1, -1, a[i+1:], lda, x[i*ldx:], 1, 1, a[i*lda+i+1:], 1) - - // Generate reflection P[i] to annihilate A[i, i+2:n]. - a[i*lda+i+1], tauP[i] = impl.Dlarfg(n-i-1, a[i*lda+i+1], a[i*lda+min(i+2, n-1):], 1) - e[i] = a[i*lda+i+1] - a[i*lda+i+1] = 1 - - // Compute X[i+1:m, i]. - bi.Dgemv(blas.NoTrans, m-i-1, n-i-1, 1, a[(i+1)*lda+i+1:], lda, a[i*lda+i+1:], 1, 0, x[(i+1)*ldx+i:], ldx) - bi.Dgemv(blas.Trans, n-i-1, i+1, 1, y[(i+1)*ldy:], ldy, a[i*lda+i+1:], 1, 0, x[i:], ldx) - bi.Dgemv(blas.NoTrans, m-i-1, i+1, -1, a[(i+1)*lda:], lda, x[i:], ldx, 1, x[(i+1)*ldx+i:], ldx) - bi.Dgemv(blas.NoTrans, i, n-i-1, 1, a[i+1:], lda, a[i*lda+i+1:], 1, 0, x[i:], ldx) - bi.Dgemv(blas.NoTrans, m-i-1, i, -1, x[(i+1)*ldx:], ldx, x[i:], ldx, 1, x[(i+1)*ldx+i:], ldx) - bi.Dscal(m-i-1, tauP[i], x[(i+1)*ldx+i:], ldx) - } - } - return - } - // Reduce to lower bidiagonal form. - for i := 0; i < nb; i++ { - // Update A[i,i:n] - bi.Dgemv(blas.NoTrans, n-i, i, -1, y[i*ldy:], ldy, a[i*lda:], 1, 1, a[i*lda+i:], 1) - bi.Dgemv(blas.Trans, i, n-i, -1, a[i:], lda, x[i*ldx:], 1, 1, a[i*lda+i:], 1) - - // Generate reflection P[i] to annihilate A[i, i+1:n] - a[i*lda+i], tauP[i] = impl.Dlarfg(n-i, a[i*lda+i], a[i*lda+min(i+1, n-1):], 1) - d[i] = a[i*lda+i] - if i < m-1 { - a[i*lda+i] = 1 - // Compute X[i+1:m, i]. 
- bi.Dgemv(blas.NoTrans, m-i-1, n-i, 1, a[(i+1)*lda+i:], lda, a[i*lda+i:], 1, 0, x[(i+1)*ldx+i:], ldx) - bi.Dgemv(blas.Trans, n-i, i, 1, y[i*ldy:], ldy, a[i*lda+i:], 1, 0, x[i:], ldx) - bi.Dgemv(blas.NoTrans, m-i-1, i, -1, a[(i+1)*lda:], lda, x[i:], ldx, 1, x[(i+1)*ldx+i:], ldx) - bi.Dgemv(blas.NoTrans, i, n-i, 1, a[i:], lda, a[i*lda+i:], 1, 0, x[i:], ldx) - bi.Dgemv(blas.NoTrans, m-i-1, i, -1, x[(i+1)*ldx:], ldx, x[i:], ldx, 1, x[(i+1)*ldx+i:], ldx) - bi.Dscal(m-i-1, tauP[i], x[(i+1)*ldx+i:], ldx) - - // Update A[i+1:m, i]. - bi.Dgemv(blas.NoTrans, m-i-1, i, -1, a[(i+1)*lda:], lda, y[i*ldy:], 1, 1, a[(i+1)*lda+i:], lda) - bi.Dgemv(blas.NoTrans, m-i-1, i+1, -1, x[(i+1)*ldx:], ldx, a[i:], lda, 1, a[(i+1)*lda+i:], lda) - - // Generate reflection Q[i] to annihilate A[i+2:m, i]. - a[(i+1)*lda+i], tauQ[i] = impl.Dlarfg(m-i-1, a[(i+1)*lda+i], a[min(i+2, m-1)*lda+i:], lda) - e[i] = a[(i+1)*lda+i] - a[(i+1)*lda+i] = 1 - - // Compute Y[i+1:n, i]. - bi.Dgemv(blas.Trans, m-i-1, n-i-1, 1, a[(i+1)*lda+i+1:], lda, a[(i+1)*lda+i:], lda, 0, y[(i+1)*ldy+i:], ldy) - bi.Dgemv(blas.Trans, m-i-1, i, 1, a[(i+1)*lda:], lda, a[(i+1)*lda+i:], lda, 0, y[i:], ldy) - bi.Dgemv(blas.NoTrans, n-i-1, i, -1, y[(i+1)*ldy:], ldy, y[i:], ldy, 1, y[(i+1)*ldy+i:], ldy) - bi.Dgemv(blas.Trans, m-i-1, i+1, 1, x[(i+1)*ldx:], ldx, a[(i+1)*lda+i:], lda, 0, y[i:], ldy) - bi.Dgemv(blas.Trans, i+1, n-i-1, -1, a[i+1:], lda, y[i:], ldy, 1, y[(i+1)*ldy+i:], ldy) - bi.Dscal(n-i-1, tauQ[i], y[(i+1)*ldy+i:], ldy) - } - } -} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlacn2.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlacn2.go deleted file mode 100644 index e8ac1e439..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/dlacn2.go +++ /dev/null @@ -1,134 +0,0 @@ -// Copyright ©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import ( - "math" - - "gonum.org/v1/gonum/blas/blas64" -) - -// Dlacn2 estimates the 1-norm of an n×n matrix A using sequential updates with -// matrix-vector products provided externally. -// -// Dlacn2 is called sequentially and it returns the value of est and kase to be -// used on the next call. -// On the initial call, kase must be 0. -// In between calls, x must be overwritten by -// A * X if kase was returned as 1, -// A^T * X if kase was returned as 2, -// and all other parameters must not be changed. -// On the final return, kase is returned as 0, v contains A*W where W is a -// vector, and est = norm(V)/norm(W) is a lower bound for 1-norm of A. -// -// v, x, and isgn must all have length n and n must be at least 1, otherwise -// Dlacn2 will panic. isave is used for temporary storage. -// -// Dlacn2 is an internal routine. It is exported for testing purposes. 
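
The kase protocol documented above is easiest to see in a complete driver loop. Below is a minimal sketch (illustrative only, not part of this patch); the matVec helper and the 3×3 test matrix are assumptions made for the example.

    package main

    import (
        "fmt"

        "gonum.org/v1/gonum/lapack/gonum"
    )

    // matVec overwrites x with A*x (trans == false) or A^T*x (trans == true),
    // where A is n×n and stored row-major in a with leading dimension n.
    func matVec(trans bool, n int, a, x []float64) {
        y := make([]float64, n)
        for i := 0; i < n; i++ {
            for j := 0; j < n; j++ {
                if trans {
                    y[j] += a[i*n+j] * x[i]
                } else {
                    y[i] += a[i*n+j] * x[j]
                }
            }
        }
        copy(x, y)
    }

    func main() {
        impl := gonum.Implementation{}
        n := 3
        a := []float64{
            2, -1, 0,
            -1, 2, -1,
            0, -1, 2,
        }
        v := make([]float64, n)
        x := make([]float64, n)
        isgn := make([]int, n)
        var isave [3]int // must be zeroed, together with kase == 0, on the first call
        var est float64
        kase := 0
        for {
            est, kase = impl.Dlacn2(n, v, x, isgn, est, kase, &isave)
            if kase == 0 {
                break
            }
            matVec(kase == 2, n, a, x) // kase == 1: x <- A*x, kase == 2: x <- A^T*x
        }
        fmt.Printf("estimated 1-norm: %.3f\n", est) // the exact 1-norm is 4
    }
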
-func (impl Implementation) Dlacn2(n int, v, x []float64, isgn []int, est float64, kase int, isave *[3]int) (float64, int) { - switch { - case n < 1: - panic(nLT1) - case len(v) < n: - panic(shortV) - case len(x) < n: - panic(shortX) - case len(isgn) < n: - panic(shortIsgn) - case isave[0] < 0 || 5 < isave[0]: - panic(badIsave) - case isave[0] == 0 && kase != 0: - panic(badIsave) - } - - const itmax = 5 - bi := blas64.Implementation() - - if kase == 0 { - for i := 0; i < n; i++ { - x[i] = 1 / float64(n) - } - kase = 1 - isave[0] = 1 - return est, kase - } - switch isave[0] { - case 1: - if n == 1 { - v[0] = x[0] - est = math.Abs(v[0]) - kase = 0 - return est, kase - } - est = bi.Dasum(n, x, 1) - for i := 0; i < n; i++ { - x[i] = math.Copysign(1, x[i]) - isgn[i] = int(x[i]) - } - kase = 2 - isave[0] = 2 - return est, kase - case 2: - isave[1] = bi.Idamax(n, x, 1) - isave[2] = 2 - for i := 0; i < n; i++ { - x[i] = 0 - } - x[isave[1]] = 1 - kase = 1 - isave[0] = 3 - return est, kase - case 3: - bi.Dcopy(n, x, 1, v, 1) - estold := est - est = bi.Dasum(n, v, 1) - sameSigns := true - for i := 0; i < n; i++ { - if int(math.Copysign(1, x[i])) != isgn[i] { - sameSigns = false - break - } - } - if !sameSigns && est > estold { - for i := 0; i < n; i++ { - x[i] = math.Copysign(1, x[i]) - isgn[i] = int(x[i]) - } - kase = 2 - isave[0] = 4 - return est, kase - } - case 4: - jlast := isave[1] - isave[1] = bi.Idamax(n, x, 1) - if x[jlast] != math.Abs(x[isave[1]]) && isave[2] < itmax { - isave[2] += 1 - for i := 0; i < n; i++ { - x[i] = 0 - } - x[isave[1]] = 1 - kase = 1 - isave[0] = 3 - return est, kase - } - case 5: - tmp := 2 * (bi.Dasum(n, x, 1)) / float64(3*n) - if tmp > est { - bi.Dcopy(n, x, 1, v, 1) - est = tmp - } - kase = 0 - return est, kase - } - // Iteration complete. Final stage - altsgn := 1.0 - for i := 0; i < n; i++ { - x[i] = altsgn * (1 + float64(i)/float64(n-1)) - altsgn *= -1 - } - kase = 1 - isave[0] = 5 - return est, kase -} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlacpy.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlacpy.go deleted file mode 100644 index a37f3b0db..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/dlacpy.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright ©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import "gonum.org/v1/gonum/blas" - -// Dlacpy copies the elements of A specified by uplo into B. Uplo can specify -// a triangular portion with blas.Upper or blas.Lower, or can specify all of the -// elemest with blas.All. -// -// Dlacpy is an internal routine. It is exported for testing purposes. 
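
Before the body below, a minimal sketch (illustrative only, not part of this patch) of the uplo modes, here blas.Upper on a 3×3 matrix:

    package main

    import (
        "fmt"

        "gonum.org/v1/gonum/blas"
        "gonum.org/v1/gonum/lapack/gonum"
    )

    func main() {
        impl := gonum.Implementation{}
        a := []float64{
            1, 2, 3,
            4, 5, 6,
            7, 8, 9,
        }
        b := make([]float64, 9) // destination, zero-initialized
        // Copy only the upper triangle of the 3×3 matrix a into b;
        // blas.Lower and blas.All select the other two modes.
        impl.Dlacpy(blas.Upper, 3, 3, a, 3, b, 3)
        fmt.Println(b) // [1 2 3 0 5 6 0 0 9]
    }
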
-func (impl Implementation) Dlacpy(uplo blas.Uplo, m, n int, a []float64, lda int, b []float64, ldb int) { - switch { - case uplo != blas.Upper && uplo != blas.Lower && uplo != blas.All: - panic(badUplo) - case m < 0: - panic(mLT0) - case n < 0: - panic(nLT0) - case lda < max(1, n): - panic(badLdA) - case ldb < max(1, n): - panic(badLdB) - } - - if m == 0 || n == 0 { - return - } - - switch { - case len(a) < (m-1)*lda+n: - panic(shortA) - case len(b) < (m-1)*ldb+n: - panic(shortB) - } - - switch uplo { - case blas.Upper: - for i := 0; i < m; i++ { - for j := i; j < n; j++ { - b[i*ldb+j] = a[i*lda+j] - } - } - case blas.Lower: - for i := 0; i < m; i++ { - for j := 0; j < min(i+1, n); j++ { - b[i*ldb+j] = a[i*lda+j] - } - } - case blas.All: - for i := 0; i < m; i++ { - for j := 0; j < n; j++ { - b[i*ldb+j] = a[i*lda+j] - } - } - } -} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlae2.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlae2.go deleted file mode 100644 index c071fec7d..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/dlae2.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright ©2016 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import "math" - -// Dlae2 computes the eigenvalues of a 2×2 symmetric matrix -// [a b] -// [b c] -// and returns the eigenvalue with the larger absolute value as rt1 and the -// smaller as rt2. -// -// Dlae2 is an internal routine. It is exported for testing purposes. -func (impl Implementation) Dlae2(a, b, c float64) (rt1, rt2 float64) { - sm := a + c - df := a - c - adf := math.Abs(df) - tb := b + b - ab := math.Abs(tb) - acmx := c - acmn := a - if math.Abs(a) > math.Abs(c) { - acmx = a - acmn = c - } - var rt float64 - if adf > ab { - rt = adf * math.Sqrt(1+(ab/adf)*(ab/adf)) - } else if adf < ab { - rt = ab * math.Sqrt(1+(adf/ab)*(adf/ab)) - } else { - rt = ab * math.Sqrt2 - } - if sm < 0 { - rt1 = 0.5 * (sm - rt) - rt2 = (acmx/rt1)*acmn - (b/rt1)*b - return rt1, rt2 - } - if sm > 0 { - rt1 = 0.5 * (sm + rt) - rt2 = (acmx/rt1)*acmn - (b/rt1)*b - return rt1, rt2 - } - rt1 = 0.5 * rt - rt2 = -0.5 * rt - return rt1, rt2 -} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlaev2.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlaev2.go deleted file mode 100644 index 74d75b913..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/dlaev2.go +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright ©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import "math" - -// Dlaev2 computes the Eigen decomposition of a symmetric 2×2 matrix. -// The matrix is given by -// [a b] -// [b c] -// Dlaev2 returns rt1 and rt2, the eigenvalues of the matrix where |RT1| > |RT2|, -// and [cs1, sn1] which is the unit right eigenvalue for RT1. -// [ cs1 sn1] [a b] [cs1 -sn1] = [rt1 0] -// [-sn1 cs1] [b c] [sn1 cs1] [ 0 rt2] -// -// Dlaev2 is an internal routine. It is exported for testing purposes. 
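
As a concrete instance of the decomposition documented above, a short sketch (illustrative only, not part of this patch); note that (cs1, sn1) is the unit right eigenvector belonging to rt1:

    package main

    import (
        "fmt"

        "gonum.org/v1/gonum/lapack/gonum"
    )

    func main() {
        impl := gonum.Implementation{}
        // Decompose the symmetric matrix
        //  [2 1]
        //  [1 2]
        // whose eigenvalues are 3 and 1.
        rt1, rt2, cs1, sn1 := impl.Dlaev2(2, 1, 2)
        fmt.Println(rt1, rt2) // ≈ 3 1
        fmt.Println(cs1, sn1) // ≈ 0.707 0.707, i.e. (1/√2, 1/√2)
    }
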
-func (impl Implementation) Dlaev2(a, b, c float64) (rt1, rt2, cs1, sn1 float64) { - sm := a + c - df := a - c - adf := math.Abs(df) - tb := b + b - ab := math.Abs(tb) - acmx := c - acmn := a - if math.Abs(a) > math.Abs(c) { - acmx = a - acmn = c - } - var rt float64 - if adf > ab { - rt = adf * math.Sqrt(1+(ab/adf)*(ab/adf)) - } else if adf < ab { - rt = ab * math.Sqrt(1+(adf/ab)*(adf/ab)) - } else { - rt = ab * math.Sqrt(2) - } - var sgn1 float64 - if sm < 0 { - rt1 = 0.5 * (sm - rt) - sgn1 = -1 - rt2 = (acmx/rt1)*acmn - (b/rt1)*b - } else if sm > 0 { - rt1 = 0.5 * (sm + rt) - sgn1 = 1 - rt2 = (acmx/rt1)*acmn - (b/rt1)*b - } else { - rt1 = 0.5 * rt - rt2 = -0.5 * rt - sgn1 = 1 - } - var cs, sgn2 float64 - if df >= 0 { - cs = df + rt - sgn2 = 1 - } else { - cs = df - rt - sgn2 = -1 - } - acs := math.Abs(cs) - if acs > ab { - ct := -tb / cs - sn1 = 1 / math.Sqrt(1+ct*ct) - cs1 = ct * sn1 - } else { - if ab == 0 { - cs1 = 1 - sn1 = 0 - } else { - tn := -cs / tb - cs1 = 1 / math.Sqrt(1+tn*tn) - sn1 = tn * cs1 - } - } - if sgn1 == sgn2 { - tn := cs1 - cs1 = -sn1 - sn1 = tn - } - return rt1, rt2, cs1, sn1 -} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlaexc.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlaexc.go deleted file mode 100644 index 2b79bd8ae..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/dlaexc.go +++ /dev/null @@ -1,269 +0,0 @@ -// Copyright ©2016 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import ( - "math" - - "gonum.org/v1/gonum/blas" - "gonum.org/v1/gonum/blas/blas64" - "gonum.org/v1/gonum/lapack" -) - -// Dlaexc swaps two adjacent diagonal blocks of order 1 or 2 in an n×n upper -// quasi-triangular matrix T by an orthogonal similarity transformation. -// -// T must be in Schur canonical form, that is, block upper triangular with 1×1 -// and 2×2 diagonal blocks; each 2×2 diagonal block has its diagonal elements -// equal and its off-diagonal elements of opposite sign. On return, T will -// contain the updated matrix again in Schur canonical form. -// -// If wantq is true, the transformation is accumulated in the n×n matrix Q, -// otherwise Q is not referenced. -// -// j1 is the index of the first row of the first block. n1 and n2 are the order -// of the first and second block, respectively. -// -// work must have length at least n, otherwise Dlaexc will panic. -// -// If ok is false, the transformed matrix T would be too far from Schur form. -// The blocks are not swapped, and T and Q are not modified. -// -// If n1 and n2 are both equal to 1, Dlaexc will always return true. -// -// Dlaexc is an internal routine. It is exported for testing purposes. -func (impl Implementation) Dlaexc(wantq bool, n int, t []float64, ldt int, q []float64, ldq int, j1, n1, n2 int, work []float64) (ok bool) { - switch { - case n < 0: - panic(nLT0) - case ldt < max(1, n): - panic(badLdT) - case wantq && ldt < max(1, n): - panic(badLdQ) - case j1 < 0 || n <= j1: - panic(badJ1) - case len(work) < n: - panic(shortWork) - case n1 < 0 || 2 < n1: - panic(badN1) - case n2 < 0 || 2 < n2: - panic(badN2) - } - - if n == 0 || n1 == 0 || n2 == 0 { - return true - } - - switch { - case len(t) < (n-1)*ldt+n: - panic(shortT) - case wantq && len(q) < (n-1)*ldq+n: - panic(shortQ) - } - - if j1+n1 >= n { - // TODO(vladimir-ch): Reference LAPACK does this check whether - // the start of the second block is in the matrix T. 
It returns - // true if it is not and moreover it does not check whether the - // whole second block fits into T. This does not feel - // satisfactory. The only caller of Dlaexc is Dtrexc, so if the - // caller makes sure that this does not happen, we could be - // stricter here. - return true - } - - j2 := j1 + 1 - j3 := j1 + 2 - - bi := blas64.Implementation() - - if n1 == 1 && n2 == 1 { - // Swap two 1×1 blocks. - t11 := t[j1*ldt+j1] - t22 := t[j2*ldt+j2] - - // Determine the transformation to perform the interchange. - cs, sn, _ := impl.Dlartg(t[j1*ldt+j2], t22-t11) - - // Apply transformation to the matrix T. - if n-j3 > 0 { - bi.Drot(n-j3, t[j1*ldt+j3:], 1, t[j2*ldt+j3:], 1, cs, sn) - } - if j1 > 0 { - bi.Drot(j1, t[j1:], ldt, t[j2:], ldt, cs, sn) - } - - t[j1*ldt+j1] = t22 - t[j2*ldt+j2] = t11 - - if wantq { - // Accumulate transformation in the matrix Q. - bi.Drot(n, q[j1:], ldq, q[j2:], ldq, cs, sn) - } - - return true - } - - // Swapping involves at least one 2×2 block. - // - // Copy the diagonal block of order n1+n2 to the local array d and - // compute its norm. - nd := n1 + n2 - var d [16]float64 - const ldd = 4 - impl.Dlacpy(blas.All, nd, nd, t[j1*ldt+j1:], ldt, d[:], ldd) - dnorm := impl.Dlange(lapack.MaxAbs, nd, nd, d[:], ldd, work) - - // Compute machine-dependent threshold for test for accepting swap. - eps := dlamchP - thresh := math.Max(10*eps*dnorm, dlamchS/eps) - - // Solve T11*X - X*T22 = scale*T12 for X. - var x [4]float64 - const ldx = 2 - scale, _, _ := impl.Dlasy2(false, false, -1, n1, n2, d[:], ldd, d[n1*ldd+n1:], ldd, d[n1:], ldd, x[:], ldx) - - // Swap the adjacent diagonal blocks. - switch { - case n1 == 1 && n2 == 2: - // Generate elementary reflector H so that - // ( scale, X11, X12 ) H = ( 0, 0, * ) - u := [3]float64{scale, x[0], 1} - _, tau := impl.Dlarfg(3, x[1], u[:2], 1) - t11 := t[j1*ldt+j1] - - // Perform swap provisionally on diagonal block in d. - impl.Dlarfx(blas.Left, 3, 3, u[:], tau, d[:], ldd, work) - impl.Dlarfx(blas.Right, 3, 3, u[:], tau, d[:], ldd, work) - - // Test whether to reject swap. - if math.Max(math.Abs(d[2*ldd]), math.Max(math.Abs(d[2*ldd+1]), math.Abs(d[2*ldd+2]-t11))) > thresh { - return false - } - - // Accept swap: apply transformation to the entire matrix T. - impl.Dlarfx(blas.Left, 3, n-j1, u[:], tau, t[j1*ldt+j1:], ldt, work) - impl.Dlarfx(blas.Right, j2+1, 3, u[:], tau, t[j1:], ldt, work) - - t[j3*ldt+j1] = 0 - t[j3*ldt+j2] = 0 - t[j3*ldt+j3] = t11 - - if wantq { - // Accumulate transformation in the matrix Q. - impl.Dlarfx(blas.Right, n, 3, u[:], tau, q[j1:], ldq, work) - } - - case n1 == 2 && n2 == 1: - // Generate elementary reflector H so that: - // H ( -X11 ) = ( * ) - // ( -X21 ) = ( 0 ) - // ( scale ) = ( 0 ) - u := [3]float64{1, -x[ldx], scale} - _, tau := impl.Dlarfg(3, -x[0], u[1:], 1) - t33 := t[j3*ldt+j3] - - // Perform swap provisionally on diagonal block in D. - impl.Dlarfx(blas.Left, 3, 3, u[:], tau, d[:], ldd, work) - impl.Dlarfx(blas.Right, 3, 3, u[:], tau, d[:], ldd, work) - - // Test whether to reject swap. - if math.Max(math.Abs(d[ldd]), math.Max(math.Abs(d[2*ldd]), math.Abs(d[0]-t33))) > thresh { - return false - } - - // Accept swap: apply transformation to the entire matrix T. - impl.Dlarfx(blas.Right, j3+1, 3, u[:], tau, t[j1:], ldt, work) - impl.Dlarfx(blas.Left, 3, n-j1-1, u[:], tau, t[j1*ldt+j2:], ldt, work) - - t[j1*ldt+j1] = t33 - t[j2*ldt+j1] = 0 - t[j3*ldt+j1] = 0 - - if wantq { - // Accumulate transformation in the matrix Q. 
- impl.Dlarfx(blas.Right, n, 3, u[:], tau, q[j1:], ldq, work) - } - - default: // n1 == 2 && n2 == 2 - // Generate elementary reflectors H_1 and H_2 so that: - // H_2 H_1 ( -X11 -X12 ) = ( * * ) - // ( -X21 -X22 ) ( 0 * ) - // ( scale 0 ) ( 0 0 ) - // ( 0 scale ) ( 0 0 ) - u1 := [3]float64{1, -x[ldx], scale} - _, tau1 := impl.Dlarfg(3, -x[0], u1[1:], 1) - - temp := -tau1 * (x[1] + u1[1]*x[ldx+1]) - u2 := [3]float64{1, -temp * u1[2], scale} - _, tau2 := impl.Dlarfg(3, -temp*u1[1]-x[ldx+1], u2[1:], 1) - - // Perform swap provisionally on diagonal block in D. - impl.Dlarfx(blas.Left, 3, 4, u1[:], tau1, d[:], ldd, work) - impl.Dlarfx(blas.Right, 4, 3, u1[:], tau1, d[:], ldd, work) - impl.Dlarfx(blas.Left, 3, 4, u2[:], tau2, d[ldd:], ldd, work) - impl.Dlarfx(blas.Right, 4, 3, u2[:], tau2, d[1:], ldd, work) - - // Test whether to reject swap. - m1 := math.Max(math.Abs(d[2*ldd]), math.Abs(d[2*ldd+1])) - m2 := math.Max(math.Abs(d[3*ldd]), math.Abs(d[3*ldd+1])) - if math.Max(m1, m2) > thresh { - return false - } - - // Accept swap: apply transformation to the entire matrix T. - j4 := j1 + 3 - impl.Dlarfx(blas.Left, 3, n-j1, u1[:], tau1, t[j1*ldt+j1:], ldt, work) - impl.Dlarfx(blas.Right, j4+1, 3, u1[:], tau1, t[j1:], ldt, work) - impl.Dlarfx(blas.Left, 3, n-j1, u2[:], tau2, t[j2*ldt+j1:], ldt, work) - impl.Dlarfx(blas.Right, j4+1, 3, u2[:], tau2, t[j2:], ldt, work) - - t[j3*ldt+j1] = 0 - t[j3*ldt+j2] = 0 - t[j4*ldt+j1] = 0 - t[j4*ldt+j2] = 0 - - if wantq { - // Accumulate transformation in the matrix Q. - impl.Dlarfx(blas.Right, n, 3, u1[:], tau1, q[j1:], ldq, work) - impl.Dlarfx(blas.Right, n, 3, u2[:], tau2, q[j2:], ldq, work) - } - } - - if n2 == 2 { - // Standardize new 2×2 block T11. - a, b := t[j1*ldt+j1], t[j1*ldt+j2] - c, d := t[j2*ldt+j1], t[j2*ldt+j2] - var cs, sn float64 - t[j1*ldt+j1], t[j1*ldt+j2], t[j2*ldt+j1], t[j2*ldt+j2], _, _, _, _, cs, sn = impl.Dlanv2(a, b, c, d) - if n-j1-2 > 0 { - bi.Drot(n-j1-2, t[j1*ldt+j1+2:], 1, t[j2*ldt+j1+2:], 1, cs, sn) - } - if j1 > 0 { - bi.Drot(j1, t[j1:], ldt, t[j2:], ldt, cs, sn) - } - if wantq { - bi.Drot(n, q[j1:], ldq, q[j2:], ldq, cs, sn) - } - } - if n1 == 2 { - // Standardize new 2×2 block T22. - j3 := j1 + n2 - j4 := j3 + 1 - a, b := t[j3*ldt+j3], t[j3*ldt+j4] - c, d := t[j4*ldt+j3], t[j4*ldt+j4] - var cs, sn float64 - t[j3*ldt+j3], t[j3*ldt+j4], t[j4*ldt+j3], t[j4*ldt+j4], _, _, _, _, cs, sn = impl.Dlanv2(a, b, c, d) - if n-j3-2 > 0 { - bi.Drot(n-j3-2, t[j3*ldt+j3+2:], 1, t[j4*ldt+j3+2:], 1, cs, sn) - } - bi.Drot(j3, t[j3:], ldt, t[j4:], ldt, cs, sn) - if wantq { - bi.Drot(n, q[j3:], ldq, q[j4:], ldq, cs, sn) - } - } - - return true -} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlags2.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlags2.go deleted file mode 100644 index 6954deb42..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/dlags2.go +++ /dev/null @@ -1,182 +0,0 @@ -// Copyright ©2017 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import "math" - -// Dlags2 computes 2-by-2 orthogonal matrices U, V and Q with the -// triangles of A and B specified by upper. -// -// If upper is true -// -// U^T*A*Q = U^T*[ a1 a2 ]*Q = [ x 0 ] -// [ 0 a3 ] [ x x ] -// and -// V^T*B*Q = V^T*[ b1 b2 ]*Q = [ x 0 ] -// [ 0 b3 ] [ x x ] -// -// otherwise -// -// U^T*A*Q = U^T*[ a1 0 ]*Q = [ x x ] -// [ a2 a3 ] [ 0 x ] -// and -// V^T*B*Q = V^T*[ b1 0 ]*Q = [ x x ] -// [ b2 b3 ] [ 0 x ]. 
-// -// The rows of the transformed A and B are parallel, where -// -// U = [ csu snu ], V = [ csv snv ], Q = [ csq snq ] -// [ -snu csu ] [ -snv csv ] [ -snq csq ] -// -// Dlags2 is an internal routine. It is exported for testing purposes. -func (impl Implementation) Dlags2(upper bool, a1, a2, a3, b1, b2, b3 float64) (csu, snu, csv, snv, csq, snq float64) { - if upper { - // Input matrices A and B are upper triangular matrices. - // - // Form matrix C = A*adj(B) = [ a b ] - // [ 0 d ] - a := a1 * b3 - d := a3 * b1 - b := a2*b1 - a1*b2 - - // The SVD of real 2-by-2 triangular C. - // - // [ csl -snl ]*[ a b ]*[ csr snr ] = [ r 0 ] - // [ snl csl ] [ 0 d ] [ -snr csr ] [ 0 t ] - _, _, snr, csr, snl, csl := impl.Dlasv2(a, b, d) - - if math.Abs(csl) >= math.Abs(snl) || math.Abs(csr) >= math.Abs(snr) { - // Compute the [0, 0] and [0, 1] elements of U^T*A and V^T*B, - // and [0, 1] element of |U|^T*|A| and |V|^T*|B|. - - ua11r := csl * a1 - ua12 := csl*a2 + snl*a3 - - vb11r := csr * b1 - vb12 := csr*b2 + snr*b3 - - aua12 := math.Abs(csl)*math.Abs(a2) + math.Abs(snl)*math.Abs(a3) - avb12 := math.Abs(csr)*math.Abs(b2) + math.Abs(snr)*math.Abs(b3) - - // Zero [0, 1] elements of U^T*A and V^T*B. - if math.Abs(ua11r)+math.Abs(ua12) != 0 { - if aua12/(math.Abs(ua11r)+math.Abs(ua12)) <= avb12/(math.Abs(vb11r)+math.Abs(vb12)) { - csq, snq, _ = impl.Dlartg(-ua11r, ua12) - } else { - csq, snq, _ = impl.Dlartg(-vb11r, vb12) - } - } else { - csq, snq, _ = impl.Dlartg(-vb11r, vb12) - } - - csu = csl - snu = -snl - csv = csr - snv = -snr - } else { - // Compute the [1, 0] and [1, 1] elements of U^T*A and V^T*B, - // and [1, 1] element of |U|^T*|A| and |V|^T*|B|. - - ua21 := -snl * a1 - ua22 := -snl*a2 + csl*a3 - - vb21 := -snr * b1 - vb22 := -snr*b2 + csr*b3 - - aua22 := math.Abs(snl)*math.Abs(a2) + math.Abs(csl)*math.Abs(a3) - avb22 := math.Abs(snr)*math.Abs(b2) + math.Abs(csr)*math.Abs(b3) - - // Zero [1, 1] elements of U^T*A and V^T*B, and then swap. - if math.Abs(ua21)+math.Abs(ua22) != 0 { - if aua22/(math.Abs(ua21)+math.Abs(ua22)) <= avb22/(math.Abs(vb21)+math.Abs(vb22)) { - csq, snq, _ = impl.Dlartg(-ua21, ua22) - } else { - csq, snq, _ = impl.Dlartg(-vb21, vb22) - } - } else { - csq, snq, _ = impl.Dlartg(-vb21, vb22) - } - - csu = snl - snu = csl - csv = snr - snv = csr - } - } else { - // Input matrices A and B are lower triangular matrices - // - // Form matrix C = A*adj(B) = [ a 0 ] - // [ c d ] - a := a1 * b3 - d := a3 * b1 - c := a2*b3 - a3*b2 - - // The SVD of real 2-by-2 triangular C - // - // [ csl -snl ]*[ a 0 ]*[ csr snr ] = [ r 0 ] - // [ snl csl ] [ c d ] [ -snr csr ] [ 0 t ] - _, _, snr, csr, snl, csl := impl.Dlasv2(a, c, d) - - if math.Abs(csr) >= math.Abs(snr) || math.Abs(csl) >= math.Abs(snl) { - // Compute the [1, 0] and [1, 1] elements of U^T*A and V^T*B, - // and [1, 0] element of |U|^T*|A| and |V|^T*|B|. - - ua21 := -snr*a1 + csr*a2 - ua22r := csr * a3 - - vb21 := -snl*b1 + csl*b2 - vb22r := csl * b3 - - aua21 := math.Abs(snr)*math.Abs(a1) + math.Abs(csr)*math.Abs(a2) - avb21 := math.Abs(snl)*math.Abs(b1) + math.Abs(csl)*math.Abs(b2) - - // Zero [1, 0] elements of U^T*A and V^T*B. 
- if (math.Abs(ua21) + math.Abs(ua22r)) != 0 { - if aua21/(math.Abs(ua21)+math.Abs(ua22r)) <= avb21/(math.Abs(vb21)+math.Abs(vb22r)) { - csq, snq, _ = impl.Dlartg(ua22r, ua21) - } else { - csq, snq, _ = impl.Dlartg(vb22r, vb21) - } - } else { - csq, snq, _ = impl.Dlartg(vb22r, vb21) - } - - csu = csr - snu = -snr - csv = csl - snv = -snl - } else { - // Compute the [0, 0] and [0, 1] elements of U^T *A and V^T *B, - // and [0, 0] element of |U|^T*|A| and |V|^T*|B|. - - ua11 := csr*a1 + snr*a2 - ua12 := snr * a3 - - vb11 := csl*b1 + snl*b2 - vb12 := snl * b3 - - aua11 := math.Abs(csr)*math.Abs(a1) + math.Abs(snr)*math.Abs(a2) - avb11 := math.Abs(csl)*math.Abs(b1) + math.Abs(snl)*math.Abs(b2) - - // Zero [0, 0] elements of U^T*A and V^T*B, and then swap. - if (math.Abs(ua11) + math.Abs(ua12)) != 0 { - if aua11/(math.Abs(ua11)+math.Abs(ua12)) <= avb11/(math.Abs(vb11)+math.Abs(vb12)) { - csq, snq, _ = impl.Dlartg(ua12, ua11) - } else { - csq, snq, _ = impl.Dlartg(vb12, vb11) - } - } else { - csq, snq, _ = impl.Dlartg(vb12, vb11) - } - - csu = snr - snu = csr - csv = snl - snv = csl - } - } - - return csu, snu, csv, snv, csq, snq -} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlahqr.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlahqr.go deleted file mode 100644 index 00a869bce..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/dlahqr.go +++ /dev/null @@ -1,431 +0,0 @@ -// Copyright ©2016 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import ( - "math" - - "gonum.org/v1/gonum/blas/blas64" -) - -// Dlahqr computes the eigenvalues and Schur factorization of a block of an n×n -// upper Hessenberg matrix H, using the double-shift/single-shift QR algorithm. -// -// h and ldh represent the matrix H. Dlahqr works primarily with the Hessenberg -// submatrix H[ilo:ihi+1,ilo:ihi+1], but applies transformations to all of H if -// wantt is true. It is assumed that H[ihi+1:n,ihi+1:n] is already upper -// quasi-triangular, although this is not checked. -// -// It must hold that -// 0 <= ilo <= max(0,ihi), and ihi < n, -// and that -// H[ilo,ilo-1] == 0, if ilo > 0, -// otherwise Dlahqr will panic. -// -// If unconverged is zero on return, wr[ilo:ihi+1] and wi[ilo:ihi+1] will contain -// respectively the real and imaginary parts of the computed eigenvalues ilo -// to ihi. If two eigenvalues are computed as a complex conjugate pair, they are -// stored in consecutive elements of wr and wi, say the i-th and (i+1)th, with -// wi[i] > 0 and wi[i+1] < 0. If wantt is true, the eigenvalues are stored in -// the same order as on the diagonal of the Schur form returned in H, with -// wr[i] = H[i,i], and, if H[i:i+2,i:i+2] is a 2×2 diagonal block, -// wi[i] = sqrt(abs(H[i+1,i]*H[i,i+1])) and wi[i+1] = -wi[i]. -// -// wr and wi must have length ihi+1. -// -// z and ldz represent an n×n matrix Z. If wantz is true, the transformations -// will be applied to the submatrix Z[iloz:ihiz+1,ilo:ihi+1] and it must hold that -// 0 <= iloz <= ilo, and ihi <= ihiz < n. -// If wantz is false, z is not referenced. -// -// unconverged indicates whether Dlahqr computed all the eigenvalues ilo to ihi -// in a total of 30 iterations per eigenvalue. -// -// If unconverged is zero, all the eigenvalues ilo to ihi have been computed and -// will be stored on return in wr[ilo:ihi+1] and wi[ilo:ihi+1]. 
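
The conjugate-pair storage convention described above can be consumed as in the following sketch (illustrative only, not part of this patch); the sample wr and wi values are assumptions:

    package main

    import "fmt"

    // printEigenvalues walks Dlahqr's output convention: wi[i] > 0 marks the
    // first element of a complex conjugate pair stored in entries i and i+1.
    func printEigenvalues(wr, wi []float64, ilo, ihi int) {
        for i := ilo; i <= ihi; i++ {
            if wi[i] > 0 {
                fmt.Printf("%v ± %vi\n", wr[i], wi[i])
                i++ // skip the conjugate partner
            } else {
                fmt.Printf("%v\n", wr[i])
            }
        }
    }

    func main() {
        // One real eigenvalue followed by a conjugate pair.
        printEigenvalues([]float64{2, 1, 1}, []float64{0, 3, -3}, 0, 2)
    }
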
-// -// If unconverged is zero and wantt is true, H[ilo:ihi+1,ilo:ihi+1] will be -// overwritten on return by upper quasi-triangular full Schur form with any -// 2×2 diagonal blocks in standard form. -// -// If unconverged is zero and if wantt is false, the contents of h on return is -// unspecified. -// -// If unconverged is positive, some eigenvalues have not converged, and -// wr[unconverged:ihi+1] and wi[unconverged:ihi+1] contain those eigenvalues -// which have been successfully computed. -// -// If unconverged is positive and wantt is true, then on return -// (initial H)*U = U*(final H), (*) -// where U is an orthogonal matrix. The final H is upper Hessenberg and -// H[unconverged:ihi+1,unconverged:ihi+1] is upper quasi-triangular. -// -// If unconverged is positive and wantt is false, on return the remaining -// unconverged eigenvalues are the eigenvalues of the upper Hessenberg matrix -// H[ilo:unconverged,ilo:unconverged]. -// -// If unconverged is positive and wantz is true, then on return -// (final Z) = (initial Z)*U, -// where U is the orthogonal matrix in (*) regardless of the value of wantt. -// -// Dlahqr is an internal routine. It is exported for testing purposes. -func (impl Implementation) Dlahqr(wantt, wantz bool, n, ilo, ihi int, h []float64, ldh int, wr, wi []float64, iloz, ihiz int, z []float64, ldz int) (unconverged int) { - switch { - case n < 0: - panic(nLT0) - case ilo < 0, max(0, ihi) < ilo: - panic(badIlo) - case ihi >= n: - panic(badIhi) - case ldh < max(1, n): - panic(badLdH) - case wantz && (iloz < 0 || ilo < iloz): - panic(badIloz) - case wantz && (ihiz < ihi || n <= ihiz): - panic(badIhiz) - case ldz < 1, wantz && ldz < n: - panic(badLdZ) - } - - // Quick return if possible. - if n == 0 { - return 0 - } - - switch { - case len(h) < (n-1)*ldh+n: - panic(shortH) - case len(wr) != ihi+1: - panic(shortWr) - case len(wi) != ihi+1: - panic(shortWi) - case wantz && len(z) < (n-1)*ldz+n: - panic(shortZ) - case ilo > 0 && h[ilo*ldh+ilo-1] != 0: - panic(notIsolated) - } - - if ilo == ihi { - wr[ilo] = h[ilo*ldh+ilo] - wi[ilo] = 0 - return 0 - } - - // Clear out the trash. - for j := ilo; j < ihi-2; j++ { - h[(j+2)*ldh+j] = 0 - h[(j+3)*ldh+j] = 0 - } - if ilo <= ihi-2 { - h[ihi*ldh+ihi-2] = 0 - } - - nh := ihi - ilo + 1 - nz := ihiz - iloz + 1 - - // Set machine-dependent constants for the stopping criterion. - ulp := dlamchP - smlnum := float64(nh) / ulp * dlamchS - - // i1 and i2 are the indices of the first row and last column of H to - // which transformations must be applied. If eigenvalues only are being - // computed, i1 and i2 are set inside the main loop. - var i1, i2 int - if wantt { - i1 = 0 - i2 = n - 1 - } - - itmax := 30 * max(10, nh) // Total number of QR iterations allowed. - - // The main loop begins here. i is the loop index and decreases from ihi - // to ilo in steps of 1 or 2. Each iteration of the loop works with the - // active submatrix in rows and columns l to i. Eigenvalues i+1 to ihi - // have already converged. Either l = ilo or H[l,l-1] is negligible so - // that the matrix splits. - bi := blas64.Implementation() - i := ihi - for i >= ilo { - l := ilo - - // Perform QR iterations on rows and columns ilo to i until a - // submatrix of order 1 or 2 splits off at the bottom because a - // subdiagonal element has become negligible. - converged := false - for its := 0; its <= itmax; its++ { - // Look for a single small subdiagonal element. 
- var k int - for k = i; k > l; k-- { - if math.Abs(h[k*ldh+k-1]) <= smlnum { - break - } - tst := math.Abs(h[(k-1)*ldh+k-1]) + math.Abs(h[k*ldh+k]) - if tst == 0 { - if k-2 >= ilo { - tst += math.Abs(h[(k-1)*ldh+k-2]) - } - if k+1 <= ihi { - tst += math.Abs(h[(k+1)*ldh+k]) - } - } - // The following is a conservative small - // subdiagonal deflation criterion due to Ahues - // & Tisseur (LAWN 122, 1997). It has better - // mathematical foundation and improves accuracy - // in some cases. - if math.Abs(h[k*ldh+k-1]) <= ulp*tst { - ab := math.Max(math.Abs(h[k*ldh+k-1]), math.Abs(h[(k-1)*ldh+k])) - ba := math.Min(math.Abs(h[k*ldh+k-1]), math.Abs(h[(k-1)*ldh+k])) - aa := math.Max(math.Abs(h[k*ldh+k]), math.Abs(h[(k-1)*ldh+k-1]-h[k*ldh+k])) - bb := math.Min(math.Abs(h[k*ldh+k]), math.Abs(h[(k-1)*ldh+k-1]-h[k*ldh+k])) - s := aa + ab - if ab/s*ba <= math.Max(smlnum, aa/s*bb*ulp) { - break - } - } - } - l = k - if l > ilo { - // H[l,l-1] is negligible. - h[l*ldh+l-1] = 0 - } - if l >= i-1 { - // Break the loop because a submatrix of order 1 - // or 2 has split off. - converged = true - break - } - - // Now the active submatrix is in rows and columns l to - // i. If eigenvalues only are being computed, only the - // active submatrix need be transformed. - if !wantt { - i1 = l - i2 = i - } - - const ( - dat1 = 3.0 - dat2 = -0.4375 - ) - var h11, h21, h12, h22 float64 - switch its { - case 10: // Exceptional shift. - s := math.Abs(h[(l+1)*ldh+l]) + math.Abs(h[(l+2)*ldh+l+1]) - h11 = dat1*s + h[l*ldh+l] - h12 = dat2 * s - h21 = s - h22 = h11 - case 20: // Exceptional shift. - s := math.Abs(h[i*ldh+i-1]) + math.Abs(h[(i-1)*ldh+i-2]) - h11 = dat1*s + h[i*ldh+i] - h12 = dat2 * s - h21 = s - h22 = h11 - default: // Prepare to use Francis' double shift (i.e., - // 2nd degree generalized Rayleigh quotient). - h11 = h[(i-1)*ldh+i-1] - h21 = h[i*ldh+i-1] - h12 = h[(i-1)*ldh+i] - h22 = h[i*ldh+i] - } - s := math.Abs(h11) + math.Abs(h12) + math.Abs(h21) + math.Abs(h22) - var ( - rt1r, rt1i float64 - rt2r, rt2i float64 - ) - if s != 0 { - h11 /= s - h21 /= s - h12 /= s - h22 /= s - tr := (h11 + h22) / 2 - det := (h11-tr)*(h22-tr) - h12*h21 - rtdisc := math.Sqrt(math.Abs(det)) - if det >= 0 { - // Complex conjugate shifts. - rt1r = tr * s - rt2r = rt1r - rt1i = rtdisc * s - rt2i = -rt1i - } else { - // Real shifts (use only one of them). - rt1r = tr + rtdisc - rt2r = tr - rtdisc - if math.Abs(rt1r-h22) <= math.Abs(rt2r-h22) { - rt1r *= s - rt2r = rt1r - } else { - rt2r *= s - rt1r = rt2r - } - rt1i = 0 - rt2i = 0 - } - } - - // Look for two consecutive small subdiagonal elements. - var m int - var v [3]float64 - for m = i - 2; m >= l; m-- { - // Determine the effect of starting the - // double-shift QR iteration at row m, and see - // if this would make H[m,m-1] negligible. The - // following uses scaling to avoid overflows and - // most underflows. - h21s := h[(m+1)*ldh+m] - s := math.Abs(h[m*ldh+m]-rt2r) + math.Abs(rt2i) + math.Abs(h21s) - h21s /= s - v[0] = h21s*h[m*ldh+m+1] + (h[m*ldh+m]-rt1r)*((h[m*ldh+m]-rt2r)/s) - rt2i/s*rt1i - v[1] = h21s * (h[m*ldh+m] + h[(m+1)*ldh+m+1] - rt1r - rt2r) - v[2] = h21s * h[(m+2)*ldh+m+1] - s = math.Abs(v[0]) + math.Abs(v[1]) + math.Abs(v[2]) - v[0] /= s - v[1] /= s - v[2] /= s - if m == l { - break - } - dsum := math.Abs(h[(m-1)*ldh+m-1]) + math.Abs(h[m*ldh+m]) + math.Abs(h[(m+1)*ldh+m+1]) - if math.Abs(h[m*ldh+m-1])*(math.Abs(v[1])+math.Abs(v[2])) <= ulp*math.Abs(v[0])*dsum { - break - } - } - - // Double-shift QR step. 
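    // Note (editorial, not in the original source): the loop below is the
    // implicit double-shift step. It applies the real polynomial
    // (H - rt1*I)*(H - rt2*I) without ever forming it: only its first
    // column v was computed above, the first reflection creates a bulge
    // below the subdiagonal, and each later reflection chases the bulge
    // one row down until Hessenberg form is restored, so a complex
    // conjugate shift pair is handled entirely in real arithmetic.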
- for k := m; k < i; k++ { - // The first iteration of this loop determines a - // reflection G from the vector V and applies it - // from left and right to H, thus creating a - // non-zero bulge below the subdiagonal. - // - // Each subsequent iteration determines a - // reflection G to restore the Hessenberg form - // in the (k-1)th column, and thus chases the - // bulge one step toward the bottom of the - // active submatrix. nr is the order of G. - - nr := min(3, i-k+1) - if k > m { - bi.Dcopy(nr, h[k*ldh+k-1:], ldh, v[:], 1) - } - var t0 float64 - v[0], t0 = impl.Dlarfg(nr, v[0], v[1:], 1) - if k > m { - h[k*ldh+k-1] = v[0] - h[(k+1)*ldh+k-1] = 0 - if k < i-1 { - h[(k+2)*ldh+k-1] = 0 - } - } else if m > l { - // Use the following instead of H[k,k-1] = -H[k,k-1] - // to avoid a bug when v[1] and v[2] underflow. - h[k*ldh+k-1] *= 1 - t0 - } - t1 := t0 * v[1] - if nr == 3 { - t2 := t0 * v[2] - - // Apply G from the left to transform - // the rows of the matrix in columns k - // to i2. - for j := k; j <= i2; j++ { - sum := h[k*ldh+j] + v[1]*h[(k+1)*ldh+j] + v[2]*h[(k+2)*ldh+j] - h[k*ldh+j] -= sum * t0 - h[(k+1)*ldh+j] -= sum * t1 - h[(k+2)*ldh+j] -= sum * t2 - } - - // Apply G from the right to transform - // the columns of the matrix in rows i1 - // to min(k+3,i). - for j := i1; j <= min(k+3, i); j++ { - sum := h[j*ldh+k] + v[1]*h[j*ldh+k+1] + v[2]*h[j*ldh+k+2] - h[j*ldh+k] -= sum * t0 - h[j*ldh+k+1] -= sum * t1 - h[j*ldh+k+2] -= sum * t2 - } - - if wantz { - // Accumulate transformations in the matrix Z. - for j := iloz; j <= ihiz; j++ { - sum := z[j*ldz+k] + v[1]*z[j*ldz+k+1] + v[2]*z[j*ldz+k+2] - z[j*ldz+k] -= sum * t0 - z[j*ldz+k+1] -= sum * t1 - z[j*ldz+k+2] -= sum * t2 - } - } - } else if nr == 2 { - // Apply G from the left to transform - // the rows of the matrix in columns k - // to i2. - for j := k; j <= i2; j++ { - sum := h[k*ldh+j] + v[1]*h[(k+1)*ldh+j] - h[k*ldh+j] -= sum * t0 - h[(k+1)*ldh+j] -= sum * t1 - } - - // Apply G from the right to transform - // the columns of the matrix in rows i1 - // to min(k+3,i). - for j := i1; j <= i; j++ { - sum := h[j*ldh+k] + v[1]*h[j*ldh+k+1] - h[j*ldh+k] -= sum * t0 - h[j*ldh+k+1] -= sum * t1 - } - - if wantz { - // Accumulate transformations in the matrix Z. - for j := iloz; j <= ihiz; j++ { - sum := z[j*ldz+k] + v[1]*z[j*ldz+k+1] - z[j*ldz+k] -= sum * t0 - z[j*ldz+k+1] -= sum * t1 - } - } - } - } - } - - if !converged { - // The QR iteration finished without splitting off a - // submatrix of order 1 or 2. - return i + 1 - } - - if l == i { - // H[i,i-1] is negligible: one eigenvalue has converged. - wr[i] = h[i*ldh+i] - wi[i] = 0 - } else if l == i-1 { - // H[i-1,i-2] is negligible: a pair of eigenvalues have converged. - - // Transform the 2×2 submatrix to standard Schur form, - // and compute and store the eigenvalues. - var cs, sn float64 - a, b := h[(i-1)*ldh+i-1], h[(i-1)*ldh+i] - c, d := h[i*ldh+i-1], h[i*ldh+i] - a, b, c, d, wr[i-1], wi[i-1], wr[i], wi[i], cs, sn = impl.Dlanv2(a, b, c, d) - h[(i-1)*ldh+i-1], h[(i-1)*ldh+i] = a, b - h[i*ldh+i-1], h[i*ldh+i] = c, d - - if wantt { - // Apply the transformation to the rest of H. - if i2 > i { - bi.Drot(i2-i, h[(i-1)*ldh+i+1:], 1, h[i*ldh+i+1:], 1, cs, sn) - } - bi.Drot(i-i1-1, h[i1*ldh+i-1:], ldh, h[i1*ldh+i:], ldh, cs, sn) - } - - if wantz { - // Apply the transformation to Z. - bi.Drot(nz, z[iloz*ldz+i-1:], ldz, z[iloz*ldz+i:], ldz, cs, sn) - } - } - - // Return to start of the main loop with new value of i. 
- i = l - 1 - } - return 0 -} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlahr2.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlahr2.go deleted file mode 100644 index a47dc8fed..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/dlahr2.go +++ /dev/null @@ -1,195 +0,0 @@ -// Copyright ©2016 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import ( - "gonum.org/v1/gonum/blas" - "gonum.org/v1/gonum/blas/blas64" -) - -// Dlahr2 reduces the first nb columns of a real general n×(n-k+1) matrix A so -// that elements below the k-th subdiagonal are zero. The reduction is performed -// by an orthogonal similarity transformation Q^T * A * Q. Dlahr2 returns the -// matrices V and T which determine Q as a block reflector I - V*T*V^T, and -// also the matrix Y = A * V * T. -// -// The matrix Q is represented as a product of nb elementary reflectors -// Q = H_0 * H_1 * ... * H_{nb-1}. -// Each H_i has the form -// H_i = I - tau[i] * v * v^T, -// where v is a real vector with v[0:i+k-1] = 0 and v[i+k-1] = 1. v[i+k:n] is -// stored on exit in A[i+k+1:n,i]. -// -// The elements of the vectors v together form the (n-k+1)×nb matrix -// V which is needed, with T and Y, to apply the transformation to the -// unreduced part of the matrix, using an update of the form -// A = (I - V*T*V^T) * (A - Y*V^T). -// -// On entry, a contains the n×(n-k+1) general matrix A. On return, the elements -// on and above the k-th subdiagonal in the first nb columns are overwritten -// with the corresponding elements of the reduced matrix; the elements below the -// k-th subdiagonal, with the slice tau, represent the matrix Q as a product of -// elementary reflectors. The other columns of A are unchanged. -// -// The contents of A on exit are illustrated by the following example -// with n = 7, k = 3 and nb = 2: -// [ a a a a a ] -// [ a a a a a ] -// [ a a a a a ] -// [ h h a a a ] -// [ v0 h a a a ] -// [ v0 v1 a a a ] -// [ v0 v1 a a a ] -// where a denotes an element of the original matrix A, h denotes a -// modified element of the upper Hessenberg matrix H, and vi denotes an -// element of the vector defining H_i. -// -// k is the offset for the reduction. Elements below the k-th subdiagonal in the -// first nb columns are reduced to zero. -// -// nb is the number of columns to be reduced. -// -// On entry, a represents the n×(n-k+1) matrix A. On return, the elements on and -// above the k-th subdiagonal in the first nb columns are overwritten with the -// corresponding elements of the reduced matrix. The elements below the k-th -// subdiagonal, with the slice tau, represent the matrix Q as a product of -// elementary reflectors. The other columns of A are unchanged. -// -// tau will contain the scalar factors of the elementary reflectors. It must -// have length at least nb. -// -// t and ldt represent the nb×nb upper triangular matrix T, and y and ldy -// represent the n×nb matrix Y. -// -// Dlahr2 is an internal routine. It is exported for testing purposes. -func (impl Implementation) Dlahr2(n, k, nb int, a []float64, lda int, tau, t []float64, ldt int, y []float64, ldy int) { - switch { - case n < 0: - panic(nLT0) - case k < 0: - panic(kLT0) - case nb < 0: - panic(nbLT0) - case nb > n: - panic(nbGTN) - case lda < max(1, n-k+1): - panic(badLdA) - case ldt < max(1, nb): - panic(badLdT) - case ldy < max(1, nb): - panic(badLdY) - } - - // Quick return if possible. 
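    // Note (editorial, not in the original source): n < 0 already panics in
    // the parameter checks above, so the guard below can never fire; the
    // effective quick return is the n == 1 check further down (reference
    // LAPACK's DLAHR2 returns early whenever n <= 1).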
- if n < 0 { - return - } - - switch { - case len(a) < (n-1)*lda+n-k+1: - panic(shortA) - case len(tau) < nb: - panic(shortTau) - case len(t) < (nb-1)*ldt+nb: - panic(shortT) - case len(y) < (n-1)*ldy+nb: - panic(shortY) - } - - // Quick return if possible. - if n == 1 { - return - } - - bi := blas64.Implementation() - var ei float64 - for i := 0; i < nb; i++ { - if i > 0 { - // Update A[k:n,i]. - - // Update i-th column of A - Y * V^T. - bi.Dgemv(blas.NoTrans, n-k, i, - -1, y[k*ldy:], ldy, - a[(k+i-1)*lda:], 1, - 1, a[k*lda+i:], lda) - - // Apply I - V * T^T * V^T to this column (call it b) - // from the left, using the last column of T as - // workspace. - // Let V = [ V1 ] and b = [ b1 ] (first i rows) - // [ V2 ] [ b2 ] - // where V1 is unit lower triangular. - // - // w := V1^T * b1. - bi.Dcopy(i, a[k*lda+i:], lda, t[nb-1:], ldt) - bi.Dtrmv(blas.Lower, blas.Trans, blas.Unit, i, - a[k*lda:], lda, t[nb-1:], ldt) - - // w := w + V2^T * b2. - bi.Dgemv(blas.Trans, n-k-i, i, - 1, a[(k+i)*lda:], lda, - a[(k+i)*lda+i:], lda, - 1, t[nb-1:], ldt) - - // w := T^T * w. - bi.Dtrmv(blas.Upper, blas.Trans, blas.NonUnit, i, - t, ldt, t[nb-1:], ldt) - - // b2 := b2 - V2*w. - bi.Dgemv(blas.NoTrans, n-k-i, i, - -1, a[(k+i)*lda:], lda, - t[nb-1:], ldt, - 1, a[(k+i)*lda+i:], lda) - - // b1 := b1 - V1*w. - bi.Dtrmv(blas.Lower, blas.NoTrans, blas.Unit, i, - a[k*lda:], lda, t[nb-1:], ldt) - bi.Daxpy(i, -1, t[nb-1:], ldt, a[k*lda+i:], lda) - - a[(k+i-1)*lda+i-1] = ei - } - - // Generate the elementary reflector H_i to annihilate - // A[k+i+1:n,i]. - ei, tau[i] = impl.Dlarfg(n-k-i, a[(k+i)*lda+i], a[min(k+i+1, n-1)*lda+i:], lda) - a[(k+i)*lda+i] = 1 - - // Compute Y[k:n,i]. - bi.Dgemv(blas.NoTrans, n-k, n-k-i, - 1, a[k*lda+i+1:], lda, - a[(k+i)*lda+i:], lda, - 0, y[k*ldy+i:], ldy) - bi.Dgemv(blas.Trans, n-k-i, i, - 1, a[(k+i)*lda:], lda, - a[(k+i)*lda+i:], lda, - 0, t[i:], ldt) - bi.Dgemv(blas.NoTrans, n-k, i, - -1, y[k*ldy:], ldy, - t[i:], ldt, - 1, y[k*ldy+i:], ldy) - bi.Dscal(n-k, tau[i], y[k*ldy+i:], ldy) - - // Compute T[0:i,i]. - bi.Dscal(i, -tau[i], t[i:], ldt) - bi.Dtrmv(blas.Upper, blas.NoTrans, blas.NonUnit, i, - t, ldt, t[i:], ldt) - - t[i*ldt+i] = tau[i] - } - a[(k+nb-1)*lda+nb-1] = ei - - // Compute Y[0:k,0:nb]. - impl.Dlacpy(blas.All, k, nb, a[1:], lda, y, ldy) - bi.Dtrmm(blas.Right, blas.Lower, blas.NoTrans, blas.Unit, k, nb, - 1, a[k*lda:], lda, y, ldy) - if n > k+nb { - bi.Dgemm(blas.NoTrans, blas.NoTrans, k, nb, n-k-nb, - 1, a[1+nb:], lda, - a[(k+nb)*lda:], lda, - 1, y, ldy) - } - bi.Dtrmm(blas.Right, blas.Upper, blas.NoTrans, blas.NonUnit, k, nb, - 1, t, ldt, y, ldy) -} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlaln2.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlaln2.go deleted file mode 100644 index ca0b2f78c..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/dlaln2.go +++ /dev/null @@ -1,405 +0,0 @@ -// Copyright ©2016 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import "math" - -// Dlaln2 solves a linear equation or a system of 2 linear equations of the form -// (ca A - w D) X = scale B, if trans == false, -// (ca A^T - w D) X = scale B, if trans == true, -// where A is a na×na real matrix, ca is a real scalar, D is a na×na diagonal -// real matrix, w is a scalar, real if nw == 1, complex if nw == 2, and X and B -// are na×1 matrices, real if w is real, complex if w is complex. 
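
Anticipating the parameter descriptions that follow, a minimal sketch (illustrative only, not part of this patch) of the real 2×2 case with ca = 1, D = I, and w = 0, which reduces to solving A*x = scale*b; the matrix, right-hand side, and smin value are assumptions:

    package main

    import (
        "fmt"

        "gonum.org/v1/gonum/lapack/gonum"
    )

    func main() {
        impl := gonum.Implementation{}
        a := []float64{
            4, 1,
            2, 3,
        }
        b := []float64{1, 2} // na×1 right-hand side; nw == 1, so w is real
        x := make([]float64, 2)
        const smin = 1e-12 // illustrative lower bound on the singular values
        scale, xnorm, ok := impl.Dlaln2(false, 2, 1, smin, 1, a, 2, 1, 1, b, 1, 0, 0, x, 1)
        fmt.Println(scale, xnorm, ok) // 1 0.6 true
        fmt.Println(x)                // ≈ [0.1 0.6], the solution of A*x = b
    }
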
-// -// If w is complex, X and B are represented as na×2 matrices, the first column -// of each being the real part and the second being the imaginary part. -// -// na and nw must be 1 or 2, otherwise Dlaln2 will panic. -// -// d1 and d2 are the diagonal elements of D. d2 is not used if na == 1. -// -// wr and wi represent the real and imaginary part, respectively, of the scalar -// w. wi is not used if nw == 1. -// -// smin is the desired lower bound on the singular values of A. This should be -// a safe distance away from underflow or overflow, say, between -// (underflow/machine precision) and (overflow*machine precision). -// -// If both singular values of (ca A - w D) are less than smin, smin*identity -// will be used instead of (ca A - w D). If only one singular value is less than -// smin, one element of (ca A - w D) will be perturbed enough to make the -// smallest singular value roughly smin. If both singular values are at least -// smin, (ca A - w D) will not be perturbed. In any case, the perturbation will -// be at most some small multiple of max(smin, ulp*norm(ca A - w D)). The -// singular values are computed by infinity-norm approximations, and thus will -// only be correct to a factor of 2 or so. -// -// All input quantities are assumed to be smaller than overflow by a reasonable -// factor. -// -// scale is a scaling factor less than or equal to 1 which is chosen so that X -// can be computed without overflow. X is further scaled if necessary to assure -// that norm(ca A - w D)*norm(X) is less than overflow. -// -// xnorm contains the infinity-norm of X when X is regarded as a na×nw real -// matrix. -// -// ok will be false if (ca A - w D) had to be perturbed to make its smallest -// singular value greater than smin, otherwise ok will be true. -// -// Dlaln2 is an internal routine. It is exported for testing purposes. -func (impl Implementation) Dlaln2(trans bool, na, nw int, smin, ca float64, a []float64, lda int, d1, d2 float64, b []float64, ldb int, wr, wi float64, x []float64, ldx int) (scale, xnorm float64, ok bool) { - // TODO(vladimir-ch): Consider splitting this function into two, one - // handling the real case (nw == 1) and the other handling the complex - // case (nw == 2). Given that Go has complex types, their signatures - // would be simpler and more natural, and the implementation not as - // convoluted. - - switch { - case na != 1 && na != 2: - panic(badNa) - case nw != 1 && nw != 2: - panic(badNw) - case lda < na: - panic(badLdA) - case len(a) < (na-1)*lda+na: - panic(shortA) - case ldb < nw: - panic(badLdB) - case len(b) < (na-1)*ldb+nw: - panic(shortB) - case ldx < nw: - panic(badLdX) - case len(x) < (na-1)*ldx+nw: - panic(shortX) - } - - smlnum := 2 * dlamchS - bignum := 1 / smlnum - smini := math.Max(smin, smlnum) - - ok = true - scale = 1 - - if na == 1 { - // 1×1 (i.e., scalar) system C X = B. - - if nw == 1 { - // Real 1×1 system. - - // C = ca A - w D. - csr := ca*a[0] - wr*d1 - cnorm := math.Abs(csr) - - // If |C| < smini, use C = smini. - if cnorm < smini { - csr = smini - cnorm = smini - ok = false - } - - // Check scaling for X = B / C. - bnorm := math.Abs(b[0]) - if cnorm < 1 && bnorm > math.Max(1, bignum*cnorm) { - scale = 1 / bnorm - } - - // Compute X. - x[0] = b[0] * scale / csr - xnorm = math.Abs(x[0]) - - return scale, xnorm, ok - } - - // Complex 1×1 system (w is complex). - - // C = ca A - w D. - csr := ca*a[0] - wr*d1 - csi := -wi * d1 - cnorm := math.Abs(csr) + math.Abs(csi) - - // If |C| < smini, use C = smini. 
- if cnorm < smini { - csr = smini - csi = 0 - cnorm = smini - ok = false - } - - // Check scaling for X = B / C. - bnorm := math.Abs(b[0]) + math.Abs(b[1]) - if cnorm < 1 && bnorm > math.Max(1, bignum*cnorm) { - scale = 1 / bnorm - } - - // Compute X. - cx := complex(scale*b[0], scale*b[1]) / complex(csr, csi) - x[0], x[1] = real(cx), imag(cx) - xnorm = math.Abs(x[0]) + math.Abs(x[1]) - - return scale, xnorm, ok - } - - // 2×2 system. - - // Compute the real part of - // C = ca A - w D - // or - // C = ca A^T - w D. - crv := [4]float64{ - ca*a[0] - wr*d1, - ca * a[1], - ca * a[lda], - ca*a[lda+1] - wr*d2, - } - if trans { - crv[1] = ca * a[lda] - crv[2] = ca * a[1] - } - - pivot := [4][4]int{ - {0, 1, 2, 3}, - {1, 0, 3, 2}, - {2, 3, 0, 1}, - {3, 2, 1, 0}, - } - - if nw == 1 { - // Real 2×2 system (w is real). - - // Find the largest element in C. - var cmax float64 - var icmax int - for j, v := range crv { - v = math.Abs(v) - if v > cmax { - cmax = v - icmax = j - } - } - - // If norm(C) < smini, use smini*identity. - if cmax < smini { - bnorm := math.Max(math.Abs(b[0]), math.Abs(b[ldb])) - if smini < 1 && bnorm > math.Max(1, bignum*smini) { - scale = 1 / bnorm - } - temp := scale / smini - x[0] = temp * b[0] - x[ldx] = temp * b[ldb] - xnorm = temp * bnorm - ok = false - - return scale, xnorm, ok - } - - // Gaussian elimination with complete pivoting. - // Form upper triangular matrix - // [ur11 ur12] - // [ 0 ur22] - ur11 := crv[icmax] - ur12 := crv[pivot[icmax][1]] - cr21 := crv[pivot[icmax][2]] - cr22 := crv[pivot[icmax][3]] - ur11r := 1 / ur11 - lr21 := ur11r * cr21 - ur22 := cr22 - ur12*lr21 - - // If smaller pivot < smini, use smini. - if math.Abs(ur22) < smini { - ur22 = smini - ok = false - } - - var br1, br2 float64 - if icmax > 1 { - // If the pivot lies in the second row, swap the rows. - br1 = b[ldb] - br2 = b[0] - } else { - br1 = b[0] - br2 = b[ldb] - } - br2 -= lr21 * br1 // Apply the Gaussian elimination step to the right-hand side. - - bbnd := math.Max(math.Abs(ur22*ur11r*br1), math.Abs(br2)) - if bbnd > 1 && math.Abs(ur22) < 1 && bbnd >= bignum*math.Abs(ur22) { - scale = 1 / bbnd - } - - // Solve the linear system ur*xr=br. - xr2 := br2 * scale / ur22 - xr1 := scale*br1*ur11r - ur11r*ur12*xr2 - if icmax&0x1 != 0 { - // If the pivot lies in the second column, swap the components of the solution. - x[0] = xr2 - x[ldx] = xr1 - } else { - x[0] = xr1 - x[ldx] = xr2 - } - xnorm = math.Max(math.Abs(xr1), math.Abs(xr2)) - - // Further scaling if norm(A)*norm(X) > overflow. - if xnorm > 1 && cmax > 1 && xnorm > bignum/cmax { - temp := cmax / bignum - x[0] *= temp - x[ldx] *= temp - xnorm *= temp - scale *= temp - } - - return scale, xnorm, ok - } - - // Complex 2×2 system (w is complex). - - // Find the largest element in C. - civ := [4]float64{ - -wi * d1, - 0, - 0, - -wi * d2, - } - var cmax float64 - var icmax int - for j, v := range crv { - v := math.Abs(v) - if v+math.Abs(civ[j]) > cmax { - cmax = v + math.Abs(civ[j]) - icmax = j - } - } - - // If norm(C) < smini, use smini*identity. - if cmax < smini { - br1 := math.Abs(b[0]) + math.Abs(b[1]) - br2 := math.Abs(b[ldb]) + math.Abs(b[ldb+1]) - bnorm := math.Max(br1, br2) - if smini < 1 && bnorm > 1 && bnorm > bignum*smini { - scale = 1 / bnorm - } - temp := scale / smini - x[0] = temp * b[0] - x[1] = temp * b[1] - x[ldb] = temp * b[ldb] - x[ldb+1] = temp * b[ldb+1] - xnorm = temp * bnorm - ok = false - - return scale, xnorm, ok - } - - // Gaussian elimination with complete pivoting. 
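    // Note (editorial, not in the original source): pivot[icmax] lists the
    // four entries of crv/civ reordered so that the entry of largest
    // magnitude becomes the (0,0) pivot. The right-hand side rows are
    // swapped when icmax > 1 (pivot taken from the second row) and the
    // solution components are swapped back when icmax is odd (pivot taken
    // from the second column), which together realize complete pivoting
    // on the 2×2 system.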
- ur11 := crv[icmax] - ui11 := civ[icmax] - ur12 := crv[pivot[icmax][1]] - ui12 := civ[pivot[icmax][1]] - cr21 := crv[pivot[icmax][2]] - ci21 := civ[pivot[icmax][2]] - cr22 := crv[pivot[icmax][3]] - ci22 := civ[pivot[icmax][3]] - var ( - ur11r, ui11r float64 - lr21, li21 float64 - ur12s, ui12s float64 - ur22, ui22 float64 - ) - if icmax == 0 || icmax == 3 { - // Off-diagonals of pivoted C are real. - if math.Abs(ur11) > math.Abs(ui11) { - temp := ui11 / ur11 - ur11r = 1 / (ur11 * (1 + temp*temp)) - ui11r = -temp * ur11r - } else { - temp := ur11 / ui11 - ui11r = -1 / (ui11 * (1 + temp*temp)) - ur11r = -temp * ui11r - } - lr21 = cr21 * ur11r - li21 = cr21 * ui11r - ur12s = ur12 * ur11r - ui12s = ur12 * ui11r - ur22 = cr22 - ur12*lr21 - ui22 = ci22 - ur12*li21 - } else { - // Diagonals of pivoted C are real. - ur11r = 1 / ur11 - // ui11r is already 0. - lr21 = cr21 * ur11r - li21 = ci21 * ur11r - ur12s = ur12 * ur11r - ui12s = ui12 * ur11r - ur22 = cr22 - ur12*lr21 + ui12*li21 - ui22 = -ur12*li21 - ui12*lr21 - } - u22abs := math.Abs(ur22) + math.Abs(ui22) - - // If smaller pivot < smini, use smini. - if u22abs < smini { - ur22 = smini - ui22 = 0 - ok = false - } - - var br1, bi1 float64 - var br2, bi2 float64 - if icmax > 1 { - // If the pivot lies in the second row, swap the rows. - br1 = b[ldb] - bi1 = b[ldb+1] - br2 = b[0] - bi2 = b[1] - } else { - br1 = b[0] - bi1 = b[1] - br2 = b[ldb] - bi2 = b[ldb+1] - } - br2 += -lr21*br1 + li21*bi1 - bi2 += -li21*br1 - lr21*bi1 - - bbnd1 := u22abs * (math.Abs(ur11r) + math.Abs(ui11r)) * (math.Abs(br1) + math.Abs(bi1)) - bbnd2 := math.Abs(br2) + math.Abs(bi2) - bbnd := math.Max(bbnd1, bbnd2) - if bbnd > 1 && u22abs < 1 && bbnd >= bignum*u22abs { - scale = 1 / bbnd - br1 *= scale - bi1 *= scale - br2 *= scale - bi2 *= scale - } - - cx2 := complex(br2, bi2) / complex(ur22, ui22) - xr2, xi2 := real(cx2), imag(cx2) - xr1 := ur11r*br1 - ui11r*bi1 - ur12s*xr2 + ui12s*xi2 - xi1 := ui11r*br1 + ur11r*bi1 - ui12s*xr2 - ur12s*xi2 - if icmax&0x1 != 0 { - // If the pivot lies in the second column, swap the components of the solution. - x[0] = xr2 - x[1] = xi2 - x[ldx] = xr1 - x[ldx+1] = xi1 - } else { - x[0] = xr1 - x[1] = xi1 - x[ldx] = xr2 - x[ldx+1] = xi2 - } - xnorm = math.Max(math.Abs(xr1)+math.Abs(xi1), math.Abs(xr2)+math.Abs(xi2)) - - // Further scaling if norm(A)*norm(X) > overflow. - if xnorm > 1 && cmax > 1 && xnorm > bignum/cmax { - temp := cmax / bignum - x[0] *= temp - x[1] *= temp - x[ldx] *= temp - x[ldx+1] *= temp - xnorm *= temp - scale *= temp - } - - return scale, xnorm, ok -} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlange.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlange.go deleted file mode 100644 index d9eeb840c..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/dlange.go +++ /dev/null @@ -1,86 +0,0 @@ -// Copyright ©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import ( - "math" - - "gonum.org/v1/gonum/lapack" -) - -// Dlange computes the matrix norm of the general m×n matrix a. The input norm -// specifies the norm computed. -// lapack.MaxAbs: the maximum absolute value of an element. -// lapack.MaxColumnSum: the maximum column sum of the absolute values of the entries. -// lapack.MaxRowSum: the maximum row sum of the absolute values of the entries. -// lapack.Frobenius: the square root of the sum of the squares of the entries. 
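
The four norms enumerated above differ as follows on a concrete 2×2 matrix (illustrative sketch, not part of this patch):

    package main

    import (
        "fmt"

        "gonum.org/v1/gonum/lapack"
        "gonum.org/v1/gonum/lapack/gonum"
    )

    func main() {
        impl := gonum.Implementation{}
        a := []float64{
            1, -2,
            3, -4,
        }
        work := make([]float64, 2) // referenced only by MaxColumnSum
        fmt.Println(impl.Dlange(lapack.MaxAbs, 2, 2, a, 2, work))       // 4
        fmt.Println(impl.Dlange(lapack.MaxColumnSum, 2, 2, a, 2, work)) // 6
        fmt.Println(impl.Dlange(lapack.MaxRowSum, 2, 2, a, 2, work))    // 7
        fmt.Println(impl.Dlange(lapack.Frobenius, 2, 2, a, 2, work))    // √30 ≈ 5.477
    }
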
-// If norm == lapack.MaxColumnSum, work must be of length n, and this function will panic otherwise. -// There are no restrictions on work for the other matrix norms. -func (impl Implementation) Dlange(norm lapack.MatrixNorm, m, n int, a []float64, lda int, work []float64) float64 { - // TODO(btracey): These should probably be refactored to use BLAS calls. - switch { - case norm != lapack.MaxRowSum && norm != lapack.MaxColumnSum && norm != lapack.Frobenius && norm != lapack.MaxAbs: - panic(badNorm) - case lda < max(1, n): - panic(badLdA) - } - - // Quick return if possible. - if m == 0 || n == 0 { - return 0 - } - - switch { - case len(a) < (m-1)*lda+n: - panic(badLdA) - case norm == lapack.MaxColumnSum && len(work) < n: - panic(shortWork) - } - - if norm == lapack.MaxAbs { - var value float64 - for i := 0; i < m; i++ { - for j := 0; j < n; j++ { - value = math.Max(value, math.Abs(a[i*lda+j])) - } - } - return value - } - if norm == lapack.MaxColumnSum { - for i := 0; i < n; i++ { - work[i] = 0 - } - for i := 0; i < m; i++ { - for j := 0; j < n; j++ { - work[j] += math.Abs(a[i*lda+j]) - } - } - var value float64 - for i := 0; i < n; i++ { - value = math.Max(value, work[i]) - } - return value - } - if norm == lapack.MaxRowSum { - var value float64 - for i := 0; i < m; i++ { - var sum float64 - for j := 0; j < n; j++ { - sum += math.Abs(a[i*lda+j]) - } - value = math.Max(value, sum) - } - return value - } - // norm == lapack.Frobenius - var value float64 - scale := 0.0 - sum := 1.0 - for i := 0; i < m; i++ { - scale, sum = impl.Dlassq(n, a[i*lda:], 1, scale, sum) - } - value = scale * math.Sqrt(sum) - return value -} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlanst.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlanst.go deleted file mode 100644 index 9ca1897e3..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/dlanst.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright ©2016 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import ( - "math" - - "gonum.org/v1/gonum/lapack" -) - -// Dlanst computes the specified norm of a symmetric tridiagonal matrix A. -// The diagonal elements of A are stored in d and the off-diagonal elements -// are stored in e. 
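
A short sketch (illustrative only, not part of this patch) of Dlanst on the 3×3 second-difference matrix:

    package main

    import (
        "fmt"

        "gonum.org/v1/gonum/lapack"
        "gonum.org/v1/gonum/lapack/gonum"
    )

    func main() {
        impl := gonum.Implementation{}
        // Symmetric tridiagonal matrix
        //  [ 2 -1  0]
        //  [-1  2 -1]
        //  [ 0 -1  2]
        d := []float64{2, 2, 2}
        e := []float64{-1, -1}
        fmt.Println(impl.Dlanst(lapack.MaxRowSum, 3, d, e)) // 4 (the middle row)
        fmt.Println(impl.Dlanst(lapack.MaxAbs, 3, d, e))    // 2
    }
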
-func (impl Implementation) Dlanst(norm lapack.MatrixNorm, n int, d, e []float64) float64 { - switch { - case norm != lapack.MaxRowSum && norm != lapack.MaxColumnSum && norm != lapack.Frobenius && norm != lapack.MaxAbs: - panic(badNorm) - case n < 0: - panic(nLT0) - } - if n == 0 { - return 0 - } - switch { - case len(d) < n: - panic(shortD) - case len(e) < n-1: - panic(shortE) - } - - switch norm { - default: - panic(badNorm) - case lapack.MaxAbs: - anorm := math.Abs(d[n-1]) - for i := 0; i < n-1; i++ { - sum := math.Abs(d[i]) - if anorm < sum || math.IsNaN(sum) { - anorm = sum - } - sum = math.Abs(e[i]) - if anorm < sum || math.IsNaN(sum) { - anorm = sum - } - } - return anorm - case lapack.MaxColumnSum, lapack.MaxRowSum: - if n == 1 { - return math.Abs(d[0]) - } - anorm := math.Abs(d[0]) + math.Abs(e[0]) - sum := math.Abs(e[n-2]) + math.Abs(d[n-1]) - if anorm < sum || math.IsNaN(sum) { - anorm = sum - } - for i := 1; i < n-1; i++ { - sum := math.Abs(d[i]) + math.Abs(e[i]) + math.Abs(e[i-1]) - if anorm < sum || math.IsNaN(sum) { - anorm = sum - } - } - return anorm - case lapack.Frobenius: - var scale float64 - sum := 1.0 - if n > 1 { - scale, sum = impl.Dlassq(n-1, e, 1, scale, sum) - sum = 2 * sum - } - scale, sum = impl.Dlassq(n, d, 1, scale, sum) - return scale * math.Sqrt(sum) - } -} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlansy.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlansy.go deleted file mode 100644 index 97ba5b243..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/dlansy.go +++ /dev/null @@ -1,132 +0,0 @@ -// Copyright ©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import ( - "math" - - "gonum.org/v1/gonum/blas" - "gonum.org/v1/gonum/lapack" -) - -// Dlansy computes the specified norm of an n×n symmetric matrix. If -// norm == lapack.MaxColumnSum or norm == lapackMaxRowSum work must have length -// at least n, otherwise work is unused. -func (impl Implementation) Dlansy(norm lapack.MatrixNorm, uplo blas.Uplo, n int, a []float64, lda int, work []float64) float64 { - switch { - case norm != lapack.MaxRowSum && norm != lapack.MaxColumnSum && norm != lapack.Frobenius && norm != lapack.MaxAbs: - panic(badNorm) - case uplo != blas.Upper && uplo != blas.Lower: - panic(badUplo) - case n < 0: - panic(nLT0) - case lda < max(1, n): - panic(badLdA) - } - - // Quick return if possible. - if n == 0 { - return 0 - } - - switch { - case len(a) < (n-1)*lda+n: - panic(shortA) - case (norm == lapack.MaxColumnSum || norm == lapack.MaxRowSum) && len(work) < n: - panic(shortWork) - } - - switch norm { - default: - panic(badNorm) - case lapack.MaxAbs: - if uplo == blas.Upper { - var max float64 - for i := 0; i < n; i++ { - for j := i; j < n; j++ { - v := math.Abs(a[i*lda+j]) - if math.IsNaN(v) { - return math.NaN() - } - if v > max { - max = v - } - } - } - return max - } - var max float64 - for i := 0; i < n; i++ { - for j := 0; j <= i; j++ { - v := math.Abs(a[i*lda+j]) - if math.IsNaN(v) { - return math.NaN() - } - if v > max { - max = v - } - } - } - return max - case lapack.MaxRowSum, lapack.MaxColumnSum: - // A symmetric matrix has the same 1-norm and ∞-norm. 
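    // Note (editorial, not in the original source): because A == A^T, the
    // absolute sum of row i equals the absolute sum of column i, so the
    // single accumulation pass below serves MaxRowSum and MaxColumnSum
    // alike; for each stored off-diagonal element, both work[i] and
    // work[j] receive its absolute value, accounting for the mirrored
    // entry in the unstored triangle.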
- for i := 0; i < n; i++ { - work[i] = 0 - } - if uplo == blas.Upper { - for i := 0; i < n; i++ { - work[i] += math.Abs(a[i*lda+i]) - for j := i + 1; j < n; j++ { - v := math.Abs(a[i*lda+j]) - work[i] += v - work[j] += v - } - } - } else { - for i := 0; i < n; i++ { - for j := 0; j < i; j++ { - v := math.Abs(a[i*lda+j]) - work[i] += v - work[j] += v - } - work[i] += math.Abs(a[i*lda+i]) - } - } - var max float64 - for i := 0; i < n; i++ { - v := work[i] - if math.IsNaN(v) { - return math.NaN() - } - if v > max { - max = v - } - } - return max - case lapack.Frobenius: - if uplo == blas.Upper { - var sum float64 - for i := 0; i < n; i++ { - v := a[i*lda+i] - sum += v * v - for j := i + 1; j < n; j++ { - v := a[i*lda+j] - sum += 2 * v * v - } - } - return math.Sqrt(sum) - } - var sum float64 - for i := 0; i < n; i++ { - for j := 0; j < i; j++ { - v := a[i*lda+j] - sum += 2 * v * v - } - v := a[i*lda+i] - sum += v * v - } - return math.Sqrt(sum) - } -} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlantr.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlantr.go deleted file mode 100644 index cc96391d9..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/dlantr.go +++ /dev/null @@ -1,260 +0,0 @@ -// Copyright ©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import ( - "math" - - "gonum.org/v1/gonum/blas" - "gonum.org/v1/gonum/lapack" -) - -// Dlantr computes the specified norm of an m×n trapezoidal matrix A. If -// norm == lapack.MaxColumnSum work must have length at least n, otherwise work -// is unused. -func (impl Implementation) Dlantr(norm lapack.MatrixNorm, uplo blas.Uplo, diag blas.Diag, m, n int, a []float64, lda int, work []float64) float64 { - switch { - case norm != lapack.MaxRowSum && norm != lapack.MaxColumnSum && norm != lapack.Frobenius && norm != lapack.MaxAbs: - panic(badNorm) - case uplo != blas.Upper && uplo != blas.Lower: - panic(badUplo) - case diag != blas.Unit && diag != blas.NonUnit: - panic(badDiag) - case n < 0: - panic(nLT0) - case lda < max(1, n): - panic(badLdA) - } - - // Quick return if possible. 
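-	// For an m×n trapezoidal matrix only min(m, n) diagonal entries
-	// exist; when either dimension is zero there is nothing to sum.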
- minmn := min(m, n) - if minmn == 0 { - return 0 - } - - switch { - case len(a) < (m-1)*lda+n: - panic(shortA) - case norm == lapack.MaxColumnSum && len(work) < n: - panic(shortWork) - } - - switch norm { - default: - panic(badNorm) - case lapack.MaxAbs: - if diag == blas.Unit { - value := 1.0 - if uplo == blas.Upper { - for i := 0; i < m; i++ { - for j := i + 1; j < n; j++ { - tmp := math.Abs(a[i*lda+j]) - if math.IsNaN(tmp) { - return tmp - } - if tmp > value { - value = tmp - } - } - } - return value - } - for i := 1; i < m; i++ { - for j := 0; j < min(i, n); j++ { - tmp := math.Abs(a[i*lda+j]) - if math.IsNaN(tmp) { - return tmp - } - if tmp > value { - value = tmp - } - } - } - return value - } - var value float64 - if uplo == blas.Upper { - for i := 0; i < m; i++ { - for j := i; j < n; j++ { - tmp := math.Abs(a[i*lda+j]) - if math.IsNaN(tmp) { - return tmp - } - if tmp > value { - value = tmp - } - } - } - return value - } - for i := 0; i < m; i++ { - for j := 0; j <= min(i, n-1); j++ { - tmp := math.Abs(a[i*lda+j]) - if math.IsNaN(tmp) { - return tmp - } - if tmp > value { - value = tmp - } - } - } - return value - case lapack.MaxColumnSum: - if diag == blas.Unit { - for i := 0; i < minmn; i++ { - work[i] = 1 - } - for i := minmn; i < n; i++ { - work[i] = 0 - } - if uplo == blas.Upper { - for i := 0; i < m; i++ { - for j := i + 1; j < n; j++ { - work[j] += math.Abs(a[i*lda+j]) - } - } - } else { - for i := 1; i < m; i++ { - for j := 0; j < min(i, n); j++ { - work[j] += math.Abs(a[i*lda+j]) - } - } - } - } else { - for i := 0; i < n; i++ { - work[i] = 0 - } - if uplo == blas.Upper { - for i := 0; i < m; i++ { - for j := i; j < n; j++ { - work[j] += math.Abs(a[i*lda+j]) - } - } - } else { - for i := 0; i < m; i++ { - for j := 0; j <= min(i, n-1); j++ { - work[j] += math.Abs(a[i*lda+j]) - } - } - } - } - var max float64 - for _, v := range work[:n] { - if math.IsNaN(v) { - return math.NaN() - } - if v > max { - max = v - } - } - return max - case lapack.MaxRowSum: - var maxsum float64 - if diag == blas.Unit { - if uplo == blas.Upper { - for i := 0; i < m; i++ { - var sum float64 - if i < minmn { - sum = 1 - } - for j := i + 1; j < n; j++ { - sum += math.Abs(a[i*lda+j]) - } - if math.IsNaN(sum) { - return math.NaN() - } - if sum > maxsum { - maxsum = sum - } - } - return maxsum - } else { - for i := 1; i < m; i++ { - var sum float64 - if i < minmn { - sum = 1 - } - for j := 0; j < min(i, n); j++ { - sum += math.Abs(a[i*lda+j]) - } - if math.IsNaN(sum) { - return math.NaN() - } - if sum > maxsum { - maxsum = sum - } - } - return maxsum - } - } else { - if uplo == blas.Upper { - for i := 0; i < m; i++ { - var sum float64 - for j := i; j < n; j++ { - sum += math.Abs(a[i*lda+j]) - } - if math.IsNaN(sum) { - return sum - } - if sum > maxsum { - maxsum = sum - } - } - return maxsum - } else { - for i := 0; i < m; i++ { - var sum float64 - for j := 0; j <= min(i, n-1); j++ { - sum += math.Abs(a[i*lda+j]) - } - if math.IsNaN(sum) { - return sum - } - if sum > maxsum { - maxsum = sum - } - } - return maxsum - } - } - case lapack.Frobenius: - var nrm float64 - if diag == blas.Unit { - if uplo == blas.Upper { - for i := 0; i < m; i++ { - for j := i + 1; j < n; j++ { - tmp := a[i*lda+j] - nrm += tmp * tmp - } - } - } else { - for i := 1; i < m; i++ { - for j := 0; j < min(i, n); j++ { - tmp := a[i*lda+j] - nrm += tmp * tmp - } - } - } - nrm += float64(minmn) - } else { - if uplo == blas.Upper { - for i := 0; i < m; i++ { - for j := i; j < n; j++ { - tmp := math.Abs(a[i*lda+j]) - nrm += tmp * tmp - 
} - } - } else { - for i := 0; i < m; i++ { - for j := 0; j <= min(i, n-1); j++ { - tmp := math.Abs(a[i*lda+j]) - nrm += tmp * tmp - } - } - } - } - return math.Sqrt(nrm) - } -} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlanv2.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlanv2.go deleted file mode 100644 index e5dcfb752..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/dlanv2.go +++ /dev/null @@ -1,132 +0,0 @@ -// Copyright ©2016 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import "math" - -// Dlanv2 computes the Schur factorization of a real 2×2 matrix: -// [ a b ] = [ cs -sn ] * [ aa bb ] * [ cs sn ] -// [ c d ] [ sn cs ] [ cc dd ] * [-sn cs ] -// If cc is zero, aa and dd are real eigenvalues of the matrix. Otherwise it -// holds that aa = dd and bb*cc < 0, and aa ± sqrt(bb*cc) are complex conjugate -// eigenvalues. The real and imaginary parts of the eigenvalues are returned in -// (rt1r,rt1i) and (rt2r,rt2i). -func (impl Implementation) Dlanv2(a, b, c, d float64) (aa, bb, cc, dd float64, rt1r, rt1i, rt2r, rt2i float64, cs, sn float64) { - switch { - case c == 0: // Matrix is already upper triangular. - aa = a - bb = b - cc = 0 - dd = d - cs = 1 - sn = 0 - case b == 0: // Matrix is lower triangular, swap rows and columns. - aa = d - bb = -c - cc = 0 - dd = a - cs = 0 - sn = 1 - case a == d && math.Signbit(b) != math.Signbit(c): // Matrix is already in the standard Schur form. - aa = a - bb = b - cc = c - dd = d - cs = 1 - sn = 0 - default: - temp := a - d - p := temp / 2 - bcmax := math.Max(math.Abs(b), math.Abs(c)) - bcmis := math.Min(math.Abs(b), math.Abs(c)) - if b*c < 0 { - bcmis *= -1 - } - scale := math.Max(math.Abs(p), bcmax) - z := p/scale*p + bcmax/scale*bcmis - eps := dlamchP - - if z >= 4*eps { - // Real eigenvalues. Compute aa and dd. - if p > 0 { - z = p + math.Sqrt(scale)*math.Sqrt(z) - } else { - z = p - math.Sqrt(scale)*math.Sqrt(z) - } - aa = d + z - dd = d - bcmax/z*bcmis - // Compute bb and the rotation matrix. - tau := impl.Dlapy2(c, z) - cs = z / tau - sn = c / tau - bb = b - c - cc = 0 - } else { - // Complex eigenvalues, or real (almost) equal eigenvalues. - // Make diagonal elements equal. - sigma := b + c - tau := impl.Dlapy2(sigma, temp) - cs = math.Sqrt((1 + math.Abs(sigma)/tau) / 2) - sn = -p / (tau * cs) - if sigma < 0 { - sn *= -1 - } - // Compute [ aa bb ] = [ a b ] [ cs -sn ] - // [ cc dd ] [ c d ] [ sn cs ] - aa = a*cs + b*sn - bb = -a*sn + b*cs - cc = c*cs + d*sn - dd = -c*sn + d*cs - // Compute [ a b ] = [ cs sn ] [ aa bb ] - // [ c d ] [-sn cs ] [ cc dd ] - a = aa*cs + cc*sn - b = bb*cs + dd*sn - c = -aa*sn + cc*cs - d = -bb*sn + dd*cs - - temp = (a + d) / 2 - aa = temp - bb = b - cc = c - dd = temp - - if cc != 0 { - if bb != 0 { - if math.Signbit(bb) == math.Signbit(cc) { - // Real eigenvalues, reduce to - // upper triangular form. - sab := math.Sqrt(math.Abs(bb)) - sac := math.Sqrt(math.Abs(cc)) - p = sab * sac - if cc < 0 { - p *= -1 - } - tau = 1 / math.Sqrt(math.Abs(bb+cc)) - aa = temp + p - bb = bb - cc - cc = 0 - dd = temp - p - cs1 := sab * tau - sn1 := sac * tau - cs, sn = cs*cs1-sn*sn1, cs*sn1+sn+cs1 - } - } else { - bb = -cc - cc = 0 - cs, sn = -sn, cs - } - } - } - } - - // Store eigenvalues in (rt1r,rt1i) and (rt2r,rt2i). 
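-	// In the complex case the standardized form has aa == dd and
-	// bb*cc < 0, so the conjugate pair is aa ± i*sqrt(|bb|)*sqrt(|cc|).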
- rt1r = aa - rt2r = dd - if cc != 0 { - rt1i = math.Sqrt(math.Abs(bb)) * math.Sqrt(math.Abs(cc)) - rt2i = -rt1i - } - return -} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlapll.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlapll.go deleted file mode 100644 index bf98c338e..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/dlapll.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright ©2017 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import "gonum.org/v1/gonum/blas/blas64" - -// Dlapll returns the smallest singular value of the n×2 matrix A = [ x y ]. -// The function first computes the QR factorization of A = Q*R, and then computes -// the SVD of the 2-by-2 upper triangular matrix r. -// -// The contents of x and y are overwritten during the call. -// -// Dlapll is an internal routine. It is exported for testing purposes. -func (impl Implementation) Dlapll(n int, x []float64, incX int, y []float64, incY int) float64 { - switch { - case n < 0: - panic(nLT0) - case incX <= 0: - panic(badIncX) - case incY <= 0: - panic(badIncY) - } - - // Quick return if possible. - if n == 0 { - return 0 - } - - switch { - case len(x) < 1+(n-1)*incX: - panic(shortX) - case len(y) < 1+(n-1)*incY: - panic(shortY) - } - - // Quick return if possible. - if n == 1 { - return 0 - } - - // Compute the QR factorization of the N-by-2 matrix [ X Y ]. - a00, tau := impl.Dlarfg(n, x[0], x[incX:], incX) - x[0] = 1 - - bi := blas64.Implementation() - c := -tau * bi.Ddot(n, x, incX, y, incY) - bi.Daxpy(n, c, x, incX, y, incY) - a11, _ := impl.Dlarfg(n-1, y[incY], y[2*incY:], incY) - - // Compute the SVD of 2-by-2 upper triangular matrix. - ssmin, _ := impl.Dlas2(a00, y[0], a11) - return ssmin -} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlapmt.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlapmt.go deleted file mode 100644 index 55f1567f3..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/dlapmt.go +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright ©2017 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import "gonum.org/v1/gonum/blas/blas64" - -// Dlapmt rearranges the columns of the m×n matrix X as specified by the -// permutation k_0, k_1, ..., k_n-1 of the integers 0, ..., n-1. -// -// If forward is true a forward permutation is performed: -// -// X[0:m, k[j]] is moved to X[0:m, j] for j = 0, 1, ..., n-1. -// -// otherwise a backward permutation is performed: -// -// X[0:m, j] is moved to X[0:m, k[j]] for j = 0, 1, ..., n-1. -// -// k must have length n, otherwise Dlapmt will panic. k is zero-indexed. -func (impl Implementation) Dlapmt(forward bool, m, n int, x []float64, ldx int, k []int) { - switch { - case m < 0: - panic(mLT0) - case n < 0: - panic(nLT0) - case ldx < max(1, n): - panic(badLdX) - } - - // Quick return if possible. - if m == 0 || n == 0 { - return - } - - switch { - case len(x) < (m-1)*ldx+n: - panic(shortX) - case len(k) != n: - panic(badLenK) - } - - // Quick return if possible. 
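-	// A single column admits only the identity permutation. For larger n,
-	// entries of k are negated below to mark unprocessed cycles and are
-	// restored to their original values before returning.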
- if n == 1 { - return - } - - for i, v := range k { - v++ - k[i] = -v - } - - bi := blas64.Implementation() - - if forward { - for j, v := range k { - if v >= 0 { - continue - } - k[j] = -v - i := -v - 1 - for k[i] < 0 { - bi.Dswap(m, x[j:], ldx, x[i:], ldx) - - k[i] = -k[i] - j = i - i = k[i] - 1 - } - } - } else { - for i, v := range k { - if v >= 0 { - continue - } - k[i] = -v - j := -v - 1 - for j != i { - bi.Dswap(m, x[j:], ldx, x[i:], ldx) - - k[j] = -k[j] - j = k[j] - 1 - } - } - } - - for i := range k { - k[i]-- - } -} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlapy2.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlapy2.go deleted file mode 100644 index 19f73ffab..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/dlapy2.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright ©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import "math" - -// Dlapy2 is the LAPACK version of math.Hypot. -// -// Dlapy2 is an internal routine. It is exported for testing purposes. -func (Implementation) Dlapy2(x, y float64) float64 { - return math.Hypot(x, y) -} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlaqp2.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlaqp2.go deleted file mode 100644 index d3a0def63..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/dlaqp2.go +++ /dev/null @@ -1,127 +0,0 @@ -// Copyright ©2017 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import ( - "math" - - "gonum.org/v1/gonum/blas" - "gonum.org/v1/gonum/blas/blas64" -) - -// Dlaqp2 computes a QR factorization with column pivoting of the block A[offset:m, 0:n] -// of the m×n matrix A. The block A[0:offset, 0:n] is accordingly pivoted, but not factorized. -// -// On exit, the upper triangle of block A[offset:m, 0:n] is the triangular factor obtained. -// The elements in block A[offset:m, 0:n] below the diagonal, together with tau, represent -// the orthogonal matrix Q as a product of elementary reflectors. -// -// offset is number of rows of the matrix A that must be pivoted but not factorized. -// offset must not be negative otherwise Dlaqp2 will panic. -// -// On exit, jpvt holds the permutation that was applied; the jth column of A*P was the -// jpvt[j] column of A. jpvt must have length n, otherwise Dlaqp2 will panic. -// -// On exit tau holds the scalar factors of the elementary reflectors. It must have length -// at least min(m-offset, n) otherwise Dlaqp2 will panic. -// -// vn1 and vn2 hold the partial and complete column norms respectively. They must have length n, -// otherwise Dlaqp2 will panic. -// -// work must have length n, otherwise Dlaqp2 will panic. -// -// Dlaqp2 is an internal routine. It is exported for testing purposes. -func (impl Implementation) Dlaqp2(m, n, offset int, a []float64, lda int, jpvt []int, tau, vn1, vn2, work []float64) { - switch { - case m < 0: - panic(mLT0) - case n < 0: - panic(nLT0) - case offset < 0: - panic(offsetLT0) - case offset > m: - panic(offsetGTM) - case lda < max(1, n): - panic(badLdA) - } - - // Quick return if possible. 
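-	// Each iteration below swaps the remaining column of largest partial
-	// norm into the pivot position and eliminates it with a Householder
-	// reflector.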
- if m == 0 || n == 0 { - return - } - - mn := min(m-offset, n) - switch { - case len(a) < (m-1)*lda+n: - panic(shortA) - case len(jpvt) != n: - panic(badLenJpvt) - case len(tau) < mn: - panic(shortTau) - case len(vn1) < n: - panic(shortVn1) - case len(vn2) < n: - panic(shortVn2) - case len(work) < n: - panic(shortWork) - } - - tol3z := math.Sqrt(dlamchE) - - bi := blas64.Implementation() - - // Compute factorization. - for i := 0; i < mn; i++ { - offpi := offset + i - - // Determine ith pivot column and swap if necessary. - p := i + bi.Idamax(n-i, vn1[i:], 1) - if p != i { - bi.Dswap(m, a[p:], lda, a[i:], lda) - jpvt[p], jpvt[i] = jpvt[i], jpvt[p] - vn1[p] = vn1[i] - vn2[p] = vn2[i] - } - - // Generate elementary reflector H_i. - if offpi < m-1 { - a[offpi*lda+i], tau[i] = impl.Dlarfg(m-offpi, a[offpi*lda+i], a[(offpi+1)*lda+i:], lda) - } else { - tau[i] = 0 - } - - if i < n-1 { - // Apply H_i^T to A[offset+i:m, i:n] from the left. - aii := a[offpi*lda+i] - a[offpi*lda+i] = 1 - impl.Dlarf(blas.Left, m-offpi, n-i-1, a[offpi*lda+i:], lda, tau[i], a[offpi*lda+i+1:], lda, work) - a[offpi*lda+i] = aii - } - - // Update partial column norms. - for j := i + 1; j < n; j++ { - if vn1[j] == 0 { - continue - } - - // The following marked lines follow from the - // analysis in Lapack Working Note 176. - r := math.Abs(a[offpi*lda+j]) / vn1[j] // * - temp := math.Max(0, 1-r*r) // * - r = vn1[j] / vn2[j] // * - temp2 := temp * r * r // * - if temp2 < tol3z { - var v float64 - if offpi < m-1 { - v = bi.Dnrm2(m-offpi-1, a[(offpi+1)*lda+j:], lda) - } - vn1[j] = v - vn2[j] = v - } else { - vn1[j] *= math.Sqrt(temp) // * - } - } - } -} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlaqps.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlaqps.go deleted file mode 100644 index dd683b62a..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/dlaqps.go +++ /dev/null @@ -1,244 +0,0 @@ -// Copyright ©2017 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import ( - "math" - - "gonum.org/v1/gonum/blas" - "gonum.org/v1/gonum/blas/blas64" -) - -// Dlaqps computes a step of QR factorization with column pivoting -// of an m×n matrix A by using Blas-3. It tries to factorize nb -// columns from A starting from the row offset, and updates all -// of the matrix with Dgemm. -// -// In some cases, due to catastrophic cancellations, it cannot -// factorize nb columns. Hence, the actual number of factorized -// columns is returned in kb. -// -// Dlaqps computes a QR factorization with column pivoting of the -// block A[offset:m, 0:nb] of the m×n matrix A. The block -// A[0:offset, 0:n] is accordingly pivoted, but not factorized. -// -// On exit, the upper triangle of block A[offset:m, 0:kb] is the -// triangular factor obtained. The elements in block A[offset:m, 0:n] -// below the diagonal, together with tau, represent the orthogonal -// matrix Q as a product of elementary reflectors. -// -// offset is number of rows of the matrix A that must be pivoted but -// not factorized. offset must not be negative otherwise Dlaqps will panic. -// -// On exit, jpvt holds the permutation that was applied; the jth column -// of A*P was the jpvt[j] column of A. jpvt must have length n, -// otherwise Dlapqs will panic. -// -// On exit tau holds the scalar factors of the elementary reflectors. -// It must have length nb, otherwise Dlapqs will panic. -// -// vn1 and vn2 hold the partial and complete column norms respectively. 
-// They must have length n, otherwise Dlapqs will panic. -// -// auxv must have length nb, otherwise Dlaqps will panic. -// -// f and ldf represent an n×nb matrix F that is overwritten during the -// call. -// -// Dlaqps is an internal routine. It is exported for testing purposes. -func (impl Implementation) Dlaqps(m, n, offset, nb int, a []float64, lda int, jpvt []int, tau, vn1, vn2, auxv, f []float64, ldf int) (kb int) { - switch { - case m < 0: - panic(mLT0) - case n < 0: - panic(nLT0) - case offset < 0: - panic(offsetLT0) - case offset > m: - panic(offsetGTM) - case nb < 0: - panic(nbLT0) - case nb > n: - panic(nbGTN) - case lda < max(1, n): - panic(badLdA) - case ldf < max(1, nb): - panic(badLdF) - } - - if m == 0 || n == 0 { - return 0 - } - - switch { - case len(a) < (m-1)*lda+n: - panic(shortA) - case len(jpvt) != n: - panic(badLenJpvt) - case len(vn1) < n: - panic(shortVn1) - case len(vn2) < n: - panic(shortVn2) - } - - if nb == 0 { - return 0 - } - - switch { - case len(tau) < nb: - panic(shortTau) - case len(auxv) < nb: - panic(shortAuxv) - case len(f) < (n-1)*ldf+nb: - panic(shortF) - } - - if offset == m { - return 0 - } - - lastrk := min(m, n+offset) - lsticc := -1 - tol3z := math.Sqrt(dlamchE) - - bi := blas64.Implementation() - - var k, rk int - for ; k < nb && lsticc == -1; k++ { - rk = offset + k - - // Determine kth pivot column and swap if necessary. - p := k + bi.Idamax(n-k, vn1[k:], 1) - if p != k { - bi.Dswap(m, a[p:], lda, a[k:], lda) - bi.Dswap(k, f[p*ldf:], 1, f[k*ldf:], 1) - jpvt[p], jpvt[k] = jpvt[k], jpvt[p] - vn1[p] = vn1[k] - vn2[p] = vn2[k] - } - - // Apply previous Householder reflectors to column K: - // - // A[rk:m, k] = A[rk:m, k] - A[rk:m, 0:k-1]*F[k, 0:k-1]^T. - if k > 0 { - bi.Dgemv(blas.NoTrans, m-rk, k, -1, - a[rk*lda:], lda, - f[k*ldf:], 1, - 1, - a[rk*lda+k:], lda) - } - - // Generate elementary reflector H_k. - if rk < m-1 { - a[rk*lda+k], tau[k] = impl.Dlarfg(m-rk, a[rk*lda+k], a[(rk+1)*lda+k:], lda) - } else { - tau[k] = 0 - } - - akk := a[rk*lda+k] - a[rk*lda+k] = 1 - - // Compute kth column of F: - // - // Compute F[k+1:n, k] = tau[k]*A[rk:m, k+1:n]^T*A[rk:m, k]. - if k < n-1 { - bi.Dgemv(blas.Trans, m-rk, n-k-1, tau[k], - a[rk*lda+k+1:], lda, - a[rk*lda+k:], lda, - 0, - f[(k+1)*ldf+k:], ldf) - } - - // Padding F[0:k, k] with zeros. - for j := 0; j < k; j++ { - f[j*ldf+k] = 0 - } - - // Incremental updating of F: - // - // F[0:n, k] := F[0:n, k] - tau[k]*F[0:n, 0:k-1]*A[rk:m, 0:k-1]^T*A[rk:m,k]. - if k > 0 { - bi.Dgemv(blas.Trans, m-rk, k, -tau[k], - a[rk*lda:], lda, - a[rk*lda+k:], lda, - 0, - auxv, 1) - bi.Dgemv(blas.NoTrans, n, k, 1, - f, ldf, - auxv, 1, - 1, - f[k:], ldf) - } - - // Update the current row of A: - // - // A[rk, k+1:n] = A[rk, k+1:n] - A[rk, 0:k]*F[k+1:n, 0:k]^T. - if k < n-1 { - bi.Dgemv(blas.NoTrans, n-k-1, k+1, -1, - f[(k+1)*ldf:], ldf, - a[rk*lda:], 1, - 1, - a[rk*lda+k+1:], 1) - } - - // Update partial column norms. - if rk < lastrk-1 { - for j := k + 1; j < n; j++ { - if vn1[j] == 0 { - continue - } - - // The following marked lines follow from the - // analysis in Lapack Working Note 176. - r := math.Abs(a[rk*lda+j]) / vn1[j] // * - temp := math.Max(0, 1-r*r) // * - r = vn1[j] / vn2[j] // * - temp2 := temp * r * r // * - if temp2 < tol3z { - // vn2 is used here as a collection of - // indices into vn2 and also a collection - // of column norms. 
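-					// Chaining the previous lsticc through vn2 builds a
-					// linked list of columns whose norms are recomputed
-					// in the "Recomputation of difficult columns" loop
-					// below.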
- vn2[j] = float64(lsticc) - lsticc = j - } else { - vn1[j] *= math.Sqrt(temp) // * - } - } - } - - a[rk*lda+k] = akk - } - kb = k - rk = offset + kb - - // Apply the block reflector to the rest of the matrix: - // - // A[offset+kb+1:m, kb+1:n] := A[offset+kb+1:m, kb+1:n] - A[offset+kb+1:m, 1:kb]*F[kb+1:n, 1:kb]^T. - if kb < min(n, m-offset) { - bi.Dgemm(blas.NoTrans, blas.Trans, - m-rk, n-kb, kb, -1, - a[rk*lda:], lda, - f[kb*ldf:], ldf, - 1, - a[rk*lda+kb:], lda) - } - - // Recomputation of difficult columns. - for lsticc >= 0 { - itemp := int(vn2[lsticc]) - - // NOTE: The computation of vn1[lsticc] relies on the fact that - // Dnrm2 does not fail on vectors with norm below the value of - // sqrt(dlamchS) - v := bi.Dnrm2(m-rk, a[rk*lda+lsticc:], lda) - vn1[lsticc] = v - vn2[lsticc] = v - - lsticc = itemp - } - - return kb -} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlaqr04.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlaqr04.go deleted file mode 100644 index e9fbb6007..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/dlaqr04.go +++ /dev/null @@ -1,478 +0,0 @@ -// Copyright ©2016 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import ( - "math" - - "gonum.org/v1/gonum/blas" -) - -// Dlaqr04 computes the eigenvalues of a block of an n×n upper Hessenberg matrix -// H, and optionally the matrices T and Z from the Schur decomposition -// H = Z T Z^T -// where T is an upper quasi-triangular matrix (the Schur form), and Z is the -// orthogonal matrix of Schur vectors. -// -// wantt indicates whether the full Schur form T is required. If wantt is false, -// then only enough of H will be updated to preserve the eigenvalues. -// -// wantz indicates whether the n×n matrix of Schur vectors Z is required. If it -// is true, the orthogonal similarity transformation will be accumulated into -// Z[iloz:ihiz+1,ilo:ihi+1], otherwise Z will not be referenced. -// -// ilo and ihi determine the block of H on which Dlaqr04 operates. It must hold that -// 0 <= ilo <= ihi < n, if n > 0, -// ilo == 0 and ihi == -1, if n == 0, -// and the block must be isolated, that is, -// ilo == 0 or H[ilo,ilo-1] == 0, -// ihi == n-1 or H[ihi+1,ihi] == 0, -// otherwise Dlaqr04 will panic. -// -// wr and wi must have length ihi+1. -// -// iloz and ihiz specify the rows of Z to which transformations will be applied -// if wantz is true. It must hold that -// 0 <= iloz <= ilo, and ihi <= ihiz < n, -// otherwise Dlaqr04 will panic. -// -// work must have length at least lwork and lwork must be -// lwork >= 1, if n <= 11, -// lwork >= n, if n > 11, -// otherwise Dlaqr04 will panic. lwork as large as 6*n may be required for -// optimal performance. On return, work[0] will contain the optimal value of -// lwork. -// -// If lwork is -1, instead of performing Dlaqr04, the function only estimates the -// optimal workspace size and stores it into work[0]. Neither h nor z are -// accessed. -// -// recur is the non-negative recursion depth. For recur > 0, Dlaqr04 behaves -// as DLAQR0, for recur == 0 it behaves as DLAQR4. -// -// unconverged indicates whether Dlaqr04 computed all the eigenvalues of H[ilo:ihi+1,ilo:ihi+1]. -// -// If unconverged is zero and wantt is true, H will contain on return the upper -// quasi-triangular matrix T from the Schur decomposition. 
2×2 diagonal blocks -// (corresponding to complex conjugate pairs of eigenvalues) will be returned in -// standard form, with H[i,i] == H[i+1,i+1] and H[i+1,i]*H[i,i+1] < 0. -// -// If unconverged is zero and if wantt is false, the contents of h on return is -// unspecified. -// -// If unconverged is zero, all the eigenvalues have been computed and their real -// and imaginary parts will be stored on return in wr[ilo:ihi+1] and -// wi[ilo:ihi+1], respectively. If two eigenvalues are computed as a complex -// conjugate pair, they are stored in consecutive elements of wr and wi, say the -// i-th and (i+1)th, with wi[i] > 0 and wi[i+1] < 0. If wantt is true, then the -// eigenvalues are stored in the same order as on the diagonal of the Schur form -// returned in H, with wr[i] = H[i,i] and, if H[i:i+2,i:i+2] is a 2×2 diagonal -// block, wi[i] = sqrt(-H[i+1,i]*H[i,i+1]) and wi[i+1] = -wi[i]. -// -// If unconverged is positive, some eigenvalues have not converged, and -// wr[unconverged:ihi+1] and wi[unconverged:ihi+1] will contain those -// eigenvalues which have been successfully computed. Failures are rare. -// -// If unconverged is positive and wantt is true, then on return -// (initial H)*U = U*(final H), (*) -// where U is an orthogonal matrix. The final H is upper Hessenberg and -// H[unconverged:ihi+1,unconverged:ihi+1] is upper quasi-triangular. -// -// If unconverged is positive and wantt is false, on return the remaining -// unconverged eigenvalues are the eigenvalues of the upper Hessenberg matrix -// H[ilo:unconverged,ilo:unconverged]. -// -// If unconverged is positive and wantz is true, then on return -// (final Z) = (initial Z)*U, -// where U is the orthogonal matrix in (*) regardless of the value of wantt. -// -// References: -// [1] K. Braman, R. Byers, R. Mathias. The Multishift QR Algorithm. Part I: -// Maintaining Well-Focused Shifts and Level 3 Performance. SIAM J. Matrix -// Anal. Appl. 23(4) (2002), pp. 929—947 -// URL: http://dx.doi.org/10.1137/S0895479801384573 -// [2] K. Braman, R. Byers, R. Mathias. The Multishift QR Algorithm. Part II: -// Aggressive Early Deflation. SIAM J. Matrix Anal. Appl. 23(4) (2002), pp. 948—973 -// URL: http://dx.doi.org/10.1137/S0895479801384585 -// -// Dlaqr04 is an internal routine. It is exported for testing purposes. -func (impl Implementation) Dlaqr04(wantt, wantz bool, n, ilo, ihi int, h []float64, ldh int, wr, wi []float64, iloz, ihiz int, z []float64, ldz int, work []float64, lwork int, recur int) (unconverged int) { - const ( - // Matrices of order ntiny or smaller must be processed by - // Dlahqr because of insufficient subdiagonal scratch space. - // This is a hard limit. - ntiny = 11 - // Exceptional deflation windows: try to cure rare slow - // convergence by varying the size of the deflation window after - // kexnw iterations. - kexnw = 5 - // Exceptional shifts: try to cure rare slow convergence with - // ad-hoc exceptional shifts every kexsh iterations. - kexsh = 6 - - // See https://github.com/gonum/lapack/pull/151#discussion_r68162802 - // and the surrounding discussion for an explanation where these - // constants come from. - // TODO(vladimir-ch): Similar constants for exceptional shifts - // are used also in dlahqr.go. The first constant is different - // there, it is equal to 3. Why? And does it matter? 
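-		// wilk1 and wilk2 define these exceptional shifts: each trial
-		// 2×2 matrix below has the form [[aa, ss], [wilk2*ss, aa]] with
-		// aa = wilk1*ss + h[i,i], where ss sums nearby subdiagonal
-		// magnitudes.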
- wilk1 = 0.75 - wilk2 = -0.4375 - ) - - switch { - case n < 0: - panic(nLT0) - case ilo < 0 || max(0, n-1) < ilo: - panic(badIlo) - case ihi < min(ilo, n-1) || n <= ihi: - panic(badIhi) - case ldh < max(1, n): - panic(badLdH) - case wantz && (iloz < 0 || ilo < iloz): - panic(badIloz) - case wantz && (ihiz < ihi || n <= ihiz): - panic(badIhiz) - case ldz < 1, wantz && ldz < n: - panic(badLdZ) - case lwork < 1 && lwork != -1: - panic(badLWork) - // TODO(vladimir-ch): Enable if and when we figure out what the minimum - // necessary lwork value is. Dlaqr04 says that the minimum is n which - // clashes with Dlaqr23's opinion about optimal work when nw <= 2 - // (independent of n). - // case lwork < n && n > ntiny && lwork != -1: - // panic(badLWork) - case len(work) < max(1, lwork): - panic(shortWork) - case recur < 0: - panic(recurLT0) - } - - // Quick return. - if n == 0 { - work[0] = 1 - return 0 - } - - if lwork != -1 { - switch { - case len(h) < (n-1)*ldh+n: - panic(shortH) - case len(wr) != ihi+1: - panic(badLenWr) - case len(wi) != ihi+1: - panic(badLenWi) - case wantz && len(z) < (n-1)*ldz+n: - panic(shortZ) - case ilo > 0 && h[ilo*ldh+ilo-1] != 0: - panic(notIsolated) - case ihi+1 < n && h[(ihi+1)*ldh+ihi] != 0: - panic(notIsolated) - } - } - - if n <= ntiny { - // Tiny matrices must use Dlahqr. - if lwork == -1 { - work[0] = 1 - return 0 - } - return impl.Dlahqr(wantt, wantz, n, ilo, ihi, h, ldh, wr, wi, iloz, ihiz, z, ldz) - } - - // Use small bulge multi-shift QR with aggressive early deflation on - // larger-than-tiny matrices. - var jbcmpz string - if wantt { - jbcmpz = "S" - } else { - jbcmpz = "E" - } - if wantz { - jbcmpz += "V" - } else { - jbcmpz += "N" - } - - var fname string - if recur > 0 { - fname = "DLAQR0" - } else { - fname = "DLAQR4" - } - // nwr is the recommended deflation window size. n is greater than 11, - // so there is enough subdiagonal workspace for nwr >= 2 as required. - // (In fact, there is enough subdiagonal space for nwr >= 3.) - // TODO(vladimir-ch): If there is enough space for nwr >= 3, should we - // use it? - nwr := impl.Ilaenv(13, fname, jbcmpz, n, ilo, ihi, lwork) - nwr = max(2, nwr) - nwr = min(ihi-ilo+1, min((n-1)/3, nwr)) - - // nsr is the recommended number of simultaneous shifts. n is greater - // than 11, so there is enough subdiagonal workspace for nsr to be even - // and greater than or equal to two as required. - nsr := impl.Ilaenv(15, fname, jbcmpz, n, ilo, ihi, lwork) - nsr = min(nsr, min((n+6)/9, ihi-ilo)) - nsr = max(2, nsr&^1) - - // Workspace query call to Dlaqr23. - impl.Dlaqr23(wantt, wantz, n, ilo, ihi, nwr+1, h, ldh, iloz, ihiz, z, ldz, - wr, wi, h, ldh, n, h, ldh, n, h, ldh, work, -1, recur) - // Optimal workspace is max(Dlaqr5, Dlaqr23). - lwkopt := max(3*nsr/2, int(work[0])) - // Quick return in case of workspace query. - if lwork == -1 { - work[0] = float64(lwkopt) - return 0 - } - - // Dlahqr/Dlaqr04 crossover point. - nmin := impl.Ilaenv(12, fname, jbcmpz, n, ilo, ihi, lwork) - nmin = max(ntiny, nmin) - - // Nibble determines when to skip a multi-shift QR sweep (Dlaqr5). - nibble := impl.Ilaenv(14, fname, jbcmpz, n, ilo, ihi, lwork) - nibble = max(0, nibble) - - // Computation mode of far-from-diagonal orthogonal updates in Dlaqr5. - kacc22 := impl.Ilaenv(16, fname, jbcmpz, n, ilo, ihi, lwork) - kacc22 = max(0, min(kacc22, 2)) - - // nwmax is the largest possible deflation window for which there is - // sufficient workspace. - nwmax := min((n-1)/3, lwork/2) - nw := nwmax // Start with maximum deflation window size. 
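-	// (Dlaqr23 requires 2*nw workspace, hence the lwork/2 bound above;
-	// Dlaqr5 requires 3*ns/2, hence the 2*lwork/3 bound below.)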
- - // nsmax is the largest number of simultaneous shifts for which there is - // sufficient workspace. - nsmax := min((n+6)/9, 2*lwork/3) &^ 1 - - ndfl := 1 // Number of iterations since last deflation. - ndec := 0 // Deflation window size decrement. - - // Main loop. - var ( - itmax = max(30, 2*kexsh) * max(10, (ihi-ilo+1)) - it = 0 - ) - for kbot := ihi; kbot >= ilo; { - if it == itmax { - unconverged = kbot + 1 - break - } - it++ - - // Locate active block. - ktop := ilo - for k := kbot; k >= ilo+1; k-- { - if h[k*ldh+k-1] == 0 { - ktop = k - break - } - } - - // Select deflation window size nw. - // - // Typical Case: - // If possible and advisable, nibble the entire active block. - // If not, use size min(nwr,nwmax) or min(nwr+1,nwmax) - // depending upon which has the smaller corresponding - // subdiagonal entry (a heuristic). - // - // Exceptional Case: - // If there have been no deflations in kexnw or more - // iterations, then vary the deflation window size. At first, - // because larger windows are, in general, more powerful than - // smaller ones, rapidly increase the window to the maximum - // possible. Then, gradually reduce the window size. - nh := kbot - ktop + 1 - nwupbd := min(nh, nwmax) - if ndfl < kexnw { - nw = min(nwupbd, nwr) - } else { - nw = min(nwupbd, 2*nw) - } - if nw < nwmax { - if nw >= nh-1 { - nw = nh - } else { - kwtop := kbot - nw + 1 - if math.Abs(h[kwtop*ldh+kwtop-1]) > math.Abs(h[(kwtop-1)*ldh+kwtop-2]) { - nw++ - } - } - } - if ndfl < kexnw { - ndec = -1 - } else if ndec >= 0 || nw >= nwupbd { - ndec++ - if nw-ndec < 2 { - ndec = 0 - } - nw -= ndec - } - - // Split workspace under the subdiagonal of H into: - // - an nw×nw work array V in the lower left-hand corner, - // - an nw×nhv horizontal work array along the bottom edge (nhv - // must be at least nw but more is better), - // - an nve×nw vertical work array along the left-hand-edge - // (nhv can be any positive integer but more is better). - kv := n - nw - kt := nw - kwv := nw + 1 - nhv := n - kwv - kt - // Aggressive early deflation. - ls, ld := impl.Dlaqr23(wantt, wantz, n, ktop, kbot, nw, - h, ldh, iloz, ihiz, z, ldz, wr[:kbot+1], wi[:kbot+1], - h[kv*ldh:], ldh, nhv, h[kv*ldh+kt:], ldh, nhv, h[kwv*ldh:], ldh, work, lwork, recur) - - // Adjust kbot accounting for new deflations. - kbot -= ld - // ks points to the shifts. - ks := kbot - ls + 1 - - // Skip an expensive QR sweep if there is a (partly heuristic) - // reason to expect that many eigenvalues will deflate without - // it. Here, the QR sweep is skipped if many eigenvalues have - // just been deflated or if the remaining active block is small. - if ld > 0 && (100*ld > nw*nibble || kbot-ktop+1 <= min(nmin, nwmax)) { - // ld is positive, note progress. - ndfl = 1 - continue - } - - // ns is the nominal number of simultaneous shifts. This may be - // lowered (slightly) if Dlaqr23 did not provide that many - // shifts. - ns := min(min(nsmax, nsr), max(2, kbot-ktop)) &^ 1 - - // If there have been no deflations in a multiple of kexsh - // iterations, then try exceptional shifts. Otherwise use shifts - // provided by Dlaqr23 above or from the eigenvalues of a - // trailing principal submatrix. 
- if ndfl%kexsh == 0 { - ks = kbot - ns + 1 - for i := kbot; i > max(ks, ktop+1); i -= 2 { - ss := math.Abs(h[i*ldh+i-1]) + math.Abs(h[(i-1)*ldh+i-2]) - aa := wilk1*ss + h[i*ldh+i] - _, _, _, _, wr[i-1], wi[i-1], wr[i], wi[i], _, _ = - impl.Dlanv2(aa, ss, wilk2*ss, aa) - } - if ks == ktop { - wr[ks+1] = h[(ks+1)*ldh+ks+1] - wi[ks+1] = 0 - wr[ks] = wr[ks+1] - wi[ks] = wi[ks+1] - } - } else { - // If we got ns/2 or fewer shifts, use Dlahqr or recur - // into Dlaqr04 on a trailing principal submatrix to get - // more. Since ns <= nsmax <=(n+6)/9, there is enough - // space below the subdiagonal to fit an ns×ns scratch - // array. - if kbot-ks+1 <= ns/2 { - ks = kbot - ns + 1 - kt = n - ns - impl.Dlacpy(blas.All, ns, ns, h[ks*ldh+ks:], ldh, h[kt*ldh:], ldh) - if ns > nmin && recur > 0 { - ks += impl.Dlaqr04(false, false, ns, 1, ns-1, h[kt*ldh:], ldh, - wr[ks:ks+ns], wi[ks:ks+ns], 0, 0, nil, 0, work, lwork, recur-1) - } else { - ks += impl.Dlahqr(false, false, ns, 0, ns-1, h[kt*ldh:], ldh, - wr[ks:ks+ns], wi[ks:ks+ns], 0, 0, nil, 1) - } - // In case of a rare QR failure use eigenvalues - // of the trailing 2×2 principal submatrix. - if ks >= kbot { - aa := h[(kbot-1)*ldh+kbot-1] - bb := h[(kbot-1)*ldh+kbot] - cc := h[kbot*ldh+kbot-1] - dd := h[kbot*ldh+kbot] - _, _, _, _, wr[kbot-1], wi[kbot-1], wr[kbot], wi[kbot], _, _ = - impl.Dlanv2(aa, bb, cc, dd) - ks = kbot - 1 - } - } - - if kbot-ks+1 > ns { - // Sorting the shifts helps a little. Bubble - // sort keeps complex conjugate pairs together. - sorted := false - for k := kbot; k > ks; k-- { - if sorted { - break - } - sorted = true - for i := ks; i < k; i++ { - if math.Abs(wr[i])+math.Abs(wi[i]) >= math.Abs(wr[i+1])+math.Abs(wi[i+1]) { - continue - } - sorted = false - wr[i], wr[i+1] = wr[i+1], wr[i] - wi[i], wi[i+1] = wi[i+1], wi[i] - } - } - } - - // Shuffle shifts into pairs of real shifts and pairs of - // complex conjugate shifts using the fact that complex - // conjugate shifts are already adjacent to one another. - // TODO(vladimir-ch): The shuffling here could probably - // be removed but I'm not sure right now and it's safer - // to leave it. - for i := kbot; i > ks+1; i -= 2 { - if wi[i] == -wi[i-1] { - continue - } - wr[i], wr[i-1], wr[i-2] = wr[i-1], wr[i-2], wr[i] - wi[i], wi[i-1], wi[i-2] = wi[i-1], wi[i-2], wi[i] - } - } - - // If there are only two shifts and both are real, then use only one. - if kbot-ks+1 == 2 && wi[kbot] == 0 { - if math.Abs(wr[kbot]-h[kbot*ldh+kbot]) < math.Abs(wr[kbot-1]-h[kbot*ldh+kbot]) { - wr[kbot-1] = wr[kbot] - } else { - wr[kbot] = wr[kbot-1] - } - } - - // Use up to ns of the smallest magnitude shifts. If there - // aren't ns shifts available, then use them all, possibly - // dropping one to make the number of shifts even. - ns = min(ns, kbot-ks+1) &^ 1 - ks = kbot - ns + 1 - - // Split workspace under the subdiagonal into: - // - a kdu×kdu work array U in the lower left-hand-corner, - // - a kdu×nhv horizontal work array WH along the bottom edge - // (nhv must be at least kdu but more is better), - // - an nhv×kdu vertical work array WV along the left-hand-edge - // (nhv must be at least kdu but more is better). - kdu := 3*ns - 3 - ku := n - kdu - kwh := kdu - kwv = kdu + 3 - nhv = n - kwv - kdu - // Small-bulge multi-shift QR sweep. - impl.Dlaqr5(wantt, wantz, kacc22, n, ktop, kbot, ns, - wr[ks:ks+ns], wi[ks:ks+ns], h, ldh, iloz, ihiz, z, ldz, - work, 3, h[ku*ldh:], ldh, nhv, h[kwv*ldh:], ldh, nhv, h[ku*ldh+kwh:], ldh) - - // Note progress (or the lack of it). 
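-		// ndfl counts sweeps since the last deflation; it drives both
-		// the deflation-window schedule and the exceptional-shift
-		// trigger at the top of the loop.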
- if ld > 0 { - ndfl = 1 - } else { - ndfl++ - } - } - - work[0] = float64(lwkopt) - return unconverged -} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlaqr1.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlaqr1.go deleted file mode 100644 index e21373bd1..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/dlaqr1.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright ©2016 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import "math" - -// Dlaqr1 sets v to a scalar multiple of the first column of the product -// (H - (sr1 + i*si1)*I)*(H - (sr2 + i*si2)*I) -// where H is a 2×2 or 3×3 matrix, I is the identity matrix of the same size, -// and i is the imaginary unit. Scaling is done to avoid overflows and most -// underflows. -// -// n is the order of H and must be either 2 or 3. It must hold that either sr1 = -// sr2 and si1 = -si2, or si1 = si2 = 0. The length of v must be equal to n. If -// any of these conditions is not met, Dlaqr1 will panic. -// -// Dlaqr1 is an internal routine. It is exported for testing purposes. -func (impl Implementation) Dlaqr1(n int, h []float64, ldh int, sr1, si1, sr2, si2 float64, v []float64) { - switch { - case n != 2 && n != 3: - panic("lapack: n must be 2 or 3") - case ldh < n: - panic(badLdH) - case len(h) < (n-1)*ldh+n: - panic(shortH) - case !((sr1 == sr2 && si1 == -si2) || (si1 == 0 && si2 == 0)): - panic(badShifts) - case len(v) != n: - panic(shortV) - } - - if n == 2 { - s := math.Abs(h[0]-sr2) + math.Abs(si2) + math.Abs(h[ldh]) - if s == 0 { - v[0] = 0 - v[1] = 0 - } else { - h21s := h[ldh] / s - v[0] = h21s*h[1] + (h[0]-sr1)*((h[0]-sr2)/s) - si1*(si2/s) - v[1] = h21s * (h[0] + h[ldh+1] - sr1 - sr2) - } - return - } - - s := math.Abs(h[0]-sr2) + math.Abs(si2) + math.Abs(h[ldh]) + math.Abs(h[2*ldh]) - if s == 0 { - v[0] = 0 - v[1] = 0 - v[2] = 0 - } else { - h21s := h[ldh] / s - h31s := h[2*ldh] / s - v[0] = (h[0]-sr1)*((h[0]-sr2)/s) - si1*(si2/s) + h[1]*h21s + h[2]*h31s - v[1] = h21s*(h[0]+h[ldh+1]-sr1-sr2) + h[ldh+2]*h31s - v[2] = h31s*(h[0]+h[2*ldh+2]-sr1-sr2) + h21s*h[2*ldh+1] - } -} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlaqr23.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlaqr23.go deleted file mode 100644 index ff299a73a..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/dlaqr23.go +++ /dev/null @@ -1,415 +0,0 @@ -// Copyright ©2016 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import ( - "math" - - "gonum.org/v1/gonum/blas" - "gonum.org/v1/gonum/blas/blas64" - "gonum.org/v1/gonum/lapack" -) - -// Dlaqr23 performs the orthogonal similarity transformation of an n×n upper -// Hessenberg matrix to detect and deflate fully converged eigenvalues from a -// trailing principal submatrix using aggressive early deflation [1]. -// -// On return, H will be overwritten by a new Hessenberg matrix that is a -// perturbation of an orthogonal similarity transformation of H. It is hoped -// that on output H will have many zero subdiagonal entries. -// -// If wantt is true, the matrix H will be fully updated so that the -// quasi-triangular Schur factor can be computed. If wantt is false, then only -// enough of H will be updated to preserve the eigenvalues. 
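-//
-// Aggressive early deflation computes the Schur form of the trailing
-// deflation window and deflates those eigenvalues whose components in the
-// resulting spike are negligible compared to ulp-scaled local magnitudes.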
-// -// If wantz is true, the orthogonal similarity transformation will be -// accumulated into Z[iloz:ihiz+1,ktop:kbot+1], otherwise Z is not referenced. -// -// ktop and kbot determine a block [ktop:kbot+1,ktop:kbot+1] along the diagonal -// of H. It must hold that -// 0 <= ilo <= ihi < n, if n > 0, -// ilo == 0 and ihi == -1, if n == 0, -// and the block must be isolated, that is, it must hold that -// ktop == 0 or H[ktop,ktop-1] == 0, -// kbot == n-1 or H[kbot+1,kbot] == 0, -// otherwise Dlaqr23 will panic. -// -// nw is the deflation window size. It must hold that -// 0 <= nw <= kbot-ktop+1, -// otherwise Dlaqr23 will panic. -// -// iloz and ihiz specify the rows of the n×n matrix Z to which transformations -// will be applied if wantz is true. It must hold that -// 0 <= iloz <= ktop, and kbot <= ihiz < n, -// otherwise Dlaqr23 will panic. -// -// sr and si must have length kbot+1, otherwise Dlaqr23 will panic. -// -// v and ldv represent an nw×nw work matrix. -// t and ldt represent an nw×nh work matrix, and nh must be at least nw. -// wv and ldwv represent an nv×nw work matrix. -// -// work must have length at least lwork and lwork must be at least max(1,2*nw), -// otherwise Dlaqr23 will panic. Larger values of lwork may result in greater -// efficiency. On return, work[0] will contain the optimal value of lwork. -// -// If lwork is -1, instead of performing Dlaqr23, the function only estimates the -// optimal workspace size and stores it into work[0]. Neither h nor z are -// accessed. -// -// recur is the non-negative recursion depth. For recur > 0, Dlaqr23 behaves -// as DLAQR3, for recur == 0 it behaves as DLAQR2. -// -// On return, ns and nd will contain respectively the number of unconverged -// (i.e., approximate) eigenvalues and converged eigenvalues that are stored in -// sr and si. -// -// On return, the real and imaginary parts of approximate eigenvalues that may -// be used for shifts will be stored respectively in sr[kbot-nd-ns+1:kbot-nd+1] -// and si[kbot-nd-ns+1:kbot-nd+1]. -// -// On return, the real and imaginary parts of converged eigenvalues will be -// stored respectively in sr[kbot-nd+1:kbot+1] and si[kbot-nd+1:kbot+1]. -// -// References: -// [1] K. Braman, R. Byers, R. Mathias. The Multishift QR Algorithm. Part II: -// Aggressive Early Deflation. SIAM J. Matrix Anal. Appl 23(4) (2002), pp. 948—973 -// URL: http://dx.doi.org/10.1137/S0895479801384585 -// -func (impl Implementation) Dlaqr23(wantt, wantz bool, n, ktop, kbot, nw int, h []float64, ldh int, iloz, ihiz int, z []float64, ldz int, sr, si []float64, v []float64, ldv int, nh int, t []float64, ldt int, nv int, wv []float64, ldwv int, work []float64, lwork int, recur int) (ns, nd int) { - switch { - case n < 0: - panic(nLT0) - case ktop < 0 || max(0, n-1) < ktop: - panic(badKtop) - case kbot < min(ktop, n-1) || n <= kbot: - panic(badKbot) - case nw < 0 || kbot-ktop+1+1 < nw: - panic(badNw) - case ldh < max(1, n): - panic(badLdH) - case wantz && (iloz < 0 || ktop < iloz): - panic(badIloz) - case wantz && (ihiz < kbot || n <= ihiz): - panic(badIhiz) - case ldz < 1, wantz && ldz < n: - panic(badLdZ) - case ldv < max(1, nw): - panic(badLdV) - case nh < nw: - panic(badNh) - case ldt < max(1, nh): - panic(badLdT) - case nv < 0: - panic(nvLT0) - case ldwv < max(1, nw): - panic(badLdWV) - case lwork < max(1, 2*nw) && lwork != -1: - panic(badLWork) - case len(work) < max(1, lwork): - panic(shortWork) - case recur < 0: - panic(recurLT0) - } - - // Quick return for zero window size. 
- if nw == 0 { - work[0] = 1 - return 0, 0 - } - - // LAPACK code does not enforce the documented behavior - // nw <= kbot-ktop+1 - // but we do (we panic above). - jw := nw - lwkopt := max(1, 2*nw) - if jw > 2 { - // Workspace query call to Dgehrd. - impl.Dgehrd(jw, 0, jw-2, t, ldt, work, work, -1) - lwk1 := int(work[0]) - // Workspace query call to Dormhr. - impl.Dormhr(blas.Right, blas.NoTrans, jw, jw, 0, jw-2, t, ldt, work, v, ldv, work, -1) - lwk2 := int(work[0]) - if recur > 0 { - // Workspace query call to Dlaqr04. - impl.Dlaqr04(true, true, jw, 0, jw-1, t, ldt, sr, si, 0, jw-1, v, ldv, work, -1, recur-1) - lwk3 := int(work[0]) - // Optimal workspace. - lwkopt = max(jw+max(lwk1, lwk2), lwk3) - } else { - // Optimal workspace. - lwkopt = jw + max(lwk1, lwk2) - } - } - // Quick return in case of workspace query. - if lwork == -1 { - work[0] = float64(lwkopt) - return 0, 0 - } - - // Check input slices only if not doing workspace query. - switch { - case len(h) < (n-1)*ldh+n: - panic(shortH) - case len(v) < (nw-1)*ldv+nw: - panic(shortV) - case len(t) < (nw-1)*ldt+nh: - panic(shortT) - case len(wv) < (nv-1)*ldwv+nw: - panic(shortWV) - case wantz && len(z) < (n-1)*ldz+n: - panic(shortZ) - case len(sr) != kbot+1: - panic(badLenSr) - case len(si) != kbot+1: - panic(badLenSi) - case ktop > 0 && h[ktop*ldh+ktop-1] != 0: - panic(notIsolated) - case kbot+1 < n && h[(kbot+1)*ldh+kbot] != 0: - panic(notIsolated) - } - - // Machine constants. - ulp := dlamchP - smlnum := float64(n) / ulp * dlamchS - - // Setup deflation window. - var s float64 - kwtop := kbot - jw + 1 - if kwtop != ktop { - s = h[kwtop*ldh+kwtop-1] - } - if kwtop == kbot { - // 1×1 deflation window. - sr[kwtop] = h[kwtop*ldh+kwtop] - si[kwtop] = 0 - ns = 1 - nd = 0 - if math.Abs(s) <= math.Max(smlnum, ulp*math.Abs(h[kwtop*ldh+kwtop])) { - ns = 0 - nd = 1 - if kwtop > ktop { - h[kwtop*ldh+kwtop-1] = 0 - } - } - work[0] = 1 - return ns, nd - } - - // Convert to spike-triangular form. In case of a rare QR failure, this - // routine continues to do aggressive early deflation using that part of - // the deflation window that converged using infqr here and there to - // keep track. - impl.Dlacpy(blas.Upper, jw, jw, h[kwtop*ldh+kwtop:], ldh, t, ldt) - bi := blas64.Implementation() - bi.Dcopy(jw-1, h[(kwtop+1)*ldh+kwtop:], ldh+1, t[ldt:], ldt+1) - impl.Dlaset(blas.All, jw, jw, 0, 1, v, ldv) - nmin := impl.Ilaenv(12, "DLAQR3", "SV", jw, 0, jw-1, lwork) - var infqr int - if recur > 0 && jw > nmin { - infqr = impl.Dlaqr04(true, true, jw, 0, jw-1, t, ldt, sr[kwtop:], si[kwtop:], 0, jw-1, v, ldv, work, lwork, recur-1) - } else { - infqr = impl.Dlahqr(true, true, jw, 0, jw-1, t, ldt, sr[kwtop:], si[kwtop:], 0, jw-1, v, ldv) - } - // Note that ilo == 0 which conveniently coincides with the success - // value of infqr, that is, infqr as an index always points to the first - // converged eigenvalue. - - // Dtrexc needs a clean margin near the diagonal. - for j := 0; j < jw-3; j++ { - t[(j+2)*ldt+j] = 0 - t[(j+3)*ldt+j] = 0 - } - if jw >= 3 { - t[(jw-1)*ldt+jw-3] = 0 - } - - ns = jw - ilst := infqr - // Deflation detection loop. - for ilst < ns { - bulge := false - if ns >= 2 { - bulge = t[(ns-1)*ldt+ns-2] != 0 - } - if !bulge { - // Real eigenvalue. - abst := math.Abs(t[(ns-1)*ldt+ns-1]) - if abst == 0 { - abst = math.Abs(s) - } - if math.Abs(s*v[ns-1]) <= math.Max(smlnum, ulp*abst) { - // Deflatable. - ns-- - } else { - // Undeflatable, move it up out of the way. - // Dtrexc can not fail in this case. 
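-				// (The block being moved is 1×1, so the exchange always
-				// succeeds.)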
- _, ilst, _ = impl.Dtrexc(lapack.UpdateSchur, jw, t, ldt, v, ldv, ns-1, ilst, work) - ilst++ - } - continue - } - // Complex conjugate pair. - abst := math.Abs(t[(ns-1)*ldt+ns-1]) + math.Sqrt(math.Abs(t[(ns-1)*ldt+ns-2]))*math.Sqrt(math.Abs(t[(ns-2)*ldt+ns-1])) - if abst == 0 { - abst = math.Abs(s) - } - if math.Max(math.Abs(s*v[ns-1]), math.Abs(s*v[ns-2])) <= math.Max(smlnum, ulp*abst) { - // Deflatable. - ns -= 2 - } else { - // Undeflatable, move them up out of the way. - // Dtrexc does the right thing with ilst in case of a - // rare exchange failure. - _, ilst, _ = impl.Dtrexc(lapack.UpdateSchur, jw, t, ldt, v, ldv, ns-1, ilst, work) - ilst += 2 - } - } - - // Return to Hessenberg form. - if ns == 0 { - s = 0 - } - if ns < jw { - // Sorting diagonal blocks of T improves accuracy for graded - // matrices. Bubble sort deals well with exchange failures. - sorted := false - i := ns - for !sorted { - sorted = true - kend := i - 1 - i = infqr - var k int - if i == ns-1 || t[(i+1)*ldt+i] == 0 { - k = i + 1 - } else { - k = i + 2 - } - for k <= kend { - var evi float64 - if k == i+1 { - evi = math.Abs(t[i*ldt+i]) - } else { - evi = math.Abs(t[i*ldt+i]) + math.Sqrt(math.Abs(t[(i+1)*ldt+i]))*math.Sqrt(math.Abs(t[i*ldt+i+1])) - } - - var evk float64 - if k == kend || t[(k+1)*ldt+k] == 0 { - evk = math.Abs(t[k*ldt+k]) - } else { - evk = math.Abs(t[k*ldt+k]) + math.Sqrt(math.Abs(t[(k+1)*ldt+k]))*math.Sqrt(math.Abs(t[k*ldt+k+1])) - } - - if evi >= evk { - i = k - } else { - sorted = false - _, ilst, ok := impl.Dtrexc(lapack.UpdateSchur, jw, t, ldt, v, ldv, i, k, work) - if ok { - i = ilst - } else { - i = k - } - } - if i == kend || t[(i+1)*ldt+i] == 0 { - k = i + 1 - } else { - k = i + 2 - } - } - } - } - - // Restore shift/eigenvalue array from T. - for i := jw - 1; i >= infqr; { - if i == infqr || t[i*ldt+i-1] == 0 { - sr[kwtop+i] = t[i*ldt+i] - si[kwtop+i] = 0 - i-- - continue - } - aa := t[(i-1)*ldt+i-1] - bb := t[(i-1)*ldt+i] - cc := t[i*ldt+i-1] - dd := t[i*ldt+i] - _, _, _, _, sr[kwtop+i-1], si[kwtop+i-1], sr[kwtop+i], si[kwtop+i], _, _ = impl.Dlanv2(aa, bb, cc, dd) - i -= 2 - } - - if ns < jw || s == 0 { - if ns > 1 && s != 0 { - // Reflect spike back into lower triangle. - bi.Dcopy(ns, v[:ns], 1, work[:ns], 1) - _, tau := impl.Dlarfg(ns, work[0], work[1:ns], 1) - work[0] = 1 - impl.Dlaset(blas.Lower, jw-2, jw-2, 0, 0, t[2*ldt:], ldt) - impl.Dlarf(blas.Left, ns, jw, work[:ns], 1, tau, t, ldt, work[jw:]) - impl.Dlarf(blas.Right, ns, ns, work[:ns], 1, tau, t, ldt, work[jw:]) - impl.Dlarf(blas.Right, jw, ns, work[:ns], 1, tau, v, ldv, work[jw:]) - impl.Dgehrd(jw, 0, ns-1, t, ldt, work[:jw-1], work[jw:], lwork-jw) - } - - // Copy updated reduced window into place. - if kwtop > 0 { - h[kwtop*ldh+kwtop-1] = s * v[0] - } - impl.Dlacpy(blas.Upper, jw, jw, t, ldt, h[kwtop*ldh+kwtop:], ldh) - bi.Dcopy(jw-1, t[ldt:], ldt+1, h[(kwtop+1)*ldh+kwtop:], ldh+1) - - // Accumulate orthogonal matrix in order to update H and Z, if - // requested. - if ns > 1 && s != 0 { - // work[:ns-1] contains the elementary reflectors stored - // by a call to Dgehrd above. - impl.Dormhr(blas.Right, blas.NoTrans, jw, ns, 0, ns-1, - t, ldt, work[:ns-1], v, ldv, work[jw:], lwork-jw) - } - - // Update vertical slab in H. 
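-		// The jw×jw orthogonal factor V is applied in panels of at most
-		// nv rows, multiplying each panel of H on the right by V and
-		// buffering the product through wv.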
- var ltop int - if !wantt { - ltop = ktop - } - for krow := ltop; krow < kwtop; krow += nv { - kln := min(nv, kwtop-krow) - bi.Dgemm(blas.NoTrans, blas.NoTrans, kln, jw, jw, - 1, h[krow*ldh+kwtop:], ldh, v, ldv, - 0, wv, ldwv) - impl.Dlacpy(blas.All, kln, jw, wv, ldwv, h[krow*ldh+kwtop:], ldh) - } - - // Update horizontal slab in H. - if wantt { - for kcol := kbot + 1; kcol < n; kcol += nh { - kln := min(nh, n-kcol) - bi.Dgemm(blas.Trans, blas.NoTrans, jw, kln, jw, - 1, v, ldv, h[kwtop*ldh+kcol:], ldh, - 0, t, ldt) - impl.Dlacpy(blas.All, jw, kln, t, ldt, h[kwtop*ldh+kcol:], ldh) - } - } - - // Update vertical slab in Z. - if wantz { - for krow := iloz; krow <= ihiz; krow += nv { - kln := min(nv, ihiz-krow+1) - bi.Dgemm(blas.NoTrans, blas.NoTrans, kln, jw, jw, - 1, z[krow*ldz+kwtop:], ldz, v, ldv, - 0, wv, ldwv) - impl.Dlacpy(blas.All, kln, jw, wv, ldwv, z[krow*ldz+kwtop:], ldz) - } - } - } - - // The number of deflations. - nd = jw - ns - // Shifts are converged eigenvalues that could not be deflated. - // Subtracting infqr from the spike length takes care of the case of a - // rare QR failure while calculating eigenvalues of the deflation - // window. - ns -= infqr - work[0] = float64(lwkopt) - return ns, nd -} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlaqr5.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlaqr5.go deleted file mode 100644 index c198f229a..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/dlaqr5.go +++ /dev/null @@ -1,644 +0,0 @@ -// Copyright ©2016 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import ( - "math" - - "gonum.org/v1/gonum/blas" - "gonum.org/v1/gonum/blas/blas64" -) - -// Dlaqr5 performs a single small-bulge multi-shift QR sweep on an isolated -// block of a Hessenberg matrix. -// -// wantt and wantz determine whether the quasi-triangular Schur factor and the -// orthogonal Schur factor, respectively, will be computed. -// -// kacc22 specifies the computation mode of far-from-diagonal orthogonal -// updates. Permitted values are: -// 0: Dlaqr5 will not accumulate reflections and will not use matrix-matrix -// multiply to update far-from-diagonal matrix entries. -// 1: Dlaqr5 will accumulate reflections and use matrix-matrix multiply to -// update far-from-diagonal matrix entries. -// 2: Dlaqr5 will accumulate reflections, use matrix-matrix multiply to update -// far-from-diagonal matrix entries, and take advantage of 2×2 block -// structure during matrix multiplies. -// For other values of kacc2 Dlaqr5 will panic. -// -// n is the order of the Hessenberg matrix H. -// -// ktop and kbot are indices of the first and last row and column of an isolated -// diagonal block upon which the QR sweep will be applied. It must hold that -// ktop == 0, or 0 < ktop <= n-1 and H[ktop, ktop-1] == 0, and -// kbot == n-1, or 0 <= kbot < n-1 and H[kbot+1, kbot] == 0, -// otherwise Dlaqr5 will panic. -// -// nshfts is the number of simultaneous shifts. It must be positive and even, -// otherwise Dlaqr5 will panic. -// -// sr and si contain the real and imaginary parts, respectively, of the shifts -// of origin that define the multi-shift QR sweep. On return both slices may be -// reordered by Dlaqr5. Their length must be equal to nshfts, otherwise Dlaqr5 -// will panic. -// -// h and ldh represent the Hessenberg matrix H of size n×n. 
On return -// multi-shift QR sweep with shifts sr+i*si has been applied to the isolated -// diagonal block in rows and columns ktop through kbot, inclusive. -// -// iloz and ihiz specify the rows of Z to which transformations will be applied -// if wantz is true. It must hold that 0 <= iloz <= ihiz < n, otherwise Dlaqr5 -// will panic. -// -// z and ldz represent the matrix Z of size n×n. If wantz is true, the QR sweep -// orthogonal similarity transformation is accumulated into -// z[iloz:ihiz,iloz:ihiz] from the right, otherwise z not referenced. -// -// v and ldv represent an auxiliary matrix V of size (nshfts/2)×3. Note that V -// is transposed with respect to the reference netlib implementation. -// -// u and ldu represent an auxiliary matrix of size (3*nshfts-3)×(3*nshfts-3). -// -// wh and ldwh represent an auxiliary matrix of size (3*nshfts-3)×nh. -// -// wv and ldwv represent an auxiliary matrix of size nv×(3*nshfts-3). -// -// Dlaqr5 is an internal routine. It is exported for testing purposes. -func (impl Implementation) Dlaqr5(wantt, wantz bool, kacc22 int, n, ktop, kbot, nshfts int, sr, si []float64, h []float64, ldh int, iloz, ihiz int, z []float64, ldz int, v []float64, ldv int, u []float64, ldu int, nv int, wv []float64, ldwv int, nh int, wh []float64, ldwh int) { - switch { - case kacc22 != 0 && kacc22 != 1 && kacc22 != 2: - panic(badKacc22) - case n < 0: - panic(nLT0) - case ktop < 0 || n <= ktop: - panic(badKtop) - case kbot < 0 || n <= kbot: - panic(badKbot) - - case nshfts < 0: - panic(nshftsLT0) - case nshfts&0x1 != 0: - panic(nshftsOdd) - case len(sr) != nshfts: - panic(badLenSr) - case len(si) != nshfts: - panic(badLenSi) - - case ldh < max(1, n): - panic(badLdH) - case len(h) < (n-1)*ldh+n: - panic(shortH) - - case wantz && ihiz >= n: - panic(badIhiz) - case wantz && iloz < 0 || ihiz < iloz: - panic(badIloz) - case ldz < 1, wantz && ldz < n: - panic(badLdZ) - case wantz && len(z) < (n-1)*ldz+n: - panic(shortZ) - - case ldv < 3: - // V is transposed w.r.t. reference lapack. - panic(badLdV) - case len(v) < (nshfts/2-1)*ldv+3: - panic(shortV) - - case ldu < max(1, 3*nshfts-3): - panic(badLdU) - case len(u) < (3*nshfts-3-1)*ldu+3*nshfts-3: - panic(shortU) - - case nv < 0: - panic(nvLT0) - case ldwv < max(1, 3*nshfts-3): - panic(badLdWV) - case len(wv) < (nv-1)*ldwv+3*nshfts-3: - panic(shortWV) - - case nh < 0: - panic(nhLT0) - case ldwh < max(1, nh): - panic(badLdWH) - case len(wh) < (3*nshfts-3-1)*ldwh+nh: - panic(shortWH) - - case ktop > 0 && h[ktop*ldh+ktop-1] != 0: - panic(notIsolated) - case kbot < n-1 && h[(kbot+1)*ldh+kbot] != 0: - panic(notIsolated) - } - - // If there are no shifts, then there is nothing to do. - if nshfts < 2 { - return - } - // If the active block is empty or 1×1, then there is nothing to do. - if ktop >= kbot { - return - } - - // Shuffle shifts into pairs of real shifts and pairs of complex - // conjugate shifts assuming complex conjugate shifts are already - // adjacent to one another. - for i := 0; i < nshfts-2; i += 2 { - if si[i] == -si[i+1] { - continue - } - sr[i], sr[i+1], sr[i+2] = sr[i+1], sr[i+2], sr[i] - si[i], si[i+1], si[i+2] = si[i+1], si[i+2], si[i] - } - - // Note: lapack says that nshfts must be even but allows it to be odd - // anyway. We panic above if nshfts is not even, so reducing it by one - // is unnecessary. The only caller Dlaqr04 uses only even nshfts. - // - // The original comment and code from lapack-3.6.0/SRC/dlaqr5.f:341: - // * ==== NSHFTS is supposed to be even, but if it is odd, - // * . 
then simply reduce it by one. The shuffle above - // * . ensures that the dropped shift is real and that - // * . the remaining shifts are paired. ==== - // * - // NS = NSHFTS - MOD( NSHFTS, 2 ) - ns := nshfts - - safmin := dlamchS - ulp := dlamchP - smlnum := safmin * float64(n) / ulp - - // Use accumulated reflections to update far-from-diagonal entries? - accum := kacc22 == 1 || kacc22 == 2 - // If so, exploit the 2×2 block structure? - blk22 := ns > 2 && kacc22 == 2 - - // Clear trash. - if ktop+2 <= kbot { - h[(ktop+2)*ldh+ktop] = 0 - } - - // nbmps = number of 2-shift bulges in the chain. - nbmps := ns / 2 - - // kdu = width of slab. - kdu := 6*nbmps - 3 - - // Create and chase chains of nbmps bulges. - for incol := 3*(1-nbmps) + ktop - 1; incol <= kbot-2; incol += 3*nbmps - 2 { - ndcol := incol + kdu - if accum { - impl.Dlaset(blas.All, kdu, kdu, 0, 1, u, ldu) - } - - // Near-the-diagonal bulge chase. The following loop performs - // the near-the-diagonal part of a small bulge multi-shift QR - // sweep. Each 6*nbmps-2 column diagonal chunk extends from - // column incol to column ndcol (including both column incol and - // column ndcol). The following loop chases a 3*nbmps column - // long chain of nbmps bulges 3*nbmps-2 columns to the right. - // (incol may be less than ktop and ndcol may be greater than - // kbot indicating phantom columns from which to chase bulges - // before they are actually introduced or to which to chase - // bulges beyond column kbot.) - for krcol := incol; krcol <= min(incol+3*nbmps-3, kbot-2); krcol++ { - // Bulges number mtop to mbot are active double implicit - // shift bulges. There may or may not also be small 2×2 - // bulge, if there is room. The inactive bulges (if any) - // must wait until the active bulges have moved down the - // diagonal to make room. The phantom matrix paradigm - // described above helps keep track. - - mtop := max(0, ((ktop-1)-krcol+2)/3) - mbot := min(nbmps, (kbot-krcol)/3) - 1 - m22 := mbot + 1 - bmp22 := (mbot < nbmps-1) && (krcol+3*m22 == kbot-2) - - // Generate reflections to chase the chain right one - // column. (The minimum value of k is ktop-1.) - for m := mtop; m <= mbot; m++ { - k := krcol + 3*m - if k == ktop-1 { - impl.Dlaqr1(3, h[ktop*ldh+ktop:], ldh, - sr[2*m], si[2*m], sr[2*m+1], si[2*m+1], - v[m*ldv:m*ldv+3]) - alpha := v[m*ldv] - _, v[m*ldv] = impl.Dlarfg(3, alpha, v[m*ldv+1:m*ldv+3], 1) - continue - } - beta := h[(k+1)*ldh+k] - v[m*ldv+1] = h[(k+2)*ldh+k] - v[m*ldv+2] = h[(k+3)*ldh+k] - beta, v[m*ldv] = impl.Dlarfg(3, beta, v[m*ldv+1:m*ldv+3], 1) - - // A bulge may collapse because of vigilant deflation or - // destructive underflow. In the underflow case, try the - // two-small-subdiagonals trick to try to reinflate the - // bulge. - if h[(k+3)*ldh+k] != 0 || h[(k+3)*ldh+k+1] != 0 || h[(k+3)*ldh+k+2] == 0 { - // Typical case: not collapsed (yet). - h[(k+1)*ldh+k] = beta - h[(k+2)*ldh+k] = 0 - h[(k+3)*ldh+k] = 0 - continue - } - - // Atypical case: collapsed. Attempt to reintroduce - // ignoring H[k+1,k] and H[k+2,k]. If the fill - // resulting from the new reflector is too large, - // then abandon it. Otherwise, use the new one. 
- var vt [3]float64 - impl.Dlaqr1(3, h[(k+1)*ldh+k+1:], ldh, sr[2*m], - si[2*m], sr[2*m+1], si[2*m+1], vt[:]) - alpha := vt[0] - _, vt[0] = impl.Dlarfg(3, alpha, vt[1:3], 1) - refsum := vt[0] * (h[(k+1)*ldh+k] + vt[1]*h[(k+2)*ldh+k]) - - dsum := math.Abs(h[k*ldh+k]) + math.Abs(h[(k+1)*ldh+k+1]) + math.Abs(h[(k+2)*ldh+k+2]) - if math.Abs(h[(k+2)*ldh+k]-refsum*vt[1])+math.Abs(refsum*vt[2]) > ulp*dsum { - // Starting a new bulge here would create - // non-negligible fill. Use the old one with - // trepidation. - h[(k+1)*ldh+k] = beta - h[(k+2)*ldh+k] = 0 - h[(k+3)*ldh+k] = 0 - continue - } else { - // Starting a new bulge here would create - // only negligible fill. Replace the old - // reflector with the new one. - h[(k+1)*ldh+k] -= refsum - h[(k+2)*ldh+k] = 0 - h[(k+3)*ldh+k] = 0 - v[m*ldv] = vt[0] - v[m*ldv+1] = vt[1] - v[m*ldv+2] = vt[2] - } - } - - // Generate a 2×2 reflection, if needed. - if bmp22 { - k := krcol + 3*m22 - if k == ktop-1 { - impl.Dlaqr1(2, h[(k+1)*ldh+k+1:], ldh, - sr[2*m22], si[2*m22], sr[2*m22+1], si[2*m22+1], - v[m22*ldv:m22*ldv+2]) - beta := v[m22*ldv] - _, v[m22*ldv] = impl.Dlarfg(2, beta, v[m22*ldv+1:m22*ldv+2], 1) - } else { - beta := h[(k+1)*ldh+k] - v[m22*ldv+1] = h[(k+2)*ldh+k] - beta, v[m22*ldv] = impl.Dlarfg(2, beta, v[m22*ldv+1:m22*ldv+2], 1) - h[(k+1)*ldh+k] = beta - h[(k+2)*ldh+k] = 0 - } - } - - // Multiply H by reflections from the left. - var jbot int - switch { - case accum: - jbot = min(ndcol, kbot) - case wantt: - jbot = n - 1 - default: - jbot = kbot - } - for j := max(ktop, krcol); j <= jbot; j++ { - mend := min(mbot+1, (j-krcol+2)/3) - 1 - for m := mtop; m <= mend; m++ { - k := krcol + 3*m - refsum := v[m*ldv] * (h[(k+1)*ldh+j] + - v[m*ldv+1]*h[(k+2)*ldh+j] + v[m*ldv+2]*h[(k+3)*ldh+j]) - h[(k+1)*ldh+j] -= refsum - h[(k+2)*ldh+j] -= refsum * v[m*ldv+1] - h[(k+3)*ldh+j] -= refsum * v[m*ldv+2] - } - } - if bmp22 { - k := krcol + 3*m22 - for j := max(k+1, ktop); j <= jbot; j++ { - refsum := v[m22*ldv] * (h[(k+1)*ldh+j] + v[m22*ldv+1]*h[(k+2)*ldh+j]) - h[(k+1)*ldh+j] -= refsum - h[(k+2)*ldh+j] -= refsum * v[m22*ldv+1] - } - } - - // Multiply H by reflections from the right. Delay filling in the last row - // until the vigilant deflation check is complete. - var jtop int - switch { - case accum: - jtop = max(ktop, incol) - case wantt: - jtop = 0 - default: - jtop = ktop - } - for m := mtop; m <= mbot; m++ { - if v[m*ldv] == 0 { - continue - } - k := krcol + 3*m - for j := jtop; j <= min(kbot, k+3); j++ { - refsum := v[m*ldv] * (h[j*ldh+k+1] + - v[m*ldv+1]*h[j*ldh+k+2] + v[m*ldv+2]*h[j*ldh+k+3]) - h[j*ldh+k+1] -= refsum - h[j*ldh+k+2] -= refsum * v[m*ldv+1] - h[j*ldh+k+3] -= refsum * v[m*ldv+2] - } - if accum { - // Accumulate U. (If necessary, update Z later with an - // efficient matrix-matrix multiply.) - kms := k - incol - for j := max(0, ktop-incol-1); j < kdu; j++ { - refsum := v[m*ldv] * (u[j*ldu+kms] + - v[m*ldv+1]*u[j*ldu+kms+1] + v[m*ldv+2]*u[j*ldu+kms+2]) - u[j*ldu+kms] -= refsum - u[j*ldu+kms+1] -= refsum * v[m*ldv+1] - u[j*ldu+kms+2] -= refsum * v[m*ldv+2] - } - } else if wantz { - // U is not accumulated, so update Z now by multiplying by - // reflections from the right. - for j := iloz; j <= ihiz; j++ { - refsum := v[m*ldv] * (z[j*ldz+k+1] + - v[m*ldv+1]*z[j*ldz+k+2] + v[m*ldv+2]*z[j*ldz+k+3]) - z[j*ldz+k+1] -= refsum - z[j*ldz+k+2] -= refsum * v[m*ldv+1] - z[j*ldz+k+3] -= refsum * v[m*ldv+2] - } - } - } - - // Special case: 2×2 reflection (if needed). 
- if bmp22 && v[m22*ldv] != 0 { - k := krcol + 3*m22 - for j := jtop; j <= min(kbot, k+3); j++ { - refsum := v[m22*ldv] * (h[j*ldh+k+1] + v[m22*ldv+1]*h[j*ldh+k+2]) - h[j*ldh+k+1] -= refsum - h[j*ldh+k+2] -= refsum * v[m22*ldv+1] - } - if accum { - kms := k - incol - for j := max(0, ktop-incol-1); j < kdu; j++ { - refsum := v[m22*ldv] * (u[j*ldu+kms] + v[m22*ldv+1]*u[j*ldu+kms+1]) - u[j*ldu+kms] -= refsum - u[j*ldu+kms+1] -= refsum * v[m22*ldv+1] - } - } else if wantz { - for j := iloz; j <= ihiz; j++ { - refsum := v[m22*ldv] * (z[j*ldz+k+1] + v[m22*ldv+1]*z[j*ldz+k+2]) - z[j*ldz+k+1] -= refsum - z[j*ldz+k+2] -= refsum * v[m22*ldv+1] - } - } - } - - // Vigilant deflation check. - mstart := mtop - if krcol+3*mstart < ktop { - mstart++ - } - mend := mbot - if bmp22 { - mend++ - } - if krcol == kbot-2 { - mend++ - } - for m := mstart; m <= mend; m++ { - k := min(kbot-1, krcol+3*m) - - // The following convergence test requires that the tradition - // small-compared-to-nearby-diagonals criterion and the Ahues & - // Tisseur (LAWN 122, 1997) criteria both be satisfied. The latter - // improves accuracy in some examples. Falling back on an alternate - // convergence criterion when tst1 or tst2 is zero (as done here) is - // traditional but probably unnecessary. - - if h[(k+1)*ldh+k] == 0 { - continue - } - tst1 := math.Abs(h[k*ldh+k]) + math.Abs(h[(k+1)*ldh+k+1]) - if tst1 == 0 { - if k >= ktop+1 { - tst1 += math.Abs(h[k*ldh+k-1]) - } - if k >= ktop+2 { - tst1 += math.Abs(h[k*ldh+k-2]) - } - if k >= ktop+3 { - tst1 += math.Abs(h[k*ldh+k-3]) - } - if k <= kbot-2 { - tst1 += math.Abs(h[(k+2)*ldh+k+1]) - } - if k <= kbot-3 { - tst1 += math.Abs(h[(k+3)*ldh+k+1]) - } - if k <= kbot-4 { - tst1 += math.Abs(h[(k+4)*ldh+k+1]) - } - } - if math.Abs(h[(k+1)*ldh+k]) <= math.Max(smlnum, ulp*tst1) { - h12 := math.Max(math.Abs(h[(k+1)*ldh+k]), math.Abs(h[k*ldh+k+1])) - h21 := math.Min(math.Abs(h[(k+1)*ldh+k]), math.Abs(h[k*ldh+k+1])) - h11 := math.Max(math.Abs(h[(k+1)*ldh+k+1]), math.Abs(h[k*ldh+k]-h[(k+1)*ldh+k+1])) - h22 := math.Min(math.Abs(h[(k+1)*ldh+k+1]), math.Abs(h[k*ldh+k]-h[(k+1)*ldh+k+1])) - scl := h11 + h12 - tst2 := h22 * (h11 / scl) - if tst2 == 0 || h21*(h12/scl) <= math.Max(smlnum, ulp*tst2) { - h[(k+1)*ldh+k] = 0 - } - } - } - - // Fill in the last row of each bulge. - mend = min(nbmps, (kbot-krcol-1)/3) - 1 - for m := mtop; m <= mend; m++ { - k := krcol + 3*m - refsum := v[m*ldv] * v[m*ldv+2] * h[(k+4)*ldh+k+3] - h[(k+4)*ldh+k+1] = -refsum - h[(k+4)*ldh+k+2] = -refsum * v[m*ldv+1] - h[(k+4)*ldh+k+3] -= refsum * v[m*ldv+2] - } - } - - // Use U (if accumulated) to update far-from-diagonal entries in H. - // If required, use U to update Z as well. - if !accum { - continue - } - var jtop, jbot int - if wantt { - jtop = 0 - jbot = n - 1 - } else { - jtop = ktop - jbot = kbot - } - bi := blas64.Implementation() - if !blk22 || incol < ktop || kbot < ndcol || ns <= 2 { - // Updates not exploiting the 2×2 block structure of U. k0 and nu keep track - // of the location and size of U in the special cases of introducing bulges - // and chasing bulges off the bottom. In these special cases and in case the - // number of shifts is ns = 2, there is no 2×2 block structure to exploit. - - k0 := max(0, ktop-incol-1) - nu := kdu - max(0, ndcol-kbot) - k0 - - // Horizontal multiply. 
- for jcol := min(ndcol, kbot) + 1; jcol <= jbot; jcol += nh { - jlen := min(nh, jbot-jcol+1) - bi.Dgemm(blas.Trans, blas.NoTrans, nu, jlen, nu, - 1, u[k0*ldu+k0:], ldu, - h[(incol+k0+1)*ldh+jcol:], ldh, - 0, wh, ldwh) - impl.Dlacpy(blas.All, nu, jlen, wh, ldwh, h[(incol+k0+1)*ldh+jcol:], ldh) - } - - // Vertical multiply. - for jrow := jtop; jrow <= max(ktop, incol)-1; jrow += nv { - jlen := min(nv, max(ktop, incol)-jrow) - bi.Dgemm(blas.NoTrans, blas.NoTrans, jlen, nu, nu, - 1, h[jrow*ldh+incol+k0+1:], ldh, - u[k0*ldu+k0:], ldu, - 0, wv, ldwv) - impl.Dlacpy(blas.All, jlen, nu, wv, ldwv, h[jrow*ldh+incol+k0+1:], ldh) - } - - // Z multiply (also vertical). - if wantz { - for jrow := iloz; jrow <= ihiz; jrow += nv { - jlen := min(nv, ihiz-jrow+1) - bi.Dgemm(blas.NoTrans, blas.NoTrans, jlen, nu, nu, - 1, z[jrow*ldz+incol+k0+1:], ldz, - u[k0*ldu+k0:], ldu, - 0, wv, ldwv) - impl.Dlacpy(blas.All, jlen, nu, wv, ldwv, z[jrow*ldz+incol+k0+1:], ldz) - } - } - - continue - } - - // Updates exploiting U's 2×2 block structure. - - // i2, i4, j2, j4 are the last rows and columns of the blocks. - i2 := (kdu + 1) / 2 - i4 := kdu - j2 := i4 - i2 - j4 := kdu - - // kzs and knz deal with the band of zeros along the diagonal of one of the - // triangular blocks. - kzs := (j4 - j2) - (ns + 1) - knz := ns + 1 - - // Horizontal multiply. - for jcol := min(ndcol, kbot) + 1; jcol <= jbot; jcol += nh { - jlen := min(nh, jbot-jcol+1) - - // Copy bottom of H to top+kzs of scratch (the first kzs - // rows get multiplied by zero). - impl.Dlacpy(blas.All, knz, jlen, h[(incol+1+j2)*ldh+jcol:], ldh, wh[kzs*ldwh:], ldwh) - - // Multiply by U21^T. - impl.Dlaset(blas.All, kzs, jlen, 0, 0, wh, ldwh) - bi.Dtrmm(blas.Left, blas.Upper, blas.Trans, blas.NonUnit, knz, jlen, - 1, u[j2*ldu+kzs:], ldu, wh[kzs*ldwh:], ldwh) - - // Multiply top of H by U11^T. - bi.Dgemm(blas.Trans, blas.NoTrans, i2, jlen, j2, - 1, u, ldu, h[(incol+1)*ldh+jcol:], ldh, - 1, wh, ldwh) - - // Copy top of H to bottom of WH. - impl.Dlacpy(blas.All, j2, jlen, h[(incol+1)*ldh+jcol:], ldh, wh[i2*ldwh:], ldwh) - - // Multiply by U21^T. - bi.Dtrmm(blas.Left, blas.Lower, blas.Trans, blas.NonUnit, j2, jlen, - 1, u[i2:], ldu, wh[i2*ldwh:], ldwh) - - // Multiply by U22. - bi.Dgemm(blas.Trans, blas.NoTrans, i4-i2, jlen, j4-j2, - 1, u[j2*ldu+i2:], ldu, h[(incol+1+j2)*ldh+jcol:], ldh, - 1, wh[i2*ldwh:], ldwh) - - // Copy it back. - impl.Dlacpy(blas.All, kdu, jlen, wh, ldwh, h[(incol+1)*ldh+jcol:], ldh) - } - - // Vertical multiply. - for jrow := jtop; jrow <= max(incol, ktop)-1; jrow += nv { - jlen := min(nv, max(incol, ktop)-jrow) - - // Copy right of H to scratch (the first kzs columns get multiplied - // by zero). - impl.Dlacpy(blas.All, jlen, knz, h[jrow*ldh+incol+1+j2:], ldh, wv[kzs:], ldwv) - - // Multiply by U21. - impl.Dlaset(blas.All, jlen, kzs, 0, 0, wv, ldwv) - bi.Dtrmm(blas.Right, blas.Upper, blas.NoTrans, blas.NonUnit, jlen, knz, - 1, u[j2*ldu+kzs:], ldu, wv[kzs:], ldwv) - - // Multiply by U11. - bi.Dgemm(blas.NoTrans, blas.NoTrans, jlen, i2, j2, - 1, h[jrow*ldh+incol+1:], ldh, u, ldu, - 1, wv, ldwv) - - // Copy left of H to right of scratch. - impl.Dlacpy(blas.All, jlen, j2, h[jrow*ldh+incol+1:], ldh, wv[i2:], ldwv) - - // Multiply by U21. - bi.Dtrmm(blas.Right, blas.Lower, blas.NoTrans, blas.NonUnit, jlen, i4-i2, - 1, u[i2:], ldu, wv[i2:], ldwv) - - // Multiply by U22. - bi.Dgemm(blas.NoTrans, blas.NoTrans, jlen, i4-i2, j4-j2, - 1, h[jrow*ldh+incol+1+j2:], ldh, u[j2*ldu+i2:], ldu, - 1, wv[i2:], ldwv) - - // Copy it back. 
- impl.Dlacpy(blas.All, jlen, kdu, wv, ldwv, h[jrow*ldh+incol+1:], ldh) - } - - if !wantz { - continue - } - // Multiply Z (also vertical). - for jrow := iloz; jrow <= ihiz; jrow += nv { - jlen := min(nv, ihiz-jrow+1) - - // Copy right of Z to left of scratch (first kzs columns get - // multiplied by zero). - impl.Dlacpy(blas.All, jlen, knz, z[jrow*ldz+incol+1+j2:], ldz, wv[kzs:], ldwv) - - // Multiply by U12. - impl.Dlaset(blas.All, jlen, kzs, 0, 0, wv, ldwv) - bi.Dtrmm(blas.Right, blas.Upper, blas.NoTrans, blas.NonUnit, jlen, knz, - 1, u[j2*ldu+kzs:], ldu, wv[kzs:], ldwv) - - // Multiply by U11. - bi.Dgemm(blas.NoTrans, blas.NoTrans, jlen, i2, j2, - 1, z[jrow*ldz+incol+1:], ldz, u, ldu, - 1, wv, ldwv) - - // Copy left of Z to right of scratch. - impl.Dlacpy(blas.All, jlen, j2, z[jrow*ldz+incol+1:], ldz, wv[i2:], ldwv) - - // Multiply by U21. - bi.Dtrmm(blas.Right, blas.Lower, blas.NoTrans, blas.NonUnit, jlen, i4-i2, - 1, u[i2:], ldu, wv[i2:], ldwv) - - // Multiply by U22. - bi.Dgemm(blas.NoTrans, blas.NoTrans, jlen, i4-i2, j4-j2, - 1, z[jrow*ldz+incol+1+j2:], ldz, u[j2*ldu+i2:], ldu, - 1, wv[i2:], ldwv) - - // Copy the result back to Z. - impl.Dlacpy(blas.All, jlen, kdu, wv, ldwv, z[jrow*ldz+incol+1:], ldz) - } - } -} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlarf.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlarf.go deleted file mode 100644 index 9fc97a328..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/dlarf.go +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright ©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import ( - "gonum.org/v1/gonum/blas" - "gonum.org/v1/gonum/blas/blas64" -) - -// Dlarf applies an elementary reflector to a general rectangular matrix c. -// This computes -// c = h * c if side == Left -// c = c * h if side == right -// where -// h = 1 - tau * v * v^T -// and c is an m * n matrix. -// -// work is temporary storage of length at least m if side == Left and at least -// n if side == Right. This function will panic if this length requirement is not met. -// -// Dlarf is an internal routine. It is exported for testing purposes. -func (impl Implementation) Dlarf(side blas.Side, m, n int, v []float64, incv int, tau float64, c []float64, ldc int, work []float64) { - switch { - case side != blas.Left && side != blas.Right: - panic(badSide) - case m < 0: - panic(mLT0) - case n < 0: - panic(nLT0) - case incv == 0: - panic(zeroIncV) - case ldc < max(1, n): - panic(badLdC) - } - - if m == 0 || n == 0 { - return - } - - applyleft := side == blas.Left - lenV := n - if applyleft { - lenV = m - } - - switch { - case len(v) < 1+(lenV-1)*abs(incv): - panic(shortV) - case len(c) < (m-1)*ldc+n: - panic(shortC) - case (applyleft && len(work) < n) || (!applyleft && len(work) < m): - panic(shortWork) - } - - lastv := 0 // last non-zero element of v - lastc := 0 // last non-zero row/column of c - if tau != 0 { - var i int - if applyleft { - lastv = m - 1 - } else { - lastv = n - 1 - } - if incv > 0 { - i = lastv * incv - } - - // Look for the last non-zero row in v. - for lastv >= 0 && v[i] == 0 { - lastv-- - i -= incv - } - if applyleft { - // Scan for the last non-zero column in C[0:lastv, :] - lastc = impl.Iladlc(lastv+1, n, c, ldc) - } else { - // Scan for the last non-zero row in C[:, 0:lastv] - lastc = impl.Iladlr(m, lastv+1, c, ldc) - } - } - if lastv == -1 || lastc == -1 { - return - } - // Sometimes 1-indexing is nicer ... 
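The Left branch below is, in dense terms, w = C^T v followed by the rank-one update C <- C - tau * v * w^T, restricted to the trailing non-zero block found by the lastv/lastc scans above. A plain-loop sketch of that computation, written as a hypothetical standalone helper (not gonum API; assumes unit stride in v; illustrative only, the Dgemv/Dger pair remains the real implementation):

func applyLeft(m, n int, v []float64, tau float64, c []float64, ldc int, work []float64) {
	// work = C^T * v, the role of the Dgemv call below.
	for i := 0; i < n; i++ {
		var wi float64
		for j := 0; j < m; j++ {
			wi += c[j*ldc+i] * v[j]
		}
		work[i] = wi
	}
	// C -= tau * v * work^T, the role of the Dger call below.
	for j := 0; j < m; j++ {
		for i := 0; i < n; i++ {
			c[j*ldc+i] -= tau * v[j] * work[i]
		}
	}
}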
- bi := blas64.Implementation() - if applyleft { - // Form H * C - // w[0:lastc+1] = c[1:lastv+1, 1:lastc+1]^T * v[1:lastv+1,1] - bi.Dgemv(blas.Trans, lastv+1, lastc+1, 1, c, ldc, v, incv, 0, work, 1) - // c[0: lastv, 0: lastc] = c[...] - w[0:lastv, 1] * v[1:lastc, 1]^T - bi.Dger(lastv+1, lastc+1, -tau, v, incv, work, 1, c, ldc) - return - } - // Form C*H - // w[0:lastc+1,1] := c[0:lastc+1,0:lastv+1] * v[0:lastv+1,1] - bi.Dgemv(blas.NoTrans, lastc+1, lastv+1, 1, c, ldc, v, incv, 0, work, 1) - // c[0:lastc+1,0:lastv+1] = c[...] - w[0:lastc+1,0] * v[0:lastv+1,0]^T - bi.Dger(lastc+1, lastv+1, -tau, work, 1, v, incv, c, ldc) -} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlarfb.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlarfb.go deleted file mode 100644 index 4dd8e063a..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/dlarfb.go +++ /dev/null @@ -1,449 +0,0 @@ -// Copyright ©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import ( - "gonum.org/v1/gonum/blas" - "gonum.org/v1/gonum/blas/blas64" - "gonum.org/v1/gonum/lapack" -) - -// Dlarfb applies a block reflector to a matrix. -// -// In the call to Dlarfb, the mxn c is multiplied by the implicitly defined matrix h as follows: -// c = h * c if side == Left and trans == NoTrans -// c = c * h if side == Right and trans == NoTrans -// c = h^T * c if side == Left and trans == Trans -// c = c * h^T if side == Right and trans == Trans -// h is a product of elementary reflectors. direct sets the direction of multiplication -// h = h_1 * h_2 * ... * h_k if direct == Forward -// h = h_k * h_k-1 * ... * h_1 if direct == Backward -// The combination of direct and store defines the orientation of the elementary -// reflectors. In all cases the ones on the diagonal are implicitly represented. -// -// If direct == lapack.Forward and store == lapack.ColumnWise -// V = [ 1 ] -// [v1 1 ] -// [v1 v2 1] -// [v1 v2 v3] -// [v1 v2 v3] -// If direct == lapack.Forward and store == lapack.RowWise -// V = [ 1 v1 v1 v1 v1] -// [ 1 v2 v2 v2] -// [ 1 v3 v3] -// If direct == lapack.Backward and store == lapack.ColumnWise -// V = [v1 v2 v3] -// [v1 v2 v3] -// [ 1 v2 v3] -// [ 1 v3] -// [ 1] -// If direct == lapack.Backward and store == lapack.RowWise -// V = [v1 v1 1 ] -// [v2 v2 v2 1 ] -// [v3 v3 v3 v3 1] -// An elementary reflector can be explicitly constructed by extracting the -// corresponding elements of v, placing a 1 where the diagonal would be, and -// placing zeros in the remaining elements. -// -// t is a k×k matrix containing the block reflector, and this function will panic -// if t is not of sufficient size. See Dlarft for more information. -// -// work is a temporary storage matrix with stride ldwork. -// work must be of size at least n×k side == Left and m×k if side == Right, and -// this function will panic if this size is not met. -// -// Dlarfb is an internal routine. It is exported for testing purposes. 
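A runnable sketch of the Dlarft -> Dlarfb pipeline described above, assuming the vendored gonum.org/v1/gonum import paths. The stored entries of v are all zero and tau_i = 2, so each implicit reflector is I - 2*e_i*e_i^T and the accumulated block reflector simply negates the first k rows of C, which makes the result easy to check by eye:

package main

import (
	"fmt"

	"gonum.org/v1/gonum/blas"
	"gonum.org/v1/gonum/lapack"
	"gonum.org/v1/gonum/lapack/gonum"
)

func main() {
	impl := gonum.Implementation{}
	m, n, k := 4, 3, 2
	v := make([]float64, m*k) // column-wise storage, ldv = k, diagonal ones implicit
	tau := []float64{2, 2}    // H_i = I - 2*e_i*e_i^T
	t := make([]float64, k*k) // receives the triangular factor T
	c := []float64{
		1, 2, 3,
		4, 5, 6,
		7, 8, 9,
		10, 11, 12,
	}
	work := make([]float64, n*k)
	impl.Dlarft(lapack.Forward, lapack.ColumnWise, m, k, v, k, tau, t, k)
	impl.Dlarfb(blas.Left, blas.NoTrans, lapack.Forward, lapack.ColumnWise,
		m, n, k, v, k, t, k, c, n, work, k)
	fmt.Println(c) // [-1 -2 -3 -4 -5 -6 7 8 9 10 11 12]
}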
-func (Implementation) Dlarfb(side blas.Side, trans blas.Transpose, direct lapack.Direct, store lapack.StoreV, m, n, k int, v []float64, ldv int, t []float64, ldt int, c []float64, ldc int, work []float64, ldwork int) { - nv := m - if side == blas.Right { - nv = n - } - switch { - case side != blas.Left && side != blas.Right: - panic(badSide) - case trans != blas.Trans && trans != blas.NoTrans: - panic(badTrans) - case direct != lapack.Forward && direct != lapack.Backward: - panic(badDirect) - case store != lapack.ColumnWise && store != lapack.RowWise: - panic(badStoreV) - case m < 0: - panic(mLT0) - case n < 0: - panic(nLT0) - case k < 0: - panic(kLT0) - case store == lapack.ColumnWise && ldv < max(1, k): - panic(badLdV) - case store == lapack.RowWise && ldv < max(1, nv): - panic(badLdV) - case ldt < max(1, k): - panic(badLdT) - case ldc < max(1, n): - panic(badLdC) - case ldwork < max(1, k): - panic(badLdWork) - } - - if m == 0 || n == 0 { - return - } - - nw := n - if side == blas.Right { - nw = m - } - switch { - case store == lapack.ColumnWise && len(v) < (nv-1)*ldv+k: - panic(shortV) - case store == lapack.RowWise && len(v) < (k-1)*ldv+nv: - panic(shortV) - case len(t) < (k-1)*ldt+k: - panic(shortT) - case len(c) < (m-1)*ldc+n: - panic(shortC) - case len(work) < (nw-1)*ldwork+k: - panic(shortWork) - } - - bi := blas64.Implementation() - - transt := blas.Trans - if trans == blas.Trans { - transt = blas.NoTrans - } - // TODO(btracey): This follows the original Lapack code where the - // elements are copied into the columns of the working array. The - // loops should go in the other direction so the data is written - // into the rows of work so the copy is not strided. A bigger change - // would be to replace work with work^T, but benchmarks would be - // needed to see if the change is merited. - if store == lapack.ColumnWise { - if direct == lapack.Forward { - // V1 is the first k rows of C. V2 is the remaining rows. - if side == blas.Left { - // W = C^T V = C1^T V1 + C2^T V2 (stored in work). - - // W = C1. - for j := 0; j < k; j++ { - bi.Dcopy(n, c[j*ldc:], 1, work[j:], ldwork) - } - // W = W * V1. - bi.Dtrmm(blas.Right, blas.Lower, blas.NoTrans, blas.Unit, - n, k, 1, - v, ldv, - work, ldwork) - if m > k { - // W = W + C2^T V2. - bi.Dgemm(blas.Trans, blas.NoTrans, n, k, m-k, - 1, c[k*ldc:], ldc, v[k*ldv:], ldv, - 1, work, ldwork) - } - // W = W * T^T or W * T. - bi.Dtrmm(blas.Right, blas.Upper, transt, blas.NonUnit, n, k, - 1, t, ldt, - work, ldwork) - // C -= V * W^T. - if m > k { - // C2 -= V2 * W^T. - bi.Dgemm(blas.NoTrans, blas.Trans, m-k, n, k, - -1, v[k*ldv:], ldv, work, ldwork, - 1, c[k*ldc:], ldc) - } - // W *= V1^T. - bi.Dtrmm(blas.Right, blas.Lower, blas.Trans, blas.Unit, n, k, - 1, v, ldv, - work, ldwork) - // C1 -= W^T. - // TODO(btracey): This should use blas.Axpy. - for i := 0; i < n; i++ { - for j := 0; j < k; j++ { - c[j*ldc+i] -= work[i*ldwork+j] - } - } - return - } - // Form C = C * H or C * H^T, where C = (C1 C2). - - // W = C1. - for i := 0; i < k; i++ { - bi.Dcopy(m, c[i:], ldc, work[i:], ldwork) - } - // W *= V1. - bi.Dtrmm(blas.Right, blas.Lower, blas.NoTrans, blas.Unit, m, k, - 1, v, ldv, - work, ldwork) - if n > k { - bi.Dgemm(blas.NoTrans, blas.NoTrans, m, k, n-k, - 1, c[k:], ldc, v[k*ldv:], ldv, - 1, work, ldwork) - } - // W *= T or T^T. 
- bi.Dtrmm(blas.Right, blas.Upper, trans, blas.NonUnit, m, k, - 1, t, ldt, - work, ldwork) - if n > k { - bi.Dgemm(blas.NoTrans, blas.Trans, m, n-k, k, - -1, work, ldwork, v[k*ldv:], ldv, - 1, c[k:], ldc) - } - // C -= W * V^T. - bi.Dtrmm(blas.Right, blas.Lower, blas.Trans, blas.Unit, m, k, - 1, v, ldv, - work, ldwork) - // C -= W. - // TODO(btracey): This should use blas.Axpy. - for i := 0; i < m; i++ { - for j := 0; j < k; j++ { - c[i*ldc+j] -= work[i*ldwork+j] - } - } - return - } - // V = (V1) - // = (V2) (last k rows) - // Where V2 is unit upper triangular. - if side == blas.Left { - // Form H * C or - // W = C^T V. - - // W = C2^T. - for j := 0; j < k; j++ { - bi.Dcopy(n, c[(m-k+j)*ldc:], 1, work[j:], ldwork) - } - // W *= V2. - bi.Dtrmm(blas.Right, blas.Upper, blas.NoTrans, blas.Unit, n, k, - 1, v[(m-k)*ldv:], ldv, - work, ldwork) - if m > k { - // W += C1^T * V1. - bi.Dgemm(blas.Trans, blas.NoTrans, n, k, m-k, - 1, c, ldc, v, ldv, - 1, work, ldwork) - } - // W *= T or T^T. - bi.Dtrmm(blas.Right, blas.Lower, transt, blas.NonUnit, n, k, - 1, t, ldt, - work, ldwork) - // C -= V * W^T. - if m > k { - bi.Dgemm(blas.NoTrans, blas.Trans, m-k, n, k, - -1, v, ldv, work, ldwork, - 1, c, ldc) - } - // W *= V2^T. - bi.Dtrmm(blas.Right, blas.Upper, blas.Trans, blas.Unit, n, k, - 1, v[(m-k)*ldv:], ldv, - work, ldwork) - // C2 -= W^T. - // TODO(btracey): This should use blas.Axpy. - for i := 0; i < n; i++ { - for j := 0; j < k; j++ { - c[(m-k+j)*ldc+i] -= work[i*ldwork+j] - } - } - return - } - // Form C * H or C * H^T where C = (C1 C2). - // W = C * V. - - // W = C2. - for j := 0; j < k; j++ { - bi.Dcopy(m, c[n-k+j:], ldc, work[j:], ldwork) - } - - // W = W * V2. - bi.Dtrmm(blas.Right, blas.Upper, blas.NoTrans, blas.Unit, m, k, - 1, v[(n-k)*ldv:], ldv, - work, ldwork) - if n > k { - bi.Dgemm(blas.NoTrans, blas.NoTrans, m, k, n-k, - 1, c, ldc, v, ldv, - 1, work, ldwork) - } - // W *= T or T^T. - bi.Dtrmm(blas.Right, blas.Lower, trans, blas.NonUnit, m, k, - 1, t, ldt, - work, ldwork) - // C -= W * V^T. - if n > k { - // C1 -= W * V1^T. - bi.Dgemm(blas.NoTrans, blas.Trans, m, n-k, k, - -1, work, ldwork, v, ldv, - 1, c, ldc) - } - // W *= V2^T. - bi.Dtrmm(blas.Right, blas.Upper, blas.Trans, blas.Unit, m, k, - 1, v[(n-k)*ldv:], ldv, - work, ldwork) - // C2 -= W. - // TODO(btracey): This should use blas.Axpy. - for i := 0; i < m; i++ { - for j := 0; j < k; j++ { - c[i*ldc+n-k+j] -= work[i*ldwork+j] - } - } - return - } - // Store = Rowwise. - if direct == lapack.Forward { - // V = (V1 V2) where v1 is unit upper triangular. - if side == blas.Left { - // Form H * C or H^T * C where C = (C1; C2). - // W = C^T * V^T. - - // W = C1^T. - for j := 0; j < k; j++ { - bi.Dcopy(n, c[j*ldc:], 1, work[j:], ldwork) - } - // W *= V1^T. - bi.Dtrmm(blas.Right, blas.Upper, blas.Trans, blas.Unit, n, k, - 1, v, ldv, - work, ldwork) - if m > k { - bi.Dgemm(blas.Trans, blas.Trans, n, k, m-k, - 1, c[k*ldc:], ldc, v[k:], ldv, - 1, work, ldwork) - } - // W *= T or T^T. - bi.Dtrmm(blas.Right, blas.Upper, transt, blas.NonUnit, n, k, - 1, t, ldt, - work, ldwork) - // C -= V^T * W^T. - if m > k { - bi.Dgemm(blas.Trans, blas.Trans, m-k, n, k, - -1, v[k:], ldv, work, ldwork, - 1, c[k*ldc:], ldc) - } - // W *= V1. - bi.Dtrmm(blas.Right, blas.Upper, blas.NoTrans, blas.Unit, n, k, - 1, v, ldv, - work, ldwork) - // C1 -= W^T. - // TODO(btracey): This should use blas.Axpy. - for i := 0; i < n; i++ { - for j := 0; j < k; j++ { - c[j*ldc+i] -= work[i*ldwork+j] - } - } - return - } - // Form C * H or C * H^T where C = (C1 C2). 
- // W = C * V^T. - - // W = C1. - for j := 0; j < k; j++ { - bi.Dcopy(m, c[j:], ldc, work[j:], ldwork) - } - // W *= V1^T. - bi.Dtrmm(blas.Right, blas.Upper, blas.Trans, blas.Unit, m, k, - 1, v, ldv, - work, ldwork) - if n > k { - bi.Dgemm(blas.NoTrans, blas.Trans, m, k, n-k, - 1, c[k:], ldc, v[k:], ldv, - 1, work, ldwork) - } - // W *= T or T^T. - bi.Dtrmm(blas.Right, blas.Upper, trans, blas.NonUnit, m, k, - 1, t, ldt, - work, ldwork) - // C -= W * V. - if n > k { - bi.Dgemm(blas.NoTrans, blas.NoTrans, m, n-k, k, - -1, work, ldwork, v[k:], ldv, - 1, c[k:], ldc) - } - // W *= V1. - bi.Dtrmm(blas.Right, blas.Upper, blas.NoTrans, blas.Unit, m, k, - 1, v, ldv, - work, ldwork) - // C1 -= W. - // TODO(btracey): This should use blas.Axpy. - for i := 0; i < m; i++ { - for j := 0; j < k; j++ { - c[i*ldc+j] -= work[i*ldwork+j] - } - } - return - } - // V = (V1 V2) where V2 is the last k columns and is lower unit triangular. - if side == blas.Left { - // Form H * C or H^T C where C = (C1 ; C2). - // W = C^T * V^T. - - // W = C2^T. - for j := 0; j < k; j++ { - bi.Dcopy(n, c[(m-k+j)*ldc:], 1, work[j:], ldwork) - } - // W *= V2^T. - bi.Dtrmm(blas.Right, blas.Lower, blas.Trans, blas.Unit, n, k, - 1, v[m-k:], ldv, - work, ldwork) - if m > k { - bi.Dgemm(blas.Trans, blas.Trans, n, k, m-k, - 1, c, ldc, v, ldv, - 1, work, ldwork) - } - // W *= T or T^T. - bi.Dtrmm(blas.Right, blas.Lower, transt, blas.NonUnit, n, k, - 1, t, ldt, - work, ldwork) - // C -= V^T * W^T. - if m > k { - bi.Dgemm(blas.Trans, blas.Trans, m-k, n, k, - -1, v, ldv, work, ldwork, - 1, c, ldc) - } - // W *= V2. - bi.Dtrmm(blas.Right, blas.Lower, blas.NoTrans, blas.Unit, n, k, - 1, v[m-k:], ldv, - work, ldwork) - // C2 -= W^T. - // TODO(btracey): This should use blas.Axpy. - for i := 0; i < n; i++ { - for j := 0; j < k; j++ { - c[(m-k+j)*ldc+i] -= work[i*ldwork+j] - } - } - return - } - // Form C * H or C * H^T where C = (C1 C2). - // W = C * V^T. - // W = C2. - for j := 0; j < k; j++ { - bi.Dcopy(m, c[n-k+j:], ldc, work[j:], ldwork) - } - // W *= V2^T. - bi.Dtrmm(blas.Right, blas.Lower, blas.Trans, blas.Unit, m, k, - 1, v[n-k:], ldv, - work, ldwork) - if n > k { - bi.Dgemm(blas.NoTrans, blas.Trans, m, k, n-k, - 1, c, ldc, v, ldv, - 1, work, ldwork) - } - // W *= T or T^T. - bi.Dtrmm(blas.Right, blas.Lower, trans, blas.NonUnit, m, k, - 1, t, ldt, - work, ldwork) - // C -= W * V. - if n > k { - bi.Dgemm(blas.NoTrans, blas.NoTrans, m, n-k, k, - -1, work, ldwork, v, ldv, - 1, c, ldc) - } - // W *= V2. - bi.Dtrmm(blas.Right, blas.Lower, blas.NoTrans, blas.Unit, m, k, - 1, v[n-k:], ldv, - work, ldwork) - // C1 -= W. - // TODO(btracey): This should use blas.Axpy. - for i := 0; i < m; i++ { - for j := 0; j < k; j++ { - c[i*ldc+n-k+j] -= work[i*ldwork+j] - } - } -} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlarfg.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlarfg.go deleted file mode 100644 index e037fdd6b..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/dlarfg.go +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright ©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import ( - "math" - - "gonum.org/v1/gonum/blas/blas64" -) - -// Dlarfg generates an elementary reflector for a Householder matrix. It creates -// a real elementary reflector of order n such that -// H * (alpha) = (beta) -// ( x) ( 0) -// H^T * H = I -// H is represented in the form -// H = 1 - tau * (1; v) * (1 v^T) -// where tau is a real scalar. 
-// -// On entry, x contains the vector x, on exit it contains v. -// -// Dlarfg is an internal routine. It is exported for testing purposes. -func (impl Implementation) Dlarfg(n int, alpha float64, x []float64, incX int) (beta, tau float64) { - switch { - case n < 0: - panic(nLT0) - case incX <= 0: - panic(badIncX) - } - - if n <= 1 { - return alpha, 0 - } - - if len(x) < 1+(n-2)*abs(incX) { - panic(shortX) - } - - bi := blas64.Implementation() - - xnorm := bi.Dnrm2(n-1, x, incX) - if xnorm == 0 { - return alpha, 0 - } - beta = -math.Copysign(impl.Dlapy2(alpha, xnorm), alpha) - safmin := dlamchS / dlamchE - knt := 0 - if math.Abs(beta) < safmin { - // xnorm and beta may be inaccurate, scale x and recompute. - rsafmn := 1 / safmin - for { - knt++ - bi.Dscal(n-1, rsafmn, x, incX) - beta *= rsafmn - alpha *= rsafmn - if math.Abs(beta) >= safmin { - break - } - } - xnorm = bi.Dnrm2(n-1, x, incX) - beta = -math.Copysign(impl.Dlapy2(alpha, xnorm), alpha) - } - tau = (beta - alpha) / beta - bi.Dscal(n-1, 1/(alpha-beta), x, incX) - for j := 0; j < knt; j++ { - beta *= safmin - } - return beta, tau -} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlarft.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlarft.go deleted file mode 100644 index 8f03eb8b3..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/dlarft.go +++ /dev/null @@ -1,166 +0,0 @@ -// Copyright ©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import ( - "gonum.org/v1/gonum/blas" - "gonum.org/v1/gonum/blas/blas64" - "gonum.org/v1/gonum/lapack" -) - -// Dlarft forms the triangular factor T of a block reflector H, storing the answer -// in t. -// H = I - V * T * V^T if store == lapack.ColumnWise -// H = I - V^T * T * V if store == lapack.RowWise -// H is defined by a product of the elementary reflectors where -// H = H_0 * H_1 * ... * H_{k-1} if direct == lapack.Forward -// H = H_{k-1} * ... * H_1 * H_0 if direct == lapack.Backward -// -// t is a k×k triangular matrix. t is upper triangular if direct = lapack.Forward -// and lower triangular otherwise. This function will panic if t is not of -// sufficient size. -// -// store describes the storage of the elementary reflectors in v. See -// Dlarfb for a description of layout. -// -// tau contains the scalar factors of the elementary reflectors H_i. -// -// Dlarft is an internal routine. It is exported for testing purposes. -func (Implementation) Dlarft(direct lapack.Direct, store lapack.StoreV, n, k int, v []float64, ldv int, tau []float64, t []float64, ldt int) { - mv, nv := n, k - if store == lapack.RowWise { - mv, nv = k, n - } - switch { - case direct != lapack.Forward && direct != lapack.Backward: - panic(badDirect) - case store != lapack.RowWise && store != lapack.ColumnWise: - panic(badStoreV) - case n < 0: - panic(nLT0) - case k < 1: - panic(kLT1) - case ldv < max(1, nv): - panic(badLdV) - case len(tau) < k: - panic(shortTau) - case ldt < max(1, k): - panic(shortT) - } - - if n == 0 { - return - } - - switch { - case len(v) < (mv-1)*ldv+nv: - panic(shortV) - case len(t) < (k-1)*ldt+k: - panic(shortT) - } - - bi := blas64.Implementation() - - // TODO(btracey): There are a number of minor obvious loop optimizations here. - // TODO(btracey): It may be possible to rearrange some of the code so that - // index of 1 is more common in the Dgemv. 
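A worked call of Dlarfg above, assuming the vendored import path; the inputs are chosen so every output is exact. Reducing (3, 4, 0)^T gives beta = -5, tau = 1.6 and v = (1, 0.5, 0), and H = I - tau * v * v^T indeed maps (3, 4, 0)^T to (-5, 0, 0)^T:

package main

import (
	"fmt"

	"gonum.org/v1/gonum/lapack/gonum"
)

func main() {
	impl := gonum.Implementation{}
	x := []float64{4, 0} // the trailing part of (3, 4, 0)^T
	beta, tau := impl.Dlarfg(3, 3, x, 1)
	fmt.Println(beta, tau, x) // -5 1.6 [0.5 0]
}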
- if direct == lapack.Forward { - prevlastv := n - 1 - for i := 0; i < k; i++ { - prevlastv = max(i, prevlastv) - if tau[i] == 0 { - for j := 0; j <= i; j++ { - t[j*ldt+i] = 0 - } - continue - } - var lastv int - if store == lapack.ColumnWise { - // skip trailing zeros - for lastv = n - 1; lastv >= i+1; lastv-- { - if v[lastv*ldv+i] != 0 { - break - } - } - for j := 0; j < i; j++ { - t[j*ldt+i] = -tau[i] * v[i*ldv+j] - } - j := min(lastv, prevlastv) - bi.Dgemv(blas.Trans, j-i, i, - -tau[i], v[(i+1)*ldv:], ldv, v[(i+1)*ldv+i:], ldv, - 1, t[i:], ldt) - } else { - for lastv = n - 1; lastv >= i+1; lastv-- { - if v[i*ldv+lastv] != 0 { - break - } - } - for j := 0; j < i; j++ { - t[j*ldt+i] = -tau[i] * v[j*ldv+i] - } - j := min(lastv, prevlastv) - bi.Dgemv(blas.NoTrans, i, j-i, - -tau[i], v[i+1:], ldv, v[i*ldv+i+1:], 1, - 1, t[i:], ldt) - } - bi.Dtrmv(blas.Upper, blas.NoTrans, blas.NonUnit, i, t, ldt, t[i:], ldt) - t[i*ldt+i] = tau[i] - if i > 1 { - prevlastv = max(prevlastv, lastv) - } else { - prevlastv = lastv - } - } - return - } - prevlastv := 0 - for i := k - 1; i >= 0; i-- { - if tau[i] == 0 { - for j := i; j < k; j++ { - t[j*ldt+i] = 0 - } - continue - } - var lastv int - if i < k-1 { - if store == lapack.ColumnWise { - for lastv = 0; lastv < i; lastv++ { - if v[lastv*ldv+i] != 0 { - break - } - } - for j := i + 1; j < k; j++ { - t[j*ldt+i] = -tau[i] * v[(n-k+i)*ldv+j] - } - j := max(lastv, prevlastv) - bi.Dgemv(blas.Trans, n-k+i-j, k-i-1, - -tau[i], v[j*ldv+i+1:], ldv, v[j*ldv+i:], ldv, - 1, t[(i+1)*ldt+i:], ldt) - } else { - for lastv = 0; lastv < i; lastv++ { - if v[i*ldv+lastv] != 0 { - break - } - } - for j := i + 1; j < k; j++ { - t[j*ldt+i] = -tau[i] * v[j*ldv+n-k+i] - } - j := max(lastv, prevlastv) - bi.Dgemv(blas.NoTrans, k-i-1, n-k+i-j, - -tau[i], v[(i+1)*ldv+j:], ldv, v[i*ldv+j:], 1, - 1, t[(i+1)*ldt+i:], ldt) - } - bi.Dtrmv(blas.Lower, blas.NoTrans, blas.NonUnit, k-i-1, - t[(i+1)*ldt+i+1:], ldt, - t[(i+1)*ldt+i:], ldt) - if i > 0 { - prevlastv = min(prevlastv, lastv) - } else { - prevlastv = lastv - } - } - t[i*ldt+i] = tau[i] - } -} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlarfx.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlarfx.go deleted file mode 100644 index d7928c8cf..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/dlarfx.go +++ /dev/null @@ -1,550 +0,0 @@ -// Copyright ©2016 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import "gonum.org/v1/gonum/blas" - -// Dlarfx applies an elementary reflector H to a real m×n matrix C, from either -// the left or the right, with loop unrolling when the reflector has order less -// than 11. -// -// H is represented in the form -// H = I - tau * v * v^T, -// where tau is a real scalar and v is a real vector. If tau = 0, then H is -// taken to be the identity matrix. -// -// v must have length equal to m if side == blas.Left, and equal to n if side == -// blas.Right, otherwise Dlarfx will panic. -// -// c and ldc represent the m×n matrix C. On return, C is overwritten by the -// matrix H * C if side == blas.Left, or C * H if side == blas.Right. -// -// work must have length at least n if side == blas.Left, and at least m if side -// == blas.Right, otherwise Dlarfx will panic. work is not referenced if H has -// order < 11. -// -// Dlarfx is an internal routine. It is exported for testing purposes. 
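A minimal Dlarfx sketch reusing the order-2 reflector v = (1, 0.5), tau = 1.6 from the Dlarfg example above (it sends (3, 4)^T to (-5, 0)^T). Since the order m = 2 is below the unrolling threshold of 11, the unrolled path runs and work may be nil:

package main

import (
	"fmt"

	"gonum.org/v1/gonum/blas"
	"gonum.org/v1/gonum/lapack/gonum"
)

func main() {
	impl := gonum.Implementation{}
	v := []float64{1, 0.5}
	c := []float64{
		3, 1,
		4, 1,
	}
	impl.Dlarfx(blas.Left, 2, 2, v, 1.6, c, 2, nil)
	fmt.Println(c) // ≈ [-5 -1.4 0 -0.2]; the first column is (-5, 0)^T
}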
-func (impl Implementation) Dlarfx(side blas.Side, m, n int, v []float64, tau float64, c []float64, ldc int, work []float64) { - switch { - case side != blas.Left && side != blas.Right: - panic(badSide) - case m < 0: - panic(mLT0) - case n < 0: - panic(nLT0) - case ldc < max(1, n): - panic(badLdC) - } - - // Quick return if possible. - if m == 0 || n == 0 { - return - } - - nh := m - lwork := n - if side == blas.Right { - nh = n - lwork = m - } - switch { - case len(v) < nh: - panic(shortV) - case len(c) < (m-1)*ldc+n: - panic(shortC) - case nh > 10 && len(work) < lwork: - panic(shortWork) - } - - if tau == 0 { - return - } - - if side == blas.Left { - // Form H * C, where H has order m. - switch m { - default: // Code for general m. - impl.Dlarf(side, m, n, v, 1, tau, c, ldc, work) - return - - case 0: // No-op for zero size matrix. - return - - case 1: // Special code for 1×1 Householder matrix. - t0 := 1 - tau*v[0]*v[0] - for j := 0; j < n; j++ { - c[j] *= t0 - } - return - - case 2: // Special code for 2×2 Householder matrix. - v0 := v[0] - t0 := tau * v0 - v1 := v[1] - t1 := tau * v1 - for j := 0; j < n; j++ { - sum := v0*c[j] + v1*c[ldc+j] - c[j] -= sum * t0 - c[ldc+j] -= sum * t1 - } - return - - case 3: // Special code for 3×3 Householder matrix. - v0 := v[0] - t0 := tau * v0 - v1 := v[1] - t1 := tau * v1 - v2 := v[2] - t2 := tau * v2 - for j := 0; j < n; j++ { - sum := v0*c[j] + v1*c[ldc+j] + v2*c[2*ldc+j] - c[j] -= sum * t0 - c[ldc+j] -= sum * t1 - c[2*ldc+j] -= sum * t2 - } - return - - case 4: // Special code for 4×4 Householder matrix. - v0 := v[0] - t0 := tau * v0 - v1 := v[1] - t1 := tau * v1 - v2 := v[2] - t2 := tau * v2 - v3 := v[3] - t3 := tau * v3 - for j := 0; j < n; j++ { - sum := v0*c[j] + v1*c[ldc+j] + v2*c[2*ldc+j] + v3*c[3*ldc+j] - c[j] -= sum * t0 - c[ldc+j] -= sum * t1 - c[2*ldc+j] -= sum * t2 - c[3*ldc+j] -= sum * t3 - } - return - - case 5: // Special code for 5×5 Householder matrix. - v0 := v[0] - t0 := tau * v0 - v1 := v[1] - t1 := tau * v1 - v2 := v[2] - t2 := tau * v2 - v3 := v[3] - t3 := tau * v3 - v4 := v[4] - t4 := tau * v4 - for j := 0; j < n; j++ { - sum := v0*c[j] + v1*c[ldc+j] + v2*c[2*ldc+j] + v3*c[3*ldc+j] + v4*c[4*ldc+j] - c[j] -= sum * t0 - c[ldc+j] -= sum * t1 - c[2*ldc+j] -= sum * t2 - c[3*ldc+j] -= sum * t3 - c[4*ldc+j] -= sum * t4 - } - return - - case 6: // Special code for 6×6 Householder matrix. - v0 := v[0] - t0 := tau * v0 - v1 := v[1] - t1 := tau * v1 - v2 := v[2] - t2 := tau * v2 - v3 := v[3] - t3 := tau * v3 - v4 := v[4] - t4 := tau * v4 - v5 := v[5] - t5 := tau * v5 - for j := 0; j < n; j++ { - sum := v0*c[j] + v1*c[ldc+j] + v2*c[2*ldc+j] + v3*c[3*ldc+j] + v4*c[4*ldc+j] + - v5*c[5*ldc+j] - c[j] -= sum * t0 - c[ldc+j] -= sum * t1 - c[2*ldc+j] -= sum * t2 - c[3*ldc+j] -= sum * t3 - c[4*ldc+j] -= sum * t4 - c[5*ldc+j] -= sum * t5 - } - return - - case 7: // Special code for 7×7 Householder matrix. - v0 := v[0] - t0 := tau * v0 - v1 := v[1] - t1 := tau * v1 - v2 := v[2] - t2 := tau * v2 - v3 := v[3] - t3 := tau * v3 - v4 := v[4] - t4 := tau * v4 - v5 := v[5] - t5 := tau * v5 - v6 := v[6] - t6 := tau * v6 - for j := 0; j < n; j++ { - sum := v0*c[j] + v1*c[ldc+j] + v2*c[2*ldc+j] + v3*c[3*ldc+j] + v4*c[4*ldc+j] + - v5*c[5*ldc+j] + v6*c[6*ldc+j] - c[j] -= sum * t0 - c[ldc+j] -= sum * t1 - c[2*ldc+j] -= sum * t2 - c[3*ldc+j] -= sum * t3 - c[4*ldc+j] -= sum * t4 - c[5*ldc+j] -= sum * t5 - c[6*ldc+j] -= sum * t6 - } - return - - case 8: // Special code for 8×8 Householder matrix. 
- v0 := v[0] - t0 := tau * v0 - v1 := v[1] - t1 := tau * v1 - v2 := v[2] - t2 := tau * v2 - v3 := v[3] - t3 := tau * v3 - v4 := v[4] - t4 := tau * v4 - v5 := v[5] - t5 := tau * v5 - v6 := v[6] - t6 := tau * v6 - v7 := v[7] - t7 := tau * v7 - for j := 0; j < n; j++ { - sum := v0*c[j] + v1*c[ldc+j] + v2*c[2*ldc+j] + v3*c[3*ldc+j] + v4*c[4*ldc+j] + - v5*c[5*ldc+j] + v6*c[6*ldc+j] + v7*c[7*ldc+j] - c[j] -= sum * t0 - c[ldc+j] -= sum * t1 - c[2*ldc+j] -= sum * t2 - c[3*ldc+j] -= sum * t3 - c[4*ldc+j] -= sum * t4 - c[5*ldc+j] -= sum * t5 - c[6*ldc+j] -= sum * t6 - c[7*ldc+j] -= sum * t7 - } - return - - case 9: // Special code for 9×9 Householder matrix. - v0 := v[0] - t0 := tau * v0 - v1 := v[1] - t1 := tau * v1 - v2 := v[2] - t2 := tau * v2 - v3 := v[3] - t3 := tau * v3 - v4 := v[4] - t4 := tau * v4 - v5 := v[5] - t5 := tau * v5 - v6 := v[6] - t6 := tau * v6 - v7 := v[7] - t7 := tau * v7 - v8 := v[8] - t8 := tau * v8 - for j := 0; j < n; j++ { - sum := v0*c[j] + v1*c[ldc+j] + v2*c[2*ldc+j] + v3*c[3*ldc+j] + v4*c[4*ldc+j] + - v5*c[5*ldc+j] + v6*c[6*ldc+j] + v7*c[7*ldc+j] + v8*c[8*ldc+j] - c[j] -= sum * t0 - c[ldc+j] -= sum * t1 - c[2*ldc+j] -= sum * t2 - c[3*ldc+j] -= sum * t3 - c[4*ldc+j] -= sum * t4 - c[5*ldc+j] -= sum * t5 - c[6*ldc+j] -= sum * t6 - c[7*ldc+j] -= sum * t7 - c[8*ldc+j] -= sum * t8 - } - return - - case 10: // Special code for 10×10 Householder matrix. - v0 := v[0] - t0 := tau * v0 - v1 := v[1] - t1 := tau * v1 - v2 := v[2] - t2 := tau * v2 - v3 := v[3] - t3 := tau * v3 - v4 := v[4] - t4 := tau * v4 - v5 := v[5] - t5 := tau * v5 - v6 := v[6] - t6 := tau * v6 - v7 := v[7] - t7 := tau * v7 - v8 := v[8] - t8 := tau * v8 - v9 := v[9] - t9 := tau * v9 - for j := 0; j < n; j++ { - sum := v0*c[j] + v1*c[ldc+j] + v2*c[2*ldc+j] + v3*c[3*ldc+j] + v4*c[4*ldc+j] + - v5*c[5*ldc+j] + v6*c[6*ldc+j] + v7*c[7*ldc+j] + v8*c[8*ldc+j] + v9*c[9*ldc+j] - c[j] -= sum * t0 - c[ldc+j] -= sum * t1 - c[2*ldc+j] -= sum * t2 - c[3*ldc+j] -= sum * t3 - c[4*ldc+j] -= sum * t4 - c[5*ldc+j] -= sum * t5 - c[6*ldc+j] -= sum * t6 - c[7*ldc+j] -= sum * t7 - c[8*ldc+j] -= sum * t8 - c[9*ldc+j] -= sum * t9 - } - return - } - } - - // Form C * H, where H has order n. - switch n { - default: // Code for general n. - impl.Dlarf(side, m, n, v, 1, tau, c, ldc, work) - return - - case 0: // No-op for zero size matrix. - return - - case 1: // Special code for 1×1 Householder matrix. - t0 := 1 - tau*v[0]*v[0] - for j := 0; j < m; j++ { - c[j*ldc] *= t0 - } - return - - case 2: // Special code for 2×2 Householder matrix. - v0 := v[0] - t0 := tau * v0 - v1 := v[1] - t1 := tau * v1 - for j := 0; j < m; j++ { - cs := c[j*ldc:] - sum := v0*cs[0] + v1*cs[1] - cs[0] -= sum * t0 - cs[1] -= sum * t1 - } - return - - case 3: // Special code for 3×3 Householder matrix. - v0 := v[0] - t0 := tau * v0 - v1 := v[1] - t1 := tau * v1 - v2 := v[2] - t2 := tau * v2 - for j := 0; j < m; j++ { - cs := c[j*ldc:] - sum := v0*cs[0] + v1*cs[1] + v2*cs[2] - cs[0] -= sum * t0 - cs[1] -= sum * t1 - cs[2] -= sum * t2 - } - return - - case 4: // Special code for 4×4 Householder matrix. - v0 := v[0] - t0 := tau * v0 - v1 := v[1] - t1 := tau * v1 - v2 := v[2] - t2 := tau * v2 - v3 := v[3] - t3 := tau * v3 - for j := 0; j < m; j++ { - cs := c[j*ldc:] - sum := v0*cs[0] + v1*cs[1] + v2*cs[2] + v3*cs[3] - cs[0] -= sum * t0 - cs[1] -= sum * t1 - cs[2] -= sum * t2 - cs[3] -= sum * t3 - } - return - - case 5: // Special code for 5×5 Householder matrix. 
- v0 := v[0] - t0 := tau * v0 - v1 := v[1] - t1 := tau * v1 - v2 := v[2] - t2 := tau * v2 - v3 := v[3] - t3 := tau * v3 - v4 := v[4] - t4 := tau * v4 - for j := 0; j < m; j++ { - cs := c[j*ldc:] - sum := v0*cs[0] + v1*cs[1] + v2*cs[2] + v3*cs[3] + v4*cs[4] - cs[0] -= sum * t0 - cs[1] -= sum * t1 - cs[2] -= sum * t2 - cs[3] -= sum * t3 - cs[4] -= sum * t4 - } - return - - case 6: // Special code for 6×6 Householder matrix. - v0 := v[0] - t0 := tau * v0 - v1 := v[1] - t1 := tau * v1 - v2 := v[2] - t2 := tau * v2 - v3 := v[3] - t3 := tau * v3 - v4 := v[4] - t4 := tau * v4 - v5 := v[5] - t5 := tau * v5 - for j := 0; j < m; j++ { - cs := c[j*ldc:] - sum := v0*cs[0] + v1*cs[1] + v2*cs[2] + v3*cs[3] + v4*cs[4] + v5*cs[5] - cs[0] -= sum * t0 - cs[1] -= sum * t1 - cs[2] -= sum * t2 - cs[3] -= sum * t3 - cs[4] -= sum * t4 - cs[5] -= sum * t5 - } - return - - case 7: // Special code for 7×7 Householder matrix. - v0 := v[0] - t0 := tau * v0 - v1 := v[1] - t1 := tau * v1 - v2 := v[2] - t2 := tau * v2 - v3 := v[3] - t3 := tau * v3 - v4 := v[4] - t4 := tau * v4 - v5 := v[5] - t5 := tau * v5 - v6 := v[6] - t6 := tau * v6 - for j := 0; j < m; j++ { - cs := c[j*ldc:] - sum := v0*cs[0] + v1*cs[1] + v2*cs[2] + v3*cs[3] + v4*cs[4] + - v5*cs[5] + v6*cs[6] - cs[0] -= sum * t0 - cs[1] -= sum * t1 - cs[2] -= sum * t2 - cs[3] -= sum * t3 - cs[4] -= sum * t4 - cs[5] -= sum * t5 - cs[6] -= sum * t6 - } - return - - case 8: // Special code for 8×8 Householder matrix. - v0 := v[0] - t0 := tau * v0 - v1 := v[1] - t1 := tau * v1 - v2 := v[2] - t2 := tau * v2 - v3 := v[3] - t3 := tau * v3 - v4 := v[4] - t4 := tau * v4 - v5 := v[5] - t5 := tau * v5 - v6 := v[6] - t6 := tau * v6 - v7 := v[7] - t7 := tau * v7 - for j := 0; j < m; j++ { - cs := c[j*ldc:] - sum := v0*cs[0] + v1*cs[1] + v2*cs[2] + v3*cs[3] + v4*cs[4] + - v5*cs[5] + v6*cs[6] + v7*cs[7] - cs[0] -= sum * t0 - cs[1] -= sum * t1 - cs[2] -= sum * t2 - cs[3] -= sum * t3 - cs[4] -= sum * t4 - cs[5] -= sum * t5 - cs[6] -= sum * t6 - cs[7] -= sum * t7 - } - return - - case 9: // Special code for 9×9 Householder matrix. - v0 := v[0] - t0 := tau * v0 - v1 := v[1] - t1 := tau * v1 - v2 := v[2] - t2 := tau * v2 - v3 := v[3] - t3 := tau * v3 - v4 := v[4] - t4 := tau * v4 - v5 := v[5] - t5 := tau * v5 - v6 := v[6] - t6 := tau * v6 - v7 := v[7] - t7 := tau * v7 - v8 := v[8] - t8 := tau * v8 - for j := 0; j < m; j++ { - cs := c[j*ldc:] - sum := v0*cs[0] + v1*cs[1] + v2*cs[2] + v3*cs[3] + v4*cs[4] + - v5*cs[5] + v6*cs[6] + v7*cs[7] + v8*cs[8] - cs[0] -= sum * t0 - cs[1] -= sum * t1 - cs[2] -= sum * t2 - cs[3] -= sum * t3 - cs[4] -= sum * t4 - cs[5] -= sum * t5 - cs[6] -= sum * t6 - cs[7] -= sum * t7 - cs[8] -= sum * t8 - } - return - - case 10: // Special code for 10×10 Householder matrix. 
- v0 := v[0] - t0 := tau * v0 - v1 := v[1] - t1 := tau * v1 - v2 := v[2] - t2 := tau * v2 - v3 := v[3] - t3 := tau * v3 - v4 := v[4] - t4 := tau * v4 - v5 := v[5] - t5 := tau * v5 - v6 := v[6] - t6 := tau * v6 - v7 := v[7] - t7 := tau * v7 - v8 := v[8] - t8 := tau * v8 - v9 := v[9] - t9 := tau * v9 - for j := 0; j < m; j++ { - cs := c[j*ldc:] - sum := v0*cs[0] + v1*cs[1] + v2*cs[2] + v3*cs[3] + v4*cs[4] + - v5*cs[5] + v6*cs[6] + v7*cs[7] + v8*cs[8] + v9*cs[9] - cs[0] -= sum * t0 - cs[1] -= sum * t1 - cs[2] -= sum * t2 - cs[3] -= sum * t3 - cs[4] -= sum * t4 - cs[5] -= sum * t5 - cs[6] -= sum * t6 - cs[7] -= sum * t7 - cs[8] -= sum * t8 - cs[9] -= sum * t9 - } - return - } -} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlartg.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlartg.go deleted file mode 100644 index ad6454613..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/dlartg.go +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright ©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import "math" - -// Dlartg generates a plane rotation so that -// [ cs sn] * [f] = [r] -// [-sn cs] [g] = [0] -// This is a more accurate version of BLAS drotg, with the other differences that -// if g = 0, then cs = 1 and sn = 0, and if f = 0 and g != 0, then cs = 0 and sn = 1. -// If abs(f) > abs(g), cs will be positive. -// -// Dlartg is an internal routine. It is exported for testing purposes. -func (impl Implementation) Dlartg(f, g float64) (cs, sn, r float64) { - safmn2 := math.Pow(dlamchB, math.Trunc(math.Log(dlamchS/dlamchE)/math.Log(dlamchB)/2)) - safmx2 := 1 / safmn2 - if g == 0 { - cs = 1 - sn = 0 - r = f - return cs, sn, r - } - if f == 0 { - cs = 0 - sn = 1 - r = g - return cs, sn, r - } - f1 := f - g1 := g - scale := math.Max(math.Abs(f1), math.Abs(g1)) - if scale >= safmx2 { - var count int - for { - count++ - f1 *= safmn2 - g1 *= safmn2 - scale = math.Max(math.Abs(f1), math.Abs(g1)) - if scale < safmx2 { - break - } - } - r = math.Sqrt(f1*f1 + g1*g1) - cs = f1 / r - sn = g1 / r - for i := 0; i < count; i++ { - r *= safmx2 - } - } else if scale <= safmn2 { - var count int - for { - count++ - f1 *= safmx2 - g1 *= safmx2 - scale = math.Max(math.Abs(f1), math.Abs(g1)) - if scale >= safmn2 { - break - } - } - r = math.Sqrt(f1*f1 + g1*g1) - cs = f1 / r - sn = g1 / r - for i := 0; i < count; i++ { - r *= safmn2 - } - } else { - r = math.Sqrt(f1*f1 + g1*g1) - cs = f1 / r - sn = g1 / r - } - if math.Abs(f) > math.Abs(g) && cs < 0 { - cs *= -1 - sn *= -1 - r *= -1 - } - return cs, sn, r -} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlas2.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlas2.go deleted file mode 100644 index 9922b4aa7..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/dlas2.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright ©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import "math" - -// Dlas2 computes the singular values of the 2×2 matrix defined by -// [F G] -// [0 H] -// The smaller and larger singular values are returned in that order. -// -// Dlas2 is an internal routine. It is exported for testing purposes. 
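Worked calls of Dlartg and Dlas2 above, assuming the vendored import path, with inputs chosen so the outputs are exact:

package main

import (
	"fmt"

	"gonum.org/v1/gonum/lapack/gonum"
)

func main() {
	impl := gonum.Implementation{}
	cs, sn, r := impl.Dlartg(4, 3)
	fmt.Println(cs, sn, r) // 0.8 0.6 5: [cs sn; -sn cs] * (4, 3)^T = (5, 0)^T
	ssmin, ssmax := impl.Dlas2(3, 0, 4)
	fmt.Println(ssmin, ssmax) // 3 4: with G = 0 the matrix is already diagonal
}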
-func (impl Implementation) Dlas2(f, g, h float64) (ssmin, ssmax float64) { - fa := math.Abs(f) - ga := math.Abs(g) - ha := math.Abs(h) - fhmin := math.Min(fa, ha) - fhmax := math.Max(fa, ha) - if fhmin == 0 { - if fhmax == 0 { - return 0, ga - } - v := math.Min(fhmax, ga) / math.Max(fhmax, ga) - return 0, math.Max(fhmax, ga) * math.Sqrt(1+v*v) - } - if ga < fhmax { - as := 1 + fhmin/fhmax - at := (fhmax - fhmin) / fhmax - au := (ga / fhmax) * (ga / fhmax) - c := 2 / (math.Sqrt(as*as+au) + math.Sqrt(at*at+au)) - return fhmin * c, fhmax / c - } - au := fhmax / ga - if au == 0 { - return fhmin * fhmax / ga, ga - } - as := 1 + fhmin/fhmax - at := (fhmax - fhmin) / fhmax - c := 1 / (math.Sqrt(1+(as*au)*(as*au)) + math.Sqrt(1+(at*au)*(at*au))) - return 2 * (fhmin * c) * au, ga / (c + c) -} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlascl.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlascl.go deleted file mode 100644 index 61c4eb79c..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/dlascl.go +++ /dev/null @@ -1,111 +0,0 @@ -// Copyright ©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import ( - "math" - - "gonum.org/v1/gonum/lapack" -) - -// Dlascl multiplies an m×n matrix by the scalar cto/cfrom. -// -// cfrom must not be zero, and cto and cfrom must not be NaN, otherwise Dlascl -// will panic. -// -// Dlascl is an internal routine. It is exported for testing purposes. -func (impl Implementation) Dlascl(kind lapack.MatrixType, kl, ku int, cfrom, cto float64, m, n int, a []float64, lda int) { - switch kind { - default: - panic(badMatrixType) - case 'H', 'B', 'Q', 'Z': // See dlascl.f. - panic("not implemented") - case lapack.General, lapack.UpperTri, lapack.LowerTri: - if lda < max(1, n) { - panic(badLdA) - } - } - switch { - case cfrom == 0: - panic(zeroCFrom) - case math.IsNaN(cfrom): - panic(nanCFrom) - case math.IsNaN(cto): - panic(nanCTo) - case m < 0: - panic(mLT0) - case n < 0: - panic(nLT0) - } - - if n == 0 || m == 0 { - return - } - - switch kind { - case lapack.General, lapack.UpperTri, lapack.LowerTri: - if len(a) < (m-1)*lda+n { - panic(shortA) - } - } - - smlnum := dlamchS - bignum := 1 / smlnum - cfromc := cfrom - ctoc := cto - cfrom1 := cfromc * smlnum - for { - var done bool - var mul, ctol float64 - if cfrom1 == cfromc { - // cfromc is inf. - mul = ctoc / cfromc - done = true - ctol = ctoc - } else { - ctol = ctoc / bignum - if ctol == ctoc { - // ctoc is either 0 or inf. 
- mul = ctoc - done = true - cfromc = 1 - } else if math.Abs(cfrom1) > math.Abs(ctoc) && ctoc != 0 { - mul = smlnum - done = false - cfromc = cfrom1 - } else if math.Abs(ctol) > math.Abs(cfromc) { - mul = bignum - done = false - ctoc = ctol - } else { - mul = ctoc / cfromc - done = true - } - } - switch kind { - case lapack.General: - for i := 0; i < m; i++ { - for j := 0; j < n; j++ { - a[i*lda+j] = a[i*lda+j] * mul - } - } - case lapack.UpperTri: - for i := 0; i < m; i++ { - for j := i; j < n; j++ { - a[i*lda+j] = a[i*lda+j] * mul - } - } - case lapack.LowerTri: - for i := 0; i < m; i++ { - for j := 0; j <= min(i, n-1); j++ { - a[i*lda+j] = a[i*lda+j] * mul - } - } - } - if done { - break - } - } -} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlaset.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlaset.go deleted file mode 100644 index b4e63916f..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/dlaset.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright ©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import "gonum.org/v1/gonum/blas" - -// Dlaset sets the off-diagonal elements of A to alpha, and the diagonal -// elements to beta. If uplo == blas.Upper, only the elements in the upper -// triangular part are set. If uplo == blas.Lower, only the elements in the -// lower triangular part are set. If uplo is otherwise, all of the elements of A -// are set. -// -// Dlaset is an internal routine. It is exported for testing purposes. -func (impl Implementation) Dlaset(uplo blas.Uplo, m, n int, alpha, beta float64, a []float64, lda int) { - switch { - case m < 0: - panic(mLT0) - case n < 0: - panic(nLT0) - case lda < max(1, n): - panic(badLdA) - } - - minmn := min(m, n) - if minmn == 0 { - return - } - - if len(a) < (m-1)*lda+n { - panic(shortA) - } - - if uplo == blas.Upper { - for i := 0; i < m; i++ { - for j := i + 1; j < n; j++ { - a[i*lda+j] = alpha - } - } - } else if uplo == blas.Lower { - for i := 0; i < m; i++ { - for j := 0; j < min(i+1, n); j++ { - a[i*lda+j] = alpha - } - } - } else { - for i := 0; i < m; i++ { - for j := 0; j < n; j++ { - a[i*lda+j] = alpha - } - } - } - for i := 0; i < minmn; i++ { - a[i*lda+i] = beta - } -} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlasq1.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlasq1.go deleted file mode 100644 index 1f1d1dc42..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/dlasq1.go +++ /dev/null @@ -1,100 +0,0 @@ -// Copyright ©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import ( - "math" - - "gonum.org/v1/gonum/blas/blas64" - "gonum.org/v1/gonum/lapack" -) - -// Dlasq1 computes the singular values of an n×n bidiagonal matrix with diagonal -// d and off-diagonal e. On exit, d contains the singular values in decreasing -// order, and e is overwritten. d must have length at least n, e must have -// length at least n-1, and the input work must have length at least 4*n. Dlasq1 -// will panic if these conditions are not met. -// -// Dlasq1 is an internal routine. It is exported for testing purposes. 
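A small sketch combining the two routines above: Dlaset builds A = 4*I, then Dlascl multiplies by cto/cfrom = 1/4. The kl and ku arguments matter only for the banded kinds (unimplemented here), so zeros are passed:

package main

import (
	"fmt"

	"gonum.org/v1/gonum/blas"
	"gonum.org/v1/gonum/lapack"
	"gonum.org/v1/gonum/lapack/gonum"
)

func main() {
	impl := gonum.Implementation{}
	n := 3
	a := make([]float64, n*n)
	impl.Dlaset(blas.All, n, n, 0, 4, a, n)             // A = 4*I
	impl.Dlascl(lapack.General, 0, 0, 4, 1, n, n, a, n) // A *= 1/4
	fmt.Println(a) // [1 0 0 0 1 0 0 0 1]
}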
-func (impl Implementation) Dlasq1(n int, d, e, work []float64) (info int) { - if n < 0 { - panic(nLT0) - } - - if n == 0 { - return info - } - - switch { - case len(d) < n: - panic(shortD) - case len(e) < n-1: - panic(shortE) - case len(work) < 4*n: - panic(shortWork) - } - - if n == 1 { - d[0] = math.Abs(d[0]) - return info - } - - if n == 2 { - d[1], d[0] = impl.Dlas2(d[0], e[0], d[1]) - return info - } - - // Estimate the largest singular value. - var sigmx float64 - for i := 0; i < n-1; i++ { - d[i] = math.Abs(d[i]) - sigmx = math.Max(sigmx, math.Abs(e[i])) - } - d[n-1] = math.Abs(d[n-1]) - // Early return if sigmx is zero (matrix is already diagonal). - if sigmx == 0 { - impl.Dlasrt(lapack.SortDecreasing, n, d) - return info - } - - for i := 0; i < n; i++ { - sigmx = math.Max(sigmx, d[i]) - } - - // Copy D and E into WORK (in the Z format) and scale (squaring the - // input data makes scaling by a power of the radix pointless). - - eps := dlamchP - safmin := dlamchS - scale := math.Sqrt(eps / safmin) - bi := blas64.Implementation() - bi.Dcopy(n, d, 1, work, 2) - bi.Dcopy(n-1, e, 1, work[1:], 2) - impl.Dlascl(lapack.General, 0, 0, sigmx, scale, 2*n-1, 1, work, 1) - - // Compute the q's and e's. - for i := 0; i < 2*n-1; i++ { - work[i] *= work[i] - } - work[2*n-1] = 0 - - info = impl.Dlasq2(n, work) - if info == 0 { - for i := 0; i < n; i++ { - d[i] = math.Sqrt(work[i]) - } - impl.Dlascl(lapack.General, 0, 0, scale, sigmx, n, 1, d, 1) - } else if info == 2 { - // Maximum number of iterations exceeded. Move data from work - // into D and E so the calling subroutine can try to finish. - for i := 0; i < n; i++ { - d[i] = math.Sqrt(work[2*i]) - e[i] = math.Sqrt(work[2*i+1]) - } - impl.Dlascl(lapack.General, 0, 0, scale, sigmx, n, 1, d, 1) - impl.Dlascl(lapack.General, 0, 0, scale, sigmx, n, 1, e, 1) - } - return info -} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlasq2.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlasq2.go deleted file mode 100644 index fd24a5509..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/dlasq2.go +++ /dev/null @@ -1,369 +0,0 @@ -// Copyright ©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import ( - "math" - - "gonum.org/v1/gonum/lapack" -) - -// Dlasq2 computes all the eigenvalues of the symmetric positive -// definite tridiagonal matrix associated with the qd array Z. Eigevalues -// are computed to high relative accuracy avoiding denormalization, underflow -// and overflow. -// -// To see the relation of Z to the tridiagonal matrix, let L be a -// unit lower bidiagonal matrix with sub-diagonals Z(2,4,6,,..) and -// let U be an upper bidiagonal matrix with 1's above and diagonal -// Z(1,3,5,,..). The tridiagonal is L*U or, if you prefer, the -// symmetric tridiagonal to which it is similar. -// -// info returns a status error. The return codes mean as follows: -// 0: The algorithm completed successfully. -// 1: A split was marked by a positive value in e. -// 2: Current block of Z not diagonalized after 100*n iterations (in inner -// while loop). On exit Z holds a qd array with the same eigenvalues as -// the given Z. -// 3: Termination criterion of outer while loop not met (program created more -// than N unreduced blocks). -// -// z must have length at least 4*n, and must not contain any negative elements. -// Dlasq2 will panic otherwise. -// -// Dlasq2 is an internal routine. It is exported for testing purposes. 
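A runnable Dlasq1 sketch, assuming the vendored import path: the singular values of the upper-bidiagonal matrix [3 4; 0 5] are 3*sqrt(5) and sqrt(5), returned in d in decreasing order:

package main

import (
	"fmt"

	"gonum.org/v1/gonum/lapack/gonum"
)

func main() {
	impl := gonum.Implementation{}
	n := 2
	d := []float64{3, 5} // diagonal
	e := []float64{4}    // off-diagonal
	work := make([]float64, 4*n)
	info := impl.Dlasq1(n, d, e, work)
	fmt.Println(info, d) // 0 [≈6.7082 ≈2.2361]
}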
-func (impl Implementation) Dlasq2(n int, z []float64) (info int) { - if n < 0 { - panic(nLT0) - } - - if n == 0 { - return info - } - - if len(z) < 4*n { - panic(shortZ) - } - - if n == 1 { - if z[0] < 0 { - panic(negZ) - } - return info - } - - const cbias = 1.5 - - eps := dlamchP - safmin := dlamchS - tol := eps * 100 - tol2 := tol * tol - if n == 2 { - if z[1] < 0 || z[2] < 0 { - panic(negZ) - } else if z[2] > z[0] { - z[0], z[2] = z[2], z[0] - } - z[4] = z[0] + z[1] + z[2] - if z[1] > z[2]*tol2 { - t := 0.5 * (z[0] - z[2] + z[1]) - s := z[2] * (z[1] / t) - if s <= t { - s = z[2] * (z[1] / (t * (1 + math.Sqrt(1+s/t)))) - } else { - s = z[2] * (z[1] / (t + math.Sqrt(t)*math.Sqrt(t+s))) - } - t = z[0] + s + z[1] - z[2] *= z[0] / t - z[0] = t - } - z[1] = z[2] - z[5] = z[1] + z[0] - return info - } - // Check for negative data and compute sums of q's and e's. - z[2*n-1] = 0 - emin := z[1] - var d, e, qmax float64 - var i1, n1 int - for k := 0; k < 2*(n-1); k += 2 { - if z[k] < 0 || z[k+1] < 0 { - panic(negZ) - } - d += z[k] - e += z[k+1] - qmax = math.Max(qmax, z[k]) - emin = math.Min(emin, z[k+1]) - } - if z[2*(n-1)] < 0 { - panic(negZ) - } - d += z[2*(n-1)] - // Check for diagonality. - if e == 0 { - for k := 1; k < n; k++ { - z[k] = z[2*k] - } - impl.Dlasrt(lapack.SortDecreasing, n, z) - z[2*(n-1)] = d - return info - } - trace := d + e - // Check for zero data. - if trace == 0 { - z[2*(n-1)] = 0 - return info - } - // Rearrange data for locality: Z=(q1,qq1,e1,ee1,q2,qq2,e2,ee2,...). - for k := 2 * n; k >= 2; k -= 2 { - z[2*k-1] = 0 - z[2*k-2] = z[k-1] - z[2*k-3] = 0 - z[2*k-4] = z[k-2] - } - i0 := 0 - n0 := n - 1 - - // Reverse the qd-array, if warranted. - // z[4*i0-3] --> z[4*(i0+1)-3-1] --> z[4*i0] - if cbias*z[4*i0] < z[4*n0] { - ipn4Out := 4 * (i0 + n0 + 2) - for i4loop := 4 * (i0 + 1); i4loop <= 2*(i0+n0+1); i4loop += 4 { - i4 := i4loop - 1 - ipn4 := ipn4Out - 1 - z[i4-3], z[ipn4-i4-4] = z[ipn4-i4-4], z[i4-3] - z[i4-1], z[ipn4-i4-6] = z[ipn4-i4-6], z[i4-1] - } - } - - // Initial split checking via dqd and Li's test. - pp := 0 - for k := 0; k < 2; k++ { - d = z[4*n0+pp] - for i4loop := 4*n0 + pp; i4loop >= 4*(i0+1)+pp; i4loop -= 4 { - i4 := i4loop - 1 - if z[i4-1] <= tol2*d { - z[i4-1] = math.Copysign(0, -1) - d = z[i4-3] - } else { - d = z[i4-3] * (d / (d + z[i4-1])) - } - } - // dqd maps Z to ZZ plus Li's test. - emin = z[4*(i0+1)+pp] - d = z[4*i0+pp] - for i4loop := 4*(i0+1) + pp; i4loop <= 4*n0+pp; i4loop += 4 { - i4 := i4loop - 1 - z[i4-2*pp-2] = d + z[i4-1] - if z[i4-1] <= tol2*d { - z[i4-1] = math.Copysign(0, -1) - z[i4-2*pp-2] = d - z[i4-2*pp] = 0 - d = z[i4+1] - } else if safmin*z[i4+1] < z[i4-2*pp-2] && safmin*z[i4-2*pp-2] < z[i4+1] { - tmp := z[i4+1] / z[i4-2*pp-2] - z[i4-2*pp] = z[i4-1] * tmp - d *= tmp - } else { - z[i4-2*pp] = z[i4+1] * (z[i4-1] / z[i4-2*pp-2]) - d = z[i4+1] * (d / z[i4-2*pp-2]) - } - emin = math.Min(emin, z[i4-2*pp]) - } - z[4*(n0+1)-pp-3] = d - - // Now find qmax. - qmax = z[4*(i0+1)-pp-3] - for i4loop := 4*(i0+1) - pp + 2; i4loop <= 4*(n0+1)+pp-2; i4loop += 4 { - i4 := i4loop - 1 - qmax = math.Max(qmax, z[i4]) - } - // Prepare for the next iteration on K. - pp = 1 - pp - } - - // Initialise variables to pass to DLASQ3. - var ttype int - var dmin1, dmin2, dn, dn1, dn2, g, tau float64 - var tempq float64 - iter := 2 - var nFail int - nDiv := 2 * (n0 - i0) - var i4 int -outer: - for iwhila := 1; iwhila <= n+1; iwhila++ { - // Test for completion. - if n0 < 0 { - // Move q's to the front. 
- for k := 1; k < n; k++ { - z[k] = z[4*k] - } - // Sort and compute sum of eigenvalues. - impl.Dlasrt(lapack.SortDecreasing, n, z) - e = 0 - for k := n - 1; k >= 0; k-- { - e += z[k] - } - // Store trace, sum(eigenvalues) and information on performance. - z[2*n] = trace - z[2*n+1] = e - z[2*n+2] = float64(iter) - z[2*n+3] = float64(nDiv) / float64(n*n) - z[2*n+4] = 100 * float64(nFail) / float64(iter) - return info - } - - // While array unfinished do - // e[n0] holds the value of sigma when submatrix in i0:n0 - // splits from the rest of the array, but is negated. - var desig float64 - var sigma float64 - if n0 != n-1 { - sigma = -z[4*(n0+1)-2] - } - if sigma < 0 { - info = 1 - return info - } - // Find last unreduced submatrix's top index i0, find qmax and - // emin. Find Gershgorin-type bound if Q's much greater than E's. - var emax float64 - if n0 > i0 { - emin = math.Abs(z[4*(n0+1)-6]) - } else { - emin = 0 - } - qmin := z[4*(n0+1)-4] - qmax = qmin - zSmall := false - for i4loop := 4 * (n0 + 1); i4loop >= 8; i4loop -= 4 { - i4 = i4loop - 1 - if z[i4-5] <= 0 { - zSmall = true - break - } - if qmin >= 4*emax { - qmin = math.Min(qmin, z[i4-3]) - emax = math.Max(emax, z[i4-5]) - } - qmax = math.Max(qmax, z[i4-7]+z[i4-5]) - emin = math.Min(emin, z[i4-5]) - } - if !zSmall { - i4 = 3 - } - i0 = (i4+1)/4 - 1 - pp = 0 - if n0-i0 > 1 { - dee := z[4*i0] - deemin := dee - kmin := i0 - for i4loop := 4*(i0+1) + 1; i4loop <= 4*(n0+1)-3; i4loop += 4 { - i4 := i4loop - 1 - dee = z[i4] * (dee / (dee + z[i4-2])) - if dee <= deemin { - deemin = dee - kmin = (i4+4)/4 - 1 - } - } - if (kmin-i0)*2 < n0-kmin && deemin <= 0.5*z[4*n0] { - ipn4Out := 4 * (i0 + n0 + 2) - pp = 2 - for i4loop := 4 * (i0 + 1); i4loop <= 2*(i0+n0+1); i4loop += 4 { - i4 := i4loop - 1 - ipn4 := ipn4Out - 1 - z[i4-3], z[ipn4-i4-4] = z[ipn4-i4-4], z[i4-3] - z[i4-2], z[ipn4-i4-3] = z[ipn4-i4-3], z[i4-2] - z[i4-1], z[ipn4-i4-6] = z[ipn4-i4-6], z[i4-1] - z[i4], z[ipn4-i4-5] = z[ipn4-i4-5], z[i4] - } - } - } - // Put -(initial shift) into DMIN. - dmin := -math.Max(0, qmin-2*math.Sqrt(qmin)*math.Sqrt(emax)) - - // Now i0:n0 is unreduced. - // PP = 0 for ping, PP = 1 for pong. - // PP = 2 indicates that flipping was applied to the Z array and - // and that the tests for deflation upon entry in Dlasq3 - // should not be performed. - nbig := 100 * (n0 - i0 + 1) - for iwhilb := 0; iwhilb < nbig; iwhilb++ { - if i0 > n0 { - continue outer - } - - // While submatrix unfinished take a good dqds step. - i0, n0, pp, dmin, sigma, desig, qmax, nFail, iter, nDiv, ttype, dmin1, dmin2, dn, dn1, dn2, g, tau = - impl.Dlasq3(i0, n0, z, pp, dmin, sigma, desig, qmax, nFail, iter, nDiv, ttype, dmin1, dmin2, dn, dn1, dn2, g, tau) - - pp = 1 - pp - // When emin is very small check for splits. - if pp == 0 && n0-i0 >= 3 { - if z[4*(n0+1)-1] <= tol2*qmax || z[4*(n0+1)-2] <= tol2*sigma { - splt := i0 - 1 - qmax = z[4*i0] - emin = z[4*(i0+1)-2] - oldemn := z[4*(i0+1)-1] - for i4loop := 4 * (i0 + 1); i4loop <= 4*(n0-2); i4loop += 4 { - i4 := i4loop - 1 - if z[i4] <= tol2*z[i4-3] || z[i4-1] <= tol2*sigma { - z[i4-1] = -sigma - splt = i4 / 4 - qmax = 0 - emin = z[i4+3] - oldemn = z[i4+4] - } else { - qmax = math.Max(qmax, z[i4+1]) - emin = math.Min(emin, z[i4-1]) - oldemn = math.Min(oldemn, z[i4]) - } - } - z[4*(n0+1)-2] = emin - z[4*(n0+1)-1] = oldemn - i0 = splt + 1 - } - } - } - // Maximum number of iterations exceeded, restore the shift - // sigma and place the new d's and e's in a qd array. - // This might need to be done for several blocks. 
- info = 2 - i1 = i0 - for { - tempq = z[4*i0] - z[4*i0] += sigma - for k := i0 + 1; k <= n0; k++ { - tempe := z[4*(k+1)-6] - z[4*(k+1)-6] *= tempq / z[4*(k+1)-8] - tempq = z[4*k] - z[4*k] += sigma + tempe - z[4*(k+1)-6] - } - // Prepare to do this on the previous block if there is one. - if i1 <= 0 { - break - } - n1 = i1 - 1 - for i1 >= 1 && z[4*(i1+1)-6] >= 0 { - i1 -= 1 - } - sigma = -z[4*(n1+1)-2] - } - for k := 0; k < n; k++ { - z[2*k] = z[4*k] - // Only the block 1..N0 is unfinished. The rest of the e's - // must be essentially zero, although sometimes other data - // has been stored in them. - if k < n0 { - z[2*(k+1)-1] = z[4*(k+1)-1] - } else { - z[2*(k+1)] = 0 - } - } - return info - } - info = 3 - return info -} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlasq3.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlasq3.go deleted file mode 100644 index a05e94ef1..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/dlasq3.go +++ /dev/null @@ -1,172 +0,0 @@ -// Copyright ©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import "math" - -// Dlasq3 checks for deflation, computes a shift (tau) and calls dqds. -// In case of failure it changes shifts, and tries again until output -// is positive. -// -// Dlasq3 is an internal routine. It is exported for testing purposes. -func (impl Implementation) Dlasq3(i0, n0 int, z []float64, pp int, dmin, sigma, desig, qmax float64, nFail, iter, nDiv int, ttype int, dmin1, dmin2, dn, dn1, dn2, g, tau float64) ( - i0Out, n0Out, ppOut int, dminOut, sigmaOut, desigOut, qmaxOut float64, nFailOut, iterOut, nDivOut, ttypeOut int, dmin1Out, dmin2Out, dnOut, dn1Out, dn2Out, gOut, tauOut float64) { - switch { - case i0 < 0: - panic(i0LT0) - case n0 < 0: - panic(n0LT0) - case len(z) < 4*n0: - panic(shortZ) - case pp != 0 && pp != 1 && pp != 2: - panic(badPp) - } - - const cbias = 1.5 - - n0in := n0 - eps := dlamchP - tol := eps * 100 - tol2 := tol * tol - var nn int - var t float64 - for { - if n0 < i0 { - return i0, n0, pp, dmin, sigma, desig, qmax, nFail, iter, nDiv, ttype, dmin1, dmin2, dn, dn1, dn2, g, tau - } - if n0 == i0 { - z[4*(n0+1)-4] = z[4*(n0+1)+pp-4] + sigma - n0-- - continue - } - nn = 4*(n0+1) + pp - 1 - if n0 != i0+1 { - // Check whether e[n0-1] is negligible, 1 eigenvalue. - if z[nn-5] > tol2*(sigma+z[nn-3]) && z[nn-2*pp-4] > tol2*z[nn-7] { - // Check whether e[n0-2] is negligible, 2 eigenvalues. - if z[nn-9] > tol2*sigma && z[nn-2*pp-8] > tol2*z[nn-11] { - break - } - } else { - z[4*(n0+1)-4] = z[4*(n0+1)+pp-4] + sigma - n0-- - continue - } - } - if z[nn-3] > z[nn-7] { - z[nn-3], z[nn-7] = z[nn-7], z[nn-3] - } - t = 0.5 * (z[nn-7] - z[nn-3] + z[nn-5]) - if z[nn-5] > z[nn-3]*tol2 && t != 0 { - s := z[nn-3] * (z[nn-5] / t) - if s <= t { - s = z[nn-3] * (z[nn-5] / (t * (1 + math.Sqrt(1+s/t)))) - } else { - s = z[nn-3] * (z[nn-5] / (t + math.Sqrt(t)*math.Sqrt(t+s))) - } - t = z[nn-7] + (s + z[nn-5]) - z[nn-3] *= z[nn-7] / t - z[nn-7] = t - } - z[4*(n0+1)-8] = z[nn-7] + sigma - z[4*(n0+1)-4] = z[nn-3] + sigma - n0 -= 2 - } - if pp == 2 { - pp = 0 - } - - // Reverse the qd-array, if warranted. 
- if dmin <= 0 || n0 < n0in { - if cbias*z[4*(i0+1)+pp-4] < z[4*(n0+1)+pp-4] { - ipn4Out := 4 * (i0 + n0 + 2) - for j4loop := 4 * (i0 + 1); j4loop <= 2*((i0+1)+(n0+1)-1); j4loop += 4 { - ipn4 := ipn4Out - 1 - j4 := j4loop - 1 - - z[j4-3], z[ipn4-j4-4] = z[ipn4-j4-4], z[j4-3] - z[j4-2], z[ipn4-j4-3] = z[ipn4-j4-3], z[j4-2] - z[j4-1], z[ipn4-j4-6] = z[ipn4-j4-6], z[j4-1] - z[j4], z[ipn4-j4-5] = z[ipn4-j4-5], z[j4] - } - if n0-i0 <= 4 { - z[4*(n0+1)+pp-2] = z[4*(i0+1)+pp-2] - z[4*(n0+1)-pp-1] = z[4*(i0+1)-pp-1] - } - dmin2 = math.Min(dmin2, z[4*(i0+1)-pp-2]) - z[4*(n0+1)+pp-2] = math.Min(math.Min(z[4*(n0+1)+pp-2], z[4*(i0+1)+pp-2]), z[4*(i0+1)+pp+2]) - z[4*(n0+1)-pp-1] = math.Min(math.Min(z[4*(n0+1)-pp-1], z[4*(i0+1)-pp-1]), z[4*(i0+1)-pp+3]) - qmax = math.Max(math.Max(qmax, z[4*(i0+1)+pp-4]), z[4*(i0+1)+pp]) - dmin = math.Copysign(0, -1) // Fortran code has -zero, but -0 in go is 0 - } - } - - // Choose a shift. - tau, ttype, g = impl.Dlasq4(i0, n0, z, pp, n0in, dmin, dmin1, dmin2, dn, dn1, dn2, tau, ttype, g) - - // Call dqds until dmin > 0. -loop: - for { - i0, n0, pp, tau, sigma, dmin, dmin1, dmin2, dn, dn1, dn2 = impl.Dlasq5(i0, n0, z, pp, tau, sigma) - - nDiv += n0 - i0 + 2 - iter++ - switch { - case dmin >= 0 && dmin1 >= 0: - // Success. - goto done - - case dmin < 0 && dmin1 > 0 && z[4*n0-pp-1] < tol*(sigma+dn1) && math.Abs(dn) < tol*sigma: - // Convergence hidden by negative dn. - z[4*n0-pp+1] = 0 - dmin = 0 - goto done - - case dmin < 0: - // Tau too big. Select new Tau and try again. - nFail++ - if ttype < -22 { - // Failed twice. Play it safe. - tau = 0 - } else if dmin1 > 0 { - // Late failure. Gives excellent shift. - tau = (tau + dmin) * (1 - 2*eps) - ttype -= 11 - } else { - // Early failure. Divide by 4. - tau = tau / 4 - ttype -= 12 - } - - case math.IsNaN(dmin): - if tau == 0 { - break loop - } - tau = 0 - - default: - // Possible underflow. Play it safe. - break loop - } - } - - // Risk of underflow. - dmin, dmin1, dmin2, dn, dn1, dn2 = impl.Dlasq6(i0, n0, z, pp) - nDiv += n0 - i0 + 2 - iter++ - tau = 0 - -done: - if tau < sigma { - desig += tau - t = sigma + desig - desig -= t - sigma - } else { - t = sigma + tau - desig += sigma - (t - tau) - } - sigma = t - return i0, n0, pp, dmin, sigma, desig, qmax, nFail, iter, nDiv, ttype, dmin1, dmin2, dn, dn1, dn2, g, tau -} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlasq4.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlasq4.go deleted file mode 100644 index f6dbb31b9..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/dlasq4.go +++ /dev/null @@ -1,249 +0,0 @@ -// Copyright ©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import "math" - -// Dlasq4 computes an approximation to the smallest eigenvalue using values of d -// from the previous transform. -// i0, n0, and n0in are zero-indexed. -// -// Dlasq4 is an internal routine. It is exported for testing purposes. -func (impl Implementation) Dlasq4(i0, n0 int, z []float64, pp int, n0in int, dmin, dmin1, dmin2, dn, dn1, dn2, tau float64, ttype int, g float64) (tauOut float64, ttypeOut int, gOut float64) { - switch { - case i0 < 0: - panic(i0LT0) - case n0 < 0: - panic(n0LT0) - case len(z) < 4*n0: - panic(shortZ) - case pp != 0 && pp != 1: - panic(badPp) - } - - const ( - cnst1 = 0.563 - cnst2 = 1.01 - cnst3 = 1.05 - - cnstthird = 0.333 // TODO(btracey): Fix? 
- ) - // A negative dmin forces the shift to take that absolute value - // ttype records the type of shift. - if dmin <= 0 { - tau = -dmin - ttype = -1 - return tau, ttype, g - } - nn := 4*(n0+1) + pp - 1 // -1 for zero indexing - s := math.NaN() // Poison s so that failure to take a path below is obvious - if n0in == n0 { - // No eigenvalues deflated. - if dmin == dn || dmin == dn1 { - b1 := math.Sqrt(z[nn-3]) * math.Sqrt(z[nn-5]) - b2 := math.Sqrt(z[nn-7]) * math.Sqrt(z[nn-9]) - a2 := z[nn-7] + z[nn-5] - if dmin == dn && dmin1 == dn1 { - gap2 := dmin2 - a2 - dmin2/4 - var gap1 float64 - if gap2 > 0 && gap2 > b2 { - gap1 = a2 - dn - (b2/gap2)*b2 - } else { - gap1 = a2 - dn - (b1 + b2) - } - if gap1 > 0 && gap1 > b1 { - s = math.Max(dn-(b1/gap1)*b1, 0.5*dmin) - ttype = -2 - } else { - s = 0 - if dn > b1 { - s = dn - b1 - } - if a2 > b1+b2 { - s = math.Min(s, a2-(b1+b2)) - } - s = math.Max(s, cnstthird*dmin) - ttype = -3 - } - } else { - ttype = -4 - s = dmin / 4 - var gam float64 - var np int - if dmin == dn { - gam = dn - a2 = 0 - if z[nn-5] > z[nn-7] { - return tau, ttype, g - } - b2 = z[nn-5] / z[nn-7] - np = nn - 9 - } else { - np = nn - 2*pp - gam = dn1 - if z[np-4] > z[np-2] { - return tau, ttype, g - } - a2 = z[np-4] / z[np-2] - if z[nn-9] > z[nn-11] { - return tau, ttype, g - } - b2 = z[nn-9] / z[nn-11] - np = nn - 13 - } - // Approximate contribution to norm squared from i < nn-1. - a2 += b2 - for i4loop := np + 1; i4loop >= 4*(i0+1)-1+pp; i4loop -= 4 { - i4 := i4loop - 1 - if b2 == 0 { - break - } - b1 = b2 - if z[i4] > z[i4-2] { - return tau, ttype, g - } - b2 *= z[i4] / z[i4-2] - a2 += b2 - if 100*math.Max(b2, b1) < a2 || cnst1 < a2 { - break - } - } - a2 *= cnst3 - // Rayleigh quotient residual bound. - if a2 < cnst1 { - s = gam * (1 - math.Sqrt(a2)) / (1 + a2) - } - } - } else if dmin == dn2 { - ttype = -5 - s = dmin / 4 - // Compute contribution to norm squared from i > nn-2. - np := nn - 2*pp - b1 := z[np-2] - b2 := z[np-6] - gam := dn2 - if z[np-8] > b2 || z[np-4] > b1 { - return tau, ttype, g - } - a2 := (z[np-8] / b2) * (1 + z[np-4]/b1) - // Approximate contribution to norm squared from i < nn-2. - if n0-i0 > 2 { - b2 = z[nn-13] / z[nn-15] - a2 += b2 - for i4loop := (nn + 1) - 17; i4loop >= 4*(i0+1)-1+pp; i4loop -= 4 { - i4 := i4loop - 1 - if b2 == 0 { - break - } - b1 = b2 - if z[i4] > z[i4-2] { - return tau, ttype, g - } - b2 *= z[i4] / z[i4-2] - a2 += b2 - if 100*math.Max(b2, b1) < a2 || cnst1 < a2 { - break - } - } - a2 *= cnst3 - } - if a2 < cnst1 { - s = gam * (1 - math.Sqrt(a2)) / (1 + a2) - } - } else { - // Case 6, no information to guide us. - if ttype == -6 { - g += cnstthird * (1 - g) - } else if ttype == -18 { - g = cnstthird / 4 - } else { - g = 1.0 / 4 - } - s = g * dmin - ttype = -6 - } - } else if n0in == (n0 + 1) { - // One eigenvalue just deflated. Use DMIN1, DN1 for DMIN and DN. 
- if dmin1 == dn1 && dmin2 == dn2 { - ttype = -7 - s = cnstthird * dmin1 - if z[nn-5] > z[nn-7] { - return tau, ttype, g - } - b1 := z[nn-5] / z[nn-7] - b2 := b1 - if b2 != 0 { - for i4loop := 4*(n0+1) - 9 + pp; i4loop >= 4*(i0+1)-1+pp; i4loop -= 4 { - i4 := i4loop - 1 - a2 := b1 - if z[i4] > z[i4-2] { - return tau, ttype, g - } - b1 *= z[i4] / z[i4-2] - b2 += b1 - if 100*math.Max(b1, a2) < b2 { - break - } - } - } - b2 = math.Sqrt(cnst3 * b2) - a2 := dmin1 / (1 + b2*b2) - gap2 := 0.5*dmin2 - a2 - if gap2 > 0 && gap2 > b2*a2 { - s = math.Max(s, a2*(1-cnst2*a2*(b2/gap2)*b2)) - } else { - s = math.Max(s, a2*(1-cnst2*b2)) - ttype = -8 - } - } else { - s = dmin1 / 4 - if dmin1 == dn1 { - s = 0.5 * dmin1 - } - ttype = -9 - } - } else if n0in == (n0 + 2) { - // Two eigenvalues deflated. Use DMIN2, DN2 for DMIN and DN. - if dmin2 == dn2 && 2*z[nn-5] < z[nn-7] { - ttype = -10 - s = cnstthird * dmin2 - if z[nn-5] > z[nn-7] { - return tau, ttype, g - } - b1 := z[nn-5] / z[nn-7] - b2 := b1 - if b2 != 0 { - for i4loop := 4*(n0+1) - 9 + pp; i4loop >= 4*(i0+1)-1+pp; i4loop -= 4 { - i4 := i4loop - 1 - if z[i4] > z[i4-2] { - return tau, ttype, g - } - b1 *= z[i4] / z[i4-2] - b2 += b1 - if 100*b1 < b2 { - break - } - } - } - b2 = math.Sqrt(cnst3 * b2) - a2 := dmin2 / (1 + b2*b2) - gap2 := z[nn-7] + z[nn-9] - math.Sqrt(z[nn-11])*math.Sqrt(z[nn-9]) - a2 - if gap2 > 0 && gap2 > b2*a2 { - s = math.Max(s, a2*(1-cnst2*a2*(b2/gap2)*b2)) - } else { - s = math.Max(s, a2*(1-cnst2*b2)) - } - } else { - s = dmin2 / 4 - ttype = -11 - } - } else if n0in > n0+2 { - // Case 12, more than two eigenvalues deflated. No information. - s = 0 - ttype = -12 - } - tau = s - return tau, ttype, g -} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlasq5.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlasq5.go deleted file mode 100644 index d3826d918..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/dlasq5.go +++ /dev/null @@ -1,140 +0,0 @@ -// Copyright ©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import "math" - -// Dlasq5 computes one dqds transform in ping-pong form. -// i0 and n0 are zero-indexed. -// -// Dlasq5 is an internal routine. It is exported for testing purposes. -func (impl Implementation) Dlasq5(i0, n0 int, z []float64, pp int, tau, sigma float64) (i0Out, n0Out, ppOut int, tauOut, sigmaOut, dmin, dmin1, dmin2, dn, dnm1, dnm2 float64) { - // The lapack function has inputs for ieee and eps, but Go requires ieee so - // these are unnecessary. - - switch { - case i0 < 0: - panic(i0LT0) - case n0 < 0: - panic(n0LT0) - case len(z) < 4*n0: - panic(shortZ) - case pp != 0 && pp != 1: - panic(badPp) - } - - if n0-i0-1 <= 0 { - return i0, n0, pp, tau, sigma, dmin, dmin1, dmin2, dn, dnm1, dnm2 - } - - eps := dlamchP - dthresh := eps * (sigma + tau) - if tau < dthresh*0.5 { - tau = 0 - } - var j4 int - var emin float64 - if tau != 0 { - j4 = 4*i0 + pp - emin = z[j4+4] - d := z[j4] - tau - dmin = d - // In the reference there are code paths that actually return this value. 
- // dmin1 = -z[j4] - if pp == 0 { - for j4loop := 4 * (i0 + 1); j4loop <= 4*((n0+1)-3); j4loop += 4 { - j4 := j4loop - 1 - z[j4-2] = d + z[j4-1] - tmp := z[j4+1] / z[j4-2] - d = d*tmp - tau - dmin = math.Min(dmin, d) - z[j4] = z[j4-1] * tmp - emin = math.Min(z[j4], emin) - } - } else { - for j4loop := 4 * (i0 + 1); j4loop <= 4*((n0+1)-3); j4loop += 4 { - j4 := j4loop - 1 - z[j4-3] = d + z[j4] - tmp := z[j4+2] / z[j4-3] - d = d*tmp - tau - dmin = math.Min(dmin, d) - z[j4-1] = z[j4] * tmp - emin = math.Min(z[j4-1], emin) - } - } - // Unroll the last two steps. - dnm2 = d - dmin2 = dmin - j4 = 4*((n0+1)-2) - pp - 1 - j4p2 := j4 + 2*pp - 1 - z[j4-2] = dnm2 + z[j4p2] - z[j4] = z[j4p2+2] * (z[j4p2] / z[j4-2]) - dnm1 = z[j4p2+2]*(dnm2/z[j4-2]) - tau - dmin = math.Min(dmin, dnm1) - - dmin1 = dmin - j4 += 4 - j4p2 = j4 + 2*pp - 1 - z[j4-2] = dnm1 + z[j4p2] - z[j4] = z[j4p2+2] * (z[j4p2] / z[j4-2]) - dn = z[j4p2+2]*(dnm1/z[j4-2]) - tau - dmin = math.Min(dmin, dn) - } else { - // This is the version that sets d's to zero if they are small enough. - j4 = 4*(i0+1) + pp - 4 - emin = z[j4+4] - d := z[j4] - tau - dmin = d - // In the reference there are code paths that actually return this value. - // dmin1 = -z[j4] - if pp == 0 { - for j4loop := 4 * (i0 + 1); j4loop <= 4*((n0+1)-3); j4loop += 4 { - j4 := j4loop - 1 - z[j4-2] = d + z[j4-1] - tmp := z[j4+1] / z[j4-2] - d = d*tmp - tau - if d < dthresh { - d = 0 - } - dmin = math.Min(dmin, d) - z[j4] = z[j4-1] * tmp - emin = math.Min(z[j4], emin) - } - } else { - for j4loop := 4 * (i0 + 1); j4loop <= 4*((n0+1)-3); j4loop += 4 { - j4 := j4loop - 1 - z[j4-3] = d + z[j4] - tmp := z[j4+2] / z[j4-3] - d = d*tmp - tau - if d < dthresh { - d = 0 - } - dmin = math.Min(dmin, d) - z[j4-1] = z[j4] * tmp - emin = math.Min(z[j4-1], emin) - } - } - // Unroll the last two steps. - dnm2 = d - dmin2 = dmin - j4 = 4*((n0+1)-2) - pp - 1 - j4p2 := j4 + 2*pp - 1 - z[j4-2] = dnm2 + z[j4p2] - z[j4] = z[j4p2+2] * (z[j4p2] / z[j4-2]) - dnm1 = z[j4p2+2]*(dnm2/z[j4-2]) - tau - dmin = math.Min(dmin, dnm1) - - dmin1 = dmin - j4 += 4 - j4p2 = j4 + 2*pp - 1 - z[j4-2] = dnm1 + z[j4p2] - z[j4] = z[j4p2+2] * (z[j4p2] / z[j4-2]) - dn = z[j4p2+2]*(dnm1/z[j4-2]) - tau - dmin = math.Min(dmin, dn) - } - z[j4+2] = dn - z[4*(n0+1)-pp-1] = emin - return i0, n0, pp, tau, sigma, dmin, dmin1, dmin2, dn, dnm1, dnm2 -} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlasq6.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlasq6.go deleted file mode 100644 index 54bf58756..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/dlasq6.go +++ /dev/null @@ -1,118 +0,0 @@ -// Copyright ©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import "math" - -// Dlasq6 computes one dqd transform in ping-pong form with protection against -// overflow and underflow. z has length at least 4*(n0+1) and holds the qd array. -// i0 is the zero-based first index. -// n0 is the zero-based last index. -// -// Dlasq6 is an internal routine. It is exported for testing purposes. 
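Since Dlasq6 operates on the raw qd array, a hypothetical single dqd step may help; this assumes the ping layout (pp = 0) documented in dlasq2.go above, Z = (q1, qq1, e1, ee1, q2, qq2, e2, ee2, ...), with made-up values, and is not part of the patch. The transformed q's and e's land in the qq/ee slots.

    package main

    import (
        "fmt"

        "gonum.org/v1/gonum/lapack/gonum"
    )

    func main() {
        // Three q's (4, 3, 2) and two e's (1, 1) in the ping layout;
        // the qq/ee slots start out zero and receive the output.
        z := []float64{4, 0, 1, 0, 3, 0, 1, 0, 2, 0, 0, 0}

        var impl gonum.Implementation
        // i0 = 0 and n0 = 2 are the zero-based first and last indices;
        // pp = 0 selects the ping half of the array.
        dmin, dmin1, dmin2, dn, dnm1, dnm2 := impl.Dlasq6(0, 2, z, 0)
        fmt.Println(dmin, dmin1, dmin2, dn, dnm1, dnm2)
    }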
-func (impl Implementation) Dlasq6(i0, n0 int, z []float64, pp int) (dmin, dmin1, dmin2, dn, dnm1, dnm2 float64) { - switch { - case i0 < 0: - panic(i0LT0) - case n0 < 0: - panic(n0LT0) - case len(z) < 4*n0: - panic(shortZ) - case pp != 0 && pp != 1: - panic(badPp) - } - - if n0-i0-1 <= 0 { - return dmin, dmin1, dmin2, dn, dnm1, dnm2 - } - - safmin := dlamchS - j4 := 4*(i0+1) + pp - 4 // -4 rather than -3 for zero indexing - emin := z[j4+4] - d := z[j4] - dmin = d - if pp == 0 { - for j4loop := 4 * (i0 + 1); j4loop <= 4*((n0+1)-3); j4loop += 4 { - j4 := j4loop - 1 // Translate back to zero-indexed. - z[j4-2] = d + z[j4-1] - if z[j4-2] == 0 { - z[j4] = 0 - d = z[j4+1] - dmin = d - emin = 0 - } else if safmin*z[j4+1] < z[j4-2] && safmin*z[j4-2] < z[j4+1] { - tmp := z[j4+1] / z[j4-2] - z[j4] = z[j4-1] * tmp - d *= tmp - } else { - z[j4] = z[j4+1] * (z[j4-1] / z[j4-2]) - d = z[j4+1] * (d / z[j4-2]) - } - dmin = math.Min(dmin, d) - emin = math.Min(emin, z[j4]) - } - } else { - for j4loop := 4 * (i0 + 1); j4loop <= 4*((n0+1)-3); j4loop += 4 { - j4 := j4loop - 1 - z[j4-3] = d + z[j4] - if z[j4-3] == 0 { - z[j4-1] = 0 - d = z[j4+2] - dmin = d - emin = 0 - } else if safmin*z[j4+2] < z[j4-3] && safmin*z[j4-3] < z[j4+2] { - tmp := z[j4+2] / z[j4-3] - z[j4-1] = z[j4] * tmp - d *= tmp - } else { - z[j4-1] = z[j4+2] * (z[j4] / z[j4-3]) - d = z[j4+2] * (d / z[j4-3]) - } - dmin = math.Min(dmin, d) - emin = math.Min(emin, z[j4-1]) - } - } - // Unroll last two steps. - dnm2 = d - dmin2 = dmin - j4 = 4*(n0-1) - pp - 1 - j4p2 := j4 + 2*pp - 1 - z[j4-2] = dnm2 + z[j4p2] - if z[j4-2] == 0 { - z[j4] = 0 - dnm1 = z[j4p2+2] - dmin = dnm1 - emin = 0 - } else if safmin*z[j4p2+2] < z[j4-2] && safmin*z[j4-2] < z[j4p2+2] { - tmp := z[j4p2+2] / z[j4-2] - z[j4] = z[j4p2] * tmp - dnm1 = dnm2 * tmp - } else { - z[j4] = z[j4p2+2] * (z[j4p2] / z[j4-2]) - dnm1 = z[j4p2+2] * (dnm2 / z[j4-2]) - } - dmin = math.Min(dmin, dnm1) - dmin1 = dmin - j4 += 4 - j4p2 = j4 + 2*pp - 1 - z[j4-2] = dnm1 + z[j4p2] - if z[j4-2] == 0 { - z[j4] = 0 - dn = z[j4p2+2] - dmin = dn - emin = 0 - } else if safmin*z[j4p2+2] < z[j4-2] && safmin*z[j4-2] < z[j4p2+2] { - tmp := z[j4p2+2] / z[j4-2] - z[j4] = z[j4p2] * tmp - dn = dnm1 * tmp - } else { - z[j4] = z[j4p2+2] * (z[j4p2] / z[j4-2]) - dn = z[j4p2+2] * (dnm1 / z[j4-2]) - } - dmin = math.Min(dmin, dn) - z[j4+2] = dn - z[4*(n0+1)-pp-1] = emin - return dmin, dmin1, dmin2, dn, dnm1, dnm2 -} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlasr.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlasr.go deleted file mode 100644 index a7dbe002d..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/dlasr.go +++ /dev/null @@ -1,279 +0,0 @@ -// Copyright ©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import ( - "gonum.org/v1/gonum/blas" - "gonum.org/v1/gonum/lapack" -) - -// Dlasr applies a sequence of plane rotations to the m×n matrix A. This series -// of plane rotations is implicitly represented by a matrix P. P is multiplied -// by a depending on the value of side -- A = P * A if side == lapack.Left, -// A = A * P^T if side == lapack.Right. -// -// The exact value of P depends on the value of pivot, but in all cases P is -// implicitly represented by a series of 2×2 rotation matrices. The entries of -// rotation matrix k are defined by s[k] and c[k] -// R(k) = [ c[k] s[k]] -// [-s[k] c[k]] -// If direct == lapack.Forward, the rotation matrices are applied as -// P = P(z-1) * ...
* P(2) * P(1), while if direct == lapack.Backward they are -// applied as P = P(1) * P(2) * ... * P(n). -// -// pivot defines the mapping of the elements in R(k) to P(k). -// If pivot == lapack.Variable, the rotation is performed for the (k, k+1) plane. -// P(k) = [1 ] -// [ ... ] -// [ 1 ] -// [ c[k] s[k] ] -// [ -s[k] c[k] ] -// [ 1 ] -// [ ... ] -// [ 1] -// if pivot == lapack.Top, the rotation is performed for the (1, k+1) plane, -// P(k) = [c[k] s[k] ] -// [ 1 ] -// [ ... ] -// [ 1 ] -// [-s[k] c[k] ] -// [ 1 ] -// [ ... ] -// [ 1] -// and if pivot == lapack.Bottom, the rotation is performed for the (k, z) plane. -// P(k) = [1 ] -// [ ... ] -// [ 1 ] -// [ c[k] s[k]] -// [ 1 ] -// [ ... ] -// [ 1 ] -// [ -s[k] c[k]] -// s and c have length m - 1 if side == blas.Left, and n - 1 if side == blas.Right. -// -// Dlasr is an internal routine. It is exported for testing purposes. -func (impl Implementation) Dlasr(side blas.Side, pivot lapack.Pivot, direct lapack.Direct, m, n int, c, s, a []float64, lda int) { - switch { - case side != blas.Left && side != blas.Right: - panic(badSide) - case pivot != lapack.Variable && pivot != lapack.Top && pivot != lapack.Bottom: - panic(badPivot) - case direct != lapack.Forward && direct != lapack.Backward: - panic(badDirect) - case m < 0: - panic(mLT0) - case n < 0: - panic(nLT0) - case lda < max(1, n): - panic(badLdA) - } - - // Quick return if possible. - if m == 0 || n == 0 { - return - } - - if side == blas.Left { - if len(c) < m-1 { - panic(shortC) - } - if len(s) < m-1 { - panic(shortS) - } - } else { - if len(c) < n-1 { - panic(shortC) - } - if len(s) < n-1 { - panic(shortS) - } - } - if len(a) < (m-1)*lda+n { - panic(shortA) - } - - if side == blas.Left { - if pivot == lapack.Variable { - if direct == lapack.Forward { - for j := 0; j < m-1; j++ { - ctmp := c[j] - stmp := s[j] - if ctmp != 1 || stmp != 0 { - for i := 0; i < n; i++ { - tmp2 := a[j*lda+i] - tmp := a[(j+1)*lda+i] - a[(j+1)*lda+i] = ctmp*tmp - stmp*tmp2 - a[j*lda+i] = stmp*tmp + ctmp*tmp2 - } - } - } - return - } - for j := m - 2; j >= 0; j-- { - ctmp := c[j] - stmp := s[j] - if ctmp != 1 || stmp != 0 { - for i := 0; i < n; i++ { - tmp2 := a[j*lda+i] - tmp := a[(j+1)*lda+i] - a[(j+1)*lda+i] = ctmp*tmp - stmp*tmp2 - a[j*lda+i] = stmp*tmp + ctmp*tmp2 - } - } - } - return - } else if pivot == lapack.Top { - if direct == lapack.Forward { - for j := 1; j < m; j++ { - ctmp := c[j-1] - stmp := s[j-1] - if ctmp != 1 || stmp != 0 { - for i := 0; i < n; i++ { - tmp := a[j*lda+i] - tmp2 := a[i] - a[j*lda+i] = ctmp*tmp - stmp*tmp2 - a[i] = stmp*tmp + ctmp*tmp2 - } - } - } - return - } - for j := m - 1; j >= 1; j-- { - ctmp := c[j-1] - stmp := s[j-1] - if ctmp != 1 || stmp != 0 { - for i := 0; i < n; i++ { - tmp := a[j*lda+i] - tmp2 := a[i] - a[j*lda+i] = ctmp*tmp - stmp*tmp2 - a[i] = stmp*tmp + ctmp*tmp2 - } - } - } - return - } - if direct == lapack.Forward { - for j := 0; j < m-1; j++ { - ctmp := c[j] - stmp := s[j] - if ctmp != 1 || stmp != 0 { - for i := 0; i < n; i++ { - tmp := a[j*lda+i] - tmp2 := a[(m-1)*lda+i] - a[j*lda+i] = stmp*tmp2 + ctmp*tmp - a[(m-1)*lda+i] = ctmp*tmp2 - stmp*tmp - } - } - } - return - } - for j := m - 2; j >= 0; j-- { - ctmp := c[j] - stmp := s[j] - if ctmp != 1 || stmp != 0 { - for i := 0; i < n; i++ { - tmp := a[j*lda+i] - tmp2 := a[(m-1)*lda+i] - a[j*lda+i] = stmp*tmp2 + ctmp*tmp - a[(m-1)*lda+i] = ctmp*tmp2 - stmp*tmp - } - } - } - return - } - if pivot == 
lapack.Variable { - if direct == lapack.Forward { - for j := 0; j < n-1; j++ { - ctmp := c[j] - stmp := s[j] - if ctmp != 1 || stmp != 0 { - for i := 0; i < m; i++ { - tmp := a[i*lda+j+1] - tmp2 := a[i*lda+j] - a[i*lda+j+1] = ctmp*tmp - stmp*tmp2 - a[i*lda+j] = stmp*tmp + ctmp*tmp2 - } - } - } - return - } - for j := n - 2; j >= 0; j-- { - ctmp := c[j] - stmp := s[j] - if ctmp != 1 || stmp != 0 { - for i := 0; i < m; i++ { - tmp := a[i*lda+j+1] - tmp2 := a[i*lda+j] - a[i*lda+j+1] = ctmp*tmp - stmp*tmp2 - a[i*lda+j] = stmp*tmp + ctmp*tmp2 - } - } - } - return - } else if pivot == lapack.Top { - if direct == lapack.Forward { - for j := 1; j < n; j++ { - ctmp := c[j-1] - stmp := s[j-1] - if ctmp != 1 || stmp != 0 { - for i := 0; i < m; i++ { - tmp := a[i*lda+j] - tmp2 := a[i*lda] - a[i*lda+j] = ctmp*tmp - stmp*tmp2 - a[i*lda] = stmp*tmp + ctmp*tmp2 - } - } - } - return - } - for j := n - 1; j >= 1; j-- { - ctmp := c[j-1] - stmp := s[j-1] - if ctmp != 1 || stmp != 0 { - for i := 0; i < m; i++ { - tmp := a[i*lda+j] - tmp2 := a[i*lda] - a[i*lda+j] = ctmp*tmp - stmp*tmp2 - a[i*lda] = stmp*tmp + ctmp*tmp2 - } - } - } - return - } - if direct == lapack.Forward { - for j := 0; j < n-1; j++ { - ctmp := c[j] - stmp := s[j] - if ctmp != 1 || stmp != 0 { - for i := 0; i < m; i++ { - tmp := a[i*lda+j] - tmp2 := a[i*lda+n-1] - a[i*lda+j] = stmp*tmp2 + ctmp*tmp - a[i*lda+n-1] = ctmp*tmp2 - stmp*tmp - } - - } - } - return - } - for j := n - 2; j >= 0; j-- { - ctmp := c[j] - stmp := s[j] - if ctmp != 1 || stmp != 0 { - for i := 0; i < m; i++ { - tmp := a[i*lda+j] - tmp2 := a[i*lda+n-1] - a[i*lda+j] = stmp*tmp2 + ctmp*tmp - a[i*lda+n-1] = ctmp*tmp2 - stmp*tmp - } - } - } -} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlasrt.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlasrt.go deleted file mode 100644 index be472805b..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/dlasrt.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright ©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import ( - "sort" - - "gonum.org/v1/gonum/lapack" -) - -// Dlasrt sorts the numbers in the input slice d. If s == lapack.SortIncreasing, -// the elements are sorted in increasing order. If s == lapack.SortDecreasing, -// the elements are sorted in decreasing order. For other values of s Dlasrt -// will panic. -// -// Dlasrt is an internal routine. It is exported for testing purposes. -func (impl Implementation) Dlasrt(s lapack.Sort, n int, d []float64) { - switch { - case n < 0: - panic(nLT0) - case len(d) < n: - panic(shortD) - } - - d = d[:n] - switch s { - default: - panic(badSort) - case lapack.SortIncreasing: - sort.Float64s(d) - case lapack.SortDecreasing: - sort.Sort(sort.Reverse(sort.Float64Slice(d))) - } -} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlassq.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlassq.go deleted file mode 100644 index 9c2dc7729..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/dlassq.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright ©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import "math" - -// Dlassq updates a sum of squares in scaled form. The input parameters scale and -// sumsq represent the current scale and total sum of squares. 
These values are -// updated with the information in the first n elements of the vector specified -// by x and incX. -// -// Dlassq is an internal routine. It is exported for testing purposes. -func (impl Implementation) Dlassq(n int, x []float64, incx int, scale float64, sumsq float64) (scl, smsq float64) { - switch { - case n < 0: - panic(nLT0) - case incx <= 0: - panic(badIncX) - case len(x) < 1+(n-1)*incx: - panic(shortX) - } - - if n == 0 { - return scale, sumsq - } - - for ix := 0; ix <= (n-1)*incx; ix += incx { - absxi := math.Abs(x[ix]) - if absxi > 0 || math.IsNaN(absxi) { - if scale < absxi { - sumsq = 1 + sumsq*(scale/absxi)*(scale/absxi) - scale = absxi - } else { - sumsq += (absxi / scale) * (absxi / scale) - } - } - } - return scale, sumsq -} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlasv2.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlasv2.go deleted file mode 100644 index 204af1931..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/dlasv2.go +++ /dev/null @@ -1,115 +0,0 @@ -// Copyright ©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import "math" - -// Dlasv2 computes the singular value decomposition of a 2×2 matrix. -// [ csl snl] [f g] [csr -snr] = [ssmax 0] -// [-snl csl] [0 h] [snr csr] = [ 0 ssmin] -// ssmax is the larger absolute singular value, and ssmin is the smaller absolute -// singular value. [cls, snl] and [csr, snr] are the left and right singular vectors. -// -// Dlasv2 is an internal routine. It is exported for testing purposes. -func (impl Implementation) Dlasv2(f, g, h float64) (ssmin, ssmax, snr, csr, snl, csl float64) { - ft := f - fa := math.Abs(ft) - ht := h - ha := math.Abs(h) - // pmax points to the largest element of the matrix in terms of absolute value. - // 1 if F, 2 if G, 3 if H. 
- pmax := 1 - swap := ha > fa - if swap { - pmax = 3 - ft, ht = ht, ft - fa, ha = ha, fa - } - gt := g - ga := math.Abs(gt) - var clt, crt, slt, srt float64 - if ga == 0 { - ssmin = ha - ssmax = fa - clt = 1 - crt = 1 - slt = 0 - srt = 0 - } else { - gasmall := true - if ga > fa { - pmax = 2 - if (fa / ga) < dlamchE { - gasmall = false - ssmax = ga - if ha > 1 { - ssmin = fa / (ga / ha) - } else { - ssmin = (fa / ga) * ha - } - clt = 1 - slt = ht / gt - srt = 1 - crt = ft / gt - } - } - if gasmall { - d := fa - ha - l := d / fa - if d == fa { // deal with inf - l = 1 - } - m := gt / ft - t := 2 - l - s := math.Hypot(t, m) - var r float64 - if l == 0 { - r = math.Abs(m) - } else { - r = math.Hypot(l, m) - } - a := 0.5 * (s + r) - ssmin = ha / a - ssmax = fa * a - if m == 0 { - if l == 0 { - t = math.Copysign(2, ft) * math.Copysign(1, gt) - } else { - t = gt/math.Copysign(d, ft) + m/t - } - } else { - t = (m/(s+t) + m/(r+l)) * (1 + a) - } - l = math.Hypot(t, 2) - crt = 2 / l - srt = t / l - clt = (crt + srt*m) / a - slt = (ht / ft) * srt / a - } - } - if swap { - csl = srt - snl = crt - csr = slt - snr = clt - } else { - csl = clt - snl = slt - csr = crt - snr = srt - } - var tsign float64 - switch pmax { - case 1: - tsign = math.Copysign(1, csr) * math.Copysign(1, csl) * math.Copysign(1, f) - case 2: - tsign = math.Copysign(1, snr) * math.Copysign(1, csl) * math.Copysign(1, g) - case 3: - tsign = math.Copysign(1, snr) * math.Copysign(1, snl) * math.Copysign(1, h) - } - ssmax = math.Copysign(ssmax, tsign) - ssmin = math.Copysign(ssmin, tsign*math.Copysign(1, f)*math.Copysign(1, h)) - return ssmin, ssmax, snr, csr, snl, csl -} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlaswp.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlaswp.go deleted file mode 100644 index b207d1218..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/dlaswp.go +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright ©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import "gonum.org/v1/gonum/blas/blas64" - -// Dlaswp swaps the rows k1 to k2 of a rectangular matrix A according to the -// indices in ipiv so that row k is swapped with ipiv[k]. -// -// n is the number of columns of A and incX is the increment for ipiv. If incX -// is 1, the swaps are applied from k1 to k2. If incX is -1, the swaps are -// applied in reverse order from k2 to k1. For other values of incX Dlaswp will -// panic. ipiv must have length k2+1, otherwise Dlaswp will panic. -// -// The indices k1, k2, and the elements of ipiv are zero-based. -// -// Dlaswp is an internal routine. It is exported for testing purposes. 
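Dlaswp's row-interchange contract is easy to see with a tiny concrete case; a hypothetical sketch, not part of this patch, using made-up data and the signature of the deleted routine:

    package main

    import (
        "fmt"

        "gonum.org/v1/gonum/lapack/gonum"
    )

    func main() {
        // A 3×2 row-major matrix and an LU-style pivot vector.
        a := []float64{
            1, 2,
            3, 4,
            5, 6,
        }
        ipiv := []int{2, 1, 2} // row 0 <-> row 2; rows 1 and 2 unchanged

        var impl gonum.Implementation
        impl.Dlaswp(2, a, 2, 0, 2, ipiv, 1) // n = 2, lda = 2, k1 = 0, k2 = 2, incX = 1
        fmt.Println(a)                      // [5 6 3 4 1 2]
    }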
-func (impl Implementation) Dlaswp(n int, a []float64, lda int, k1, k2 int, ipiv []int, incX int) { - switch { - case n < 0: - panic(nLT0) - case k2 < 0: - panic(badK2) - case k1 < 0 || k2 < k1: - panic(badK1) - case lda < max(1, n): - panic(badLdA) - case len(a) < (k2-1)*lda+n: - panic(shortA) - case len(ipiv) != k2+1: - panic(badLenIpiv) - case incX != 1 && incX != -1: - panic(absIncNotOne) - } - - if n == 0 { - return - } - - bi := blas64.Implementation() - if incX == 1 { - for k := k1; k <= k2; k++ { - bi.Dswap(n, a[k*lda:], 1, a[ipiv[k]*lda:], 1) - } - return - } - for k := k2; k >= k1; k-- { - bi.Dswap(n, a[k*lda:], 1, a[ipiv[k]*lda:], 1) - } -} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlasy2.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlasy2.go deleted file mode 100644 index abfe60e58..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/dlasy2.go +++ /dev/null @@ -1,290 +0,0 @@ -// Copyright ©2016 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import ( - "math" - - "gonum.org/v1/gonum/blas/blas64" -) - -// Dlasy2 solves the Sylvester matrix equation where the matrices are of order 1 -// or 2. It computes the unknown n1×n2 matrix X so that -// TL*X + sgn*X*TR = scale*B, if tranl == false and tranr == false, -// TL^T*X + sgn*X*TR = scale*B, if tranl == true and tranr == false, -// TL*X + sgn*X*TR^T = scale*B, if tranl == false and tranr == true, -// TL^T*X + sgn*X*TR^T = scale*B, if tranl == true and tranr == true, -// where TL is n1×n1, TR is n2×n2, B is n1×n2, and 1 <= n1,n2 <= 2. -// -// isgn must be 1 or -1, and n1 and n2 must be 0, 1, or 2, but these conditions -// are not checked. -// -// Dlasy2 returns three values, a scale factor that is chosen less than or equal -// to 1 to prevent the solution overflowing, the infinity norm of the solution, -// and an indicator of success. If ok is false, TL and TR have eigenvalues that -// are too close, so TL or TR is perturbed to get a non-singular equation. -// -// Dlasy2 is an internal routine. It is exported for testing purposes. -func (impl Implementation) Dlasy2(tranl, tranr bool, isgn, n1, n2 int, tl []float64, ldtl int, tr []float64, ldtr int, b []float64, ldb int, x []float64, ldx int) (scale, xnorm float64, ok bool) { - // TODO(vladimir-ch): Add input validation checks conditionally skipped - // using the build tag mechanism. - - ok = true - // Quick return if possible. - if n1 == 0 || n2 == 0 { - return scale, xnorm, ok - } - - // Set constants to control overflow. - eps := dlamchP - smlnum := dlamchS / eps - sgn := float64(isgn) - - if n1 == 1 && n2 == 1 { - // 1×1 case: TL11*X + sgn*X*TR11 = B11. - tau1 := tl[0] + sgn*tr[0] - bet := math.Abs(tau1) - if bet <= smlnum { - tau1 = smlnum - bet = smlnum - ok = false - } - scale = 1 - gam := math.Abs(b[0]) - if smlnum*gam > bet { - scale = 1 / gam - } - x[0] = b[0] * scale / tau1 - xnorm = math.Abs(x[0]) - return scale, xnorm, ok - } - - if n1+n2 == 3 { - // 1×2 or 2×1 case. - var ( - smin float64 - tmp [4]float64 // tmp is used as a 2×2 row-major matrix. - btmp [2]float64 - ) - if n1 == 1 && n2 == 2 { - // 1×2 case: TL11*[X11 X12] + sgn*[X11 X12]*op[TR11 TR12] = [B11 B12]. 
- // [TR21 TR22] - smin = math.Abs(tl[0]) - smin = math.Max(smin, math.Max(math.Abs(tr[0]), math.Abs(tr[1]))) - smin = math.Max(smin, math.Max(math.Abs(tr[ldtr]), math.Abs(tr[ldtr+1]))) - smin = math.Max(eps*smin, smlnum) - tmp[0] = tl[0] + sgn*tr[0] - tmp[3] = tl[0] + sgn*tr[ldtr+1] - if tranr { - tmp[1] = sgn * tr[1] - tmp[2] = sgn * tr[ldtr] - } else { - tmp[1] = sgn * tr[ldtr] - tmp[2] = sgn * tr[1] - } - btmp[0] = b[0] - btmp[1] = b[1] - } else { - // 2×1 case: op[TL11 TL12]*[X11] + sgn*[X11]*TR11 = [B11]. - // [TL21 TL22]*[X21] [X21] [B21] - smin = math.Abs(tr[0]) - smin = math.Max(smin, math.Max(math.Abs(tl[0]), math.Abs(tl[1]))) - smin = math.Max(smin, math.Max(math.Abs(tl[ldtl]), math.Abs(tl[ldtl+1]))) - smin = math.Max(eps*smin, smlnum) - tmp[0] = tl[0] + sgn*tr[0] - tmp[3] = tl[ldtl+1] + sgn*tr[0] - if tranl { - tmp[1] = tl[ldtl] - tmp[2] = tl[1] - } else { - tmp[1] = tl[1] - tmp[2] = tl[ldtl] - } - btmp[0] = b[0] - btmp[1] = b[ldb] - } - - // Solve 2×2 system using complete pivoting. - // Set pivots less than smin to smin. - - bi := blas64.Implementation() - ipiv := bi.Idamax(len(tmp), tmp[:], 1) - // Compute the upper triangular matrix [u11 u12]. - // [ 0 u22] - u11 := tmp[ipiv] - if math.Abs(u11) <= smin { - ok = false - u11 = smin - } - locu12 := [4]int{1, 0, 3, 2} // Index in tmp of the element on the same row as the pivot. - u12 := tmp[locu12[ipiv]] - locl21 := [4]int{2, 3, 0, 1} // Index in tmp of the element on the same column as the pivot. - l21 := tmp[locl21[ipiv]] / u11 - locu22 := [4]int{3, 2, 1, 0} // Index in tmp of the remaining element. - u22 := tmp[locu22[ipiv]] - l21*u12 - if math.Abs(u22) <= smin { - ok = false - u22 = smin - } - if ipiv&0x2 != 0 { // true for ipiv equal to 2 and 3. - // The pivot was in the second row, swap the elements of - // the right-hand side. - btmp[0], btmp[1] = btmp[1], btmp[0]-l21*btmp[1] - } else { - btmp[1] -= l21 * btmp[0] - } - scale = 1 - if 2*smlnum*math.Abs(btmp[1]) > math.Abs(u22) || 2*smlnum*math.Abs(btmp[0]) > math.Abs(u11) { - scale = 0.5 / math.Max(math.Abs(btmp[0]), math.Abs(btmp[1])) - btmp[0] *= scale - btmp[1] *= scale - } - // Solve the system [u11 u12] [x21] = [ btmp[0] ]. - // [ 0 u22] [x22] [ btmp[1] ] - x22 := btmp[1] / u22 - x21 := btmp[0]/u11 - (u12/u11)*x22 - if ipiv&0x1 != 0 { // true for ipiv equal to 1 and 3. - // The pivot was in the second column, swap the elements - // of the solution. - x21, x22 = x22, x21 - } - x[0] = x21 - if n1 == 1 { - x[1] = x22 - xnorm = math.Abs(x[0]) + math.Abs(x[1]) - } else { - x[ldx] = x22 - xnorm = math.Max(math.Abs(x[0]), math.Abs(x[ldx])) - } - return scale, xnorm, ok - } - - // 2×2 case: op[TL11 TL12]*[X11 X12] + SGN*[X11 X12]*op[TR11 TR12] = [B11 B12]. - // [TL21 TL22] [X21 X22] [X21 X22] [TR21 TR22] [B21 B22] - // - // Solve equivalent 4×4 system using complete pivoting. - // Set pivots less than smin to smin. 
- - smin := math.Max(math.Abs(tr[0]), math.Abs(tr[1])) - smin = math.Max(smin, math.Max(math.Abs(tr[ldtr]), math.Abs(tr[ldtr+1]))) - smin = math.Max(smin, math.Max(math.Abs(tl[0]), math.Abs(tl[1]))) - smin = math.Max(smin, math.Max(math.Abs(tl[ldtl]), math.Abs(tl[ldtl+1]))) - smin = math.Max(eps*smin, smlnum) - - var t [4][4]float64 - t[0][0] = tl[0] + sgn*tr[0] - t[1][1] = tl[0] + sgn*tr[ldtr+1] - t[2][2] = tl[ldtl+1] + sgn*tr[0] - t[3][3] = tl[ldtl+1] + sgn*tr[ldtr+1] - if tranl { - t[0][2] = tl[ldtl] - t[1][3] = tl[ldtl] - t[2][0] = tl[1] - t[3][1] = tl[1] - } else { - t[0][2] = tl[1] - t[1][3] = tl[1] - t[2][0] = tl[ldtl] - t[3][1] = tl[ldtl] - } - if tranr { - t[0][1] = sgn * tr[1] - t[1][0] = sgn * tr[ldtr] - t[2][3] = sgn * tr[1] - t[3][2] = sgn * tr[ldtr] - } else { - t[0][1] = sgn * tr[ldtr] - t[1][0] = sgn * tr[1] - t[2][3] = sgn * tr[ldtr] - t[3][2] = sgn * tr[1] - } - - var btmp [4]float64 - btmp[0] = b[0] - btmp[1] = b[1] - btmp[2] = b[ldb] - btmp[3] = b[ldb+1] - - // Perform elimination. - var jpiv [4]int // jpiv records any column swaps for pivoting. - for i := 0; i < 3; i++ { - var ( - xmax float64 - ipsv, jpsv int - ) - for ip := i; ip < 4; ip++ { - for jp := i; jp < 4; jp++ { - if math.Abs(t[ip][jp]) >= xmax { - xmax = math.Abs(t[ip][jp]) - ipsv = ip - jpsv = jp - } - } - } - if ipsv != i { - // The pivot is not in the top row of the unprocessed - // block, swap rows ipsv and i of t and btmp. - t[ipsv], t[i] = t[i], t[ipsv] - btmp[ipsv], btmp[i] = btmp[i], btmp[ipsv] - } - if jpsv != i { - // The pivot is not in the left column of the - // unprocessed block, swap columns jpsv and i of t. - for k := 0; k < 4; k++ { - t[k][jpsv], t[k][i] = t[k][i], t[k][jpsv] - } - } - jpiv[i] = jpsv - if math.Abs(t[i][i]) < smin { - ok = false - t[i][i] = smin - } - for k := i + 1; k < 4; k++ { - t[k][i] /= t[i][i] - btmp[k] -= t[k][i] * btmp[i] - for j := i + 1; j < 4; j++ { - t[k][j] -= t[k][i] * t[i][j] - } - } - } - if math.Abs(t[3][3]) < smin { - ok = false - t[3][3] = smin - } - scale = 1 - if 8*smlnum*math.Abs(btmp[0]) > math.Abs(t[0][0]) || - 8*smlnum*math.Abs(btmp[1]) > math.Abs(t[1][1]) || - 8*smlnum*math.Abs(btmp[2]) > math.Abs(t[2][2]) || - 8*smlnum*math.Abs(btmp[3]) > math.Abs(t[3][3]) { - - maxbtmp := math.Max(math.Abs(btmp[0]), math.Abs(btmp[1])) - maxbtmp = math.Max(maxbtmp, math.Max(math.Abs(btmp[2]), math.Abs(btmp[3]))) - scale = 1.0 / 8.0 / maxbtmp - btmp[0] *= scale - btmp[1] *= scale - btmp[2] *= scale - btmp[3] *= scale - } - // Compute the solution of the upper triangular system t * tmp = btmp. - var tmp [4]float64 - for i := 3; i >= 0; i-- { - temp := 1 / t[i][i] - tmp[i] = btmp[i] * temp - for j := i + 1; j < 4; j++ { - tmp[i] -= temp * t[i][j] * tmp[j] - } - } - for i := 2; i >= 0; i-- { - if jpiv[i] != i { - tmp[i], tmp[jpiv[i]] = tmp[jpiv[i]], tmp[i] - } - } - x[0] = tmp[0] - x[1] = tmp[1] - x[ldx] = tmp[2] - x[ldx+1] = tmp[3] - xnorm = math.Max(math.Abs(tmp[0])+math.Abs(tmp[1]), math.Abs(tmp[2])+math.Abs(tmp[3])) - return scale, xnorm, ok -} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlatrd.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlatrd.go deleted file mode 100644 index 018efc98c..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/dlatrd.go +++ /dev/null @@ -1,165 +0,0 @@ -// Copyright ©2016 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package gonum - -import ( - "gonum.org/v1/gonum/blas" - "gonum.org/v1/gonum/blas/blas64" -) - -// Dlatrd reduces nb rows and columns of a real n×n symmetric matrix A to symmetric -// tridiagonal form. It computes the orthonormal similarity transformation -// Q^T * A * Q -// and returns the matrices V and W to apply to the unreduced part of A. If -// uplo == blas.Upper, the upper triangle is supplied and the last nb rows are -// reduced. If uplo == blas.Lower, the lower triangle is supplied and the first -// nb rows are reduced. -// -// a contains the symmetric matrix on entry with active triangular half specified -// by uplo. On exit, the nb columns have been reduced to tridiagonal form. The -// diagonal contains the diagonal of the reduced matrix, the off-diagonal is -// set to 1, and the remaining elements contain the data to construct Q. -// -// If uplo == blas.Upper, with n = 5 and nb = 2 on exit a is -// [ a a a v4 v5] -// [ a a v4 v5] -// [ a 1 v5] -// [ d 1] -// [ d] -// -// If uplo == blas.Lower, with n = 5 and nb = 2, on exit a is -// [ d ] -// [ 1 d ] -// [v1 1 a ] -// [v1 v2 a a ] -// [v1 v2 a a a] -// -// e contains the superdiagonal elements of the reduced matrix. If uplo == blas.Upper, -// e[n-nb:n-1] contains the last nb columns of the reduced matrix, while if -// uplo == blas.Lower, e[:nb] contains the first nb columns of the reduced matrix. -// e must have length at least n-1, and Dlatrd will panic otherwise. -// -// tau contains the scalar factors of the elementary reflectors needed to construct Q. -// The reflectors are stored in tau[n-nb:n-1] if uplo == blas.Upper, and in -// tau[:nb] if uplo == blas.Lower. tau must have length n-1, and Dlatrd will panic -// otherwise. -// -// w is an n×nb matrix. On exit it contains the data to update the unreduced part -// of A. -// -// The matrix Q is represented as a product of elementary reflectors. Each reflector -// H has the form -// I - tau * v * v^T -// If uplo == blas.Upper, -// Q = H_{n-1} * H_{n-2} * ... * H_{n-nb} -// where v[:i-1] is stored in A[:i-1,i], v[i-1] = 1, and v[i:n] = 0. -// -// If uplo == blas.Lower, -// Q = H_0 * H_1 * ... * H_{nb-1} -// where v[:i+1] = 0, v[i+1] = 1, and v[i+2:n] is stored in A[i+2:n,i]. -// -// The vectors v form the n×nb matrix V which is used with W to apply a -// symmetric rank-2 update to the unreduced part of A -// A = A - V * W^T - W * V^T -// -// Dlatrd is an internal routine. It is exported for testing purposes. -func (impl Implementation) Dlatrd(uplo blas.Uplo, n, nb int, a []float64, lda int, e, tau, w []float64, ldw int) { - switch { - case uplo != blas.Upper && uplo != blas.Lower: - panic(badUplo) - case n < 0: - panic(nLT0) - case nb < 0: - panic(nbLT0) - case nb > n: - panic(nbGTN) - case lda < max(1, n): - panic(badLdA) - case ldw < max(1, nb): - panic(badLdW) - } - - if n == 0 { - return - } - - switch { - case len(a) < (n-1)*lda+n: - panic(shortA) - case len(w) < (n-1)*ldw+nb: - panic(shortW) - case len(e) < n-1: - panic(shortE) - case len(tau) < n-1: - panic(shortTau) - } - - bi := blas64.Implementation() - - if uplo == blas.Upper { - for i := n - 1; i >= n-nb; i-- { - iw := i - n + nb - if i < n-1 { - // Update A(0:i, i). - bi.Dgemv(blas.NoTrans, i+1, n-i-1, -1, a[i+1:], lda, - w[i*ldw+iw+1:], 1, 1, a[i:], lda) - bi.Dgemv(blas.NoTrans, i+1, n-i-1, -1, w[iw+1:], ldw, - a[i*lda+i+1:], 1, 1, a[i:], lda) - } - if i > 0 { - // Generate elementary reflector H_i to annihilate A(0:i-2,i). 
- e[i-1], tau[i-1] = impl.Dlarfg(i, a[(i-1)*lda+i], a[i:], lda) - a[(i-1)*lda+i] = 1 - - // Compute W(0:i-1, i). - bi.Dsymv(blas.Upper, i, 1, a, lda, a[i:], lda, 0, w[iw:], ldw) - if i < n-1 { - bi.Dgemv(blas.Trans, i, n-i-1, 1, w[iw+1:], ldw, - a[i:], lda, 0, w[(i+1)*ldw+iw:], ldw) - bi.Dgemv(blas.NoTrans, i, n-i-1, -1, a[i+1:], lda, - w[(i+1)*ldw+iw:], ldw, 1, w[iw:], ldw) - bi.Dgemv(blas.Trans, i, n-i-1, 1, a[i+1:], lda, - a[i:], lda, 0, w[(i+1)*ldw+iw:], ldw) - bi.Dgemv(blas.NoTrans, i, n-i-1, -1, w[iw+1:], ldw, - w[(i+1)*ldw+iw:], ldw, 1, w[iw:], ldw) - } - bi.Dscal(i, tau[i-1], w[iw:], ldw) - alpha := -0.5 * tau[i-1] * bi.Ddot(i, w[iw:], ldw, a[i:], lda) - bi.Daxpy(i, alpha, a[i:], lda, w[iw:], ldw) - } - } - } else { - // Reduce first nb columns of lower triangle. - for i := 0; i < nb; i++ { - // Update A(i:n, i) - bi.Dgemv(blas.NoTrans, n-i, i, -1, a[i*lda:], lda, - w[i*ldw:], 1, 1, a[i*lda+i:], lda) - bi.Dgemv(blas.NoTrans, n-i, i, -1, w[i*ldw:], ldw, - a[i*lda:], 1, 1, a[i*lda+i:], lda) - if i < n-1 { - // Generate elementary reflector H_i to annihilate A(i+2:n,i). - e[i], tau[i] = impl.Dlarfg(n-i-1, a[(i+1)*lda+i], a[min(i+2, n-1)*lda+i:], lda) - a[(i+1)*lda+i] = 1 - - // Compute W(i+1:n,i). - bi.Dsymv(blas.Lower, n-i-1, 1, a[(i+1)*lda+i+1:], lda, - a[(i+1)*lda+i:], lda, 0, w[(i+1)*ldw+i:], ldw) - bi.Dgemv(blas.Trans, n-i-1, i, 1, w[(i+1)*ldw:], ldw, - a[(i+1)*lda+i:], lda, 0, w[i:], ldw) - bi.Dgemv(blas.NoTrans, n-i-1, i, -1, a[(i+1)*lda:], lda, - w[i:], ldw, 1, w[(i+1)*ldw+i:], ldw) - bi.Dgemv(blas.Trans, n-i-1, i, 1, a[(i+1)*lda:], lda, - a[(i+1)*lda+i:], lda, 0, w[i:], ldw) - bi.Dgemv(blas.NoTrans, n-i-1, i, -1, w[(i+1)*ldw:], ldw, - w[i:], ldw, 1, w[(i+1)*ldw+i:], ldw) - bi.Dscal(n-i-1, tau[i], w[(i+1)*ldw+i:], ldw) - alpha := -0.5 * tau[i] * bi.Ddot(n-i-1, w[(i+1)*ldw+i:], ldw, - a[(i+1)*lda+i:], lda) - bi.Daxpy(n-i-1, alpha, a[(i+1)*lda+i:], lda, - w[(i+1)*ldw+i:], ldw) - } - } - } -} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlatrs.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlatrs.go deleted file mode 100644 index dc445c6fe..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/dlatrs.go +++ /dev/null @@ -1,359 +0,0 @@ -// Copyright ©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import ( - "math" - - "gonum.org/v1/gonum/blas" - "gonum.org/v1/gonum/blas/blas64" -) - -// Dlatrs solves a triangular system of equations scaled to prevent overflow. It -// solves -// A * x = scale * b if trans == blas.NoTrans -// A^T * x = scale * b if trans == blas.Trans -// where the scale s is set for numeric stability. -// -// A is an n×n triangular matrix. On entry, the slice x contains the values of -// b, and on exit it contains the solution vector x. -// -// If normin == true, cnorm is an input and cnorm[j] contains the norm of the off-diagonal -// part of the j^th column of A. If trans == blas.NoTrans, cnorm[j] must be greater -// than or equal to the infinity norm, and greater than or equal to the one-norm -// otherwise. If normin == false, then cnorm is treated as an output, and is set -// to contain the 1-norm of the off-diagonal part of the j^th column of A. -// -// Dlatrs is an internal routine. It is exported for testing purposes. 
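To make the scaled-solve contract of Dlatrs concrete, a hypothetical sketch with made-up, well conditioned data, not part of this patch; for a benign system the returned scale is simply 1 and x holds the ordinary triangular solution:

    package main

    import (
        "fmt"

        "gonum.org/v1/gonum/blas"
        "gonum.org/v1/gonum/lapack/gonum"
    )

    func main() {
        // Upper triangular 2×2 system A*x = scale*b, row-major.
        a := []float64{
            2, 1,
            0, 4,
        }
        x := []float64{3, 8} // b on entry, overwritten with the solution
        cnorm := make([]float64, 2)

        var impl gonum.Implementation
        // normin = false, so the routine fills cnorm itself.
        scale := impl.Dlatrs(blas.Upper, blas.NoTrans, blas.NonUnit, false, 2, a, 2, x, cnorm)
        fmt.Println(scale, x) // 1 [0.5 2]
    }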
-func (impl Implementation) Dlatrs(uplo blas.Uplo, trans blas.Transpose, diag blas.Diag, normin bool, n int, a []float64, lda int, x []float64, cnorm []float64) (scale float64) { - switch { - case uplo != blas.Upper && uplo != blas.Lower: - panic(badUplo) - case trans != blas.NoTrans && trans != blas.Trans && trans != blas.ConjTrans: - panic(badTrans) - case diag != blas.Unit && diag != blas.NonUnit: - panic(badDiag) - case n < 0: - panic(nLT0) - case lda < max(1, n): - panic(badLdA) - } - - // Quick return if possible. - if n == 0 { - return 0 - } - - switch { - case len(a) < (n-1)*lda+n: - panic(shortA) - case len(x) < n: - panic(shortX) - case len(cnorm) < n: - panic(shortCNorm) - } - - upper := uplo == blas.Upper - nonUnit := diag == blas.NonUnit - - smlnum := dlamchS / dlamchP - bignum := 1 / smlnum - scale = 1 - - bi := blas64.Implementation() - - if !normin { - if upper { - cnorm[0] = 0 - for j := 1; j < n; j++ { - cnorm[j] = bi.Dasum(j, a[j:], lda) - } - } else { - for j := 0; j < n-1; j++ { - cnorm[j] = bi.Dasum(n-j-1, a[(j+1)*lda+j:], lda) - } - cnorm[n-1] = 0 - } - } - // Scale the column norms by tscal if the maximum element in cnorm is greater than bignum. - imax := bi.Idamax(n, cnorm, 1) - tmax := cnorm[imax] - var tscal float64 - if tmax <= bignum { - tscal = 1 - } else { - tscal = 1 / (smlnum * tmax) - bi.Dscal(n, tscal, cnorm, 1) - } - - // Compute a bound on the computed solution vector to see if bi.Dtrsv can be used. - j := bi.Idamax(n, x, 1) - xmax := math.Abs(x[j]) - xbnd := xmax - var grow float64 - var jfirst, jlast, jinc int - if trans == blas.NoTrans { - if upper { - jfirst = n - 1 - jlast = -1 - jinc = -1 - } else { - jfirst = 0 - jlast = n - jinc = 1 - } - // Compute the growth in A * x = b. - if tscal != 1 { - grow = 0 - goto Solve - } - if nonUnit { - grow = 1 / math.Max(xbnd, smlnum) - xbnd = grow - for j := jfirst; j != jlast; j += jinc { - if grow <= smlnum { - goto Solve - } - tjj := math.Abs(a[j*lda+j]) - xbnd = math.Min(xbnd, math.Min(1, tjj)*grow) - if tjj+cnorm[j] >= smlnum { - grow *= tjj / (tjj + cnorm[j]) - } else { - grow = 0 - } - } - grow = xbnd - } else { - grow = math.Min(1, 1/math.Max(xbnd, smlnum)) - for j := jfirst; j != jlast; j += jinc { - if grow <= smlnum { - goto Solve - } - grow *= 1 / (1 + cnorm[j]) - } - } - } else { - if upper { - jfirst = 0 - jlast = n - jinc = 1 - } else { - jfirst = n - 1 - jlast = -1 - jinc = -1 - } - if tscal != 1 { - grow = 0 - goto Solve - } - if nonUnit { - grow = 1 / (math.Max(xbnd, smlnum)) - xbnd = grow - for j := jfirst; j != jlast; j += jinc { - if grow <= smlnum { - goto Solve - } - xj := 1 + cnorm[j] - grow = math.Min(grow, xbnd/xj) - tjj := math.Abs(a[j*lda+j]) - if xj > tjj { - xbnd *= tjj / xj - } - } - grow = math.Min(grow, xbnd) - } else { - grow = math.Min(1, 1/math.Max(xbnd, smlnum)) - for j := jfirst; j != jlast; j += jinc { - if grow <= smlnum { - goto Solve - } - xj := 1 + cnorm[j] - grow /= xj - } - } - } - -Solve: - if grow*tscal > smlnum { - // Use the Level 2 BLAS solve if the reciprocal of the bound on - // elements of X is not too small. - bi.Dtrsv(uplo, trans, diag, n, a, lda, x, 1) - if tscal != 1 { - bi.Dscal(n, 1/tscal, cnorm, 1) - } - return scale - } - - // Use a Level 1 BLAS solve, scaling intermediate results. 
- if xmax > bignum { - scale = bignum / xmax - bi.Dscal(n, scale, x, 1) - xmax = bignum - } - if trans == blas.NoTrans { - for j := jfirst; j != jlast; j += jinc { - xj := math.Abs(x[j]) - var tjj, tjjs float64 - if nonUnit { - tjjs = a[j*lda+j] * tscal - } else { - tjjs = tscal - if tscal == 1 { - goto Skip1 - } - } - tjj = math.Abs(tjjs) - if tjj > smlnum { - if tjj < 1 { - if xj > tjj*bignum { - rec := 1 / xj - bi.Dscal(n, rec, x, 1) - scale *= rec - xmax *= rec - } - } - x[j] /= tjjs - xj = math.Abs(x[j]) - } else if tjj > 0 { - if xj > tjj*bignum { - rec := (tjj * bignum) / xj - if cnorm[j] > 1 { - rec /= cnorm[j] - } - bi.Dscal(n, rec, x, 1) - scale *= rec - xmax *= rec - } - x[j] /= tjjs - xj = math.Abs(x[j]) - } else { - for i := 0; i < n; i++ { - x[i] = 0 - } - x[j] = 1 - xj = 1 - scale = 0 - xmax = 0 - } - Skip1: - if xj > 1 { - rec := 1 / xj - if cnorm[j] > (bignum-xmax)*rec { - rec *= 0.5 - bi.Dscal(n, rec, x, 1) - scale *= rec - } - } else if xj*cnorm[j] > bignum-xmax { - bi.Dscal(n, 0.5, x, 1) - scale *= 0.5 - } - if upper { - if j > 0 { - bi.Daxpy(j, -x[j]*tscal, a[j:], lda, x, 1) - i := bi.Idamax(j, x, 1) - xmax = math.Abs(x[i]) - } - } else { - if j < n-1 { - bi.Daxpy(n-j-1, -x[j]*tscal, a[(j+1)*lda+j:], lda, x[j+1:], 1) - i := j + bi.Idamax(n-j-1, x[j+1:], 1) - xmax = math.Abs(x[i]) - } - } - } - } else { - for j := jfirst; j != jlast; j += jinc { - xj := math.Abs(x[j]) - uscal := tscal - rec := 1 / math.Max(xmax, 1) - var tjjs float64 - if cnorm[j] > (bignum-xj)*rec { - rec *= 0.5 - if nonUnit { - tjjs = a[j*lda+j] * tscal - } else { - tjjs = tscal - } - tjj := math.Abs(tjjs) - if tjj > 1 { - rec = math.Min(1, rec*tjj) - uscal /= tjjs - } - if rec < 1 { - bi.Dscal(n, rec, x, 1) - scale *= rec - xmax *= rec - } - } - var sumj float64 - if uscal == 1 { - if upper { - sumj = bi.Ddot(j, a[j:], lda, x, 1) - } else if j < n-1 { - sumj = bi.Ddot(n-j-1, a[(j+1)*lda+j:], lda, x[j+1:], 1) - } - } else { - if upper { - for i := 0; i < j; i++ { - sumj += (a[i*lda+j] * uscal) * x[i] - } - } else if j < n { - for i := j + 1; i < n; i++ { - sumj += (a[i*lda+j] * uscal) * x[i] - } - } - } - if uscal == tscal { - x[j] -= sumj - xj := math.Abs(x[j]) - var tjjs float64 - if nonUnit { - tjjs = a[j*lda+j] * tscal - } else { - tjjs = tscal - if tscal == 1 { - goto Skip2 - } - } - tjj := math.Abs(tjjs) - if tjj > smlnum { - if tjj < 1 { - if xj > tjj*bignum { - rec = 1 / xj - bi.Dscal(n, rec, x, 1) - scale *= rec - xmax *= rec - } - } - x[j] /= tjjs - } else if tjj > 0 { - if xj > tjj*bignum { - rec = (tjj * bignum) / xj - bi.Dscal(n, rec, x, 1) - scale *= rec - xmax *= rec - } - x[j] /= tjjs - } else { - for i := 0; i < n; i++ { - x[i] = 0 - } - x[j] = 1 - scale = 0 - xmax = 0 - } - } else { - x[j] = x[j]/tjjs - sumj - } - Skip2: - xmax = math.Max(xmax, math.Abs(x[j])) - } - } - scale /= tscal - if tscal != 1 { - bi.Dscal(n, 1/tscal, cnorm, 1) - } - return scale -} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlauu2.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlauu2.go deleted file mode 100644 index ecce22cc6..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/dlauu2.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright ©2018 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package gonum - -import ( - "gonum.org/v1/gonum/blas" - "gonum.org/v1/gonum/blas/blas64" -) - -// Dlauu2 computes the product -// U * U^T if uplo is blas.Upper -// L^T * L if uplo is blas.Lower -// where U or L is stored in the upper or lower triangular part of A. -// Only the upper or lower triangle of the result is stored, overwriting -// the corresponding factor in A. -func (impl Implementation) Dlauu2(uplo blas.Uplo, n int, a []float64, lda int) { - switch { - case uplo != blas.Upper && uplo != blas.Lower: - panic(badUplo) - case n < 0: - panic(nLT0) - case lda < max(1, n): - panic(badLdA) - } - - // Quick return if possible. - if n == 0 { - return - } - - if len(a) < (n-1)*lda+n { - panic(shortA) - } - - bi := blas64.Implementation() - - if uplo == blas.Upper { - // Compute the product U*U^T. - for i := 0; i < n; i++ { - aii := a[i*lda+i] - if i < n-1 { - a[i*lda+i] = bi.Ddot(n-i, a[i*lda+i:], 1, a[i*lda+i:], 1) - bi.Dgemv(blas.NoTrans, i, n-i-1, 1, a[i+1:], lda, a[i*lda+i+1:], 1, - aii, a[i:], lda) - } else { - bi.Dscal(i+1, aii, a[i:], lda) - } - } - } else { - // Compute the product L^T*L. - for i := 0; i < n; i++ { - aii := a[i*lda+i] - if i < n-1 { - a[i*lda+i] = bi.Ddot(n-i, a[i*lda+i:], lda, a[i*lda+i:], lda) - bi.Dgemv(blas.Trans, n-i-1, i, 1, a[(i+1)*lda:], lda, a[(i+1)*lda+i:], lda, - aii, a[i*lda:], 1) - } else { - bi.Dscal(i+1, aii, a[i*lda:], 1) - } - } - } -} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlauum.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlauum.go deleted file mode 100644 index 67ecaddf4..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/dlauum.go +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright ©2018 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import ( - "gonum.org/v1/gonum/blas" - "gonum.org/v1/gonum/blas/blas64" -) - -// Dlauum computes the product -// U * U^T if uplo is blas.Upper -// L^T * L if uplo is blas.Lower -// where U or L is stored in the upper or lower triangular part of A. -// Only the upper or lower triangle of the result is stored, overwriting -// the corresponding factor in A. -func (impl Implementation) Dlauum(uplo blas.Uplo, n int, a []float64, lda int) { - switch { - case uplo != blas.Upper && uplo != blas.Lower: - panic(badUplo) - case n < 0: - panic(nLT0) - case lda < max(1, n): - panic(badLdA) - } - - // Quick return if possible. - if n == 0 { - return - } - - if len(a) < (n-1)*lda+n { - panic(shortA) - } - - // Determine the block size. - opts := "U" - if uplo == blas.Lower { - opts = "L" - } - nb := impl.Ilaenv(1, "DLAUUM", opts, n, -1, -1, -1) - - if nb <= 1 || n <= nb { - // Use unblocked code. - impl.Dlauu2(uplo, n, a, lda) - return - } - - // Use blocked code. - bi := blas64.Implementation() - if uplo == blas.Upper { - // Compute the product U*U^T. - for i := 0; i < n; i += nb { - ib := min(nb, n-i) - bi.Dtrmm(blas.Right, blas.Upper, blas.Trans, blas.NonUnit, - i, ib, 1, a[i*lda+i:], lda, a[i:], lda) - impl.Dlauu2(blas.Upper, ib, a[i*lda+i:], lda) - if n-i-ib > 0 { - bi.Dgemm(blas.NoTrans, blas.Trans, i, ib, n-i-ib, - 1, a[i+ib:], lda, a[i*lda+i+ib:], lda, 1, a[i:], lda) - bi.Dsyrk(blas.Upper, blas.NoTrans, ib, n-i-ib, - 1, a[i*lda+i+ib:], lda, 1, a[i*lda+i:], lda) - } - } - } else { - // Compute the product L^T*L. 
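-		// Each pass updates one block row: Dtrmm folds the transposed
-		// diagonal block into the columns to its left, Dlauu2 forms the
-		// ib×ib diagonal block in place, and Dgemm/Dsyrk add the
-		// contribution of the panel below the block.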
- for i := 0; i < n; i += nb { - ib := min(nb, n-i) - bi.Dtrmm(blas.Left, blas.Lower, blas.Trans, blas.NonUnit, - ib, i, 1, a[i*lda+i:], lda, a[i*lda:], lda) - impl.Dlauu2(blas.Lower, ib, a[i*lda+i:], lda) - if n-i-ib > 0 { - bi.Dgemm(blas.Trans, blas.NoTrans, ib, i, n-i-ib, - 1, a[(i+ib)*lda+i:], lda, a[(i+ib)*lda:], lda, 1, a[i*lda:], lda) - bi.Dsyrk(blas.Lower, blas.Trans, ib, n-i-ib, - 1, a[(i+ib)*lda+i:], lda, 1, a[i*lda+i:], lda) - } - } - } -} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/doc.go b/vendor/gonum.org/v1/gonum/lapack/gonum/doc.go deleted file mode 100644 index 579428927..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/doc.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright ©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package gonum is a pure-go implementation of the LAPACK API. The LAPACK API defines -// a set of algorithms for advanced matrix operations. -// -// The function definitions and implementations follow that of the netlib reference -// implementation. See http://www.netlib.org/lapack/explore-html/ for more -// information, and http://www.netlib.org/lapack/explore-html/d4/de1/_l_i_c_e_n_s_e_source.html -// for more license information. -// -// Slice function arguments frequently represent vectors and matrices. The data -// layout is identical to that found in https://godoc.org/gonum.org/v1/gonum/blas/gonum. -// -// Most LAPACK functions are built on top the routines defined in the BLAS API, -// and as such the computation time for many LAPACK functions is -// dominated by BLAS calls. Here, BLAS is accessed through the -// blas64 package (https://godoc.org/golang.org/v1/gonum/blas/blas64). In particular, -// this implies that an external BLAS library will be used if it is -// registered in blas64. -// -// The full LAPACK capability has not been implemented at present. The full -// API is very large, containing approximately 200 functions for double precision -// alone. Future additions will be focused on supporting the gonum matrix -// package (https://godoc.org/github.com/gonum/matrix/mat64), though pull requests -// with implementations and tests for LAPACK function are encouraged. -package gonum // import "gonum.org/v1/gonum/lapack/gonum" diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dorg2l.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dorg2l.go deleted file mode 100644 index a20765a9e..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/dorg2l.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright ©2016 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import ( - "gonum.org/v1/gonum/blas" - "gonum.org/v1/gonum/blas/blas64" -) - -// Dorg2l generates an m×n matrix Q with orthonormal columns which is defined -// as the last n columns of a product of k elementary reflectors of order m. -// Q = H_{k-1} * ... * H_1 * H_0 -// See Dgelqf for more information. It must be that m >= n >= k. -// -// tau contains the scalar reflectors computed by Dgeqlf. tau must have length -// at least k, and Dorg2l will panic otherwise. -// -// work contains temporary memory, and must have length at least n. Dorg2l will -// panic otherwise. -// -// Dorg2l is an internal routine. It is exported for testing purposes. 
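A hypothetical usage sketch (not from the vendored sources, values illustrative): QL-factor a 4×3 matrix with Dgeqlf from this same package, then expand the reflectors into the explicit Q.

package main

import (
	"fmt"

	"gonum.org/v1/gonum/lapack/gonum"
)

func main() {
	impl := gonum.Implementation{}
	const m, n = 4, 3
	a := []float64{
		1, 2, 3,
		4, 5, 6,
		7, 8, 10,
		2, 1, 0,
	}
	tau := make([]float64, n)

	// QL-factor A, sizing the workspace with a query first.
	work := make([]float64, 1)
	impl.Dgeqlf(m, n, a, n, tau, work, -1)
	work = make([]float64, int(work[0]))
	impl.Dgeqlf(m, n, a, n, tau, work, len(work))

	// Expand the k = n reflectors into the explicit m×n matrix Q.
	impl.Dorg2l(m, n, n, a, n, tau, make([]float64, n))
	fmt.Println(a)
}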
-func (impl Implementation) Dorg2l(m, n, k int, a []float64, lda int, tau, work []float64) { - switch { - case m < 0: - panic(mLT0) - case n < 0: - panic(nLT0) - case n > m: - panic(nGTM) - case k < 0: - panic(kLT0) - case k > n: - panic(kGTN) - case lda < max(1, n): - panic(badLdA) - } - - if n == 0 { - return - } - - switch { - case len(a) < (m-1)*lda+n: - panic(shortA) - case len(tau) < k: - panic(shortTau) - case len(work) < n: - panic(shortWork) - } - - // Initialize columns 0:n-k to columns of the unit matrix. - for j := 0; j < n-k; j++ { - for l := 0; l < m; l++ { - a[l*lda+j] = 0 - } - a[(m-n+j)*lda+j] = 1 - } - - bi := blas64.Implementation() - for i := 0; i < k; i++ { - ii := n - k + i - - // Apply H_i to A[0:m-k+i, 0:n-k+i] from the left. - a[(m-n+ii)*lda+ii] = 1 - impl.Dlarf(blas.Left, m-n+ii+1, ii, a[ii:], lda, tau[i], a, lda, work) - bi.Dscal(m-n+ii, -tau[i], a[ii:], lda) - a[(m-n+ii)*lda+ii] = 1 - tau[i] - - // Set A[m-k+i:m, n-k+i+1] to zero. - for l := m - n + ii + 1; l < m; l++ { - a[l*lda+ii] = 0 - } - } -} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dorg2r.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dorg2r.go deleted file mode 100644 index de4477571..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/dorg2r.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright ©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import ( - "gonum.org/v1/gonum/blas" - "gonum.org/v1/gonum/blas/blas64" -) - -// Dorg2r generates an m×n matrix Q with orthonormal columns defined by the -// product of elementary reflectors as computed by Dgeqrf. -// Q = H_0 * H_1 * ... * H_{k-1} -// len(tau) >= k, 0 <= k <= n, 0 <= n <= m, len(work) >= n. -// Dorg2r will panic if these conditions are not met. -// -// Dorg2r is an internal routine. It is exported for testing purposes. -func (impl Implementation) Dorg2r(m, n, k int, a []float64, lda int, tau []float64, work []float64) { - switch { - case m < 0: - panic(mLT0) - case n < 0: - panic(nLT0) - case n > m: - panic(nGTM) - case k < 0: - panic(kLT0) - case k > n: - panic(kGTN) - case lda < max(1, n): - panic(badLdA) - } - - if n == 0 { - return - } - - switch { - case len(a) < (m-1)*lda+n: - panic(shortA) - case len(tau) < k: - panic(shortTau) - case len(work) < n: - panic(shortWork) - } - - bi := blas64.Implementation() - - // Initialize columns k+1:n to columns of the unit matrix. - for l := 0; l < m; l++ { - for j := k; j < n; j++ { - a[l*lda+j] = 0 - } - } - for j := k; j < n; j++ { - a[j*lda+j] = 1 - } - for i := k - 1; i >= 0; i-- { - for i := range work { - work[i] = 0 - } - if i < n-1 { - a[i*lda+i] = 1 - impl.Dlarf(blas.Left, m-i, n-i-1, a[i*lda+i:], lda, tau[i], a[i*lda+i+1:], lda, work) - } - if i < m-1 { - bi.Dscal(m-i-1, -tau[i], a[(i+1)*lda+i:], lda) - } - a[i*lda+i] = 1 - tau[i] - for l := 0; l < i; l++ { - a[l*lda+i] = 0 - } - } -} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dorgbr.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dorgbr.go deleted file mode 100644 index 626cad5ff..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/dorgbr.go +++ /dev/null @@ -1,138 +0,0 @@ -// Copyright ©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import "gonum.org/v1/gonum/lapack" - -// Dorgbr generates one of the matrices Q or P^T computed by Dgebrd -// computed from the decomposition Dgebrd. 
See Dgebd2 for the description of -// Q and P^T. -// -// If vect == lapack.GenerateQ, then a is assumed to have been an m×k matrix and -// Q is of order m. If m >= k, then Dorgbr returns the first n columns of Q -// where m >= n >= k. If m < k, then Dorgbr returns Q as an m×m matrix. -// -// If vect == lapack.GeneratePT, then A is assumed to have been a k×n matrix, and -// P^T is of order n. If k < n, then Dorgbr returns the first m rows of P^T, -// where n >= m >= k. If k >= n, then Dorgbr returns P^T as an n×n matrix. -// -// Dorgbr is an internal routine. It is exported for testing purposes. -func (impl Implementation) Dorgbr(vect lapack.GenOrtho, m, n, k int, a []float64, lda int, tau, work []float64, lwork int) { - wantq := vect == lapack.GenerateQ - mn := min(m, n) - switch { - case vect != lapack.GenerateQ && vect != lapack.GeneratePT: - panic(badGenOrtho) - case m < 0: - panic(mLT0) - case n < 0: - panic(nLT0) - case k < 0: - panic(kLT0) - case wantq && n > m: - panic(nGTM) - case wantq && n < min(m, k): - panic("lapack: n < min(m,k)") - case !wantq && m > n: - panic(mGTN) - case !wantq && m < min(n, k): - panic("lapack: m < min(n,k)") - case lda < max(1, n) && lwork != -1: - // Normally, we follow the reference and require the leading - // dimension to be always valid, even in case of workspace - // queries. However, if a caller provided a placeholder value - // for lda (and a) when doing a workspace query that didn't - // fulfill the condition here, it would cause a panic. This is - // exactly what Dgesvd does. - panic(badLdA) - case lwork < max(1, mn) && lwork != -1: - panic(badLWork) - case len(work) < max(1, lwork): - panic(shortWork) - } - - // Quick return if possible. - work[0] = 1 - if m == 0 || n == 0 { - return - } - - if wantq { - if m >= k { - impl.Dorgqr(m, n, k, a, lda, tau, work, -1) - } else if m > 1 { - impl.Dorgqr(m-1, m-1, m-1, a[lda+1:], lda, tau, work, -1) - } - } else { - if k < n { - impl.Dorglq(m, n, k, a, lda, tau, work, -1) - } else if n > 1 { - impl.Dorglq(n-1, n-1, n-1, a[lda+1:], lda, tau, work, -1) - } - } - lworkopt := int(work[0]) - lworkopt = max(lworkopt, mn) - if lwork == -1 { - work[0] = float64(lworkopt) - return - } - - switch { - case len(a) < (m-1)*lda+n: - panic(shortA) - case wantq && len(tau) < min(m, k): - panic(shortTau) - case !wantq && len(tau) < min(n, k): - panic(shortTau) - } - - if wantq { - // Form Q, determined by a call to Dgebrd to reduce an m×k matrix. - if m >= k { - impl.Dorgqr(m, n, k, a, lda, tau, work, lwork) - } else { - // Shift the vectors which define the elementary reflectors one - // column to the right, and set the first row and column of Q to - // those of the unit matrix. - for j := m - 1; j >= 1; j-- { - a[j] = 0 - for i := j + 1; i < m; i++ { - a[i*lda+j] = a[i*lda+j-1] - } - } - a[0] = 1 - for i := 1; i < m; i++ { - a[i*lda] = 0 - } - if m > 1 { - // Form Q[1:m-1, 1:m-1] - impl.Dorgqr(m-1, m-1, m-1, a[lda+1:], lda, tau, work, lwork) - } - } - } else { - // Form P^T, determined by a call to Dgebrd to reduce a k×n matrix. - if k < n { - impl.Dorglq(m, n, k, a, lda, tau, work, lwork) - } else { - // Shift the vectors which define the elementary reflectors one - // row downward, and set the first row and column of P^T to - // those of the unit matrix. 
- a[0] = 1 - for i := 1; i < n; i++ { - a[i*lda] = 0 - } - for j := 1; j < n; j++ { - for i := j - 1; i >= 1; i-- { - a[i*lda+j] = a[(i-1)*lda+j] - } - a[j] = 0 - } - if n > 1 { - impl.Dorglq(n-1, n-1, n-1, a[lda+1:], lda, tau, work, lwork) - } - } - } - work[0] = float64(lworkopt) -} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dorghr.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dorghr.go deleted file mode 100644 index 6e799d10d..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/dorghr.go +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright ©2016 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -// Dorghr generates an n×n orthogonal matrix Q which is defined as the product -// of ihi-ilo elementary reflectors: -// Q = H_{ilo} H_{ilo+1} ... H_{ihi-1}. -// -// a and lda represent an n×n matrix that contains the elementary reflectors, as -// returned by Dgehrd. On return, a is overwritten by the n×n orthogonal matrix -// Q. Q will be equal to the identity matrix except in the submatrix -// Q[ilo+1:ihi+1,ilo+1:ihi+1]. -// -// ilo and ihi must have the same values as in the previous call of Dgehrd. It -// must hold that -// 0 <= ilo <= ihi < n, if n > 0, -// ilo = 0, ihi = -1, if n == 0. -// -// tau contains the scalar factors of the elementary reflectors, as returned by -// Dgehrd. tau must have length n-1. -// -// work must have length at least max(1,lwork) and lwork must be at least -// ihi-ilo. For optimum performance lwork must be at least (ihi-ilo)*nb where nb -// is the optimal blocksize. On return, work[0] will contain the optimal value -// of lwork. -// -// If lwork == -1, instead of performing Dorghr, only the optimal value of lwork -// will be stored into work[0]. -// -// If any requirement on input sizes is not met, Dorghr will panic. -// -// Dorghr is an internal routine. It is exported for testing purposes. -func (impl Implementation) Dorghr(n, ilo, ihi int, a []float64, lda int, tau, work []float64, lwork int) { - nh := ihi - ilo - switch { - case ilo < 0 || max(1, n) <= ilo: - panic(badIlo) - case ihi < min(ilo, n-1) || n <= ihi: - panic(badIhi) - case lda < max(1, n): - panic(badLdA) - case lwork < max(1, nh) && lwork != -1: - panic(badLWork) - case len(work) < max(1, lwork): - panic(shortWork) - } - - // Quick return if possible. - if n == 0 { - work[0] = 1 - return - } - - lwkopt := max(1, nh) * impl.Ilaenv(1, "DORGQR", " ", nh, nh, nh, -1) - if lwork == -1 { - work[0] = float64(lwkopt) - return - } - - switch { - case len(a) < (n-1)*lda+n: - panic(shortA) - case len(tau) < n-1: - panic(shortTau) - } - - // Shift the vectors which define the elementary reflectors one column - // to the right. - for i := ilo + 2; i < ihi+1; i++ { - copy(a[i*lda+ilo+1:i*lda+i], a[i*lda+ilo:i*lda+i-1]) - } - // Set the first ilo+1 and the last n-ihi-1 rows and columns to those of - // the identity matrix. - for i := 0; i < ilo+1; i++ { - for j := 0; j < n; j++ { - a[i*lda+j] = 0 - } - a[i*lda+i] = 1 - } - for i := ilo + 1; i < ihi+1; i++ { - for j := 0; j <= ilo; j++ { - a[i*lda+j] = 0 - } - for j := i; j < n; j++ { - a[i*lda+j] = 0 - } - } - for i := ihi + 1; i < n; i++ { - for j := 0; j < n; j++ { - a[i*lda+j] = 0 - } - a[i*lda+i] = 1 - } - if nh > 0 { - // Generate Q[ilo+1:ihi+1,ilo+1:ihi+1]. 
- impl.Dorgqr(nh, nh, nh, a[(ilo+1)*lda+ilo+1:], lda, tau[ilo:ihi], work, lwork) - } - work[0] = float64(lwkopt) -} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dorgl2.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dorgl2.go deleted file mode 100644 index b5566b9de..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/dorgl2.go +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright ©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import ( - "gonum.org/v1/gonum/blas" - "gonum.org/v1/gonum/blas/blas64" -) - -// Dorgl2 generates an m×n matrix Q with orthonormal rows defined by the -// first m rows product of elementary reflectors as computed by Dgelqf. -// Q = H_0 * H_1 * ... * H_{k-1} -// len(tau) >= k, 0 <= k <= m, 0 <= m <= n, len(work) >= m. -// Dorgl2 will panic if these conditions are not met. -// -// Dorgl2 is an internal routine. It is exported for testing purposes. -func (impl Implementation) Dorgl2(m, n, k int, a []float64, lda int, tau, work []float64) { - switch { - case m < 0: - panic(mLT0) - case n < m: - panic(nLTM) - case k < 0: - panic(kLT0) - case k > m: - panic(kGTM) - case lda < max(1, m): - panic(badLdA) - } - - if m == 0 { - return - } - - switch { - case len(a) < (m-1)*lda+n: - panic(shortA) - case len(tau) < k: - panic(shortTau) - case len(work) < m: - panic(shortWork) - } - - bi := blas64.Implementation() - - if k < m { - for i := k; i < m; i++ { - for j := 0; j < n; j++ { - a[i*lda+j] = 0 - } - } - for j := k; j < m; j++ { - a[j*lda+j] = 1 - } - } - for i := k - 1; i >= 0; i-- { - if i < n-1 { - if i < m-1 { - a[i*lda+i] = 1 - impl.Dlarf(blas.Right, m-i-1, n-i, a[i*lda+i:], 1, tau[i], a[(i+1)*lda+i:], lda, work) - } - bi.Dscal(n-i-1, -tau[i], a[i*lda+i+1:], 1) - } - a[i*lda+i] = 1 - tau[i] - for l := 0; l < i; l++ { - a[i*lda+l] = 0 - } - } -} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dorglq.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dorglq.go deleted file mode 100644 index a6dd980ce..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/dorglq.go +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright ©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import ( - "gonum.org/v1/gonum/blas" - "gonum.org/v1/gonum/lapack" -) - -// Dorglq generates an m×n matrix Q with orthonormal columns defined by the -// product of elementary reflectors as computed by Dgelqf. -// Q = H_0 * H_1 * ... * H_{k-1} -// Dorglq is the blocked version of Dorgl2 that makes greater use of level-3 BLAS -// routines. -// -// len(tau) >= k, 0 <= k <= m, and 0 <= m <= n. -// -// work is temporary storage, and lwork specifies the usable memory length. At minimum, -// lwork >= m, and the amount of blocking is limited by the usable length. -// If lwork == -1, instead of computing Dorglq the optimal work length is stored -// into work[0]. -// -// Dorglq will panic if the conditions on input values are not met. -// -// Dorglq is an internal routine. It is exported for testing purposes. 
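A minimal sketch of the blocked path, assuming only the signatures visible in this vendor tree (values hypothetical): Dgelqf produces the reflectors, and Dorglq expands them in place into a Q whose rows are orthonormal.

package main

import (
	"fmt"

	"gonum.org/v1/gonum/lapack/gonum"
)

func main() {
	impl := gonum.Implementation{}
	const m, n = 2, 4
	a := []float64{
		1, 2, 3, 4,
		5, 6, 7, 8,
	}
	tau := make([]float64, m)

	// LQ-factor A with a workspace query, then generate Q in place.
	work := make([]float64, 1)
	impl.Dgelqf(m, n, a, n, tau, work, -1)
	work = make([]float64, int(work[0]))
	impl.Dgelqf(m, n, a, n, tau, work, len(work))
	impl.Dorglq(m, n, m, a, n, tau, work, len(work))
	fmt.Println(a) // The rows of a are now orthonormal.
}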
-func (impl Implementation) Dorglq(m, n, k int, a []float64, lda int, tau, work []float64, lwork int) { - switch { - case m < 0: - panic(mLT0) - case n < m: - panic(nLTM) - case k < 0: - panic(kLT0) - case k > m: - panic(kGTM) - case lda < max(1, n): - panic(badLdA) - case lwork < max(1, m) && lwork != -1: - panic(badLWork) - case len(work) < max(1, lwork): - panic(shortWork) - } - - if m == 0 { - work[0] = 1 - return - } - - nb := impl.Ilaenv(1, "DORGLQ", " ", m, n, k, -1) - if lwork == -1 { - work[0] = float64(m * nb) - return - } - - switch { - case len(a) < (m-1)*lda+n: - panic(shortA) - case len(tau) < k: - panic(shortTau) - } - - nbmin := 2 // Minimum block size - var nx int // Crossover size from blocked to unbloked code - iws := m // Length of work needed - var ldwork int - if 1 < nb && nb < k { - nx = max(0, impl.Ilaenv(3, "DORGLQ", " ", m, n, k, -1)) - if nx < k { - ldwork = nb - iws = m * ldwork - if lwork < iws { - nb = lwork / m - ldwork = nb - nbmin = max(2, impl.Ilaenv(2, "DORGLQ", " ", m, n, k, -1)) - } - } - } - - var ki, kk int - if nbmin <= nb && nb < k && nx < k { - // The first kk rows are handled by the blocked method. - ki = ((k - nx - 1) / nb) * nb - kk = min(k, ki+nb) - for i := kk; i < m; i++ { - for j := 0; j < kk; j++ { - a[i*lda+j] = 0 - } - } - } - if kk < m { - // Perform the operation on colums kk to the end. - impl.Dorgl2(m-kk, n-kk, k-kk, a[kk*lda+kk:], lda, tau[kk:], work) - } - if kk > 0 { - // Perform the operation on column-blocks - for i := ki; i >= 0; i -= nb { - ib := min(nb, k-i) - if i+ib < m { - impl.Dlarft(lapack.Forward, lapack.RowWise, - n-i, ib, - a[i*lda+i:], lda, - tau[i:], - work, ldwork) - - impl.Dlarfb(blas.Right, blas.Trans, lapack.Forward, lapack.RowWise, - m-i-ib, n-i, ib, - a[i*lda+i:], lda, - work, ldwork, - a[(i+ib)*lda+i:], lda, - work[ib*ldwork:], ldwork) - } - impl.Dorgl2(ib, n-i, ib, a[i*lda+i:], lda, tau[i:], work) - for l := i; l < i+ib; l++ { - for j := 0; j < i; j++ { - a[l*lda+j] = 0 - } - } - } - } - work[0] = float64(iws) -} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dorgql.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dorgql.go deleted file mode 100644 index 6927ba4ca..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/dorgql.go +++ /dev/null @@ -1,136 +0,0 @@ -// Copyright ©2016 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import ( - "gonum.org/v1/gonum/blas" - "gonum.org/v1/gonum/lapack" -) - -// Dorgql generates the m×n matrix Q with orthonormal columns defined as the -// last n columns of a product of k elementary reflectors of order m -// Q = H_{k-1} * ... * H_1 * H_0. -// -// It must hold that -// 0 <= k <= n <= m, -// and Dorgql will panic otherwise. -// -// On entry, the (n-k+i)-th column of A must contain the vector which defines -// the elementary reflector H_i, for i=0,...,k-1, and tau[i] must contain its -// scalar factor. On return, a contains the m×n matrix Q. -// -// tau must have length at least k, and Dorgql will panic otherwise. -// -// work must have length at least max(1,lwork), and lwork must be at least -// max(1,n), otherwise Dorgql will panic. For optimum performance lwork must -// be a sufficiently large multiple of n. -// -// If lwork == -1, instead of computing Dorgql the optimal work length is stored -// into work[0]. -// -// Dorgql is an internal routine. It is exported for testing purposes. 
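The same query-then-call pattern applies here. This illustrative sketch (all values hypothetical) rebuilds the full Q of a QL factorization, taking k == n == m:

package main

import (
	"fmt"

	"gonum.org/v1/gonum/lapack/gonum"
)

func main() {
	impl := gonum.Implementation{}
	const n = 4
	a := []float64{
		4, 1, 0, 0,
		1, 4, 1, 0,
		0, 1, 4, 1,
		0, 0, 1, 4,
	}
	tau := make([]float64, n)

	work := make([]float64, 1)
	impl.Dgeqlf(n, n, a, n, tau, work, -1)
	work = make([]float64, int(work[0]))
	impl.Dgeqlf(n, n, a, n, tau, work, len(work))

	// Ask Dorgql for its optimal workspace before generating Q.
	impl.Dorgql(n, n, n, a, n, tau, work, -1)
	if opt := int(work[0]); opt > len(work) {
		work = make([]float64, opt)
	}
	impl.Dorgql(n, n, n, a, n, tau, work, len(work))
	fmt.Println(a)
}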
-func (impl Implementation) Dorgql(m, n, k int, a []float64, lda int, tau, work []float64, lwork int) { - switch { - case m < 0: - panic(mLT0) - case n < 0: - panic(nLT0) - case n > m: - panic(nGTM) - case k < 0: - panic(kLT0) - case k > n: - panic(kGTN) - case lda < max(1, n): - panic(badLdA) - case lwork < max(1, n) && lwork != -1: - panic(badLWork) - case len(work) < max(1, lwork): - panic(shortWork) - } - - // Quick return if possible. - if n == 0 { - work[0] = 1 - return - } - - nb := impl.Ilaenv(1, "DORGQL", " ", m, n, k, -1) - if lwork == -1 { - work[0] = float64(n * nb) - return - } - - switch { - case len(a) < (m-1)*lda+n: - panic(shortA) - case len(tau) < k: - panic(shortTau) - } - - nbmin := 2 - var nx, ldwork int - iws := n - if 1 < nb && nb < k { - // Determine when to cross over from blocked to unblocked code. - nx = max(0, impl.Ilaenv(3, "DORGQL", " ", m, n, k, -1)) - if nx < k { - // Determine if workspace is large enough for blocked code. - iws = n * nb - if lwork < iws { - // Not enough workspace to use optimal nb: reduce nb and determine - // the minimum value of nb. - nb = lwork / n - nbmin = max(2, impl.Ilaenv(2, "DORGQL", " ", m, n, k, -1)) - } - ldwork = nb - } - } - - var kk int - if nbmin <= nb && nb < k && nx < k { - // Use blocked code after the first block. The last kk columns are handled - // by the block method. - kk = min(k, ((k-nx+nb-1)/nb)*nb) - - // Set A(m-kk:m, 0:n-kk) to zero. - for i := m - kk; i < m; i++ { - for j := 0; j < n-kk; j++ { - a[i*lda+j] = 0 - } - } - } - - // Use unblocked code for the first or only block. - impl.Dorg2l(m-kk, n-kk, k-kk, a, lda, tau, work) - if kk > 0 { - // Use blocked code. - for i := k - kk; i < k; i += nb { - ib := min(nb, k-i) - if n-k+i > 0 { - // Form the triangular factor of the block reflector - // H = H_{i+ib-1} * ... * H_{i+1} * H_i. - impl.Dlarft(lapack.Backward, lapack.ColumnWise, m-k+i+ib, ib, - a[n-k+i:], lda, tau[i:], work, ldwork) - - // Apply H to A[0:m-k+i+ib, 0:n-k+i] from the left. - impl.Dlarfb(blas.Left, blas.NoTrans, lapack.Backward, lapack.ColumnWise, - m-k+i+ib, n-k+i, ib, a[n-k+i:], lda, work, ldwork, - a, lda, work[ib*ldwork:], ldwork) - } - - // Apply H to rows 0:m-k+i+ib of current block. - impl.Dorg2l(m-k+i+ib, ib, ib, a[n-k+i:], lda, tau[i:], work) - - // Set rows m-k+i+ib:m of current block to zero. - for j := n - k + i; j < n-k+i+ib; j++ { - for l := m - k + i + ib; l < m; l++ { - a[l*lda+j] = 0 - } - } - } - } - work[0] = float64(iws) -} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dorgqr.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dorgqr.go deleted file mode 100644 index f07fdaf46..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/dorgqr.go +++ /dev/null @@ -1,134 +0,0 @@ -// Copyright ©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import ( - "gonum.org/v1/gonum/blas" - "gonum.org/v1/gonum/lapack" -) - -// Dorgqr generates an m×n matrix Q with orthonormal columns defined by the -// product of elementary reflectors -// Q = H_0 * H_1 * ... * H_{k-1} -// as computed by Dgeqrf. -// Dorgqr is the blocked version of Dorg2r that makes greater use of level-3 BLAS -// routines. -// -// The length of tau must be at least k, and the length of work must be at least n. -// It also must be that 0 <= k <= n and 0 <= n <= m. -// -// work is temporary storage, and lwork specifies the usable memory length. 
At -// minimum, lwork >= n, and the amount of blocking is limited by the usable -// length. If lwork == -1, instead of computing Dorgqr the optimal work length -// is stored into work[0]. -// -// Dorgqr will panic if the conditions on input values are not met. -// -// Dorgqr is an internal routine. It is exported for testing purposes. -func (impl Implementation) Dorgqr(m, n, k int, a []float64, lda int, tau, work []float64, lwork int) { - switch { - case m < 0: - panic(mLT0) - case n < 0: - panic(nLT0) - case n > m: - panic(nGTM) - case k < 0: - panic(kLT0) - case k > n: - panic(kGTN) - case lda < max(1, n) && lwork != -1: - // Normally, we follow the reference and require the leading - // dimension to be always valid, even in case of workspace - // queries. However, if a caller provided a placeholder value - // for lda (and a) when doing a workspace query that didn't - // fulfill the condition here, it would cause a panic. This is - // exactly what Dgesvd does. - panic(badLdA) - case lwork < max(1, n) && lwork != -1: - panic(badLWork) - case len(work) < max(1, lwork): - panic(shortWork) - } - - if n == 0 { - work[0] = 1 - return - } - - nb := impl.Ilaenv(1, "DORGQR", " ", m, n, k, -1) - // work is treated as an n×nb matrix - if lwork == -1 { - work[0] = float64(n * nb) - return - } - - switch { - case len(a) < (m-1)*lda+n: - panic(shortA) - case len(tau) < k: - panic(shortTau) - } - - nbmin := 2 // Minimum block size - var nx int // Crossover size from blocked to unbloked code - iws := n // Length of work needed - var ldwork int - if 1 < nb && nb < k { - nx = max(0, impl.Ilaenv(3, "DORGQR", " ", m, n, k, -1)) - if nx < k { - ldwork = nb - iws = n * ldwork - if lwork < iws { - nb = lwork / n - ldwork = nb - nbmin = max(2, impl.Ilaenv(2, "DORGQR", " ", m, n, k, -1)) - } - } - } - var ki, kk int - if nbmin <= nb && nb < k && nx < k { - // The first kk columns are handled by the blocked method. - ki = ((k - nx - 1) / nb) * nb - kk = min(k, ki+nb) - for i := 0; i < kk; i++ { - for j := kk; j < n; j++ { - a[i*lda+j] = 0 - } - } - } - if kk < n { - // Perform the operation on colums kk to the end. - impl.Dorg2r(m-kk, n-kk, k-kk, a[kk*lda+kk:], lda, tau[kk:], work) - } - if kk > 0 { - // Perform the operation on column-blocks. - for i := ki; i >= 0; i -= nb { - ib := min(nb, k-i) - if i+ib < n { - impl.Dlarft(lapack.Forward, lapack.ColumnWise, - m-i, ib, - a[i*lda+i:], lda, - tau[i:], - work, ldwork) - - impl.Dlarfb(blas.Left, blas.NoTrans, lapack.Forward, lapack.ColumnWise, - m-i, n-i-ib, ib, - a[i*lda+i:], lda, - work, ldwork, - a[i*lda+i+ib:], lda, - work[ib*ldwork:], ldwork) - } - impl.Dorg2r(m-i, ib, ib, a[i*lda+i:], lda, tau[i:], work) - // Set rows 0:i-1 of current block to zero. - for j := i; j < i+ib; j++ { - for l := 0; l < i; l++ { - a[l*lda+j] = 0 - } - } - } - } - work[0] = float64(iws) -} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dorgtr.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dorgtr.go deleted file mode 100644 index 483fbcae9..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/dorgtr.go +++ /dev/null @@ -1,104 +0,0 @@ -// Copyright ©2016 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import "gonum.org/v1/gonum/blas" - -// Dorgtr generates a real orthogonal matrix Q which is defined as the product -// of n-1 elementary reflectors of order n as returned by Dsytrd. -// -// The construction of Q depends on the value of uplo: -// Q = H_{n-1} * ... 
* H_1 * H_0 if uplo == blas.Upper -// Q = H_0 * H_1 * ... * H_{n-1} if uplo == blas.Lower -// where H_i is constructed from the elementary reflectors as computed by Dsytrd. -// See the documentation for Dsytrd for more information. -// -// tau must have length at least n-1, and Dorgtr will panic otherwise. -// -// work is temporary storage, and lwork specifies the usable memory length. At -// minimum, lwork >= max(1,n-1), and Dorgtr will panic otherwise. The amount of blocking -// is limited by the usable length. -// If lwork == -1, instead of computing Dorgtr the optimal work length is stored -// into work[0]. -// -// Dorgtr is an internal routine. It is exported for testing purposes. -func (impl Implementation) Dorgtr(uplo blas.Uplo, n int, a []float64, lda int, tau, work []float64, lwork int) { - switch { - case uplo != blas.Upper && uplo != blas.Lower: - panic(badUplo) - case n < 0: - panic(nLT0) - case lda < max(1, n): - panic(badLdA) - case lwork < max(1, n-1) && lwork != -1: - panic(badLWork) - case len(work) < max(1, lwork): - panic(shortWork) - } - - if n == 0 { - work[0] = 1 - return - } - - var nb int - if uplo == blas.Upper { - nb = impl.Ilaenv(1, "DORGQL", " ", n-1, n-1, n-1, -1) - } else { - nb = impl.Ilaenv(1, "DORGQR", " ", n-1, n-1, n-1, -1) - } - lworkopt := max(1, n-1) * nb - if lwork == -1 { - work[0] = float64(lworkopt) - return - } - - switch { - case len(a) < (n-1)*lda+n: - panic(shortA) - case len(tau) < n-1: - panic(shortTau) - } - - if uplo == blas.Upper { - // Q was determined by a call to Dsytrd with uplo == blas.Upper. - // Shift the vectors which define the elementary reflectors one column - // to the left, and set the last row and column of Q to those of the unit - // matrix. - for j := 0; j < n-1; j++ { - for i := 0; i < j; i++ { - a[i*lda+j] = a[i*lda+j+1] - } - a[(n-1)*lda+j] = 0 - } - for i := 0; i < n-1; i++ { - a[i*lda+n-1] = 0 - } - a[(n-1)*lda+n-1] = 1 - - // Generate Q[0:n-1, 0:n-1]. - impl.Dorgql(n-1, n-1, n-1, a, lda, tau, work, lwork) - } else { - // Q was determined by a call to Dsytrd with uplo == blas.Upper. - // Shift the vectors which define the elementary reflectors one column - // to the right, and set the first row and column of Q to those of the unit - // matrix. - for j := n - 1; j > 0; j-- { - a[j] = 0 - for i := j + 1; i < n; i++ { - a[i*lda+j] = a[i*lda+j-1] - } - } - a[0] = 1 - for i := 1; i < n; i++ { - a[i*lda] = 0 - } - if n > 1 { - // Generate Q[1:n, 1:n]. - impl.Dorgqr(n-1, n-1, n-1, a[lda+1:], lda, tau, work, lwork) - } - } - work[0] = float64(lworkopt) -} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dorm2r.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dorm2r.go deleted file mode 100644 index 4b0bd83cc..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/dorm2r.go +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright ©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import "gonum.org/v1/gonum/blas" - -// Dorm2r multiplies a general matrix C by an orthogonal matrix from a QR factorization -// determined by Dgeqrf. -// C = Q * C if side == blas.Left and trans == blas.NoTrans -// C = Q^T * C if side == blas.Left and trans == blas.Trans -// C = C * Q if side == blas.Right and trans == blas.NoTrans -// C = C * Q^T if side == blas.Right and trans == blas.Trans -// If side == blas.Left, a is a matrix of size m×k, and if side == blas.Right -// a is of size n×k. 
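-//
-// The i-th column of a stores the vector that defines the elementary
-// reflector H_i, as returned in the lower trapezoid of A by Dgeqrf.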
-// -// tau contains the Householder factors and is of length at least k and this function -// will panic otherwise. -// -// work is temporary storage of length at least n if side == blas.Left -// and at least m if side == blas.Right and this function will panic otherwise. -// -// Dorm2r is an internal routine. It is exported for testing purposes. -func (impl Implementation) Dorm2r(side blas.Side, trans blas.Transpose, m, n, k int, a []float64, lda int, tau, c []float64, ldc int, work []float64) { - left := side == blas.Left - switch { - case !left && side != blas.Right: - panic(badSide) - case trans != blas.Trans && trans != blas.NoTrans: - panic(badTrans) - case m < 0: - panic(mLT0) - case n < 0: - panic(nLT0) - case k < 0: - panic(kLT0) - case left && k > m: - panic(kGTM) - case !left && k > n: - panic(kGTN) - case lda < max(1, k): - panic(badLdA) - case ldc < max(1, n): - panic(badLdC) - } - - // Quick return if possible. - if m == 0 || n == 0 || k == 0 { - return - } - - switch { - case left && len(a) < (m-1)*lda+k: - panic(shortA) - case !left && len(a) < (n-1)*lda+k: - panic(shortA) - case len(c) < (m-1)*ldc+n: - panic(shortC) - case len(tau) < k: - panic(shortTau) - case left && len(work) < n: - panic(shortWork) - case !left && len(work) < m: - panic(shortWork) - } - - if left { - if trans == blas.NoTrans { - for i := k - 1; i >= 0; i-- { - aii := a[i*lda+i] - a[i*lda+i] = 1 - impl.Dlarf(side, m-i, n, a[i*lda+i:], lda, tau[i], c[i*ldc:], ldc, work) - a[i*lda+i] = aii - } - return - } - for i := 0; i < k; i++ { - aii := a[i*lda+i] - a[i*lda+i] = 1 - impl.Dlarf(side, m-i, n, a[i*lda+i:], lda, tau[i], c[i*ldc:], ldc, work) - a[i*lda+i] = aii - } - return - } - if trans == blas.NoTrans { - for i := 0; i < k; i++ { - aii := a[i*lda+i] - a[i*lda+i] = 1 - impl.Dlarf(side, m, n-i, a[i*lda+i:], lda, tau[i], c[i:], ldc, work) - a[i*lda+i] = aii - } - return - } - for i := k - 1; i >= 0; i-- { - aii := a[i*lda+i] - a[i*lda+i] = 1 - impl.Dlarf(side, m, n-i, a[i*lda+i:], lda, tau[i], c[i:], ldc, work) - a[i*lda+i] = aii - } -} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dormbr.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dormbr.go deleted file mode 100644 index 026dc0412..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/dormbr.go +++ /dev/null @@ -1,178 +0,0 @@ -// Copyright ©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import ( - "gonum.org/v1/gonum/blas" - "gonum.org/v1/gonum/lapack" -) - -// Dormbr applies a multiplicative update to the matrix C based on a -// decomposition computed by Dgebrd. -// -// Dormbr overwrites the m×n matrix C with -// Q * C if vect == lapack.ApplyQ, side == blas.Left, and trans == blas.NoTrans -// C * Q if vect == lapack.ApplyQ, side == blas.Right, and trans == blas.NoTrans -// Q^T * C if vect == lapack.ApplyQ, side == blas.Left, and trans == blas.Trans -// C * Q^T if vect == lapack.ApplyQ, side == blas.Right, and trans == blas.Trans -// -// P * C if vect == lapack.ApplyP, side == blas.Left, and trans == blas.NoTrans -// C * P if vect == lapack.ApplyP, side == blas.Right, and trans == blas.NoTrans -// P^T * C if vect == lapack.ApplyP, side == blas.Left, and trans == blas.Trans -// C * P^T if vect == lapack.ApplyP, side == blas.Right, and trans == blas.Trans -// where P and Q are the orthogonal matrices determined by Dgebrd when reducing -// a matrix A to bidiagonal form: A = Q * B * P^T. 
See Dgebrd for the -// definitions of Q and P. -// -// If vect == lapack.ApplyQ, A is assumed to have been an nq×k matrix, while if -// vect == lapack.ApplyP, A is assumed to have been a k×nq matrix. nq = m if -// side == blas.Left, while nq = n if side == blas.Right. -// -// tau must have length min(nq,k), and Dormbr will panic otherwise. tau contains -// the elementary reflectors to construct Q or P depending on the value of -// vect. -// -// work must have length at least max(1,lwork), and lwork must be either -1 or -// at least max(1,n) if side == blas.Left, and at least max(1,m) if side == -// blas.Right. For optimum performance lwork should be at least n*nb if side == -// blas.Left, and at least m*nb if side == blas.Right, where nb is the optimal -// block size. On return, work[0] will contain the optimal value of lwork. -// -// If lwork == -1, the function only calculates the optimal value of lwork and -// returns it in work[0]. -// -// Dormbr is an internal routine. It is exported for testing purposes. -func (impl Implementation) Dormbr(vect lapack.ApplyOrtho, side blas.Side, trans blas.Transpose, m, n, k int, a []float64, lda int, tau, c []float64, ldc int, work []float64, lwork int) { - nq := n - nw := m - if side == blas.Left { - nq = m - nw = n - } - applyQ := vect == lapack.ApplyQ - switch { - case !applyQ && vect != lapack.ApplyP: - panic(badApplyOrtho) - case side != blas.Left && side != blas.Right: - panic(badSide) - case trans != blas.NoTrans && trans != blas.Trans: - panic(badTrans) - case m < 0: - panic(mLT0) - case n < 0: - panic(nLT0) - case k < 0: - panic(kLT0) - case applyQ && lda < max(1, min(nq, k)): - panic(badLdA) - case !applyQ && lda < max(1, nq): - panic(badLdA) - case ldc < max(1, n): - panic(badLdC) - case lwork < max(1, nw) && lwork != -1: - panic(badLWork) - case len(work) < max(1, lwork): - panic(shortWork) - } - - // Quick return if possible. - if m == 0 || n == 0 { - work[0] = 1 - return - } - - // The current implementation does not use opts, but a future change may - // use these options so construct them. - var opts string - if side == blas.Left { - opts = "L" - } else { - opts = "R" - } - if trans == blas.Trans { - opts += "T" - } else { - opts += "N" - } - var nb int - if applyQ { - if side == blas.Left { - nb = impl.Ilaenv(1, "DORMQR", opts, m-1, n, m-1, -1) - } else { - nb = impl.Ilaenv(1, "DORMQR", opts, m, n-1, n-1, -1) - } - } else { - if side == blas.Left { - nb = impl.Ilaenv(1, "DORMLQ", opts, m-1, n, m-1, -1) - } else { - nb = impl.Ilaenv(1, "DORMLQ", opts, m, n-1, n-1, -1) - } - } - lworkopt := max(1, nw) * nb - if lwork == -1 { - work[0] = float64(lworkopt) - return - } - - minnqk := min(nq, k) - switch { - case applyQ && len(a) < (nq-1)*lda+minnqk: - panic(shortA) - case !applyQ && len(a) < (minnqk-1)*lda+nq: - panic(shortA) - case len(tau) < minnqk: - panic(shortTau) - case len(c) < (m-1)*ldc+n: - panic(shortC) - } - - if applyQ { - // Change the operation to get Q depending on the size of the initial - // matrix to Dgebrd. The size matters due to the storage location of - // the off-diagonal elements. 
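-		// When nq >= k the reflectors from Dgebrd occupy whole columns of a
-		// and Q is applied directly. Otherwise the leading reflector column
-		// is implicit, so the call below acts on the trailing submatrix,
-		// shifting a and C by one row or column.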
- if nq >= k { - impl.Dormqr(side, trans, m, n, k, a, lda, tau[:k], c, ldc, work, lwork) - } else if nq > 1 { - mi := m - ni := n - 1 - i1 := 0 - i2 := 1 - if side == blas.Left { - mi = m - 1 - ni = n - i1 = 1 - i2 = 0 - } - impl.Dormqr(side, trans, mi, ni, nq-1, a[1*lda:], lda, tau[:nq-1], c[i1*ldc+i2:], ldc, work, lwork) - } - work[0] = float64(lworkopt) - return - } - - transt := blas.Trans - if trans == blas.Trans { - transt = blas.NoTrans - } - - // Change the operation to get P depending on the size of the initial - // matrix to Dgebrd. The size matters due to the storage location of - // the off-diagonal elements. - if nq > k { - impl.Dormlq(side, transt, m, n, k, a, lda, tau, c, ldc, work, lwork) - } else if nq > 1 { - mi := m - ni := n - 1 - i1 := 0 - i2 := 1 - if side == blas.Left { - mi = m - 1 - ni = n - i1 = 1 - i2 = 0 - } - impl.Dormlq(side, transt, mi, ni, nq-1, a[1:], lda, tau, c[i1*ldc+i2:], ldc, work, lwork) - } - work[0] = float64(lworkopt) -} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dormhr.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dormhr.go deleted file mode 100644 index c00f44059..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/dormhr.go +++ /dev/null @@ -1,129 +0,0 @@ -// Copyright ©2016 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import "gonum.org/v1/gonum/blas" - -// Dormhr multiplies an m×n general matrix C with an nq×nq orthogonal matrix Q -// Q * C, if side == blas.Left and trans == blas.NoTrans, -// Q^T * C, if side == blas.Left and trans == blas.Trans, -// C * Q, if side == blas.Right and trans == blas.NoTrans, -// C * Q^T, if side == blas.Right and trans == blas.Trans, -// where nq == m if side == blas.Left and nq == n if side == blas.Right. -// -// Q is defined implicitly as the product of ihi-ilo elementary reflectors, as -// returned by Dgehrd: -// Q = H_{ilo} H_{ilo+1} ... H_{ihi-1}. -// Q is equal to the identity matrix except in the submatrix -// Q[ilo+1:ihi+1,ilo+1:ihi+1]. -// -// ilo and ihi must have the same values as in the previous call of Dgehrd. It -// must hold that -// 0 <= ilo <= ihi < m, if m > 0 and side == blas.Left, -// ilo = 0 and ihi = -1, if m = 0 and side == blas.Left, -// 0 <= ilo <= ihi < n, if n > 0 and side == blas.Right, -// ilo = 0 and ihi = -1, if n = 0 and side == blas.Right. -// -// a and lda represent an m×m matrix if side == blas.Left and an n×n matrix if -// side == blas.Right. The matrix contains vectors which define the elementary -// reflectors, as returned by Dgehrd. -// -// tau contains the scalar factors of the elementary reflectors, as returned by -// Dgehrd. tau must have length m-1 if side == blas.Left and n-1 if side == -// blas.Right. -// -// c and ldc represent the m×n matrix C. On return, c is overwritten by the -// product with Q. -// -// work must have length at least max(1,lwork), and lwork must be at least -// max(1,n), if side == blas.Left, and max(1,m), if side == blas.Right. For -// optimum performance lwork should be at least n*nb if side == blas.Left and -// m*nb if side == blas.Right, where nb is the optimal block size. On return, -// work[0] will contain the optimal value of lwork. -// -// If lwork == -1, instead of performing Dormhr, only the optimal value of lwork -// will be stored in work[0]. -// -// If any requirement on input sizes is not met, Dormhr will panic. -// -// Dormhr is an internal routine. It is exported for testing purposes. 
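An illustrative sketch (values hypothetical) pairing Dgehrd with this routine: reduce A to Hessenberg form, then apply the implicit Q to a slim matrix C without ever forming Q explicitly.

package main

import (
	"fmt"

	"gonum.org/v1/gonum/blas"
	"gonum.org/v1/gonum/lapack/gonum"
)

func main() {
	impl := gonum.Implementation{}
	const n = 4
	a := []float64{
		4, 1, 2, 3,
		1, 5, 1, 2,
		2, 1, 6, 1,
		3, 2, 1, 7,
	}
	tau := make([]float64, n-1)

	// Reduce A to upper Hessenberg form, A = Q * H * Q^T.
	work := make([]float64, 1)
	impl.Dgehrd(n, 0, n-1, a, n, tau, work, -1)
	work = make([]float64, int(work[0]))
	impl.Dgehrd(n, 0, n-1, a, n, tau, work, len(work))

	// Overwrite the n×2 matrix C with Q * C.
	c := []float64{
		1, 0,
		0, 1,
		1, 1,
		0, 0,
	}
	impl.Dormhr(blas.Left, blas.NoTrans, n, 2, 0, n-1, a, n, tau, c, 2, work, len(work))
	fmt.Println(c)
}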
-func (impl Implementation) Dormhr(side blas.Side, trans blas.Transpose, m, n, ilo, ihi int, a []float64, lda int, tau, c []float64, ldc int, work []float64, lwork int) { - nq := n // The order of Q. - nw := m // The minimum length of work. - if side == blas.Left { - nq = m - nw = n - } - switch { - case side != blas.Left && side != blas.Right: - panic(badSide) - case trans != blas.NoTrans && trans != blas.Trans: - panic(badTrans) - case m < 0: - panic(mLT0) - case n < 0: - panic(nLT0) - case ilo < 0 || max(1, nq) <= ilo: - panic(badIlo) - case ihi < min(ilo, nq-1) || nq <= ihi: - panic(badIhi) - case lda < max(1, nq): - panic(badLdA) - case lwork < max(1, nw) && lwork != -1: - panic(badLWork) - case len(work) < max(1, lwork): - panic(shortWork) - } - - // Quick return if possible. - if m == 0 || n == 0 { - work[0] = 1 - return - } - - nh := ihi - ilo - var nb int - if side == blas.Left { - opts := "LN" - if trans == blas.Trans { - opts = "LT" - } - nb = impl.Ilaenv(1, "DORMQR", opts, nh, n, nh, -1) - } else { - opts := "RN" - if trans == blas.Trans { - opts = "RT" - } - nb = impl.Ilaenv(1, "DORMQR", opts, m, nh, nh, -1) - } - lwkopt := max(1, nw) * nb - if lwork == -1 { - work[0] = float64(lwkopt) - return - } - - if nh == 0 { - work[0] = 1 - return - } - - switch { - case len(a) < (nq-1)*lda+nq: - panic(shortA) - case len(c) < (m-1)*ldc+n: - panic(shortC) - case len(tau) != nq-1: - panic(badLenTau) - } - - if side == blas.Left { - impl.Dormqr(side, trans, nh, n, nh, a[(ilo+1)*lda+ilo:], lda, - tau[ilo:ihi], c[(ilo+1)*ldc:], ldc, work, lwork) - } else { - impl.Dormqr(side, trans, m, nh, nh, a[(ilo+1)*lda+ilo:], lda, - tau[ilo:ihi], c[ilo+1:], ldc, work, lwork) - } - work[0] = float64(lwkopt) -} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dorml2.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dorml2.go deleted file mode 100644 index 25aa83ac1..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/dorml2.go +++ /dev/null @@ -1,102 +0,0 @@ -// Copyright ©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import "gonum.org/v1/gonum/blas" - -// Dorml2 multiplies a general matrix C by an orthogonal matrix from an LQ factorization -// determined by Dgelqf. -// C = Q * C if side == blas.Left and trans == blas.NoTrans -// C = Q^T * C if side == blas.Left and trans == blas.Trans -// C = C * Q if side == blas.Right and trans == blas.NoTrans -// C = C * Q^T if side == blas.Right and trans == blas.Trans -// If side == blas.Left, a is a matrix of side k×m, and if side == blas.Right -// a is of size k×n. -// -// tau contains the Householder factors and is of length at least k and this function will -// panic otherwise. -// -// work is temporary storage of length at least n if side == blas.Left -// and at least m if side == blas.Right and this function will panic otherwise. -// -// Dorml2 is an internal routine. It is exported for testing purposes. 
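A small sketch of the unblocked path, under the same assumptions as the other examples in this series (hypothetical data): Q comes from Dgelqf and is applied transposed from the right.

package main

import (
	"fmt"

	"gonum.org/v1/gonum/blas"
	"gonum.org/v1/gonum/lapack/gonum"
)

func main() {
	impl := gonum.Implementation{}
	const m, n, k = 3, 4, 2
	// a holds the k×n LQ factorization produced by Dgelqf.
	a := []float64{
		1, 2, 3, 4,
		5, 6, 7, 8,
	}
	tau := make([]float64, k)
	work := make([]float64, 1)
	impl.Dgelqf(k, n, a, n, tau, work, -1)
	work = make([]float64, int(work[0]))
	impl.Dgelqf(k, n, a, n, tau, work, len(work))

	// Overwrite the m×n matrix C with C * Q^T.
	c := make([]float64, m*n)
	for i := range c {
		c[i] = float64(i + 1)
	}
	impl.Dorml2(blas.Right, blas.Trans, m, n, k, a, n, tau, c, n, make([]float64, m))
	fmt.Println(c)
}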
-func (impl Implementation) Dorml2(side blas.Side, trans blas.Transpose, m, n, k int, a []float64, lda int, tau, c []float64, ldc int, work []float64) { - left := side == blas.Left - switch { - case !left && side != blas.Right: - panic(badSide) - case trans != blas.Trans && trans != blas.NoTrans: - panic(badTrans) - case m < 0: - panic(mLT0) - case n < 0: - panic(nLT0) - case k < 0: - panic(kLT0) - case left && k > m: - panic(kGTM) - case !left && k > n: - panic(kGTN) - case left && lda < max(1, m): - panic(badLdA) - case !left && lda < max(1, n): - panic(badLdA) - } - - // Quick return if possible. - if m == 0 || n == 0 || k == 0 { - return - } - - switch { - case left && len(a) < (k-1)*lda+m: - panic(shortA) - case !left && len(a) < (k-1)*lda+n: - panic(shortA) - case len(tau) < k: - panic(shortTau) - case len(c) < (m-1)*ldc+n: - panic(shortC) - case left && len(work) < n: - panic(shortWork) - case !left && len(work) < m: - panic(shortWork) - } - - notrans := trans == blas.NoTrans - switch { - case left && notrans: - for i := 0; i < k; i++ { - aii := a[i*lda+i] - a[i*lda+i] = 1 - impl.Dlarf(side, m-i, n, a[i*lda+i:], 1, tau[i], c[i*ldc:], ldc, work) - a[i*lda+i] = aii - } - - case left && !notrans: - for i := k - 1; i >= 0; i-- { - aii := a[i*lda+i] - a[i*lda+i] = 1 - impl.Dlarf(side, m-i, n, a[i*lda+i:], 1, tau[i], c[i*ldc:], ldc, work) - a[i*lda+i] = aii - } - - case !left && notrans: - for i := k - 1; i >= 0; i-- { - aii := a[i*lda+i] - a[i*lda+i] = 1 - impl.Dlarf(side, m, n-i, a[i*lda+i:], 1, tau[i], c[i:], ldc, work) - a[i*lda+i] = aii - } - - case !left && !notrans: - for i := 0; i < k; i++ { - aii := a[i*lda+i] - a[i*lda+i] = 1 - impl.Dlarf(side, m, n-i, a[i*lda+i:], 1, tau[i], c[i:], ldc, work) - a[i*lda+i] = aii - } - } -} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dormlq.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dormlq.go deleted file mode 100644 index 6fcfc2fb1..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/dormlq.go +++ /dev/null @@ -1,174 +0,0 @@ -// Copyright ©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import ( - "gonum.org/v1/gonum/blas" - "gonum.org/v1/gonum/lapack" -) - -// Dormlq multiplies the matrix C by the orthogonal matrix Q defined by the -// slices a and tau. A and tau are as returned from Dgelqf. -// C = Q * C if side == blas.Left and trans == blas.NoTrans -// C = Q^T * C if side == blas.Left and trans == blas.Trans -// C = C * Q if side == blas.Right and trans == blas.NoTrans -// C = C * Q^T if side == blas.Right and trans == blas.Trans -// If side == blas.Left, A is a matrix of side k×m, and if side == blas.Right -// A is of size k×n. This uses a blocked algorithm. -// -// work is temporary storage, and lwork specifies the usable memory length. -// At minimum, lwork >= m if side == blas.Left and lwork >= n if side == blas.Right, -// and this function will panic otherwise. -// Dormlq uses a block algorithm, but the block size is limited -// by the temporary space available. If lwork == -1, instead of performing Dormlq, -// the optimal work length will be stored into work[0]. -// -// tau contains the Householder scales and must have length at least k, and -// this function will panic otherwise. 
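For the blocked variant, a hypothetical sketch applying Q^T from the left instead, with the usual workspace query; all sizes and values are illustrative.

package main

import (
	"fmt"

	"gonum.org/v1/gonum/blas"
	"gonum.org/v1/gonum/lapack/gonum"
)

func main() {
	impl := gonum.Implementation{}
	const m, n, k = 4, 3, 2
	// Q has order m and comes from the LQ factorization of a k×m matrix.
	a := []float64{
		1, 2, 3, 4,
		5, 6, 7, 8,
	}
	tau := make([]float64, k)
	work := make([]float64, 1)
	impl.Dgelqf(k, m, a, m, tau, work, -1)
	work = make([]float64, int(work[0]))
	impl.Dgelqf(k, m, a, m, tau, work, len(work))

	// Overwrite the m×n matrix C with Q^T * C, sizing work via a query.
	c := make([]float64, m*n)
	for i := range c {
		c[i] = 1
	}
	impl.Dormlq(blas.Left, blas.Trans, m, n, k, a, m, tau, c, n, work, -1)
	if opt := int(work[0]); opt > len(work) {
		work = make([]float64, opt)
	}
	impl.Dormlq(blas.Left, blas.Trans, m, n, k, a, m, tau, c, n, work, len(work))
	fmt.Println(c)
}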
-func (impl Implementation) Dormlq(side blas.Side, trans blas.Transpose, m, n, k int, a []float64, lda int, tau, c []float64, ldc int, work []float64, lwork int) { - left := side == blas.Left - nw := m - if left { - nw = n - } - switch { - case !left && side != blas.Right: - panic(badSide) - case trans != blas.Trans && trans != blas.NoTrans: - panic(badTrans) - case m < 0: - panic(mLT0) - case n < 0: - panic(nLT0) - case k < 0: - panic(kLT0) - case left && k > m: - panic(kGTM) - case !left && k > n: - panic(kGTN) - case left && lda < max(1, m): - panic(badLdA) - case !left && lda < max(1, n): - panic(badLdA) - case lwork < max(1, nw) && lwork != -1: - panic(badLWork) - case len(work) < max(1, lwork): - panic(shortWork) - } - - // Quick return if possible. - if m == 0 || n == 0 || k == 0 { - work[0] = 1 - return - } - - const ( - nbmax = 64 - ldt = nbmax - tsize = nbmax * ldt - ) - opts := string(side) + string(trans) - nb := min(nbmax, impl.Ilaenv(1, "DORMLQ", opts, m, n, k, -1)) - lworkopt := max(1, nw)*nb + tsize - if lwork == -1 { - work[0] = float64(lworkopt) - return - } - - switch { - case left && len(a) < (k-1)*lda+m: - panic(shortA) - case !left && len(a) < (k-1)*lda+n: - panic(shortA) - case len(tau) < k: - panic(shortTau) - case len(c) < (m-1)*ldc+n: - panic(shortC) - } - - nbmin := 2 - if 1 < nb && nb < k { - iws := nw*nb + tsize - if lwork < iws { - nb = (lwork - tsize) / nw - nbmin = max(2, impl.Ilaenv(2, "DORMLQ", opts, m, n, k, -1)) - } - } - if nb < nbmin || k <= nb { - // Call unblocked code. - impl.Dorml2(side, trans, m, n, k, a, lda, tau, c, ldc, work) - work[0] = float64(lworkopt) - return - } - - t := work[:tsize] - wrk := work[tsize:] - ldwrk := nb - - notrans := trans == blas.NoTrans - transt := blas.NoTrans - if notrans { - transt = blas.Trans - } - - switch { - case left && notrans: - for i := 0; i < k; i += nb { - ib := min(nb, k-i) - impl.Dlarft(lapack.Forward, lapack.RowWise, m-i, ib, - a[i*lda+i:], lda, - tau[i:], - t, ldt) - impl.Dlarfb(side, transt, lapack.Forward, lapack.RowWise, m-i, n, ib, - a[i*lda+i:], lda, - t, ldt, - c[i*ldc:], ldc, - wrk, ldwrk) - } - - case left && !notrans: - for i := ((k - 1) / nb) * nb; i >= 0; i -= nb { - ib := min(nb, k-i) - impl.Dlarft(lapack.Forward, lapack.RowWise, m-i, ib, - a[i*lda+i:], lda, - tau[i:], - t, ldt) - impl.Dlarfb(side, transt, lapack.Forward, lapack.RowWise, m-i, n, ib, - a[i*lda+i:], lda, - t, ldt, - c[i*ldc:], ldc, - wrk, ldwrk) - } - - case !left && notrans: - for i := ((k - 1) / nb) * nb; i >= 0; i -= nb { - ib := min(nb, k-i) - impl.Dlarft(lapack.Forward, lapack.RowWise, n-i, ib, - a[i*lda+i:], lda, - tau[i:], - t, ldt) - impl.Dlarfb(side, transt, lapack.Forward, lapack.RowWise, m, n-i, ib, - a[i*lda+i:], lda, - t, ldt, - c[i:], ldc, - wrk, ldwrk) - } - - case !left && !notrans: - for i := 0; i < k; i += nb { - ib := min(nb, k-i) - impl.Dlarft(lapack.Forward, lapack.RowWise, n-i, ib, - a[i*lda+i:], lda, - tau[i:], - t, ldt) - impl.Dlarfb(side, transt, lapack.Forward, lapack.RowWise, m, n-i, ib, - a[i*lda+i:], lda, - t, ldt, - c[i:], ldc, - wrk, ldwrk) - } - } - work[0] = float64(lworkopt) -} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dormqr.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dormqr.go deleted file mode 100644 index 8ae450865..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/dormqr.go +++ /dev/null @@ -1,177 +0,0 @@ -// Copyright ©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package gonum - -import ( - "gonum.org/v1/gonum/blas" - "gonum.org/v1/gonum/lapack" -) - -// Dormqr multiplies an m×n matrix C by an orthogonal matrix Q as -// C = Q * C, if side == blas.Left and trans == blas.NoTrans, -// C = Q^T * C, if side == blas.Left and trans == blas.Trans, -// C = C * Q, if side == blas.Right and trans == blas.NoTrans, -// C = C * Q^T, if side == blas.Right and trans == blas.Trans, -// where Q is defined as the product of k elementary reflectors -// Q = H_0 * H_1 * ... * H_{k-1}. -// -// If side == blas.Left, A is an m×k matrix and 0 <= k <= m. -// If side == blas.Right, A is an n×k matrix and 0 <= k <= n. -// The ith column of A contains the vector which defines the elementary -// reflector H_i and tau[i] contains its scalar factor. tau must have length k -// and Dormqr will panic otherwise. Dgeqrf returns A and tau in the required -// form. -// -// work must have length at least max(1,lwork), and lwork must be at least n if -// side == blas.Left and at least m if side == blas.Right, otherwise Dormqr will -// panic. -// -// work is temporary storage, and lwork specifies the usable memory length. -// Larger values of lwork will generally give better performance. On return, -// work[0] will contain the optimal value of lwork. -// -// If lwork is -1, instead of performing Dormqr, the optimal workspace size will -// be stored into work[0]. -func (impl Implementation) Dormqr(side blas.Side, trans blas.Transpose, m, n, k int, a []float64, lda int, tau, c []float64, ldc int, work []float64, lwork int) { - left := side == blas.Left - nq := n - nw := m - if left { - nq = m - nw = n - } - switch { - case !left && side != blas.Right: - panic(badSide) - case trans != blas.NoTrans && trans != blas.Trans: - panic(badTrans) - case m < 0: - panic(mLT0) - case n < 0: - panic(nLT0) - case k < 0: - panic(kLT0) - case left && k > m: - panic(kGTM) - case !left && k > n: - panic(kGTN) - case lda < max(1, k): - panic(badLdA) - case ldc < max(1, n): - panic(badLdC) - case lwork < max(1, nw) && lwork != -1: - panic(badLWork) - case len(work) < max(1, lwork): - panic(shortWork) - } - - // Quick return if possible. - if m == 0 || n == 0 || k == 0 { - work[0] = 1 - return - } - - const ( - nbmax = 64 - ldt = nbmax - tsize = nbmax * ldt - ) - opts := string(side) + string(trans) - nb := min(nbmax, impl.Ilaenv(1, "DORMQR", opts, m, n, k, -1)) - lworkopt := max(1, nw)*nb + tsize - if lwork == -1 { - work[0] = float64(lworkopt) - return - } - - switch { - case len(a) < (nq-1)*lda+k: - panic(shortA) - case len(tau) != k: - panic(badLenTau) - case len(c) < (m-1)*ldc+n: - panic(shortC) - } - - nbmin := 2 - if 1 < nb && nb < k { - if lwork < nw*nb+tsize { - nb = (lwork - tsize) / nw - nbmin = max(2, impl.Ilaenv(2, "DORMQR", opts, m, n, k, -1)) - } - } - - if nb < nbmin || k <= nb { - // Call unblocked code.
- impl.Dorm2r(side, trans, m, n, k, a, lda, tau, c, ldc, work) - work[0] = float64(lworkopt) - return - } - - var ( - ldwork = nb - notrans = trans == blas.NoTrans - ) - switch { - case left && notrans: - for i := ((k - 1) / nb) * nb; i >= 0; i -= nb { - ib := min(nb, k-i) - impl.Dlarft(lapack.Forward, lapack.ColumnWise, m-i, ib, - a[i*lda+i:], lda, - tau[i:], - work[:tsize], ldt) - impl.Dlarfb(side, trans, lapack.Forward, lapack.ColumnWise, m-i, n, ib, - a[i*lda+i:], lda, - work[:tsize], ldt, - c[i*ldc:], ldc, - work[tsize:], ldwork) - } - - case left && !notrans: - for i := 0; i < k; i += nb { - ib := min(nb, k-i) - impl.Dlarft(lapack.Forward, lapack.ColumnWise, m-i, ib, - a[i*lda+i:], lda, - tau[i:], - work[:tsize], ldt) - impl.Dlarfb(side, trans, lapack.Forward, lapack.ColumnWise, m-i, n, ib, - a[i*lda+i:], lda, - work[:tsize], ldt, - c[i*ldc:], ldc, - work[tsize:], ldwork) - } - - case !left && notrans: - for i := 0; i < k; i += nb { - ib := min(nb, k-i) - impl.Dlarft(lapack.Forward, lapack.ColumnWise, n-i, ib, - a[i*lda+i:], lda, - tau[i:], - work[:tsize], ldt) - impl.Dlarfb(side, trans, lapack.Forward, lapack.ColumnWise, m, n-i, ib, - a[i*lda+i:], lda, - work[:tsize], ldt, - c[i:], ldc, - work[tsize:], ldwork) - } - - case !left && !notrans: - for i := ((k - 1) / nb) * nb; i >= 0; i -= nb { - ib := min(nb, k-i) - impl.Dlarft(lapack.Forward, lapack.ColumnWise, n-i, ib, - a[i*lda+i:], lda, - tau[i:], - work[:tsize], ldt) - impl.Dlarfb(side, trans, lapack.Forward, lapack.ColumnWise, m, n-i, ib, - a[i*lda+i:], lda, - work[:tsize], ldt, - c[i:], ldc, - work[tsize:], ldwork) - } - } - work[0] = float64(lworkopt) -} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dormr2.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dormr2.go deleted file mode 100644 index bb03f32c7..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/dormr2.go +++ /dev/null @@ -1,103 +0,0 @@ -// Copyright ©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import "gonum.org/v1/gonum/blas" - -// Dormr2 multiplies a general matrix C by an orthogonal matrix from a RQ factorization -// determined by Dgerqf. -// C = Q * C if side == blas.Left and trans == blas.NoTrans -// C = Q^T * C if side == blas.Left and trans == blas.Trans -// C = C * Q if side == blas.Right and trans == blas.NoTrans -// C = C * Q^T if side == blas.Right and trans == blas.Trans -// If side == blas.Left, a is a matrix of size k×m, and if side == blas.Right -// a is of size k×n. -// -// tau contains the Householder factors and is of length at least k and this function -// will panic otherwise. -// -// work is temporary storage of length at least n if side == blas.Left -// and at least m if side == blas.Right and this function will panic otherwise. -// -// Dormr2 is an internal routine. It is exported for testing purposes. 
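The Dormqr routine deleted above is most often used in least-squares solves; the following hedged sketch shows that pairing. The wrapper name lsSolve is hypothetical, and using Dtrtrs for the triangular back-substitution is one reasonable choice rather than anything mandated by the deleted sources. The unblocked Dormr2 implementation continues below.

package example

import (
	"gonum.org/v1/gonum/blas"
	"gonum.org/v1/gonum/lapack/gonum"
)

// lsSolve sketches the usual least-squares use of Dormqr: given the QR
// factorization of an m×n matrix A (m >= n) from Dgeqrf in a and tau,
// the minimizer of ||A*x - b|| is x = R^{-1}*(Q^T b)[0:n]. b has length
// m on entry; its first n entries hold x on return.
func lsSolve(m, n int, a []float64, lda int, tau, b []float64) bool {
	var impl gonum.Implementation
	// Apply Q^T to the right-hand side: b <- Q^T * b. Here C is m×1,
	// so k = n reflectors and ldc = 1, with work sized by the
	// lwork == -1 query.
	query := make([]float64, 1)
	impl.Dormqr(blas.Left, blas.Trans, m, 1, n, a, lda, tau, b, 1, query, -1)
	work := make([]float64, int(query[0]))
	impl.Dormqr(blas.Left, blas.Trans, m, 1, n, a, lda, tau, b, 1, work, len(work))
	// Back-substitute against the upper-triangular R stored in a.
	return impl.Dtrtrs(blas.Upper, blas.NoTrans, blas.NonUnit, n, 1, a, lda, b, 1)
}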
-func (impl Implementation) Dormr2(side blas.Side, trans blas.Transpose, m, n, k int, a []float64, lda int, tau, c []float64, ldc int, work []float64) { - left := side == blas.Left - nq := n - nw := m - if left { - nq = m - nw = n - } - switch { - case !left && side != blas.Right: - panic(badSide) - case trans != blas.NoTrans && trans != blas.Trans: - panic(badTrans) - case m < 0: - panic(mLT0) - case n < 0: - panic(nLT0) - case k < 0: - panic(kLT0) - case left && k > m: - panic(kGTM) - case !left && k > n: - panic(kGTN) - case lda < max(1, nq): - panic(badLdA) - case ldc < max(1, n): - panic(badLdC) - } - - // Quick return if possible. - if m == 0 || n == 0 || k == 0 { - return - } - - switch { - case len(a) < (k-1)*lda+nq: - panic(shortA) - case len(tau) < k: - panic(shortTau) - case len(c) < (m-1)*ldc+n: - panic(shortC) - case len(work) < nw: - panic(shortWork) - } - - if left { - if trans == blas.NoTrans { - for i := k - 1; i >= 0; i-- { - aii := a[i*lda+(m-k+i)] - a[i*lda+(m-k+i)] = 1 - impl.Dlarf(side, m-k+i+1, n, a[i*lda:], 1, tau[i], c, ldc, work) - a[i*lda+(m-k+i)] = aii - } - return - } - for i := 0; i < k; i++ { - aii := a[i*lda+(m-k+i)] - a[i*lda+(m-k+i)] = 1 - impl.Dlarf(side, m-k+i+1, n, a[i*lda:], 1, tau[i], c, ldc, work) - a[i*lda+(m-k+i)] = aii - } - return - } - if trans == blas.NoTrans { - for i := 0; i < k; i++ { - aii := a[i*lda+(n-k+i)] - a[i*lda+(n-k+i)] = 1 - impl.Dlarf(side, m, n-k+i+1, a[i*lda:], 1, tau[i], c, ldc, work) - a[i*lda+(n-k+i)] = aii - } - return - } - for i := k - 1; i >= 0; i-- { - aii := a[i*lda+(n-k+i)] - a[i*lda+(n-k+i)] = 1 - impl.Dlarf(side, m, n-k+i+1, a[i*lda:], 1, tau[i], c, ldc, work) - a[i*lda+(n-k+i)] = aii - } -} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dpbtf2.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dpbtf2.go deleted file mode 100644 index d261a98b5..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/dpbtf2.go +++ /dev/null @@ -1,112 +0,0 @@ -// Copyright ©2017 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import ( - "math" - - "gonum.org/v1/gonum/blas" - "gonum.org/v1/gonum/blas/blas64" -) - -// Dpbtf2 computes the Cholesky factorization of a symmetric positive definite -// band matrix ab. The matrix ab is n×n with kd diagonal bands. The Cholesky -// factorization computed is -// A = U^T * U if uplo == blas.Upper -// A = L * L^T if uplo == blas.Lower -// uplo also specifies the storage of ab. If uplo == blas.Upper, then -// ab is stored as an upper-triangular banded matrix with kd super-diagonals, -// and if uplo == blas.Lower, ab is stored as a lower-triangular banded matrix -// with kd sub-diagonals. On exit, the banded matrix U or L is stored in-place -// into ab depending on the value of uplo. Dpbtf2 returns whether the factorization -// was successfully completed. -// -// The band storage scheme is illustrated below when n = 6, and kd = 2. -// The resulting Cholesky decomposition is stored in the same elements as the -// input band matrix (a11 becomes u11 or l11, etc.). -// -// uplo = blas.Upper -// a11 a12 a13 -// a22 a23 a24 -// a33 a34 a35 -// a44 a45 a46 -// a55 a56 * -// a66 * * -// -// uplo = blas.Lower -// * * a11 -// * a21 a22 -// a31 a32 a33 -// a42 a43 a44 -// a53 a54 a55 -// a64 a65 a66 -// -// Dpbtf2 is the unblocked version of the algorithm; see Dpbtrf for the blocked -// version. -// -// Dpbtf2 is an internal routine, exported for testing purposes.
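To make the band-storage diagrams above concrete, here is a small self-contained sketch (the numeric values and the helper name minInt are invented for illustration) that packs the upper layout: element A(i,j), for i <= j <= min(i+kd, n-1), lands at ab[i*ldab + j - i], so the diagonal sits in column 0 of each storage row. The Dpbtf2 implementation follows below.

package main

import "fmt"

// Pack the upper triangle of a symmetric band matrix with n = 6 and
// kd = 2, matching the diagram above. Unused trailing slots (the *
// entries in the diagram) are simply left at zero.
func main() {
	const n, kd = 6, 2
	ldab := kd + 1
	ab := make([]float64, n*ldab)
	for i := 0; i < n; i++ {
		for j := i; j <= minInt(i+kd, n-1); j++ {
			// Hypothetical a_{ij} values: a11 = 11, a12 = 12, ...
			ab[i*ldab+j-i] = float64(10*(i+1) + j + 1)
		}
	}
	for i := 0; i < n; i++ {
		fmt.Println(ab[i*ldab : (i+1)*ldab]) // one storage row per matrix row
	}
}

func minInt(a, b int) int {
	if a < b {
		return a
	}
	return b
}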
-func (Implementation) Dpbtf2(uplo blas.Uplo, n, kd int, ab []float64, ldab int) (ok bool) { - switch { - case uplo != blas.Upper && uplo != blas.Lower: - panic(badUplo) - case n < 0: - panic(nLT0) - case kd < 0: - panic(kdLT0) - case ldab < kd+1: - panic(badLdA) - } - - // Quick return if possible. - if n == 0 { - return true - } - - if len(ab) < (n-1)*ldab+kd { - panic(shortAB) - } - - bi := blas64.Implementation() - - kld := max(1, ldab-1) - if uplo == blas.Upper { - // Compute the Cholesky factorization A = U^T * U. - for j := 0; j < n; j++ { - // Compute U(j,j) and test for non-positive-definiteness. - ajj := ab[j*ldab] - if ajj <= 0 { - return false - } - ajj = math.Sqrt(ajj) - ab[j*ldab] = ajj - // Compute elements j+1:j+kn of row j and update the trailing submatrix - // within the band. - kn := min(kd, n-j-1) - if kn > 0 { - bi.Dscal(kn, 1/ajj, ab[j*ldab+1:], 1) - bi.Dsyr(blas.Upper, kn, -1, ab[j*ldab+1:], 1, ab[(j+1)*ldab:], kld) - } - } - return true - } - // Compute the Cholesky factorization A = L * L^T. - for j := 0; j < n; j++ { - // Compute L(j,j) and test for non-positive-definiteness. - ajj := ab[j*ldab+kd] - if ajj <= 0 { - return false - } - ajj = math.Sqrt(ajj) - ab[j*ldab+kd] = ajj - // Compute elements j+1:j+kn of column j and update the trailing submatrix - // within the band. - kn := min(kd, n-j-1) - if kn > 0 { - bi.Dscal(kn, 1/ajj, ab[(j+1)*ldab+kd-1:], kld) - bi.Dsyr(blas.Lower, kn, -1, ab[(j+1)*ldab+kd-1:], kld, ab[(j+1)*ldab+kd:], kld) - } - } - return true -} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dpbtrf.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dpbtrf.go deleted file mode 100644 index 9c038daeb..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/dpbtrf.go +++ /dev/null @@ -1,214 +0,0 @@ -// Copyright ©2019 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import ( - "gonum.org/v1/gonum/blas" - "gonum.org/v1/gonum/blas/blas64" -) - -// Dpbtrf computes the Cholesky factorization of an n×n symmetric positive -// definite band matrix -// A = U^T * U if uplo == blas.Upper -// A = L * L^T if uplo == blas.Lower -// where U is an upper triangular band matrix and L is lower triangular. kd is -// the number of super- or sub-diagonals of A. -// -// The band storage scheme is illustrated below when n = 6 and kd = 2. Elements -// marked * are not used by the function. -// -// uplo == blas.Upper -// On entry: On return: -// a00 a01 a02 u00 u01 u02 -// a11 a12 a13 u11 u12 u13 -// a22 a23 a24 u22 u23 u24 -// a33 a34 a35 u33 u34 u35 -// a44 a45 * u44 u45 * -// a55 * * u55 * * -// -// uplo == blas.Lower -// On entry: On return: -// * * a00 * * l00 -// * a10 a11 * l10 l11 -// a20 a21 a22 l20 l21 l22 -// a31 a32 a33 l31 l32 l33 -// a42 a43 a44 l42 l43 l44 -// a53 a54 a55 l53 l54 l55 -func (impl Implementation) Dpbtrf(uplo blas.Uplo, n, kd int, ab []float64, ldab int) (ok bool) { - const nbmax = 32 - - switch { - case uplo != blas.Upper && uplo != blas.Lower: - panic(badUplo) - case n < 0: - panic(nLT0) - case kd < 0: - panic(kdLT0) - case ldab < kd+1: - panic(badLdA) - } - - // Quick return if possible. 
- if n == 0 { - return true - } - - if len(ab) < (n-1)*ldab+kd { - panic(shortAB) - } - - opts := string(blas.Upper) - if uplo == blas.Lower { - opts = string(blas.Lower) - } - nb := impl.Ilaenv(1, "DPBTRF", opts, n, kd, -1, -1) - // The block size must not exceed the semi-bandwidth kd, and must not - // exceed the limit set by the size of the local array work. - nb = min(nb, nbmax) - - if nb <= 1 || kd < nb { - // Use unblocked code. - return impl.Dpbtf2(uplo, n, kd, ab, ldab) - } - - // Use blocked code. - ldwork := nb - work := make([]float64, nb*ldwork) - bi := blas64.Implementation() - if uplo == blas.Upper { - // Compute the Cholesky factorization of a symmetric band - // matrix, given the upper triangle of the matrix in band - // storage. - - // Process the band matrix one diagonal block at a time. - for i := 0; i < n; i += nb { - ib := min(nb, n-i) - // Factorize the diagonal block. - ok := impl.Dpotf2(uplo, ib, ab[i*ldab:], ldab-1) - if !ok { - return false - } - if i+ib >= n { - continue - } - // Update the relevant part of the trailing submatrix. - // If A11 denotes the diagonal block which has just been - // factorized, then we need to update the remaining - // blocks in the diagram: - // - // A11 A12 A13 - // A22 A23 - // A33 - // - // The numbers of rows and columns in the partitioning - // are ib, i2, i3 respectively. The blocks A12, A22 and - // A23 are empty if ib = kd. The upper triangle of A13 - // lies outside the band. - i2 := min(kd-ib, n-i-ib) - if i2 > 0 { - // Update A12. - bi.Dtrsm(blas.Left, blas.Upper, blas.Trans, blas.NonUnit, ib, i2, - 1, ab[i*ldab:], ldab-1, ab[i*ldab+ib:], ldab-1) - // Update A22. - bi.Dsyrk(blas.Upper, blas.Trans, i2, ib, - -1, ab[i*ldab+ib:], ldab-1, 1, ab[(i+ib)*ldab:], ldab-1) - } - i3 := min(ib, n-i-kd) - if i3 > 0 { - // Copy the lower triangle of A13 into the work array. - for ii := 0; ii < ib; ii++ { - for jj := 0; jj <= min(ii, i3-1); jj++ { - work[ii*ldwork+jj] = ab[(i+ii)*ldab+kd-ii+jj] - } - } - // Update A13 (in the work array). - bi.Dtrsm(blas.Left, blas.Upper, blas.Trans, blas.NonUnit, ib, i3, - 1, ab[i*ldab:], ldab-1, work, ldwork) - // Update A23. - if i2 > 0 { - bi.Dgemm(blas.Trans, blas.NoTrans, i2, i3, ib, - -1, ab[i*ldab+ib:], ldab-1, work, ldwork, - 1, ab[(i+ib)*ldab+kd-ib:], ldab-1) - } - // Update A33. - bi.Dsyrk(blas.Upper, blas.Trans, i3, ib, - -1, work, ldwork, 1, ab[(i+kd)*ldab:], ldab-1) - // Copy the lower triangle of A13 back into place. - for ii := 0; ii < ib; ii++ { - for jj := 0; jj <= min(ii, i3-1); jj++ { - ab[(i+ii)*ldab+kd-ii+jj] = work[ii*ldwork+jj] - } - } - } - } - } else { - // Compute the Cholesky factorization of a symmetric band - // matrix, given the lower triangle of the matrix in band - // storage. - - // Process the band matrix one diagonal block at a time. - for i := 0; i < n; i += nb { - ib := min(nb, n-i) - // Factorize the diagonal block. - ok := impl.Dpotf2(uplo, ib, ab[i*ldab+kd:], ldab-1) - if !ok { - return false - } - if i+ib >= n { - continue - } - // Update the relevant part of the trailing submatrix. - // If A11 denotes the diagonal block which has just been - // factorized, then we need to update the remaining - // blocks in the diagram: - // - // A11 - // A21 A22 - // A31 A32 A33 - // - // The numbers of rows and columns in the partitioning - // are ib, i2, i3 respectively. The blocks A21, A22 and - // A32 are empty if ib = kd. The lower triangle of A31 - // lies outside the band. - i2 := min(kd-ib, n-i-ib) - if i2 > 0 { - // Update A21.
- bi.Dtrsm(blas.Right, blas.Lower, blas.Trans, blas.NonUnit, i2, ib, - 1, ab[i*ldab+kd:], ldab-1, ab[(i+ib)*ldab+kd-ib:], ldab-1) - // Update A22. - bi.Dsyrk(blas.Lower, blas.NoTrans, i2, ib, - -1, ab[(i+ib)*ldab+kd-ib:], ldab-1, 1, ab[(i+ib)*ldab+kd:], ldab-1) - } - i3 := min(ib, n-i-kd) - if i3 > 0 { - // Copy the upper triangle of A31 into the work array. - for ii := 0; ii < i3; ii++ { - for jj := ii; jj < ib; jj++ { - work[ii*ldwork+jj] = ab[(ii+i+kd)*ldab+jj-ii] - } - } - // Update A31 (in the work array). - bi.Dtrsm(blas.Right, blas.Lower, blas.Trans, blas.NonUnit, i3, ib, - 1, ab[i*ldab+kd:], ldab-1, work, ldwork) - // Update A32. - if i2 > 0 { - bi.Dgemm(blas.NoTrans, blas.Trans, i3, i2, ib, - -1, work, ldwork, ab[(i+ib)*ldab+kd-ib:], ldab-1, - 1, ab[(i+kd)*ldab+ib:], ldab-1) - } - // Update A33. - bi.Dsyrk(blas.Lower, blas.NoTrans, i3, ib, - -1, work, ldwork, 1, ab[(i+kd)*ldab+kd:], ldab-1) - // Copy the upper triangle of A31 back into place. - for ii := 0; ii < i3; ii++ { - for jj := ii; jj < ib; jj++ { - ab[(ii+i+kd)*ldab+jj-ii] = work[ii*ldwork+jj] - } - } - } - } - } - return true -} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dpbtrs.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dpbtrs.go deleted file mode 100644 index 28dd3dcdd..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/dpbtrs.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright ©2019 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import ( - "gonum.org/v1/gonum/blas" - "gonum.org/v1/gonum/blas/blas64" -) - -// Dpbtrs solves a system of linear equations A*X = B with an n×n symmetric -// positive definite band matrix A using the Cholesky factorization -// A = U^T * U if uplo == blas.Upper -// A = L * L^T if uplo == blas.Lower -// computed by Dpbtrf. kd is the number of super- or sub-diagonals of A. See the -// documentation for Dpbtrf for a description of the band storage format of A. -// -// On entry, b contains the n×nrhs right hand side matrix B. On return, it is -// overwritten with the solution matrix X. -func (Implementation) Dpbtrs(uplo blas.Uplo, n, kd, nrhs int, ab []float64, ldab int, b []float64, ldb int) { - switch { - case uplo != blas.Upper && uplo != blas.Lower: - panic(badUplo) - case n < 0: - panic(nLT0) - case kd < 0: - panic(kdLT0) - case nrhs < 0: - panic(nrhsLT0) - case ldab < kd+1: - panic(badLdA) - case ldb < max(1, nrhs): - panic(badLdB) - } - - // Quick return if possible. - if n == 0 || nrhs == 0 { - return - } - - if len(ab) < (n-1)*ldab+kd { - panic(shortAB) - } - if len(b) < (n-1)*ldb+nrhs { - panic(shortB) - } - - bi := blas64.Implementation() - if uplo == blas.Upper { - // Solve A*X = B where A = U^T*U. - for j := 0; j < nrhs; j++ { - // Solve U^T*Y = B, overwriting B with Y. - bi.Dtbsv(blas.Upper, blas.Trans, blas.NonUnit, n, kd, ab, ldab, b[j:], ldb) - // Solve U*X = Y, overwriting Y with X. - bi.Dtbsv(blas.Upper, blas.NoTrans, blas.NonUnit, n, kd, ab, ldab, b[j:], ldb) - } - } else { - // Solve A*X = B where A = L*L^T. - for j := 0; j < nrhs; j++ { - // Solve L*Y = B, overwriting B with Y. - bi.Dtbsv(blas.Lower, blas.NoTrans, blas.NonUnit, n, kd, ab, ldab, b[j:], ldb) - // Solve L^T*X = Y, overwriting Y with X. 
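Dpbtrf and Dpbtrs are designed to be used as a pair; a minimal factor-then-solve sketch follows, using only the signatures documented in the deleted sources (the wrapper name and the choice of a single right-hand side are assumptions of this example). The Dpbtrs implementation itself comes next.

package example

import (
	"gonum.org/v1/gonum/blas"
	"gonum.org/v1/gonum/lapack/gonum"
)

// solveBandSPD solves A*x = b for a symmetric positive definite band
// matrix A held in upper band storage: Dpbtrf overwrites ab with the
// Cholesky factor U, and Dpbtrs overwrites b with the solution.
func solveBandSPD(n, kd int, ab []float64, ldab int, b []float64) bool {
	var impl gonum.Implementation
	if ok := impl.Dpbtrf(blas.Upper, n, kd, ab, ldab); !ok {
		return false // A is not positive definite
	}
	// nrhs = 1 and ldb = 1: b is an n×1 right-hand side.
	impl.Dpbtrs(blas.Upper, n, kd, 1, ab, ldab, b, 1)
	return true
}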
- bi.Dtbsv(blas.Lower, blas.Trans, blas.NonUnit, n, kd, ab, ldab, b[j:], ldb) - } - } - return -} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dpocon.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dpocon.go deleted file mode 100644 index 7af4c1872..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/dpocon.go +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright ©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import ( - "math" - - "gonum.org/v1/gonum/blas" - "gonum.org/v1/gonum/blas/blas64" -) - -// Dpocon estimates the reciprocal of the condition number of a positive-definite -// matrix A given the Cholesky decomposition of A. The condition number computed -// is based on the 1-norm and the ∞-norm. -// -// anorm is the 1-norm and the ∞-norm of the original matrix A. -// -// work is a temporary data slice of length at least 3*n and Dpocon will panic otherwise. -// -// iwork is a temporary data slice of length at least n and Dpocon will panic otherwise. -func (impl Implementation) Dpocon(uplo blas.Uplo, n int, a []float64, lda int, anorm float64, work []float64, iwork []int) float64 { - switch { - case uplo != blas.Upper && uplo != blas.Lower: - panic(badUplo) - case n < 0: - panic(nLT0) - case lda < max(1, n): - panic(badLdA) - case anorm < 0: - panic(negANorm) - } - - // Quick return if possible. - if n == 0 { - return 1 - } - - switch { - case len(a) < (n-1)*lda+n: - panic(shortA) - case len(work) < 3*n: - panic(shortWork) - case len(iwork) < n: - panic(shortIWork) - } - - if anorm == 0 { - return 0 - } - - bi := blas64.Implementation() - - var ( - smlnum = dlamchS - rcond float64 - sl, su float64 - normin bool - ainvnm float64 - kase int - isave [3]int - ) - for { - ainvnm, kase = impl.Dlacn2(n, work[n:], work, iwork, ainvnm, kase, &isave) - if kase == 0 { - if ainvnm != 0 { - rcond = (1 / ainvnm) / anorm - } - return rcond - } - if uplo == blas.Upper { - sl = impl.Dlatrs(blas.Upper, blas.Trans, blas.NonUnit, normin, n, a, lda, work, work[2*n:]) - normin = true - su = impl.Dlatrs(blas.Upper, blas.NoTrans, blas.NonUnit, normin, n, a, lda, work, work[2*n:]) - } else { - sl = impl.Dlatrs(blas.Lower, blas.NoTrans, blas.NonUnit, normin, n, a, lda, work, work[2*n:]) - normin = true - su = impl.Dlatrs(blas.Lower, blas.Trans, blas.NonUnit, normin, n, a, lda, work, work[2*n:]) - } - scale := sl * su - if scale != 1 { - ix := bi.Idamax(n, work, 1) - if scale == 0 || scale < math.Abs(work[ix])*smlnum { - return rcond - } - impl.Drscl(n, scale, work, 1) - } - } -} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dpotf2.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dpotf2.go deleted file mode 100644 index 5d3327c2d..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/dpotf2.go +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright ©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import ( - "math" - - "gonum.org/v1/gonum/blas" - "gonum.org/v1/gonum/blas/blas64" -) - -// Dpotf2 computes the Cholesky decomposition of the symmetric positive definite -// matrix a. If ul == blas.Upper, then a is stored as an upper-triangular matrix, -// and a = U^T U is stored in place into a. If ul == blas.Lower, then a = L L^T -// is computed and stored in-place into a. If a is not positive definite, false -// is returned. This is the unblocked version of the algorithm. 
-// -// Dpotf2 is an internal routine. It is exported for testing purposes. -func (Implementation) Dpotf2(ul blas.Uplo, n int, a []float64, lda int) (ok bool) { - switch { - case ul != blas.Upper && ul != blas.Lower: - panic(badUplo) - case n < 0: - panic(nLT0) - case lda < max(1, n): - panic(badLdA) - } - - // Quick return if possible. - if n == 0 { - return true - } - - if len(a) < (n-1)*lda+n { - panic(shortA) - } - - bi := blas64.Implementation() - - if ul == blas.Upper { - for j := 0; j < n; j++ { - ajj := a[j*lda+j] - if j != 0 { - ajj -= bi.Ddot(j, a[j:], lda, a[j:], lda) - } - if ajj <= 0 || math.IsNaN(ajj) { - a[j*lda+j] = ajj - return false - } - ajj = math.Sqrt(ajj) - a[j*lda+j] = ajj - if j < n-1 { - bi.Dgemv(blas.Trans, j, n-j-1, - -1, a[j+1:], lda, a[j:], lda, - 1, a[j*lda+j+1:], 1) - bi.Dscal(n-j-1, 1/ajj, a[j*lda+j+1:], 1) - } - } - return true - } - for j := 0; j < n; j++ { - ajj := a[j*lda+j] - if j != 0 { - ajj -= bi.Ddot(j, a[j*lda:], 1, a[j*lda:], 1) - } - if ajj <= 0 || math.IsNaN(ajj) { - a[j*lda+j] = ajj - return false - } - ajj = math.Sqrt(ajj) - a[j*lda+j] = ajj - if j < n-1 { - bi.Dgemv(blas.NoTrans, n-j-1, j, - -1, a[(j+1)*lda:], lda, a[j*lda:], 1, - 1, a[(j+1)*lda+j:], lda) - bi.Dscal(n-j-1, 1/ajj, a[(j+1)*lda+j:], lda) - } - } - return true -} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dpotrf.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dpotrf.go deleted file mode 100644 index 21241687f..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/dpotrf.go +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright ©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import ( - "gonum.org/v1/gonum/blas" - "gonum.org/v1/gonum/blas/blas64" -) - -// Dpotrf computes the Cholesky decomposition of the symmetric positive definite -// matrix a. If ul == blas.Upper, then a is stored as an upper-triangular matrix, -// and a = U^T U is stored in place into a. If ul == blas.Lower, then a = L L^T -// is computed and stored in-place into a. If a is not positive definite, false -// is returned. This is the blocked version of the algorithm. -func (impl Implementation) Dpotrf(ul blas.Uplo, n int, a []float64, lda int) (ok bool) { - switch { - case ul != blas.Upper && ul != blas.Lower: - panic(badUplo) - case n < 0: - panic(nLT0) - case lda < max(1, n): - panic(badLdA) - } - - // Quick return if possible. 
- if n == 0 { - return true - } - - if len(a) < (n-1)*lda+n { - panic(shortA) - } - - nb := impl.Ilaenv(1, "DPOTRF", string(ul), n, -1, -1, -1) - if nb <= 1 || n <= nb { - return impl.Dpotf2(ul, n, a, lda) - } - bi := blas64.Implementation() - if ul == blas.Upper { - for j := 0; j < n; j += nb { - jb := min(nb, n-j) - bi.Dsyrk(blas.Upper, blas.Trans, jb, j, - -1, a[j:], lda, - 1, a[j*lda+j:], lda) - ok = impl.Dpotf2(blas.Upper, jb, a[j*lda+j:], lda) - if !ok { - return ok - } - if j+jb < n { - bi.Dgemm(blas.Trans, blas.NoTrans, jb, n-j-jb, j, - -1, a[j:], lda, a[j+jb:], lda, - 1, a[j*lda+j+jb:], lda) - bi.Dtrsm(blas.Left, blas.Upper, blas.Trans, blas.NonUnit, jb, n-j-jb, - 1, a[j*lda+j:], lda, - a[j*lda+j+jb:], lda) - } - } - return true - } - for j := 0; j < n; j += nb { - jb := min(nb, n-j) - bi.Dsyrk(blas.Lower, blas.NoTrans, jb, j, - -1, a[j*lda:], lda, - 1, a[j*lda+j:], lda) - ok := impl.Dpotf2(blas.Lower, jb, a[j*lda+j:], lda) - if !ok { - return ok - } - if j+jb < n { - bi.Dgemm(blas.NoTrans, blas.Trans, n-j-jb, jb, j, - -1, a[(j+jb)*lda:], lda, a[j*lda:], lda, - 1, a[(j+jb)*lda+j:], lda) - bi.Dtrsm(blas.Right, blas.Lower, blas.Trans, blas.NonUnit, n-j-jb, jb, - 1, a[j*lda+j:], lda, - a[(j+jb)*lda+j:], lda) - } - } - return true -} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dpotri.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dpotri.go deleted file mode 100644 index 2394775c3..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/dpotri.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright ©2019 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import "gonum.org/v1/gonum/blas" - -// Dpotri computes the inverse of a real symmetric positive definite matrix A -// using its Cholesky factorization. -// -// On entry, a contains the triangular factor U or L from the Cholesky -// factorization A = U^T*U or A = L*L^T, as computed by Dpotrf. -// On return, a contains the upper or lower triangle of the (symmetric) -// inverse of A, overwriting the input factor U or L. -func (impl Implementation) Dpotri(uplo blas.Uplo, n int, a []float64, lda int) (ok bool) { - switch { - case uplo != blas.Upper && uplo != blas.Lower: - panic(badUplo) - case n < 0: - panic(nLT0) - case lda < max(1, n): - panic(badLdA) - } - - // Quick return if possible. - if n == 0 { - return true - } - - if len(a) < (n-1)*lda+n { - panic(shortA) - } - - // Invert the triangular Cholesky factor U or L. - ok = impl.Dtrtri(uplo, blas.NonUnit, n, a, lda) - if !ok { - return false - } - - // Form inv(U)*inv(U)^T or inv(L)^T*inv(L). - impl.Dlauum(uplo, n, a, lda) - return true -} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dpotrs.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dpotrs.go deleted file mode 100644 index 689e0439c..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/dpotrs.go +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright ©2018 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import ( - "gonum.org/v1/gonum/blas" - "gonum.org/v1/gonum/blas/blas64" -) - -// Dpotrs solves a system of n linear equations A*X = B where A is an n×n -// symmetric positive definite matrix and B is an n×nrhs matrix. The matrix A is -// represented by its Cholesky factorization -// A = U^T*U if uplo == blas.Upper -// A = L*L^T if uplo == blas.Lower -// as computed by Dpotrf. 
On entry, B contains the right-hand side matrix B; on -// return it contains the solution matrix X. -func (Implementation) Dpotrs(uplo blas.Uplo, n, nrhs int, a []float64, lda int, b []float64, ldb int) { - switch { - case uplo != blas.Upper && uplo != blas.Lower: - panic(badUplo) - case n < 0: - panic(nLT0) - case nrhs < 0: - panic(nrhsLT0) - case lda < max(1, n): - panic(badLdA) - case ldb < max(1, nrhs): - panic(badLdB) - } - - // Quick return if possible. - if n == 0 || nrhs == 0 { - return - } - - switch { - case len(a) < (n-1)*lda+n: - panic(shortA) - case len(b) < (n-1)*ldb+nrhs: - panic(shortB) - } - - bi := blas64.Implementation() - - if uplo == blas.Upper { - // Solve U^T * U * X = B where U is stored in the upper triangle of A. - - // Solve U^T * X = B, overwriting B with X. - bi.Dtrsm(blas.Left, blas.Upper, blas.Trans, blas.NonUnit, n, nrhs, 1, a, lda, b, ldb) - // Solve U * X = B, overwriting B with X. - bi.Dtrsm(blas.Left, blas.Upper, blas.NoTrans, blas.NonUnit, n, nrhs, 1, a, lda, b, ldb) - } else { - // Solve L * L^T * X = B where L is stored in the lower triangle of A. - - // Solve L * X = B, overwriting B with X. - bi.Dtrsm(blas.Left, blas.Lower, blas.NoTrans, blas.NonUnit, n, nrhs, 1, a, lda, b, ldb) - // Solve L^T * X = B, overwriting B with X. - bi.Dtrsm(blas.Left, blas.Lower, blas.Trans, blas.NonUnit, n, nrhs, 1, a, lda, b, ldb) - } -} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/drscl.go b/vendor/gonum.org/v1/gonum/lapack/gonum/drscl.go deleted file mode 100644 index b2772dbc2..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/drscl.go +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright ©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import ( - "math" - - "gonum.org/v1/gonum/blas/blas64" -) - -// Drscl multiplies the vector x by 1/a, being careful to avoid overflow or -// underflow where possible. -// -// Drscl is an internal routine. It is exported for testing purposes. -func (impl Implementation) Drscl(n int, a float64, x []float64, incX int) { - switch { - case n < 0: - panic(nLT0) - case incX <= 0: - panic(badIncX) - } - - // Quick return if possible. - if n == 0 { - return - } - - if len(x) < 1+(n-1)*incX { - panic(shortX) - } - - bi := blas64.Implementation() - - cden := a - cnum := 1.0 - smlnum := dlamchS - bignum := 1 / smlnum - for { - cden1 := cden * smlnum - cnum1 := cnum / bignum - var mul float64 - var done bool - switch { - case cnum != 0 && math.Abs(cden1) > math.Abs(cnum): - mul = smlnum - done = false - cden = cden1 - case math.Abs(cnum1) > math.Abs(cden): - mul = bignum - done = false - cnum = cnum1 - default: - mul = cnum / cden - done = true - } - bi.Dscal(n, mul, x, incX) - if done { - break - } - } -} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dsteqr.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dsteqr.go deleted file mode 100644 index d6c7861ab..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/dsteqr.go +++ /dev/null @@ -1,376 +0,0 @@ -// Copyright ©2016 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import ( - "math" - - "gonum.org/v1/gonum/blas" - "gonum.org/v1/gonum/blas/blas64" - "gonum.org/v1/gonum/lapack" -) - -// Dsteqr computes the eigenvalues and optionally the eigenvectors of a symmetric -// tridiagonal matrix using the implicit QL or QR method.
The eigenvectors of a -// full or band symmetric matrix can also be found if Dsytrd, Dsptrd, or Dsbtrd -// have been used to reduce this matrix to tridiagonal form. -// -// d, on entry, contains the diagonal elements of the tridiagonal matrix. On exit, -// d contains the eigenvalues in ascending order. d must have length n and -// Dsteqr will panic otherwise. -// -// e, on entry, contains the off-diagonal elements of the tridiagonal matrix, -// and is overwritten during the call to Dsteqr. e must have length n-1 and -// Dsteqr will panic otherwise. -// -// z, on entry, contains the n×n orthogonal matrix used in the reduction to -// tridiagonal form if compz == lapack.EVOrig. On exit, if -// compz == lapack.EVOrig, z contains the orthonormal eigenvectors of the -// original symmetric matrix, and if compz == lapack.EVTridiag, z contains the -// orthonormal eigenvectors of the symmetric tridiagonal matrix. z is not used -// if compz == lapack.EVCompNone. -// -// work must have length at least max(1, 2*n-2) if the eigenvectors are computed, -// and Dsteqr will panic otherwise. -// -// Dsteqr is an internal routine. It is exported for testing purposes. -func (impl Implementation) Dsteqr(compz lapack.EVComp, n int, d, e, z []float64, ldz int, work []float64) (ok bool) { - switch { - case compz != lapack.EVCompNone && compz != lapack.EVTridiag && compz != lapack.EVOrig: - panic(badEVComp) - case n < 0: - panic(nLT0) - case ldz < 1, compz != lapack.EVCompNone && ldz < n: - panic(badLdZ) - } - - // Quick return if possible. - if n == 0 { - return true - } - - switch { - case len(d) < n: - panic(shortD) - case len(e) < n-1: - panic(shortE) - case compz != lapack.EVCompNone && len(z) < (n-1)*ldz+n: - panic(shortZ) - case compz != lapack.EVCompNone && len(work) < max(1, 2*n-2): - panic(shortWork) - } - - var icompz int - if compz == lapack.EVOrig { - icompz = 1 - } else if compz == lapack.EVTridiag { - icompz = 2 - } - - if n == 1 { - if icompz == 2 { - z[0] = 1 - } - return true - } - - bi := blas64.Implementation() - - eps := dlamchE - eps2 := eps * eps - safmin := dlamchS - safmax := 1 / safmin - ssfmax := math.Sqrt(safmax) / 3 - ssfmin := math.Sqrt(safmin) / eps2 - - // Compute the eigenvalues and eigenvectors of the tridiagonal matrix. - if icompz == 2 { - impl.Dlaset(blas.All, n, n, 0, 1, z, ldz) - } - const maxit = 30 - nmaxit := n * maxit - - jtot := 0 - - // Determine where the matrix splits and choose QL or QR iteration for each - // block, according to whether top or bottom diagonal element is smaller. - l1 := 0 - nm1 := n - 1 - - type scaletype int - const ( - down scaletype = iota + 1 - up - ) - var iscale scaletype - - for { - if l1 > n-1 { - // Order eigenvalues and eigenvectors. - if icompz == 0 { - impl.Dlasrt(lapack.SortIncreasing, n, d) - } else { - // TODO(btracey): Consider replacing this sort with a call to sort.Sort.
- for ii := 1; ii < n; ii++ { - i := ii - 1 - k := i - p := d[i] - for j := ii; j < n; j++ { - if d[j] < p { - k = j - p = d[j] - } - } - if k != i { - d[k] = d[i] - d[i] = p - bi.Dswap(n, z[i:], ldz, z[k:], ldz) - } - } - } - return true - } - if l1 > 0 { - e[l1-1] = 0 - } - var m int - if l1 <= nm1 { - for m = l1; m < nm1; m++ { - test := math.Abs(e[m]) - if test == 0 { - break - } - if test <= (math.Sqrt(math.Abs(d[m]))*math.Sqrt(math.Abs(d[m+1])))*eps { - e[m] = 0 - break - } - } - } - l := l1 - lsv := l - lend := m - lendsv := lend - l1 = m + 1 - if lend == l { - continue - } - - // Scale submatrix in rows and columns L to Lend - anorm := impl.Dlanst(lapack.MaxAbs, lend-l+1, d[l:], e[l:]) - switch { - case anorm == 0: - continue - case anorm > ssfmax: - iscale = down - // Pretend that d and e are matrices with 1 column. - impl.Dlascl(lapack.General, 0, 0, anorm, ssfmax, lend-l+1, 1, d[l:], 1) - impl.Dlascl(lapack.General, 0, 0, anorm, ssfmax, lend-l, 1, e[l:], 1) - case anorm < ssfmin: - iscale = up - impl.Dlascl(lapack.General, 0, 0, anorm, ssfmin, lend-l+1, 1, d[l:], 1) - impl.Dlascl(lapack.General, 0, 0, anorm, ssfmin, lend-l, 1, e[l:], 1) - } - - // Choose between QL and QR. - if math.Abs(d[lend]) < math.Abs(d[l]) { - lend = lsv - l = lendsv - } - if lend > l { - // QL Iteration. Look for small subdiagonal element. - for { - if l != lend { - for m = l; m < lend; m++ { - v := math.Abs(e[m]) - if v*v <= (eps2*math.Abs(d[m]))*math.Abs(d[m+1])+safmin { - break - } - } - } else { - m = lend - } - if m < lend { - e[m] = 0 - } - p := d[l] - if m == l { - // Eigenvalue found. - l++ - if l > lend { - break - } - continue - } - - // If remaining matrix is 2×2, use Dlae2 to compute its eigensystem. - if m == l+1 { - if icompz > 0 { - d[l], d[l+1], work[l], work[n-1+l] = impl.Dlaev2(d[l], e[l], d[l+1]) - impl.Dlasr(blas.Right, lapack.Variable, lapack.Backward, - n, 2, work[l:], work[n-1+l:], z[l:], ldz) - } else { - d[l], d[l+1] = impl.Dlae2(d[l], e[l], d[l+1]) - } - e[l] = 0 - l += 2 - if l > lend { - break - } - continue - } - - if jtot == nmaxit { - break - } - jtot++ - - // Form shift - g := (d[l+1] - p) / (2 * e[l]) - r := impl.Dlapy2(g, 1) - g = d[m] - p + e[l]/(g+math.Copysign(r, g)) - s := 1.0 - c := 1.0 - p = 0.0 - - // Inner loop - for i := m - 1; i >= l; i-- { - f := s * e[i] - b := c * e[i] - c, s, r = impl.Dlartg(g, f) - if i != m-1 { - e[i+1] = r - } - g = d[i+1] - p - r = (d[i]-g)*s + 2*c*b - p = s * r - d[i+1] = g + p - g = c*r - b - - // If eigenvectors are desired, then save rotations. - if icompz > 0 { - work[i] = c - work[n-1+i] = -s - } - } - // If eigenvectors are desired, then apply saved rotations. - if icompz > 0 { - mm := m - l + 1 - impl.Dlasr(blas.Right, lapack.Variable, lapack.Backward, - n, mm, work[l:], work[n-1+l:], z[l:], ldz) - } - d[l] -= p - e[l] = g - } - } else { - // QR Iteration. - // Look for small superdiagonal element. - for { - if l != lend { - for m = l; m > lend; m-- { - v := math.Abs(e[m-1]) - if v*v <= (eps2*math.Abs(d[m])*math.Abs(d[m-1]) + safmin) { - break - } - } - } else { - m = lend - } - if m > lend { - e[m-1] = 0 - } - p := d[l] - if m == l { - // Eigenvalue found - l-- - if l < lend { - break - } - continue - } - - // If remaining matrix is 2×2, use Dlae2 to compute its eigenvalues. 
- if m == l-1 { - if icompz > 0 { - d[l-1], d[l], work[m], work[n-1+m] = impl.Dlaev2(d[l-1], e[l-1], d[l]) - impl.Dlasr(blas.Right, lapack.Variable, lapack.Forward, - n, 2, work[m:], work[n-1+m:], z[l-1:], ldz) - } else { - d[l-1], d[l] = impl.Dlae2(d[l-1], e[l-1], d[l]) - } - e[l-1] = 0 - l -= 2 - if l < lend { - break - } - continue - } - if jtot == nmaxit { - break - } - jtot++ - - // Form shift. - g := (d[l-1] - p) / (2 * e[l-1]) - r := impl.Dlapy2(g, 1) - g = d[m] - p + (e[l-1])/(g+math.Copysign(r, g)) - s := 1.0 - c := 1.0 - p = 0.0 - - // Inner loop. - for i := m; i < l; i++ { - f := s * e[i] - b := c * e[i] - c, s, r = impl.Dlartg(g, f) - if i != m { - e[i-1] = r - } - g = d[i] - p - r = (d[i+1]-g)*s + 2*c*b - p = s * r - d[i] = g + p - g = c*r - b - - // If eigenvectors are desired, then save rotations. - if icompz > 0 { - work[i] = c - work[n-1+i] = s - } - } - - // If eigenvectors are desired, then apply saved rotations. - if icompz > 0 { - mm := l - m + 1 - impl.Dlasr(blas.Right, lapack.Variable, lapack.Forward, - n, mm, work[m:], work[n-1+m:], z[m:], ldz) - } - d[l] -= p - e[l-1] = g - } - } - - // Undo scaling if necessary. - switch iscale { - case down: - // Pretend that d and e are matrices with 1 column. - impl.Dlascl(lapack.General, 0, 0, ssfmax, anorm, lendsv-lsv+1, 1, d[lsv:], 1) - impl.Dlascl(lapack.General, 0, 0, ssfmax, anorm, lendsv-lsv, 1, e[lsv:], 1) - case up: - impl.Dlascl(lapack.General, 0, 0, ssfmin, anorm, lendsv-lsv+1, 1, d[lsv:], 1) - impl.Dlascl(lapack.General, 0, 0, ssfmin, anorm, lendsv-lsv, 1, e[lsv:], 1) - } - - // Check for no convergence to an eigenvalue after a total of n*maxit iterations. - if jtot >= nmaxit { - break - } - } - for i := 0; i < n-1; i++ { - if e[i] != 0 { - return false - } - } - return true -} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dsterf.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dsterf.go deleted file mode 100644 index dc1e178df..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/dsterf.go +++ /dev/null @@ -1,285 +0,0 @@ -// Copyright ©2016 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import ( - "math" - - "gonum.org/v1/gonum/lapack" -) - -// Dsterf computes all eigenvalues of a symmetric tridiagonal matrix using the -// Pal-Walker-Kahan variant of the QL or QR algorithm. -// -// d contains the diagonal elements of the tridiagonal matrix on entry, and -// contains the eigenvalues in ascending order on exit. d must have length at -// least n, or Dsterf will panic. -// -// e contains the off-diagonal elements of the tridiagonal matrix on entry, and is -// overwritten during the call to Dsterf. e must have length of at least n-1 or -// Dsterf will panic. -// -// Dsterf is an internal routine. It is exported for testing purposes. -func (impl Implementation) Dsterf(n int, d, e []float64) (ok bool) { - if n < 0 { - panic(nLT0) - } - - // Quick return if possible. - if n == 0 { - return true - } - - switch { - case len(d) < n: - panic(shortD) - case len(e) < n-1: - panic(shortE) - } - - if n == 1 { - return true - } - - const ( - none = 0 // The values are not scaled. - down = 1 // The values are scaled below ssfmax threshold. - up = 2 // The values are scaled below ssfmin threshold. - ) - - // Determine the unit roundoff for this environment. 
- eps := dlamchE - eps2 := eps * eps - safmin := dlamchS - safmax := 1 / safmin - ssfmax := math.Sqrt(safmax) / 3 - ssfmin := math.Sqrt(safmin) / eps2 - - // Compute the eigenvalues of the tridiagonal matrix. - maxit := 30 - nmaxit := n * maxit - jtot := 0 - - l1 := 0 - - for { - if l1 > n-1 { - impl.Dlasrt(lapack.SortIncreasing, n, d) - return true - } - if l1 > 0 { - e[l1-1] = 0 - } - var m int - for m = l1; m < n-1; m++ { - if math.Abs(e[m]) <= math.Sqrt(math.Abs(d[m]))*math.Sqrt(math.Abs(d[m+1]))*eps { - e[m] = 0 - break - } - } - - l := l1 - lsv := l - lend := m - lendsv := lend - l1 = m + 1 - if lend == 0 { - continue - } - - // Scale submatrix in rows and columns l to lend. - anorm := impl.Dlanst(lapack.MaxAbs, lend-l+1, d[l:], e[l:]) - iscale := none - if anorm == 0 { - continue - } - if anorm > ssfmax { - iscale = down - impl.Dlascl(lapack.General, 0, 0, anorm, ssfmax, lend-l+1, 1, d[l:], n) - impl.Dlascl(lapack.General, 0, 0, anorm, ssfmax, lend-l, 1, e[l:], n) - } else if anorm < ssfmin { - iscale = up - impl.Dlascl(lapack.General, 0, 0, anorm, ssfmin, lend-l+1, 1, d[l:], n) - impl.Dlascl(lapack.General, 0, 0, anorm, ssfmin, lend-l, 1, e[l:], n) - } - - el := e[l:lend] - for i, v := range el { - el[i] *= v - } - - // Choose between QL and QR iteration. - if math.Abs(d[lend]) < math.Abs(d[l]) { - lend = lsv - l = lendsv - } - if lend >= l { - // QL Iteration. - // Look for small sub-diagonal element. - for { - if l != lend { - for m = l; m < lend; m++ { - if math.Abs(e[m]) <= eps2*(math.Abs(d[m]*d[m+1])) { - break - } - } - } else { - m = lend - } - if m < lend { - e[m] = 0 - } - p := d[l] - if m == l { - // Eigenvalue found. - l++ - if l > lend { - break - } - continue - } - // If remaining matrix is 2 by 2, use Dlae2 to compute its eigenvalues. - if m == l+1 { - d[l], d[l+1] = impl.Dlae2(d[l], math.Sqrt(e[l]), d[l+1]) - e[l] = 0 - l += 2 - if l > lend { - break - } - continue - } - if jtot == nmaxit { - break - } - jtot++ - - // Form shift. - rte := math.Sqrt(e[l]) - sigma := (d[l+1] - p) / (2 * rte) - r := impl.Dlapy2(sigma, 1) - sigma = p - (rte / (sigma + math.Copysign(r, sigma))) - - c := 1.0 - s := 0.0 - gamma := d[m] - sigma - p = gamma * gamma - - // Inner loop. - for i := m - 1; i >= l; i-- { - bb := e[i] - r := p + bb - if i != m-1 { - e[i+1] = s * r - } - oldc := c - c = p / r - s = bb / r - oldgam := gamma - alpha := d[i] - gamma = c*(alpha-sigma) - s*oldgam - d[i+1] = oldgam + (alpha - gamma) - if c != 0 { - p = (gamma * gamma) / c - } else { - p = oldc * bb - } - } - e[l] = s * p - d[l] = sigma + gamma - } - } else { - for { - // QR Iteration. - // Look for small super-diagonal element. - for m = l; m > lend; m-- { - if math.Abs(e[m-1]) <= eps2*math.Abs(d[m]*d[m-1]) { - break - } - } - if m > lend { - e[m-1] = 0 - } - p := d[l] - if m == l { - // Eigenvalue found. - l-- - if l < lend { - break - } - continue - } - - // If remaining matrix is 2 by 2, use Dlae2 to compute its eigenvalues. - if m == l-1 { - d[l], d[l-1] = impl.Dlae2(d[l], math.Sqrt(e[l-1]), d[l-1]) - e[l-1] = 0 - l -= 2 - if l < lend { - break - } - continue - } - if jtot == nmaxit { - break - } - jtot++ - - // Form shift. - rte := math.Sqrt(e[l-1]) - sigma := (d[l-1] - p) / (2 * rte) - r := impl.Dlapy2(sigma, 1) - sigma = p - (rte / (sigma + math.Copysign(r, sigma))) - - c := 1.0 - s := 0.0 - gamma := d[m] - sigma - p = gamma * gamma - - // Inner loop. 
- for i := m; i < l; i++ { - bb := e[i] - r := p + bb - if i != m { - e[i-1] = s * r - } - oldc := c - c = p / r - s = bb / r - oldgam := gamma - alpha := d[i+1] - gamma = c*(alpha-sigma) - s*oldgam - d[i] = oldgam + alpha - gamma - if c != 0 { - p = (gamma * gamma) / c - } else { - p = oldc * bb - } - } - e[l-1] = s * p - d[l] = sigma + gamma - } - } - - // Undo scaling if necessary - switch iscale { - case down: - impl.Dlascl(lapack.General, 0, 0, ssfmax, anorm, lendsv-lsv+1, 1, d[lsv:], n) - case up: - impl.Dlascl(lapack.General, 0, 0, ssfmin, anorm, lendsv-lsv+1, 1, d[lsv:], n) - } - - // Check for no convergence to an eigenvalue after a total of n*maxit iterations. - if jtot >= nmaxit { - break - } - } - for _, v := range e[:n-1] { - if v != 0 { - return false - } - } - impl.Dlasrt(lapack.SortIncreasing, n, d) - return true -} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dsyev.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dsyev.go deleted file mode 100644 index 5f57f3a5c..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/dsyev.go +++ /dev/null @@ -1,130 +0,0 @@ -// Copyright ©2016 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import ( - "math" - - "gonum.org/v1/gonum/blas" - "gonum.org/v1/gonum/blas/blas64" - "gonum.org/v1/gonum/lapack" -) - -// Dsyev computes all eigenvalues and, optionally, the eigenvectors of a real -// symmetric matrix A. -// -// w contains the eigenvalues in ascending order upon return. w must have length -// at least n, and Dsyev will panic otherwise. -// -// On entry, a contains the elements of the symmetric matrix A in the triangular -// portion specified by uplo. If jobz == lapack.EVCompute, a contains the -// orthonormal eigenvectors of A on exit, otherwise jobz must be lapack.EVNone -// and on exit the specified triangular region is overwritten. -// -// work is temporary storage, and lwork specifies the usable memory length. At minimum, -// lwork >= 3*n-1, and Dsyev will panic otherwise. The amount of blocking is -// limited by the usable length. If lwork == -1, instead of computing Dsyev the -// optimal work length is stored into work[0]. -func (impl Implementation) Dsyev(jobz lapack.EVJob, uplo blas.Uplo, n int, a []float64, lda int, w, work []float64, lwork int) (ok bool) { - switch { - case jobz != lapack.EVNone && jobz != lapack.EVCompute: - panic(badEVJob) - case uplo != blas.Upper && uplo != blas.Lower: - panic(badUplo) - case n < 0: - panic(nLT0) - case lda < max(1, n): - panic(badLdA) - case lwork < max(1, 3*n-1) && lwork != -1: - panic(badLWork) - case len(work) < max(1, lwork): - panic(shortWork) - } - - // Quick return if possible. - if n == 0 { - return true - } - - var opts string - if uplo == blas.Upper { - opts = "U" - } else { - opts = "L" - } - nb := impl.Ilaenv(1, "DSYTRD", opts, n, -1, -1, -1) - lworkopt := max(1, (nb+2)*n) - if lwork == -1 { - work[0] = float64(lworkopt) - return - } - - switch { - case len(a) < (n-1)*lda+n: - panic(shortA) - case len(w) < n: - panic(shortW) - } - - if n == 1 { - w[0] = a[0] - work[0] = 2 - if jobz == lapack.EVCompute { - a[0] = 1 - } - return true - } - - safmin := dlamchS - eps := dlamchP - smlnum := safmin / eps - bignum := 1 / smlnum - rmin := math.Sqrt(smlnum) - rmax := math.Sqrt(bignum) - - // Scale matrix to allowable range, if necessary. 
- anrm := impl.Dlansy(lapack.MaxAbs, uplo, n, a, lda, work) - scaled := false - var sigma float64 - if anrm > 0 && anrm < rmin { - scaled = true - sigma = rmin / anrm - } else if anrm > rmax { - scaled = true - sigma = rmax / anrm - } - if scaled { - kind := lapack.LowerTri - if uplo == blas.Upper { - kind = lapack.UpperTri - } - impl.Dlascl(kind, 0, 0, 1, sigma, n, n, a, lda) - } - var inde int - indtau := inde + n - indwork := indtau + n - llwork := lwork - indwork - impl.Dsytrd(uplo, n, a, lda, w, work[inde:], work[indtau:], work[indwork:], llwork) - - // For eigenvalues only, call Dsterf. For eigenvectors, first call Dorgtr - // to generate the orthogonal matrix, then call Dsteqr. - if jobz == lapack.EVNone { - ok = impl.Dsterf(n, w, work[inde:]) - } else { - impl.Dorgtr(uplo, n, a, lda, work[indtau:], work[indwork:], llwork) - ok = impl.Dsteqr(lapack.EVComp(jobz), n, w, work[inde:], a, lda, work[indtau:]) - } - if !ok { - return false - } - - // If the matrix was scaled, then rescale eigenvalues appropriately. - if scaled { - bi := blas64.Implementation() - bi.Dscal(n, 1/sigma, w, 1) - } - work[0] = float64(lworkopt) - return true -} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dsytd2.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dsytd2.go deleted file mode 100644 index 23cfd0577..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/dsytd2.go +++ /dev/null @@ -1,136 +0,0 @@ -// Copyright ©2016 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import ( - "gonum.org/v1/gonum/blas" - "gonum.org/v1/gonum/blas/blas64" -) - -// Dsytd2 reduces a symmetric n×n matrix A to symmetric tridiagonal form T by -// an orthogonal similarity transformation -// Q^T * A * Q = T -// On entry, the matrix is contained in the specified triangle of a. On exit, -// if uplo == blas.Upper, the diagonal and first super-diagonal of a are -// overwritten with the elements of T. The elements above the first super-diagonal -// are overwritten with the elementary reflectors that are used with -// the elements written to tau in order to construct Q. If uplo == blas.Lower, -// the elements are written in the lower triangular region. -// -// d must have length at least n. e and tau must have length at least n-1. Dsytd2 -// will panic if these sizes are not met. -// -// Q is represented as a product of elementary reflectors. -// If uplo == blas.Upper -// Q = H_{n-2} * ... * H_1 * H_0 -// and if uplo == blas.Lower -// Q = H_0 * H_1 * ... * H_{n-2} -// where -// H_i = I - tau * v * v^T -// where tau is stored in tau[i], and v is stored in a. -// -// If uplo == blas.Upper, v[0:i-1] is stored in A[0:i-1,i+1], v[i] = 1, and -// v[i+1:] = 0. The elements of a are -// [ d e v2 v3 v4] -// [ d e v3 v4] -// [ d e v4] -// [ d e] -// [ d] -// If uplo == blas.Lower, v[0:i+1] = 0, v[i+1] = 1, and v[i+2:] is stored in -// A[i+2:n,i]. -// The elements of a are -// [ d ] -// [ e d ] -// [v1 e d ] -// [v1 v2 e d ] -// [v1 v2 v3 e d] -// -// Dsytd2 is an internal routine. It is exported for testing purposes. -func (impl Implementation) Dsytd2(uplo blas.Uplo, n int, a []float64, lda int, d, e, tau []float64) { - switch { - case uplo != blas.Upper && uplo != blas.Lower: - panic(badUplo) - case n < 0: - panic(nLT0) - case lda < max(1, n): - panic(badLdA) - } - - // Quick return if possible. 
- if n == 0 { - return - } - - switch { - case len(a) < (n-1)*lda+n: - panic(shortA) - case len(d) < n: - panic(shortD) - case len(e) < n-1: - panic(shortE) - case len(tau) < n-1: - panic(shortTau) - } - - bi := blas64.Implementation() - - if uplo == blas.Upper { - // Reduce the upper triangle of A. - for i := n - 2; i >= 0; i-- { - // Generate elementary reflector H_i = I - tau * v * v^T to - // annihilate A[0:i-1, i+1]. - var taui float64 - a[i*lda+i+1], taui = impl.Dlarfg(i+1, a[i*lda+i+1], a[i+1:], lda) - e[i] = a[i*lda+i+1] - if taui != 0 { - // Apply H_i from both sides to A[0:i,0:i]. - a[i*lda+i+1] = 1 - - // Compute x := tau * A * v, storing x in tau[0:i]. - bi.Dsymv(uplo, i+1, taui, a, lda, a[i+1:], lda, 0, tau, 1) - - // Compute w := x - 1/2 * tau * (x^T * v) * v. - alpha := -0.5 * taui * bi.Ddot(i+1, tau, 1, a[i+1:], lda) - bi.Daxpy(i+1, alpha, a[i+1:], lda, tau, 1) - - // Apply the transformation as a rank-2 update - // A = A - v * w^T - w * v^T. - bi.Dsyr2(uplo, i+1, -1, a[i+1:], lda, tau, 1, a, lda) - a[i*lda+i+1] = e[i] - } - d[i+1] = a[(i+1)*lda+i+1] - tau[i] = taui - } - d[0] = a[0] - return - } - // Reduce the lower triangle of A. - for i := 0; i < n-1; i++ { - // Generate elementary reflector H_i = I - tau * v * v^T to - // annihilate A[i+2:n, i]. - var taui float64 - a[(i+1)*lda+i], taui = impl.Dlarfg(n-i-1, a[(i+1)*lda+i], a[min(i+2, n-1)*lda+i:], lda) - e[i] = a[(i+1)*lda+i] - if taui != 0 { - // Apply H_i from both sides to A[i+1:n, i+1:n]. - a[(i+1)*lda+i] = 1 - - // Compute x := tau * A * v, storing x in tau[i:n-1]. - bi.Dsymv(uplo, n-i-1, taui, a[(i+1)*lda+i+1:], lda, a[(i+1)*lda+i:], lda, 0, tau[i:], 1) - - // Compute w := x - 1/2 * tau * (x^T * v) * v. - alpha := -0.5 * taui * bi.Ddot(n-i-1, tau[i:], 1, a[(i+1)*lda+i:], lda) - bi.Daxpy(n-i-1, alpha, a[(i+1)*lda+i:], lda, tau[i:], 1) - - // Apply the transformation as a rank-2 update - // A = A - v * w^T - w * v^T. - bi.Dsyr2(uplo, n-i-1, -1, a[(i+1)*lda+i:], lda, tau[i:], 1, a[(i+1)*lda+i+1:], lda) - a[(i+1)*lda+i] = e[i] - } - d[i] = a[i*lda+i] - tau[i] = taui - } - d[n-1] = a[(n-1)*lda+n-1] -} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dsytrd.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dsytrd.go deleted file mode 100644 index df47568e9..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/dsytrd.go +++ /dev/null @@ -1,172 +0,0 @@ -// Copyright ©2016 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import ( - "gonum.org/v1/gonum/blas" - "gonum.org/v1/gonum/blas/blas64" -) - -// Dsytrd reduces a symmetric n×n matrix A to symmetric tridiagonal form by an -// orthogonal similarity transformation -// Q^T * A * Q = T -// where Q is an orthonormal matrix and T is symmetric and tridiagonal. -// -// On entry, a contains the elements of the input matrix in the triangle specified -// by uplo. On exit, the diagonal and sub/super-diagonal are overwritten by the -// corresponding elements of the tridiagonal matrix T. The remaining elements in -// the triangle, along with the array tau, contain the data to construct Q as -// the product of elementary reflectors. -// -// If uplo == blas.Upper, Q is constructed with -// Q = H_{n-2} * ... * H_1 * H_0 -// where -// H_i = I - tau_i * v * v^T -// v is constructed as v[i+1:n] = 0, v[i] = 1, v[0:i-1] is stored in A[0:i-1, i+1].
-// The elements of A are -// [ d e v1 v2 v3] -// [ d e v2 v3] -// [ d e v3] -// [ d e] -// [ d] -// -// If uplo == blas.Lower, Q is constructed with -// Q = H_0 * H_1 * ... * H_{n-2} -// where -// H_i = I - tau_i * v * v^T -// v is constructed as v[0:i+1] = 0, v[i+1] = 1, v[i+2:n] is stored in A[i+2:n, i]. -// The elements of A are -// [ d ] -// [ e d ] -// [v0 e d ] -// [v0 v1 e d ] -// [v0 v1 v2 e d] -// -// d must have length n, and e and tau must have length n-1. Dsytrd will panic if -// these conditions are not met. -// -// work is temporary storage, and lwork specifies the usable memory length. At minimum, -// lwork >= 1, and Dsytrd will panic otherwise. The amount of blocking is -// limited by the usable length. -// If lwork == -1, instead of computing Dsytrd the optimal work length is stored -// into work[0]. -// -// Dsytrd is an internal routine. It is exported for testing purposes. -func (impl Implementation) Dsytrd(uplo blas.Uplo, n int, a []float64, lda int, d, e, tau, work []float64, lwork int) { - switch { - case uplo != blas.Upper && uplo != blas.Lower: - panic(badUplo) - case n < 0: - panic(nLT0) - case lda < max(1, n): - panic(badLdA) - case lwork < 1 && lwork != -1: - panic(badLWork) - case len(work) < max(1, lwork): - panic(shortWork) - } - - // Quick return if possible. - if n == 0 { - work[0] = 1 - return - } - - nb := impl.Ilaenv(1, "DSYTRD", string(uplo), n, -1, -1, -1) - lworkopt := n * nb - if lwork == -1 { - work[0] = float64(lworkopt) - return - } - - switch { - case len(a) < (n-1)*lda+n: - panic(shortA) - case len(d) < n: - panic(shortD) - case len(e) < n-1: - panic(shortE) - case len(tau) < n-1: - panic(shortTau) - } - - bi := blas64.Implementation() - - nx := n - iws := 1 - var ldwork int - if 1 < nb && nb < n { - // Determine when to cross over from blocked to unblocked code. The last - // block is always handled by unblocked code. - nx = max(nb, impl.Ilaenv(3, "DSYTRD", string(uplo), n, -1, -1, -1)) - if nx < n { - // Determine if workspace is large enough for blocked code. - ldwork = nb - iws = n * ldwork - if lwork < iws { - // Not enough workspace to use optimal nb: determine the minimum - // value of nb and reduce nb or force use of unblocked code by - // setting nx = n. - nb = max(lwork/n, 1) - nbmin := impl.Ilaenv(2, "DSYTRD", string(uplo), n, -1, -1, -1) - if nb < nbmin { - nx = n - } - } - } else { - nx = n - } - } else { - nb = 1 - } - ldwork = nb - - if uplo == blas.Upper { - // Reduce the upper triangle of A. Columns 0:kk are handled by the - // unblocked method. - var i int - kk := n - ((n-nx+nb-1)/nb)*nb - for i = n - nb; i >= kk; i -= nb { - // Reduce columns i:i+nb to tridiagonal form and form the matrix W - // which is needed to update the unreduced part of the matrix. - impl.Dlatrd(uplo, i+nb, nb, a, lda, e, tau, work, ldwork) - - // Update the unreduced submatrix A[0:i-1,0:i-1], using an update - // of the form A = A - V*W^T - W*V^T. - bi.Dsyr2k(uplo, blas.NoTrans, i, nb, -1, a[i:], lda, work, ldwork, 1, a, lda) - - // Copy superdiagonal elements back into A, and diagonal elements into D. - for j := i; j < i+nb; j++ { - a[(j-1)*lda+j] = e[j-1] - d[j] = a[j*lda+j] - } - } - // Use unblocked code to reduce the last or only block - // (at this point i == kk). - impl.Dsytd2(uplo, kk, a, lda, d, e, tau) - } else { - var i int - // Reduce the lower triangle of A. - for i = 0; i < n-nx; i += nb { - // Reduce columns i:i+nb to tridiagonal form and form the matrix W - // which is needed to update the unreduced part of the matrix.
- impl.Dlatrd(uplo, n-i, nb, a[i*lda+i:], lda, e[i:], tau[i:], work, ldwork) - - // Update the unreduced submatrix A[i+ib:n, i+ib:n], using an update - // of the form A = A + V*W^T - W*V^T. - bi.Dsyr2k(uplo, blas.NoTrans, n-i-nb, nb, -1, a[(i+nb)*lda+i:], lda, - work[nb*ldwork:], ldwork, 1, a[(i+nb)*lda+i+nb:], lda) - - // Copy subdiagonal elements back into A, and diagonal elements into D. - for j := i; j < i+nb; j++ { - a[(j+1)*lda+j] = e[j] - d[j] = a[j*lda+j] - } - } - // Use unblocked code to reduce the last or only block. - impl.Dsytd2(uplo, n-i, a[i*lda+i:], lda, d[i:], e[i:], tau[i:]) - } - work[0] = float64(iws) -} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dtgsja.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dtgsja.go deleted file mode 100644 index d70d19623..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/dtgsja.go +++ /dev/null @@ -1,373 +0,0 @@ -// Copyright ©2017 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import ( - "math" - - "gonum.org/v1/gonum/blas" - "gonum.org/v1/gonum/blas/blas64" - "gonum.org/v1/gonum/lapack" -) - -// Dtgsja computes the generalized singular value decomposition (GSVD) -// of two real upper triangular or trapezoidal matrices A and B. -// -// A and B have the following forms, which may be obtained by the -// preprocessing subroutine Dggsvp from a general m×n matrix A and p×n -// matrix B: -// -// n-k-l k l -// A = k [ 0 A12 A13 ] if m-k-l >= 0; -// l [ 0 0 A23 ] -// m-k-l [ 0 0 0 ] -// -// n-k-l k l -// A = k [ 0 A12 A13 ] if m-k-l < 0; -// m-k [ 0 0 A23 ] -// -// n-k-l k l -// B = l [ 0 0 B13 ] -// p-l [ 0 0 0 ] -// -// where the k×k matrix A12 and l×l matrix B13 are non-singular -// upper triangular. A23 is l×l upper triangular if m-k-l >= 0, -// otherwise A23 is (m-k)×l upper trapezoidal. -// -// On exit, -// -// U^T*A*Q = D1*[ 0 R ], V^T*B*Q = D2*[ 0 R ], -// -// where U, V and Q are orthogonal matrices. -// R is a non-singular upper triangular matrix, and D1 and D2 are -// diagonal matrices, which are of the following structures: -// -// If m-k-l >= 0, -// -// k l -// D1 = k [ I 0 ] -// l [ 0 C ] -// m-k-l [ 0 0 ] -// -// k l -// D2 = l [ 0 S ] -// p-l [ 0 0 ] -// -// n-k-l k l -// [ 0 R ] = k [ 0 R11 R12 ] k -// l [ 0 0 R22 ] l -// -// where -// -// C = diag( alpha_k, ... , alpha_{k+l} ), -// S = diag( beta_k, ... , beta_{k+l} ), -// C^2 + S^2 = I. -// -// R is stored in -// A[0:k+l, n-k-l:n] -// on exit. -// -// If m-k-l < 0, -// -// k m-k k+l-m -// D1 = k [ I 0 0 ] -// m-k [ 0 C 0 ] -// -// k m-k k+l-m -// D2 = m-k [ 0 S 0 ] -// k+l-m [ 0 0 I ] -// p-l [ 0 0 0 ] -// -// n-k-l k m-k k+l-m -// [ 0 R ] = k [ 0 R11 R12 R13 ] -// m-k [ 0 0 R22 R23 ] -// k+l-m [ 0 0 0 R33 ] -// -// where -// C = diag( alpha_k, ... , alpha_m ), -// S = diag( beta_k, ... , beta_m ), -// C^2 + S^2 = I. -// -// R = [ R11 R12 R13 ] is stored in A[0:m, n-k-l:n] -// [ 0 R22 R23 ] -// and R33 is stored in -// B[m-k:l, n+m-k-l:n] on exit. -// -// The computation of the orthogonal transformation matrices U, V or Q -// is optional. These matrices may either be formed explicitly, or they -// may be post-multiplied into input matrices U1, V1, or Q1. -// -// Dtgsja essentially uses a variant of Kogbetliantz algorithm to reduce -// min(l,m-k)×l triangular or trapezoidal matrix A23 and l×l -// matrix B13 to the form: -// -// U1^T*A13*Q1 = C1*R1; V1^T*B13*Q1 = S1*R1, -// -// where U1, V1 and Q1 are orthogonal matrices. 
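The blocked driver Dsytrd deleted above follows the standard LAPACK workspace-query convention: a call with lwork == -1 stores the optimal workspace length in work[0] and computes nothing else. A minimal sketch, assuming the same upstream package and illustrative data as before:

```go
package main

import (
	"fmt"

	"gonum.org/v1/gonum/blas"
	lapackgonum "gonum.org/v1/gonum/lapack/gonum"
)

func main() {
	impl := lapackgonum.Implementation{}

	n := 4
	a := []float64{
		4, 1, 2, 0,
		0, 3, 1, 1,
		0, 0, 5, 2,
		0, 0, 0, 6,
	}
	d := make([]float64, n)
	e := make([]float64, n-1)
	tau := make([]float64, n-1)

	// Workspace query: lwork == -1 writes the optimal length to work[0].
	work := make([]float64, 1)
	impl.Dsytrd(blas.Upper, n, a, n, d, e, tau, work, -1)
	work = make([]float64, int(work[0]))

	impl.Dsytrd(blas.Upper, n, a, n, d, e, tau, work, len(work))
	fmt.Println("diag(T):", d, "offdiag(T):", e)
}
```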
C1 and S1 are diagonal -// matrices satisfying -// -// C1^2 + S1^2 = I, -// -// and R1 is an l×l non-singular upper triangular matrix. -// -// jobU, jobV and jobQ are options for computing the orthogonal matrices. The behavior -// is as follows -// jobU == lapack.GSVDU Compute orthogonal matrix U -// jobU == lapack.GSVDUnit Use unit-initialized matrix -// jobU == lapack.GSVDNone Do not compute orthogonal matrix. -// The behavior is the same for jobV and jobQ with the exception that instead of -// lapack.GSVDU these accept lapack.GSVDV and lapack.GSVDQ respectively. -// The matrices U, V and Q must be m×m, p×p and n×n respectively unless the -// relevant job parameter is lapack.GSVDNone. -// -// k and l specify the sub-blocks in the input matrices A and B: -// A23 = A[k:min(k+l,m), n-l:n) and B13 = B[0:l, n-l:n] -// of A and B, whose GSVD is going to be computed by Dtgsja. -// -// tola and tolb are the convergence criteria for the Jacobi-Kogbetliantz -// iteration procedure. Generally, they are the same as used in the preprocessing -// step, for example, -// tola = max(m, n)*norm(A)*eps, -// tolb = max(p, n)*norm(B)*eps, -// where eps is the machine epsilon. -// -// work must have length at least 2*n, otherwise Dtgsja will panic. -// -// alpha and beta must have length n or Dtgsja will panic. On exit, alpha and -// beta contain the generalized singular value pairs of A and B -// alpha[0:k] = 1, -// beta[0:k] = 0, -// if m-k-l >= 0, -// alpha[k:k+l] = diag(C), -// beta[k:k+l] = diag(S), -// if m-k-l < 0, -// alpha[k:m]= C, alpha[m:k+l]= 0 -// beta[k:m] = S, beta[m:k+l] = 1. -// if k+l < n, -// alpha[k+l:n] = 0 and -// beta[k+l:n] = 0. -// -// On exit, A[n-k:n, 0:min(k+l,m)] contains the triangular matrix R or part of R -// and if necessary, B[m-k:l, n+m-k-l:n] contains a part of R. -// -// Dtgsja returns whether the routine converged and the number of iteration cycles -// that were run. -// -// Dtgsja is an internal routine. It is exported for testing purposes. 
-func (impl Implementation) Dtgsja(jobU, jobV, jobQ lapack.GSVDJob, m, p, n, k, l int, a []float64, lda int, b []float64, ldb int, tola, tolb float64, alpha, beta, u []float64, ldu int, v []float64, ldv int, q []float64, ldq int, work []float64) (cycles int, ok bool) { - const maxit = 40 - - initu := jobU == lapack.GSVDUnit - wantu := initu || jobU == lapack.GSVDU - - initv := jobV == lapack.GSVDUnit - wantv := initv || jobV == lapack.GSVDV - - initq := jobQ == lapack.GSVDUnit - wantq := initq || jobQ == lapack.GSVDQ - - switch { - case !initu && !wantu && jobU != lapack.GSVDNone: - panic(badGSVDJob + "U") - case !initv && !wantv && jobV != lapack.GSVDNone: - panic(badGSVDJob + "V") - case !initq && !wantq && jobQ != lapack.GSVDNone: - panic(badGSVDJob + "Q") - case m < 0: - panic(mLT0) - case p < 0: - panic(pLT0) - case n < 0: - panic(nLT0) - - case lda < max(1, n): - panic(badLdA) - case len(a) < (m-1)*lda+n: - panic(shortA) - - case ldb < max(1, n): - panic(badLdB) - case len(b) < (p-1)*ldb+n: - panic(shortB) - - case len(alpha) != n: - panic(badLenAlpha) - case len(beta) != n: - panic(badLenBeta) - - case ldu < 1, wantu && ldu < m: - panic(badLdU) - case wantu && len(u) < (m-1)*ldu+m: - panic(shortU) - - case ldv < 1, wantv && ldv < p: - panic(badLdV) - case wantv && len(v) < (p-1)*ldv+p: - panic(shortV) - - case ldq < 1, wantq && ldq < n: - panic(badLdQ) - case wantq && len(q) < (n-1)*ldq+n: - panic(shortQ) - - case len(work) < 2*n: - panic(shortWork) - } - - // Initialize U, V and Q, if necessary - if initu { - impl.Dlaset(blas.All, m, m, 0, 1, u, ldu) - } - if initv { - impl.Dlaset(blas.All, p, p, 0, 1, v, ldv) - } - if initq { - impl.Dlaset(blas.All, n, n, 0, 1, q, ldq) - } - - bi := blas64.Implementation() - minTol := math.Min(tola, tolb) - - // Loop until convergence. - upper := false - for cycles = 1; cycles <= maxit; cycles++ { - upper = !upper - - for i := 0; i < l-1; i++ { - for j := i + 1; j < l; j++ { - var a1, a2, a3 float64 - if k+i < m { - a1 = a[(k+i)*lda+n-l+i] - } - if k+j < m { - a3 = a[(k+j)*lda+n-l+j] - } - - b1 := b[i*ldb+n-l+i] - b3 := b[j*ldb+n-l+j] - - var b2 float64 - if upper { - if k+i < m { - a2 = a[(k+i)*lda+n-l+j] - } - b2 = b[i*ldb+n-l+j] - } else { - if k+j < m { - a2 = a[(k+j)*lda+n-l+i] - } - b2 = b[j*ldb+n-l+i] - } - - csu, snu, csv, snv, csq, snq := impl.Dlags2(upper, a1, a2, a3, b1, b2, b3) - - // Update (k+i)-th and (k+j)-th rows of matrix A: U^T*A. - if k+j < m { - bi.Drot(l, a[(k+j)*lda+n-l:], 1, a[(k+i)*lda+n-l:], 1, csu, snu) - } - - // Update i-th and j-th rows of matrix B: V^T*B. - bi.Drot(l, b[j*ldb+n-l:], 1, b[i*ldb+n-l:], 1, csv, snv) - - // Update (n-l+i)-th and (n-l+j)-th columns of matrices - // A and B: A*Q and B*Q. - bi.Drot(min(k+l, m), a[n-l+j:], lda, a[n-l+i:], lda, csq, snq) - bi.Drot(l, b[n-l+j:], ldb, b[n-l+i:], ldb, csq, snq) - - if upper { - if k+i < m { - a[(k+i)*lda+n-l+j] = 0 - } - b[i*ldb+n-l+j] = 0 - } else { - if k+j < m { - a[(k+j)*lda+n-l+i] = 0 - } - b[j*ldb+n-l+i] = 0 - } - - // Update orthogonal matrices U, V, Q, if desired. - if wantu && k+j < m { - bi.Drot(m, u[k+j:], ldu, u[k+i:], ldu, csu, snu) - } - if wantv { - bi.Drot(p, v[j:], ldv, v[i:], ldv, csv, snv) - } - if wantq { - bi.Drot(n, q[n-l+j:], ldq, q[n-l+i:], ldq, csq, snq) - } - } - } - - if !upper { - // The matrices A13 and B13 were lower triangular at the start - // of the cycle, and are now upper triangular. - // - // Convergence test: test the parallelism of the corresponding - // rows of A and B. 
- var error float64 - for i := 0; i < min(l, m-k); i++ { - bi.Dcopy(l-i, a[(k+i)*lda+n-l+i:], 1, work, 1) - bi.Dcopy(l-i, b[i*ldb+n-l+i:], 1, work[l:], 1) - ssmin := impl.Dlapll(l-i, work, 1, work[l:], 1) - error = math.Max(error, ssmin) - } - if math.Abs(error) <= minTol { - // The algorithm has converged. - // Compute the generalized singular value pairs (alpha, beta) - // and set the triangular matrix R to array A. - for i := 0; i < k; i++ { - alpha[i] = 1 - beta[i] = 0 - } - - for i := 0; i < min(l, m-k); i++ { - a1 := a[(k+i)*lda+n-l+i] - b1 := b[i*ldb+n-l+i] - - if a1 != 0 { - gamma := b1 / a1 - - // Change sign if necessary. - if gamma < 0 { - bi.Dscal(l-i, -1, b[i*ldb+n-l+i:], 1) - if wantv { - bi.Dscal(p, -1, v[i:], ldv) - } - } - beta[k+i], alpha[k+i], _ = impl.Dlartg(math.Abs(gamma), 1) - - if alpha[k+i] >= beta[k+i] { - bi.Dscal(l-i, 1/alpha[k+i], a[(k+i)*lda+n-l+i:], 1) - } else { - bi.Dscal(l-i, 1/beta[k+i], b[i*ldb+n-l+i:], 1) - bi.Dcopy(l-i, b[i*ldb+n-l+i:], 1, a[(k+i)*lda+n-l+i:], 1) - } - } else { - alpha[k+i] = 0 - beta[k+i] = 1 - bi.Dcopy(l-i, b[i*ldb+n-l+i:], 1, a[(k+i)*lda+n-l+i:], 1) - } - } - - for i := m; i < k+l; i++ { - alpha[i] = 0 - beta[i] = 1 - } - if k+l < n { - for i := k + l; i < n; i++ { - alpha[i] = 0 - beta[i] = 0 - } - } - - return cycles, true - } - } - } - - // The algorithm has not converged after maxit cycles. - return cycles, false -} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dtrcon.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dtrcon.go deleted file mode 100644 index 899c95dd5..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/dtrcon.go +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright ©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import ( - "math" - - "gonum.org/v1/gonum/blas" - "gonum.org/v1/gonum/blas/blas64" - "gonum.org/v1/gonum/lapack" -) - -// Dtrcon estimates the reciprocal of the condition number of a triangular matrix A. -// The condition number computed may be based on the 1-norm or the ∞-norm. -// -// work is a temporary data slice of length at least 3*n and Dtrcon will panic otherwise. -// -// iwork is a temporary data slice of length at least n and Dtrcon will panic otherwise. 
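The Dtgsja documentation above suggests forming the convergence tolerances from the norms of the preprocessed inputs. A sketch of that computation only, under stated assumptions: the preprocessing step (Dggsvp in the documentation) is elided, the Frobenius norm is one reasonable choice of norm, and passing a nil work slice to Dlange is assumed safe because work is only referenced for the max-row-sum norm.

```go
package main

import (
	"fmt"
	"math"

	"gonum.org/v1/gonum/lapack"
	lapackgonum "gonum.org/v1/gonum/lapack/gonum"
)

func main() {
	impl := lapackgonum.Implementation{}

	// Illustrative m×n A and p×n B; a real GSVD would first reduce them
	// to the triangular/trapezoidal form described above.
	m, p, n := 2, 2, 2
	a := []float64{1, 2, 0, 3}
	b := []float64{4, 0, 0, 5}

	// tola = max(m,n)*norm(A)*eps, tolb = max(p,n)*norm(B)*eps.
	eps := math.Nextafter(1, 2) - 1 // float64 machine epsilon, 2^-52.
	tola := math.Max(float64(m), float64(n)) * impl.Dlange(lapack.Frobenius, m, n, a, n, nil) * eps
	tolb := math.Max(float64(p), float64(n)) * impl.Dlange(lapack.Frobenius, p, n, b, n, nil) * eps
	fmt.Println("tola:", tola, "tolb:", tolb)
}
```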
-func (impl Implementation) Dtrcon(norm lapack.MatrixNorm, uplo blas.Uplo, diag blas.Diag, n int, a []float64, lda int, work []float64, iwork []int) float64 { - switch { - case norm != lapack.MaxColumnSum && norm != lapack.MaxRowSum: - panic(badNorm) - case uplo != blas.Upper && uplo != blas.Lower: - panic(badUplo) - case diag != blas.NonUnit && diag != blas.Unit: - panic(badDiag) - case n < 0: - panic(nLT0) - case lda < max(1, n): - panic(badLdA) - } - - if n == 0 { - return 1 - } - - switch { - case len(a) < (n-1)*lda+n: - panic(shortA) - case len(work) < 3*n: - panic(shortWork) - case len(iwork) < n: - panic(shortIWork) - } - - bi := blas64.Implementation() - - var rcond float64 - smlnum := dlamchS * float64(n) - - anorm := impl.Dlantr(norm, uplo, diag, n, n, a, lda, work) - - if anorm <= 0 { - return rcond - } - var ainvnm float64 - var normin bool - kase1 := 2 - if norm == lapack.MaxColumnSum { - kase1 = 1 - } - var kase int - isave := new([3]int) - var scale float64 - for { - ainvnm, kase = impl.Dlacn2(n, work[n:], work, iwork, ainvnm, kase, isave) - if kase == 0 { - if ainvnm != 0 { - rcond = (1 / anorm) / ainvnm - } - return rcond - } - if kase == kase1 { - scale = impl.Dlatrs(uplo, blas.NoTrans, diag, normin, n, a, lda, work, work[2*n:]) - } else { - scale = impl.Dlatrs(uplo, blas.Trans, diag, normin, n, a, lda, work, work[2*n:]) - } - normin = true - if scale != 1 { - ix := bi.Idamax(n, work, 1) - xnorm := math.Abs(work[ix]) - if scale == 0 || scale < xnorm*smlnum { - return rcond - } - impl.Drscl(n, scale, work, 1) - } - } -} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dtrevc3.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dtrevc3.go deleted file mode 100644 index 17121b8db..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/dtrevc3.go +++ /dev/null @@ -1,885 +0,0 @@ -// Copyright ©2016 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import ( - "math" - - "gonum.org/v1/gonum/blas" - "gonum.org/v1/gonum/blas/blas64" - "gonum.org/v1/gonum/lapack" -) - -// Dtrevc3 computes some or all of the right and/or left eigenvectors of an n×n -// upper quasi-triangular matrix T in Schur canonical form. Matrices of this -// type are produced by the Schur factorization of a real general matrix A -// A = Q T Q^T, -// as computed by Dhseqr. -// -// The right eigenvector x of T corresponding to an -// eigenvalue λ is defined by -// T x = λ x, -// and the left eigenvector y is defined by -// y^T T = λ y^T. -// -// The eigenvalues are read directly from the diagonal blocks of T. -// -// This routine returns the matrices X and/or Y of right and left eigenvectors -// of T, or the products Q*X and/or Q*Y, where Q is an input matrix. If Q is the -// orthogonal factor that reduces a matrix A to Schur form T, then Q*X and Q*Y -// are the matrices of right and left eigenvectors of A. -// -// If side == lapack.EVRight, only right eigenvectors will be computed. -// If side == lapack.EVLeft, only left eigenvectors will be computed. -// If side == lapack.EVBoth, both right and left eigenvectors will be computed. -// For other values of side, Dtrevc3 will panic. -// -// If howmny == lapack.EVAll, all right and/or left eigenvectors will be -// computed. -// If howmny == lapack.EVAllMulQ, all right and/or left eigenvectors will be -// computed and multiplied from left by the matrices in VR and/or VL. 
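A minimal sketch of the Dtrcon routine removed above, estimating the reciprocal 1-norm condition number of a small upper-triangular matrix. The workspace lengths follow the contract stated in its documentation; the matrix values are illustrative.

```go
package main

import (
	"fmt"

	"gonum.org/v1/gonum/blas"
	"gonum.org/v1/gonum/lapack"
	lapackgonum "gonum.org/v1/gonum/lapack/gonum"
)

func main() {
	impl := lapackgonum.Implementation{}

	// Upper-triangular 3×3 A stored row-major.
	n := 3
	a := []float64{
		2, 1, 1,
		0, 3, 1,
		0, 0, 4,
	}
	work := make([]float64, 3*n) // At least 3*n.
	iwork := make([]int, n)      // At least n.

	rcond := impl.Dtrcon(lapack.MaxColumnSum, blas.Upper, blas.NonUnit, n, a, n, work, iwork)
	fmt.Printf("estimated reciprocal 1-norm condition number: %.3g\n", rcond)
}
```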
-// If howmny == lapack.EVSelected, right and/or left eigenvectors will be -// computed as indicated by selected. -// For other values of howmny, Dtrevc3 will panic. -// -// selected specifies which eigenvectors will be computed. It must have length n -// if howmny == lapack.EVSelected, and it is not referenced otherwise. -// If w_j is a real eigenvalue, the corresponding real eigenvector will be -// computed if selected[j] is true. -// If w_j and w_{j+1} are the real and imaginary parts of a complex eigenvalue, -// the corresponding complex eigenvector is computed if either selected[j] or -// selected[j+1] is true, and on return selected[j] will be set to true and -// selected[j+1] will be set to false. -// -// VL and VR are n×mm matrices. If howmny is lapack.EVAll or -// lapack.EVAllMulQ, mm must be at least n. If howmny is -// lapack.EVSelected, mm must be large enough to store the selected -// eigenvectors. Each selected real eigenvector occupies one column and each -// selected complex eigenvector occupies two columns. If mm is not sufficiently -// large, Dtrevc3 will panic. -// -// On entry, if howmny is lapack.EVAllMulQ, it is assumed that VL (if side -// is lapack.EVLeft or lapack.EVBoth) contains an n×n matrix QL, -// and that VR (if side is lapack.EVRight or lapack.EVBoth) contains -// an n×n matrix QR. QL and QR are typically the orthogonal matrix Q of Schur -// vectors returned by Dhseqr. -// -// On return, if side is lapack.EVLeft or lapack.EVBoth, -// VL will contain: -// if howmny == lapack.EVAll, the matrix Y of left eigenvectors of T, -// if howmny == lapack.EVAllMulQ, the matrix Q*Y, -// if howmny == lapack.EVSelected, the left eigenvectors of T specified by -// selected, stored consecutively in the -// columns of VL, in the same order as their -// eigenvalues. -// VL is not referenced if side == lapack.EVRight. -// -// On return, if side is lapack.EVRight or lapack.EVBoth, -// VR will contain: -// if howmny == lapack.EVAll, the matrix X of right eigenvectors of T, -// if howmny == lapack.EVAllMulQ, the matrix Q*X, -// if howmny == lapack.EVSelected, the right eigenvectors of T specified by -// selected, stored consecutively in the -// columns of VR, in the same order as their -// eigenvalues. -// VR is not referenced if side == lapack.EVLeft. -// -// Complex eigenvectors corresponding to a complex eigenvalue are stored in VL -// and VR in two consecutive columns, the first holding the real part, and the -// second the imaginary part. -// -// Each eigenvector will be normalized so that the element of largest magnitude -// has magnitude 1. Here the magnitude of a complex number (x,y) is taken to be -// |x| + |y|. -// -// work must have length at least lwork and lwork must be at least max(1,3*n), -// otherwise Dtrevc3 will panic. For optimum performance, lwork should be at -// least n+2*n*nb, where nb is the optimal blocksize. -// -// If lwork == -1, instead of performing Dtrevc3, the function only estimates -// the optimal workspace size based on n and stores it into work[0]. -// -// Dtrevc3 returns the number of columns in VL and/or VR actually used to store -// the eigenvectors. -// -// Dtrevc3 is an internal routine. It is exported for testing purposes.
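A minimal sketch of the contract just documented, computing all right eigenvectors of a tiny upper-triangular T. It assumes that passing a nil VL with ldvl = 1 is acceptable when side == lapack.EVRight, since VL is then not referenced; the matrix values are illustrative.

```go
package main

import (
	"fmt"

	"gonum.org/v1/gonum/lapack"
	lapackgonum "gonum.org/v1/gonum/lapack/gonum"
)

func main() {
	impl := lapackgonum.Implementation{}

	// Upper-triangular T in Schur form with eigenvalues 1 and 3.
	n := 2
	t := []float64{
		1, 2,
		0, 3,
	}
	vr := make([]float64, n*n)

	// Workspace query, then compute all right eigenvectors.
	work := make([]float64, 1)
	impl.Dtrevc3(lapack.EVRight, lapack.EVAll, nil, n, t, n, nil, 1, vr, n, n, work, -1)
	work = make([]float64, int(work[0]))

	m := impl.Dtrevc3(lapack.EVRight, lapack.EVAll, nil, n, t, n, nil, 1, vr, n, n, work, len(work))
	fmt.Println(m, "right eigenvectors in the columns of vr:", vr)
}
```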
-func (impl Implementation) Dtrevc3(side lapack.EVSide, howmny lapack.EVHowMany, selected []bool, n int, t []float64, ldt int, vl []float64, ldvl int, vr []float64, ldvr int, mm int, work []float64, lwork int) (m int) { - bothv := side == lapack.EVBoth - rightv := side == lapack.EVRight || bothv - leftv := side == lapack.EVLeft || bothv - switch { - case !rightv && !leftv: - panic(badEVSide) - case howmny != lapack.EVAll && howmny != lapack.EVAllMulQ && howmny != lapack.EVSelected: - panic(badEVHowMany) - case n < 0: - panic(nLT0) - case ldt < max(1, n): - panic(badLdT) - case mm < 0: - panic(mmLT0) - case ldvl < 1: - // ldvl and ldvr are also checked below after the computation of - // m (number of columns of VL and VR) in case of howmny == EVSelected. - panic(badLdVL) - case ldvr < 1: - panic(badLdVR) - case lwork < max(1, 3*n) && lwork != -1: - panic(badLWork) - case len(work) < max(1, lwork): - panic(shortWork) - } - - // Quick return if possible. - if n == 0 { - work[0] = 1 - return 0 - } - - // Normally we don't check slice lengths until after the workspace - // query. However, even in case of the workspace query we need to - // compute and return the value of m, and since the computation accesses t, - // we put the length check of t here. - if len(t) < (n-1)*ldt+n { - panic(shortT) - } - - if howmny == lapack.EVSelected { - if len(selected) != n { - panic(badLenSelected) - } - // Set m to the number of columns required to store the selected - // eigenvectors, and standardize the slice selected. - // Each selected real eigenvector occupies one column and each - // selected complex eigenvector occupies two columns. - for j := 0; j < n; { - if j == n-1 || t[(j+1)*ldt+j] == 0 { - // Diagonal 1×1 block corresponding to a - // real eigenvalue. - if selected[j] { - m++ - } - j++ - } else { - // Diagonal 2×2 block corresponding to a - // complex eigenvalue. - if selected[j] || selected[j+1] { - selected[j] = true - selected[j+1] = false - m += 2 - } - j += 2 - } - } - } else { - m = n - } - if mm < m { - panic(badMm) - } - - // Quick return in case of a workspace query. - nb := impl.Ilaenv(1, "DTREVC", string(side)+string(howmny), n, -1, -1, -1) - if lwork == -1 { - work[0] = float64(n + 2*n*nb) - return m - } - - // Quick return if no eigenvectors were selected. - if m == 0 { - return 0 - } - - switch { - case leftv && ldvl < mm: - panic(badLdVL) - case leftv && len(vl) < (n-1)*ldvl+mm: - panic(shortVL) - - case rightv && ldvr < mm: - panic(badLdVR) - case rightv && len(vr) < (n-1)*ldvr+mm: - panic(shortVR) - } - - // Use blocked version of back-transformation if sufficient workspace. - // Zero-out the workspace to avoid potential NaN propagation. - const ( - nbmin = 8 - nbmax = 128 - ) - if howmny == lapack.EVAllMulQ && lwork >= n+2*n*nbmin { - nb = min((lwork-n)/(2*n), nbmax) - impl.Dlaset(blas.All, n, 1+2*nb, 0, 0, work[:n+2*nb*n], 1+2*nb) - } else { - nb = 1 - } - - // Set the constants to control overflow. - ulp := dlamchP - smlnum := float64(n) / ulp * dlamchS - bignum := (1 - ulp) / smlnum - - // Split work into a vector of column norms and an n×2*nb matrix b. - norms := work[:n] - ldb := 2 * nb - b := work[n : n+n*ldb] - - // Compute 1-norm of each column of strictly upper triangular part of T - // to control overflow in triangular solver. - norms[0] = 0 - for j := 1; j < n; j++ { - var cn float64 - for i := 0; i < j; i++ { - cn += math.Abs(t[i*ldt+j]) - } - norms[j] = cn - } - - bi := blas64.Implementation() - - var ( - x [4]float64 - - iv int // Index of column in current block. 
- is int - - // ip is used below to specify the real or complex eigenvalue: - // ip == 0, real eigenvalue, - // 1, first of conjugate complex pair (wr,wi), - // -1, second of conjugate complex pair (wr,wi). - ip int - iscomplex [nbmax]int // Stores ip for each column in current block. - ) - - if side == lapack.EVLeft { - goto leftev - } - - // Compute right eigenvectors. - - // For complex right vector, iv-1 is for real part and iv for complex - // part. Non-blocked version always uses iv=1, blocked version starts - // with iv=nb-1 and goes down to 0 or 1. - iv = max(2, nb) - 1 - ip = 0 - is = m - 1 - for ki := n - 1; ki >= 0; ki-- { - if ip == -1 { - // Previous iteration (ki+1) was second of - // conjugate pair, so this ki is first of - // conjugate pair. - ip = 1 - continue - } - - if ki == 0 || t[ki*ldt+ki-1] == 0 { - // Last column or zero on sub-diagonal, so this - // ki must be real eigenvalue. - ip = 0 - } else { - // Non-zero on sub-diagonal, so this ki is - // second of conjugate pair. - ip = -1 - } - - if howmny == lapack.EVSelected { - if ip == 0 { - if !selected[ki] { - continue - } - } else if !selected[ki-1] { - continue - } - } - - // Compute the ki-th eigenvalue (wr,wi). - wr := t[ki*ldt+ki] - var wi float64 - if ip != 0 { - wi = math.Sqrt(math.Abs(t[ki*ldt+ki-1])) * math.Sqrt(math.Abs(t[(ki-1)*ldt+ki])) - } - smin := math.Max(ulp*(math.Abs(wr)+math.Abs(wi)), smlnum) - - if ip == 0 { - // Real right eigenvector. - - b[ki*ldb+iv] = 1 - // Form right-hand side. - for k := 0; k < ki; k++ { - b[k*ldb+iv] = -t[k*ldt+ki] - } - // Solve upper quasi-triangular system: - // [ T[0:ki,0:ki] - wr ]*X = scale*b. - for j := ki - 1; j >= 0; { - if j == 0 || t[j*ldt+j-1] == 0 { - // 1×1 diagonal block. - scale, xnorm, _ := impl.Dlaln2(false, 1, 1, smin, 1, t[j*ldt+j:], ldt, - 1, 1, b[j*ldb+iv:], ldb, wr, 0, x[:1], 2) - // Scale X[0,0] to avoid overflow when updating the - // right-hand side. - if xnorm > 1 && norms[j] > bignum/xnorm { - x[0] /= xnorm - scale /= xnorm - } - // Scale if necessary. - if scale != 1 { - bi.Dscal(ki+1, scale, b[iv:], ldb) - } - b[j*ldb+iv] = x[0] - // Update right-hand side. - bi.Daxpy(j, -x[0], t[j:], ldt, b[iv:], ldb) - j-- - } else { - // 2×2 diagonal block. - scale, xnorm, _ := impl.Dlaln2(false, 2, 1, smin, 1, t[(j-1)*ldt+j-1:], ldt, - 1, 1, b[(j-1)*ldb+iv:], ldb, wr, 0, x[:3], 2) - // Scale X[0,0] and X[1,0] to avoid overflow - // when updating the right-hand side. - if xnorm > 1 { - beta := math.Max(norms[j-1], norms[j]) - if beta > bignum/xnorm { - x[0] /= xnorm - x[2] /= xnorm - scale /= xnorm - } - } - // Scale if necessary. - if scale != 1 { - bi.Dscal(ki+1, scale, b[iv:], ldb) - } - b[(j-1)*ldb+iv] = x[0] - b[j*ldb+iv] = x[2] - // Update right-hand side. - bi.Daxpy(j-1, -x[0], t[j-1:], ldt, b[iv:], ldb) - bi.Daxpy(j-1, -x[2], t[j:], ldt, b[iv:], ldb) - j -= 2 - } - } - // Copy the vector x or Q*x to VR and normalize. - switch { - case howmny != lapack.EVAllMulQ: - // No back-transform: copy x to VR and normalize. - bi.Dcopy(ki+1, b[iv:], ldb, vr[is:], ldvr) - ii := bi.Idamax(ki+1, vr[is:], ldvr) - remax := 1 / math.Abs(vr[ii*ldvr+is]) - bi.Dscal(ki+1, remax, vr[is:], ldvr) - for k := ki + 1; k < n; k++ { - vr[k*ldvr+is] = 0 - } - case nb == 1: - // Version 1: back-transform each vector with GEMV, Q*x. 
- if ki > 0 { - bi.Dgemv(blas.NoTrans, n, ki, 1, vr, ldvr, b[iv:], ldb, - b[ki*ldb+iv], vr[ki:], ldvr) - } - ii := bi.Idamax(n, vr[ki:], ldvr) - remax := 1 / math.Abs(vr[ii*ldvr+ki]) - bi.Dscal(n, remax, vr[ki:], ldvr) - default: - // Version 2: back-transform block of vectors with GEMM. - // Zero out below vector. - for k := ki + 1; k < n; k++ { - b[k*ldb+iv] = 0 - } - iscomplex[iv] = ip - // Back-transform and normalization is done below. - } - } else { - // Complex right eigenvector. - - // Initial solve - // [ ( T[ki-1,ki-1] T[ki-1,ki] ) - (wr + i*wi) ]*X = 0. - // [ ( T[ki, ki-1] T[ki, ki] ) ] - if math.Abs(t[(ki-1)*ldt+ki]) >= math.Abs(t[ki*ldt+ki-1]) { - b[(ki-1)*ldb+iv-1] = 1 - b[ki*ldb+iv] = wi / t[(ki-1)*ldt+ki] - } else { - b[(ki-1)*ldb+iv-1] = -wi / t[ki*ldt+ki-1] - b[ki*ldb+iv] = 1 - } - b[ki*ldb+iv-1] = 0 - b[(ki-1)*ldb+iv] = 0 - // Form right-hand side. - for k := 0; k < ki-1; k++ { - b[k*ldb+iv-1] = -b[(ki-1)*ldb+iv-1] * t[k*ldt+ki-1] - b[k*ldb+iv] = -b[ki*ldb+iv] * t[k*ldt+ki] - } - // Solve upper quasi-triangular system: - // [ T[0:ki-1,0:ki-1] - (wr+i*wi) ]*X = scale*(b1+i*b2) - for j := ki - 2; j >= 0; { - if j == 0 || t[j*ldt+j-1] == 0 { - // 1×1 diagonal block. - - scale, xnorm, _ := impl.Dlaln2(false, 1, 2, smin, 1, t[j*ldt+j:], ldt, - 1, 1, b[j*ldb+iv-1:], ldb, wr, wi, x[:2], 2) - // Scale X[0,0] and X[0,1] to avoid - // overflow when updating the right-hand side. - if xnorm > 1 && norms[j] > bignum/xnorm { - x[0] /= xnorm - x[1] /= xnorm - scale /= xnorm - } - // Scale if necessary. - if scale != 1 { - bi.Dscal(ki+1, scale, b[iv-1:], ldb) - bi.Dscal(ki+1, scale, b[iv:], ldb) - } - b[j*ldb+iv-1] = x[0] - b[j*ldb+iv] = x[1] - // Update the right-hand side. - bi.Daxpy(j, -x[0], t[j:], ldt, b[iv-1:], ldb) - bi.Daxpy(j, -x[1], t[j:], ldt, b[iv:], ldb) - j-- - } else { - // 2×2 diagonal block. - - scale, xnorm, _ := impl.Dlaln2(false, 2, 2, smin, 1, t[(j-1)*ldt+j-1:], ldt, - 1, 1, b[(j-1)*ldb+iv-1:], ldb, wr, wi, x[:], 2) - // Scale X to avoid overflow when updating - // the right-hand side. - if xnorm > 1 { - beta := math.Max(norms[j-1], norms[j]) - if beta > bignum/xnorm { - rec := 1 / xnorm - x[0] *= rec - x[1] *= rec - x[2] *= rec - x[3] *= rec - scale *= rec - } - } - // Scale if necessary. - if scale != 1 { - bi.Dscal(ki+1, scale, b[iv-1:], ldb) - bi.Dscal(ki+1, scale, b[iv:], ldb) - } - b[(j-1)*ldb+iv-1] = x[0] - b[(j-1)*ldb+iv] = x[1] - b[j*ldb+iv-1] = x[2] - b[j*ldb+iv] = x[3] - // Update the right-hand side. - bi.Daxpy(j-1, -x[0], t[j-1:], ldt, b[iv-1:], ldb) - bi.Daxpy(j-1, -x[1], t[j-1:], ldt, b[iv:], ldb) - bi.Daxpy(j-1, -x[2], t[j:], ldt, b[iv-1:], ldb) - bi.Daxpy(j-1, -x[3], t[j:], ldt, b[iv:], ldb) - j -= 2 - } - } - - // Copy the vector x or Q*x to VR and normalize. - switch { - case howmny != lapack.EVAllMulQ: - // No back-transform: copy x to VR and normalize. - bi.Dcopy(ki+1, b[iv-1:], ldb, vr[is-1:], ldvr) - bi.Dcopy(ki+1, b[iv:], ldb, vr[is:], ldvr) - emax := 0.0 - for k := 0; k <= ki; k++ { - emax = math.Max(emax, math.Abs(vr[k*ldvr+is-1])+math.Abs(vr[k*ldvr+is])) - } - remax := 1 / emax - bi.Dscal(ki+1, remax, vr[is-1:], ldvr) - bi.Dscal(ki+1, remax, vr[is:], ldvr) - for k := ki + 1; k < n; k++ { - vr[k*ldvr+is-1] = 0 - vr[k*ldvr+is] = 0 - } - case nb == 1: - // Version 1: back-transform each vector with GEMV, Q*x. 
- if ki-1 > 0 { - bi.Dgemv(blas.NoTrans, n, ki-1, 1, vr, ldvr, b[iv-1:], ldb, - b[(ki-1)*ldb+iv-1], vr[ki-1:], ldvr) - bi.Dgemv(blas.NoTrans, n, ki-1, 1, vr, ldvr, b[iv:], ldb, - b[ki*ldb+iv], vr[ki:], ldvr) - } else { - bi.Dscal(n, b[(ki-1)*ldb+iv-1], vr[ki-1:], ldvr) - bi.Dscal(n, b[ki*ldb+iv], vr[ki:], ldvr) - } - emax := 0.0 - for k := 0; k < n; k++ { - emax = math.Max(emax, math.Abs(vr[k*ldvr+ki-1])+math.Abs(vr[k*ldvr+ki])) - } - remax := 1 / emax - bi.Dscal(n, remax, vr[ki-1:], ldvr) - bi.Dscal(n, remax, vr[ki:], ldvr) - default: - // Version 2: back-transform block of vectors with GEMM. - // Zero out below vector. - for k := ki + 1; k < n; k++ { - b[k*ldb+iv-1] = 0 - b[k*ldb+iv] = 0 - } - iscomplex[iv-1] = -ip - iscomplex[iv] = ip - iv-- - // Back-transform and normalization is done below. - } - } - if nb > 1 { - // Blocked version of back-transform. - - // For complex case, ki2 includes both vectors (ki-1 and ki). - ki2 := ki - if ip != 0 { - ki2-- - } - // Columns iv:nb of b are valid vectors. - // When the number of vectors stored reaches nb-1 or nb, - // or if this was last vector, do the Gemm. - if iv < 2 || ki2 == 0 { - bi.Dgemm(blas.NoTrans, blas.NoTrans, n, nb-iv, ki2+nb-iv, - 1, vr, ldvr, b[iv:], ldb, - 0, b[nb+iv:], ldb) - // Normalize vectors. - var remax float64 - for k := iv; k < nb; k++ { - if iscomplex[k] == 0 { - // Real eigenvector. - ii := bi.Idamax(n, b[nb+k:], ldb) - remax = 1 / math.Abs(b[ii*ldb+nb+k]) - } else if iscomplex[k] == 1 { - // First eigenvector of conjugate pair. - emax := 0.0 - for ii := 0; ii < n; ii++ { - emax = math.Max(emax, math.Abs(b[ii*ldb+nb+k])+math.Abs(b[ii*ldb+nb+k+1])) - } - remax = 1 / emax - // Second eigenvector of conjugate pair - // will reuse this value of remax. - } - bi.Dscal(n, remax, b[nb+k:], ldb) - } - impl.Dlacpy(blas.All, n, nb-iv, b[nb+iv:], ldb, vr[ki2:], ldvr) - iv = nb - 1 - } else { - iv-- - } - } - is-- - if ip != 0 { - is-- - } - } - - if side == lapack.EVRight { - return m - } - -leftev: - // Compute left eigenvectors. - - // For complex left vector, iv is for real part and iv+1 for complex - // part. Non-blocked version always uses iv=0. Blocked version starts - // with iv=0, goes up to nb-2 or nb-1. - iv = 0 - ip = 0 - is = 0 - for ki := 0; ki < n; ki++ { - if ip == 1 { - // Previous iteration ki-1 was first of conjugate pair, - // so this ki is second of conjugate pair. - ip = -1 - continue - } - - if ki == n-1 || t[(ki+1)*ldt+ki] == 0 { - // Last column or zero on sub-diagonal, so this ki must - // be real eigenvalue. - ip = 0 - } else { - // Non-zero on sub-diagonal, so this ki is first of - // conjugate pair. - ip = 1 - } - if howmny == lapack.EVSelected && !selected[ki] { - continue - } - - // Compute the ki-th eigenvalue (wr,wi). - wr := t[ki*ldt+ki] - var wi float64 - if ip != 0 { - wi = math.Sqrt(math.Abs(t[ki*ldt+ki+1])) * math.Sqrt(math.Abs(t[(ki+1)*ldt+ki])) - } - smin := math.Max(ulp*(math.Abs(wr)+math.Abs(wi)), smlnum) - - if ip == 0 { - // Real left eigenvector. - - b[ki*ldb+iv] = 1 - // Form right-hand side. - for k := ki + 1; k < n; k++ { - b[k*ldb+iv] = -t[ki*ldt+k] - } - // Solve transposed quasi-triangular system: - // [ T[ki+1:n,ki+1:n] - wr ]^T * X = scale*b - vmax := 1.0 - vcrit := bignum - for j := ki + 1; j < n; { - if j == n-1 || t[(j+1)*ldt+j] == 0 { - // 1×1 diagonal block. - - // Scale if necessary to avoid overflow - // when forming the right-hand side. 
- if norms[j] > vcrit { - rec := 1 / vmax - bi.Dscal(n-ki, rec, b[ki*ldb+iv:], ldb) - vmax = 1 - } - b[j*ldb+iv] -= bi.Ddot(j-ki-1, t[(ki+1)*ldt+j:], ldt, b[(ki+1)*ldb+iv:], ldb) - // Solve [ T[j,j] - wr ]^T * X = b. - scale, _, _ := impl.Dlaln2(false, 1, 1, smin, 1, t[j*ldt+j:], ldt, - 1, 1, b[j*ldb+iv:], ldb, wr, 0, x[:1], 2) - // Scale if necessary. - if scale != 1 { - bi.Dscal(n-ki, scale, b[ki*ldb+iv:], ldb) - } - b[j*ldb+iv] = x[0] - vmax = math.Max(math.Abs(b[j*ldb+iv]), vmax) - vcrit = bignum / vmax - j++ - } else { - // 2×2 diagonal block. - - // Scale if necessary to avoid overflow - // when forming the right-hand side. - beta := math.Max(norms[j], norms[j+1]) - if beta > vcrit { - bi.Dscal(n-ki+1, 1/vmax, b[ki*ldb+iv:], 1) - vmax = 1 - } - b[j*ldb+iv] -= bi.Ddot(j-ki-1, t[(ki+1)*ldt+j:], ldt, b[(ki+1)*ldb+iv:], ldb) - b[(j+1)*ldb+iv] -= bi.Ddot(j-ki-1, t[(ki+1)*ldt+j+1:], ldt, b[(ki+1)*ldb+iv:], ldb) - // Solve - // [ T[j,j]-wr T[j,j+1] ]^T * X = scale*[ b1 ] - // [ T[j+1,j] T[j+1,j+1]-wr ] [ b2 ] - scale, _, _ := impl.Dlaln2(true, 2, 1, smin, 1, t[j*ldt+j:], ldt, - 1, 1, b[j*ldb+iv:], ldb, wr, 0, x[:3], 2) - // Scale if necessary. - if scale != 1 { - bi.Dscal(n-ki, scale, b[ki*ldb+iv:], ldb) - } - b[j*ldb+iv] = x[0] - b[(j+1)*ldb+iv] = x[2] - vmax = math.Max(vmax, math.Max(math.Abs(b[j*ldb+iv]), math.Abs(b[(j+1)*ldb+iv]))) - vcrit = bignum / vmax - j += 2 - } - } - // Copy the vector x or Q*x to VL and normalize. - switch { - case howmny != lapack.EVAllMulQ: - // No back-transform: copy x to VL and normalize. - bi.Dcopy(n-ki, b[ki*ldb+iv:], ldb, vl[ki*ldvl+is:], ldvl) - ii := bi.Idamax(n-ki, vl[ki*ldvl+is:], ldvl) + ki - remax := 1 / math.Abs(vl[ii*ldvl+is]) - bi.Dscal(n-ki, remax, vl[ki*ldvl+is:], ldvl) - for k := 0; k < ki; k++ { - vl[k*ldvl+is] = 0 - } - case nb == 1: - // Version 1: back-transform each vector with Gemv, Q*x. - if n-ki-1 > 0 { - bi.Dgemv(blas.NoTrans, n, n-ki-1, - 1, vl[ki+1:], ldvl, b[(ki+1)*ldb+iv:], ldb, - b[ki*ldb+iv], vl[ki:], ldvl) - } - ii := bi.Idamax(n, vl[ki:], ldvl) - remax := 1 / math.Abs(vl[ii*ldvl+ki]) - bi.Dscal(n, remax, vl[ki:], ldvl) - default: - // Version 2: back-transform block of vectors with Gemm - // zero out above vector. - for k := 0; k < ki; k++ { - b[k*ldb+iv] = 0 - } - iscomplex[iv] = ip - // Back-transform and normalization is done below. - } - } else { - // Complex left eigenvector. - - // Initial solve: - // [ [ T[ki,ki] T[ki,ki+1] ]^T - (wr - i* wi) ]*X = 0. - // [ [ T[ki+1,ki] T[ki+1,ki+1] ] ] - if math.Abs(t[ki*ldt+ki+1]) >= math.Abs(t[(ki+1)*ldt+ki]) { - b[ki*ldb+iv] = wi / t[ki*ldt+ki+1] - b[(ki+1)*ldb+iv+1] = 1 - } else { - b[ki*ldb+iv] = 1 - b[(ki+1)*ldb+iv+1] = -wi / t[(ki+1)*ldt+ki] - } - b[(ki+1)*ldb+iv] = 0 - b[ki*ldb+iv+1] = 0 - // Form right-hand side. - for k := ki + 2; k < n; k++ { - b[k*ldb+iv] = -b[ki*ldb+iv] * t[ki*ldt+k] - b[k*ldb+iv+1] = -b[(ki+1)*ldb+iv+1] * t[(ki+1)*ldt+k] - } - // Solve transposed quasi-triangular system: - // [ T[ki+2:n,ki+2:n]^T - (wr-i*wi) ]*X = b1+i*b2 - vmax := 1.0 - vcrit := bignum - for j := ki + 2; j < n; { - if j == n-1 || t[(j+1)*ldt+j] == 0 { - // 1×1 diagonal block. - - // Scale if necessary to avoid overflow - // when forming the right-hand side elements. 
- if norms[j] > vcrit { - rec := 1 / vmax - bi.Dscal(n-ki, rec, b[ki*ldb+iv:], ldb) - bi.Dscal(n-ki, rec, b[ki*ldb+iv+1:], ldb) - vmax = 1 - } - b[j*ldb+iv] -= bi.Ddot(j-ki-2, t[(ki+2)*ldt+j:], ldt, b[(ki+2)*ldb+iv:], ldb) - b[j*ldb+iv+1] -= bi.Ddot(j-ki-2, t[(ki+2)*ldt+j:], ldt, b[(ki+2)*ldb+iv+1:], ldb) - // Solve [ T[j,j]-(wr-i*wi) ]*(X11+i*X12) = b1+i*b2. - scale, _, _ := impl.Dlaln2(false, 1, 2, smin, 1, t[j*ldt+j:], ldt, - 1, 1, b[j*ldb+iv:], ldb, wr, -wi, x[:2], 2) - // Scale if necessary. - if scale != 1 { - bi.Dscal(n-ki, scale, b[ki*ldb+iv:], ldb) - bi.Dscal(n-ki, scale, b[ki*ldb+iv+1:], ldb) - } - b[j*ldb+iv] = x[0] - b[j*ldb+iv+1] = x[1] - vmax = math.Max(vmax, math.Max(math.Abs(b[j*ldb+iv]), math.Abs(b[j*ldb+iv+1]))) - vcrit = bignum / vmax - j++ - } else { - // 2×2 diagonal block. - - // Scale if necessary to avoid overflow - // when forming the right-hand side elements. - if math.Max(norms[j], norms[j+1]) > vcrit { - rec := 1 / vmax - bi.Dscal(n-ki, rec, b[ki*ldb+iv:], ldb) - bi.Dscal(n-ki, rec, b[ki*ldb+iv+1:], ldb) - vmax = 1 - } - b[j*ldb+iv] -= bi.Ddot(j-ki-2, t[(ki+2)*ldt+j:], ldt, b[(ki+2)*ldb+iv:], ldb) - b[j*ldb+iv+1] -= bi.Ddot(j-ki-2, t[(ki+2)*ldt+j:], ldt, b[(ki+2)*ldb+iv+1:], ldb) - b[(j+1)*ldb+iv] -= bi.Ddot(j-ki-2, t[(ki+2)*ldt+j+1:], ldt, b[(ki+2)*ldb+iv:], ldb) - b[(j+1)*ldb+iv+1] -= bi.Ddot(j-ki-2, t[(ki+2)*ldt+j+1:], ldt, b[(ki+2)*ldb+iv+1:], ldb) - // Solve 2×2 complex linear equation - // [ [T[j,j] T[j,j+1] ]^T - (wr-i*wi)*I ]*X = scale*b - // [ [T[j+1,j] T[j+1,j+1]] ] - scale, _, _ := impl.Dlaln2(true, 2, 2, smin, 1, t[j*ldt+j:], ldt, - 1, 1, b[j*ldb+iv:], ldb, wr, -wi, x[:], 2) - // Scale if necessary. - if scale != 1 { - bi.Dscal(n-ki, scale, b[ki*ldb+iv:], ldb) - bi.Dscal(n-ki, scale, b[ki*ldb+iv+1:], ldb) - } - b[j*ldb+iv] = x[0] - b[j*ldb+iv+1] = x[1] - b[(j+1)*ldb+iv] = x[2] - b[(j+1)*ldb+iv+1] = x[3] - vmax01 := math.Max(math.Abs(x[0]), math.Abs(x[1])) - vmax23 := math.Max(math.Abs(x[2]), math.Abs(x[3])) - vmax = math.Max(vmax, math.Max(vmax01, vmax23)) - vcrit = bignum / vmax - j += 2 - } - } - // Copy the vector x or Q*x to VL and normalize. - switch { - case howmny != lapack.EVAllMulQ: - // No back-transform: copy x to VL and normalize. - bi.Dcopy(n-ki, b[ki*ldb+iv:], ldb, vl[ki*ldvl+is:], ldvl) - bi.Dcopy(n-ki, b[ki*ldb+iv+1:], ldb, vl[ki*ldvl+is+1:], ldvl) - emax := 0.0 - for k := ki; k < n; k++ { - emax = math.Max(emax, math.Abs(vl[k*ldvl+is])+math.Abs(vl[k*ldvl+is+1])) - } - remax := 1 / emax - bi.Dscal(n-ki, remax, vl[ki*ldvl+is:], ldvl) - bi.Dscal(n-ki, remax, vl[ki*ldvl+is+1:], ldvl) - for k := 0; k < ki; k++ { - vl[k*ldvl+is] = 0 - vl[k*ldvl+is+1] = 0 - } - case nb == 1: - // Version 1: back-transform each vector with GEMV, Q*x. - if n-ki-2 > 0 { - bi.Dgemv(blas.NoTrans, n, n-ki-2, - 1, vl[ki+2:], ldvl, b[(ki+2)*ldb+iv:], ldb, - b[ki*ldb+iv], vl[ki:], ldvl) - bi.Dgemv(blas.NoTrans, n, n-ki-2, - 1, vl[ki+2:], ldvl, b[(ki+2)*ldb+iv+1:], ldb, - b[(ki+1)*ldb+iv+1], vl[ki+1:], ldvl) - } else { - bi.Dscal(n, b[ki*ldb+iv], vl[ki:], ldvl) - bi.Dscal(n, b[(ki+1)*ldb+iv+1], vl[ki+1:], ldvl) - } - emax := 0.0 - for k := 0; k < n; k++ { - emax = math.Max(emax, math.Abs(vl[k*ldvl+ki])+math.Abs(vl[k*ldvl+ki+1])) - } - remax := 1 / emax - bi.Dscal(n, remax, vl[ki:], ldvl) - bi.Dscal(n, remax, vl[ki+1:], ldvl) - default: - // Version 2: back-transform block of vectors with GEMM. - // Zero out above vector. - // Could go from ki-nv+1 to ki-1. 
- for k := 0; k < ki; k++ { - b[k*ldb+iv] = 0 - b[k*ldb+iv+1] = 0 - } - iscomplex[iv] = ip - iscomplex[iv+1] = -ip - iv++ - // Back-transform and normalization is done below. - } - } - if nb > 1 { - // Blocked version of back-transform. - // For complex case, ki2 includes both vectors ki and ki+1. - ki2 := ki - if ip != 0 { - ki2++ - } - // Columns [0:iv] of work are valid vectors. When the - // number of vectors stored reaches nb-1 or nb, or if - // this was last vector, do the Gemm. - if iv >= nb-2 || ki2 == n-1 { - bi.Dgemm(blas.NoTrans, blas.NoTrans, n, iv+1, n-ki2+iv, - 1, vl[ki2-iv:], ldvl, b[(ki2-iv)*ldb:], ldb, - 0, b[nb:], ldb) - // Normalize vectors. - var remax float64 - for k := 0; k <= iv; k++ { - if iscomplex[k] == 0 { - // Real eigenvector. - ii := bi.Idamax(n, b[nb+k:], ldb) - remax = 1 / math.Abs(b[ii*ldb+nb+k]) - } else if iscomplex[k] == 1 { - // First eigenvector of conjugate pair. - emax := 0.0 - for ii := 0; ii < n; ii++ { - emax = math.Max(emax, math.Abs(b[ii*ldb+nb+k])+math.Abs(b[ii*ldb+nb+k+1])) - } - remax = 1 / emax - // Second eigenvector of conjugate pair - // will reuse this value of remax. - } - bi.Dscal(n, remax, b[nb+k:], ldb) - } - impl.Dlacpy(blas.All, n, iv+1, b[nb:], ldb, vl[ki2-iv:], ldvl) - iv = 0 - } else { - iv++ - } - } - is++ - if ip != 0 { - is++ - } - } - - return m -} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dtrexc.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dtrexc.go deleted file mode 100644 index 9f3f90bad..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/dtrexc.go +++ /dev/null @@ -1,230 +0,0 @@ -// Copyright ©2016 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import "gonum.org/v1/gonum/lapack" - -// Dtrexc reorders the real Schur factorization of a n×n real matrix -// A = Q*T*Q^T -// so that the diagonal block of T with row index ifst is moved to row ilst. -// -// On entry, T must be in Schur canonical form, that is, block upper triangular -// with 1×1 and 2×2 diagonal blocks; each 2×2 diagonal block has its diagonal -// elements equal and its off-diagonal elements of opposite sign. -// -// On return, T will be reordered by an orthogonal similarity transformation Z -// as Z^T*T*Z, and will be again in Schur canonical form. -// -// If compq is lapack.UpdateSchur, on return the matrix Q of Schur vectors will be -// updated by post-multiplying it with Z. -// If compq is lapack.UpdateSchurNone, the matrix Q is not referenced and will not be -// updated. -// For other values of compq Dtrexc will panic. -// -// ifst and ilst specify the reordering of the diagonal blocks of T. The block -// with row index ifst is moved to row ilst, by a sequence of transpositions -// between adjacent blocks. -// -// If ifst points to the second row of a 2×2 block, ifstOut will point to the -// first row, otherwise it will be equal to ifst. -// -// ilstOut will point to the first row of the block in its final position. If ok -// is true, ilstOut may differ from ilst by +1 or -1. -// -// It must hold that -// 0 <= ifst < n, and 0 <= ilst < n, -// otherwise Dtrexc will panic. -// -// If ok is false, two adjacent blocks were too close to swap because the -// problem is very ill-conditioned. T may have been partially reordered, and -// ilstOut will point to the first row of the block at the position to which it -// has been moved. -// -// work must have length at least n, otherwise Dtrexc will panic. 
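A minimal sketch of Dtrexc as documented here, reordering a Schur form with only 1×1 diagonal blocks. It assumes that a nil q with ldq = 1 is accepted when compq == lapack.UpdateSchurNone, since Q is then not referenced; the matrix values are illustrative.

```go
package main

import (
	"fmt"

	"gonum.org/v1/gonum/lapack"
	lapackgonum "gonum.org/v1/gonum/lapack/gonum"
)

func main() {
	impl := lapackgonum.Implementation{}

	// Upper-triangular T in Schur canonical form, eigenvalues 1, 2, 3.
	n := 3
	t := []float64{
		1, 4, 1,
		0, 2, 2,
		0, 0, 3,
	}
	work := make([]float64, n) // At least n, per the contract above.

	// Move the diagonal block at row 0 to row 2 without updating Q.
	ifst, ilst, ok := impl.Dtrexc(lapack.UpdateSchurNone, n, t, n, nil, 1, 0, 2, work)
	fmt.Println("ifst:", ifst, "ilst:", ilst, "ok:", ok)
	fmt.Println("reordered diagonal:", t[0], t[n+1], t[2*n+2])
}
```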
-// -// Dtrexc is an internal routine. It is exported for testing purposes. -func (impl Implementation) Dtrexc(compq lapack.UpdateSchurComp, n int, t []float64, ldt int, q []float64, ldq int, ifst, ilst int, work []float64) (ifstOut, ilstOut int, ok bool) { - switch { - case compq != lapack.UpdateSchur && compq != lapack.UpdateSchurNone: - panic(badUpdateSchurComp) - case n < 0: - panic(nLT0) - case ldt < max(1, n): - panic(badLdT) - case ldq < 1, compq == lapack.UpdateSchur && ldq < n: - panic(badLdQ) - case (ifst < 0 || n <= ifst) && n > 0: - panic(badIfst) - case (ilst < 0 || n <= ilst) && n > 0: - panic(badIlst) - } - - // Quick return if possible. - if n == 0 { - return ifst, ilst, true - } - - switch { - case len(t) < (n-1)*ldt+n: - panic(shortT) - case compq == lapack.UpdateSchur && len(q) < (n-1)*ldq+n: - panic(shortQ) - case len(work) < n: - panic(shortWork) - } - - // Quick return if possible. - if n == 1 { - return ifst, ilst, true - } - - // Determine the first row of specified block - // and find out it is 1×1 or 2×2. - if ifst > 0 && t[ifst*ldt+ifst-1] != 0 { - ifst-- - } - nbf := 1 // Size of the first block. - if ifst+1 < n && t[(ifst+1)*ldt+ifst] != 0 { - nbf = 2 - } - // Determine the first row of the final block - // and find out it is 1×1 or 2×2. - if ilst > 0 && t[ilst*ldt+ilst-1] != 0 { - ilst-- - } - nbl := 1 // Size of the last block. - if ilst+1 < n && t[(ilst+1)*ldt+ilst] != 0 { - nbl = 2 - } - - ok = true - wantq := compq == lapack.UpdateSchur - - switch { - case ifst == ilst: - return ifst, ilst, true - - case ifst < ilst: - // Update ilst. - switch { - case nbf == 2 && nbl == 1: - ilst-- - case nbf == 1 && nbl == 2: - ilst++ - } - here := ifst - for here < ilst { - // Swap block with next one below. - if nbf == 1 || nbf == 2 { - // Current block either 1×1 or 2×2. - nbnext := 1 // Size of the next block. - if here+nbf+1 < n && t[(here+nbf+1)*ldt+here+nbf] != 0 { - nbnext = 2 - } - ok = impl.Dlaexc(wantq, n, t, ldt, q, ldq, here, nbf, nbnext, work) - if !ok { - return ifst, here, false - } - here += nbnext - // Test if 2×2 block breaks into two 1×1 blocks. - if nbf == 2 && t[(here+1)*ldt+here] == 0 { - nbf = 3 - } - continue - } - - // Current block consists of two 1×1 blocks each of - // which must be swapped individually. - nbnext := 1 // Size of the next block. - if here+3 < n && t[(here+3)*ldt+here+2] != 0 { - nbnext = 2 - } - ok = impl.Dlaexc(wantq, n, t, ldt, q, ldq, here+1, 1, nbnext, work) - if !ok { - return ifst, here, false - } - if nbnext == 1 { - // Swap two 1×1 blocks, no problems possible. - impl.Dlaexc(wantq, n, t, ldt, q, ldq, here, 1, nbnext, work) - here++ - continue - } - // Recompute nbnext in case 2×2 split. - if t[(here+2)*ldt+here+1] == 0 { - nbnext = 1 - } - if nbnext == 2 { - // 2×2 block did not split. - ok = impl.Dlaexc(wantq, n, t, ldt, q, ldq, here, 1, nbnext, work) - if !ok { - return ifst, here, false - } - } else { - // 2×2 block did split. - impl.Dlaexc(wantq, n, t, ldt, q, ldq, here, 1, 1, work) - impl.Dlaexc(wantq, n, t, ldt, q, ldq, here+1, 1, 1, work) - } - here += 2 - } - return ifst, here, true - - default: // ifst > ilst - here := ifst - for here > ilst { - // Swap block with next one above. - if nbf == 1 || nbf == 2 { - // Current block either 1×1 or 2×2. - nbnext := 1 - if here-2 >= 0 && t[(here-1)*ldt+here-2] != 0 { - nbnext = 2 - } - ok = impl.Dlaexc(wantq, n, t, ldt, q, ldq, here-nbnext, nbnext, nbf, work) - if !ok { - return ifst, here, false - } - here -= nbnext - // Test if 2×2 block breaks into two 1×1 blocks. 
- if nbf == 2 && t[(here+1)*ldt+here] == 0 { - nbf = 3 - } - continue - } - - // Current block consists of two 1×1 blocks each of - // which must be swapped individually. - nbnext := 1 - if here-2 >= 0 && t[(here-1)*ldt+here-2] != 0 { - nbnext = 2 - } - ok = impl.Dlaexc(wantq, n, t, ldt, q, ldq, here-nbnext, nbnext, 1, work) - if !ok { - return ifst, here, false - } - if nbnext == 1 { - // Swap two 1×1 blocks, no problems possible. - impl.Dlaexc(wantq, n, t, ldt, q, ldq, here, nbnext, 1, work) - here-- - continue - } - // Recompute nbnext in case 2×2 split. - if t[here*ldt+here-1] == 0 { - nbnext = 1 - } - if nbnext == 2 { - // 2×2 block did not split. - ok = impl.Dlaexc(wantq, n, t, ldt, q, ldq, here-1, 2, 1, work) - if !ok { - return ifst, here, false - } - } else { - // 2×2 block did split. - impl.Dlaexc(wantq, n, t, ldt, q, ldq, here, 1, 1, work) - impl.Dlaexc(wantq, n, t, ldt, q, ldq, here-1, 1, 1, work) - } - here -= 2 - } - return ifst, here, true - } -} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dtrti2.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dtrti2.go deleted file mode 100644 index efc24b65e..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/dtrti2.go +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright ©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import ( - "gonum.org/v1/gonum/blas" - "gonum.org/v1/gonum/blas/blas64" -) - -// Dtrti2 computes the inverse of a triangular matrix, storing the result in place -// into a. This is the BLAS level 2 version of the algorithm. -// -// Dtrti2 is an internal routine. It is exported for testing purposes. -func (impl Implementation) Dtrti2(uplo blas.Uplo, diag blas.Diag, n int, a []float64, lda int) { - switch { - case uplo != blas.Upper && uplo != blas.Lower: - panic(badUplo) - case diag != blas.NonUnit && diag != blas.Unit: - panic(badDiag) - case n < 0: - panic(nLT0) - case lda < max(1, n): - panic(badLdA) - } - - if n == 0 { - return - } - - if len(a) < (n-1)*lda+n { - panic(shortA) - } - - bi := blas64.Implementation() - - nonUnit := diag == blas.NonUnit - // TODO(btracey): Replace this with a row-major ordering. - if uplo == blas.Upper { - for j := 0; j < n; j++ { - var ajj float64 - if nonUnit { - ajj = 1 / a[j*lda+j] - a[j*lda+j] = ajj - ajj *= -1 - } else { - ajj = -1 - } - bi.Dtrmv(blas.Upper, blas.NoTrans, diag, j, a, lda, a[j:], lda) - bi.Dscal(j, ajj, a[j:], lda) - } - return - } - for j := n - 1; j >= 0; j-- { - var ajj float64 - if nonUnit { - ajj = 1 / a[j*lda+j] - a[j*lda+j] = ajj - ajj *= -1 - } else { - ajj = -1 - } - if j < n-1 { - bi.Dtrmv(blas.Lower, blas.NoTrans, diag, n-j-1, a[(j+1)*lda+j+1:], lda, a[(j+1)*lda+j:], lda) - bi.Dscal(n-j-1, ajj, a[(j+1)*lda+j:], lda) - } - } -} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dtrtri.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dtrtri.go deleted file mode 100644 index 6ec3663c3..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/dtrtri.go +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright ©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import ( - "gonum.org/v1/gonum/blas" - "gonum.org/v1/gonum/blas/blas64" -) - -// Dtrtri computes the inverse of a triangular matrix, storing the result in place -// into a. 
This is the BLAS level 3 version of the algorithm which builds upon -// Dtrti2 to operate on matrix blocks instead of only individual columns. -// -// Dtrtri will not perform the inversion if the matrix is singular, and returns -// a boolean indicating whether the inversion was successful. -func (impl Implementation) Dtrtri(uplo blas.Uplo, diag blas.Diag, n int, a []float64, lda int) (ok bool) { - switch { - case uplo != blas.Upper && uplo != blas.Lower: - panic(badUplo) - case diag != blas.NonUnit && diag != blas.Unit: - panic(badDiag) - case n < 0: - panic(nLT0) - case lda < max(1, n): - panic(badLdA) - } - - if n == 0 { - return true - } - - if len(a) < (n-1)*lda+n { - panic(shortA) - } - - if diag == blas.NonUnit { - for i := 0; i < n; i++ { - if a[i*lda+i] == 0 { - return false - } - } - } - - bi := blas64.Implementation() - - nb := impl.Ilaenv(1, "DTRTRI", "UD", n, -1, -1, -1) - if nb <= 1 || nb > n { - impl.Dtrti2(uplo, diag, n, a, lda) - return true - } - if uplo == blas.Upper { - for j := 0; j < n; j += nb { - jb := min(nb, n-j) - bi.Dtrmm(blas.Left, blas.Upper, blas.NoTrans, diag, j, jb, 1, a, lda, a[j:], lda) - bi.Dtrsm(blas.Right, blas.Upper, blas.NoTrans, diag, j, jb, -1, a[j*lda+j:], lda, a[j:], lda) - impl.Dtrti2(blas.Upper, diag, jb, a[j*lda+j:], lda) - } - return true - } - nn := ((n - 1) / nb) * nb - for j := nn; j >= 0; j -= nb { - jb := min(nb, n-j) - if j+jb <= n-1 { - bi.Dtrmm(blas.Left, blas.Lower, blas.NoTrans, diag, n-j-jb, jb, 1, a[(j+jb)*lda+j+jb:], lda, a[(j+jb)*lda+j:], lda) - bi.Dtrsm(blas.Right, blas.Lower, blas.NoTrans, diag, n-j-jb, jb, -1, a[j*lda+j:], lda, a[(j+jb)*lda+j:], lda) - } - impl.Dtrti2(blas.Lower, diag, jb, a[j*lda+j:], lda) - } - return true -} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dtrtrs.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dtrtrs.go deleted file mode 100644 index 1752dc5c8..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/dtrtrs.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright ©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import ( - "gonum.org/v1/gonum/blas" - "gonum.org/v1/gonum/blas/blas64" -) - -// Dtrtrs solves a triangular system of the form A * X = B or A^T * X = B. Dtrtrs -// returns whether the solve completed successfully. If A is singular, no solve is performed. -func (impl Implementation) Dtrtrs(uplo blas.Uplo, trans blas.Transpose, diag blas.Diag, n, nrhs int, a []float64, lda int, b []float64, ldb int) (ok bool) { - switch { - case uplo != blas.Upper && uplo != blas.Lower: - panic(badUplo) - case trans != blas.NoTrans && trans != blas.Trans && trans != blas.ConjTrans: - panic(badTrans) - case diag != blas.NonUnit && diag != blas.Unit: - panic(badDiag) - case n < 0: - panic(nLT0) - case nrhs < 0: - panic(nrhsLT0) - case lda < max(1, n): - panic(badLdA) - case ldb < max(1, nrhs): - panic(badLdB) - } - - if n == 0 { - return true - } - - switch { - case len(a) < (n-1)*lda+n: - panic(shortA) - case len(b) < (n-1)*ldb+nrhs: - panic(shortB) - } - - // Check for singularity. 
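A minimal sketch of the Dtrtri routine deleted above, inverting a small non-unit upper-triangular matrix in place. For an n this small the blocked code falls through to the unblocked Dtrti2 also removed in this patch; the values are illustrative.

```go
package main

import (
	"fmt"

	"gonum.org/v1/gonum/blas"
	lapackgonum "gonum.org/v1/gonum/lapack/gonum"
)

func main() {
	impl := lapackgonum.Implementation{}

	// Upper-triangular, non-unit 3×3 A stored row-major.
	n := 3
	a := []float64{
		2, 1, 1,
		0, 3, 1,
		0, 0, 4,
	}

	// In-place inversion; ok reports whether A was non-singular.
	ok := impl.Dtrtri(blas.Upper, blas.NonUnit, n, a, n)
	fmt.Println(ok, a)
}
```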
- nounit := diag == blas.NonUnit - if nounit { - for i := 0; i < n; i++ { - if a[i*lda+i] == 0 { - return false - } - } - } - bi := blas64.Implementation() - bi.Dtrsm(blas.Left, uplo, trans, diag, n, nrhs, 1, a, lda, b, ldb) - return true -} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/errors.go b/vendor/gonum.org/v1/gonum/lapack/gonum/errors.go deleted file mode 100644 index 3c0cb68ef..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/errors.go +++ /dev/null @@ -1,174 +0,0 @@ -// Copyright ©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -// This list is duplicated in netlib/lapack/netlib. Keep in sync. -const ( - // Panic strings for bad enumeration values. - badApplyOrtho = "lapack: bad ApplyOrtho" - badBalanceJob = "lapack: bad BalanceJob" - badDiag = "lapack: bad Diag" - badDirect = "lapack: bad Direct" - badEVComp = "lapack: bad EVComp" - badEVHowMany = "lapack: bad EVHowMany" - badEVJob = "lapack: bad EVJob" - badEVSide = "lapack: bad EVSide" - badGSVDJob = "lapack: bad GSVDJob" - badGenOrtho = "lapack: bad GenOrtho" - badLeftEVJob = "lapack: bad LeftEVJob" - badMatrixType = "lapack: bad MatrixType" - badNorm = "lapack: bad Norm" - badPivot = "lapack: bad Pivot" - badRightEVJob = "lapack: bad RightEVJob" - badSVDJob = "lapack: bad SVDJob" - badSchurComp = "lapack: bad SchurComp" - badSchurJob = "lapack: bad SchurJob" - badSide = "lapack: bad Side" - badSort = "lapack: bad Sort" - badStoreV = "lapack: bad StoreV" - badTrans = "lapack: bad Trans" - badUpdateSchurComp = "lapack: bad UpdateSchurComp" - badUplo = "lapack: bad Uplo" - bothSVDOver = "lapack: both jobU and jobVT are lapack.SVDOverwrite" - - // Panic strings for bad numerical and string values. 
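A minimal sketch of the Dtrtrs solver removed above: B is overwritten with the solution X of A * X = B, and the boolean result reports whether A was non-singular. The values are illustrative.

```go
package main

import (
	"fmt"

	"gonum.org/v1/gonum/blas"
	lapackgonum "gonum.org/v1/gonum/lapack/gonum"
)

func main() {
	impl := lapackgonum.Implementation{}

	// Solve A * X = B for upper-triangular A with a single right-hand side.
	n, nrhs := 3, 1
	a := []float64{
		2, 1, 1,
		0, 3, 1,
		0, 0, 4,
	}
	b := []float64{4, 5, 8} // Overwritten with X; expect [0.5 1 2].

	ok := impl.Dtrtrs(blas.Upper, blas.NoTrans, blas.NonUnit, n, nrhs, a, n, b, nrhs)
	fmt.Println(ok, b)
}
```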
- badIfst = "lapack: ifst out of range" - badIhi = "lapack: ihi out of range" - badIhiz = "lapack: ihiz out of range" - badIlo = "lapack: ilo out of range" - badIloz = "lapack: iloz out of range" - badIlst = "lapack: ilst out of range" - badIsave = "lapack: bad isave value" - badIspec = "lapack: bad ispec value" - badJ1 = "lapack: j1 out of range" - badJpvt = "lapack: bad element of jpvt" - badK1 = "lapack: k1 out of range" - badK2 = "lapack: k2 out of range" - badKacc22 = "lapack: invalid value of kacc22" - badKbot = "lapack: kbot out of range" - badKtop = "lapack: ktop out of range" - badLWork = "lapack: insufficient declared workspace length" - badMm = "lapack: mm out of range" - badN1 = "lapack: bad value of n1" - badN2 = "lapack: bad value of n2" - badNa = "lapack: bad value of na" - badName = "lapack: bad name" - badNh = "lapack: bad value of nh" - badNw = "lapack: bad value of nw" - badPp = "lapack: bad value of pp" - badShifts = "lapack: bad shifts" - i0LT0 = "lapack: i0 < 0" - kGTM = "lapack: k > m" - kGTN = "lapack: k > n" - kLT0 = "lapack: k < 0" - kLT1 = "lapack: k < 1" - kdLT0 = "lapack: kd < 0" - mGTN = "lapack: m > n" - mLT0 = "lapack: m < 0" - mmLT0 = "lapack: mm < 0" - n0LT0 = "lapack: n0 < 0" - nGTM = "lapack: n > m" - nLT0 = "lapack: n < 0" - nLT1 = "lapack: n < 1" - nLTM = "lapack: n < m" - nanCFrom = "lapack: cfrom is NaN" - nanCTo = "lapack: cto is NaN" - nbGTM = "lapack: nb > m" - nbGTN = "lapack: nb > n" - nbLT0 = "lapack: nb < 0" - nccLT0 = "lapack: ncc < 0" - ncvtLT0 = "lapack: ncvt < 0" - negANorm = "lapack: anorm < 0" - negZ = "lapack: negative z value" - nhLT0 = "lapack: nh < 0" - notIsolated = "lapack: block is not isolated" - nrhsLT0 = "lapack: nrhs < 0" - nruLT0 = "lapack: nru < 0" - nshftsLT0 = "lapack: nshfts < 0" - nshftsOdd = "lapack: nshfts must be even" - nvLT0 = "lapack: nv < 0" - offsetGTM = "lapack: offset > m" - offsetLT0 = "lapack: offset < 0" - pLT0 = "lapack: p < 0" - recurLT0 = "lapack: recur < 0" - zeroCFrom = "lapack: zero cfrom" - - // Panic strings for bad slice lengths. - badLenAlpha = "lapack: bad length of alpha" - badLenBeta = "lapack: bad length of beta" - badLenIpiv = "lapack: bad length of ipiv" - badLenJpvt = "lapack: bad length of jpvt" - badLenK = "lapack: bad length of k" - badLenSelected = "lapack: bad length of selected" - badLenSi = "lapack: bad length of si" - badLenSr = "lapack: bad length of sr" - badLenTau = "lapack: bad length of tau" - badLenWi = "lapack: bad length of wi" - badLenWr = "lapack: bad length of wr" - - // Panic strings for insufficient slice lengths. 
- shortA = "lapack: insufficient length of a" - shortAB = "lapack: insufficient length of ab" - shortAuxv = "lapack: insufficient length of auxv" - shortB = "lapack: insufficient length of b" - shortC = "lapack: insufficient length of c" - shortCNorm = "lapack: insufficient length of cnorm" - shortD = "lapack: insufficient length of d" - shortE = "lapack: insufficient length of e" - shortF = "lapack: insufficient length of f" - shortH = "lapack: insufficient length of h" - shortIWork = "lapack: insufficient length of iwork" - shortIsgn = "lapack: insufficient length of isgn" - shortQ = "lapack: insufficient length of q" - shortS = "lapack: insufficient length of s" - shortScale = "lapack: insufficient length of scale" - shortT = "lapack: insufficient length of t" - shortTau = "lapack: insufficient length of tau" - shortTauP = "lapack: insufficient length of tauP" - shortTauQ = "lapack: insufficient length of tauQ" - shortU = "lapack: insufficient length of u" - shortV = "lapack: insufficient length of v" - shortVL = "lapack: insufficient length of vl" - shortVR = "lapack: insufficient length of vr" - shortVT = "lapack: insufficient length of vt" - shortVn1 = "lapack: insufficient length of vn1" - shortVn2 = "lapack: insufficient length of vn2" - shortW = "lapack: insufficient length of w" - shortWH = "lapack: insufficient length of wh" - shortWV = "lapack: insufficient length of wv" - shortWi = "lapack: insufficient length of wi" - shortWork = "lapack: insufficient length of work" - shortWr = "lapack: insufficient length of wr" - shortX = "lapack: insufficient length of x" - shortY = "lapack: insufficient length of y" - shortZ = "lapack: insufficient length of z" - - // Panic strings for bad leading dimensions of matrices. - badLdA = "lapack: bad leading dimension of A" - badLdB = "lapack: bad leading dimension of B" - badLdC = "lapack: bad leading dimension of C" - badLdF = "lapack: bad leading dimension of F" - badLdH = "lapack: bad leading dimension of H" - badLdQ = "lapack: bad leading dimension of Q" - badLdT = "lapack: bad leading dimension of T" - badLdU = "lapack: bad leading dimension of U" - badLdV = "lapack: bad leading dimension of V" - badLdVL = "lapack: bad leading dimension of VL" - badLdVR = "lapack: bad leading dimension of VR" - badLdVT = "lapack: bad leading dimension of VT" - badLdW = "lapack: bad leading dimension of W" - badLdWH = "lapack: bad leading dimension of WH" - badLdWV = "lapack: bad leading dimension of WV" - badLdWork = "lapack: bad leading dimension of Work" - badLdX = "lapack: bad leading dimension of X" - badLdY = "lapack: bad leading dimension of Y" - badLdZ = "lapack: bad leading dimension of Z" - - // Panic strings for bad vector increments. - absIncNotOne = "lapack: increment not one or negative one" - badIncX = "lapack: incX <= 0" - badIncY = "lapack: incY <= 0" - zeroIncV = "lapack: incv == 0" -) diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/iladlc.go b/vendor/gonum.org/v1/gonum/lapack/gonum/iladlc.go deleted file mode 100644 index b251d7269..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/iladlc.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright ©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -// Iladlc scans a matrix for its last non-zero column. Returns -1 if the matrix -// is all zeros. -// -// Iladlc is an internal routine. It is exported for testing purposes. 
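The panic strings in the errors.go file deleted above are what callers see when a routine's preconditions are violated. A small sketch using the Iladlc scanner documented here: one well-formed call, and one deliberately bad lda recovered from, showing the "lapack: ..."-prefixed message surface. The values are illustrative.

```go
package main

import (
	"fmt"

	lapackgonum "gonum.org/v1/gonum/lapack/gonum"
)

func main() {
	impl := lapackgonum.Implementation{}

	m, n := 2, 3
	a := []float64{
		0, 1, 0,
		0, 2, 0,
	}
	fmt.Println(impl.Iladlc(m, n, a, n)) // 1: the last non-zero column.

	// Parameter violations panic with the strings defined above; a
	// caller can observe them via recover.
	func() {
		defer func() { fmt.Println("recovered:", recover()) }()
		impl.Iladlc(m, n, a, 0) // lda < max(1, n) triggers badLdA.
	}()
}
```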
-func (Implementation) Iladlc(m, n int, a []float64, lda int) int { - switch { - case m < 0: - panic(mLT0) - case n < 0: - panic(nLT0) - case lda < max(1, n): - panic(badLdA) - } - - if n == 0 || m == 0 { - return -1 - } - - if len(a) < (m-1)*lda+n { - panic(shortA) - } - - // Test common case where corner is non-zero. - if a[n-1] != 0 || a[(m-1)*lda+(n-1)] != 0 { - return n - 1 - } - - // Scan each row tracking the highest column seen. - highest := -1 - for i := 0; i < m; i++ { - for j := n - 1; j >= 0; j-- { - if a[i*lda+j] != 0 { - highest = max(highest, j) - break - } - } - } - return highest -} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/iladlr.go b/vendor/gonum.org/v1/gonum/lapack/gonum/iladlr.go deleted file mode 100644 index b73fe18ea..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/iladlr.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright ©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -// Iladlr scans a matrix for its last non-zero row. Returns -1 if the matrix -// is all zeros. -// -// Iladlr is an internal routine. It is exported for testing purposes. -func (Implementation) Iladlr(m, n int, a []float64, lda int) int { - switch { - case m < 0: - panic(mLT0) - case n < 0: - panic(nLT0) - case lda < max(1, n): - panic(badLdA) - } - - if n == 0 || m == 0 { - return -1 - } - - if len(a) < (m-1)*lda+n { - panic(shortA) - } - - // Check the common case where the corner is non-zero - if a[(m-1)*lda] != 0 || a[(m-1)*lda+n-1] != 0 { - return m - 1 - } - for i := m - 1; i >= 0; i-- { - for j := 0; j < n; j++ { - if a[i*lda+j] != 0 { - return i - } - } - } - return -1 -} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/ilaenv.go b/vendor/gonum.org/v1/gonum/lapack/gonum/ilaenv.go deleted file mode 100644 index b40110486..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/ilaenv.go +++ /dev/null @@ -1,387 +0,0 @@ -// Copyright ©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -// Ilaenv returns algorithm tuning parameters for the algorithm given by the -// input string. ispec specifies the parameter to return: -// 1: The optimal block size for a blocked algorithm. -// 2: The minimum block size for a blocked algorithm. -// 3: The block size of unprocessed data at which a blocked algorithm should -// crossover to an unblocked version. -// 4: The number of shifts. -// 5: The minimum column dimension for blocking to be used. -// 6: The crossover point for SVD (to use QR factorization or not). -// 7: The number of processors. -// 8: The crossover point for multi-shift in QR and QZ methods for non-symmetric eigenvalue problems. -// 9: Maximum size of the subproblems in divide-and-conquer algorithms. -// 10: ieee NaN arithmetic can be trusted not to trap. -// 11: infinity arithmetic can be trusted not to trap. -// 12...16: parameters for Dhseqr and related functions. See Iparmq for more -// information. -// -// Ilaenv is an internal routine. It is exported for testing purposes. -func (impl Implementation) Ilaenv(ispec int, name string, opts string, n1, n2, n3, n4 int) int { - // TODO(btracey): Replace this with a constant lookup? A list of constants? 
- sname := name[0] == 'S' || name[0] == 'D' - cname := name[0] == 'C' || name[0] == 'Z' - if !sname && !cname { - panic(badName) - } - c2 := name[1:3] - c3 := name[3:6] - c4 := c3[1:3] - - switch ispec { - default: - panic(badIspec) - case 1: - switch c2 { - default: - panic(badName) - case "GE": - switch c3 { - default: - panic(badName) - case "TRF": - if sname { - return 64 - } - return 64 - case "QRF", "RQF", "LQF", "QLF": - if sname { - return 32 - } - return 32 - case "HRD": - if sname { - return 32 - } - return 32 - case "BRD": - if sname { - return 32 - } - return 32 - case "TRI": - if sname { - return 64 - } - return 64 - } - case "PO": - switch c3 { - default: - panic(badName) - case "TRF": - if sname { - return 64 - } - return 64 - } - case "SY": - switch c3 { - default: - panic(badName) - case "TRF": - if sname { - return 64 - } - return 64 - case "TRD": - return 32 - case "GST": - return 64 - } - case "HE": - switch c3 { - default: - panic(badName) - case "TRF": - return 64 - case "TRD": - return 32 - case "GST": - return 64 - } - case "OR": - switch c3[0] { - default: - panic(badName) - case 'G': - switch c3[1:] { - default: - panic(badName) - case "QR", "RQ", "LQ", "QL", "HR", "TR", "BR": - return 32 - } - case 'M': - switch c3[1:] { - default: - panic(badName) - case "QR", "RQ", "LQ", "QL", "HR", "TR", "BR": - return 32 - } - } - case "UN": - switch c3[0] { - default: - panic(badName) - case 'G': - switch c3[1:] { - default: - panic(badName) - case "QR", "RQ", "LQ", "QL", "HR", "TR", "BR": - return 32 - } - case 'M': - switch c3[1:] { - default: - panic(badName) - case "QR", "RQ", "LQ", "QL", "HR", "TR", "BR": - return 32 - } - } - case "GB": - switch c3 { - default: - panic(badName) - case "TRF": - if sname { - if n4 <= 64 { - return 1 - } - return 32 - } - if n4 <= 64 { - return 1 - } - return 32 - } - case "PB": - switch c3 { - default: - panic(badName) - case "TRF": - if sname { - if n2 <= 64 { - return 1 - } - return 32 - } - if n2 <= 64 { - return 1 - } - return 32 - } - case "TR": - switch c3 { - default: - panic(badName) - case "TRI": - if sname { - return 64 - } - return 64 - case "EVC": - if sname { - return 64 - } - return 64 - } - case "LA": - switch c3 { - default: - panic(badName) - case "UUM": - if sname { - return 64 - } - return 64 - } - case "ST": - if sname && c3 == "EBZ" { - return 1 - } - panic(badName) - } - case 2: - switch c2 { - default: - panic(badName) - case "GE": - switch c3 { - default: - panic(badName) - case "QRF", "RQF", "LQF", "QLF": - if sname { - return 2 - } - return 2 - case "HRD": - if sname { - return 2 - } - return 2 - case "BRD": - if sname { - return 2 - } - return 2 - case "TRI": - if sname { - return 2 - } - return 2 - } - case "SY": - switch c3 { - default: - panic(badName) - case "TRF": - if sname { - return 8 - } - return 8 - case "TRD": - if sname { - return 2 - } - panic(badName) - } - case "HE": - if c3 == "TRD" { - return 2 - } - panic(badName) - case "OR": - if !sname { - panic(badName) - } - switch c3[0] { - default: - panic(badName) - case 'G': - switch c4 { - default: - panic(badName) - case "QR", "RQ", "LQ", "QL", "HR", "TR", "BR": - return 2 - } - case 'M': - switch c4 { - default: - panic(badName) - case "QR", "RQ", "LQ", "QL", "HR", "TR", "BR": - return 2 - } - } - case "UN": - switch c3[0] { - default: - panic(badName) - case 'G': - switch c4 { - default: - panic(badName) - case "QR", "RQ", "LQ", "QL", "HR", "TR", "BR": - return 2 - } - case 'M': - switch c4 { - default: - panic(badName) - case "QR", "RQ", "LQ", 
"QL", "HR", "TR", "BR": - return 2 - } - } - } - case 3: - switch c2 { - default: - panic(badName) - case "GE": - switch c3 { - default: - panic(badName) - case "QRF", "RQF", "LQF", "QLF": - if sname { - return 128 - } - return 128 - case "HRD": - if sname { - return 128 - } - return 128 - case "BRD": - if sname { - return 128 - } - return 128 - } - case "SY": - if sname && c3 == "TRD" { - return 32 - } - panic(badName) - case "HE": - if c3 == "TRD" { - return 32 - } - panic(badName) - case "OR": - switch c3[0] { - default: - panic(badName) - case 'G': - switch c4 { - default: - panic(badName) - case "QR", "RQ", "LQ", "QL", "HR", "TR", "BR": - return 128 - } - } - case "UN": - switch c3[0] { - default: - panic(badName) - case 'G': - switch c4 { - default: - panic(badName) - case "QR", "RQ", "LQ", "QL", "HR", "TR", "BR": - return 128 - } - } - } - case 4: - // Used by xHSEQR - return 6 - case 5: - // Not used - return 2 - case 6: - // Used by xGELSS and xGESVD - return int(float64(min(n1, n2)) * 1.6) - case 7: - // Not used - return 1 - case 8: - // Used by xHSEQR - return 50 - case 9: - // used by xGELSD and xGESDD - return 25 - case 10: - // Go guarantees ieee - return 1 - case 11: - // Go guarantees ieee - return 1 - case 12, 13, 14, 15, 16: - // Dhseqr and related functions for eigenvalue problems. - return impl.Iparmq(ispec, name, opts, n1, n2, n3, n4) - } -} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/iparmq.go b/vendor/gonum.org/v1/gonum/lapack/gonum/iparmq.go deleted file mode 100644 index 3800f11ce..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/iparmq.go +++ /dev/null @@ -1,115 +0,0 @@ -// Copyright ©2016 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import "math" - -// Iparmq returns problem and machine dependent parameters useful for Dhseqr and -// related subroutines for eigenvalue problems. -// -// ispec specifies the parameter to return: -// 12: Crossover point between Dlahqr and Dlaqr0. Will be at least 11. -// 13: Deflation window size. -// 14: Nibble crossover point. Determines when to skip a multi-shift QR sweep. -// 15: Number of simultaneous shifts in a multishift QR iteration. -// 16: Select structured matrix multiply. -// For other values of ispec Iparmq will panic. -// -// name is the name of the calling function. name must be in uppercase but this -// is not checked. -// -// opts is not used and exists for future use. -// -// n is the order of the Hessenberg matrix H. -// -// ilo and ihi specify the block [ilo:ihi+1,ilo:ihi+1] that is being processed. -// -// lwork is the amount of workspace available. -// -// Except for ispec input parameters are not checked. -// -// Iparmq is an internal routine. It is exported for testing purposes. -func (Implementation) Iparmq(ispec int, name, opts string, n, ilo, ihi, lwork int) int { - nh := ihi - ilo + 1 - ns := 2 - switch { - case nh >= 30: - ns = 4 - case nh >= 60: - ns = 10 - case nh >= 150: - ns = max(10, nh/int(math.Log(float64(nh))/math.Ln2)) - case nh >= 590: - ns = 64 - case nh >= 3000: - ns = 128 - case nh >= 6000: - ns = 256 - } - ns = max(2, ns-(ns%2)) - - switch ispec { - default: - panic(badIspec) - - case 12: - // Matrices of order smaller than nmin get sent to Dlahqr, the - // classic double shift algorithm. This must be at least 11. 
- const nmin = 75 - return nmin - - case 13: - const knwswp = 500 - if nh <= knwswp { - return ns - } - return 3 * ns / 2 - - case 14: - // Skip a computationally expensive multi-shift QR sweep with - // Dlaqr5 whenever aggressive early deflation finds at least - // nibble*(window size)/100 deflations. The default, small, - // value reflects the expectation that the cost of looking - // through the deflation window with Dlaqr3 will be - // substantially smaller. - const nibble = 14 - return nibble - - case 15: - return ns - - case 16: - if len(name) != 6 { - panic(badName) - } - const ( - k22min = 14 - kacmin = 14 - ) - var acc22 int - switch { - case name[1:] == "GGHRD" || name[1:] == "GGHD3": - acc22 = 1 - if nh >= k22min { - acc22 = 2 - } - case name[3:] == "EXC": - if nh >= kacmin { - acc22 = 1 - } - if nh >= k22min { - acc22 = 2 - } - case name[1:] == "HSEQR" || name[1:5] == "LAQR": - if ns >= kacmin { - acc22 = 1 - } - if ns >= k22min { - acc22 = 2 - } - } - return acc22 - } -} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/lapack.go b/vendor/gonum.org/v1/gonum/lapack/gonum/lapack.go deleted file mode 100644 index 950db32ce..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/lapack.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright ©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gonum - -import "gonum.org/v1/gonum/lapack" - -// Implementation is the native Go implementation of LAPACK routines. It -// is built on top of calls to the return of blas64.Implementation(), so while -// this code is in pure Go, the underlying BLAS implementation may not be. -type Implementation struct{} - -var _ lapack.Float64 = Implementation{} - -func min(a, b int) int { - if a < b { - return a - } - return b -} - -func max(a, b int) int { - if a > b { - return a - } - return b -} - -func abs(a int) int { - if a < 0 { - return -a - } - return a -} - -const ( - // dlamchE is the machine epsilon. For IEEE this is 2^{-53}. - dlamchE = 1.0 / (1 << 53) - // TODO(kortschak) Replace this with 0x1p-53 when go1.12 is no - // longer supported. - - // dlamchB is the radix of the machine (the base of the number system). - dlamchB = 2 - - // dlamchP is base * eps. - dlamchP = dlamchB * dlamchE - - // dlamchS is the "safe minimum", that is, the lowest number such that - // 1/dlamchS does not overflow, or also the smallest normal number. - // For IEEE this is 2^{-1022}. - dlamchS = 1.0 / (1 << 256) / (1 << 256) / (1 << 256) / (1 << 254) - // TODO(kortschak) Replace this with 0x1p-1022 when go1.12 is no - // longer supported. -) diff --git a/vendor/gonum.org/v1/gonum/lapack/lapack.go b/vendor/gonum.org/v1/gonum/lapack/lapack.go deleted file mode 100644 index eef14c17a..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/lapack.go +++ /dev/null @@ -1,213 +0,0 @@ -// Copyright ©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package lapack - -import "gonum.org/v1/gonum/blas" - -// Complex128 defines the public complex128 LAPACK API supported by gonum/lapack. -type Complex128 interface{} - -// Float64 defines the public float64 LAPACK API supported by gonum/lapack. 
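The Ilaenv switchboard above is easiest to understand by querying it; a small editorial sketch (the concrete value is specific to this pure-Go implementation, and assumes the vendored packages are importable):

	// Optimal block size (ispec == 1) for DGETRF, i.e. blocked LU factorization.
	var impl gonum.Implementation
	nb := impl.Ilaenv(1, "DGETRF", " ", 100, 100, -1, -1)
	fmt.Println(nb) // Prints 64 for this implementation.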
-type Float64 interface { - Dgecon(norm MatrixNorm, n int, a []float64, lda int, anorm float64, work []float64, iwork []int) float64 - Dgeev(jobvl LeftEVJob, jobvr RightEVJob, n int, a []float64, lda int, wr, wi []float64, vl []float64, ldvl int, vr []float64, ldvr int, work []float64, lwork int) (first int) - Dgels(trans blas.Transpose, m, n, nrhs int, a []float64, lda int, b []float64, ldb int, work []float64, lwork int) bool - Dgelqf(m, n int, a []float64, lda int, tau, work []float64, lwork int) - Dgeqrf(m, n int, a []float64, lda int, tau, work []float64, lwork int) - Dgesvd(jobU, jobVT SVDJob, m, n int, a []float64, lda int, s, u []float64, ldu int, vt []float64, ldvt int, work []float64, lwork int) (ok bool) - Dgetrf(m, n int, a []float64, lda int, ipiv []int) (ok bool) - Dgetri(n int, a []float64, lda int, ipiv []int, work []float64, lwork int) (ok bool) - Dgetrs(trans blas.Transpose, n, nrhs int, a []float64, lda int, ipiv []int, b []float64, ldb int) - Dggsvd3(jobU, jobV, jobQ GSVDJob, m, n, p int, a []float64, lda int, b []float64, ldb int, alpha, beta, u []float64, ldu int, v []float64, ldv int, q []float64, ldq int, work []float64, lwork int, iwork []int) (k, l int, ok bool) - Dlantr(norm MatrixNorm, uplo blas.Uplo, diag blas.Diag, m, n int, a []float64, lda int, work []float64) float64 - Dlange(norm MatrixNorm, m, n int, a []float64, lda int, work []float64) float64 - Dlansy(norm MatrixNorm, uplo blas.Uplo, n int, a []float64, lda int, work []float64) float64 - Dlapmt(forward bool, m, n int, x []float64, ldx int, k []int) - Dormqr(side blas.Side, trans blas.Transpose, m, n, k int, a []float64, lda int, tau, c []float64, ldc int, work []float64, lwork int) - Dormlq(side blas.Side, trans blas.Transpose, m, n, k int, a []float64, lda int, tau, c []float64, ldc int, work []float64, lwork int) - Dpocon(uplo blas.Uplo, n int, a []float64, lda int, anorm float64, work []float64, iwork []int) float64 - Dpotrf(ul blas.Uplo, n int, a []float64, lda int) (ok bool) - Dpotri(ul blas.Uplo, n int, a []float64, lda int) (ok bool) - Dpotrs(ul blas.Uplo, n, nrhs int, a []float64, lda int, b []float64, ldb int) - Dsyev(jobz EVJob, uplo blas.Uplo, n int, a []float64, lda int, w, work []float64, lwork int) (ok bool) - Dtrcon(norm MatrixNorm, uplo blas.Uplo, diag blas.Diag, n int, a []float64, lda int, work []float64, iwork []int) float64 - Dtrtri(uplo blas.Uplo, diag blas.Diag, n int, a []float64, lda int) (ok bool) - Dtrtrs(uplo blas.Uplo, trans blas.Transpose, diag blas.Diag, n, nrhs int, a []float64, lda int, b []float64, ldb int) (ok bool) -} - -// Direct specifies the direction of the multiplication for the Householder matrix. -type Direct byte - -const ( - Forward Direct = 'F' // Reflectors are right-multiplied, H_0 * H_1 * ... * H_{k-1}. - Backward Direct = 'B' // Reflectors are left-multiplied, H_{k-1} * ... * H_1 * H_0. -) - -// Sort is the sorting order. -type Sort byte - -const ( - SortIncreasing Sort = 'I' - SortDecreasing Sort = 'D' -) - -// StoreV indicates the storage direction of elementary reflectors. -type StoreV byte - -const ( - ColumnWise StoreV = 'C' // Reflector stored in a column of the matrix. - RowWise StoreV = 'R' // Reflector stored in a row of the matrix. -) - -// MatrixNorm represents the kind of matrix norm to compute. 
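Because the whole float64 API is expressed as the single Float64 interface above, backends are swappable behind one variable; a minimal sketch of calling through it (Dpotrf factors a symmetric positive definite matrix in place; values here are illustrative):

	var impl lapack.Float64 = gonum.Implementation{}
	// Upper triangle of the SPD matrix [[4, 2], [2, 3]], row-major, stride 2.
	a := []float64{4, 2, 0, 3}
	ok := impl.Dpotrf(blas.Upper, 2, a, 2)
	fmt.Println(ok, a[:2]) // true [2 1]: the first row of the Cholesky factor U.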
-type MatrixNorm byte - -const ( - MaxAbs MatrixNorm = 'M' // max(abs(A(i,j))) - MaxColumnSum MatrixNorm = 'O' // Maximum absolute column sum (one norm) - MaxRowSum MatrixNorm = 'I' // Maximum absolute row sum (infinity norm) - Frobenius MatrixNorm = 'F' // Frobenius norm (sqrt of sum of squares) -) - -// MatrixType represents the kind of matrix represented in the data. -type MatrixType byte - -const ( - General MatrixType = 'G' // A general dense matrix. - UpperTri MatrixType = 'U' // An upper triangular matrix. - LowerTri MatrixType = 'L' // A lower triangular matrix. -) - -// Pivot specifies the pivot type for plane rotations. -type Pivot byte - -const ( - Variable Pivot = 'V' - Top Pivot = 'T' - Bottom Pivot = 'B' -) - -// ApplyOrtho specifies which orthogonal matrix is applied in Dormbr. -type ApplyOrtho byte - -const ( - ApplyP ApplyOrtho = 'P' // Apply P or P^T. - ApplyQ ApplyOrtho = 'Q' // Apply Q or Q^T. -) - -// GenOrtho specifies which orthogonal matrix is generated in Dorgbr. -type GenOrtho byte - -const ( - GeneratePT GenOrtho = 'P' // Generate P^T. - GenerateQ GenOrtho = 'Q' // Generate Q. -) - -// SVDJob specifies the singular vector computation type for SVD. -type SVDJob byte - -const ( - SVDAll SVDJob = 'A' // Compute all columns of the orthogonal matrix U or V. - SVDStore SVDJob = 'S' // Compute the singular vectors and store them in the orthogonal matrix U or V. - SVDOverwrite SVDJob = 'O' // Compute the singular vectors and overwrite them on the input matrix A. - SVDNone SVDJob = 'N' // Do not compute singular vectors. -) - -// GSVDJob specifies the singular vector computation type for Generalized SVD. -type GSVDJob byte - -const ( - GSVDU GSVDJob = 'U' // Compute orthogonal matrix U. - GSVDV GSVDJob = 'V' // Compute orthogonal matrix V. - GSVDQ GSVDJob = 'Q' // Compute orthogonal matrix Q. - GSVDUnit GSVDJob = 'I' // Use unit-initialized matrix. - GSVDNone GSVDJob = 'N' // Do not compute orthogonal matrix. -) - -// EVComp specifies how eigenvectors are computed in Dsteqr. -type EVComp byte - -const ( - EVOrig EVComp = 'V' // Compute eigenvectors of the original symmetric matrix. - EVTridiag EVComp = 'I' // Compute eigenvectors of the tridiagonal matrix. - EVCompNone EVComp = 'N' // Do not compute eigenvectors. -) - -// EVJob specifies whether eigenvectors are computed in Dsyev. -type EVJob byte - -const ( - EVCompute EVJob = 'V' // Compute eigenvectors. - EVNone EVJob = 'N' // Do not compute eigenvectors. -) - -// LeftEVJob specifies whether left eigenvectors are computed in Dgeev. -type LeftEVJob byte - -const ( - LeftEVCompute LeftEVJob = 'V' // Compute left eigenvectors. - LeftEVNone LeftEVJob = 'N' // Do not compute left eigenvectors. -) - -// RightEVJob specifies whether right eigenvectors are computed in Dgeev. -type RightEVJob byte - -const ( - RightEVCompute RightEVJob = 'V' // Compute right eigenvectors. - RightEVNone RightEVJob = 'N' // Do not compute right eigenvectors. -) - -// BalanceJob specifies matrix balancing operation. -type BalanceJob byte - -const ( - Permute BalanceJob = 'P' - Scale BalanceJob = 'S' - PermuteScale BalanceJob = 'B' - BalanceNone BalanceJob = 'N' -) - -// SchurJob specifies whether the Schur form is computed in Dhseqr. -type SchurJob byte - -const ( - EigenvaluesOnly SchurJob = 'E' - EigenvaluesAndSchur SchurJob = 'S' -) - -// SchurComp specifies whether and how the Schur vectors are computed in Dhseqr. -type SchurComp byte - -const ( - SchurOrig SchurComp = 'V' // Compute Schur vectors of the original matrix. 
- SchurHess SchurComp = 'I' // Compute Schur vectors of the upper Hessenberg matrix. - SchurNone SchurComp = 'N' // Do not compute Schur vectors. -) - -// UpdateSchurComp specifies whether the matrix of Schur vectors is updated in Dtrexc. -type UpdateSchurComp byte - -const ( - UpdateSchur UpdateSchurComp = 'V' // Update the matrix of Schur vectors. - UpdateSchurNone UpdateSchurComp = 'N' // Do not update the matrix of Schur vectors. -) - -// EVSide specifies what eigenvectors are computed in Dtrevc3. -type EVSide byte - -const ( - EVRight EVSide = 'R' // Compute only right eigenvectors. - EVLeft EVSide = 'L' // Compute only left eigenvectors. - EVBoth EVSide = 'B' // Compute both right and left eigenvectors. -) - -// EVHowMany specifies which eigenvectors are computed in Dtrevc3 and how. -type EVHowMany byte - -const ( - EVAll EVHowMany = 'A' // Compute all right and/or left eigenvectors. - EVAllMulQ EVHowMany = 'B' // Compute all right and/or left eigenvectors multiplied by an input matrix. - EVSelected EVHowMany = 'S' // Compute selected right and/or left eigenvectors. -) diff --git a/vendor/gonum.org/v1/gonum/lapack/lapack64/doc.go b/vendor/gonum.org/v1/gonum/lapack/lapack64/doc.go deleted file mode 100644 index da19e3ec7..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/lapack64/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright ©2017 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package lapack64 provides a set of convenient wrapper functions for LAPACK -// calls, as specified in the netlib standard (www.netlib.org). -// -// The native Go routines are used by default, and the Use function can be used -// to set an alternative implementation. -// -// If the type of matrix (General, Symmetric, etc.) is known and fixed, it is -// used in the wrapper signature. In many cases, however, the type of the matrix -// changes during the call to the routine, for example the matrix is symmetric on -// entry and is triangular on exit. In these cases the correct types should be checked -// in the documentation. -// -// The full set of Lapack functions is very large, and it is not clear that a -// full implementation is desirable, let alone feasible. Please open up an issue -// if there is a specific function you need and/or are willing to implement. -package lapack64 // import "gonum.org/v1/gonum/lapack/lapack64" diff --git a/vendor/gonum.org/v1/gonum/lapack/lapack64/lapack64.go b/vendor/gonum.org/v1/gonum/lapack/lapack64/lapack64.go deleted file mode 100644 index 208ee1f43..000000000 --- a/vendor/gonum.org/v1/gonum/lapack/lapack64/lapack64.go +++ /dev/null @@ -1,581 +0,0 @@ -// Copyright ©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package lapack64 - -import ( - "gonum.org/v1/gonum/blas" - "gonum.org/v1/gonum/blas/blas64" - "gonum.org/v1/gonum/lapack" - "gonum.org/v1/gonum/lapack/gonum" -) - -var lapack64 lapack.Float64 = gonum.Implementation{} - -// Use sets the LAPACK float64 implementation to be used by subsequent BLAS calls. -// The default implementation is native.Implementation. -func Use(l lapack.Float64) { - lapack64 = l -} - -func max(a, b int) int { - if a > b { - return a - } - return b -} - -// Potrf computes the Cholesky factorization of a. 
-// The factorization has the form -// A = U^T * U if a.Uplo == blas.Upper, or -// A = L * L^T if a.Uplo == blas.Lower, -// where U is an upper triangular matrix and L is lower triangular. -// The triangular matrix is returned in t, and the underlying data between -// a and t is shared. The returned bool indicates whether a is positive -// definite and the factorization could be finished. -func Potrf(a blas64.Symmetric) (t blas64.Triangular, ok bool) { - ok = lapack64.Dpotrf(a.Uplo, a.N, a.Data, max(1, a.Stride)) - t.Uplo = a.Uplo - t.N = a.N - t.Data = a.Data - t.Stride = a.Stride - t.Diag = blas.NonUnit - return -} - -// Potri computes the inverse of a real symmetric positive definite matrix A -// using its Cholesky factorization. -// -// On entry, t contains the triangular factor U or L from the Cholesky -// factorization A = U^T*U or A = L*L^T, as computed by Potrf. -// -// On return, the upper or lower triangle of the (symmetric) inverse of A is -// stored in t, overwriting the input factor U or L, and also returned in a. The -// underlying data between a and t is shared. -// -// The returned bool indicates whether the inverse was computed successfully. -func Potri(t blas64.Triangular) (a blas64.Symmetric, ok bool) { - ok = lapack64.Dpotri(t.Uplo, t.N, t.Data, max(1, t.Stride)) - a.Uplo = t.Uplo - a.N = t.N - a.Data = t.Data - a.Stride = t.Stride - return -} - -// Potrs solves a system of n linear equations A*X = B where A is an n×n -// symmetric positive definite matrix and B is an n×nrhs matrix, using the -// Cholesky factorization A = U^T*U or A = L*L^T. t contains the corresponding -// triangular factor as returned by Potrf. On entry, B contains the right-hand -// side matrix B, on return it contains the solution matrix X. -func Potrs(t blas64.Triangular, b blas64.General) { - lapack64.Dpotrs(t.Uplo, t.N, b.Cols, t.Data, max(1, t.Stride), b.Data, max(1, b.Stride)) -} - -// Gecon estimates the reciprocal of the condition number of the n×n matrix A -// given the LU decomposition of the matrix. The condition number computed may -// be based on the 1-norm or the ∞-norm. -// -// a contains the result of the LU decomposition of A as computed by Getrf. -// -// anorm is the corresponding 1-norm or ∞-norm of the original matrix A. -// -// work is a temporary data slice of length at least 4*n and Gecon will panic otherwise. -// -// iwork is a temporary data slice of length at least n and Gecon will panic otherwise. -func Gecon(norm lapack.MatrixNorm, a blas64.General, anorm float64, work []float64, iwork []int) float64 { - return lapack64.Dgecon(norm, a.Cols, a.Data, max(1, a.Stride), anorm, work, iwork) -} - -// Gels finds a minimum-norm solution based on the matrices A and B using the -// QR or LQ factorization. Gels returns false if the matrix -// A is singular, and true if this solution was successfully found. -// -// The minimization problem solved depends on the input parameters. -// -// 1. If m >= n and trans == blas.NoTrans, Gels finds X such that || A*X - B||_2 -// is minimized. -// 2. If m < n and trans == blas.NoTrans, Gels finds the minimum norm solution of -// A * X = B. -// 3. If m >= n and trans == blas.Trans, Gels finds the minimum norm solution of -// A^T * X = B. -// 4. If m < n and trans == blas.Trans, Gels finds X such that || A*X - B||_2 -// is minimized. -// Note that the least-squares solutions (cases 1 and 3) perform the minimization -// per column of B. This is not the same as finding the minimum-norm matrix. 
-// -// The matrix A is a general matrix of size m×n and is modified during this call. -// The input matrix B is of size max(m,n)×nrhs, and serves two purposes. On entry, -// the elements of b specify the input matrix B. B has size m×nrhs if -// trans == blas.NoTrans, and n×nrhs if trans == blas.Trans. On exit, the -// leading submatrix of b contains the solution vectors X. If trans == blas.NoTrans, -// this submatrix is of size n×nrhs, and of size m×nrhs otherwise. -// -// Work is temporary storage, and lwork specifies the usable memory length. -// At minimum, lwork >= max(m,n) + max(m,n,nrhs), and this function will panic -// otherwise. A longer work will enable blocked algorithms to be called. -// In the special case that lwork == -1, work[0] will be set to the optimal working -// length. -func Gels(trans blas.Transpose, a blas64.General, b blas64.General, work []float64, lwork int) bool { - return lapack64.Dgels(trans, a.Rows, a.Cols, b.Cols, a.Data, max(1, a.Stride), b.Data, max(1, b.Stride), work, lwork) -} - -// Geqrf computes the QR factorization of the m×n matrix A using a blocked -// algorithm. A is modified to contain the information to construct Q and R. -// The upper triangle of a contains the matrix R. The lower triangular elements -// (not including the diagonal) contain the elementary reflectors. tau is modified -// to contain the reflector scales. tau must have length at least min(m,n), and -// this function will panic otherwise. -// -// The ith elementary reflector can be explicitly constructed by first extracting -// the -// v[j] = 0 j < i -// v[j] = 1 j == i -// v[j] = a[j*lda+i] j > i -// and computing H_i = I - tau[i] * v * v^T. -// -// The orthonormal matrix Q can be constucted from a product of these elementary -// reflectors, Q = H_0 * H_1 * ... * H_{k-1}, where k = min(m,n). -// -// Work is temporary storage, and lwork specifies the usable memory length. -// At minimum, lwork >= m and this function will panic otherwise. -// Geqrf is a blocked QR factorization, but the block size is limited -// by the temporary space available. If lwork == -1, instead of performing Geqrf, -// the optimal work length will be stored into work[0]. -func Geqrf(a blas64.General, tau, work []float64, lwork int) { - lapack64.Dgeqrf(a.Rows, a.Cols, a.Data, max(1, a.Stride), tau, work, lwork) -} - -// Gelqf computes the LQ factorization of the m×n matrix A using a blocked -// algorithm. A is modified to contain the information to construct L and Q. The -// lower triangle of a contains the matrix L. The elements above the diagonal -// and the slice tau represent the matrix Q. tau is modified to contain the -// reflector scales. tau must have length at least min(m,n), and this function -// will panic otherwise. -// -// See Geqrf for a description of the elementary reflectors and orthonormal -// matrix Q. Q is constructed as a product of these elementary reflectors, -// Q = H_{k-1} * ... * H_1 * H_0. -// -// Work is temporary storage, and lwork specifies the usable memory length. -// At minimum, lwork >= m and this function will panic otherwise. -// Gelqf is a blocked LQ factorization, but the block size is limited -// by the temporary space available. If lwork == -1, instead of performing Gelqf, -// the optimal work length will be stored into work[0]. -func Gelqf(a blas64.General, tau, work []float64, lwork int) { - lapack64.Dgelqf(a.Rows, a.Cols, a.Data, max(1, a.Stride), tau, work, lwork) -} - -// Gesvd computes the singular value decomposition of the input matrix A. 
-//
-// The singular value decomposition is
-//  A = U * Sigma * V^T
-// where Sigma is an m×n diagonal matrix containing the singular values of A,
-// U is an m×m orthogonal matrix and V is an n×n orthogonal matrix. The first
-// min(m,n) columns of U and V are the left and right singular vectors of A
-// respectively.
-//
-// jobU and jobVT are options for computing the singular vectors. The behavior
-// is as follows
-//  jobU == lapack.SVDAll       All m columns of U are returned in u
-//  jobU == lapack.SVDStore     The first min(m,n) columns are returned in u
-//  jobU == lapack.SVDOverwrite The first min(m,n) columns of U are written into a
-//  jobU == lapack.SVDNone      The columns of U are not computed.
-// The behavior is the same for jobVT and the rows of V^T. At most one of jobU
-// and jobVT can equal lapack.SVDOverwrite, and Gesvd will panic otherwise.
-//
-// On entry, a contains the data for the m×n matrix A. During the call to Gesvd
-// the data is overwritten. On exit, A contains the appropriate singular vectors
-// if either job is lapack.SVDOverwrite.
-//
-// s is a slice of length at least min(m,n) and on exit contains the singular
-// values in decreasing order.
-//
-// u contains the left singular vectors on exit, stored columnwise. If
-// jobU == lapack.SVDAll, u is of size m×m. If jobU == lapack.SVDStore u is
-// of size m×min(m,n). If jobU == lapack.SVDOverwrite or lapack.SVDNone, u is
-// not used.
-//
-// vt contains the right singular vectors on exit, stored rowwise. If
-// jobVT == lapack.SVDAll, vt is of size n×n. If jobVT == lapack.SVDStore vt is
-// of size min(m,n)×n. If jobVT == lapack.SVDOverwrite or lapack.SVDNone, vt is
-// not used.
-//
-// work is a slice for storing temporary memory, and lwork is the usable size of
-// the slice. lwork must be at least max(5*min(m,n), 3*min(m,n)+max(m,n)).
-// If lwork == -1, instead of performing Gesvd, the optimal work length will be
-// stored into work[0]. Gesvd will panic if the working memory has insufficient
-// storage.
-//
-// Gesvd returns whether the decomposition successfully completed.
-func Gesvd(jobU, jobVT lapack.SVDJob, a, u, vt blas64.General, s, work []float64, lwork int) (ok bool) {
-	return lapack64.Dgesvd(jobU, jobVT, a.Rows, a.Cols, a.Data, max(1, a.Stride), s, u.Data, max(1, u.Stride), vt.Data, max(1, vt.Stride), work, lwork)
-}
-
-// Getrf computes the LU decomposition of the m×n matrix A.
-// The LU decomposition is a factorization of A into
-//  A = P * L * U
-// where P is a permutation matrix, L is a unit lower triangular matrix, and
-// U is a (usually) non-unit upper triangular matrix. On exit, L and U are stored
-// in place into a.
-//
-// ipiv is a permutation vector. It indicates that row i of the matrix was
-// changed with ipiv[i]. ipiv must have length at least min(m,n), and Getrf will
-// panic otherwise. ipiv is zero-indexed.
-//
-// Getrf is the blocked version of the algorithm.
-//
-// Getrf returns whether the matrix A is nonsingular. The LU decomposition will
-// be computed regardless of the singularity of A, but division by zero
-// will occur if false is returned and the result is used to solve a
-// system of equations.
-func Getrf(a blas64.General, ipiv []int) bool {
-	return lapack64.Dgetrf(a.Rows, a.Cols, a.Data, max(1, a.Stride), ipiv)
-}
-
-// Getri computes the inverse of the matrix A using the LU factorization computed
-// by Getrf. On entry, a contains the PLU decomposition of A as computed by
-// Getrf and on exit contains the inverse of the original matrix.
-//
-// Getri will not perform the inversion if the matrix is singular, and returns
-// a boolean indicating whether the inversion was successful.
-//
-// Work is temporary storage, and lwork specifies the usable memory length.
-// At minimum, lwork >= n and this function will panic otherwise.
-// Getri is a blocked inversion, but the block size is limited
-// by the temporary space available. If lwork == -1, instead of performing Getri,
-// the optimal work length will be stored into work[0].
-func Getri(a blas64.General, ipiv []int, work []float64, lwork int) (ok bool) {
-	return lapack64.Dgetri(a.Cols, a.Data, max(1, a.Stride), ipiv, work, lwork)
-}
-
-// Getrs solves a system of equations using an LU factorization.
-// The system of equations solved is
-//  A * X = B   if trans == blas.NoTrans
-//  A^T * X = B if trans == blas.Trans
-// A is a general n×n matrix with stride lda. B is a general matrix of size n×nrhs.
-//
-// On entry b contains the elements of the matrix B. On exit, b contains the
-// elements of X, the solution to the system of equations.
-//
-// a and ipiv contain the LU factorization of A and the permutation indices as
-// computed by Getrf. ipiv is zero-indexed.
-func Getrs(trans blas.Transpose, a blas64.General, b blas64.General, ipiv []int) {
-	lapack64.Dgetrs(trans, a.Cols, b.Cols, a.Data, max(1, a.Stride), ipiv, b.Data, max(1, b.Stride))
-}
-
-// Ggsvd3 computes the generalized singular value decomposition (GSVD)
-// of an m×n matrix A and p×n matrix B:
-//  U^T*A*Q = D1*[ 0 R ]
-//
-//  V^T*B*Q = D2*[ 0 R ]
-// where U, V and Q are orthogonal matrices.
-//
-// Ggsvd3 returns k and l, the dimensions of the sub-blocks. k+l
-// is the effective numerical rank of the (m+p)×n matrix [ A^T B^T ]^T.
-// R is a (k+l)×(k+l) nonsingular upper triangular matrix, D1 and
-// D2 are m×(k+l) and p×(k+l) diagonal matrices and of the following
-// structures, respectively:
-//
-// If m-k-l >= 0,
-//
-//                  k  l
-//     D1 =     k [ I  0 ]
-//              l [ 0  C ]
-//          m-k-l [ 0  0 ]
-//
-//                k  l
-//     D2 =   l [ 0  S ]
-//          p-l [ 0  0 ]
-//
-//             n-k-l  k    l
-//  [ 0 R ] = k [ 0  R11  R12 ] k
-//            l [ 0   0   R22 ] l
-//
-// where
-//
-//  C = diag( alpha_k, ... , alpha_{k+l} ),
-//  S = diag( beta_k, ... , beta_{k+l} ),
-//  C^2 + S^2 = I.
-//
-// R is stored in
-//  A[0:k+l, n-k-l:n]
-// on exit.
-//
-// If m-k-l < 0,
-//
-//                k m-k k+l-m
-//     D1 =   k [ I  0    0  ]
-//          m-k [ 0  C    0  ]
-//
-//                  k m-k k+l-m
-//     D2 =   m-k [ 0  S    0  ]
-//          k+l-m [ 0  0    I  ]
-//            p-l [ 0  0    0  ]
-//
-//               n-k-l  k  m-k k+l-m
-//  [ 0 R ] =   k [ 0  R11 R12  R13 ]
-//            m-k [ 0   0  R22  R23 ]
-//          k+l-m [ 0   0   0   R33 ]
-//
-// where
-//  C = diag( alpha_k, ... , alpha_m ),
-//  S = diag( beta_k, ... , beta_m ),
-//  C^2 + S^2 = I.
-//
-//  R = [ R11 R12 R13 ] is stored in A[1:m, n-k-l+1:n]
-//      [  0  R22 R23 ]
-// and R33 is stored in
-//  B[m-k:l, n+m-k-l:n] on exit.
-//
-// Ggsvd3 computes C, S, R, and optionally the orthogonal transformation
-// matrices U, V and Q.
-//
-// jobU, jobV and jobQ are options for computing the orthogonal matrices. The behavior
-// is as follows
-//  jobU == lapack.GSVDU    Compute orthogonal matrix U
-//  jobU == lapack.GSVDNone Do not compute orthogonal matrix.
-// The behavior is the same for jobV and jobQ with the exception that instead of
-// lapack.GSVDU these accept lapack.GSVDV and lapack.GSVDQ respectively.
-// The matrices U, V and Q must be m×m, p×p and n×n respectively unless the
-// relevant job parameter is lapack.GSVDNone.
-//
-// alpha and beta must have length n or Ggsvd3 will panic.
On exit, alpha and -// beta contain the generalized singular value pairs of A and B -// alpha[0:k] = 1, -// beta[0:k] = 0, -// if m-k-l >= 0, -// alpha[k:k+l] = diag(C), -// beta[k:k+l] = diag(S), -// if m-k-l < 0, -// alpha[k:m]= C, alpha[m:k+l]= 0 -// beta[k:m] = S, beta[m:k+l] = 1. -// if k+l < n, -// alpha[k+l:n] = 0 and -// beta[k+l:n] = 0. -// -// On exit, iwork contains the permutation required to sort alpha descending. -// -// iwork must have length n, work must have length at least max(1, lwork), and -// lwork must be -1 or greater than n, otherwise Ggsvd3 will panic. If -// lwork is -1, work[0] holds the optimal lwork on return, but Ggsvd3 does -// not perform the GSVD. -func Ggsvd3(jobU, jobV, jobQ lapack.GSVDJob, a, b blas64.General, alpha, beta []float64, u, v, q blas64.General, work []float64, lwork int, iwork []int) (k, l int, ok bool) { - return lapack64.Dggsvd3(jobU, jobV, jobQ, a.Rows, a.Cols, b.Rows, a.Data, max(1, a.Stride), b.Data, max(1, b.Stride), alpha, beta, u.Data, max(1, u.Stride), v.Data, max(1, v.Stride), q.Data, max(1, q.Stride), work, lwork, iwork) -} - -// Lange computes the matrix norm of the general m×n matrix A. The input norm -// specifies the norm computed. -// lapack.MaxAbs: the maximum absolute value of an element. -// lapack.MaxColumnSum: the maximum column sum of the absolute values of the entries. -// lapack.MaxRowSum: the maximum row sum of the absolute values of the entries. -// lapack.Frobenius: the square root of the sum of the squares of the entries. -// If norm == lapack.MaxColumnSum, work must be of length n, and this function will panic otherwise. -// There are no restrictions on work for the other matrix norms. -func Lange(norm lapack.MatrixNorm, a blas64.General, work []float64) float64 { - return lapack64.Dlange(norm, a.Rows, a.Cols, a.Data, max(1, a.Stride), work) -} - -// Lansy computes the specified norm of an n×n symmetric matrix. If -// norm == lapack.MaxColumnSum or norm == lapackMaxRowSum work must have length -// at least n and this function will panic otherwise. -// There are no restrictions on work for the other matrix norms. -func Lansy(norm lapack.MatrixNorm, a blas64.Symmetric, work []float64) float64 { - return lapack64.Dlansy(norm, a.Uplo, a.N, a.Data, max(1, a.Stride), work) -} - -// Lantr computes the specified norm of an m×n trapezoidal matrix A. If -// norm == lapack.MaxColumnSum work must have length at least n and this function -// will panic otherwise. There are no restrictions on work for the other matrix norms. -func Lantr(norm lapack.MatrixNorm, a blas64.Triangular, work []float64) float64 { - return lapack64.Dlantr(norm, a.Uplo, a.Diag, a.N, a.N, a.Data, max(1, a.Stride), work) -} - -// Lapmt rearranges the columns of the m×n matrix X as specified by the -// permutation k_0, k_1, ..., k_{n-1} of the integers 0, ..., n-1. -// -// If forward is true a forward permutation is performed: -// -// X[0:m, k[j]] is moved to X[0:m, j] for j = 0, 1, ..., n-1. -// -// otherwise a backward permutation is performed: -// -// X[0:m, j] is moved to X[0:m, k[j]] for j = 0, 1, ..., n-1. -// -// k must have length n, otherwise Lapmt will panic. k is zero-indexed. -func Lapmt(forward bool, x blas64.General, k []int) { - lapack64.Dlapmt(forward, x.Rows, x.Cols, x.Data, max(1, x.Stride), k) -} - -// Ormlq multiplies the matrix C by the othogonal matrix Q defined by -// A and tau. A and tau are as returned from Gelqf. 
-// C = Q * C if side == blas.Left and trans == blas.NoTrans -// C = Q^T * C if side == blas.Left and trans == blas.Trans -// C = C * Q if side == blas.Right and trans == blas.NoTrans -// C = C * Q^T if side == blas.Right and trans == blas.Trans -// If side == blas.Left, A is a matrix of side k×m, and if side == blas.Right -// A is of size k×n. This uses a blocked algorithm. -// -// Work is temporary storage, and lwork specifies the usable memory length. -// At minimum, lwork >= m if side == blas.Left and lwork >= n if side == blas.Right, -// and this function will panic otherwise. -// Ormlq uses a block algorithm, but the block size is limited -// by the temporary space available. If lwork == -1, instead of performing Ormlq, -// the optimal work length will be stored into work[0]. -// -// Tau contains the Householder scales and must have length at least k, and -// this function will panic otherwise. -func Ormlq(side blas.Side, trans blas.Transpose, a blas64.General, tau []float64, c blas64.General, work []float64, lwork int) { - lapack64.Dormlq(side, trans, c.Rows, c.Cols, a.Rows, a.Data, max(1, a.Stride), tau, c.Data, max(1, c.Stride), work, lwork) -} - -// Ormqr multiplies an m×n matrix C by an orthogonal matrix Q as -// C = Q * C, if side == blas.Left and trans == blas.NoTrans, -// C = Q^T * C, if side == blas.Left and trans == blas.Trans, -// C = C * Q, if side == blas.Right and trans == blas.NoTrans, -// C = C * Q^T, if side == blas.Right and trans == blas.Trans, -// where Q is defined as the product of k elementary reflectors -// Q = H_0 * H_1 * ... * H_{k-1}. -// -// If side == blas.Left, A is an m×k matrix and 0 <= k <= m. -// If side == blas.Right, A is an n×k matrix and 0 <= k <= n. -// The ith column of A contains the vector which defines the elementary -// reflector H_i and tau[i] contains its scalar factor. tau must have length k -// and Ormqr will panic otherwise. Geqrf returns A and tau in the required -// form. -// -// work must have length at least max(1,lwork), and lwork must be at least n if -// side == blas.Left and at least m if side == blas.Right, otherwise Ormqr will -// panic. -// -// work is temporary storage, and lwork specifies the usable memory length. At -// minimum, lwork >= m if side == blas.Left and lwork >= n if side == -// blas.Right, and this function will panic otherwise. Larger values of lwork -// will generally give better performance. On return, work[0] will contain the -// optimal value of lwork. -// -// If lwork is -1, instead of performing Ormqr, the optimal workspace size will -// be stored into work[0]. -func Ormqr(side blas.Side, trans blas.Transpose, a blas64.General, tau []float64, c blas64.General, work []float64, lwork int) { - lapack64.Dormqr(side, trans, c.Rows, c.Cols, a.Cols, a.Data, max(1, a.Stride), tau, c.Data, max(1, c.Stride), work, lwork) -} - -// Pocon estimates the reciprocal of the condition number of a positive-definite -// matrix A given the Cholesky decmposition of A. The condition number computed -// is based on the 1-norm and the ∞-norm. -// -// anorm is the 1-norm and the ∞-norm of the original matrix A. -// -// work is a temporary data slice of length at least 3*n and Pocon will panic otherwise. -// -// iwork is a temporary data slice of length at least n and Pocon will panic otherwise. 
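Gluing the wrappers above together, a reciprocal condition number estimate for a symmetric positive definite matrix might look like the following sketch (editorial example; the work and iwork lengths follow the requirements just stated, and the values are illustrative):

	a := blas64.Symmetric{Uplo: blas.Upper, N: 2, Stride: 2, Data: []float64{4, 2, 0, 3}}
	anorm := lapack64.Lansy(lapack.MaxColumnSum, a, make([]float64, a.N)) // Norm of the original A.
	if _, ok := lapack64.Potrf(a); ok {
		// a.Data now holds the Cholesky factor that Pocon expects.
		rcond := lapack64.Pocon(a, anorm, make([]float64, 3*a.N), make([]int, a.N))
		fmt.Println(rcond) // Roughly 1/κ(A); values near 1 indicate a well-conditioned matrix.
	}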
-func Pocon(a blas64.Symmetric, anorm float64, work []float64, iwork []int) float64 { - return lapack64.Dpocon(a.Uplo, a.N, a.Data, max(1, a.Stride), anorm, work, iwork) -} - -// Syev computes all eigenvalues and, optionally, the eigenvectors of a real -// symmetric matrix A. -// -// w contains the eigenvalues in ascending order upon return. w must have length -// at least n, and Syev will panic otherwise. -// -// On entry, a contains the elements of the symmetric matrix A in the triangular -// portion specified by uplo. If jobz == lapack.EVCompute, a contains the -// orthonormal eigenvectors of A on exit, otherwise jobz must be lapack.EVNone -// and on exit the specified triangular region is overwritten. -// -// Work is temporary storage, and lwork specifies the usable memory length. At minimum, -// lwork >= 3*n-1, and Syev will panic otherwise. The amount of blocking is -// limited by the usable length. If lwork == -1, instead of computing Syev the -// optimal work length is stored into work[0]. -func Syev(jobz lapack.EVJob, a blas64.Symmetric, w, work []float64, lwork int) (ok bool) { - return lapack64.Dsyev(jobz, a.Uplo, a.N, a.Data, max(1, a.Stride), w, work, lwork) -} - -// Trcon estimates the reciprocal of the condition number of a triangular matrix A. -// The condition number computed may be based on the 1-norm or the ∞-norm. -// -// work is a temporary data slice of length at least 3*n and Trcon will panic otherwise. -// -// iwork is a temporary data slice of length at least n and Trcon will panic otherwise. -func Trcon(norm lapack.MatrixNorm, a blas64.Triangular, work []float64, iwork []int) float64 { - return lapack64.Dtrcon(norm, a.Uplo, a.Diag, a.N, a.Data, max(1, a.Stride), work, iwork) -} - -// Trtri computes the inverse of a triangular matrix, storing the result in place -// into a. -// -// Trtri will not perform the inversion if the matrix is singular, and returns -// a boolean indicating whether the inversion was successful. -func Trtri(a blas64.Triangular) (ok bool) { - return lapack64.Dtrtri(a.Uplo, a.Diag, a.N, a.Data, max(1, a.Stride)) -} - -// Trtrs solves a triangular system of the form A * X = B or A^T * X = B. Trtrs -// returns whether the solve completed successfully. If A is singular, no solve is performed. -func Trtrs(trans blas.Transpose, a blas64.Triangular, b blas64.General) (ok bool) { - return lapack64.Dtrtrs(a.Uplo, trans, a.Diag, a.N, b.Cols, a.Data, max(1, a.Stride), b.Data, max(1, b.Stride)) -} - -// Geev computes the eigenvalues and, optionally, the left and/or right -// eigenvectors for an n×n real nonsymmetric matrix A. -// -// The right eigenvector v_j of A corresponding to an eigenvalue λ_j -// is defined by -// A v_j = λ_j v_j, -// and the left eigenvector u_j corresponding to an eigenvalue λ_j is defined by -// u_j^H A = λ_j u_j^H, -// where u_j^H is the conjugate transpose of u_j. -// -// On return, A will be overwritten and the left and right eigenvectors will be -// stored, respectively, in the columns of the n×n matrices VL and VR in the -// same order as their eigenvalues. If the j-th eigenvalue is real, then -// u_j = VL[:,j], -// v_j = VR[:,j], -// and if it is not real, then j and j+1 form a complex conjugate pair and the -// eigenvectors can be recovered as -// u_j = VL[:,j] + i*VL[:,j+1], -// u_{j+1} = VL[:,j] - i*VL[:,j+1], -// v_j = VR[:,j] + i*VR[:,j+1], -// v_{j+1} = VR[:,j] - i*VR[:,j+1], -// where i is the imaginary unit. The computed eigenvectors are normalized to -// have Euclidean norm equal to 1 and largest component real. 
-// -// Left eigenvectors will be computed only if jobvl == lapack.LeftEVCompute, -// otherwise jobvl must be lapack.LeftEVNone. -// Right eigenvectors will be computed only if jobvr == lapack.RightEVCompute, -// otherwise jobvr must be lapack.RightEVNone. -// For other values of jobvl and jobvr Geev will panic. -// -// On return, wr and wi will contain the real and imaginary parts, respectively, -// of the computed eigenvalues. Complex conjugate pairs of eigenvalues appear -// consecutively with the eigenvalue having the positive imaginary part first. -// wr and wi must have length n, and Geev will panic otherwise. -// -// work must have length at least lwork and lwork must be at least max(1,4*n) if -// the left or right eigenvectors are computed, and at least max(1,3*n) if no -// eigenvectors are computed. For good performance, lwork must generally be -// larger. On return, optimal value of lwork will be stored in work[0]. -// -// If lwork == -1, instead of performing Geev, the function only calculates the -// optimal vaule of lwork and stores it into work[0]. -// -// On return, first will be the index of the first valid eigenvalue. -// If first == 0, all eigenvalues and eigenvectors have been computed. -// If first is positive, Geev failed to compute all the eigenvalues, no -// eigenvectors have been computed and wr[first:] and wi[first:] contain those -// eigenvalues which have converged. -func Geev(jobvl lapack.LeftEVJob, jobvr lapack.RightEVJob, a blas64.General, wr, wi []float64, vl, vr blas64.General, work []float64, lwork int) (first int) { - n := a.Rows - if a.Cols != n { - panic("lapack64: matrix not square") - } - if jobvl == lapack.LeftEVCompute && (vl.Rows != n || vl.Cols != n) { - panic("lapack64: bad size of VL") - } - if jobvr == lapack.RightEVCompute && (vr.Rows != n || vr.Cols != n) { - panic("lapack64: bad size of VR") - } - return lapack64.Dgeev(jobvl, jobvr, n, a.Data, max(1, a.Stride), wr, wi, vl.Data, max(1, vl.Stride), vr.Data, max(1, vr.Stride), work, lwork) -} diff --git a/vendor/gonum.org/v1/gonum/mat/band.go b/vendor/gonum.org/v1/gonum/mat/band.go deleted file mode 100644 index 17c1f86e8..000000000 --- a/vendor/gonum.org/v1/gonum/mat/band.go +++ /dev/null @@ -1,277 +0,0 @@ -// Copyright ©2017 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package mat - -import ( - "gonum.org/v1/gonum/blas/blas64" -) - -var ( - bandDense *BandDense - _ Matrix = bandDense - _ Banded = bandDense - _ RawBander = bandDense - - _ NonZeroDoer = bandDense - _ RowNonZeroDoer = bandDense - _ ColNonZeroDoer = bandDense -) - -// BandDense represents a band matrix in dense storage format. -type BandDense struct { - mat blas64.Band -} - -// Banded is a band matrix representation. -type Banded interface { - Matrix - // Bandwidth returns the lower and upper bandwidth values for - // the matrix. The total bandwidth of the matrix is kl+ku+1. - Bandwidth() (kl, ku int) - - // TBand is the equivalent of the T() method in the Matrix - // interface but guarantees the transpose is of banded type. - TBand() Banded -} - -// A RawBander can return a blas64.Band representation of the receiver. -// Changes to the blas64.Band.Data slice will be reflected in the original -// matrix, changes to the Rows, Cols, KL, KU and Stride fields will not. -type RawBander interface { - RawBand() blas64.Band -} - -// A MutableBanded can set elements of a band matrix. 
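The two-pass calling convention that the Geev documentation above describes (workspace query, allocate, compute) looks like this in a minimal editorial sketch, for eigenvalues only:

	a := blas64.General{Rows: 2, Cols: 2, Stride: 2, Data: []float64{0, 1, -2, -3}}
	wr, wi := make([]float64, 2), make([]float64, 2)
	work := make([]float64, 1)
	// lwork == -1 is the workspace query; the optimal length lands in work[0].
	lapack64.Geev(lapack.LeftEVNone, lapack.RightEVNone, a, wr, wi, blas64.General{}, blas64.General{}, work, -1)
	work = make([]float64, int(work[0]))
	first := lapack64.Geev(lapack.LeftEVNone, lapack.RightEVNone, a, wr, wi, blas64.General{}, blas64.General{}, work, len(work))
	fmt.Println(first, wr, wi) // 0 plus the eigenvalues -1 and -2 (order not guaranteed).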
-type MutableBanded interface { - Banded - SetBand(i, j int, v float64) -} - -var ( - _ Matrix = TransposeBand{} - _ Banded = TransposeBand{} - _ UntransposeBander = TransposeBand{} -) - -// TransposeBand is a type for performing an implicit transpose of a band -// matrix. It implements the Banded interface, returning values from the -// transpose of the matrix within. -type TransposeBand struct { - Banded Banded -} - -// At returns the value of the element at row i and column j of the transposed -// matrix, that is, row j and column i of the Banded field. -func (t TransposeBand) At(i, j int) float64 { - return t.Banded.At(j, i) -} - -// Dims returns the dimensions of the transposed matrix. -func (t TransposeBand) Dims() (r, c int) { - c, r = t.Banded.Dims() - return r, c -} - -// T performs an implicit transpose by returning the Banded field. -func (t TransposeBand) T() Matrix { - return t.Banded -} - -// Bandwidth returns the lower and upper bandwidth values for -// the transposed matrix. -func (t TransposeBand) Bandwidth() (kl, ku int) { - kl, ku = t.Banded.Bandwidth() - return ku, kl -} - -// TBand performs an implicit transpose by returning the Banded field. -func (t TransposeBand) TBand() Banded { - return t.Banded -} - -// Untranspose returns the Banded field. -func (t TransposeBand) Untranspose() Matrix { - return t.Banded -} - -// UntransposeBand returns the Banded field. -func (t TransposeBand) UntransposeBand() Banded { - return t.Banded -} - -// NewBandDense creates a new Band matrix with r rows and c columns. If data == nil, -// a new slice is allocated for the backing slice. If len(data) == min(r, c+kl)*(kl+ku+1), -// data is used as the backing slice, and changes to the elements of the returned -// BandDense will be reflected in data. If neither of these is true, NewBandDense -// will panic. kl must be at least zero and less r, and ku must be at least zero and -// less than c, otherwise NewBandDense will panic. -// NewBandDense will panic if either r or c is zero. -// -// The data must be arranged in row-major order constructed by removing the zeros -// from the rows outside the band and aligning the diagonals. For example, the matrix -// 1 2 3 0 0 0 -// 4 5 6 7 0 0 -// 0 8 9 10 11 0 -// 0 0 12 13 14 15 -// 0 0 0 16 17 18 -// 0 0 0 0 19 20 -// becomes (* entries are never accessed) -// * 1 2 3 -// 4 5 6 7 -// 8 9 10 11 -// 12 13 14 15 -// 16 17 18 * -// 19 20 * * -// which is passed to NewBandDense as []float64{*, 1, 2, 3, 4, ...} with kl=1 and ku=2. -// Only the values in the band portion of the matrix are used. -func NewBandDense(r, c, kl, ku int, data []float64) *BandDense { - if r <= 0 || c <= 0 || kl < 0 || ku < 0 { - if r == 0 || c == 0 { - panic(ErrZeroLength) - } - panic("mat: negative dimension") - } - if kl+1 > r || ku+1 > c { - panic("mat: band out of range") - } - bc := kl + ku + 1 - if data != nil && len(data) != min(r, c+kl)*bc { - panic(ErrShape) - } - if data == nil { - data = make([]float64, min(r, c+kl)*bc) - } - return &BandDense{ - mat: blas64.Band{ - Rows: r, - Cols: c, - KL: kl, - KU: ku, - Stride: bc, - Data: data, - }, - } -} - -// NewDiagonalRect is a convenience function that returns a diagonal matrix represented by a -// BandDense. The length of data must be min(r, c) otherwise NewDiagonalRect will panic. -func NewDiagonalRect(r, c int, data []float64) *BandDense { - return NewBandDense(r, c, 0, 0, data) -} - -// Dims returns the number of rows and columns in the matrix. 
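A concrete instance of the compact band layout described above: a 3×4 matrix with kl = 1 and ku = 1 is backed by a min(3, 4+1) × (1+1+1) = 3×3 slice, with never-read corner slots (editorial sketch; the -1 marks the unused slot):

	b := mat.NewBandDense(3, 4, 1, 1, []float64{
		-1, 1, 2, // *   a00 a01 (leading slot never accessed)
		3, 4, 5, //  a10 a11 a12
		6, 7, 8, //  a21 a22 a23
	})
	fmt.Println(b.At(0, 0), b.At(1, 2), b.At(0, 3)) // 1 5 0: entries outside the band read as zero.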
-func (b *BandDense) Dims() (r, c int) { - return b.mat.Rows, b.mat.Cols -} - -// Bandwidth returns the upper and lower bandwidths of the matrix. -func (b *BandDense) Bandwidth() (kl, ku int) { - return b.mat.KL, b.mat.KU -} - -// T performs an implicit transpose by returning the receiver inside a Transpose. -func (b *BandDense) T() Matrix { - return Transpose{b} -} - -// TBand performs an implicit transpose by returning the receiver inside a TransposeBand. -func (b *BandDense) TBand() Banded { - return TransposeBand{b} -} - -// RawBand returns the underlying blas64.Band used by the receiver. -// Changes to elements in the receiver following the call will be reflected -// in returned blas64.Band. -func (b *BandDense) RawBand() blas64.Band { - return b.mat -} - -// SetRawBand sets the underlying blas64.Band used by the receiver. -// Changes to elements in the receiver following the call will be reflected -// in the input. -func (b *BandDense) SetRawBand(mat blas64.Band) { - b.mat = mat -} - -// DiagView returns the diagonal as a matrix backed by the original data. -func (b *BandDense) DiagView() Diagonal { - n := min(b.mat.Rows, b.mat.Cols) - return &DiagDense{ - mat: blas64.Vector{ - N: n, - Inc: b.mat.Stride, - Data: b.mat.Data[b.mat.KL : (n-1)*b.mat.Stride+b.mat.KL+1], - }, - } -} - -// DoNonZero calls the function fn for each of the non-zero elements of b. The function fn -// takes a row/column index and the element value of b at (i, j). -func (b *BandDense) DoNonZero(fn func(i, j int, v float64)) { - for i := 0; i < min(b.mat.Rows, b.mat.Cols+b.mat.KL); i++ { - for j := max(0, i-b.mat.KL); j < min(b.mat.Cols, i+b.mat.KU+1); j++ { - v := b.at(i, j) - if v != 0 { - fn(i, j, v) - } - } - } -} - -// DoRowNonZero calls the function fn for each of the non-zero elements of row i of b. The function fn -// takes a row/column index and the element value of b at (i, j). -func (b *BandDense) DoRowNonZero(i int, fn func(i, j int, v float64)) { - if i < 0 || b.mat.Rows <= i { - panic(ErrRowAccess) - } - for j := max(0, i-b.mat.KL); j < min(b.mat.Cols, i+b.mat.KU+1); j++ { - v := b.at(i, j) - if v != 0 { - fn(i, j, v) - } - } -} - -// DoColNonZero calls the function fn for each of the non-zero elements of column j of b. The function fn -// takes a row/column index and the element value of b at (i, j). -func (b *BandDense) DoColNonZero(j int, fn func(i, j int, v float64)) { - if j < 0 || b.mat.Cols <= j { - panic(ErrColAccess) - } - for i := 0; i < min(b.mat.Rows, b.mat.Cols+b.mat.KL); i++ { - if i-b.mat.KL <= j && j < i+b.mat.KU+1 { - v := b.at(i, j) - if v != 0 { - fn(i, j, v) - } - } - } -} - -// Zero sets all of the matrix elements to zero. -func (b *BandDense) Zero() { - m := b.mat.Rows - kL := b.mat.KL - nCol := b.mat.KU + 1 + kL - for i := 0; i < m; i++ { - l := max(0, kL-i) - u := min(nCol, m+kL-i) - zero(b.mat.Data[i*b.mat.Stride+l : i*b.mat.Stride+u]) - } -} - -// Trace computes the trace of the matrix. -func (b *BandDense) Trace() float64 { - r, c := b.Dims() - if r != c { - panic(ErrShape) - } - rb := b.RawBand() - var tr float64 - for i := 0; i < r; i++ { - tr += rb.Data[rb.KL+i*rb.Stride] - } - return tr -} diff --git a/vendor/gonum.org/v1/gonum/mat/cdense.go b/vendor/gonum.org/v1/gonum/mat/cdense.go deleted file mode 100644 index 9c29d1afd..000000000 --- a/vendor/gonum.org/v1/gonum/mat/cdense.go +++ /dev/null @@ -1,168 +0,0 @@ -// Copyright ©2019 The Gonum Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package mat - -import "gonum.org/v1/gonum/blas/cblas128" - -// CDense is a dense matrix representation with complex data. -type CDense struct { - mat cblas128.General - - capRows, capCols int -} - -// Dims returns the number of rows and columns in the matrix. -func (m *CDense) Dims() (r, c int) { - return m.mat.Rows, m.mat.Cols -} - -// H performs an implicit conjugate transpose by returning the receiver inside a -// Conjugate. -func (m *CDense) H() CMatrix { - return Conjugate{m} -} - -// NewCDense creates a new complex Dense matrix with r rows and c columns. -// If data == nil, a new slice is allocated for the backing slice. -// If len(data) == r*c, data is used as the backing slice, and changes to the -// elements of the returned CDense will be reflected in data. -// If neither of these is true, NewCDense will panic. -// NewCDense will panic if either r or c is zero. -// -// The data must be arranged in row-major order, i.e. the (i*c + j)-th -// element in the data slice is the {i, j}-th element in the matrix. -func NewCDense(r, c int, data []complex128) *CDense { - if r <= 0 || c <= 0 { - if r == 0 || c == 0 { - panic(ErrZeroLength) - } - panic("mat: negative dimension") - } - if data != nil && r*c != len(data) { - panic(ErrShape) - } - if data == nil { - data = make([]complex128, r*c) - } - return &CDense{ - mat: cblas128.General{ - Rows: r, - Cols: c, - Stride: c, - Data: data, - }, - capRows: r, - capCols: c, - } -} - -// reuseAs resizes an empty matrix to a r×c matrix, -// or checks that a non-empty matrix is r×c. -// -// reuseAs must be kept in sync with reuseAsZeroed. -func (m *CDense) reuseAs(r, c int) { - if m.mat.Rows > m.capRows || m.mat.Cols > m.capCols { - // Panic as a string, not a mat.Error. - panic("mat: caps not correctly set") - } - if r == 0 || c == 0 { - panic(ErrZeroLength) - } - if m.IsZero() { - m.mat = cblas128.General{ - Rows: r, - Cols: c, - Stride: c, - Data: useC(m.mat.Data, r*c), - } - m.capRows = r - m.capCols = c - return - } - if r != m.mat.Rows || c != m.mat.Cols { - panic(ErrShape) - } -} - -func (m *CDense) reuseAsZeroed(r, c int) { - // This must be kept in-sync with reuseAs. - if m.mat.Rows > m.capRows || m.mat.Cols > m.capCols { - // Panic as a string, not a mat.Error. - panic("mat: caps not correctly set") - } - if r == 0 || c == 0 { - panic(ErrZeroLength) - } - if m.IsZero() { - m.mat = cblas128.General{ - Rows: r, - Cols: c, - Stride: c, - Data: useZeroedC(m.mat.Data, r*c), - } - m.capRows = r - m.capCols = c - return - } - if r != m.mat.Rows || c != m.mat.Cols { - panic(ErrShape) - } - m.Zero() -} - -// Reset zeros the dimensions of the matrix so that it can be reused as the -// receiver of a dimensionally restricted operation. -// -// See the Reseter interface for more information. -func (m *CDense) Reset() { - // Row, Cols and Stride must be zeroed in unison. - m.mat.Rows, m.mat.Cols, m.mat.Stride = 0, 0, 0 - m.capRows, m.capCols = 0, 0 - m.mat.Data = m.mat.Data[:0] -} - -// IsZero returns whether the receiver is zero-sized. Zero-sized matrices can be the -// receiver for size-restricted operations. CDense matrices can be zeroed using Reset. -func (m *CDense) IsZero() bool { - // It must be the case that m.Dims() returns - // zeros in this case. See comment in Reset(). - return m.mat.Stride == 0 -} -
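Since the complex API mirrors the real-valued Dense type later in this diff, a short hedged sketch of the row-major constructor and the implicit conjugate transpose may help (illustrative only, using the NewCDense and H methods above):

package main

import (
	"fmt"

	"gonum.org/v1/gonum/mat"
)

func main() {
	// Sketch: row-major data, so element (i, j) is data[i*c+j].
	c := mat.NewCDense(2, 2, []complex128{
		1 + 2i, 3,
		0, 1 - 2i,
	})
	h := c.H() // implicit conjugate transpose; no data is copied
	fmt.Println(h.At(0, 0)) // (1-2i), the conjugate of c.At(0, 0)
}

-// Zero sets all of the matrix elements to zero.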
-func (m *CDense) Zero() { - r := m.mat.Rows - c := m.mat.Cols - for i := 0; i < r; i++ { - zeroC(m.mat.Data[i*m.mat.Stride : i*m.mat.Stride+c]) - } -} - -// Copy makes a copy of elements of a into the receiver. It is similar to the -// built-in copy; it copies as much as the overlap between the two matrices and -// returns the number of rows and columns it copied. If a aliases the receiver -// and is a transposed Dense or VecDense, with a non-unitary increment, Copy will -// panic. -// -// See the Copier interface for more information. -func (m *CDense) Copy(a CMatrix) (r, c int) { - r, c = a.Dims() - if a == m { - return r, c - } - r = min(r, m.mat.Rows) - c = min(c, m.mat.Cols) - if r == 0 || c == 0 { - return 0, 0 - } - // TODO(btracey): Check for overlap when complex version exists. - // TODO(btracey): Add fast-paths. - for i := 0; i < r; i++ { - for j := 0; j < c; j++ { - m.set(i, j, a.At(i, j)) - } - } - return r, c -} diff --git a/vendor/gonum.org/v1/gonum/mat/cholesky.go b/vendor/gonum.org/v1/gonum/mat/cholesky.go deleted file mode 100644 index 8f54e10cc..000000000 --- a/vendor/gonum.org/v1/gonum/mat/cholesky.go +++ /dev/null @@ -1,673 +0,0 @@ -// Copyright ©2013 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package mat - -import ( - "math" - - "gonum.org/v1/gonum/blas" - "gonum.org/v1/gonum/blas/blas64" - "gonum.org/v1/gonum/lapack/lapack64" -) - -const ( - badTriangle = "mat: invalid triangle" - badCholesky = "mat: invalid Cholesky factorization" -) - -var ( - _ Matrix = (*Cholesky)(nil) - _ Symmetric = (*Cholesky)(nil) -) - -// Cholesky is a symmetric positive definite matrix represented by its -// Cholesky decomposition. -// -// The decomposition can be constructed using the Factorize method. The -// factorization itself can be extracted using the UTo or LTo methods, and the -// original symmetric matrix can be recovered with ToSym. -// -// Note that this matrix representation is useful for certain operations, in -// particular finding solutions to linear equations. It is very inefficient -// at other operations, in particular At is slow. -// -// Cholesky methods may only be called on a value that has been successfully -// initialized by a call to Factorize that has returned true. Calls to methods -// of an unsuccessful Cholesky factorization will panic. -type Cholesky struct { - // The chol pointer must never be retained as a pointer outside the Cholesky - // struct, either by returning chol outside the struct or by setting it to - // a pointer coming from outside. The same prohibition applies to the data - // slice within chol. - chol *TriDense - cond float64 -} - -// updateCond updates the condition number of the Cholesky decomposition. If -// norm > 0, then that norm is used as the norm of the original matrix A, otherwise -// the norm is estimated from the decomposition. -func (c *Cholesky) updateCond(norm float64) { - n := c.chol.mat.N - work := getFloats(3*n, false) - defer putFloats(work) - if norm < 0 { - // This is an approximation. By the definition of a norm, - // |AB| <= |A| |B|. - // Since A = U^T*U, we get for the condition number κ that - // κ(A) := |A| |A^-1| = |U^T*U| |A^-1| <= |U^T| |U| |A^-1|, - // so this will overestimate the condition number somewhat. - // The norm of the original factorized matrix cannot be stored - // because of update possibilities. 
- unorm := lapack64.Lantr(CondNorm, c.chol.mat, work) - lnorm := lapack64.Lantr(CondNormTrans, c.chol.mat, work) - norm = unorm * lnorm - } - sym := c.chol.asSymBlas() - iwork := getInts(n, false) - v := lapack64.Pocon(sym, norm, work, iwork) - putInts(iwork) - c.cond = 1 / v -} - -// Dims returns the dimensions of the matrix. -func (ch *Cholesky) Dims() (r, c int) { - if !ch.valid() { - panic(badCholesky) - } - r, c = ch.chol.Dims() - return r, c -} - -// At returns the element at row i, column j. -func (c *Cholesky) At(i, j int) float64 { - if !c.valid() { - panic(badCholesky) - } - n := c.Symmetric() - if uint(i) >= uint(n) { - panic(ErrRowAccess) - } - if uint(j) >= uint(n) { - panic(ErrColAccess) - } - - var val float64 - for k := 0; k <= min(i, j); k++ { - val += c.chol.at(k, i) * c.chol.at(k, j) - } - return val -} - -// T returns the receiver, the transpose of a symmetric matrix. -func (c *Cholesky) T() Matrix { - return c -} - -// Symmetric implements the Symmetric interface and returns the number of rows -// in the matrix (this is also the number of columns). -func (c *Cholesky) Symmetric() int { - r, _ := c.chol.Dims() - return r -} - -// Cond returns the condition number of the factorized matrix. -func (c *Cholesky) Cond() float64 { - if !c.valid() { - panic(badCholesky) - } - return c.cond -} - -// Factorize calculates the Cholesky decomposition of the matrix A and returns -// whether the matrix is positive definite. If Factorize returns false, the -// factorization must not be used. -func (c *Cholesky) Factorize(a Symmetric) (ok bool) { - n := a.Symmetric() - if c.chol == nil { - c.chol = NewTriDense(n, Upper, nil) - } else { - c.chol = NewTriDense(n, Upper, use(c.chol.mat.Data, n*n)) - } - copySymIntoTriangle(c.chol, a) - - sym := c.chol.asSymBlas() - work := getFloats(c.chol.mat.N, false) - norm := lapack64.Lansy(CondNorm, sym, work) - putFloats(work) - _, ok = lapack64.Potrf(sym) - if ok { - c.updateCond(norm) - } else { - c.Reset() - } - return ok -} - -// Reset resets the factorization so that it can be reused as the receiver of a -// dimensionally restricted operation. -func (c *Cholesky) Reset() { - if c.chol != nil { - c.chol.Reset() - } - c.cond = math.Inf(1) -} - -// SetFromU sets the Cholesky decomposition from the given triangular matrix. -// SetFromU panics if t is not upper triangular. Note that t is copied into, -// not stored inside, the receiver. -func (c *Cholesky) SetFromU(t Triangular) { - n, kind := t.Triangle() - if kind != Upper { - panic("cholesky: matrix must be upper triangular") - } - if c.chol == nil { - c.chol = NewTriDense(n, Upper, nil) - } else { - c.chol = NewTriDense(n, Upper, use(c.chol.mat.Data, n*n)) - } - c.chol.Copy(t) - c.updateCond(-1) -} - -// Clone makes a copy of the input Cholesky into the receiver, overwriting the -// previous value of the receiver. Clone does not place any restrictions on receiver -// shape. Clone panics if the input Cholesky is not the result of a valid decomposition. -func (c *Cholesky) Clone(chol *Cholesky) { - if !chol.valid() { - panic(badCholesky) - } - n := chol.Symmetric() - if c.chol == nil { - c.chol = NewTriDense(n, Upper, nil) - } else { - c.chol = NewTriDense(n, Upper, use(c.chol.mat.Data, n*n)) - } - c.chol.Copy(chol.chol) - c.cond = chol.cond -} -
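Tying Factorize to the solve methods defined below, a minimal hedged usage sketch (illustrative only; it relies solely on the Cholesky API shown in this file):

package main

import (
	"fmt"

	"gonum.org/v1/gonum/mat"
)

func main() {
	// Sketch: a small symmetric positive definite matrix.
	a := mat.NewSymDense(2, []float64{
		4, 1,
		1, 3,
	})
	var ch mat.Cholesky
	if !ch.Factorize(a) {
		panic("matrix is not positive definite")
	}
	// Solve a*x = b using the factorization.
	b := mat.NewVecDense(2, []float64{1, 2})
	var x mat.VecDense
	if err := ch.SolveVecTo(&x, b); err != nil {
		fmt.Println("ill-conditioned system:", err)
	}
	fmt.Println(mat.Formatted(&x))
}

-// Det returns the determinant of the matrix that has been factorized.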
-func (c *Cholesky) Det() float64 { - if !c.valid() { - panic(badCholesky) - } - return math.Exp(c.LogDet()) -} - -// LogDet returns the log of the determinant of the matrix that has been factorized. -func (c *Cholesky) LogDet() float64 { - if !c.valid() { - panic(badCholesky) - } - var det float64 - for i := 0; i < c.chol.mat.N; i++ { - det += 2 * math.Log(c.chol.mat.Data[i*c.chol.mat.Stride+i]) - } - return det -} - -// SolveTo finds the matrix X that solves A * X = B where A is represented -// by the Cholesky decomposition. The result is stored in-place into dst. -func (c *Cholesky) SolveTo(dst *Dense, b Matrix) error { - if !c.valid() { - panic(badCholesky) - } - n := c.chol.mat.N - bm, bn := b.Dims() - if n != bm { - panic(ErrShape) - } - - dst.reuseAs(bm, bn) - if b != dst { - dst.Copy(b) - } - lapack64.Potrs(c.chol.mat, dst.mat) - if c.cond > ConditionTolerance { - return Condition(c.cond) - } - return nil -} - -// SolveCholTo finds the matrix X that solves A * X = B where A and B are represented -// by their Cholesky decompositions a and b. The result is stored in-place into -// dst. -func (a *Cholesky) SolveCholTo(dst *Dense, b *Cholesky) error { - if !a.valid() || !b.valid() { - panic(badCholesky) - } - bn := b.chol.mat.N - if a.chol.mat.N != bn { - panic(ErrShape) - } - - dst.reuseAsZeroed(bn, bn) - dst.Copy(b.chol.T()) - blas64.Trsm(blas.Left, blas.Trans, 1, a.chol.mat, dst.mat) - blas64.Trsm(blas.Left, blas.NoTrans, 1, a.chol.mat, dst.mat) - blas64.Trmm(blas.Right, blas.NoTrans, 1, b.chol.mat, dst.mat) - if a.cond > ConditionTolerance { - return Condition(a.cond) - } - return nil -} - -// SolveVecTo finds the vector X that solves A * x = b where A is represented -// by the Cholesky decomposition. The result is stored in-place into -// dst. -func (c *Cholesky) SolveVecTo(dst *VecDense, b Vector) error { - if !c.valid() { - panic(badCholesky) - } - n := c.chol.mat.N - if br, bc := b.Dims(); br != n || bc != 1 { - panic(ErrShape) - } - switch rv := b.(type) { - default: - dst.reuseAs(n) - return c.SolveTo(dst.asDense(), b) - case RawVectorer: - bmat := rv.RawVector() - if dst != b { - dst.checkOverlap(bmat) - } - dst.reuseAs(n) - if dst != b { - dst.CopyVec(b) - } - lapack64.Potrs(c.chol.mat, dst.asGeneral()) - if c.cond > ConditionTolerance { - return Condition(c.cond) - } - return nil - } -} - -// RawU returns the Triangular matrix used to store the Cholesky decomposition of -// the original matrix A. The returned matrix should not be modified. If it is -// modified, the decomposition is invalid and should not be used. -func (c *Cholesky) RawU() Triangular { - return c.chol -} - -// UTo extracts the n×n upper triangular matrix U from a Cholesky -// decomposition into dst and returns the result. If dst is nil a new -// TriDense is allocated. -// A = U^T * U. -func (c *Cholesky) UTo(dst *TriDense) *TriDense { - if !c.valid() { - panic(badCholesky) - } - n := c.chol.mat.N - if dst == nil { - dst = NewTriDense(n, Upper, make([]float64, n*n)) - } else { - dst.reuseAs(n, Upper) - } - dst.Copy(c.chol) - return dst -} - -// LTo extracts the n×n lower triangular matrix L from a Cholesky -// decomposition into dst and returns the result. If dst is nil a new -// TriDense is allocated. -// A = L * L^T. 
-func (c *Cholesky) LTo(dst *TriDense) *TriDense { - if !c.valid() { - panic(badCholesky) - } - n := c.chol.mat.N - if dst == nil { - dst = NewTriDense(n, Lower, make([]float64, n*n)) - } else { - dst.reuseAs(n, Lower) - } - dst.Copy(c.chol.TTri()) - return dst -} - -// ToSym reconstructs the original positive definite matrix given its -// Cholesky decomposition into dst and returns the result. If dst is nil -// a new SymDense is allocated. -func (c *Cholesky) ToSym(dst *SymDense) *SymDense { - if !c.valid() { - panic(badCholesky) - } - n := c.chol.mat.N - if dst == nil { - dst = NewSymDense(n, nil) - } else { - dst.reuseAs(n) - } - // Create a TriDense representing the Cholesky factor U with dst's - // backing slice. - // Operations on u are reflected in s. - u := &TriDense{ - mat: blas64.Triangular{ - Uplo: blas.Upper, - Diag: blas.NonUnit, - N: n, - Data: dst.mat.Data, - Stride: dst.mat.Stride, - }, - cap: n, - } - u.Copy(c.chol) - // Compute the product U^T*U using the algorithm from LAPACK/TESTING/LIN/dpot01.f - a := u.mat.Data - lda := u.mat.Stride - bi := blas64.Implementation() - for k := n - 1; k >= 0; k-- { - a[k*lda+k] = bi.Ddot(k+1, a[k:], lda, a[k:], lda) - if k > 0 { - bi.Dtrmv(blas.Upper, blas.Trans, blas.NonUnit, k, a, lda, a[k:], lda) - } - } - return dst -} - -// InverseTo computes the inverse of the matrix represented by its Cholesky -// factorization and stores the result into s. If the factorized -// matrix is ill-conditioned, a Condition error will be returned. -// Note that matrix inversion is numerically unstable, and should generally be -// avoided where possible, for example by using the Solve routines. -func (c *Cholesky) InverseTo(s *SymDense) error { - if !c.valid() { - panic(badCholesky) - } - s.reuseAs(c.chol.mat.N) - // Create a TriDense representing the Cholesky factor U with the backing - // slice from s. - // Operations on u are reflected in s. - u := &TriDense{ - mat: blas64.Triangular{ - Uplo: blas.Upper, - Diag: blas.NonUnit, - N: s.mat.N, - Data: s.mat.Data, - Stride: s.mat.Stride, - }, - cap: s.mat.N, - } - u.Copy(c.chol) - - _, ok := lapack64.Potri(u.mat) - if !ok { - return Condition(math.Inf(1)) - } - if c.cond > ConditionTolerance { - return Condition(c.cond) - } - return nil -} - -// Scale multiplies the original matrix A by a positive constant using -// its Cholesky decomposition, storing the result in-place into the receiver. -// That is, if the original Cholesky factorization is -// U^T * U = A -// the updated factorization is -// U'^T * U' = f A = A' -// Scale panics if the constant is non-positive, or if the receiver is non-zero -// and is of a different size from the input. -func (c *Cholesky) Scale(f float64, orig *Cholesky) { - if !orig.valid() { - panic(badCholesky) - } - if f <= 0 { - panic("cholesky: scaling by a non-positive constant") - } - n := orig.Symmetric() - if c.chol == nil { - c.chol = NewTriDense(n, Upper, nil) - } else if c.chol.mat.N != n { - panic(ErrShape) - } - c.chol.ScaleTri(math.Sqrt(f), orig.chol) - c.cond = orig.cond // Scaling by a positive constant does not change the condition number. -} - -// ExtendVecSym computes the Cholesky decomposition of the original matrix A, -// whose Cholesky decomposition is in a, extended by the n×1 vector v according to -// [A w] -// [w' k] -// where k = v[n-1] and w = v[:n-1]. The result is stored into the receiver. -// In order for the updated matrix to be positive definite, it must be the case -// that k > w' A^-1 w.
If this condition does not hold then ExtendVecSym will -// return false and the receiver will not be updated. -// -// ExtendVecSym will panic if v.Len() != a.Symmetric()+1 or if a does not contain -// a valid decomposition. -func (c *Cholesky) ExtendVecSym(a *Cholesky, v Vector) (ok bool) { - n := a.Symmetric() - - if v.Len() != n+1 { - panic(badSliceLength) - } - if !a.valid() { - panic(badCholesky) - } - - // The algorithm is commented here, but see also - // https://math.stackexchange.com/questions/955874/cholesky-factor-when-adding-a-row-and-column-to-already-factorized-matrix - // We have A and want to compute the Cholesky of - // [A w] - // [w' k] - // We want - // [U c] - // [0 d] - // to be the updated Cholesky, and so it must be that - // [A w] = [U' 0] [U c] - // [w' k] [c' d] [0 d] - // Thus, we need - // 1) A = U'U (true by the original decomposition being valid), - // 2) U' * c = w => c = U'^-1 w - // 3) c'*c + d'*d = k => d = sqrt(k-c'*c) - - // First, compute c = U'^-1 w - // TODO(btracey): Replace this with CopyVec when issue 167 is fixed. - w := NewVecDense(n, nil) - for i := 0; i < n; i++ { - w.SetVec(i, v.At(i, 0)) - } - k := v.At(n, 0) - - var t VecDense - t.SolveVec(a.chol.T(), w) - - dot := Dot(&t, &t) - if dot >= k { - return false - } - d := math.Sqrt(k - dot) - - newU := NewTriDense(n+1, Upper, nil) - newU.Copy(a.chol) - for i := 0; i < n; i++ { - newU.SetTri(i, n, t.At(i, 0)) - } - newU.SetTri(n, n, d) - c.chol = newU - c.updateCond(-1) - return true -} - -// SymRankOne performs a rank-1 update of the original matrix A and refactorizes -// its Cholesky factorization, storing the result into the receiver. That is, if -// in the original Cholesky factorization -// U^T * U = A, -// in the updated factorization -// U'^T * U' = A + alpha * x * x^T = A'. -// -// Note that when alpha is negative, the updating problem may be ill-conditioned -// and the results may be inaccurate, or the updated matrix A' may not be -// positive definite and not have a Cholesky factorization. SymRankOne returns -// whether the updated matrix A' is positive definite. -// -// SymRankOne updates a Cholesky factorization in O(n²) time. The Cholesky -// factorization computation from scratch is O(n³). -func (c *Cholesky) SymRankOne(orig *Cholesky, alpha float64, x Vector) (ok bool) { - if !orig.valid() { - panic(badCholesky) - } - n := orig.Symmetric() - if r, c := x.Dims(); r != n || c != 1 { - panic(ErrShape) - } - if orig != c { - if c.chol == nil { - c.chol = NewTriDense(n, Upper, nil) - } else if c.chol.mat.N != n { - panic(ErrShape) - } - c.chol.Copy(orig.chol) - } - - if alpha == 0 { - return true - } - - // Algorithms for updating and downdating the Cholesky factorization are - // described, for example, in - // - J. J. Dongarra, J. R. Bunch, C. B. Moler, G. W. Stewart: LINPACK - // Users' Guide. SIAM (1979), pages 10.10--10.14 - // or - // - P. E. Gill, G. H. Golub, W. Murray, and M. A. Saunders: Methods for - // modifying matrix factorizations. Mathematics of Computation 28(126) - // (1974), Method C3 on page 521 - // - // The implementation is based on LINPACK code - // http://www.netlib.org/linpack/dchud.f - // http://www.netlib.org/linpack/dchdd.f - // and - // https://icl.cs.utk.edu/lapack-forum/viewtopic.php?f=2&t=2646 - // - // According to http://icl.cs.utk.edu/lapack-forum/archives/lapack/msg00301.html - // LINPACK is released under BSD license. - // - // See also: - // - M. A. Saunders: Large-scale Linear Programming Using the Cholesky - // Factorization.
Technical Report Stanford University (1972) - // http://i.stanford.edu/pub/cstr/reports/cs/tr/72/252/CS-TR-72-252.pdf - // - Matthias Seeger: Low rank updates for the Cholesky decomposition. - // EPFL Technical Report 161468 (2004) - // http://infoscience.epfl.ch/record/161468 - - work := getFloats(n, false) - defer putFloats(work) - var xmat blas64.Vector - if rv, ok := x.(RawVectorer); ok { - xmat = rv.RawVector() - } else { - var tmp VecDense - tmp.CopyVec(x) - xmat = tmp.RawVector() - } - blas64.Copy(xmat, blas64.Vector{N: n, Data: work, Inc: 1}) - - if alpha > 0 { - // Compute rank-1 update. - if alpha != 1 { - blas64.Scal(math.Sqrt(alpha), blas64.Vector{N: n, Data: work, Inc: 1}) - } - umat := c.chol.mat - stride := umat.Stride - for i := 0; i < n; i++ { - // Compute parameters of the Givens matrix that zeroes - // the i-th element of x. - c, s, r, _ := blas64.Rotg(umat.Data[i*stride+i], work[i]) - if r < 0 { - // Multiply by -1 to have positive diagonal - // elements. - r *= -1 - c *= -1 - s *= -1 - } - umat.Data[i*stride+i] = r - if i < n-1 { - // Multiply the extended factorization matrix by - // the Givens matrix from the left. Only - // the i-th row and x are modified. - blas64.Rot( - blas64.Vector{N: n - i - 1, Data: umat.Data[i*stride+i+1 : i*stride+n], Inc: 1}, - blas64.Vector{N: n - i - 1, Data: work[i+1 : n], Inc: 1}, - c, s) - } - } - c.updateCond(-1) - return true - } - - // Compute rank-1 downdate. - alpha = math.Sqrt(-alpha) - if alpha != 1 { - blas64.Scal(alpha, blas64.Vector{N: n, Data: work, Inc: 1}) - } - // Solve U^T * p = x storing the result into work. - ok = lapack64.Trtrs(blas.Trans, c.chol.RawTriangular(), blas64.General{ - Rows: n, - Cols: 1, - Stride: 1, - Data: work, - }) - if !ok { - // The original matrix is singular. Should not happen, because - // the factorization is valid. - panic(badCholesky) - } - norm := blas64.Nrm2(blas64.Vector{N: n, Data: work, Inc: 1}) - if norm >= 1 { - // The updated matrix is not positive definite. - return false - } - norm = math.Sqrt((1 + norm) * (1 - norm)) - cos := getFloats(n, false) - defer putFloats(cos) - sin := getFloats(n, false) - defer putFloats(sin) - for i := n - 1; i >= 0; i-- { - // Compute parameters of Givens matrices that zero elements of p - // backwards. - cos[i], sin[i], norm, _ = blas64.Rotg(norm, work[i]) - if norm < 0 { - norm *= -1 - cos[i] *= -1 - sin[i] *= -1 - } - } - umat := c.chol.mat - stride := umat.Stride - for i := n - 1; i >= 0; i-- { - work[i] = 0 - // Apply Givens matrices to U. - // TODO(vladimir-ch): Use workspace to avoid modifying the - // receiver in case an invalid factorization is created. - blas64.Rot( - blas64.Vector{N: n - i, Data: work[i:n], Inc: 1}, - blas64.Vector{N: n - i, Data: umat.Data[i*stride+i : i*stride+n], Inc: 1}, - cos[i], sin[i]) - if umat.Data[i*stride+i] == 0 { - // The matrix is singular (may rarely happen due to - // floating-point effects?). - ok = false - } else if umat.Data[i*stride+i] < 0 { - // Diagonal elements should be positive. If it happens - // that on the i-th row the diagonal is negative, - // multiply U from the left by an identity matrix that - // has -1 on the i-th row.
- blas64.Scal(-1, blas64.Vector{N: n - i, Data: umat.Data[i*stride+i : i*stride+n], Inc: 1}) - } - } - if ok { - c.updateCond(-1) - } else { - c.Reset() - } - return ok -} - -func (c *Cholesky) valid() bool { - return c.chol != nil && !c.chol.IsZero() -} diff --git a/vendor/gonum.org/v1/gonum/mat/cmatrix.go b/vendor/gonum.org/v1/gonum/mat/cmatrix.go deleted file mode 100644 index 6219c28aa..000000000 --- a/vendor/gonum.org/v1/gonum/mat/cmatrix.go +++ /dev/null @@ -1,210 +0,0 @@ -// Copyright ©2013 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package mat - -import ( - "math" - "math/cmplx" - - "gonum.org/v1/gonum/floats" -) - -// CMatrix is the basic matrix interface type for complex matrices. -type CMatrix interface { - // Dims returns the dimensions of a Matrix. - Dims() (r, c int) - - // At returns the value of a matrix element at row i, column j. - // It will panic if i or j are out of bounds for the matrix. - At(i, j int) complex128 - - // H returns the conjugate transpose of the Matrix. Whether H - // returns a copy of the underlying data is implementation dependent. - // This method may be implemented using the Conjugate type, which - // provides an implicit matrix conjugate transpose. - H() CMatrix -} - -var ( - _ CMatrix = Conjugate{} - _ Unconjugator = Conjugate{} -) - -// Conjugate is a type for performing an implicit matrix conjugate transpose. -// It implements the Matrix interface, returning values from the conjugate -// transpose of the matrix within. -type Conjugate struct { - CMatrix CMatrix -} - -// At returns the value of the element at row i and column j of the conjugate -// transposed matrix, that is, row j and column i of the Matrix field. -func (t Conjugate) At(i, j int) complex128 { - z := t.CMatrix.At(j, i) - return cmplx.Conj(z) -} - -// Dims returns the dimensions of the transposed matrix. The number of rows returned -// is the number of columns in the Matrix field, and the number of columns is -// the number of rows in the Matrix field. -func (t Conjugate) Dims() (r, c int) { - c, r = t.CMatrix.Dims() - return r, c -} - -// H performs an implicit conjugate transpose by returning the Matrix field. -func (t Conjugate) H() CMatrix { - return t.CMatrix -} - -// Unconjugate returns the Matrix field. -func (t Conjugate) Unconjugate() CMatrix { - return t.CMatrix -} - -// Unconjugator is a type that can undo an implicit conjugate transpose. -type Unconjugator interface { - // Note: This interface is needed to unify all of the Conjugate types. In - // the cmat128 methods, we need to test if the Matrix has been implicitly - // transposed. If this is checked by testing for the specific Conjugate type - // then the behavior will be different if the user uses H() or HTri() for a - // triangular matrix. - - // Unconjugate returns the underlying Matrix stored for the implicit - // conjugate transpose. - Unconjugate() CMatrix -} - -// useC returns a complex128 slice with l elements, using c if it -// has the necessary capacity, otherwise creating a new slice. -func useC(c []complex128, l int) []complex128 { - if l <= cap(c) { - return c[:l] - } - return make([]complex128, l) -} - -// useZeroedC returns a complex128 slice with l elements, using c if it -// has the necessary capacity, otherwise creating a new slice. The -// elements of the returned slice are guaranteed to be zero. 
-func useZeroedC(c []complex128, l int) []complex128 { - if l <= cap(c) { - c = c[:l] - zeroC(c) - return c - } - return make([]complex128, l) -} - -// zeroC zeros the given slice's elements. -func zeroC(c []complex128) { - for i := range c { - c[i] = 0 - } -} - -// unconjugate unconjugates a matrix if applicable. If a is an Unconjugator, then -// unconjugate returns the underlying matrix and true. If it is not, then it returns -// the input matrix and false. -func unconjugate(a CMatrix) (CMatrix, bool) { - if ut, ok := a.(Unconjugator); ok { - return ut.Unconjugate(), true - } - return a, false -} - -// CEqual returns whether the matrices a and b have the same size -// and are element-wise equal. -func CEqual(a, b CMatrix) bool { - ar, ac := a.Dims() - br, bc := b.Dims() - if ar != br || ac != bc { - return false - } - // TODO(btracey): Add in fast-paths. - for i := 0; i < ar; i++ { - for j := 0; j < ac; j++ { - if a.At(i, j) != b.At(i, j) { - return false - } - } - } - return true -} - -// CEqualApprox returns whether the matrices a and b have the same size and contain all equal -// elements with tolerance for element-wise equality specified by epsilon. Matrices -// with non-equal shapes are not equal. -func CEqualApprox(a, b CMatrix, epsilon float64) bool { - // TODO(btracey): - ar, ac := a.Dims() - br, bc := b.Dims() - if ar != br || ac != bc { - return false - } - for i := 0; i < ar; i++ { - for j := 0; j < ac; j++ { - if !cEqualWithinAbsOrRel(a.At(i, j), b.At(i, j), epsilon, epsilon) { - return false - } - } - } - return true -} - -// TODO(btracey): Move these into a cmplxs if/when we have one. - -func cEqualWithinAbsOrRel(a, b complex128, absTol, relTol float64) bool { - if cEqualWithinAbs(a, b, absTol) { - return true - } - return cEqualWithinRel(a, b, relTol) -} - -// cEqualWithinAbs returns true if a and b have an absolute -// difference of less than tol. -func cEqualWithinAbs(a, b complex128, tol float64) bool { - return a == b || cmplx.Abs(a-b) <= tol -} - -const minNormalFloat64 = 2.2250738585072014e-308 - -// cEqualWithinRel returns true if the difference between a and b -// is not greater than tol times the greater value. -func cEqualWithinRel(a, b complex128, tol float64) bool { - if a == b { - return true - } - if cmplx.IsNaN(a) || cmplx.IsNaN(b) { - return false - } - // Cannot play the same trick as in floats because there are multiple - // possible infinities. - if cmplx.IsInf(a) { - if !cmplx.IsInf(b) { - return false - } - ra := real(a) - if math.IsInf(ra, 0) { - if ra == real(b) { - return floats.EqualWithinRel(imag(a), imag(b), tol) - } - return false - } - if imag(a) == imag(b) { - return floats.EqualWithinRel(ra, real(b), tol) - } - return false - } - if cmplx.IsInf(b) { - return false - } - - delta := cmplx.Abs(a - b) - if delta <= minNormalFloat64 { - return delta <= tol*minNormalFloat64 - } - return delta/math.Max(cmplx.Abs(a), cmplx.Abs(b)) <= tol -} diff --git a/vendor/gonum.org/v1/gonum/mat/consts.go b/vendor/gonum.org/v1/gonum/mat/consts.go deleted file mode 100644 index 3de3f5bf4..000000000 --- a/vendor/gonum.org/v1/gonum/mat/consts.go +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright ©2016 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package mat - -// TriKind represents the triangularity of the matrix. -type TriKind bool - -const ( - // Upper specifies an upper triangular matrix. - Upper TriKind = true - // Lower specifies a lower triangular matrix. 
- Lower TriKind = false -) diff --git a/vendor/gonum.org/v1/gonum/mat/dense.go b/vendor/gonum.org/v1/gonum/mat/dense.go deleted file mode 100644 index f0bcd2a94..000000000 --- a/vendor/gonum.org/v1/gonum/mat/dense.go +++ /dev/null @@ -1,558 +0,0 @@ -// Copyright ©2013 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package mat - -import ( - "gonum.org/v1/gonum/blas" - "gonum.org/v1/gonum/blas/blas64" -) - -var ( - dense *Dense - - _ Matrix = dense - _ Mutable = dense - - _ ClonerFrom = dense - _ RowViewer = dense - _ ColViewer = dense - _ RawRowViewer = dense - _ Grower = dense - - _ RawMatrixSetter = dense - _ RawMatrixer = dense - - _ Reseter = dense -) - -// Dense is a dense matrix representation. -type Dense struct { - mat blas64.General - - capRows, capCols int -} - -// NewDense creates a new Dense matrix with r rows and c columns. If data == nil, -// a new slice is allocated for the backing slice. If len(data) == r*c, data is -// used as the backing slice, and changes to the elements of the returned Dense -// will be reflected in data. If neither of these is true, NewDense will panic. -// NewDense will panic if either r or c is zero. -// -// The data must be arranged in row-major order, i.e. the (i*c + j)-th -// element in the data slice is the {i, j}-th element in the matrix. -func NewDense(r, c int, data []float64) *Dense { - if r <= 0 || c <= 0 { - if r == 0 || c == 0 { - panic(ErrZeroLength) - } - panic("mat: negative dimension") - } - if data != nil && r*c != len(data) { - panic(ErrShape) - } - if data == nil { - data = make([]float64, r*c) - } - return &Dense{ - mat: blas64.General{ - Rows: r, - Cols: c, - Stride: c, - Data: data, - }, - capRows: r, - capCols: c, - } -} - -// reuseAs resizes an empty matrix to a r×c matrix, -// or checks that a non-empty matrix is r×c. -// -// reuseAs must be kept in sync with reuseAsZeroed. -func (m *Dense) reuseAs(r, c int) { - if m.mat.Rows > m.capRows || m.mat.Cols > m.capCols { - // Panic as a string, not a mat.Error. - panic("mat: caps not correctly set") - } - if r == 0 || c == 0 { - panic(ErrZeroLength) - } - if m.IsZero() { - m.mat = blas64.General{ - Rows: r, - Cols: c, - Stride: c, - Data: use(m.mat.Data, r*c), - } - m.capRows = r - m.capCols = c - return - } - if r != m.mat.Rows || c != m.mat.Cols { - panic(ErrShape) - } -} - -// reuseAsZeroed resizes an empty matrix to a r×c matrix, -// or checks that a non-empty matrix is r×c. It zeroes -// all the elements of the matrix. -// -// reuseAsZeroed must be kept in sync with reuseAs. -func (m *Dense) reuseAsZeroed(r, c int) { - if m.mat.Rows > m.capRows || m.mat.Cols > m.capCols { - // Panic as a string, not a mat.Error. - panic("mat: caps not correctly set") - } - if r == 0 || c == 0 { - panic(ErrZeroLength) - } - if m.IsZero() { - m.mat = blas64.General{ - Rows: r, - Cols: c, - Stride: c, - Data: useZeroed(m.mat.Data, r*c), - } - m.capRows = r - m.capCols = c - return - } - if r != m.mat.Rows || c != m.mat.Cols { - panic(ErrShape) - } - m.Zero() -} - -// Zero sets all of the matrix elements to zero. -func (m *Dense) Zero() { - r := m.mat.Rows - c := m.mat.Cols - for i := 0; i < r; i++ { - zero(m.mat.Data[i*m.mat.Stride : i*m.mat.Stride+c]) - } -} - -// isolatedWorkspace returns a new dense matrix w with the size of a and -// returns a callback to defer which performs cleanup at the return of the call. 
-// This should be used when a method receiver is the same pointer as an input argument. -func (m *Dense) isolatedWorkspace(a Matrix) (w *Dense, restore func()) { - r, c := a.Dims() - if r == 0 || c == 0 { - panic(ErrZeroLength) - } - w = getWorkspace(r, c, false) - return w, func() { - m.Copy(w) - putWorkspace(w) - } -} - -// Reset zeros the dimensions of the matrix so that it can be reused as the -// receiver of a dimensionally restricted operation. -// -// See the Reseter interface for more information. -func (m *Dense) Reset() { - // Row, Cols and Stride must be zeroed in unison. - m.mat.Rows, m.mat.Cols, m.mat.Stride = 0, 0, 0 - m.capRows, m.capCols = 0, 0 - m.mat.Data = m.mat.Data[:0] -} - -// IsZero returns whether the receiver is zero-sized. Zero-sized matrices can be the -// receiver for size-restricted operations. Dense matrices can be zeroed using Reset. -func (m *Dense) IsZero() bool { - // It must be the case that m.Dims() returns - // zeros in this case. See comment in Reset(). - return m.mat.Stride == 0 -} - -// asTriDense returns a TriDense with the given size and side. The backing data -// of the TriDense is the same as the receiver. -func (m *Dense) asTriDense(n int, diag blas.Diag, uplo blas.Uplo) *TriDense { - return &TriDense{ - mat: blas64.Triangular{ - N: n, - Stride: m.mat.Stride, - Data: m.mat.Data, - Uplo: uplo, - Diag: diag, - }, - cap: n, - } -} - -// DenseCopyOf returns a newly allocated copy of the elements of a. -func DenseCopyOf(a Matrix) *Dense { - d := &Dense{} - d.CloneFrom(a) - return d -} - -// SetRawMatrix sets the underlying blas64.General used by the receiver. -// Changes to elements in the receiver following the call will be reflected -// in b. -func (m *Dense) SetRawMatrix(b blas64.General) { - m.capRows, m.capCols = b.Rows, b.Cols - m.mat = b -} - -// RawMatrix returns the underlying blas64.General used by the receiver. -// Changes to elements in the receiver following the call will be reflected -// in returned blas64.General. -func (m *Dense) RawMatrix() blas64.General { return m.mat } - -// Dims returns the number of rows and columns in the matrix. -func (m *Dense) Dims() (r, c int) { return m.mat.Rows, m.mat.Cols } - -// Caps returns the number of rows and columns in the backing matrix. -func (m *Dense) Caps() (r, c int) { return m.capRows, m.capCols } - -// T performs an implicit transpose by returning the receiver inside a Transpose. -func (m *Dense) T() Matrix { - return Transpose{m} -} - -// ColView returns a Vector reflecting the column j, backed by the matrix data. -// -// See ColViewer for more information. -func (m *Dense) ColView(j int) Vector { - var v VecDense - v.ColViewOf(m, j) - return &v -} - -// SetCol sets the values in the specified column of the matrix to the values -// in src. len(src) must equal the number of rows in the receiver. -func (m *Dense) SetCol(j int, src []float64) { - if j >= m.mat.Cols || j < 0 { - panic(ErrColAccess) - } - if len(src) != m.mat.Rows { - panic(ErrColLength) - } - - blas64.Copy( - blas64.Vector{N: m.mat.Rows, Inc: 1, Data: src}, - blas64.Vector{N: m.mat.Rows, Inc: m.mat.Stride, Data: m.mat.Data[j:]}, - ) -} -
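A short hedged sketch of the column accessors just above (illustrative only): ColView shares the matrix's backing data, while SetCol copies from src.

package main

import (
	"fmt"

	"gonum.org/v1/gonum/mat"
)

func main() {
	m := mat.NewDense(2, 3, []float64{
		1, 2, 3,
		4, 5, 6,
	})
	m.SetCol(1, []float64{20, 50}) // len(src) must equal the row count
	col := m.ColView(1)            // a vector view backed by m's data
	fmt.Println(col.AtVec(0), col.AtVec(1)) // 20 50
}

-// SetRow sets the values in the specified row of the matrix to the values -// in src. len(src) must equal the number of columns in the receiver.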
-func (m *Dense) SetRow(i int, src []float64) { - if i >= m.mat.Rows || i < 0 { - panic(ErrRowAccess) - } - if len(src) != m.mat.Cols { - panic(ErrRowLength) - } - - copy(m.rawRowView(i), src) -} - -// RowView returns row i of the matrix data represented as a column vector, -// backed by the matrix data. -// -// See RowViewer for more information. -func (m *Dense) RowView(i int) Vector { - var v VecDense - v.RowViewOf(m, i) - return &v -} - -// RawRowView returns a slice backed by the same array as backing the -// receiver. -func (m *Dense) RawRowView(i int) []float64 { - if i >= m.mat.Rows || i < 0 { - panic(ErrRowAccess) - } - return m.rawRowView(i) -} - -func (m *Dense) rawRowView(i int) []float64 { - return m.mat.Data[i*m.mat.Stride : i*m.mat.Stride+m.mat.Cols] -} - -// DiagView returns the diagonal as a matrix backed by the original data. -func (m *Dense) DiagView() Diagonal { - n := min(m.mat.Rows, m.mat.Cols) - return &DiagDense{ - mat: blas64.Vector{ - N: n, - Inc: m.mat.Stride + 1, - Data: m.mat.Data[:(n-1)*m.mat.Stride+n], - }, - } -} - -// Slice returns a new Matrix that shares backing data with the receiver. -// The returned matrix starts at {i,j} of the receiver and extends k-i rows -// and l-j columns. The final row in the resulting matrix is k-1 and the -// final column is l-1. -// Slice panics with ErrIndexOutOfRange if the slice is outside the capacity -// of the receiver. -func (m *Dense) Slice(i, k, j, l int) Matrix { - mr, mc := m.Caps() - if i < 0 || mr <= i || j < 0 || mc <= j || k < i || mr < k || l < j || mc < l { - if i == k || j == l { - panic(ErrZeroLength) - } - panic(ErrIndexOutOfRange) - } - t := *m - t.mat.Data = t.mat.Data[i*t.mat.Stride+j : (k-1)*t.mat.Stride+l] - t.mat.Rows = k - i - t.mat.Cols = l - j - t.capRows -= i - t.capCols -= j - return &t -} - -// Grow returns the receiver expanded by r rows and c columns. If the dimensions -// of the expanded matrix are outside the capacities of the receiver a new -// allocation is made, otherwise not. Note the receiver itself is not modified -// during the call to Grow. -func (m *Dense) Grow(r, c int) Matrix { - if r < 0 || c < 0 { - panic(ErrIndexOutOfRange) - } - if r == 0 && c == 0 { - return m - } - - r += m.mat.Rows - c += m.mat.Cols - - var t Dense - switch { - case m.mat.Rows == 0 || m.mat.Cols == 0: - t.mat = blas64.General{ - Rows: r, - Cols: c, - Stride: c, - // We zero because we don't know how the matrix will be used. - // In other places, the mat is immediately filled with a result; - // this is not the case here. - Data: useZeroed(m.mat.Data, r*c), - } - case r > m.capRows || c > m.capCols: - cr := max(r, m.capRows) - cc := max(c, m.capCols) - t.mat = blas64.General{ - Rows: r, - Cols: c, - Stride: cc, - Data: make([]float64, cr*cc), - } - t.capRows = cr - t.capCols = cc - // Copy the complete matrix over to the new matrix. - // Including elements not currently visible. Use a temporary structure - // to avoid modifying the receiver. - var tmp Dense - tmp.mat = blas64.General{ - Rows: m.mat.Rows, - Cols: m.mat.Cols, - Stride: m.mat.Stride, - Data: m.mat.Data, - } - tmp.capRows = m.capRows - tmp.capCols = m.capCols - t.Copy(&tmp) - return &t - default: - t.mat = blas64.General{ - Data: m.mat.Data[:(r-1)*m.mat.Stride+c], - Rows: r, - Cols: c, - Stride: m.mat.Stride, - } - } - t.capRows = r - t.capCols = c - return &t -} - -// CloneFrom makes a copy of a into the receiver, overwriting the previous value of -// the receiver. 
The clone from operation does not make any restriction on shape and -// will not cause shadowing. -// -// See the ClonerFrom interface for more information. -func (m *Dense) CloneFrom(a Matrix) { - r, c := a.Dims() - mat := blas64.General{ - Rows: r, - Cols: c, - Stride: c, - } - m.capRows, m.capCols = r, c - - aU, trans := untranspose(a) - switch aU := aU.(type) { - case RawMatrixer: - amat := aU.RawMatrix() - mat.Data = make([]float64, r*c) - if trans { - for i := 0; i < r; i++ { - blas64.Copy(blas64.Vector{N: c, Inc: amat.Stride, Data: amat.Data[i : i+(c-1)*amat.Stride+1]}, - blas64.Vector{N: c, Inc: 1, Data: mat.Data[i*c : (i+1)*c]}) - } - } else { - for i := 0; i < r; i++ { - copy(mat.Data[i*c:(i+1)*c], amat.Data[i*amat.Stride:i*amat.Stride+c]) - } - } - case *VecDense: - amat := aU.mat - mat.Data = make([]float64, aU.mat.N) - blas64.Copy(blas64.Vector{N: aU.mat.N, Inc: amat.Inc, Data: amat.Data}, - blas64.Vector{N: aU.mat.N, Inc: 1, Data: mat.Data}) - default: - mat.Data = make([]float64, r*c) - w := *m - w.mat = mat - for i := 0; i < r; i++ { - for j := 0; j < c; j++ { - w.set(i, j, a.At(i, j)) - } - } - *m = w - return - } - m.mat = mat -} - -// Copy makes a copy of elements of a into the receiver. It is similar to the -// built-in copy; it copies as much as the overlap between the two matrices and -// returns the number of rows and columns it copied. If a aliases the receiver -// and is a transposed Dense or VecDense, with a non-unitary increment, Copy will -// panic. -// -// See the Copier interface for more information. -func (m *Dense) Copy(a Matrix) (r, c int) { - r, c = a.Dims() - if a == m { - return r, c - } - r = min(r, m.mat.Rows) - c = min(c, m.mat.Cols) - if r == 0 || c == 0 { - return 0, 0 - } - - aU, trans := untranspose(a) - switch aU := aU.(type) { - case RawMatrixer: - amat := aU.RawMatrix() - if trans { - if amat.Stride != 1 { - m.checkOverlap(amat) - } - for i := 0; i < r; i++ { - blas64.Copy(blas64.Vector{N: c, Inc: amat.Stride, Data: amat.Data[i : i+(c-1)*amat.Stride+1]}, - blas64.Vector{N: c, Inc: 1, Data: m.mat.Data[i*m.mat.Stride : i*m.mat.Stride+c]}) - } - } else { - switch o := offset(m.mat.Data, amat.Data); { - case o < 0: - for i := r - 1; i >= 0; i-- { - copy(m.mat.Data[i*m.mat.Stride:i*m.mat.Stride+c], amat.Data[i*amat.Stride:i*amat.Stride+c]) - } - case o > 0: - for i := 0; i < r; i++ { - copy(m.mat.Data[i*m.mat.Stride:i*m.mat.Stride+c], amat.Data[i*amat.Stride:i*amat.Stride+c]) - } - default: - // Nothing to do. - } - } - case *VecDense: - var n, stride int - amat := aU.mat - if trans { - if amat.Inc != 1 { - m.checkOverlap(aU.asGeneral()) - } - n = c - stride = 1 - } else { - n = r - stride = m.mat.Stride - } - if amat.Inc == 1 && stride == 1 { - copy(m.mat.Data, amat.Data[:n]) - break - } - switch o := offset(m.mat.Data, amat.Data); { - case o < 0: - blas64.Copy(blas64.Vector{N: n, Inc: -amat.Inc, Data: amat.Data}, - blas64.Vector{N: n, Inc: -stride, Data: m.mat.Data}) - case o > 0: - blas64.Copy(blas64.Vector{N: n, Inc: amat.Inc, Data: amat.Data}, - blas64.Vector{N: n, Inc: stride, Data: m.mat.Data}) - default: - // Nothing to do. - } - default: - m.checkOverlapMatrix(aU) - for i := 0; i < r; i++ { - for j := 0; j < c; j++ { - m.set(i, j, a.At(i, j)) - } - } - } - - return r, c -} - -// Stack appends the rows of b onto the rows of a, placing the result into the -// receiver with b placed in the greater indexed rows. 
Stack will panic if the -// two input matrices do not have the same number of columns or the constructed -// stacked matrix is not the same shape as the receiver. -func (m *Dense) Stack(a, b Matrix) { - ar, ac := a.Dims() - br, bc := b.Dims() - if ac != bc || m == a || m == b { - panic(ErrShape) - } - - m.reuseAs(ar+br, ac) - - m.Copy(a) - w := m.Slice(ar, ar+br, 0, bc).(*Dense) - w.Copy(b) -} - -// Augment creates the augmented matrix of a and b, where b is placed in the -// greater indexed columns. Augment will panic if the two input matrices do -// not have the same number of rows or the constructed augmented matrix is -// not the same shape as the receiver. -func (m *Dense) Augment(a, b Matrix) { - ar, ac := a.Dims() - br, bc := b.Dims() - if ar != br || m == a || m == b { - panic(ErrShape) - } - - m.reuseAs(ar, ac+bc) - - m.Copy(a) - w := m.Slice(0, br, ac, ac+bc).(*Dense) - w.Copy(b) -} - -// Trace returns the trace of the matrix. The matrix must be square or Trace -// will panic. -func (m *Dense) Trace() float64 { - if m.mat.Rows != m.mat.Cols { - panic(ErrSquare) - } - // TODO(btracey): could use internal asm sum routine. - var v float64 - for i := 0; i < m.mat.Rows; i++ { - v += m.mat.Data[i*m.mat.Stride+i] - } - return v -} diff --git a/vendor/gonum.org/v1/gonum/mat/dense_arithmetic.go b/vendor/gonum.org/v1/gonum/mat/dense_arithmetic.go deleted file mode 100644 index 7a4afc86a..000000000 --- a/vendor/gonum.org/v1/gonum/mat/dense_arithmetic.go +++ /dev/null @@ -1,880 +0,0 @@ -// Copyright ©2013 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package mat - -import ( - "math" - - "gonum.org/v1/gonum/blas" - "gonum.org/v1/gonum/blas/blas64" - "gonum.org/v1/gonum/lapack/lapack64" -) - -// Add adds a and b element-wise, placing the result in the receiver. Add -// will panic if the two matrices do not have the same shape. -func (m *Dense) Add(a, b Matrix) { - ar, ac := a.Dims() - br, bc := b.Dims() - if ar != br || ac != bc { - panic(ErrShape) - } - - aU, _ := untranspose(a) - bU, _ := untranspose(b) - m.reuseAs(ar, ac) - - if arm, ok := a.(RawMatrixer); ok { - if brm, ok := b.(RawMatrixer); ok { - amat, bmat := arm.RawMatrix(), brm.RawMatrix() - if m != aU { - m.checkOverlap(amat) - } - if m != bU { - m.checkOverlap(bmat) - } - for ja, jb, jm := 0, 0, 0; ja < ar*amat.Stride; ja, jb, jm = ja+amat.Stride, jb+bmat.Stride, jm+m.mat.Stride { - for i, v := range amat.Data[ja : ja+ac] { - m.mat.Data[i+jm] = v + bmat.Data[i+jb] - } - } - return - } - } - - m.checkOverlapMatrix(aU) - m.checkOverlapMatrix(bU) - var restore func() - if m == aU { - m, restore = m.isolatedWorkspace(aU) - defer restore() - } else if m == bU { - m, restore = m.isolatedWorkspace(bU) - defer restore() - } - - for r := 0; r < ar; r++ { - for c := 0; c < ac; c++ { - m.set(r, c, a.At(r, c)+b.At(r, c)) - } - } -} -
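A minimal hedged sketch of the element-wise arithmetic entry points defined in this file (illustrative only; the receiver pattern is the same for Sub, MulElem and DivElem below):

package main

import (
	"fmt"

	"gonum.org/v1/gonum/mat"
)

func main() {
	a := mat.NewDense(2, 2, []float64{1, 2, 3, 4})
	b := mat.NewDense(2, 2, []float64{5, 6, 7, 8})
	var sum mat.Dense
	sum.Add(a, b) // the empty receiver is resized to 2×2 and filled element-wise
	fmt.Println(mat.Formatted(&sum))
}

-// Sub subtracts the matrix b from a, placing the result in the receiver. Sub -// will panic if the two matrices do not have the same shape.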
-func (m *Dense) Sub(a, b Matrix) { - ar, ac := a.Dims() - br, bc := b.Dims() - if ar != br || ac != bc { - panic(ErrShape) - } - - aU, _ := untranspose(a) - bU, _ := untranspose(b) - m.reuseAs(ar, ac) - - if arm, ok := a.(RawMatrixer); ok { - if brm, ok := b.(RawMatrixer); ok { - amat, bmat := arm.RawMatrix(), brm.RawMatrix() - if m != aU { - m.checkOverlap(amat) - } - if m != bU { - m.checkOverlap(bmat) - } - for ja, jb, jm := 0, 0, 0; ja < ar*amat.Stride; ja, jb, jm = ja+amat.Stride, jb+bmat.Stride, jm+m.mat.Stride { - for i, v := range amat.Data[ja : ja+ac] { - m.mat.Data[i+jm] = v - bmat.Data[i+jb] - } - } - return - } - } - - m.checkOverlapMatrix(aU) - m.checkOverlapMatrix(bU) - var restore func() - if m == aU { - m, restore = m.isolatedWorkspace(aU) - defer restore() - } else if m == bU { - m, restore = m.isolatedWorkspace(bU) - defer restore() - } - - for r := 0; r < ar; r++ { - for c := 0; c < ac; c++ { - m.set(r, c, a.At(r, c)-b.At(r, c)) - } - } -} - -// MulElem performs element-wise multiplication of a and b, placing the result -// in the receiver. MulElem will panic if the two matrices do not have the same -// shape. -func (m *Dense) MulElem(a, b Matrix) { - ar, ac := a.Dims() - br, bc := b.Dims() - if ar != br || ac != bc { - panic(ErrShape) - } - - aU, _ := untranspose(a) - bU, _ := untranspose(b) - m.reuseAs(ar, ac) - - if arm, ok := a.(RawMatrixer); ok { - if brm, ok := b.(RawMatrixer); ok { - amat, bmat := arm.RawMatrix(), brm.RawMatrix() - if m != aU { - m.checkOverlap(amat) - } - if m != bU { - m.checkOverlap(bmat) - } - for ja, jb, jm := 0, 0, 0; ja < ar*amat.Stride; ja, jb, jm = ja+amat.Stride, jb+bmat.Stride, jm+m.mat.Stride { - for i, v := range amat.Data[ja : ja+ac] { - m.mat.Data[i+jm] = v * bmat.Data[i+jb] - } - } - return - } - } - - m.checkOverlapMatrix(aU) - m.checkOverlapMatrix(bU) - var restore func() - if m == aU { - m, restore = m.isolatedWorkspace(aU) - defer restore() - } else if m == bU { - m, restore = m.isolatedWorkspace(bU) - defer restore() - } - - for r := 0; r < ar; r++ { - for c := 0; c < ac; c++ { - m.set(r, c, a.At(r, c)*b.At(r, c)) - } - } -} - -// DivElem performs element-wise division of a by b, placing the result -// in the receiver. DivElem will panic if the two matrices do not have the same -// shape. -func (m *Dense) DivElem(a, b Matrix) { - ar, ac := a.Dims() - br, bc := b.Dims() - if ar != br || ac != bc { - panic(ErrShape) - } - - aU, _ := untranspose(a) - bU, _ := untranspose(b) - m.reuseAs(ar, ac) - - if arm, ok := a.(RawMatrixer); ok { - if brm, ok := b.(RawMatrixer); ok { - amat, bmat := arm.RawMatrix(), brm.RawMatrix() - if m != aU { - m.checkOverlap(amat) - } - if m != bU { - m.checkOverlap(bmat) - } - for ja, jb, jm := 0, 0, 0; ja < ar*amat.Stride; ja, jb, jm = ja+amat.Stride, jb+bmat.Stride, jm+m.mat.Stride { - for i, v := range amat.Data[ja : ja+ac] { - m.mat.Data[i+jm] = v / bmat.Data[i+jb] - } - } - return - } - } - - m.checkOverlapMatrix(aU) - m.checkOverlapMatrix(bU) - var restore func() - if m == aU { - m, restore = m.isolatedWorkspace(aU) - defer restore() - } else if m == bU { - m, restore = m.isolatedWorkspace(bU) - defer restore() - } - - for r := 0; r < ar; r++ { - for c := 0; c < ac; c++ { - m.set(r, c, a.At(r, c)/b.At(r, c)) - } - } -} - -// Inverse computes the inverse of the matrix a, storing the result into the -// receiver. If a is ill-conditioned, a Condition error will be returned. 
-// Note that matrix inversion is numerically unstable, and should generally -// be avoided where possible, for example by using the Solve routines. -func (m *Dense) Inverse(a Matrix) error { - // TODO(btracey): Special case for RawTriangular, etc. - r, c := a.Dims() - if r != c { - panic(ErrSquare) - } - m.reuseAs(a.Dims()) - aU, aTrans := untranspose(a) - switch rm := aU.(type) { - case RawMatrixer: - if m != aU || aTrans { - if m == aU || m.checkOverlap(rm.RawMatrix()) { - tmp := getWorkspace(r, c, false) - tmp.Copy(a) - m.Copy(tmp) - putWorkspace(tmp) - break - } - m.Copy(a) - } - default: - m.Copy(a) - } - ipiv := getInts(r, false) - defer putInts(ipiv) - ok := lapack64.Getrf(m.mat, ipiv) - if !ok { - return Condition(math.Inf(1)) - } - work := getFloats(4*r, false) // must be at least 4*r for cond. - lapack64.Getri(m.mat, ipiv, work, -1) - if int(work[0]) > 4*r { - l := int(work[0]) - putFloats(work) - work = getFloats(l, false) - } else { - work = work[:4*r] - } - defer putFloats(work) - lapack64.Getri(m.mat, ipiv, work, len(work)) - norm := lapack64.Lange(CondNorm, m.mat, work) - rcond := lapack64.Gecon(CondNorm, m.mat, norm, work, ipiv) // reuse ipiv - if rcond == 0 { - return Condition(math.Inf(1)) - } - cond := 1 / rcond - if cond > ConditionTolerance { - return Condition(cond) - } - return nil -} - -// Mul takes the matrix product of a and b, placing the result in the receiver. -// If the number of columns in a does not equal the number of rows in b, Mul will panic. -func (m *Dense) Mul(a, b Matrix) { - ar, ac := a.Dims() - br, bc := b.Dims() - - if ac != br { - panic(ErrShape) - } - - aU, aTrans := untranspose(a) - bU, bTrans := untranspose(b) - m.reuseAs(ar, bc) - var restore func() - if m == aU { - m, restore = m.isolatedWorkspace(aU) - defer restore() - } else if m == bU { - m, restore = m.isolatedWorkspace(bU) - defer restore() - } - aT := blas.NoTrans - if aTrans { - aT = blas.Trans - } - bT := blas.NoTrans - if bTrans { - bT = blas.Trans - } - - // Some of the cases do not have a transpose option, so create - // temporary memory. - // C = A^T * B = (B^T * A)^T - // C^T = B^T * A. - if aU, ok := aU.(RawMatrixer); ok { - amat := aU.RawMatrix() - if restore == nil { - m.checkOverlap(amat) - } - switch bU := bU.(type) { - case RawMatrixer: - bmat := bU.RawMatrix() - if restore == nil { - m.checkOverlap(bmat) - } - blas64.Gemm(aT, bT, 1, amat, bmat, 0, m.mat) - return - - case RawSymmetricer: - bmat := bU.RawSymmetric() - if aTrans { - c := getWorkspace(ac, ar, false) - blas64.Symm(blas.Left, 1, bmat, amat, 0, c.mat) - strictCopy(m, c.T()) - putWorkspace(c) - return - } - blas64.Symm(blas.Right, 1, bmat, amat, 0, m.mat) - return - - case RawTriangular: - // Trmm updates in place, so copy aU first. - bmat := bU.RawTriangular() - if aTrans { - c := getWorkspace(ac, ar, false) - var tmp Dense - tmp.SetRawMatrix(amat) - c.Copy(&tmp) - bT := blas.Trans - if bTrans { - bT = blas.NoTrans - } - blas64.Trmm(blas.Left, bT, 1, bmat, c.mat) - strictCopy(m, c.T()) - putWorkspace(c) - return - } - m.Copy(a) - blas64.Trmm(blas.Right, bT, 1, bmat, m.mat) - return - - case *VecDense: - m.checkOverlap(bU.asGeneral()) - bvec := bU.RawVector() - if bTrans { - // {ar,1} x {1,bc}, which is not a vector. - // Instead, construct B as a General. 
- bmat := blas64.General{ - Rows: bc, - Cols: 1, - Stride: bvec.Inc, - Data: bvec.Data, - } - blas64.Gemm(aT, bT, 1, amat, bmat, 0, m.mat) - return - } - cvec := blas64.Vector{ - Inc: m.mat.Stride, - Data: m.mat.Data, - } - blas64.Gemv(aT, 1, amat, bvec, 0, cvec) - return - } - } - if bU, ok := bU.(RawMatrixer); ok { - bmat := bU.RawMatrix() - if restore == nil { - m.checkOverlap(bmat) - } - switch aU := aU.(type) { - case RawSymmetricer: - amat := aU.RawSymmetric() - if bTrans { - c := getWorkspace(bc, br, false) - blas64.Symm(blas.Right, 1, amat, bmat, 0, c.mat) - strictCopy(m, c.T()) - putWorkspace(c) - return - } - blas64.Symm(blas.Left, 1, amat, bmat, 0, m.mat) - return - - case RawTriangular: - // Trmm updates in place, so copy bU first. - amat := aU.RawTriangular() - if bTrans { - c := getWorkspace(bc, br, false) - var tmp Dense - tmp.SetRawMatrix(bmat) - c.Copy(&tmp) - aT := blas.Trans - if aTrans { - aT = blas.NoTrans - } - blas64.Trmm(blas.Right, aT, 1, amat, c.mat) - strictCopy(m, c.T()) - putWorkspace(c) - return - } - m.Copy(b) - blas64.Trmm(blas.Left, aT, 1, amat, m.mat) - return - - case *VecDense: - m.checkOverlap(aU.asGeneral()) - avec := aU.RawVector() - if aTrans { - // {1,ac} x {ac, bc} - // Transpose B so that the vector is on the right. - cvec := blas64.Vector{ - Inc: 1, - Data: m.mat.Data, - } - bT := blas.Trans - if bTrans { - bT = blas.NoTrans - } - blas64.Gemv(bT, 1, bmat, avec, 0, cvec) - return - } - // {ar,1} x {1,bc} which is not a vector result. - // Instead, construct A as a General. - amat := blas64.General{ - Rows: ar, - Cols: 1, - Stride: avec.Inc, - Data: avec.Data, - } - blas64.Gemm(aT, bT, 1, amat, bmat, 0, m.mat) - return - } - } - - m.checkOverlapMatrix(aU) - m.checkOverlapMatrix(bU) - row := getFloats(ac, false) - defer putFloats(row) - for r := 0; r < ar; r++ { - for i := range row { - row[i] = a.At(r, i) - } - for c := 0; c < bc; c++ { - var v float64 - for i, e := range row { - v += e * b.At(i, c) - } - m.mat.Data[r*m.mat.Stride+c] = v - } - } -} - -// strictCopy copies a into m panicking if the shape of a and m differ. -func strictCopy(m *Dense, a Matrix) { - r, c := m.Copy(a) - if r != m.mat.Rows || c != m.mat.Cols { - // Panic with a string since this - // is not a user-facing panic. - panic(ErrShape.Error()) - } -} - -// Exp calculates the exponential of the matrix a, e^a, placing the result -// in the receiver. Exp will panic with matrix.ErrShape if a is not square. -func (m *Dense) Exp(a Matrix) { - // The implementation used here is from Functions of Matrices: Theory and Computation - // Chapter 10, Algorithm 10.20. 
https://doi.org/10.1137/1.9780898717778.ch10 - - r, c := a.Dims() - if r != c { - panic(ErrShape) - } - - m.reuseAs(r, r) - if r == 1 { - m.mat.Data[0] = math.Exp(a.At(0, 0)) - return - } - - pade := []struct { - theta float64 - b []float64 - }{ - {theta: 0.015, b: []float64{ - 120, 60, 12, 1, - }}, - {theta: 0.25, b: []float64{ - 30240, 15120, 3360, 420, 30, 1, - }}, - {theta: 0.95, b: []float64{ - 17297280, 8648640, 1995840, 277200, 25200, 1512, 56, 1, - }}, - {theta: 2.1, b: []float64{ - 17643225600, 8821612800, 2075673600, 302702400, 30270240, 2162160, 110880, 3960, 90, 1, - }}, - } - - a1 := m - a1.Copy(a) - v := getWorkspace(r, r, true) - vraw := v.RawMatrix() - n := r * r - vvec := blas64.Vector{N: n, Inc: 1, Data: vraw.Data} - defer putWorkspace(v) - - u := getWorkspace(r, r, true) - uraw := u.RawMatrix() - uvec := blas64.Vector{N: n, Inc: 1, Data: uraw.Data} - defer putWorkspace(u) - - a2 := getWorkspace(r, r, false) - defer putWorkspace(a2) - - n1 := Norm(a, 1) - for i, t := range pade { - if n1 > t.theta { - continue - } - - // This loop only executes once, so - // this is not as horrible as it looks. - p := getWorkspace(r, r, true) - praw := p.RawMatrix() - pvec := blas64.Vector{N: n, Inc: 1, Data: praw.Data} - defer putWorkspace(p) - - for k := 0; k < r; k++ { - p.set(k, k, 1) - v.set(k, k, t.b[0]) - u.set(k, k, t.b[1]) - } - - a2.Mul(a1, a1) - for j := 0; j <= i; j++ { - p.Mul(p, a2) - blas64.Axpy(t.b[2*j+2], pvec, vvec) - blas64.Axpy(t.b[2*j+3], pvec, uvec) - } - u.Mul(a1, u) - - // Use p as a workspace here and - // rename u for the second call's - // receiver. - vmu, vpu := u, p - vpu.Add(v, u) - vmu.Sub(v, u) - - m.Solve(vmu, vpu) - return - } - - // Remaining Padé table line. - const theta13 = 5.4 - b := [...]float64{ - 64764752532480000, 32382376266240000, 7771770303897600, 1187353796428800, - 129060195264000, 10559470521600, 670442572800, 33522128640, - 1323241920, 40840800, 960960, 16380, 182, 1, - } - - s := math.Log2(n1 / theta13) - if s >= 0 { - s = math.Ceil(s) - a1.Scale(1/math.Pow(2, s), a1) - } - a2.Mul(a1, a1) - - i := getWorkspace(r, r, true) - for j := 0; j < r; j++ { - i.set(j, j, 1) - } - iraw := i.RawMatrix() - ivec := blas64.Vector{N: n, Inc: 1, Data: iraw.Data} - defer putWorkspace(i) - - a2raw := a2.RawMatrix() - a2vec := blas64.Vector{N: n, Inc: 1, Data: a2raw.Data} - - a4 := getWorkspace(r, r, false) - a4raw := a4.RawMatrix() - a4vec := blas64.Vector{N: n, Inc: 1, Data: a4raw.Data} - defer putWorkspace(a4) - a4.Mul(a2, a2) - - a6 := getWorkspace(r, r, false) - a6raw := a6.RawMatrix() - a6vec := blas64.Vector{N: n, Inc: 1, Data: a6raw.Data} - defer putWorkspace(a6) - a6.Mul(a2, a4) - - // V = A_6(b_12*A_6 + b_10*A_4 + b_8*A_2) + b_6*A_6 + b_4*A_4 + b_2*A_2 + b_0*I - blas64.Axpy(b[12], a6vec, vvec) - blas64.Axpy(b[10], a4vec, vvec) - blas64.Axpy(b[8], a2vec, vvec) - v.Mul(v, a6) - blas64.Axpy(b[6], a6vec, vvec) - blas64.Axpy(b[4], a4vec, vvec) - blas64.Axpy(b[2], a2vec, vvec) - blas64.Axpy(b[0], ivec, vvec) - - // U = A(A_6(b_13*A_6 + b_11*A_4 + b_9*A_2) + b_7*A_6 + b_5*A_4 + b_3*A_2 + b_1*I) - blas64.Axpy(b[13], a6vec, uvec) - blas64.Axpy(b[11], a4vec, uvec) - blas64.Axpy(b[9], a2vec, uvec) - u.Mul(u, a6) - blas64.Axpy(b[7], a6vec, uvec) - blas64.Axpy(b[5], a4vec, uvec) - blas64.Axpy(b[3], a2vec, uvec) - blas64.Axpy(b[1], ivec, uvec) - u.Mul(u, a1) - - // Use i as a workspace here and - // rename u for the second call's - // receiver.
- vmu, vpu := u, i - vpu.Add(v, u) - vmu.Sub(v, u) - - m.Solve(vmu, vpu) - - for ; s > 0; s-- { - m.Mul(m, m) - } -} - -// Pow calculates the integral power of the matrix a to n, placing the result -// in the receiver. Pow will panic if n is negative or if a is not square. -func (m *Dense) Pow(a Matrix, n int) { - if n < 0 { - panic("matrix: illegal power") - } - r, c := a.Dims() - if r != c { - panic(ErrShape) - } - - m.reuseAs(r, c) - - // Take possible fast paths. - switch n { - case 0: - for i := 0; i < r; i++ { - zero(m.mat.Data[i*m.mat.Stride : i*m.mat.Stride+c]) - m.mat.Data[i*m.mat.Stride+i] = 1 - } - return - case 1: - m.Copy(a) - return - case 2: - m.Mul(a, a) - return - } - - // Perform iterative exponentiation by squaring in work space. - w := getWorkspace(r, r, false) - w.Copy(a) - s := getWorkspace(r, r, false) - s.Copy(a) - x := getWorkspace(r, r, false) - for n--; n > 0; n >>= 1 { - if n&1 != 0 { - x.Mul(w, s) - w, x = x, w - } - if n != 1 { - x.Mul(s, s) - s, x = x, s - } - } - m.Copy(w) - putWorkspace(w) - putWorkspace(s) - putWorkspace(x) -} - -// Scale multiplies the elements of a by f, placing the result in the receiver. -// -// See the Scaler interface for more information. -func (m *Dense) Scale(f float64, a Matrix) { - ar, ac := a.Dims() - - m.reuseAs(ar, ac) - - aU, aTrans := untranspose(a) - if rm, ok := aU.(RawMatrixer); ok { - amat := rm.RawMatrix() - if m == aU || m.checkOverlap(amat) { - var restore func() - m, restore = m.isolatedWorkspace(a) - defer restore() - } - if !aTrans { - for ja, jm := 0, 0; ja < ar*amat.Stride; ja, jm = ja+amat.Stride, jm+m.mat.Stride { - for i, v := range amat.Data[ja : ja+ac] { - m.mat.Data[i+jm] = v * f - } - } - } else { - for ja, jm := 0, 0; ja < ac*amat.Stride; ja, jm = ja+amat.Stride, jm+1 { - for i, v := range amat.Data[ja : ja+ar] { - m.mat.Data[i*m.mat.Stride+jm] = v * f - } - } - } - return - } - - m.checkOverlapMatrix(a) - for r := 0; r < ar; r++ { - for c := 0; c < ac; c++ { - m.set(r, c, f*a.At(r, c)) - } - } -} - -// Apply applies the function fn to each of the elements of a, placing the -// resulting matrix in the receiver. The function fn takes a row/column -// index and element value and returns some function of that tuple. -func (m *Dense) Apply(fn func(i, j int, v float64) float64, a Matrix) { - ar, ac := a.Dims() - - m.reuseAs(ar, ac) - - aU, aTrans := untranspose(a) - if rm, ok := aU.(RawMatrixer); ok { - amat := rm.RawMatrix() - if m == aU || m.checkOverlap(amat) { - var restore func() - m, restore = m.isolatedWorkspace(a) - defer restore() - } - if !aTrans { - for j, ja, jm := 0, 0, 0; ja < ar*amat.Stride; j, ja, jm = j+1, ja+amat.Stride, jm+m.mat.Stride { - for i, v := range amat.Data[ja : ja+ac] { - m.mat.Data[i+jm] = fn(j, i, v) - } - } - } else { - for j, ja, jm := 0, 0, 0; ja < ac*amat.Stride; j, ja, jm = j+1, ja+amat.Stride, jm+1 { - for i, v := range amat.Data[ja : ja+ar] { - m.mat.Data[i*m.mat.Stride+jm] = fn(i, j, v) - } - } - } - return - } - - m.checkOverlapMatrix(a) - for r := 0; r < ar; r++ { - for c := 0; c < ac; c++ { - m.set(r, c, fn(r, c, a.At(r, c))) - } - } -} - -// RankOne performs a rank-one update to the matrix a with the vectors x and -// y, where x and y are treated as column vectors. The result is stored in the -// receiver. If a is zero, see Outer. 
-// m = a + alpha * x * y^T -func (m *Dense) RankOne(a Matrix, alpha float64, x, y Vector) { - ar, ac := a.Dims() - if x.Len() != ar { - panic(ErrShape) - } - if y.Len() != ac { - panic(ErrShape) - } - - if a != m { - aU, _ := untranspose(a) - if rm, ok := aU.(RawMatrixer); ok { - m.checkOverlap(rm.RawMatrix()) - } - } - - var xmat, ymat blas64.Vector - fast := true - xU, _ := untranspose(x) - if rv, ok := xU.(RawVectorer); ok { - r, c := xU.Dims() - xmat = rv.RawVector() - m.checkOverlap(generalFromVector(xmat, r, c)) - } else { - fast = false - } - yU, _ := untranspose(y) - if rv, ok := yU.(RawVectorer); ok { - r, c := yU.Dims() - ymat = rv.RawVector() - m.checkOverlap(generalFromVector(ymat, r, c)) - } else { - fast = false - } - - if fast { - if m != a { - m.reuseAs(ar, ac) - m.Copy(a) - } - blas64.Ger(alpha, xmat, ymat, m.mat) - return - } - - m.reuseAs(ar, ac) - for i := 0; i < ar; i++ { - for j := 0; j < ac; j++ { - m.set(i, j, a.At(i, j)+alpha*x.AtVec(i)*y.AtVec(j)) - } - } -} - -// Outer calculates the outer product of the vectors x and y, where x and y -// are treated as column vectors, and stores the result in the receiver. -// m = alpha * x * y^T -// In order to update an existing matrix, see RankOne. -func (m *Dense) Outer(alpha float64, x, y Vector) { - r, c := x.Len(), y.Len() - - // Copied from reuseAs with use replaced by useZeroed - // and a final zero of the matrix elements if we pass - // the shape checks. - // TODO(kortschak): Factor out into reuseZeroedAs if - // we find another case that needs it. - if m.mat.Rows > m.capRows || m.mat.Cols > m.capCols { - // Panic as a string, not a mat.Error. - panic("mat: caps not correctly set") - } - if m.IsZero() { - m.mat = blas64.General{ - Rows: r, - Cols: c, - Stride: c, - Data: useZeroed(m.mat.Data, r*c), - } - m.capRows = r - m.capCols = c - } else if r != m.mat.Rows || c != m.mat.Cols { - panic(ErrShape) - } - - var xmat, ymat blas64.Vector - fast := true - xU, _ := untranspose(x) - if rv, ok := xU.(RawVectorer); ok { - r, c := xU.Dims() - xmat = rv.RawVector() - m.checkOverlap(generalFromVector(xmat, r, c)) - } else { - fast = false - } - yU, _ := untranspose(y) - if rv, ok := yU.(RawVectorer); ok { - r, c := yU.Dims() - ymat = rv.RawVector() - m.checkOverlap(generalFromVector(ymat, r, c)) - } else { - fast = false - } - - if fast { - for i := 0; i < r; i++ { - zero(m.mat.Data[i*m.mat.Stride : i*m.mat.Stride+c]) - } - blas64.Ger(alpha, xmat, ymat, m.mat) - return - } - - for i := 0; i < r; i++ { - for j := 0; j < c; j++ { - m.set(i, j, alpha*x.AtVec(i)*y.AtVec(j)) - } - } -} diff --git a/vendor/gonum.org/v1/gonum/mat/diagonal.go b/vendor/gonum.org/v1/gonum/mat/diagonal.go deleted file mode 100644 index beb2e434c..000000000 --- a/vendor/gonum.org/v1/gonum/mat/diagonal.go +++ /dev/null @@ -1,322 +0,0 @@ -// Copyright ©2018 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
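For reference, a minimal sketch of how the Dense kernels deleted above (Exp, Pow, RankOne, Outer) are driven through the public API of the vendored gonum.org/v1/gonum/mat package; the matrix sizes and values are illustrative only:

    package main

    import (
        "fmt"

        "gonum.org/v1/gonum/mat"
    )

    func main() {
        a := mat.NewDense(2, 2, []float64{0, 1, -1, 0}) // illustrative values
        x := mat.NewVecDense(2, []float64{1, 2})
        y := mat.NewVecDense(2, []float64{3, 4})

        var e, p, r, o mat.Dense
        e.Exp(a)                // e = exp(a), via the Padé path above
        p.Pow(a, 3)             // p = a^3 by iterated squaring
        r.RankOne(a, 0.5, x, y) // r = a + 0.5 * x * y^T
        o.Outer(2, x, y)        // o = 2 * x * y^T

        fmt.Println(mat.Formatted(&e))
    }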
- -package mat - -import ( - "gonum.org/v1/gonum/blas" - "gonum.org/v1/gonum/blas/blas64" -) - -var ( - diagDense *DiagDense - _ Matrix = diagDense - _ Diagonal = diagDense - _ MutableDiagonal = diagDense - _ Triangular = diagDense - _ TriBanded = diagDense - _ Symmetric = diagDense - _ SymBanded = diagDense - _ Banded = diagDense - _ RawBander = diagDense - _ RawSymBander = diagDense - - diag Diagonal - _ Matrix = diag - _ Diagonal = diag - _ Triangular = diag - _ TriBanded = diag - _ Symmetric = diag - _ SymBanded = diag - _ Banded = diag -) - -// Diagonal represents a diagonal matrix, that is a square matrix that only -// has non-zero terms on the diagonal. -type Diagonal interface { - Matrix - // Diag returns the number of rows/columns in the matrix. - Diag() int - - // Bandwidth and TBand are included in the Diagonal interface - // to allow the use of Diagonal types in banded functions. - // Bandwidth will always return (0, 0). - Bandwidth() (kl, ku int) - TBand() Banded - - // Triangle and TTri are included in the Diagonal interface - // to allow the use of Diagonal types in triangular functions. - Triangle() (int, TriKind) - TTri() Triangular - - // Symmetric and SymBand are included in the Diagonal interface - // to allow the use of Diagonal types in symmetric and banded symmetric - // functions respectively. - Symmetric() int - SymBand() (n, k int) - - // TriBand and TTriBand are included in the Diagonal interface - // to allow the use of Diagonal types in triangular banded functions. - TriBand() (n, k int, kind TriKind) - TTriBand() TriBanded -} - -// MutableDiagonal is a Diagonal matrix whose elements can be set. -type MutableDiagonal interface { - Diagonal - SetDiag(i int, v float64) -} - -// DiagDense represents a diagonal matrix in dense storage format. -type DiagDense struct { - mat blas64.Vector -} - -// NewDiagDense creates a new Diagonal matrix with n rows and n columns. -// The length of data must be n or data must be nil, otherwise NewDiagDense -// will panic. NewDiagDense will panic if n is zero. -func NewDiagDense(n int, data []float64) *DiagDense { - if n <= 0 { - if n == 0 { - panic(ErrZeroLength) - } - panic("mat: negative dimension") - } - if data == nil { - data = make([]float64, n) - } - if len(data) != n { - panic(ErrShape) - } - return &DiagDense{ - mat: blas64.Vector{N: n, Data: data, Inc: 1}, - } -} - -// Diag returns the dimension of the receiver. -func (d *DiagDense) Diag() int { - return d.mat.N -} - -// Dims returns the dimensions of the matrix. -func (d *DiagDense) Dims() (r, c int) { - return d.mat.N, d.mat.N -} - -// T returns the transpose of the matrix. -func (d *DiagDense) T() Matrix { - return d -} - -// TTri returns the transpose of the matrix. Note that Diagonal matrices are -// Upper by default. -func (d *DiagDense) TTri() Triangular { - return TransposeTri{d} -} - -// TBand performs an implicit transpose by returning the receiver inside a -// TransposeBand. -func (d *DiagDense) TBand() Banded { - return TransposeBand{d} -} - -// TTriBand performs an implicit transpose by returning the receiver inside a -// TransposeTriBand. Note that Diagonal matrices are Upper by default. -func (d *DiagDense) TTriBand() TriBanded { - return TransposeTriBand{d} -} - -// Bandwidth returns the upper and lower bandwidths of the matrix. -// These values are always zero for diagonal matrices. -func (d *DiagDense) Bandwidth() (kl, ku int) { - return 0, 0 -} - -// Symmetric implements the Symmetric interface. 
-func (d *DiagDense) Symmetric() int { - return d.mat.N -} - -// SymBand returns the number of rows/columns in the matrix, and the size of -// the bandwidth. -func (d *DiagDense) SymBand() (n, k int) { - return d.mat.N, 0 -} - -// Triangle implements the Triangular interface. -func (d *DiagDense) Triangle() (int, TriKind) { - return d.mat.N, Upper -} - -// TriBand returns the number of rows/columns in the matrix, the -// size of the bandwidth, and the orientation. Note that Diagonal matrices are -// Upper by default. -func (d *DiagDense) TriBand() (n, k int, kind TriKind) { - return d.mat.N, 0, Upper -} - -// Reset zeros the length of the matrix so that it can be reused as the -// receiver of a dimensionally restricted operation. -// -// See the Reseter interface for more information. -func (d *DiagDense) Reset() { - // No change of Inc or n to 0 may be - // made unless both are set to 0. - d.mat.Inc = 0 - d.mat.N = 0 - d.mat.Data = d.mat.Data[:0] -} - -// Zero sets all of the matrix elements to zero. -func (d *DiagDense) Zero() { - for i := 0; i < d.mat.N; i++ { - d.mat.Data[d.mat.Inc*i] = 0 - } -} - -// DiagView returns the diagonal as a matrix backed by the original data. -func (d *DiagDense) DiagView() Diagonal { - return d -} - -// DiagFrom copies the diagonal of m into the receiver. The receiver must -// be min(r, c) long or zero. Otherwise DiagFrom will panic. -func (d *DiagDense) DiagFrom(m Matrix) { - n := min(m.Dims()) - d.reuseAs(n) - - var vec blas64.Vector - switch r := m.(type) { - case *DiagDense: - vec = r.mat - case RawBander: - mat := r.RawBand() - vec = blas64.Vector{ - N: n, - Inc: mat.Stride, - Data: mat.Data[mat.KL : (n-1)*mat.Stride+mat.KL+1], - } - case RawMatrixer: - mat := r.RawMatrix() - vec = blas64.Vector{ - N: n, - Inc: mat.Stride + 1, - Data: mat.Data[:(n-1)*mat.Stride+n], - } - case RawSymBander: - mat := r.RawSymBand() - vec = blas64.Vector{ - N: n, - Inc: mat.Stride, - Data: mat.Data[:(n-1)*mat.Stride+1], - } - case RawSymmetricer: - mat := r.RawSymmetric() - vec = blas64.Vector{ - N: n, - Inc: mat.Stride + 1, - Data: mat.Data[:(n-1)*mat.Stride+n], - } - case RawTriBander: - mat := r.RawTriBand() - data := mat.Data - if mat.Uplo == blas.Lower { - data = data[mat.K:] - } - vec = blas64.Vector{ - N: n, - Inc: mat.Stride, - Data: data[:(n-1)*mat.Stride+1], - } - case RawTriangular: - mat := r.RawTriangular() - if mat.Diag == blas.Unit { - for i := 0; i < n; i += d.mat.Inc { - d.mat.Data[i] = 1 - } - return - } - vec = blas64.Vector{ - N: n, - Inc: mat.Stride + 1, - Data: mat.Data[:(n-1)*mat.Stride+n], - } - case RawVectorer: - d.mat.Data[0] = r.RawVector().Data[0] - return - default: - for i := 0; i < n; i++ { - d.setDiag(i, m.At(i, i)) - } - return - } - blas64.Copy(vec, d.mat) -} - -// RawBand returns the underlying data used by the receiver represented -// as a blas64.Band. -// Changes to elements in the receiver following the call will be reflected -// in returned blas64.Band. -func (d *DiagDense) RawBand() blas64.Band { - return blas64.Band{ - Rows: d.mat.N, - Cols: d.mat.N, - KL: 0, - KU: 0, - Stride: d.mat.Inc, - Data: d.mat.Data, - } -} - -// RawSymBand returns the underlying data used by the receiver represented -// as a blas64.SymmetricBand. -// Changes to elements in the receiver following the call will be reflected -// in returned blas64.Band. 
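Because DiagDense satisfies Diagonal, Banded, Triangular and Symmetric simultaneously, one value can be passed wherever any of those interfaces is expected. A small usage sketch, with illustrative values and the vendored import path:

    package main

    import (
        "fmt"

        "gonum.org/v1/gonum/mat"
    )

    func main() {
        d := mat.NewDiagDense(3, []float64{1, 2, 3})
        d.SetDiag(1, 5) // MutableDiagonal access

        // One value satisfies all of these interfaces at once.
        var (
            _ mat.Banded     = d
            _ mat.Triangular = d
            _ mat.Symmetric  = d
        )

        fmt.Println(mat.Trace(d)) // 1 + 5 + 3 = 9

        // DiagFrom copies the main diagonal out of any Matrix.
        var dd mat.DiagDense
        dd.DiagFrom(mat.NewDense(2, 2, []float64{1, 2, 3, 4}))
        fmt.Println(dd.At(1, 1)) // 4
    }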
-func (d *DiagDense) RawSymBand() blas64.SymmetricBand { - return blas64.SymmetricBand{ - N: d.mat.N, - K: 0, - Stride: d.mat.Inc, - Uplo: blas.Upper, - Data: d.mat.Data, - } -} - -// reuseAs resizes an empty diagonal to a r×r diagonal, -// or checks that a non-empty matrix is r×r. -func (d *DiagDense) reuseAs(r int) { - if r == 0 { - panic(ErrZeroLength) - } - if d.IsZero() { - d.mat = blas64.Vector{ - Inc: 1, - Data: use(d.mat.Data, r), - } - d.mat.N = r - return - } - if r != d.mat.N { - panic(ErrShape) - } -} - -// IsZero returns whether the receiver is zero-sized. Zero-sized vectors can be the -// receiver for size-restricted operations. DiagDenses can be zeroed using Reset. -func (d *DiagDense) IsZero() bool { - // It must be the case that d.Dims() returns - // zeros in this case. See comment in Reset(). - return d.mat.Inc == 0 -} - -// Trace returns the trace. -func (d *DiagDense) Trace() float64 { - rb := d.RawBand() - var tr float64 - for i := 0; i < rb.Rows; i++ { - tr += rb.Data[rb.KL+i*rb.Stride] - } - return tr - -} diff --git a/vendor/gonum.org/v1/gonum/mat/doc.go b/vendor/gonum.org/v1/gonum/mat/doc.go deleted file mode 100644 index 2cc910015..000000000 --- a/vendor/gonum.org/v1/gonum/mat/doc.go +++ /dev/null @@ -1,169 +0,0 @@ -// Copyright ©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package mat provides implementations of float64 and complex128 matrix -// structures and linear algebra operations on them. -// -// Overview -// -// This section provides a quick overview of the mat package. The following -// sections provide more in depth commentary. -// -// mat provides: -// - Interfaces for Matrix classes (Matrix, Symmetric, Triangular) -// - Concrete implementations (Dense, SymDense, TriDense) -// - Methods and functions for using matrix data (Add, Trace, SymRankOne) -// - Types for constructing and using matrix factorizations (QR, LU) -// - The complementary types for complex matrices, CMatrix, CSymDense, etc. -// -// A matrix may be constructed through the corresponding New function. If no -// backing array is provided the matrix will be initialized to all zeros. -// // Allocate a zeroed real matrix of size 3×5 -// zero := mat.NewDense(3, 5, nil) -// If a backing data slice is provided, the matrix will have those elements. -// Matrices are all stored in row-major format. -// // Generate a 6×6 matrix of random values. -// data := make([]float64, 36) -// for i := range data { -// data[i] = rand.NormFloat64() -// } -// a := mat.NewDense(6, 6, data) -// Operations involving matrix data are implemented as functions when the values -// of the matrix remain unchanged -// tr := mat.Trace(a) -// and are implemented as methods when the operation modifies the receiver. -// zero.Copy(a) -// -// Receivers must be the correct size for the matrix operations, otherwise the -// operation will panic. As a special case for convenience, a zero-value matrix -// will be modified to have the correct size, allocating data if necessary. -// var c mat.Dense // construct a new zero-sized matrix -// c.Mul(a, a) // c is automatically adjusted to be 6×6 -// -// Zero-value of a matrix -// -// A zero-value matrix is either the Go language definition of a zero-value or -// is a zero-sized matrix with zero-length stride. Matrix implementations may have -// a Reset method to revert the receiver into a zero-valued matrix and an IsZero -// method that returns whether the matrix is zero-valued. 
-// So the following will all result in a zero-value matrix.
-//  - var a mat.Dense
-//  - a := NewDense(0, 0, make([]float64, 0, 100))
-//  - a.Reset()
-// A zero-value matrix cannot be sliced even if it does have an adequately sized
-// backing data slice, but can be expanded using its Grow method if it exists.
-//
-// The Matrix Interfaces
-//
-// The Matrix interface is the common link between the concrete types of real
-// matrices. The Matrix interface is defined by three functions: Dims, which
-// returns the dimensions of the Matrix, At, which returns the element in the
-// specified location, and T for returning a Transpose (discussed later). All of
-// the concrete types can perform these behaviors and so implement the interface.
-// Methods and functions are designed to use this interface, so in particular the method
-//  func (m *Dense) Mul(a, b Matrix)
-// constructs a *Dense from the result of a multiplication with any Matrix types,
-// not just *Dense. Where more restrictive requirements must be met, there are also the
-// Symmetric and Triangular interfaces. For example, in
-//  func (s *SymDense) AddSym(a, b Symmetric)
-// the Symmetric interface guarantees a symmetric result.
-//
-// The CMatrix interface plays the same role for complex matrices. The difference
-// is that the CMatrix type has the H method instead of T, for returning the conjugate
-// transpose.
-//
-// (Conjugate) Transposes
-//
-// The T method is used for transposition on real matrices, and H is used for
-// conjugate transposition on complex matrices. For example, c.Mul(a.T(), b) computes
-// c = a^T * b. The mat types implement this method implicitly —
-// see the Transpose and Conjugate types for more details. Note that some
-// operations have a transpose as part of their definition, as in *SymDense.SymOuterK.
-//
-// Matrix Factorization
-//
-// Matrix factorizations, such as the LU decomposition, typically have their own
-// specific data storage, and so are each implemented as a specific type. The
-// factorization can be computed through a call to Factorize
-//  var lu mat.LU
-//  lu.Factorize(a)
-// The elements of the factorization can be extracted through methods on the
-// factorized type, i.e. *LU.UTo. The factorization types can also be used directly,
-// as in *Dense.SolveCholesky. Some factorizations can be updated directly,
-// without needing to update the original matrix and refactorize,
-// as in *LU.RankOne.
-//
-// BLAS and LAPACK
-//
-// BLAS and LAPACK are the standard APIs for linear algebra routines. Many
-// operations in mat are implemented using calls to the wrapper functions
-// in gonum/blas/blas64 and gonum/lapack/lapack64 and their complex equivalents.
-// By default, blas64 and lapack64 call the native Go implementations of the
-// routines. Alternatively, it is possible to use C-based implementations of the
-// APIs through the respective cgo packages and "Use" functions. The Go
-// implementation of LAPACK (used by default) makes calls
-// through blas64, so if a cgo BLAS implementation is registered, the lapack64
-// calls will be partially executed in Go and partially executed in C.
-//
-// Type Switching
-//
-// The Matrix abstraction enables efficiency as well as interoperability. Go's
-// type reflection capabilities are used to choose the most efficient routine
-// given the specific concrete types.
For example, in -// c.Mul(a, b) -// if a and b both implement RawMatrixer, that is, they can be represented as a -// blas64.General, blas64.Gemm (general matrix multiplication) is called, while -// instead if b is a RawSymmetricer blas64.Symm is used (general-symmetric -// multiplication), and if b is a *VecDense blas64.Gemv is used. -// -// There are many possible type combinations and special cases. No specific guarantees -// are made about the performance of any method, and in particular, note that an -// abstract matrix type may be copied into a concrete type of the corresponding -// value. If there are specific special cases that are needed, please submit a -// pull-request or file an issue. -// -// Invariants -// -// Matrix input arguments to functions are never directly modified. If an operation -// changes Matrix data, the mutated matrix will be the receiver of a function. -// -// For convenience, a matrix may be used as both a receiver and as an input, e.g. -// a.Pow(a, 6) -// v.SolveVec(a.T(), v) -// though in many cases this will cause an allocation (see Element Aliasing). -// An exception to this rule is Copy, which does not allow a.Copy(a.T()). -// -// Element Aliasing -// -// Most methods in mat modify receiver data. It is forbidden for the modified -// data region of the receiver to overlap the used data area of the input -// arguments. The exception to this rule is when the method receiver is equal to one -// of the input arguments, as in the a.Pow(a, 6) call above, or its implicit transpose. -// -// This prohibition is to help avoid subtle mistakes when the method needs to read -// from and write to the same data region. There are ways to make mistakes using the -// mat API, and mat functions will detect and complain about those. -// There are many ways to make mistakes by excursion from the mat API via -// interaction with raw matrix values. -// -// If you need to read the rest of this section to understand the behavior of -// your program, you are being clever. Don't be clever. If you must be clever, -// blas64 and lapack64 may be used to call the behavior directly. -// -// mat will use the following rules to detect overlap between the receiver and one -// of the inputs: -// - the input implements one of the Raw methods, and -// - the address ranges of the backing data slices overlap, and -// - the strides differ or there is an overlap in the used data elements. -// If such an overlap is detected, the method will panic. -// -// The following cases will not panic: -// - the data slices do not overlap, -// - there is pointer identity between the receiver and input values after -// the value has been untransposed if necessary. -// -// mat will not attempt to detect element overlap if the input does not implement a -// Raw method. Method behavior is undefined if there is undetected overlap. -// -package mat // import "gonum.org/v1/gonum/mat" diff --git a/vendor/gonum.org/v1/gonum/mat/eigen.go b/vendor/gonum.org/v1/gonum/mat/eigen.go deleted file mode 100644 index aaf7fa538..000000000 --- a/vendor/gonum.org/v1/gonum/mat/eigen.go +++ /dev/null @@ -1,350 +0,0 @@ -// Copyright ©2013 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
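A minimal sketch of the zero-value-receiver and aliasing conventions described in the package documentation above (values illustrative; import path as vendored):

    package main

    import (
        "fmt"

        "gonum.org/v1/gonum/mat"
    )

    func main() {
        a := mat.NewDense(2, 2, []float64{1, 2, 3, 4})

        var c mat.Dense  // zero value; usable as a receiver
        c.Mul(a, a)      // c is sized to 2×2 on first use
        c.Add(&c, a)     // the receiver may also appear as an input
        c.Mul(a.T(), &c) // implicit transpose; may allocate a temporary internally

        fmt.Println(mat.Formatted(&c))
    }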
-
-package mat
-
-import (
-    "gonum.org/v1/gonum/lapack"
-    "gonum.org/v1/gonum/lapack/lapack64"
-)
-
-const (
-    badFact   = "mat: use without successful factorization"
-    badNoVect = "mat: eigenvectors not computed"
-)
-
-// EigenSym is a type for creating and manipulating the Eigen decomposition of
-// symmetric matrices.
-type EigenSym struct {
-    vectorsComputed bool
-
-    values  []float64
-    vectors *Dense
-}
-
-// Factorize computes the eigenvalue decomposition of the symmetric matrix a.
-// The Eigen decomposition is defined as
-//  A = P * D * P^-1
-// where D is a diagonal matrix containing the eigenvalues of the matrix, and
-// P is a matrix of the eigenvectors of A. Factorize computes the eigenvalues
-// in ascending order. If the vectors input argument is false, the eigenvectors
-// are not computed.
-//
-// Factorize returns whether the decomposition succeeded. If the decomposition
-// failed, methods that require a successful factorization will panic.
-func (e *EigenSym) Factorize(a Symmetric, vectors bool) (ok bool) {
-    // kill previous decomposition
-    e.vectorsComputed = false
-    e.values = e.values[:0]
-
-    n := a.Symmetric()
-    sd := NewSymDense(n, nil)
-    sd.CopySym(a)
-
-    jobz := lapack.EVNone
-    if vectors {
-        jobz = lapack.EVCompute
-    }
-    w := make([]float64, n)
-    work := []float64{0}
-    lapack64.Syev(jobz, sd.mat, w, work, -1)
-
-    work = getFloats(int(work[0]), false)
-    ok = lapack64.Syev(jobz, sd.mat, w, work, len(work))
-    putFloats(work)
-    if !ok {
-        e.vectorsComputed = false
-        e.values = nil
-        e.vectors = nil
-        return false
-    }
-    e.vectorsComputed = vectors
-    e.values = w
-    e.vectors = NewDense(n, n, sd.mat.Data)
-    return true
-}
-
-// succFact returns whether the receiver contains a successful factorization.
-func (e *EigenSym) succFact() bool {
-    return len(e.values) != 0
-}
-
-// Values extracts the eigenvalues of the factorized matrix. If dst is
-// non-nil, the values are stored in-place into dst. In this case
-// dst must have length n, otherwise Values will panic. If dst is
-// nil, then a new slice will be allocated of the proper length and filled
-// with the eigenvalues.
-//
-// Values panics if the Eigen decomposition was not successful.
-func (e *EigenSym) Values(dst []float64) []float64 {
-    if !e.succFact() {
-        panic(badFact)
-    }
-    if dst == nil {
-        dst = make([]float64, len(e.values))
-    }
-    if len(dst) != len(e.values) {
-        panic(ErrSliceLengthMismatch)
-    }
-    copy(dst, e.values)
-    return dst
-}
-
-// VectorsTo returns the eigenvectors of the decomposition. VectorsTo
-// will panic if the eigenvectors were not computed during the factorization,
-// or if the factorization was not successful.
-//
-// If dst is not nil, the eigenvectors are stored in-place into dst; dst
-// must have size n×n, otherwise VectorsTo will panic. If dst is nil, a new matrix
-// is allocated and returned.
-func (e *EigenSym) VectorsTo(dst *Dense) *Dense {
-    if !e.succFact() {
-        panic(badFact)
-    }
-    if !e.vectorsComputed {
-        panic(badNoVect)
-    }
-    r, c := e.vectors.Dims()
-    if dst == nil {
-        dst = NewDense(r, c, nil)
-    } else {
-        dst.reuseAs(r, c)
-    }
-    dst.Copy(e.vectors)
-    return dst
-}
-
-// EigenKind specifies the computation of eigenvectors during factorization.
-type EigenKind int
-
-const (
-    // EigenNone specifies to not compute any eigenvectors.
-    EigenNone EigenKind = 0
-    // EigenLeft specifies to compute the left eigenvectors.
-    EigenLeft EigenKind = 1 << iota
-    // EigenRight specifies to compute the right eigenvectors.
-    EigenRight
-    // EigenBoth is a convenience value for computing both eigenvectors.
-    EigenBoth EigenKind = EigenLeft | EigenRight
-)
-
-// Eigen is a type for creating and using the eigenvalue decomposition of a dense matrix.
-type Eigen struct {
-    n int // The size of the factorized matrix.
-
-    kind EigenKind
-
-    values   []complex128
-    rVectors *CDense
-    lVectors *CDense
-}
-
-// succFact returns whether the receiver contains a successful factorization.
-func (e *Eigen) succFact() bool {
-    return e.n != 0
-}
-
-// Factorize computes the eigenvalues of the square matrix a, and optionally
-// the eigenvectors.
-//
-// A right eigenvalue/eigenvector combination is defined by
-//  A * x_r = λ * x_r
-// where x_r is the column vector called an eigenvector, and λ is the corresponding
-// eigenvalue.
-//
-// Similarly, a left eigenvalue/eigenvector combination is defined by
-//  x_l * A = λ * x_l
-// The eigenvalues, but not the eigenvectors, are the same for both decompositions.
-//
-// Typically eigenvectors refer to right eigenvectors.
-//
-// In all cases, Factorize computes the eigenvalues of the matrix. kind
-// specifies which of the eigenvectors, if any, to compute. See the EigenKind
-// documentation for more information.
-// Eigen panics if the input matrix is not square.
-//
-// Factorize returns whether the decomposition succeeded. If the decomposition
-// failed, methods that require a successful factorization will panic.
-func (e *Eigen) Factorize(a Matrix, kind EigenKind) (ok bool) {
-    // kill previous factorization.
-    e.n = 0
-    e.kind = 0
-    // Copy a because it is modified during the Lapack call.
-    r, c := a.Dims()
-    if r != c {
-        panic(ErrShape)
-    }
-    var sd Dense
-    sd.CloneFrom(a)
-
-    left := kind&EigenLeft != 0
-    right := kind&EigenRight != 0
-
-    var vl, vr Dense
-    jobvl := lapack.LeftEVNone
-    jobvr := lapack.RightEVNone
-    if left {
-        vl = *NewDense(r, r, nil)
-        jobvl = lapack.LeftEVCompute
-    }
-    if right {
-        vr = *NewDense(c, c, nil)
-        jobvr = lapack.RightEVCompute
-    }
-
-    wr := getFloats(c, false)
-    defer putFloats(wr)
-    wi := getFloats(c, false)
-    defer putFloats(wi)
-
-    work := []float64{0}
-    lapack64.Geev(jobvl, jobvr, sd.mat, wr, wi, vl.mat, vr.mat, work, -1)
-    work = getFloats(int(work[0]), false)
-    first := lapack64.Geev(jobvl, jobvr, sd.mat, wr, wi, vl.mat, vr.mat, work, len(work))
-    putFloats(work)
-
-    if first != 0 {
-        e.values = nil
-        return false
-    }
-    e.n = r
-    e.kind = kind
-
-    // Construct complex eigenvalues from float64 data.
-    values := make([]complex128, r)
-    for i, v := range wr {
-        values[i] = complex(v, wi[i])
-    }
-    e.values = values
-
-    // Construct complex eigenvectors from float64 data.
-    var cvl, cvr CDense
-    if left {
-        cvl = *NewCDense(r, r, nil)
-        e.complexEigenTo(&cvl, &vl)
-        e.lVectors = &cvl
-    } else {
-        e.lVectors = nil
-    }
-    if right {
-        cvr = *NewCDense(c, c, nil)
-        e.complexEigenTo(&cvr, &vr)
-        e.rVectors = &cvr
-    } else {
-        e.rVectors = nil
-    }
-    return true
-}
-
-// Kind returns the EigenKind of the decomposition. If no decomposition has been
-// computed, Kind returns -1.
-func (e *Eigen) Kind() EigenKind {
-    if !e.succFact() {
-        return -1
-    }
-    return e.kind
-}
-
-// Values extracts the eigenvalues of the factorized matrix. If dst is
-// non-nil, the values are stored in-place into dst. In this case
-// dst must have length n, otherwise Values will panic. If dst is
-// nil, then a new slice will be allocated of the proper length and
-// filled with the eigenvalues.
-//
-// Values panics if the Eigen decomposition was not successful.
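For context, a usage sketch of the Eigen APIs deleted in this hunk; matrices and expected outputs are illustrative, and the import path is the vendored package:

    package main

    import (
        "fmt"
        "log"

        "gonum.org/v1/gonum/mat"
    )

    func main() {
        // Symmetric case: values come back in ascending order.
        var es mat.EigenSym
        s := mat.NewSymDense(2, []float64{2, 1, 1, 2}) // illustrative matrix
        if ok := es.Factorize(s, true); !ok {
            log.Fatal("mat: symmetric eigendecomposition failed")
        }
        fmt.Println(es.Values(nil)) // [1 3]

        // General case: eigenvalues are complex128.
        var eig mat.Eigen
        g := mat.NewDense(2, 2, []float64{0, -1, 1, 0}) // rotation; eigenvalues ±i
        if ok := eig.Factorize(g, mat.EigenRight); !ok {
            log.Fatal("mat: eigendecomposition failed")
        }
        fmt.Println(eig.Values(nil)) // [(0+1i) (0-1i)]
        vecs := eig.VectorsTo(nil)   // *mat.CDense of right eigenvectors
        fmt.Println(vecs.At(0, 0))
    }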
-func (e *Eigen) Values(dst []complex128) []complex128 { - if !e.succFact() { - panic(badFact) - } - if dst == nil { - dst = make([]complex128, e.n) - } - if len(dst) != e.n { - panic(ErrSliceLengthMismatch) - } - copy(dst, e.values) - return dst -} - -// complexEigenTo extracts the complex eigenvectors from the real matrix d -// and stores them into the complex matrix dst. -// -// The columns of the returned n×n dense matrix contain the eigenvectors of the -// decomposition in the same order as the eigenvalues. -// If the j-th eigenvalue is real, then -// dst[:,j] = d[:,j], -// and if it is not real, then the elements of the j-th and (j+1)-th columns of d -// form complex conjugate pairs and the eigenvectors are recovered as -// dst[:,j] = d[:,j] + i*d[:,j+1], -// dst[:,j+1] = d[:,j] - i*d[:,j+1], -// where i is the imaginary unit. -func (e *Eigen) complexEigenTo(dst *CDense, d *Dense) { - r, c := d.Dims() - cr, cc := dst.Dims() - if r != cr { - panic("size mismatch") - } - if c != cc { - panic("size mismatch") - } - for j := 0; j < c; j++ { - if imag(e.values[j]) == 0 { - for i := 0; i < r; i++ { - dst.set(i, j, complex(d.at(i, j), 0)) - } - continue - } - for i := 0; i < r; i++ { - real := d.at(i, j) - imag := d.at(i, j+1) - dst.set(i, j, complex(real, imag)) - dst.set(i, j+1, complex(real, -imag)) - } - j++ - } -} - -// VectorsTo returns the right eigenvectors of the decomposition. VectorsTo -// will panic if the right eigenvectors were not computed during the factorization, -// or if the factorization was not successful. -// -// The computed eigenvectors are normalized to have Euclidean norm equal to 1 -// and largest component real. -func (e *Eigen) VectorsTo(dst *CDense) *CDense { - if !e.succFact() { - panic(badFact) - } - if e.kind&EigenRight == 0 { - panic(badNoVect) - } - if dst == nil { - dst = NewCDense(e.n, e.n, nil) - } else { - dst.reuseAs(e.n, e.n) - } - dst.Copy(e.rVectors) - return dst -} - -// LeftVectorsTo returns the left eigenvectors of the decomposition. LeftVectorsTo -// will panic if the left eigenvectors were not computed during the factorization, -// or if the factorization was not successful. -// -// The computed eigenvectors are normalized to have Euclidean norm equal to 1 -// and largest component real. -func (e *Eigen) LeftVectorsTo(dst *CDense) *CDense { - if !e.succFact() { - panic(badFact) - } - if e.kind&EigenLeft == 0 { - panic(badNoVect) - } - if dst == nil { - dst = NewCDense(e.n, e.n, nil) - } else { - dst.reuseAs(e.n, e.n) - } - dst.Copy(e.lVectors) - return dst -} diff --git a/vendor/gonum.org/v1/gonum/mat/errors.go b/vendor/gonum.org/v1/gonum/mat/errors.go deleted file mode 100644 index 0430d126f..000000000 --- a/vendor/gonum.org/v1/gonum/mat/errors.go +++ /dev/null @@ -1,149 +0,0 @@ -// Copyright ©2013 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package mat - -import ( - "fmt" - "runtime" - - "gonum.org/v1/gonum/lapack" -) - -// Condition is the condition number of a matrix. The condition -// number is defined as |A| * |A^-1|. -// -// One important use of Condition is during linear solve routines (finding x such -// that A * x = b). The condition number of A indicates the accuracy of -// the computed solution. A Condition error will be returned if the condition -// number of A is sufficiently large. If A is exactly singular to working precision, -// Condition == ∞, and the solve algorithm may have completed early. 
If Condition
-// is large and finite the solve algorithm will be performed, but the computed
-// solution may be inaccurate. Due to the nature of finite precision arithmetic,
-// the value of Condition is only an approximate test of singularity.
-type Condition float64
-
-func (c Condition) Error() string {
-    return fmt.Sprintf("matrix singular or near-singular with condition number %.4e", c)
-}
-
-// ConditionTolerance is the tolerance limit of the condition number. If the
-// condition number is above this value, the matrix is considered singular.
-const ConditionTolerance = 1e16
-
-const (
-    // CondNorm is the matrix norm used for computing the condition number by routines
-    // in the matrix packages.
-    CondNorm = lapack.MaxRowSum
-
-    // CondNormTrans is the norm used to compute on A^T to get the same result as
-    // computing CondNorm on A.
-    CondNormTrans = lapack.MaxColumnSum
-)
-
-const stackTraceBufferSize = 1 << 20
-
-// Maybe will recover a panic with a type mat.Error from fn, and return this error
-// as the Err field of an ErrorStack. The stack trace for the panicking function will be
-// recovered and placed in the StackTrace field. Any other error is re-panicked.
-func Maybe(fn func()) (err error) {
-    defer func() {
-        if r := recover(); r != nil {
-            if e, ok := r.(Error); ok {
-                if e.string == "" {
-                    panic("mat: invalid error")
-                }
-                buf := make([]byte, stackTraceBufferSize)
-                n := runtime.Stack(buf, false)
-                err = ErrorStack{Err: e, StackTrace: string(buf[:n])}
-                return
-            }
-            panic(r)
-        }
-    }()
-    fn()
-    return
-}
-
-// MaybeFloat will recover a panic with a type mat.Error from fn, and return this error
-// as the Err field of an ErrorStack. The stack trace for the panicking function will be
-// recovered and placed in the StackTrace field. Any other error is re-panicked.
-func MaybeFloat(fn func() float64) (f float64, err error) {
-    defer func() {
-        if r := recover(); r != nil {
-            if e, ok := r.(Error); ok {
-                if e.string == "" {
-                    panic("mat: invalid error")
-                }
-                buf := make([]byte, stackTraceBufferSize)
-                n := runtime.Stack(buf, false)
-                err = ErrorStack{Err: e, StackTrace: string(buf[:n])}
-                return
-            }
-            panic(r)
-        }
-    }()
-    return fn(), nil
-}
-
-// MaybeComplex will recover a panic with a type mat.Error from fn, and return this error
-// as the Err field of an ErrorStack. The stack trace for the panicking function will be
-// recovered and placed in the StackTrace field. Any other error is re-panicked.
-func MaybeComplex(fn func() complex128) (f complex128, err error) {
-    defer func() {
-        if r := recover(); r != nil {
-            if e, ok := r.(Error); ok {
-                if e.string == "" {
-                    panic("mat: invalid error")
-                }
-                buf := make([]byte, stackTraceBufferSize)
-                n := runtime.Stack(buf, false)
-                err = ErrorStack{Err: e, StackTrace: string(buf[:n])}
-                return
-            }
-            panic(r)
-        }
-    }()
-    return fn(), nil
-}
-
-// Error represents matrix handling errors. These errors can be recovered by Maybe wrappers.
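A minimal sketch of the Maybe wrapper described above, converting a mat panic into an ErrorStack; the mismatched shapes are deliberate and illustrative:

    package main

    import (
        "fmt"

        "gonum.org/v1/gonum/mat"
    )

    func main() {
        a := mat.NewDense(2, 3, nil)
        b := mat.NewDense(2, 3, nil) // inner dimensions deliberately mismatched

        err := mat.Maybe(func() {
            var c mat.Dense
            c.Mul(a, b) // panics with ErrShape
        })
        if err != nil {
            fmt.Println(err) // matrix: dimension mismatch
            if es, ok := err.(mat.ErrorStack); ok {
                fmt.Println(len(es.StackTrace) > 0) // stack of the panicking call
            }
        }
    }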
-type Error struct{ string } - -func (err Error) Error() string { return err.string } - -var ( - ErrIndexOutOfRange = Error{"matrix: index out of range"} - ErrRowAccess = Error{"matrix: row index out of range"} - ErrColAccess = Error{"matrix: column index out of range"} - ErrVectorAccess = Error{"matrix: vector index out of range"} - ErrZeroLength = Error{"matrix: zero length in matrix dimension"} - ErrRowLength = Error{"matrix: row length mismatch"} - ErrColLength = Error{"matrix: col length mismatch"} - ErrSquare = Error{"matrix: expect square matrix"} - ErrNormOrder = Error{"matrix: invalid norm order for matrix"} - ErrSingular = Error{"matrix: matrix is singular"} - ErrShape = Error{"matrix: dimension mismatch"} - ErrIllegalStride = Error{"matrix: illegal stride"} - ErrPivot = Error{"matrix: malformed pivot list"} - ErrTriangle = Error{"matrix: triangular storage mismatch"} - ErrTriangleSet = Error{"matrix: triangular set out of bounds"} - ErrBandSet = Error{"matrix: band set out of bounds"} - ErrDiagSet = Error{"matrix: diagonal set out of bounds"} - ErrSliceLengthMismatch = Error{"matrix: input slice length mismatch"} - ErrNotPSD = Error{"matrix: input not positive symmetric definite"} - ErrFailedEigen = Error{"matrix: eigendecomposition not successful"} -) - -// ErrorStack represents matrix handling errors that have been recovered by Maybe wrappers. -type ErrorStack struct { - Err error - - // StackTrace is the stack trace - // recovered by Maybe, MaybeFloat - // or MaybeComplex. - StackTrace string -} - -func (err ErrorStack) Error() string { return err.Err.Error() } diff --git a/vendor/gonum.org/v1/gonum/mat/format.go b/vendor/gonum.org/v1/gonum/mat/format.go deleted file mode 100644 index 9b60cb318..000000000 --- a/vendor/gonum.org/v1/gonum/mat/format.go +++ /dev/null @@ -1,238 +0,0 @@ -// Copyright ©2013 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package mat - -import ( - "fmt" - "strconv" -) - -// Formatted returns a fmt.Formatter for the matrix m using the given options. -func Formatted(m Matrix, options ...FormatOption) fmt.Formatter { - f := formatter{ - matrix: m, - dot: '.', - } - for _, o := range options { - o(&f) - } - return f -} - -type formatter struct { - matrix Matrix - prefix string - margin int - dot byte - squeeze bool -} - -// FormatOption is a functional option for matrix formatting. -type FormatOption func(*formatter) - -// Prefix sets the formatted prefix to the string p. Prefix is a string that is prepended to -// each line of output. -func Prefix(p string) FormatOption { - return func(f *formatter) { f.prefix = p } -} - -// Excerpt sets the maximum number of rows and columns to print at the margins of the matrix -// to m. If m is zero or less all elements are printed. -func Excerpt(m int) FormatOption { - return func(f *formatter) { f.margin = m } -} - -// DotByte sets the dot character to b. The dot character is used to replace zero elements -// if the result is printed with the fmt ' ' verb flag. Without a DotByte option, the default -// dot character is '.'. -func DotByte(b byte) FormatOption { - return func(f *formatter) { f.dot = b } -} - -// Squeeze sets the printing behaviour to minimise column width for each individual column. -func Squeeze() FormatOption { - return func(f *formatter) { f.squeeze = true } -} - -// Format satisfies the fmt.Formatter interface. 
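A usage sketch of the Formatted helper and its options as defined in this file; the matrix values are illustrative:

    package main

    import (
        "fmt"

        "gonum.org/v1/gonum/mat"
    )

    func main() {
        m := mat.NewDense(3, 3, []float64{1, 0, 0, 0, 2.5, 0, 0, 0, 3})
        f := mat.Formatted(m, mat.Prefix("    "), mat.Squeeze())

        fmt.Printf("m = %v\n", f)    // default formatting
        fmt.Printf("m = % .1f\n", f) // ' ' flag: zero elements print as the dot byte
        fmt.Printf("m = %.2g\n", f)  // any float64 verb from package fmt applies
    }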
-func (f formatter) Format(fs fmt.State, c rune) { - if c == 'v' && fs.Flag('#') { - fmt.Fprintf(fs, "%#v", f.matrix) - return - } - format(f.matrix, f.prefix, f.margin, f.dot, f.squeeze, fs, c) -} - -// format prints a pretty representation of m to the fs io.Writer. The format character c -// specifies the numerical representation of elements; valid values are those for float64 -// specified in the fmt package, with their associated flags. In addition to this, a space -// preceding a verb indicates that zero values should be represented by the dot character. -// The printed range of the matrix can be limited by specifying a positive value for margin; -// If margin is greater than zero, only the first and last margin rows/columns of the matrix -// are output. If squeeze is true, column widths are determined on a per-column basis. -// -// format will not provide Go syntax output. -func format(m Matrix, prefix string, margin int, dot byte, squeeze bool, fs fmt.State, c rune) { - rows, cols := m.Dims() - - var printed int - if margin <= 0 { - printed = rows - if cols > printed { - printed = cols - } - } else { - printed = margin - } - - prec, pOk := fs.Precision() - if !pOk { - prec = -1 - } - - var ( - maxWidth int - widths widther - buf, pad []byte - ) - if squeeze { - widths = make(columnWidth, cols) - } else { - widths = new(uniformWidth) - } - switch c { - case 'v', 'e', 'E', 'f', 'F', 'g', 'G': - if c == 'v' { - buf, maxWidth = maxCellWidth(m, 'g', printed, prec, widths) - } else { - buf, maxWidth = maxCellWidth(m, c, printed, prec, widths) - } - default: - fmt.Fprintf(fs, "%%!%c(%T=Dims(%d, %d))", c, m, rows, cols) - return - } - width, _ := fs.Width() - width = max(width, maxWidth) - pad = make([]byte, max(width, 2)) - for i := range pad { - pad[i] = ' ' - } - - first := true - if rows > 2*printed || cols > 2*printed { - first = false - fmt.Fprintf(fs, "Dims(%d, %d)\n", rows, cols) - } - - skipZero := fs.Flag(' ') - for i := 0; i < rows; i++ { - if !first { - fmt.Fprint(fs, prefix) - } - first = false - var el string - switch { - case rows == 1: - fmt.Fprint(fs, "[") - el = "]" - case i == 0: - fmt.Fprint(fs, "⎡") - el = "⎤\n" - case i < rows-1: - fmt.Fprint(fs, "⎢") - el = "⎥\n" - default: - fmt.Fprint(fs, "⎣") - el = "⎦" - } - - for j := 0; j < cols; j++ { - if j >= printed && j < cols-printed { - j = cols - printed - 1 - if i == 0 || i == rows-1 { - fmt.Fprint(fs, "... ... 
") - } else { - fmt.Fprint(fs, " ") - } - continue - } - - v := m.At(i, j) - if v == 0 && skipZero { - buf = buf[:1] - buf[0] = dot - } else { - if c == 'v' { - buf = strconv.AppendFloat(buf[:0], v, 'g', prec, 64) - } else { - buf = strconv.AppendFloat(buf[:0], v, byte(c), prec, 64) - } - } - if fs.Flag('-') { - fs.Write(buf) - fs.Write(pad[:widths.width(j)-len(buf)]) - } else { - fs.Write(pad[:widths.width(j)-len(buf)]) - fs.Write(buf) - } - - if j < cols-1 { - fs.Write(pad[:2]) - } - } - - fmt.Fprint(fs, el) - - if i >= printed-1 && i < rows-printed && 2*printed < rows { - i = rows - printed - 1 - fmt.Fprintf(fs, "%s .\n%[1]s .\n%[1]s .\n", prefix) - continue - } - } -} - -func maxCellWidth(m Matrix, c rune, printed, prec int, w widther) ([]byte, int) { - var ( - buf = make([]byte, 0, 64) - rows, cols = m.Dims() - max int - ) - for i := 0; i < rows; i++ { - if i >= printed-1 && i < rows-printed && 2*printed < rows { - i = rows - printed - 1 - continue - } - for j := 0; j < cols; j++ { - if j >= printed && j < cols-printed { - continue - } - - buf = strconv.AppendFloat(buf, m.At(i, j), byte(c), prec, 64) - if len(buf) > max { - max = len(buf) - } - if len(buf) > w.width(j) { - w.setWidth(j, len(buf)) - } - buf = buf[:0] - } - } - return buf, max -} - -type widther interface { - width(i int) int - setWidth(i, w int) -} - -type uniformWidth int - -func (u *uniformWidth) width(_ int) int { return int(*u) } -func (u *uniformWidth) setWidth(_, w int) { *u = uniformWidth(w) } - -type columnWidth []int - -func (c columnWidth) width(i int) int { return c[i] } -func (c columnWidth) setWidth(i, w int) { c[i] = w } diff --git a/vendor/gonum.org/v1/gonum/mat/gsvd.go b/vendor/gonum.org/v1/gonum/mat/gsvd.go deleted file mode 100644 index 2de511a9f..000000000 --- a/vendor/gonum.org/v1/gonum/mat/gsvd.go +++ /dev/null @@ -1,415 +0,0 @@ -// Copyright ©2017 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package mat - -import ( - "gonum.org/v1/gonum/blas/blas64" - "gonum.org/v1/gonum/floats" - "gonum.org/v1/gonum/lapack" - "gonum.org/v1/gonum/lapack/lapack64" -) - -// GSVDKind specifies the treatment of singular vectors during a GSVD -// factorization. -type GSVDKind int - -const ( - // GSVDNone specifies that no singular vectors should be computed during - // the decomposition. - GSVDNone GSVDKind = 0 - - // GSVDU specifies that the U singular vectors should be computed during - // the decomposition. - GSVDU GSVDKind = 1 << iota - // GSVDV specifies that the V singular vectors should be computed during - // the decomposition. - GSVDV - // GSVDQ specifies that the Q singular vectors should be computed during - // the decomposition. - GSVDQ - - // GSVDAll is a convenience value for computing all of the singular vectors. - GSVDAll = GSVDU | GSVDV | GSVDQ -) - -// GSVD is a type for creating and using the Generalized Singular Value Decomposition -// (GSVD) of a matrix. -// -// The factorization is a linear transformation of the data sets from the given -// variable×sample spaces to reduced and diagonalized "eigenvariable"×"eigensample" -// spaces. -type GSVD struct { - kind GSVDKind - - r, p, c, k, l int - s1, s2 []float64 - a, b, u, v, q blas64.General - - work []float64 - iwork []int -} - -// succFact returns whether the receiver contains a successful factorization. 
-func (gsvd *GSVD) succFact() bool { - return gsvd.r != 0 -} - -// Factorize computes the generalized singular value decomposition (GSVD) of the input -// the r×c matrix A and the p×c matrix B. The singular values of A and B are computed -// in all cases, while the singular vectors are optionally computed depending on the -// input kind. -// -// The full singular value decomposition (kind == GSVDAll) deconstructs A and B as -// A = U * Σ₁ * [ 0 R ] * Q^T -// -// B = V * Σ₂ * [ 0 R ] * Q^T -// where Σ₁ and Σ₂ are r×(k+l) and p×(k+l) diagonal matrices of singular values, and -// U, V and Q are r×r, p×p and c×c orthogonal matrices of singular vectors. k+l is the -// effective numerical rank of the matrix [ A^T B^T ]^T. -// -// It is frequently not necessary to compute the full GSVD. Computation time and -// storage costs can be reduced using the appropriate kind. Either only the singular -// values can be computed (kind == SVDNone), or in conjunction with specific singular -// vectors (kind bit set according to matrix.GSVDU, matrix.GSVDV and matrix.GSVDQ). -// -// Factorize returns whether the decomposition succeeded. If the decomposition -// failed, routines that require a successful factorization will panic. -func (gsvd *GSVD) Factorize(a, b Matrix, kind GSVDKind) (ok bool) { - // kill the previous decomposition - gsvd.r = 0 - gsvd.kind = 0 - - r, c := a.Dims() - gsvd.r, gsvd.c = r, c - p, c := b.Dims() - gsvd.p = p - if gsvd.c != c { - panic(ErrShape) - } - var jobU, jobV, jobQ lapack.GSVDJob - switch { - default: - panic("gsvd: bad input kind") - case kind == GSVDNone: - jobU = lapack.GSVDNone - jobV = lapack.GSVDNone - jobQ = lapack.GSVDNone - case GSVDAll&kind != 0: - if GSVDU&kind != 0 { - jobU = lapack.GSVDU - gsvd.u = blas64.General{ - Rows: r, - Cols: r, - Stride: r, - Data: use(gsvd.u.Data, r*r), - } - } - if GSVDV&kind != 0 { - jobV = lapack.GSVDV - gsvd.v = blas64.General{ - Rows: p, - Cols: p, - Stride: p, - Data: use(gsvd.v.Data, p*p), - } - } - if GSVDQ&kind != 0 { - jobQ = lapack.GSVDQ - gsvd.q = blas64.General{ - Rows: c, - Cols: c, - Stride: c, - Data: use(gsvd.q.Data, c*c), - } - } - } - - // A and B are destroyed on call, so copy the matrices. - aCopy := DenseCopyOf(a) - bCopy := DenseCopyOf(b) - - gsvd.s1 = use(gsvd.s1, c) - gsvd.s2 = use(gsvd.s2, c) - - gsvd.iwork = useInt(gsvd.iwork, c) - - gsvd.work = use(gsvd.work, 1) - lapack64.Ggsvd3(jobU, jobV, jobQ, aCopy.mat, bCopy.mat, gsvd.s1, gsvd.s2, gsvd.u, gsvd.v, gsvd.q, gsvd.work, -1, gsvd.iwork) - gsvd.work = use(gsvd.work, int(gsvd.work[0])) - gsvd.k, gsvd.l, ok = lapack64.Ggsvd3(jobU, jobV, jobQ, aCopy.mat, bCopy.mat, gsvd.s1, gsvd.s2, gsvd.u, gsvd.v, gsvd.q, gsvd.work, len(gsvd.work), gsvd.iwork) - if ok { - gsvd.a = aCopy.mat - gsvd.b = bCopy.mat - gsvd.kind = kind - } - return ok -} - -// Kind returns the GSVDKind of the decomposition. If no decomposition has been -// computed, Kind returns -1. -func (gsvd *GSVD) Kind() GSVDKind { - if !gsvd.succFact() { - return -1 - } - return gsvd.kind -} - -// Rank returns the k and l terms of the rank of [ A^T B^T ]^T. -func (gsvd *GSVD) Rank() (k, l int) { - return gsvd.k, gsvd.l -} - -// GeneralizedValues returns the generalized singular values of the factorized matrices. -// If the input slice is non-nil, the values will be stored in-place into the slice. -// In this case, the slice must have length min(r,c)-k, and GeneralizedValues will -// panic with matrix.ErrSliceLengthMismatch otherwise. 
If the input slice is nil, -// a new slice of the appropriate length will be allocated and returned. -// -// GeneralizedValues will panic if the receiver does not contain a successful factorization. -func (gsvd *GSVD) GeneralizedValues(v []float64) []float64 { - if !gsvd.succFact() { - panic(badFact) - } - r := gsvd.r - c := gsvd.c - k := gsvd.k - d := min(r, c) - if v == nil { - v = make([]float64, d-k) - } - if len(v) != d-k { - panic(ErrSliceLengthMismatch) - } - floats.DivTo(v, gsvd.s1[k:d], gsvd.s2[k:d]) - return v -} - -// ValuesA returns the singular values of the factorized A matrix. -// If the input slice is non-nil, the values will be stored in-place into the slice. -// In this case, the slice must have length min(r,c)-k, and ValuesA will panic with -// matrix.ErrSliceLengthMismatch otherwise. If the input slice is nil, -// a new slice of the appropriate length will be allocated and returned. -// -// ValuesA will panic if the receiver does not contain a successful factorization. -func (gsvd *GSVD) ValuesA(s []float64) []float64 { - if !gsvd.succFact() { - panic(badFact) - } - r := gsvd.r - c := gsvd.c - k := gsvd.k - d := min(r, c) - if s == nil { - s = make([]float64, d-k) - } - if len(s) != d-k { - panic(ErrSliceLengthMismatch) - } - copy(s, gsvd.s1[k:min(r, c)]) - return s -} - -// ValuesB returns the singular values of the factorized B matrix. -// If the input slice is non-nil, the values will be stored in-place into the slice. -// In this case, the slice must have length min(r,c)-k, and ValuesB will panic with -// matrix.ErrSliceLengthMismatch otherwise. If the input slice is nil, -// a new slice of the appropriate length will be allocated and returned. -// -// ValuesB will panic if the receiver does not contain a successful factorization. -func (gsvd *GSVD) ValuesB(s []float64) []float64 { - if !gsvd.succFact() { - panic(badFact) - } - r := gsvd.r - c := gsvd.c - k := gsvd.k - d := min(r, c) - if s == nil { - s = make([]float64, d-k) - } - if len(s) != d-k { - panic(ErrSliceLengthMismatch) - } - copy(s, gsvd.s2[k:d]) - return s -} - -// ZeroRTo extracts the matrix [ 0 R ] from the singular value decomposition, storing -// the result in-place into dst. [ 0 R ] is size (k+l)×c. -// If dst is nil, a new matrix is allocated. The resulting ZeroR matrix is returned. -// -// ZeroRTo will panic if the receiver does not contain a successful factorization. -func (gsvd *GSVD) ZeroRTo(dst *Dense) *Dense { - if !gsvd.succFact() { - panic(badFact) - } - r := gsvd.r - c := gsvd.c - k := gsvd.k - l := gsvd.l - h := min(k+l, r) - if dst == nil { - dst = NewDense(k+l, c, nil) - } else { - dst.reuseAsZeroed(k+l, c) - } - a := Dense{ - mat: gsvd.a, - capRows: r, - capCols: c, - } - dst.Slice(0, h, c-k-l, c).(*Dense). - Copy(a.Slice(0, h, c-k-l, c)) - if r < k+l { - b := Dense{ - mat: gsvd.b, - capRows: gsvd.p, - capCols: c, - } - dst.Slice(r, k+l, c+r-k-l, c).(*Dense). - Copy(b.Slice(r-k, l, c+r-k-l, c)) - } - return dst -} - -// SigmaATo extracts the matrix Σ₁ from the singular value decomposition, storing -// the result in-place into dst. Σ₁ is size r×(k+l). -// If dst is nil, a new matrix is allocated. The resulting SigmaA matrix is returned. -// -// SigmaATo will panic if the receiver does not contain a successful factorization. 
-func (gsvd *GSVD) SigmaATo(dst *Dense) *Dense { - if !gsvd.succFact() { - panic(badFact) - } - r := gsvd.r - k := gsvd.k - l := gsvd.l - if dst == nil { - dst = NewDense(r, k+l, nil) - } else { - dst.reuseAsZeroed(r, k+l) - } - for i := 0; i < k; i++ { - dst.set(i, i, 1) - } - for i := k; i < min(r, k+l); i++ { - dst.set(i, i, gsvd.s1[i]) - } - return dst -} - -// SigmaBTo extracts the matrix Σ₂ from the singular value decomposition, storing -// the result in-place into dst. Σ₂ is size p×(k+l). -// If dst is nil, a new matrix is allocated. The resulting SigmaB matrix is returned. -// -// SigmaBTo will panic if the receiver does not contain a successful factorization. -func (gsvd *GSVD) SigmaBTo(dst *Dense) *Dense { - if !gsvd.succFact() { - panic(badFact) - } - r := gsvd.r - p := gsvd.p - k := gsvd.k - l := gsvd.l - if dst == nil { - dst = NewDense(p, k+l, nil) - } else { - dst.reuseAsZeroed(p, k+l) - } - for i := 0; i < min(l, r-k); i++ { - dst.set(i, i+k, gsvd.s2[k+i]) - } - for i := r - k; i < l; i++ { - dst.set(i, i+k, 1) - } - return dst -} - -// UTo extracts the matrix U from the singular value decomposition, storing -// the result in-place into dst. U is size r×r. -// If dst is nil, a new matrix is allocated. The resulting U matrix is returned. -// -// UTo will panic if the receiver does not contain a successful factorization. -func (gsvd *GSVD) UTo(dst *Dense) *Dense { - if !gsvd.succFact() { - panic(badFact) - } - if gsvd.kind&GSVDU == 0 { - panic("mat: improper GSVD kind") - } - r := gsvd.u.Rows - c := gsvd.u.Cols - if dst == nil { - dst = NewDense(r, c, nil) - } else { - dst.reuseAs(r, c) - } - - tmp := &Dense{ - mat: gsvd.u, - capRows: r, - capCols: c, - } - dst.Copy(tmp) - return dst -} - -// VTo extracts the matrix V from the singular value decomposition, storing -// the result in-place into dst. V is size p×p. -// If dst is nil, a new matrix is allocated. The resulting V matrix is returned. -// -// VTo will panic if the receiver does not contain a successful factorization. -func (gsvd *GSVD) VTo(dst *Dense) *Dense { - if !gsvd.succFact() { - panic(badFact) - } - if gsvd.kind&GSVDV == 0 { - panic("mat: improper GSVD kind") - } - r := gsvd.v.Rows - c := gsvd.v.Cols - if dst == nil { - dst = NewDense(r, c, nil) - } else { - dst.reuseAs(r, c) - } - - tmp := &Dense{ - mat: gsvd.v, - capRows: r, - capCols: c, - } - dst.Copy(tmp) - return dst -} - -// QTo extracts the matrix Q from the singular value decomposition, storing -// the result in-place into dst. Q is size c×c. -// If dst is nil, a new matrix is allocated. The resulting Q matrix is returned. -// -// QTo will panic if the receiver does not contain a successful factorization. -func (gsvd *GSVD) QTo(dst *Dense) *Dense { - if !gsvd.succFact() { - panic(badFact) - } - if gsvd.kind&GSVDQ == 0 { - panic("mat: improper GSVD kind") - } - r := gsvd.q.Rows - c := gsvd.q.Cols - if dst == nil { - dst = NewDense(r, c, nil) - } else { - dst.reuseAs(r, c) - } - - tmp := &Dense{ - mat: gsvd.q, - capRows: r, - capCols: c, - } - dst.Copy(tmp) - return dst -} diff --git a/vendor/gonum.org/v1/gonum/mat/hogsvd.go b/vendor/gonum.org/v1/gonum/mat/hogsvd.go deleted file mode 100644 index 61bd94ad4..000000000 --- a/vendor/gonum.org/v1/gonum/mat/hogsvd.go +++ /dev/null @@ -1,233 +0,0 @@ -// Copyright ©2017 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
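A sketch of driving the GSVD API deleted above, using the return-dst signatures of this vendored version; the input data are illustrative only:

    package main

    import (
        "fmt"
        "log"

        "gonum.org/v1/gonum/mat"
    )

    func main() {
        a := mat.NewDense(4, 2, []float64{1, 2, 3, 4, 5, 6, 7, 8}) // illustrative data
        b := mat.NewDense(3, 2, []float64{1, 0, 0, 1, 1, 1})

        var gsvd mat.GSVD
        if ok := gsvd.Factorize(a, b, mat.GSVDAll); !ok {
            log.Fatal("mat: GSVD factorization failed")
        }

        k, l := gsvd.Rank()
        fmt.Println("rank terms k, l:", k, l)

        fmt.Println(gsvd.GeneralizedValues(nil)) // ratios of A's values to B's values
        u := gsvd.UTo(nil)                       // r×r singular vectors of A
        q := gsvd.QTo(nil)                       // c×c shared singular vectors
        fmt.Println(u.Dims())
        fmt.Println(q.Dims())
    }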
-
-package mat
-
-import (
-    "errors"
-
-    "gonum.org/v1/gonum/blas/blas64"
-)
-
-// HOGSVD is a type for creating and using the Higher Order Generalized Singular Value
-// Decomposition (HOGSVD) of a set of matrices.
-//
-// The factorization is a linear transformation of the data sets from the given
-// variable×sample spaces to reduced and diagonalized "eigenvariable"×"eigensample"
-// spaces.
-type HOGSVD struct {
-    n int
-    v *Dense
-    b []Dense
-
-    err error
-}
-
-// succFact returns whether the receiver contains a successful factorization.
-func (gsvd *HOGSVD) succFact() bool {
-    return gsvd.n != 0
-}
-
-// Factorize computes the higher order generalized singular value decomposition (HOGSVD)
-// of the n input r_i×c column tall matrices in m. HOGSVD extends the GSVD case from 2 to n
-// input matrices.
-//
-//  M_0 = U_0 * Σ_0 * V^T
-//  M_1 = U_1 * Σ_1 * V^T
-//  .
-//  .
-//  .
-//  M_{n-1} = U_{n-1} * Σ_{n-1} * V^T
-//
-// where U_i are r_i×c matrices of singular vectors, Σ are c×c matrices of singular values, and V
-// is a c×c matrix of singular vectors.
-//
-// Factorize returns whether the decomposition succeeded. If the decomposition
-// failed, routines that require a successful factorization will panic.
-func (gsvd *HOGSVD) Factorize(m ...Matrix) (ok bool) {
-    // Factorize performs the HOGSVD factorisation
-    // essentially as described by Ponnapalli et al.
-    // https://doi.org/10.1371/journal.pone.0028072
-
-    if len(m) < 2 {
-        panic("hogsvd: too few matrices")
-    }
-    gsvd.n = 0
-
-    r, c := m[0].Dims()
-    a := make([]Cholesky, len(m))
-    var ts SymDense
-    for i, d := range m {
-        rd, cd := d.Dims()
-        if rd < cd {
-            gsvd.err = ErrShape
-            return false
-        }
-        if rd > r {
-            r = rd
-        }
-        if cd != c {
-            panic(ErrShape)
-        }
-        ts.Reset()
-        ts.SymOuterK(1, d.T())
-        ok = a[i].Factorize(&ts)
-        if !ok {
-            gsvd.err = errors.New("hogsvd: cholesky decomposition failed")
-            return false
-        }
-    }
-
-    s := getWorkspace(c, c, true)
-    defer putWorkspace(s)
-    sij := getWorkspace(c, c, false)
-    defer putWorkspace(sij)
-    for i, ai := range a {
-        for _, aj := range a[i+1:] {
-            gsvd.err = ai.SolveCholTo(sij, &aj)
-            if gsvd.err != nil {
-                return false
-            }
-            s.Add(s, sij)
-
-            gsvd.err = aj.SolveCholTo(sij, &ai)
-            if gsvd.err != nil {
-                return false
-            }
-            s.Add(s, sij)
-        }
-    }
-    s.Scale(1/float64(len(m)*(len(m)-1)), s)
-
-    var eig Eigen
-    ok = eig.Factorize(s.T(), EigenRight)
-    if !ok {
-        gsvd.err = errors.New("hogsvd: eigen decomposition failed")
-        return false
-    }
-    vc := eig.VectorsTo(nil)
-    // vc is guaranteed to have real eigenvalues.
-    rc, cc := vc.Dims()
-    v := NewDense(rc, cc, nil)
-    for i := 0; i < rc; i++ {
-        for j := 0; j < cc; j++ {
-            a := vc.At(i, j)
-            v.set(i, j, real(a))
-        }
-    }
-    // Rescale the columns of v by their Frobenius norms.
-    // Work done in cv is reflected in v.
-    var cv VecDense
-    for j := 0; j < c; j++ {
-        cv.ColViewOf(v, j)
-        cv.ScaleVec(1/blas64.Nrm2(cv.mat), &cv)
-    }
-
-    b := make([]Dense, len(m))
-    biT := getWorkspace(c, r, false)
-    defer putWorkspace(biT)
-    for i, d := range m {
-        // All calls to reset will leave a zeroed
-        // matrix with capacity to store the result
-        // without additional allocation.
-        biT.Reset()
-        gsvd.err = biT.Solve(v, d.T())
-        if gsvd.err != nil {
-            return false
-        }
-        b[i].CloneFrom(biT.T())
-    }
-
-    gsvd.n = len(m)
-    gsvd.v = v
-    gsvd.b = b
-    return true
-}
-
-// Err returns the reason for a factorization failure.
-func (gsvd *HOGSVD) Err() error {
-    return gsvd.err
-}
-
-// Len returns the number of matrices that have been factorized.
If Len returns -// zero, the factorization was not successful. -func (gsvd *HOGSVD) Len() int { - return gsvd.n -} - -// UTo extracts the matrix U_n from the singular value decomposition, storing -// the result in-place into dst. U_n is size r×c. -// If dst is nil, a new matrix is allocated. The resulting U matrix is returned. -// -// UTo will panic if the receiver does not contain a successful factorization. -func (gsvd *HOGSVD) UTo(dst *Dense, n int) *Dense { - if !gsvd.succFact() { - panic(badFact) - } - if n < 0 || gsvd.n <= n { - panic("hogsvd: invalid index") - } - - if dst == nil { - r, c := gsvd.b[n].Dims() - dst = NewDense(r, c, nil) - } else { - dst.reuseAs(gsvd.b[n].Dims()) - } - dst.Copy(&gsvd.b[n]) - var v VecDense - for j, f := range gsvd.Values(nil, n) { - v.ColViewOf(dst, j) - v.ScaleVec(1/f, &v) - } - return dst -} - -// Values returns the nth set of singular values of the factorized system. -// If the input slice is non-nil, the values will be stored in-place into the slice. -// In this case, the slice must have length c, and Values will panic with -// matrix.ErrSliceLengthMismatch otherwise. If the input slice is nil, -// a new slice of the appropriate length will be allocated and returned. -// -// Values will panic if the receiver does not contain a successful factorization. -func (gsvd *HOGSVD) Values(s []float64, n int) []float64 { - if !gsvd.succFact() { - panic(badFact) - } - if n < 0 || gsvd.n <= n { - panic("hogsvd: invalid index") - } - - _, c := gsvd.b[n].Dims() - if s == nil { - s = make([]float64, c) - } else if len(s) != c { - panic(ErrSliceLengthMismatch) - } - var v VecDense - for j := 0; j < c; j++ { - v.ColViewOf(&gsvd.b[n], j) - s[j] = blas64.Nrm2(v.mat) - } - return s -} - -// VTo extracts the matrix V from the singular value decomposition, storing -// the result in-place into dst. V is size c×c. -// If dst is nil, a new matrix is allocated. The resulting V matrix is returned. -// -// VTo will panic if the receiver does not contain a successful factorization. -func (gsvd *HOGSVD) VTo(dst *Dense) *Dense { - if !gsvd.succFact() { - panic(badFact) - } - if dst == nil { - r, c := gsvd.v.Dims() - dst = NewDense(r, c, nil) - } else { - dst.reuseAs(gsvd.v.Dims()) - } - dst.Copy(gsvd.v) - return dst -} diff --git a/vendor/gonum.org/v1/gonum/mat/index_bound_checks.go b/vendor/gonum.org/v1/gonum/mat/index_bound_checks.go deleted file mode 100644 index 59815a676..000000000 --- a/vendor/gonum.org/v1/gonum/mat/index_bound_checks.go +++ /dev/null @@ -1,348 +0,0 @@ -// Copyright ©2014 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// This file must be kept in sync with index_no_bound_checks.go. - -// +build bounds - -package mat - -// At returns the element at row i, column j. -func (m *Dense) At(i, j int) float64 { - return m.at(i, j) -} - -func (m *Dense) at(i, j int) float64 { - if uint(i) >= uint(m.mat.Rows) { - panic(ErrRowAccess) - } - if uint(j) >= uint(m.mat.Cols) { - panic(ErrColAccess) - } - return m.mat.Data[i*m.mat.Stride+j] -} - -// Set sets the element at row i, column j to the value v. -func (m *Dense) Set(i, j int, v float64) { - m.set(i, j, v) -} - -func (m *Dense) set(i, j int, v float64) { - if uint(i) >= uint(m.mat.Rows) { - panic(ErrRowAccess) - } - if uint(j) >= uint(m.mat.Cols) { - panic(ErrColAccess) - } - m.mat.Data[i*m.mat.Stride+j] = v -} - -// At returns the element at row i, column j. 
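Stepping back to the HOGSVD API that ends above, a minimal sketch of driving it; every name is taken from this hunk, and the inputs are random full-rank matrices so that the internal Cholesky factorizations succeed:

    package main

    import (
        "fmt"
        "log"
        "math/rand"

        "gonum.org/v1/gonum/mat"
    )

    func randDense(r, c int) *mat.Dense {
        m := mat.NewDense(r, c, nil)
        for i := 0; i < r; i++ {
            for j := 0; j < c; j++ {
                m.Set(i, j, rand.NormFloat64())
            }
        }
        return m
    }

    func main() {
        // Three column-tall data sets over the same three variables.
        var gsvd mat.HOGSVD
        if !gsvd.Factorize(randDense(5, 3), randDense(4, 3), randDense(6, 3)) {
            log.Fatal(gsvd.Err())
        }
        v := gsvd.VTo(nil) // shared 3×3 matrix of right singular vectors
        for n := 0; n < gsvd.Len(); n++ {
            u := gsvd.UTo(nil, n)    // U_n for the nth input matrix
            s := gsvd.Values(nil, n) // its singular values
            r, c := u.Dims()
            fmt.Println(r, c, s)
        }
        _ = v
    }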
-func (m *CDense) At(i, j int) complex128 { - return m.at(i, j) -} - -func (m *CDense) at(i, j int) complex128 { - if uint(i) >= uint(m.mat.Rows) { - panic(ErrRowAccess) - } - if uint(j) >= uint(m.mat.Cols) { - panic(ErrColAccess) - } - return m.mat.Data[i*m.mat.Stride+j] -} - -// Set sets the element at row i, column j to the value v. -func (m *CDense) Set(i, j int, v complex128) { - m.set(i, j, v) -} - -func (m *CDense) set(i, j int, v complex128) { - if uint(i) >= uint(m.mat.Rows) { - panic(ErrRowAccess) - } - if uint(j) >= uint(m.mat.Cols) { - panic(ErrColAccess) - } - m.mat.Data[i*m.mat.Stride+j] = v -} - -// At returns the element at row i. -// It panics if i is out of bounds or if j is not zero. -func (v *VecDense) At(i, j int) float64 { - if j != 0 { - panic(ErrColAccess) - } - return v.at(i) -} - -// AtVec returns the element at row i. -// It panics if i is out of bounds. -func (v *VecDense) AtVec(i int) float64 { - return v.at(i) -} - -func (v *VecDense) at(i int) float64 { - if uint(i) >= uint(v.mat.N) { - panic(ErrRowAccess) - } - return v.mat.Data[i*v.mat.Inc] -} - -// SetVec sets the element at row i to the value val. -// It panics if i is out of bounds. -func (v *VecDense) SetVec(i int, val float64) { - v.setVec(i, val) -} - -func (v *VecDense) setVec(i int, val float64) { - if uint(i) >= uint(v.mat.N) { - panic(ErrVectorAccess) - } - v.mat.Data[i*v.mat.Inc] = val -} - -// At returns the element at row i and column j. -func (t *SymDense) At(i, j int) float64 { - return t.at(i, j) -} - -func (t *SymDense) at(i, j int) float64 { - if uint(i) >= uint(t.mat.N) { - panic(ErrRowAccess) - } - if uint(j) >= uint(t.mat.N) { - panic(ErrColAccess) - } - if i > j { - i, j = j, i - } - return t.mat.Data[i*t.mat.Stride+j] -} - -// SetSym sets the elements at (i,j) and (j,i) to the value v. -func (t *SymDense) SetSym(i, j int, v float64) { - t.set(i, j, v) -} - -func (t *SymDense) set(i, j int, v float64) { - if uint(i) >= uint(t.mat.N) { - panic(ErrRowAccess) - } - if uint(j) >= uint(t.mat.N) { - panic(ErrColAccess) - } - if i > j { - i, j = j, i - } - t.mat.Data[i*t.mat.Stride+j] = v -} - -// At returns the element at row i, column j. -func (t *TriDense) At(i, j int) float64 { - return t.at(i, j) -} - -func (t *TriDense) at(i, j int) float64 { - if uint(i) >= uint(t.mat.N) { - panic(ErrRowAccess) - } - if uint(j) >= uint(t.mat.N) { - panic(ErrColAccess) - } - isUpper := t.isUpper() - if (isUpper && i > j) || (!isUpper && i < j) { - return 0 - } - return t.mat.Data[i*t.mat.Stride+j] -} - -// SetTri sets the element of the triangular matrix at row i, column j to the value v. -// It panics if the location is outside the appropriate half of the matrix. -func (t *TriDense) SetTri(i, j int, v float64) { - t.set(i, j, v) -} - -func (t *TriDense) set(i, j int, v float64) { - if uint(i) >= uint(t.mat.N) { - panic(ErrRowAccess) - } - if uint(j) >= uint(t.mat.N) { - panic(ErrColAccess) - } - isUpper := t.isUpper() - if (isUpper && i > j) || (!isUpper && i < j) { - panic(ErrTriangleSet) - } - t.mat.Data[i*t.mat.Stride+j] = v -} - -// At returns the element at row i, column j. -func (b *BandDense) At(i, j int) float64 { - return b.at(i, j) -} - -func (b *BandDense) at(i, j int) float64 { - if uint(i) >= uint(b.mat.Rows) { - panic(ErrRowAccess) - } - if uint(j) >= uint(b.mat.Cols) { - panic(ErrColAccess) - } - pj := j + b.mat.KL - i - if pj < 0 || b.mat.KL+b.mat.KU+1 <= pj { - return 0 - } - return b.mat.Data[i*b.mat.Stride+pj] -} - -// SetBand sets the element at row i, column j to the value v. 
-// It panics if the location is outside the appropriate region of the matrix. -func (b *BandDense) SetBand(i, j int, v float64) { - b.set(i, j, v) -} - -func (b *BandDense) set(i, j int, v float64) { - if uint(i) >= uint(b.mat.Rows) { - panic(ErrRowAccess) - } - if uint(j) >= uint(b.mat.Cols) { - panic(ErrColAccess) - } - pj := j + b.mat.KL - i - if pj < 0 || b.mat.KL+b.mat.KU+1 <= pj { - panic(ErrBandSet) - } - b.mat.Data[i*b.mat.Stride+pj] = v -} - -// At returns the element at row i, column j. -func (s *SymBandDense) At(i, j int) float64 { - return s.at(i, j) -} - -func (s *SymBandDense) at(i, j int) float64 { - if uint(i) >= uint(s.mat.N) { - panic(ErrRowAccess) - } - if uint(j) >= uint(s.mat.N) { - panic(ErrColAccess) - } - if i > j { - i, j = j, i - } - pj := j - i - if s.mat.K+1 <= pj { - return 0 - } - return s.mat.Data[i*s.mat.Stride+pj] -} - -// SetSymBand sets the element at row i, column j to the value v. -// It panics if the location is outside the appropriate region of the matrix. -func (s *SymBandDense) SetSymBand(i, j int, v float64) { - s.set(i, j, v) -} - -func (s *SymBandDense) set(i, j int, v float64) { - if uint(i) >= uint(s.mat.N) { - panic(ErrRowAccess) - } - if uint(j) >= uint(s.mat.N) { - panic(ErrColAccess) - } - if i > j { - i, j = j, i - } - pj := j - i - if s.mat.K+1 <= pj { - panic(ErrBandSet) - } - s.mat.Data[i*s.mat.Stride+pj] = v -} - -func (t *TriBandDense) At(i, j int) float64 { - return t.at(i, j) -} - -func (t *TriBandDense) at(i, j int) float64 { - // TODO(btracey): Support Diag field, see #692. - if uint(i) >= uint(t.mat.N) { - panic(ErrRowAccess) - } - if uint(j) >= uint(t.mat.N) { - panic(ErrColAccess) - } - isUpper := t.isUpper() - if (isUpper && i > j) || (!isUpper && i < j) { - return 0 - } - kl, ku := t.mat.K, 0 - if isUpper { - kl, ku = 0, t.mat.K - } - pj := j + kl - i - if pj < 0 || kl+ku+1 <= pj { - return 0 - } - return t.mat.Data[i*t.mat.Stride+pj] -} - -func (t *TriBandDense) SetTriBand(i, j int, v float64) { - t.setTriBand(i, j, v) -} - -func (t *TriBandDense) setTriBand(i, j int, v float64) { - if uint(i) >= uint(t.mat.N) { - panic(ErrRowAccess) - } - if uint(j) >= uint(t.mat.N) { - panic(ErrColAccess) - } - isUpper := t.isUpper() - if (isUpper && i > j) || (!isUpper && i < j) { - panic(ErrTriangleSet) - } - kl, ku := t.mat.K, 0 - if isUpper { - kl, ku = 0, t.mat.K - } - pj := j + kl - i - if pj < 0 || kl+ku+1 <= pj { - panic(ErrBandSet) - } - // TODO(btracey): Support Diag field, see #692. - t.mat.Data[i*t.mat.Stride+pj] = v -} - -// At returns the element at row i, column j. -func (d *DiagDense) At(i, j int) float64 { - return d.at(i, j) -} - -func (d *DiagDense) at(i, j int) float64 { - if uint(i) >= uint(d.mat.N) { - panic(ErrRowAccess) - } - if uint(j) >= uint(d.mat.N) { - panic(ErrColAccess) - } - if i != j { - return 0 - } - return d.mat.Data[i*d.mat.Inc] -} - -// SetDiag sets the element at row i, column i to the value v. -// It panics if the location is outside the appropriate region of the matrix. -func (d *DiagDense) SetDiag(i int, v float64) { - d.setDiag(i, v) -} - -func (d *DiagDense) setDiag(i int, v float64) { - if uint(i) >= uint(d.mat.N) { - panic(ErrRowAccess) - } - d.mat.Data[i*d.mat.Inc] = v -} diff --git a/vendor/gonum.org/v1/gonum/mat/index_no_bound_checks.go b/vendor/gonum.org/v1/gonum/mat/index_no_bound_checks.go deleted file mode 100644 index 051f8437a..000000000 --- a/vendor/gonum.org/v1/gonum/mat/index_no_bound_checks.go +++ /dev/null @@ -1,359 +0,0 @@ -// Copyright ©2014 The Gonum Authors. 
All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// This file must be kept in sync with index_bound_checks.go. - -// +build !bounds - -package mat - -// At returns the element at row i, column j. -func (m *Dense) At(i, j int) float64 { - if uint(i) >= uint(m.mat.Rows) { - panic(ErrRowAccess) - } - if uint(j) >= uint(m.mat.Cols) { - panic(ErrColAccess) - } - return m.at(i, j) -} - -func (m *Dense) at(i, j int) float64 { - return m.mat.Data[i*m.mat.Stride+j] -} - -// Set sets the element at row i, column j to the value v. -func (m *Dense) Set(i, j int, v float64) { - if uint(i) >= uint(m.mat.Rows) { - panic(ErrRowAccess) - } - if uint(j) >= uint(m.mat.Cols) { - panic(ErrColAccess) - } - m.set(i, j, v) -} - -func (m *Dense) set(i, j int, v float64) { - m.mat.Data[i*m.mat.Stride+j] = v -} - -// At returns the element at row i, column j. -func (m *CDense) At(i, j int) complex128 { - if uint(i) >= uint(m.mat.Rows) { - panic(ErrRowAccess) - } - if uint(j) >= uint(m.mat.Cols) { - panic(ErrColAccess) - } - return m.at(i, j) -} - -func (m *CDense) at(i, j int) complex128 { - return m.mat.Data[i*m.mat.Stride+j] -} - -// Set sets the element at row i, column j to the value v. -func (m *CDense) Set(i, j int, v complex128) { - if uint(i) >= uint(m.mat.Rows) { - panic(ErrRowAccess) - } - if uint(j) >= uint(m.mat.Cols) { - panic(ErrColAccess) - } - m.set(i, j, v) -} - -func (m *CDense) set(i, j int, v complex128) { - m.mat.Data[i*m.mat.Stride+j] = v -} - -// At returns the element at row i. -// It panics if i is out of bounds or if j is not zero. -func (v *VecDense) At(i, j int) float64 { - if uint(i) >= uint(v.mat.N) { - panic(ErrRowAccess) - } - if j != 0 { - panic(ErrColAccess) - } - return v.at(i) -} - -// AtVec returns the element at row i. -// It panics if i is out of bounds. -func (v *VecDense) AtVec(i int) float64 { - if uint(i) >= uint(v.mat.N) { - panic(ErrRowAccess) - } - return v.at(i) -} - -func (v *VecDense) at(i int) float64 { - return v.mat.Data[i*v.mat.Inc] -} - -// SetVec sets the element at row i to the value val. -// It panics if i is out of bounds. -func (v *VecDense) SetVec(i int, val float64) { - if uint(i) >= uint(v.mat.N) { - panic(ErrVectorAccess) - } - v.setVec(i, val) -} - -func (v *VecDense) setVec(i int, val float64) { - v.mat.Data[i*v.mat.Inc] = val -} - -// At returns the element at row i and column j. -func (s *SymDense) At(i, j int) float64 { - if uint(i) >= uint(s.mat.N) { - panic(ErrRowAccess) - } - if uint(j) >= uint(s.mat.N) { - panic(ErrColAccess) - } - return s.at(i, j) -} - -func (s *SymDense) at(i, j int) float64 { - if i > j { - i, j = j, i - } - return s.mat.Data[i*s.mat.Stride+j] -} - -// SetSym sets the elements at (i,j) and (j,i) to the value v. -func (s *SymDense) SetSym(i, j int, v float64) { - if uint(i) >= uint(s.mat.N) { - panic(ErrRowAccess) - } - if uint(j) >= uint(s.mat.N) { - panic(ErrColAccess) - } - s.set(i, j, v) -} - -func (s *SymDense) set(i, j int, v float64) { - if i > j { - i, j = j, i - } - s.mat.Data[i*s.mat.Stride+j] = v -} - -// At returns the element at row i, column j. 
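This file and index_bound_checks.go above are mutually exclusive alternatives selected by the bounds build tag: "go build -tags bounds" compiles the variant whose unexported at/set helpers also validate indices, while this default variant validates only in the exported methods. Either way the exported accessors behave identically, as in this small sketch using only names from these files:

    package main

    import (
        "fmt"

        "gonum.org/v1/gonum/mat"
    )

    func main() {
        m := mat.NewDense(2, 2, []float64{1, 2, 3, 4})
        fmt.Println(m.At(1, 1)) // 4
        // m.At(2, 0) would panic with mat.ErrRowAccess under either
        // build mode, because the exported At validates its indices
        // before touching the backing slice.
    }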
-func (t *TriDense) At(i, j int) float64 { - if uint(i) >= uint(t.mat.N) { - panic(ErrRowAccess) - } - if uint(j) >= uint(t.mat.N) { - panic(ErrColAccess) - } - return t.at(i, j) -} - -func (t *TriDense) at(i, j int) float64 { - isUpper := t.triKind() - if (isUpper && i > j) || (!isUpper && i < j) { - return 0 - } - return t.mat.Data[i*t.mat.Stride+j] -} - -// SetTri sets the element at row i, column j to the value v. -// It panics if the location is outside the appropriate half of the matrix. -func (t *TriDense) SetTri(i, j int, v float64) { - if uint(i) >= uint(t.mat.N) { - panic(ErrRowAccess) - } - if uint(j) >= uint(t.mat.N) { - panic(ErrColAccess) - } - isUpper := t.isUpper() - if (isUpper && i > j) || (!isUpper && i < j) { - panic(ErrTriangleSet) - } - t.set(i, j, v) -} - -func (t *TriDense) set(i, j int, v float64) { - t.mat.Data[i*t.mat.Stride+j] = v -} - -// At returns the element at row i, column j. -func (b *BandDense) At(i, j int) float64 { - if uint(i) >= uint(b.mat.Rows) { - panic(ErrRowAccess) - } - if uint(j) >= uint(b.mat.Cols) { - panic(ErrColAccess) - } - return b.at(i, j) -} - -func (b *BandDense) at(i, j int) float64 { - pj := j + b.mat.KL - i - if pj < 0 || b.mat.KL+b.mat.KU+1 <= pj { - return 0 - } - return b.mat.Data[i*b.mat.Stride+pj] -} - -// SetBand sets the element at row i, column j to the value v. -// It panics if the location is outside the appropriate region of the matrix. -func (b *BandDense) SetBand(i, j int, v float64) { - if uint(i) >= uint(b.mat.Rows) { - panic(ErrRowAccess) - } - if uint(j) >= uint(b.mat.Cols) { - panic(ErrColAccess) - } - pj := j + b.mat.KL - i - if pj < 0 || b.mat.KL+b.mat.KU+1 <= pj { - panic(ErrBandSet) - } - b.set(i, j, v) -} - -func (b *BandDense) set(i, j int, v float64) { - pj := j + b.mat.KL - i - b.mat.Data[i*b.mat.Stride+pj] = v -} - -// At returns the element at row i, column j. -func (s *SymBandDense) At(i, j int) float64 { - if uint(i) >= uint(s.mat.N) { - panic(ErrRowAccess) - } - if uint(j) >= uint(s.mat.N) { - panic(ErrColAccess) - } - return s.at(i, j) -} - -func (s *SymBandDense) at(i, j int) float64 { - if i > j { - i, j = j, i - } - pj := j - i - if s.mat.K+1 <= pj { - return 0 - } - return s.mat.Data[i*s.mat.Stride+pj] -} - -// SetSymBand sets the element at row i, column j to the value v. -// It panics if the location is outside the appropriate region of the matrix. -func (s *SymBandDense) SetSymBand(i, j int, v float64) { - if uint(i) >= uint(s.mat.N) { - panic(ErrRowAccess) - } - if uint(j) >= uint(s.mat.N) { - panic(ErrColAccess) - } - s.set(i, j, v) -} - -func (s *SymBandDense) set(i, j int, v float64) { - if i > j { - i, j = j, i - } - pj := j - i - if s.mat.K+1 <= pj { - panic(ErrBandSet) - } - s.mat.Data[i*s.mat.Stride+pj] = v -} - -func (t *TriBandDense) At(i, j int) float64 { - if uint(i) >= uint(t.mat.N) { - panic(ErrRowAccess) - } - if uint(j) >= uint(t.mat.N) { - panic(ErrColAccess) - } - return t.at(i, j) -} - -func (t *TriBandDense) at(i, j int) float64 { - // TODO(btracey): Support Diag field, see #692. 
- isUpper := t.isUpper() - if (isUpper && i > j) || (!isUpper && i < j) { - return 0 - } - kl := t.mat.K - ku := 0 - if isUpper { - ku = t.mat.K - kl = 0 - } - pj := j + kl - i - if pj < 0 || kl+ku+1 <= pj { - return 0 - } - return t.mat.Data[i*t.mat.Stride+pj] -} - -func (t *TriBandDense) SetTriBand(i, j int, v float64) { - if uint(i) >= uint(t.mat.N) { - panic(ErrRowAccess) - } - if uint(j) >= uint(t.mat.N) { - panic(ErrColAccess) - } - isUpper := t.isUpper() - if (isUpper && i > j) || (!isUpper && i < j) { - panic(ErrTriangleSet) - } - kl, ku := t.mat.K, 0 - if isUpper { - kl, ku = 0, t.mat.K - } - pj := j + kl - i - if pj < 0 || kl+ku+1 <= pj { - panic(ErrBandSet) - } - // TODO(btracey): Support Diag field, see #692. - t.mat.Data[i*t.mat.Stride+pj] = v -} - -func (t *TriBandDense) setTriBand(i, j int, v float64) { - var kl int - if !t.isUpper() { - kl = t.mat.K - } - pj := j + kl - i - t.mat.Data[i*t.mat.Stride+pj] = v -} - -// At returns the element at row i, column j. -func (d *DiagDense) At(i, j int) float64 { - if uint(i) >= uint(d.mat.N) { - panic(ErrRowAccess) - } - if uint(j) >= uint(d.mat.N) { - panic(ErrColAccess) - } - return d.at(i, j) -} - -func (d *DiagDense) at(i, j int) float64 { - if i != j { - return 0 - } - return d.mat.Data[i*d.mat.Inc] -} - -// SetDiag sets the element at row i, column i to the value v. -// It panics if the location is outside the appropriate region of the matrix. -func (d *DiagDense) SetDiag(i int, v float64) { - if uint(i) >= uint(d.mat.N) { - panic(ErrRowAccess) - } - d.setDiag(i, v) -} - -func (d *DiagDense) setDiag(i int, v float64) { - d.mat.Data[i*d.mat.Inc] = v -} diff --git a/vendor/gonum.org/v1/gonum/mat/inner.go b/vendor/gonum.org/v1/gonum/mat/inner.go deleted file mode 100644 index 762851815..000000000 --- a/vendor/gonum.org/v1/gonum/mat/inner.go +++ /dev/null @@ -1,124 +0,0 @@ -// Copyright ©2014 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package mat - -import ( - "gonum.org/v1/gonum/blas" - "gonum.org/v1/gonum/blas/blas64" - "gonum.org/v1/gonum/internal/asm/f64" -) - -// Inner computes the generalized inner product -// x^T A y -// between the vectors x and y with matrix A, where x and y are treated as -// column vectors. -// -// This is only a true inner product if A is symmetric positive definite, though -// the operation works for any matrix A. -// -// Inner panics if x.Len != m or y.Len != n when A is an m x n matrix. -func Inner(x Vector, a Matrix, y Vector) float64 { - m, n := a.Dims() - if x.Len() != m { - panic(ErrShape) - } - if y.Len() != n { - panic(ErrShape) - } - if m == 0 || n == 0 { - return 0 - } - - var sum float64 - - switch a := a.(type) { - case RawSymmetricer: - amat := a.RawSymmetric() - if amat.Uplo != blas.Upper { - // Panic as a string not a mat.Error. 
- panic(badSymTriangle) - } - var xmat, ymat blas64.Vector - if xrv, ok := x.(RawVectorer); ok { - xmat = xrv.RawVector() - } else { - break - } - if yrv, ok := y.(RawVectorer); ok { - ymat = yrv.RawVector() - } else { - break - } - for i := 0; i < x.Len(); i++ { - xi := x.AtVec(i) - if xi != 0 { - if ymat.Inc == 1 { - sum += xi * f64.DotUnitary( - amat.Data[i*amat.Stride+i:i*amat.Stride+n], - ymat.Data[i:], - ) - } else { - sum += xi * f64.DotInc( - amat.Data[i*amat.Stride+i:i*amat.Stride+n], - ymat.Data[i*ymat.Inc:], uintptr(n-i), - 1, uintptr(ymat.Inc), - 0, 0, - ) - } - } - yi := y.AtVec(i) - if i != n-1 && yi != 0 { - if xmat.Inc == 1 { - sum += yi * f64.DotUnitary( - amat.Data[i*amat.Stride+i+1:i*amat.Stride+n], - xmat.Data[i+1:], - ) - } else { - sum += yi * f64.DotInc( - amat.Data[i*amat.Stride+i+1:i*amat.Stride+n], - xmat.Data[(i+1)*xmat.Inc:], uintptr(n-i-1), - 1, uintptr(xmat.Inc), - 0, 0, - ) - } - } - } - return sum - case RawMatrixer: - amat := a.RawMatrix() - var ymat blas64.Vector - if yrv, ok := y.(RawVectorer); ok { - ymat = yrv.RawVector() - } else { - break - } - for i := 0; i < x.Len(); i++ { - xi := x.AtVec(i) - if xi != 0 { - if ymat.Inc == 1 { - sum += xi * f64.DotUnitary( - amat.Data[i*amat.Stride:i*amat.Stride+n], - ymat.Data, - ) - } else { - sum += xi * f64.DotInc( - amat.Data[i*amat.Stride:i*amat.Stride+n], - ymat.Data, uintptr(n), - 1, uintptr(ymat.Inc), - 0, 0, - ) - } - } - } - return sum - } - for i := 0; i < x.Len(); i++ { - xi := x.AtVec(i) - for j := 0; j < y.Len(); j++ { - sum += xi * a.At(i, j) * y.AtVec(j) - } - } - return sum -} diff --git a/vendor/gonum.org/v1/gonum/mat/io.go b/vendor/gonum.org/v1/gonum/mat/io.go deleted file mode 100644 index 7f7ef0703..000000000 --- a/vendor/gonum.org/v1/gonum/mat/io.go +++ /dev/null @@ -1,492 +0,0 @@ -// Copyright ©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package mat - -import ( - "bytes" - "encoding/binary" - "errors" - "fmt" - "io" - "math" -) - -// version is the current on-disk codec version. -const version uint32 = 0x1 - -// maxLen is the biggest slice/array len one can create on a 32/64b platform. -const maxLen = int64(int(^uint(0) >> 1)) - -var ( - headerSize = binary.Size(storage{}) - sizeInt64 = binary.Size(int64(0)) - sizeFloat64 = binary.Size(float64(0)) - - errWrongType = errors.New("mat: wrong data type") - - errTooBig = errors.New("mat: resulting data slice too big") - errTooSmall = errors.New("mat: input slice too small") - errBadBuffer = errors.New("mat: data buffer size mismatch") - errBadSize = errors.New("mat: invalid dimension") -) - -// Type encoding scheme: -// -// Type Form Packing Uplo Unit Rows Columns kU kL -// uint8 [GST] uint8 [BPF] uint8 [AUL] bool int64 int64 int64 int64 -// General 'G' 'F' 'A' false r c 0 0 -// Band 'G' 'B' 'A' false r c kU kL -// Symmetric 'S' 'F' ul false n n 0 0 -// SymmetricBand 'S' 'B' ul false n n k k -// SymmetricPacked 'S' 'P' ul false n n 0 0 -// Triangular 'T' 'F' ul Diag==Unit n n 0 0 -// TriangularBand 'T' 'B' ul Diag==Unit n n k k -// TriangularPacked 'T' 'P' ul Diag==Unit n n 0 0 -// -// G - general, S - symmetric, T - triangular -// F - full, B - band, P - packed -// A - all, U - upper, L - lower - -// MarshalBinary encodes the receiver into a binary form and returns the result. 
-// -// Dense is little-endian encoded as follows: -// 0 - 3 Version = 1 (uint32) -// 4 'G' (byte) -// 5 'F' (byte) -// 6 'A' (byte) -// 7 0 (byte) -// 8 - 15 number of rows (int64) -// 16 - 23 number of columns (int64) -// 24 - 31 0 (int64) -// 32 - 39 0 (int64) -// 40 - .. matrix data elements (float64) -// [0,0] [0,1] ... [0,ncols-1] -// [1,0] [1,1] ... [1,ncols-1] -// ... -// [nrows-1,0] ... [nrows-1,ncols-1] -func (m Dense) MarshalBinary() ([]byte, error) { - bufLen := int64(headerSize) + int64(m.mat.Rows)*int64(m.mat.Cols)*int64(sizeFloat64) - if bufLen <= 0 { - // bufLen is too big and has wrapped around. - return nil, errTooBig - } - - header := storage{ - Form: 'G', Packing: 'F', Uplo: 'A', - Rows: int64(m.mat.Rows), Cols: int64(m.mat.Cols), - Version: version, - } - buf := make([]byte, bufLen) - n, err := header.marshalBinaryTo(bytes.NewBuffer(buf[:0])) - if err != nil { - return buf[:n], err - } - - p := headerSize - r, c := m.Dims() - for i := 0; i < r; i++ { - for j := 0; j < c; j++ { - binary.LittleEndian.PutUint64(buf[p:p+sizeFloat64], math.Float64bits(m.at(i, j))) - p += sizeFloat64 - } - } - - return buf, nil -} - -// MarshalBinaryTo encodes the receiver into a binary form and writes it into w. -// MarshalBinaryTo returns the number of bytes written into w and an error, if any. -// -// See MarshalBinary for the on-disk layout. -func (m Dense) MarshalBinaryTo(w io.Writer) (int, error) { - header := storage{ - Form: 'G', Packing: 'F', Uplo: 'A', - Rows: int64(m.mat.Rows), Cols: int64(m.mat.Cols), - Version: version, - } - n, err := header.marshalBinaryTo(w) - if err != nil { - return n, err - } - - r, c := m.Dims() - var b [8]byte - for i := 0; i < r; i++ { - for j := 0; j < c; j++ { - binary.LittleEndian.PutUint64(b[:], math.Float64bits(m.at(i, j))) - nn, err := w.Write(b[:]) - n += nn - if err != nil { - return n, err - } - } - } - - return n, nil -} - -// UnmarshalBinary decodes the binary form into the receiver. -// It panics if the receiver is a non-zero Dense matrix. -// -// See MarshalBinary for the on-disk layout. -// -// Limited checks on the validity of the binary input are performed: -// - matrix.ErrShape is returned if the number of rows or columns is negative, -// - an error is returned if the resulting Dense matrix is too -// big for the current architecture (e.g. a 16GB matrix written by a -// 64b application and read back from a 32b application.) -// UnmarshalBinary does not limit the size of the unmarshaled matrix, and so -// it should not be used on untrusted data. 
-func (m *Dense) UnmarshalBinary(data []byte) error { - if !m.IsZero() { - panic("mat: unmarshal into non-zero matrix") - } - - if len(data) < headerSize { - return errTooSmall - } - - var header storage - err := header.unmarshalBinary(data[:headerSize]) - if err != nil { - return err - } - rows := header.Rows - cols := header.Cols - header.Version = 0 - header.Rows = 0 - header.Cols = 0 - if (header != storage{Form: 'G', Packing: 'F', Uplo: 'A'}) { - return errWrongType - } - if rows < 0 || cols < 0 { - return errBadSize - } - size := rows * cols - if size == 0 { - return ErrZeroLength - } - if int(size) < 0 || size > maxLen { - return errTooBig - } - if len(data) != headerSize+int(rows*cols)*sizeFloat64 { - return errBadBuffer - } - - p := headerSize - m.reuseAs(int(rows), int(cols)) - for i := range m.mat.Data { - m.mat.Data[i] = math.Float64frombits(binary.LittleEndian.Uint64(data[p : p+sizeFloat64])) - p += sizeFloat64 - } - - return nil -} - -// UnmarshalBinaryFrom decodes the binary form into the receiver and returns -// the number of bytes read and an error if any. -// It panics if the receiver is a non-zero Dense matrix. -// -// See MarshalBinary for the on-disk layout. -// -// Limited checks on the validity of the binary input are performed: -// - matrix.ErrShape is returned if the number of rows or columns is negative, -// - an error is returned if the resulting Dense matrix is too -// big for the current architecture (e.g. a 16GB matrix written by a -// 64b application and read back from a 32b application.) -// UnmarshalBinary does not limit the size of the unmarshaled matrix, and so -// it should not be used on untrusted data. -func (m *Dense) UnmarshalBinaryFrom(r io.Reader) (int, error) { - if !m.IsZero() { - panic("mat: unmarshal into non-zero matrix") - } - - var header storage - n, err := header.unmarshalBinaryFrom(r) - if err != nil { - return n, err - } - rows := header.Rows - cols := header.Cols - header.Version = 0 - header.Rows = 0 - header.Cols = 0 - if (header != storage{Form: 'G', Packing: 'F', Uplo: 'A'}) { - return n, errWrongType - } - if rows < 0 || cols < 0 { - return n, errBadSize - } - size := rows * cols - if size == 0 { - return n, ErrZeroLength - } - if int(size) < 0 || size > maxLen { - return n, errTooBig - } - - m.reuseAs(int(rows), int(cols)) - var b [8]byte - for i := range m.mat.Data { - nn, err := readFull(r, b[:]) - n += nn - if err != nil { - if err == io.EOF { - return n, io.ErrUnexpectedEOF - } - return n, err - } - m.mat.Data[i] = math.Float64frombits(binary.LittleEndian.Uint64(b[:])) - } - - return n, nil -} - -// MarshalBinary encodes the receiver into a binary form and returns the result. -// -// VecDense is little-endian encoded as follows: -// -// 0 - 3 Version = 1 (uint32) -// 4 'G' (byte) -// 5 'F' (byte) -// 6 'A' (byte) -// 7 0 (byte) -// 8 - 15 number of elements (int64) -// 16 - 23 1 (int64) -// 24 - 31 0 (int64) -// 32 - 39 0 (int64) -// 40 - .. vector's data elements (float64) -func (v VecDense) MarshalBinary() ([]byte, error) { - bufLen := int64(headerSize) + int64(v.mat.N)*int64(sizeFloat64) - if bufLen <= 0 { - // bufLen is too big and has wrapped around. 
-		return nil, errTooBig
-	}
-
-	header := storage{
-		Form: 'G', Packing: 'F', Uplo: 'A',
-		Rows: int64(v.mat.N), Cols: 1,
-		Version: version,
-	}
-	buf := make([]byte, bufLen)
-	n, err := header.marshalBinaryTo(bytes.NewBuffer(buf[:0]))
-	if err != nil {
-		return buf[:n], err
-	}
-
-	p := headerSize
-	for i := 0; i < v.mat.N; i++ {
-		binary.LittleEndian.PutUint64(buf[p:p+sizeFloat64], math.Float64bits(v.at(i)))
-		p += sizeFloat64
-	}
-
-	return buf, nil
-}
-
-// MarshalBinaryTo encodes the receiver into a binary form, writes it to w and
-// returns the number of bytes written and an error if any.
-//
-// See MarshalBinary for the on-disk format.
-func (v VecDense) MarshalBinaryTo(w io.Writer) (int, error) {
-	header := storage{
-		Form: 'G', Packing: 'F', Uplo: 'A',
-		Rows: int64(v.mat.N), Cols: 1,
-		Version: version,
-	}
-	n, err := header.marshalBinaryTo(w)
-	if err != nil {
-		return n, err
-	}
-
-	var buf [8]byte
-	for i := 0; i < v.mat.N; i++ {
-		binary.LittleEndian.PutUint64(buf[:], math.Float64bits(v.at(i)))
-		nn, err := w.Write(buf[:])
-		n += nn
-		if err != nil {
-			return n, err
-		}
-	}
-
-	return n, nil
-}
-
-// UnmarshalBinary decodes the binary form into the receiver.
-// It panics if the receiver is a non-zero VecDense.
-//
-// See MarshalBinary for the on-disk layout.
-//
-// Limited checks on the validity of the binary input are performed:
-//  - matrix.ErrShape is returned if the number of rows is negative,
-//  - an error is returned if the resulting VecDense is too
-//    big for the current architecture (e.g. a 16GB vector written by a
-//    64b application and read back from a 32b application.)
-// UnmarshalBinary does not limit the size of the unmarshaled vector, and so
-// it should not be used on untrusted data.
-func (v *VecDense) UnmarshalBinary(data []byte) error {
-	if !v.IsZero() {
-		panic("mat: unmarshal into non-zero vector")
-	}
-
-	if len(data) < headerSize {
-		return errTooSmall
-	}
-
-	var header storage
-	err := header.unmarshalBinary(data[:headerSize])
-	if err != nil {
-		return err
-	}
-	if header.Cols != 1 {
-		return ErrShape
-	}
-	n := header.Rows
-	header.Version = 0
-	header.Rows = 0
-	header.Cols = 0
-	if (header != storage{Form: 'G', Packing: 'F', Uplo: 'A'}) {
-		return errWrongType
-	}
-	if n == 0 {
-		return ErrZeroLength
-	}
-	if n < 0 {
-		return errBadSize
-	}
-	if int64(maxLen) < n {
-		return errTooBig
-	}
-	if len(data) != headerSize+int(n)*sizeFloat64 {
-		return errBadBuffer
-	}
-
-	p := headerSize
-	v.reuseAs(int(n))
-	for i := range v.mat.Data {
-		v.mat.Data[i] = math.Float64frombits(binary.LittleEndian.Uint64(data[p : p+sizeFloat64]))
-		p += sizeFloat64
-	}
-
-	return nil
-}
-
-// UnmarshalBinaryFrom decodes the binary form into the receiver, from the
-// io.Reader and returns the number of bytes read and an error if any.
-// It panics if the receiver is a non-zero VecDense.
-//
-// See MarshalBinary for the on-disk layout.
-// See UnmarshalBinary for the list of sanity checks performed on the input.
-func (v *VecDense) UnmarshalBinaryFrom(r io.Reader) (int, error) { - if !v.IsZero() { - panic("mat: unmarshal into non-zero vector") - } - - var header storage - n, err := header.unmarshalBinaryFrom(r) - if err != nil { - return n, err - } - if header.Cols != 1 { - return n, ErrShape - } - l := header.Rows - header.Version = 0 - header.Rows = 0 - header.Cols = 0 - if (header != storage{Form: 'G', Packing: 'F', Uplo: 'A'}) { - return n, errWrongType - } - if l == 0 { - return n, ErrZeroLength - } - if l < 0 { - return n, errBadSize - } - if int64(maxLen) < l { - return n, errTooBig - } - - v.reuseAs(int(l)) - var b [8]byte - for i := range v.mat.Data { - nn, err := readFull(r, b[:]) - n += nn - if err != nil { - if err == io.EOF { - return n, io.ErrUnexpectedEOF - } - return n, err - } - v.mat.Data[i] = math.Float64frombits(binary.LittleEndian.Uint64(b[:])) - } - - return n, nil -} - -// storage is the internal representation of the storage format of a -// serialised matrix. -type storage struct { - Version uint32 // Keep this first. - Form byte // [GST] - Packing byte // [BPF] - Uplo byte // [AUL] - Unit bool - Rows int64 - Cols int64 - KU int64 - KL int64 -} - -// TODO(kortschak): Consider replacing these with calls to direct -// encoding/decoding of fields rather than to binary.Write/binary.Read. - -func (s storage) marshalBinaryTo(w io.Writer) (int, error) { - buf := bytes.NewBuffer(make([]byte, 0, headerSize)) - err := binary.Write(buf, binary.LittleEndian, s) - if err != nil { - return 0, err - } - return w.Write(buf.Bytes()) -} - -func (s *storage) unmarshalBinary(buf []byte) error { - err := binary.Read(bytes.NewReader(buf), binary.LittleEndian, s) - if err != nil { - return err - } - if s.Version != version { - return fmt.Errorf("mat: incorrect version: %d", s.Version) - } - return nil -} - -func (s *storage) unmarshalBinaryFrom(r io.Reader) (int, error) { - buf := make([]byte, headerSize) - n, err := readFull(r, buf) - if err != nil { - return n, err - } - return n, s.unmarshalBinary(buf[:n]) -} - -// readFull reads from r into buf until it has read len(buf). -// It returns the number of bytes copied and an error if fewer bytes were read. -// If an EOF happens after reading fewer than len(buf) bytes, io.ErrUnexpectedEOF is returned. -func readFull(r io.Reader, buf []byte) (int, error) { - var n int - var err error - for n < len(buf) && err == nil { - var nn int - nn, err = r.Read(buf[n:]) - n += nn - } - if n == len(buf) { - return n, nil - } - if err == io.EOF { - return n, io.ErrUnexpectedEOF - } - return n, err -} diff --git a/vendor/gonum.org/v1/gonum/mat/lq.go b/vendor/gonum.org/v1/gonum/mat/lq.go deleted file mode 100644 index 5a22bd1fe..000000000 --- a/vendor/gonum.org/v1/gonum/mat/lq.go +++ /dev/null @@ -1,262 +0,0 @@ -// Copyright ©2013 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package mat - -import ( - "math" - - "gonum.org/v1/gonum/blas" - "gonum.org/v1/gonum/blas/blas64" - "gonum.org/v1/gonum/lapack" - "gonum.org/v1/gonum/lapack/lapack64" -) - -const badLQ = "mat: invalid LQ factorization" - -// LQ is a type for creating and using the LQ factorization of a matrix. 
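Before the LQ type below, a round-trip sketch for the io.go encoding removed above: the 40-byte header described in the layout comments is followed by row-major float64 data, so a 2×3 Dense serializes to 40 + 6*8 = 88 bytes. mat.Equal is assumed from the same package (it is not part of this hunk), and note that UnmarshalBinary panics on a non-zero receiver:

    package main

    import (
        "fmt"
        "log"

        "gonum.org/v1/gonum/mat"
    )

    func main() {
        src := mat.NewDense(2, 3, []float64{0, 1, 2, 3, 4, 5})
        buf, err := src.MarshalBinary()
        if err != nil {
            log.Fatal(err)
        }
        var dst mat.Dense // must be zero: UnmarshalBinary panics otherwise
        if err := dst.UnmarshalBinary(buf); err != nil {
            log.Fatal(err)
        }
        fmt.Println(len(buf), mat.Equal(src, &dst)) // 88 true
    }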
-type LQ struct {
-	lq   *Dense
-	tau  []float64
-	cond float64
-}
-
-func (lq *LQ) updateCond(norm lapack.MatrixNorm) {
-	// Since A = L*Q, and Q is orthogonal, we get for the condition number κ
-	// κ(A) := |A| |A^-1| = |L*Q| |(L*Q)^-1| = |L| |Q^T * L^-1|
-	//       = |L| |L^-1| = κ(L),
-	// where we used the fact that Q^-1 = Q^T. However, this assumes that
-	// the matrix norm is invariant under orthogonal transformations which
-	// is not the case for CondNorm. Hopefully the error is negligible: κ
-	// is only a qualitative measure anyway.
-	m := lq.lq.mat.Rows
-	work := getFloats(3*m, false)
-	iwork := getInts(m, false)
-	l := lq.lq.asTriDense(m, blas.NonUnit, blas.Lower)
-	v := lapack64.Trcon(norm, l.mat, work, iwork)
-	lq.cond = 1 / v
-	putFloats(work)
-	putInts(iwork)
-}
-
-// Factorize computes the LQ factorization of an m×n matrix a where m <= n. The LQ
-// factorization always exists even if A is singular.
-//
-// The LQ decomposition is a factorization of the matrix A such that A = L * Q.
-// The matrix Q is an orthonormal n×n matrix, and L is an m×n lower triangular matrix.
-// L and Q can be extracted using the LTo and QTo methods.
-func (lq *LQ) Factorize(a Matrix) {
-	lq.factorize(a, CondNorm)
-}
-
-func (lq *LQ) factorize(a Matrix, norm lapack.MatrixNorm) {
-	m, n := a.Dims()
-	if m > n {
-		panic(ErrShape)
-	}
-	k := min(m, n)
-	if lq.lq == nil {
-		lq.lq = &Dense{}
-	}
-	lq.lq.CloneFrom(a)
-	work := []float64{0}
-	lq.tau = make([]float64, k)
-	lapack64.Gelqf(lq.lq.mat, lq.tau, work, -1)
-	work = getFloats(int(work[0]), false)
-	lapack64.Gelqf(lq.lq.mat, lq.tau, work, len(work))
-	putFloats(work)
-	lq.updateCond(norm)
-}
-
-// isValid returns whether the receiver contains a factorization.
-func (lq *LQ) isValid() bool {
-	return lq.lq != nil && !lq.lq.IsZero()
-}
-
-// Cond returns the condition number for the factorized matrix.
-// Cond will panic if the receiver does not contain a factorization.
-func (lq *LQ) Cond() float64 {
-	if !lq.isValid() {
-		panic(badLQ)
-	}
-	return lq.cond
-}
-
-// TODO(btracey): Add in the "Reduced" forms for extracting the m×m orthogonal
-// and upper triangular matrices.
-
-// LTo extracts the m×n lower trapezoidal matrix from an LQ decomposition.
-// If dst is nil, a new matrix is allocated. The resulting L matrix is returned.
-// LTo will panic if the receiver does not contain a factorization.
-func (lq *LQ) LTo(dst *Dense) *Dense {
-	if !lq.isValid() {
-		panic(badLQ)
-	}
-
-	r, c := lq.lq.Dims()
-	if dst == nil {
-		dst = NewDense(r, c, nil)
-	} else {
-		dst.reuseAs(r, c)
-	}
-
-	// Disguise the LQ as a lower triangular.
-	t := &TriDense{
-		mat: blas64.Triangular{
-			N:      r,
-			Stride: lq.lq.mat.Stride,
-			Data:   lq.lq.mat.Data,
-			Uplo:   blas.Lower,
-			Diag:   blas.NonUnit,
-		},
-		cap: lq.lq.capCols,
-	}
-	dst.Copy(t)
-
-	if r == c {
-		return dst
-	}
-	// Zero right of the triangular.
-	for i := 0; i < r; i++ {
-		zero(dst.mat.Data[i*dst.mat.Stride+r : i*dst.mat.Stride+c])
-	}
-
-	return dst
-}
-
-// QTo extracts the n×n orthonormal matrix Q from an LQ decomposition.
-// If dst is nil, a new matrix is allocated. The resulting Q matrix is returned.
-// QTo will panic if the receiver does not contain a factorization.
-func (lq *LQ) QTo(dst *Dense) *Dense {
-	if !lq.isValid() {
-		panic(badLQ)
-	}
-
-	_, c := lq.lq.Dims()
-	if dst == nil {
-		dst = NewDense(c, c, nil)
-	} else {
-		dst.reuseAsZeroed(c, c)
-	}
-	q := dst.mat
-
-	// Set Q = I.
-	ldq := q.Stride
-	for i := 0; i < c; i++ {
-		q.Data[i*ldq+i] = 1
-	}
-
-	// Construct Q from the elementary reflectors.
- work := []float64{0} - lapack64.Ormlq(blas.Left, blas.NoTrans, lq.lq.mat, lq.tau, q, work, -1) - work = getFloats(int(work[0]), false) - lapack64.Ormlq(blas.Left, blas.NoTrans, lq.lq.mat, lq.tau, q, work, len(work)) - putFloats(work) - - return dst -} - -// SolveTo finds a minimum-norm solution to a system of linear equations defined -// by the matrices A and b, where A is an m×n matrix represented in its LQ factorized -// form. If A is singular or near-singular a Condition error is returned. -// See the documentation for Condition for more information. -// -// The minimization problem solved depends on the input parameters. -// If trans == false, find the minimum norm solution of A * X = B. -// If trans == true, find X such that ||A*X - B||_2 is minimized. -// The solution matrix, X, is stored in place into dst. -// SolveTo will panic if the receiver does not contain a factorization. -func (lq *LQ) SolveTo(dst *Dense, trans bool, b Matrix) error { - if !lq.isValid() { - panic(badLQ) - } - - r, c := lq.lq.Dims() - br, bc := b.Dims() - - // The LQ solve algorithm stores the result in-place into the right hand side. - // The storage for the answer must be large enough to hold both b and x. - // However, this method's receiver must be the size of x. Copy b, and then - // copy the result into x at the end. - if trans { - if c != br { - panic(ErrShape) - } - dst.reuseAs(r, bc) - } else { - if r != br { - panic(ErrShape) - } - dst.reuseAs(c, bc) - } - // Do not need to worry about overlap between x and b because w has its own - // independent storage. - w := getWorkspace(max(r, c), bc, false) - w.Copy(b) - t := lq.lq.asTriDense(lq.lq.mat.Rows, blas.NonUnit, blas.Lower).mat - if trans { - work := []float64{0} - lapack64.Ormlq(blas.Left, blas.NoTrans, lq.lq.mat, lq.tau, w.mat, work, -1) - work = getFloats(int(work[0]), false) - lapack64.Ormlq(blas.Left, blas.NoTrans, lq.lq.mat, lq.tau, w.mat, work, len(work)) - putFloats(work) - - ok := lapack64.Trtrs(blas.Trans, t, w.mat) - if !ok { - return Condition(math.Inf(1)) - } - } else { - ok := lapack64.Trtrs(blas.NoTrans, t, w.mat) - if !ok { - return Condition(math.Inf(1)) - } - for i := r; i < c; i++ { - zero(w.mat.Data[i*w.mat.Stride : i*w.mat.Stride+bc]) - } - work := []float64{0} - lapack64.Ormlq(blas.Left, blas.Trans, lq.lq.mat, lq.tau, w.mat, work, -1) - work = getFloats(int(work[0]), false) - lapack64.Ormlq(blas.Left, blas.Trans, lq.lq.mat, lq.tau, w.mat, work, len(work)) - putFloats(work) - } - // x was set above to be the correct size for the result. - dst.Copy(w) - putWorkspace(w) - if lq.cond > ConditionTolerance { - return Condition(lq.cond) - } - return nil -} - -// SolveVecTo finds a minimum-norm solution to a system of linear equations. -// See LQ.SolveTo for the full documentation. -// SolveToVec will panic if the receiver does not contain a factorization. -func (lq *LQ) SolveVecTo(dst *VecDense, trans bool, b Vector) error { - if !lq.isValid() { - panic(badLQ) - } - - r, c := lq.lq.Dims() - if _, bc := b.Dims(); bc != 1 { - panic(ErrShape) - } - - // The Solve implementation is non-trivial, so rather than duplicate the code, - // instead recast the VecDenses as Dense and call the matrix code. 
- bm := Matrix(b) - if rv, ok := b.(RawVectorer); ok { - bmat := rv.RawVector() - if dst != b { - dst.checkOverlap(bmat) - } - b := VecDense{mat: bmat} - bm = b.asDense() - } - if trans { - dst.reuseAs(r) - } else { - dst.reuseAs(c) - } - return lq.SolveTo(dst.asDense(), trans, bm) -} diff --git a/vendor/gonum.org/v1/gonum/mat/lu.go b/vendor/gonum.org/v1/gonum/mat/lu.go deleted file mode 100644 index e0437169b..000000000 --- a/vendor/gonum.org/v1/gonum/mat/lu.go +++ /dev/null @@ -1,422 +0,0 @@ -// Copyright ©2013 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package mat - -import ( - "math" - - "gonum.org/v1/gonum/blas" - "gonum.org/v1/gonum/blas/blas64" - "gonum.org/v1/gonum/floats" - "gonum.org/v1/gonum/lapack" - "gonum.org/v1/gonum/lapack/lapack64" -) - -const ( - badSliceLength = "mat: improper slice length" - badLU = "mat: invalid LU factorization" -) - -// LU is a type for creating and using the LU factorization of a matrix. -type LU struct { - lu *Dense - pivot []int - cond float64 -} - -// updateCond updates the stored condition number of the matrix. anorm is the -// norm of the original matrix. If anorm is negative it will be estimated. -func (lu *LU) updateCond(anorm float64, norm lapack.MatrixNorm) { - n := lu.lu.mat.Cols - work := getFloats(4*n, false) - defer putFloats(work) - iwork := getInts(n, false) - defer putInts(iwork) - if anorm < 0 { - // This is an approximation. By the definition of a norm, - // |AB| <= |A| |B|. - // Since A = L*U, we get for the condition number κ that - // κ(A) := |A| |A^-1| = |L*U| |A^-1| <= |L| |U| |A^-1|, - // so this will overestimate the condition number somewhat. - // The norm of the original factorized matrix cannot be stored - // because of update possibilities. - u := lu.lu.asTriDense(n, blas.NonUnit, blas.Upper) - l := lu.lu.asTriDense(n, blas.Unit, blas.Lower) - unorm := lapack64.Lantr(norm, u.mat, work) - lnorm := lapack64.Lantr(norm, l.mat, work) - anorm = unorm * lnorm - } - v := lapack64.Gecon(norm, lu.lu.mat, anorm, work, iwork) - lu.cond = 1 / v -} - -// Factorize computes the LU factorization of the square matrix a and stores the -// result. The LU decomposition will complete regardless of the singularity of a. -// -// The LU factorization is computed with pivoting, and so really the decomposition -// is a PLU decomposition where P is a permutation matrix. The individual matrix -// factors can be extracted from the factorization using the Permutation method -// on Dense, and the LU LTo and UTo methods. -func (lu *LU) Factorize(a Matrix) { - lu.factorize(a, CondNorm) -} - -func (lu *LU) factorize(a Matrix, norm lapack.MatrixNorm) { - r, c := a.Dims() - if r != c { - panic(ErrSquare) - } - if lu.lu == nil { - lu.lu = NewDense(r, r, nil) - } else { - lu.lu.Reset() - lu.lu.reuseAs(r, r) - } - lu.lu.Copy(a) - if cap(lu.pivot) < r { - lu.pivot = make([]int, r) - } - lu.pivot = lu.pivot[:r] - work := getFloats(r, false) - anorm := lapack64.Lange(norm, lu.lu.mat, work) - putFloats(work) - lapack64.Getrf(lu.lu.mat, lu.pivot) - lu.updateCond(anorm, norm) -} - -// isValid returns whether the receiver contains a factorization. -func (lu *LU) isValid() bool { - return lu.lu != nil && !lu.lu.IsZero() -} - -// Cond returns the condition number for the factorized matrix. -// Cond will panic if the receiver does not contain a factorization. 
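A short sketch of the LQ API that ends above, under the m <= n shape requirement enforced by Factorize; as with the other extractors in this package, nil dst means allocate, and mat.Formatted is assumed from the same package rather than from this hunk:

    package main

    import (
        "fmt"
        "log"

        "gonum.org/v1/gonum/mat"
    )

    func main() {
        // A is 2×3 (m <= n), as Factorize requires.
        a := mat.NewDense(2, 3, []float64{1, 2, 3, 4, 5, 6})
        var lq mat.LQ
        lq.Factorize(a)

        l := lq.LTo(nil) // 2×3 lower trapezoidal L
        q := lq.QTo(nil) // 3×3 orthonormal Q

        // Minimum-norm solution of A*x = b.
        b := mat.NewVecDense(2, []float64{1, 1})
        var x mat.VecDense
        if err := lq.SolveVecTo(&x, false, b); err != nil {
            log.Fatal(err)
        }
        lr, lc := l.Dims()
        qr, qc := q.Dims()
        fmt.Println(lr, lc, qr, qc) // 2 3 3 3
        fmt.Println(mat.Formatted(&x))
    }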
-func (lu *LU) Cond() float64 { - if !lu.isValid() { - panic(badLU) - } - return lu.cond -} - -// Reset resets the factorization so that it can be reused as the receiver of a -// dimensionally restricted operation. -func (lu *LU) Reset() { - if lu.lu != nil { - lu.lu.Reset() - } - lu.pivot = lu.pivot[:0] -} - -func (lu *LU) isZero() bool { - return len(lu.pivot) == 0 -} - -// Det returns the determinant of the matrix that has been factorized. In many -// expressions, using LogDet will be more numerically stable. -// Det will panic if the receiver does not contain a factorization. -func (lu *LU) Det() float64 { - det, sign := lu.LogDet() - return math.Exp(det) * sign -} - -// LogDet returns the log of the determinant and the sign of the determinant -// for the matrix that has been factorized. Numerical stability in product and -// division expressions is generally improved by working in log space. -// LogDet will panic if the receiver does not contain a factorization. -func (lu *LU) LogDet() (det float64, sign float64) { - if !lu.isValid() { - panic(badLU) - } - - _, n := lu.lu.Dims() - logDiag := getFloats(n, false) - defer putFloats(logDiag) - sign = 1.0 - for i := 0; i < n; i++ { - v := lu.lu.at(i, i) - if v < 0 { - sign *= -1 - } - if lu.pivot[i] != i { - sign *= -1 - } - logDiag[i] = math.Log(math.Abs(v)) - } - return floats.Sum(logDiag), sign -} - -// Pivot returns pivot indices that enable the construction of the permutation -// matrix P (see Dense.Permutation). If swaps == nil, then new memory will be -// allocated, otherwise the length of the input must be equal to the size of the -// factorized matrix. -// Pivot will panic if the receiver does not contain a factorization. -func (lu *LU) Pivot(swaps []int) []int { - if !lu.isValid() { - panic(badLU) - } - - _, n := lu.lu.Dims() - if swaps == nil { - swaps = make([]int, n) - } - if len(swaps) != n { - panic(badSliceLength) - } - // Perform the inverse of the row swaps in order to find the final - // row swap position. - for i := range swaps { - swaps[i] = i - } - for i := n - 1; i >= 0; i-- { - v := lu.pivot[i] - swaps[i], swaps[v] = swaps[v], swaps[i] - } - return swaps -} - -// RankOne updates an LU factorization as if a rank-one update had been applied to -// the original matrix A, storing the result into the receiver. That is, if in -// the original LU decomposition P * L * U = A, in the updated decomposition -// P * L * U = A + alpha * x * y^T. -// RankOne will panic if orig does not contain a factorization. -func (lu *LU) RankOne(orig *LU, alpha float64, x, y Vector) { - if !orig.isValid() { - panic(badLU) - } - - // RankOne uses algorithm a1 on page 28 of "Multiple-Rank Updates to Matrix - // Factorizations for Nonlinear Analysis and Circuit Design" by Linzhong Deng. 
- // http://web.stanford.edu/group/SOL/dissertations/Linzhong-Deng-thesis.pdf - _, n := orig.lu.Dims() - if r, c := x.Dims(); r != n || c != 1 { - panic(ErrShape) - } - if r, c := y.Dims(); r != n || c != 1 { - panic(ErrShape) - } - if orig != lu { - if lu.isZero() { - if cap(lu.pivot) < n { - lu.pivot = make([]int, n) - } - lu.pivot = lu.pivot[:n] - if lu.lu == nil { - lu.lu = NewDense(n, n, nil) - } else { - lu.lu.reuseAs(n, n) - } - } else if len(lu.pivot) != n { - panic(ErrShape) - } - copy(lu.pivot, orig.pivot) - lu.lu.Copy(orig.lu) - } - - xs := getFloats(n, false) - defer putFloats(xs) - ys := getFloats(n, false) - defer putFloats(ys) - for i := 0; i < n; i++ { - xs[i] = x.AtVec(i) - ys[i] = y.AtVec(i) - } - - // Adjust for the pivoting in the LU factorization - for i, v := range lu.pivot { - xs[i], xs[v] = xs[v], xs[i] - } - - lum := lu.lu.mat - omega := alpha - for j := 0; j < n; j++ { - ujj := lum.Data[j*lum.Stride+j] - ys[j] /= ujj - theta := 1 + xs[j]*ys[j]*omega - beta := omega * ys[j] / theta - gamma := omega * xs[j] - omega -= beta * gamma - lum.Data[j*lum.Stride+j] *= theta - for i := j + 1; i < n; i++ { - xs[i] -= lum.Data[i*lum.Stride+j] * xs[j] - tmp := ys[i] - ys[i] -= lum.Data[j*lum.Stride+i] * ys[j] - lum.Data[i*lum.Stride+j] += beta * xs[i] - lum.Data[j*lum.Stride+i] += gamma * tmp - } - } - lu.updateCond(-1, CondNorm) -} - -// LTo extracts the lower triangular matrix from an LU factorization. -// If dst is nil, a new matrix is allocated. The resulting L matrix is returned. -// LTo will panic if the receiver does not contain a factorization. -func (lu *LU) LTo(dst *TriDense) *TriDense { - if !lu.isValid() { - panic(badLU) - } - - _, n := lu.lu.Dims() - if dst == nil { - dst = NewTriDense(n, Lower, nil) - } else { - dst.reuseAs(n, Lower) - } - // Extract the lower triangular elements. - for i := 0; i < n; i++ { - for j := 0; j < i; j++ { - dst.mat.Data[i*dst.mat.Stride+j] = lu.lu.mat.Data[i*lu.lu.mat.Stride+j] - } - } - // Set ones on the diagonal. - for i := 0; i < n; i++ { - dst.mat.Data[i*dst.mat.Stride+i] = 1 - } - return dst -} - -// UTo extracts the upper triangular matrix from an LU factorization. -// If dst is nil, a new matrix is allocated. The resulting U matrix is returned. -// UTo will panic if the receiver does not contain a factorization. -func (lu *LU) UTo(dst *TriDense) *TriDense { - if !lu.isValid() { - panic(badLU) - } - - _, n := lu.lu.Dims() - if dst == nil { - dst = NewTriDense(n, Upper, nil) - } else { - dst.reuseAs(n, Upper) - } - // Extract the upper triangular elements. - for i := 0; i < n; i++ { - for j := i; j < n; j++ { - dst.mat.Data[i*dst.mat.Stride+j] = lu.lu.mat.Data[i*lu.lu.mat.Stride+j] - } - } - return dst -} - -// Permutation constructs an r×r permutation matrix with the given row swaps. -// A permutation matrix has exactly one element equal to one in each row and column -// and all other elements equal to zero. swaps[i] specifies the row with which -// i will be swapped, which is equivalent to the non-zero column of row i. -func (m *Dense) Permutation(r int, swaps []int) { - m.reuseAs(r, r) - for i := 0; i < r; i++ { - zero(m.mat.Data[i*m.mat.Stride : i*m.mat.Stride+r]) - v := swaps[i] - if v < 0 || v >= r { - panic(ErrRowAccess) - } - m.mat.Data[i*m.mat.Stride+v] = 1 - } -} - -// SolveTo solves a system of linear equations using the LU decomposition of a matrix. 
-// It computes -// A * X = B if trans == false -// A^T * X = B if trans == true -// In both cases, A is represented in LU factorized form, and the matrix X is -// stored into dst. -// -// If A is singular or near-singular a Condition error is returned. See -// the documentation for Condition for more information. -// SolveTo will panic if the receiver does not contain a factorization. -func (lu *LU) SolveTo(dst *Dense, trans bool, b Matrix) error { - if !lu.isValid() { - panic(badLU) - } - - _, n := lu.lu.Dims() - br, bc := b.Dims() - if br != n { - panic(ErrShape) - } - // TODO(btracey): Should test the condition number instead of testing that - // the determinant is exactly zero. - if lu.Det() == 0 { - return Condition(math.Inf(1)) - } - - dst.reuseAs(n, bc) - bU, _ := untranspose(b) - var restore func() - if dst == bU { - dst, restore = dst.isolatedWorkspace(bU) - defer restore() - } else if rm, ok := bU.(RawMatrixer); ok { - dst.checkOverlap(rm.RawMatrix()) - } - - dst.Copy(b) - t := blas.NoTrans - if trans { - t = blas.Trans - } - lapack64.Getrs(t, lu.lu.mat, dst.mat, lu.pivot) - if lu.cond > ConditionTolerance { - return Condition(lu.cond) - } - return nil -} - -// SolveVecTo solves a system of linear equations using the LU decomposition of a matrix. -// It computes -// A * x = b if trans == false -// A^T * x = b if trans == true -// In both cases, A is represented in LU factorized form, and the vector x is -// stored into dst. -// -// If A is singular or near-singular a Condition error is returned. See -// the documentation for Condition for more information. -// SolveVecTo will panic if the receiver does not contain a factorization. -func (lu *LU) SolveVecTo(dst *VecDense, trans bool, b Vector) error { - if !lu.isValid() { - panic(badLU) - } - - _, n := lu.lu.Dims() - if br, bc := b.Dims(); br != n || bc != 1 { - panic(ErrShape) - } - switch rv := b.(type) { - default: - dst.reuseAs(n) - return lu.SolveTo(dst.asDense(), trans, b) - case RawVectorer: - if dst != b { - dst.checkOverlap(rv.RawVector()) - } - // TODO(btracey): Should test the condition number instead of testing that - // the determinant is exactly zero. - if lu.Det() == 0 { - return Condition(math.Inf(1)) - } - - dst.reuseAs(n) - var restore func() - if dst == b { - dst, restore = dst.isolatedWorkspace(b) - defer restore() - } - dst.CopyVec(b) - vMat := blas64.General{ - Rows: n, - Cols: 1, - Stride: dst.mat.Inc, - Data: dst.mat.Data, - } - t := blas.NoTrans - if trans { - t = blas.Trans - } - lapack64.Getrs(t, lu.lu.mat, vMat, lu.pivot) - if lu.cond > ConditionTolerance { - return Condition(lu.cond) - } - return nil - } -} diff --git a/vendor/gonum.org/v1/gonum/mat/matrix.go b/vendor/gonum.org/v1/gonum/mat/matrix.go deleted file mode 100644 index 456e78d83..000000000 --- a/vendor/gonum.org/v1/gonum/mat/matrix.go +++ /dev/null @@ -1,985 +0,0 @@ -// Copyright ©2013 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package mat - -import ( - "math" - - "gonum.org/v1/gonum/blas" - "gonum.org/v1/gonum/blas/blas64" - "gonum.org/v1/gonum/floats" - "gonum.org/v1/gonum/lapack" - "gonum.org/v1/gonum/lapack/lapack64" -) - -// Matrix is the basic matrix interface type. -type Matrix interface { - // Dims returns the dimensions of a Matrix. - Dims() (r, c int) - - // At returns the value of a matrix element at row i, column j. - // It will panic if i or j are out of bounds for the matrix. 
- At(i, j int) float64 - - // T returns the transpose of the Matrix. Whether T returns a copy of the - // underlying data is implementation dependent. - // This method may be implemented using the Transpose type, which - // provides an implicit matrix transpose. - T() Matrix -} - -var ( - _ Matrix = Transpose{} - _ Untransposer = Transpose{} -) - -// Transpose is a type for performing an implicit matrix transpose. It implements -// the Matrix interface, returning values from the transpose of the matrix within. -type Transpose struct { - Matrix Matrix -} - -// At returns the value of the element at row i and column j of the transposed -// matrix, that is, row j and column i of the Matrix field. -func (t Transpose) At(i, j int) float64 { - return t.Matrix.At(j, i) -} - -// Dims returns the dimensions of the transposed matrix. The number of rows returned -// is the number of columns in the Matrix field, and the number of columns is -// the number of rows in the Matrix field. -func (t Transpose) Dims() (r, c int) { - c, r = t.Matrix.Dims() - return r, c -} - -// T performs an implicit transpose by returning the Matrix field. -func (t Transpose) T() Matrix { - return t.Matrix -} - -// Untranspose returns the Matrix field. -func (t Transpose) Untranspose() Matrix { - return t.Matrix -} - -// Untransposer is a type that can undo an implicit transpose. -type Untransposer interface { - // Note: This interface is needed to unify all of the Transpose types. In - // the mat methods, we need to test if the Matrix has been implicitly - // transposed. If this is checked by testing for the specific Transpose type - // then the behavior will be different if the user uses T() or TTri() for a - // triangular matrix. - - // Untranspose returns the underlying Matrix stored for the implicit transpose. - Untranspose() Matrix -} - -// UntransposeBander is a type that can undo an implicit band transpose. -type UntransposeBander interface { - // Untranspose returns the underlying Banded stored for the implicit transpose. - UntransposeBand() Banded -} - -// UntransposeTrier is a type that can undo an implicit triangular transpose. -type UntransposeTrier interface { - // Untranspose returns the underlying Triangular stored for the implicit transpose. - UntransposeTri() Triangular -} - -// UntransposeTriBander is a type that can undo an implicit triangular banded -// transpose. -type UntransposeTriBander interface { - // Untranspose returns the underlying Triangular stored for the implicit transpose. - UntransposeTriBand() TriBanded -} - -// Mutable is a matrix interface type that allows elements to be altered. -type Mutable interface { - // Set alters the matrix element at row i, column j to v. - // It will panic if i or j are out of bounds for the matrix. - Set(i, j int, v float64) - - Matrix -} - -// A RowViewer can return a Vector reflecting a row that is backed by the matrix -// data. The Vector returned will have length equal to the number of columns. -type RowViewer interface { - RowView(i int) Vector -} - -// A RawRowViewer can return a slice of float64 reflecting a row that is backed by the matrix -// data. -type RawRowViewer interface { - RawRowView(i int) []float64 -} - -// A ColViewer can return a Vector reflecting a column that is backed by the matrix -// data. The Vector returned will have length equal to the number of rows. -type ColViewer interface { - ColView(j int) Vector -} - -// A RawColViewer can return a slice of float64 reflecting a column that is backed by the matrix -// data. 
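The Transpose wrapper above is what makes T() copy-free; a sketch of the implicit-transpose round trip, assuming Dense.T returns a Transpose wrapper as the interface comment suggests:

    package main

    import (
        "fmt"

        "gonum.org/v1/gonum/mat"
    )

    func main() {
        a := mat.NewDense(2, 3, []float64{0, 1, 2, 3, 4, 5})
        at := a.T() // implicit transpose; no data is copied

        r, c := at.Dims()
        fmt.Println(r, c)        // 3 2
        fmt.Println(at.At(2, 1)) // 5, i.e. a.At(1, 2)

        // Library routines recover the untransposed operand like this:
        if ut, ok := at.(mat.Untransposer); ok {
            fmt.Println(ut.Untranspose() == mat.Matrix(a)) // true
        }
    }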
-type RawColViewer interface {
-	RawColView(j int) []float64
-}
-
-// A ClonerFrom can make a copy of a into the receiver, overwriting the previous value of the
-// receiver. The clone operation does not make any restriction on shape and will not cause
-// shadowing.
-type ClonerFrom interface {
-	CloneFrom(a Matrix)
-}
-
-// A Reseter can reset the matrix so that it can be reused as the receiver of a dimensionally
-// restricted operation. This is commonly used when the matrix is being used as a workspace
-// or temporary matrix.
-//
-// If the matrix is a view, using the reset matrix may result in data corruption in elements
-// outside the view.
-type Reseter interface {
-	Reset()
-}
-
-// A Copier can make a copy of elements of a into the receiver. The submatrix copied
-// starts at row and column 0 and has dimensions equal to the minimum dimensions of
-// the two matrices. The number of rows and columns copied is returned.
-// Copy will copy from a source that aliases the receiver unless the source is transposed;
-// an aliasing transpose copy will panic except for the special case when
-// the source data has a unitary increment or stride.
-type Copier interface {
-	Copy(a Matrix) (r, c int)
-}
-
-// A Grower can grow the size of the represented matrix by the given number of rows and columns.
-// Growing beyond the size given by the Caps method will result in the allocation of a new
-// matrix and copying of the elements. If Grow is called with negative increments it will
-// panic with ErrIndexOutOfRange.
-type Grower interface {
-	Caps() (r, c int)
-	Grow(r, c int) Matrix
-}
-
-// A BandWidther represents a banded matrix and can return the left and right half-bandwidths, k1 and
-// k2.
-type BandWidther interface {
-	BandWidth() (k1, k2 int)
-}
-
-// A RawMatrixSetter can set the underlying blas64.General used by the receiver. There is no restriction
-// on the shape of the receiver. Changes to the receiver's elements will be reflected in the blas64.General.Data.
-type RawMatrixSetter interface {
-	SetRawMatrix(a blas64.General)
-}
-
-// A RawMatrixer can return a blas64.General representation of the receiver. Changes to the blas64.General.Data
-// slice will be reflected in the original matrix, changes to the Rows, Cols and Stride fields will not.
-type RawMatrixer interface {
-	RawMatrix() blas64.General
-}
-
-// A RawVectorer can return a blas64.Vector representation of the receiver. Changes to the blas64.Vector.Data
-// slice will be reflected in the original matrix, changes to the Inc field will not.
-type RawVectorer interface {
-	RawVector() blas64.Vector
-}
-
-// A NonZeroDoer can call a function for each non-zero element of the receiver.
-// The parameters of the function are the element indices and its value.
-type NonZeroDoer interface {
-	DoNonZero(func(i, j int, v float64))
-}
-
-// A RowNonZeroDoer can call a function for each non-zero element of a row of the receiver.
-// The parameters of the function are the element indices and its value.
-type RowNonZeroDoer interface {
-	DoRowNonZero(i int, fn func(i, j int, v float64))
-}
-
-// A ColNonZeroDoer can call a function for each non-zero element of a column of the receiver.
-// The parameters of the function are the element indices and its value.
-type ColNonZeroDoer interface {
-	DoColNonZero(j int, fn func(i, j int, v float64))
-}
-
-// untranspose untransposes a matrix if applicable. If a is an Untransposer, then
-// untranspose returns the underlying matrix and true.
If it is not, then it returns -// the input matrix and false. -func untranspose(a Matrix) (Matrix, bool) { - if ut, ok := a.(Untransposer); ok { - return ut.Untranspose(), true - } - return a, false -} - -// untransposeExtract returns an untransposed matrix in a built-in matrix type. -// -// The untransposed matrix is returned unaltered if it is a built-in matrix type. -// Otherwise, if it implements a Raw method, an appropriate built-in type value -// is returned holding the raw matrix value of the input. If neither of these -// is possible, the untransposed matrix is returned. -func untransposeExtract(a Matrix) (Matrix, bool) { - ut, trans := untranspose(a) - switch m := ut.(type) { - case *DiagDense, *SymBandDense, *TriBandDense, *BandDense, *TriDense, *SymDense, *Dense: - return m, trans - // TODO(btracey): Add here if we ever have an equivalent of RawDiagDense. - case RawSymBander: - rsb := m.RawSymBand() - if rsb.Uplo != blas.Upper { - return ut, trans - } - var sb SymBandDense - sb.SetRawSymBand(rsb) - return &sb, trans - case RawTriBander: - rtb := m.RawTriBand() - if rtb.Diag == blas.Unit { - return ut, trans - } - var tb TriBandDense - tb.SetRawTriBand(rtb) - return &tb, trans - case RawBander: - var b BandDense - b.SetRawBand(m.RawBand()) - return &b, trans - case RawTriangular: - rt := m.RawTriangular() - if rt.Diag == blas.Unit { - return ut, trans - } - var t TriDense - t.SetRawTriangular(rt) - return &t, trans - case RawSymmetricer: - rs := m.RawSymmetric() - if rs.Uplo != blas.Upper { - return ut, trans - } - var s SymDense - s.SetRawSymmetric(rs) - return &s, trans - case RawMatrixer: - var d Dense - d.SetRawMatrix(m.RawMatrix()) - return &d, trans - default: - return ut, trans - } -} - -// TODO(btracey): Consider adding CopyCol/CopyRow if the behavior seems useful. -// TODO(btracey): Add in fast paths to Row/Col for the other concrete types -// (TriDense, etc.) as well as relevant interfaces (RowColer, RawRowViewer, etc.) - -// Col copies the elements in the jth column of the matrix into the slice dst. -// The length of the provided slice must equal the number of rows, unless the -// slice is nil in which case a new slice is first allocated. -func Col(dst []float64, j int, a Matrix) []float64 { - r, c := a.Dims() - if j < 0 || j >= c { - panic(ErrColAccess) - } - if dst == nil { - dst = make([]float64, r) - } else { - if len(dst) != r { - panic(ErrColLength) - } - } - aU, aTrans := untranspose(a) - if rm, ok := aU.(RawMatrixer); ok { - m := rm.RawMatrix() - if aTrans { - copy(dst, m.Data[j*m.Stride:j*m.Stride+m.Cols]) - return dst - } - blas64.Copy(blas64.Vector{N: r, Inc: m.Stride, Data: m.Data[j:]}, - blas64.Vector{N: r, Inc: 1, Data: dst}, - ) - return dst - } - for i := 0; i < r; i++ { - dst[i] = a.At(i, j) - } - return dst -} - -// Row copies the elements in the ith row of the matrix into the slice dst. -// The length of the provided slice must equal the number of columns, unless the -// slice is nil in which case a new slice is first allocated. 
-func Row(dst []float64, i int, a Matrix) []float64 {
-	r, c := a.Dims()
-	if i < 0 || i >= r {
-		panic(ErrRowAccess)
-	}
-	if dst == nil {
-		dst = make([]float64, c)
-	} else {
-		if len(dst) != c {
-			panic(ErrRowLength)
-		}
-	}
-	aU, aTrans := untranspose(a)
-	if rm, ok := aU.(RawMatrixer); ok {
-		m := rm.RawMatrix()
-		if aTrans {
-			blas64.Copy(blas64.Vector{N: c, Inc: m.Stride, Data: m.Data[i:]},
-				blas64.Vector{N: c, Inc: 1, Data: dst},
-			)
-			return dst
-		}
-		copy(dst, m.Data[i*m.Stride:i*m.Stride+m.Cols])
-		return dst
-	}
-	for j := 0; j < c; j++ {
-		dst[j] = a.At(i, j)
-	}
-	return dst
-}
-
-// Cond returns the condition number of the given matrix under the given norm.
-// The condition number must be based on the 1-norm, 2-norm or ∞-norm.
-// Cond will panic with matrix.ErrShape if the matrix has zero size.
-//
-// BUG(btracey): The computation of the 1-norm and ∞-norm for non-square matrices
-// is inaccurate, although it is typically the right order of magnitude. See
-// https://github.com/xianyi/OpenBLAS/issues/636. While the value returned will
-// change with the resolution of this bug, the result from Cond will match the
-// condition number used internally.
-func Cond(a Matrix, norm float64) float64 {
-	m, n := a.Dims()
-	if m == 0 || n == 0 {
-		panic(ErrShape)
-	}
-	var lnorm lapack.MatrixNorm
-	switch norm {
-	default:
-		panic("mat: bad norm value")
-	case 1:
-		lnorm = lapack.MaxColumnSum
-	case 2:
-		var svd SVD
-		ok := svd.Factorize(a, SVDNone)
-		if !ok {
-			return math.Inf(1)
-		}
-		return svd.Cond()
-	case math.Inf(1):
-		lnorm = lapack.MaxRowSum
-	}
-
-	if m == n {
-		// Use the LU decomposition to compute the condition number.
-		var lu LU
-		lu.factorize(a, lnorm)
-		return lu.Cond()
-	}
-	if m > n {
-		// Use the QR factorization to compute the condition number.
-		var qr QR
-		qr.factorize(a, lnorm)
-		return qr.Cond()
-	}
-	// Use the LQ factorization to compute the condition number.
-	var lq LQ
-	lq.factorize(a, lnorm)
-	return lq.Cond()
-}
-
-// Det returns the determinant of the matrix a. In many expressions using LogDet
-// will be more numerically stable.
-func Det(a Matrix) float64 {
-	det, sign := LogDet(a)
-	return math.Exp(det) * sign
-}
-
-// Dot returns the sum of the element-wise product of a and b.
-// Dot panics if the vector lengths are unequal.
-func Dot(a, b Vector) float64 {
-	la := a.Len()
-	lb := b.Len()
-	if la != lb {
-		panic(ErrShape)
-	}
-	if arv, ok := a.(RawVectorer); ok {
-		if brv, ok := b.(RawVectorer); ok {
-			return blas64.Dot(arv.RawVector(), brv.RawVector())
-		}
-	}
-	var sum float64
-	for i := 0; i < la; i++ {
-		sum += a.At(i, 0) * b.At(i, 0)
-	}
-	return sum
-}
-
-// Equal returns whether the matrices a and b have the same size
-// and are element-wise equal.
-func Equal(a, b Matrix) bool { - ar, ac := a.Dims() - br, bc := b.Dims() - if ar != br || ac != bc { - return false - } - aU, aTrans := untranspose(a) - bU, bTrans := untranspose(b) - if rma, ok := aU.(RawMatrixer); ok { - if rmb, ok := bU.(RawMatrixer); ok { - ra := rma.RawMatrix() - rb := rmb.RawMatrix() - if aTrans == bTrans { - for i := 0; i < ra.Rows; i++ { - for j := 0; j < ra.Cols; j++ { - if ra.Data[i*ra.Stride+j] != rb.Data[i*rb.Stride+j] { - return false - } - } - } - return true - } - for i := 0; i < ra.Rows; i++ { - for j := 0; j < ra.Cols; j++ { - if ra.Data[i*ra.Stride+j] != rb.Data[j*rb.Stride+i] { - return false - } - } - } - return true - } - } - if rma, ok := aU.(RawSymmetricer); ok { - if rmb, ok := bU.(RawSymmetricer); ok { - ra := rma.RawSymmetric() - rb := rmb.RawSymmetric() - // Symmetric matrices are always upper and equal to their transpose. - for i := 0; i < ra.N; i++ { - for j := i; j < ra.N; j++ { - if ra.Data[i*ra.Stride+j] != rb.Data[i*rb.Stride+j] { - return false - } - } - } - return true - } - } - if ra, ok := aU.(*VecDense); ok { - if rb, ok := bU.(*VecDense); ok { - // If the raw vectors are the same length they must either both be - // transposed or both not transposed (or have length 1). - for i := 0; i < ra.mat.N; i++ { - if ra.mat.Data[i*ra.mat.Inc] != rb.mat.Data[i*rb.mat.Inc] { - return false - } - } - return true - } - } - for i := 0; i < ar; i++ { - for j := 0; j < ac; j++ { - if a.At(i, j) != b.At(i, j) { - return false - } - } - } - return true -} - -// EqualApprox returns whether the matrices a and b have the same size and contain all equal -// elements with tolerance for element-wise equality specified by epsilon. Matrices -// with non-equal shapes are not equal. -func EqualApprox(a, b Matrix, epsilon float64) bool { - ar, ac := a.Dims() - br, bc := b.Dims() - if ar != br || ac != bc { - return false - } - aU, aTrans := untranspose(a) - bU, bTrans := untranspose(b) - if rma, ok := aU.(RawMatrixer); ok { - if rmb, ok := bU.(RawMatrixer); ok { - ra := rma.RawMatrix() - rb := rmb.RawMatrix() - if aTrans == bTrans { - for i := 0; i < ra.Rows; i++ { - for j := 0; j < ra.Cols; j++ { - if !floats.EqualWithinAbsOrRel(ra.Data[i*ra.Stride+j], rb.Data[i*rb.Stride+j], epsilon, epsilon) { - return false - } - } - } - return true - } - for i := 0; i < ra.Rows; i++ { - for j := 0; j < ra.Cols; j++ { - if !floats.EqualWithinAbsOrRel(ra.Data[i*ra.Stride+j], rb.Data[j*rb.Stride+i], epsilon, epsilon) { - return false - } - } - } - return true - } - } - if rma, ok := aU.(RawSymmetricer); ok { - if rmb, ok := bU.(RawSymmetricer); ok { - ra := rma.RawSymmetric() - rb := rmb.RawSymmetric() - // Symmetric matrices are always upper and equal to their transpose. - for i := 0; i < ra.N; i++ { - for j := i; j < ra.N; j++ { - if !floats.EqualWithinAbsOrRel(ra.Data[i*ra.Stride+j], rb.Data[i*rb.Stride+j], epsilon, epsilon) { - return false - } - } - } - return true - } - } - if ra, ok := aU.(*VecDense); ok { - if rb, ok := bU.(*VecDense); ok { - // If the raw vectors are the same length they must either both be - // transposed or both not transposed (or have length 1). 
- for i := 0; i < ra.mat.N; i++ { - if !floats.EqualWithinAbsOrRel(ra.mat.Data[i*ra.mat.Inc], rb.mat.Data[i*rb.mat.Inc], epsilon, epsilon) { - return false - } - } - return true - } - } - for i := 0; i < ar; i++ { - for j := 0; j < ac; j++ { - if !floats.EqualWithinAbsOrRel(a.At(i, j), b.At(i, j), epsilon, epsilon) { - return false - } - } - } - return true -} - -// LogDet returns the log of the determinant and the sign of the determinant -// for the matrix that has been factorized. Numerical stability in product and -// division expressions is generally improved by working in log space. -func LogDet(a Matrix) (det float64, sign float64) { - // TODO(btracey): Add specialized routines for TriDense, etc. - var lu LU - lu.Factorize(a) - return lu.LogDet() -} - -// Max returns the largest element value of the matrix A. -// Max will panic with matrix.ErrShape if the matrix has zero size. -func Max(a Matrix) float64 { - r, c := a.Dims() - if r == 0 || c == 0 { - panic(ErrShape) - } - // Max(A) = Max(A^T) - aU, _ := untranspose(a) - switch m := aU.(type) { - case RawMatrixer: - rm := m.RawMatrix() - max := math.Inf(-1) - for i := 0; i < rm.Rows; i++ { - for _, v := range rm.Data[i*rm.Stride : i*rm.Stride+rm.Cols] { - if v > max { - max = v - } - } - } - return max - case RawTriangular: - rm := m.RawTriangular() - // The max of a triangular is at least 0 unless the size is 1. - if rm.N == 1 { - return rm.Data[0] - } - max := 0.0 - if rm.Uplo == blas.Upper { - for i := 0; i < rm.N; i++ { - for _, v := range rm.Data[i*rm.Stride+i : i*rm.Stride+rm.N] { - if v > max { - max = v - } - } - } - return max - } - for i := 0; i < rm.N; i++ { - for _, v := range rm.Data[i*rm.Stride : i*rm.Stride+i+1] { - if v > max { - max = v - } - } - } - return max - case RawSymmetricer: - rm := m.RawSymmetric() - if rm.Uplo != blas.Upper { - panic(badSymTriangle) - } - max := math.Inf(-1) - for i := 0; i < rm.N; i++ { - for _, v := range rm.Data[i*rm.Stride+i : i*rm.Stride+rm.N] { - if v > max { - max = v - } - } - } - return max - default: - r, c := aU.Dims() - max := math.Inf(-1) - for i := 0; i < r; i++ { - for j := 0; j < c; j++ { - v := aU.At(i, j) - if v > max { - max = v - } - } - } - return max - } -} - -// Min returns the smallest element value of the matrix A. -// Min will panic with matrix.ErrShape if the matrix has zero size. -func Min(a Matrix) float64 { - r, c := a.Dims() - if r == 0 || c == 0 { - panic(ErrShape) - } - // Min(A) = Min(A^T) - aU, _ := untranspose(a) - switch m := aU.(type) { - case RawMatrixer: - rm := m.RawMatrix() - min := math.Inf(1) - for i := 0; i < rm.Rows; i++ { - for _, v := range rm.Data[i*rm.Stride : i*rm.Stride+rm.Cols] { - if v < min { - min = v - } - } - } - return min - case RawTriangular: - rm := m.RawTriangular() - // The min of a triangular is at most 0 unless the size is 1. 
- if rm.N == 1 { - return rm.Data[0] - } - min := 0.0 - if rm.Uplo == blas.Upper { - for i := 0; i < rm.N; i++ { - for _, v := range rm.Data[i*rm.Stride+i : i*rm.Stride+rm.N] { - if v < min { - min = v - } - } - } - return min - } - for i := 0; i < rm.N; i++ { - for _, v := range rm.Data[i*rm.Stride : i*rm.Stride+i+1] { - if v < min { - min = v - } - } - } - return min - case RawSymmetricer: - rm := m.RawSymmetric() - if rm.Uplo != blas.Upper { - panic(badSymTriangle) - } - min := math.Inf(1) - for i := 0; i < rm.N; i++ { - for _, v := range rm.Data[i*rm.Stride+i : i*rm.Stride+rm.N] { - if v < min { - min = v - } - } - } - return min - default: - r, c := aU.Dims() - min := math.Inf(1) - for i := 0; i < r; i++ { - for j := 0; j < c; j++ { - v := aU.At(i, j) - if v < min { - min = v - } - } - } - return min - } -} - -// Norm returns the specified (induced) norm of the matrix a. See -// https://en.wikipedia.org/wiki/Matrix_norm for the definition of an induced norm. -// -// Valid norms are: -// 1 - The maximum absolute column sum -// 2 - Frobenius norm, the square root of the sum of the squares of the elements. -// Inf - The maximum absolute row sum. -// Norm will panic with ErrNormOrder if an illegal norm order is specified and -// with matrix.ErrShape if the matrix has zero size. -func Norm(a Matrix, norm float64) float64 { - r, c := a.Dims() - if r == 0 || c == 0 { - panic(ErrShape) - } - aU, aTrans := untranspose(a) - var work []float64 - switch rma := aU.(type) { - case RawMatrixer: - rm := rma.RawMatrix() - n := normLapack(norm, aTrans) - if n == lapack.MaxColumnSum { - work = getFloats(rm.Cols, false) - defer putFloats(work) - } - return lapack64.Lange(n, rm, work) - case RawTriangular: - rm := rma.RawTriangular() - n := normLapack(norm, aTrans) - if n == lapack.MaxRowSum || n == lapack.MaxColumnSum { - work = getFloats(rm.N, false) - defer putFloats(work) - } - return lapack64.Lantr(n, rm, work) - case RawSymmetricer: - rm := rma.RawSymmetric() - n := normLapack(norm, aTrans) - if n == lapack.MaxRowSum || n == lapack.MaxColumnSum { - work = getFloats(rm.N, false) - defer putFloats(work) - } - return lapack64.Lansy(n, rm, work) - case *VecDense: - rv := rma.RawVector() - switch norm { - default: - panic("unreachable") - case 1: - if aTrans { - imax := blas64.Iamax(rv) - return math.Abs(rma.At(imax, 0)) - } - return blas64.Asum(rv) - case 2: - return blas64.Nrm2(rv) - case math.Inf(1): - if aTrans { - return blas64.Asum(rv) - } - imax := blas64.Iamax(rv) - return math.Abs(rma.At(imax, 0)) - } - } - switch norm { - default: - panic("unreachable") - case 1: - var max float64 - for j := 0; j < c; j++ { - var sum float64 - for i := 0; i < r; i++ { - sum += math.Abs(a.At(i, j)) - } - if sum > max { - max = sum - } - } - return max - case 2: - var sum float64 - for i := 0; i < r; i++ { - for j := 0; j < c; j++ { - v := a.At(i, j) - sum += v * v - } - } - return math.Sqrt(sum) - case math.Inf(1): - var max float64 - for i := 0; i < r; i++ { - var sum float64 - for j := 0; j < c; j++ { - sum += math.Abs(a.At(i, j)) - } - if sum > max { - max = sum - } - } - return max - } -} - -// normLapack converts the float64 norm input in Norm to a lapack.MatrixNorm. 
-func normLapack(norm float64, aTrans bool) lapack.MatrixNorm { - switch norm { - case 1: - n := lapack.MaxColumnSum - if aTrans { - n = lapack.MaxRowSum - } - return n - case 2: - return lapack.Frobenius - case math.Inf(1): - n := lapack.MaxRowSum - if aTrans { - n = lapack.MaxColumnSum - } - return n - default: - panic(ErrNormOrder) - } -} - -// Sum returns the sum of the elements of the matrix. -func Sum(a Matrix) float64 { - - var sum float64 - aU, _ := untranspose(a) - switch rma := aU.(type) { - case RawSymmetricer: - rm := rma.RawSymmetric() - for i := 0; i < rm.N; i++ { - // Diagonals count once while off-diagonals count twice. - sum += rm.Data[i*rm.Stride+i] - var s float64 - for _, v := range rm.Data[i*rm.Stride+i+1 : i*rm.Stride+rm.N] { - s += v - } - sum += 2 * s - } - return sum - case RawTriangular: - rm := rma.RawTriangular() - var startIdx, endIdx int - for i := 0; i < rm.N; i++ { - // Start and end index for this triangle-row. - switch rm.Uplo { - case blas.Upper: - startIdx = i - endIdx = rm.N - case blas.Lower: - startIdx = 0 - endIdx = i + 1 - default: - panic(badTriangle) - } - for _, v := range rm.Data[i*rm.Stride+startIdx : i*rm.Stride+endIdx] { - sum += v - } - } - return sum - case RawMatrixer: - rm := rma.RawMatrix() - for i := 0; i < rm.Rows; i++ { - for _, v := range rm.Data[i*rm.Stride : i*rm.Stride+rm.Cols] { - sum += v - } - } - return sum - case *VecDense: - rm := rma.RawVector() - for i := 0; i < rm.N; i++ { - sum += rm.Data[i*rm.Inc] - } - return sum - default: - r, c := a.Dims() - for i := 0; i < r; i++ { - for j := 0; j < c; j++ { - sum += a.At(i, j) - } - } - return sum - } -} - -// A Tracer can compute the trace of the matrix. Trace must panic if the -// matrix is not square. -type Tracer interface { - Trace() float64 -} - -// Trace returns the trace of the matrix. Trace will panic if the -// matrix is not square. -func Trace(a Matrix) float64 { - m, _ := untransposeExtract(a) - if t, ok := m.(Tracer); ok { - return t.Trace() - } - r, c := a.Dims() - if r != c { - panic(ErrSquare) - } - var v float64 - for i := 0; i < r; i++ { - v += a.At(i, i) - } - return v -} - -func min(a, b int) int { - if a < b { - return a - } - return b -} - -func max(a, b int) int { - if a > b { - return a - } - return b -} - -// use returns a float64 slice with l elements, using f if it -// has the necessary capacity, otherwise creating a new slice. -func use(f []float64, l int) []float64 { - if l <= cap(f) { - return f[:l] - } - return make([]float64, l) -} - -// useZeroed returns a float64 slice with l elements, using f if it -// has the necessary capacity, otherwise creating a new slice. The -// elements of the returned slice are guaranteed to be zero. -func useZeroed(f []float64, l int) []float64 { - if l <= cap(f) { - f = f[:l] - zero(f) - return f - } - return make([]float64, l) -} - -// zero zeros the given slice's elements. -func zero(f []float64) { - for i := range f { - f[i] = 0 - } -} - -// useInt returns an int slice with l elements, using i if it -// has the necessary capacity, otherwise creating a new slice. -func useInt(i []int, l int) []int { - if l <= cap(i) { - return i[:l] - } - return make([]int, l) -} diff --git a/vendor/gonum.org/v1/gonum/mat/offset.go b/vendor/gonum.org/v1/gonum/mat/offset.go deleted file mode 100644 index af2c03b64..000000000 --- a/vendor/gonum.org/v1/gonum/mat/offset.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright ©2015 The Gonum Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build !appengine,!safe
-
-package mat
-
-import "unsafe"
-
-// offset returns the number of float64 values b[0] is after a[0].
-func offset(a, b []float64) int {
-	if &a[0] == &b[0] {
-		return 0
-	}
-	// This expression must be atomic with respect to GC moves.
-	// At this stage this is true, because the GC does not
-	// move. See https://golang.org/issue/12445.
-	return int(uintptr(unsafe.Pointer(&b[0]))-uintptr(unsafe.Pointer(&a[0]))) / int(unsafe.Sizeof(float64(0)))
-}
diff --git a/vendor/gonum.org/v1/gonum/mat/offset_appengine.go b/vendor/gonum.org/v1/gonum/mat/offset_appengine.go
deleted file mode 100644
index df617478c..000000000
--- a/vendor/gonum.org/v1/gonum/mat/offset_appengine.go
+++ /dev/null
@@ -1,24 +0,0 @@
-// Copyright ©2015 The Gonum Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build appengine safe
-
-package mat
-
-import "reflect"
-
-var sizeOfFloat64 = int(reflect.TypeOf(float64(0)).Size())
-
-// offset returns the number of float64 values b[0] is after a[0].
-func offset(a, b []float64) int {
-	va0 := reflect.ValueOf(a).Index(0)
-	vb0 := reflect.ValueOf(b).Index(0)
-	if va0.Addr() == vb0.Addr() {
-		return 0
-	}
-	// This expression must be atomic with respect to GC moves.
-	// At this stage this is true, because the GC does not
-	// move. See https://golang.org/issue/12445.
-	return int(vb0.UnsafeAddr()-va0.UnsafeAddr()) / sizeOfFloat64
-}
diff --git a/vendor/gonum.org/v1/gonum/mat/pool.go b/vendor/gonum.org/v1/gonum/mat/pool.go
deleted file mode 100644
index 25ca29f18..000000000
--- a/vendor/gonum.org/v1/gonum/mat/pool.go
+++ /dev/null
@@ -1,236 +0,0 @@
-// Copyright ©2014 The Gonum Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package mat
-
-import (
-	"sync"
-
-	"gonum.org/v1/gonum/blas"
-	"gonum.org/v1/gonum/blas/blas64"
-)
-
-var tab64 = [64]byte{
-	0x3f, 0x00, 0x3a, 0x01, 0x3b, 0x2f, 0x35, 0x02,
-	0x3c, 0x27, 0x30, 0x1b, 0x36, 0x21, 0x2a, 0x03,
-	0x3d, 0x33, 0x25, 0x28, 0x31, 0x12, 0x1c, 0x14,
-	0x37, 0x1e, 0x22, 0x0b, 0x2b, 0x0e, 0x16, 0x04,
-	0x3e, 0x39, 0x2e, 0x34, 0x26, 0x1a, 0x20, 0x29,
-	0x32, 0x24, 0x11, 0x13, 0x1d, 0x0a, 0x0d, 0x15,
-	0x38, 0x2d, 0x19, 0x1f, 0x23, 0x10, 0x09, 0x0c,
-	0x2c, 0x18, 0x0f, 0x08, 0x17, 0x07, 0x06, 0x05,
-}
-
-// bits returns the ceiling of base 2 log of v.
-// Approach based on http://stackoverflow.com/a/11398748.
-func bits(v uint64) byte {
-	if v == 0 {
-		return 0
-	}
-	v <<= 2
-	v--
-	v |= v >> 1
-	v |= v >> 2
-	v |= v >> 4
-	v |= v >> 8
-	v |= v >> 16
-	v |= v >> 32
-	return tab64[((v-(v>>1))*0x07EDD5E59A4E28C2)>>58] - 1
-}
-
-var (
-	// pool contains size stratified workspace Dense pools.
-	// Each pool element i returns sized matrices with a data
-	// slice capped at 1<<i.
-	if !m.IsZero() {
-		if fr != r {
-			panic(ErrShape)
-		}
-		if _, lc := factors[len(factors)-1].Dims(); lc != c {
-			panic(ErrShape)
-		}
-	}
-
-	dims := make([]int, len(factors)+1)
-	dims[0] = r
-	dims[len(dims)-1] = c
-	pc := fc
-	for i, f := range factors[1:] {
-		cr, cc := f.Dims()
-		dims[i+1] = cr
-		if pc != cr {
-			panic(ErrShape)
-		}
-		pc = cc
-	}
-
-	return &multiplier{
-		factors: factors,
-		dims:    dims,
-		table:   newTable(len(factors)),
-	}
-}
-
-// optimize determines an optimal matrix multiply operation order.
-func (p *multiplier) optimize() {
-	if debugProductWalk {
-		fmt.Printf("chain dims: %v\n", p.dims)
-	}
-	const maxInt = int(^uint(0) >> 1)
-	for f := 1; f < len(p.factors); f++ {
-		for i := 0; i < len(p.factors)-f; i++ {
-			j := i + f
-			p.table.set(i, j, entry{cost: maxInt})
-			for k := i; k < j; k++ {
-				cost := p.table.at(i, k).cost + p.table.at(k+1, j).cost + p.dims[i]*p.dims[k+1]*p.dims[j+1]
-				if cost < p.table.at(i, j).cost {
-					p.table.set(i, j, entry{cost: cost, k: k})
-				}
-			}
-		}
-	}
-}
-
-// multiply walks the optimal operation tree found by optimize,
-// leaving the final result in the stack. It returns the
-// product, which may be copied but should be returned to
-// the workspace pool.
-func (p *multiplier) multiply() *Dense {
-	result, _ := p.multiplySubchain(0, len(p.factors)-1)
-	if debugProductWalk {
-		r, c := result.Dims()
-		fmt.Printf("\tpop result (%d×%d) cost=%d\n", r, c, p.table.at(0, len(p.factors)-1).cost)
-	}
-	return result.(*Dense)
-}
-
-func (p *multiplier) multiplySubchain(i, j int) (m Matrix, intermediate bool) {
-	if i == j {
-		return p.factors[i], false
-	}
-
-	a, aTmp := p.multiplySubchain(i, p.table.at(i, j).k)
-	b, bTmp := p.multiplySubchain(p.table.at(i, j).k+1, j)
-
-	ar, ac := a.Dims()
-	br, bc := b.Dims()
-	if ac != br {
-		// Panic with a string since this
-		// is not a user-facing panic.
-		panic(ErrShape.Error())
-	}
-
-	if debugProductWalk {
-		fmt.Printf("\tpush f[%d] (%d×%d)%s * f[%d] (%d×%d)%s\n",
-			i, ar, ac, result(aTmp), j, br, bc, result(bTmp))
-	}
-
-	r := getWorkspace(ar, bc, false)
-	r.Mul(a, b)
-	if aTmp {
-		putWorkspace(a.(*Dense))
-	}
-	if bTmp {
-		putWorkspace(b.(*Dense))
-	}
-	return r, true
-}
-
-type entry struct {
-	k    int // k is the chain subdivision index.
-	cost int // cost is the cost of the operation.
-}
-
-// table is a row major n×n dynamic programming table.
-type table struct {
-	n       int
-	entries []entry
-}
-
-func newTable(n int) table {
-	return table{n: n, entries: make([]entry, n*n)}
-}
-
-func (t table) at(i, j int) entry     { return t.entries[i*t.n+j] }
-func (t table) set(i, j int, e entry) { t.entries[i*t.n+j] = e }
-
-type result bool
-
-func (r result) String() string {
-	if r {
-		return " (popped result)"
-	}
-	return ""
-}
diff --git a/vendor/gonum.org/v1/gonum/mat/qr.go b/vendor/gonum.org/v1/gonum/mat/qr.go
deleted file mode 100644
index 70c489a26..000000000
--- a/vendor/gonum.org/v1/gonum/mat/qr.go
+++ /dev/null
@@ -1,260 +0,0 @@
-// Copyright ©2013 The Gonum Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package mat
-
-import (
-	"math"
-
-	"gonum.org/v1/gonum/blas"
-	"gonum.org/v1/gonum/blas/blas64"
-	"gonum.org/v1/gonum/lapack"
-	"gonum.org/v1/gonum/lapack/lapack64"
-)
-
-const badQR = "mat: invalid QR factorization"
-
-// QR is a type for creating and using the QR factorization of a matrix.
-type QR struct {
-	qr   *Dense
-	tau  []float64
-	cond float64
-}
-
-func (qr *QR) updateCond(norm lapack.MatrixNorm) {
-	// Since A = Q*R, and Q is orthogonal, we get for the condition number κ
-	//  κ(A) := |A| |A^-1| = |Q*R| |(Q*R)^-1| = |R| |R^-1 * Q^T|
-	//        = |R| |R^-1| = κ(R),
-	// where we used the fact that Q^-1 = Q^T. However, this assumes that
-	// the matrix norm is invariant under orthogonal transformations which
-	// is not the case for CondNorm. Hopefully the error is negligible: κ
-	// is only a qualitative measure anyway.
-	n := qr.qr.mat.Cols
-	work := getFloats(3*n, false)
-	iwork := getInts(n, false)
-	r := qr.qr.asTriDense(n, blas.NonUnit, blas.Upper)
-	v := lapack64.Trcon(norm, r.mat, work, iwork)
-	putFloats(work)
-	putInts(iwork)
-	qr.cond = 1 / v
-}
-
-// Factorize computes the QR factorization of an m×n matrix a where m >= n. The QR
-// factorization always exists even if A is singular.
-//
-// The QR decomposition is a factorization of the matrix A such that A = Q * R.
-// The matrix Q is an orthonormal m×m matrix, and R is an m×n upper triangular matrix.
-// Q and R can be extracted using the QTo and RTo methods.
-func (qr *QR) Factorize(a Matrix) {
-	qr.factorize(a, CondNorm)
-}
-
-func (qr *QR) factorize(a Matrix, norm lapack.MatrixNorm) {
-	m, n := a.Dims()
-	if m < n {
-		panic(ErrShape)
-	}
-	k := min(m, n)
-	if qr.qr == nil {
-		qr.qr = &Dense{}
-	}
-	qr.qr.CloneFrom(a)
-	work := []float64{0}
-	qr.tau = make([]float64, k)
-	lapack64.Geqrf(qr.qr.mat, qr.tau, work, -1)
-
-	work = getFloats(int(work[0]), false)
-	lapack64.Geqrf(qr.qr.mat, qr.tau, work, len(work))
-	putFloats(work)
-	qr.updateCond(norm)
-}
-
-// isValid returns whether the receiver contains a factorization.
-func (qr *QR) isValid() bool {
-	return qr.qr != nil && !qr.qr.IsZero()
-}
-
-// Cond returns the condition number for the factorized matrix.
-// Cond will panic if the receiver does not contain a factorization.
-func (qr *QR) Cond() float64 {
-	if !qr.isValid() {
-		panic(badQR)
-	}
-	return qr.cond
-}
-
-// TODO(btracey): Add in the "Reduced" forms for extracting the n×n orthogonal
-// and upper triangular matrices.
-
-// RTo extracts the m×n upper trapezoidal matrix from a QR decomposition.
-// If dst is nil, a new matrix is allocated. The resulting dst matrix is returned.
-// RTo will panic if the receiver does not contain a factorization.
-func (qr *QR) RTo(dst *Dense) *Dense {
-	if !qr.isValid() {
-		panic(badQR)
-	}
-
-	r, c := qr.qr.Dims()
-	if dst == nil {
-		dst = NewDense(r, c, nil)
-	} else {
-		dst.reuseAs(r, c)
-	}
-
-	// Disguise the QR as an upper triangular
-	t := &TriDense{
-		mat: blas64.Triangular{
-			N:      c,
-			Stride: qr.qr.mat.Stride,
-			Data:   qr.qr.mat.Data,
-			Uplo:   blas.Upper,
-			Diag:   blas.NonUnit,
-		},
-		cap: qr.qr.capCols,
-	}
-	dst.Copy(t)
-
-	// Zero below the triangular.
-	for i := c; i < r; i++ {
-		zero(dst.mat.Data[i*dst.mat.Stride : i*dst.mat.Stride+c])
-	}
-
-	return dst
-}
-
-// QTo extracts the m×m orthonormal matrix Q from a QR decomposition.
-// If dst is nil, a new matrix is allocated. The resulting Q matrix is returned.
-// QTo will panic if the receiver does not contain a factorization.
-func (qr *QR) QTo(dst *Dense) *Dense {
-	if !qr.isValid() {
-		panic(badQR)
-	}
-
-	r, _ := qr.qr.Dims()
-	if dst == nil {
-		dst = NewDense(r, r, nil)
-	} else {
-		dst.reuseAsZeroed(r, r)
-	}
-
-	// Set Q = I.
-	for i := 0; i < r*r; i += r + 1 {
-		dst.mat.Data[i] = 1
-	}
-
-	// Construct Q from the elementary reflectors.
-	work := []float64{0}
-	lapack64.Ormqr(blas.Left, blas.NoTrans, qr.qr.mat, qr.tau, dst.mat, work, -1)
-	work = getFloats(int(work[0]), false)
-	lapack64.Ormqr(blas.Left, blas.NoTrans, qr.qr.mat, qr.tau, dst.mat, work, len(work))
-	putFloats(work)
-
-	return dst
-}
-
-// SolveTo finds a minimum-norm solution to a system of linear equations defined
-// by the matrices A and b, where A is an m×n matrix represented in its QR factorized
-// form. If A is singular or near-singular a Condition error is returned.
-// See the documentation for Condition for more information.
-// -// The minimization problem solved depends on the input parameters. -// If trans == false, find X such that ||A*X - B||_2 is minimized. -// If trans == true, find the minimum norm solution of A^T * X = B. -// The solution matrix, X, is stored in place into dst. -// SolveTo will panic if the receiver does not contain a factorization. -func (qr *QR) SolveTo(dst *Dense, trans bool, b Matrix) error { - if !qr.isValid() { - panic(badQR) - } - - r, c := qr.qr.Dims() - br, bc := b.Dims() - - // The QR solve algorithm stores the result in-place into the right hand side. - // The storage for the answer must be large enough to hold both b and x. - // However, this method's receiver must be the size of x. Copy b, and then - // copy the result into m at the end. - if trans { - if c != br { - panic(ErrShape) - } - dst.reuseAs(r, bc) - } else { - if r != br { - panic(ErrShape) - } - dst.reuseAs(c, bc) - } - // Do not need to worry about overlap between m and b because x has its own - // independent storage. - w := getWorkspace(max(r, c), bc, false) - w.Copy(b) - t := qr.qr.asTriDense(qr.qr.mat.Cols, blas.NonUnit, blas.Upper).mat - if trans { - ok := lapack64.Trtrs(blas.Trans, t, w.mat) - if !ok { - return Condition(math.Inf(1)) - } - for i := c; i < r; i++ { - zero(w.mat.Data[i*w.mat.Stride : i*w.mat.Stride+bc]) - } - work := []float64{0} - lapack64.Ormqr(blas.Left, blas.NoTrans, qr.qr.mat, qr.tau, w.mat, work, -1) - work = getFloats(int(work[0]), false) - lapack64.Ormqr(blas.Left, blas.NoTrans, qr.qr.mat, qr.tau, w.mat, work, len(work)) - putFloats(work) - } else { - work := []float64{0} - lapack64.Ormqr(blas.Left, blas.Trans, qr.qr.mat, qr.tau, w.mat, work, -1) - work = getFloats(int(work[0]), false) - lapack64.Ormqr(blas.Left, blas.Trans, qr.qr.mat, qr.tau, w.mat, work, len(work)) - putFloats(work) - - ok := lapack64.Trtrs(blas.NoTrans, t, w.mat) - if !ok { - return Condition(math.Inf(1)) - } - } - // X was set above to be the correct size for the result. - dst.Copy(w) - putWorkspace(w) - if qr.cond > ConditionTolerance { - return Condition(qr.cond) - } - return nil -} - -// SolveVecTo finds a minimum-norm solution to a system of linear equations, -// Ax = b. -// See QR.SolveTo for the full documentation. -// SolveVecTo will panic if the receiver does not contain a factorization. -func (qr *QR) SolveVecTo(dst *VecDense, trans bool, b Vector) error { - if !qr.isValid() { - panic(badQR) - } - - r, c := qr.qr.Dims() - if _, bc := b.Dims(); bc != 1 { - panic(ErrShape) - } - - // The Solve implementation is non-trivial, so rather than duplicate the code, - // instead recast the VecDenses as Dense and call the matrix code. - bm := Matrix(b) - if rv, ok := b.(RawVectorer); ok { - bmat := rv.RawVector() - if dst != b { - dst.checkOverlap(bmat) - } - b := VecDense{mat: bmat} - bm = b.asDense() - } - if trans { - dst.reuseAs(r) - } else { - dst.reuseAs(c) - } - return qr.SolveTo(dst.asDense(), trans, bm) - -} diff --git a/vendor/gonum.org/v1/gonum/mat/shadow.go b/vendor/gonum.org/v1/gonum/mat/shadow.go deleted file mode 100644 index 6082362d5..000000000 --- a/vendor/gonum.org/v1/gonum/mat/shadow.go +++ /dev/null @@ -1,249 +0,0 @@ -// Copyright ©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package mat - -import ( - "gonum.org/v1/gonum/blas/blas64" -) - -const ( - // regionOverlap is the panic string used for the general case - // of a matrix region overlap between a source and destination. 
- regionOverlap = "mat: bad region: overlap" - - // regionIdentity is the panic string used for the specific - // case of complete agreement between a source and a destination. - regionIdentity = "mat: bad region: identical" - - // mismatchedStrides is the panic string used for overlapping - // data slices with differing strides. - mismatchedStrides = "mat: bad region: different strides" -) - -// checkOverlap returns false if the receiver does not overlap data elements -// referenced by the parameter and panics otherwise. -// -// checkOverlap methods return a boolean to allow the check call to be added to a -// boolean expression, making use of short-circuit operators. -func checkOverlap(a, b blas64.General) bool { - if cap(a.Data) == 0 || cap(b.Data) == 0 { - return false - } - - off := offset(a.Data[:1], b.Data[:1]) - - if off == 0 { - // At least one element overlaps. - if a.Cols == b.Cols && a.Rows == b.Rows && a.Stride == b.Stride { - panic(regionIdentity) - } - panic(regionOverlap) - } - - if off > 0 && len(a.Data) <= off { - // We know a is completely before b. - return false - } - if off < 0 && len(b.Data) <= -off { - // We know a is completely after b. - return false - } - - if a.Stride != b.Stride && a.Stride != 1 && b.Stride != 1 { - // Too hard, so assume the worst; if either stride - // is one it will be caught in rectanglesOverlap. - panic(mismatchedStrides) - } - - if off < 0 { - off = -off - a.Cols, b.Cols = b.Cols, a.Cols - } - if rectanglesOverlap(off, a.Cols, b.Cols, min(a.Stride, b.Stride)) { - panic(regionOverlap) - } - return false -} - -func (m *Dense) checkOverlap(a blas64.General) bool { - return checkOverlap(m.RawMatrix(), a) -} - -func (m *Dense) checkOverlapMatrix(a Matrix) bool { - if m == a { - return false - } - var amat blas64.General - switch ar := a.(type) { - default: - return false - case RawMatrixer: - amat = ar.RawMatrix() - case RawSymmetricer: - amat = generalFromSymmetric(ar.RawSymmetric()) - case RawTriangular: - amat = generalFromTriangular(ar.RawTriangular()) - case RawVectorer: - r, c := a.Dims() - amat = generalFromVector(ar.RawVector(), r, c) - } - return m.checkOverlap(amat) -} - -func (s *SymDense) checkOverlap(a blas64.General) bool { - return checkOverlap(generalFromSymmetric(s.RawSymmetric()), a) -} - -func (s *SymDense) checkOverlapMatrix(a Matrix) bool { - if s == a { - return false - } - var amat blas64.General - switch ar := a.(type) { - default: - return false - case RawMatrixer: - amat = ar.RawMatrix() - case RawSymmetricer: - amat = generalFromSymmetric(ar.RawSymmetric()) - case RawTriangular: - amat = generalFromTriangular(ar.RawTriangular()) - case RawVectorer: - r, c := a.Dims() - amat = generalFromVector(ar.RawVector(), r, c) - } - return s.checkOverlap(amat) -} - -// generalFromSymmetric returns a blas64.General with the backing -// data and dimensions of a. 
-func generalFromSymmetric(a blas64.Symmetric) blas64.General {
-	return blas64.General{
-		Rows:   a.N,
-		Cols:   a.N,
-		Stride: a.Stride,
-		Data:   a.Data,
-	}
-}
-
-func (t *TriDense) checkOverlap(a blas64.General) bool {
-	return checkOverlap(generalFromTriangular(t.RawTriangular()), a)
-}
-
-func (t *TriDense) checkOverlapMatrix(a Matrix) bool {
-	if t == a {
-		return false
-	}
-	var amat blas64.General
-	switch ar := a.(type) {
-	default:
-		return false
-	case RawMatrixer:
-		amat = ar.RawMatrix()
-	case RawSymmetricer:
-		amat = generalFromSymmetric(ar.RawSymmetric())
-	case RawTriangular:
-		amat = generalFromTriangular(ar.RawTriangular())
-	case RawVectorer:
-		r, c := a.Dims()
-		amat = generalFromVector(ar.RawVector(), r, c)
-	}
-	return t.checkOverlap(amat)
-}
-
-// generalFromTriangular returns a blas64.General with the backing
-// data and dimensions of a.
-func generalFromTriangular(a blas64.Triangular) blas64.General {
-	return blas64.General{
-		Rows:   a.N,
-		Cols:   a.N,
-		Stride: a.Stride,
-		Data:   a.Data,
-	}
-}
-
-func (v *VecDense) checkOverlap(a blas64.Vector) bool {
-	mat := v.mat
-	if cap(mat.Data) == 0 || cap(a.Data) == 0 {
-		return false
-	}
-
-	off := offset(mat.Data[:1], a.Data[:1])
-
-	if off == 0 {
-		// At least one element overlaps.
-		if mat.Inc == a.Inc && len(mat.Data) == len(a.Data) {
-			panic(regionIdentity)
-		}
-		panic(regionOverlap)
-	}
-
-	if off > 0 && len(mat.Data) <= off {
-		// We know v is completely before a.
-		return false
-	}
-	if off < 0 && len(a.Data) <= -off {
-		// We know v is completely after a.
-		return false
-	}
-
-	if mat.Inc != a.Inc && mat.Inc != 1 && a.Inc != 1 {
-		// Too hard, so assume the worst; if either
-		// increment is one it will be caught below.
-		panic(mismatchedStrides)
-	}
-	inc := min(mat.Inc, a.Inc)
-
-	if inc == 1 || off%inc == 0 {
-		panic(regionOverlap)
-	}
-	return false
-}
-
-// generalFromVector returns a blas64.General with the backing
-// data and dimensions of a.
-func generalFromVector(a blas64.Vector, r, c int) blas64.General {
-	return blas64.General{
-		Rows:   r,
-		Cols:   c,
-		Stride: a.Inc,
-		Data:   a.Data,
-	}
-}
-
-// rectanglesOverlap returns whether the strided rectangles a and b overlap
-// when b is offset by off elements after a but has at least one element before
-// the end of a. off must be positive. a and b have aCols and bCols columns respectively.
-//
-// rectanglesOverlap works by shifting both matrices left such that the left
-// column of a is at 0. The column indexes are flattened by obtaining the shifted
-// relative left and right column positions modulo the common stride. This allows
-// direct comparison of the column offsets when the matrix backing data slices
-// are known to overlap.
-func rectanglesOverlap(off, aCols, bCols, stride int) bool {
-	if stride == 1 {
-		// Unit stride means overlapping data
-		// slices must overlap as matrices.
-		return true
-	}
-
-	// Flatten the shifted matrix column positions
-	// so a starts at 0, modulo the common stride.
-	aTo := aCols
-	// The mod stride operations here make the from
-	// and to indexes comparable between a and b when
-	// the data slices of a and b overlap.
-	bFrom := off % stride
-	bTo := (bFrom + bCols) % stride
-
-	if bTo == 0 || bFrom < bTo {
-		// b matrix is not wrapped: compare for
-		// simple overlap.
-		return bFrom < aTo
-	}
-
-	// b strictly wraps and so must overlap with a.
- return true -} diff --git a/vendor/gonum.org/v1/gonum/mat/solve.go b/vendor/gonum.org/v1/gonum/mat/solve.go deleted file mode 100644 index 11813280f..000000000 --- a/vendor/gonum.org/v1/gonum/mat/solve.go +++ /dev/null @@ -1,140 +0,0 @@ -// Copyright ©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package mat - -import ( - "gonum.org/v1/gonum/blas" - "gonum.org/v1/gonum/blas/blas64" - "gonum.org/v1/gonum/lapack/lapack64" -) - -// Solve finds a minimum-norm solution to a system of linear equations defined -// by the matrices A and B. If A is singular or near-singular, a Condition error -// is returned. See the documentation for Condition for more information. -// -// The minimization problem solved depends on the input parameters: -// - if m >= n, find X such that ||A*X - B||_2 is minimized, -// - if m < n, find the minimum norm solution of A * X = B. -// The solution matrix, X, is stored in-place into the receiver. -func (m *Dense) Solve(a, b Matrix) error { - ar, ac := a.Dims() - br, bc := b.Dims() - if ar != br { - panic(ErrShape) - } - m.reuseAs(ac, bc) - - // TODO(btracey): Add special cases for SymDense, etc. - aU, aTrans := untranspose(a) - bU, bTrans := untranspose(b) - switch rma := aU.(type) { - case RawTriangular: - side := blas.Left - tA := blas.NoTrans - if aTrans { - tA = blas.Trans - } - - switch rm := bU.(type) { - case RawMatrixer: - if m != bU || bTrans { - if m == bU || m.checkOverlap(rm.RawMatrix()) { - tmp := getWorkspace(br, bc, false) - tmp.Copy(b) - m.Copy(tmp) - putWorkspace(tmp) - break - } - m.Copy(b) - } - default: - if m != bU { - m.Copy(b) - } else if bTrans { - // m and b share data so Copy cannot be used directly. - tmp := getWorkspace(br, bc, false) - tmp.Copy(b) - m.Copy(tmp) - putWorkspace(tmp) - } - } - - rm := rma.RawTriangular() - blas64.Trsm(side, tA, 1, rm, m.mat) - work := getFloats(3*rm.N, false) - iwork := getInts(rm.N, false) - cond := lapack64.Trcon(CondNorm, rm, work, iwork) - putFloats(work) - putInts(iwork) - if cond > ConditionTolerance { - return Condition(cond) - } - return nil - } - - switch { - case ar == ac: - if a == b { - // x = I. - if ar == 1 { - m.mat.Data[0] = 1 - return nil - } - for i := 0; i < ar; i++ { - v := m.mat.Data[i*m.mat.Stride : i*m.mat.Stride+ac] - zero(v) - v[i] = 1 - } - return nil - } - var lu LU - lu.Factorize(a) - return lu.SolveTo(m, false, b) - case ar > ac: - var qr QR - qr.Factorize(a) - return qr.SolveTo(m, false, b) - default: - var lq LQ - lq.Factorize(a) - return lq.SolveTo(m, false, b) - } -} - -// SolveVec finds a minimum-norm solution to a system of linear equations defined -// by the matrix a and the right-hand side column vector b. If A is singular or -// near-singular, a Condition error is returned. See the documentation for -// Dense.Solve for more information. -func (v *VecDense) SolveVec(a Matrix, b Vector) error { - if _, bc := b.Dims(); bc != 1 { - panic(ErrShape) - } - _, c := a.Dims() - - // The Solve implementation is non-trivial, so rather than duplicate the code, - // instead recast the VecDenses as Dense and call the matrix code. - - if rv, ok := b.(RawVectorer); ok { - bmat := rv.RawVector() - if v != b { - v.checkOverlap(bmat) - } - v.reuseAs(c) - m := v.asDense() - // We conditionally create bm as m when b and v are identical - // to prevent the overlap detection code from identifying m - // and bm as overlapping but not identical. 
-		bm := m
-		if v != b {
-			b := VecDense{mat: bmat}
-			bm = b.asDense()
-		}
-		return m.Solve(a, bm)
-	}
-
-	v.reuseAs(c)
-	m := v.asDense()
-	return m.Solve(a, b)
-}
diff --git a/vendor/gonum.org/v1/gonum/mat/svd.go b/vendor/gonum.org/v1/gonum/mat/svd.go
deleted file mode 100644
index 2f55c4114..000000000
--- a/vendor/gonum.org/v1/gonum/mat/svd.go
+++ /dev/null
@@ -1,247 +0,0 @@
-// Copyright ©2013 The Gonum Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package mat
-
-import (
-	"gonum.org/v1/gonum/blas/blas64"
-	"gonum.org/v1/gonum/lapack"
-	"gonum.org/v1/gonum/lapack/lapack64"
-)
-
-// SVD is a type for creating and using the Singular Value Decomposition (SVD)
-// of a matrix.
-type SVD struct {
-	kind SVDKind
-
-	s  []float64
-	u  blas64.General
-	vt blas64.General
-}
-
-// SVDKind specifies the treatment of singular vectors during an SVD
-// factorization.
-type SVDKind int
-
-const (
-	// SVDNone specifies that no singular vectors should be computed during
-	// the decomposition.
-	SVDNone SVDKind = 0
-
-	// SVDThinU specifies the thin decomposition for U should be computed.
-	SVDThinU SVDKind = 1 << (iota - 1)
-	// SVDFullU specifies the full decomposition for U should be computed.
-	SVDFullU
-	// SVDThinV specifies the thin decomposition for V should be computed.
-	SVDThinV
-	// SVDFullV specifies the full decomposition for V should be computed.
-	SVDFullV
-
-	// SVDThin is a convenience value for computing both thin vectors.
-	SVDThin SVDKind = SVDThinU | SVDThinV
-	// SVDFull is a convenience value for computing both full vectors.
-	SVDFull SVDKind = SVDFullU | SVDFullV
-)
-
-// succFact returns whether the receiver contains a successful factorization.
-func (svd *SVD) succFact() bool {
-	return len(svd.s) != 0
-}
-
-// Factorize computes the singular value decomposition (SVD) of the input matrix A.
-// The singular values of A are computed in all cases, while the singular
-// vectors are optionally computed depending on the input kind.
-//
-// The full singular value decomposition (kind == SVDFull) is a factorization
-// of an m×n matrix A of the form
-//  A = U * Σ * V^T
-// where Σ is an m×n diagonal matrix, U is an m×m orthogonal matrix, and V is an
-// n×n orthogonal matrix. The diagonal elements of Σ are the singular values of A.
-// The first min(m,n) columns of U and V are, respectively, the left and right
-// singular vectors of A.
-//
-// Significant storage space can be saved by using the thin representation of
-// the SVD (kind == SVDThin) instead of the full SVD, especially if
-// m >> n or m << n. The thin SVD finds
-//  A = U~ * Σ * V~^T
-// where U~ is of size m×min(m,n), Σ is a diagonal matrix of size min(m,n)×min(m,n)
-// and V~ is of size n×min(m,n).
-//
-// Factorize returns whether the decomposition succeeded. If the decomposition
-// failed, routines that require a successful factorization will panic.
-func (svd *SVD) Factorize(a Matrix, kind SVDKind) (ok bool) {
-	// kill previous factorization
-	svd.s = svd.s[:0]
-	svd.kind = kind
-
-	m, n := a.Dims()
-	var jobU, jobVT lapack.SVDJob
-
-	// TODO(btracey): This code should be modified to have the smaller
-	// matrix written in-place into aCopy when the lapack/native/dgesvd
-	// implementation is complete.
- switch { - case kind&SVDFullU != 0: - jobU = lapack.SVDAll - svd.u = blas64.General{ - Rows: m, - Cols: m, - Stride: m, - Data: use(svd.u.Data, m*m), - } - case kind&SVDThinU != 0: - jobU = lapack.SVDStore - svd.u = blas64.General{ - Rows: m, - Cols: min(m, n), - Stride: min(m, n), - Data: use(svd.u.Data, m*min(m, n)), - } - default: - jobU = lapack.SVDNone - } - switch { - case kind&SVDFullV != 0: - svd.vt = blas64.General{ - Rows: n, - Cols: n, - Stride: n, - Data: use(svd.vt.Data, n*n), - } - jobVT = lapack.SVDAll - case kind&SVDThinV != 0: - svd.vt = blas64.General{ - Rows: min(m, n), - Cols: n, - Stride: n, - Data: use(svd.vt.Data, min(m, n)*n), - } - jobVT = lapack.SVDStore - default: - jobVT = lapack.SVDNone - } - - // A is destroyed on call, so copy the matrix. - aCopy := DenseCopyOf(a) - svd.kind = kind - svd.s = use(svd.s, min(m, n)) - - work := []float64{0} - lapack64.Gesvd(jobU, jobVT, aCopy.mat, svd.u, svd.vt, svd.s, work, -1) - work = getFloats(int(work[0]), false) - ok = lapack64.Gesvd(jobU, jobVT, aCopy.mat, svd.u, svd.vt, svd.s, work, len(work)) - putFloats(work) - if !ok { - svd.kind = 0 - } - return ok -} - -// Kind returns the SVDKind of the decomposition. If no decomposition has been -// computed, Kind returns -1. -func (svd *SVD) Kind() SVDKind { - if !svd.succFact() { - return -1 - } - return svd.kind -} - -// Cond returns the 2-norm condition number for the factorized matrix. Cond will -// panic if the receiver does not contain a successful factorization. -func (svd *SVD) Cond() float64 { - if !svd.succFact() { - panic(badFact) - } - return svd.s[0] / svd.s[len(svd.s)-1] -} - -// Values returns the singular values of the factorized matrix in descending order. -// -// If the input slice is non-nil, the values will be stored in-place into -// the slice. In this case, the slice must have length min(m,n), and Values will -// panic with ErrSliceLengthMismatch otherwise. If the input slice is nil, a new -// slice of the appropriate length will be allocated and returned. -// -// Values will panic if the receiver does not contain a successful factorization. -func (svd *SVD) Values(s []float64) []float64 { - if !svd.succFact() { - panic(badFact) - } - if s == nil { - s = make([]float64, len(svd.s)) - } - if len(s) != len(svd.s) { - panic(ErrSliceLengthMismatch) - } - copy(s, svd.s) - return s -} - -// UTo extracts the matrix U from the singular value decomposition. The first -// min(m,n) columns are the left singular vectors and correspond to the singular -// values as returned from SVD.Values. -// -// If dst is not nil, U is stored in-place into dst, and dst must have size -// m×m if the full U was computed, size m×min(m,n) if the thin U was computed, -// and UTo panics otherwise. If dst is nil, a new matrix of the appropriate size -// is allocated and returned. -func (svd *SVD) UTo(dst *Dense) *Dense { - if !svd.succFact() { - panic(badFact) - } - kind := svd.kind - if kind&SVDThinU == 0 && kind&SVDFullU == 0 { - panic("svd: u not computed during factorization") - } - r := svd.u.Rows - c := svd.u.Cols - if dst == nil { - dst = NewDense(r, c, nil) - } else { - dst.reuseAs(r, c) - } - - tmp := &Dense{ - mat: svd.u, - capRows: r, - capCols: c, - } - dst.Copy(tmp) - - return dst -} - -// VTo extracts the matrix V from the singular value decomposition. The first -// min(m,n) columns are the right singular vectors and correspond to the singular -// values as returned from SVD.Values. 
-//
-// If dst is not nil, V is stored in-place into dst, and dst must have size
-// n×n if the full V was computed, size n×min(m,n) if the thin V was computed,
-// and VTo panics otherwise. If dst is nil, a new matrix of the appropriate size
-// is allocated and returned.
-func (svd *SVD) VTo(dst *Dense) *Dense {
-	if !svd.succFact() {
-		panic(badFact)
-	}
-	kind := svd.kind
-	if kind&SVDThinV == 0 && kind&SVDFullV == 0 {
-		panic("svd: v not computed during factorization")
-	}
-	r := svd.vt.Rows
-	c := svd.vt.Cols
-	if dst == nil {
-		dst = NewDense(c, r, nil)
-	} else {
-		dst.reuseAs(c, r)
-	}
-
-	tmp := &Dense{
-		mat:     svd.vt,
-		capRows: r,
-		capCols: c,
-	}
-	dst.Copy(tmp.T())
-
-	return dst
-}
diff --git a/vendor/gonum.org/v1/gonum/mat/symband.go b/vendor/gonum.org/v1/gonum/mat/symband.go
deleted file mode 100644
index cc4be1976..000000000
--- a/vendor/gonum.org/v1/gonum/mat/symband.go
+++ /dev/null
@@ -1,231 +0,0 @@
-// Copyright ©2017 The Gonum Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package mat
-
-import (
-	"gonum.org/v1/gonum/blas"
-	"gonum.org/v1/gonum/blas/blas64"
-)
-
-var (
-	symBandDense *SymBandDense
-	_            Matrix           = symBandDense
-	_            Symmetric        = symBandDense
-	_            Banded           = symBandDense
-	_            SymBanded        = symBandDense
-	_            RawSymBander     = symBandDense
-	_            MutableSymBanded = symBandDense
-
-	_ NonZeroDoer    = symBandDense
-	_ RowNonZeroDoer = symBandDense
-	_ ColNonZeroDoer = symBandDense
-)
-
-// SymBandDense represents a symmetric band matrix in dense storage format.
-type SymBandDense struct {
-	mat blas64.SymmetricBand
-}
-
-// SymBanded is a symmetric band matrix interface type.
-type SymBanded interface {
-	Banded
-
-	// Symmetric returns the number of rows/columns in the matrix.
-	Symmetric() int
-
-	// SymBand returns the number of rows/columns in the matrix, and the size of
-	// the bandwidth.
-	SymBand() (n, k int)
-}
-
-// MutableSymBanded is a symmetric band matrix interface type that allows elements
-// to be altered.
-type MutableSymBanded interface {
-	SymBanded
-	SetSymBand(i, j int, v float64)
-}
-
-// A RawSymBander can return a blas64.SymmetricBand representation of the receiver.
-// Changes to the blas64.SymmetricBand.Data slice will be reflected in the original
-// matrix, changes to the N, K, Stride and Uplo fields will not.
-type RawSymBander interface {
-	RawSymBand() blas64.SymmetricBand
-}
-
-// NewSymBandDense creates a new SymBand matrix with n rows and columns. If data == nil,
-// a new slice is allocated for the backing slice. If len(data) == n*(k+1),
-// data is used as the backing slice, and changes to the elements of the returned
-// SymBandDense will be reflected in data. If neither of these is true, NewSymBandDense
-// will panic. k must be at least zero and less than n, otherwise NewSymBandDense will panic.
-//
-// The data must be arranged in row-major order constructed by removing the zeros
-// from the rows outside the band and aligning the diagonals. SymBandDense matrices
-// are stored in the upper triangle. For example, the matrix
-//    1  2  3  0  0  0
-//    2  4  5  6  0  0
-//    3  5  7  8  9  0
-//    0  6  8 10 11 12
-//    0  0  9 11 13 14
-//    0  0  0 12 14 15
-// becomes (* entries are never accessed)
-//     1  2  3
-//     4  5  6
-//     7  8  9
-//    10 11 12
-//    13 14  *
-//    15  *  *
-// which is passed to NewSymBandDense as []float64{1, 2, ..., 15, *, *, *} with k=2.
-// Only the values in the band portion of the matrix are used.
-func NewSymBandDense(n, k int, data []float64) *SymBandDense { - if n <= 0 || k < 0 { - if n == 0 { - panic(ErrZeroLength) - } - panic("mat: negative dimension") - } - if k+1 > n { - panic("mat: band out of range") - } - bc := k + 1 - if data != nil && len(data) != n*bc { - panic(ErrShape) - } - if data == nil { - data = make([]float64, n*bc) - } - return &SymBandDense{ - mat: blas64.SymmetricBand{ - N: n, - K: k, - Stride: bc, - Uplo: blas.Upper, - Data: data, - }, - } -} - -// Dims returns the number of rows and columns in the matrix. -func (s *SymBandDense) Dims() (r, c int) { - return s.mat.N, s.mat.N -} - -// Symmetric returns the size of the receiver. -func (s *SymBandDense) Symmetric() int { - return s.mat.N -} - -// Bandwidth returns the bandwidths of the matrix. -func (s *SymBandDense) Bandwidth() (kl, ku int) { - return s.mat.K, s.mat.K -} - -// SymBand returns the number of rows/columns in the matrix, and the size of -// the bandwidth. -func (s *SymBandDense) SymBand() (n, k int) { - return s.mat.N, s.mat.K -} - -// T implements the Matrix interface. Symmetric matrices, by definition, are -// equal to their transpose, and this is a no-op. -func (s *SymBandDense) T() Matrix { - return s -} - -// TBand implements the Banded interface. -func (s *SymBandDense) TBand() Banded { - return s -} - -// RawSymBand returns the underlying blas64.SymBand used by the receiver. -// Changes to elements in the receiver following the call will be reflected -// in returned blas64.SymBand. -func (s *SymBandDense) RawSymBand() blas64.SymmetricBand { - return s.mat -} - -// SetRawSymBand sets the underlying blas64.SymmetricBand used by the receiver. -// Changes to elements in the receiver following the call will be reflected -// in the input. -// -// The supplied SymmetricBand must use blas.Upper storage format. -func (s *SymBandDense) SetRawSymBand(mat blas64.SymmetricBand) { - if mat.Uplo != blas.Upper { - panic("mat: blas64.SymmetricBand does not have blas.Upper storage") - } - s.mat = mat -} - -// Zero sets all of the matrix elements to zero. -func (s *SymBandDense) Zero() { - for i := 0; i < s.mat.N; i++ { - u := min(1+s.mat.K, s.mat.N-i) - zero(s.mat.Data[i*s.mat.Stride : i*s.mat.Stride+u]) - } -} - -// DiagView returns the diagonal as a matrix backed by the original data. -func (s *SymBandDense) DiagView() Diagonal { - n := s.mat.N - return &DiagDense{ - mat: blas64.Vector{ - N: n, - Inc: s.mat.Stride, - Data: s.mat.Data[:(n-1)*s.mat.Stride+1], - }, - } -} - -// DoNonZero calls the function fn for each of the non-zero elements of s. The function fn -// takes a row/column index and the element value of s at (i, j). -func (s *SymBandDense) DoNonZero(fn func(i, j int, v float64)) { - for i := 0; i < s.mat.N; i++ { - for j := max(0, i-s.mat.K); j < min(s.mat.N, i+s.mat.K+1); j++ { - v := s.at(i, j) - if v != 0 { - fn(i, j, v) - } - } - } -} - -// DoRowNonZero calls the function fn for each of the non-zero elements of row i of s. The function fn -// takes a row/column index and the element value of s at (i, j). -func (s *SymBandDense) DoRowNonZero(i int, fn func(i, j int, v float64)) { - if i < 0 || s.mat.N <= i { - panic(ErrRowAccess) - } - for j := max(0, i-s.mat.K); j < min(s.mat.N, i+s.mat.K+1); j++ { - v := s.at(i, j) - if v != 0 { - fn(i, j, v) - } - } -} - -// DoColNonZero calls the function fn for each of the non-zero elements of column j of s. The function fn -// takes a row/column index and the element value of s at (i, j). 
-func (s *SymBandDense) DoColNonZero(j int, fn func(i, j int, v float64)) { - if j < 0 || s.mat.N <= j { - panic(ErrColAccess) - } - for i := 0; i < s.mat.N; i++ { - if i-s.mat.K <= j && j < i+s.mat.K+1 { - v := s.at(i, j) - if v != 0 { - fn(i, j, v) - } - } - } -} - -// Trace returns the trace. -func (s *SymBandDense) Trace() float64 { - rb := s.RawSymBand() - var tr float64 - for i := 0; i < rb.N; i++ { - tr += rb.Data[i*rb.Stride] - } - return tr -} diff --git a/vendor/gonum.org/v1/gonum/mat/symmetric.go b/vendor/gonum.org/v1/gonum/mat/symmetric.go deleted file mode 100644 index 3d6181c7c..000000000 --- a/vendor/gonum.org/v1/gonum/mat/symmetric.go +++ /dev/null @@ -1,605 +0,0 @@ -// Copyright ©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package mat - -import ( - "math" - - "gonum.org/v1/gonum/blas" - "gonum.org/v1/gonum/blas/blas64" -) - -var ( - symDense *SymDense - - _ Matrix = symDense - _ Symmetric = symDense - _ RawSymmetricer = symDense - _ MutableSymmetric = symDense -) - -const ( - badSymTriangle = "mat: blas64.Symmetric not upper" - badSymCap = "mat: bad capacity for SymDense" -) - -// SymDense is a symmetric matrix that uses dense storage. SymDense -// matrices are stored in the upper triangle. -type SymDense struct { - mat blas64.Symmetric - cap int -} - -// Symmetric represents a symmetric matrix (where the element at {i, j} equals -// the element at {j, i}). Symmetric matrices are always square. -type Symmetric interface { - Matrix - // Symmetric returns the number of rows/columns in the matrix. - Symmetric() int -} - -// A RawSymmetricer can return a view of itself as a BLAS Symmetric matrix. -type RawSymmetricer interface { - RawSymmetric() blas64.Symmetric -} - -// A MutableSymmetric can set elements of a symmetric matrix. -type MutableSymmetric interface { - Symmetric - SetSym(i, j int, v float64) -} - -// NewSymDense creates a new Symmetric matrix with n rows and columns. If data == nil, -// a new slice is allocated for the backing slice. If len(data) == n*n, data is -// used as the backing slice, and changes to the elements of the returned SymDense -// will be reflected in data. If neither of these is true, NewSymDense will panic. -// NewSymDense will panic if n is zero. -// -// The data must be arranged in row-major order, i.e. the (i*c + j)-th -// element in the data slice is the {i, j}-th element in the matrix. -// Only the values in the upper triangular portion of the matrix are used. -func NewSymDense(n int, data []float64) *SymDense { - if n <= 0 { - if n == 0 { - panic(ErrZeroLength) - } - panic("mat: negative dimension") - } - if data != nil && n*n != len(data) { - panic(ErrShape) - } - if data == nil { - data = make([]float64, n*n) - } - return &SymDense{ - mat: blas64.Symmetric{ - N: n, - Stride: n, - Data: data, - Uplo: blas.Upper, - }, - cap: n, - } -} - -// Dims returns the number of rows and columns in the matrix. -func (s *SymDense) Dims() (r, c int) { - return s.mat.N, s.mat.N -} - -// Caps returns the number of rows and columns in the backing matrix. -func (s *SymDense) Caps() (r, c int) { - return s.cap, s.cap -} - -// T returns the receiver, the transpose of a symmetric matrix. -func (s *SymDense) T() Matrix { - return s -} - -// Symmetric implements the Symmetric interface and returns the number of rows -// and columns in the matrix. 
-func (s *SymDense) Symmetric() int { - return s.mat.N -} - -// RawSymmetric returns the matrix as a blas64.Symmetric. The returned -// value must be stored in upper triangular format. -func (s *SymDense) RawSymmetric() blas64.Symmetric { - return s.mat -} - -// SetRawSymmetric sets the underlying blas64.Symmetric used by the receiver. -// Changes to elements in the receiver following the call will be reflected -// in the input. -// -// The supplied Symmetric must use blas.Upper storage format. -func (s *SymDense) SetRawSymmetric(mat blas64.Symmetric) { - if mat.Uplo != blas.Upper { - panic(badSymTriangle) - } - s.cap = mat.N - s.mat = mat -} - -// Reset zeros the dimensions of the matrix so that it can be reused as the -// receiver of a dimensionally restricted operation. -// -// See the Reseter interface for more information. -func (s *SymDense) Reset() { - // N and Stride must be zeroed in unison. - s.mat.N, s.mat.Stride = 0, 0 - s.mat.Data = s.mat.Data[:0] -} - -// Zero sets all of the matrix elements to zero. -func (s *SymDense) Zero() { - for i := 0; i < s.mat.N; i++ { - zero(s.mat.Data[i*s.mat.Stride+i : i*s.mat.Stride+s.mat.N]) - } -} - -// IsZero returns whether the receiver is zero-sized. Zero-sized matrices can be the -// receiver for size-restricted operations. SymDense matrices can be zeroed using Reset. -func (s *SymDense) IsZero() bool { - // It must be the case that m.Dims() returns - // zeros in this case. See comment in Reset(). - return s.mat.N == 0 -} - -// reuseAs resizes an empty matrix to a n×n matrix, -// or checks that a non-empty matrix is n×n. -func (s *SymDense) reuseAs(n int) { - if n == 0 { - panic(ErrZeroLength) - } - if s.mat.N > s.cap { - panic(badSymCap) - } - if s.IsZero() { - s.mat = blas64.Symmetric{ - N: n, - Stride: n, - Data: use(s.mat.Data, n*n), - Uplo: blas.Upper, - } - s.cap = n - return - } - if s.mat.Uplo != blas.Upper { - panic(badSymTriangle) - } - if s.mat.N != n { - panic(ErrShape) - } -} - -func (s *SymDense) isolatedWorkspace(a Symmetric) (w *SymDense, restore func()) { - n := a.Symmetric() - if n == 0 { - panic(ErrZeroLength) - } - w = getWorkspaceSym(n, false) - return w, func() { - s.CopySym(w) - putWorkspaceSym(w) - } -} - -// DiagView returns the diagonal as a matrix backed by the original data. 
-func (s *SymDense) DiagView() Diagonal { - n := s.mat.N - return &DiagDense{ - mat: blas64.Vector{ - N: n, - Inc: s.mat.Stride + 1, - Data: s.mat.Data[:(n-1)*s.mat.Stride+n], - }, - } -} - -func (s *SymDense) AddSym(a, b Symmetric) { - n := a.Symmetric() - if n != b.Symmetric() { - panic(ErrShape) - } - s.reuseAs(n) - - if a, ok := a.(RawSymmetricer); ok { - if b, ok := b.(RawSymmetricer); ok { - amat, bmat := a.RawSymmetric(), b.RawSymmetric() - if s != a { - s.checkOverlap(generalFromSymmetric(amat)) - } - if s != b { - s.checkOverlap(generalFromSymmetric(bmat)) - } - for i := 0; i < n; i++ { - btmp := bmat.Data[i*bmat.Stride+i : i*bmat.Stride+n] - stmp := s.mat.Data[i*s.mat.Stride+i : i*s.mat.Stride+n] - for j, v := range amat.Data[i*amat.Stride+i : i*amat.Stride+n] { - stmp[j] = v + btmp[j] - } - } - return - } - } - - s.checkOverlapMatrix(a) - s.checkOverlapMatrix(b) - for i := 0; i < n; i++ { - stmp := s.mat.Data[i*s.mat.Stride : i*s.mat.Stride+n] - for j := i; j < n; j++ { - stmp[j] = a.At(i, j) + b.At(i, j) - } - } -} - -func (s *SymDense) CopySym(a Symmetric) int { - n := a.Symmetric() - n = min(n, s.mat.N) - if n == 0 { - return 0 - } - switch a := a.(type) { - case RawSymmetricer: - amat := a.RawSymmetric() - if amat.Uplo != blas.Upper { - panic(badSymTriangle) - } - for i := 0; i < n; i++ { - copy(s.mat.Data[i*s.mat.Stride+i:i*s.mat.Stride+n], amat.Data[i*amat.Stride+i:i*amat.Stride+n]) - } - default: - for i := 0; i < n; i++ { - stmp := s.mat.Data[i*s.mat.Stride : i*s.mat.Stride+n] - for j := i; j < n; j++ { - stmp[j] = a.At(i, j) - } - } - } - return n -} - -// SymRankOne performs a symmetric rank-one update to the matrix a with x, -// which is treated as a column vector, and stores the result in the receiver -// s = a + alpha * x * x^T -func (s *SymDense) SymRankOne(a Symmetric, alpha float64, x Vector) { - n := x.Len() - if a.Symmetric() != n { - panic(ErrShape) - } - s.reuseAs(n) - - if s != a { - if rs, ok := a.(RawSymmetricer); ok { - s.checkOverlap(generalFromSymmetric(rs.RawSymmetric())) - } - s.CopySym(a) - } - - xU, _ := untranspose(x) - if rv, ok := xU.(RawVectorer); ok { - r, c := xU.Dims() - xmat := rv.RawVector() - s.checkOverlap(generalFromVector(xmat, r, c)) - blas64.Syr(alpha, xmat, s.mat) - return - } - - for i := 0; i < n; i++ { - for j := i; j < n; j++ { - s.set(i, j, s.at(i, j)+alpha*x.AtVec(i)*x.AtVec(j)) - } - } -} - -// SymRankK performs a symmetric rank-k update to the matrix a and stores the -// result into the receiver. If a is zero, see SymOuterK. -// s = a + alpha * x * x' -func (s *SymDense) SymRankK(a Symmetric, alpha float64, x Matrix) { - n := a.Symmetric() - r, _ := x.Dims() - if r != n { - panic(ErrShape) - } - xMat, aTrans := untranspose(x) - var g blas64.General - if rm, ok := xMat.(RawMatrixer); ok { - g = rm.RawMatrix() - } else { - g = DenseCopyOf(x).mat - aTrans = false - } - if a != s { - if rs, ok := a.(RawSymmetricer); ok { - s.checkOverlap(generalFromSymmetric(rs.RawSymmetric())) - } - s.reuseAs(n) - s.CopySym(a) - } - t := blas.NoTrans - if aTrans { - t = blas.Trans - } - blas64.Syrk(t, alpha, g, 1, s.mat) -} - -// SymOuterK calculates the outer product of x with itself and stores -// the result into the receiver. It is equivalent to the matrix -// multiplication -// s = alpha * x * x'. -// In order to update an existing matrix, see SymRankOne. 
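// Illustrative usage sketch of the symmetric rank-one update documented
// above. An editor-added example, not part of the vendored source; it assumes
// the gonum.org/v1/gonum/mat API as removed in this patch, plus mat.Formatted
// for printing.
//
//	package main
//
//	import (
//		"fmt"
//
//		"gonum.org/v1/gonum/mat"
//	)
//
//	func main() {
//		a := mat.NewSymDense(2, []float64{
//			4, 1,
//			1, 3,
//		})
//		x := mat.NewVecDense(2, []float64{1, 2})
//
//		var s mat.SymDense
//		s.SymRankOne(a, 0.5, x) // s = a + 0.5 * x * x^T
//		fmt.Println(mat.Formatted(&s))
//	}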
-func (s *SymDense) SymOuterK(alpha float64, x Matrix) { - n, _ := x.Dims() - switch { - case s.IsZero(): - s.mat = blas64.Symmetric{ - N: n, - Stride: n, - Data: useZeroed(s.mat.Data, n*n), - Uplo: blas.Upper, - } - s.cap = n - s.SymRankK(s, alpha, x) - case s.mat.Uplo != blas.Upper: - panic(badSymTriangle) - case s.mat.N == n: - if s == x { - w := getWorkspaceSym(n, true) - w.SymRankK(w, alpha, x) - s.CopySym(w) - putWorkspaceSym(w) - } else { - switch r := x.(type) { - case RawMatrixer: - s.checkOverlap(r.RawMatrix()) - case RawSymmetricer: - s.checkOverlap(generalFromSymmetric(r.RawSymmetric())) - case RawTriangular: - s.checkOverlap(generalFromTriangular(r.RawTriangular())) - } - // Only zero the upper triangle. - for i := 0; i < n; i++ { - ri := i * s.mat.Stride - zero(s.mat.Data[ri+i : ri+n]) - } - s.SymRankK(s, alpha, x) - } - default: - panic(ErrShape) - } -} - -// RankTwo performs a symmetric rank-two update to the matrix a with the -// vectors x and y, which are treated as column vectors, and stores the -// result in the receiver -// m = a + alpha * (x * y^T + y * x^T) -func (s *SymDense) RankTwo(a Symmetric, alpha float64, x, y Vector) { - n := s.mat.N - if x.Len() != n { - panic(ErrShape) - } - if y.Len() != n { - panic(ErrShape) - } - - if s != a { - if rs, ok := a.(RawSymmetricer); ok { - s.checkOverlap(generalFromSymmetric(rs.RawSymmetric())) - } - } - - var xmat, ymat blas64.Vector - fast := true - xU, _ := untranspose(x) - if rv, ok := xU.(RawVectorer); ok { - r, c := xU.Dims() - xmat = rv.RawVector() - s.checkOverlap(generalFromVector(xmat, r, c)) - } else { - fast = false - } - yU, _ := untranspose(y) - if rv, ok := yU.(RawVectorer); ok { - r, c := yU.Dims() - ymat = rv.RawVector() - s.checkOverlap(generalFromVector(ymat, r, c)) - } else { - fast = false - } - - if s != a { - if rs, ok := a.(RawSymmetricer); ok { - s.checkOverlap(generalFromSymmetric(rs.RawSymmetric())) - } - s.reuseAs(n) - s.CopySym(a) - } - - if fast { - if s != a { - s.reuseAs(n) - s.CopySym(a) - } - blas64.Syr2(alpha, xmat, ymat, s.mat) - return - } - - for i := 0; i < n; i++ { - s.reuseAs(n) - for j := i; j < n; j++ { - s.set(i, j, a.At(i, j)+alpha*(x.AtVec(i)*y.AtVec(j)+y.AtVec(i)*x.AtVec(j))) - } - } -} - -// ScaleSym multiplies the elements of a by f, placing the result in the receiver. -func (s *SymDense) ScaleSym(f float64, a Symmetric) { - n := a.Symmetric() - s.reuseAs(n) - if a, ok := a.(RawSymmetricer); ok { - amat := a.RawSymmetric() - if s != a { - s.checkOverlap(generalFromSymmetric(amat)) - } - for i := 0; i < n; i++ { - for j := i; j < n; j++ { - s.mat.Data[i*s.mat.Stride+j] = f * amat.Data[i*amat.Stride+j] - } - } - return - } - for i := 0; i < n; i++ { - for j := i; j < n; j++ { - s.mat.Data[i*s.mat.Stride+j] = f * a.At(i, j) - } - } -} - -// SubsetSym extracts a subset of the rows and columns of the matrix a and stores -// the result in-place into the receiver. The resulting matrix size is -// len(set)×len(set). Specifically, at the conclusion of SubsetSym, -// s.At(i, j) equals a.At(set[i], set[j]). Note that the supplied set does not -// have to be a strict subset, dimension repeats are allowed. 
-func (s *SymDense) SubsetSym(a Symmetric, set []int) {
-	n := len(set)
-	na := a.Symmetric()
-	s.reuseAs(n)
-	var restore func()
-	if a == s {
-		s, restore = s.isolatedWorkspace(a)
-		defer restore()
-	}
-
-	if a, ok := a.(RawSymmetricer); ok {
-		raw := a.RawSymmetric()
-		if s != a {
-			s.checkOverlap(generalFromSymmetric(raw))
-		}
-		for i := 0; i < n; i++ {
-			ssub := s.mat.Data[i*s.mat.Stride : i*s.mat.Stride+n]
-			r := set[i]
-			rsub := raw.Data[r*raw.Stride : r*raw.Stride+na]
-			for j := i; j < n; j++ {
-				c := set[j]
-				if r <= c {
-					ssub[j] = rsub[c]
-				} else {
-					ssub[j] = raw.Data[c*raw.Stride+r]
-				}
-			}
-		}
-		return
-	}
-	for i := 0; i < n; i++ {
-		for j := i; j < n; j++ {
-			s.mat.Data[i*s.mat.Stride+j] = a.At(set[i], set[j])
-		}
-	}
-}
-
-// SliceSym returns a new Matrix that shares backing data with the receiver.
-// The returned matrix starts at {i,i} of the receiver and extends k-i rows
-// and columns. The final row and column in the resulting matrix is k-1.
-// SliceSym panics with ErrIndexOutOfRange if the slice is outside the
-// capacity of the receiver.
-func (s *SymDense) SliceSym(i, k int) Symmetric {
-	sz := s.cap
-	if i < 0 || sz < i || k < i || sz < k {
-		panic(ErrIndexOutOfRange)
-	}
-	v := *s
-	v.mat.Data = s.mat.Data[i*s.mat.Stride+i : (k-1)*s.mat.Stride+k]
-	v.mat.N = k - i
-	v.cap = s.cap - i
-	return &v
-}
-
-// Trace returns the trace of the matrix.
-func (s *SymDense) Trace() float64 {
-	// TODO(btracey): could use internal asm sum routine.
-	var v float64
-	for i := 0; i < s.mat.N; i++ {
-		v += s.mat.Data[i*s.mat.Stride+i]
-	}
-	return v
-}
-
-// GrowSym returns the receiver expanded by n rows and n columns. If the
-// dimensions of the expanded matrix are outside the capacity of the receiver
-// a new allocation is made, otherwise not. Note that the receiver itself is
-// not modified during the call to GrowSym.
-func (s *SymDense) GrowSym(n int) Symmetric {
-	if n < 0 {
-		panic(ErrIndexOutOfRange)
-	}
-	if n == 0 {
-		return s
-	}
-	var v SymDense
-	n += s.mat.N
-	if n > s.cap {
-		v.mat = blas64.Symmetric{
-			N:      n,
-			Stride: n,
-			Uplo:   blas.Upper,
-			Data:   make([]float64, n*n),
-		}
-		v.cap = n
-		// Copy elements, including those not currently visible. Use a temporary
-		// structure to avoid modifying the receiver.
-		var tmp SymDense
-		tmp.mat = blas64.Symmetric{
-			N:      s.cap,
-			Stride: s.mat.Stride,
-			Data:   s.mat.Data,
-			Uplo:   s.mat.Uplo,
-		}
-		tmp.cap = s.cap
-		v.CopySym(&tmp)
-		return &v
-	}
-	v.mat = blas64.Symmetric{
-		N:      n,
-		Stride: s.mat.Stride,
-		Uplo:   blas.Upper,
-		Data:   s.mat.Data[:(n-1)*s.mat.Stride+n],
-	}
-	v.cap = s.cap
-	return &v
-}
-
-// PowPSD computes a^pow where a is a symmetric positive definite matrix.
-//
-// PowPSD returns an error if the matrix is not symmetric positive definite
-// or the eigendecomposition is not successful.
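// Illustrative usage sketch of PowPSD as documented above, here taking a
// matrix square root. An editor-added example, not part of the vendored
// source; it assumes the gonum.org/v1/gonum/mat API as removed in this patch,
// plus mat.Formatted for printing.
//
//	package main
//
//	import (
//		"fmt"
//		"log"
//
//		"gonum.org/v1/gonum/mat"
//	)
//
//	func main() {
//		a := mat.NewSymDense(2, []float64{
//			4, 0,
//			0, 9,
//		})
//		var root mat.SymDense
//		// The matrix square root: root * root = a.
//		if err := root.PowPSD(a, 0.5); err != nil {
//			log.Fatal(err)
//		}
//		fmt.Println(mat.Formatted(&root)) // Approximately diag(2, 3).
//	}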
-func (s *SymDense) PowPSD(a Symmetric, pow float64) error { - dim := a.Symmetric() - s.reuseAs(dim) - - var eigen EigenSym - ok := eigen.Factorize(a, true) - if !ok { - return ErrFailedEigen - } - values := eigen.Values(nil) - for i, v := range values { - if v <= 0 { - return ErrNotPSD - } - values[i] = math.Pow(v, pow) - } - u := eigen.VectorsTo(nil) - - s.SymOuterK(values[0], u.ColView(0)) - - var v VecDense - for i := 1; i < dim; i++ { - v.ColViewOf(u, i) - s.SymRankOne(s, values[i], &v) - } - return nil -} diff --git a/vendor/gonum.org/v1/gonum/mat/triangular.go b/vendor/gonum.org/v1/gonum/mat/triangular.go deleted file mode 100644 index 65cfa1e3c..000000000 --- a/vendor/gonum.org/v1/gonum/mat/triangular.go +++ /dev/null @@ -1,684 +0,0 @@ -// Copyright ©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package mat - -import ( - "math" - - "gonum.org/v1/gonum/blas" - "gonum.org/v1/gonum/blas/blas64" - "gonum.org/v1/gonum/lapack/lapack64" -) - -var ( - triDense *TriDense - _ Matrix = triDense - _ Triangular = triDense - _ RawTriangular = triDense - _ MutableTriangular = triDense - - _ NonZeroDoer = triDense - _ RowNonZeroDoer = triDense - _ ColNonZeroDoer = triDense -) - -const badTriCap = "mat: bad capacity for TriDense" - -// TriDense represents an upper or lower triangular matrix in dense storage -// format. -type TriDense struct { - mat blas64.Triangular - cap int -} - -// Triangular represents a triangular matrix. Triangular matrices are always square. -type Triangular interface { - Matrix - // Triangle returns the number of rows/columns in the matrix and its - // orientation. - Triangle() (n int, kind TriKind) - - // TTri is the equivalent of the T() method in the Matrix interface but - // guarantees the transpose is of triangular type. - TTri() Triangular -} - -// A RawTriangular can return a blas64.Triangular representation of the receiver. -// Changes to the blas64.Triangular.Data slice will be reflected in the original -// matrix, changes to the N, Stride, Uplo and Diag fields will not. -type RawTriangular interface { - RawTriangular() blas64.Triangular -} - -// A MutableTriangular can set elements of a triangular matrix. -type MutableTriangular interface { - Triangular - SetTri(i, j int, v float64) -} - -var ( - _ Matrix = TransposeTri{} - _ Triangular = TransposeTri{} - _ UntransposeTrier = TransposeTri{} -) - -// TransposeTri is a type for performing an implicit transpose of a Triangular -// matrix. It implements the Triangular interface, returning values from the -// transpose of the matrix within. -type TransposeTri struct { - Triangular Triangular -} - -// At returns the value of the element at row i and column j of the transposed -// matrix, that is, row j and column i of the Triangular field. -func (t TransposeTri) At(i, j int) float64 { - return t.Triangular.At(j, i) -} - -// Dims returns the dimensions of the transposed matrix. Triangular matrices are -// square and thus this is the same size as the original Triangular. -func (t TransposeTri) Dims() (r, c int) { - c, r = t.Triangular.Dims() - return r, c -} - -// T performs an implicit transpose by returning the Triangular field. -func (t TransposeTri) T() Matrix { - return t.Triangular -} - -// Triangle returns the number of rows/columns in the matrix and its orientation. 
-func (t TransposeTri) Triangle() (int, TriKind) { - n, upper := t.Triangular.Triangle() - return n, !upper -} - -// TTri performs an implicit transpose by returning the Triangular field. -func (t TransposeTri) TTri() Triangular { - return t.Triangular -} - -// Untranspose returns the Triangular field. -func (t TransposeTri) Untranspose() Matrix { - return t.Triangular -} - -func (t TransposeTri) UntransposeTri() Triangular { - return t.Triangular -} - -// NewTriDense creates a new Triangular matrix with n rows and columns. If data == nil, -// a new slice is allocated for the backing slice. If len(data) == n*n, data is -// used as the backing slice, and changes to the elements of the returned TriDense -// will be reflected in data. If neither of these is true, NewTriDense will panic. -// NewTriDense will panic if n is zero. -// -// The data must be arranged in row-major order, i.e. the (i*c + j)-th -// element in the data slice is the {i, j}-th element in the matrix. -// Only the values in the triangular portion corresponding to kind are used. -func NewTriDense(n int, kind TriKind, data []float64) *TriDense { - if n <= 0 { - if n == 0 { - panic(ErrZeroLength) - } - panic("mat: negative dimension") - } - if data != nil && len(data) != n*n { - panic(ErrShape) - } - if data == nil { - data = make([]float64, n*n) - } - uplo := blas.Lower - if kind == Upper { - uplo = blas.Upper - } - return &TriDense{ - mat: blas64.Triangular{ - N: n, - Stride: n, - Data: data, - Uplo: uplo, - Diag: blas.NonUnit, - }, - cap: n, - } -} - -func (t *TriDense) Dims() (r, c int) { - return t.mat.N, t.mat.N -} - -// Triangle returns the dimension of t and its orientation. The returned -// orientation is only valid when n is not zero. -func (t *TriDense) Triangle() (n int, kind TriKind) { - return t.mat.N, t.triKind() -} - -func (t *TriDense) isUpper() bool { - return isUpperUplo(t.mat.Uplo) -} - -func (t *TriDense) triKind() TriKind { - return TriKind(isUpperUplo(t.mat.Uplo)) -} - -func isUpperUplo(u blas.Uplo) bool { - switch u { - case blas.Upper: - return true - case blas.Lower: - return false - default: - panic(badTriangle) - } -} - -func uploToTriKind(u blas.Uplo) TriKind { - switch u { - case blas.Upper: - return Upper - case blas.Lower: - return Lower - default: - panic(badTriangle) - } -} - -// asSymBlas returns the receiver restructured as a blas64.Symmetric with the -// same backing memory. Panics if the receiver is unit. -// This returns a blas64.Symmetric and not a *SymDense because SymDense can only -// be upper triangular. -func (t *TriDense) asSymBlas() blas64.Symmetric { - if t.mat.Diag == blas.Unit { - panic("mat: cannot convert unit TriDense into blas64.Symmetric") - } - return blas64.Symmetric{ - N: t.mat.N, - Stride: t.mat.Stride, - Data: t.mat.Data, - Uplo: t.mat.Uplo, - } -} - -// T performs an implicit transpose by returning the receiver inside a Transpose. -func (t *TriDense) T() Matrix { - return Transpose{t} -} - -// TTri performs an implicit transpose by returning the receiver inside a TransposeTri. -func (t *TriDense) TTri() Triangular { - return TransposeTri{t} -} - -func (t *TriDense) RawTriangular() blas64.Triangular { - return t.mat -} - -// SetRawTriangular sets the underlying blas64.Triangular used by the receiver. -// Changes to elements in the receiver following the call will be reflected -// in the input. -// -// The supplied Triangular must not use blas.Unit storage format. 
-func (t *TriDense) SetRawTriangular(mat blas64.Triangular) {
-	if mat.Diag == blas.Unit {
-		panic("mat: cannot set TriDense with Unit storage format")
-	}
-	t.cap = mat.N
-	t.mat = mat
-}
-
-// Reset zeros the dimensions of the matrix so that it can be reused as the
-// receiver of a dimensionally restricted operation.
-//
-// See the Reseter interface for more information.
-func (t *TriDense) Reset() {
-	// N and Stride must be zeroed in unison.
-	t.mat.N, t.mat.Stride = 0, 0
-	// Defensively zero Uplo to ensure
-	// it is set correctly later.
-	t.mat.Uplo = 0
-	t.mat.Data = t.mat.Data[:0]
-}
-
-// Zero sets all of the matrix elements to zero.
-func (t *TriDense) Zero() {
-	if t.isUpper() {
-		for i := 0; i < t.mat.N; i++ {
-			zero(t.mat.Data[i*t.mat.Stride+i : i*t.mat.Stride+t.mat.N])
-		}
-		return
-	}
-	for i := 0; i < t.mat.N; i++ {
-		zero(t.mat.Data[i*t.mat.Stride : i*t.mat.Stride+i+1])
-	}
-}
-
-// IsZero returns whether the receiver is zero-sized. Zero-sized matrices can be the
-// receiver for size-restricted operations. TriDense matrices can be zeroed using Reset.
-func (t *TriDense) IsZero() bool {
-	// It must be the case that t.Dims() returns
-	// zeros in this case. See comment in Reset().
-	return t.mat.Stride == 0
-}
-
-// untransposeTri untransposes a triangular matrix if applicable. If a is an
-// UntransposeTrier, then untransposeTri returns the underlying matrix and true.
-// If it is not, then it returns the input matrix and false.
-func untransposeTri(a Triangular) (Triangular, bool) {
-	if ut, ok := a.(UntransposeTrier); ok {
-		return ut.UntransposeTri(), true
-	}
-	return a, false
-}
-
-// reuseAs resizes a zero receiver to an n×n triangular matrix with the given
-// orientation. If the receiver is non-zero, reuseAs checks that the receiver
-// is the correct size and orientation.
-func (t *TriDense) reuseAs(n int, kind TriKind) {
-	if n == 0 {
-		panic(ErrZeroLength)
-	}
-	ul := blas.Lower
-	if kind == Upper {
-		ul = blas.Upper
-	}
-	if t.mat.N > t.cap {
-		panic(badTriCap)
-	}
-	if t.IsZero() {
-		t.mat = blas64.Triangular{
-			N:      n,
-			Stride: n,
-			Diag:   blas.NonUnit,
-			Data:   use(t.mat.Data, n*n),
-			Uplo:   ul,
-		}
-		t.cap = n
-		return
-	}
-	if t.mat.N != n {
-		panic(ErrShape)
-	}
-	if t.mat.Uplo != ul {
-		panic(ErrTriangle)
-	}
-}
-
-// isolatedWorkspace returns a new TriDense matrix w with the size of a and
-// returns a callback to defer which performs cleanup at the return of the call.
-// This should be used when a method receiver is the same pointer as an input argument.
-func (t *TriDense) isolatedWorkspace(a Triangular) (w *TriDense, restore func()) {
-	n, kind := a.Triangle()
-	if n == 0 {
-		panic(ErrZeroLength)
-	}
-	w = getWorkspaceTri(n, kind, false)
-	return w, func() {
-		t.Copy(w)
-		putWorkspaceTri(w)
-	}
-}
-
-// DiagView returns the diagonal as a matrix backed by the original data.
-func (t *TriDense) DiagView() Diagonal {
-	if t.mat.Diag == blas.Unit {
-		panic("mat: cannot take view of Unit diagonal")
-	}
-	n := t.mat.N
-	return &DiagDense{
-		mat: blas64.Vector{
-			N:    n,
-			Inc:  t.mat.Stride + 1,
-			Data: t.mat.Data[:(n-1)*t.mat.Stride+n],
-		},
-	}
-}
-
-// Copy makes a copy of elements of a into the receiver. It is similar to the
-// built-in copy; it copies as much as the overlap between the two matrices and
-// returns the number of rows and columns it copied. Only elements within the
-// receiver's non-zero triangle are set.
-//
-// See the Copier interface for more information.
-func (t *TriDense) Copy(a Matrix) (r, c int) { - r, c = a.Dims() - r = min(r, t.mat.N) - c = min(c, t.mat.N) - if r == 0 || c == 0 { - return 0, 0 - } - - switch a := a.(type) { - case RawMatrixer: - amat := a.RawMatrix() - if t.isUpper() { - for i := 0; i < r; i++ { - copy(t.mat.Data[i*t.mat.Stride+i:i*t.mat.Stride+c], amat.Data[i*amat.Stride+i:i*amat.Stride+c]) - } - } else { - for i := 0; i < r; i++ { - copy(t.mat.Data[i*t.mat.Stride:i*t.mat.Stride+i+1], amat.Data[i*amat.Stride:i*amat.Stride+i+1]) - } - } - case RawTriangular: - amat := a.RawTriangular() - aIsUpper := isUpperUplo(amat.Uplo) - tIsUpper := t.isUpper() - switch { - case tIsUpper && aIsUpper: - for i := 0; i < r; i++ { - copy(t.mat.Data[i*t.mat.Stride+i:i*t.mat.Stride+c], amat.Data[i*amat.Stride+i:i*amat.Stride+c]) - } - case !tIsUpper && !aIsUpper: - for i := 0; i < r; i++ { - copy(t.mat.Data[i*t.mat.Stride:i*t.mat.Stride+i+1], amat.Data[i*amat.Stride:i*amat.Stride+i+1]) - } - default: - for i := 0; i < r; i++ { - t.set(i, i, amat.Data[i*amat.Stride+i]) - } - } - default: - isUpper := t.isUpper() - for i := 0; i < r; i++ { - if isUpper { - for j := i; j < c; j++ { - t.set(i, j, a.At(i, j)) - } - } else { - for j := 0; j <= i; j++ { - t.set(i, j, a.At(i, j)) - } - } - } - } - - return r, c -} - -// InverseTri computes the inverse of the triangular matrix a, storing the result -// into the receiver. If a is ill-conditioned, a Condition error will be returned. -// Note that matrix inversion is numerically unstable, and should generally be -// avoided where possible, for example by using the Solve routines. -func (t *TriDense) InverseTri(a Triangular) error { - t.checkOverlapMatrix(a) - n, _ := a.Triangle() - t.reuseAs(a.Triangle()) - t.Copy(a) - work := getFloats(3*n, false) - iwork := getInts(n, false) - cond := lapack64.Trcon(CondNorm, t.mat, work, iwork) - putFloats(work) - putInts(iwork) - if math.IsInf(cond, 1) { - return Condition(cond) - } - ok := lapack64.Trtri(t.mat) - if !ok { - return Condition(math.Inf(1)) - } - if cond > ConditionTolerance { - return Condition(cond) - } - return nil -} - -// MulTri takes the product of triangular matrices a and b and places the result -// in the receiver. The size of a and b must match, and they both must have the -// same TriKind, or Mul will panic. -func (t *TriDense) MulTri(a, b Triangular) { - n, kind := a.Triangle() - nb, kindb := b.Triangle() - if n != nb { - panic(ErrShape) - } - if kind != kindb { - panic(ErrTriangle) - } - - aU, _ := untransposeTri(a) - bU, _ := untransposeTri(b) - t.checkOverlapMatrix(bU) - t.checkOverlapMatrix(aU) - t.reuseAs(n, kind) - var restore func() - if t == aU { - t, restore = t.isolatedWorkspace(aU) - defer restore() - } else if t == bU { - t, restore = t.isolatedWorkspace(bU) - defer restore() - } - - // Inspect types here, helps keep the loops later clean(er). - _, aDiag := aU.(Diagonal) - _, bDiag := bU.(Diagonal) - // If they are both diagonal only need 1 loop. - // All diagonal matrices are Upper. - // TODO: Add fast paths for DiagDense. - if aDiag && bDiag { - t.Zero() - for i := 0; i < n; i++ { - t.SetTri(i, i, a.At(i, i)*b.At(i, i)) - } - return - } - - // Now we know at least one matrix is non-diagonal. - // And all diagonal matrices are all Upper. - // The both-diagonal case is handled above. - // TODO: Add fast paths for Dense variants. 
- if kind == Upper { - for i := 0; i < n; i++ { - for j := i; j < n; j++ { - switch { - case aDiag: - t.SetTri(i, j, a.At(i, i)*b.At(i, j)) - case bDiag: - t.SetTri(i, j, a.At(i, j)*b.At(j, j)) - default: - var v float64 - for k := i; k <= j; k++ { - v += a.At(i, k) * b.At(k, j) - } - t.SetTri(i, j, v) - } - } - } - return - } - for i := 0; i < n; i++ { - for j := 0; j <= i; j++ { - var v float64 - for k := j; k <= i; k++ { - v += a.At(i, k) * b.At(k, j) - } - t.SetTri(i, j, v) - } - } -} - -// ScaleTri multiplies the elements of a by f, placing the result in the receiver. -// If the receiver is non-zero, the size and kind of the receiver must match -// the input, or ScaleTri will panic. -func (t *TriDense) ScaleTri(f float64, a Triangular) { - n, kind := a.Triangle() - t.reuseAs(n, kind) - - // TODO(btracey): Improve the set of fast-paths. - switch a := a.(type) { - case RawTriangular: - amat := a.RawTriangular() - if t != a { - t.checkOverlap(generalFromTriangular(amat)) - } - if kind == Upper { - for i := 0; i < n; i++ { - ts := t.mat.Data[i*t.mat.Stride+i : i*t.mat.Stride+n] - as := amat.Data[i*amat.Stride+i : i*amat.Stride+n] - for i, v := range as { - ts[i] = v * f - } - } - return - } - for i := 0; i < n; i++ { - ts := t.mat.Data[i*t.mat.Stride : i*t.mat.Stride+i+1] - as := amat.Data[i*amat.Stride : i*amat.Stride+i+1] - for i, v := range as { - ts[i] = v * f - } - } - return - default: - t.checkOverlapMatrix(a) - isUpper := kind == Upper - for i := 0; i < n; i++ { - if isUpper { - for j := i; j < n; j++ { - t.set(i, j, f*a.At(i, j)) - } - } else { - for j := 0; j <= i; j++ { - t.set(i, j, f*a.At(i, j)) - } - } - } - } -} - -// Trace returns the trace of the matrix. -func (t *TriDense) Trace() float64 { - // TODO(btracey): could use internal asm sum routine. - var v float64 - for i := 0; i < t.mat.N; i++ { - v += t.mat.Data[i*t.mat.Stride+i] - } - return v -} - -// copySymIntoTriangle copies a symmetric matrix into a TriDense -func copySymIntoTriangle(t *TriDense, s Symmetric) { - n, upper := t.Triangle() - ns := s.Symmetric() - if n != ns { - panic("mat: triangle size mismatch") - } - ts := t.mat.Stride - if rs, ok := s.(RawSymmetricer); ok { - sd := rs.RawSymmetric() - ss := sd.Stride - if upper { - if sd.Uplo == blas.Upper { - for i := 0; i < n; i++ { - copy(t.mat.Data[i*ts+i:i*ts+n], sd.Data[i*ss+i:i*ss+n]) - } - return - } - for i := 0; i < n; i++ { - for j := i; j < n; j++ { - t.mat.Data[i*ts+j] = sd.Data[j*ss+i] - } - } - return - } - if sd.Uplo == blas.Upper { - for i := 0; i < n; i++ { - for j := 0; j <= i; j++ { - t.mat.Data[i*ts+j] = sd.Data[j*ss+i] - } - } - return - } - for i := 0; i < n; i++ { - copy(t.mat.Data[i*ts:i*ts+i+1], sd.Data[i*ss:i*ss+i+1]) - } - return - } - if upper { - for i := 0; i < n; i++ { - for j := i; j < n; j++ { - t.mat.Data[i*ts+j] = s.At(i, j) - } - } - return - } - for i := 0; i < n; i++ { - for j := 0; j <= i; j++ { - t.mat.Data[i*ts+j] = s.At(i, j) - } - } -} - -// DoNonZero calls the function fn for each of the non-zero elements of t. The function fn -// takes a row/column index and the element value of t at (i, j). -func (t *TriDense) DoNonZero(fn func(i, j int, v float64)) { - if t.isUpper() { - for i := 0; i < t.mat.N; i++ { - for j := i; j < t.mat.N; j++ { - v := t.at(i, j) - if v != 0 { - fn(i, j, v) - } - } - } - return - } - for i := 0; i < t.mat.N; i++ { - for j := 0; j <= i; j++ { - v := t.at(i, j) - if v != 0 { - fn(i, j, v) - } - } - } -} - -// DoRowNonZero calls the function fn for each of the non-zero elements of row i of t. 
The function fn -// takes a row/column index and the element value of t at (i, j). -func (t *TriDense) DoRowNonZero(i int, fn func(i, j int, v float64)) { - if i < 0 || t.mat.N <= i { - panic(ErrRowAccess) - } - if t.isUpper() { - for j := i; j < t.mat.N; j++ { - v := t.at(i, j) - if v != 0 { - fn(i, j, v) - } - } - return - } - for j := 0; j <= i; j++ { - v := t.at(i, j) - if v != 0 { - fn(i, j, v) - } - } -} - -// DoColNonZero calls the function fn for each of the non-zero elements of column j of t. The function fn -// takes a row/column index and the element value of t at (i, j). -func (t *TriDense) DoColNonZero(j int, fn func(i, j int, v float64)) { - if j < 0 || t.mat.N <= j { - panic(ErrColAccess) - } - if t.isUpper() { - for i := 0; i <= j; i++ { - v := t.at(i, j) - if v != 0 { - fn(i, j, v) - } - } - return - } - for i := j; i < t.mat.N; i++ { - v := t.at(i, j) - if v != 0 { - fn(i, j, v) - } - } -} diff --git a/vendor/gonum.org/v1/gonum/mat/triband.go b/vendor/gonum.org/v1/gonum/mat/triband.go deleted file mode 100644 index 698af89af..000000000 --- a/vendor/gonum.org/v1/gonum/mat/triband.go +++ /dev/null @@ -1,367 +0,0 @@ -// Copyright ©2018 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package mat - -import ( - "gonum.org/v1/gonum/blas" - "gonum.org/v1/gonum/blas/blas64" -) - -var ( - triBand TriBanded - _ Banded = triBand - _ Triangular = triBand - - triBandDense *TriBandDense - _ Matrix = triBandDense - _ Triangular = triBandDense - _ Banded = triBandDense - _ TriBanded = triBandDense - _ RawTriBander = triBandDense - _ MutableTriBanded = triBandDense -) - -// TriBanded is a triangular band matrix interface type. -type TriBanded interface { - Banded - - // Triangle returns the number of rows/columns in the matrix and its - // orientation. - Triangle() (n int, kind TriKind) - - // TTri is the equivalent of the T() method in the Matrix interface but - // guarantees the transpose is of triangular type. - TTri() Triangular - - // TriBand returns the number of rows/columns in the matrix, the - // size of the bandwidth, and the orientation. - TriBand() (n, k int, kind TriKind) - - // TTriBand is the equivalent of the T() method in the Matrix interface but - // guarantees the transpose is of banded triangular type. - TTriBand() TriBanded -} - -// A RawTriBander can return a blas64.TriangularBand representation of the receiver. -// Changes to the blas64.TriangularBand.Data slice will be reflected in the original -// matrix, changes to the N, K, Stride, Uplo and Diag fields will not. -type RawTriBander interface { - RawTriBand() blas64.TriangularBand -} - -// MutableTriBanded is a triangular band matrix interface type that allows -// elements to be altered. -type MutableTriBanded interface { - TriBanded - SetTriBand(i, j int, v float64) -} - -var ( - tTriBand TransposeTriBand - _ Matrix = tTriBand - _ TriBanded = tTriBand - _ Untransposer = tTriBand - _ UntransposeTrier = tTriBand - _ UntransposeBander = tTriBand - _ UntransposeTriBander = tTriBand -) - -// TransposeTriBand is a type for performing an implicit transpose of a TriBanded -// matrix. It implements the TriBanded interface, returning values from the -// transpose of the matrix within. -type TransposeTriBand struct { - TriBanded TriBanded -} - -// At returns the value of the element at row i and column j of the transposed -// matrix, that is, row j and column i of the TriBanded field. 
-func (t TransposeTriBand) At(i, j int) float64 { - return t.TriBanded.At(j, i) -} - -// Dims returns the dimensions of the transposed matrix. TriBanded matrices are -// square and thus this is the same size as the original TriBanded. -func (t TransposeTriBand) Dims() (r, c int) { - c, r = t.TriBanded.Dims() - return r, c -} - -// T performs an implicit transpose by returning the TriBand field. -func (t TransposeTriBand) T() Matrix { - return t.TriBanded -} - -// Triangle returns the number of rows/columns in the matrix and its orientation. -func (t TransposeTriBand) Triangle() (int, TriKind) { - n, upper := t.TriBanded.Triangle() - return n, !upper -} - -// TTri performs an implicit transpose by returning the TriBand field. -func (t TransposeTriBand) TTri() Triangular { - return t.TriBanded -} - -// Bandwidth returns the upper and lower bandwidths of the matrix. -func (t TransposeTriBand) Bandwidth() (kl, ku int) { - kl, ku = t.TriBanded.Bandwidth() - return ku, kl -} - -// TBand performs an implicit transpose by returning the TriBand field. -func (t TransposeTriBand) TBand() Banded { - return t.TriBanded -} - -// TriBand returns the number of rows/columns in the matrix, the -// size of the bandwidth, and the orientation. -func (t TransposeTriBand) TriBand() (n, k int, kind TriKind) { - n, k, kind = t.TriBanded.TriBand() - return n, k, !kind -} - -// TTriBand performs an implicit transpose by returning the TriBand field. -func (t TransposeTriBand) TTriBand() TriBanded { - return t.TriBanded -} - -// Untranspose returns the Triangular field. -func (t TransposeTriBand) Untranspose() Matrix { - return t.TriBanded -} - -// UntransposeTri returns the underlying Triangular matrix. -func (t TransposeTriBand) UntransposeTri() Triangular { - return t.TriBanded -} - -// UntransposeBand returns the underlying Banded matrix. -func (t TransposeTriBand) UntransposeBand() Banded { - return t.TriBanded -} - -// UntransposeTriBand returns the underlying TriBanded matrix. -func (t TransposeTriBand) UntransposeTriBand() TriBanded { - return t.TriBanded -} - -// TriBandDense represents a triangular band matrix in dense storage format. -type TriBandDense struct { - mat blas64.TriangularBand -} - -// NewTriBandDense creates a new triangular banded matrix with n rows and columns, -// k bands in the direction of the specified kind. If data == nil, -// a new slice is allocated for the backing slice. If len(data) == n*(k+1), -// data is used as the backing slice, and changes to the elements of the returned -// TriBandDense will be reflected in data. If neither of these is true, NewTriBandDense -// will panic. k must be at least zero and less than n, otherwise NewTriBandDense will panic. -// -// The data must be arranged in row-major order constructed by removing the zeros -// from the rows outside the band and aligning the diagonals. For example, if -// the upper-triangular banded matrix -// 1 2 3 0 0 0 -// 0 4 5 6 0 0 -// 0 0 7 8 9 0 -// 0 0 0 10 11 12 -// 0 0 0 0 13 14 -// 0 0 0 0 0 15 -// becomes (* entries are never accessed) -// 1 2 3 -// 4 5 6 -// 7 8 9 -// 10 11 12 -// 13 14 * -// 15 * * -// which is passed to NewTriBandDense as []float64{1, 2, ..., 15, *, *, *} -// with k=2 and kind = mat.Upper. 
-// The lower triangular banded matrix -// 1 0 0 0 0 0 -// 2 3 0 0 0 0 -// 4 5 6 0 0 0 -// 0 7 8 9 0 0 -// 0 0 10 11 12 0 -// 0 0 0 13 14 15 -// becomes (* entries are never accessed) -// * * 1 -// * 2 3 -// 4 5 6 -// 7 8 9 -// 10 11 12 -// 13 14 15 -// which is passed to NewTriBandDense as []float64{*, *, *, 1, 2, ..., 15} -// with k=2 and kind = mat.Lower. -// Only the values in the band portion of the matrix are used. -func NewTriBandDense(n, k int, kind TriKind, data []float64) *TriBandDense { - if n <= 0 || k < 0 { - if n == 0 { - panic(ErrZeroLength) - } - panic("mat: negative dimension") - } - if k+1 > n { - panic("mat: band out of range") - } - bc := k + 1 - if data != nil && len(data) != n*bc { - panic(ErrShape) - } - if data == nil { - data = make([]float64, n*bc) - } - uplo := blas.Lower - if kind { - uplo = blas.Upper - } - return &TriBandDense{ - mat: blas64.TriangularBand{ - Uplo: uplo, - Diag: blas.NonUnit, - N: n, - K: k, - Data: data, - Stride: bc, - }, - } -} - -// Dims returns the number of rows and columns in the matrix. -func (t *TriBandDense) Dims() (r, c int) { - return t.mat.N, t.mat.N -} - -// T performs an implicit transpose by returning the receiver inside a Transpose. -func (t *TriBandDense) T() Matrix { - return Transpose{t} -} - -// IsZero returns whether the receiver is zero-sized. Zero-sized matrices can be the -// receiver for size-restricted operations. TriBandDense matrices can be zeroed using Reset. -func (t *TriBandDense) IsZero() bool { - // It must be the case that t.Dims() returns - // zeros in this case. See comment in Reset(). - return t.mat.Stride == 0 -} - -// Reset zeros the dimensions of the matrix so that it can be reused as the -// receiver of a dimensionally restricted operation. -// -// See the Reseter interface for more information. -func (t *TriBandDense) Reset() { - t.mat.N = 0 - t.mat.Stride = 0 - t.mat.K = 0 - t.mat.Data = t.mat.Data[:0] -} - -// Zero sets all of the matrix elements to zero. -func (t *TriBandDense) Zero() { - if t.isUpper() { - for i := 0; i < t.mat.N; i++ { - u := min(1+t.mat.K, t.mat.N-i) - zero(t.mat.Data[i*t.mat.Stride : i*t.mat.Stride+u]) - } - return - } - for i := 0; i < t.mat.N; i++ { - l := max(0, t.mat.K-i) - zero(t.mat.Data[i*t.mat.Stride+l : i*t.mat.Stride+t.mat.K+1]) - } -} - -func (t *TriBandDense) isUpper() bool { - return isUpperUplo(t.mat.Uplo) -} - -func (t *TriBandDense) triKind() TriKind { - return TriKind(isUpperUplo(t.mat.Uplo)) -} - -// Triangle returns the dimension of t and its orientation. The returned -// orientation is only valid when n is not zero. -func (t *TriBandDense) Triangle() (n int, kind TriKind) { - return t.mat.N, t.triKind() -} - -// TTri performs an implicit transpose by returning the receiver inside a TransposeTri. -func (t *TriBandDense) TTri() Triangular { - return TransposeTri{t} -} - -// Bandwidth returns the upper and lower bandwidths of the matrix. -func (t *TriBandDense) Bandwidth() (kl, ku int) { - if t.isUpper() { - return 0, t.mat.K - } - return t.mat.K, 0 -} - -// TBand performs an implicit transpose by returning the receiver inside a TransposeBand. -func (t *TriBandDense) TBand() Banded { - return TransposeBand{t} -} - -// TriBand returns the number of rows/columns in the matrix, the -// size of the bandwidth, and the orientation. -func (t *TriBandDense) TriBand() (n, k int, kind TriKind) { - return t.mat.N, t.mat.K, TriKind(!t.IsZero()) && t.triKind() -} - -// TTriBand performs an implicit transpose by returning the receiver inside a TransposeTriBand. 
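// Illustrative usage sketch of the triangular band storage layout documented
// above. An editor-added example, not part of the vendored source; it assumes
// the gonum.org/v1/gonum/mat API as removed in this patch, plus mat.Formatted
// for printing.
//
//	package main
//
//	import (
//		"fmt"
//
//		"gonum.org/v1/gonum/mat"
//	)
//
//	func main() {
//		// Row-major band storage for the upper-triangular 6×6 example
//		// above, with k = 2; zeros stand in for the never-accessed * slots.
//		data := []float64{
//			1, 2, 3,
//			4, 5, 6,
//			7, 8, 9,
//			10, 11, 12,
//			13, 14, 0,
//			15, 0, 0,
//		}
//		t := mat.NewTriBandDense(6, 2, mat.Upper, data)
//		fmt.Println(mat.Formatted(t)) // Prints the full 6×6 triangular matrix.
//	}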
-func (t *TriBandDense) TTriBand() TriBanded { - return TransposeTriBand{t} -} - -// RawTriBand returns the underlying blas64.TriangularBand used by the receiver. -// Changes to the blas64.TriangularBand.Data slice will be reflected in the original -// matrix, changes to the N, K, Stride, Uplo and Diag fields will not. -func (t *TriBandDense) RawTriBand() blas64.TriangularBand { - return t.mat -} - -// SetRawTriBand sets the underlying blas64.TriangularBand used by the receiver. -// Changes to elements in the receiver following the call will be reflected -// in the input. -// -// The supplied TriangularBand must not use blas.Unit storage format. -func (t *TriBandDense) SetRawTriBand(mat blas64.TriangularBand) { - if mat.Diag == blas.Unit { - panic("mat: cannot set TriBand with Unit storage") - } - t.mat = mat -} - -// DiagView returns the diagonal as a matrix backed by the original data. -func (t *TriBandDense) DiagView() Diagonal { - if t.mat.Diag == blas.Unit { - panic("mat: cannot take view of Unit diagonal") - } - n := t.mat.N - data := t.mat.Data - if !t.isUpper() { - data = data[t.mat.K:] - } - return &DiagDense{ - mat: blas64.Vector{ - N: n, - Inc: t.mat.Stride, - Data: data[:(n-1)*t.mat.Stride+1], - }, - } -} - -// Trace returns the trace. -func (t *TriBandDense) Trace() float64 { - rb := t.RawTriBand() - var tr float64 - var offsetIndex int - if rb.Uplo == blas.Lower { - offsetIndex = rb.K - } - for i := 0; i < rb.N; i++ { - tr += rb.Data[offsetIndex+i*rb.Stride] - } - return tr -} diff --git a/vendor/gonum.org/v1/gonum/mat/vector.go b/vendor/gonum.org/v1/gonum/mat/vector.go deleted file mode 100644 index f65bbf1ed..000000000 --- a/vendor/gonum.org/v1/gonum/mat/vector.go +++ /dev/null @@ -1,745 +0,0 @@ -// Copyright ©2013 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package mat - -import ( - "gonum.org/v1/gonum/blas" - "gonum.org/v1/gonum/blas/blas64" - "gonum.org/v1/gonum/internal/asm/f64" -) - -var ( - vector *VecDense - - _ Matrix = vector - _ Vector = vector - _ Reseter = vector -) - -// Vector is a vector. -type Vector interface { - Matrix - AtVec(int) float64 - Len() int -} - -// TransposeVec is a type for performing an implicit transpose of a Vector. -// It implements the Vector interface, returning values from the transpose -// of the vector within. -type TransposeVec struct { - Vector Vector -} - -// At returns the value of the element at row i and column j of the transposed -// matrix, that is, row j and column i of the Vector field. -func (t TransposeVec) At(i, j int) float64 { - return t.Vector.At(j, i) -} - -// AtVec returns the element at position i. It panics if i is out of bounds. -func (t TransposeVec) AtVec(i int) float64 { - return t.Vector.AtVec(i) -} - -// Dims returns the dimensions of the transposed vector. -func (t TransposeVec) Dims() (r, c int) { - c, r = t.Vector.Dims() - return r, c -} - -// T performs an implicit transpose by returning the Vector field. -func (t TransposeVec) T() Matrix { - return t.Vector -} - -// Len returns the number of columns in the vector. -func (t TransposeVec) Len() int { - return t.Vector.Len() -} - -// TVec performs an implicit transpose by returning the Vector field. -func (t TransposeVec) TVec() Vector { - return t.Vector -} - -// Untranspose returns the Vector field. 
-func (t TransposeVec) Untranspose() Matrix { - return t.Vector -} - -func (t TransposeVec) UntransposeVec() Vector { - return t.Vector -} - -// VecDense represents a column vector. -type VecDense struct { - mat blas64.Vector - // A BLAS vector can have a negative increment, but allowing this - // in the mat type complicates a lot of code, and doesn't gain anything. - // VecDense must have positive increment in this package. -} - -// NewVecDense creates a new VecDense of length n. If data == nil, -// a new slice is allocated for the backing slice. If len(data) == n, data is -// used as the backing slice, and changes to the elements of the returned VecDense -// will be reflected in data. If neither of these is true, NewVecDense will panic. -// NewVecDense will panic if n is zero. -func NewVecDense(n int, data []float64) *VecDense { - if n <= 0 { - if n == 0 { - panic(ErrZeroLength) - } - panic("mat: negative dimension") - } - if len(data) != n && data != nil { - panic(ErrShape) - } - if data == nil { - data = make([]float64, n) - } - return &VecDense{ - mat: blas64.Vector{ - N: n, - Inc: 1, - Data: data, - }, - } -} - -// SliceVec returns a new Vector that shares backing data with the receiver. -// The returned matrix starts at i of the receiver and extends k-i elements. -// SliceVec panics with ErrIndexOutOfRange if the slice is outside the capacity -// of the receiver. -func (v *VecDense) SliceVec(i, k int) Vector { - if i < 0 || k <= i || v.Cap() < k { - panic(ErrIndexOutOfRange) - } - return &VecDense{ - mat: blas64.Vector{ - N: k - i, - Inc: v.mat.Inc, - Data: v.mat.Data[i*v.mat.Inc : (k-1)*v.mat.Inc+1], - }, - } -} - -// Dims returns the number of rows and columns in the matrix. Columns is always 1 -// for a non-Reset vector. -func (v *VecDense) Dims() (r, c int) { - if v.IsZero() { - return 0, 0 - } - return v.mat.N, 1 -} - -// Caps returns the number of rows and columns in the backing matrix. Columns is always 1 -// for a non-Reset vector. -func (v *VecDense) Caps() (r, c int) { - if v.IsZero() { - return 0, 0 - } - return v.Cap(), 1 -} - -// Len returns the length of the vector. -func (v *VecDense) Len() int { - return v.mat.N -} - -// Cap returns the capacity of the vector. -func (v *VecDense) Cap() int { - if v.IsZero() { - return 0 - } - return (cap(v.mat.Data)-1)/v.mat.Inc + 1 -} - -// T performs an implicit transpose by returning the receiver inside a Transpose. -func (v *VecDense) T() Matrix { - return Transpose{v} -} - -// TVec performs an implicit transpose by returning the receiver inside a TransposeVec. -func (v *VecDense) TVec() Vector { - return TransposeVec{v} -} - -// Reset zeros the length of the vector so that it can be reused as the -// receiver of a dimensionally restricted operation. -// -// See the Reseter interface for more information. -func (v *VecDense) Reset() { - // No change of Inc or N to 0 may be - // made unless both are set to 0. - v.mat.Inc = 0 - v.mat.N = 0 - v.mat.Data = v.mat.Data[:0] -} - -// Zero sets all of the matrix elements to zero. -func (v *VecDense) Zero() { - for i := 0; i < v.mat.N; i++ { - v.mat.Data[v.mat.Inc*i] = 0 - } -} - -// CloneVec makes a copy of a into the receiver, overwriting the previous value -// of the receiver. 
-func (v *VecDense) CloneVec(a Vector) { - if v == a { - return - } - n := a.Len() - v.mat = blas64.Vector{ - N: n, - Inc: 1, - Data: use(v.mat.Data, n), - } - if r, ok := a.(RawVectorer); ok { - blas64.Copy(r.RawVector(), v.mat) - return - } - for i := 0; i < a.Len(); i++ { - v.SetVec(i, a.AtVec(i)) - } -} - -// VecDenseCopyOf returns a newly allocated copy of the elements of a. -func VecDenseCopyOf(a Vector) *VecDense { - v := &VecDense{} - v.CloneVec(a) - return v -} - -func (v *VecDense) RawVector() blas64.Vector { - return v.mat -} - -// CopyVec makes a copy of elements of a into the receiver. It is similar to the -// built-in copy; it copies as much as the overlap between the two vectors and -// returns the number of elements it copied. -func (v *VecDense) CopyVec(a Vector) int { - n := min(v.Len(), a.Len()) - if v == a { - return n - } - if r, ok := a.(RawVectorer); ok { - src := r.RawVector() - src.N = n - dst := v.mat - dst.N = n - blas64.Copy(src, dst) - return n - } - for i := 0; i < n; i++ { - v.setVec(i, a.AtVec(i)) - } - return n -} - -// ScaleVec scales the vector a by alpha, placing the result in the receiver. -func (v *VecDense) ScaleVec(alpha float64, a Vector) { - n := a.Len() - - if v == a { - if v.mat.Inc == 1 { - f64.ScalUnitary(alpha, v.mat.Data) - return - } - f64.ScalInc(alpha, v.mat.Data, uintptr(n), uintptr(v.mat.Inc)) - return - } - - v.reuseAs(n) - - if rv, ok := a.(RawVectorer); ok { - mat := rv.RawVector() - v.checkOverlap(mat) - if v.mat.Inc == 1 && mat.Inc == 1 { - f64.ScalUnitaryTo(v.mat.Data, alpha, mat.Data) - return - } - f64.ScalIncTo(v.mat.Data, uintptr(v.mat.Inc), - alpha, mat.Data, uintptr(n), uintptr(mat.Inc)) - return - } - - for i := 0; i < n; i++ { - v.setVec(i, alpha*a.AtVec(i)) - } -} - -// AddScaledVec adds the vectors a and alpha*b, placing the result in the receiver. -func (v *VecDense) AddScaledVec(a Vector, alpha float64, b Vector) { - if alpha == 1 { - v.AddVec(a, b) - return - } - if alpha == -1 { - v.SubVec(a, b) - return - } - - ar := a.Len() - br := b.Len() - - if ar != br { - panic(ErrShape) - } - - var amat, bmat blas64.Vector - fast := true - aU, _ := untranspose(a) - if rv, ok := aU.(RawVectorer); ok { - amat = rv.RawVector() - if v != a { - v.checkOverlap(amat) - } - } else { - fast = false - } - bU, _ := untranspose(b) - if rv, ok := bU.(RawVectorer); ok { - bmat = rv.RawVector() - if v != b { - v.checkOverlap(bmat) - } - } else { - fast = false - } - - v.reuseAs(ar) - - switch { - case alpha == 0: // v <- a - if v == a { - return - } - v.CopyVec(a) - case v == a && v == b: // v <- v + alpha * v = (alpha + 1) * v - blas64.Scal(alpha+1, v.mat) - case !fast: // v <- a + alpha * b without blas64 support. - for i := 0; i < ar; i++ { - v.setVec(i, a.AtVec(i)+alpha*b.AtVec(i)) - } - case v == a && v != b: // v <- v + alpha * b - if v.mat.Inc == 1 && bmat.Inc == 1 { - // Fast path for a common case. - f64.AxpyUnitaryTo(v.mat.Data, alpha, bmat.Data, amat.Data) - } else { - f64.AxpyInc(alpha, bmat.Data, v.mat.Data, - uintptr(ar), uintptr(bmat.Inc), uintptr(v.mat.Inc), 0, 0) - } - default: // v <- a + alpha * b or v <- a + alpha * v - if v.mat.Inc == 1 && amat.Inc == 1 && bmat.Inc == 1 { - // Fast path for a common case. - f64.AxpyUnitaryTo(v.mat.Data, alpha, bmat.Data, amat.Data) - } else { - f64.AxpyIncTo(v.mat.Data, uintptr(v.mat.Inc), 0, - alpha, bmat.Data, amat.Data, - uintptr(ar), uintptr(bmat.Inc), uintptr(amat.Inc), 0, 0) - } - } -} - -// AddVec adds the vectors a and b, placing the result in the receiver. 
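// Illustrative usage sketch of the vector arithmetic documented above. An
// editor-added example, not part of the vendored source; it assumes the
// gonum.org/v1/gonum/mat API as removed in this patch, plus mat.Formatted for
// printing.
//
//	package main
//
//	import (
//		"fmt"
//
//		"gonum.org/v1/gonum/mat"
//	)
//
//	func main() {
//		a := mat.NewVecDense(3, []float64{1, 2, 3})
//		b := mat.NewVecDense(3, []float64{4, 5, 6})
//
//		var v mat.VecDense
//		v.AddScaledVec(a, 2, b) // v = a + 2*b = (9, 12, 15)
//		v.ScaleVec(0.5, &v)     // v = 0.5*v = (4.5, 6, 7.5)
//		fmt.Println(mat.Formatted(&v))
//	}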
-func (v *VecDense) AddVec(a, b Vector) {
-	ar := a.Len()
-	br := b.Len()
-
-	if ar != br {
-		panic(ErrShape)
-	}
-
-	v.reuseAs(ar)
-
-	aU, _ := untranspose(a)
-	bU, _ := untranspose(b)
-
-	if arv, ok := aU.(RawVectorer); ok {
-		if brv, ok := bU.(RawVectorer); ok {
-			amat := arv.RawVector()
-			bmat := brv.RawVector()
-
-			if v != a {
-				v.checkOverlap(amat)
-			}
-			if v != b {
-				v.checkOverlap(bmat)
-			}
-
-			if v.mat.Inc == 1 && amat.Inc == 1 && bmat.Inc == 1 {
-				// Fast path for a common case.
-				f64.AxpyUnitaryTo(v.mat.Data, 1, bmat.Data, amat.Data)
-				return
-			}
-			f64.AxpyIncTo(v.mat.Data, uintptr(v.mat.Inc), 0,
-				1, bmat.Data, amat.Data,
-				uintptr(ar), uintptr(bmat.Inc), uintptr(amat.Inc), 0, 0)
-			return
-		}
-	}
-
-	for i := 0; i < ar; i++ {
-		v.setVec(i, a.AtVec(i)+b.AtVec(i))
-	}
-}
-
-// SubVec subtracts the vector b from a, placing the result in the receiver.
-func (v *VecDense) SubVec(a, b Vector) {
-	ar := a.Len()
-	br := b.Len()
-
-	if ar != br {
-		panic(ErrShape)
-	}
-
-	v.reuseAs(ar)
-
-	aU, _ := untranspose(a)
-	bU, _ := untranspose(b)
-
-	if arv, ok := aU.(RawVectorer); ok {
-		if brv, ok := bU.(RawVectorer); ok {
-			amat := arv.RawVector()
-			bmat := brv.RawVector()
-
-			if v != a {
-				v.checkOverlap(amat)
-			}
-			if v != b {
-				v.checkOverlap(bmat)
-			}
-
-			if v.mat.Inc == 1 && amat.Inc == 1 && bmat.Inc == 1 {
-				// Fast path for a common case.
-				f64.AxpyUnitaryTo(v.mat.Data, -1, bmat.Data, amat.Data)
-				return
-			}
-			f64.AxpyIncTo(v.mat.Data, uintptr(v.mat.Inc), 0,
-				-1, bmat.Data, amat.Data,
-				uintptr(ar), uintptr(bmat.Inc), uintptr(amat.Inc), 0, 0)
-			return
-		}
-	}
-
-	for i := 0; i < ar; i++ {
-		v.setVec(i, a.AtVec(i)-b.AtVec(i))
-	}
-}
-
-// MulElemVec performs element-wise multiplication of a and b, placing the result
-// in the receiver.
-func (v *VecDense) MulElemVec(a, b Vector) {
-	ar := a.Len()
-	br := b.Len()
-
-	if ar != br {
-		panic(ErrShape)
-	}
-
-	v.reuseAs(ar)
-
-	aU, _ := untranspose(a)
-	bU, _ := untranspose(b)
-
-	if arv, ok := aU.(RawVectorer); ok {
-		if brv, ok := bU.(RawVectorer); ok {
-			amat := arv.RawVector()
-			bmat := brv.RawVector()
-
-			if v != a {
-				v.checkOverlap(amat)
-			}
-			if v != b {
-				v.checkOverlap(bmat)
-			}
-
-			if v.mat.Inc == 1 && amat.Inc == 1 && bmat.Inc == 1 {
-				// Fast path for a common case.
-				for i, a := range amat.Data {
-					v.mat.Data[i] = a * bmat.Data[i]
-				}
-				return
-			}
-			var ia, ib int
-			for i := 0; i < ar; i++ {
-				v.setVec(i, amat.Data[ia]*bmat.Data[ib])
-				ia += amat.Inc
-				ib += bmat.Inc
-			}
-			return
-		}
-	}
-
-	for i := 0; i < ar; i++ {
-		v.setVec(i, a.AtVec(i)*b.AtVec(i))
-	}
-}
-
-// DivElemVec performs element-wise division of a by b, placing the result
-// in the receiver.
-func (v *VecDense) DivElemVec(a, b Vector) {
-	ar := a.Len()
-	br := b.Len()
-
-	if ar != br {
-		panic(ErrShape)
-	}
-
-	v.reuseAs(ar)
-
-	aU, _ := untranspose(a)
-	bU, _ := untranspose(b)
-
-	if arv, ok := aU.(RawVectorer); ok {
-		if brv, ok := bU.(RawVectorer); ok {
-			amat := arv.RawVector()
-			bmat := brv.RawVector()
-
-			if v != a {
-				v.checkOverlap(amat)
-			}
-			if v != b {
-				v.checkOverlap(bmat)
-			}
-
-			if v.mat.Inc == 1 && amat.Inc == 1 && bmat.Inc == 1 {
-				// Fast path for a common case.
-				for i, a := range amat.Data {
-					v.setVec(i, a/bmat.Data[i])
-				}
-				return
-			}
-			var ia, ib int
-			for i := 0; i < ar; i++ {
-				v.setVec(i, amat.Data[ia]/bmat.Data[ib])
-				ia += amat.Inc
-				ib += bmat.Inc
-			}
-			// Return here, as in MulElemVec, so the generic path below does
-			// not recompute the result from potentially aliased inputs.
-			return
-		}
-	}
-
-	for i := 0; i < ar; i++ {
-		v.setVec(i, a.AtVec(i)/b.AtVec(i))
-	}
-}
-
-// MulVec computes a * b. The result is stored into the receiver.
-// MulVec panics if the number of columns in a does not equal the number of rows in b -// or if the number of columns in b does not equal 1. -func (v *VecDense) MulVec(a Matrix, b Vector) { - r, c := a.Dims() - br, bc := b.Dims() - if c != br || bc != 1 { - panic(ErrShape) - } - - aU, trans := untranspose(a) - var bmat blas64.Vector - fast := true - bU, _ := untranspose(b) - if rv, ok := bU.(RawVectorer); ok { - bmat = rv.RawVector() - if v != b { - v.checkOverlap(bmat) - } - } else { - fast = false - } - - v.reuseAs(r) - var restore func() - if v == aU { - v, restore = v.isolatedWorkspace(aU.(*VecDense)) - defer restore() - } else if v == b { - v, restore = v.isolatedWorkspace(b) - defer restore() - } - - // TODO(kortschak): Improve the non-fast paths. - switch aU := aU.(type) { - case Vector: - if b.Len() == 1 { - // {n,1} x {1,1} - v.ScaleVec(b.AtVec(0), aU) - return - } - - // {1,n} x {n,1} - if fast { - if rv, ok := aU.(RawVectorer); ok { - amat := rv.RawVector() - if v != aU { - v.checkOverlap(amat) - } - - if amat.Inc == 1 && bmat.Inc == 1 { - // Fast path for a common case. - v.setVec(0, f64.DotUnitary(amat.Data, bmat.Data)) - return - } - v.setVec(0, f64.DotInc(amat.Data, bmat.Data, - uintptr(c), uintptr(amat.Inc), uintptr(bmat.Inc), 0, 0)) - return - } - } - var sum float64 - for i := 0; i < c; i++ { - sum += aU.AtVec(i) * b.AtVec(i) - } - v.setVec(0, sum) - return - case RawSymmetricer: - if fast { - amat := aU.RawSymmetric() - // We don't know that a is a *SymDense, so make - // a temporary SymDense to check overlap. - (&SymDense{mat: amat}).checkOverlap(v.asGeneral()) - blas64.Symv(1, amat, bmat, 0, v.mat) - return - } - case RawTriangular: - v.CopyVec(b) - amat := aU.RawTriangular() - // We don't know that a is a *TriDense, so make - // a temporary TriDense to check overlap. - (&TriDense{mat: amat}).checkOverlap(v.asGeneral()) - ta := blas.NoTrans - if trans { - ta = blas.Trans - } - blas64.Trmv(ta, amat, v.mat) - case RawMatrixer: - if fast { - amat := aU.RawMatrix() - // We don't know that a is a *Dense, so make - // a temporary Dense to check overlap. - (&Dense{mat: amat}).checkOverlap(v.asGeneral()) - t := blas.NoTrans - if trans { - t = blas.Trans - } - blas64.Gemv(t, 1, amat, bmat, 0, v.mat) - return - } - default: - if fast { - for i := 0; i < r; i++ { - var f float64 - for j := 0; j < c; j++ { - f += a.At(i, j) * bmat.Data[j*bmat.Inc] - } - v.setVec(i, f) - } - return - } - } - - for i := 0; i < r; i++ { - var f float64 - for j := 0; j < c; j++ { - f += a.At(i, j) * b.AtVec(j) - } - v.setVec(i, f) - } -} - -// reuseAs resizes an empty vector to a r×1 vector, -// or checks that a non-empty matrix is r×1. -func (v *VecDense) reuseAs(r int) { - if r == 0 { - panic(ErrZeroLength) - } - if v.IsZero() { - v.mat = blas64.Vector{ - N: r, - Inc: 1, - Data: use(v.mat.Data, r), - } - return - } - if r != v.mat.N { - panic(ErrShape) - } -} - -// IsZero returns whether the receiver is zero-sized. Zero-sized vectors can be the -// receiver for size-restricted operations. VecDenses can be zeroed using Reset. -func (v *VecDense) IsZero() bool { - // It must be the case that v.Dims() returns - // zeros in this case. See comment in Reset(). 
- return v.mat.Inc == 0 -} - -func (v *VecDense) isolatedWorkspace(a Vector) (n *VecDense, restore func()) { - l := a.Len() - if l == 0 { - panic(ErrZeroLength) - } - n = getWorkspaceVec(l, false) - return n, func() { - v.CopyVec(n) - putWorkspaceVec(n) - } -} - -// asDense returns a Dense representation of the receiver with the same -// underlying data. -func (v *VecDense) asDense() *Dense { - return &Dense{ - mat: v.asGeneral(), - capRows: v.mat.N, - capCols: 1, - } -} - -// asGeneral returns a blas64.General representation of the receiver with the -// same underlying data. -func (v *VecDense) asGeneral() blas64.General { - return blas64.General{ - Rows: v.mat.N, - Cols: 1, - Stride: v.mat.Inc, - Data: v.mat.Data, - } -} - -// ColViewOf reflects the column j of the RawMatrixer m, into the receiver -// backed by the same underlying data. The length of the receiver must either be -// zero or match the number of rows in m. -func (v *VecDense) ColViewOf(m RawMatrixer, j int) { - rm := m.RawMatrix() - - if j >= rm.Cols || j < 0 { - panic(ErrColAccess) - } - if !v.IsZero() && v.mat.N != rm.Rows { - panic(ErrShape) - } - - v.mat.Inc = rm.Stride - v.mat.Data = rm.Data[j : (rm.Rows-1)*rm.Stride+j+1] - v.mat.N = rm.Rows -} - -// RowViewOf reflects the row i of the RawMatrixer m, into the receiver -// backed by the same underlying data. The length of the receiver must either be -// zero or match the number of columns in m. -func (v *VecDense) RowViewOf(m RawMatrixer, i int) { - rm := m.RawMatrix() - - if i >= rm.Rows || i < 0 { - panic(ErrRowAccess) - } - if !v.IsZero() && v.mat.N != rm.Cols { - panic(ErrShape) - } - - v.mat.Inc = 1 - v.mat.Data = rm.Data[i*rm.Stride : i*rm.Stride+rm.Cols] - v.mat.N = rm.Cols -} diff --git a/vendor/google.golang.org/api/AUTHORS b/vendor/google.golang.org/api/AUTHORS new file mode 100644 index 000000000..f73b72574 --- /dev/null +++ b/vendor/google.golang.org/api/AUTHORS @@ -0,0 +1,10 @@ +# This is the official list of authors for copyright purposes. +# This file is distinct from the CONTRIBUTORS files. +# See the latter for an explanation. + +# Names should be added to this file as +# Name or Organization +# The email address is not required for organizations. + +# Please keep the list sorted. +Google Inc. diff --git a/vendor/google.golang.org/api/CONTRIBUTORS b/vendor/google.golang.org/api/CONTRIBUTORS new file mode 100644 index 000000000..fe55ebff0 --- /dev/null +++ b/vendor/google.golang.org/api/CONTRIBUTORS @@ -0,0 +1,55 @@ +# This is the official list of people who can contribute +# (and typically have contributed) code to the repository. +# The AUTHORS file lists the copyright holders; this file +# lists people. For example, Google employees are listed here +# but not in AUTHORS, because Google holds the copyright. +# +# The submission process automatically checks to make sure +# that people submitting code are listed in this file (by email address). +# +# Names should be added to this file only after verifying that +# the individual or the individual's organization has agreed to +# the appropriate Contributor License Agreement, found here: +# +# https://cla.developers.google.com/about/google-individual +# https://cla.developers.google.com/about/google-corporate +# +# The CLA can be filled out on the web: +# +# https://cla.developers.google.com/ +# +# When adding J Random Contributor's name to this file, +# either J's name or J's organization's name should be +# added to the AUTHORS file, depending on whether the +# individual or corporate CLA was used. 
+
+# Names should be added to this file like so:
+# Name
+
+# An entry with two email addresses specifies that the
+# first address should be used in the submit logs and
+# that the second address should be recognized as the
+# same person when interacting with Rietveld.
+
+# Please keep the list sorted.
+
+Alain Vongsouvanh
+Andrew Gerrand
+Brad Fitzpatrick
+Eric Koleda
+Francesc Campoy
+Garrick Evans
+Glenn Lewis
+Ivan Krasin
+Jason Hall
+Johan Euphrosine
+Kostik Shtoyk
+Kunpei Sakai
+Matthew Whisenhunt
+Michael McGreevy
+Nick Craig-Wood
+Robbie Trencheny
+Ross Light
+Sarah Adams
+Scott Van Woudenberg
+Takashi Matsuo
diff --git a/vendor/google.golang.org/api/LICENSE b/vendor/google.golang.org/api/LICENSE
new file mode 100644
index 000000000..263aa7a0c
--- /dev/null
+++ b/vendor/google.golang.org/api/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2011 Google Inc. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+    * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+    * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/google.golang.org/api/googleapi/internal/uritemplates/LICENSE b/vendor/google.golang.org/api/googleapi/internal/uritemplates/LICENSE
new file mode 100644
index 000000000..de9c88cb6
--- /dev/null
+++ b/vendor/google.golang.org/api/googleapi/internal/uritemplates/LICENSE
@@ -0,0 +1,18 @@
+Copyright (c) 2013 Joshua Tacoma
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/google.golang.org/api/googleapi/transport/apikey.go b/vendor/google.golang.org/api/googleapi/transport/apikey.go new file mode 100644 index 000000000..eca1ea250 --- /dev/null +++ b/vendor/google.golang.org/api/googleapi/transport/apikey.go @@ -0,0 +1,38 @@ +// Copyright 2012 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package transport contains HTTP transports used to make +// authenticated API requests. +package transport + +import ( + "errors" + "net/http" +) + +// APIKey is an HTTP Transport which wraps an underlying transport and +// appends an API Key "key" parameter to the URL of outgoing requests. +type APIKey struct { + // Key is the API Key to set on requests. + Key string + + // Transport is the underlying HTTP transport. + // If nil, http.DefaultTransport is used. + Transport http.RoundTripper +} + +func (t *APIKey) RoundTrip(req *http.Request) (*http.Response, error) { + rt := t.Transport + if rt == nil { + rt = http.DefaultTransport + if rt == nil { + return nil, errors.New("googleapi/transport: no Transport specified or available") + } + } + newReq := *req + args := newReq.URL.Query() + args.Set("key", t.Key) + newReq.URL.RawQuery = args.Encode() + return rt.RoundTrip(&newReq) +} diff --git a/vendor/google.golang.org/api/internal/creds.go b/vendor/google.golang.org/api/internal/creds.go new file mode 100644 index 000000000..69b8659fd --- /dev/null +++ b/vendor/google.golang.org/api/internal/creds.go @@ -0,0 +1,102 @@ +// Copyright 2017 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +import ( + "context" + "encoding/json" + "fmt" + "io/ioutil" + + "golang.org/x/oauth2" + + "golang.org/x/oauth2/google" +) + +// Creds returns credential information obtained from DialSettings, or if none, then +// it returns default credential information. +func Creds(ctx context.Context, ds *DialSettings) (*google.Credentials, error) { + if ds.Credentials != nil { + return ds.Credentials, nil + } + if ds.CredentialsJSON != nil { + return credentialsFromJSON(ctx, ds.CredentialsJSON, ds.Endpoint, ds.Scopes, ds.Audiences) + } + if ds.CredentialsFile != "" { + data, err := ioutil.ReadFile(ds.CredentialsFile) + if err != nil { + return nil, fmt.Errorf("cannot read credentials file: %v", err) + } + return credentialsFromJSON(ctx, data, ds.Endpoint, ds.Scopes, ds.Audiences) + } + if ds.TokenSource != nil { + return &google.Credentials{TokenSource: ds.TokenSource}, nil + } + cred, err := google.FindDefaultCredentials(ctx, ds.Scopes...) 
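+	// FindDefaultCredentials implements Application Default Credentials: it
+	// consults the GOOGLE_APPLICATION_CREDENTIALS environment variable, then
+	// the gcloud user credentials file, and finally the GCE/GKE metadata
+	// server.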
+	if err != nil {
+		return nil, err
+	}
+	if len(cred.JSON) > 0 {
+		return credentialsFromJSON(ctx, cred.JSON, ds.Endpoint, ds.Scopes, ds.Audiences)
+	}
+	// For GAE and GCE, the JSON is empty so return the default credentials directly.
+	return cred, nil
+}
+
+// JSON key file type.
+const (
+	serviceAccountKey = "service_account"
+)
+
+// credentialsFromJSON returns a google.Credentials based on the input.
+//
+// - If the JSON is a service account and no scopes are provided, returns a self-signed JWT auth flow.
+// - Otherwise, returns an OAuth 2.0 flow.
+func credentialsFromJSON(ctx context.Context, data []byte, endpoint string, scopes []string, audiences []string) (*google.Credentials, error) {
+	cred, err := google.CredentialsFromJSON(ctx, data, scopes...)
+	if err != nil {
+		return nil, err
+	}
+	if len(data) > 0 && len(scopes) == 0 {
+		var f struct {
+			Type string `json:"type"`
+			// The remaining JSON fields are omitted because they are not used.
+		}
+		if err := json.Unmarshal(cred.JSON, &f); err != nil {
+			return nil, err
+		}
+		if f.Type == serviceAccountKey {
+			ts, err := selfSignedJWTTokenSource(data, endpoint, audiences)
+			if err != nil {
+				return nil, err
+			}
+			cred.TokenSource = ts
+		}
+	}
+	return cred, err
+}
+
+func selfSignedJWTTokenSource(data []byte, endpoint string, audiences []string) (oauth2.TokenSource, error) {
+	// Use the API endpoint as the default audience.
+	audience := endpoint
+	if len(audiences) > 0 {
+		// TODO(shinfan): Update golang oauth to support multiple audiences.
+		if len(audiences) > 1 {
+			return nil, fmt.Errorf("multiple audiences support is not implemented")
+		}
+		audience = audiences[0]
+	}
+	return google.JWTAccessTokenSourceFromJSON(data, audience)
+}
diff --git a/vendor/google.golang.org/api/internal/pool.go b/vendor/google.golang.org/api/internal/pool.go
new file mode 100644
index 000000000..a4426dcb7
--- /dev/null
+++ b/vendor/google.golang.org/api/internal/pool.go
@@ -0,0 +1,61 @@
+// Copyright 2016 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package internal
+
+import (
+	"errors"
+
+	"google.golang.org/grpc/naming"
+)
+
+// PoolResolver provides a fixed list of addresses to load balance between
+// and does not provide further updates.
+type PoolResolver struct {
+	poolSize int
+	dialOpt  *DialSettings
+	ch       chan []*naming.Update
+}
+
+// NewPoolResolver returns a PoolResolver.
+// This is an EXPERIMENTAL API and may be changed or removed in the future.
+func NewPoolResolver(size int, o *DialSettings) *PoolResolver {
+	return &PoolResolver{poolSize: size, dialOpt: o}
+}
+
+// Resolve returns a Watcher for the endpoint defined by the DialSettings
+// provided to NewPoolResolver.
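+//
+// As an illustrative sketch (the endpoint is hypothetical), this resolver is
+// meant to feed a round-robin balancer, which is how the option package wires
+// it up in WithGRPCConnectionPool:
+//
+//	resolver := NewPoolResolver(4, &DialSettings{Endpoint: "example.googleapis.com:443"})
+//	balancer := grpc.RoundRobin(resolver)
+//	conn, err := grpc.Dial("example.googleapis.com:443", grpc.WithBalancer(balancer))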
+func (r *PoolResolver) Resolve(target string) (naming.Watcher, error) { + if r.dialOpt.Endpoint == "" { + return nil, errors.New("no endpoint configured") + } + addrs := make([]*naming.Update, 0, r.poolSize) + for i := 0; i < r.poolSize; i++ { + addrs = append(addrs, &naming.Update{Op: naming.Add, Addr: r.dialOpt.Endpoint, Metadata: i}) + } + r.ch = make(chan []*naming.Update, 1) + r.ch <- addrs + return r, nil +} + +// Next returns a static list of updates on the first call, +// and blocks indefinitely until Close is called on subsequent calls. +func (r *PoolResolver) Next() ([]*naming.Update, error) { + return <-r.ch, nil +} + +// Close releases resources associated with the pool and causes Next to unblock. +func (r *PoolResolver) Close() { + close(r.ch) +} diff --git a/vendor/google.golang.org/api/internal/settings.go b/vendor/google.golang.org/api/internal/settings.go new file mode 100644 index 000000000..062301c65 --- /dev/null +++ b/vendor/google.golang.org/api/internal/settings.go @@ -0,0 +1,96 @@ +// Copyright 2017 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package internal supports the options and transport packages. +package internal + +import ( + "errors" + "net/http" + + "golang.org/x/oauth2" + "golang.org/x/oauth2/google" + "google.golang.org/grpc" +) + +// DialSettings holds information needed to establish a connection with a +// Google API service. +type DialSettings struct { + Endpoint string + Scopes []string + TokenSource oauth2.TokenSource + Credentials *google.Credentials + CredentialsFile string // if set, Token Source is ignored. + CredentialsJSON []byte + UserAgent string + APIKey string + Audiences []string + HTTPClient *http.Client + GRPCDialOpts []grpc.DialOption + GRPCConn *grpc.ClientConn + NoAuth bool + + // Google API system parameters. For more information please read: + // https://cloud.google.com/apis/docs/system-parameters + QuotaProject string + RequestReason string +} + +// Validate reports an error if ds is invalid. +func (ds *DialSettings) Validate() error { + hasCreds := ds.APIKey != "" || ds.TokenSource != nil || ds.CredentialsFile != "" || ds.Credentials != nil + if ds.NoAuth && hasCreds { + return errors.New("options.WithoutAuthentication is incompatible with any option that provides credentials") + } + // Credentials should not appear with other options. + // We currently allow TokenSource and CredentialsFile to coexist. + // TODO(jba): make TokenSource & CredentialsFile an error (breaking change). + nCreds := 0 + if ds.Credentials != nil { + nCreds++ + } + if ds.CredentialsJSON != nil { + nCreds++ + } + if ds.CredentialsFile != "" { + nCreds++ + } + if ds.APIKey != "" { + nCreds++ + } + if ds.TokenSource != nil { + nCreds++ + } + if len(ds.Scopes) > 0 && len(ds.Audiences) > 0 { + return errors.New("WithScopes is incompatible with WithAudience") + } + // Accept only one form of credentials, except we allow TokenSource and CredentialsFile for backwards compatibility. 
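+	// For example, WithTokenSource combined with WithCredentialsFile passes
+	// validation via the exception below, while WithAPIKey combined with
+	// WithCredentialsJSON is rejected.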
+ if nCreds > 1 && !(nCreds == 2 && ds.TokenSource != nil && ds.CredentialsFile != "") { + return errors.New("multiple credential options provided") + } + if ds.HTTPClient != nil && ds.GRPCConn != nil { + return errors.New("WithHTTPClient is incompatible with WithGRPCConn") + } + if ds.HTTPClient != nil && ds.GRPCDialOpts != nil { + return errors.New("WithHTTPClient is incompatible with gRPC dial options") + } + if ds.HTTPClient != nil && ds.QuotaProject != "" { + return errors.New("WithHTTPClient is incompatible with QuotaProject") + } + if ds.HTTPClient != nil && ds.RequestReason != "" { + return errors.New("WithHTTPClient is incompatible with RequestReason") + } + + return nil +} diff --git a/vendor/google.golang.org/api/iterator/iterator.go b/vendor/google.golang.org/api/iterator/iterator.go new file mode 100644 index 000000000..3c8ea7732 --- /dev/null +++ b/vendor/google.golang.org/api/iterator/iterator.go @@ -0,0 +1,231 @@ +// Copyright 2016 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package iterator provides support for standard Google API iterators. +// See https://github.com/GoogleCloudPlatform/gcloud-golang/wiki/Iterator-Guidelines. +package iterator + +import ( + "errors" + "fmt" + "reflect" +) + +// Done is returned by an iterator's Next method when the iteration is +// complete; when there are no more items to return. +var Done = errors.New("no more items in iterator") + +// We don't support mixed calls to Next and NextPage because they play +// with the paging state in incompatible ways. +var errMixed = errors.New("iterator: Next and NextPage called on same iterator") + +// PageInfo contains information about an iterator's paging state. +type PageInfo struct { + // Token is the token used to retrieve the next page of items from the + // API. You may set Token immediately after creating an iterator to + // begin iteration at a particular point. If Token is the empty string, + // the iterator will begin with the first eligible item. + // + // The result of setting Token after the first call to Next is undefined. + // + // After the underlying API method is called to retrieve a page of items, + // Token is set to the next-page token in the response. + Token string + + // MaxSize is the maximum number of items returned by a call to the API. + // Set MaxSize as a hint to optimize the buffering behavior of the iterator. + // If zero, the page size is determined by the underlying service. + // + // Use Pager to retrieve a page of a specific, exact size. + MaxSize int + + // The error state of the iterator. Manipulated by PageInfo.next and Pager. + // This is a latch: it starts as nil, and once set should never change. + err error + + // If true, no more calls to fetch should be made. Set to true when fetch + // returns an empty page token. The iterator is Done when this is true AND + // the buffer is empty. + atEnd bool + + // Function that fetches a page from the underlying service. 
It should pass + // the pageSize and pageToken arguments to the service, fill the buffer + // with the results from the call, and return the next-page token returned + // by the service. The function must not remove any existing items from the + // buffer. If the underlying RPC takes an int32 page size, pageSize should + // be silently truncated. + fetch func(pageSize int, pageToken string) (nextPageToken string, err error) + + // Function that returns the number of currently buffered items. + bufLen func() int + + // Function that returns the buffer, after setting the buffer variable to nil. + takeBuf func() interface{} + + // Set to true on first call to PageInfo.next or Pager.NextPage. Used to check + // for calls to both Next and NextPage with the same iterator. + nextCalled, nextPageCalled bool +} + +// NewPageInfo exposes internals for iterator implementations. +// It is not a stable interface. +var NewPageInfo = newPageInfo + +// If an iterator can support paging, its iterator-creating method should call +// this (via the NewPageInfo variable above). +// +// The fetch, bufLen and takeBuf arguments provide access to the +// iterator's internal slice of buffered items. They behave as described in +// PageInfo, above. +// +// The return value is the PageInfo.next method bound to the returned PageInfo value. +// (Returning it avoids exporting PageInfo.next.) +func newPageInfo(fetch func(int, string) (string, error), bufLen func() int, takeBuf func() interface{}) (*PageInfo, func() error) { + pi := &PageInfo{ + fetch: fetch, + bufLen: bufLen, + takeBuf: takeBuf, + } + return pi, pi.next +} + +// Remaining returns the number of items available before the iterator makes another API call. +func (pi *PageInfo) Remaining() int { return pi.bufLen() } + +// next provides support for an iterator's Next function. An iterator's Next +// should return the error returned by next if non-nil; else it can assume +// there is at least one item in its buffer, and it should return that item and +// remove it from the buffer. +func (pi *PageInfo) next() error { + pi.nextCalled = true + if pi.err != nil { // Once we get an error, always return it. + // TODO(jba): fix so users can retry on transient errors? Probably not worth it. + return pi.err + } + if pi.nextPageCalled { + pi.err = errMixed + return pi.err + } + // Loop until we get some items or reach the end. + for pi.bufLen() == 0 && !pi.atEnd { + if err := pi.fill(pi.MaxSize); err != nil { + pi.err = err + return pi.err + } + if pi.Token == "" { + pi.atEnd = true + } + } + // Either the buffer is non-empty or pi.atEnd is true (or both). + if pi.bufLen() == 0 { + // The buffer is empty and pi.atEnd is true, i.e. the service has no + // more items. + pi.err = Done + } + return pi.err +} + +// Call the service to fill the buffer, using size and pi.Token. Set pi.Token to the +// next-page token returned by the call. +// If fill returns a non-nil error, the buffer will be empty. +func (pi *PageInfo) fill(size int) error { + tok, err := pi.fetch(size, pi.Token) + if err != nil { + pi.takeBuf() // clear the buffer + return err + } + pi.Token = tok + return nil +} + +// Pageable is implemented by iterators that support paging. +type Pageable interface { + // PageInfo returns paging information associated with the iterator. + PageInfo() *PageInfo +} + +// Pager supports retrieving iterator items a page at a time. +type Pager struct { + pageInfo *PageInfo + pageSize int +} + +// NewPager returns a pager that uses iter. 
Calls to its NextPage method will +// obtain exactly pageSize items, unless fewer remain. The pageToken argument +// indicates where to start the iteration. Pass the empty string to start at +// the beginning, or pass a token retrieved from a call to Pager.NextPage. +// +// If you use an iterator with a Pager, you must not call Next on the iterator. +func NewPager(iter Pageable, pageSize int, pageToken string) *Pager { + p := &Pager{ + pageInfo: iter.PageInfo(), + pageSize: pageSize, + } + p.pageInfo.Token = pageToken + if pageSize <= 0 { + p.pageInfo.err = errors.New("iterator: page size must be positive") + } + return p +} + +// NextPage retrieves a sequence of items from the iterator and appends them +// to slicep, which must be a pointer to a slice of the iterator's item type. +// Exactly p.pageSize items will be appended, unless fewer remain. +// +// The first return value is the page token to use for the next page of items. +// If empty, there are no more pages. Aside from checking for the end of the +// iteration, the returned page token is only needed if the iteration is to be +// resumed a later time, in another context (possibly another process). +// +// The second return value is non-nil if an error occurred. It will never be +// the special iterator sentinel value Done. To recognize the end of the +// iteration, compare nextPageToken to the empty string. +// +// It is possible for NextPage to return a single zero-length page along with +// an empty page token when there are no more items in the iteration. +func (p *Pager) NextPage(slicep interface{}) (nextPageToken string, err error) { + p.pageInfo.nextPageCalled = true + if p.pageInfo.err != nil { + return "", p.pageInfo.err + } + if p.pageInfo.nextCalled { + p.pageInfo.err = errMixed + return "", p.pageInfo.err + } + if p.pageInfo.bufLen() > 0 { + return "", errors.New("must call NextPage with an empty buffer") + } + // The buffer must be empty here, so takeBuf is a no-op. We call it just to get + // the buffer's type. + wantSliceType := reflect.PtrTo(reflect.ValueOf(p.pageInfo.takeBuf()).Type()) + if slicep == nil { + return "", errors.New("nil passed to Pager.NextPage") + } + vslicep := reflect.ValueOf(slicep) + if vslicep.Type() != wantSliceType { + return "", fmt.Errorf("slicep should be of type %s, got %T", wantSliceType, slicep) + } + for p.pageInfo.bufLen() < p.pageSize { + if err := p.pageInfo.fill(p.pageSize - p.pageInfo.bufLen()); err != nil { + p.pageInfo.err = err + return "", p.pageInfo.err + } + if p.pageInfo.Token == "" { + break + } + } + e := vslicep.Elem() + e.Set(reflect.AppendSlice(e, reflect.ValueOf(p.pageInfo.takeBuf()))) + return p.pageInfo.Token, nil +} diff --git a/vendor/google.golang.org/api/option/credentials_go19.go b/vendor/google.golang.org/api/option/credentials_go19.go new file mode 100644 index 000000000..0636a8294 --- /dev/null +++ b/vendor/google.golang.org/api/option/credentials_go19.go @@ -0,0 +1,33 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// +build go1.9 + +package option + +import ( + "golang.org/x/oauth2/google" + "google.golang.org/api/internal" +) + +type withCreds google.Credentials + +func (w *withCreds) Apply(o *internal.DialSettings) { + o.Credentials = (*google.Credentials)(w) +} + +// WithCredentials returns a ClientOption that authenticates API calls. +func WithCredentials(creds *google.Credentials) ClientOption { + return (*withCreds)(creds) +} diff --git a/vendor/google.golang.org/api/option/credentials_notgo19.go b/vendor/google.golang.org/api/option/credentials_notgo19.go new file mode 100644 index 000000000..74d3a4b5b --- /dev/null +++ b/vendor/google.golang.org/api/option/credentials_notgo19.go @@ -0,0 +1,32 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !go1.9 + +package option + +import ( + "golang.org/x/oauth2/google" + "google.golang.org/api/internal" +) + +type withCreds google.DefaultCredentials + +func (w *withCreds) Apply(o *internal.DialSettings) { + o.Credentials = (*google.DefaultCredentials)(w) +} + +func WithCredentials(creds *google.DefaultCredentials) ClientOption { + return (*withCreds)(creds) +} diff --git a/vendor/google.golang.org/api/option/option.go b/vendor/google.golang.org/api/option/option.go new file mode 100644 index 000000000..0a1c2dba9 --- /dev/null +++ b/vendor/google.golang.org/api/option/option.go @@ -0,0 +1,235 @@ +// Copyright 2017 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package option contains options for Google API clients. +package option + +import ( + "net/http" + + "golang.org/x/oauth2" + "google.golang.org/api/internal" + "google.golang.org/grpc" +) + +// A ClientOption is an option for a Google API client. +type ClientOption interface { + Apply(*internal.DialSettings) +} + +// WithTokenSource returns a ClientOption that specifies an OAuth2 token +// source to be used as the basis for authentication. +func WithTokenSource(s oauth2.TokenSource) ClientOption { + return withTokenSource{s} +} + +type withTokenSource struct{ ts oauth2.TokenSource } + +func (w withTokenSource) Apply(o *internal.DialSettings) { + o.TokenSource = w.ts +} + +type withCredFile string + +func (w withCredFile) Apply(o *internal.DialSettings) { + o.CredentialsFile = string(w) +} + +// WithCredentialsFile returns a ClientOption that authenticates +// API calls with the given service account or refresh token JSON +// credentials file. 
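+//
+// An illustrative sketch (the client package and path are hypothetical;
+// storage here stands for cloud.google.com/go/storage):
+//
+//	client, err := storage.NewClient(ctx, option.WithCredentialsFile("/secrets/sa.json"))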
+func WithCredentialsFile(filename string) ClientOption {
+	return withCredFile(filename)
+}
+
+// WithServiceAccountFile returns a ClientOption that uses a Google service
+// account credentials file to authenticate.
+//
+// Deprecated: Use WithCredentialsFile instead.
+func WithServiceAccountFile(filename string) ClientOption {
+	return WithCredentialsFile(filename)
+}
+
+// WithCredentialsJSON returns a ClientOption that authenticates
+// API calls with the given service account or refresh token JSON
+// credentials.
+func WithCredentialsJSON(p []byte) ClientOption {
+	return withCredentialsJSON(p)
+}
+
+type withCredentialsJSON []byte
+
+func (w withCredentialsJSON) Apply(o *internal.DialSettings) {
+	o.CredentialsJSON = make([]byte, len(w))
+	copy(o.CredentialsJSON, w)
+}
+
+// WithEndpoint returns a ClientOption that overrides the default endpoint
+// to be used for a service.
+func WithEndpoint(url string) ClientOption {
+	return withEndpoint(url)
+}
+
+type withEndpoint string
+
+func (w withEndpoint) Apply(o *internal.DialSettings) {
+	o.Endpoint = string(w)
+}
+
+// WithScopes returns a ClientOption that overrides the default OAuth2 scopes
+// to be used for a service.
+func WithScopes(scope ...string) ClientOption {
+	return withScopes(scope)
+}
+
+type withScopes []string
+
+func (w withScopes) Apply(o *internal.DialSettings) {
+	o.Scopes = make([]string, len(w))
+	copy(o.Scopes, w)
+}
+
+// WithUserAgent returns a ClientOption that sets the User-Agent.
+func WithUserAgent(ua string) ClientOption {
+	return withUA(ua)
+}
+
+type withUA string
+
+func (w withUA) Apply(o *internal.DialSettings) { o.UserAgent = string(w) }
+
+// WithHTTPClient returns a ClientOption that specifies the HTTP client to use
+// as the basis of communications. This option may only be used with services
+// that support HTTP as their communication transport. When used, the
+// WithHTTPClient option takes precedence over all other supplied options.
+func WithHTTPClient(client *http.Client) ClientOption {
+	return withHTTPClient{client}
+}
+
+type withHTTPClient struct{ client *http.Client }
+
+func (w withHTTPClient) Apply(o *internal.DialSettings) {
+	o.HTTPClient = w.client
+}
+
+// WithGRPCConn returns a ClientOption that specifies the gRPC client
+// connection to use as the basis of communications. This option may only be
+// used with services that support gRPC as their communication transport. When
+// used, the WithGRPCConn option takes precedence over all other supplied
+// options.
+func WithGRPCConn(conn *grpc.ClientConn) ClientOption {
+	return withGRPCConn{conn}
+}
+
+type withGRPCConn struct{ conn *grpc.ClientConn }
+
+func (w withGRPCConn) Apply(o *internal.DialSettings) {
+	o.GRPCConn = w.conn
+}
+
+// WithGRPCDialOption returns a ClientOption that appends a new grpc.DialOption
+// to an underlying gRPC dial. It does not work with WithGRPCConn.
+func WithGRPCDialOption(opt grpc.DialOption) ClientOption {
+	return withGRPCDialOption{opt}
+}
+
+type withGRPCDialOption struct{ opt grpc.DialOption }
+
+func (w withGRPCDialOption) Apply(o *internal.DialSettings) {
+	o.GRPCDialOpts = append(o.GRPCDialOpts, w.opt)
+}
+
+// WithGRPCConnectionPool returns a ClientOption that creates a pool of gRPC
+// connections that requests will be balanced between.
+// This is an EXPERIMENTAL API and may be changed or removed in the future.
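+//
+// An illustrative sketch (pubsub stands for cloud.google.com/go/pubsub; the
+// pool size is arbitrary):
+//
+//	client, err := pubsub.NewClient(ctx, "my-project", option.WithGRPCConnectionPool(4))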
+func WithGRPCConnectionPool(size int) ClientOption { + return withGRPCConnectionPool(size) +} + +type withGRPCConnectionPool int + +func (w withGRPCConnectionPool) Apply(o *internal.DialSettings) { + balancer := grpc.RoundRobin(internal.NewPoolResolver(int(w), o)) + o.GRPCDialOpts = append(o.GRPCDialOpts, grpc.WithBalancer(balancer)) +} + +// WithAPIKey returns a ClientOption that specifies an API key to be used +// as the basis for authentication. +// +// API Keys can only be used for JSON-over-HTTP APIs, including those under +// the import path google.golang.org/api/.... +func WithAPIKey(apiKey string) ClientOption { + return withAPIKey(apiKey) +} + +type withAPIKey string + +func (w withAPIKey) Apply(o *internal.DialSettings) { o.APIKey = string(w) } + +// WithAudiences returns a ClientOption that specifies an audience to be used +// as the audience field ("aud") for the JWT token authentication. +func WithAudiences(audience ...string) ClientOption { + return withAudiences(audience) +} + +type withAudiences []string + +func (w withAudiences) Apply(o *internal.DialSettings) { + o.Audiences = make([]string, len(w)) + copy(o.Audiences, w) +} + +// WithoutAuthentication returns a ClientOption that specifies that no +// authentication should be used. It is suitable only for testing and for +// accessing public resources, like public Google Cloud Storage buckets. +// It is an error to provide both WithoutAuthentication and any of WithAPIKey, +// WithTokenSource, WithCredentialsFile or WithServiceAccountFile. +func WithoutAuthentication() ClientOption { + return withoutAuthentication{} +} + +type withoutAuthentication struct{} + +func (w withoutAuthentication) Apply(o *internal.DialSettings) { o.NoAuth = true } + +// WithQuotaProject returns a ClientOption that specifies the project used +// for quota and billing purposes. +// +// For more information please read: +// https://cloud.google.com/apis/docs/system-parameters +func WithQuotaProject(quotaProject string) ClientOption { + return withQuotaProject(quotaProject) +} + +type withQuotaProject string + +func (w withQuotaProject) Apply(o *internal.DialSettings) { + o.QuotaProject = string(w) +} + +// WithRequestReason returns a ClientOption that specifies a reason for +// making the request, which is intended to be recorded in audit logging. +// An example reason would be a support-case ticket number. +// +// For more information please read: +// https://cloud.google.com/apis/docs/system-parameters +func WithRequestReason(requestReason string) ClientOption { + return withRequestReason(requestReason) +} + +type withRequestReason string + +func (w withRequestReason) Apply(o *internal.DialSettings) { + o.RequestReason = string(w) +} diff --git a/vendor/google.golang.org/api/support/bundler/bundler.go b/vendor/google.golang.org/api/support/bundler/bundler.go new file mode 100644 index 000000000..c55327119 --- /dev/null +++ b/vendor/google.golang.org/api/support/bundler/bundler.go @@ -0,0 +1,349 @@ +// Copyright 2016 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package bundler supports bundling (batching) of items. Bundling amortizes an
+// action with fixed costs over multiple items. For example, if an API provides
+// an RPC that accepts a list of items as input, but clients would prefer
+// adding items one at a time, then a Bundler can accept individual items from
+// the client and bundle many of them into a single RPC.
+//
+// This package is experimental and subject to change without notice.
+package bundler
+
+import (
+	"context"
+	"errors"
+	"math"
+	"reflect"
+	"sync"
+	"time"
+
+	"golang.org/x/sync/semaphore"
+)
+
+const (
+	DefaultDelayThreshold       = time.Second
+	DefaultBundleCountThreshold = 10
+	DefaultBundleByteThreshold  = 1e6 // 1M
+	DefaultBufferedByteLimit    = 1e9 // 1G
+)
+
+var (
+	// ErrOverflow indicates that Bundler's stored bytes exceed its BufferedByteLimit.
+	ErrOverflow = errors.New("bundler reached buffered byte limit")
+
+	// ErrOversizedItem indicates that an item's size exceeds the maximum bundle size.
+	ErrOversizedItem = errors.New("item size exceeds bundle byte limit")
+)
+
+// A Bundler collects items added to it into a bundle until the bundle
+// exceeds a given size, then calls a user-provided function to handle the bundle.
+type Bundler struct {
+	// Starting from the time that the first message is added to a bundle, once
+	// this delay has passed, handle the bundle. The default is DefaultDelayThreshold.
+	DelayThreshold time.Duration
+
+	// Once a bundle has this many items, handle the bundle. Since only one
+	// item at a time is added to a bundle, no bundle will exceed this
+	// threshold, so it also serves as a limit. The default is
+	// DefaultBundleCountThreshold.
+	BundleCountThreshold int
+
+	// Once the number of bytes in the current bundle reaches this threshold, handle
+	// the bundle. The default is DefaultBundleByteThreshold. This triggers handling,
+	// but does not cap the total size of a bundle.
+	BundleByteThreshold int
+
+	// The maximum size of a bundle, in bytes. Zero means unlimited.
+	BundleByteLimit int
+
+	// The maximum number of bytes that the Bundler will keep in memory before
+	// returning ErrOverflow. The default is DefaultBufferedByteLimit.
+	BufferedByteLimit int
+
+	// The maximum number of handler invocations that can be running at once.
+	// The default is 1.
+	HandlerLimit int
+
+	handler       func(interface{}) // called to handle a bundle
+	itemSliceZero reflect.Value     // nil (zero value) for slice of items
+	flushTimer    *time.Timer       // implements DelayThreshold
+
+	mu        sync.Mutex
+	sem       *semaphore.Weighted // enforces BufferedByteLimit
+	semOnce   sync.Once
+	curBundle bundle // incoming items added to this bundle
+
+	// Each bundle is assigned a unique ticket that determines the order in which the
+	// handler is called. The ticket is assigned with mu locked, but waiting for tickets
+	// to be handled is done via mu2 and cond, below.
+	nextTicket uint64 // next ticket to be assigned
+
+	mu2         sync.Mutex
+	cond        *sync.Cond
+	nextHandled uint64 // next ticket to be handled
+
+	// In this implementation, active uses space proportional to HandlerLimit, and
+	// waitUntilAllHandled takes time proportional to HandlerLimit each time an acquire
+	// or release occurs, so large values of HandlerLimit may cause performance
+	// issues.
+ active map[uint64]bool // tickets of bundles actively being handled +} + +type bundle struct { + items reflect.Value // slice of item type + size int // size in bytes of all items +} + +// NewBundler creates a new Bundler. +// +// itemExample is a value of the type that will be bundled. For example, if you +// want to create bundles of *Entry, you could pass &Entry{} for itemExample. +// +// handler is a function that will be called on each bundle. If itemExample is +// of type T, the argument to handler is of type []T. handler is always called +// sequentially for each bundle, and never in parallel. +// +// Configure the Bundler by setting its thresholds and limits before calling +// any of its methods. +func NewBundler(itemExample interface{}, handler func(interface{})) *Bundler { + b := &Bundler{ + DelayThreshold: DefaultDelayThreshold, + BundleCountThreshold: DefaultBundleCountThreshold, + BundleByteThreshold: DefaultBundleByteThreshold, + BufferedByteLimit: DefaultBufferedByteLimit, + HandlerLimit: 1, + + handler: handler, + itemSliceZero: reflect.Zero(reflect.SliceOf(reflect.TypeOf(itemExample))), + active: map[uint64]bool{}, + } + b.curBundle.items = b.itemSliceZero + b.cond = sync.NewCond(&b.mu2) + return b +} + +func (b *Bundler) initSemaphores() { + // Create the semaphores lazily, because the user may set limits + // after NewBundler. + b.semOnce.Do(func() { + b.sem = semaphore.NewWeighted(int64(b.BufferedByteLimit)) + }) +} + +// Add adds item to the current bundle. It marks the bundle for handling and +// starts a new one if any of the thresholds or limits are exceeded. +// +// If the item's size exceeds the maximum bundle size (Bundler.BundleByteLimit), then +// the item can never be handled. Add returns ErrOversizedItem in this case. +// +// If adding the item would exceed the maximum memory allowed +// (Bundler.BufferedByteLimit) or an AddWait call is blocked waiting for +// memory, Add returns ErrOverflow. +// +// Add never blocks. +func (b *Bundler) Add(item interface{}, size int) error { + // If this item exceeds the maximum size of a bundle, + // we can never send it. + if b.BundleByteLimit > 0 && size > b.BundleByteLimit { + return ErrOversizedItem + } + // If adding this item would exceed our allotted memory + // footprint, we can't accept it. + // (TryAcquire also returns false if anything is waiting on the semaphore, + // so calls to Add and AddWait shouldn't be mixed.) + b.initSemaphores() + if !b.sem.TryAcquire(int64(size)) { + return ErrOverflow + } + b.add(item, size) + return nil +} + +// add adds item to the current bundle. It marks the bundle for handling and +// starts a new one if any of the thresholds or limits are exceeded. +func (b *Bundler) add(item interface{}, size int) { + b.mu.Lock() + defer b.mu.Unlock() + + // If adding this item to the current bundle would cause it to exceed the + // maximum bundle size, close the current bundle and start a new one. + if b.BundleByteLimit > 0 && b.curBundle.size+size > b.BundleByteLimit { + b.startFlushLocked() + } + // Add the item. + b.curBundle.items = reflect.Append(b.curBundle.items, reflect.ValueOf(item)) + b.curBundle.size += size + + // Start a timer to flush the item if one isn't already running. + // startFlushLocked clears the timer and closes the bundle at the same time, + // so we only allocate a new timer for the first item in each bundle. + // (We could try to call Reset on the timer instead, but that would add a lot + // of complexity to the code just to save one small allocation.) 
+ if b.flushTimer == nil { + b.flushTimer = time.AfterFunc(b.DelayThreshold, b.Flush) + } + + // If the current bundle equals the count threshold, close it. + if b.curBundle.items.Len() == b.BundleCountThreshold { + b.startFlushLocked() + } + // If the current bundle equals or exceeds the byte threshold, close it. + if b.curBundle.size >= b.BundleByteThreshold { + b.startFlushLocked() + } +} + +// AddWait adds item to the current bundle. It marks the bundle for handling and +// starts a new one if any of the thresholds or limits are exceeded. +// +// If the item's size exceeds the maximum bundle size (Bundler.BundleByteLimit), then +// the item can never be handled. AddWait returns ErrOversizedItem in this case. +// +// If adding the item would exceed the maximum memory allowed (Bundler.BufferedByteLimit), +// AddWait blocks until space is available or ctx is done. +// +// Calls to Add and AddWait should not be mixed on the same Bundler. +func (b *Bundler) AddWait(ctx context.Context, item interface{}, size int) error { + // If this item exceeds the maximum size of a bundle, + // we can never send it. + if b.BundleByteLimit > 0 && size > b.BundleByteLimit { + return ErrOversizedItem + } + // If adding this item would exceed our allotted memory footprint, block + // until space is available. The semaphore is FIFO, so there will be no + // starvation. + b.initSemaphores() + if err := b.sem.Acquire(ctx, int64(size)); err != nil { + return err + } + // Here, we've reserved space for item. Other goroutines can call AddWait + // and even acquire space, but no one can take away our reservation + // (assuming sem.Release is used correctly). So there is no race condition + // resulting from locking the mutex after sem.Acquire returns. + b.add(item, size) + return nil +} + +// Flush invokes the handler for all remaining items in the Bundler and waits +// for it to return. +func (b *Bundler) Flush() { + b.mu.Lock() + b.startFlushLocked() + // Here, all bundles with tickets < b.nextTicket are + // either finished or active. Those are the ones + // we want to wait for. + t := b.nextTicket + b.mu.Unlock() + b.initSemaphores() + b.waitUntilAllHandled(t) +} + +func (b *Bundler) startFlushLocked() { + if b.flushTimer != nil { + b.flushTimer.Stop() + b.flushTimer = nil + } + if b.curBundle.items.Len() == 0 { + return + } + // Here, both semaphores must have been initialized. + bun := b.curBundle + b.curBundle = bundle{items: b.itemSliceZero} + ticket := b.nextTicket + b.nextTicket++ + go func() { + defer func() { + b.sem.Release(int64(bun.size)) + b.release(ticket) + }() + b.acquire(ticket) + b.handler(bun.items.Interface()) + }() +} + +// acquire blocks until ticket is the next to be served, then returns. In order for N +// acquire calls to return, the tickets must be in the range [0, N). A ticket must +// not be presented to acquire more than once. +func (b *Bundler) acquire(ticket uint64) { + b.mu2.Lock() + defer b.mu2.Unlock() + if ticket < b.nextHandled { + panic("bundler: acquire: arg too small") + } + for !(ticket == b.nextHandled && len(b.active) < b.HandlerLimit) { + b.cond.Wait() + } + // Here, + // ticket == b.nextHandled: the caller is the next one to be handled; + // and len(b.active) < b.HandlerLimit: there is space available. + b.active[ticket] = true + b.nextHandled++ + // Broadcast, not Signal: although at most one acquire waiter can make progress, + // there might be waiters in waitUntilAllHandled. 
+ b.cond.Broadcast() +} + +// If a ticket is used for a call to acquire, it must later be passed to release. A +// ticket must not be presented to release more than once. +func (b *Bundler) release(ticket uint64) { + b.mu2.Lock() + defer b.mu2.Unlock() + if !b.active[ticket] { + panic("bundler: release: not an active ticket") + } + delete(b.active, ticket) + b.cond.Broadcast() +} + +// waitUntilAllHandled blocks until all tickets < n have called release, meaning +// all bundles with tickets < n have been handled. +func (b *Bundler) waitUntilAllHandled(n uint64) { + // Proof of correctness of this function. + // "N is acquired" means acquire(N) has returned. + // "N is released" means release(N) has returned. + // 1. If N is acquired, N-1 is acquired. + // Follows from the loop test in acquire, and the fact + // that nextHandled is incremented by 1. + // 2. If nextHandled >= N, then N-1 is acquired. + // Because we only increment nextHandled to N after N-1 is acquired. + // 3. If nextHandled >= N, then all n < N is acquired. + // Follows from #1 and #2. + // 4. If N is acquired and N is not in active, then N is released. + // Because we put N in active before acquire returns, and only + // remove it when it is released. + // Let min(active) be the smallest member of active, or infinity if active is empty. + // 5. If nextHandled >= N and N <= min(active), then all n < N is released. + // From nextHandled >= N and #3, all n < N is acquired. + // N <= min(active) implies n < min(active) for all n < N. So all n < N is not in active. + // So from #4, all n < N is released. + // The loop test below is the antecedent of #5. + b.mu2.Lock() + defer b.mu2.Unlock() + for !(b.nextHandled >= n && n <= min(b.active)) { + b.cond.Wait() + } +} + +// min returns the minimum value of the set s, or the largest uint64 if +// s is empty. +func min(s map[uint64]bool) uint64 { + var m uint64 = math.MaxUint64 + for n := range s { + if n < m { + m = n + } + } + return m +} diff --git a/vendor/google.golang.org/api/transport/dial.go b/vendor/google.golang.org/api/transport/dial.go new file mode 100644 index 000000000..1fb7cf905 --- /dev/null +++ b/vendor/google.golang.org/api/transport/dial.go @@ -0,0 +1,46 @@ +// Copyright 2015 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package transport + +import ( + "context" + "net/http" + + "google.golang.org/grpc" + + "google.golang.org/api/option" + gtransport "google.golang.org/api/transport/grpc" + htransport "google.golang.org/api/transport/http" +) + +// NewHTTPClient returns an HTTP client for use communicating with a Google cloud +// service, configured with the given ClientOptions. It also returns the endpoint +// for the service as specified in the options. +func NewHTTPClient(ctx context.Context, opts ...option.ClientOption) (*http.Client, string, error) { + return htransport.NewClient(ctx, opts...) +} + +// DialGRPC returns a GRPC connection for use communicating with a Google cloud +// service, configured with the given ClientOptions. 
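+//
+// An illustrative sketch of a direct call (most code reaches this through a
+// generated client library; the endpoint and scope are examples):
+//
+//	conn, err := transport.DialGRPC(ctx,
+//		option.WithEndpoint("pubsub.googleapis.com:443"),
+//		option.WithScopes("https://www.googleapis.com/auth/pubsub"),
+//	)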
+func DialGRPC(ctx context.Context, opts ...option.ClientOption) (*grpc.ClientConn, error) { + return gtransport.Dial(ctx, opts...) +} + +// DialGRPCInsecure returns an insecure GRPC connection for use communicating +// with fake or mock Google cloud service implementations, such as emulators. +// The connection is configured with the given ClientOptions. +func DialGRPCInsecure(ctx context.Context, opts ...option.ClientOption) (*grpc.ClientConn, error) { + return gtransport.DialInsecure(ctx, opts...) +} diff --git a/vendor/google.golang.org/api/transport/doc.go b/vendor/google.golang.org/api/transport/doc.go new file mode 100644 index 000000000..4915036c3 --- /dev/null +++ b/vendor/google.golang.org/api/transport/doc.go @@ -0,0 +1,21 @@ +// Copyright 2019 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package transport provides utility methods for creating authenticated +// transports to Google's HTTP and gRPC APIs. It is intended to be used in +// conjunction with google.golang.org/api/option. +// +// This package is not intended for use by end developers. Use the +// google.golang.org/api/option package to configure API clients. +package transport diff --git a/vendor/google.golang.org/api/transport/go19.go b/vendor/google.golang.org/api/transport/go19.go new file mode 100644 index 000000000..3e89f9328 --- /dev/null +++ b/vendor/google.golang.org/api/transport/go19.go @@ -0,0 +1,35 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build go1.9 + +package transport + +import ( + "context" + + "golang.org/x/oauth2/google" + "google.golang.org/api/internal" + "google.golang.org/api/option" +) + +// Creds constructs a google.Credentials from the information in the options, +// or obtains the default credentials in the same way as google.FindDefaultCredentials. +func Creds(ctx context.Context, opts ...option.ClientOption) (*google.Credentials, error) { + var ds internal.DialSettings + for _, opt := range opts { + opt.Apply(&ds) + } + return internal.Creds(ctx, &ds) +} diff --git a/vendor/google.golang.org/api/transport/grpc/dial.go b/vendor/google.golang.org/api/transport/grpc/dial.go new file mode 100644 index 000000000..b850246ce --- /dev/null +++ b/vendor/google.golang.org/api/transport/grpc/dial.go @@ -0,0 +1,209 @@ +// Copyright 2015 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package grpc supports network connections to GRPC servers. +// This package is not intended for use by end developers. Use the +// google.golang.org/api/option package to configure API clients. +package grpc + +import ( + "context" + "errors" + "log" + "os" + "strings" + + "go.opencensus.io/plugin/ocgrpc" + "golang.org/x/oauth2" + "google.golang.org/api/internal" + "google.golang.org/api/option" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + grpcgoogle "google.golang.org/grpc/credentials/google" + "google.golang.org/grpc/credentials/oauth" + + // Install grpclb, which is required for direct path. + _ "google.golang.org/grpc/balancer/grpclb" +) + +// Set at init time by dial_appengine.go. If nil, we're not on App Engine. +var appengineDialerHook func(context.Context) grpc.DialOption + +// Set at init time by dial_socketopt.go. If nil, socketopt is not supported. +var timeoutDialerOption grpc.DialOption + +// Dial returns a GRPC connection for use communicating with a Google cloud +// service, configured with the given ClientOptions. +func Dial(ctx context.Context, opts ...option.ClientOption) (*grpc.ClientConn, error) { + return dial(ctx, false, opts) +} + +// DialInsecure returns an insecure GRPC connection for use communicating +// with fake or mock Google cloud service implementations, such as emulators. +// The connection is configured with the given ClientOptions. +func DialInsecure(ctx context.Context, opts ...option.ClientOption) (*grpc.ClientConn, error) { + return dial(ctx, true, opts) +} + +func dial(ctx context.Context, insecure bool, opts []option.ClientOption) (*grpc.ClientConn, error) { + var o internal.DialSettings + for _, opt := range opts { + opt.Apply(&o) + } + if err := o.Validate(); err != nil { + return nil, err + } + if o.HTTPClient != nil { + return nil, errors.New("unsupported HTTP client specified") + } + if o.GRPCConn != nil { + return o.GRPCConn, nil + } + var grpcOpts []grpc.DialOption + if insecure { + grpcOpts = []grpc.DialOption{grpc.WithInsecure()} + } else if !o.NoAuth { + if o.APIKey != "" { + log.Print("API keys are not supported for gRPC APIs. Remove the WithAPIKey option from your client-creating call.") + } + creds, err := internal.Creds(ctx, &o) + if err != nil { + return nil, err + } + // Attempt Direct Path only if: + // * The endpoint is a host:port (or dns:///host:port). + // * Credentials are obtained via GCE metadata server, using the default + // service account. + // * Opted in via GOOGLE_CLOUD_ENABLE_DIRECT_PATH environment variable. + // For example, GOOGLE_CLOUD_ENABLE_DIRECT_PATH=spanner,pubsub + if isDirectPathEnabled(o.Endpoint) && isTokenSourceDirectPathCompatible(creds.TokenSource) { + if !strings.HasPrefix(o.Endpoint, "dns:///") { + o.Endpoint = "dns:///" + o.Endpoint + } + grpcOpts = []grpc.DialOption{ + grpc.WithCredentialsBundle( + grpcgoogle.NewComputeEngineCredentials(), + ), + } + // TODO(cbro): add support for system parameters (quota project, request reason) via chained interceptor. 
+	} else {
+		grpcOpts = []grpc.DialOption{
+			grpc.WithPerRPCCredentials(grpcTokenSource{
+				TokenSource:   oauth.TokenSource{creds.TokenSource},
+				quotaProject:  o.QuotaProject,
+				requestReason: o.RequestReason,
+			}),
+			grpc.WithTransportCredentials(credentials.NewClientTLSFromCert(nil, "")),
+		}
+	}
+	}
+
+	if appengineDialerHook != nil {
+		// Use the Socket API on App Engine.
+		// The appengine dialer will override the socketopt dialer.
+		grpcOpts = append(grpcOpts, appengineDialerHook(ctx))
+	}
+
+	// Add tracing before the other options so that clients can override the
+	// gRPC stats handler.
+	// This assumes that gRPC options are processed in order, left to right.
+	grpcOpts = addOCStatsHandler(grpcOpts)
+	grpcOpts = append(grpcOpts, o.GRPCDialOpts...)
+	if o.UserAgent != "" {
+		grpcOpts = append(grpcOpts, grpc.WithUserAgent(o.UserAgent))
+	}
+
+	// TODO(weiranf): This socketopt dialer will be used by default once
+	// isDirectPathEnabled defaults to true; until user-defined dialers can
+	// be introspected (https://github.com/grpc/grpc-go/issues/2795), it is
+	// guarded by the direct path environment variable.
+	if timeoutDialerOption != nil && isDirectPathEnabled(o.Endpoint) {
+		grpcOpts = append(grpcOpts, timeoutDialerOption)
+	}
+
+	return grpc.DialContext(ctx, o.Endpoint, grpcOpts...)
+}
+
+func addOCStatsHandler(opts []grpc.DialOption) []grpc.DialOption {
+	return append(opts, grpc.WithStatsHandler(&ocgrpc.ClientHandler{}))
+}
+
+// grpcTokenSource supplies PerRPCCredentials from an oauth.TokenSource.
+type grpcTokenSource struct {
+	oauth.TokenSource
+
+	// Additional metadata attached as headers.
+	quotaProject  string
+	requestReason string
+}
+
+// GetRequestMetadata gets the request metadata as a map from a grpcTokenSource.
+func (ts grpcTokenSource) GetRequestMetadata(ctx context.Context, uri ...string) (
+	map[string]string, error) {
+	metadata, err := ts.TokenSource.GetRequestMetadata(ctx, uri...)
+	if err != nil {
+		return nil, err
+	}
+
+	// Attach system parameters.
+	if ts.quotaProject != "" {
+		metadata["X-goog-user-project"] = ts.quotaProject
+	}
+	if ts.requestReason != "" {
+		metadata["X-goog-request-reason"] = ts.requestReason
+	}
+	return metadata, nil
+}
+
+func isTokenSourceDirectPathCompatible(ts oauth2.TokenSource) bool {
+	if ts == nil {
+		return false
+	}
+	tok, err := ts.Token()
+	if err != nil {
+		return false
+	}
+	if tok == nil {
+		return false
+	}
+	if source, _ := tok.Extra("oauth2.google.tokenSource").(string); source != "compute-metadata" {
+		return false
+	}
+	if acct, _ := tok.Extra("oauth2.google.serviceAccount").(string); acct != "default" {
+		return false
+	}
+	return true
+}
+
+func isDirectPathEnabled(endpoint string) bool {
+	// Only host:port is supported, not other schemes (e.g., "tcp://" or "unix://").
+	// Also don't try direct path if the user has chosen an alternate name resolver
+	// (i.e., via ":///" prefix).
+	//
+	// TODO(cbro): once gRPC has introspectible options, check the user hasn't
+	// provided a custom dialer in gRPC options.
+	if strings.Contains(endpoint, "://") && !strings.HasPrefix(endpoint, "dns:///") {
+		return false
+	}
+
+	// Only try direct path if the user has opted in via the environment variable.
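+	// Matching below is substring-based: for example,
+	// GOOGLE_CLOUD_ENABLE_DIRECT_PATH=spanner enables direct path for any
+	// endpoint that contains "spanner".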
+	whitelist := strings.Split(os.Getenv("GOOGLE_CLOUD_ENABLE_DIRECT_PATH"), ",")
+	for _, api := range whitelist {
+		// Ignore the empty string, since an empty env variable splits into [""].
+		if api != "" && strings.Contains(endpoint, api) {
+			return true
+		}
+	}
+	return false
+}
diff --git a/vendor/google.golang.org/api/transport/grpc/dial_appengine.go b/vendor/google.golang.org/api/transport/grpc/dial_appengine.go
new file mode 100644
index 000000000..87819d4e1
--- /dev/null
+++ b/vendor/google.golang.org/api/transport/grpc/dial_appengine.go
@@ -0,0 +1,41 @@
+// Copyright 2016 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build appengine
+
+package grpc
+
+import (
+	"context"
+	"net"
+	"time"
+
+	"google.golang.org/appengine"
+	"google.golang.org/appengine/socket"
+	"google.golang.org/grpc"
+)
+
+func init() {
+	// NOTE: dev_appserver doesn't currently support SSL.
+	// When it does, this code can be removed.
+	if appengine.IsDevAppServer() {
+		return
+	}
+
+	appengineDialerHook = func(ctx context.Context) grpc.DialOption {
+		return grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) {
+			return socket.DialTimeout(ctx, "tcp", addr, timeout)
+		})
+	}
+}
diff --git a/vendor/google.golang.org/api/transport/grpc/dial_socketopt.go b/vendor/google.golang.org/api/transport/grpc/dial_socketopt.go
new file mode 100644
index 000000000..2b1d9e99b
--- /dev/null
+++ b/vendor/google.golang.org/api/transport/grpc/dial_socketopt.go
@@ -0,0 +1,59 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build go1.11,linux
+
+package grpc
+
+import (
+	"context"
+	"net"
+	"syscall"
+
+	"golang.org/x/sys/unix"
+	"google.golang.org/grpc"
+)
+
+const (
+	// tcpUserTimeoutMilliseconds is the default value applied for the
+	// TCP_USER_TIMEOUT socket option: 20 seconds.
+	tcpUserTimeoutMilliseconds = 20000
+)
+
+func init() {
+	// timeoutDialerOption is a grpc.DialOption that contains a dialer with
+	// the socket option TCP_USER_TIMEOUT. This dialer requires go versions 1.11+.
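+	// TCP_USER_TIMEOUT bounds how long transmitted data may remain
+	// unacknowledged before the kernel closes the connection, so a dead
+	// peer fails the RPC within roughly the 20 seconds above rather than
+	// hanging on OS defaults.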
+ timeoutDialerOption = grpc.WithContextDialer(dialTCPUserTimeout) +} + +func dialTCPUserTimeout(ctx context.Context, addr string) (net.Conn, error) { + control := func(network, address string, c syscall.RawConn) error { + var syscallErr error + controlErr := c.Control(func(fd uintptr) { + syscallErr = syscall.SetsockoptInt( + int(fd), syscall.IPPROTO_TCP, unix.TCP_USER_TIMEOUT, tcpUserTimeoutMilliseconds) + }) + if syscallErr != nil { + return syscallErr + } + if controlErr != nil { + return controlErr + } + return nil + } + d := &net.Dialer{ + Control: control, + } + return d.DialContext(ctx, "tcp", addr) +} diff --git a/vendor/google.golang.org/api/transport/http/dial.go b/vendor/google.golang.org/api/transport/http/dial.go new file mode 100644 index 000000000..c0d8bf20b --- /dev/null +++ b/vendor/google.golang.org/api/transport/http/dial.go @@ -0,0 +1,161 @@ +// Copyright 2015 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package http supports network connections to HTTP servers. +// This package is not intended for use by end developers. Use the +// google.golang.org/api/option package to configure API clients. +package http + +import ( + "context" + "errors" + "net/http" + + "go.opencensus.io/plugin/ochttp" + "golang.org/x/oauth2" + "google.golang.org/api/googleapi/transport" + "google.golang.org/api/internal" + "google.golang.org/api/option" + "google.golang.org/api/transport/http/internal/propagation" +) + +// NewClient returns an HTTP client for use communicating with a Google cloud +// service, configured with the given ClientOptions. It also returns the endpoint +// for the service as specified in the options. +func NewClient(ctx context.Context, opts ...option.ClientOption) (*http.Client, string, error) { + settings, err := newSettings(opts) + if err != nil { + return nil, "", err + } + // TODO(cbro): consider injecting the User-Agent even if an explicit HTTP client is provided? + if settings.HTTPClient != nil { + return settings.HTTPClient, settings.Endpoint, nil + } + trans, err := newTransport(ctx, defaultBaseTransport(ctx), settings) + if err != nil { + return nil, "", err + } + return &http.Client{Transport: trans}, settings.Endpoint, nil +} + +// NewTransport creates an http.RoundTripper for use communicating with a Google +// cloud service, configured with the given ClientOptions. Its RoundTrip method delegates to base. 
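+//
+// An illustrative sketch (the scope value is only an example; error
+// handling elided):
+//
+//	rt, err := NewTransport(ctx, http.DefaultTransport,
+//		option.WithScopes("https://www.googleapis.com/auth/cloud-platform"))
+//	client := &http.Client{Transport: rt}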
+func NewTransport(ctx context.Context, base http.RoundTripper, opts ...option.ClientOption) (http.RoundTripper, error) { + settings, err := newSettings(opts) + if err != nil { + return nil, err + } + if settings.HTTPClient != nil { + return nil, errors.New("transport/http: WithHTTPClient passed to NewTransport") + } + return newTransport(ctx, base, settings) +} + +func newTransport(ctx context.Context, base http.RoundTripper, settings *internal.DialSettings) (http.RoundTripper, error) { + trans := base + trans = parameterTransport{ + base: trans, + userAgent: settings.UserAgent, + quotaProject: settings.QuotaProject, + requestReason: settings.RequestReason, + } + trans = addOCTransport(trans) + switch { + case settings.NoAuth: + // Do nothing. + case settings.APIKey != "": + trans = &transport.APIKey{ + Transport: trans, + Key: settings.APIKey, + } + default: + creds, err := internal.Creds(ctx, settings) + if err != nil { + return nil, err + } + trans = &oauth2.Transport{ + Base: trans, + Source: creds.TokenSource, + } + } + return trans, nil +} + +func newSettings(opts []option.ClientOption) (*internal.DialSettings, error) { + var o internal.DialSettings + for _, opt := range opts { + opt.Apply(&o) + } + if err := o.Validate(); err != nil { + return nil, err + } + if o.GRPCConn != nil { + return nil, errors.New("unsupported gRPC connection specified") + } + return &o, nil +} + +type parameterTransport struct { + userAgent string + quotaProject string + requestReason string + + base http.RoundTripper +} + +func (t parameterTransport) RoundTrip(req *http.Request) (*http.Response, error) { + rt := t.base + if rt == nil { + return nil, errors.New("transport: no Transport specified") + } + if t.userAgent == "" { + return rt.RoundTrip(req) + } + newReq := *req + newReq.Header = make(http.Header) + for k, vv := range req.Header { + newReq.Header[k] = vv + } + // TODO(cbro): append to existing User-Agent header? + newReq.Header.Set("User-Agent", t.userAgent) + + // Attach system parameters into the header + if t.quotaProject != "" { + newReq.Header.Set("X-Goog-User-Project", t.quotaProject) + } + if t.requestReason != "" { + newReq.Header.Set("X-Goog-Request-Reason", t.requestReason) + } + + return rt.RoundTrip(&newReq) +} + +// Set at init time by dial_appengine.go. If nil, we're not on App Engine. +var appengineUrlfetchHook func(context.Context) http.RoundTripper + +// defaultBaseTransport returns the base HTTP transport. +// On App Engine, this is urlfetch.Transport, otherwise it's http.DefaultTransport. +func defaultBaseTransport(ctx context.Context) http.RoundTripper { + if appengineUrlfetchHook != nil { + return appengineUrlfetchHook(ctx) + } + return http.DefaultTransport +} + +func addOCTransport(trans http.RoundTripper) http.RoundTripper { + return &ochttp.Transport{ + Base: trans, + Propagation: &propagation.HTTPFormat{}, + } +} diff --git a/vendor/google.golang.org/api/transport/http/dial_appengine.go b/vendor/google.golang.org/api/transport/http/dial_appengine.go new file mode 100644 index 000000000..04c81413c --- /dev/null +++ b/vendor/google.golang.org/api/transport/http/dial_appengine.go @@ -0,0 +1,30 @@ +// Copyright 2016 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build appengine
+
+package http
+
+import (
+	"context"
+	"net/http"
+
+	"google.golang.org/appengine/urlfetch"
+)
+
+func init() {
+	appengineUrlfetchHook = func(ctx context.Context) http.RoundTripper {
+		return &urlfetch.Transport{Context: ctx}
+	}
+}
diff --git a/vendor/google.golang.org/api/transport/http/internal/propagation/http.go b/vendor/google.golang.org/api/transport/http/internal/propagation/http.go
new file mode 100644
index 000000000..24b4f0d29
--- /dev/null
+++ b/vendor/google.golang.org/api/transport/http/internal/propagation/http.go
@@ -0,0 +1,96 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build go1.8
+
+// Package propagation implements X-Cloud-Trace-Context header propagation used
+// by Google Cloud products.
+package propagation
+
+import (
+	"encoding/binary"
+	"encoding/hex"
+	"fmt"
+	"net/http"
+	"strconv"
+	"strings"
+
+	"go.opencensus.io/trace"
+	"go.opencensus.io/trace/propagation"
+)
+
+const (
+	httpHeaderMaxSize = 200
+	httpHeader        = `X-Cloud-Trace-Context`
+)
+
+var _ propagation.HTTPFormat = (*HTTPFormat)(nil)
+
+// HTTPFormat implements propagation.HTTPFormat to propagate
+// traces in HTTP headers for Google Cloud Platform and Stackdriver Trace.
+type HTTPFormat struct{}
+
+// SpanContextFromRequest extracts a Stackdriver Trace span context from incoming requests.
+func (f *HTTPFormat) SpanContextFromRequest(req *http.Request) (sc trace.SpanContext, ok bool) {
+	h := req.Header.Get(httpHeader)
+	// See https://cloud.google.com/trace/docs/faq for the header format.
+	// Return if the header is empty or missing, or if the header is unreasonably
+	// large, to avoid making unnecessary copies of a large string.
+	if h == "" || len(h) > httpHeaderMaxSize {
+		return trace.SpanContext{}, false
+	}
+
+	// Parse the trace id field.
+	slash := strings.Index(h, `/`)
+	if slash == -1 {
+		return trace.SpanContext{}, false
+	}
+	tid, h := h[:slash], h[slash+1:]
+
+	buf, err := hex.DecodeString(tid)
+	if err != nil {
+		return trace.SpanContext{}, false
+	}
+	copy(sc.TraceID[:], buf)
+
+	// Parse the span id field.
+	spanstr := h
+	semicolon := strings.Index(h, `;`)
+	if semicolon != -1 {
+		spanstr, h = h[:semicolon], h[semicolon+1:]
+	}
+	sid, err := strconv.ParseUint(spanstr, 10, 64)
+	if err != nil {
+		return trace.SpanContext{}, false
+	}
+	binary.BigEndian.PutUint64(sc.SpanID[:], sid)
+
+	// Parse the options field; it is optional.
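+	// At this point h holds at most "o=<options>", where <options> is a
+	// decimal value whose low bit marks the trace as sampled (see the trace
+	// docs FAQ linked above).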
+ if !strings.HasPrefix(h, "o=") { + return sc, true + } + o, err := strconv.ParseUint(h[2:], 10, 64) + if err != nil { + return trace.SpanContext{}, false + } + sc.TraceOptions = trace.TraceOptions(o) + return sc, true +} + +// SpanContextToRequest modifies the given request to include a Stackdriver Trace header. +func (f *HTTPFormat) SpanContextToRequest(sc trace.SpanContext, req *http.Request) { + sid := binary.BigEndian.Uint64(sc.SpanID[:]) + header := fmt.Sprintf("%s/%d;o=%d", hex.EncodeToString(sc.TraceID[:]), sid, int64(sc.TraceOptions)) + req.Header.Set(httpHeader, header) +} diff --git a/vendor/google.golang.org/api/transport/not_go19.go b/vendor/google.golang.org/api/transport/not_go19.go new file mode 100644 index 000000000..0cb627594 --- /dev/null +++ b/vendor/google.golang.org/api/transport/not_go19.go @@ -0,0 +1,35 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !go1.9 + +package transport + +import ( + "context" + + "golang.org/x/oauth2/google" + "google.golang.org/api/internal" + "google.golang.org/api/option" +) + +// Creds constructs a google.DefaultCredentials from the information in the options, +// or obtains the default credentials in the same way as google.FindDefaultCredentials. +func Creds(ctx context.Context, opts ...option.ClientOption) (*google.DefaultCredentials, error) { + var ds internal.DialSettings + for _, opt := range opts { + opt.Apply(&ds) + } + return internal.Creds(ctx, &ds) +} diff --git a/vendor/google.golang.org/appengine/appengine.go b/vendor/google.golang.org/appengine/appengine.go new file mode 100644 index 000000000..8c9697674 --- /dev/null +++ b/vendor/google.golang.org/appengine/appengine.go @@ -0,0 +1,135 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// Package appengine provides basic functionality for Google App Engine. +// +// For more information on how to write Go apps for Google App Engine, see: +// https://cloud.google.com/appengine/docs/go/ +package appengine // import "google.golang.org/appengine" + +import ( + "net/http" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/context" + + "google.golang.org/appengine/internal" +) + +// The gophers party all night; the rabbits provide the beats. + +// Main is the principal entry point for an app running in App Engine. +// +// On App Engine Flexible it installs a trivial health checker if one isn't +// already registered, and starts listening on port 8080 (overridden by the +// $PORT environment variable). +// +// See https://cloud.google.com/appengine/docs/flexible/custom-runtimes#health_check_requests +// for details on how to do your own health checking. +// +// On App Engine Standard it ensures the server has started and is prepared to +// receive requests. +// +// Main never returns. 
+// +// Main is designed so that the app's main package looks like this: +// +// package main +// +// import ( +// "google.golang.org/appengine" +// +// _ "myapp/package0" +// _ "myapp/package1" +// ) +// +// func main() { +// appengine.Main() +// } +// +// The "myapp/packageX" packages are expected to register HTTP handlers +// in their init functions. +func Main() { + internal.Main() +} + +// IsDevAppServer reports whether the App Engine app is running in the +// development App Server. +func IsDevAppServer() bool { + return internal.IsDevAppServer() +} + +// IsStandard reports whether the App Engine app is running in the standard +// environment. This includes both the first generation runtimes (<= Go 1.9) +// and the second generation runtimes (>= Go 1.11). +func IsStandard() bool { + return internal.IsStandard() +} + +// IsFlex reports whether the App Engine app is running in the flexible environment. +func IsFlex() bool { + return internal.IsFlex() +} + +// IsAppEngine reports whether the App Engine app is running on App Engine, in either +// the standard or flexible environment. +func IsAppEngine() bool { + return internal.IsAppEngine() +} + +// IsSecondGen reports whether the App Engine app is running on the second generation +// runtimes (>= Go 1.11). +func IsSecondGen() bool { + return internal.IsSecondGen() +} + +// NewContext returns a context for an in-flight HTTP request. +// This function is cheap. +func NewContext(req *http.Request) context.Context { + return internal.ReqContext(req) +} + +// WithContext returns a copy of the parent context +// and associates it with an in-flight HTTP request. +// This function is cheap. +func WithContext(parent context.Context, req *http.Request) context.Context { + return internal.WithContext(parent, req) +} + +// BlobKey is a key for a blobstore blob. +// +// Conceptually, this type belongs in the blobstore package, but it lives in +// the appengine package to avoid a circular dependency: blobstore depends on +// datastore, and datastore needs to refer to the BlobKey type. +type BlobKey string + +// GeoPoint represents a location as latitude/longitude in degrees. +type GeoPoint struct { + Lat, Lng float64 +} + +// Valid returns whether a GeoPoint is within [-90, 90] latitude and [-180, 180] longitude. +func (g GeoPoint) Valid() bool { + return -90 <= g.Lat && g.Lat <= 90 && -180 <= g.Lng && g.Lng <= 180 +} + +// APICallFunc defines a function type for handling an API call. +// See WithCallOverride. +type APICallFunc func(ctx context.Context, service, method string, in, out proto.Message) error + +// WithAPICallFunc returns a copy of the parent context +// that will cause API calls to invoke f instead of their normal operation. +// +// This is intended for advanced users only. +func WithAPICallFunc(ctx context.Context, f APICallFunc) context.Context { + return internal.WithCallOverride(ctx, internal.CallOverrideFunc(f)) +} + +// APICall performs an API call. +// +// This is not intended for general use; it is exported for use in conjunction +// with WithAPICallFunc. +func APICall(ctx context.Context, service, method string, in, out proto.Message) error { + return internal.Call(ctx, service, method, in, out) +} diff --git a/vendor/google.golang.org/appengine/appengine_vm.go b/vendor/google.golang.org/appengine/appengine_vm.go new file mode 100644 index 000000000..f4b645aad --- /dev/null +++ b/vendor/google.golang.org/appengine/appengine_vm.go @@ -0,0 +1,20 @@ +// Copyright 2015 Google Inc. All rights reserved. 
+// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// +build !appengine + +package appengine + +import ( + "golang.org/x/net/context" + + "google.golang.org/appengine/internal" +) + +// BackgroundContext returns a context not associated with a request. +// This should only be used when not servicing a request. +// This only works in App Engine "flexible environment". +func BackgroundContext() context.Context { + return internal.BackgroundContext() +} diff --git a/vendor/google.golang.org/appengine/errors.go b/vendor/google.golang.org/appengine/errors.go new file mode 100644 index 000000000..16d0772e2 --- /dev/null +++ b/vendor/google.golang.org/appengine/errors.go @@ -0,0 +1,46 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// This file provides error functions for common API failure modes. + +package appengine + +import ( + "fmt" + + "google.golang.org/appengine/internal" +) + +// IsOverQuota reports whether err represents an API call failure +// due to insufficient available quota. +func IsOverQuota(err error) bool { + callErr, ok := err.(*internal.CallError) + return ok && callErr.Code == 4 +} + +// MultiError is returned by batch operations when there are errors with +// particular elements. Errors will be in a one-to-one correspondence with +// the input elements; successful elements will have a nil entry. +type MultiError []error + +func (m MultiError) Error() string { + s, n := "", 0 + for _, e := range m { + if e != nil { + if n == 0 { + s = e.Error() + } + n++ + } + } + switch n { + case 0: + return "(0 errors)" + case 1: + return s + case 2: + return s + " (and 1 other error)" + } + return fmt.Sprintf("%s (and %d other errors)", s, n-1) +} diff --git a/vendor/google.golang.org/appengine/identity.go b/vendor/google.golang.org/appengine/identity.go new file mode 100644 index 000000000..b8dcf8f36 --- /dev/null +++ b/vendor/google.golang.org/appengine/identity.go @@ -0,0 +1,142 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +package appengine + +import ( + "time" + + "golang.org/x/net/context" + + "google.golang.org/appengine/internal" + pb "google.golang.org/appengine/internal/app_identity" + modpb "google.golang.org/appengine/internal/modules" +) + +// AppID returns the application ID for the current application. +// The string will be a plain application ID (e.g. "appid"), with a +// domain prefix for custom domain deployments (e.g. "example.com:appid"). +func AppID(c context.Context) string { return internal.AppID(c) } + +// DefaultVersionHostname returns the standard hostname of the default version +// of the current application (e.g. "my-app.appspot.com"). This is suitable for +// use in constructing URLs. +func DefaultVersionHostname(c context.Context) string { + return internal.DefaultVersionHostname(c) +} + +// ModuleName returns the module name of the current instance. +func ModuleName(c context.Context) string { + return internal.ModuleName(c) +} + +// ModuleHostname returns a hostname of a module instance. +// If module is the empty string, it refers to the module of the current instance. +// If version is empty, it refers to the version of the current instance if valid, +// or the default version of the module of the current instance. 
+// If instance is empty, ModuleHostname returns the load-balancing hostname.
+func ModuleHostname(c context.Context, module, version, instance string) (string, error) {
+	req := &modpb.GetHostnameRequest{}
+	if module != "" {
+		req.Module = &module
+	}
+	if version != "" {
+		req.Version = &version
+	}
+	if instance != "" {
+		req.Instance = &instance
+	}
+	res := &modpb.GetHostnameResponse{}
+	if err := internal.Call(c, "modules", "GetHostname", req, res); err != nil {
+		return "", err
+	}
+	return *res.Hostname, nil
+}
+
+// VersionID returns the version ID for the current application.
+// It will be of the form "X.Y", where X is specified in app.yaml,
+// and Y is a number generated when each version of the app is uploaded.
+// It does not include a module name.
+func VersionID(c context.Context) string { return internal.VersionID(c) }
+
+// InstanceID returns a mostly-unique identifier for this instance.
+func InstanceID() string { return internal.InstanceID() }
+
+// Datacenter returns an identifier for the datacenter that the instance is running in.
+func Datacenter(c context.Context) string { return internal.Datacenter(c) }
+
+// ServerSoftware returns the App Engine release version.
+// In production, it looks like "Google App Engine/X.Y.Z".
+// In the development appserver, it looks like "Development/X.Y".
+func ServerSoftware() string { return internal.ServerSoftware() }
+
+// RequestID returns a string that uniquely identifies the request.
+func RequestID(c context.Context) string { return internal.RequestID(c) }
+
+// AccessToken generates an OAuth2 access token for the specified scopes on
+// behalf of the service account of this application. This token will expire
+// after the returned time.
+func AccessToken(c context.Context, scopes ...string) (token string, expiry time.Time, err error) {
+	req := &pb.GetAccessTokenRequest{Scope: scopes}
+	res := &pb.GetAccessTokenResponse{}
+
+	err = internal.Call(c, "app_identity_service", "GetAccessToken", req, res)
+	if err != nil {
+		return "", time.Time{}, err
+	}
+	return res.GetAccessToken(), time.Unix(res.GetExpirationTime(), 0), nil
+}
+
+// Certificate represents a public certificate for the app.
+type Certificate struct {
+	KeyName string
+	Data    []byte // PEM-encoded X.509 certificate
+}
+
+// PublicCertificates retrieves the public certificates for the app.
+// They can be used to verify a signature returned by SignBytes.
+func PublicCertificates(c context.Context) ([]Certificate, error) {
+	req := &pb.GetPublicCertificateForAppRequest{}
+	res := &pb.GetPublicCertificateForAppResponse{}
+	if err := internal.Call(c, "app_identity_service", "GetPublicCertificatesForApp", req, res); err != nil {
+		return nil, err
+	}
+	var cs []Certificate
+	for _, pc := range res.PublicCertificateList {
+		cs = append(cs, Certificate{
+			KeyName: pc.GetKeyName(),
+			Data:    []byte(pc.GetX509CertificatePem()),
+		})
+	}
+	return cs, nil
+}
+
+// ServiceAccount returns a string representing the service account name, in
+// the form of an email address (typically app_id@appspot.gserviceaccount.com).
+func ServiceAccount(c context.Context) (string, error) {
+	req := &pb.GetServiceAccountNameRequest{}
+	res := &pb.GetServiceAccountNameResponse{}
+
+	err := internal.Call(c, "app_identity_service", "GetServiceAccountName", req, res)
+	if err != nil {
+		return "", err
+	}
+	return res.GetServiceAccountName(), nil
+}
+
+// SignBytes signs bytes using a private key unique to your application.
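+// The returned key name identifies the certificate (retrievable via
+// PublicCertificates) that can be used to verify the signature.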
+func SignBytes(c context.Context, bytes []byte) (keyName string, signature []byte, err error) { + req := &pb.SignForAppRequest{BytesToSign: bytes} + res := &pb.SignForAppResponse{} + + if err := internal.Call(c, "app_identity_service", "SignForApp", req, res); err != nil { + return "", nil, err + } + return res.GetKeyName(), res.GetSignatureBytes(), nil +} + +func init() { + internal.RegisterErrorCodeMap("app_identity_service", pb.AppIdentityServiceError_ErrorCode_name) + internal.RegisterErrorCodeMap("modules", modpb.ModulesServiceError_ErrorCode_name) +} diff --git a/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go b/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go new file mode 100644 index 000000000..9a2ff77ab --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go @@ -0,0 +1,611 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google.golang.org/appengine/internal/app_identity/app_identity_service.proto + +package app_identity + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type AppIdentityServiceError_ErrorCode int32 + +const ( + AppIdentityServiceError_SUCCESS AppIdentityServiceError_ErrorCode = 0 + AppIdentityServiceError_UNKNOWN_SCOPE AppIdentityServiceError_ErrorCode = 9 + AppIdentityServiceError_BLOB_TOO_LARGE AppIdentityServiceError_ErrorCode = 1000 + AppIdentityServiceError_DEADLINE_EXCEEDED AppIdentityServiceError_ErrorCode = 1001 + AppIdentityServiceError_NOT_A_VALID_APP AppIdentityServiceError_ErrorCode = 1002 + AppIdentityServiceError_UNKNOWN_ERROR AppIdentityServiceError_ErrorCode = 1003 + AppIdentityServiceError_NOT_ALLOWED AppIdentityServiceError_ErrorCode = 1005 + AppIdentityServiceError_NOT_IMPLEMENTED AppIdentityServiceError_ErrorCode = 1006 +) + +var AppIdentityServiceError_ErrorCode_name = map[int32]string{ + 0: "SUCCESS", + 9: "UNKNOWN_SCOPE", + 1000: "BLOB_TOO_LARGE", + 1001: "DEADLINE_EXCEEDED", + 1002: "NOT_A_VALID_APP", + 1003: "UNKNOWN_ERROR", + 1005: "NOT_ALLOWED", + 1006: "NOT_IMPLEMENTED", +} +var AppIdentityServiceError_ErrorCode_value = map[string]int32{ + "SUCCESS": 0, + "UNKNOWN_SCOPE": 9, + "BLOB_TOO_LARGE": 1000, + "DEADLINE_EXCEEDED": 1001, + "NOT_A_VALID_APP": 1002, + "UNKNOWN_ERROR": 1003, + "NOT_ALLOWED": 1005, + "NOT_IMPLEMENTED": 1006, +} + +func (x AppIdentityServiceError_ErrorCode) Enum() *AppIdentityServiceError_ErrorCode { + p := new(AppIdentityServiceError_ErrorCode) + *p = x + return p +} +func (x AppIdentityServiceError_ErrorCode) String() string { + return proto.EnumName(AppIdentityServiceError_ErrorCode_name, int32(x)) +} +func (x *AppIdentityServiceError_ErrorCode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(AppIdentityServiceError_ErrorCode_value, data, "AppIdentityServiceError_ErrorCode") + if err != nil { + return err + } + *x = AppIdentityServiceError_ErrorCode(value) + return nil +} +func (AppIdentityServiceError_ErrorCode) EnumDescriptor() ([]byte, []int) { + return 
fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{0, 0} +} + +type AppIdentityServiceError struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AppIdentityServiceError) Reset() { *m = AppIdentityServiceError{} } +func (m *AppIdentityServiceError) String() string { return proto.CompactTextString(m) } +func (*AppIdentityServiceError) ProtoMessage() {} +func (*AppIdentityServiceError) Descriptor() ([]byte, []int) { + return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{0} +} +func (m *AppIdentityServiceError) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AppIdentityServiceError.Unmarshal(m, b) +} +func (m *AppIdentityServiceError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AppIdentityServiceError.Marshal(b, m, deterministic) +} +func (dst *AppIdentityServiceError) XXX_Merge(src proto.Message) { + xxx_messageInfo_AppIdentityServiceError.Merge(dst, src) +} +func (m *AppIdentityServiceError) XXX_Size() int { + return xxx_messageInfo_AppIdentityServiceError.Size(m) +} +func (m *AppIdentityServiceError) XXX_DiscardUnknown() { + xxx_messageInfo_AppIdentityServiceError.DiscardUnknown(m) +} + +var xxx_messageInfo_AppIdentityServiceError proto.InternalMessageInfo + +type SignForAppRequest struct { + BytesToSign []byte `protobuf:"bytes,1,opt,name=bytes_to_sign,json=bytesToSign" json:"bytes_to_sign,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SignForAppRequest) Reset() { *m = SignForAppRequest{} } +func (m *SignForAppRequest) String() string { return proto.CompactTextString(m) } +func (*SignForAppRequest) ProtoMessage() {} +func (*SignForAppRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{1} +} +func (m *SignForAppRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SignForAppRequest.Unmarshal(m, b) +} +func (m *SignForAppRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SignForAppRequest.Marshal(b, m, deterministic) +} +func (dst *SignForAppRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_SignForAppRequest.Merge(dst, src) +} +func (m *SignForAppRequest) XXX_Size() int { + return xxx_messageInfo_SignForAppRequest.Size(m) +} +func (m *SignForAppRequest) XXX_DiscardUnknown() { + xxx_messageInfo_SignForAppRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_SignForAppRequest proto.InternalMessageInfo + +func (m *SignForAppRequest) GetBytesToSign() []byte { + if m != nil { + return m.BytesToSign + } + return nil +} + +type SignForAppResponse struct { + KeyName *string `protobuf:"bytes,1,opt,name=key_name,json=keyName" json:"key_name,omitempty"` + SignatureBytes []byte `protobuf:"bytes,2,opt,name=signature_bytes,json=signatureBytes" json:"signature_bytes,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SignForAppResponse) Reset() { *m = SignForAppResponse{} } +func (m *SignForAppResponse) String() string { return proto.CompactTextString(m) } +func (*SignForAppResponse) ProtoMessage() {} +func (*SignForAppResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{2} +} +func (m *SignForAppResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SignForAppResponse.Unmarshal(m, b) +} 
+func (m *SignForAppResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SignForAppResponse.Marshal(b, m, deterministic) +} +func (dst *SignForAppResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_SignForAppResponse.Merge(dst, src) +} +func (m *SignForAppResponse) XXX_Size() int { + return xxx_messageInfo_SignForAppResponse.Size(m) +} +func (m *SignForAppResponse) XXX_DiscardUnknown() { + xxx_messageInfo_SignForAppResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_SignForAppResponse proto.InternalMessageInfo + +func (m *SignForAppResponse) GetKeyName() string { + if m != nil && m.KeyName != nil { + return *m.KeyName + } + return "" +} + +func (m *SignForAppResponse) GetSignatureBytes() []byte { + if m != nil { + return m.SignatureBytes + } + return nil +} + +type GetPublicCertificateForAppRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetPublicCertificateForAppRequest) Reset() { *m = GetPublicCertificateForAppRequest{} } +func (m *GetPublicCertificateForAppRequest) String() string { return proto.CompactTextString(m) } +func (*GetPublicCertificateForAppRequest) ProtoMessage() {} +func (*GetPublicCertificateForAppRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{3} +} +func (m *GetPublicCertificateForAppRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetPublicCertificateForAppRequest.Unmarshal(m, b) +} +func (m *GetPublicCertificateForAppRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetPublicCertificateForAppRequest.Marshal(b, m, deterministic) +} +func (dst *GetPublicCertificateForAppRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetPublicCertificateForAppRequest.Merge(dst, src) +} +func (m *GetPublicCertificateForAppRequest) XXX_Size() int { + return xxx_messageInfo_GetPublicCertificateForAppRequest.Size(m) +} +func (m *GetPublicCertificateForAppRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetPublicCertificateForAppRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetPublicCertificateForAppRequest proto.InternalMessageInfo + +type PublicCertificate struct { + KeyName *string `protobuf:"bytes,1,opt,name=key_name,json=keyName" json:"key_name,omitempty"` + X509CertificatePem *string `protobuf:"bytes,2,opt,name=x509_certificate_pem,json=x509CertificatePem" json:"x509_certificate_pem,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PublicCertificate) Reset() { *m = PublicCertificate{} } +func (m *PublicCertificate) String() string { return proto.CompactTextString(m) } +func (*PublicCertificate) ProtoMessage() {} +func (*PublicCertificate) Descriptor() ([]byte, []int) { + return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{4} +} +func (m *PublicCertificate) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PublicCertificate.Unmarshal(m, b) +} +func (m *PublicCertificate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PublicCertificate.Marshal(b, m, deterministic) +} +func (dst *PublicCertificate) XXX_Merge(src proto.Message) { + xxx_messageInfo_PublicCertificate.Merge(dst, src) +} +func (m *PublicCertificate) XXX_Size() int { + return xxx_messageInfo_PublicCertificate.Size(m) +} +func (m *PublicCertificate) XXX_DiscardUnknown() { + 
xxx_messageInfo_PublicCertificate.DiscardUnknown(m) +} + +var xxx_messageInfo_PublicCertificate proto.InternalMessageInfo + +func (m *PublicCertificate) GetKeyName() string { + if m != nil && m.KeyName != nil { + return *m.KeyName + } + return "" +} + +func (m *PublicCertificate) GetX509CertificatePem() string { + if m != nil && m.X509CertificatePem != nil { + return *m.X509CertificatePem + } + return "" +} + +type GetPublicCertificateForAppResponse struct { + PublicCertificateList []*PublicCertificate `protobuf:"bytes,1,rep,name=public_certificate_list,json=publicCertificateList" json:"public_certificate_list,omitempty"` + MaxClientCacheTimeInSecond *int64 `protobuf:"varint,2,opt,name=max_client_cache_time_in_second,json=maxClientCacheTimeInSecond" json:"max_client_cache_time_in_second,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetPublicCertificateForAppResponse) Reset() { *m = GetPublicCertificateForAppResponse{} } +func (m *GetPublicCertificateForAppResponse) String() string { return proto.CompactTextString(m) } +func (*GetPublicCertificateForAppResponse) ProtoMessage() {} +func (*GetPublicCertificateForAppResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{5} +} +func (m *GetPublicCertificateForAppResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetPublicCertificateForAppResponse.Unmarshal(m, b) +} +func (m *GetPublicCertificateForAppResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetPublicCertificateForAppResponse.Marshal(b, m, deterministic) +} +func (dst *GetPublicCertificateForAppResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetPublicCertificateForAppResponse.Merge(dst, src) +} +func (m *GetPublicCertificateForAppResponse) XXX_Size() int { + return xxx_messageInfo_GetPublicCertificateForAppResponse.Size(m) +} +func (m *GetPublicCertificateForAppResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetPublicCertificateForAppResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetPublicCertificateForAppResponse proto.InternalMessageInfo + +func (m *GetPublicCertificateForAppResponse) GetPublicCertificateList() []*PublicCertificate { + if m != nil { + return m.PublicCertificateList + } + return nil +} + +func (m *GetPublicCertificateForAppResponse) GetMaxClientCacheTimeInSecond() int64 { + if m != nil && m.MaxClientCacheTimeInSecond != nil { + return *m.MaxClientCacheTimeInSecond + } + return 0 +} + +type GetServiceAccountNameRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetServiceAccountNameRequest) Reset() { *m = GetServiceAccountNameRequest{} } +func (m *GetServiceAccountNameRequest) String() string { return proto.CompactTextString(m) } +func (*GetServiceAccountNameRequest) ProtoMessage() {} +func (*GetServiceAccountNameRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{6} +} +func (m *GetServiceAccountNameRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetServiceAccountNameRequest.Unmarshal(m, b) +} +func (m *GetServiceAccountNameRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetServiceAccountNameRequest.Marshal(b, m, deterministic) +} +func (dst *GetServiceAccountNameRequest) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_GetServiceAccountNameRequest.Merge(dst, src) +} +func (m *GetServiceAccountNameRequest) XXX_Size() int { + return xxx_messageInfo_GetServiceAccountNameRequest.Size(m) +} +func (m *GetServiceAccountNameRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetServiceAccountNameRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetServiceAccountNameRequest proto.InternalMessageInfo + +type GetServiceAccountNameResponse struct { + ServiceAccountName *string `protobuf:"bytes,1,opt,name=service_account_name,json=serviceAccountName" json:"service_account_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetServiceAccountNameResponse) Reset() { *m = GetServiceAccountNameResponse{} } +func (m *GetServiceAccountNameResponse) String() string { return proto.CompactTextString(m) } +func (*GetServiceAccountNameResponse) ProtoMessage() {} +func (*GetServiceAccountNameResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{7} +} +func (m *GetServiceAccountNameResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetServiceAccountNameResponse.Unmarshal(m, b) +} +func (m *GetServiceAccountNameResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetServiceAccountNameResponse.Marshal(b, m, deterministic) +} +func (dst *GetServiceAccountNameResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetServiceAccountNameResponse.Merge(dst, src) +} +func (m *GetServiceAccountNameResponse) XXX_Size() int { + return xxx_messageInfo_GetServiceAccountNameResponse.Size(m) +} +func (m *GetServiceAccountNameResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetServiceAccountNameResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetServiceAccountNameResponse proto.InternalMessageInfo + +func (m *GetServiceAccountNameResponse) GetServiceAccountName() string { + if m != nil && m.ServiceAccountName != nil { + return *m.ServiceAccountName + } + return "" +} + +type GetAccessTokenRequest struct { + Scope []string `protobuf:"bytes,1,rep,name=scope" json:"scope,omitempty"` + ServiceAccountId *int64 `protobuf:"varint,2,opt,name=service_account_id,json=serviceAccountId" json:"service_account_id,omitempty"` + ServiceAccountName *string `protobuf:"bytes,3,opt,name=service_account_name,json=serviceAccountName" json:"service_account_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetAccessTokenRequest) Reset() { *m = GetAccessTokenRequest{} } +func (m *GetAccessTokenRequest) String() string { return proto.CompactTextString(m) } +func (*GetAccessTokenRequest) ProtoMessage() {} +func (*GetAccessTokenRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{8} +} +func (m *GetAccessTokenRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetAccessTokenRequest.Unmarshal(m, b) +} +func (m *GetAccessTokenRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetAccessTokenRequest.Marshal(b, m, deterministic) +} +func (dst *GetAccessTokenRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetAccessTokenRequest.Merge(dst, src) +} +func (m *GetAccessTokenRequest) XXX_Size() int { + return xxx_messageInfo_GetAccessTokenRequest.Size(m) +} +func (m *GetAccessTokenRequest) XXX_DiscardUnknown() { + 
xxx_messageInfo_GetAccessTokenRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetAccessTokenRequest proto.InternalMessageInfo + +func (m *GetAccessTokenRequest) GetScope() []string { + if m != nil { + return m.Scope + } + return nil +} + +func (m *GetAccessTokenRequest) GetServiceAccountId() int64 { + if m != nil && m.ServiceAccountId != nil { + return *m.ServiceAccountId + } + return 0 +} + +func (m *GetAccessTokenRequest) GetServiceAccountName() string { + if m != nil && m.ServiceAccountName != nil { + return *m.ServiceAccountName + } + return "" +} + +type GetAccessTokenResponse struct { + AccessToken *string `protobuf:"bytes,1,opt,name=access_token,json=accessToken" json:"access_token,omitempty"` + ExpirationTime *int64 `protobuf:"varint,2,opt,name=expiration_time,json=expirationTime" json:"expiration_time,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetAccessTokenResponse) Reset() { *m = GetAccessTokenResponse{} } +func (m *GetAccessTokenResponse) String() string { return proto.CompactTextString(m) } +func (*GetAccessTokenResponse) ProtoMessage() {} +func (*GetAccessTokenResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{9} +} +func (m *GetAccessTokenResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetAccessTokenResponse.Unmarshal(m, b) +} +func (m *GetAccessTokenResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetAccessTokenResponse.Marshal(b, m, deterministic) +} +func (dst *GetAccessTokenResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetAccessTokenResponse.Merge(dst, src) +} +func (m *GetAccessTokenResponse) XXX_Size() int { + return xxx_messageInfo_GetAccessTokenResponse.Size(m) +} +func (m *GetAccessTokenResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetAccessTokenResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetAccessTokenResponse proto.InternalMessageInfo + +func (m *GetAccessTokenResponse) GetAccessToken() string { + if m != nil && m.AccessToken != nil { + return *m.AccessToken + } + return "" +} + +func (m *GetAccessTokenResponse) GetExpirationTime() int64 { + if m != nil && m.ExpirationTime != nil { + return *m.ExpirationTime + } + return 0 +} + +type GetDefaultGcsBucketNameRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetDefaultGcsBucketNameRequest) Reset() { *m = GetDefaultGcsBucketNameRequest{} } +func (m *GetDefaultGcsBucketNameRequest) String() string { return proto.CompactTextString(m) } +func (*GetDefaultGcsBucketNameRequest) ProtoMessage() {} +func (*GetDefaultGcsBucketNameRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{10} +} +func (m *GetDefaultGcsBucketNameRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetDefaultGcsBucketNameRequest.Unmarshal(m, b) +} +func (m *GetDefaultGcsBucketNameRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetDefaultGcsBucketNameRequest.Marshal(b, m, deterministic) +} +func (dst *GetDefaultGcsBucketNameRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetDefaultGcsBucketNameRequest.Merge(dst, src) +} +func (m *GetDefaultGcsBucketNameRequest) XXX_Size() int { + return xxx_messageInfo_GetDefaultGcsBucketNameRequest.Size(m) +} +func (m *GetDefaultGcsBucketNameRequest) 
XXX_DiscardUnknown() { + xxx_messageInfo_GetDefaultGcsBucketNameRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetDefaultGcsBucketNameRequest proto.InternalMessageInfo + +type GetDefaultGcsBucketNameResponse struct { + DefaultGcsBucketName *string `protobuf:"bytes,1,opt,name=default_gcs_bucket_name,json=defaultGcsBucketName" json:"default_gcs_bucket_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetDefaultGcsBucketNameResponse) Reset() { *m = GetDefaultGcsBucketNameResponse{} } +func (m *GetDefaultGcsBucketNameResponse) String() string { return proto.CompactTextString(m) } +func (*GetDefaultGcsBucketNameResponse) ProtoMessage() {} +func (*GetDefaultGcsBucketNameResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{11} +} +func (m *GetDefaultGcsBucketNameResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetDefaultGcsBucketNameResponse.Unmarshal(m, b) +} +func (m *GetDefaultGcsBucketNameResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetDefaultGcsBucketNameResponse.Marshal(b, m, deterministic) +} +func (dst *GetDefaultGcsBucketNameResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetDefaultGcsBucketNameResponse.Merge(dst, src) +} +func (m *GetDefaultGcsBucketNameResponse) XXX_Size() int { + return xxx_messageInfo_GetDefaultGcsBucketNameResponse.Size(m) +} +func (m *GetDefaultGcsBucketNameResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetDefaultGcsBucketNameResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetDefaultGcsBucketNameResponse proto.InternalMessageInfo + +func (m *GetDefaultGcsBucketNameResponse) GetDefaultGcsBucketName() string { + if m != nil && m.DefaultGcsBucketName != nil { + return *m.DefaultGcsBucketName + } + return "" +} + +func init() { + proto.RegisterType((*AppIdentityServiceError)(nil), "appengine.AppIdentityServiceError") + proto.RegisterType((*SignForAppRequest)(nil), "appengine.SignForAppRequest") + proto.RegisterType((*SignForAppResponse)(nil), "appengine.SignForAppResponse") + proto.RegisterType((*GetPublicCertificateForAppRequest)(nil), "appengine.GetPublicCertificateForAppRequest") + proto.RegisterType((*PublicCertificate)(nil), "appengine.PublicCertificate") + proto.RegisterType((*GetPublicCertificateForAppResponse)(nil), "appengine.GetPublicCertificateForAppResponse") + proto.RegisterType((*GetServiceAccountNameRequest)(nil), "appengine.GetServiceAccountNameRequest") + proto.RegisterType((*GetServiceAccountNameResponse)(nil), "appengine.GetServiceAccountNameResponse") + proto.RegisterType((*GetAccessTokenRequest)(nil), "appengine.GetAccessTokenRequest") + proto.RegisterType((*GetAccessTokenResponse)(nil), "appengine.GetAccessTokenResponse") + proto.RegisterType((*GetDefaultGcsBucketNameRequest)(nil), "appengine.GetDefaultGcsBucketNameRequest") + proto.RegisterType((*GetDefaultGcsBucketNameResponse)(nil), "appengine.GetDefaultGcsBucketNameResponse") +} + +func init() { + proto.RegisterFile("google.golang.org/appengine/internal/app_identity/app_identity_service.proto", fileDescriptor_app_identity_service_08a6e3f74b04cfa4) +} + +var fileDescriptor_app_identity_service_08a6e3f74b04cfa4 = []byte{ + // 676 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x54, 0xdb, 0x6e, 0xda, 0x58, + 0x14, 0x1d, 0x26, 0x1a, 0x31, 0x6c, 0x12, 0x62, 0xce, 0x90, 0xcb, 0x8c, 0x32, 0xb9, 0x78, 
0x1e, + 0x26, 0x0f, 0x15, 0x89, 0x2a, 0x45, 0x55, 0x1f, 0x8d, 0xed, 0x22, 0x54, 0x07, 0x53, 0x43, 0x9a, + 0xa8, 0x2f, 0xa7, 0xce, 0x61, 0xc7, 0x3d, 0x02, 0x9f, 0xe3, 0xda, 0x87, 0x0a, 0x3e, 0xa2, 0x3f, + 0xd2, 0x9f, 0xe8, 0x5b, 0xbf, 0xa5, 0x17, 0xb5, 0xdf, 0x50, 0xd9, 0x38, 0x5c, 0x92, 0x92, 0x37, + 0xbc, 0xf6, 0x5a, 0xcb, 0x6b, 0x2f, 0x6d, 0x0c, 0x4e, 0x20, 0x65, 0x30, 0xc4, 0x7a, 0x20, 0x87, + 0xbe, 0x08, 0xea, 0x32, 0x0e, 0x4e, 0xfc, 0x28, 0x42, 0x11, 0x70, 0x81, 0x27, 0x5c, 0x28, 0x8c, + 0x85, 0x3f, 0x4c, 0x21, 0xca, 0xfb, 0x28, 0x14, 0x57, 0x93, 0xa5, 0x07, 0x9a, 0x60, 0xfc, 0x8e, + 0x33, 0xac, 0x47, 0xb1, 0x54, 0x92, 0x94, 0x66, 0x5a, 0xfd, 0x53, 0x01, 0x76, 0x8c, 0x28, 0x6a, + 0xe5, 0xc4, 0xee, 0x94, 0x67, 0xc7, 0xb1, 0x8c, 0xf5, 0x0f, 0x05, 0x28, 0x65, 0xbf, 0x4c, 0xd9, + 0x47, 0x52, 0x86, 0x62, 0xf7, 0xc2, 0x34, 0xed, 0x6e, 0x57, 0xfb, 0x8d, 0x54, 0x61, 0xe3, 0xa2, + 0xfd, 0xbc, 0xed, 0x5e, 0xb6, 0x69, 0xd7, 0x74, 0x3b, 0xb6, 0x56, 0x22, 0x7f, 0x41, 0xa5, 0xe1, + 0xb8, 0x0d, 0xda, 0x73, 0x5d, 0xea, 0x18, 0x5e, 0xd3, 0xd6, 0x3e, 0x17, 0xc9, 0x36, 0x54, 0x2d, + 0xdb, 0xb0, 0x9c, 0x56, 0xdb, 0xa6, 0xf6, 0x95, 0x69, 0xdb, 0x96, 0x6d, 0x69, 0x5f, 0x8a, 0xa4, + 0x06, 0x9b, 0x6d, 0xb7, 0x47, 0x0d, 0xfa, 0xd2, 0x70, 0x5a, 0x16, 0x35, 0x3a, 0x1d, 0xed, 0x6b, + 0x91, 0x90, 0xb9, 0xab, 0xed, 0x79, 0xae, 0xa7, 0x7d, 0x2b, 0x12, 0x0d, 0xca, 0x19, 0xd3, 0x71, + 0xdc, 0x4b, 0xdb, 0xd2, 0xbe, 0xcf, 0xb4, 0xad, 0xf3, 0x8e, 0x63, 0x9f, 0xdb, 0xed, 0x9e, 0x6d, + 0x69, 0x3f, 0x8a, 0xfa, 0x13, 0xa8, 0x76, 0x79, 0x20, 0x9e, 0xc9, 0xd8, 0x88, 0x22, 0x0f, 0xdf, + 0x8e, 0x30, 0x51, 0x44, 0x87, 0x8d, 0xeb, 0x89, 0xc2, 0x84, 0x2a, 0x49, 0x13, 0x1e, 0x88, 0xdd, + 0xc2, 0x61, 0xe1, 0x78, 0xdd, 0x2b, 0x67, 0x60, 0x4f, 0xa6, 0x02, 0xfd, 0x0a, 0xc8, 0xa2, 0x30, + 0x89, 0xa4, 0x48, 0x90, 0xfc, 0x0d, 0x7f, 0x0e, 0x70, 0x42, 0x85, 0x1f, 0x62, 0x26, 0x2a, 0x79, + 0xc5, 0x01, 0x4e, 0xda, 0x7e, 0x88, 0xe4, 0x7f, 0xd8, 0x4c, 0xbd, 0x7c, 0x35, 0x8a, 0x91, 0x66, + 0x4e, 0xbb, 0xbf, 0x67, 0xb6, 0x95, 0x19, 0xdc, 0x48, 0x51, 0xfd, 0x3f, 0x38, 0x6a, 0xa2, 0xea, + 0x8c, 0xae, 0x87, 0x9c, 0x99, 0x18, 0x2b, 0x7e, 0xc3, 0x99, 0xaf, 0x70, 0x29, 0xa2, 0xfe, 0x1a, + 0xaa, 0xf7, 0x18, 0x0f, 0xbd, 0xfd, 0x14, 0x6a, 0xe3, 0xb3, 0xd3, 0xa7, 0x94, 0xcd, 0xe9, 0x34, + 0xc2, 0x30, 0x8b, 0x50, 0xf2, 0x48, 0x3a, 0x5b, 0x70, 0xea, 0x60, 0xa8, 0x7f, 0x2c, 0x80, 0xfe, + 0x50, 0x8e, 0x7c, 0xe3, 0x1e, 0xec, 0x44, 0x19, 0x65, 0xc9, 0x7a, 0xc8, 0x13, 0xb5, 0x5b, 0x38, + 0x5c, 0x3b, 0x2e, 0x3f, 0xde, 0xab, 0xcf, 0xce, 0xa6, 0x7e, 0xcf, 0xcc, 0xdb, 0x8a, 0xee, 0x42, + 0x0e, 0x4f, 0x14, 0x31, 0xe1, 0x20, 0xf4, 0xc7, 0x94, 0x0d, 0x39, 0x0a, 0x45, 0x99, 0xcf, 0xde, + 0x20, 0x55, 0x3c, 0x44, 0xca, 0x05, 0x4d, 0x90, 0x49, 0xd1, 0xcf, 0x92, 0xaf, 0x79, 0xff, 0x84, + 0xfe, 0xd8, 0xcc, 0x58, 0x66, 0x4a, 0xea, 0xf1, 0x10, 0x5b, 0xa2, 0x9b, 0x31, 0xf4, 0x7d, 0xd8, + 0x6b, 0xa2, 0xca, 0x6f, 0xd3, 0x60, 0x4c, 0x8e, 0x84, 0x4a, 0xcb, 0xb8, 0xed, 0xf0, 0x05, 0xfc, + 0xbb, 0x62, 0x9e, 0xef, 0x76, 0x0a, 0xb5, 0xfc, 0x1f, 0x40, 0xfd, 0xe9, 0x78, 0xb1, 0x5b, 0x92, + 0xdc, 0x53, 0xea, 0xef, 0x0b, 0xb0, 0xd5, 0x44, 0x65, 0x30, 0x86, 0x49, 0xd2, 0x93, 0x03, 0x14, + 0xb7, 0x37, 0x55, 0x83, 0x3f, 0x12, 0x26, 0x23, 0xcc, 0x5a, 0x29, 0x79, 0xd3, 0x07, 0xf2, 0x08, + 0xc8, 0xdd, 0x37, 0xf0, 0xdb, 0xd5, 0xb4, 0x65, 0xff, 0x56, 0x7f, 0x65, 0x9e, 0xb5, 0x95, 0x79, + 0xfa, 0xb0, 0x7d, 0x37, 0x4e, 0xbe, 0xdb, 0x11, 0xac, 0xfb, 0x19, 0x4c, 0x55, 0x8a, 0xe7, 0x3b, + 0x95, 0xfd, 0x39, 0x35, 0xbd, 0x58, 0x1c, 0x47, 0x3c, 0xf6, 0x15, 0x97, 0x22, 0xab, 0x3f, 0x4f, + 0x56, 0x99, 0xc3, 
0x69, 0xe1, 0xfa, 0x21, 0xec, 0x37, 0x51, 0x59, 0x78, 0xe3, 0x8f, 0x86, 0xaa, + 0xc9, 0x92, 0xc6, 0x88, 0x0d, 0x70, 0xa9, 0xea, 0x2b, 0x38, 0x58, 0xc9, 0xc8, 0x03, 0x9d, 0xc1, + 0x4e, 0x7f, 0x3a, 0xa7, 0x01, 0x4b, 0xe8, 0x75, 0xc6, 0x58, 0xec, 0xbb, 0xd6, 0xff, 0x85, 0xbc, + 0x51, 0x79, 0xb5, 0xbe, 0xf8, 0xc9, 0xfa, 0x19, 0x00, 0x00, 0xff, 0xff, 0x37, 0x4c, 0x56, 0x38, + 0xf3, 0x04, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go b/vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go new file mode 100644 index 000000000..ddfc0c04a --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go @@ -0,0 +1,786 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google.golang.org/appengine/internal/modules/modules_service.proto + +package modules + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type ModulesServiceError_ErrorCode int32 + +const ( + ModulesServiceError_OK ModulesServiceError_ErrorCode = 0 + ModulesServiceError_INVALID_MODULE ModulesServiceError_ErrorCode = 1 + ModulesServiceError_INVALID_VERSION ModulesServiceError_ErrorCode = 2 + ModulesServiceError_INVALID_INSTANCES ModulesServiceError_ErrorCode = 3 + ModulesServiceError_TRANSIENT_ERROR ModulesServiceError_ErrorCode = 4 + ModulesServiceError_UNEXPECTED_STATE ModulesServiceError_ErrorCode = 5 +) + +var ModulesServiceError_ErrorCode_name = map[int32]string{ + 0: "OK", + 1: "INVALID_MODULE", + 2: "INVALID_VERSION", + 3: "INVALID_INSTANCES", + 4: "TRANSIENT_ERROR", + 5: "UNEXPECTED_STATE", +} +var ModulesServiceError_ErrorCode_value = map[string]int32{ + "OK": 0, + "INVALID_MODULE": 1, + "INVALID_VERSION": 2, + "INVALID_INSTANCES": 3, + "TRANSIENT_ERROR": 4, + "UNEXPECTED_STATE": 5, +} + +func (x ModulesServiceError_ErrorCode) Enum() *ModulesServiceError_ErrorCode { + p := new(ModulesServiceError_ErrorCode) + *p = x + return p +} +func (x ModulesServiceError_ErrorCode) String() string { + return proto.EnumName(ModulesServiceError_ErrorCode_name, int32(x)) +} +func (x *ModulesServiceError_ErrorCode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(ModulesServiceError_ErrorCode_value, data, "ModulesServiceError_ErrorCode") + if err != nil { + return err + } + *x = ModulesServiceError_ErrorCode(value) + return nil +} +func (ModulesServiceError_ErrorCode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{0, 0} +} + +type ModulesServiceError struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ModulesServiceError) Reset() { *m = ModulesServiceError{} } +func (m *ModulesServiceError) String() string { return proto.CompactTextString(m) } +func (*ModulesServiceError) ProtoMessage() {} +func (*ModulesServiceError) Descriptor() ([]byte, []int) { + return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{0} +} +func (m *ModulesServiceError) XXX_Unmarshal(b []byte) 
error { + return xxx_messageInfo_ModulesServiceError.Unmarshal(m, b) +} +func (m *ModulesServiceError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ModulesServiceError.Marshal(b, m, deterministic) +} +func (dst *ModulesServiceError) XXX_Merge(src proto.Message) { + xxx_messageInfo_ModulesServiceError.Merge(dst, src) +} +func (m *ModulesServiceError) XXX_Size() int { + return xxx_messageInfo_ModulesServiceError.Size(m) +} +func (m *ModulesServiceError) XXX_DiscardUnknown() { + xxx_messageInfo_ModulesServiceError.DiscardUnknown(m) +} + +var xxx_messageInfo_ModulesServiceError proto.InternalMessageInfo + +type GetModulesRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetModulesRequest) Reset() { *m = GetModulesRequest{} } +func (m *GetModulesRequest) String() string { return proto.CompactTextString(m) } +func (*GetModulesRequest) ProtoMessage() {} +func (*GetModulesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{1} +} +func (m *GetModulesRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetModulesRequest.Unmarshal(m, b) +} +func (m *GetModulesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetModulesRequest.Marshal(b, m, deterministic) +} +func (dst *GetModulesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetModulesRequest.Merge(dst, src) +} +func (m *GetModulesRequest) XXX_Size() int { + return xxx_messageInfo_GetModulesRequest.Size(m) +} +func (m *GetModulesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetModulesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetModulesRequest proto.InternalMessageInfo + +type GetModulesResponse struct { + Module []string `protobuf:"bytes,1,rep,name=module" json:"module,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetModulesResponse) Reset() { *m = GetModulesResponse{} } +func (m *GetModulesResponse) String() string { return proto.CompactTextString(m) } +func (*GetModulesResponse) ProtoMessage() {} +func (*GetModulesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{2} +} +func (m *GetModulesResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetModulesResponse.Unmarshal(m, b) +} +func (m *GetModulesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetModulesResponse.Marshal(b, m, deterministic) +} +func (dst *GetModulesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetModulesResponse.Merge(dst, src) +} +func (m *GetModulesResponse) XXX_Size() int { + return xxx_messageInfo_GetModulesResponse.Size(m) +} +func (m *GetModulesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetModulesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetModulesResponse proto.InternalMessageInfo + +func (m *GetModulesResponse) GetModule() []string { + if m != nil { + return m.Module + } + return nil +} + +type GetVersionsRequest struct { + Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetVersionsRequest) Reset() { *m = GetVersionsRequest{} } +func (m *GetVersionsRequest) String() string { return proto.CompactTextString(m) } +func 
(*GetVersionsRequest) ProtoMessage() {} +func (*GetVersionsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{3} +} +func (m *GetVersionsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetVersionsRequest.Unmarshal(m, b) +} +func (m *GetVersionsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetVersionsRequest.Marshal(b, m, deterministic) +} +func (dst *GetVersionsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetVersionsRequest.Merge(dst, src) +} +func (m *GetVersionsRequest) XXX_Size() int { + return xxx_messageInfo_GetVersionsRequest.Size(m) +} +func (m *GetVersionsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetVersionsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetVersionsRequest proto.InternalMessageInfo + +func (m *GetVersionsRequest) GetModule() string { + if m != nil && m.Module != nil { + return *m.Module + } + return "" +} + +type GetVersionsResponse struct { + Version []string `protobuf:"bytes,1,rep,name=version" json:"version,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetVersionsResponse) Reset() { *m = GetVersionsResponse{} } +func (m *GetVersionsResponse) String() string { return proto.CompactTextString(m) } +func (*GetVersionsResponse) ProtoMessage() {} +func (*GetVersionsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{4} +} +func (m *GetVersionsResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetVersionsResponse.Unmarshal(m, b) +} +func (m *GetVersionsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetVersionsResponse.Marshal(b, m, deterministic) +} +func (dst *GetVersionsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetVersionsResponse.Merge(dst, src) +} +func (m *GetVersionsResponse) XXX_Size() int { + return xxx_messageInfo_GetVersionsResponse.Size(m) +} +func (m *GetVersionsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetVersionsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetVersionsResponse proto.InternalMessageInfo + +func (m *GetVersionsResponse) GetVersion() []string { + if m != nil { + return m.Version + } + return nil +} + +type GetDefaultVersionRequest struct { + Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetDefaultVersionRequest) Reset() { *m = GetDefaultVersionRequest{} } +func (m *GetDefaultVersionRequest) String() string { return proto.CompactTextString(m) } +func (*GetDefaultVersionRequest) ProtoMessage() {} +func (*GetDefaultVersionRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{5} +} +func (m *GetDefaultVersionRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetDefaultVersionRequest.Unmarshal(m, b) +} +func (m *GetDefaultVersionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetDefaultVersionRequest.Marshal(b, m, deterministic) +} +func (dst *GetDefaultVersionRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetDefaultVersionRequest.Merge(dst, src) +} +func (m *GetDefaultVersionRequest) XXX_Size() int { + return xxx_messageInfo_GetDefaultVersionRequest.Size(m) +} +func (m 
*GetDefaultVersionRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetDefaultVersionRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetDefaultVersionRequest proto.InternalMessageInfo + +func (m *GetDefaultVersionRequest) GetModule() string { + if m != nil && m.Module != nil { + return *m.Module + } + return "" +} + +type GetDefaultVersionResponse struct { + Version *string `protobuf:"bytes,1,req,name=version" json:"version,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetDefaultVersionResponse) Reset() { *m = GetDefaultVersionResponse{} } +func (m *GetDefaultVersionResponse) String() string { return proto.CompactTextString(m) } +func (*GetDefaultVersionResponse) ProtoMessage() {} +func (*GetDefaultVersionResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{6} +} +func (m *GetDefaultVersionResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetDefaultVersionResponse.Unmarshal(m, b) +} +func (m *GetDefaultVersionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetDefaultVersionResponse.Marshal(b, m, deterministic) +} +func (dst *GetDefaultVersionResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetDefaultVersionResponse.Merge(dst, src) +} +func (m *GetDefaultVersionResponse) XXX_Size() int { + return xxx_messageInfo_GetDefaultVersionResponse.Size(m) +} +func (m *GetDefaultVersionResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetDefaultVersionResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetDefaultVersionResponse proto.InternalMessageInfo + +func (m *GetDefaultVersionResponse) GetVersion() string { + if m != nil && m.Version != nil { + return *m.Version + } + return "" +} + +type GetNumInstancesRequest struct { + Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"` + Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetNumInstancesRequest) Reset() { *m = GetNumInstancesRequest{} } +func (m *GetNumInstancesRequest) String() string { return proto.CompactTextString(m) } +func (*GetNumInstancesRequest) ProtoMessage() {} +func (*GetNumInstancesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{7} +} +func (m *GetNumInstancesRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetNumInstancesRequest.Unmarshal(m, b) +} +func (m *GetNumInstancesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetNumInstancesRequest.Marshal(b, m, deterministic) +} +func (dst *GetNumInstancesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetNumInstancesRequest.Merge(dst, src) +} +func (m *GetNumInstancesRequest) XXX_Size() int { + return xxx_messageInfo_GetNumInstancesRequest.Size(m) +} +func (m *GetNumInstancesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetNumInstancesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetNumInstancesRequest proto.InternalMessageInfo + +func (m *GetNumInstancesRequest) GetModule() string { + if m != nil && m.Module != nil { + return *m.Module + } + return "" +} + +func (m *GetNumInstancesRequest) GetVersion() string { + if m != nil && m.Version != nil { + return *m.Version + } + return "" +} + +type GetNumInstancesResponse struct { + Instances *int64 
`protobuf:"varint,1,req,name=instances" json:"instances,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetNumInstancesResponse) Reset() { *m = GetNumInstancesResponse{} } +func (m *GetNumInstancesResponse) String() string { return proto.CompactTextString(m) } +func (*GetNumInstancesResponse) ProtoMessage() {} +func (*GetNumInstancesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{8} +} +func (m *GetNumInstancesResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetNumInstancesResponse.Unmarshal(m, b) +} +func (m *GetNumInstancesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetNumInstancesResponse.Marshal(b, m, deterministic) +} +func (dst *GetNumInstancesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetNumInstancesResponse.Merge(dst, src) +} +func (m *GetNumInstancesResponse) XXX_Size() int { + return xxx_messageInfo_GetNumInstancesResponse.Size(m) +} +func (m *GetNumInstancesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetNumInstancesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetNumInstancesResponse proto.InternalMessageInfo + +func (m *GetNumInstancesResponse) GetInstances() int64 { + if m != nil && m.Instances != nil { + return *m.Instances + } + return 0 +} + +type SetNumInstancesRequest struct { + Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"` + Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"` + Instances *int64 `protobuf:"varint,3,req,name=instances" json:"instances,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SetNumInstancesRequest) Reset() { *m = SetNumInstancesRequest{} } +func (m *SetNumInstancesRequest) String() string { return proto.CompactTextString(m) } +func (*SetNumInstancesRequest) ProtoMessage() {} +func (*SetNumInstancesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{9} +} +func (m *SetNumInstancesRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SetNumInstancesRequest.Unmarshal(m, b) +} +func (m *SetNumInstancesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SetNumInstancesRequest.Marshal(b, m, deterministic) +} +func (dst *SetNumInstancesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_SetNumInstancesRequest.Merge(dst, src) +} +func (m *SetNumInstancesRequest) XXX_Size() int { + return xxx_messageInfo_SetNumInstancesRequest.Size(m) +} +func (m *SetNumInstancesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_SetNumInstancesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_SetNumInstancesRequest proto.InternalMessageInfo + +func (m *SetNumInstancesRequest) GetModule() string { + if m != nil && m.Module != nil { + return *m.Module + } + return "" +} + +func (m *SetNumInstancesRequest) GetVersion() string { + if m != nil && m.Version != nil { + return *m.Version + } + return "" +} + +func (m *SetNumInstancesRequest) GetInstances() int64 { + if m != nil && m.Instances != nil { + return *m.Instances + } + return 0 +} + +type SetNumInstancesResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SetNumInstancesResponse) Reset() { *m = SetNumInstancesResponse{} } +func (m 
*SetNumInstancesResponse) String() string { return proto.CompactTextString(m) } +func (*SetNumInstancesResponse) ProtoMessage() {} +func (*SetNumInstancesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{10} +} +func (m *SetNumInstancesResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SetNumInstancesResponse.Unmarshal(m, b) +} +func (m *SetNumInstancesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SetNumInstancesResponse.Marshal(b, m, deterministic) +} +func (dst *SetNumInstancesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_SetNumInstancesResponse.Merge(dst, src) +} +func (m *SetNumInstancesResponse) XXX_Size() int { + return xxx_messageInfo_SetNumInstancesResponse.Size(m) +} +func (m *SetNumInstancesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_SetNumInstancesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_SetNumInstancesResponse proto.InternalMessageInfo + +type StartModuleRequest struct { + Module *string `protobuf:"bytes,1,req,name=module" json:"module,omitempty"` + Version *string `protobuf:"bytes,2,req,name=version" json:"version,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StartModuleRequest) Reset() { *m = StartModuleRequest{} } +func (m *StartModuleRequest) String() string { return proto.CompactTextString(m) } +func (*StartModuleRequest) ProtoMessage() {} +func (*StartModuleRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{11} +} +func (m *StartModuleRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StartModuleRequest.Unmarshal(m, b) +} +func (m *StartModuleRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StartModuleRequest.Marshal(b, m, deterministic) +} +func (dst *StartModuleRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_StartModuleRequest.Merge(dst, src) +} +func (m *StartModuleRequest) XXX_Size() int { + return xxx_messageInfo_StartModuleRequest.Size(m) +} +func (m *StartModuleRequest) XXX_DiscardUnknown() { + xxx_messageInfo_StartModuleRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_StartModuleRequest proto.InternalMessageInfo + +func (m *StartModuleRequest) GetModule() string { + if m != nil && m.Module != nil { + return *m.Module + } + return "" +} + +func (m *StartModuleRequest) GetVersion() string { + if m != nil && m.Version != nil { + return *m.Version + } + return "" +} + +type StartModuleResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StartModuleResponse) Reset() { *m = StartModuleResponse{} } +func (m *StartModuleResponse) String() string { return proto.CompactTextString(m) } +func (*StartModuleResponse) ProtoMessage() {} +func (*StartModuleResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{12} +} +func (m *StartModuleResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StartModuleResponse.Unmarshal(m, b) +} +func (m *StartModuleResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StartModuleResponse.Marshal(b, m, deterministic) +} +func (dst *StartModuleResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_StartModuleResponse.Merge(dst, src) +} +func (m *StartModuleResponse) XXX_Size() int { + 
return xxx_messageInfo_StartModuleResponse.Size(m) +} +func (m *StartModuleResponse) XXX_DiscardUnknown() { + xxx_messageInfo_StartModuleResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_StartModuleResponse proto.InternalMessageInfo + +type StopModuleRequest struct { + Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"` + Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StopModuleRequest) Reset() { *m = StopModuleRequest{} } +func (m *StopModuleRequest) String() string { return proto.CompactTextString(m) } +func (*StopModuleRequest) ProtoMessage() {} +func (*StopModuleRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{13} +} +func (m *StopModuleRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StopModuleRequest.Unmarshal(m, b) +} +func (m *StopModuleRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StopModuleRequest.Marshal(b, m, deterministic) +} +func (dst *StopModuleRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_StopModuleRequest.Merge(dst, src) +} +func (m *StopModuleRequest) XXX_Size() int { + return xxx_messageInfo_StopModuleRequest.Size(m) +} +func (m *StopModuleRequest) XXX_DiscardUnknown() { + xxx_messageInfo_StopModuleRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_StopModuleRequest proto.InternalMessageInfo + +func (m *StopModuleRequest) GetModule() string { + if m != nil && m.Module != nil { + return *m.Module + } + return "" +} + +func (m *StopModuleRequest) GetVersion() string { + if m != nil && m.Version != nil { + return *m.Version + } + return "" +} + +type StopModuleResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StopModuleResponse) Reset() { *m = StopModuleResponse{} } +func (m *StopModuleResponse) String() string { return proto.CompactTextString(m) } +func (*StopModuleResponse) ProtoMessage() {} +func (*StopModuleResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{14} +} +func (m *StopModuleResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StopModuleResponse.Unmarshal(m, b) +} +func (m *StopModuleResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StopModuleResponse.Marshal(b, m, deterministic) +} +func (dst *StopModuleResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_StopModuleResponse.Merge(dst, src) +} +func (m *StopModuleResponse) XXX_Size() int { + return xxx_messageInfo_StopModuleResponse.Size(m) +} +func (m *StopModuleResponse) XXX_DiscardUnknown() { + xxx_messageInfo_StopModuleResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_StopModuleResponse proto.InternalMessageInfo + +type GetHostnameRequest struct { + Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"` + Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"` + Instance *string `protobuf:"bytes,3,opt,name=instance" json:"instance,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetHostnameRequest) Reset() { *m = GetHostnameRequest{} } +func (m *GetHostnameRequest) String() string { return proto.CompactTextString(m) } +func (*GetHostnameRequest) 
ProtoMessage() {} +func (*GetHostnameRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{15} +} +func (m *GetHostnameRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetHostnameRequest.Unmarshal(m, b) +} +func (m *GetHostnameRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetHostnameRequest.Marshal(b, m, deterministic) +} +func (dst *GetHostnameRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetHostnameRequest.Merge(dst, src) +} +func (m *GetHostnameRequest) XXX_Size() int { + return xxx_messageInfo_GetHostnameRequest.Size(m) +} +func (m *GetHostnameRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetHostnameRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetHostnameRequest proto.InternalMessageInfo + +func (m *GetHostnameRequest) GetModule() string { + if m != nil && m.Module != nil { + return *m.Module + } + return "" +} + +func (m *GetHostnameRequest) GetVersion() string { + if m != nil && m.Version != nil { + return *m.Version + } + return "" +} + +func (m *GetHostnameRequest) GetInstance() string { + if m != nil && m.Instance != nil { + return *m.Instance + } + return "" +} + +type GetHostnameResponse struct { + Hostname *string `protobuf:"bytes,1,req,name=hostname" json:"hostname,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetHostnameResponse) Reset() { *m = GetHostnameResponse{} } +func (m *GetHostnameResponse) String() string { return proto.CompactTextString(m) } +func (*GetHostnameResponse) ProtoMessage() {} +func (*GetHostnameResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{16} +} +func (m *GetHostnameResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetHostnameResponse.Unmarshal(m, b) +} +func (m *GetHostnameResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetHostnameResponse.Marshal(b, m, deterministic) +} +func (dst *GetHostnameResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetHostnameResponse.Merge(dst, src) +} +func (m *GetHostnameResponse) XXX_Size() int { + return xxx_messageInfo_GetHostnameResponse.Size(m) +} +func (m *GetHostnameResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetHostnameResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetHostnameResponse proto.InternalMessageInfo + +func (m *GetHostnameResponse) GetHostname() string { + if m != nil && m.Hostname != nil { + return *m.Hostname + } + return "" +} + +func init() { + proto.RegisterType((*ModulesServiceError)(nil), "appengine.ModulesServiceError") + proto.RegisterType((*GetModulesRequest)(nil), "appengine.GetModulesRequest") + proto.RegisterType((*GetModulesResponse)(nil), "appengine.GetModulesResponse") + proto.RegisterType((*GetVersionsRequest)(nil), "appengine.GetVersionsRequest") + proto.RegisterType((*GetVersionsResponse)(nil), "appengine.GetVersionsResponse") + proto.RegisterType((*GetDefaultVersionRequest)(nil), "appengine.GetDefaultVersionRequest") + proto.RegisterType((*GetDefaultVersionResponse)(nil), "appengine.GetDefaultVersionResponse") + proto.RegisterType((*GetNumInstancesRequest)(nil), "appengine.GetNumInstancesRequest") + proto.RegisterType((*GetNumInstancesResponse)(nil), "appengine.GetNumInstancesResponse") + proto.RegisterType((*SetNumInstancesRequest)(nil), "appengine.SetNumInstancesRequest") + 
proto.RegisterType((*SetNumInstancesResponse)(nil), "appengine.SetNumInstancesResponse") + proto.RegisterType((*StartModuleRequest)(nil), "appengine.StartModuleRequest") + proto.RegisterType((*StartModuleResponse)(nil), "appengine.StartModuleResponse") + proto.RegisterType((*StopModuleRequest)(nil), "appengine.StopModuleRequest") + proto.RegisterType((*StopModuleResponse)(nil), "appengine.StopModuleResponse") + proto.RegisterType((*GetHostnameRequest)(nil), "appengine.GetHostnameRequest") + proto.RegisterType((*GetHostnameResponse)(nil), "appengine.GetHostnameResponse") +} + +func init() { + proto.RegisterFile("google.golang.org/appengine/internal/modules/modules_service.proto", fileDescriptor_modules_service_9cd3bffe4e91c59a) +} + +var fileDescriptor_modules_service_9cd3bffe4e91c59a = []byte{ + // 457 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x94, 0xc1, 0x6f, 0xd3, 0x30, + 0x14, 0xc6, 0x69, 0x02, 0xdb, 0xf2, 0x0e, 0x90, 0x3a, 0x5b, 0xd7, 0x4d, 0x1c, 0x50, 0x4e, 0x1c, + 0x50, 0x2b, 0x90, 0x10, 0xe7, 0xae, 0x35, 0x25, 0xb0, 0xa5, 0x28, 0xce, 0x2a, 0xc4, 0xa5, 0x0a, + 0xdb, 0x23, 0x8b, 0x94, 0xda, 0xc1, 0x76, 0x77, 0xe4, 0xbf, 0xe0, 0xff, 0x45, 0x4b, 0xed, 0xb6, + 0x81, 0x4e, 0x45, 0x68, 0xa7, 0xe4, 0x7d, 0xfe, 0xfc, 0x7b, 0x9f, 0x5f, 0xac, 0xc0, 0x59, 0x2e, + 0x44, 0x5e, 0x62, 0x2f, 0x17, 0x65, 0xc6, 0xf3, 0x9e, 0x90, 0x79, 0x3f, 0xab, 0x2a, 0xe4, 0x79, + 0xc1, 0xb1, 0x5f, 0x70, 0x8d, 0x92, 0x67, 0x65, 0x7f, 0x2e, 0xae, 0x17, 0x25, 0x2a, 0xfb, 0x9c, + 0x29, 0x94, 0xb7, 0xc5, 0x15, 0xf6, 0x2a, 0x29, 0xb4, 0x20, 0xde, 0x6a, 0x47, 0xf8, 0xab, 0x05, + 0xc1, 0xc5, 0xd2, 0xc4, 0x96, 0x1e, 0x2a, 0xa5, 0x90, 0xe1, 0x4f, 0xf0, 0xea, 0x97, 0xa1, 0xb8, + 0x46, 0xb2, 0x07, 0xce, 0xe4, 0x93, 0xff, 0x88, 0x10, 0x78, 0x1a, 0xc5, 0xd3, 0xc1, 0x79, 0x34, + 0x9a, 0x5d, 0x4c, 0x46, 0x97, 0xe7, 0xd4, 0x6f, 0x91, 0x00, 0x9e, 0x59, 0x6d, 0x4a, 0x13, 0x16, + 0x4d, 0x62, 0xdf, 0x21, 0x47, 0xd0, 0xb6, 0x62, 0x14, 0xb3, 0x74, 0x10, 0x0f, 0x29, 0xf3, 0xdd, + 0x3b, 0x6f, 0x9a, 0x0c, 0x62, 0x16, 0xd1, 0x38, 0x9d, 0xd1, 0x24, 0x99, 0x24, 0xfe, 0x63, 0x72, + 0x08, 0xfe, 0x65, 0x4c, 0xbf, 0x7c, 0xa6, 0xc3, 0x94, 0x8e, 0x66, 0x2c, 0x1d, 0xa4, 0xd4, 0x7f, + 0x12, 0x06, 0xd0, 0x1e, 0xa3, 0x36, 0xc9, 0x12, 0xfc, 0xb1, 0x40, 0xa5, 0xc3, 0x57, 0x40, 0x36, + 0x45, 0x55, 0x09, 0xae, 0x90, 0x74, 0x60, 0x6f, 0x79, 0xcc, 0x6e, 0xeb, 0x85, 0xfb, 0xd2, 0x4b, + 0x4c, 0x65, 0xdc, 0x53, 0x94, 0xaa, 0x10, 0xdc, 0x32, 0x1a, 0xee, 0xd6, 0x86, 0xbb, 0x0f, 0x41, + 0xc3, 0x6d, 0xe0, 0x5d, 0xd8, 0xbf, 0x5d, 0x6a, 0x86, 0x6e, 0xcb, 0xf0, 0x0d, 0x74, 0xc7, 0xa8, + 0x47, 0xf8, 0x3d, 0x5b, 0x94, 0x76, 0xdf, 0xae, 0x26, 0x6f, 0xe1, 0x64, 0xcb, 0x9e, 0x6d, 0xad, + 0x9c, 0xcd, 0x56, 0x1f, 0xa1, 0x33, 0x46, 0x1d, 0x2f, 0xe6, 0x11, 0x57, 0x3a, 0xe3, 0x57, 0xb8, + 0xeb, 0x34, 0x9b, 0x2c, 0xa7, 0x5e, 0x58, 0xb1, 0xde, 0xc1, 0xf1, 0x5f, 0x2c, 0x13, 0xe0, 0x39, + 0x78, 0x85, 0x15, 0xeb, 0x08, 0x6e, 0xb2, 0x16, 0xc2, 0x1b, 0xe8, 0xb0, 0x07, 0x0a, 0xd1, 0xec, + 0xe4, 0xfe, 0xd9, 0xe9, 0x04, 0x8e, 0xd9, 0xf6, 0x88, 0xe1, 0x7b, 0x20, 0x4c, 0x67, 0xd2, 0xdc, + 0x81, 0x6d, 0x01, 0x9c, 0xfb, 0x02, 0x34, 0x26, 0x7a, 0x04, 0x41, 0x83, 0x63, 0xf0, 0x14, 0xda, + 0x4c, 0x8b, 0xea, 0x7e, 0xfa, 0xbf, 0xcd, 0xf8, 0xf0, 0x2e, 0xe5, 0x1a, 0x63, 0xe0, 0xdf, 0xea, + 0xfb, 0xf8, 0x41, 0x28, 0xcd, 0xb3, 0xf9, 0xff, 0xd3, 0xc9, 0x29, 0x1c, 0xd8, 0x59, 0x75, 0xdd, + 0x7a, 0x69, 0x55, 0x87, 0xaf, 0xeb, 0x5b, 0xbc, 0xee, 0x61, 0xbe, 0xec, 0x29, 0x1c, 0xdc, 0x18, + 0xcd, 0x8c, 0x68, 0x55, 0x9f, 0x79, 0x5f, 0xf7, 0xcd, 
0x5f, 0xe2, 0x77, 0x00, 0x00, 0x00, 0xff, + 0xff, 0x6e, 0xbc, 0xe0, 0x61, 0x5c, 0x04, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/appengine/internal/socket/socket_service.pb.go b/vendor/google.golang.org/appengine/internal/socket/socket_service.pb.go new file mode 100644 index 000000000..4ec872e46 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/socket/socket_service.pb.go @@ -0,0 +1,2822 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google.golang.org/appengine/internal/socket/socket_service.proto + +package socket + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type RemoteSocketServiceError_ErrorCode int32 + +const ( + RemoteSocketServiceError_SYSTEM_ERROR RemoteSocketServiceError_ErrorCode = 1 + RemoteSocketServiceError_GAI_ERROR RemoteSocketServiceError_ErrorCode = 2 + RemoteSocketServiceError_FAILURE RemoteSocketServiceError_ErrorCode = 4 + RemoteSocketServiceError_PERMISSION_DENIED RemoteSocketServiceError_ErrorCode = 5 + RemoteSocketServiceError_INVALID_REQUEST RemoteSocketServiceError_ErrorCode = 6 + RemoteSocketServiceError_SOCKET_CLOSED RemoteSocketServiceError_ErrorCode = 7 +) + +var RemoteSocketServiceError_ErrorCode_name = map[int32]string{ + 1: "SYSTEM_ERROR", + 2: "GAI_ERROR", + 4: "FAILURE", + 5: "PERMISSION_DENIED", + 6: "INVALID_REQUEST", + 7: "SOCKET_CLOSED", +} +var RemoteSocketServiceError_ErrorCode_value = map[string]int32{ + "SYSTEM_ERROR": 1, + "GAI_ERROR": 2, + "FAILURE": 4, + "PERMISSION_DENIED": 5, + "INVALID_REQUEST": 6, + "SOCKET_CLOSED": 7, +} + +func (x RemoteSocketServiceError_ErrorCode) Enum() *RemoteSocketServiceError_ErrorCode { + p := new(RemoteSocketServiceError_ErrorCode) + *p = x + return p +} +func (x RemoteSocketServiceError_ErrorCode) String() string { + return proto.EnumName(RemoteSocketServiceError_ErrorCode_name, int32(x)) +} +func (x *RemoteSocketServiceError_ErrorCode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(RemoteSocketServiceError_ErrorCode_value, data, "RemoteSocketServiceError_ErrorCode") + if err != nil { + return err + } + *x = RemoteSocketServiceError_ErrorCode(value) + return nil +} +func (RemoteSocketServiceError_ErrorCode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{0, 0} +} + +type RemoteSocketServiceError_SystemError int32 + +const ( + RemoteSocketServiceError_SYS_SUCCESS RemoteSocketServiceError_SystemError = 0 + RemoteSocketServiceError_SYS_EPERM RemoteSocketServiceError_SystemError = 1 + RemoteSocketServiceError_SYS_ENOENT RemoteSocketServiceError_SystemError = 2 + RemoteSocketServiceError_SYS_ESRCH RemoteSocketServiceError_SystemError = 3 + RemoteSocketServiceError_SYS_EINTR RemoteSocketServiceError_SystemError = 4 + RemoteSocketServiceError_SYS_EIO RemoteSocketServiceError_SystemError = 5 + RemoteSocketServiceError_SYS_ENXIO RemoteSocketServiceError_SystemError = 6 + RemoteSocketServiceError_SYS_E2BIG RemoteSocketServiceError_SystemError = 7 + RemoteSocketServiceError_SYS_ENOEXEC 
RemoteSocketServiceError_SystemError = 8 + RemoteSocketServiceError_SYS_EBADF RemoteSocketServiceError_SystemError = 9 + RemoteSocketServiceError_SYS_ECHILD RemoteSocketServiceError_SystemError = 10 + RemoteSocketServiceError_SYS_EAGAIN RemoteSocketServiceError_SystemError = 11 + RemoteSocketServiceError_SYS_EWOULDBLOCK RemoteSocketServiceError_SystemError = 11 + RemoteSocketServiceError_SYS_ENOMEM RemoteSocketServiceError_SystemError = 12 + RemoteSocketServiceError_SYS_EACCES RemoteSocketServiceError_SystemError = 13 + RemoteSocketServiceError_SYS_EFAULT RemoteSocketServiceError_SystemError = 14 + RemoteSocketServiceError_SYS_ENOTBLK RemoteSocketServiceError_SystemError = 15 + RemoteSocketServiceError_SYS_EBUSY RemoteSocketServiceError_SystemError = 16 + RemoteSocketServiceError_SYS_EEXIST RemoteSocketServiceError_SystemError = 17 + RemoteSocketServiceError_SYS_EXDEV RemoteSocketServiceError_SystemError = 18 + RemoteSocketServiceError_SYS_ENODEV RemoteSocketServiceError_SystemError = 19 + RemoteSocketServiceError_SYS_ENOTDIR RemoteSocketServiceError_SystemError = 20 + RemoteSocketServiceError_SYS_EISDIR RemoteSocketServiceError_SystemError = 21 + RemoteSocketServiceError_SYS_EINVAL RemoteSocketServiceError_SystemError = 22 + RemoteSocketServiceError_SYS_ENFILE RemoteSocketServiceError_SystemError = 23 + RemoteSocketServiceError_SYS_EMFILE RemoteSocketServiceError_SystemError = 24 + RemoteSocketServiceError_SYS_ENOTTY RemoteSocketServiceError_SystemError = 25 + RemoteSocketServiceError_SYS_ETXTBSY RemoteSocketServiceError_SystemError = 26 + RemoteSocketServiceError_SYS_EFBIG RemoteSocketServiceError_SystemError = 27 + RemoteSocketServiceError_SYS_ENOSPC RemoteSocketServiceError_SystemError = 28 + RemoteSocketServiceError_SYS_ESPIPE RemoteSocketServiceError_SystemError = 29 + RemoteSocketServiceError_SYS_EROFS RemoteSocketServiceError_SystemError = 30 + RemoteSocketServiceError_SYS_EMLINK RemoteSocketServiceError_SystemError = 31 + RemoteSocketServiceError_SYS_EPIPE RemoteSocketServiceError_SystemError = 32 + RemoteSocketServiceError_SYS_EDOM RemoteSocketServiceError_SystemError = 33 + RemoteSocketServiceError_SYS_ERANGE RemoteSocketServiceError_SystemError = 34 + RemoteSocketServiceError_SYS_EDEADLK RemoteSocketServiceError_SystemError = 35 + RemoteSocketServiceError_SYS_EDEADLOCK RemoteSocketServiceError_SystemError = 35 + RemoteSocketServiceError_SYS_ENAMETOOLONG RemoteSocketServiceError_SystemError = 36 + RemoteSocketServiceError_SYS_ENOLCK RemoteSocketServiceError_SystemError = 37 + RemoteSocketServiceError_SYS_ENOSYS RemoteSocketServiceError_SystemError = 38 + RemoteSocketServiceError_SYS_ENOTEMPTY RemoteSocketServiceError_SystemError = 39 + RemoteSocketServiceError_SYS_ELOOP RemoteSocketServiceError_SystemError = 40 + RemoteSocketServiceError_SYS_ENOMSG RemoteSocketServiceError_SystemError = 42 + RemoteSocketServiceError_SYS_EIDRM RemoteSocketServiceError_SystemError = 43 + RemoteSocketServiceError_SYS_ECHRNG RemoteSocketServiceError_SystemError = 44 + RemoteSocketServiceError_SYS_EL2NSYNC RemoteSocketServiceError_SystemError = 45 + RemoteSocketServiceError_SYS_EL3HLT RemoteSocketServiceError_SystemError = 46 + RemoteSocketServiceError_SYS_EL3RST RemoteSocketServiceError_SystemError = 47 + RemoteSocketServiceError_SYS_ELNRNG RemoteSocketServiceError_SystemError = 48 + RemoteSocketServiceError_SYS_EUNATCH RemoteSocketServiceError_SystemError = 49 + RemoteSocketServiceError_SYS_ENOCSI RemoteSocketServiceError_SystemError = 50 + RemoteSocketServiceError_SYS_EL2HLT 
RemoteSocketServiceError_SystemError = 51 + RemoteSocketServiceError_SYS_EBADE RemoteSocketServiceError_SystemError = 52 + RemoteSocketServiceError_SYS_EBADR RemoteSocketServiceError_SystemError = 53 + RemoteSocketServiceError_SYS_EXFULL RemoteSocketServiceError_SystemError = 54 + RemoteSocketServiceError_SYS_ENOANO RemoteSocketServiceError_SystemError = 55 + RemoteSocketServiceError_SYS_EBADRQC RemoteSocketServiceError_SystemError = 56 + RemoteSocketServiceError_SYS_EBADSLT RemoteSocketServiceError_SystemError = 57 + RemoteSocketServiceError_SYS_EBFONT RemoteSocketServiceError_SystemError = 59 + RemoteSocketServiceError_SYS_ENOSTR RemoteSocketServiceError_SystemError = 60 + RemoteSocketServiceError_SYS_ENODATA RemoteSocketServiceError_SystemError = 61 + RemoteSocketServiceError_SYS_ETIME RemoteSocketServiceError_SystemError = 62 + RemoteSocketServiceError_SYS_ENOSR RemoteSocketServiceError_SystemError = 63 + RemoteSocketServiceError_SYS_ENONET RemoteSocketServiceError_SystemError = 64 + RemoteSocketServiceError_SYS_ENOPKG RemoteSocketServiceError_SystemError = 65 + RemoteSocketServiceError_SYS_EREMOTE RemoteSocketServiceError_SystemError = 66 + RemoteSocketServiceError_SYS_ENOLINK RemoteSocketServiceError_SystemError = 67 + RemoteSocketServiceError_SYS_EADV RemoteSocketServiceError_SystemError = 68 + RemoteSocketServiceError_SYS_ESRMNT RemoteSocketServiceError_SystemError = 69 + RemoteSocketServiceError_SYS_ECOMM RemoteSocketServiceError_SystemError = 70 + RemoteSocketServiceError_SYS_EPROTO RemoteSocketServiceError_SystemError = 71 + RemoteSocketServiceError_SYS_EMULTIHOP RemoteSocketServiceError_SystemError = 72 + RemoteSocketServiceError_SYS_EDOTDOT RemoteSocketServiceError_SystemError = 73 + RemoteSocketServiceError_SYS_EBADMSG RemoteSocketServiceError_SystemError = 74 + RemoteSocketServiceError_SYS_EOVERFLOW RemoteSocketServiceError_SystemError = 75 + RemoteSocketServiceError_SYS_ENOTUNIQ RemoteSocketServiceError_SystemError = 76 + RemoteSocketServiceError_SYS_EBADFD RemoteSocketServiceError_SystemError = 77 + RemoteSocketServiceError_SYS_EREMCHG RemoteSocketServiceError_SystemError = 78 + RemoteSocketServiceError_SYS_ELIBACC RemoteSocketServiceError_SystemError = 79 + RemoteSocketServiceError_SYS_ELIBBAD RemoteSocketServiceError_SystemError = 80 + RemoteSocketServiceError_SYS_ELIBSCN RemoteSocketServiceError_SystemError = 81 + RemoteSocketServiceError_SYS_ELIBMAX RemoteSocketServiceError_SystemError = 82 + RemoteSocketServiceError_SYS_ELIBEXEC RemoteSocketServiceError_SystemError = 83 + RemoteSocketServiceError_SYS_EILSEQ RemoteSocketServiceError_SystemError = 84 + RemoteSocketServiceError_SYS_ERESTART RemoteSocketServiceError_SystemError = 85 + RemoteSocketServiceError_SYS_ESTRPIPE RemoteSocketServiceError_SystemError = 86 + RemoteSocketServiceError_SYS_EUSERS RemoteSocketServiceError_SystemError = 87 + RemoteSocketServiceError_SYS_ENOTSOCK RemoteSocketServiceError_SystemError = 88 + RemoteSocketServiceError_SYS_EDESTADDRREQ RemoteSocketServiceError_SystemError = 89 + RemoteSocketServiceError_SYS_EMSGSIZE RemoteSocketServiceError_SystemError = 90 + RemoteSocketServiceError_SYS_EPROTOTYPE RemoteSocketServiceError_SystemError = 91 + RemoteSocketServiceError_SYS_ENOPROTOOPT RemoteSocketServiceError_SystemError = 92 + RemoteSocketServiceError_SYS_EPROTONOSUPPORT RemoteSocketServiceError_SystemError = 93 + RemoteSocketServiceError_SYS_ESOCKTNOSUPPORT RemoteSocketServiceError_SystemError = 94 + RemoteSocketServiceError_SYS_EOPNOTSUPP RemoteSocketServiceError_SystemError = 95 + 
RemoteSocketServiceError_SYS_ENOTSUP RemoteSocketServiceError_SystemError = 95 + RemoteSocketServiceError_SYS_EPFNOSUPPORT RemoteSocketServiceError_SystemError = 96 + RemoteSocketServiceError_SYS_EAFNOSUPPORT RemoteSocketServiceError_SystemError = 97 + RemoteSocketServiceError_SYS_EADDRINUSE RemoteSocketServiceError_SystemError = 98 + RemoteSocketServiceError_SYS_EADDRNOTAVAIL RemoteSocketServiceError_SystemError = 99 + RemoteSocketServiceError_SYS_ENETDOWN RemoteSocketServiceError_SystemError = 100 + RemoteSocketServiceError_SYS_ENETUNREACH RemoteSocketServiceError_SystemError = 101 + RemoteSocketServiceError_SYS_ENETRESET RemoteSocketServiceError_SystemError = 102 + RemoteSocketServiceError_SYS_ECONNABORTED RemoteSocketServiceError_SystemError = 103 + RemoteSocketServiceError_SYS_ECONNRESET RemoteSocketServiceError_SystemError = 104 + RemoteSocketServiceError_SYS_ENOBUFS RemoteSocketServiceError_SystemError = 105 + RemoteSocketServiceError_SYS_EISCONN RemoteSocketServiceError_SystemError = 106 + RemoteSocketServiceError_SYS_ENOTCONN RemoteSocketServiceError_SystemError = 107 + RemoteSocketServiceError_SYS_ESHUTDOWN RemoteSocketServiceError_SystemError = 108 + RemoteSocketServiceError_SYS_ETOOMANYREFS RemoteSocketServiceError_SystemError = 109 + RemoteSocketServiceError_SYS_ETIMEDOUT RemoteSocketServiceError_SystemError = 110 + RemoteSocketServiceError_SYS_ECONNREFUSED RemoteSocketServiceError_SystemError = 111 + RemoteSocketServiceError_SYS_EHOSTDOWN RemoteSocketServiceError_SystemError = 112 + RemoteSocketServiceError_SYS_EHOSTUNREACH RemoteSocketServiceError_SystemError = 113 + RemoteSocketServiceError_SYS_EALREADY RemoteSocketServiceError_SystemError = 114 + RemoteSocketServiceError_SYS_EINPROGRESS RemoteSocketServiceError_SystemError = 115 + RemoteSocketServiceError_SYS_ESTALE RemoteSocketServiceError_SystemError = 116 + RemoteSocketServiceError_SYS_EUCLEAN RemoteSocketServiceError_SystemError = 117 + RemoteSocketServiceError_SYS_ENOTNAM RemoteSocketServiceError_SystemError = 118 + RemoteSocketServiceError_SYS_ENAVAIL RemoteSocketServiceError_SystemError = 119 + RemoteSocketServiceError_SYS_EISNAM RemoteSocketServiceError_SystemError = 120 + RemoteSocketServiceError_SYS_EREMOTEIO RemoteSocketServiceError_SystemError = 121 + RemoteSocketServiceError_SYS_EDQUOT RemoteSocketServiceError_SystemError = 122 + RemoteSocketServiceError_SYS_ENOMEDIUM RemoteSocketServiceError_SystemError = 123 + RemoteSocketServiceError_SYS_EMEDIUMTYPE RemoteSocketServiceError_SystemError = 124 + RemoteSocketServiceError_SYS_ECANCELED RemoteSocketServiceError_SystemError = 125 + RemoteSocketServiceError_SYS_ENOKEY RemoteSocketServiceError_SystemError = 126 + RemoteSocketServiceError_SYS_EKEYEXPIRED RemoteSocketServiceError_SystemError = 127 + RemoteSocketServiceError_SYS_EKEYREVOKED RemoteSocketServiceError_SystemError = 128 + RemoteSocketServiceError_SYS_EKEYREJECTED RemoteSocketServiceError_SystemError = 129 + RemoteSocketServiceError_SYS_EOWNERDEAD RemoteSocketServiceError_SystemError = 130 + RemoteSocketServiceError_SYS_ENOTRECOVERABLE RemoteSocketServiceError_SystemError = 131 + RemoteSocketServiceError_SYS_ERFKILL RemoteSocketServiceError_SystemError = 132 +) + +var RemoteSocketServiceError_SystemError_name = map[int32]string{ + 0: "SYS_SUCCESS", + 1: "SYS_EPERM", + 2: "SYS_ENOENT", + 3: "SYS_ESRCH", + 4: "SYS_EINTR", + 5: "SYS_EIO", + 6: "SYS_ENXIO", + 7: "SYS_E2BIG", + 8: "SYS_ENOEXEC", + 9: "SYS_EBADF", + 10: "SYS_ECHILD", + 11: "SYS_EAGAIN", + // Duplicate value: 11: "SYS_EWOULDBLOCK", + 12: 
"SYS_ENOMEM", + 13: "SYS_EACCES", + 14: "SYS_EFAULT", + 15: "SYS_ENOTBLK", + 16: "SYS_EBUSY", + 17: "SYS_EEXIST", + 18: "SYS_EXDEV", + 19: "SYS_ENODEV", + 20: "SYS_ENOTDIR", + 21: "SYS_EISDIR", + 22: "SYS_EINVAL", + 23: "SYS_ENFILE", + 24: "SYS_EMFILE", + 25: "SYS_ENOTTY", + 26: "SYS_ETXTBSY", + 27: "SYS_EFBIG", + 28: "SYS_ENOSPC", + 29: "SYS_ESPIPE", + 30: "SYS_EROFS", + 31: "SYS_EMLINK", + 32: "SYS_EPIPE", + 33: "SYS_EDOM", + 34: "SYS_ERANGE", + 35: "SYS_EDEADLK", + // Duplicate value: 35: "SYS_EDEADLOCK", + 36: "SYS_ENAMETOOLONG", + 37: "SYS_ENOLCK", + 38: "SYS_ENOSYS", + 39: "SYS_ENOTEMPTY", + 40: "SYS_ELOOP", + 42: "SYS_ENOMSG", + 43: "SYS_EIDRM", + 44: "SYS_ECHRNG", + 45: "SYS_EL2NSYNC", + 46: "SYS_EL3HLT", + 47: "SYS_EL3RST", + 48: "SYS_ELNRNG", + 49: "SYS_EUNATCH", + 50: "SYS_ENOCSI", + 51: "SYS_EL2HLT", + 52: "SYS_EBADE", + 53: "SYS_EBADR", + 54: "SYS_EXFULL", + 55: "SYS_ENOANO", + 56: "SYS_EBADRQC", + 57: "SYS_EBADSLT", + 59: "SYS_EBFONT", + 60: "SYS_ENOSTR", + 61: "SYS_ENODATA", + 62: "SYS_ETIME", + 63: "SYS_ENOSR", + 64: "SYS_ENONET", + 65: "SYS_ENOPKG", + 66: "SYS_EREMOTE", + 67: "SYS_ENOLINK", + 68: "SYS_EADV", + 69: "SYS_ESRMNT", + 70: "SYS_ECOMM", + 71: "SYS_EPROTO", + 72: "SYS_EMULTIHOP", + 73: "SYS_EDOTDOT", + 74: "SYS_EBADMSG", + 75: "SYS_EOVERFLOW", + 76: "SYS_ENOTUNIQ", + 77: "SYS_EBADFD", + 78: "SYS_EREMCHG", + 79: "SYS_ELIBACC", + 80: "SYS_ELIBBAD", + 81: "SYS_ELIBSCN", + 82: "SYS_ELIBMAX", + 83: "SYS_ELIBEXEC", + 84: "SYS_EILSEQ", + 85: "SYS_ERESTART", + 86: "SYS_ESTRPIPE", + 87: "SYS_EUSERS", + 88: "SYS_ENOTSOCK", + 89: "SYS_EDESTADDRREQ", + 90: "SYS_EMSGSIZE", + 91: "SYS_EPROTOTYPE", + 92: "SYS_ENOPROTOOPT", + 93: "SYS_EPROTONOSUPPORT", + 94: "SYS_ESOCKTNOSUPPORT", + 95: "SYS_EOPNOTSUPP", + // Duplicate value: 95: "SYS_ENOTSUP", + 96: "SYS_EPFNOSUPPORT", + 97: "SYS_EAFNOSUPPORT", + 98: "SYS_EADDRINUSE", + 99: "SYS_EADDRNOTAVAIL", + 100: "SYS_ENETDOWN", + 101: "SYS_ENETUNREACH", + 102: "SYS_ENETRESET", + 103: "SYS_ECONNABORTED", + 104: "SYS_ECONNRESET", + 105: "SYS_ENOBUFS", + 106: "SYS_EISCONN", + 107: "SYS_ENOTCONN", + 108: "SYS_ESHUTDOWN", + 109: "SYS_ETOOMANYREFS", + 110: "SYS_ETIMEDOUT", + 111: "SYS_ECONNREFUSED", + 112: "SYS_EHOSTDOWN", + 113: "SYS_EHOSTUNREACH", + 114: "SYS_EALREADY", + 115: "SYS_EINPROGRESS", + 116: "SYS_ESTALE", + 117: "SYS_EUCLEAN", + 118: "SYS_ENOTNAM", + 119: "SYS_ENAVAIL", + 120: "SYS_EISNAM", + 121: "SYS_EREMOTEIO", + 122: "SYS_EDQUOT", + 123: "SYS_ENOMEDIUM", + 124: "SYS_EMEDIUMTYPE", + 125: "SYS_ECANCELED", + 126: "SYS_ENOKEY", + 127: "SYS_EKEYEXPIRED", + 128: "SYS_EKEYREVOKED", + 129: "SYS_EKEYREJECTED", + 130: "SYS_EOWNERDEAD", + 131: "SYS_ENOTRECOVERABLE", + 132: "SYS_ERFKILL", +} +var RemoteSocketServiceError_SystemError_value = map[string]int32{ + "SYS_SUCCESS": 0, + "SYS_EPERM": 1, + "SYS_ENOENT": 2, + "SYS_ESRCH": 3, + "SYS_EINTR": 4, + "SYS_EIO": 5, + "SYS_ENXIO": 6, + "SYS_E2BIG": 7, + "SYS_ENOEXEC": 8, + "SYS_EBADF": 9, + "SYS_ECHILD": 10, + "SYS_EAGAIN": 11, + "SYS_EWOULDBLOCK": 11, + "SYS_ENOMEM": 12, + "SYS_EACCES": 13, + "SYS_EFAULT": 14, + "SYS_ENOTBLK": 15, + "SYS_EBUSY": 16, + "SYS_EEXIST": 17, + "SYS_EXDEV": 18, + "SYS_ENODEV": 19, + "SYS_ENOTDIR": 20, + "SYS_EISDIR": 21, + "SYS_EINVAL": 22, + "SYS_ENFILE": 23, + "SYS_EMFILE": 24, + "SYS_ENOTTY": 25, + "SYS_ETXTBSY": 26, + "SYS_EFBIG": 27, + "SYS_ENOSPC": 28, + "SYS_ESPIPE": 29, + "SYS_EROFS": 30, + "SYS_EMLINK": 31, + "SYS_EPIPE": 32, + "SYS_EDOM": 33, + "SYS_ERANGE": 34, + "SYS_EDEADLK": 35, + "SYS_EDEADLOCK": 35, + "SYS_ENAMETOOLONG": 36, + "SYS_ENOLCK": 37, + 
"SYS_ENOSYS": 38, + "SYS_ENOTEMPTY": 39, + "SYS_ELOOP": 40, + "SYS_ENOMSG": 42, + "SYS_EIDRM": 43, + "SYS_ECHRNG": 44, + "SYS_EL2NSYNC": 45, + "SYS_EL3HLT": 46, + "SYS_EL3RST": 47, + "SYS_ELNRNG": 48, + "SYS_EUNATCH": 49, + "SYS_ENOCSI": 50, + "SYS_EL2HLT": 51, + "SYS_EBADE": 52, + "SYS_EBADR": 53, + "SYS_EXFULL": 54, + "SYS_ENOANO": 55, + "SYS_EBADRQC": 56, + "SYS_EBADSLT": 57, + "SYS_EBFONT": 59, + "SYS_ENOSTR": 60, + "SYS_ENODATA": 61, + "SYS_ETIME": 62, + "SYS_ENOSR": 63, + "SYS_ENONET": 64, + "SYS_ENOPKG": 65, + "SYS_EREMOTE": 66, + "SYS_ENOLINK": 67, + "SYS_EADV": 68, + "SYS_ESRMNT": 69, + "SYS_ECOMM": 70, + "SYS_EPROTO": 71, + "SYS_EMULTIHOP": 72, + "SYS_EDOTDOT": 73, + "SYS_EBADMSG": 74, + "SYS_EOVERFLOW": 75, + "SYS_ENOTUNIQ": 76, + "SYS_EBADFD": 77, + "SYS_EREMCHG": 78, + "SYS_ELIBACC": 79, + "SYS_ELIBBAD": 80, + "SYS_ELIBSCN": 81, + "SYS_ELIBMAX": 82, + "SYS_ELIBEXEC": 83, + "SYS_EILSEQ": 84, + "SYS_ERESTART": 85, + "SYS_ESTRPIPE": 86, + "SYS_EUSERS": 87, + "SYS_ENOTSOCK": 88, + "SYS_EDESTADDRREQ": 89, + "SYS_EMSGSIZE": 90, + "SYS_EPROTOTYPE": 91, + "SYS_ENOPROTOOPT": 92, + "SYS_EPROTONOSUPPORT": 93, + "SYS_ESOCKTNOSUPPORT": 94, + "SYS_EOPNOTSUPP": 95, + "SYS_ENOTSUP": 95, + "SYS_EPFNOSUPPORT": 96, + "SYS_EAFNOSUPPORT": 97, + "SYS_EADDRINUSE": 98, + "SYS_EADDRNOTAVAIL": 99, + "SYS_ENETDOWN": 100, + "SYS_ENETUNREACH": 101, + "SYS_ENETRESET": 102, + "SYS_ECONNABORTED": 103, + "SYS_ECONNRESET": 104, + "SYS_ENOBUFS": 105, + "SYS_EISCONN": 106, + "SYS_ENOTCONN": 107, + "SYS_ESHUTDOWN": 108, + "SYS_ETOOMANYREFS": 109, + "SYS_ETIMEDOUT": 110, + "SYS_ECONNREFUSED": 111, + "SYS_EHOSTDOWN": 112, + "SYS_EHOSTUNREACH": 113, + "SYS_EALREADY": 114, + "SYS_EINPROGRESS": 115, + "SYS_ESTALE": 116, + "SYS_EUCLEAN": 117, + "SYS_ENOTNAM": 118, + "SYS_ENAVAIL": 119, + "SYS_EISNAM": 120, + "SYS_EREMOTEIO": 121, + "SYS_EDQUOT": 122, + "SYS_ENOMEDIUM": 123, + "SYS_EMEDIUMTYPE": 124, + "SYS_ECANCELED": 125, + "SYS_ENOKEY": 126, + "SYS_EKEYEXPIRED": 127, + "SYS_EKEYREVOKED": 128, + "SYS_EKEYREJECTED": 129, + "SYS_EOWNERDEAD": 130, + "SYS_ENOTRECOVERABLE": 131, + "SYS_ERFKILL": 132, +} + +func (x RemoteSocketServiceError_SystemError) Enum() *RemoteSocketServiceError_SystemError { + p := new(RemoteSocketServiceError_SystemError) + *p = x + return p +} +func (x RemoteSocketServiceError_SystemError) String() string { + return proto.EnumName(RemoteSocketServiceError_SystemError_name, int32(x)) +} +func (x *RemoteSocketServiceError_SystemError) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(RemoteSocketServiceError_SystemError_value, data, "RemoteSocketServiceError_SystemError") + if err != nil { + return err + } + *x = RemoteSocketServiceError_SystemError(value) + return nil +} +func (RemoteSocketServiceError_SystemError) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{0, 1} +} + +type CreateSocketRequest_SocketFamily int32 + +const ( + CreateSocketRequest_IPv4 CreateSocketRequest_SocketFamily = 1 + CreateSocketRequest_IPv6 CreateSocketRequest_SocketFamily = 2 +) + +var CreateSocketRequest_SocketFamily_name = map[int32]string{ + 1: "IPv4", + 2: "IPv6", +} +var CreateSocketRequest_SocketFamily_value = map[string]int32{ + "IPv4": 1, + "IPv6": 2, +} + +func (x CreateSocketRequest_SocketFamily) Enum() *CreateSocketRequest_SocketFamily { + p := new(CreateSocketRequest_SocketFamily) + *p = x + return p +} +func (x CreateSocketRequest_SocketFamily) String() string { + return proto.EnumName(CreateSocketRequest_SocketFamily_name, int32(x)) 
+} +func (x *CreateSocketRequest_SocketFamily) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(CreateSocketRequest_SocketFamily_value, data, "CreateSocketRequest_SocketFamily") + if err != nil { + return err + } + *x = CreateSocketRequest_SocketFamily(value) + return nil +} +func (CreateSocketRequest_SocketFamily) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{2, 0} +} + +type CreateSocketRequest_SocketProtocol int32 + +const ( + CreateSocketRequest_TCP CreateSocketRequest_SocketProtocol = 1 + CreateSocketRequest_UDP CreateSocketRequest_SocketProtocol = 2 +) + +var CreateSocketRequest_SocketProtocol_name = map[int32]string{ + 1: "TCP", + 2: "UDP", +} +var CreateSocketRequest_SocketProtocol_value = map[string]int32{ + "TCP": 1, + "UDP": 2, +} + +func (x CreateSocketRequest_SocketProtocol) Enum() *CreateSocketRequest_SocketProtocol { + p := new(CreateSocketRequest_SocketProtocol) + *p = x + return p +} +func (x CreateSocketRequest_SocketProtocol) String() string { + return proto.EnumName(CreateSocketRequest_SocketProtocol_name, int32(x)) +} +func (x *CreateSocketRequest_SocketProtocol) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(CreateSocketRequest_SocketProtocol_value, data, "CreateSocketRequest_SocketProtocol") + if err != nil { + return err + } + *x = CreateSocketRequest_SocketProtocol(value) + return nil +} +func (CreateSocketRequest_SocketProtocol) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{2, 1} +} + +type SocketOption_SocketOptionLevel int32 + +const ( + SocketOption_SOCKET_SOL_IP SocketOption_SocketOptionLevel = 0 + SocketOption_SOCKET_SOL_SOCKET SocketOption_SocketOptionLevel = 1 + SocketOption_SOCKET_SOL_TCP SocketOption_SocketOptionLevel = 6 + SocketOption_SOCKET_SOL_UDP SocketOption_SocketOptionLevel = 17 +) + +var SocketOption_SocketOptionLevel_name = map[int32]string{ + 0: "SOCKET_SOL_IP", + 1: "SOCKET_SOL_SOCKET", + 6: "SOCKET_SOL_TCP", + 17: "SOCKET_SOL_UDP", +} +var SocketOption_SocketOptionLevel_value = map[string]int32{ + "SOCKET_SOL_IP": 0, + "SOCKET_SOL_SOCKET": 1, + "SOCKET_SOL_TCP": 6, + "SOCKET_SOL_UDP": 17, +} + +func (x SocketOption_SocketOptionLevel) Enum() *SocketOption_SocketOptionLevel { + p := new(SocketOption_SocketOptionLevel) + *p = x + return p +} +func (x SocketOption_SocketOptionLevel) String() string { + return proto.EnumName(SocketOption_SocketOptionLevel_name, int32(x)) +} +func (x *SocketOption_SocketOptionLevel) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(SocketOption_SocketOptionLevel_value, data, "SocketOption_SocketOptionLevel") + if err != nil { + return err + } + *x = SocketOption_SocketOptionLevel(value) + return nil +} +func (SocketOption_SocketOptionLevel) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{10, 0} +} + +type SocketOption_SocketOptionName int32 + +const ( + SocketOption_SOCKET_SO_DEBUG SocketOption_SocketOptionName = 1 + SocketOption_SOCKET_SO_REUSEADDR SocketOption_SocketOptionName = 2 + SocketOption_SOCKET_SO_TYPE SocketOption_SocketOptionName = 3 + SocketOption_SOCKET_SO_ERROR SocketOption_SocketOptionName = 4 + SocketOption_SOCKET_SO_DONTROUTE SocketOption_SocketOptionName = 5 + SocketOption_SOCKET_SO_BROADCAST SocketOption_SocketOptionName = 6 + SocketOption_SOCKET_SO_SNDBUF SocketOption_SocketOptionName = 7 + SocketOption_SOCKET_SO_RCVBUF SocketOption_SocketOptionName = 8 + 
SocketOption_SOCKET_SO_KEEPALIVE SocketOption_SocketOptionName = 9 + SocketOption_SOCKET_SO_OOBINLINE SocketOption_SocketOptionName = 10 + SocketOption_SOCKET_SO_LINGER SocketOption_SocketOptionName = 13 + SocketOption_SOCKET_SO_RCVTIMEO SocketOption_SocketOptionName = 20 + SocketOption_SOCKET_SO_SNDTIMEO SocketOption_SocketOptionName = 21 + SocketOption_SOCKET_IP_TOS SocketOption_SocketOptionName = 1 + SocketOption_SOCKET_IP_TTL SocketOption_SocketOptionName = 2 + SocketOption_SOCKET_IP_HDRINCL SocketOption_SocketOptionName = 3 + SocketOption_SOCKET_IP_OPTIONS SocketOption_SocketOptionName = 4 + SocketOption_SOCKET_TCP_NODELAY SocketOption_SocketOptionName = 1 + SocketOption_SOCKET_TCP_MAXSEG SocketOption_SocketOptionName = 2 + SocketOption_SOCKET_TCP_CORK SocketOption_SocketOptionName = 3 + SocketOption_SOCKET_TCP_KEEPIDLE SocketOption_SocketOptionName = 4 + SocketOption_SOCKET_TCP_KEEPINTVL SocketOption_SocketOptionName = 5 + SocketOption_SOCKET_TCP_KEEPCNT SocketOption_SocketOptionName = 6 + SocketOption_SOCKET_TCP_SYNCNT SocketOption_SocketOptionName = 7 + SocketOption_SOCKET_TCP_LINGER2 SocketOption_SocketOptionName = 8 + SocketOption_SOCKET_TCP_DEFER_ACCEPT SocketOption_SocketOptionName = 9 + SocketOption_SOCKET_TCP_WINDOW_CLAMP SocketOption_SocketOptionName = 10 + SocketOption_SOCKET_TCP_INFO SocketOption_SocketOptionName = 11 + SocketOption_SOCKET_TCP_QUICKACK SocketOption_SocketOptionName = 12 +) + +var SocketOption_SocketOptionName_name = map[int32]string{ + 1: "SOCKET_SO_DEBUG", + 2: "SOCKET_SO_REUSEADDR", + 3: "SOCKET_SO_TYPE", + 4: "SOCKET_SO_ERROR", + 5: "SOCKET_SO_DONTROUTE", + 6: "SOCKET_SO_BROADCAST", + 7: "SOCKET_SO_SNDBUF", + 8: "SOCKET_SO_RCVBUF", + 9: "SOCKET_SO_KEEPALIVE", + 10: "SOCKET_SO_OOBINLINE", + 13: "SOCKET_SO_LINGER", + 20: "SOCKET_SO_RCVTIMEO", + 21: "SOCKET_SO_SNDTIMEO", + // Duplicate value: 1: "SOCKET_IP_TOS", + // Duplicate value: 2: "SOCKET_IP_TTL", + // Duplicate value: 3: "SOCKET_IP_HDRINCL", + // Duplicate value: 4: "SOCKET_IP_OPTIONS", + // Duplicate value: 1: "SOCKET_TCP_NODELAY", + // Duplicate value: 2: "SOCKET_TCP_MAXSEG", + // Duplicate value: 3: "SOCKET_TCP_CORK", + // Duplicate value: 4: "SOCKET_TCP_KEEPIDLE", + // Duplicate value: 5: "SOCKET_TCP_KEEPINTVL", + // Duplicate value: 6: "SOCKET_TCP_KEEPCNT", + // Duplicate value: 7: "SOCKET_TCP_SYNCNT", + // Duplicate value: 8: "SOCKET_TCP_LINGER2", + // Duplicate value: 9: "SOCKET_TCP_DEFER_ACCEPT", + // Duplicate value: 10: "SOCKET_TCP_WINDOW_CLAMP", + 11: "SOCKET_TCP_INFO", + 12: "SOCKET_TCP_QUICKACK", +} +var SocketOption_SocketOptionName_value = map[string]int32{ + "SOCKET_SO_DEBUG": 1, + "SOCKET_SO_REUSEADDR": 2, + "SOCKET_SO_TYPE": 3, + "SOCKET_SO_ERROR": 4, + "SOCKET_SO_DONTROUTE": 5, + "SOCKET_SO_BROADCAST": 6, + "SOCKET_SO_SNDBUF": 7, + "SOCKET_SO_RCVBUF": 8, + "SOCKET_SO_KEEPALIVE": 9, + "SOCKET_SO_OOBINLINE": 10, + "SOCKET_SO_LINGER": 13, + "SOCKET_SO_RCVTIMEO": 20, + "SOCKET_SO_SNDTIMEO": 21, + "SOCKET_IP_TOS": 1, + "SOCKET_IP_TTL": 2, + "SOCKET_IP_HDRINCL": 3, + "SOCKET_IP_OPTIONS": 4, + "SOCKET_TCP_NODELAY": 1, + "SOCKET_TCP_MAXSEG": 2, + "SOCKET_TCP_CORK": 3, + "SOCKET_TCP_KEEPIDLE": 4, + "SOCKET_TCP_KEEPINTVL": 5, + "SOCKET_TCP_KEEPCNT": 6, + "SOCKET_TCP_SYNCNT": 7, + "SOCKET_TCP_LINGER2": 8, + "SOCKET_TCP_DEFER_ACCEPT": 9, + "SOCKET_TCP_WINDOW_CLAMP": 10, + "SOCKET_TCP_INFO": 11, + "SOCKET_TCP_QUICKACK": 12, +} + +func (x SocketOption_SocketOptionName) Enum() *SocketOption_SocketOptionName { + p := new(SocketOption_SocketOptionName) + *p = x + return p +} +func (x 
SocketOption_SocketOptionName) String() string { + return proto.EnumName(SocketOption_SocketOptionName_name, int32(x)) +} +func (x *SocketOption_SocketOptionName) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(SocketOption_SocketOptionName_value, data, "SocketOption_SocketOptionName") + if err != nil { + return err + } + *x = SocketOption_SocketOptionName(value) + return nil +} +func (SocketOption_SocketOptionName) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{10, 1} +} + +type ShutDownRequest_How int32 + +const ( + ShutDownRequest_SOCKET_SHUT_RD ShutDownRequest_How = 1 + ShutDownRequest_SOCKET_SHUT_WR ShutDownRequest_How = 2 + ShutDownRequest_SOCKET_SHUT_RDWR ShutDownRequest_How = 3 +) + +var ShutDownRequest_How_name = map[int32]string{ + 1: "SOCKET_SHUT_RD", + 2: "SOCKET_SHUT_WR", + 3: "SOCKET_SHUT_RDWR", +} +var ShutDownRequest_How_value = map[string]int32{ + "SOCKET_SHUT_RD": 1, + "SOCKET_SHUT_WR": 2, + "SOCKET_SHUT_RDWR": 3, +} + +func (x ShutDownRequest_How) Enum() *ShutDownRequest_How { + p := new(ShutDownRequest_How) + *p = x + return p +} +func (x ShutDownRequest_How) String() string { + return proto.EnumName(ShutDownRequest_How_name, int32(x)) +} +func (x *ShutDownRequest_How) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(ShutDownRequest_How_value, data, "ShutDownRequest_How") + if err != nil { + return err + } + *x = ShutDownRequest_How(value) + return nil +} +func (ShutDownRequest_How) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{21, 0} +} + +type ReceiveRequest_Flags int32 + +const ( + ReceiveRequest_MSG_OOB ReceiveRequest_Flags = 1 + ReceiveRequest_MSG_PEEK ReceiveRequest_Flags = 2 +) + +var ReceiveRequest_Flags_name = map[int32]string{ + 1: "MSG_OOB", + 2: "MSG_PEEK", +} +var ReceiveRequest_Flags_value = map[string]int32{ + "MSG_OOB": 1, + "MSG_PEEK": 2, +} + +func (x ReceiveRequest_Flags) Enum() *ReceiveRequest_Flags { + p := new(ReceiveRequest_Flags) + *p = x + return p +} +func (x ReceiveRequest_Flags) String() string { + return proto.EnumName(ReceiveRequest_Flags_name, int32(x)) +} +func (x *ReceiveRequest_Flags) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(ReceiveRequest_Flags_value, data, "ReceiveRequest_Flags") + if err != nil { + return err + } + *x = ReceiveRequest_Flags(value) + return nil +} +func (ReceiveRequest_Flags) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{27, 0} +} + +type PollEvent_PollEventFlag int32 + +const ( + PollEvent_SOCKET_POLLNONE PollEvent_PollEventFlag = 0 + PollEvent_SOCKET_POLLIN PollEvent_PollEventFlag = 1 + PollEvent_SOCKET_POLLPRI PollEvent_PollEventFlag = 2 + PollEvent_SOCKET_POLLOUT PollEvent_PollEventFlag = 4 + PollEvent_SOCKET_POLLERR PollEvent_PollEventFlag = 8 + PollEvent_SOCKET_POLLHUP PollEvent_PollEventFlag = 16 + PollEvent_SOCKET_POLLNVAL PollEvent_PollEventFlag = 32 + PollEvent_SOCKET_POLLRDNORM PollEvent_PollEventFlag = 64 + PollEvent_SOCKET_POLLRDBAND PollEvent_PollEventFlag = 128 + PollEvent_SOCKET_POLLWRNORM PollEvent_PollEventFlag = 256 + PollEvent_SOCKET_POLLWRBAND PollEvent_PollEventFlag = 512 + PollEvent_SOCKET_POLLMSG PollEvent_PollEventFlag = 1024 + PollEvent_SOCKET_POLLREMOVE PollEvent_PollEventFlag = 4096 + PollEvent_SOCKET_POLLRDHUP PollEvent_PollEventFlag = 8192 +) + +var PollEvent_PollEventFlag_name = map[int32]string{ + 0: "SOCKET_POLLNONE", + 1: "SOCKET_POLLIN", + 2: 
"SOCKET_POLLPRI", + 4: "SOCKET_POLLOUT", + 8: "SOCKET_POLLERR", + 16: "SOCKET_POLLHUP", + 32: "SOCKET_POLLNVAL", + 64: "SOCKET_POLLRDNORM", + 128: "SOCKET_POLLRDBAND", + 256: "SOCKET_POLLWRNORM", + 512: "SOCKET_POLLWRBAND", + 1024: "SOCKET_POLLMSG", + 4096: "SOCKET_POLLREMOVE", + 8192: "SOCKET_POLLRDHUP", +} +var PollEvent_PollEventFlag_value = map[string]int32{ + "SOCKET_POLLNONE": 0, + "SOCKET_POLLIN": 1, + "SOCKET_POLLPRI": 2, + "SOCKET_POLLOUT": 4, + "SOCKET_POLLERR": 8, + "SOCKET_POLLHUP": 16, + "SOCKET_POLLNVAL": 32, + "SOCKET_POLLRDNORM": 64, + "SOCKET_POLLRDBAND": 128, + "SOCKET_POLLWRNORM": 256, + "SOCKET_POLLWRBAND": 512, + "SOCKET_POLLMSG": 1024, + "SOCKET_POLLREMOVE": 4096, + "SOCKET_POLLRDHUP": 8192, +} + +func (x PollEvent_PollEventFlag) Enum() *PollEvent_PollEventFlag { + p := new(PollEvent_PollEventFlag) + *p = x + return p +} +func (x PollEvent_PollEventFlag) String() string { + return proto.EnumName(PollEvent_PollEventFlag_name, int32(x)) +} +func (x *PollEvent_PollEventFlag) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(PollEvent_PollEventFlag_value, data, "PollEvent_PollEventFlag") + if err != nil { + return err + } + *x = PollEvent_PollEventFlag(value) + return nil +} +func (PollEvent_PollEventFlag) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{29, 0} +} + +type ResolveReply_ErrorCode int32 + +const ( + ResolveReply_SOCKET_EAI_ADDRFAMILY ResolveReply_ErrorCode = 1 + ResolveReply_SOCKET_EAI_AGAIN ResolveReply_ErrorCode = 2 + ResolveReply_SOCKET_EAI_BADFLAGS ResolveReply_ErrorCode = 3 + ResolveReply_SOCKET_EAI_FAIL ResolveReply_ErrorCode = 4 + ResolveReply_SOCKET_EAI_FAMILY ResolveReply_ErrorCode = 5 + ResolveReply_SOCKET_EAI_MEMORY ResolveReply_ErrorCode = 6 + ResolveReply_SOCKET_EAI_NODATA ResolveReply_ErrorCode = 7 + ResolveReply_SOCKET_EAI_NONAME ResolveReply_ErrorCode = 8 + ResolveReply_SOCKET_EAI_SERVICE ResolveReply_ErrorCode = 9 + ResolveReply_SOCKET_EAI_SOCKTYPE ResolveReply_ErrorCode = 10 + ResolveReply_SOCKET_EAI_SYSTEM ResolveReply_ErrorCode = 11 + ResolveReply_SOCKET_EAI_BADHINTS ResolveReply_ErrorCode = 12 + ResolveReply_SOCKET_EAI_PROTOCOL ResolveReply_ErrorCode = 13 + ResolveReply_SOCKET_EAI_OVERFLOW ResolveReply_ErrorCode = 14 + ResolveReply_SOCKET_EAI_MAX ResolveReply_ErrorCode = 15 +) + +var ResolveReply_ErrorCode_name = map[int32]string{ + 1: "SOCKET_EAI_ADDRFAMILY", + 2: "SOCKET_EAI_AGAIN", + 3: "SOCKET_EAI_BADFLAGS", + 4: "SOCKET_EAI_FAIL", + 5: "SOCKET_EAI_FAMILY", + 6: "SOCKET_EAI_MEMORY", + 7: "SOCKET_EAI_NODATA", + 8: "SOCKET_EAI_NONAME", + 9: "SOCKET_EAI_SERVICE", + 10: "SOCKET_EAI_SOCKTYPE", + 11: "SOCKET_EAI_SYSTEM", + 12: "SOCKET_EAI_BADHINTS", + 13: "SOCKET_EAI_PROTOCOL", + 14: "SOCKET_EAI_OVERFLOW", + 15: "SOCKET_EAI_MAX", +} +var ResolveReply_ErrorCode_value = map[string]int32{ + "SOCKET_EAI_ADDRFAMILY": 1, + "SOCKET_EAI_AGAIN": 2, + "SOCKET_EAI_BADFLAGS": 3, + "SOCKET_EAI_FAIL": 4, + "SOCKET_EAI_FAMILY": 5, + "SOCKET_EAI_MEMORY": 6, + "SOCKET_EAI_NODATA": 7, + "SOCKET_EAI_NONAME": 8, + "SOCKET_EAI_SERVICE": 9, + "SOCKET_EAI_SOCKTYPE": 10, + "SOCKET_EAI_SYSTEM": 11, + "SOCKET_EAI_BADHINTS": 12, + "SOCKET_EAI_PROTOCOL": 13, + "SOCKET_EAI_OVERFLOW": 14, + "SOCKET_EAI_MAX": 15, +} + +func (x ResolveReply_ErrorCode) Enum() *ResolveReply_ErrorCode { + p := new(ResolveReply_ErrorCode) + *p = x + return p +} +func (x ResolveReply_ErrorCode) String() string { + return proto.EnumName(ResolveReply_ErrorCode_name, int32(x)) +} +func (x *ResolveReply_ErrorCode) 
UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(ResolveReply_ErrorCode_value, data, "ResolveReply_ErrorCode") + if err != nil { + return err + } + *x = ResolveReply_ErrorCode(value) + return nil +} +func (ResolveReply_ErrorCode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{33, 0} +} + +type RemoteSocketServiceError struct { + SystemError *int32 `protobuf:"varint,1,opt,name=system_error,json=systemError,def=0" json:"system_error,omitempty"` + ErrorDetail *string `protobuf:"bytes,2,opt,name=error_detail,json=errorDetail" json:"error_detail,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RemoteSocketServiceError) Reset() { *m = RemoteSocketServiceError{} } +func (m *RemoteSocketServiceError) String() string { return proto.CompactTextString(m) } +func (*RemoteSocketServiceError) ProtoMessage() {} +func (*RemoteSocketServiceError) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{0} +} +func (m *RemoteSocketServiceError) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RemoteSocketServiceError.Unmarshal(m, b) +} +func (m *RemoteSocketServiceError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RemoteSocketServiceError.Marshal(b, m, deterministic) +} +func (dst *RemoteSocketServiceError) XXX_Merge(src proto.Message) { + xxx_messageInfo_RemoteSocketServiceError.Merge(dst, src) +} +func (m *RemoteSocketServiceError) XXX_Size() int { + return xxx_messageInfo_RemoteSocketServiceError.Size(m) +} +func (m *RemoteSocketServiceError) XXX_DiscardUnknown() { + xxx_messageInfo_RemoteSocketServiceError.DiscardUnknown(m) +} + +var xxx_messageInfo_RemoteSocketServiceError proto.InternalMessageInfo + +const Default_RemoteSocketServiceError_SystemError int32 = 0 + +func (m *RemoteSocketServiceError) GetSystemError() int32 { + if m != nil && m.SystemError != nil { + return *m.SystemError + } + return Default_RemoteSocketServiceError_SystemError +} + +func (m *RemoteSocketServiceError) GetErrorDetail() string { + if m != nil && m.ErrorDetail != nil { + return *m.ErrorDetail + } + return "" +} + +type AddressPort struct { + Port *int32 `protobuf:"varint,1,req,name=port" json:"port,omitempty"` + PackedAddress []byte `protobuf:"bytes,2,opt,name=packed_address,json=packedAddress" json:"packed_address,omitempty"` + HostnameHint *string `protobuf:"bytes,3,opt,name=hostname_hint,json=hostnameHint" json:"hostname_hint,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AddressPort) Reset() { *m = AddressPort{} } +func (m *AddressPort) String() string { return proto.CompactTextString(m) } +func (*AddressPort) ProtoMessage() {} +func (*AddressPort) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{1} +} +func (m *AddressPort) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AddressPort.Unmarshal(m, b) +} +func (m *AddressPort) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AddressPort.Marshal(b, m, deterministic) +} +func (dst *AddressPort) XXX_Merge(src proto.Message) { + xxx_messageInfo_AddressPort.Merge(dst, src) +} +func (m *AddressPort) XXX_Size() int { + return xxx_messageInfo_AddressPort.Size(m) +} +func (m *AddressPort) XXX_DiscardUnknown() { + 
xxx_messageInfo_AddressPort.DiscardUnknown(m) +} + +var xxx_messageInfo_AddressPort proto.InternalMessageInfo + +func (m *AddressPort) GetPort() int32 { + if m != nil && m.Port != nil { + return *m.Port + } + return 0 +} + +func (m *AddressPort) GetPackedAddress() []byte { + if m != nil { + return m.PackedAddress + } + return nil +} + +func (m *AddressPort) GetHostnameHint() string { + if m != nil && m.HostnameHint != nil { + return *m.HostnameHint + } + return "" +} + +type CreateSocketRequest struct { + Family *CreateSocketRequest_SocketFamily `protobuf:"varint,1,req,name=family,enum=appengine.CreateSocketRequest_SocketFamily" json:"family,omitempty"` + Protocol *CreateSocketRequest_SocketProtocol `protobuf:"varint,2,req,name=protocol,enum=appengine.CreateSocketRequest_SocketProtocol" json:"protocol,omitempty"` + SocketOptions []*SocketOption `protobuf:"bytes,3,rep,name=socket_options,json=socketOptions" json:"socket_options,omitempty"` + ProxyExternalIp *AddressPort `protobuf:"bytes,4,opt,name=proxy_external_ip,json=proxyExternalIp" json:"proxy_external_ip,omitempty"` + ListenBacklog *int32 `protobuf:"varint,5,opt,name=listen_backlog,json=listenBacklog,def=0" json:"listen_backlog,omitempty"` + RemoteIp *AddressPort `protobuf:"bytes,6,opt,name=remote_ip,json=remoteIp" json:"remote_ip,omitempty"` + AppId *string `protobuf:"bytes,9,opt,name=app_id,json=appId" json:"app_id,omitempty"` + ProjectId *int64 `protobuf:"varint,10,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateSocketRequest) Reset() { *m = CreateSocketRequest{} } +func (m *CreateSocketRequest) String() string { return proto.CompactTextString(m) } +func (*CreateSocketRequest) ProtoMessage() {} +func (*CreateSocketRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{2} +} +func (m *CreateSocketRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateSocketRequest.Unmarshal(m, b) +} +func (m *CreateSocketRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateSocketRequest.Marshal(b, m, deterministic) +} +func (dst *CreateSocketRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateSocketRequest.Merge(dst, src) +} +func (m *CreateSocketRequest) XXX_Size() int { + return xxx_messageInfo_CreateSocketRequest.Size(m) +} +func (m *CreateSocketRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CreateSocketRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateSocketRequest proto.InternalMessageInfo + +const Default_CreateSocketRequest_ListenBacklog int32 = 0 + +func (m *CreateSocketRequest) GetFamily() CreateSocketRequest_SocketFamily { + if m != nil && m.Family != nil { + return *m.Family + } + return CreateSocketRequest_IPv4 +} + +func (m *CreateSocketRequest) GetProtocol() CreateSocketRequest_SocketProtocol { + if m != nil && m.Protocol != nil { + return *m.Protocol + } + return CreateSocketRequest_TCP +} + +func (m *CreateSocketRequest) GetSocketOptions() []*SocketOption { + if m != nil { + return m.SocketOptions + } + return nil +} + +func (m *CreateSocketRequest) GetProxyExternalIp() *AddressPort { + if m != nil { + return m.ProxyExternalIp + } + return nil +} + +func (m *CreateSocketRequest) GetListenBacklog() int32 { + if m != nil && m.ListenBacklog != nil { + return *m.ListenBacklog + } + return Default_CreateSocketRequest_ListenBacklog +} + +func (m 
*CreateSocketRequest) GetRemoteIp() *AddressPort { + if m != nil { + return m.RemoteIp + } + return nil +} + +func (m *CreateSocketRequest) GetAppId() string { + if m != nil && m.AppId != nil { + return *m.AppId + } + return "" +} + +func (m *CreateSocketRequest) GetProjectId() int64 { + if m != nil && m.ProjectId != nil { + return *m.ProjectId + } + return 0 +} + +type CreateSocketReply struct { + SocketDescriptor *string `protobuf:"bytes,1,opt,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` + ServerAddress *AddressPort `protobuf:"bytes,3,opt,name=server_address,json=serverAddress" json:"server_address,omitempty"` + ProxyExternalIp *AddressPort `protobuf:"bytes,4,opt,name=proxy_external_ip,json=proxyExternalIp" json:"proxy_external_ip,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateSocketReply) Reset() { *m = CreateSocketReply{} } +func (m *CreateSocketReply) String() string { return proto.CompactTextString(m) } +func (*CreateSocketReply) ProtoMessage() {} +func (*CreateSocketReply) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{3} +} + +var extRange_CreateSocketReply = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*CreateSocketReply) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_CreateSocketReply +} +func (m *CreateSocketReply) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateSocketReply.Unmarshal(m, b) +} +func (m *CreateSocketReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateSocketReply.Marshal(b, m, deterministic) +} +func (dst *CreateSocketReply) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateSocketReply.Merge(dst, src) +} +func (m *CreateSocketReply) XXX_Size() int { + return xxx_messageInfo_CreateSocketReply.Size(m) +} +func (m *CreateSocketReply) XXX_DiscardUnknown() { + xxx_messageInfo_CreateSocketReply.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateSocketReply proto.InternalMessageInfo + +func (m *CreateSocketReply) GetSocketDescriptor() string { + if m != nil && m.SocketDescriptor != nil { + return *m.SocketDescriptor + } + return "" +} + +func (m *CreateSocketReply) GetServerAddress() *AddressPort { + if m != nil { + return m.ServerAddress + } + return nil +} + +func (m *CreateSocketReply) GetProxyExternalIp() *AddressPort { + if m != nil { + return m.ProxyExternalIp + } + return nil +} + +type BindRequest struct { + SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` + ProxyExternalIp *AddressPort `protobuf:"bytes,2,req,name=proxy_external_ip,json=proxyExternalIp" json:"proxy_external_ip,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BindRequest) Reset() { *m = BindRequest{} } +func (m *BindRequest) String() string { return proto.CompactTextString(m) } +func (*BindRequest) ProtoMessage() {} +func (*BindRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{4} +} +func (m *BindRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BindRequest.Unmarshal(m, b) +} +func (m *BindRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BindRequest.Marshal(b, m, deterministic) +} +func (dst 
*BindRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_BindRequest.Merge(dst, src) +} +func (m *BindRequest) XXX_Size() int { + return xxx_messageInfo_BindRequest.Size(m) +} +func (m *BindRequest) XXX_DiscardUnknown() { + xxx_messageInfo_BindRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_BindRequest proto.InternalMessageInfo + +func (m *BindRequest) GetSocketDescriptor() string { + if m != nil && m.SocketDescriptor != nil { + return *m.SocketDescriptor + } + return "" +} + +func (m *BindRequest) GetProxyExternalIp() *AddressPort { + if m != nil { + return m.ProxyExternalIp + } + return nil +} + +type BindReply struct { + ProxyExternalIp *AddressPort `protobuf:"bytes,1,opt,name=proxy_external_ip,json=proxyExternalIp" json:"proxy_external_ip,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BindReply) Reset() { *m = BindReply{} } +func (m *BindReply) String() string { return proto.CompactTextString(m) } +func (*BindReply) ProtoMessage() {} +func (*BindReply) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{5} +} +func (m *BindReply) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BindReply.Unmarshal(m, b) +} +func (m *BindReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BindReply.Marshal(b, m, deterministic) +} +func (dst *BindReply) XXX_Merge(src proto.Message) { + xxx_messageInfo_BindReply.Merge(dst, src) +} +func (m *BindReply) XXX_Size() int { + return xxx_messageInfo_BindReply.Size(m) +} +func (m *BindReply) XXX_DiscardUnknown() { + xxx_messageInfo_BindReply.DiscardUnknown(m) +} + +var xxx_messageInfo_BindReply proto.InternalMessageInfo + +func (m *BindReply) GetProxyExternalIp() *AddressPort { + if m != nil { + return m.ProxyExternalIp + } + return nil +} + +type GetSocketNameRequest struct { + SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetSocketNameRequest) Reset() { *m = GetSocketNameRequest{} } +func (m *GetSocketNameRequest) String() string { return proto.CompactTextString(m) } +func (*GetSocketNameRequest) ProtoMessage() {} +func (*GetSocketNameRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{6} +} +func (m *GetSocketNameRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetSocketNameRequest.Unmarshal(m, b) +} +func (m *GetSocketNameRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetSocketNameRequest.Marshal(b, m, deterministic) +} +func (dst *GetSocketNameRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetSocketNameRequest.Merge(dst, src) +} +func (m *GetSocketNameRequest) XXX_Size() int { + return xxx_messageInfo_GetSocketNameRequest.Size(m) +} +func (m *GetSocketNameRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetSocketNameRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetSocketNameRequest proto.InternalMessageInfo + +func (m *GetSocketNameRequest) GetSocketDescriptor() string { + if m != nil && m.SocketDescriptor != nil { + return *m.SocketDescriptor + } + return "" +} + +type GetSocketNameReply struct { + ProxyExternalIp *AddressPort `protobuf:"bytes,2,opt,name=proxy_external_ip,json=proxyExternalIp" json:"proxy_external_ip,omitempty"` 
+ XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetSocketNameReply) Reset() { *m = GetSocketNameReply{} } +func (m *GetSocketNameReply) String() string { return proto.CompactTextString(m) } +func (*GetSocketNameReply) ProtoMessage() {} +func (*GetSocketNameReply) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{7} +} +func (m *GetSocketNameReply) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetSocketNameReply.Unmarshal(m, b) +} +func (m *GetSocketNameReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetSocketNameReply.Marshal(b, m, deterministic) +} +func (dst *GetSocketNameReply) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetSocketNameReply.Merge(dst, src) +} +func (m *GetSocketNameReply) XXX_Size() int { + return xxx_messageInfo_GetSocketNameReply.Size(m) +} +func (m *GetSocketNameReply) XXX_DiscardUnknown() { + xxx_messageInfo_GetSocketNameReply.DiscardUnknown(m) +} + +var xxx_messageInfo_GetSocketNameReply proto.InternalMessageInfo + +func (m *GetSocketNameReply) GetProxyExternalIp() *AddressPort { + if m != nil { + return m.ProxyExternalIp + } + return nil +} + +type GetPeerNameRequest struct { + SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetPeerNameRequest) Reset() { *m = GetPeerNameRequest{} } +func (m *GetPeerNameRequest) String() string { return proto.CompactTextString(m) } +func (*GetPeerNameRequest) ProtoMessage() {} +func (*GetPeerNameRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{8} +} +func (m *GetPeerNameRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetPeerNameRequest.Unmarshal(m, b) +} +func (m *GetPeerNameRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetPeerNameRequest.Marshal(b, m, deterministic) +} +func (dst *GetPeerNameRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetPeerNameRequest.Merge(dst, src) +} +func (m *GetPeerNameRequest) XXX_Size() int { + return xxx_messageInfo_GetPeerNameRequest.Size(m) +} +func (m *GetPeerNameRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetPeerNameRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetPeerNameRequest proto.InternalMessageInfo + +func (m *GetPeerNameRequest) GetSocketDescriptor() string { + if m != nil && m.SocketDescriptor != nil { + return *m.SocketDescriptor + } + return "" +} + +type GetPeerNameReply struct { + PeerIp *AddressPort `protobuf:"bytes,2,opt,name=peer_ip,json=peerIp" json:"peer_ip,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetPeerNameReply) Reset() { *m = GetPeerNameReply{} } +func (m *GetPeerNameReply) String() string { return proto.CompactTextString(m) } +func (*GetPeerNameReply) ProtoMessage() {} +func (*GetPeerNameReply) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{9} +} +func (m *GetPeerNameReply) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetPeerNameReply.Unmarshal(m, b) +} +func (m *GetPeerNameReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetPeerNameReply.Marshal(b, m, 
deterministic) +} +func (dst *GetPeerNameReply) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetPeerNameReply.Merge(dst, src) +} +func (m *GetPeerNameReply) XXX_Size() int { + return xxx_messageInfo_GetPeerNameReply.Size(m) +} +func (m *GetPeerNameReply) XXX_DiscardUnknown() { + xxx_messageInfo_GetPeerNameReply.DiscardUnknown(m) +} + +var xxx_messageInfo_GetPeerNameReply proto.InternalMessageInfo + +func (m *GetPeerNameReply) GetPeerIp() *AddressPort { + if m != nil { + return m.PeerIp + } + return nil +} + +type SocketOption struct { + Level *SocketOption_SocketOptionLevel `protobuf:"varint,1,req,name=level,enum=appengine.SocketOption_SocketOptionLevel" json:"level,omitempty"` + Option *SocketOption_SocketOptionName `protobuf:"varint,2,req,name=option,enum=appengine.SocketOption_SocketOptionName" json:"option,omitempty"` + Value []byte `protobuf:"bytes,3,req,name=value" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SocketOption) Reset() { *m = SocketOption{} } +func (m *SocketOption) String() string { return proto.CompactTextString(m) } +func (*SocketOption) ProtoMessage() {} +func (*SocketOption) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{10} +} +func (m *SocketOption) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SocketOption.Unmarshal(m, b) +} +func (m *SocketOption) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SocketOption.Marshal(b, m, deterministic) +} +func (dst *SocketOption) XXX_Merge(src proto.Message) { + xxx_messageInfo_SocketOption.Merge(dst, src) +} +func (m *SocketOption) XXX_Size() int { + return xxx_messageInfo_SocketOption.Size(m) +} +func (m *SocketOption) XXX_DiscardUnknown() { + xxx_messageInfo_SocketOption.DiscardUnknown(m) +} + +var xxx_messageInfo_SocketOption proto.InternalMessageInfo + +func (m *SocketOption) GetLevel() SocketOption_SocketOptionLevel { + if m != nil && m.Level != nil { + return *m.Level + } + return SocketOption_SOCKET_SOL_IP +} + +func (m *SocketOption) GetOption() SocketOption_SocketOptionName { + if m != nil && m.Option != nil { + return *m.Option + } + return SocketOption_SOCKET_SO_DEBUG +} + +func (m *SocketOption) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +type SetSocketOptionsRequest struct { + SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` + Options []*SocketOption `protobuf:"bytes,2,rep,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SetSocketOptionsRequest) Reset() { *m = SetSocketOptionsRequest{} } +func (m *SetSocketOptionsRequest) String() string { return proto.CompactTextString(m) } +func (*SetSocketOptionsRequest) ProtoMessage() {} +func (*SetSocketOptionsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{11} +} +func (m *SetSocketOptionsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SetSocketOptionsRequest.Unmarshal(m, b) +} +func (m *SetSocketOptionsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SetSocketOptionsRequest.Marshal(b, m, deterministic) +} +func (dst *SetSocketOptionsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_SetSocketOptionsRequest.Merge(dst, src) 
+} +func (m *SetSocketOptionsRequest) XXX_Size() int { + return xxx_messageInfo_SetSocketOptionsRequest.Size(m) +} +func (m *SetSocketOptionsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_SetSocketOptionsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_SetSocketOptionsRequest proto.InternalMessageInfo + +func (m *SetSocketOptionsRequest) GetSocketDescriptor() string { + if m != nil && m.SocketDescriptor != nil { + return *m.SocketDescriptor + } + return "" +} + +func (m *SetSocketOptionsRequest) GetOptions() []*SocketOption { + if m != nil { + return m.Options + } + return nil +} + +type SetSocketOptionsReply struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SetSocketOptionsReply) Reset() { *m = SetSocketOptionsReply{} } +func (m *SetSocketOptionsReply) String() string { return proto.CompactTextString(m) } +func (*SetSocketOptionsReply) ProtoMessage() {} +func (*SetSocketOptionsReply) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{12} +} +func (m *SetSocketOptionsReply) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SetSocketOptionsReply.Unmarshal(m, b) +} +func (m *SetSocketOptionsReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SetSocketOptionsReply.Marshal(b, m, deterministic) +} +func (dst *SetSocketOptionsReply) XXX_Merge(src proto.Message) { + xxx_messageInfo_SetSocketOptionsReply.Merge(dst, src) +} +func (m *SetSocketOptionsReply) XXX_Size() int { + return xxx_messageInfo_SetSocketOptionsReply.Size(m) +} +func (m *SetSocketOptionsReply) XXX_DiscardUnknown() { + xxx_messageInfo_SetSocketOptionsReply.DiscardUnknown(m) +} + +var xxx_messageInfo_SetSocketOptionsReply proto.InternalMessageInfo + +type GetSocketOptionsRequest struct { + SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` + Options []*SocketOption `protobuf:"bytes,2,rep,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetSocketOptionsRequest) Reset() { *m = GetSocketOptionsRequest{} } +func (m *GetSocketOptionsRequest) String() string { return proto.CompactTextString(m) } +func (*GetSocketOptionsRequest) ProtoMessage() {} +func (*GetSocketOptionsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{13} +} +func (m *GetSocketOptionsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetSocketOptionsRequest.Unmarshal(m, b) +} +func (m *GetSocketOptionsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetSocketOptionsRequest.Marshal(b, m, deterministic) +} +func (dst *GetSocketOptionsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetSocketOptionsRequest.Merge(dst, src) +} +func (m *GetSocketOptionsRequest) XXX_Size() int { + return xxx_messageInfo_GetSocketOptionsRequest.Size(m) +} +func (m *GetSocketOptionsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetSocketOptionsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetSocketOptionsRequest proto.InternalMessageInfo + +func (m *GetSocketOptionsRequest) GetSocketDescriptor() string { + if m != nil && m.SocketDescriptor != nil { + return *m.SocketDescriptor + } + return "" +} + +func (m *GetSocketOptionsRequest) GetOptions() []*SocketOption { + if m != nil { + return 
m.Options + } + return nil +} + +type GetSocketOptionsReply struct { + Options []*SocketOption `protobuf:"bytes,2,rep,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetSocketOptionsReply) Reset() { *m = GetSocketOptionsReply{} } +func (m *GetSocketOptionsReply) String() string { return proto.CompactTextString(m) } +func (*GetSocketOptionsReply) ProtoMessage() {} +func (*GetSocketOptionsReply) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{14} +} +func (m *GetSocketOptionsReply) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetSocketOptionsReply.Unmarshal(m, b) +} +func (m *GetSocketOptionsReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetSocketOptionsReply.Marshal(b, m, deterministic) +} +func (dst *GetSocketOptionsReply) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetSocketOptionsReply.Merge(dst, src) +} +func (m *GetSocketOptionsReply) XXX_Size() int { + return xxx_messageInfo_GetSocketOptionsReply.Size(m) +} +func (m *GetSocketOptionsReply) XXX_DiscardUnknown() { + xxx_messageInfo_GetSocketOptionsReply.DiscardUnknown(m) +} + +var xxx_messageInfo_GetSocketOptionsReply proto.InternalMessageInfo + +func (m *GetSocketOptionsReply) GetOptions() []*SocketOption { + if m != nil { + return m.Options + } + return nil +} + +type ConnectRequest struct { + SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` + RemoteIp *AddressPort `protobuf:"bytes,2,req,name=remote_ip,json=remoteIp" json:"remote_ip,omitempty"` + TimeoutSeconds *float64 `protobuf:"fixed64,3,opt,name=timeout_seconds,json=timeoutSeconds,def=-1" json:"timeout_seconds,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ConnectRequest) Reset() { *m = ConnectRequest{} } +func (m *ConnectRequest) String() string { return proto.CompactTextString(m) } +func (*ConnectRequest) ProtoMessage() {} +func (*ConnectRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{15} +} +func (m *ConnectRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ConnectRequest.Unmarshal(m, b) +} +func (m *ConnectRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ConnectRequest.Marshal(b, m, deterministic) +} +func (dst *ConnectRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConnectRequest.Merge(dst, src) +} +func (m *ConnectRequest) XXX_Size() int { + return xxx_messageInfo_ConnectRequest.Size(m) +} +func (m *ConnectRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ConnectRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ConnectRequest proto.InternalMessageInfo + +const Default_ConnectRequest_TimeoutSeconds float64 = -1 + +func (m *ConnectRequest) GetSocketDescriptor() string { + if m != nil && m.SocketDescriptor != nil { + return *m.SocketDescriptor + } + return "" +} + +func (m *ConnectRequest) GetRemoteIp() *AddressPort { + if m != nil { + return m.RemoteIp + } + return nil +} + +func (m *ConnectRequest) GetTimeoutSeconds() float64 { + if m != nil && m.TimeoutSeconds != nil { + return *m.TimeoutSeconds + } + return Default_ConnectRequest_TimeoutSeconds +} + +type ConnectReply struct { + ProxyExternalIp *AddressPort 
`protobuf:"bytes,1,opt,name=proxy_external_ip,json=proxyExternalIp" json:"proxy_external_ip,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ConnectReply) Reset() { *m = ConnectReply{} } +func (m *ConnectReply) String() string { return proto.CompactTextString(m) } +func (*ConnectReply) ProtoMessage() {} +func (*ConnectReply) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{16} +} + +var extRange_ConnectReply = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*ConnectReply) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_ConnectReply +} +func (m *ConnectReply) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ConnectReply.Unmarshal(m, b) +} +func (m *ConnectReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ConnectReply.Marshal(b, m, deterministic) +} +func (dst *ConnectReply) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConnectReply.Merge(dst, src) +} +func (m *ConnectReply) XXX_Size() int { + return xxx_messageInfo_ConnectReply.Size(m) +} +func (m *ConnectReply) XXX_DiscardUnknown() { + xxx_messageInfo_ConnectReply.DiscardUnknown(m) +} + +var xxx_messageInfo_ConnectReply proto.InternalMessageInfo + +func (m *ConnectReply) GetProxyExternalIp() *AddressPort { + if m != nil { + return m.ProxyExternalIp + } + return nil +} + +type ListenRequest struct { + SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` + Backlog *int32 `protobuf:"varint,2,req,name=backlog" json:"backlog,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListenRequest) Reset() { *m = ListenRequest{} } +func (m *ListenRequest) String() string { return proto.CompactTextString(m) } +func (*ListenRequest) ProtoMessage() {} +func (*ListenRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{17} +} +func (m *ListenRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListenRequest.Unmarshal(m, b) +} +func (m *ListenRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListenRequest.Marshal(b, m, deterministic) +} +func (dst *ListenRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListenRequest.Merge(dst, src) +} +func (m *ListenRequest) XXX_Size() int { + return xxx_messageInfo_ListenRequest.Size(m) +} +func (m *ListenRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListenRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListenRequest proto.InternalMessageInfo + +func (m *ListenRequest) GetSocketDescriptor() string { + if m != nil && m.SocketDescriptor != nil { + return *m.SocketDescriptor + } + return "" +} + +func (m *ListenRequest) GetBacklog() int32 { + if m != nil && m.Backlog != nil { + return *m.Backlog + } + return 0 +} + +type ListenReply struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListenReply) Reset() { *m = ListenReply{} } +func (m *ListenReply) String() string { return proto.CompactTextString(m) } +func (*ListenReply) ProtoMessage() {} +func (*ListenReply) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{18} +} +func (m *ListenReply) 
XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListenReply.Unmarshal(m, b) +} +func (m *ListenReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListenReply.Marshal(b, m, deterministic) +} +func (dst *ListenReply) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListenReply.Merge(dst, src) +} +func (m *ListenReply) XXX_Size() int { + return xxx_messageInfo_ListenReply.Size(m) +} +func (m *ListenReply) XXX_DiscardUnknown() { + xxx_messageInfo_ListenReply.DiscardUnknown(m) +} + +var xxx_messageInfo_ListenReply proto.InternalMessageInfo + +type AcceptRequest struct { + SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` + TimeoutSeconds *float64 `protobuf:"fixed64,2,opt,name=timeout_seconds,json=timeoutSeconds,def=-1" json:"timeout_seconds,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AcceptRequest) Reset() { *m = AcceptRequest{} } +func (m *AcceptRequest) String() string { return proto.CompactTextString(m) } +func (*AcceptRequest) ProtoMessage() {} +func (*AcceptRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{19} +} +func (m *AcceptRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AcceptRequest.Unmarshal(m, b) +} +func (m *AcceptRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AcceptRequest.Marshal(b, m, deterministic) +} +func (dst *AcceptRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_AcceptRequest.Merge(dst, src) +} +func (m *AcceptRequest) XXX_Size() int { + return xxx_messageInfo_AcceptRequest.Size(m) +} +func (m *AcceptRequest) XXX_DiscardUnknown() { + xxx_messageInfo_AcceptRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_AcceptRequest proto.InternalMessageInfo + +const Default_AcceptRequest_TimeoutSeconds float64 = -1 + +func (m *AcceptRequest) GetSocketDescriptor() string { + if m != nil && m.SocketDescriptor != nil { + return *m.SocketDescriptor + } + return "" +} + +func (m *AcceptRequest) GetTimeoutSeconds() float64 { + if m != nil && m.TimeoutSeconds != nil { + return *m.TimeoutSeconds + } + return Default_AcceptRequest_TimeoutSeconds +} + +type AcceptReply struct { + NewSocketDescriptor []byte `protobuf:"bytes,2,opt,name=new_socket_descriptor,json=newSocketDescriptor" json:"new_socket_descriptor,omitempty"` + RemoteAddress *AddressPort `protobuf:"bytes,3,opt,name=remote_address,json=remoteAddress" json:"remote_address,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AcceptReply) Reset() { *m = AcceptReply{} } +func (m *AcceptReply) String() string { return proto.CompactTextString(m) } +func (*AcceptReply) ProtoMessage() {} +func (*AcceptReply) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{20} +} +func (m *AcceptReply) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AcceptReply.Unmarshal(m, b) +} +func (m *AcceptReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AcceptReply.Marshal(b, m, deterministic) +} +func (dst *AcceptReply) XXX_Merge(src proto.Message) { + xxx_messageInfo_AcceptReply.Merge(dst, src) +} +func (m *AcceptReply) XXX_Size() int { + return xxx_messageInfo_AcceptReply.Size(m) +} +func (m *AcceptReply) XXX_DiscardUnknown() { + 
xxx_messageInfo_AcceptReply.DiscardUnknown(m) +} + +var xxx_messageInfo_AcceptReply proto.InternalMessageInfo + +func (m *AcceptReply) GetNewSocketDescriptor() []byte { + if m != nil { + return m.NewSocketDescriptor + } + return nil +} + +func (m *AcceptReply) GetRemoteAddress() *AddressPort { + if m != nil { + return m.RemoteAddress + } + return nil +} + +type ShutDownRequest struct { + SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` + How *ShutDownRequest_How `protobuf:"varint,2,req,name=how,enum=appengine.ShutDownRequest_How" json:"how,omitempty"` + SendOffset *int64 `protobuf:"varint,3,req,name=send_offset,json=sendOffset" json:"send_offset,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ShutDownRequest) Reset() { *m = ShutDownRequest{} } +func (m *ShutDownRequest) String() string { return proto.CompactTextString(m) } +func (*ShutDownRequest) ProtoMessage() {} +func (*ShutDownRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{21} +} +func (m *ShutDownRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ShutDownRequest.Unmarshal(m, b) +} +func (m *ShutDownRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ShutDownRequest.Marshal(b, m, deterministic) +} +func (dst *ShutDownRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ShutDownRequest.Merge(dst, src) +} +func (m *ShutDownRequest) XXX_Size() int { + return xxx_messageInfo_ShutDownRequest.Size(m) +} +func (m *ShutDownRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ShutDownRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ShutDownRequest proto.InternalMessageInfo + +func (m *ShutDownRequest) GetSocketDescriptor() string { + if m != nil && m.SocketDescriptor != nil { + return *m.SocketDescriptor + } + return "" +} + +func (m *ShutDownRequest) GetHow() ShutDownRequest_How { + if m != nil && m.How != nil { + return *m.How + } + return ShutDownRequest_SOCKET_SHUT_RD +} + +func (m *ShutDownRequest) GetSendOffset() int64 { + if m != nil && m.SendOffset != nil { + return *m.SendOffset + } + return 0 +} + +type ShutDownReply struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ShutDownReply) Reset() { *m = ShutDownReply{} } +func (m *ShutDownReply) String() string { return proto.CompactTextString(m) } +func (*ShutDownReply) ProtoMessage() {} +func (*ShutDownReply) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{22} +} +func (m *ShutDownReply) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ShutDownReply.Unmarshal(m, b) +} +func (m *ShutDownReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ShutDownReply.Marshal(b, m, deterministic) +} +func (dst *ShutDownReply) XXX_Merge(src proto.Message) { + xxx_messageInfo_ShutDownReply.Merge(dst, src) +} +func (m *ShutDownReply) XXX_Size() int { + return xxx_messageInfo_ShutDownReply.Size(m) +} +func (m *ShutDownReply) XXX_DiscardUnknown() { + xxx_messageInfo_ShutDownReply.DiscardUnknown(m) +} + +var xxx_messageInfo_ShutDownReply proto.InternalMessageInfo + +type CloseRequest struct { + SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` + SendOffset *int64 
`protobuf:"varint,2,opt,name=send_offset,json=sendOffset,def=-1" json:"send_offset,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CloseRequest) Reset() { *m = CloseRequest{} } +func (m *CloseRequest) String() string { return proto.CompactTextString(m) } +func (*CloseRequest) ProtoMessage() {} +func (*CloseRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{23} +} +func (m *CloseRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CloseRequest.Unmarshal(m, b) +} +func (m *CloseRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CloseRequest.Marshal(b, m, deterministic) +} +func (dst *CloseRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CloseRequest.Merge(dst, src) +} +func (m *CloseRequest) XXX_Size() int { + return xxx_messageInfo_CloseRequest.Size(m) +} +func (m *CloseRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CloseRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CloseRequest proto.InternalMessageInfo + +const Default_CloseRequest_SendOffset int64 = -1 + +func (m *CloseRequest) GetSocketDescriptor() string { + if m != nil && m.SocketDescriptor != nil { + return *m.SocketDescriptor + } + return "" +} + +func (m *CloseRequest) GetSendOffset() int64 { + if m != nil && m.SendOffset != nil { + return *m.SendOffset + } + return Default_CloseRequest_SendOffset +} + +type CloseReply struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CloseReply) Reset() { *m = CloseReply{} } +func (m *CloseReply) String() string { return proto.CompactTextString(m) } +func (*CloseReply) ProtoMessage() {} +func (*CloseReply) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{24} +} +func (m *CloseReply) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CloseReply.Unmarshal(m, b) +} +func (m *CloseReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CloseReply.Marshal(b, m, deterministic) +} +func (dst *CloseReply) XXX_Merge(src proto.Message) { + xxx_messageInfo_CloseReply.Merge(dst, src) +} +func (m *CloseReply) XXX_Size() int { + return xxx_messageInfo_CloseReply.Size(m) +} +func (m *CloseReply) XXX_DiscardUnknown() { + xxx_messageInfo_CloseReply.DiscardUnknown(m) +} + +var xxx_messageInfo_CloseReply proto.InternalMessageInfo + +type SendRequest struct { + SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` + Data []byte `protobuf:"bytes,2,req,name=data" json:"data,omitempty"` + StreamOffset *int64 `protobuf:"varint,3,req,name=stream_offset,json=streamOffset" json:"stream_offset,omitempty"` + Flags *int32 `protobuf:"varint,4,opt,name=flags,def=0" json:"flags,omitempty"` + SendTo *AddressPort `protobuf:"bytes,5,opt,name=send_to,json=sendTo" json:"send_to,omitempty"` + TimeoutSeconds *float64 `protobuf:"fixed64,6,opt,name=timeout_seconds,json=timeoutSeconds,def=-1" json:"timeout_seconds,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SendRequest) Reset() { *m = SendRequest{} } +func (m *SendRequest) String() string { return proto.CompactTextString(m) } +func (*SendRequest) ProtoMessage() {} +func (*SendRequest) Descriptor() ([]byte, []int) { + return 
fileDescriptor_socket_service_b5f8f233dc327808, []int{25} +} +func (m *SendRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SendRequest.Unmarshal(m, b) +} +func (m *SendRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SendRequest.Marshal(b, m, deterministic) +} +func (dst *SendRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_SendRequest.Merge(dst, src) +} +func (m *SendRequest) XXX_Size() int { + return xxx_messageInfo_SendRequest.Size(m) +} +func (m *SendRequest) XXX_DiscardUnknown() { + xxx_messageInfo_SendRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_SendRequest proto.InternalMessageInfo + +const Default_SendRequest_Flags int32 = 0 +const Default_SendRequest_TimeoutSeconds float64 = -1 + +func (m *SendRequest) GetSocketDescriptor() string { + if m != nil && m.SocketDescriptor != nil { + return *m.SocketDescriptor + } + return "" +} + +func (m *SendRequest) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +func (m *SendRequest) GetStreamOffset() int64 { + if m != nil && m.StreamOffset != nil { + return *m.StreamOffset + } + return 0 +} + +func (m *SendRequest) GetFlags() int32 { + if m != nil && m.Flags != nil { + return *m.Flags + } + return Default_SendRequest_Flags +} + +func (m *SendRequest) GetSendTo() *AddressPort { + if m != nil { + return m.SendTo + } + return nil +} + +func (m *SendRequest) GetTimeoutSeconds() float64 { + if m != nil && m.TimeoutSeconds != nil { + return *m.TimeoutSeconds + } + return Default_SendRequest_TimeoutSeconds +} + +type SendReply struct { + DataSent *int32 `protobuf:"varint,1,opt,name=data_sent,json=dataSent" json:"data_sent,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SendReply) Reset() { *m = SendReply{} } +func (m *SendReply) String() string { return proto.CompactTextString(m) } +func (*SendReply) ProtoMessage() {} +func (*SendReply) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{26} +} +func (m *SendReply) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SendReply.Unmarshal(m, b) +} +func (m *SendReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SendReply.Marshal(b, m, deterministic) +} +func (dst *SendReply) XXX_Merge(src proto.Message) { + xxx_messageInfo_SendReply.Merge(dst, src) +} +func (m *SendReply) XXX_Size() int { + return xxx_messageInfo_SendReply.Size(m) +} +func (m *SendReply) XXX_DiscardUnknown() { + xxx_messageInfo_SendReply.DiscardUnknown(m) +} + +var xxx_messageInfo_SendReply proto.InternalMessageInfo + +func (m *SendReply) GetDataSent() int32 { + if m != nil && m.DataSent != nil { + return *m.DataSent + } + return 0 +} + +type ReceiveRequest struct { + SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` + DataSize *int32 `protobuf:"varint,2,req,name=data_size,json=dataSize" json:"data_size,omitempty"` + Flags *int32 `protobuf:"varint,3,opt,name=flags,def=0" json:"flags,omitempty"` + TimeoutSeconds *float64 `protobuf:"fixed64,5,opt,name=timeout_seconds,json=timeoutSeconds,def=-1" json:"timeout_seconds,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ReceiveRequest) Reset() { *m = ReceiveRequest{} } +func (m *ReceiveRequest) String() string { return proto.CompactTextString(m) } 
+func (*ReceiveRequest) ProtoMessage() {} +func (*ReceiveRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{27} +} +func (m *ReceiveRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ReceiveRequest.Unmarshal(m, b) +} +func (m *ReceiveRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ReceiveRequest.Marshal(b, m, deterministic) +} +func (dst *ReceiveRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReceiveRequest.Merge(dst, src) +} +func (m *ReceiveRequest) XXX_Size() int { + return xxx_messageInfo_ReceiveRequest.Size(m) +} +func (m *ReceiveRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ReceiveRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ReceiveRequest proto.InternalMessageInfo + +const Default_ReceiveRequest_Flags int32 = 0 +const Default_ReceiveRequest_TimeoutSeconds float64 = -1 + +func (m *ReceiveRequest) GetSocketDescriptor() string { + if m != nil && m.SocketDescriptor != nil { + return *m.SocketDescriptor + } + return "" +} + +func (m *ReceiveRequest) GetDataSize() int32 { + if m != nil && m.DataSize != nil { + return *m.DataSize + } + return 0 +} + +func (m *ReceiveRequest) GetFlags() int32 { + if m != nil && m.Flags != nil { + return *m.Flags + } + return Default_ReceiveRequest_Flags +} + +func (m *ReceiveRequest) GetTimeoutSeconds() float64 { + if m != nil && m.TimeoutSeconds != nil { + return *m.TimeoutSeconds + } + return Default_ReceiveRequest_TimeoutSeconds +} + +type ReceiveReply struct { + StreamOffset *int64 `protobuf:"varint,2,opt,name=stream_offset,json=streamOffset" json:"stream_offset,omitempty"` + Data []byte `protobuf:"bytes,3,opt,name=data" json:"data,omitempty"` + ReceivedFrom *AddressPort `protobuf:"bytes,4,opt,name=received_from,json=receivedFrom" json:"received_from,omitempty"` + BufferSize *int32 `protobuf:"varint,5,opt,name=buffer_size,json=bufferSize" json:"buffer_size,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ReceiveReply) Reset() { *m = ReceiveReply{} } +func (m *ReceiveReply) String() string { return proto.CompactTextString(m) } +func (*ReceiveReply) ProtoMessage() {} +func (*ReceiveReply) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{28} +} +func (m *ReceiveReply) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ReceiveReply.Unmarshal(m, b) +} +func (m *ReceiveReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ReceiveReply.Marshal(b, m, deterministic) +} +func (dst *ReceiveReply) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReceiveReply.Merge(dst, src) +} +func (m *ReceiveReply) XXX_Size() int { + return xxx_messageInfo_ReceiveReply.Size(m) +} +func (m *ReceiveReply) XXX_DiscardUnknown() { + xxx_messageInfo_ReceiveReply.DiscardUnknown(m) +} + +var xxx_messageInfo_ReceiveReply proto.InternalMessageInfo + +func (m *ReceiveReply) GetStreamOffset() int64 { + if m != nil && m.StreamOffset != nil { + return *m.StreamOffset + } + return 0 +} + +func (m *ReceiveReply) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +func (m *ReceiveReply) GetReceivedFrom() *AddressPort { + if m != nil { + return m.ReceivedFrom + } + return nil +} + +func (m *ReceiveReply) GetBufferSize() int32 { + if m != nil && m.BufferSize != nil { + return *m.BufferSize + } + return 0 +} + +type PollEvent struct { + SocketDescriptor *string 
`protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` + RequestedEvents *int32 `protobuf:"varint,2,req,name=requested_events,json=requestedEvents" json:"requested_events,omitempty"` + ObservedEvents *int32 `protobuf:"varint,3,req,name=observed_events,json=observedEvents" json:"observed_events,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PollEvent) Reset() { *m = PollEvent{} } +func (m *PollEvent) String() string { return proto.CompactTextString(m) } +func (*PollEvent) ProtoMessage() {} +func (*PollEvent) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{29} +} +func (m *PollEvent) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PollEvent.Unmarshal(m, b) +} +func (m *PollEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PollEvent.Marshal(b, m, deterministic) +} +func (dst *PollEvent) XXX_Merge(src proto.Message) { + xxx_messageInfo_PollEvent.Merge(dst, src) +} +func (m *PollEvent) XXX_Size() int { + return xxx_messageInfo_PollEvent.Size(m) +} +func (m *PollEvent) XXX_DiscardUnknown() { + xxx_messageInfo_PollEvent.DiscardUnknown(m) +} + +var xxx_messageInfo_PollEvent proto.InternalMessageInfo + +func (m *PollEvent) GetSocketDescriptor() string { + if m != nil && m.SocketDescriptor != nil { + return *m.SocketDescriptor + } + return "" +} + +func (m *PollEvent) GetRequestedEvents() int32 { + if m != nil && m.RequestedEvents != nil { + return *m.RequestedEvents + } + return 0 +} + +func (m *PollEvent) GetObservedEvents() int32 { + if m != nil && m.ObservedEvents != nil { + return *m.ObservedEvents + } + return 0 +} + +type PollRequest struct { + Events []*PollEvent `protobuf:"bytes,1,rep,name=events" json:"events,omitempty"` + TimeoutSeconds *float64 `protobuf:"fixed64,2,opt,name=timeout_seconds,json=timeoutSeconds,def=-1" json:"timeout_seconds,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PollRequest) Reset() { *m = PollRequest{} } +func (m *PollRequest) String() string { return proto.CompactTextString(m) } +func (*PollRequest) ProtoMessage() {} +func (*PollRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{30} +} +func (m *PollRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PollRequest.Unmarshal(m, b) +} +func (m *PollRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PollRequest.Marshal(b, m, deterministic) +} +func (dst *PollRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_PollRequest.Merge(dst, src) +} +func (m *PollRequest) XXX_Size() int { + return xxx_messageInfo_PollRequest.Size(m) +} +func (m *PollRequest) XXX_DiscardUnknown() { + xxx_messageInfo_PollRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_PollRequest proto.InternalMessageInfo + +const Default_PollRequest_TimeoutSeconds float64 = -1 + +func (m *PollRequest) GetEvents() []*PollEvent { + if m != nil { + return m.Events + } + return nil +} + +func (m *PollRequest) GetTimeoutSeconds() float64 { + if m != nil && m.TimeoutSeconds != nil { + return *m.TimeoutSeconds + } + return Default_PollRequest_TimeoutSeconds +} + +type PollReply struct { + Events []*PollEvent `protobuf:"bytes,2,rep,name=events" json:"events,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + 
XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PollReply) Reset() { *m = PollReply{} } +func (m *PollReply) String() string { return proto.CompactTextString(m) } +func (*PollReply) ProtoMessage() {} +func (*PollReply) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{31} +} +func (m *PollReply) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PollReply.Unmarshal(m, b) +} +func (m *PollReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PollReply.Marshal(b, m, deterministic) +} +func (dst *PollReply) XXX_Merge(src proto.Message) { + xxx_messageInfo_PollReply.Merge(dst, src) +} +func (m *PollReply) XXX_Size() int { + return xxx_messageInfo_PollReply.Size(m) +} +func (m *PollReply) XXX_DiscardUnknown() { + xxx_messageInfo_PollReply.DiscardUnknown(m) +} + +var xxx_messageInfo_PollReply proto.InternalMessageInfo + +func (m *PollReply) GetEvents() []*PollEvent { + if m != nil { + return m.Events + } + return nil +} + +type ResolveRequest struct { + Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"` + AddressFamilies []CreateSocketRequest_SocketFamily `protobuf:"varint,2,rep,name=address_families,json=addressFamilies,enum=appengine.CreateSocketRequest_SocketFamily" json:"address_families,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ResolveRequest) Reset() { *m = ResolveRequest{} } +func (m *ResolveRequest) String() string { return proto.CompactTextString(m) } +func (*ResolveRequest) ProtoMessage() {} +func (*ResolveRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{32} +} +func (m *ResolveRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ResolveRequest.Unmarshal(m, b) +} +func (m *ResolveRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ResolveRequest.Marshal(b, m, deterministic) +} +func (dst *ResolveRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResolveRequest.Merge(dst, src) +} +func (m *ResolveRequest) XXX_Size() int { + return xxx_messageInfo_ResolveRequest.Size(m) +} +func (m *ResolveRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ResolveRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ResolveRequest proto.InternalMessageInfo + +func (m *ResolveRequest) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *ResolveRequest) GetAddressFamilies() []CreateSocketRequest_SocketFamily { + if m != nil { + return m.AddressFamilies + } + return nil +} + +type ResolveReply struct { + PackedAddress [][]byte `protobuf:"bytes,2,rep,name=packed_address,json=packedAddress" json:"packed_address,omitempty"` + CanonicalName *string `protobuf:"bytes,3,opt,name=canonical_name,json=canonicalName" json:"canonical_name,omitempty"` + Aliases []string `protobuf:"bytes,4,rep,name=aliases" json:"aliases,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ResolveReply) Reset() { *m = ResolveReply{} } +func (m *ResolveReply) String() string { return proto.CompactTextString(m) } +func (*ResolveReply) ProtoMessage() {} +func (*ResolveReply) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{33} +} +func (m *ResolveReply) XXX_Unmarshal(b []byte) error { + return 
xxx_messageInfo_ResolveReply.Unmarshal(m, b) +} +func (m *ResolveReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ResolveReply.Marshal(b, m, deterministic) +} +func (dst *ResolveReply) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResolveReply.Merge(dst, src) +} +func (m *ResolveReply) XXX_Size() int { + return xxx_messageInfo_ResolveReply.Size(m) +} +func (m *ResolveReply) XXX_DiscardUnknown() { + xxx_messageInfo_ResolveReply.DiscardUnknown(m) +} + +var xxx_messageInfo_ResolveReply proto.InternalMessageInfo + +func (m *ResolveReply) GetPackedAddress() [][]byte { + if m != nil { + return m.PackedAddress + } + return nil +} + +func (m *ResolveReply) GetCanonicalName() string { + if m != nil && m.CanonicalName != nil { + return *m.CanonicalName + } + return "" +} + +func (m *ResolveReply) GetAliases() []string { + if m != nil { + return m.Aliases + } + return nil +} + +func init() { + proto.RegisterType((*RemoteSocketServiceError)(nil), "appengine.RemoteSocketServiceError") + proto.RegisterType((*AddressPort)(nil), "appengine.AddressPort") + proto.RegisterType((*CreateSocketRequest)(nil), "appengine.CreateSocketRequest") + proto.RegisterType((*CreateSocketReply)(nil), "appengine.CreateSocketReply") + proto.RegisterType((*BindRequest)(nil), "appengine.BindRequest") + proto.RegisterType((*BindReply)(nil), "appengine.BindReply") + proto.RegisterType((*GetSocketNameRequest)(nil), "appengine.GetSocketNameRequest") + proto.RegisterType((*GetSocketNameReply)(nil), "appengine.GetSocketNameReply") + proto.RegisterType((*GetPeerNameRequest)(nil), "appengine.GetPeerNameRequest") + proto.RegisterType((*GetPeerNameReply)(nil), "appengine.GetPeerNameReply") + proto.RegisterType((*SocketOption)(nil), "appengine.SocketOption") + proto.RegisterType((*SetSocketOptionsRequest)(nil), "appengine.SetSocketOptionsRequest") + proto.RegisterType((*SetSocketOptionsReply)(nil), "appengine.SetSocketOptionsReply") + proto.RegisterType((*GetSocketOptionsRequest)(nil), "appengine.GetSocketOptionsRequest") + proto.RegisterType((*GetSocketOptionsReply)(nil), "appengine.GetSocketOptionsReply") + proto.RegisterType((*ConnectRequest)(nil), "appengine.ConnectRequest") + proto.RegisterType((*ConnectReply)(nil), "appengine.ConnectReply") + proto.RegisterType((*ListenRequest)(nil), "appengine.ListenRequest") + proto.RegisterType((*ListenReply)(nil), "appengine.ListenReply") + proto.RegisterType((*AcceptRequest)(nil), "appengine.AcceptRequest") + proto.RegisterType((*AcceptReply)(nil), "appengine.AcceptReply") + proto.RegisterType((*ShutDownRequest)(nil), "appengine.ShutDownRequest") + proto.RegisterType((*ShutDownReply)(nil), "appengine.ShutDownReply") + proto.RegisterType((*CloseRequest)(nil), "appengine.CloseRequest") + proto.RegisterType((*CloseReply)(nil), "appengine.CloseReply") + proto.RegisterType((*SendRequest)(nil), "appengine.SendRequest") + proto.RegisterType((*SendReply)(nil), "appengine.SendReply") + proto.RegisterType((*ReceiveRequest)(nil), "appengine.ReceiveRequest") + proto.RegisterType((*ReceiveReply)(nil), "appengine.ReceiveReply") + proto.RegisterType((*PollEvent)(nil), "appengine.PollEvent") + proto.RegisterType((*PollRequest)(nil), "appengine.PollRequest") + proto.RegisterType((*PollReply)(nil), "appengine.PollReply") + proto.RegisterType((*ResolveRequest)(nil), "appengine.ResolveRequest") + proto.RegisterType((*ResolveReply)(nil), "appengine.ResolveReply") +} + +func init() { + proto.RegisterFile("google.golang.org/appengine/internal/socket/socket_service.proto", 
fileDescriptor_socket_service_b5f8f233dc327808) +} + +var fileDescriptor_socket_service_b5f8f233dc327808 = []byte{ + // 3088 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x59, 0x5f, 0x77, 0xe3, 0xc6, + 0x75, 0x37, 0x48, 0xfd, 0xe3, 0x90, 0x94, 0xee, 0x62, 0xa5, 0x5d, 0x25, 0x6e, 0x12, 0x05, 0x8e, + 0x1b, 0x25, 0x8e, 0x77, 0x6d, 0x39, 0x4d, 0x9b, 0xa4, 0x49, 0x16, 0x04, 0x86, 0x24, 0x4c, 0x00, + 0x03, 0xcd, 0x0c, 0x25, 0xd1, 0x6d, 0x8a, 0xd0, 0x22, 0xa4, 0x65, 0x4c, 0x11, 0x0c, 0xc9, 0xdd, + 0xf5, 0xba, 0x69, 0xaa, 0xfe, 0x39, 0xfd, 0x12, 0x7d, 0xe8, 0x73, 0x3f, 0x43, 0x4f, 0x4f, 0x5f, + 0xfa, 0xec, 0xc7, 0x7e, 0x84, 0x9e, 0xbe, 0xb4, 0x9f, 0xa1, 0x67, 0x06, 0xe0, 0x60, 0xc8, 0xd5, + 0xae, 0x77, 0x75, 0x72, 0x4e, 0x9e, 0xa4, 0xfb, 0xbb, 0x77, 0xee, 0xff, 0x99, 0xb9, 0x03, 0xa2, + 0x47, 0x97, 0x69, 0x7a, 0x39, 0x4a, 0x1e, 0x5c, 0xa6, 0xa3, 0xfe, 0xf8, 0xf2, 0x41, 0x3a, 0xbd, + 0x7c, 0xd8, 0x9f, 0x4c, 0x92, 0xf1, 0xe5, 0x70, 0x9c, 0x3c, 0x1c, 0x8e, 0xe7, 0xc9, 0x74, 0xdc, + 0x1f, 0x3d, 0x9c, 0xa5, 0xe7, 0x9f, 0x25, 0xf3, 0xfc, 0x4f, 0x3c, 0x4b, 0xa6, 0x4f, 0x87, 0xe7, + 0xc9, 0x83, 0xc9, 0x34, 0x9d, 0xa7, 0x66, 0x45, 0xc9, 0x5b, 0xff, 0xbc, 0x8b, 0xf6, 0x69, 0x72, + 0x95, 0xce, 0x13, 0x26, 0x25, 0x59, 0x26, 0x88, 0xa7, 0xd3, 0x74, 0x6a, 0x7e, 0x07, 0xd5, 0x66, + 0xcf, 0x67, 0xf3, 0xe4, 0x2a, 0x4e, 0x04, 0xbd, 0x6f, 0x1c, 0x18, 0x87, 0xeb, 0x3f, 0x31, 0x3e, + 0xa0, 0xd5, 0x0c, 0xce, 0xa4, 0xbe, 0x8d, 0x6a, 0x92, 0x1d, 0x0f, 0x92, 0x79, 0x7f, 0x38, 0xda, + 0x2f, 0x1d, 0x18, 0x87, 0x15, 0x5a, 0x95, 0x98, 0x2b, 0x21, 0xeb, 0x73, 0x54, 0x91, 0xb2, 0x4e, + 0x3a, 0x48, 0x4c, 0x40, 0x35, 0xd6, 0x63, 0x1c, 0x07, 0x31, 0xa6, 0x94, 0x50, 0x30, 0xcc, 0x3a, + 0xaa, 0xb4, 0x6c, 0x2f, 0x27, 0x4b, 0x66, 0x15, 0x6d, 0x36, 0x6d, 0xcf, 0xef, 0x52, 0x0c, 0x6b, + 0xe6, 0x1e, 0xba, 0x13, 0x61, 0x1a, 0x78, 0x8c, 0x79, 0x24, 0x8c, 0x5d, 0x1c, 0x7a, 0xd8, 0x85, + 0x75, 0xf3, 0x2e, 0xda, 0xf1, 0xc2, 0x13, 0xdb, 0xf7, 0xdc, 0x98, 0xe2, 0xe3, 0x2e, 0x66, 0x1c, + 0x36, 0xcc, 0x3b, 0xa8, 0xce, 0x88, 0xd3, 0xc1, 0x3c, 0x76, 0x7c, 0xc2, 0xb0, 0x0b, 0x9b, 0xd6, + 0xbf, 0x99, 0xa8, 0xca, 0x34, 0x67, 0x77, 0x50, 0x95, 0xf5, 0x58, 0xcc, 0xba, 0x8e, 0x83, 0x19, + 0x83, 0xb7, 0x84, 0x6d, 0x01, 0x60, 0x61, 0x04, 0x0c, 0x73, 0x1b, 0x21, 0x49, 0x86, 0x04, 0x87, + 0x1c, 0x4a, 0x8a, 0xcd, 0xa8, 0xd3, 0x86, 0xb2, 0x22, 0xbd, 0x90, 0x53, 0x58, 0x13, 0x9e, 0x66, + 0x24, 0x81, 0x75, 0xc5, 0x0b, 0xcf, 0x3c, 0x02, 0x1b, 0x8a, 0x3c, 0x6a, 0x78, 0x2d, 0xd8, 0x5c, + 0x18, 0x16, 0x8a, 0xcf, 0xb0, 0x03, 0x5b, 0x8a, 0xdf, 0xb0, 0xdd, 0x26, 0x54, 0x94, 0x61, 0xa7, + 0xed, 0xf9, 0x2e, 0x20, 0x45, 0xdb, 0x2d, 0xdb, 0x0b, 0xa1, 0x2a, 0x02, 0x96, 0xf4, 0x29, 0xe9, + 0xfa, 0x6e, 0xc3, 0x27, 0x4e, 0x07, 0xaa, 0x9a, 0xb7, 0x01, 0x0e, 0xa0, 0x56, 0x2c, 0x12, 0xd1, + 0x41, 0x5d, 0xd1, 0x4d, 0xbb, 0xeb, 0x73, 0xd8, 0xd6, 0x9c, 0xe0, 0x0d, 0xbf, 0x03, 0x3b, 0x85, + 0x13, 0x5d, 0xd6, 0x03, 0x50, 0xf2, 0xf8, 0xcc, 0x63, 0x1c, 0xee, 0x28, 0xf6, 0x99, 0x8b, 0x4f, + 0xc0, 0xd4, 0xcc, 0x09, 0xfa, 0xae, 0xae, 0xce, 0xf5, 0x28, 0xec, 0x2a, 0x01, 0x8f, 0x09, 0x7a, + 0xaf, 0xa0, 0x45, 0xa9, 0xe0, 0x5e, 0xa1, 0xa0, 0xe9, 0xf9, 0x18, 0xee, 0x2b, 0x3a, 0x90, 0xf4, + 0xbe, 0x66, 0x80, 0xf3, 0x1e, 0x7c, 0x4d, 0x19, 0xe0, 0x67, 0xbc, 0xc1, 0x7a, 0xf0, 0x75, 0xe5, + 0x50, 0x53, 0x24, 0xf5, 0x6d, 0x4d, 0x9e, 0x45, 0x0e, 0xfc, 0x91, 0xa2, 0x59, 0xe4, 0x45, 0x18, + 0xbe, 0xa1, 0xc4, 0x29, 0x69, 0x32, 0xf8, 0x66, 0x61, 0xce, 0xf7, 0xc2, 0x0e, 0x7c, 0xab, 0xa8, + 0xbd, 0x90, 0x3e, 0x30, 0x6b, 0x68, 0x4b, 0x92, 0x2e, 
0x09, 0xe0, 0xdb, 0x4a, 0x98, 0xda, 0x61, + 0x0b, 0x83, 0xa5, 0x7c, 0x71, 0xb1, 0xed, 0xfa, 0x1d, 0x78, 0x47, 0x76, 0x9b, 0x02, 0x44, 0x3d, + 0xde, 0x31, 0x77, 0x11, 0x64, 0xfe, 0xd8, 0x01, 0xe6, 0x84, 0xf8, 0x24, 0x6c, 0xc1, 0x77, 0x34, + 0x2f, 0x7d, 0xa7, 0x03, 0xef, 0xea, 0x5e, 0xf7, 0x18, 0xfc, 0xb1, 0x52, 0x14, 0x12, 0x8e, 0x83, + 0x88, 0xf7, 0xe0, 0xbb, 0xca, 0x33, 0x9f, 0x90, 0x08, 0x0e, 0xf5, 0x3a, 0xb3, 0x16, 0x7c, 0xbf, + 0x68, 0x43, 0x97, 0x06, 0xf0, 0x9e, 0xd6, 0x3b, 0x34, 0x6c, 0xc1, 0x0f, 0xf2, 0x1d, 0x16, 0x63, + 0xff, 0x28, 0x64, 0xbd, 0xd0, 0x81, 0xf7, 0x95, 0x84, 0xff, 0x51, 0xdb, 0xe7, 0xf0, 0x40, 0xa3, + 0x29, 0xe3, 0xf0, 0xb0, 0xa0, 0x43, 0xa1, 0xe1, 0x03, 0x15, 0x6c, 0x37, 0xb4, 0xb9, 0xd3, 0x86, + 0x0f, 0x35, 0x0f, 0x1c, 0xe6, 0xc1, 0x51, 0xb1, 0xe0, 0x48, 0x28, 0xfc, 0x48, 0xef, 0x66, 0x0c, + 0x3f, 0xd4, 0x49, 0x0a, 0x7f, 0xa2, 0xa4, 0xcf, 0x9a, 0x5d, 0xdf, 0x87, 0x1f, 0x69, 0xda, 0xec, + 0x90, 0xc0, 0x9f, 0x2a, 0x73, 0x42, 0xfc, 0xd8, 0x81, 0x3f, 0xd3, 0x01, 0xe6, 0x73, 0xf8, 0xb1, + 0x5a, 0xd1, 0x68, 0x92, 0x90, 0xc3, 0x4f, 0xf5, 0x1c, 0x72, 0x0a, 0x7f, 0xae, 0xb5, 0xa2, 0x6b, + 0x73, 0x1b, 0x7e, 0xa6, 0x3c, 0xe0, 0x5e, 0x80, 0xe1, 0xe7, 0xc5, 0xe6, 0x24, 0x8c, 0xc2, 0x2f, + 0xb4, 0xe5, 0x21, 0xe6, 0xf0, 0x48, 0xa3, 0xa3, 0x4e, 0x0b, 0x6c, 0xa5, 0x8e, 0xe2, 0x80, 0x70, + 0x0c, 0x0d, 0x4d, 0xbf, 0xec, 0x1d, 0x47, 0x35, 0x8b, 0xed, 0x9e, 0x80, 0x5b, 0x34, 0x1e, 0x0d, + 0x42, 0x0e, 0x58, 0x99, 0x73, 0x48, 0x10, 0x40, 0x53, 0xb1, 0x23, 0x4a, 0x38, 0x81, 0x96, 0xaa, + 0x78, 0xd0, 0xf5, 0xb9, 0xd7, 0x26, 0x11, 0xb4, 0x8b, 0xf6, 0x22, 0xdc, 0x25, 0x1c, 0x3c, 0x3d, + 0x05, 0xa2, 0xe8, 0x1f, 0xab, 0x45, 0xe4, 0x04, 0xd3, 0xa6, 0x4f, 0x4e, 0xa1, 0xa3, 0x0a, 0x1d, + 0x12, 0xde, 0x0d, 0xbd, 0x63, 0xf0, 0x8b, 0x3c, 0xd9, 0x6e, 0xd3, 0x85, 0x40, 0x0f, 0xc4, 0x69, + 0xb7, 0x20, 0x54, 0x80, 0xef, 0x35, 0x6c, 0xc7, 0x01, 0xa2, 0x03, 0x0d, 0xdb, 0x85, 0x48, 0x07, + 0x98, 0x13, 0xc2, 0xb1, 0x0e, 0x04, 0xf6, 0x19, 0xd0, 0xa2, 0xbf, 0xbc, 0x86, 0x3c, 0xcc, 0x58, + 0xb1, 0xd1, 0x7d, 0x86, 0x8f, 0x81, 0x2b, 0x09, 0x8a, 0x19, 0xb7, 0x29, 0x87, 0xae, 0x42, 0x18, + 0xa7, 0x72, 0xbb, 0x9d, 0xa8, 0x35, 0x5d, 0x86, 0x29, 0x83, 0x53, 0x3d, 0x18, 0x71, 0x8a, 0xc3, + 0x99, 0xda, 0x4e, 0xae, 0xd0, 0xe2, 0xba, 0x94, 0xe2, 0x63, 0xe8, 0x29, 0xb9, 0x80, 0xb5, 0x98, + 0xf7, 0x09, 0x86, 0x4f, 0x4c, 0x13, 0x6d, 0x17, 0xe9, 0xe5, 0xbd, 0x08, 0xc3, 0x5f, 0xa8, 0xf3, + 0x32, 0x24, 0x12, 0x25, 0x11, 0x87, 0xbf, 0x34, 0xef, 0xa3, 0xbb, 0x85, 0x60, 0x48, 0x58, 0x37, + 0x8a, 0x08, 0xe5, 0xf0, 0x4b, 0xc5, 0x10, 0x86, 0x79, 0xc1, 0xf8, 0x2b, 0xa5, 0x9a, 0x44, 0xc2, + 0xad, 0x6e, 0x14, 0x41, 0xac, 0x1f, 0x7b, 0xac, 0x2b, 0x80, 0x85, 0x9f, 0x51, 0xb3, 0x58, 0xfa, + 0x2b, 0x85, 0xda, 0x1a, 0xda, 0x57, 0x0a, 0x45, 0x3c, 0x5e, 0xd8, 0x65, 0x18, 0x3e, 0x15, 0x77, + 0x9c, 0xc2, 0x42, 0xc2, 0xed, 0x13, 0xdb, 0xf3, 0xe1, 0xbc, 0x48, 0x08, 0xe6, 0x2e, 0x39, 0x0d, + 0x61, 0x50, 0x04, 0x85, 0x79, 0x37, 0xa4, 0xd8, 0x76, 0xda, 0x90, 0x14, 0xc7, 0x07, 0xe6, 0x14, + 0x33, 0xcc, 0xe1, 0x42, 0x99, 0x76, 0x48, 0x18, 0xda, 0x0d, 0x42, 0x39, 0x76, 0xe1, 0x52, 0x99, + 0x16, 0x68, 0x26, 0xf9, 0x58, 0x8b, 0xa5, 0xd1, 0x6d, 0x32, 0x18, 0x2a, 0xc0, 0x63, 0x42, 0x0c, + 0x7e, 0xad, 0x97, 0x45, 0x22, 0x9f, 0x29, 0x83, 0xac, 0xdd, 0xcd, 0x1c, 0x1b, 0x29, 0x83, 0x9c, + 0x90, 0xc0, 0x0e, 0x7b, 0x14, 0x37, 0x19, 0x5c, 0x29, 0x41, 0xb1, 0x07, 0x5d, 0xd2, 0xe5, 0x30, + 0x5e, 0xf2, 0x8c, 0xe2, 0x66, 0x57, 0xdc, 0xd2, 0xa9, 0x12, 0x6c, 0x13, 0x96, 0x69, 0x9c, 0x28, + 0x41, 0x01, 0x2d, 0x62, 0xfd, 0x8d, 0x72, 0xc6, 0xf6, 0x29, 0xb6, 0xdd, 0x1e, 
0x4c, 0x55, 0x4a, + 0xbc, 0x30, 0xa2, 0xa4, 0x45, 0xc5, 0xa5, 0x3e, 0x2b, 0xb6, 0x23, 0xb7, 0x7d, 0x0c, 0xf3, 0xe2, + 0x38, 0x73, 0x7c, 0x6c, 0x87, 0xf0, 0x44, 0x2f, 0x61, 0x68, 0x07, 0xf0, 0xb4, 0x00, 0xb2, 0xe4, + 0x3f, 0xd3, 0xae, 0x32, 0x21, 0xf0, 0xb9, 0x72, 0x31, 0x3b, 0x11, 0x3c, 0x02, 0xcf, 0x95, 0x88, + 0x7b, 0xdc, 0x25, 0x1c, 0xbe, 0xd0, 0xce, 0xf1, 0x00, 0xbb, 0x5e, 0x37, 0x80, 0xbf, 0x56, 0xde, + 0x65, 0x80, 0x6c, 0xcd, 0xdf, 0x2a, 0x39, 0xc7, 0x0e, 0x1d, 0xec, 0x63, 0x17, 0xfe, 0x46, 0x3b, + 0x7f, 0x3a, 0xb8, 0x07, 0xbf, 0x53, 0xeb, 0x3a, 0xb8, 0x87, 0xcf, 0x22, 0x8f, 0x62, 0x17, 0xfe, + 0xd6, 0xdc, 0x2d, 0x40, 0x8a, 0x4f, 0x48, 0x07, 0xbb, 0x70, 0x6d, 0x98, 0x7b, 0x79, 0xa2, 0x24, + 0xfa, 0x31, 0x76, 0x44, 0xad, 0xff, 0xce, 0x30, 0xef, 0x2e, 0x1a, 0xf7, 0x34, 0xc4, 0x54, 0x5c, + 0x51, 0xf0, 0xf7, 0x86, 0xb9, 0x9f, 0xb7, 0x79, 0x48, 0x38, 0xc5, 0x8e, 0x38, 0x48, 0xec, 0x86, + 0x8f, 0xe1, 0x1f, 0x0c, 0x13, 0x16, 0xe7, 0x44, 0xb3, 0xe3, 0xf9, 0x3e, 0xfc, 0xa3, 0xf1, 0xf5, + 0x12, 0x18, 0xd6, 0x15, 0xaa, 0xda, 0x83, 0xc1, 0x34, 0x99, 0xcd, 0xa2, 0x74, 0x3a, 0x37, 0x4d, + 0xb4, 0x36, 0x49, 0xa7, 0xf3, 0x7d, 0xe3, 0xa0, 0x74, 0xb8, 0x4e, 0xe5, 0xff, 0xe6, 0xbb, 0x68, + 0x7b, 0xd2, 0x3f, 0xff, 0x2c, 0x19, 0xc4, 0xfd, 0x4c, 0x52, 0xce, 0x7f, 0x35, 0x5a, 0xcf, 0xd0, + 0x7c, 0xb9, 0xf9, 0x0e, 0xaa, 0x3f, 0x4e, 0x67, 0xf3, 0x71, 0xff, 0x2a, 0x89, 0x1f, 0x0f, 0xc7, + 0xf3, 0xfd, 0xb2, 0x9c, 0x12, 0x6b, 0x0b, 0xb0, 0x3d, 0x1c, 0xcf, 0xad, 0x7f, 0x5a, 0x43, 0x77, + 0x9d, 0x69, 0xd2, 0x5f, 0x0c, 0xa3, 0x34, 0xf9, 0xcd, 0x93, 0x64, 0x36, 0x37, 0x1d, 0xb4, 0x71, + 0xd1, 0xbf, 0x1a, 0x8e, 0x9e, 0x4b, 0xcb, 0xdb, 0x47, 0xef, 0x3d, 0x50, 0x03, 0xec, 0x83, 0x1b, + 0xe4, 0x1f, 0x64, 0x54, 0x53, 0x2e, 0xa1, 0xf9, 0x52, 0xd3, 0x43, 0x5b, 0x72, 0xfa, 0x3d, 0x4f, + 0xc5, 0x88, 0x2a, 0xd4, 0xbc, 0xff, 0x5a, 0x6a, 0xa2, 0x7c, 0x11, 0x55, 0xcb, 0xcd, 0x9f, 0xa3, + 0xed, 0x7c, 0xae, 0x4e, 0x27, 0xf3, 0x61, 0x3a, 0x9e, 0xed, 0x97, 0x0f, 0xca, 0x87, 0xd5, 0xa3, + 0xfb, 0x9a, 0xc2, 0x6c, 0x31, 0x91, 0x7c, 0x5a, 0x9f, 0x69, 0xd4, 0xcc, 0x6c, 0xa0, 0x3b, 0x93, + 0x69, 0xfa, 0xf9, 0xf3, 0x38, 0xf9, 0x3c, 0x9b, 0xd6, 0xe3, 0xe1, 0x64, 0x7f, 0xed, 0xc0, 0x38, + 0xac, 0x1e, 0xdd, 0xd3, 0x54, 0x68, 0xa9, 0xa7, 0x3b, 0x72, 0x01, 0xce, 0xe5, 0xbd, 0x89, 0x79, + 0x88, 0xb6, 0x47, 0xc3, 0xd9, 0x3c, 0x19, 0xc7, 0x9f, 0xf6, 0xcf, 0x3f, 0x1b, 0xa5, 0x97, 0xfb, + 0xeb, 0x8b, 0xe9, 0xbc, 0x9e, 0x31, 0x1a, 0x19, 0x6e, 0x7e, 0x84, 0x2a, 0x53, 0x39, 0xe1, 0x0b, + 0x2b, 0x1b, 0xaf, 0xb4, 0xb2, 0x95, 0x09, 0x7a, 0x13, 0x73, 0x0f, 0x6d, 0xf4, 0x27, 0x93, 0x78, + 0x38, 0xd8, 0xaf, 0xc8, 0x42, 0xad, 0xf7, 0x27, 0x13, 0x6f, 0x60, 0x7e, 0x03, 0xa1, 0xc9, 0x34, + 0xfd, 0x75, 0x72, 0x3e, 0x17, 0x2c, 0x74, 0x60, 0x1c, 0x96, 0x69, 0x25, 0x47, 0xbc, 0x81, 0x65, + 0xa1, 0x9a, 0x9e, 0x7b, 0x73, 0x0b, 0xad, 0x79, 0xd1, 0xd3, 0x1f, 0x82, 0x91, 0xff, 0xf7, 0x23, + 0x28, 0x59, 0x16, 0xda, 0x5e, 0x4e, 0xac, 0xb9, 0x89, 0xca, 0xdc, 0x89, 0xc0, 0x10, 0xff, 0x74, + 0xdd, 0x08, 0x4a, 0xd6, 0x97, 0x06, 0xba, 0xb3, 0x5c, 0x91, 0xc9, 0xe8, 0xb9, 0xf9, 0x1e, 0xba, + 0x93, 0xa7, 0x7d, 0x90, 0xcc, 0xce, 0xa7, 0xc3, 0xc9, 0x3c, 0x7f, 0x93, 0x54, 0x28, 0x64, 0x0c, + 0x57, 0xe1, 0xe6, 0xcf, 0xd0, 0xb6, 0x78, 0xf4, 0x24, 0x53, 0xd5, 0x97, 0xe5, 0x57, 0x86, 0x5e, + 0xcf, 0xa4, 0x17, 0xfd, 0xfa, 0x7b, 0x28, 0xd1, 0xf7, 0x2b, 0x5b, 0xff, 0xb3, 0x09, 0xd7, 0xd7, + 0xd7, 0xd7, 0x25, 0xeb, 0x77, 0xa8, 0xda, 0x18, 0x8e, 0x07, 0x8b, 0x86, 0x7e, 0x49, 0x24, 0xa5, + 0x1b, 0x23, 0xb9, 0xd1, 0x15, 0xd1, 0xc1, 0xaf, 0xef, 0x8a, 0x45, 0x50, 0x25, 0xb3, 0x2f, 0xf2, + 0x78, 
0xa3, 0x42, 0xe3, 0x8d, 0x62, 0xb3, 0x1c, 0xb4, 0xdb, 0x4a, 0xe6, 0x59, 0x75, 0xc2, 0xfe, + 0x55, 0x72, 0x9b, 0xc8, 0xac, 0x33, 0x64, 0xae, 0x28, 0x79, 0xa9, 0x7b, 0xa5, 0x37, 0x73, 0xcf, + 0x96, 0x9a, 0xa3, 0x24, 0x99, 0xde, 0xda, 0x39, 0x07, 0xc1, 0x92, 0x0a, 0xe1, 0xda, 0x43, 0xb4, + 0x39, 0x49, 0x92, 0xe9, 0x57, 0x3b, 0xb4, 0x21, 0xc4, 0xbc, 0x89, 0xf5, 0xe5, 0xe6, 0x62, 0x47, + 0x64, 0x7b, 0xdf, 0xfc, 0x05, 0x5a, 0x1f, 0x25, 0x4f, 0x93, 0x51, 0x7e, 0x92, 0x7d, 0xef, 0x25, + 0x27, 0xc6, 0x12, 0xe1, 0x8b, 0x05, 0x34, 0x5b, 0x67, 0x3e, 0x42, 0x1b, 0xd9, 0xa1, 0x93, 0x1f, + 0x62, 0x87, 0xaf, 0xa3, 0x41, 0x46, 0x90, 0xaf, 0x33, 0x77, 0xd1, 0xfa, 0xd3, 0xfe, 0xe8, 0x49, + 0xb2, 0x5f, 0x3e, 0x28, 0x1d, 0xd6, 0x68, 0x46, 0x58, 0x09, 0xba, 0xf3, 0x82, 0x4d, 0xed, 0x41, + 0xcd, 0x88, 0x1f, 0x7b, 0x11, 0xbc, 0x25, 0x67, 0x95, 0x02, 0xca, 0xfe, 0x05, 0x43, 0xce, 0x16, + 0x05, 0x2c, 0xb6, 0xf3, 0xc6, 0x0a, 0x26, 0x76, 0xf6, 0x1d, 0xeb, 0xdf, 0xd7, 0x11, 0xac, 0x7a, + 0x26, 0x6f, 0xbb, 0x85, 0x60, 0xec, 0xe2, 0x46, 0xb7, 0x05, 0x86, 0x1c, 0xc9, 0x14, 0x48, 0xc5, + 0x94, 0x28, 0xc6, 0x23, 0x28, 0x2d, 0xa9, 0x8d, 0xe5, 0x95, 0x5a, 0x5e, 0xd6, 0x90, 0x7d, 0x47, + 0x58, 0x5b, 0xd6, 0xe0, 0x92, 0x90, 0x53, 0xd2, 0xe5, 0x18, 0xd6, 0x97, 0x19, 0x0d, 0x4a, 0x6c, + 0xd7, 0xb1, 0xe5, 0x07, 0x04, 0x31, 0x74, 0x28, 0x06, 0x0b, 0xdd, 0x46, 0xb7, 0x09, 0x9b, 0xcb, + 0x28, 0x75, 0x4e, 0x04, 0xba, 0xb5, 0xac, 0xa4, 0x83, 0x71, 0x64, 0xfb, 0xde, 0x09, 0x86, 0xca, + 0x32, 0x83, 0x90, 0x86, 0x17, 0xfa, 0x5e, 0x88, 0x01, 0x2d, 0xeb, 0xf1, 0xbd, 0xb0, 0x85, 0x29, + 0xd4, 0xcd, 0x7b, 0xc8, 0x5c, 0xd2, 0x2e, 0x86, 0x25, 0x02, 0xbb, 0xcb, 0x38, 0x0b, 0xdd, 0x0c, + 0xdf, 0xd3, 0x6a, 0xe2, 0x45, 0x31, 0x27, 0x0c, 0x8c, 0x15, 0x88, 0xfb, 0x50, 0xd2, 0xca, 0xe4, + 0x45, 0x71, 0x5b, 0x8c, 0x9a, 0x8e, 0x0f, 0xe5, 0x65, 0x98, 0x44, 0xdc, 0x23, 0x21, 0x83, 0x35, + 0xcd, 0x16, 0x77, 0xa2, 0x58, 0x3c, 0xef, 0x7d, 0xbb, 0x07, 0x86, 0x26, 0x2e, 0xf0, 0xc0, 0x3e, + 0x63, 0xb8, 0x05, 0x25, 0x2d, 0xdb, 0x02, 0x76, 0x08, 0xed, 0x40, 0x59, 0x0b, 0x5b, 0x80, 0x22, + 0x21, 0x9e, 0xeb, 0x63, 0x58, 0x33, 0xf7, 0xd1, 0xee, 0x2a, 0x23, 0xe4, 0x27, 0x3e, 0xac, 0xaf, + 0x98, 0x15, 0x1c, 0x27, 0x14, 0x65, 0x58, 0x36, 0x2b, 0x9e, 0xb0, 0x21, 0x87, 0xcd, 0x15, 0xf1, + 0x2c, 0x81, 0x47, 0xb0, 0x65, 0xbe, 0x8d, 0xee, 0x6b, 0xb8, 0x8b, 0x9b, 0x98, 0xc6, 0xb6, 0xe3, + 0xe0, 0x88, 0x43, 0x65, 0x85, 0x79, 0xea, 0x85, 0x2e, 0x39, 0x8d, 0x1d, 0xdf, 0x0e, 0x22, 0x40, + 0x2b, 0x81, 0x78, 0x61, 0x93, 0x40, 0x75, 0x25, 0x90, 0xe3, 0xae, 0xe7, 0x74, 0x6c, 0xa7, 0x03, + 0x35, 0x39, 0x11, 0x3d, 0x47, 0xf7, 0xd9, 0xe2, 0xc8, 0xca, 0xaf, 0xf3, 0x5b, 0x1d, 0xea, 0x1f, + 0xa2, 0xcd, 0xc5, 0xec, 0x50, 0x7a, 0xf5, 0xec, 0xb0, 0x90, 0xb3, 0xee, 0xa3, 0xbd, 0x17, 0x4d, + 0x4f, 0x46, 0xcf, 0x85, 0x4f, 0xad, 0x3f, 0x90, 0x4f, 0x1f, 0xa3, 0xbd, 0xd6, 0x4d, 0x3e, 0xdd, + 0x46, 0xd7, 0xbf, 0x18, 0x68, 0xdb, 0x49, 0xc7, 0xe3, 0xe4, 0x7c, 0x7e, 0x2b, 0xf7, 0x97, 0xe6, + 0x9c, 0x57, 0xdf, 0x8f, 0xc5, 0x9c, 0xf3, 0x1e, 0xda, 0x99, 0x0f, 0xaf, 0x92, 0xf4, 0xc9, 0x3c, + 0x9e, 0x25, 0xe7, 0xe9, 0x78, 0x90, 0xcd, 0x09, 0xc6, 0x4f, 0x4a, 0xef, 0x7f, 0x48, 0xb7, 0x73, + 0x16, 0xcb, 0x38, 0xd6, 0x2f, 0x51, 0x4d, 0x39, 0xf8, 0x7b, 0xba, 0x48, 0xf5, 0x21, 0xe1, 0x04, + 0xd5, 0x7d, 0x39, 0xb9, 0xdd, 0x2a, 0xfc, 0x7d, 0xb4, 0xb9, 0x98, 0x04, 0x4b, 0x72, 0x3e, 0x5f, + 0x90, 0x56, 0x1d, 0x55, 0x17, 0x7a, 0x45, 0xbb, 0x0c, 0x51, 0xdd, 0x3e, 0x3f, 0x4f, 0x26, 0xb7, + 0xcb, 0xf2, 0x0d, 0x09, 0x2b, 0xbd, 0x34, 0x61, 0xd7, 0x06, 0xaa, 0x2e, 0x6c, 0x89, 0x84, 0x1d, + 0xa1, 0xbd, 0x71, 0xf2, 0x2c, 
0x7e, 0xd1, 0x5a, 0xf6, 0x66, 0xb8, 0x3b, 0x4e, 0x9e, 0xb1, 0x1b, + 0x06, 0xb9, 0xbc, 0xac, 0xaf, 0x39, 0xc8, 0x65, 0xd2, 0x39, 0x64, 0xfd, 0x97, 0x81, 0x76, 0xd8, + 0xe3, 0x27, 0x73, 0x37, 0x7d, 0x76, 0xbb, 0xbc, 0x7e, 0x80, 0xca, 0x8f, 0xd3, 0x67, 0xf9, 0x6d, + 0xfb, 0x4d, 0xbd, 0x8b, 0x97, 0xb5, 0x3e, 0x68, 0xa7, 0xcf, 0xa8, 0x10, 0x35, 0xbf, 0x85, 0xaa, + 0xb3, 0x64, 0x3c, 0x88, 0xd3, 0x8b, 0x8b, 0x59, 0x32, 0x97, 0xd7, 0x6c, 0x99, 0x22, 0x01, 0x11, + 0x89, 0x58, 0x0e, 0x2a, 0xb7, 0xd3, 0x67, 0xfa, 0x45, 0xd6, 0xee, 0xf2, 0x98, 0xba, 0xcb, 0xf7, + 0xa8, 0xc0, 0x4e, 0xc5, 0x85, 0xa7, 0xdd, 0x1b, 0x99, 0xdc, 0x29, 0x85, 0xb2, 0xb5, 0x83, 0xea, + 0x85, 0x07, 0xa2, 0xae, 0xbf, 0x42, 0x35, 0x67, 0x94, 0xce, 0x6e, 0x35, 0xed, 0x98, 0xef, 0x2c, + 0xfb, 0x2c, 0xea, 0x51, 0x96, 0x25, 0xd5, 0xfd, 0xae, 0x21, 0x94, 0x5b, 0x10, 0xf6, 0xfe, 0xcf, + 0x40, 0x55, 0x96, 0xdc, 0x72, 0xa8, 0xbd, 0x87, 0xd6, 0x06, 0xfd, 0x79, 0x5f, 0xa6, 0xb5, 0xd6, + 0x28, 0x6d, 0x19, 0x54, 0xd2, 0xe2, 0x9d, 0x38, 0x9b, 0x4f, 0x93, 0xfe, 0xd5, 0x72, 0xf6, 0x6a, + 0x19, 0x98, 0xf9, 0x61, 0xde, 0x47, 0xeb, 0x17, 0xa3, 0xfe, 0xe5, 0x4c, 0x0e, 0xe4, 0xf2, 0xc9, + 0x93, 0xd1, 0x62, 0x3e, 0x93, 0x51, 0xcc, 0x53, 0xf9, 0x1a, 0x7a, 0xc5, 0x7c, 0x26, 0xc4, 0x78, + 0x7a, 0x53, 0x37, 0x6f, 0xbc, 0xb4, 0x9b, 0x0f, 0x51, 0x25, 0x8b, 0x57, 0xb4, 0xf2, 0xdb, 0xa8, + 0x22, 0x1c, 0x8e, 0x67, 0xc9, 0x78, 0x9e, 0xfd, 0x30, 0x42, 0xb7, 0x04, 0xc0, 0x92, 0xf1, 0xdc, + 0xfa, 0x4f, 0x03, 0x6d, 0xd3, 0xe4, 0x3c, 0x19, 0x3e, 0xbd, 0x5d, 0x35, 0x94, 0xf2, 0xe1, 0x17, + 0x49, 0xbe, 0x9b, 0x33, 0xe5, 0xc3, 0x2f, 0x92, 0x22, 0xfa, 0xf2, 0x4a, 0xf4, 0x37, 0x04, 0xb3, + 0xfe, 0xd2, 0x60, 0x2c, 0xb4, 0xde, 0x94, 0xab, 0xaa, 0x68, 0x33, 0x60, 0x2d, 0x31, 0xa8, 0x80, + 0x61, 0xd6, 0xd0, 0x96, 0x20, 0x22, 0x8c, 0x3b, 0x50, 0xb2, 0xfe, 0xd5, 0x40, 0x35, 0x15, 0x86, + 0x08, 0xfa, 0x85, 0xea, 0xc8, 0x3e, 0x59, 0xa9, 0xce, 0xa2, 0xb4, 0xc2, 0x3d, 0xbd, 0xb4, 0x3f, + 0x45, 0xf5, 0x69, 0xa6, 0x6c, 0x10, 0x5f, 0x4c, 0xd3, 0xab, 0xaf, 0x78, 0x4e, 0xd5, 0x16, 0xc2, + 0xcd, 0x69, 0x7a, 0x25, 0xf6, 0xd4, 0xa7, 0x4f, 0x2e, 0x2e, 0x92, 0x69, 0x96, 0x13, 0xf9, 0xd6, + 0xa5, 0x28, 0x83, 0x44, 0x56, 0xac, 0x2f, 0xcb, 0xa8, 0x12, 0xa5, 0xa3, 0x11, 0x7e, 0x9a, 0x8c, + 0xdf, 0x30, 0xdb, 0xdf, 0x43, 0x30, 0xcd, 0xaa, 0x94, 0x0c, 0xe2, 0x44, 0xac, 0x9f, 0xe5, 0x49, + 0xdf, 0x51, 0xb8, 0x54, 0x3b, 0x33, 0xbf, 0x8b, 0x76, 0xd2, 0x4f, 0xe5, 0x4b, 0x51, 0x49, 0x96, + 0xa5, 0xe4, 0xf6, 0x02, 0xce, 0x04, 0xad, 0xff, 0x28, 0xa1, 0xba, 0x72, 0x47, 0x24, 0x5a, 0x9b, + 0x35, 0x22, 0xe2, 0xfb, 0x21, 0x09, 0x31, 0xbc, 0xa5, 0x4d, 0x6e, 0x02, 0xf4, 0xc2, 0xa5, 0x13, + 0x40, 0x40, 0x11, 0xf5, 0x96, 0x46, 0x5e, 0x81, 0x91, 0x2e, 0x87, 0xb5, 0x15, 0x0c, 0x53, 0x0a, + 0x5b, 0x2b, 0x58, 0xbb, 0x1b, 0x01, 0xac, 0xda, 0x3d, 0xb1, 0x7d, 0x38, 0xd0, 0x26, 0x2c, 0x01, + 0x52, 0x37, 0x24, 0x34, 0x80, 0x47, 0xe6, 0xbd, 0x15, 0xb8, 0x61, 0x87, 0xf2, 0x1b, 0xd3, 0x32, + 0x7e, 0x4a, 0xa5, 0xf8, 0x75, 0xe9, 0x05, 0x3c, 0x93, 0x5f, 0x93, 0x1f, 0x9f, 0x0a, 0x3c, 0x60, + 0x2d, 0xb8, 0xde, 0x5a, 0x55, 0x8e, 0x03, 0x72, 0x82, 0xe1, 0xfa, 0x40, 0x7e, 0xc0, 0xd2, 0x8d, + 0x0a, 0xb7, 0xaf, 0x1f, 0x59, 0x8f, 0x51, 0x55, 0x24, 0x70, 0xb1, 0x7f, 0x7e, 0x80, 0x36, 0xf2, + 0x84, 0x1b, 0x72, 0x9e, 0xd8, 0xd5, 0xda, 0x46, 0x25, 0x9a, 0xe6, 0x32, 0x6f, 0x76, 0x4b, 0xfd, + 0x38, 0xeb, 0x9c, 0xac, 0xc5, 0x0b, 0x3b, 0xa5, 0xaf, 0xb6, 0x63, 0xfd, 0x56, 0xec, 0xf3, 0x59, + 0x3a, 0x2a, 0xf6, 0xb9, 0x89, 0xd6, 0xc6, 0xfd, 0xab, 0x24, 0x6f, 0x36, 0xf9, 0xbf, 0x79, 0x82, + 0x20, 0xbf, 0xbb, 0x62, 0xf9, 0x31, 0x6a, 0x98, 0x64, 
0xda, 0xdf, 0xf0, 0x4b, 0xd6, 0x4e, 0xae, + 0xa4, 0x99, 0xeb, 0xb0, 0xfe, 0xbb, 0x2c, 0xf6, 0x67, 0x6e, 0x5e, 0x38, 0x7f, 0xd3, 0xc7, 0xb8, + 0xf2, 0x8b, 0x1f, 0xe3, 0xde, 0x45, 0xdb, 0xe7, 0xfd, 0x71, 0x3a, 0x1e, 0x9e, 0xf7, 0x47, 0xb1, + 0xf4, 0x36, 0xfb, 0x1a, 0x57, 0x57, 0xa8, 0x7c, 0x96, 0xed, 0xa3, 0xcd, 0xfe, 0x68, 0xd8, 0x9f, + 0x25, 0xe2, 0xa0, 0x2d, 0x1f, 0x56, 0xe8, 0x82, 0xb4, 0xfe, 0xb7, 0xa4, 0xff, 0xa0, 0xfb, 0x35, + 0xb4, 0x97, 0x17, 0x10, 0xdb, 0x5e, 0x2c, 0x5e, 0x69, 0x4d, 0x3b, 0xf0, 0x7c, 0xf1, 0x80, 0x28, + 0xae, 0x2e, 0xc9, 0x92, 0xbf, 0x65, 0x96, 0xb4, 0x09, 0x5b, 0xa0, 0x0d, 0xdb, 0x6d, 0xfa, 0x76, + 0x8b, 0x2d, 0x3d, 0xe3, 0x04, 0xa3, 0x69, 0x7b, 0x7e, 0xf6, 0x0b, 0xf0, 0x12, 0x28, 0x55, 0xaf, + 0xaf, 0xc0, 0x01, 0x0e, 0x08, 0xed, 0x2d, 0xbd, 0x1d, 0x04, 0x9c, 0xff, 0x1c, 0xb4, 0xf9, 0x02, + 0x1c, 0xda, 0x01, 0x86, 0x2d, 0xed, 0x49, 0x21, 0x60, 0x86, 0xe9, 0x89, 0xe7, 0x2c, 0xbf, 0xe1, + 0x24, 0x4e, 0x9c, 0x8e, 0x7c, 0x68, 0xa2, 0x15, 0x3d, 0xd9, 0xef, 0xd8, 0x4b, 0x6f, 0x86, 0x3c, + 0xa2, 0xb6, 0x17, 0x72, 0x06, 0xb5, 0x15, 0x86, 0xfc, 0xdd, 0xc1, 0x21, 0x3e, 0xd4, 0x57, 0x18, + 0xea, 0x37, 0x9d, 0x6d, 0x6d, 0x0f, 0xcb, 0xb8, 0xec, 0x33, 0xd8, 0x69, 0x6c, 0x7d, 0xb2, 0x91, + 0x9d, 0x5a, 0xff, 0x1f, 0x00, 0x00, 0xff, 0xff, 0x31, 0x03, 0x4e, 0xbd, 0xfd, 0x1f, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/appengine/namespace.go b/vendor/google.golang.org/appengine/namespace.go new file mode 100644 index 000000000..21860ca08 --- /dev/null +++ b/vendor/google.golang.org/appengine/namespace.go @@ -0,0 +1,25 @@ +// Copyright 2012 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +package appengine + +import ( + "fmt" + "regexp" + + "golang.org/x/net/context" + + "google.golang.org/appengine/internal" +) + +// Namespace returns a replacement context that operates within the given namespace. +func Namespace(c context.Context, namespace string) (context.Context, error) { + if !validNamespace.MatchString(namespace) { + return nil, fmt.Errorf("appengine: namespace %q does not match /%s/", namespace, validNamespace) + } + return internal.NamespacedContext(c, namespace), nil +} + +// validNamespace matches valid namespace names. +var validNamespace = regexp.MustCompile(`^[0-9A-Za-z._-]{0,100}$`) diff --git a/vendor/google.golang.org/appengine/socket/doc.go b/vendor/google.golang.org/appengine/socket/doc.go new file mode 100644 index 000000000..3de46df82 --- /dev/null +++ b/vendor/google.golang.org/appengine/socket/doc.go @@ -0,0 +1,10 @@ +// Copyright 2012 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// Package socket provides outbound network sockets. +// +// This package is only required in the classic App Engine environment. +// Applications running only in App Engine "flexible environment" should +// use the standard library's net package. +package socket diff --git a/vendor/google.golang.org/appengine/socket/socket_classic.go b/vendor/google.golang.org/appengine/socket/socket_classic.go new file mode 100644 index 000000000..0ad50e2d3 --- /dev/null +++ b/vendor/google.golang.org/appengine/socket/socket_classic.go @@ -0,0 +1,290 @@ +// Copyright 2012 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. 
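+
+// This file backs the socket package with App Engine's "remote_socket"
+// RPC service and is compiled only under the "appengine" build tag;
+// the flexible-environment variant, socket_vm.go, delegates to the
+// standard library's net package instead.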
+ +// +build appengine + +package socket + +import ( + "fmt" + "io" + "net" + "strconv" + "time" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/context" + "google.golang.org/appengine/internal" + + pb "google.golang.org/appengine/internal/socket" +) + +// Dial connects to the address addr on the network protocol. +// The address format is host:port, where host may be a hostname or an IP address. +// Known protocols are "tcp" and "udp". +// The returned connection satisfies net.Conn, and is valid while ctx is valid; +// if the connection is to be used after ctx becomes invalid, invoke SetContext +// with the new context. +func Dial(ctx context.Context, protocol, addr string) (*Conn, error) { + return DialTimeout(ctx, protocol, addr, 0) +} + +var ipFamilies = []pb.CreateSocketRequest_SocketFamily{ + pb.CreateSocketRequest_IPv4, + pb.CreateSocketRequest_IPv6, +} + +// DialTimeout is like Dial but takes a timeout. +// The timeout includes name resolution, if required. +func DialTimeout(ctx context.Context, protocol, addr string, timeout time.Duration) (*Conn, error) { + dialCtx := ctx // Used for dialing and name resolution, but not stored in the *Conn. + if timeout > 0 { + var cancel context.CancelFunc + dialCtx, cancel = context.WithTimeout(ctx, timeout) + defer cancel() + } + + host, portStr, err := net.SplitHostPort(addr) + if err != nil { + return nil, err + } + port, err := strconv.Atoi(portStr) + if err != nil { + return nil, fmt.Errorf("socket: bad port %q: %v", portStr, err) + } + + var prot pb.CreateSocketRequest_SocketProtocol + switch protocol { + case "tcp": + prot = pb.CreateSocketRequest_TCP + case "udp": + prot = pb.CreateSocketRequest_UDP + default: + return nil, fmt.Errorf("socket: unknown protocol %q", protocol) + } + + packedAddrs, resolved, err := resolve(dialCtx, ipFamilies, host) + if err != nil { + return nil, fmt.Errorf("socket: failed resolving %q: %v", host, err) + } + if len(packedAddrs) == 0 { + return nil, fmt.Errorf("no addresses for %q", host) + } + + packedAddr := packedAddrs[0] // use first address + fam := pb.CreateSocketRequest_IPv4 + if len(packedAddr) == net.IPv6len { + fam = pb.CreateSocketRequest_IPv6 + } + + req := &pb.CreateSocketRequest{ + Family: fam.Enum(), + Protocol: prot.Enum(), + RemoteIp: &pb.AddressPort{ + Port: proto.Int32(int32(port)), + PackedAddress: packedAddr, + }, + } + if resolved { + req.RemoteIp.HostnameHint = &host + } + res := &pb.CreateSocketReply{} + if err := internal.Call(dialCtx, "remote_socket", "CreateSocket", req, res); err != nil { + return nil, err + } + + return &Conn{ + ctx: ctx, + desc: res.GetSocketDescriptor(), + prot: prot, + local: res.ProxyExternalIp, + remote: req.RemoteIp, + }, nil +} + +// LookupIP returns the given host's IP addresses. +func LookupIP(ctx context.Context, host string) (addrs []net.IP, err error) { + packedAddrs, _, err := resolve(ctx, ipFamilies, host) + if err != nil { + return nil, fmt.Errorf("socket: failed resolving %q: %v", host, err) + } + addrs = make([]net.IP, len(packedAddrs)) + for i, pa := range packedAddrs { + addrs[i] = net.IP(pa) + } + return addrs, nil +} + +func resolve(ctx context.Context, fams []pb.CreateSocketRequest_SocketFamily, host string) ([][]byte, bool, error) { + // Check if it's an IP address. 
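+ // Literal IP addresses are handled locally, without a Resolve RPC.
+ // To4 normalizes IPv4 addresses to their 4-byte packed form (Dial
+ // picks the socket family from the packed length), while other
+ // addresses keep the 16-byte form. The bool result reports whether
+ // a hostname lookup was actually performed.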
+ if ip := net.ParseIP(host); ip != nil { + if ip := ip.To4(); ip != nil { + return [][]byte{ip}, false, nil + } + return [][]byte{ip}, false, nil + } + + req := &pb.ResolveRequest{ + Name: &host, + AddressFamilies: fams, + } + res := &pb.ResolveReply{} + if err := internal.Call(ctx, "remote_socket", "Resolve", req, res); err != nil { + // XXX: need to map to pb.ResolveReply_ErrorCode? + return nil, false, err + } + return res.PackedAddress, true, nil +} + +// withDeadline is like context.WithDeadline, except it ignores the zero deadline. +func withDeadline(parent context.Context, deadline time.Time) (context.Context, context.CancelFunc) { + if deadline.IsZero() { + return parent, func() {} + } + return context.WithDeadline(parent, deadline) +} + +// Conn represents a socket connection. +// It implements net.Conn. +type Conn struct { + ctx context.Context + desc string + offset int64 + + prot pb.CreateSocketRequest_SocketProtocol + local, remote *pb.AddressPort + + readDeadline, writeDeadline time.Time // optional +} + +// SetContext sets the context that is used by this Conn. +// It is usually used only when using a Conn that was created in a different context, +// such as when a connection is created during a warmup request but used while +// servicing a user request. +func (cn *Conn) SetContext(ctx context.Context) { + cn.ctx = ctx +} + +func (cn *Conn) Read(b []byte) (n int, err error) { + const maxRead = 1 << 20 + if len(b) > maxRead { + b = b[:maxRead] + } + + req := &pb.ReceiveRequest{ + SocketDescriptor: &cn.desc, + DataSize: proto.Int32(int32(len(b))), + } + res := &pb.ReceiveReply{} + if !cn.readDeadline.IsZero() { + req.TimeoutSeconds = proto.Float64(cn.readDeadline.Sub(time.Now()).Seconds()) + } + ctx, cancel := withDeadline(cn.ctx, cn.readDeadline) + defer cancel() + if err := internal.Call(ctx, "remote_socket", "Receive", req, res); err != nil { + return 0, err + } + if len(res.Data) == 0 { + return 0, io.EOF + } + if len(res.Data) > len(b) { + return 0, fmt.Errorf("socket: internal error: read too much data: %d > %d", len(res.Data), len(b)) + } + return copy(b, res.Data), nil +} + +func (cn *Conn) Write(b []byte) (n int, err error) { + const lim = 1 << 20 // max per chunk + + for n < len(b) { + chunk := b[n:] + if len(chunk) > lim { + chunk = chunk[:lim] + } + + req := &pb.SendRequest{ + SocketDescriptor: &cn.desc, + Data: chunk, + StreamOffset: &cn.offset, + } + res := &pb.SendReply{} + if !cn.writeDeadline.IsZero() { + req.TimeoutSeconds = proto.Float64(cn.writeDeadline.Sub(time.Now()).Seconds()) + } + ctx, cancel := withDeadline(cn.ctx, cn.writeDeadline) + defer cancel() + if err = internal.Call(ctx, "remote_socket", "Send", req, res); err != nil { + // assume zero bytes were sent in this RPC + break + } + n += int(res.GetDataSent()) + cn.offset += int64(res.GetDataSent()) + } + + return +} + +func (cn *Conn) Close() error { + req := &pb.CloseRequest{ + SocketDescriptor: &cn.desc, + } + res := &pb.CloseReply{} + if err := internal.Call(cn.ctx, "remote_socket", "Close", req, res); err != nil { + return err + } + cn.desc = "CLOSED" + return nil +} + +func addr(prot pb.CreateSocketRequest_SocketProtocol, ap *pb.AddressPort) net.Addr { + if ap == nil { + return nil + } + switch prot { + case pb.CreateSocketRequest_TCP: + return &net.TCPAddr{ + IP: net.IP(ap.PackedAddress), + Port: int(*ap.Port), + } + case pb.CreateSocketRequest_UDP: + return &net.UDPAddr{ + IP: net.IP(ap.PackedAddress), + Port: int(*ap.Port), + } + } + panic("unknown protocol " + prot.String()) +} + +func 
(cn *Conn) LocalAddr() net.Addr { return addr(cn.prot, cn.local) } +func (cn *Conn) RemoteAddr() net.Addr { return addr(cn.prot, cn.remote) } + +func (cn *Conn) SetDeadline(t time.Time) error { + cn.readDeadline = t + cn.writeDeadline = t + return nil +} + +func (cn *Conn) SetReadDeadline(t time.Time) error { + cn.readDeadline = t + return nil +} + +func (cn *Conn) SetWriteDeadline(t time.Time) error { + cn.writeDeadline = t + return nil +} + +// KeepAlive signals that the connection is still in use. +// It may be called to prevent the socket being closed due to inactivity. +func (cn *Conn) KeepAlive() error { + req := &pb.GetSocketNameRequest{ + SocketDescriptor: &cn.desc, + } + res := &pb.GetSocketNameReply{} + return internal.Call(cn.ctx, "remote_socket", "GetSocketName", req, res) +} + +func init() { + internal.RegisterErrorCodeMap("remote_socket", pb.RemoteSocketServiceError_ErrorCode_name) +} diff --git a/vendor/google.golang.org/appengine/socket/socket_vm.go b/vendor/google.golang.org/appengine/socket/socket_vm.go new file mode 100644 index 000000000..c804169a1 --- /dev/null +++ b/vendor/google.golang.org/appengine/socket/socket_vm.go @@ -0,0 +1,64 @@ +// Copyright 2015 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// +build !appengine + +package socket + +import ( + "net" + "time" + + "golang.org/x/net/context" +) + +// Dial connects to the address addr on the network protocol. +// The address format is host:port, where host may be a hostname or an IP address. +// Known protocols are "tcp" and "udp". +// The returned connection satisfies net.Conn, and is valid while ctx is valid; +// if the connection is to be used after ctx becomes invalid, invoke SetContext +// with the new context. +func Dial(ctx context.Context, protocol, addr string) (*Conn, error) { + conn, err := net.Dial(protocol, addr) + if err != nil { + return nil, err + } + return &Conn{conn}, nil +} + +// DialTimeout is like Dial but takes a timeout. +// The timeout includes name resolution, if required. +func DialTimeout(ctx context.Context, protocol, addr string, timeout time.Duration) (*Conn, error) { + conn, err := net.DialTimeout(protocol, addr, timeout) + if err != nil { + return nil, err + } + return &Conn{conn}, nil +} + +// LookupIP returns the given host's IP addresses. +func LookupIP(ctx context.Context, host string) (addrs []net.IP, err error) { + return net.LookupIP(host) +} + +// Conn represents a socket connection. +// It implements net.Conn. +type Conn struct { + net.Conn +} + +// SetContext sets the context that is used by this Conn. +// It is usually used only when using a Conn that was created in a different context, +// such as when a connection is created during a warmup request but used while +// servicing a user request. +func (cn *Conn) SetContext(ctx context.Context) { + // This function is not required in App Engine "flexible environment". +} + +// KeepAlive signals that the connection is still in use. +// It may be called to prevent the socket being closed due to inactivity. +func (cn *Conn) KeepAlive() error { + // This function is not required in App Engine "flexible environment". + return nil +} diff --git a/vendor/google.golang.org/appengine/timeout.go b/vendor/google.golang.org/appengine/timeout.go new file mode 100644 index 000000000..05642a992 --- /dev/null +++ b/vendor/google.golang.org/appengine/timeout.go @@ -0,0 +1,20 @@ +// Copyright 2013 Google Inc. All rights reserved. 
+// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +package appengine + +import "golang.org/x/net/context" + +// IsTimeoutError reports whether err is a timeout error. +func IsTimeoutError(err error) bool { + if err == context.DeadlineExceeded { + return true + } + if t, ok := err.(interface { + IsTimeout() bool + }); ok { + return t.IsTimeout() + } + return false +} diff --git a/vendor/google.golang.org/genproto/LICENSE b/vendor/google.golang.org/genproto/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/google.golang.org/genproto/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go new file mode 100644 index 000000000..9521b50e9 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go @@ -0,0 +1,54 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/api/annotations.proto + +package annotations // import "google.golang.org/genproto/googleapis/api/annotations" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import descriptor "github.com/golang/protobuf/protoc-gen-go/descriptor" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +var E_Http = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MethodOptions)(nil), + ExtensionType: (*HttpRule)(nil), + Field: 72295728, + Name: "google.api.http", + Tag: "bytes,72295728,opt,name=http", + Filename: "google/api/annotations.proto", +} + +func init() { + proto.RegisterExtension(E_Http) +} + +func init() { + proto.RegisterFile("google/api/annotations.proto", fileDescriptor_annotations_55609bb51d80951d) +} + +var fileDescriptor_annotations_55609bb51d80951d = []byte{ + // 208 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x49, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x4f, 0x2c, 0xc8, 0xd4, 0x4f, 0xcc, 0xcb, 0xcb, 0x2f, 0x49, 0x2c, 0xc9, 0xcc, + 0xcf, 0x2b, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x82, 0xc8, 0xea, 0x25, 0x16, 0x64, + 0x4a, 0x89, 0x22, 0xa9, 0xcc, 0x28, 0x29, 0x29, 0x80, 0x28, 0x91, 0x52, 0x80, 0x0a, 0x83, 0x79, + 0x49, 0xa5, 0x69, 0xfa, 0x29, 0xa9, 0xc5, 0xc9, 0x45, 0x99, 0x05, 0x25, 0xf9, 0x45, 0x10, 0x15, + 0x56, 0xde, 0x5c, 0x2c, 0x20, 0xf5, 0x42, 0x72, 0x7a, 0x50, 0xd3, 0x60, 0x4a, 0xf5, 0x7c, 0x53, + 0x4b, 0x32, 0xf2, 0x53, 0xfc, 0x0b, 0xc0, 0x56, 0x4a, 0x6c, 0x38, 0xb5, 0x47, 0x49, 0x81, 0x51, + 0x83, 0xdb, 0x48, 0x44, 0x0f, 0x61, 0xad, 0x9e, 0x47, 0x49, 0x49, 0x41, 0x50, 0x69, 0x4e, 0x6a, + 0x10, 0xd8, 0x10, 0xa7, 0x3c, 0x2e, 0xbe, 0xe4, 0xfc, 0x5c, 0x24, 0x05, 0x4e, 0x02, 0x8e, 0x08, + 0x67, 0x07, 0x80, 0x4c, 0x0e, 0x60, 0x8c, 0x72, 0x84, 0xca, 0xa7, 0xe7, 0xe7, 0x24, 0xe6, 0xa5, + 0xeb, 0xe5, 0x17, 0xa5, 0xeb, 0xa7, 0xa7, 0xe6, 0x81, 0xed, 0xd5, 0x87, 0x48, 0x25, 0x16, 0x64, + 0x16, 0xa3, 0x7b, 0xda, 0x1a, 0x89, 0xbd, 0x88, 0x89, 0xc5, 0xdd, 0x31, 0xc0, 0x33, 0x89, 0x0d, + 0xac, 0xc9, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0xe3, 0x29, 0x19, 0x62, 0x28, 0x01, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go new file mode 100644 index 000000000..ba2d33649 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go @@ -0,0 +1,76 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/api/client.proto + +package annotations // import "google.golang.org/genproto/googleapis/api/annotations" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import descriptor "github.com/golang/protobuf/protoc-gen-go/descriptor" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +var E_DefaultHost = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.ServiceOptions)(nil), + ExtensionType: (*string)(nil), + Field: 1049, + Name: "google.api.default_host", + Tag: "bytes,1049,opt,name=default_host,json=defaultHost", + Filename: "google/api/client.proto", +} + +var E_OauthScopes = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.ServiceOptions)(nil), + ExtensionType: (*string)(nil), + Field: 1050, + Name: "google.api.oauth_scopes", + Tag: "bytes,1050,opt,name=oauth_scopes,json=oauthScopes", + Filename: "google/api/client.proto", +} + +var E_MethodSignature = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MethodOptions)(nil), + ExtensionType: ([]string)(nil), + Field: 1051, + Name: "google.api.method_signature", + Tag: "bytes,1051,rep,name=method_signature,json=methodSignature", + Filename: "google/api/client.proto", +} + +func init() { + proto.RegisterExtension(E_DefaultHost) + proto.RegisterExtension(E_OauthScopes) + proto.RegisterExtension(E_MethodSignature) +} + +func init() { proto.RegisterFile("google/api/client.proto", fileDescriptor_client_3b795ae10f05c1f5) } + +var fileDescriptor_client_3b795ae10f05c1f5 = []byte{ + // 262 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x90, 0x3f, 0x4f, 0xc3, 0x30, + 0x10, 0xc5, 0x55, 0x40, 0xa8, 0x75, 0x11, 0xa0, 0x2c, 0x20, 0x06, 0xc8, 0xd8, 0xc9, 0x1e, 0xd8, + 0xca, 0xd4, 0x76, 0xe0, 0x8f, 0x84, 0x88, 0x9a, 0x8d, 0x25, 0x72, 0x9d, 0xab, 0x63, 0x29, 0xf5, + 0x59, 0xf6, 0x85, 0xef, 0x02, 0x6c, 0x7c, 0x52, 0x54, 0xc7, 0x11, 0x48, 0x0c, 0x6c, 0x27, 0xbd, + 0xf7, 0xfb, 0x9d, 0xf4, 0xd8, 0x85, 0x46, 0xd4, 0x2d, 0x08, 0xe9, 0x8c, 0x50, 0xad, 0x01, 0x4b, + 0xdc, 0x79, 0x24, 0xcc, 0x58, 0x1f, 0x70, 0xe9, 0xcc, 0x55, 0x9e, 0x4a, 0x31, 0xd9, 0x74, 0x5b, + 0x51, 0x43, 0x50, 0xde, 0x38, 0x42, 0xdf, 0xb7, 0xe7, 0x2b, 0x76, 0x52, 0xc3, 0x56, 0x76, 0x2d, + 0x55, 0x0d, 0x06, 0xca, 0x6e, 0x78, 0xc2, 0x07, 0x84, 0x97, 0xe0, 0xdf, 0x8c, 0x82, 0x17, 0x47, + 0x06, 0x6d, 0xb8, 0x7c, 0x1f, 0xe7, 0xa3, 0xd9, 0x64, 0x3d, 0x4d, 0xd4, 0x03, 0x06, 0xda, 0x4b, + 0x50, 0x76, 0xd4, 0x54, 0x41, 0xa1, 0x83, 0xf0, 0xbf, 0xe4, 0x23, 0x49, 0x22, 0x55, 0x46, 0x68, + 0xfe, 0xc4, 0xce, 0x77, 0x40, 0x0d, 0xd6, 0x55, 0x30, 0xda, 0x4a, 0xea, 0x3c, 0x64, 0xd7, 0x7f, + 0x44, 0xcf, 0xb1, 0x32, 0x78, 0x3e, 0xc7, 0xf9, 0xe1, 0x6c, 0xb2, 0x3e, 0xeb, 0xc1, 0x72, 0xe0, + 0x96, 0x86, 0x9d, 0x2a, 0xdc, 0xf1, 0x9f, 0x25, 0x96, 0xd3, 0x55, 0xdc, 0xa8, 0xd8, 0x7b, 0x8a, + 0xd1, 0xeb, 0x22, 0x45, 0x1a, 0x5b, 0x69, 0x35, 0x47, 0xaf, 0x85, 0x06, 0x1b, 0xbf, 0x88, 0x3e, + 0x92, 0xce, 0x84, 0x38, 0xae, 0xb4, 0x16, 0x49, 0xc6, 0x7f, 0x77, 0xbf, 0xee, 0xaf, 0x83, 0xa3, + 0xfb, 0x45, 0xf1, 0xb8, 0x39, 0x8e, 0xd0, 0xed, 0x77, 0x00, 0x00, 0x00, 0xff, 0xff, 0x22, 0xa8, + 0x2d, 0x33, 0x90, 0x01, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go new file mode 100644 index 000000000..734dde11a --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go @@ -0,0 +1,119 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// source: google/api/field_behavior.proto + +package annotations // import "google.golang.org/genproto/googleapis/api/annotations" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import descriptor "github.com/golang/protobuf/protoc-gen-go/descriptor" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// An indicator of the behavior of a given field (for example, that a field +// is required in requests, or given as output but ignored as input). +// This **does not** change the behavior in protocol buffers itself; it only +// denotes the behavior and may affect how API tooling handles the field. +// +// Note: This enum **may** receive new values in the future. +type FieldBehavior int32 + +const ( + // Conventional default for enums. Do not use this. + FieldBehavior_FIELD_BEHAVIOR_UNSPECIFIED FieldBehavior = 0 + // Specifically denotes a field as optional. + // While all fields in protocol buffers are optional, this may be specified + // for emphasis if appropriate. + FieldBehavior_OPTIONAL FieldBehavior = 1 + // Denotes a field as required. + // This indicates that the field **must** be provided as part of the request, + // and failure to do so will cause an error (usually `INVALID_ARGUMENT`). + FieldBehavior_REQUIRED FieldBehavior = 2 + // Denotes a field as output only. + // This indicates that the field is provided in responses, but including the + // field in a request does nothing (the server *must* ignore it and + // *must not* throw an error as a result of the field's presence). + FieldBehavior_OUTPUT_ONLY FieldBehavior = 3 + // Denotes a field as input only. + // This indicates that the field is provided in requests, and the + // corresponding field is not included in output. + FieldBehavior_INPUT_ONLY FieldBehavior = 4 + // Denotes a field as immutable. + // This indicates that the field may be set once in a request to create a + // resource, but may not be changed thereafter. 
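+ // For example (an illustrative .proto usage of this extension, not part
+ // of this generated file):
+ //
+ //   string name = 1 [(google.api.field_behavior) = IMMUTABLE];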
+ FieldBehavior_IMMUTABLE FieldBehavior = 5 +) + +var FieldBehavior_name = map[int32]string{ + 0: "FIELD_BEHAVIOR_UNSPECIFIED", + 1: "OPTIONAL", + 2: "REQUIRED", + 3: "OUTPUT_ONLY", + 4: "INPUT_ONLY", + 5: "IMMUTABLE", +} +var FieldBehavior_value = map[string]int32{ + "FIELD_BEHAVIOR_UNSPECIFIED": 0, + "OPTIONAL": 1, + "REQUIRED": 2, + "OUTPUT_ONLY": 3, + "INPUT_ONLY": 4, + "IMMUTABLE": 5, +} + +func (x FieldBehavior) String() string { + return proto.EnumName(FieldBehavior_name, int32(x)) +} +func (FieldBehavior) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_field_behavior_11f2d3859577a83d, []int{0} +} + +var E_FieldBehavior = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: ([]FieldBehavior)(nil), + Field: 1052, + Name: "google.api.field_behavior", + Tag: "varint,1052,rep,name=field_behavior,json=fieldBehavior,enum=google.api.FieldBehavior", + Filename: "google/api/field_behavior.proto", +} + +func init() { + proto.RegisterEnum("google.api.FieldBehavior", FieldBehavior_name, FieldBehavior_value) + proto.RegisterExtension(E_FieldBehavior) +} + +func init() { + proto.RegisterFile("google/api/field_behavior.proto", fileDescriptor_field_behavior_11f2d3859577a83d) +} + +var fileDescriptor_field_behavior_11f2d3859577a83d = []byte{ + // 303 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x54, 0x90, 0x4f, 0x4f, 0xb3, 0x30, + 0x1c, 0xc7, 0x9f, 0xfd, 0x79, 0xcc, 0xac, 0x0e, 0x49, 0x4f, 0xba, 0x44, 0xdd, 0xd1, 0x78, 0x28, + 0x89, 0xde, 0xf4, 0x04, 0xae, 0xd3, 0x26, 0x8c, 0x56, 0x04, 0x13, 0xbd, 0x60, 0xb7, 0xb1, 0xda, + 0x64, 0xd2, 0x06, 0xd0, 0x8b, 0x6f, 0xc5, 0x93, 0xaf, 0xd4, 0xd0, 0x31, 0x85, 0x5b, 0xbf, 0xf9, + 0x7d, 0xfa, 0xeb, 0xe7, 0x5b, 0x70, 0x2a, 0x94, 0x12, 0xeb, 0xd4, 0xe1, 0x5a, 0x3a, 0x2b, 0x99, + 0xae, 0x97, 0xc9, 0x3c, 0x7d, 0xe5, 0x1f, 0x52, 0xe5, 0x48, 0xe7, 0xaa, 0x54, 0x10, 0x6c, 0x00, + 0xc4, 0xb5, 0x1c, 0x8d, 0x6b, 0xd8, 0x4c, 0xe6, 0xef, 0x2b, 0x67, 0x99, 0x16, 0x8b, 0x5c, 0xea, + 0x72, 0x4b, 0x9f, 0x7f, 0x82, 0xe1, 0xb4, 0xda, 0xe2, 0xd5, 0x4b, 0xe0, 0x09, 0x18, 0x4d, 0x09, + 0xf6, 0x27, 0x89, 0x87, 0xef, 0xdc, 0x47, 0x42, 0xc3, 0x24, 0x0e, 0x1e, 0x18, 0xbe, 0x21, 0x53, + 0x82, 0x27, 0xf6, 0x3f, 0xb8, 0x0f, 0x06, 0x94, 0x45, 0x84, 0x06, 0xae, 0x6f, 0x77, 0xaa, 0x14, + 0xe2, 0xfb, 0x98, 0x84, 0x78, 0x62, 0x77, 0xe1, 0x01, 0xd8, 0xa3, 0x71, 0xc4, 0xe2, 0x28, 0xa1, + 0x81, 0xff, 0x64, 0xf7, 0xa0, 0x05, 0x00, 0x09, 0x7e, 0x73, 0x1f, 0x0e, 0xc1, 0x2e, 0x99, 0xcd, + 0xe2, 0xc8, 0xf5, 0x7c, 0x6c, 0xff, 0xbf, 0x7a, 0x01, 0x56, 0xbb, 0x02, 0x3c, 0x46, 0xb5, 0xfd, + 0xd6, 0x18, 0x19, 0x3b, 0xaa, 0x4b, 0xa9, 0xb2, 0xe2, 0xf0, 0x6b, 0x30, 0xee, 0x9d, 0x59, 0x17, + 0x47, 0xe8, 0xaf, 0x23, 0x6a, 0xe9, 0x87, 0xc3, 0x55, 0x33, 0x7a, 0x1a, 0x58, 0x0b, 0xf5, 0xd6, + 0xc0, 0x3d, 0xd8, 0xe2, 0x59, 0xf5, 0x0c, 0xeb, 0x3c, 0xbb, 0x35, 0x21, 0xd4, 0x9a, 0x67, 0x02, + 0xa9, 0x5c, 0x38, 0x22, 0xcd, 0x8c, 0x84, 0xb3, 0x19, 0x71, 0x2d, 0x0b, 0xf3, 0xe9, 0x3c, 0xcb, + 0x54, 0xc9, 0x8d, 0xcf, 0x75, 0xe3, 0xfc, 0xdd, 0xed, 0xdf, 0xba, 0x8c, 0xcc, 0x77, 0xcc, 0xa5, + 0xcb, 0x9f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xfc, 0x94, 0x57, 0x94, 0xa8, 0x01, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go new file mode 100644 index 000000000..1a8a27b65 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go @@ -0,0 +1,688 @@ +// Code generated by 
protoc-gen-go. DO NOT EDIT.
+// source: google/api/http.proto
+
+package annotations // import "google.golang.org/genproto/googleapis/api/annotations"
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+// Defines the HTTP configuration for an API service. It contains a list of
+// [HttpRule][google.api.HttpRule], each specifying the mapping of an RPC method
+// to one or more HTTP REST API methods.
+type Http struct {
+ // A list of HTTP configuration rules that apply to individual API methods.
+ //
+ // **NOTE:** All service configuration rules follow "last one wins" order.
+ Rules []*HttpRule `protobuf:"bytes,1,rep,name=rules,proto3" json:"rules,omitempty"`
+ // When set to true, URL path parameters will be fully URI-decoded except in
+ // cases of single segment matches in reserved expansion, where "%2F" will be
+ // left encoded.
+ //
+ // The default behavior is to not decode RFC 6570 reserved characters in multi
+ // segment matches.
+ FullyDecodeReservedExpansion bool `protobuf:"varint,2,opt,name=fully_decode_reserved_expansion,json=fullyDecodeReservedExpansion,proto3" json:"fully_decode_reserved_expansion,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Http) Reset() { *m = Http{} }
+func (m *Http) String() string { return proto.CompactTextString(m) }
+func (*Http) ProtoMessage() {}
+func (*Http) Descriptor() ([]byte, []int) {
+ return fileDescriptor_http_e457621dddd7365b, []int{0}
+}
+func (m *Http) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Http.Unmarshal(m, b)
+}
+func (m *Http) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Http.Marshal(b, m, deterministic)
+}
+func (dst *Http) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Http.Merge(dst, src)
+}
+func (m *Http) XXX_Size() int {
+ return xxx_messageInfo_Http.Size(m)
+}
+func (m *Http) XXX_DiscardUnknown() {
+ xxx_messageInfo_Http.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Http proto.InternalMessageInfo
+
+func (m *Http) GetRules() []*HttpRule {
+ if m != nil {
+ return m.Rules
+ }
+ return nil
+}
+
+func (m *Http) GetFullyDecodeReservedExpansion() bool {
+ if m != nil {
+ return m.FullyDecodeReservedExpansion
+ }
+ return false
+}
+
+// `HttpRule` defines the mapping of an RPC method to one or more HTTP
+// REST API methods. The mapping specifies how different portions of the RPC
+// request message are mapped to URL path, URL query parameters, and
+// HTTP request body. The mapping is typically specified as a
+// `google.api.http` annotation on the RPC method,
+// see "google/api/annotations.proto" for details.
+//
+// The mapping consists of a field specifying the path template and
+// method kind. The path template can refer to fields in the request
+// message, as in the example below which describes a REST GET
+// operation on a resource collection of messages:
+//
+//
+//     service Messaging {
+//       rpc GetMessage(GetMessageRequest) returns (Message) {
+//         option (google.api.http).get = "/v1/messages/{message_id}/{sub.subfield}";
+//       }
+//     }
+//     message GetMessageRequest {
+//       message SubMessage {
+//         string subfield = 1;
+//       }
+//       string message_id = 1; // mapped to the URL
+//       SubMessage sub = 2;    // `sub.subfield` is url-mapped
+//     }
+//     message Message {
+//       string text = 1; // content of the resource
+//     }
+//
+// The same http annotation can alternatively be expressed inside the
+// `GRPC API Configuration` YAML file.
+//
+//     http:
+//       rules:
+//         - selector: <proto_package_name>.Messaging.GetMessage
+//           get: /v1/messages/{message_id}/{sub.subfield}
+//
+// This definition enables an automatic, bidirectional mapping of HTTP
+// JSON to RPC. Example:
+//
+// HTTP | RPC
+// -----|-----
+// `GET /v1/messages/123456/foo` | `GetMessage(message_id: "123456" sub: SubMessage(subfield: "foo"))`
+//
+// In general, not only fields but also field paths can be referenced
+// from a path pattern. Fields mapped to the path pattern cannot be
+// repeated and must have a primitive (non-message) type.
+//
+// Any fields in the request message which are not bound by the path
+// pattern automatically become (optional) HTTP query
+// parameters. Assume the following definition of the request message:
+//
+//
+//     service Messaging {
+//       rpc GetMessage(GetMessageRequest) returns (Message) {
+//         option (google.api.http).get = "/v1/messages/{message_id}";
+//       }
+//     }
+//     message GetMessageRequest {
+//       message SubMessage {
+//         string subfield = 1;
+//       }
+//       string message_id = 1; // mapped to the URL
+//       int64 revision = 2;    // becomes a parameter
+//       SubMessage sub = 3;    // `sub.subfield` becomes a parameter
+//     }
+//
+//
+// This enables an HTTP JSON to RPC mapping as below:
+//
+// HTTP | RPC
+// -----|-----
+// `GET /v1/messages/123456?revision=2&sub.subfield=foo` | `GetMessage(message_id: "123456" revision: 2 sub: SubMessage(subfield: "foo"))`
+//
+// Note that fields which are mapped to HTTP parameters must have a
+// primitive type or a repeated primitive type. Message types are not
+// allowed. In the case of a repeated type, the parameter can be
+// repeated in the URL, as in `...?param=A&param=B`.
+//
+// For HTTP method kinds which allow a request body, the `body` field
+// specifies the mapping. Consider a REST update method on the
+// message resource collection:
+//
+//
+//     service Messaging {
+//       rpc UpdateMessage(UpdateMessageRequest) returns (Message) {
+//         option (google.api.http) = {
+//           put: "/v1/messages/{message_id}"
+//           body: "message"
+//         };
+//       }
+//     }
+//     message UpdateMessageRequest {
+//       string message_id = 1; // mapped to the URL
+//       Message message = 2;   // mapped to the body
+//     }
+//
+//
+// The following HTTP JSON to RPC mapping is enabled, where the
+// representation of the JSON in the request body is determined by
+// the proto's JSON encoding:
+//
+// HTTP | RPC
+// -----|-----
+// `PUT /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: "123456" message { text: "Hi!" })`
+//
+// The special name `*` can be used in the body mapping to define that
+// every field not bound by the path template should be mapped to the
+// request body. This enables the following alternative definition of
+// the update method:
+//
+//     service Messaging {
+//       rpc UpdateMessage(Message) returns (Message) {
+//         option (google.api.http) = {
+//           put: "/v1/messages/{message_id}"
+//           body: "*"
+//         };
+//       }
+//     }
+//     message Message {
+//       string message_id = 1;
+//       string text = 2;
+//     }
+//
+//
+// The following HTTP JSON to RPC mapping is enabled:
+//
+// HTTP | RPC
+// -----|-----
+// `PUT /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: "123456" text: "Hi!")`
+//
+// Note that when using `*` in the body mapping, it is not possible to
+// have HTTP parameters, as all fields not bound by the path end in
+// the body. This makes this option rarely used in practice when
+// defining REST APIs. The common usage of `*` is in custom methods
+// which don't use the URL at all for transferring data.
+//
+// It is possible to define multiple HTTP methods for one RPC by using
+// the `additional_bindings` option. Example:
+//
+//     service Messaging {
+//       rpc GetMessage(GetMessageRequest) returns (Message) {
+//         option (google.api.http) = {
+//           get: "/v1/messages/{message_id}"
+//           additional_bindings {
+//             get: "/v1/users/{user_id}/messages/{message_id}"
+//           }
+//         };
+//       }
+//     }
+//     message GetMessageRequest {
+//       string message_id = 1;
+//       string user_id = 2;
+//     }
+//
+//
+// This enables the following two alternative HTTP JSON to RPC
+// mappings:
+//
+// HTTP | RPC
+// -----|-----
+// `GET /v1/messages/123456` | `GetMessage(message_id: "123456")`
+// `GET /v1/users/me/messages/123456` | `GetMessage(user_id: "me" message_id: "123456")`
+//
+// # Rules for HTTP mapping
+//
+// The rules for mapping HTTP path, query parameters, and body fields
+// to the request message are as follows:
+//
+// 1. The `body` field specifies either `*` or a field path, or is
+//    omitted. If omitted, it indicates there is no HTTP request body.
+// 2. Leaf fields (recursive expansion of nested messages in the
+//    request) can be classified into three types:
+//     (a) Matched in the URL template.
+//     (b) Covered by body (if body is `*`, everything except (a) fields;
+//         else everything under the body field)
+//     (c) All other fields.
+// 3. URL query parameters found in the HTTP request are mapped to (c) fields.
+// 4. Any body sent with an HTTP request can contain only (b) fields.
+//
+// The syntax of the path template is as follows:
+//
+//     Template = "/" Segments [ Verb ] ;
+//     Segments = Segment { "/" Segment } ;
+//     Segment  = "*" | "**" | LITERAL | Variable ;
+//     Variable = "{" FieldPath [ "=" Segments ] "}" ;
+//     FieldPath = IDENT { "." IDENT } ;
+//     Verb     = ":" LITERAL ;
+//
+// The syntax `*` matches a single path segment. The syntax `**` matches zero
+// or more path segments, which must be the last part of the path except the
+// `Verb`. The syntax `LITERAL` matches literal text in the path.
+//
+// The syntax `Variable` matches part of the URL path as specified by its
+// template. A variable template must not contain other variables. If a variable
+// matches a single path segment, its template may be omitted, e.g. `{var}`
+// is equivalent to `{var=*}`.
+//
+// If a variable contains exactly one path segment, such as `"{var}"` or
+// `"{var=*}"`, when such a variable is expanded into a URL path, all characters
+// except `[-_.~0-9a-zA-Z]` are percent-encoded. Such variables show up in the
+// Discovery Document as `{var}`.
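+//
+// For example (an illustrative case): given the template `/v1/{name}`, a
+// value of `shelves/1` for `name` is percent-encoded and expanded to
+// `/v1/shelves%2F1`.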
+//
+// If a variable contains one or more path segments, such as `"{var=foo/*}"`
+// or `"{var=**}"`, when such a variable is expanded into a URL path, all
+// characters except `[-_.~/0-9a-zA-Z]` are percent-encoded. Such variables
+// show up in the Discovery Document as `{+var}`.
+//
+// NOTE: While the single segment variable matches the semantics of
+// [RFC 6570](https://tools.ietf.org/html/rfc6570) Section 3.2.2
+// Simple String Expansion, the multi segment variable **does not** match
+// RFC 6570 Reserved Expansion. The reason is that the Reserved Expansion
+// does not expand special characters like `?` and `#`, which would lead
+// to invalid URLs.
+//
+// NOTE: the field paths in variables and in the `body` must not refer to
+// repeated fields or map fields.
+type HttpRule struct {
+ // Selects methods to which this rule applies.
+ //
+ // Refer to [selector][google.api.DocumentationRule.selector] for syntax details.
+ Selector string `protobuf:"bytes,1,opt,name=selector,proto3" json:"selector,omitempty"`
+ // Determines the URL pattern that is matched by this rule. This pattern can be
+ // used with any of the {get|put|post|delete|patch} methods. A custom method
+ // can be defined using the 'custom' field.
+ //
+ // Types that are valid to be assigned to Pattern:
+ //	*HttpRule_Get
+ //	*HttpRule_Put
+ //	*HttpRule_Post
+ //	*HttpRule_Delete
+ //	*HttpRule_Patch
+ //	*HttpRule_Custom
+ Pattern isHttpRule_Pattern `protobuf_oneof:"pattern"`
+ // The name of the request field whose value is mapped to the HTTP body, or
+ // `*` for mapping all fields not captured by the path pattern to the HTTP
+ // body. NOTE: the referred field must not be a repeated field and must be
+ // present at the top level of the request message type.
+ Body string `protobuf:"bytes,7,opt,name=body,proto3" json:"body,omitempty"`
+ // Optional. The name of the response field whose value is mapped to the HTTP
+ // body of the response. Other response fields are ignored. When
+ // not set, the response message will be used as the HTTP body of the response.
+ ResponseBody string `protobuf:"bytes,12,opt,name=response_body,json=responseBody,proto3" json:"response_body,omitempty"`
+ // Additional HTTP bindings for the selector. Nested bindings must
+ // not contain an `additional_bindings` field themselves (that is,
+ // the nesting may only be one level deep).
+ AdditionalBindings []*HttpRule `protobuf:"bytes,11,rep,name=additional_bindings,json=additionalBindings,proto3" json:"additional_bindings,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *HttpRule) Reset() { *m = HttpRule{} } +func (m *HttpRule) String() string { return proto.CompactTextString(m) } +func (*HttpRule) ProtoMessage() {} +func (*HttpRule) Descriptor() ([]byte, []int) { + return fileDescriptor_http_e457621dddd7365b, []int{1} +} +func (m *HttpRule) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_HttpRule.Unmarshal(m, b) +} +func (m *HttpRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_HttpRule.Marshal(b, m, deterministic) +} +func (dst *HttpRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_HttpRule.Merge(dst, src) +} +func (m *HttpRule) XXX_Size() int { + return xxx_messageInfo_HttpRule.Size(m) +} +func (m *HttpRule) XXX_DiscardUnknown() { + xxx_messageInfo_HttpRule.DiscardUnknown(m) +} + +var xxx_messageInfo_HttpRule proto.InternalMessageInfo + +func (m *HttpRule) GetSelector() string { + if m != nil { + return m.Selector + } + return "" +} + +type isHttpRule_Pattern interface { + isHttpRule_Pattern() +} + +type HttpRule_Get struct { + Get string `protobuf:"bytes,2,opt,name=get,proto3,oneof"` +} + +type HttpRule_Put struct { + Put string `protobuf:"bytes,3,opt,name=put,proto3,oneof"` +} + +type HttpRule_Post struct { + Post string `protobuf:"bytes,4,opt,name=post,proto3,oneof"` +} + +type HttpRule_Delete struct { + Delete string `protobuf:"bytes,5,opt,name=delete,proto3,oneof"` +} + +type HttpRule_Patch struct { + Patch string `protobuf:"bytes,6,opt,name=patch,proto3,oneof"` +} + +type HttpRule_Custom struct { + Custom *CustomHttpPattern `protobuf:"bytes,8,opt,name=custom,proto3,oneof"` +} + +func (*HttpRule_Get) isHttpRule_Pattern() {} + +func (*HttpRule_Put) isHttpRule_Pattern() {} + +func (*HttpRule_Post) isHttpRule_Pattern() {} + +func (*HttpRule_Delete) isHttpRule_Pattern() {} + +func (*HttpRule_Patch) isHttpRule_Pattern() {} + +func (*HttpRule_Custom) isHttpRule_Pattern() {} + +func (m *HttpRule) GetPattern() isHttpRule_Pattern { + if m != nil { + return m.Pattern + } + return nil +} + +func (m *HttpRule) GetGet() string { + if x, ok := m.GetPattern().(*HttpRule_Get); ok { + return x.Get + } + return "" +} + +func (m *HttpRule) GetPut() string { + if x, ok := m.GetPattern().(*HttpRule_Put); ok { + return x.Put + } + return "" +} + +func (m *HttpRule) GetPost() string { + if x, ok := m.GetPattern().(*HttpRule_Post); ok { + return x.Post + } + return "" +} + +func (m *HttpRule) GetDelete() string { + if x, ok := m.GetPattern().(*HttpRule_Delete); ok { + return x.Delete + } + return "" +} + +func (m *HttpRule) GetPatch() string { + if x, ok := m.GetPattern().(*HttpRule_Patch); ok { + return x.Patch + } + return "" +} + +func (m *HttpRule) GetCustom() *CustomHttpPattern { + if x, ok := m.GetPattern().(*HttpRule_Custom); ok { + return x.Custom + } + return nil +} + +func (m *HttpRule) GetBody() string { + if m != nil { + return m.Body + } + return "" +} + +func (m *HttpRule) GetResponseBody() string { + if m != nil { + return m.ResponseBody + } + return "" +} + +func (m *HttpRule) GetAdditionalBindings() []*HttpRule { + if m != nil { + return m.AdditionalBindings + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*HttpRule) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _HttpRule_OneofMarshaler, _HttpRule_OneofUnmarshaler, _HttpRule_OneofSizer, []interface{}{ + (*HttpRule_Get)(nil), + (*HttpRule_Put)(nil), + (*HttpRule_Post)(nil), + (*HttpRule_Delete)(nil), + (*HttpRule_Patch)(nil), + (*HttpRule_Custom)(nil), + } +} + +func _HttpRule_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*HttpRule) + // pattern + switch x := m.Pattern.(type) { + case *HttpRule_Get: + b.EncodeVarint(2<<3 | proto.WireBytes) + b.EncodeStringBytes(x.Get) + case *HttpRule_Put: + b.EncodeVarint(3<<3 | proto.WireBytes) + b.EncodeStringBytes(x.Put) + case *HttpRule_Post: + b.EncodeVarint(4<<3 | proto.WireBytes) + b.EncodeStringBytes(x.Post) + case *HttpRule_Delete: + b.EncodeVarint(5<<3 | proto.WireBytes) + b.EncodeStringBytes(x.Delete) + case *HttpRule_Patch: + b.EncodeVarint(6<<3 | proto.WireBytes) + b.EncodeStringBytes(x.Patch) + case *HttpRule_Custom: + b.EncodeVarint(8<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Custom); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("HttpRule.Pattern has unexpected type %T", x) + } + return nil +} + +func _HttpRule_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*HttpRule) + switch tag { + case 2: // pattern.get + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Pattern = &HttpRule_Get{x} + return true, err + case 3: // pattern.put + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Pattern = &HttpRule_Put{x} + return true, err + case 4: // pattern.post + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Pattern = &HttpRule_Post{x} + return true, err + case 5: // pattern.delete + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Pattern = &HttpRule_Delete{x} + return true, err + case 6: // pattern.patch + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Pattern = &HttpRule_Patch{x} + return true, err + case 8: // pattern.custom + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(CustomHttpPattern) + err := b.DecodeMessage(msg) + m.Pattern = &HttpRule_Custom{msg} + return true, err + default: + return false, nil + } +} + +func _HttpRule_OneofSizer(msg proto.Message) (n int) { + m := msg.(*HttpRule) + // pattern + switch x := m.Pattern.(type) { + case *HttpRule_Get: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(len(x.Get))) + n += len(x.Get) + case *HttpRule_Put: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(len(x.Put))) + n += len(x.Put) + case *HttpRule_Post: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(len(x.Post))) + n += len(x.Post) + case *HttpRule_Delete: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(len(x.Delete))) + n += len(x.Delete) + case *HttpRule_Patch: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(len(x.Patch))) + n += len(x.Patch) + case *HttpRule_Custom: + s := proto.Size(x.Custom) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected 
type %T in oneof", x))
+ }
+ return n
+}
+
+// A custom pattern is used for defining a custom HTTP verb.
+type CustomHttpPattern struct {
+ // The name of this custom HTTP verb.
+ Kind string `protobuf:"bytes,1,opt,name=kind,proto3" json:"kind,omitempty"`
+ // The path matched by this custom verb.
+ Path string `protobuf:"bytes,2,opt,name=path,proto3" json:"path,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *CustomHttpPattern) Reset() { *m = CustomHttpPattern{} }
+func (m *CustomHttpPattern) String() string { return proto.CompactTextString(m) }
+func (*CustomHttpPattern) ProtoMessage() {}
+func (*CustomHttpPattern) Descriptor() ([]byte, []int) {
+ return fileDescriptor_http_e457621dddd7365b, []int{2}
+}
+func (m *CustomHttpPattern) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_CustomHttpPattern.Unmarshal(m, b)
+}
+func (m *CustomHttpPattern) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_CustomHttpPattern.Marshal(b, m, deterministic)
+}
+func (dst *CustomHttpPattern) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_CustomHttpPattern.Merge(dst, src)
+}
+func (m *CustomHttpPattern) XXX_Size() int {
+ return xxx_messageInfo_CustomHttpPattern.Size(m)
+}
+func (m *CustomHttpPattern) XXX_DiscardUnknown() {
+ xxx_messageInfo_CustomHttpPattern.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CustomHttpPattern proto.InternalMessageInfo
+
+func (m *CustomHttpPattern) GetKind() string {
+ if m != nil {
+ return m.Kind
+ }
+ return ""
+}
+
+func (m *CustomHttpPattern) GetPath() string {
+ if m != nil {
+ return m.Path
+ }
+ return ""
+}
+
+func init() {
+ proto.RegisterType((*Http)(nil), "google.api.Http")
+ proto.RegisterType((*HttpRule)(nil), "google.api.HttpRule")
+ proto.RegisterType((*CustomHttpPattern)(nil), "google.api.CustomHttpPattern")
+}
+
+func init() { proto.RegisterFile("google/api/http.proto", fileDescriptor_http_e457621dddd7365b) }
+
+var fileDescriptor_http_e457621dddd7365b = []byte{
+ // 419 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x92, 0xc1, 0x8e, 0xd3, 0x30,
+ 0x10, 0x86, 0x49, 0x9b, 0x76, 0xdb, 0xe9, 0x82, 0x84, 0x59, 0x90, 0x85, 0x40, 0x54, 0xe5, 0x52,
+ 0x71, 0x48, 0xa5, 0xe5, 0xc0, 0x61, 0x4f, 0x1b, 0xa8, 0x58, 0x6e, 0x55, 0x8e, 0x5c, 0x22, 0x37,
+ 0x1e, 0x52, 0x83, 0xd7, 0xb6, 0xe2, 0x09, 0xa2, 0xaf, 0xc3, 0x63, 0xf1, 0x24, 0x1c, 0x91, 0x9d,
+ 0x84, 0x56, 0x42, 0xe2, 0x36, 0xf3, 0xff, 0x9f, 0xa7, 0x7f, 0x27, 0x03, 0x4f, 0x6b, 0x6b, 0x6b,
+ 0x8d, 0x1b, 0xe1, 0xd4, 0xe6, 0x40, 0xe4, 0x32, 0xd7, 0x58, 0xb2, 0x0c, 0x3a, 0x39, 0x13, 0x4e,
+ 0xad, 0x8e, 0x90, 0xde, 0x11, 0x39, 0xf6, 0x06, 0x26, 0x4d, 0xab, 0xd1, 0xf3, 0x64, 0x39, 0x5e,
+ 0x2f, 0xae, 0xaf, 0xb2, 0x13, 0x93, 0x05, 0xa0, 0x68, 0x35, 0x16, 0x1d, 0xc2, 0xb6, 0xf0, 0xea,
+ 0x4b, 0xab, 0xf5, 0xb1, 0x94, 0x58, 0x59, 0x89, 0x65, 0x83, 0x1e, 0x9b, 0xef, 0x28, 0x4b, 0xfc,
+ 0xe1, 0x84, 0xf1, 0xca, 0x1a, 0x3e, 0x5a, 0x26, 0xeb, 0x59, 0xf1, 0x22, 0x62, 0x1f, 0x22, 0x55,
+ 0xf4, 0xd0, 0x76, 0x60, 0x56, 0xbf, 0x46, 0x30, 0x1b, 0x46, 0xb3, 0xe7, 0x30, 0xf3, 0xa8, 0xb1,
+ 0x22, 0xdb, 0xf0, 0x64, 0x99, 0xac, 0xe7, 0xc5, 0xdf, 0x9e, 0x31, 0x18, 0xd7, 0x48, 0x71, 0xe6,
+ 0xfc, 0xee, 0x41, 0x11, 0x9a, 0xa0, 0xb9, 0x96, 0xf8, 0x78, 0xd0, 0x5c, 0x4b, 0xec, 0x0a, 0x52,
+ 0x67, 0x3d, 0xf1, 0xb4, 0x17, 0x63, 0xc7, 0x38, 0x4c, 0x25, 0x6a, 0x24, 0xe4, 0x93, 0x5e, 0xef,
+ 0x7b, 0xf6, 0x0c, 0x26, 0x4e, 0x50, 0x75, 0xe0, 0xd3, 0xde, 0xe8, 0x5a, 0xf6, 0x0e, 0xa6, 0x55,
+ 0xeb, 0xc9, 0xde, 0xf3, 0xd9, 0x32, 0x59, 0x2f, 0xae, 0x5f, 0x9e, 0x2f, 0xe3, 0x7d, 0x74, 0x42,
+ 0xee, 0x9d, 0x20, 0xc2, 0xc6, 0x84, 0x81, 0x1d, 0xce, 0x18, 0xa4, 0x7b, 0x2b, 0x8f, 0xfc, 0x22,
+ 0xfe, 0x81, 0x58, 0xb3, 0xd7, 0xf0, 0xb0, 0x41, 0xef, 0xac, 0xf1, 0x58, 0x46, 0xf3, 0x32, 0x9a,
+ 0x97, 0x83, 0x98, 0x07, 0x68, 0x0b, 0x4f, 0x84, 0x94, 0x8a, 0x94, 0x35, 0x42, 0x97, 0x7b, 0x65,
+ 0xa4, 0x32, 0xb5, 0xe7, 0x8b, 0xff, 0x7c, 0x0b, 0x76, 0x7a, 0x90, 0xf7, 0x7c, 0x3e, 0x87, 0x0b,
+ 0xd7, 0x85, 0x5a, 0xdd, 0xc0, 0xe3, 0x7f, 0x92, 0x86, 0x7c, 0xdf, 0x94, 0x91, 0xfd, 0x82, 0x63,
+ 0x1d, 0x34, 0x27, 0xe8, 0xd0, 0x6d, 0xb7, 0x88, 0x75, 0xfe, 0x15, 0x1e, 0x55, 0xf6, 0xfe, 0xec,
+ 0x67, 0xf3, 0x79, 0x1c, 0x13, 0xae, 0x67, 0x97, 0x7c, 0xbe, 0xed, 0x8d, 0xda, 0x6a, 0x61, 0xea,
+ 0xcc, 0x36, 0xf5, 0xa6, 0x46, 0x13, 0x6f, 0x6b, 0xd3, 0x59, 0xc2, 0x29, 0x1f, 0xaf, 0x4e, 0x18,
+ 0x63, 0x49, 0x84, 0x98, 0xfe, 0xe6, 0xac, 0xfe, 0x9d, 0x24, 0x3f, 0x47, 0xe9, 0xc7, 0xdb, 0xdd,
+ 0xa7, 0xfd, 0x34, 0xbe, 0x7b, 0xfb, 0x27, 0x00, 0x00, 0xff, 0xff, 0xae, 0xde, 0xa1, 0xd0, 0xac,
+ 0x02, 0x00, 0x00,
+}
diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go
new file mode 100644
index 000000000..6a143986e
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go
@@ -0,0 +1,154 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/api/resource.proto
+
+package annotations // import "google.golang.org/genproto/googleapis/api/annotations"
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import descriptor "github.com/golang/protobuf/protoc-gen-go/descriptor"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+// An annotation indicating that this field designates a One Platform
+// resource.
+//
+// Example:
+//
+//     message Topic {
+//       string name = 1 [(google.api.resource) = {
+//         name: "projects/{project}/topics/{topic}"
+//       }];
+//     }
+type Resource struct {
+ // Required. The resource's name template.
+ //
+ // Examples:
+ //   - "projects/{project}/topics/{topic}"
+ //   - "projects/{project}/knowledgeBases/{knowledge_base}"
+ Pattern string `protobuf:"bytes,1,opt,name=pattern,proto3" json:"pattern,omitempty"`
+ // The name that should be used in code to describe the resource,
+ // in PascalCase.
+ //
+ // If omitted, this is inferred from the name of the message.
+ // This is required if the resource is being defined without the context
+ // of a message (see `resource_definition`, below).
+ // + // Example: + // option (google.api.resource_definition) = { + // pattern: "projects/{project}" + // symbol: "Project" + // }; + Symbol string `protobuf:"bytes,2,opt,name=symbol,proto3" json:"symbol,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Resource) Reset() { *m = Resource{} } +func (m *Resource) String() string { return proto.CompactTextString(m) } +func (*Resource) ProtoMessage() {} +func (*Resource) Descriptor() ([]byte, []int) { + return fileDescriptor_resource_1eea87ac7ff402a7, []int{0} +} +func (m *Resource) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Resource.Unmarshal(m, b) +} +func (m *Resource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Resource.Marshal(b, m, deterministic) +} +func (dst *Resource) XXX_Merge(src proto.Message) { + xxx_messageInfo_Resource.Merge(dst, src) +} +func (m *Resource) XXX_Size() int { + return xxx_messageInfo_Resource.Size(m) +} +func (m *Resource) XXX_DiscardUnknown() { + xxx_messageInfo_Resource.DiscardUnknown(m) +} + +var xxx_messageInfo_Resource proto.InternalMessageInfo + +func (m *Resource) GetPattern() string { + if m != nil { + return m.Pattern + } + return "" +} + +func (m *Resource) GetSymbol() string { + if m != nil { + return m.Symbol + } + return "" +} + +var E_Resource = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*Resource)(nil), + Field: 1053, + Name: "google.api.resource", + Tag: "bytes,1053,opt,name=resource", + Filename: "google/api/resource.proto", +} + +var E_ResourceReference = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 1055, + Name: "google.api.resource_reference", + Tag: "bytes,1055,opt,name=resource_reference,json=resourceReference", + Filename: "google/api/resource.proto", +} + +var E_ResourceDefinition = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: ([]*Resource)(nil), + Field: 1053, + Name: "google.api.resource_definition", + Tag: "bytes,1053,rep,name=resource_definition,json=resourceDefinition", + Filename: "google/api/resource.proto", +} + +func init() { + proto.RegisterType((*Resource)(nil), "google.api.Resource") + proto.RegisterExtension(E_Resource) + proto.RegisterExtension(E_ResourceReference) + proto.RegisterExtension(E_ResourceDefinition) +} + +func init() { proto.RegisterFile("google/api/resource.proto", fileDescriptor_resource_1eea87ac7ff402a7) } + +var fileDescriptor_resource_1eea87ac7ff402a7 = []byte{ + // 334 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x92, 0xc1, 0x4a, 0xeb, 0x40, + 0x18, 0x85, 0x49, 0xef, 0xa5, 0xcd, 0x9d, 0xab, 0x82, 0xa3, 0x48, 0x94, 0x16, 0x8a, 0xab, 0x2e, + 0x64, 0x06, 0x74, 0x57, 0xdd, 0xa4, 0x88, 0xe2, 0x42, 0x1a, 0xb2, 0x74, 0x23, 0xd3, 0x64, 0x3a, + 0x8c, 0xa4, 0xf3, 0x0f, 0x93, 0xe9, 0x42, 0x4b, 0x1f, 0x45, 0x04, 0x1f, 0xc3, 0x47, 0xea, 0x53, + 0x48, 0x27, 0x99, 0x98, 0x85, 0xe2, 0xee, 0x3f, 0x9c, 0x39, 0xe7, 0x3b, 0x81, 0xa0, 0x63, 0x01, + 0x20, 0x0a, 0x4e, 0x99, 0x96, 0xd4, 0xf0, 0x12, 0x96, 0x26, 0xe3, 0x44, 0x1b, 0xb0, 0x80, 0x51, + 0x65, 0x11, 0xa6, 0xe5, 0xc9, 0xb0, 0x7e, 0xe6, 0x9c, 0xd9, 0x72, 0x4e, 0x73, 0x5e, 0x66, 0x46, + 0x6a, 0x0b, 0xa6, 0x7a, 0x7d, 0x7a, 0x85, 0xc2, 0xb4, 0xce, 0xe3, 0x08, 0xf5, 0x34, 0xb3, 0x96, + 0x1b, 0x15, 0x05, 0xc3, 0x60, 0xf4, 0x2f, 0xf5, 0x12, 0x1f, 0xa1, 0x6e, 0xf9, 0xbc, 
0x98, 0x41,
+ 0x11, 0x75, 0x9c, 0x51, 0xab, 0x71, 0x82, 0x42, 0x4f, 0xc7, 0x03, 0x52, 0x83, 0x3d, 0x8c, 0xdc,
+ 0x48, 0x5e, 0xe4, 0x53, 0x6d, 0x25, 0xa8, 0x32, 0x7a, 0x0d, 0x87, 0xc1, 0xe8, 0xff, 0xf9, 0x21,
+ 0xf9, 0x9a, 0x47, 0x3c, 0x39, 0x6d, 0x5a, 0xc6, 0xf7, 0x08, 0xfb, 0xfb, 0xd1, 0xf0, 0x39, 0x37,
+ 0x5c, 0xfd, 0xde, 0xfd, 0x16, 0xba, 0x55, 0xfb, 0x3e, 0x99, 0xfa, 0xe0, 0x38, 0x47, 0x07, 0x4d,
+ 0x5d, 0xce, 0xe7, 0x52, 0xc9, 0x6d, 0x02, 0xf7, 0xbf, 0xe9, 0x2b, 0x78, 0x6b, 0xea, 0x9f, 0x1f,
+ 0xa7, 0x36, 0xf3, 0xae, 0x9b, 0xba, 0xc9, 0x47, 0xb0, 0x89, 0x07, 0x08, 0x6b, 0x03, 0x4f, 0x3c,
+ 0xb3, 0x25, 0x5d, 0xd5, 0xd7, 0x1a, 0xf7, 0x92, 0xea, 0xda, 0xc4, 0x67, 0xa8, 0x0f, 0x46, 0x30,
+ 0x25, 0x5f, 0x98, 0xa3, 0xd0, 0x55, 0x5b, 0xae, 0xf1, 0xce, 0xb4, 0x25, 0xd1, 0x5e, 0x06, 0x8b,
+ 0x16, 0x7e, 0xb2, 0xeb, 0xf9, 0xc9, 0x76, 0x70, 0x12, 0x3c, 0xc4, 0xb5, 0x29, 0xa0, 0x60, 0x4a,
+ 0x10, 0x30, 0x82, 0x0a, 0xae, 0xdc, 0xe7, 0xd0, 0xca, 0x62, 0x5a, 0x96, 0xee, 0xff, 0x60, 0x4a,
+ 0x81, 0xad, 0xa0, 0x97, 0xad, 0xfb, 0xbd, 0xf3, 0xf7, 0x36, 0x4e, 0xee, 0x66, 0x5d, 0x17, 0xba,
+ 0xf8, 0x0c, 0x00, 0x00, 0xff, 0xff, 0xcf, 0x7e, 0x96, 0xa2, 0x53, 0x02, 0x00, 0x00,
+}
diff --git a/vendor/google.golang.org/genproto/googleapis/api/distribution/distribution.pb.go b/vendor/google.golang.org/genproto/googleapis/api/distribution/distribution.pb.go
new file mode 100644
index 000000000..98af383ae
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/api/distribution/distribution.pb.go
@@ -0,0 +1,714 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/api/distribution.proto
+
+package distribution // import "google.golang.org/genproto/googleapis/api/distribution"
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import any "github.com/golang/protobuf/ptypes/any"
+import timestamp "github.com/golang/protobuf/ptypes/timestamp"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+// `Distribution` contains summary statistics for a population of values. It
+// optionally contains a histogram representing the distribution of those values
+// across a set of buckets.
+//
+// The summary statistics are the count, mean, sum of the squared deviation from
+// the mean, the minimum, and the maximum of the set of population values.
+// The histogram is based on a sequence of buckets and gives a count of values
+// that fall into each bucket. The boundaries of the buckets are given either
+// explicitly or by formulas for buckets of fixed or exponentially increasing
+// widths.
+//
+// Although it is not forbidden, it is generally a bad idea to include
+// non-finite values (infinities or NaNs) in the population of values, as this
+// will render the `mean` and `sum_of_squared_deviation` fields meaningless.
+type Distribution struct {
+ // The number of values in the population. Must be non-negative. This value
+ // must equal the sum of the values in `bucket_counts` if a histogram is
+ // provided.
+ Count int64 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"`
+ // The arithmetic mean of the values in the population.
If `count` is zero + // then this field must be zero. + Mean float64 `protobuf:"fixed64,2,opt,name=mean,proto3" json:"mean,omitempty"` + // The sum of squared deviations from the mean of the values in the + // population. For values x_i this is: + // + // Sum[i=1..n]((x_i - mean)^2) + // + // Knuth, "The Art of Computer Programming", Vol. 2, page 323, 3rd edition + // describes Welford's method for accumulating this sum in one pass. + // + // If `count` is zero then this field must be zero. + SumOfSquaredDeviation float64 `protobuf:"fixed64,3,opt,name=sum_of_squared_deviation,json=sumOfSquaredDeviation,proto3" json:"sum_of_squared_deviation,omitempty"` + // If specified, contains the range of the population values. The field + // must not be present if the `count` is zero. + Range *Distribution_Range `protobuf:"bytes,4,opt,name=range,proto3" json:"range,omitempty"` + // Defines the histogram bucket boundaries. If the distribution does not + // contain a histogram, then omit this field. + BucketOptions *Distribution_BucketOptions `protobuf:"bytes,6,opt,name=bucket_options,json=bucketOptions,proto3" json:"bucket_options,omitempty"` + // The number of values in each bucket of the histogram, as described in + // `bucket_options`. If the distribution does not have a histogram, then omit + // this field. If there is a histogram, then the sum of the values in + // `bucket_counts` must equal the value in the `count` field of the + // distribution. + // + // If present, `bucket_counts` should contain N values, where N is the number + // of buckets specified in `bucket_options`. If you supply fewer than N + // values, the remaining values are assumed to be 0. + // + // The order of the values in `bucket_counts` follows the bucket numbering + // schemes described for the three bucket types. The first value must be the + // count for the underflow bucket (number 0). The next N-2 values are the + // counts for the finite buckets (number 1 through N-2). The N'th value in + // `bucket_counts` is the count for the overflow bucket (number N-1). + BucketCounts []int64 `protobuf:"varint,7,rep,packed,name=bucket_counts,json=bucketCounts,proto3" json:"bucket_counts,omitempty"` + // Must be in increasing order of `value` field. 
+ Exemplars []*Distribution_Exemplar `protobuf:"bytes,10,rep,name=exemplars,proto3" json:"exemplars,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Distribution) Reset() { *m = Distribution{} } +func (m *Distribution) String() string { return proto.CompactTextString(m) } +func (*Distribution) ProtoMessage() {} +func (*Distribution) Descriptor() ([]byte, []int) { + return fileDescriptor_distribution_56c6a1d50d474e70, []int{0} +} +func (m *Distribution) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Distribution.Unmarshal(m, b) +} +func (m *Distribution) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Distribution.Marshal(b, m, deterministic) +} +func (dst *Distribution) XXX_Merge(src proto.Message) { + xxx_messageInfo_Distribution.Merge(dst, src) +} +func (m *Distribution) XXX_Size() int { + return xxx_messageInfo_Distribution.Size(m) +} +func (m *Distribution) XXX_DiscardUnknown() { + xxx_messageInfo_Distribution.DiscardUnknown(m) +} + +var xxx_messageInfo_Distribution proto.InternalMessageInfo + +func (m *Distribution) GetCount() int64 { + if m != nil { + return m.Count + } + return 0 +} + +func (m *Distribution) GetMean() float64 { + if m != nil { + return m.Mean + } + return 0 +} + +func (m *Distribution) GetSumOfSquaredDeviation() float64 { + if m != nil { + return m.SumOfSquaredDeviation + } + return 0 +} + +func (m *Distribution) GetRange() *Distribution_Range { + if m != nil { + return m.Range + } + return nil +} + +func (m *Distribution) GetBucketOptions() *Distribution_BucketOptions { + if m != nil { + return m.BucketOptions + } + return nil +} + +func (m *Distribution) GetBucketCounts() []int64 { + if m != nil { + return m.BucketCounts + } + return nil +} + +func (m *Distribution) GetExemplars() []*Distribution_Exemplar { + if m != nil { + return m.Exemplars + } + return nil +} + +// The range of the population values. +type Distribution_Range struct { + // The minimum of the population values. + Min float64 `protobuf:"fixed64,1,opt,name=min,proto3" json:"min,omitempty"` + // The maximum of the population values. 
+ Max float64 `protobuf:"fixed64,2,opt,name=max,proto3" json:"max,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Distribution_Range) Reset() { *m = Distribution_Range{} } +func (m *Distribution_Range) String() string { return proto.CompactTextString(m) } +func (*Distribution_Range) ProtoMessage() {} +func (*Distribution_Range) Descriptor() ([]byte, []int) { + return fileDescriptor_distribution_56c6a1d50d474e70, []int{0, 0} +} +func (m *Distribution_Range) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Distribution_Range.Unmarshal(m, b) +} +func (m *Distribution_Range) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Distribution_Range.Marshal(b, m, deterministic) +} +func (dst *Distribution_Range) XXX_Merge(src proto.Message) { + xxx_messageInfo_Distribution_Range.Merge(dst, src) +} +func (m *Distribution_Range) XXX_Size() int { + return xxx_messageInfo_Distribution_Range.Size(m) +} +func (m *Distribution_Range) XXX_DiscardUnknown() { + xxx_messageInfo_Distribution_Range.DiscardUnknown(m) +} + +var xxx_messageInfo_Distribution_Range proto.InternalMessageInfo + +func (m *Distribution_Range) GetMin() float64 { + if m != nil { + return m.Min + } + return 0 +} + +func (m *Distribution_Range) GetMax() float64 { + if m != nil { + return m.Max + } + return 0 +} + +// `BucketOptions` describes the bucket boundaries used to create a histogram +// for the distribution. The buckets can be in a linear sequence, an +// exponential sequence, or each bucket can be specified explicitly. +// `BucketOptions` does not include the number of values in each bucket. +// +// A bucket has an inclusive lower bound and exclusive upper bound for the +// values that are counted for that bucket. The upper bound of a bucket must +// be strictly greater than the lower bound. The sequence of N buckets for a +// distribution consists of an underflow bucket (number 0), zero or more +// finite buckets (number 1 through N - 2) and an overflow bucket (number N - +// 1). The buckets are contiguous: the lower bound of bucket i (i > 0) is the +// same as the upper bound of bucket i - 1. The buckets span the whole range +// of finite values: lower bound of the underflow bucket is -infinity and the +// upper bound of the overflow bucket is +infinity. The finite buckets are +// so-called because both bounds are finite. +type Distribution_BucketOptions struct { + // Exactly one of these three fields must be set. 
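+ // For example (an illustrative sketch, not part of the generated comment):
+ // `linear_buckets { num_finite_buckets: 3 width: 10 offset: 0 }` describes
+ // the buckets (-inf, 0), [0, 10), [10, 20), [20, 30), and [30, +inf),
+ // with the other two options left unset.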
+ // + // Types that are valid to be assigned to Options: + // *Distribution_BucketOptions_LinearBuckets + // *Distribution_BucketOptions_ExponentialBuckets + // *Distribution_BucketOptions_ExplicitBuckets + Options isDistribution_BucketOptions_Options `protobuf_oneof:"options"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Distribution_BucketOptions) Reset() { *m = Distribution_BucketOptions{} } +func (m *Distribution_BucketOptions) String() string { return proto.CompactTextString(m) } +func (*Distribution_BucketOptions) ProtoMessage() {} +func (*Distribution_BucketOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_distribution_56c6a1d50d474e70, []int{0, 1} +} +func (m *Distribution_BucketOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Distribution_BucketOptions.Unmarshal(m, b) +} +func (m *Distribution_BucketOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Distribution_BucketOptions.Marshal(b, m, deterministic) +} +func (dst *Distribution_BucketOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_Distribution_BucketOptions.Merge(dst, src) +} +func (m *Distribution_BucketOptions) XXX_Size() int { + return xxx_messageInfo_Distribution_BucketOptions.Size(m) +} +func (m *Distribution_BucketOptions) XXX_DiscardUnknown() { + xxx_messageInfo_Distribution_BucketOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_Distribution_BucketOptions proto.InternalMessageInfo + +type isDistribution_BucketOptions_Options interface { + isDistribution_BucketOptions_Options() +} + +type Distribution_BucketOptions_LinearBuckets struct { + LinearBuckets *Distribution_BucketOptions_Linear `protobuf:"bytes,1,opt,name=linear_buckets,json=linearBuckets,proto3,oneof"` +} + +type Distribution_BucketOptions_ExponentialBuckets struct { + ExponentialBuckets *Distribution_BucketOptions_Exponential `protobuf:"bytes,2,opt,name=exponential_buckets,json=exponentialBuckets,proto3,oneof"` +} + +type Distribution_BucketOptions_ExplicitBuckets struct { + ExplicitBuckets *Distribution_BucketOptions_Explicit `protobuf:"bytes,3,opt,name=explicit_buckets,json=explicitBuckets,proto3,oneof"` +} + +func (*Distribution_BucketOptions_LinearBuckets) isDistribution_BucketOptions_Options() {} + +func (*Distribution_BucketOptions_ExponentialBuckets) isDistribution_BucketOptions_Options() {} + +func (*Distribution_BucketOptions_ExplicitBuckets) isDistribution_BucketOptions_Options() {} + +func (m *Distribution_BucketOptions) GetOptions() isDistribution_BucketOptions_Options { + if m != nil { + return m.Options + } + return nil +} + +func (m *Distribution_BucketOptions) GetLinearBuckets() *Distribution_BucketOptions_Linear { + if x, ok := m.GetOptions().(*Distribution_BucketOptions_LinearBuckets); ok { + return x.LinearBuckets + } + return nil +} + +func (m *Distribution_BucketOptions) GetExponentialBuckets() *Distribution_BucketOptions_Exponential { + if x, ok := m.GetOptions().(*Distribution_BucketOptions_ExponentialBuckets); ok { + return x.ExponentialBuckets + } + return nil +} + +func (m *Distribution_BucketOptions) GetExplicitBuckets() *Distribution_BucketOptions_Explicit { + if x, ok := m.GetOptions().(*Distribution_BucketOptions_ExplicitBuckets); ok { + return x.ExplicitBuckets + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*Distribution_BucketOptions) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Distribution_BucketOptions_OneofMarshaler, _Distribution_BucketOptions_OneofUnmarshaler, _Distribution_BucketOptions_OneofSizer, []interface{}{ + (*Distribution_BucketOptions_LinearBuckets)(nil), + (*Distribution_BucketOptions_ExponentialBuckets)(nil), + (*Distribution_BucketOptions_ExplicitBuckets)(nil), + } +} + +func _Distribution_BucketOptions_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Distribution_BucketOptions) + // options + switch x := m.Options.(type) { + case *Distribution_BucketOptions_LinearBuckets: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.LinearBuckets); err != nil { + return err + } + case *Distribution_BucketOptions_ExponentialBuckets: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ExponentialBuckets); err != nil { + return err + } + case *Distribution_BucketOptions_ExplicitBuckets: + b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ExplicitBuckets); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("Distribution_BucketOptions.Options has unexpected type %T", x) + } + return nil +} + +func _Distribution_BucketOptions_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Distribution_BucketOptions) + switch tag { + case 1: // options.linear_buckets + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Distribution_BucketOptions_Linear) + err := b.DecodeMessage(msg) + m.Options = &Distribution_BucketOptions_LinearBuckets{msg} + return true, err + case 2: // options.exponential_buckets + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Distribution_BucketOptions_Exponential) + err := b.DecodeMessage(msg) + m.Options = &Distribution_BucketOptions_ExponentialBuckets{msg} + return true, err + case 3: // options.explicit_buckets + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Distribution_BucketOptions_Explicit) + err := b.DecodeMessage(msg) + m.Options = &Distribution_BucketOptions_ExplicitBuckets{msg} + return true, err + default: + return false, nil + } +} + +func _Distribution_BucketOptions_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Distribution_BucketOptions) + // options + switch x := m.Options.(type) { + case *Distribution_BucketOptions_LinearBuckets: + s := proto.Size(x.LinearBuckets) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *Distribution_BucketOptions_ExponentialBuckets: + s := proto.Size(x.ExponentialBuckets) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *Distribution_BucketOptions_ExplicitBuckets: + s := proto.Size(x.ExplicitBuckets) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Specifies a linear sequence of buckets that all have the same width +// (except overflow and underflow). Each bucket represents a constant +// absolute uncertainty on the specific value in the bucket. +// +// There are `num_finite_buckets + 2` (= N) buckets. Bucket `i` has the +// following boundaries: +// +// Upper bound (0 <= i < N-1): offset + (width * i). 
+// Lower bound (1 <= i < N): offset + (width * (i - 1)). +type Distribution_BucketOptions_Linear struct { + // Must be greater than 0. + NumFiniteBuckets int32 `protobuf:"varint,1,opt,name=num_finite_buckets,json=numFiniteBuckets,proto3" json:"num_finite_buckets,omitempty"` + // Must be greater than 0. + Width float64 `protobuf:"fixed64,2,opt,name=width,proto3" json:"width,omitempty"` + // Lower bound of the first bucket. + Offset float64 `protobuf:"fixed64,3,opt,name=offset,proto3" json:"offset,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Distribution_BucketOptions_Linear) Reset() { *m = Distribution_BucketOptions_Linear{} } +func (m *Distribution_BucketOptions_Linear) String() string { return proto.CompactTextString(m) } +func (*Distribution_BucketOptions_Linear) ProtoMessage() {} +func (*Distribution_BucketOptions_Linear) Descriptor() ([]byte, []int) { + return fileDescriptor_distribution_56c6a1d50d474e70, []int{0, 1, 0} +} +func (m *Distribution_BucketOptions_Linear) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Distribution_BucketOptions_Linear.Unmarshal(m, b) +} +func (m *Distribution_BucketOptions_Linear) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Distribution_BucketOptions_Linear.Marshal(b, m, deterministic) +} +func (dst *Distribution_BucketOptions_Linear) XXX_Merge(src proto.Message) { + xxx_messageInfo_Distribution_BucketOptions_Linear.Merge(dst, src) +} +func (m *Distribution_BucketOptions_Linear) XXX_Size() int { + return xxx_messageInfo_Distribution_BucketOptions_Linear.Size(m) +} +func (m *Distribution_BucketOptions_Linear) XXX_DiscardUnknown() { + xxx_messageInfo_Distribution_BucketOptions_Linear.DiscardUnknown(m) +} + +var xxx_messageInfo_Distribution_BucketOptions_Linear proto.InternalMessageInfo + +func (m *Distribution_BucketOptions_Linear) GetNumFiniteBuckets() int32 { + if m != nil { + return m.NumFiniteBuckets + } + return 0 +} + +func (m *Distribution_BucketOptions_Linear) GetWidth() float64 { + if m != nil { + return m.Width + } + return 0 +} + +func (m *Distribution_BucketOptions_Linear) GetOffset() float64 { + if m != nil { + return m.Offset + } + return 0 +} + +// Specifies an exponential sequence of buckets that have a width that is +// proportional to the value of the lower bound. Each bucket represents a +// constant relative uncertainty on a specific value in the bucket. +// +// There are `num_finite_buckets + 2` (= N) buckets. Bucket `i` has the +// following boundaries: +// +// Upper bound (0 <= i < N-1): scale * (growth_factor ^ i). +// Lower bound (1 <= i < N): scale * (growth_factor ^ (i - 1)). +type Distribution_BucketOptions_Exponential struct { + // Must be greater than 0. + NumFiniteBuckets int32 `protobuf:"varint,1,opt,name=num_finite_buckets,json=numFiniteBuckets,proto3" json:"num_finite_buckets,omitempty"` + // Must be greater than 1. + GrowthFactor float64 `protobuf:"fixed64,2,opt,name=growth_factor,json=growthFactor,proto3" json:"growth_factor,omitempty"` + // Must be greater than 0. 
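+ // For example (an illustrative case): `num_finite_buckets: 2`,
+ // `growth_factor: 10`, and `scale: 1` give the finite buckets [1, 10) and
+ // [10, 100), with underflow (-inf, 1) and overflow [100, +inf).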
+ Scale float64 `protobuf:"fixed64,3,opt,name=scale,proto3" json:"scale,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Distribution_BucketOptions_Exponential) Reset() {
+ *m = Distribution_BucketOptions_Exponential{}
+}
+func (m *Distribution_BucketOptions_Exponential) String() string { return proto.CompactTextString(m) }
+func (*Distribution_BucketOptions_Exponential) ProtoMessage() {}
+func (*Distribution_BucketOptions_Exponential) Descriptor() ([]byte, []int) {
+ return fileDescriptor_distribution_56c6a1d50d474e70, []int{0, 1, 1}
+}
+func (m *Distribution_BucketOptions_Exponential) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Distribution_BucketOptions_Exponential.Unmarshal(m, b)
+}
+func (m *Distribution_BucketOptions_Exponential) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Distribution_BucketOptions_Exponential.Marshal(b, m, deterministic)
+}
+func (dst *Distribution_BucketOptions_Exponential) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Distribution_BucketOptions_Exponential.Merge(dst, src)
+}
+func (m *Distribution_BucketOptions_Exponential) XXX_Size() int {
+ return xxx_messageInfo_Distribution_BucketOptions_Exponential.Size(m)
+}
+func (m *Distribution_BucketOptions_Exponential) XXX_DiscardUnknown() {
+ xxx_messageInfo_Distribution_BucketOptions_Exponential.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Distribution_BucketOptions_Exponential proto.InternalMessageInfo
+
+func (m *Distribution_BucketOptions_Exponential) GetNumFiniteBuckets() int32 {
+ if m != nil {
+ return m.NumFiniteBuckets
+ }
+ return 0
+}
+
+func (m *Distribution_BucketOptions_Exponential) GetGrowthFactor() float64 {
+ if m != nil {
+ return m.GrowthFactor
+ }
+ return 0
+}
+
+func (m *Distribution_BucketOptions_Exponential) GetScale() float64 {
+ if m != nil {
+ return m.Scale
+ }
+ return 0
+}
+
+// Specifies a set of buckets with arbitrary widths.
+//
+// There are `size(bounds) + 1` (= N) buckets. Bucket `i` has the following
+// boundaries:
+//
+//    Upper bound (0 <= i < N-1): bounds[i]
+//    Lower bound (1 <= i < N):   bounds[i - 1]
+//
+// The `bounds` field must contain at least one element. If `bounds` has
+// only one element, then there are no finite buckets, and that single
+// element is the common boundary of the overflow and underflow buckets.
+type Distribution_BucketOptions_Explicit struct {
+ // The values must be monotonically increasing.
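+ // For example (an illustrative case): `bounds: [0, 10, 100]` yields the
+ // buckets (-inf, 0), [0, 10), [10, 100), and [100, +inf).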
+ Bounds []float64 `protobuf:"fixed64,1,rep,packed,name=bounds,proto3" json:"bounds,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Distribution_BucketOptions_Explicit) Reset() { *m = Distribution_BucketOptions_Explicit{} }
+func (m *Distribution_BucketOptions_Explicit) String() string { return proto.CompactTextString(m) }
+func (*Distribution_BucketOptions_Explicit) ProtoMessage() {}
+func (*Distribution_BucketOptions_Explicit) Descriptor() ([]byte, []int) {
+ return fileDescriptor_distribution_56c6a1d50d474e70, []int{0, 1, 2}
+}
+func (m *Distribution_BucketOptions_Explicit) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Distribution_BucketOptions_Explicit.Unmarshal(m, b)
+}
+func (m *Distribution_BucketOptions_Explicit) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Distribution_BucketOptions_Explicit.Marshal(b, m, deterministic)
+}
+func (dst *Distribution_BucketOptions_Explicit) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Distribution_BucketOptions_Explicit.Merge(dst, src)
+}
+func (m *Distribution_BucketOptions_Explicit) XXX_Size() int {
+ return xxx_messageInfo_Distribution_BucketOptions_Explicit.Size(m)
+}
+func (m *Distribution_BucketOptions_Explicit) XXX_DiscardUnknown() {
+ xxx_messageInfo_Distribution_BucketOptions_Explicit.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Distribution_BucketOptions_Explicit proto.InternalMessageInfo
+
+func (m *Distribution_BucketOptions_Explicit) GetBounds() []float64 {
+ if m != nil {
+ return m.Bounds
+ }
+ return nil
+}
+
+// Exemplars are example points that may be used to annotate aggregated
+// distribution values. They are metadata that gives information about a
+// particular value added to a Distribution bucket, such as a trace ID that
+// was active when a value was added. They may contain further information,
+// such as example values and timestamps, origin, etc.
+type Distribution_Exemplar struct {
+ // Value of the exemplar point. This value determines to which bucket the
+ // exemplar belongs.
+ Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"`
+ // The observation (sampling) time of the above value.
+ Timestamp *timestamp.Timestamp `protobuf:"bytes,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
+ // Contextual information about the example value. Examples are:
+ //
+ // Trace ID: type.googleapis.com/google.devtools.cloudtrace.v1.Trace
+ //
+ // Literal string: type.googleapis.com/google.protobuf.StringValue
+ //
+ // Labels dropped during aggregation:
+ // type.googleapis.com/google.monitoring.v3.DroppedLabels
+ //
+ // There may be only a single attachment of any given message type in a
+ // single exemplar, and this is enforced by the system.
+ Attachments []*any.Any `protobuf:"bytes,3,rep,name=attachments,proto3" json:"attachments,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Distribution_Exemplar) Reset() { *m = Distribution_Exemplar{} } +func (m *Distribution_Exemplar) String() string { return proto.CompactTextString(m) } +func (*Distribution_Exemplar) ProtoMessage() {} +func (*Distribution_Exemplar) Descriptor() ([]byte, []int) { + return fileDescriptor_distribution_56c6a1d50d474e70, []int{0, 2} +} +func (m *Distribution_Exemplar) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Distribution_Exemplar.Unmarshal(m, b) +} +func (m *Distribution_Exemplar) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Distribution_Exemplar.Marshal(b, m, deterministic) +} +func (dst *Distribution_Exemplar) XXX_Merge(src proto.Message) { + xxx_messageInfo_Distribution_Exemplar.Merge(dst, src) +} +func (m *Distribution_Exemplar) XXX_Size() int { + return xxx_messageInfo_Distribution_Exemplar.Size(m) +} +func (m *Distribution_Exemplar) XXX_DiscardUnknown() { + xxx_messageInfo_Distribution_Exemplar.DiscardUnknown(m) +} + +var xxx_messageInfo_Distribution_Exemplar proto.InternalMessageInfo + +func (m *Distribution_Exemplar) GetValue() float64 { + if m != nil { + return m.Value + } + return 0 +} + +func (m *Distribution_Exemplar) GetTimestamp() *timestamp.Timestamp { + if m != nil { + return m.Timestamp + } + return nil +} + +func (m *Distribution_Exemplar) GetAttachments() []*any.Any { + if m != nil { + return m.Attachments + } + return nil +} + +func init() { + proto.RegisterType((*Distribution)(nil), "google.api.Distribution") + proto.RegisterType((*Distribution_Range)(nil), "google.api.Distribution.Range") + proto.RegisterType((*Distribution_BucketOptions)(nil), "google.api.Distribution.BucketOptions") + proto.RegisterType((*Distribution_BucketOptions_Linear)(nil), "google.api.Distribution.BucketOptions.Linear") + proto.RegisterType((*Distribution_BucketOptions_Exponential)(nil), "google.api.Distribution.BucketOptions.Exponential") + proto.RegisterType((*Distribution_BucketOptions_Explicit)(nil), "google.api.Distribution.BucketOptions.Explicit") + proto.RegisterType((*Distribution_Exemplar)(nil), "google.api.Distribution.Exemplar") +} + +func init() { + proto.RegisterFile("google/api/distribution.proto", fileDescriptor_distribution_56c6a1d50d474e70) +} + +var fileDescriptor_distribution_56c6a1d50d474e70 = []byte{ + // 631 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0xed, 0x6a, 0xd4, 0x40, + 0x14, 0x6d, 0x9a, 0xdd, 0x6d, 0x7b, 0xb7, 0x5b, 0xeb, 0x58, 0x25, 0x06, 0xd4, 0xb5, 0x05, 0x59, + 0x50, 0xb3, 0xb0, 0x8a, 0x0a, 0xfe, 0x90, 0x6e, 0x3f, 0xac, 0xa0, 0xb4, 0x8c, 0xe2, 0x0f, 0x11, + 0xc2, 0x6c, 0x76, 0x92, 0x0e, 0x26, 0x33, 0x69, 0x32, 0x69, 0xb7, 0xaf, 0xe1, 0x23, 0xf8, 0x16, + 0xbe, 0x8a, 0x4f, 0x23, 0xf3, 0x91, 0x6e, 0x6a, 0x29, 0xd4, 0x7f, 0xb9, 0xf7, 0x9c, 0x7b, 0xce, + 0xbd, 0x73, 0x67, 0x02, 0x0f, 0x12, 0x21, 0x92, 0x94, 0x0e, 0x49, 0xce, 0x86, 0x53, 0x56, 0xca, + 0x82, 0x4d, 0x2a, 0xc9, 0x04, 0x0f, 0xf2, 0x42, 0x48, 0x81, 0xc0, 0xc0, 0x01, 0xc9, 0x99, 0x7f, + 0xdf, 0x52, 0x35, 0x32, 0xa9, 0xe2, 0x21, 0xe1, 0xe7, 0x86, 0xe6, 0x3f, 0xfa, 0x17, 0x92, 0x2c, + 0xa3, 0xa5, 0x24, 0x59, 0x6e, 0x08, 0x9b, 0x7f, 0x96, 0x61, 0x75, 0xb7, 0x21, 0x8f, 0x36, 0xa0, + 0x1d, 0x89, 0x8a, 0x4b, 0xcf, 0xe9, 0x3b, 0x03, 0x17, 0x9b, 0x00, 0x21, 0x68, 0x65, 
0x94, 0x70, + 0x6f, 0xb1, 0xef, 0x0c, 0x1c, 0xac, 0xbf, 0xd1, 0x6b, 0xf0, 0xca, 0x2a, 0x0b, 0x45, 0x1c, 0x96, + 0x27, 0x15, 0x29, 0xe8, 0x34, 0x9c, 0xd2, 0x53, 0x46, 0x94, 0x8a, 0xe7, 0x6a, 0xde, 0xdd, 0xb2, + 0xca, 0x0e, 0xe3, 0xcf, 0x06, 0xdd, 0xad, 0x41, 0xf4, 0x12, 0xda, 0x05, 0xe1, 0x09, 0xf5, 0x5a, + 0x7d, 0x67, 0xd0, 0x1d, 0x3d, 0x0c, 0xe6, 0xb3, 0x04, 0xcd, 0x5e, 0x02, 0xac, 0x58, 0xd8, 0x90, + 0xd1, 0x27, 0x58, 0x9b, 0x54, 0xd1, 0x0f, 0x2a, 0x43, 0x91, 0x2b, 0xb4, 0xf4, 0x3a, 0xba, 0xfc, + 0xc9, 0xb5, 0xe5, 0x63, 0x4d, 0x3f, 0x34, 0x6c, 0xdc, 0x9b, 0x34, 0x43, 0xb4, 0x05, 0x36, 0x11, + 0xea, 0x09, 0x4b, 0x6f, 0xa9, 0xef, 0x0e, 0x5c, 0xbc, 0x6a, 0x92, 0x3b, 0x3a, 0x87, 0xde, 0xc1, + 0x0a, 0x9d, 0xd1, 0x2c, 0x4f, 0x49, 0x51, 0x7a, 0xd0, 0x77, 0x07, 0xdd, 0xd1, 0xe3, 0x6b, 0xed, + 0xf6, 0x2c, 0x13, 0xcf, 0x6b, 0xfc, 0xa7, 0xd0, 0xd6, 0x43, 0xa0, 0x75, 0x70, 0x33, 0xc6, 0xf5, + 0xa1, 0x3a, 0x58, 0x7d, 0xea, 0x0c, 0x99, 0xd9, 0x13, 0x55, 0x9f, 0xfe, 0xef, 0x16, 0xf4, 0x2e, + 0xf5, 0x8c, 0xbe, 0xc2, 0x5a, 0xca, 0x38, 0x25, 0x45, 0x68, 0xda, 0x2a, 0xb5, 0x40, 0x77, 0xf4, + 0xfc, 0x66, 0x33, 0x07, 0x1f, 0x75, 0xf1, 0xc1, 0x02, 0xee, 0x19, 0x19, 0x83, 0x96, 0x88, 0xc2, + 0x1d, 0x3a, 0xcb, 0x05, 0xa7, 0x5c, 0x32, 0x92, 0x5e, 0x88, 0x2f, 0x6a, 0xf1, 0xd1, 0x0d, 0xc5, + 0xf7, 0xe6, 0x0a, 0x07, 0x0b, 0x18, 0x35, 0x04, 0x6b, 0x9b, 0xef, 0xb0, 0x4e, 0x67, 0x79, 0xca, + 0x22, 0x26, 0x2f, 0x3c, 0x5c, 0xed, 0x31, 0xbc, 0xb9, 0x87, 0x2e, 0x3f, 0x58, 0xc0, 0xb7, 0x6a, + 0x29, 0xab, 0xee, 0x4f, 0xa1, 0x63, 0xe6, 0x43, 0xcf, 0x00, 0xf1, 0x2a, 0x0b, 0x63, 0xc6, 0x99, + 0xa4, 0x97, 0x8e, 0xaa, 0x8d, 0xd7, 0x79, 0x95, 0xed, 0x6b, 0xa0, 0xee, 0x6a, 0x03, 0xda, 0x67, + 0x6c, 0x2a, 0x8f, 0xed, 0xd1, 0x9b, 0x00, 0xdd, 0x83, 0x8e, 0x88, 0xe3, 0x92, 0x4a, 0x7b, 0x77, + 0x6d, 0xe4, 0x9f, 0x42, 0xb7, 0x31, 0xe8, 0x7f, 0x5a, 0x6d, 0x41, 0x2f, 0x29, 0xc4, 0x99, 0x3c, + 0x0e, 0x63, 0x12, 0x49, 0x51, 0x58, 0xcb, 0x55, 0x93, 0xdc, 0xd7, 0x39, 0xd5, 0x4f, 0x19, 0x91, + 0x94, 0x5a, 0x63, 0x13, 0xf8, 0x9b, 0xb0, 0x5c, 0x0f, 0xaf, 0x7a, 0x9b, 0x88, 0x8a, 0x4f, 0x95, + 0x91, 0xab, 0x7a, 0x33, 0xd1, 0x78, 0x05, 0x96, 0xec, 0x5b, 0xf0, 0x7f, 0x3a, 0x8a, 0x6f, 0xae, + 0x9d, 0x52, 0x3c, 0x25, 0x69, 0x45, 0xed, 0x75, 0x33, 0x01, 0x7a, 0x03, 0x2b, 0x17, 0xaf, 0xdf, + 0xae, 0xda, 0xaf, 0xd7, 0x50, 0xff, 0x1f, 0x82, 0x2f, 0x35, 0x03, 0xcf, 0xc9, 0xe8, 0x15, 0x74, + 0x89, 0x94, 0x24, 0x3a, 0xce, 0x28, 0xd7, 0x2b, 0x54, 0x0f, 0x61, 0xe3, 0x4a, 0xed, 0x36, 0x3f, + 0xc7, 0x4d, 0xe2, 0xf8, 0x04, 0xd6, 0x22, 0x91, 0x35, 0x56, 0x3d, 0xbe, 0xdd, 0xdc, 0xf5, 0x91, + 0x2a, 0x3c, 0x72, 0xbe, 0xed, 0x58, 0x42, 0x22, 0x52, 0xc2, 0x93, 0x40, 0x14, 0xc9, 0x30, 0xa1, + 0x5c, 0xcb, 0x0e, 0x0d, 0x44, 0x72, 0x56, 0x5e, 0xf9, 0x13, 0xbe, 0x6d, 0x06, 0xbf, 0x16, 0x5b, + 0xef, 0xb7, 0x8f, 0x3e, 0x4c, 0x3a, 0xba, 0xec, 0xc5, 0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x89, + 0xf1, 0xc2, 0x23, 0x3f, 0x05, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/api/label/label.pb.go b/vendor/google.golang.org/genproto/googleapis/api/label/label.pb.go new file mode 100644 index 000000000..cae68236f --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/api/label/label.pb.go @@ -0,0 +1,134 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/api/label.proto + +package label // import "google.golang.org/genproto/googleapis/api/label" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Value types that can be used as label values. +type LabelDescriptor_ValueType int32 + +const ( + // A variable-length string. This is the default. + LabelDescriptor_STRING LabelDescriptor_ValueType = 0 + // Boolean; true or false. + LabelDescriptor_BOOL LabelDescriptor_ValueType = 1 + // A 64-bit signed integer. + LabelDescriptor_INT64 LabelDescriptor_ValueType = 2 +) + +var LabelDescriptor_ValueType_name = map[int32]string{ + 0: "STRING", + 1: "BOOL", + 2: "INT64", +} +var LabelDescriptor_ValueType_value = map[string]int32{ + "STRING": 0, + "BOOL": 1, + "INT64": 2, +} + +func (x LabelDescriptor_ValueType) String() string { + return proto.EnumName(LabelDescriptor_ValueType_name, int32(x)) +} +func (LabelDescriptor_ValueType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_label_7ab1ab74ef036f1c, []int{0, 0} +} + +// A description of a label. +type LabelDescriptor struct { + // The label key. + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + // The type of data that can be assigned to the label. + ValueType LabelDescriptor_ValueType `protobuf:"varint,2,opt,name=value_type,json=valueType,proto3,enum=google.api.LabelDescriptor_ValueType" json:"value_type,omitempty"` + // A human-readable description for the label. + Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LabelDescriptor) Reset() { *m = LabelDescriptor{} } +func (m *LabelDescriptor) String() string { return proto.CompactTextString(m) } +func (*LabelDescriptor) ProtoMessage() {} +func (*LabelDescriptor) Descriptor() ([]byte, []int) { + return fileDescriptor_label_7ab1ab74ef036f1c, []int{0} +} +func (m *LabelDescriptor) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_LabelDescriptor.Unmarshal(m, b) +} +func (m *LabelDescriptor) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_LabelDescriptor.Marshal(b, m, deterministic) +} +func (dst *LabelDescriptor) XXX_Merge(src proto.Message) { + xxx_messageInfo_LabelDescriptor.Merge(dst, src) +} +func (m *LabelDescriptor) XXX_Size() int { + return xxx_messageInfo_LabelDescriptor.Size(m) +} +func (m *LabelDescriptor) XXX_DiscardUnknown() { + xxx_messageInfo_LabelDescriptor.DiscardUnknown(m) +} + +var xxx_messageInfo_LabelDescriptor proto.InternalMessageInfo + +func (m *LabelDescriptor) GetKey() string { + if m != nil { + return m.Key + } + return "" +} + +func (m *LabelDescriptor) GetValueType() LabelDescriptor_ValueType { + if m != nil { + return m.ValueType + } + return LabelDescriptor_STRING +} + +func (m *LabelDescriptor) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func init() { + proto.RegisterType((*LabelDescriptor)(nil), "google.api.LabelDescriptor") + proto.RegisterEnum("google.api.LabelDescriptor_ValueType", LabelDescriptor_ValueType_name, LabelDescriptor_ValueType_value) +} + +func init() { proto.RegisterFile("google/api/label.proto", fileDescriptor_label_7ab1ab74ef036f1c) } + +var 
fileDescriptor_label_7ab1ab74ef036f1c = []byte{ + // 252 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4b, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x4f, 0x2c, 0xc8, 0xd4, 0xcf, 0x49, 0x4c, 0x4a, 0xcd, 0xd1, 0x2b, 0x28, 0xca, + 0x2f, 0xc9, 0x17, 0xe2, 0x82, 0x88, 0xeb, 0x25, 0x16, 0x64, 0x2a, 0xed, 0x64, 0xe4, 0xe2, 0xf7, + 0x01, 0xc9, 0xb9, 0xa4, 0x16, 0x27, 0x17, 0x65, 0x16, 0x94, 0xe4, 0x17, 0x09, 0x09, 0x70, 0x31, + 0x67, 0xa7, 0x56, 0x4a, 0x30, 0x2a, 0x30, 0x6a, 0x70, 0x06, 0x81, 0x98, 0x42, 0x2e, 0x5c, 0x5c, + 0x65, 0x89, 0x39, 0xa5, 0xa9, 0xf1, 0x25, 0x95, 0x05, 0xa9, 0x12, 0x4c, 0x0a, 0x8c, 0x1a, 0x7c, + 0x46, 0xaa, 0x7a, 0x08, 0x63, 0xf4, 0xd0, 0x8c, 0xd0, 0x0b, 0x03, 0xa9, 0x0e, 0xa9, 0x2c, 0x48, + 0x0d, 0xe2, 0x2c, 0x83, 0x31, 0x85, 0x14, 0xb8, 0xb8, 0x53, 0xa0, 0x4a, 0x32, 0xf3, 0xf3, 0x24, + 0x98, 0xc1, 0xe6, 0x23, 0x0b, 0x29, 0xe9, 0x70, 0x71, 0xc2, 0x75, 0x0a, 0x71, 0x71, 0xb1, 0x05, + 0x87, 0x04, 0x79, 0xfa, 0xb9, 0x0b, 0x30, 0x08, 0x71, 0x70, 0xb1, 0x38, 0xf9, 0xfb, 0xfb, 0x08, + 0x30, 0x0a, 0x71, 0x72, 0xb1, 0x7a, 0xfa, 0x85, 0x98, 0x99, 0x08, 0x30, 0x39, 0xc5, 0x73, 0xf1, + 0x25, 0xe7, 0xe7, 0x22, 0x39, 0xc3, 0x89, 0x0b, 0xec, 0x8e, 0x00, 0x90, 0x2f, 0x03, 0x18, 0xa3, + 0x4c, 0xa1, 0x32, 0xe9, 0xf9, 0x39, 0x89, 0x79, 0xe9, 0x7a, 0xf9, 0x45, 0xe9, 0xfa, 0xe9, 0xa9, + 0x79, 0xe0, 0x30, 0xd0, 0x87, 0x48, 0x25, 0x16, 0x64, 0x16, 0x23, 0x82, 0xc7, 0x1a, 0x4c, 0xfe, + 0x60, 0x64, 0x5c, 0xc4, 0xc4, 0xe2, 0xee, 0x18, 0xe0, 0x99, 0xc4, 0x06, 0x56, 0x6b, 0x0c, 0x08, + 0x00, 0x00, 0xff, 0xff, 0x57, 0x04, 0xaa, 0x1f, 0x49, 0x01, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/api/metric/metric.pb.go b/vendor/google.golang.org/genproto/googleapis/api/metric/metric.pb.go new file mode 100644 index 000000000..f2ed88844 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/api/metric/metric.pb.go @@ -0,0 +1,389 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/api/metric.proto + +package metric // import "google.golang.org/genproto/googleapis/api/metric" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import label "google.golang.org/genproto/googleapis/api/label" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// The kind of measurement. It describes how the data is reported. +type MetricDescriptor_MetricKind int32 + +const ( + // Do not use this default value. + MetricDescriptor_METRIC_KIND_UNSPECIFIED MetricDescriptor_MetricKind = 0 + // An instantaneous measurement of a value. + MetricDescriptor_GAUGE MetricDescriptor_MetricKind = 1 + // The change in a value during a time interval. + MetricDescriptor_DELTA MetricDescriptor_MetricKind = 2 + // A value accumulated over a time interval. Cumulative + // measurements in a time series should have the same start time + // and increasing end times, until an event resets the cumulative + // value to zero and sets a new start time for the following + // points. 
+ MetricDescriptor_CUMULATIVE MetricDescriptor_MetricKind = 3 +) + +var MetricDescriptor_MetricKind_name = map[int32]string{ + 0: "METRIC_KIND_UNSPECIFIED", + 1: "GAUGE", + 2: "DELTA", + 3: "CUMULATIVE", +} +var MetricDescriptor_MetricKind_value = map[string]int32{ + "METRIC_KIND_UNSPECIFIED": 0, + "GAUGE": 1, + "DELTA": 2, + "CUMULATIVE": 3, +} + +func (x MetricDescriptor_MetricKind) String() string { + return proto.EnumName(MetricDescriptor_MetricKind_name, int32(x)) +} +func (MetricDescriptor_MetricKind) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_metric_18b95065d4b7f9c0, []int{0, 0} +} + +// The value type of a metric. +type MetricDescriptor_ValueType int32 + +const ( + // Do not use this default value. + MetricDescriptor_VALUE_TYPE_UNSPECIFIED MetricDescriptor_ValueType = 0 + // The value is a boolean. + // This value type can be used only if the metric kind is `GAUGE`. + MetricDescriptor_BOOL MetricDescriptor_ValueType = 1 + // The value is a signed 64-bit integer. + MetricDescriptor_INT64 MetricDescriptor_ValueType = 2 + // The value is a double precision floating point number. + MetricDescriptor_DOUBLE MetricDescriptor_ValueType = 3 + // The value is a text string. + // This value type can be used only if the metric kind is `GAUGE`. + MetricDescriptor_STRING MetricDescriptor_ValueType = 4 + // The value is a [`Distribution`][google.api.Distribution]. + MetricDescriptor_DISTRIBUTION MetricDescriptor_ValueType = 5 + // The value is money. + MetricDescriptor_MONEY MetricDescriptor_ValueType = 6 +) + +var MetricDescriptor_ValueType_name = map[int32]string{ + 0: "VALUE_TYPE_UNSPECIFIED", + 1: "BOOL", + 2: "INT64", + 3: "DOUBLE", + 4: "STRING", + 5: "DISTRIBUTION", + 6: "MONEY", +} +var MetricDescriptor_ValueType_value = map[string]int32{ + "VALUE_TYPE_UNSPECIFIED": 0, + "BOOL": 1, + "INT64": 2, + "DOUBLE": 3, + "STRING": 4, + "DISTRIBUTION": 5, + "MONEY": 6, +} + +func (x MetricDescriptor_ValueType) String() string { + return proto.EnumName(MetricDescriptor_ValueType_name, int32(x)) +} +func (MetricDescriptor_ValueType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_metric_18b95065d4b7f9c0, []int{0, 1} +} + +// Defines a metric type and its schema. Once a metric descriptor is created, +// deleting or altering it stops data collection and makes the metric type's +// existing data unusable. +type MetricDescriptor struct { + // The resource name of the metric descriptor. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The metric type, including its DNS name prefix. The type is not + // URL-encoded. All user-defined custom metric types have the DNS name + // `custom.googleapis.com`. Metric types should use a natural hierarchical + // grouping. For example: + // + // "custom.googleapis.com/invoice/paid/amount" + // "appengine.googleapis.com/http/server/response_latencies" + Type string `protobuf:"bytes,8,opt,name=type,proto3" json:"type,omitempty"` + // The set of labels that can be used to describe a specific + // instance of this metric type. For example, the + // `appengine.googleapis.com/http/server/response_latencies` metric + // type has a label for the HTTP response code, `response_code`, so + // you can look at latencies for successful responses or just + // for responses that failed. + Labels []*label.LabelDescriptor `protobuf:"bytes,2,rep,name=labels,proto3" json:"labels,omitempty"` + // Whether the metric records instantaneous values, changes to a value, etc. 
+ // Some combinations of `metric_kind` and `value_type` might not be supported. + MetricKind MetricDescriptor_MetricKind `protobuf:"varint,3,opt,name=metric_kind,json=metricKind,proto3,enum=google.api.MetricDescriptor_MetricKind" json:"metric_kind,omitempty"` + // Whether the measurement is an integer, a floating-point number, etc. + // Some combinations of `metric_kind` and `value_type` might not be supported. + ValueType MetricDescriptor_ValueType `protobuf:"varint,4,opt,name=value_type,json=valueType,proto3,enum=google.api.MetricDescriptor_ValueType" json:"value_type,omitempty"` + // The unit in which the metric value is reported. It is only applicable + // if the `value_type` is `INT64`, `DOUBLE`, or `DISTRIBUTION`. The + // supported units are a subset of [The Unified Code for Units of + // Measure](http://unitsofmeasure.org/ucum.html) standard: + // + // **Basic units (UNIT)** + // + // * `bit` bit + // * `By` byte + // * `s` second + // * `min` minute + // * `h` hour + // * `d` day + // + // **Prefixes (PREFIX)** + // + // * `k` kilo (10**3) + // * `M` mega (10**6) + // * `G` giga (10**9) + // * `T` tera (10**12) + // * `P` peta (10**15) + // * `E` exa (10**18) + // * `Z` zetta (10**21) + // * `Y` yotta (10**24) + // * `m` milli (10**-3) + // * `u` micro (10**-6) + // * `n` nano (10**-9) + // * `p` pico (10**-12) + // * `f` femto (10**-15) + // * `a` atto (10**-18) + // * `z` zepto (10**-21) + // * `y` yocto (10**-24) + // * `Ki` kibi (2**10) + // * `Mi` mebi (2**20) + // * `Gi` gibi (2**30) + // * `Ti` tebi (2**40) + // + // **Grammar** + // + // The grammar also includes these connectors: + // + // * `/` division (as an infix operator, e.g. `1/s`). + // * `.` multiplication (as an infix operator, e.g. `GBy.d`) + // + // The grammar for a unit is as follows: + // + // Expression = Component { "." Component } { "/" Component } ; + // + // Component = ( [ PREFIX ] UNIT | "%" ) [ Annotation ] + // | Annotation + // | "1" + // ; + // + // Annotation = "{" NAME "}" ; + // + // Notes: + // + // * `Annotation` is just a comment if it follows a `UNIT` and is + // equivalent to `1` if it is used alone. For examples, + // `{requests}/s == 1/s`, `By{transmitted}/s == By/s`. + // * `NAME` is a sequence of non-blank printable ASCII characters not + // containing '{' or '}'. + // * `1` represents dimensionless value 1, such as in `1/s`. + // * `%` represents dimensionless value 1/100, and annotates values giving + // a percentage. + Unit string `protobuf:"bytes,5,opt,name=unit,proto3" json:"unit,omitempty"` + // A detailed description of the metric, which can be used in documentation. + Description string `protobuf:"bytes,6,opt,name=description,proto3" json:"description,omitempty"` + // A concise name for the metric, which can be displayed in user interfaces. + // Use sentence case without an ending period, for example "Request count". + // This field is optional but it is recommended to be set for any metrics + // associated with user-visible concepts, such as Quota. 
+ DisplayName string `protobuf:"bytes,7,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MetricDescriptor) Reset() { *m = MetricDescriptor{} } +func (m *MetricDescriptor) String() string { return proto.CompactTextString(m) } +func (*MetricDescriptor) ProtoMessage() {} +func (*MetricDescriptor) Descriptor() ([]byte, []int) { + return fileDescriptor_metric_18b95065d4b7f9c0, []int{0} +} +func (m *MetricDescriptor) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MetricDescriptor.Unmarshal(m, b) +} +func (m *MetricDescriptor) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MetricDescriptor.Marshal(b, m, deterministic) +} +func (dst *MetricDescriptor) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetricDescriptor.Merge(dst, src) +} +func (m *MetricDescriptor) XXX_Size() int { + return xxx_messageInfo_MetricDescriptor.Size(m) +} +func (m *MetricDescriptor) XXX_DiscardUnknown() { + xxx_messageInfo_MetricDescriptor.DiscardUnknown(m) +} + +var xxx_messageInfo_MetricDescriptor proto.InternalMessageInfo + +func (m *MetricDescriptor) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *MetricDescriptor) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *MetricDescriptor) GetLabels() []*label.LabelDescriptor { + if m != nil { + return m.Labels + } + return nil +} + +func (m *MetricDescriptor) GetMetricKind() MetricDescriptor_MetricKind { + if m != nil { + return m.MetricKind + } + return MetricDescriptor_METRIC_KIND_UNSPECIFIED +} + +func (m *MetricDescriptor) GetValueType() MetricDescriptor_ValueType { + if m != nil { + return m.ValueType + } + return MetricDescriptor_VALUE_TYPE_UNSPECIFIED +} + +func (m *MetricDescriptor) GetUnit() string { + if m != nil { + return m.Unit + } + return "" +} + +func (m *MetricDescriptor) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *MetricDescriptor) GetDisplayName() string { + if m != nil { + return m.DisplayName + } + return "" +} + +// A specific metric, identified by specifying values for all of the +// labels of a [`MetricDescriptor`][google.api.MetricDescriptor]. +type Metric struct { + // An existing metric type, see [google.api.MetricDescriptor][google.api.MetricDescriptor]. + // For example, `custom.googleapis.com/invoice/paid/amount`. + Type string `protobuf:"bytes,3,opt,name=type,proto3" json:"type,omitempty"` + // The set of label values that uniquely identify this metric. All + // labels listed in the `MetricDescriptor` must be assigned values. 
+ Labels map[string]string `protobuf:"bytes,2,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Metric) Reset() { *m = Metric{} } +func (m *Metric) String() string { return proto.CompactTextString(m) } +func (*Metric) ProtoMessage() {} +func (*Metric) Descriptor() ([]byte, []int) { + return fileDescriptor_metric_18b95065d4b7f9c0, []int{1} +} +func (m *Metric) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Metric.Unmarshal(m, b) +} +func (m *Metric) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Metric.Marshal(b, m, deterministic) +} +func (dst *Metric) XXX_Merge(src proto.Message) { + xxx_messageInfo_Metric.Merge(dst, src) +} +func (m *Metric) XXX_Size() int { + return xxx_messageInfo_Metric.Size(m) +} +func (m *Metric) XXX_DiscardUnknown() { + xxx_messageInfo_Metric.DiscardUnknown(m) +} + +var xxx_messageInfo_Metric proto.InternalMessageInfo + +func (m *Metric) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *Metric) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +func init() { + proto.RegisterType((*MetricDescriptor)(nil), "google.api.MetricDescriptor") + proto.RegisterType((*Metric)(nil), "google.api.Metric") + proto.RegisterMapType((map[string]string)(nil), "google.api.Metric.LabelsEntry") + proto.RegisterEnum("google.api.MetricDescriptor_MetricKind", MetricDescriptor_MetricKind_name, MetricDescriptor_MetricKind_value) + proto.RegisterEnum("google.api.MetricDescriptor_ValueType", MetricDescriptor_ValueType_name, MetricDescriptor_ValueType_value) +} + +func init() { proto.RegisterFile("google/api/metric.proto", fileDescriptor_metric_18b95065d4b7f9c0) } + +var fileDescriptor_metric_18b95065d4b7f9c0 = []byte{ + // 506 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x53, 0x4d, 0x6f, 0xda, 0x40, + 0x10, 0xad, 0x3f, 0x70, 0xc3, 0x10, 0xa1, 0xd5, 0xaa, 0x4a, 0x2c, 0x22, 0x55, 0x94, 0x43, 0xcb, + 0x09, 0xa4, 0xa4, 0x4a, 0xbf, 0x4e, 0x80, 0xb7, 0xd4, 0x8a, 0xb1, 0x91, 0x63, 0x23, 0xa5, 0x17, + 0xcb, 0x81, 0x95, 0x65, 0xc5, 0xd8, 0xae, 0x71, 0x22, 0xf9, 0x57, 0xf4, 0x17, 0xf4, 0xd2, 0x5f, + 0x5a, 0xed, 0xae, 0x03, 0x16, 0x95, 0x72, 0xe2, 0xed, 0x9b, 0x37, 0x6f, 0x67, 0x96, 0x67, 0x38, + 0x8f, 0xb2, 0x2c, 0x4a, 0xe8, 0x38, 0xcc, 0xe3, 0xf1, 0x96, 0x96, 0x45, 0xbc, 0x1e, 0xe5, 0x45, + 0x56, 0x66, 0x18, 0x44, 0x61, 0x14, 0xe6, 0x71, 0xef, 0xac, 0x21, 0x4a, 0xc2, 0x7b, 0x9a, 0x08, + 0xcd, 0xe0, 0x8f, 0x0a, 0x68, 0xc1, 0x9b, 0x0c, 0xba, 0x5b, 0x17, 0x71, 0x5e, 0x66, 0x05, 0xc6, + 0xa0, 0xa6, 0xe1, 0x96, 0xea, 0x52, 0x5f, 0x1a, 0xb6, 0x5d, 0x8e, 0x19, 0x57, 0x56, 0x39, 0xd5, + 0x4f, 0x04, 0xc7, 0x30, 0xbe, 0x02, 0x8d, 0x7b, 0xed, 0x74, 0xb9, 0xaf, 0x0c, 0x3b, 0x97, 0x17, + 0xa3, 0xc3, 0x8d, 0x23, 0x8b, 0x55, 0x0e, 0xa6, 0x6e, 0x2d, 0xc5, 0x3f, 0xa0, 0x23, 0xa6, 0x0c, + 0x1e, 0xe2, 0x74, 0xa3, 0x2b, 0x7d, 0x69, 0xd8, 0xbd, 0xfc, 0xd0, 0xec, 0x3c, 0x9e, 0xa7, 0x26, + 0x6e, 0xe2, 0x74, 0xe3, 0xc2, 0x76, 0x8f, 0x31, 0x01, 0x78, 0x0a, 0x93, 0x47, 0x1a, 0xf0, 0xc1, + 0x54, 0x6e, 0xf4, 0xfe, 0x45, 0xa3, 0x15, 0x93, 0x7b, 0x55, 0x4e, 0xdd, 0xf6, 0xd3, 0x33, 0x64, + 0x9b, 0x3d, 0xa6, 0x71, 0xa9, 0xb7, 0xc4, 0x66, 0x0c, 0xe3, 0x3e, 0x74, 0x36, 0x75, 0x5b, 0x9c, + 0xa5, 0xba, 0xc6, 0x4b, 0x4d, 0x0a, 0xbf, 0x83, 0xd3, 0x4d, 
0xbc, 0xcb, 0x93, 0xb0, 0x0a, 0xf8, + 0x5b, 0xbd, 0xae, 0x25, 0x82, 0xb3, 0xc3, 0x2d, 0x1d, 0x38, 0x00, 0x87, 0xc9, 0xf1, 0x05, 0x9c, + 0x2f, 0x88, 0xe7, 0x9a, 0xb3, 0xe0, 0xc6, 0xb4, 0x8d, 0xc0, 0xb7, 0x6f, 0x97, 0x64, 0x66, 0x7e, + 0x37, 0x89, 0x81, 0x5e, 0xe1, 0x36, 0xb4, 0xe6, 0x13, 0x7f, 0x4e, 0x90, 0xc4, 0xa0, 0x41, 0x2c, + 0x6f, 0x82, 0x64, 0xdc, 0x05, 0x98, 0xf9, 0x0b, 0xdf, 0x9a, 0x78, 0xe6, 0x8a, 0x20, 0x65, 0xf0, + 0x0b, 0xda, 0xfb, 0x0d, 0x70, 0x0f, 0xce, 0x56, 0x13, 0xcb, 0x27, 0x81, 0x77, 0xb7, 0x24, 0x47, + 0x76, 0x27, 0xa0, 0x4e, 0x1d, 0xc7, 0x12, 0x6e, 0xa6, 0xed, 0x5d, 0x7f, 0x44, 0x32, 0x06, 0xd0, + 0x0c, 0xc7, 0x9f, 0x5a, 0x04, 0x29, 0x0c, 0xdf, 0x7a, 0xae, 0x69, 0xcf, 0x91, 0x8a, 0x11, 0x9c, + 0x1a, 0x26, 0x3b, 0x4d, 0x7d, 0xcf, 0x74, 0x6c, 0xd4, 0x62, 0x4d, 0x0b, 0xc7, 0x26, 0x77, 0x48, + 0x1b, 0xfc, 0x96, 0x40, 0x13, 0x4b, 0xec, 0x13, 0xa0, 0x34, 0x12, 0x70, 0x7d, 0x94, 0x80, 0xb7, + 0xff, 0x3f, 0xbf, 0x08, 0xc2, 0x8e, 0xa4, 0x65, 0x51, 0x3d, 0x87, 0xa0, 0xf7, 0x05, 0x3a, 0x0d, + 0x1a, 0x23, 0x50, 0x1e, 0x68, 0x55, 0xe7, 0x8d, 0x41, 0xfc, 0x06, 0x5a, 0xfc, 0x1f, 0xd2, 0x65, + 0xce, 0x89, 0xc3, 0x57, 0xf9, 0xb3, 0x34, 0x0d, 0xa0, 0xbb, 0xce, 0xb6, 0x8d, 0x7b, 0xa6, 0x1d, + 0x71, 0xd1, 0x92, 0x05, 0x7a, 0x29, 0xfd, 0xfc, 0x54, 0x97, 0xa2, 0x2c, 0x09, 0xd3, 0x68, 0x94, + 0x15, 0xd1, 0x38, 0xa2, 0x29, 0x8f, 0xfb, 0x58, 0x94, 0xc2, 0x3c, 0xde, 0x35, 0x3e, 0x97, 0x6f, + 0xe2, 0xe7, 0xaf, 0xac, 0xce, 0x27, 0x4b, 0xf3, 0x5e, 0xe3, 0xd2, 0xab, 0x7f, 0x01, 0x00, 0x00, + 0xff, 0xff, 0x18, 0x04, 0x05, 0x82, 0x58, 0x03, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/api/monitoredres/monitored_resource.pb.go b/vendor/google.golang.org/genproto/googleapis/api/monitoredres/monitored_resource.pb.go new file mode 100644 index 000000000..653c7ae4a --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/api/monitoredres/monitored_resource.pb.go @@ -0,0 +1,289 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/api/monitored_resource.proto + +package monitoredres // import "google.golang.org/genproto/googleapis/api/monitoredres" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _struct "github.com/golang/protobuf/ptypes/struct" +import label "google.golang.org/genproto/googleapis/api/label" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// An object that describes the schema of a [MonitoredResource][google.api.MonitoredResource] object using a +// type name and a set of labels. For example, the monitored resource +// descriptor for Google Compute Engine VM instances has a type of +// `"gce_instance"` and specifies the use of the labels `"instance_id"` and +// `"zone"` to identify particular VM instances. +// +// Different APIs can support different monitored resource types. APIs generally +// provide a `list` method that returns the monitored resource descriptors used +// by the API. +type MonitoredResourceDescriptor struct { + // Optional. 
The resource name of the monitored resource descriptor: + // `"projects/{project_id}/monitoredResourceDescriptors/{type}"` where + // {type} is the value of the `type` field in this object and + // {project_id} is a project ID that provides API-specific context for + // accessing the type. APIs that do not use project information can use the + // resource name format `"monitoredResourceDescriptors/{type}"`. + Name string `protobuf:"bytes,5,opt,name=name,proto3" json:"name,omitempty"` + // Required. The monitored resource type. For example, the type + // `"cloudsql_database"` represents databases in Google Cloud SQL. + // The maximum length of this value is 256 characters. + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + // Optional. A concise name for the monitored resource type that might be + // displayed in user interfaces. It should be a Title Cased Noun Phrase, + // without any article or other determiners. For example, + // `"Google Cloud SQL Database"`. + DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` + // Optional. A detailed description of the monitored resource type that might + // be used in documentation. + Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` + // Required. A set of labels used to describe instances of this monitored + // resource type. For example, an individual Google Cloud SQL database is + // identified by values for the labels `"database_id"` and `"zone"`. + Labels []*label.LabelDescriptor `protobuf:"bytes,4,rep,name=labels,proto3" json:"labels,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MonitoredResourceDescriptor) Reset() { *m = MonitoredResourceDescriptor{} } +func (m *MonitoredResourceDescriptor) String() string { return proto.CompactTextString(m) } +func (*MonitoredResourceDescriptor) ProtoMessage() {} +func (*MonitoredResourceDescriptor) Descriptor() ([]byte, []int) { + return fileDescriptor_monitored_resource_35ee91132be0d9ce, []int{0} +} +func (m *MonitoredResourceDescriptor) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MonitoredResourceDescriptor.Unmarshal(m, b) +} +func (m *MonitoredResourceDescriptor) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MonitoredResourceDescriptor.Marshal(b, m, deterministic) +} +func (dst *MonitoredResourceDescriptor) XXX_Merge(src proto.Message) { + xxx_messageInfo_MonitoredResourceDescriptor.Merge(dst, src) +} +func (m *MonitoredResourceDescriptor) XXX_Size() int { + return xxx_messageInfo_MonitoredResourceDescriptor.Size(m) +} +func (m *MonitoredResourceDescriptor) XXX_DiscardUnknown() { + xxx_messageInfo_MonitoredResourceDescriptor.DiscardUnknown(m) +} + +var xxx_messageInfo_MonitoredResourceDescriptor proto.InternalMessageInfo + +func (m *MonitoredResourceDescriptor) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *MonitoredResourceDescriptor) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *MonitoredResourceDescriptor) GetDisplayName() string { + if m != nil { + return m.DisplayName + } + return "" +} + +func (m *MonitoredResourceDescriptor) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *MonitoredResourceDescriptor) GetLabels() []*label.LabelDescriptor { + if m != nil { + return m.Labels + } + return nil +} + +// 
An object representing a resource that can be used for monitoring, logging,
+// billing, or other purposes. Examples include virtual machine instances,
+// databases, and storage devices such as disks. The `type` field identifies a
+// [MonitoredResourceDescriptor][google.api.MonitoredResourceDescriptor] object that describes the resource's
+// schema. Information in the `labels` field identifies the actual resource and
+// its attributes according to the schema. For example, a particular Compute
+// Engine VM instance could be represented by the following object, because the
+// [MonitoredResourceDescriptor][google.api.MonitoredResourceDescriptor] for `"gce_instance"` has labels
+// `"instance_id"` and `"zone"`:
+//
+// { "type": "gce_instance",
+// "labels": { "instance_id": "12345678901234",
+// "zone": "us-central1-a" }}
+type MonitoredResource struct {
+ // Required. The monitored resource type. This field must match
+ // the `type` field of a [MonitoredResourceDescriptor][google.api.MonitoredResourceDescriptor] object. For
+ // example, the type of a Compute Engine VM instance is `gce_instance`.
+ Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
+ // Required. Values for all of the labels listed in the associated monitored
+ // resource descriptor. For example, Compute Engine VM instances use the
+ // labels `"project_id"`, `"instance_id"`, and `"zone"`.
+ Labels map[string]string `protobuf:"bytes,2,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *MonitoredResource) Reset() { *m = MonitoredResource{} }
+func (m *MonitoredResource) String() string { return proto.CompactTextString(m) }
+func (*MonitoredResource) ProtoMessage() {}
+func (*MonitoredResource) Descriptor() ([]byte, []int) {
+ return fileDescriptor_monitored_resource_35ee91132be0d9ce, []int{1}
+}
+func (m *MonitoredResource) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_MonitoredResource.Unmarshal(m, b)
+}
+func (m *MonitoredResource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_MonitoredResource.Marshal(b, m, deterministic)
+}
+func (dst *MonitoredResource) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MonitoredResource.Merge(dst, src)
+}
+func (m *MonitoredResource) XXX_Size() int {
+ return xxx_messageInfo_MonitoredResource.Size(m)
+}
+func (m *MonitoredResource) XXX_DiscardUnknown() {
+ xxx_messageInfo_MonitoredResource.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MonitoredResource proto.InternalMessageInfo
+
+func (m *MonitoredResource) GetType() string {
+ if m != nil {
+ return m.Type
+ }
+ return ""
+}
+
+func (m *MonitoredResource) GetLabels() map[string]string {
+ if m != nil {
+ return m.Labels
+ }
+ return nil
+}
+
+// Auxiliary metadata for a [MonitoredResource][google.api.MonitoredResource] object.
+// [MonitoredResource][google.api.MonitoredResource] objects contain the minimum set of information to
+// uniquely identify a monitored resource instance. There is some other useful
+// auxiliary metadata. Google Stackdriver Monitoring & Logging uses an ingestion
+// pipeline to extract metadata for cloud resources of all types, and stores
+// the metadata in this message.
+type MonitoredResourceMetadata struct {
+ // Output only. Values for predefined system metadata labels.
+ // System labels are a kind of metadata extracted by Google Stackdriver. + // Stackdriver determines what system labels are useful and how to obtain + // their values. Some examples: "machine_image", "vpc", "subnet_id", + // "security_group", "name", etc. + // System label values can be only strings, Boolean values, or a list of + // strings. For example: + // + // { "name": "my-test-instance", + // "security_group": ["a", "b", "c"], + // "spot_instance": false } + SystemLabels *_struct.Struct `protobuf:"bytes,1,opt,name=system_labels,json=systemLabels,proto3" json:"system_labels,omitempty"` + // Output only. A map of user-defined metadata labels. + UserLabels map[string]string `protobuf:"bytes,2,rep,name=user_labels,json=userLabels,proto3" json:"user_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MonitoredResourceMetadata) Reset() { *m = MonitoredResourceMetadata{} } +func (m *MonitoredResourceMetadata) String() string { return proto.CompactTextString(m) } +func (*MonitoredResourceMetadata) ProtoMessage() {} +func (*MonitoredResourceMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_monitored_resource_35ee91132be0d9ce, []int{2} +} +func (m *MonitoredResourceMetadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MonitoredResourceMetadata.Unmarshal(m, b) +} +func (m *MonitoredResourceMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MonitoredResourceMetadata.Marshal(b, m, deterministic) +} +func (dst *MonitoredResourceMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_MonitoredResourceMetadata.Merge(dst, src) +} +func (m *MonitoredResourceMetadata) XXX_Size() int { + return xxx_messageInfo_MonitoredResourceMetadata.Size(m) +} +func (m *MonitoredResourceMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_MonitoredResourceMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_MonitoredResourceMetadata proto.InternalMessageInfo + +func (m *MonitoredResourceMetadata) GetSystemLabels() *_struct.Struct { + if m != nil { + return m.SystemLabels + } + return nil +} + +func (m *MonitoredResourceMetadata) GetUserLabels() map[string]string { + if m != nil { + return m.UserLabels + } + return nil +} + +func init() { + proto.RegisterType((*MonitoredResourceDescriptor)(nil), "google.api.MonitoredResourceDescriptor") + proto.RegisterType((*MonitoredResource)(nil), "google.api.MonitoredResource") + proto.RegisterMapType((map[string]string)(nil), "google.api.MonitoredResource.LabelsEntry") + proto.RegisterType((*MonitoredResourceMetadata)(nil), "google.api.MonitoredResourceMetadata") + proto.RegisterMapType((map[string]string)(nil), "google.api.MonitoredResourceMetadata.UserLabelsEntry") +} + +func init() { + proto.RegisterFile("google/api/monitored_resource.proto", fileDescriptor_monitored_resource_35ee91132be0d9ce) +} + +var fileDescriptor_monitored_resource_35ee91132be0d9ce = []byte{ + // 415 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x53, 0x4d, 0xab, 0xd3, 0x40, + 0x14, 0x65, 0xd2, 0x0f, 0xf0, 0xa6, 0x7e, 0x0d, 0x52, 0x63, 0xea, 0xa2, 0xd6, 0x4d, 0xdd, 0x24, + 0xd0, 0x22, 0xf8, 0xb9, 0x68, 0x55, 0x44, 0xb0, 0x52, 0x22, 0xba, 0x70, 0x13, 0xa6, 0xc9, 0x18, + 0x82, 0x49, 0x26, 0xcc, 0x4c, 0x84, 0xfc, 0x1d, 0xc1, 0xdf, 0xe1, 0x5f, 0x72, 0xe9, 0x52, 0x32, + 0x33, 0x69, 0xd3, 0x97, 
0xc7, 0x83, 0xb7, 0xbb, 0xf7, 0xdc, 0x73, 0xcf, 0x3d, 0x27, 0x43, 0xe0, + 0x71, 0xc2, 0x58, 0x92, 0x51, 0x9f, 0x94, 0xa9, 0x9f, 0xb3, 0x22, 0x95, 0x8c, 0xd3, 0x38, 0xe4, + 0x54, 0xb0, 0x8a, 0x47, 0xd4, 0x2b, 0x39, 0x93, 0x0c, 0x83, 0x26, 0x79, 0xa4, 0x4c, 0xdd, 0x69, + 0x67, 0x21, 0x23, 0x07, 0x9a, 0x69, 0x8e, 0xfb, 0xd0, 0xe0, 0xaa, 0x3b, 0x54, 0xdf, 0x7d, 0x21, + 0x79, 0x15, 0x49, 0x3d, 0x5d, 0xfc, 0x41, 0x30, 0xdb, 0xb5, 0xf2, 0x81, 0x51, 0x7f, 0x4b, 0x45, + 0xc4, 0xd3, 0x52, 0x32, 0x8e, 0x31, 0x0c, 0x0b, 0x92, 0x53, 0x67, 0x34, 0x47, 0xcb, 0x1b, 0x81, + 0xaa, 0x1b, 0x4c, 0xd6, 0x25, 0x75, 0x90, 0xc6, 0x9a, 0x1a, 0x3f, 0x82, 0x49, 0x9c, 0x8a, 0x32, + 0x23, 0x75, 0xa8, 0xf8, 0x96, 0x9a, 0xd9, 0x06, 0xfb, 0xd4, 0xac, 0xcd, 0xc1, 0x8e, 0x8d, 0x70, + 0xca, 0x0a, 0x67, 0x60, 0x18, 0x27, 0x08, 0xaf, 0x61, 0xac, 0x9c, 0x0b, 0x67, 0x38, 0x1f, 0x2c, + 0xed, 0xd5, 0xcc, 0x3b, 0xe5, 0xf3, 0x3e, 0x36, 0x93, 0x93, 0xb3, 0xc0, 0x50, 0x17, 0xbf, 0x11, + 0xdc, 0xed, 0x25, 0xb8, 0xd4, 0xe3, 0xe6, 0x28, 0x6f, 0x29, 0xf9, 0x27, 0x5d, 0xf9, 0x9e, 0x84, + 0x3e, 0x28, 0xde, 0x15, 0x92, 0xd7, 0xed, 0x31, 0xf7, 0x39, 0xd8, 0x1d, 0x18, 0xdf, 0x81, 0xc1, + 0x0f, 0x5a, 0x9b, 0x23, 0x4d, 0x89, 0xef, 0xc1, 0xe8, 0x27, 0xc9, 0xaa, 0xf6, 0x03, 0xe8, 0xe6, + 0x85, 0xf5, 0x0c, 0x2d, 0xfe, 0x22, 0x78, 0xd0, 0x3b, 0xb2, 0xa3, 0x92, 0xc4, 0x44, 0x12, 0xfc, + 0x0a, 0x6e, 0x8a, 0x5a, 0x48, 0x9a, 0x87, 0xc6, 0x62, 0xa3, 0x69, 0xaf, 0xee, 0xb7, 0x16, 0xdb, + 0xd7, 0xf3, 0x3e, 0xab, 0xd7, 0x0b, 0x26, 0x9a, 0xad, 0xcd, 0xe0, 0xaf, 0x60, 0x57, 0x82, 0xf2, + 0xf0, 0x2c, 0xde, 0xd3, 0x2b, 0xe3, 0xb5, 0x97, 0xbd, 0x2f, 0x82, 0xf2, 0x6e, 0x54, 0xa8, 0x8e, + 0x80, 0xfb, 0x1a, 0x6e, 0x5f, 0x18, 0x5f, 0x27, 0xf2, 0xb6, 0x86, 0x5b, 0x11, 0xcb, 0x3b, 0x36, + 0xb6, 0xd3, 0x9e, 0x8f, 0x7d, 0x13, 0x6c, 0x8f, 0xbe, 0xbd, 0x31, 0xac, 0x84, 0x65, 0xa4, 0x48, + 0x3c, 0xc6, 0x13, 0x3f, 0xa1, 0x85, 0x8a, 0xed, 0xeb, 0x11, 0x29, 0x53, 0x71, 0xfe, 0x3b, 0x70, + 0x2a, 0x5e, 0x76, 0x9b, 0x7f, 0x08, 0xfd, 0xb2, 0x86, 0xef, 0x37, 0xfb, 0x0f, 0x87, 0xb1, 0xda, + 0x5c, 0xff, 0x0f, 0x00, 0x00, 0xff, 0xff, 0x10, 0x16, 0x7c, 0xe9, 0x47, 0x03, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/devtools/cloudtrace/v2/trace.pb.go b/vendor/google.golang.org/genproto/googleapis/devtools/cloudtrace/v2/trace.pb.go new file mode 100644 index 000000000..d07082edc --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/devtools/cloudtrace/v2/trace.pb.go @@ -0,0 +1,1391 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/devtools/cloudtrace/v2/trace.proto + +package cloudtrace // import "google.golang.org/genproto/googleapis/devtools/cloudtrace/v2" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import timestamp "github.com/golang/protobuf/ptypes/timestamp" +import wrappers "github.com/golang/protobuf/ptypes/wrappers" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import status "google.golang.org/genproto/googleapis/rpc/status" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Indicates whether the message was sent or received. 
+type Span_TimeEvent_MessageEvent_Type int32 + +const ( + // Unknown event type. + Span_TimeEvent_MessageEvent_TYPE_UNSPECIFIED Span_TimeEvent_MessageEvent_Type = 0 + // Indicates a sent message. + Span_TimeEvent_MessageEvent_SENT Span_TimeEvent_MessageEvent_Type = 1 + // Indicates a received message. + Span_TimeEvent_MessageEvent_RECEIVED Span_TimeEvent_MessageEvent_Type = 2 +) + +var Span_TimeEvent_MessageEvent_Type_name = map[int32]string{ + 0: "TYPE_UNSPECIFIED", + 1: "SENT", + 2: "RECEIVED", +} +var Span_TimeEvent_MessageEvent_Type_value = map[string]int32{ + "TYPE_UNSPECIFIED": 0, + "SENT": 1, + "RECEIVED": 2, +} + +func (x Span_TimeEvent_MessageEvent_Type) String() string { + return proto.EnumName(Span_TimeEvent_MessageEvent_Type_name, int32(x)) +} +func (Span_TimeEvent_MessageEvent_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_trace_fa9cb54dc45915f9, []int{0, 1, 1, 0} +} + +// The relationship of the current span relative to the linked span: child, +// parent, or unspecified. +type Span_Link_Type int32 + +const ( + // The relationship of the two spans is unknown. + Span_Link_TYPE_UNSPECIFIED Span_Link_Type = 0 + // The linked span is a child of the current span. + Span_Link_CHILD_LINKED_SPAN Span_Link_Type = 1 + // The linked span is a parent of the current span. + Span_Link_PARENT_LINKED_SPAN Span_Link_Type = 2 +) + +var Span_Link_Type_name = map[int32]string{ + 0: "TYPE_UNSPECIFIED", + 1: "CHILD_LINKED_SPAN", + 2: "PARENT_LINKED_SPAN", +} +var Span_Link_Type_value = map[string]int32{ + "TYPE_UNSPECIFIED": 0, + "CHILD_LINKED_SPAN": 1, + "PARENT_LINKED_SPAN": 2, +} + +func (x Span_Link_Type) String() string { + return proto.EnumName(Span_Link_Type_name, int32(x)) +} +func (Span_Link_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_trace_fa9cb54dc45915f9, []int{0, 3, 0} +} + +// A span represents a single operation within a trace. Spans can be +// nested to form a trace tree. Often, a trace contains a root span +// that describes the end-to-end latency, and one or more subspans for +// its sub-operations. A trace can also contain multiple root spans, +// or none at all. Spans do not need to be contiguous—there may be +// gaps or overlaps between spans in a trace. +type Span struct { + // The resource name of the span in the following format: + // + // projects/[PROJECT_ID]/traces/[TRACE_ID]/spans/[SPAN_ID] + // + // [TRACE_ID] is a unique identifier for a trace within a project; + // it is a 32-character hexadecimal encoding of a 16-byte array. + // + // [SPAN_ID] is a unique identifier for a span within a trace; it + // is a 16-character hexadecimal encoding of an 8-byte array. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The [SPAN_ID] portion of the span's resource name. + SpanId string `protobuf:"bytes,2,opt,name=span_id,json=spanId,proto3" json:"span_id,omitempty"` + // The [SPAN_ID] of this span's parent span. If this is a root span, + // then this field must be empty. + ParentSpanId string `protobuf:"bytes,3,opt,name=parent_span_id,json=parentSpanId,proto3" json:"parent_span_id,omitempty"` + // A description of the span's operation (up to 128 bytes). + // Stackdriver Trace displays the description in the + // {% dynamic print site_values.console_name %}. + // For example, the display name can be a qualified method name or a file name + // and a line number where the operation is called. A best practice is to use + // the same display name within an application and at the same call point. 
+ // This makes it easier to correlate spans in different traces. + DisplayName *TruncatableString `protobuf:"bytes,4,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` + // The start time of the span. On the client side, this is the time kept by + // the local machine where the span execution starts. On the server side, this + // is the time when the server's application handler starts running. + StartTime *timestamp.Timestamp `protobuf:"bytes,5,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"` + // The end time of the span. On the client side, this is the time kept by + // the local machine where the span execution ends. On the server side, this + // is the time when the server application handler stops running. + EndTime *timestamp.Timestamp `protobuf:"bytes,6,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"` + // A set of attributes on the span. You can have up to 32 attributes per + // span. + Attributes *Span_Attributes `protobuf:"bytes,7,opt,name=attributes,proto3" json:"attributes,omitempty"` + // Stack trace captured at the start of the span. + StackTrace *StackTrace `protobuf:"bytes,8,opt,name=stack_trace,json=stackTrace,proto3" json:"stack_trace,omitempty"` + // A set of time events. You can have up to 32 annotations and 128 message + // events per span. + TimeEvents *Span_TimeEvents `protobuf:"bytes,9,opt,name=time_events,json=timeEvents,proto3" json:"time_events,omitempty"` + // Links associated with the span. You can have up to 128 links per Span. + Links *Span_Links `protobuf:"bytes,10,opt,name=links,proto3" json:"links,omitempty"` + // An optional final status for this span. + Status *status.Status `protobuf:"bytes,11,opt,name=status,proto3" json:"status,omitempty"` + // (Optional) Set this parameter to indicate whether this span is in + // the same process as its parent. If you do not set this parameter, + // Stackdriver Trace is unable to take advantage of this helpful + // information. + SameProcessAsParentSpan *wrappers.BoolValue `protobuf:"bytes,12,opt,name=same_process_as_parent_span,json=sameProcessAsParentSpan,proto3" json:"same_process_as_parent_span,omitempty"` + // An optional number of child spans that were generated while this span + // was active. If set, allows implementation to detect missing child spans. 
+ ChildSpanCount *wrappers.Int32Value `protobuf:"bytes,13,opt,name=child_span_count,json=childSpanCount,proto3" json:"child_span_count,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Span) Reset() { *m = Span{} } +func (m *Span) String() string { return proto.CompactTextString(m) } +func (*Span) ProtoMessage() {} +func (*Span) Descriptor() ([]byte, []int) { + return fileDescriptor_trace_fa9cb54dc45915f9, []int{0} +} +func (m *Span) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Span.Unmarshal(m, b) +} +func (m *Span) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Span.Marshal(b, m, deterministic) +} +func (dst *Span) XXX_Merge(src proto.Message) { + xxx_messageInfo_Span.Merge(dst, src) +} +func (m *Span) XXX_Size() int { + return xxx_messageInfo_Span.Size(m) +} +func (m *Span) XXX_DiscardUnknown() { + xxx_messageInfo_Span.DiscardUnknown(m) +} + +var xxx_messageInfo_Span proto.InternalMessageInfo + +func (m *Span) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Span) GetSpanId() string { + if m != nil { + return m.SpanId + } + return "" +} + +func (m *Span) GetParentSpanId() string { + if m != nil { + return m.ParentSpanId + } + return "" +} + +func (m *Span) GetDisplayName() *TruncatableString { + if m != nil { + return m.DisplayName + } + return nil +} + +func (m *Span) GetStartTime() *timestamp.Timestamp { + if m != nil { + return m.StartTime + } + return nil +} + +func (m *Span) GetEndTime() *timestamp.Timestamp { + if m != nil { + return m.EndTime + } + return nil +} + +func (m *Span) GetAttributes() *Span_Attributes { + if m != nil { + return m.Attributes + } + return nil +} + +func (m *Span) GetStackTrace() *StackTrace { + if m != nil { + return m.StackTrace + } + return nil +} + +func (m *Span) GetTimeEvents() *Span_TimeEvents { + if m != nil { + return m.TimeEvents + } + return nil +} + +func (m *Span) GetLinks() *Span_Links { + if m != nil { + return m.Links + } + return nil +} + +func (m *Span) GetStatus() *status.Status { + if m != nil { + return m.Status + } + return nil +} + +func (m *Span) GetSameProcessAsParentSpan() *wrappers.BoolValue { + if m != nil { + return m.SameProcessAsParentSpan + } + return nil +} + +func (m *Span) GetChildSpanCount() *wrappers.Int32Value { + if m != nil { + return m.ChildSpanCount + } + return nil +} + +// A set of attributes, each in the format `[KEY]:[VALUE]`. +type Span_Attributes struct { + // The set of attributes. Each attribute's key can be up to 128 bytes + // long. The value can be a string up to 256 bytes, an integer, or the + // Boolean values `true` and `false`. For example: + // + // "/instance_id": "my-instance" + // "/http/user_agent": "" + // "/http/request_bytes": 300 + // "abc.com/myattribute": true + AttributeMap map[string]*AttributeValue `protobuf:"bytes,1,rep,name=attribute_map,json=attributeMap,proto3" json:"attribute_map,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // The number of attributes that were discarded. Attributes can be discarded + // because their keys are too long or because there are too many attributes. + // If this value is 0 then all attributes are valid. 
+ DroppedAttributesCount int32 `protobuf:"varint,2,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Span_Attributes) Reset() { *m = Span_Attributes{} } +func (m *Span_Attributes) String() string { return proto.CompactTextString(m) } +func (*Span_Attributes) ProtoMessage() {} +func (*Span_Attributes) Descriptor() ([]byte, []int) { + return fileDescriptor_trace_fa9cb54dc45915f9, []int{0, 0} +} +func (m *Span_Attributes) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Span_Attributes.Unmarshal(m, b) +} +func (m *Span_Attributes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Span_Attributes.Marshal(b, m, deterministic) +} +func (dst *Span_Attributes) XXX_Merge(src proto.Message) { + xxx_messageInfo_Span_Attributes.Merge(dst, src) +} +func (m *Span_Attributes) XXX_Size() int { + return xxx_messageInfo_Span_Attributes.Size(m) +} +func (m *Span_Attributes) XXX_DiscardUnknown() { + xxx_messageInfo_Span_Attributes.DiscardUnknown(m) +} + +var xxx_messageInfo_Span_Attributes proto.InternalMessageInfo + +func (m *Span_Attributes) GetAttributeMap() map[string]*AttributeValue { + if m != nil { + return m.AttributeMap + } + return nil +} + +func (m *Span_Attributes) GetDroppedAttributesCount() int32 { + if m != nil { + return m.DroppedAttributesCount + } + return 0 +} + +// A time-stamped annotation or message event in the Span. +type Span_TimeEvent struct { + // The timestamp indicating the time the event occurred. + Time *timestamp.Timestamp `protobuf:"bytes,1,opt,name=time,proto3" json:"time,omitempty"` + // A `TimeEvent` can contain either an `Annotation` object or a + // `MessageEvent` object, but not both. 
+ // + // Types that are valid to be assigned to Value: + // *Span_TimeEvent_Annotation_ + // *Span_TimeEvent_MessageEvent_ + Value isSpan_TimeEvent_Value `protobuf_oneof:"value"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Span_TimeEvent) Reset() { *m = Span_TimeEvent{} } +func (m *Span_TimeEvent) String() string { return proto.CompactTextString(m) } +func (*Span_TimeEvent) ProtoMessage() {} +func (*Span_TimeEvent) Descriptor() ([]byte, []int) { + return fileDescriptor_trace_fa9cb54dc45915f9, []int{0, 1} +} +func (m *Span_TimeEvent) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Span_TimeEvent.Unmarshal(m, b) +} +func (m *Span_TimeEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Span_TimeEvent.Marshal(b, m, deterministic) +} +func (dst *Span_TimeEvent) XXX_Merge(src proto.Message) { + xxx_messageInfo_Span_TimeEvent.Merge(dst, src) +} +func (m *Span_TimeEvent) XXX_Size() int { + return xxx_messageInfo_Span_TimeEvent.Size(m) +} +func (m *Span_TimeEvent) XXX_DiscardUnknown() { + xxx_messageInfo_Span_TimeEvent.DiscardUnknown(m) +} + +var xxx_messageInfo_Span_TimeEvent proto.InternalMessageInfo + +func (m *Span_TimeEvent) GetTime() *timestamp.Timestamp { + if m != nil { + return m.Time + } + return nil +} + +type isSpan_TimeEvent_Value interface { + isSpan_TimeEvent_Value() +} + +type Span_TimeEvent_Annotation_ struct { + Annotation *Span_TimeEvent_Annotation `protobuf:"bytes,2,opt,name=annotation,proto3,oneof"` +} + +type Span_TimeEvent_MessageEvent_ struct { + MessageEvent *Span_TimeEvent_MessageEvent `protobuf:"bytes,3,opt,name=message_event,json=messageEvent,proto3,oneof"` +} + +func (*Span_TimeEvent_Annotation_) isSpan_TimeEvent_Value() {} + +func (*Span_TimeEvent_MessageEvent_) isSpan_TimeEvent_Value() {} + +func (m *Span_TimeEvent) GetValue() isSpan_TimeEvent_Value { + if m != nil { + return m.Value + } + return nil +} + +func (m *Span_TimeEvent) GetAnnotation() *Span_TimeEvent_Annotation { + if x, ok := m.GetValue().(*Span_TimeEvent_Annotation_); ok { + return x.Annotation + } + return nil +} + +func (m *Span_TimeEvent) GetMessageEvent() *Span_TimeEvent_MessageEvent { + if x, ok := m.GetValue().(*Span_TimeEvent_MessageEvent_); ok { + return x.MessageEvent + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
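+
+// Editor's note: an illustrative sketch, not part of the generated file;
+// ev is a local placeholder. The Value oneof above is populated through its
+// wrapper types and read back with the Get accessors:
+//
+//   ev := &Span_TimeEvent{
+//       Value: &Span_TimeEvent_Annotation_{
+//           Annotation: &Span_TimeEvent_Annotation{
+//               Description: &TruncatableString{Value: "cache miss"},
+//           },
+//       },
+//   }
+//   if a := ev.GetAnnotation(); a != nil {
+//       // the oneof holds an Annotation, so GetMessageEvent() returns nil
+//   }
+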
+func (*Span_TimeEvent) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Span_TimeEvent_OneofMarshaler, _Span_TimeEvent_OneofUnmarshaler, _Span_TimeEvent_OneofSizer, []interface{}{ + (*Span_TimeEvent_Annotation_)(nil), + (*Span_TimeEvent_MessageEvent_)(nil), + } +} + +func _Span_TimeEvent_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Span_TimeEvent) + // value + switch x := m.Value.(type) { + case *Span_TimeEvent_Annotation_: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Annotation); err != nil { + return err + } + case *Span_TimeEvent_MessageEvent_: + b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.MessageEvent); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("Span_TimeEvent.Value has unexpected type %T", x) + } + return nil +} + +func _Span_TimeEvent_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Span_TimeEvent) + switch tag { + case 2: // value.annotation + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Span_TimeEvent_Annotation) + err := b.DecodeMessage(msg) + m.Value = &Span_TimeEvent_Annotation_{msg} + return true, err + case 3: // value.message_event + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Span_TimeEvent_MessageEvent) + err := b.DecodeMessage(msg) + m.Value = &Span_TimeEvent_MessageEvent_{msg} + return true, err + default: + return false, nil + } +} + +func _Span_TimeEvent_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Span_TimeEvent) + // value + switch x := m.Value.(type) { + case *Span_TimeEvent_Annotation_: + s := proto.Size(x.Annotation) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *Span_TimeEvent_MessageEvent_: + s := proto.Size(x.MessageEvent) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Text annotation with a set of attributes. +type Span_TimeEvent_Annotation struct { + // A user-supplied message describing the event. The maximum length for + // the description is 256 bytes. + Description *TruncatableString `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"` + // A set of attributes on the annotation. You can have up to 4 attributes + // per Annotation. 
+ Attributes *Span_Attributes `protobuf:"bytes,2,opt,name=attributes,proto3" json:"attributes,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Span_TimeEvent_Annotation) Reset() { *m = Span_TimeEvent_Annotation{} }
+func (m *Span_TimeEvent_Annotation) String() string { return proto.CompactTextString(m) }
+func (*Span_TimeEvent_Annotation) ProtoMessage() {}
+func (*Span_TimeEvent_Annotation) Descriptor() ([]byte, []int) {
+ return fileDescriptor_trace_fa9cb54dc45915f9, []int{0, 1, 0}
+}
+func (m *Span_TimeEvent_Annotation) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Span_TimeEvent_Annotation.Unmarshal(m, b)
+}
+func (m *Span_TimeEvent_Annotation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Span_TimeEvent_Annotation.Marshal(b, m, deterministic)
+}
+func (dst *Span_TimeEvent_Annotation) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Span_TimeEvent_Annotation.Merge(dst, src)
+}
+func (m *Span_TimeEvent_Annotation) XXX_Size() int {
+ return xxx_messageInfo_Span_TimeEvent_Annotation.Size(m)
+}
+func (m *Span_TimeEvent_Annotation) XXX_DiscardUnknown() {
+ xxx_messageInfo_Span_TimeEvent_Annotation.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Span_TimeEvent_Annotation proto.InternalMessageInfo
+
+func (m *Span_TimeEvent_Annotation) GetDescription() *TruncatableString {
+ if m != nil {
+ return m.Description
+ }
+ return nil
+}
+
+func (m *Span_TimeEvent_Annotation) GetAttributes() *Span_Attributes {
+ if m != nil {
+ return m.Attributes
+ }
+ return nil
+}
+
+// An event describing a message sent/received between Spans.
+type Span_TimeEvent_MessageEvent struct {
+ // Type of MessageEvent. Indicates whether the message was sent or
+ // received.
+ Type Span_TimeEvent_MessageEvent_Type `protobuf:"varint,1,opt,name=type,proto3,enum=google.devtools.cloudtrace.v2.Span_TimeEvent_MessageEvent_Type" json:"type,omitempty"`
+ // An identifier for the MessageEvent's message that can be used to match
+ // SENT and RECEIVED MessageEvents. It is recommended to be unique within
+ // a Span.
+ Id int64 `protobuf:"varint,2,opt,name=id,proto3" json:"id,omitempty"`
+ // The number of uncompressed bytes sent or received.
+ UncompressedSizeBytes int64 `protobuf:"varint,3,opt,name=uncompressed_size_bytes,json=uncompressedSizeBytes,proto3" json:"uncompressed_size_bytes,omitempty"`
+ // The number of compressed bytes sent or received. If missing, it is
+ // assumed to be the same size as uncompressed.
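+ //
+ // Editor's note: an illustrative sketch, not generated output. Assuming
+ // the generated constants Span_TimeEvent_MessageEvent_SENT and
+ // Span_TimeEvent_MessageEvent_RECEIVED (declared elsewhere in this file),
+ // sender and receiver record the same Id so the two events can be matched:
+ //
+ //   sent := &Span_TimeEvent_MessageEvent{Type: Span_TimeEvent_MessageEvent_SENT, Id: 7, UncompressedSizeBytes: 1024}
+ //   recv := &Span_TimeEvent_MessageEvent{Type: Span_TimeEvent_MessageEvent_RECEIVED, Id: 7, UncompressedSizeBytes: 1024}
+ //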
+ CompressedSizeBytes int64 `protobuf:"varint,4,opt,name=compressed_size_bytes,json=compressedSizeBytes,proto3" json:"compressed_size_bytes,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Span_TimeEvent_MessageEvent) Reset() { *m = Span_TimeEvent_MessageEvent{} } +func (m *Span_TimeEvent_MessageEvent) String() string { return proto.CompactTextString(m) } +func (*Span_TimeEvent_MessageEvent) ProtoMessage() {} +func (*Span_TimeEvent_MessageEvent) Descriptor() ([]byte, []int) { + return fileDescriptor_trace_fa9cb54dc45915f9, []int{0, 1, 1} +} +func (m *Span_TimeEvent_MessageEvent) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Span_TimeEvent_MessageEvent.Unmarshal(m, b) +} +func (m *Span_TimeEvent_MessageEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Span_TimeEvent_MessageEvent.Marshal(b, m, deterministic) +} +func (dst *Span_TimeEvent_MessageEvent) XXX_Merge(src proto.Message) { + xxx_messageInfo_Span_TimeEvent_MessageEvent.Merge(dst, src) +} +func (m *Span_TimeEvent_MessageEvent) XXX_Size() int { + return xxx_messageInfo_Span_TimeEvent_MessageEvent.Size(m) +} +func (m *Span_TimeEvent_MessageEvent) XXX_DiscardUnknown() { + xxx_messageInfo_Span_TimeEvent_MessageEvent.DiscardUnknown(m) +} + +var xxx_messageInfo_Span_TimeEvent_MessageEvent proto.InternalMessageInfo + +func (m *Span_TimeEvent_MessageEvent) GetType() Span_TimeEvent_MessageEvent_Type { + if m != nil { + return m.Type + } + return Span_TimeEvent_MessageEvent_TYPE_UNSPECIFIED +} + +func (m *Span_TimeEvent_MessageEvent) GetId() int64 { + if m != nil { + return m.Id + } + return 0 +} + +func (m *Span_TimeEvent_MessageEvent) GetUncompressedSizeBytes() int64 { + if m != nil { + return m.UncompressedSizeBytes + } + return 0 +} + +func (m *Span_TimeEvent_MessageEvent) GetCompressedSizeBytes() int64 { + if m != nil { + return m.CompressedSizeBytes + } + return 0 +} + +// A collection of `TimeEvent`s. A `TimeEvent` is a time-stamped annotation +// on the span, consisting of either user-supplied key:value pairs, or +// details of a message sent/received between Spans. +type Span_TimeEvents struct { + // A collection of `TimeEvent`s. + TimeEvent []*Span_TimeEvent `protobuf:"bytes,1,rep,name=time_event,json=timeEvent,proto3" json:"time_event,omitempty"` + // The number of dropped annotations in all the included time events. + // If the value is 0, then no annotations were dropped. + DroppedAnnotationsCount int32 `protobuf:"varint,2,opt,name=dropped_annotations_count,json=droppedAnnotationsCount,proto3" json:"dropped_annotations_count,omitempty"` + // The number of dropped message events in all the included time events. + // If the value is 0, then no message events were dropped. 
+ DroppedMessageEventsCount int32 `protobuf:"varint,3,opt,name=dropped_message_events_count,json=droppedMessageEventsCount,proto3" json:"dropped_message_events_count,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Span_TimeEvents) Reset() { *m = Span_TimeEvents{} }
+func (m *Span_TimeEvents) String() string { return proto.CompactTextString(m) }
+func (*Span_TimeEvents) ProtoMessage() {}
+func (*Span_TimeEvents) Descriptor() ([]byte, []int) {
+ return fileDescriptor_trace_fa9cb54dc45915f9, []int{0, 2}
+}
+func (m *Span_TimeEvents) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Span_TimeEvents.Unmarshal(m, b)
+}
+func (m *Span_TimeEvents) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Span_TimeEvents.Marshal(b, m, deterministic)
+}
+func (dst *Span_TimeEvents) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Span_TimeEvents.Merge(dst, src)
+}
+func (m *Span_TimeEvents) XXX_Size() int {
+ return xxx_messageInfo_Span_TimeEvents.Size(m)
+}
+func (m *Span_TimeEvents) XXX_DiscardUnknown() {
+ xxx_messageInfo_Span_TimeEvents.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Span_TimeEvents proto.InternalMessageInfo
+
+func (m *Span_TimeEvents) GetTimeEvent() []*Span_TimeEvent {
+ if m != nil {
+ return m.TimeEvent
+ }
+ return nil
+}
+
+func (m *Span_TimeEvents) GetDroppedAnnotationsCount() int32 {
+ if m != nil {
+ return m.DroppedAnnotationsCount
+ }
+ return 0
+}
+
+func (m *Span_TimeEvents) GetDroppedMessageEventsCount() int32 {
+ if m != nil {
+ return m.DroppedMessageEventsCount
+ }
+ return 0
+}
+
+// A pointer from the current span to another span in the same trace or in a
+// different trace. For example, this can be used in batching operations,
+// where a single batch handler processes multiple requests from different
+// traces or when the handler receives a request from a different project.
+type Span_Link struct {
+ // The [TRACE_ID] for a trace within a project.
+ TraceId string `protobuf:"bytes,1,opt,name=trace_id,json=traceId,proto3" json:"trace_id,omitempty"`
+ // The [SPAN_ID] for a span within a trace.
+ SpanId string `protobuf:"bytes,2,opt,name=span_id,json=spanId,proto3" json:"span_id,omitempty"`
+ // The relationship of the current span relative to the linked span.
+ Type Span_Link_Type `protobuf:"varint,3,opt,name=type,proto3,enum=google.devtools.cloudtrace.v2.Span_Link_Type" json:"type,omitempty"`
+ // A set of attributes on the link. You can have up to 32 attributes per
+ // link.
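+ //
+ // Editor's note: an illustrative sketch, not part of the generated file;
+ // span is a placeholder *Span and the IDs below are made up. Attaching a
+ // link to a span from another trace might look like:
+ //
+ //   span.Links = &Span_Links{
+ //       Link: []*Span_Link{{
+ //           TraceId: "4bf92f3577b34da6a3ce929d0e0e4736",
+ //           SpanId:  "00f067aa0ba902b7",
+ //       }},
+ //   }
+ //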
+ Attributes *Span_Attributes `protobuf:"bytes,4,opt,name=attributes,proto3" json:"attributes,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Span_Link) Reset() { *m = Span_Link{} } +func (m *Span_Link) String() string { return proto.CompactTextString(m) } +func (*Span_Link) ProtoMessage() {} +func (*Span_Link) Descriptor() ([]byte, []int) { + return fileDescriptor_trace_fa9cb54dc45915f9, []int{0, 3} +} +func (m *Span_Link) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Span_Link.Unmarshal(m, b) +} +func (m *Span_Link) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Span_Link.Marshal(b, m, deterministic) +} +func (dst *Span_Link) XXX_Merge(src proto.Message) { + xxx_messageInfo_Span_Link.Merge(dst, src) +} +func (m *Span_Link) XXX_Size() int { + return xxx_messageInfo_Span_Link.Size(m) +} +func (m *Span_Link) XXX_DiscardUnknown() { + xxx_messageInfo_Span_Link.DiscardUnknown(m) +} + +var xxx_messageInfo_Span_Link proto.InternalMessageInfo + +func (m *Span_Link) GetTraceId() string { + if m != nil { + return m.TraceId + } + return "" +} + +func (m *Span_Link) GetSpanId() string { + if m != nil { + return m.SpanId + } + return "" +} + +func (m *Span_Link) GetType() Span_Link_Type { + if m != nil { + return m.Type + } + return Span_Link_TYPE_UNSPECIFIED +} + +func (m *Span_Link) GetAttributes() *Span_Attributes { + if m != nil { + return m.Attributes + } + return nil +} + +// A collection of links, which are references from this span to a span +// in the same or different trace. +type Span_Links struct { + // A collection of links. + Link []*Span_Link `protobuf:"bytes,1,rep,name=link,proto3" json:"link,omitempty"` + // The number of dropped links after the maximum size was enforced. If + // this value is 0, then no links were dropped. + DroppedLinksCount int32 `protobuf:"varint,2,opt,name=dropped_links_count,json=droppedLinksCount,proto3" json:"dropped_links_count,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Span_Links) Reset() { *m = Span_Links{} } +func (m *Span_Links) String() string { return proto.CompactTextString(m) } +func (*Span_Links) ProtoMessage() {} +func (*Span_Links) Descriptor() ([]byte, []int) { + return fileDescriptor_trace_fa9cb54dc45915f9, []int{0, 4} +} +func (m *Span_Links) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Span_Links.Unmarshal(m, b) +} +func (m *Span_Links) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Span_Links.Marshal(b, m, deterministic) +} +func (dst *Span_Links) XXX_Merge(src proto.Message) { + xxx_messageInfo_Span_Links.Merge(dst, src) +} +func (m *Span_Links) XXX_Size() int { + return xxx_messageInfo_Span_Links.Size(m) +} +func (m *Span_Links) XXX_DiscardUnknown() { + xxx_messageInfo_Span_Links.DiscardUnknown(m) +} + +var xxx_messageInfo_Span_Links proto.InternalMessageInfo + +func (m *Span_Links) GetLink() []*Span_Link { + if m != nil { + return m.Link + } + return nil +} + +func (m *Span_Links) GetDroppedLinksCount() int32 { + if m != nil { + return m.DroppedLinksCount + } + return 0 +} + +// The allowed types for [VALUE] in a `[KEY]:[VALUE]` attribute. +type AttributeValue struct { + // The type of the value. 
+ // + // Types that are valid to be assigned to Value: + // *AttributeValue_StringValue + // *AttributeValue_IntValue + // *AttributeValue_BoolValue + Value isAttributeValue_Value `protobuf_oneof:"value"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AttributeValue) Reset() { *m = AttributeValue{} } +func (m *AttributeValue) String() string { return proto.CompactTextString(m) } +func (*AttributeValue) ProtoMessage() {} +func (*AttributeValue) Descriptor() ([]byte, []int) { + return fileDescriptor_trace_fa9cb54dc45915f9, []int{1} +} +func (m *AttributeValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AttributeValue.Unmarshal(m, b) +} +func (m *AttributeValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AttributeValue.Marshal(b, m, deterministic) +} +func (dst *AttributeValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_AttributeValue.Merge(dst, src) +} +func (m *AttributeValue) XXX_Size() int { + return xxx_messageInfo_AttributeValue.Size(m) +} +func (m *AttributeValue) XXX_DiscardUnknown() { + xxx_messageInfo_AttributeValue.DiscardUnknown(m) +} + +var xxx_messageInfo_AttributeValue proto.InternalMessageInfo + +type isAttributeValue_Value interface { + isAttributeValue_Value() +} + +type AttributeValue_StringValue struct { + StringValue *TruncatableString `protobuf:"bytes,1,opt,name=string_value,json=stringValue,proto3,oneof"` +} + +type AttributeValue_IntValue struct { + IntValue int64 `protobuf:"varint,2,opt,name=int_value,json=intValue,proto3,oneof"` +} + +type AttributeValue_BoolValue struct { + BoolValue bool `protobuf:"varint,3,opt,name=bool_value,json=boolValue,proto3,oneof"` +} + +func (*AttributeValue_StringValue) isAttributeValue_Value() {} + +func (*AttributeValue_IntValue) isAttributeValue_Value() {} + +func (*AttributeValue_BoolValue) isAttributeValue_Value() {} + +func (m *AttributeValue) GetValue() isAttributeValue_Value { + if m != nil { + return m.Value + } + return nil +} + +func (m *AttributeValue) GetStringValue() *TruncatableString { + if x, ok := m.GetValue().(*AttributeValue_StringValue); ok { + return x.StringValue + } + return nil +} + +func (m *AttributeValue) GetIntValue() int64 { + if x, ok := m.GetValue().(*AttributeValue_IntValue); ok { + return x.IntValue + } + return 0 +} + +func (m *AttributeValue) GetBoolValue() bool { + if x, ok := m.GetValue().(*AttributeValue_BoolValue); ok { + return x.BoolValue + } + return false +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
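+
+// Editor's note: an illustrative sketch, not part of the generated file;
+// v is a placeholder *AttributeValue. A received value can be inspected
+// with a type switch over the oneof wrappers defined above:
+//
+//   switch x := v.Value.(type) {
+//   case *AttributeValue_StringValue:
+//       _ = x.StringValue.GetValue()
+//   case *AttributeValue_IntValue:
+//       _ = x.IntValue
+//   case *AttributeValue_BoolValue:
+//       _ = x.BoolValue
+//   }
+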
+func (*AttributeValue) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _AttributeValue_OneofMarshaler, _AttributeValue_OneofUnmarshaler, _AttributeValue_OneofSizer, []interface{}{ + (*AttributeValue_StringValue)(nil), + (*AttributeValue_IntValue)(nil), + (*AttributeValue_BoolValue)(nil), + } +} + +func _AttributeValue_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*AttributeValue) + // value + switch x := m.Value.(type) { + case *AttributeValue_StringValue: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.StringValue); err != nil { + return err + } + case *AttributeValue_IntValue: + b.EncodeVarint(2<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.IntValue)) + case *AttributeValue_BoolValue: + t := uint64(0) + if x.BoolValue { + t = 1 + } + b.EncodeVarint(3<<3 | proto.WireVarint) + b.EncodeVarint(t) + case nil: + default: + return fmt.Errorf("AttributeValue.Value has unexpected type %T", x) + } + return nil +} + +func _AttributeValue_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*AttributeValue) + switch tag { + case 1: // value.string_value + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(TruncatableString) + err := b.DecodeMessage(msg) + m.Value = &AttributeValue_StringValue{msg} + return true, err + case 2: // value.int_value + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Value = &AttributeValue_IntValue{int64(x)} + return true, err + case 3: // value.bool_value + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Value = &AttributeValue_BoolValue{x != 0} + return true, err + default: + return false, nil + } +} + +func _AttributeValue_OneofSizer(msg proto.Message) (n int) { + m := msg.(*AttributeValue) + // value + switch x := m.Value.(type) { + case *AttributeValue_StringValue: + s := proto.Size(x.StringValue) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *AttributeValue_IntValue: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(x.IntValue)) + case *AttributeValue_BoolValue: + n += 1 // tag and wire + n += 1 + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// A call stack appearing in a trace. +type StackTrace struct { + // Stack frames in this stack trace. A maximum of 128 frames are allowed. + StackFrames *StackTrace_StackFrames `protobuf:"bytes,1,opt,name=stack_frames,json=stackFrames,proto3" json:"stack_frames,omitempty"` + // The hash ID is used to conserve network bandwidth for duplicate + // stack traces within a single trace. + // + // Often multiple spans will have identical stack traces. + // The first occurrence of a stack trace should contain both the + // `stackFrame` content and a value in `stackTraceHashId`. + // + // Subsequent spans within the same request can refer + // to that stack trace by only setting `stackTraceHashId`. 
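+ //
+ // Editor's note: an illustrative sketch, not generated output; frames is a
+ // placeholder *StackTrace_StackFrames and the hash is made up. The first
+ // occurrence carries the frames plus the hash, and later spans send only
+ // the hash:
+ //
+ //   first := &StackTrace{StackFrames: frames, StackTraceHashId: 4242}
+ //   later := &StackTrace{StackTraceHashId: 4242}
+ //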
+ StackTraceHashId int64 `protobuf:"varint,2,opt,name=stack_trace_hash_id,json=stackTraceHashId,proto3" json:"stack_trace_hash_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StackTrace) Reset() { *m = StackTrace{} } +func (m *StackTrace) String() string { return proto.CompactTextString(m) } +func (*StackTrace) ProtoMessage() {} +func (*StackTrace) Descriptor() ([]byte, []int) { + return fileDescriptor_trace_fa9cb54dc45915f9, []int{2} +} +func (m *StackTrace) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StackTrace.Unmarshal(m, b) +} +func (m *StackTrace) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StackTrace.Marshal(b, m, deterministic) +} +func (dst *StackTrace) XXX_Merge(src proto.Message) { + xxx_messageInfo_StackTrace.Merge(dst, src) +} +func (m *StackTrace) XXX_Size() int { + return xxx_messageInfo_StackTrace.Size(m) +} +func (m *StackTrace) XXX_DiscardUnknown() { + xxx_messageInfo_StackTrace.DiscardUnknown(m) +} + +var xxx_messageInfo_StackTrace proto.InternalMessageInfo + +func (m *StackTrace) GetStackFrames() *StackTrace_StackFrames { + if m != nil { + return m.StackFrames + } + return nil +} + +func (m *StackTrace) GetStackTraceHashId() int64 { + if m != nil { + return m.StackTraceHashId + } + return 0 +} + +// Represents a single stack frame in a stack trace. +type StackTrace_StackFrame struct { + // The fully-qualified name that uniquely identifies the function or + // method that is active in this frame (up to 1024 bytes). + FunctionName *TruncatableString `protobuf:"bytes,1,opt,name=function_name,json=functionName,proto3" json:"function_name,omitempty"` + // An un-mangled function name, if `function_name` is + // [mangled](http://www.avabodh.com/cxxin/namemangling.html). The name can + // be fully-qualified (up to 1024 bytes). + OriginalFunctionName *TruncatableString `protobuf:"bytes,2,opt,name=original_function_name,json=originalFunctionName,proto3" json:"original_function_name,omitempty"` + // The name of the source file where the function call appears (up to 256 + // bytes). + FileName *TruncatableString `protobuf:"bytes,3,opt,name=file_name,json=fileName,proto3" json:"file_name,omitempty"` + // The line number in `file_name` where the function call appears. + LineNumber int64 `protobuf:"varint,4,opt,name=line_number,json=lineNumber,proto3" json:"line_number,omitempty"` + // The column number where the function call appears, if available. + // This is important in JavaScript because of its anonymous functions. + ColumnNumber int64 `protobuf:"varint,5,opt,name=column_number,json=columnNumber,proto3" json:"column_number,omitempty"` + // The binary module from where the code was loaded. + LoadModule *Module `protobuf:"bytes,6,opt,name=load_module,json=loadModule,proto3" json:"load_module,omitempty"` + // The version of the deployed source code (up to 128 bytes). 
+ SourceVersion *TruncatableString `protobuf:"bytes,7,opt,name=source_version,json=sourceVersion,proto3" json:"source_version,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StackTrace_StackFrame) Reset() { *m = StackTrace_StackFrame{} } +func (m *StackTrace_StackFrame) String() string { return proto.CompactTextString(m) } +func (*StackTrace_StackFrame) ProtoMessage() {} +func (*StackTrace_StackFrame) Descriptor() ([]byte, []int) { + return fileDescriptor_trace_fa9cb54dc45915f9, []int{2, 0} +} +func (m *StackTrace_StackFrame) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StackTrace_StackFrame.Unmarshal(m, b) +} +func (m *StackTrace_StackFrame) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StackTrace_StackFrame.Marshal(b, m, deterministic) +} +func (dst *StackTrace_StackFrame) XXX_Merge(src proto.Message) { + xxx_messageInfo_StackTrace_StackFrame.Merge(dst, src) +} +func (m *StackTrace_StackFrame) XXX_Size() int { + return xxx_messageInfo_StackTrace_StackFrame.Size(m) +} +func (m *StackTrace_StackFrame) XXX_DiscardUnknown() { + xxx_messageInfo_StackTrace_StackFrame.DiscardUnknown(m) +} + +var xxx_messageInfo_StackTrace_StackFrame proto.InternalMessageInfo + +func (m *StackTrace_StackFrame) GetFunctionName() *TruncatableString { + if m != nil { + return m.FunctionName + } + return nil +} + +func (m *StackTrace_StackFrame) GetOriginalFunctionName() *TruncatableString { + if m != nil { + return m.OriginalFunctionName + } + return nil +} + +func (m *StackTrace_StackFrame) GetFileName() *TruncatableString { + if m != nil { + return m.FileName + } + return nil +} + +func (m *StackTrace_StackFrame) GetLineNumber() int64 { + if m != nil { + return m.LineNumber + } + return 0 +} + +func (m *StackTrace_StackFrame) GetColumnNumber() int64 { + if m != nil { + return m.ColumnNumber + } + return 0 +} + +func (m *StackTrace_StackFrame) GetLoadModule() *Module { + if m != nil { + return m.LoadModule + } + return nil +} + +func (m *StackTrace_StackFrame) GetSourceVersion() *TruncatableString { + if m != nil { + return m.SourceVersion + } + return nil +} + +// A collection of stack frames, which can be truncated. +type StackTrace_StackFrames struct { + // Stack frames in this call stack. + Frame []*StackTrace_StackFrame `protobuf:"bytes,1,rep,name=frame,proto3" json:"frame,omitempty"` + // The number of stack frames that were dropped because there + // were too many stack frames. + // If this value is 0, then no stack frames were dropped. 
+ DroppedFramesCount int32 `protobuf:"varint,2,opt,name=dropped_frames_count,json=droppedFramesCount,proto3" json:"dropped_frames_count,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StackTrace_StackFrames) Reset() { *m = StackTrace_StackFrames{} } +func (m *StackTrace_StackFrames) String() string { return proto.CompactTextString(m) } +func (*StackTrace_StackFrames) ProtoMessage() {} +func (*StackTrace_StackFrames) Descriptor() ([]byte, []int) { + return fileDescriptor_trace_fa9cb54dc45915f9, []int{2, 1} +} +func (m *StackTrace_StackFrames) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StackTrace_StackFrames.Unmarshal(m, b) +} +func (m *StackTrace_StackFrames) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StackTrace_StackFrames.Marshal(b, m, deterministic) +} +func (dst *StackTrace_StackFrames) XXX_Merge(src proto.Message) { + xxx_messageInfo_StackTrace_StackFrames.Merge(dst, src) +} +func (m *StackTrace_StackFrames) XXX_Size() int { + return xxx_messageInfo_StackTrace_StackFrames.Size(m) +} +func (m *StackTrace_StackFrames) XXX_DiscardUnknown() { + xxx_messageInfo_StackTrace_StackFrames.DiscardUnknown(m) +} + +var xxx_messageInfo_StackTrace_StackFrames proto.InternalMessageInfo + +func (m *StackTrace_StackFrames) GetFrame() []*StackTrace_StackFrame { + if m != nil { + return m.Frame + } + return nil +} + +func (m *StackTrace_StackFrames) GetDroppedFramesCount() int32 { + if m != nil { + return m.DroppedFramesCount + } + return 0 +} + +// Binary module. +type Module struct { + // For example: main binary, kernel modules, and dynamic libraries + // such as libc.so, sharedlib.so (up to 256 bytes). + Module *TruncatableString `protobuf:"bytes,1,opt,name=module,proto3" json:"module,omitempty"` + // A unique identifier for the module, usually a hash of its + // contents (up to 128 bytes). + BuildId *TruncatableString `protobuf:"bytes,2,opt,name=build_id,json=buildId,proto3" json:"build_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Module) Reset() { *m = Module{} } +func (m *Module) String() string { return proto.CompactTextString(m) } +func (*Module) ProtoMessage() {} +func (*Module) Descriptor() ([]byte, []int) { + return fileDescriptor_trace_fa9cb54dc45915f9, []int{3} +} +func (m *Module) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Module.Unmarshal(m, b) +} +func (m *Module) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Module.Marshal(b, m, deterministic) +} +func (dst *Module) XXX_Merge(src proto.Message) { + xxx_messageInfo_Module.Merge(dst, src) +} +func (m *Module) XXX_Size() int { + return xxx_messageInfo_Module.Size(m) +} +func (m *Module) XXX_DiscardUnknown() { + xxx_messageInfo_Module.DiscardUnknown(m) +} + +var xxx_messageInfo_Module proto.InternalMessageInfo + +func (m *Module) GetModule() *TruncatableString { + if m != nil { + return m.Module + } + return nil +} + +func (m *Module) GetBuildId() *TruncatableString { + if m != nil { + return m.BuildId + } + return nil +} + +// Represents a string that might be shortened to a specified length. +type TruncatableString struct { + // The shortened string. For example, if the original string is 500 + // bytes long and the limit of the string is 128 bytes, then + // `value` contains the first 128 bytes of the 500-byte string. 
+ // + // Truncation always happens on a UTF8 character boundary. If there + // are multi-byte characters in the string, then the length of the + // shortened string might be less than the size limit. + Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` + // The number of bytes removed from the original string. If this + // value is 0, then the string was not shortened. + TruncatedByteCount int32 `protobuf:"varint,2,opt,name=truncated_byte_count,json=truncatedByteCount,proto3" json:"truncated_byte_count,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TruncatableString) Reset() { *m = TruncatableString{} } +func (m *TruncatableString) String() string { return proto.CompactTextString(m) } +func (*TruncatableString) ProtoMessage() {} +func (*TruncatableString) Descriptor() ([]byte, []int) { + return fileDescriptor_trace_fa9cb54dc45915f9, []int{4} +} +func (m *TruncatableString) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TruncatableString.Unmarshal(m, b) +} +func (m *TruncatableString) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TruncatableString.Marshal(b, m, deterministic) +} +func (dst *TruncatableString) XXX_Merge(src proto.Message) { + xxx_messageInfo_TruncatableString.Merge(dst, src) +} +func (m *TruncatableString) XXX_Size() int { + return xxx_messageInfo_TruncatableString.Size(m) +} +func (m *TruncatableString) XXX_DiscardUnknown() { + xxx_messageInfo_TruncatableString.DiscardUnknown(m) +} + +var xxx_messageInfo_TruncatableString proto.InternalMessageInfo + +func (m *TruncatableString) GetValue() string { + if m != nil { + return m.Value + } + return "" +} + +func (m *TruncatableString) GetTruncatedByteCount() int32 { + if m != nil { + return m.TruncatedByteCount + } + return 0 +} + +func init() { + proto.RegisterType((*Span)(nil), "google.devtools.cloudtrace.v2.Span") + proto.RegisterType((*Span_Attributes)(nil), "google.devtools.cloudtrace.v2.Span.Attributes") + proto.RegisterMapType((map[string]*AttributeValue)(nil), "google.devtools.cloudtrace.v2.Span.Attributes.AttributeMapEntry") + proto.RegisterType((*Span_TimeEvent)(nil), "google.devtools.cloudtrace.v2.Span.TimeEvent") + proto.RegisterType((*Span_TimeEvent_Annotation)(nil), "google.devtools.cloudtrace.v2.Span.TimeEvent.Annotation") + proto.RegisterType((*Span_TimeEvent_MessageEvent)(nil), "google.devtools.cloudtrace.v2.Span.TimeEvent.MessageEvent") + proto.RegisterType((*Span_TimeEvents)(nil), "google.devtools.cloudtrace.v2.Span.TimeEvents") + proto.RegisterType((*Span_Link)(nil), "google.devtools.cloudtrace.v2.Span.Link") + proto.RegisterType((*Span_Links)(nil), "google.devtools.cloudtrace.v2.Span.Links") + proto.RegisterType((*AttributeValue)(nil), "google.devtools.cloudtrace.v2.AttributeValue") + proto.RegisterType((*StackTrace)(nil), "google.devtools.cloudtrace.v2.StackTrace") + proto.RegisterType((*StackTrace_StackFrame)(nil), "google.devtools.cloudtrace.v2.StackTrace.StackFrame") + proto.RegisterType((*StackTrace_StackFrames)(nil), "google.devtools.cloudtrace.v2.StackTrace.StackFrames") + proto.RegisterType((*Module)(nil), "google.devtools.cloudtrace.v2.Module") + proto.RegisterType((*TruncatableString)(nil), "google.devtools.cloudtrace.v2.TruncatableString") + proto.RegisterEnum("google.devtools.cloudtrace.v2.Span_TimeEvent_MessageEvent_Type", Span_TimeEvent_MessageEvent_Type_name, Span_TimeEvent_MessageEvent_Type_value) + 
proto.RegisterEnum("google.devtools.cloudtrace.v2.Span_Link_Type", Span_Link_Type_name, Span_Link_Type_value) +} + +func init() { + proto.RegisterFile("google/devtools/cloudtrace/v2/trace.proto", fileDescriptor_trace_fa9cb54dc45915f9) +} + +var fileDescriptor_trace_fa9cb54dc45915f9 = []byte{ + // 1425 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x57, 0x4b, 0x6f, 0xdb, 0xc6, + 0x16, 0x36, 0xf5, 0xd6, 0x91, 0x6c, 0xc8, 0x13, 0x3b, 0x56, 0x94, 0xe4, 0x26, 0xd7, 0xf7, 0x16, + 0x70, 0x0a, 0x98, 0x0a, 0x94, 0xa4, 0x48, 0xd3, 0x02, 0xa9, 0x1f, 0x72, 0xa4, 0xc4, 0x56, 0x05, + 0x4a, 0x71, 0xd3, 0x34, 0x00, 0x31, 0x22, 0xc7, 0x32, 0x11, 0x8a, 0x24, 0x38, 0x43, 0x17, 0xce, + 0xae, 0xeb, 0xae, 0xbb, 0x29, 0x50, 0x74, 0x59, 0x20, 0xab, 0xfc, 0x8e, 0x2e, 0xba, 0xed, 0x7f, + 0xe9, 0xaa, 0x98, 0x07, 0x49, 0x29, 0x2f, 0xdb, 0xca, 0x6e, 0x66, 0xce, 0xf9, 0x3e, 0x9e, 0x33, + 0x73, 0x5e, 0x84, 0x5b, 0x63, 0xdf, 0x1f, 0xbb, 0xa4, 0x69, 0x93, 0x13, 0xe6, 0xfb, 0x2e, 0x6d, + 0x5a, 0xae, 0x1f, 0xd9, 0x2c, 0xc4, 0x16, 0x69, 0x9e, 0xb4, 0x9a, 0x62, 0xa1, 0x07, 0xa1, 0xcf, + 0x7c, 0x74, 0x5d, 0xaa, 0xea, 0xb1, 0xaa, 0x9e, 0xaa, 0xea, 0x27, 0xad, 0xc6, 0x35, 0xc5, 0x84, + 0x03, 0xa7, 0x89, 0x3d, 0xcf, 0x67, 0x98, 0x39, 0xbe, 0x47, 0x25, 0xb8, 0x71, 0x43, 0x49, 0xc5, + 0x6e, 0x14, 0x1d, 0x35, 0x99, 0x33, 0x21, 0x94, 0xe1, 0x49, 0xa0, 0x14, 0xfe, 0xf3, 0xb6, 0xc2, + 0x8f, 0x21, 0x0e, 0x02, 0x12, 0xc6, 0x04, 0x6b, 0x4a, 0x1e, 0x06, 0x56, 0x93, 0x32, 0xcc, 0x22, + 0x25, 0x58, 0xff, 0x07, 0x41, 0x6e, 0x10, 0x60, 0x0f, 0x21, 0xc8, 0x79, 0x78, 0x42, 0xea, 0xda, + 0x4d, 0x6d, 0xa3, 0x6c, 0x88, 0x35, 0x5a, 0x83, 0x22, 0x0d, 0xb0, 0x67, 0x3a, 0x76, 0x3d, 0x23, + 0x8e, 0x0b, 0x7c, 0xdb, 0xb5, 0xd1, 0xff, 0x61, 0x29, 0xc0, 0x21, 0xf1, 0x98, 0x19, 0xcb, 0xb3, + 0x42, 0x5e, 0x95, 0xa7, 0x03, 0xa9, 0x35, 0x80, 0xaa, 0xed, 0xd0, 0xc0, 0xc5, 0xa7, 0xa6, 0xa0, + 0xce, 0xdd, 0xd4, 0x36, 0x2a, 0xad, 0xdb, 0xfa, 0x47, 0x6f, 0x42, 0x1f, 0x86, 0x91, 0x67, 0x61, + 0x86, 0x47, 0x2e, 0x19, 0xb0, 0xd0, 0xf1, 0xc6, 0x46, 0x45, 0xb1, 0xf4, 0xb8, 0x4d, 0x5f, 0x02, + 0x50, 0x86, 0x43, 0x66, 0xf2, 0x2b, 0xa8, 0xe7, 0x05, 0x65, 0x23, 0xa6, 0x8c, 0xdd, 0xd7, 0x87, + 0xf1, 0xfd, 0x18, 0x65, 0xa1, 0xcd, 0xf7, 0xe8, 0x1e, 0x94, 0x88, 0x67, 0x4b, 0x60, 0xe1, 0x4c, + 0x60, 0x91, 0x78, 0xb6, 0x80, 0xf5, 0x00, 0x30, 0x63, 0xa1, 0x33, 0x8a, 0x18, 0xa1, 0xf5, 0xa2, + 0x00, 0xea, 0x67, 0x38, 0xc1, 0x6f, 0x40, 0xdf, 0x4a, 0x50, 0xc6, 0x14, 0x03, 0x7a, 0x0c, 0x15, + 0xca, 0xb0, 0xf5, 0xd2, 0x14, 0xda, 0xf5, 0x92, 0x20, 0xbc, 0x75, 0x16, 0x21, 0x47, 0x0c, 0xf9, + 0xce, 0x00, 0x9a, 0xac, 0xd1, 0xb7, 0x50, 0xe1, 0xee, 0x98, 0xe4, 0x84, 0x78, 0x8c, 0xd6, 0xcb, + 0xe7, 0x37, 0x8e, 0xbb, 0xd6, 0x16, 0x28, 0x03, 0x58, 0xb2, 0x46, 0x0f, 0x21, 0xef, 0x3a, 0xde, + 0x4b, 0x5a, 0x87, 0xf3, 0x99, 0xc5, 0xa9, 0xf6, 0x39, 0xc0, 0x90, 0x38, 0xf4, 0x39, 0x14, 0x64, + 0x80, 0xd5, 0x2b, 0x82, 0x01, 0xc5, 0x0c, 0x61, 0x60, 0x71, 0x2f, 0x58, 0x44, 0x0d, 0xa5, 0x81, + 0x9e, 0xc1, 0x55, 0x8a, 0x27, 0xc4, 0x0c, 0x42, 0xdf, 0x22, 0x94, 0x9a, 0x98, 0x9a, 0x53, 0x61, + 0x55, 0xaf, 0x7e, 0xe0, 0x8d, 0xb6, 0x7d, 0xdf, 0x3d, 0xc4, 0x6e, 0x44, 0x8c, 0x35, 0x0e, 0xef, + 0x4b, 0xf4, 0x16, 0xed, 0x27, 0xc1, 0x87, 0xda, 0x50, 0xb3, 0x8e, 0x1d, 0xd7, 0x96, 0xf1, 0x69, + 0xf9, 0x91, 0xc7, 0xea, 0x8b, 0x82, 0xee, 0xea, 0x3b, 0x74, 0x5d, 0x8f, 0xdd, 0x69, 0x49, 0xbe, + 0x25, 0x01, 0xe2, 0x0c, 0x3b, 0x1c, 0xd2, 0xf8, 0x2d, 0x03, 0x90, 0xbe, 0x22, 0x22, 0xb0, 0x98, + 0xbc, 0xa3, 0x39, 0xc1, 0x41, 0x5d, 0xbb, 0x99, 0xdd, 0xa8, 0xb4, 0xbe, 
0xb9, 0x58, 0x30, 0xa4, + 0xcb, 0x03, 0x1c, 0xb4, 0x3d, 0x16, 0x9e, 0x1a, 0x55, 0x3c, 0x75, 0x84, 0xee, 0x43, 0xdd, 0x0e, + 0xfd, 0x20, 0x20, 0xb6, 0x99, 0x86, 0x8d, 0x72, 0x82, 0xe7, 0x61, 0xde, 0xb8, 0xac, 0xe4, 0x29, + 0xa9, 0xb4, 0xd7, 0x83, 0xe5, 0x77, 0xc8, 0x51, 0x0d, 0xb2, 0x2f, 0xc9, 0xa9, 0x4a, 0x6c, 0xbe, + 0x44, 0x3b, 0x90, 0x3f, 0xe1, 0xfe, 0x0a, 0xb6, 0x4a, 0x6b, 0xf3, 0x0c, 0xfb, 0x13, 0x4a, 0x79, + 0x49, 0x12, 0xfb, 0x20, 0x73, 0x5f, 0x6b, 0xfc, 0x95, 0x87, 0x72, 0x12, 0x48, 0x48, 0x87, 0x9c, + 0xc8, 0x2d, 0xed, 0xcc, 0xdc, 0x12, 0x7a, 0xe8, 0x39, 0x40, 0x5a, 0xea, 0x94, 0x2d, 0xf7, 0x2f, + 0x14, 0xbb, 0xfa, 0x56, 0x82, 0xef, 0x2c, 0x18, 0x53, 0x6c, 0x08, 0xc3, 0xe2, 0x84, 0x50, 0x8a, + 0xc7, 0x2a, 0x37, 0x44, 0x81, 0xaa, 0xb4, 0x1e, 0x5c, 0x8c, 0xfe, 0x40, 0x52, 0x88, 0x4d, 0x67, + 0xc1, 0xa8, 0x4e, 0xa6, 0xf6, 0x8d, 0x37, 0x1a, 0x40, 0xfa, 0x7d, 0x64, 0x40, 0xc5, 0x26, 0xd4, + 0x0a, 0x9d, 0x40, 0xb8, 0xa3, 0xcd, 0x5d, 0xec, 0x52, 0x92, 0xb7, 0x4a, 0x4f, 0xe6, 0x53, 0x4b, + 0x4f, 0xe3, 0x97, 0x0c, 0x54, 0xa7, 0x7d, 0x42, 0x03, 0xc8, 0xb1, 0xd3, 0x40, 0x3e, 0xd9, 0x52, + 0xeb, 0xe1, 0xfc, 0xb7, 0xa3, 0x0f, 0x4f, 0x03, 0x62, 0x08, 0x32, 0xb4, 0x04, 0x19, 0xd5, 0x31, + 0xb2, 0x46, 0xc6, 0xb1, 0xd1, 0x17, 0xb0, 0x16, 0x79, 0x96, 0x3f, 0x09, 0x42, 0x42, 0x29, 0xb1, + 0x4d, 0xea, 0xbc, 0x22, 0xe6, 0xe8, 0x94, 0xbb, 0x94, 0x15, 0x4a, 0xab, 0xd3, 0xe2, 0x81, 0xf3, + 0x8a, 0x6c, 0x73, 0x21, 0x6a, 0xc1, 0xea, 0xfb, 0x51, 0x39, 0x81, 0xba, 0xf4, 0x1e, 0xcc, 0xfa, + 0x5d, 0xc8, 0x71, 0x4b, 0xd0, 0x0a, 0xd4, 0x86, 0xdf, 0xf7, 0xdb, 0xe6, 0xd3, 0xde, 0xa0, 0xdf, + 0xde, 0xe9, 0xee, 0x75, 0xdb, 0xbb, 0xb5, 0x05, 0x54, 0x82, 0xdc, 0xa0, 0xdd, 0x1b, 0xd6, 0x34, + 0x54, 0x85, 0x92, 0xd1, 0xde, 0x69, 0x77, 0x0f, 0xdb, 0xbb, 0xb5, 0xcc, 0x76, 0x51, 0x25, 0x44, + 0xe3, 0x6f, 0x0d, 0x20, 0xad, 0x8c, 0x68, 0x1f, 0x20, 0x2d, 0xaf, 0x2a, 0xdb, 0x37, 0x2f, 0x74, + 0x49, 0x46, 0x39, 0x29, 0xae, 0xe8, 0x01, 0x5c, 0x49, 0xf2, 0x3a, 0x6d, 0xf1, 0x33, 0x89, 0xbd, + 0x16, 0x27, 0x76, 0x2a, 0x17, 0x99, 0x8d, 0x1e, 0xc2, 0xb5, 0x18, 0x3b, 0x13, 0xd7, 0x31, 0x3c, + 0x2b, 0xe0, 0x31, 0xff, 0xf4, 0xcb, 0xa8, 0xd2, 0xf0, 0x6b, 0x06, 0x72, 0xbc, 0x50, 0xa3, 0x2b, + 0x50, 0x12, 0xb6, 0xf2, 0xae, 0x2d, 0x6b, 0x42, 0x51, 0xec, 0xbb, 0xf6, 0x87, 0xfb, 0xfd, 0x96, + 0x0a, 0x93, 0xac, 0x08, 0x93, 0xcd, 0xf3, 0x36, 0x85, 0xe9, 0xa0, 0x98, 0x0d, 0xe5, 0xdc, 0xa7, + 0x86, 0xf2, 0xfa, 0x93, 0x8f, 0x3e, 0xf4, 0x2a, 0x2c, 0xef, 0x74, 0xba, 0xfb, 0xbb, 0xe6, 0x7e, + 0xb7, 0xf7, 0xa4, 0xbd, 0x6b, 0x0e, 0xfa, 0x5b, 0xbd, 0x9a, 0x86, 0x2e, 0x03, 0xea, 0x6f, 0x19, + 0xed, 0xde, 0x70, 0xe6, 0x3c, 0xd3, 0x88, 0x20, 0x2f, 0x9a, 0x18, 0xfa, 0x1a, 0x72, 0xbc, 0x8d, + 0xa9, 0xa7, 0xde, 0x38, 0xaf, 0xa3, 0x86, 0x40, 0x21, 0x1d, 0x2e, 0xc5, 0x8f, 0x24, 0x9a, 0xe1, + 0xcc, 0xd3, 0x2e, 0x2b, 0x91, 0xf8, 0x90, 0x78, 0x93, 0xf5, 0x37, 0x1a, 0x2c, 0xcd, 0x16, 0x57, + 0xf4, 0x14, 0xaa, 0x54, 0x14, 0x02, 0x53, 0x56, 0xe8, 0x39, 0xcb, 0x48, 0x67, 0xc1, 0xa8, 0x48, + 0x1e, 0x49, 0x7b, 0x1d, 0xca, 0x8e, 0xc7, 0xcc, 0xb4, 0xea, 0x67, 0x3b, 0x0b, 0x46, 0xc9, 0xf1, + 0x98, 0x14, 0xdf, 0x00, 0x18, 0xf9, 0xbe, 0xab, 0xe4, 0xfc, 0x95, 0x4b, 0x9d, 0x05, 0xa3, 0x3c, + 0x8a, 0x1b, 0x6d, 0x92, 0x20, 0xeb, 0x7f, 0x14, 0x00, 0xd2, 0x59, 0x04, 0x3d, 0xe3, 0xe6, 0xf2, + 0x59, 0xe6, 0x28, 0xc4, 0x13, 0x42, 0x95, 0xb9, 0xf7, 0xce, 0x3d, 0xcc, 0xc8, 0xe5, 0x9e, 0x00, + 0x1b, 0x72, 0x2c, 0x92, 0x1b, 0xb4, 0x09, 0x97, 0xa6, 0xa6, 0x24, 0xf3, 0x18, 0xd3, 0x63, 0x33, + 0xa9, 0x2a, 0xb5, 0x74, 0x04, 0xea, 0x60, 0x7a, 0xdc, 0xb5, 0x1b, 0x3f, 0xe5, 0x94, 0x5d, 0x02, + 
0x8e, 0x9e, 0xc2, 0xe2, 0x51, 0xe4, 0x59, 0x3c, 0x81, 0xcc, 0x64, 0xac, 0x9d, 0xa7, 0x1c, 0x57, + 0x63, 0x1a, 0x31, 0x7c, 0x1e, 0xc1, 0x65, 0x3f, 0x74, 0xc6, 0x8e, 0x87, 0x5d, 0x73, 0x96, 0x3f, + 0x33, 0x27, 0xff, 0x4a, 0xcc, 0xb7, 0x37, 0xfd, 0x9d, 0x03, 0x28, 0x1f, 0x39, 0x2e, 0x91, 0xd4, + 0xd9, 0x39, 0xa9, 0x4b, 0x9c, 0x42, 0xd0, 0xdd, 0x80, 0x8a, 0xeb, 0x78, 0xc4, 0xf4, 0xa2, 0xc9, + 0x88, 0x84, 0xaa, 0x7c, 0x02, 0x3f, 0xea, 0x89, 0x13, 0xf4, 0x3f, 0x58, 0xb4, 0x7c, 0x37, 0x9a, + 0x78, 0xb1, 0x4a, 0x5e, 0xa8, 0x54, 0xe5, 0xa1, 0x52, 0xda, 0x83, 0x8a, 0xeb, 0x63, 0xdb, 0x9c, + 0xf8, 0x76, 0xe4, 0xc6, 0x13, 0xf4, 0x67, 0x67, 0x98, 0x75, 0x20, 0x94, 0x0d, 0xe0, 0x48, 0xb9, + 0x46, 0xdf, 0xc1, 0x12, 0xf5, 0xa3, 0xd0, 0x22, 0xe6, 0x09, 0x09, 0x29, 0xef, 0x95, 0xc5, 0x39, + 0x3d, 0x5c, 0x94, 0x3c, 0x87, 0x92, 0xa6, 0xf1, 0xb3, 0x06, 0x95, 0xa9, 0x78, 0x42, 0x8f, 0x21, + 0x2f, 0xc2, 0x52, 0x65, 0xf3, 0xdd, 0x79, 0xa2, 0xd2, 0x90, 0x14, 0xe8, 0x36, 0xac, 0xc4, 0xa9, + 0x2d, 0x43, 0x7d, 0x26, 0xb7, 0x91, 0x92, 0xc9, 0x0f, 0xcb, 0xe4, 0xfe, 0x5d, 0x83, 0x82, 0xf2, + 0xb8, 0x03, 0x05, 0x75, 0x69, 0xf3, 0x86, 0xa1, 0xc2, 0xa3, 0x27, 0x50, 0x1a, 0x45, 0x7c, 0xae, + 0x55, 0xa9, 0x30, 0x0f, 0x57, 0x51, 0x30, 0x74, 0xed, 0xf5, 0x1f, 0x60, 0xf9, 0x1d, 0x29, 0x5a, + 0x89, 0x67, 0x43, 0xd9, 0x1b, 0xe4, 0x86, 0xbb, 0xcf, 0xa4, 0x2a, 0xb1, 0x45, 0x13, 0x9e, 0x75, + 0x3f, 0x91, 0xf1, 0x26, 0x2c, 0xdc, 0xdf, 0x7e, 0xad, 0xc1, 0x7f, 0x2d, 0x7f, 0xf2, 0x71, 0xeb, + 0xb6, 0x41, 0xdc, 0x77, 0x9f, 0x4f, 0x88, 0x7d, 0xed, 0xf9, 0x23, 0xa5, 0x3c, 0xf6, 0x5d, 0xec, + 0x8d, 0x75, 0x3f, 0x1c, 0x37, 0xc7, 0xc4, 0x13, 0xf3, 0x63, 0x53, 0x8a, 0x70, 0xe0, 0xd0, 0x0f, + 0xfc, 0x6d, 0x7f, 0x95, 0xee, 0x5e, 0x67, 0x56, 0x1f, 0x49, 0xa6, 0x1d, 0x7e, 0xa6, 0xcb, 0x47, + 0x3d, 0x6c, 0xfd, 0x19, 0x9f, 0xbf, 0x10, 0xe7, 0x2f, 0xc4, 0xf9, 0x8b, 0xc3, 0xd6, 0xa8, 0x20, + 0xbe, 0x71, 0xe7, 0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x7b, 0x3c, 0x8a, 0x77, 0xd0, 0x0f, 0x00, + 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/devtools/cloudtrace/v2/tracing.pb.go b/vendor/google.golang.org/genproto/googleapis/devtools/cloudtrace/v2/tracing.pb.go new file mode 100644 index 000000000..6e6925cab --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/devtools/cloudtrace/v2/tracing.pb.go @@ -0,0 +1,227 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/devtools/cloudtrace/v2/tracing.proto + +package cloudtrace // import "google.golang.org/genproto/googleapis/devtools/cloudtrace/v2" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import empty "github.com/golang/protobuf/ptypes/empty" +import _ "github.com/golang/protobuf/ptypes/timestamp" +import _ "google.golang.org/genproto/googleapis/api/annotations" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// The request message for the `BatchWriteSpans` method. +type BatchWriteSpansRequest struct { + // Required. The name of the project where the spans belong. The format is + // `projects/[PROJECT_ID]`. 
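+ //
+ // Editor's note: an illustrative sketch, not part of the generated file;
+ // ctx, span, and "my-project" are placeholders, and client is the value
+ // returned by NewTraceServiceClient below:
+ //
+ //   req := &BatchWriteSpansRequest{
+ //       Name:  "projects/my-project",
+ //       Spans: []*Span{span},
+ //   }
+ //   _, err := client.BatchWriteSpans(ctx, req)
+ //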
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // A list of new spans. The span names must not match existing + // spans, or the results are undefined. + Spans []*Span `protobuf:"bytes,2,rep,name=spans,proto3" json:"spans,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BatchWriteSpansRequest) Reset() { *m = BatchWriteSpansRequest{} } +func (m *BatchWriteSpansRequest) String() string { return proto.CompactTextString(m) } +func (*BatchWriteSpansRequest) ProtoMessage() {} +func (*BatchWriteSpansRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_tracing_18786c49399bd83d, []int{0} +} +func (m *BatchWriteSpansRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BatchWriteSpansRequest.Unmarshal(m, b) +} +func (m *BatchWriteSpansRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BatchWriteSpansRequest.Marshal(b, m, deterministic) +} +func (dst *BatchWriteSpansRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_BatchWriteSpansRequest.Merge(dst, src) +} +func (m *BatchWriteSpansRequest) XXX_Size() int { + return xxx_messageInfo_BatchWriteSpansRequest.Size(m) +} +func (m *BatchWriteSpansRequest) XXX_DiscardUnknown() { + xxx_messageInfo_BatchWriteSpansRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_BatchWriteSpansRequest proto.InternalMessageInfo + +func (m *BatchWriteSpansRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *BatchWriteSpansRequest) GetSpans() []*Span { + if m != nil { + return m.Spans + } + return nil +} + +func init() { + proto.RegisterType((*BatchWriteSpansRequest)(nil), "google.devtools.cloudtrace.v2.BatchWriteSpansRequest") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// TraceServiceClient is the client API for TraceService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type TraceServiceClient interface { + // Sends new spans to new or existing traces. You cannot update + // existing spans. + BatchWriteSpans(ctx context.Context, in *BatchWriteSpansRequest, opts ...grpc.CallOption) (*empty.Empty, error) + // Creates a new span. + CreateSpan(ctx context.Context, in *Span, opts ...grpc.CallOption) (*Span, error) +} + +type traceServiceClient struct { + cc *grpc.ClientConn +} + +func NewTraceServiceClient(cc *grpc.ClientConn) TraceServiceClient { + return &traceServiceClient{cc} +} + +func (c *traceServiceClient) BatchWriteSpans(ctx context.Context, in *BatchWriteSpansRequest, opts ...grpc.CallOption) (*empty.Empty, error) { + out := new(empty.Empty) + err := c.cc.Invoke(ctx, "/google.devtools.cloudtrace.v2.TraceService/BatchWriteSpans", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *traceServiceClient) CreateSpan(ctx context.Context, in *Span, opts ...grpc.CallOption) (*Span, error) { + out := new(Span) + err := c.cc.Invoke(ctx, "/google.devtools.cloudtrace.v2.TraceService/CreateSpan", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +// TraceServiceServer is the server API for TraceService service. +type TraceServiceServer interface { + // Sends new spans to new or existing traces. You cannot update + // existing spans. + BatchWriteSpans(context.Context, *BatchWriteSpansRequest) (*empty.Empty, error) + // Creates a new span. + CreateSpan(context.Context, *Span) (*Span, error) +} + +func RegisterTraceServiceServer(s *grpc.Server, srv TraceServiceServer) { + s.RegisterService(&_TraceService_serviceDesc, srv) +} + +func _TraceService_BatchWriteSpans_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(BatchWriteSpansRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TraceServiceServer).BatchWriteSpans(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.devtools.cloudtrace.v2.TraceService/BatchWriteSpans", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TraceServiceServer).BatchWriteSpans(ctx, req.(*BatchWriteSpansRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TraceService_CreateSpan_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Span) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TraceServiceServer).CreateSpan(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.devtools.cloudtrace.v2.TraceService/CreateSpan", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TraceServiceServer).CreateSpan(ctx, req.(*Span)) + } + return interceptor(ctx, in, info, handler) +} + +var _TraceService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.devtools.cloudtrace.v2.TraceService", + HandlerType: (*TraceServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "BatchWriteSpans", + Handler: _TraceService_BatchWriteSpans_Handler, + }, + { + MethodName: "CreateSpan", + Handler: _TraceService_CreateSpan_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/devtools/cloudtrace/v2/tracing.proto", +} + +func init() { + proto.RegisterFile("google/devtools/cloudtrace/v2/tracing.proto", fileDescriptor_tracing_18786c49399bd83d) +} + +var fileDescriptor_tracing_18786c49399bd83d = []byte{ + // 404 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x52, 0xdd, 0x6a, 0xdb, 0x30, + 0x14, 0x46, 0xde, 0x0f, 0x4c, 0x1b, 0x0c, 0x04, 0x0b, 0xc1, 0xdb, 0x58, 0xe6, 0x0d, 0x96, 0x64, + 0x43, 0x02, 0x8f, 0x5d, 0x2c, 0x63, 0x37, 0x09, 0x23, 0xb7, 0x21, 0x19, 0x19, 0x8c, 0xdc, 0x28, + 0x8e, 0xa6, 0x69, 0xd8, 0x92, 0x67, 0x29, 0x86, 0x52, 0x7a, 0xd3, 0x9b, 0x3e, 0x40, 0xfb, 0x14, + 0xa5, 0xd0, 0xf7, 0xe8, 0x6d, 0x5f, 0xa1, 0x0f, 0x52, 0x24, 0xd9, 0x0d, 0x84, 0x34, 0xc9, 0x9d, + 0xce, 0x39, 0xdf, 0xf9, 0xce, 0xf7, 0x7d, 0x36, 0xfc, 0xc8, 0x95, 0xe2, 0x29, 0x23, 0x0b, 0x56, + 0x1a, 0xa5, 0x52, 0x4d, 0x92, 0x54, 0x2d, 0x17, 0xa6, 0xa0, 0x09, 0x23, 0x65, 0x4c, 0xec, 0x43, + 0x48, 0x8e, 0xf3, 0x42, 0x19, 0x85, 0x5e, 0x7b, 0x30, 0xae, 0xc1, 0x78, 0x05, 0xc6, 0x65, 0x1c, + 0xbe, 0xaa, 0xb8, 0x68, 0x2e, 0x08, 0x95, 0x52, 0x19, 0x6a, 0x84, 0x92, 0xda, 0x2f, 0x87, 0x9d, + 0xdd, 0x97, 0x58, 0x05, 0x7d, 0x59, 0x41, 0x5d, 0x35, 0x5f, 0xfe, 0x21, 
0x2c, 0xcb, 0xcd, 0x41, + 0x35, 0x7c, 0xb3, 0x3e, 0x34, 0x22, 0x63, 0xda, 0xd0, 0x2c, 0xf7, 0x80, 0x88, 0xc3, 0x46, 0x9f, + 0x9a, 0xe4, 0xef, 0xaf, 0x42, 0x18, 0x36, 0xc9, 0xa9, 0xd4, 0x63, 0xf6, 0x7f, 0xc9, 0xb4, 0x41, + 0x08, 0x3e, 0x94, 0x34, 0x63, 0x4d, 0xd0, 0x02, 0xed, 0x27, 0x63, 0xf7, 0x46, 0x5f, 0xe1, 0x23, + 0x6d, 0x31, 0xcd, 0xa0, 0xf5, 0xa0, 0xfd, 0x34, 0x7e, 0x87, 0xb7, 0x7a, 0xc4, 0x96, 0x6f, 0xec, + 0x37, 0xe2, 0xcb, 0x00, 0x3e, 0xfb, 0x69, 0x07, 0x13, 0x56, 0x94, 0x22, 0x61, 0xe8, 0x0c, 0xc0, + 0xe7, 0x6b, 0xa7, 0xd1, 0x97, 0x1d, 0x84, 0x9b, 0xa5, 0x86, 0x8d, 0x7a, 0xad, 0xb6, 0x89, 0x7f, + 0xd8, 0x0c, 0xa2, 0xf8, 0xf8, 0xfa, 0xe6, 0x34, 0xf8, 0x14, 0x7d, 0xb0, 0x99, 0x1d, 0x5a, 0x07, + 0xdf, 0xf3, 0x42, 0xfd, 0x63, 0x89, 0xd1, 0xa4, 0x7b, 0xe4, 0x53, 0xd4, 0xbd, 0xf9, 0x1d, 0x69, + 0x0f, 0x74, 0xd1, 0x09, 0x80, 0x70, 0x50, 0x30, 0xea, 0x4f, 0xa0, 0x7d, 0x2c, 0x86, 0xfb, 0x80, + 0x22, 0xe2, 0xc4, 0x74, 0xa2, 0xf7, 0x9b, 0xc4, 0x54, 0x5a, 0xac, 0x2a, 0x17, 0x57, 0x0f, 0x74, + 0xfb, 0x17, 0x00, 0xbe, 0x4d, 0x54, 0xb6, 0x9d, 0xbb, 0xef, 0x42, 0x15, 0x92, 0x8f, 0xac, 0xf5, + 0x11, 0xf8, 0x3d, 0xac, 0xe0, 0x5c, 0xa5, 0x54, 0x72, 0xac, 0x0a, 0x4e, 0x38, 0x93, 0x2e, 0x18, + 0xe2, 0x47, 0x34, 0x17, 0xfa, 0x9e, 0x1f, 0xeb, 0xdb, 0xaa, 0x3a, 0x0f, 0x5e, 0x0c, 0x3d, 0xd3, + 0xc0, 0xf6, 0xb0, 0xfb, 0x76, 0x78, 0x1a, 0x5f, 0xd5, 0xfd, 0x99, 0xeb, 0xcf, 0x5c, 0x7f, 0x36, + 0x8d, 0xe7, 0x8f, 0xdd, 0x8d, 0xcf, 0xb7, 0x01, 0x00, 0x00, 0xff, 0xff, 0xbd, 0x94, 0x51, 0x1d, + 0x25, 0x03, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/alert.pb.go b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/alert.pb.go new file mode 100644 index 000000000..d134a3cb9 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/alert.pb.go @@ -0,0 +1,958 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/monitoring/v3/alert.proto + +package monitoring // import "google.golang.org/genproto/googleapis/monitoring/v3" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import duration "github.com/golang/protobuf/ptypes/duration" +import wrappers "github.com/golang/protobuf/ptypes/wrappers" +import _ "google.golang.org/genproto/googleapis/api/annotations" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Operators for combining conditions. +type AlertPolicy_ConditionCombinerType int32 + +const ( + // An unspecified combiner. + AlertPolicy_COMBINE_UNSPECIFIED AlertPolicy_ConditionCombinerType = 0 + // Combine conditions using the logical `AND` operator. An + // incident is created only if all conditions are met + // simultaneously. This combiner is satisfied if all conditions are + // met, even if they are met on completely different resources. + AlertPolicy_AND AlertPolicy_ConditionCombinerType = 1 + // Combine conditions using the logical `OR` operator. An incident + // is created if any of the listed conditions is met. 
+ AlertPolicy_OR AlertPolicy_ConditionCombinerType = 2 + // Combine conditions using logical `AND` operator, but unlike the regular + // `AND` option, an incident is created only if all conditions are met + // simultaneously on at least one resource. + AlertPolicy_AND_WITH_MATCHING_RESOURCE AlertPolicy_ConditionCombinerType = 3 +) + +var AlertPolicy_ConditionCombinerType_name = map[int32]string{ + 0: "COMBINE_UNSPECIFIED", + 1: "AND", + 2: "OR", + 3: "AND_WITH_MATCHING_RESOURCE", +} +var AlertPolicy_ConditionCombinerType_value = map[string]int32{ + "COMBINE_UNSPECIFIED": 0, + "AND": 1, + "OR": 2, + "AND_WITH_MATCHING_RESOURCE": 3, +} + +func (x AlertPolicy_ConditionCombinerType) String() string { + return proto.EnumName(AlertPolicy_ConditionCombinerType_name, int32(x)) +} +func (AlertPolicy_ConditionCombinerType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_alert_9e390b6dbd40ea14, []int{0, 0} +} + +// A description of the conditions under which some aspect of your system is +// considered to be "unhealthy" and the ways to notify people or services about +// this state. For an overview of alert policies, see +// [Introduction to Alerting](/monitoring/alerts/). +type AlertPolicy struct { + // Required if the policy exists. The resource name for this policy. The + // syntax is: + // + // projects/[PROJECT_ID]/alertPolicies/[ALERT_POLICY_ID] + // + // `[ALERT_POLICY_ID]` is assigned by Stackdriver Monitoring when the policy + // is created. When calling the + // [alertPolicies.create][google.monitoring.v3.AlertPolicyService.CreateAlertPolicy] + // method, do not include the `name` field in the alerting policy passed as + // part of the request. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // A short name or phrase used to identify the policy in dashboards, + // notifications, and incidents. To avoid confusion, don't use the same + // display name for multiple policies in the same project. The name is + // limited to 512 Unicode characters. + DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` + // Documentation that is included with notifications and incidents related to + // this policy. Best practice is for the documentation to include information + // to help responders understand, mitigate, escalate, and correct the + // underlying problems detected by the alerting policy. Notification channels + // that have limited capacity might not show this documentation. + Documentation *AlertPolicy_Documentation `protobuf:"bytes,13,opt,name=documentation,proto3" json:"documentation,omitempty"` + // User-supplied key/value data to be used for organizing and + // identifying the `AlertPolicy` objects. + // + // The field can contain up to 64 entries. Each key and value is limited to + // 63 Unicode characters or 128 bytes, whichever is smaller. Labels and + // values can contain only lowercase letters, numerals, underscores, and + // dashes. Keys must begin with a letter. + UserLabels map[string]string `protobuf:"bytes,16,rep,name=user_labels,json=userLabels,proto3" json:"user_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // A list of conditions for the policy. The conditions are combined by AND or + // OR according to the `combiner` field. If the combined conditions evaluate + // to true, then an incident is created. A policy can have from one to six + // conditions. 
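+ //
+ // Editor's note: an illustrative sketch, not generated output; cond1 and
+ // cond2 are placeholder *AlertPolicy_Condition values. With the AND
+ // combiner, an incident opens only when every listed condition is met:
+ //
+ //   p := &AlertPolicy{
+ //       DisplayName: "High error rate",
+ //       Combiner:    AlertPolicy_AND,
+ //       Conditions:  []*AlertPolicy_Condition{cond1, cond2},
+ //   }
+ //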
+ Conditions []*AlertPolicy_Condition `protobuf:"bytes,12,rep,name=conditions,proto3" json:"conditions,omitempty"` + // How to combine the results of multiple conditions + // to determine if an incident should be opened. + Combiner AlertPolicy_ConditionCombinerType `protobuf:"varint,6,opt,name=combiner,proto3,enum=google.monitoring.v3.AlertPolicy_ConditionCombinerType" json:"combiner,omitempty"` + // Whether or not the policy is enabled. On write, the default interpretation + // if unset is that the policy is enabled. On read, clients should not make + // any assumption about the state if it has not been populated. The + // field should always be populated on List and Get operations, unless + // a field projection has been specified that strips it out. + Enabled *wrappers.BoolValue `protobuf:"bytes,17,opt,name=enabled,proto3" json:"enabled,omitempty"` + // Identifies the notification channels to which notifications should be sent + // when incidents are opened or closed or when new violations occur on + // an already opened incident. Each element of this array corresponds to + // the `name` field in each of the + // [`NotificationChannel`][google.monitoring.v3.NotificationChannel] + // objects that are returned from the [`ListNotificationChannels`] + // [google.monitoring.v3.NotificationChannelService.ListNotificationChannels] + // method. The syntax of the entries in this field is: + // + // projects/[PROJECT_ID]/notificationChannels/[CHANNEL_ID] + NotificationChannels []string `protobuf:"bytes,14,rep,name=notification_channels,json=notificationChannels,proto3" json:"notification_channels,omitempty"` + // A read-only record of the creation of the alerting policy. If provided + // in a call to create or update, this field will be ignored. + CreationRecord *MutationRecord `protobuf:"bytes,10,opt,name=creation_record,json=creationRecord,proto3" json:"creation_record,omitempty"` + // A read-only record of the most recent change to the alerting policy. If + // provided in a call to create or update, this field will be ignored. 
+	MutationRecord       *MutationRecord `protobuf:"bytes,11,opt,name=mutation_record,json=mutationRecord,proto3" json:"mutation_record,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
+	XXX_unrecognized     []byte          `json:"-"`
+	XXX_sizecache        int32           `json:"-"`
+}
+
+func (m *AlertPolicy) Reset()         { *m = AlertPolicy{} }
+func (m *AlertPolicy) String() string { return proto.CompactTextString(m) }
+func (*AlertPolicy) ProtoMessage()    {}
+func (*AlertPolicy) Descriptor() ([]byte, []int) {
+	return fileDescriptor_alert_9e390b6dbd40ea14, []int{0}
+}
+func (m *AlertPolicy) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_AlertPolicy.Unmarshal(m, b)
+}
+func (m *AlertPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_AlertPolicy.Marshal(b, m, deterministic)
+}
+func (dst *AlertPolicy) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AlertPolicy.Merge(dst, src)
+}
+func (m *AlertPolicy) XXX_Size() int {
+	return xxx_messageInfo_AlertPolicy.Size(m)
+}
+func (m *AlertPolicy) XXX_DiscardUnknown() {
+	xxx_messageInfo_AlertPolicy.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AlertPolicy proto.InternalMessageInfo
+
+func (m *AlertPolicy) GetName() string {
+	if m != nil {
+		return m.Name
+	}
+	return ""
+}
+
+func (m *AlertPolicy) GetDisplayName() string {
+	if m != nil {
+		return m.DisplayName
+	}
+	return ""
+}
+
+func (m *AlertPolicy) GetDocumentation() *AlertPolicy_Documentation {
+	if m != nil {
+		return m.Documentation
+	}
+	return nil
+}
+
+func (m *AlertPolicy) GetUserLabels() map[string]string {
+	if m != nil {
+		return m.UserLabels
+	}
+	return nil
+}
+
+func (m *AlertPolicy) GetConditions() []*AlertPolicy_Condition {
+	if m != nil {
+		return m.Conditions
+	}
+	return nil
+}
+
+func (m *AlertPolicy) GetCombiner() AlertPolicy_ConditionCombinerType {
+	if m != nil {
+		return m.Combiner
+	}
+	return AlertPolicy_COMBINE_UNSPECIFIED
+}
+
+func (m *AlertPolicy) GetEnabled() *wrappers.BoolValue {
+	if m != nil {
+		return m.Enabled
+	}
+	return nil
+}
+
+func (m *AlertPolicy) GetNotificationChannels() []string {
+	if m != nil {
+		return m.NotificationChannels
+	}
+	return nil
+}
+
+func (m *AlertPolicy) GetCreationRecord() *MutationRecord {
+	if m != nil {
+		return m.CreationRecord
+	}
+	return nil
+}
+
+func (m *AlertPolicy) GetMutationRecord() *MutationRecord {
+	if m != nil {
+		return m.MutationRecord
+	}
+	return nil
+}
+
+// A content string and a MIME type that describes the content string's
+// format.
+type AlertPolicy_Documentation struct {
+	// The text of the documentation, interpreted according to `mime_type`.
+	// The content may not exceed 8,192 Unicode characters and may not exceed
+	// 10,240 bytes when encoded in UTF-8 format, whichever is smaller.
+	Content string `protobuf:"bytes,1,opt,name=content,proto3" json:"content,omitempty"`
+	// The format of the `content` field. Presently, only the value
+	// `"text/markdown"` is supported. See
+	// [Markdown](https://en.wikipedia.org/wiki/Markdown) for more information.
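+	// (Illustratively, a short runbook written in Markdown would set
+	// `mime_type` to `"text/markdown"`.)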
+ MimeType string `protobuf:"bytes,2,opt,name=mime_type,json=mimeType,proto3" json:"mime_type,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AlertPolicy_Documentation) Reset() { *m = AlertPolicy_Documentation{} } +func (m *AlertPolicy_Documentation) String() string { return proto.CompactTextString(m) } +func (*AlertPolicy_Documentation) ProtoMessage() {} +func (*AlertPolicy_Documentation) Descriptor() ([]byte, []int) { + return fileDescriptor_alert_9e390b6dbd40ea14, []int{0, 0} +} +func (m *AlertPolicy_Documentation) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AlertPolicy_Documentation.Unmarshal(m, b) +} +func (m *AlertPolicy_Documentation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AlertPolicy_Documentation.Marshal(b, m, deterministic) +} +func (dst *AlertPolicy_Documentation) XXX_Merge(src proto.Message) { + xxx_messageInfo_AlertPolicy_Documentation.Merge(dst, src) +} +func (m *AlertPolicy_Documentation) XXX_Size() int { + return xxx_messageInfo_AlertPolicy_Documentation.Size(m) +} +func (m *AlertPolicy_Documentation) XXX_DiscardUnknown() { + xxx_messageInfo_AlertPolicy_Documentation.DiscardUnknown(m) +} + +var xxx_messageInfo_AlertPolicy_Documentation proto.InternalMessageInfo + +func (m *AlertPolicy_Documentation) GetContent() string { + if m != nil { + return m.Content + } + return "" +} + +func (m *AlertPolicy_Documentation) GetMimeType() string { + if m != nil { + return m.MimeType + } + return "" +} + +// A condition is a true/false test that determines when an alerting policy +// should open an incident. If a condition evaluates to true, it signifies +// that something is wrong. +type AlertPolicy_Condition struct { + // Required if the condition exists. The unique resource name for this + // condition. Its syntax is: + // + // projects/[PROJECT_ID]/alertPolicies/[POLICY_ID]/conditions/[CONDITION_ID] + // + // `[CONDITION_ID]` is assigned by Stackdriver Monitoring when the + // condition is created as part of a new or updated alerting policy. + // + // When calling the + // [alertPolicies.create][google.monitoring.v3.AlertPolicyService.CreateAlertPolicy] + // method, do not include the `name` field in the conditions of the + // requested alerting policy. Stackdriver Monitoring creates the + // condition identifiers and includes them in the new policy. + // + // When calling the + // [alertPolicies.update][google.monitoring.v3.AlertPolicyService.UpdateAlertPolicy] + // method to update a policy, including a condition `name` causes the + // existing condition to be updated. Conditions without names are added to + // the updated policy. Existing conditions are deleted if they are not + // updated. + // + // Best practice is to preserve `[CONDITION_ID]` if you make only small + // changes, such as those to condition thresholds, durations, or trigger + // values. Otherwise, treat the change as a new condition and let the + // existing condition be deleted. + Name string `protobuf:"bytes,12,opt,name=name,proto3" json:"name,omitempty"` + // A short name or phrase used to identify the condition in dashboards, + // notifications, and incidents. To avoid confusion, don't use the same + // display name for multiple conditions in the same policy. + DisplayName string `protobuf:"bytes,6,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` + // Only one of the following condition types will be specified. 
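+	// That is, each condition carries exactly one of `condition_threshold`
+	// or `condition_absent`.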
+ // + // Types that are valid to be assigned to Condition: + // *AlertPolicy_Condition_ConditionThreshold + // *AlertPolicy_Condition_ConditionAbsent + Condition isAlertPolicy_Condition_Condition `protobuf_oneof:"condition"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AlertPolicy_Condition) Reset() { *m = AlertPolicy_Condition{} } +func (m *AlertPolicy_Condition) String() string { return proto.CompactTextString(m) } +func (*AlertPolicy_Condition) ProtoMessage() {} +func (*AlertPolicy_Condition) Descriptor() ([]byte, []int) { + return fileDescriptor_alert_9e390b6dbd40ea14, []int{0, 1} +} +func (m *AlertPolicy_Condition) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AlertPolicy_Condition.Unmarshal(m, b) +} +func (m *AlertPolicy_Condition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AlertPolicy_Condition.Marshal(b, m, deterministic) +} +func (dst *AlertPolicy_Condition) XXX_Merge(src proto.Message) { + xxx_messageInfo_AlertPolicy_Condition.Merge(dst, src) +} +func (m *AlertPolicy_Condition) XXX_Size() int { + return xxx_messageInfo_AlertPolicy_Condition.Size(m) +} +func (m *AlertPolicy_Condition) XXX_DiscardUnknown() { + xxx_messageInfo_AlertPolicy_Condition.DiscardUnknown(m) +} + +var xxx_messageInfo_AlertPolicy_Condition proto.InternalMessageInfo + +func (m *AlertPolicy_Condition) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *AlertPolicy_Condition) GetDisplayName() string { + if m != nil { + return m.DisplayName + } + return "" +} + +type isAlertPolicy_Condition_Condition interface { + isAlertPolicy_Condition_Condition() +} + +type AlertPolicy_Condition_ConditionThreshold struct { + ConditionThreshold *AlertPolicy_Condition_MetricThreshold `protobuf:"bytes,1,opt,name=condition_threshold,json=conditionThreshold,proto3,oneof"` +} + +type AlertPolicy_Condition_ConditionAbsent struct { + ConditionAbsent *AlertPolicy_Condition_MetricAbsence `protobuf:"bytes,2,opt,name=condition_absent,json=conditionAbsent,proto3,oneof"` +} + +func (*AlertPolicy_Condition_ConditionThreshold) isAlertPolicy_Condition_Condition() {} + +func (*AlertPolicy_Condition_ConditionAbsent) isAlertPolicy_Condition_Condition() {} + +func (m *AlertPolicy_Condition) GetCondition() isAlertPolicy_Condition_Condition { + if m != nil { + return m.Condition + } + return nil +} + +func (m *AlertPolicy_Condition) GetConditionThreshold() *AlertPolicy_Condition_MetricThreshold { + if x, ok := m.GetCondition().(*AlertPolicy_Condition_ConditionThreshold); ok { + return x.ConditionThreshold + } + return nil +} + +func (m *AlertPolicy_Condition) GetConditionAbsent() *AlertPolicy_Condition_MetricAbsence { + if x, ok := m.GetCondition().(*AlertPolicy_Condition_ConditionAbsent); ok { + return x.ConditionAbsent + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*AlertPolicy_Condition) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _AlertPolicy_Condition_OneofMarshaler, _AlertPolicy_Condition_OneofUnmarshaler, _AlertPolicy_Condition_OneofSizer, []interface{}{ + (*AlertPolicy_Condition_ConditionThreshold)(nil), + (*AlertPolicy_Condition_ConditionAbsent)(nil), + } +} + +func _AlertPolicy_Condition_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*AlertPolicy_Condition) + // condition + switch x := m.Condition.(type) { + case *AlertPolicy_Condition_ConditionThreshold: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ConditionThreshold); err != nil { + return err + } + case *AlertPolicy_Condition_ConditionAbsent: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ConditionAbsent); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("AlertPolicy_Condition.Condition has unexpected type %T", x) + } + return nil +} + +func _AlertPolicy_Condition_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*AlertPolicy_Condition) + switch tag { + case 1: // condition.condition_threshold + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(AlertPolicy_Condition_MetricThreshold) + err := b.DecodeMessage(msg) + m.Condition = &AlertPolicy_Condition_ConditionThreshold{msg} + return true, err + case 2: // condition.condition_absent + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(AlertPolicy_Condition_MetricAbsence) + err := b.DecodeMessage(msg) + m.Condition = &AlertPolicy_Condition_ConditionAbsent{msg} + return true, err + default: + return false, nil + } +} + +func _AlertPolicy_Condition_OneofSizer(msg proto.Message) (n int) { + m := msg.(*AlertPolicy_Condition) + // condition + switch x := m.Condition.(type) { + case *AlertPolicy_Condition_ConditionThreshold: + s := proto.Size(x.ConditionThreshold) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *AlertPolicy_Condition_ConditionAbsent: + s := proto.Size(x.ConditionAbsent) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Specifies how many time series must fail a predicate to trigger a +// condition. If not specified, then a `{count: 1}` trigger is used. +type AlertPolicy_Condition_Trigger struct { + // A type of trigger. 
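+	// For example (illustrative only): the default `{count: 1}` trigger
+	// fires as soon as any single time series fails the predicate.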
+ // + // Types that are valid to be assigned to Type: + // *AlertPolicy_Condition_Trigger_Count + // *AlertPolicy_Condition_Trigger_Percent + Type isAlertPolicy_Condition_Trigger_Type `protobuf_oneof:"type"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AlertPolicy_Condition_Trigger) Reset() { *m = AlertPolicy_Condition_Trigger{} } +func (m *AlertPolicy_Condition_Trigger) String() string { return proto.CompactTextString(m) } +func (*AlertPolicy_Condition_Trigger) ProtoMessage() {} +func (*AlertPolicy_Condition_Trigger) Descriptor() ([]byte, []int) { + return fileDescriptor_alert_9e390b6dbd40ea14, []int{0, 1, 0} +} +func (m *AlertPolicy_Condition_Trigger) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AlertPolicy_Condition_Trigger.Unmarshal(m, b) +} +func (m *AlertPolicy_Condition_Trigger) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AlertPolicy_Condition_Trigger.Marshal(b, m, deterministic) +} +func (dst *AlertPolicy_Condition_Trigger) XXX_Merge(src proto.Message) { + xxx_messageInfo_AlertPolicy_Condition_Trigger.Merge(dst, src) +} +func (m *AlertPolicy_Condition_Trigger) XXX_Size() int { + return xxx_messageInfo_AlertPolicy_Condition_Trigger.Size(m) +} +func (m *AlertPolicy_Condition_Trigger) XXX_DiscardUnknown() { + xxx_messageInfo_AlertPolicy_Condition_Trigger.DiscardUnknown(m) +} + +var xxx_messageInfo_AlertPolicy_Condition_Trigger proto.InternalMessageInfo + +type isAlertPolicy_Condition_Trigger_Type interface { + isAlertPolicy_Condition_Trigger_Type() +} + +type AlertPolicy_Condition_Trigger_Count struct { + Count int32 `protobuf:"varint,1,opt,name=count,proto3,oneof"` +} + +type AlertPolicy_Condition_Trigger_Percent struct { + Percent float64 `protobuf:"fixed64,2,opt,name=percent,proto3,oneof"` +} + +func (*AlertPolicy_Condition_Trigger_Count) isAlertPolicy_Condition_Trigger_Type() {} + +func (*AlertPolicy_Condition_Trigger_Percent) isAlertPolicy_Condition_Trigger_Type() {} + +func (m *AlertPolicy_Condition_Trigger) GetType() isAlertPolicy_Condition_Trigger_Type { + if m != nil { + return m.Type + } + return nil +} + +func (m *AlertPolicy_Condition_Trigger) GetCount() int32 { + if x, ok := m.GetType().(*AlertPolicy_Condition_Trigger_Count); ok { + return x.Count + } + return 0 +} + +func (m *AlertPolicy_Condition_Trigger) GetPercent() float64 { + if x, ok := m.GetType().(*AlertPolicy_Condition_Trigger_Percent); ok { + return x.Percent + } + return 0 +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*AlertPolicy_Condition_Trigger) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
+	return _AlertPolicy_Condition_Trigger_OneofMarshaler, _AlertPolicy_Condition_Trigger_OneofUnmarshaler, _AlertPolicy_Condition_Trigger_OneofSizer, []interface{}{
+		(*AlertPolicy_Condition_Trigger_Count)(nil),
+		(*AlertPolicy_Condition_Trigger_Percent)(nil),
+	}
+}
+
+func _AlertPolicy_Condition_Trigger_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
+	m := msg.(*AlertPolicy_Condition_Trigger)
+	// type
+	switch x := m.Type.(type) {
+	case *AlertPolicy_Condition_Trigger_Count:
+		b.EncodeVarint(1<<3 | proto.WireVarint)
+		b.EncodeVarint(uint64(x.Count))
+	case *AlertPolicy_Condition_Trigger_Percent:
+		b.EncodeVarint(2<<3 | proto.WireFixed64)
+		b.EncodeFixed64(math.Float64bits(x.Percent))
+	case nil:
+	default:
+		return fmt.Errorf("AlertPolicy_Condition_Trigger.Type has unexpected type %T", x)
+	}
+	return nil
+}
+
+func _AlertPolicy_Condition_Trigger_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
+	m := msg.(*AlertPolicy_Condition_Trigger)
+	switch tag {
+	case 1: // type.count
+		if wire != proto.WireVarint {
+			return true, proto.ErrInternalBadWireType
+		}
+		x, err := b.DecodeVarint()
+		m.Type = &AlertPolicy_Condition_Trigger_Count{int32(x)}
+		return true, err
+	case 2: // type.percent
+		if wire != proto.WireFixed64 {
+			return true, proto.ErrInternalBadWireType
+		}
+		x, err := b.DecodeFixed64()
+		m.Type = &AlertPolicy_Condition_Trigger_Percent{math.Float64frombits(x)}
+		return true, err
+	default:
+		return false, nil
+	}
+}
+
+func _AlertPolicy_Condition_Trigger_OneofSizer(msg proto.Message) (n int) {
+	m := msg.(*AlertPolicy_Condition_Trigger)
+	// type
+	switch x := m.Type.(type) {
+	case *AlertPolicy_Condition_Trigger_Count:
+		n += 1 // tag and wire
+		n += proto.SizeVarint(uint64(x.Count))
+	case *AlertPolicy_Condition_Trigger_Percent:
+		n += 1 // tag and wire
+		n += 8
+	case nil:
+	default:
+		panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
+	}
+	return n
+}
+
+// A condition type that compares a collection of time series
+// against a threshold.
+type AlertPolicy_Condition_MetricThreshold struct {
+	// A [filter](/monitoring/api/v3/filters) that
+	// identifies which time series should be compared with the threshold.
+	//
+	// The filter is similar to the one that is specified in the
+	// [`MetricService.ListTimeSeries`
+	// request](/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list) (that
+	// call is useful to verify the time series that will be retrieved /
+	// processed) and must specify the metric type and optionally may contain
+	// restrictions on resource type, resource labels, and metric labels.
+	// This field may not exceed 2048 Unicode characters in length.
+	Filter string `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"`
+	// Specifies the alignment of data points in individual time series as
+	// well as how to combine the retrieved time series together (such as
+	// when aggregating multiple streams on each resource to a single
+	// stream for each resource or when aggregating streams across all
+	// members of a group of resources). Multiple aggregations
+	// are applied in the order specified.
+	//
+	// This field is similar to the one in the
+	// [`MetricService.ListTimeSeries` request](/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list).
+ // It is advisable to use the `ListTimeSeries` method when debugging this field. + Aggregations []*Aggregation `protobuf:"bytes,8,rep,name=aggregations,proto3" json:"aggregations,omitempty"` + // A [filter](/monitoring/api/v3/filters) that identifies a time + // series that should be used as the denominator of a ratio that will be + // compared with the threshold. If a `denominator_filter` is specified, + // the time series specified by the `filter` field will be used as the + // numerator. + // + // The filter is similar to the one that is specified in the + // [`MetricService.ListTimeSeries` + // request](/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list) (that + // call is useful to verify the time series that will be retrieved / + // processed) and must specify the metric type and optionally may contain + // restrictions on resource type, resource labels, and metric labels. + // This field may not exceed 2048 Unicode characters in length. + DenominatorFilter string `protobuf:"bytes,9,opt,name=denominator_filter,json=denominatorFilter,proto3" json:"denominator_filter,omitempty"` + // Specifies the alignment of data points in individual time series + // selected by `denominatorFilter` as + // well as how to combine the retrieved time series together (such as + // when aggregating multiple streams on each resource to a single + // stream for each resource or when aggregating streams across all + // members of a group of resources). + // + // When computing ratios, the `aggregations` and + // `denominator_aggregations` fields must use the same alignment period + // and produce time series that have the same periodicity and labels. + // + // This field is similar to the one in the + // [`MetricService.ListTimeSeries` + // request](/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list). It + // is advisable to use the `ListTimeSeries` method when debugging this + // field. + DenominatorAggregations []*Aggregation `protobuf:"bytes,10,rep,name=denominator_aggregations,json=denominatorAggregations,proto3" json:"denominator_aggregations,omitempty"` + // The comparison to apply between the time series (indicated by `filter` + // and `aggregation`) and the threshold (indicated by `threshold_value`). + // The comparison is applied on each time series, with the time series + // on the left-hand side and the threshold on the right-hand side. + // + // Only `COMPARISON_LT` and `COMPARISON_GT` are supported currently. + Comparison ComparisonType `protobuf:"varint,4,opt,name=comparison,proto3,enum=google.monitoring.v3.ComparisonType" json:"comparison,omitempty"` + // A value against which to compare the time series. + ThresholdValue float64 `protobuf:"fixed64,5,opt,name=threshold_value,json=thresholdValue,proto3" json:"threshold_value,omitempty"` + // The amount of time that a time series must violate the + // threshold to be considered failing. Currently, only values + // that are a multiple of a minute--e.g., 0, 60, 120, or 300 + // seconds--are supported. If an invalid value is given, an + // error will be returned. When choosing a duration, it is useful to + // keep in mind the frequency of the underlying time series data + // (which may also be affected by any alignments specified in the + // `aggregations` field); a good duration is long enough so that a single + // outlier does not generate spurious alerts, but short enough that + // unhealthy states are detected and alerted on quickly. 
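+	// For example (illustrative only): with data points arriving once per
+	// minute, a `duration` of 300 seconds requires roughly five consecutive
+	// violating points before the condition is considered failing.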
+ Duration *duration.Duration `protobuf:"bytes,6,opt,name=duration,proto3" json:"duration,omitempty"` + // The number/percent of time series for which the comparison must hold + // in order for the condition to trigger. If unspecified, then the + // condition will trigger if the comparison is true for any of the + // time series that have been identified by `filter` and `aggregations`, + // or by the ratio, if `denominator_filter` and `denominator_aggregations` + // are specified. + Trigger *AlertPolicy_Condition_Trigger `protobuf:"bytes,7,opt,name=trigger,proto3" json:"trigger,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AlertPolicy_Condition_MetricThreshold) Reset() { *m = AlertPolicy_Condition_MetricThreshold{} } +func (m *AlertPolicy_Condition_MetricThreshold) String() string { return proto.CompactTextString(m) } +func (*AlertPolicy_Condition_MetricThreshold) ProtoMessage() {} +func (*AlertPolicy_Condition_MetricThreshold) Descriptor() ([]byte, []int) { + return fileDescriptor_alert_9e390b6dbd40ea14, []int{0, 1, 1} +} +func (m *AlertPolicy_Condition_MetricThreshold) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AlertPolicy_Condition_MetricThreshold.Unmarshal(m, b) +} +func (m *AlertPolicy_Condition_MetricThreshold) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AlertPolicy_Condition_MetricThreshold.Marshal(b, m, deterministic) +} +func (dst *AlertPolicy_Condition_MetricThreshold) XXX_Merge(src proto.Message) { + xxx_messageInfo_AlertPolicy_Condition_MetricThreshold.Merge(dst, src) +} +func (m *AlertPolicy_Condition_MetricThreshold) XXX_Size() int { + return xxx_messageInfo_AlertPolicy_Condition_MetricThreshold.Size(m) +} +func (m *AlertPolicy_Condition_MetricThreshold) XXX_DiscardUnknown() { + xxx_messageInfo_AlertPolicy_Condition_MetricThreshold.DiscardUnknown(m) +} + +var xxx_messageInfo_AlertPolicy_Condition_MetricThreshold proto.InternalMessageInfo + +func (m *AlertPolicy_Condition_MetricThreshold) GetFilter() string { + if m != nil { + return m.Filter + } + return "" +} + +func (m *AlertPolicy_Condition_MetricThreshold) GetAggregations() []*Aggregation { + if m != nil { + return m.Aggregations + } + return nil +} + +func (m *AlertPolicy_Condition_MetricThreshold) GetDenominatorFilter() string { + if m != nil { + return m.DenominatorFilter + } + return "" +} + +func (m *AlertPolicy_Condition_MetricThreshold) GetDenominatorAggregations() []*Aggregation { + if m != nil { + return m.DenominatorAggregations + } + return nil +} + +func (m *AlertPolicy_Condition_MetricThreshold) GetComparison() ComparisonType { + if m != nil { + return m.Comparison + } + return ComparisonType_COMPARISON_UNSPECIFIED +} + +func (m *AlertPolicy_Condition_MetricThreshold) GetThresholdValue() float64 { + if m != nil { + return m.ThresholdValue + } + return 0 +} + +func (m *AlertPolicy_Condition_MetricThreshold) GetDuration() *duration.Duration { + if m != nil { + return m.Duration + } + return nil +} + +func (m *AlertPolicy_Condition_MetricThreshold) GetTrigger() *AlertPolicy_Condition_Trigger { + if m != nil { + return m.Trigger + } + return nil +} + +// A condition type that checks that monitored resources +// are reporting data. The configuration defines a metric and +// a set of monitored resources. 
The predicate is considered in violation
+// when a time series for the specified metric of a monitored
+// resource does not include any data in the specified `duration`.
+type AlertPolicy_Condition_MetricAbsence struct {
+	// A [filter](/monitoring/api/v3/filters) that
+	// identifies which time series should be compared with the threshold.
+	//
+	// The filter is similar to the one that is specified in the
+	// [`MetricService.ListTimeSeries`
+	// request](/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list) (that
+	// call is useful to verify the time series that will be retrieved /
+	// processed) and must specify the metric type and optionally may contain
+	// restrictions on resource type, resource labels, and metric labels.
+	// This field may not exceed 2048 Unicode characters in length.
+	Filter string `protobuf:"bytes,1,opt,name=filter,proto3" json:"filter,omitempty"`
+	// Specifies the alignment of data points in individual time series as
+	// well as how to combine the retrieved time series together (such as
+	// when aggregating multiple streams on each resource to a single
+	// stream for each resource or when aggregating streams across all
+	// members of a group of resources). Multiple aggregations
+	// are applied in the order specified.
+	//
+	// This field is similar to the
+	// one in the [`MetricService.ListTimeSeries` request](/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list).
+	// It is advisable to use the `ListTimeSeries` method when debugging this field.
+	Aggregations []*Aggregation `protobuf:"bytes,5,rep,name=aggregations,proto3" json:"aggregations,omitempty"`
+	// The amount of time that a time series must fail to report new
+	// data to be considered failing. Currently, only values that
+	// are a multiple of a minute--e.g., 60, 120, or 300
+	// seconds--are supported. If an invalid value is given, an
+	// error will be returned. The `Duration.nanos` field is
+	// ignored.
+	Duration *duration.Duration `protobuf:"bytes,2,opt,name=duration,proto3" json:"duration,omitempty"`
+	// The number/percent of time series for which the comparison must hold
+	// in order for the condition to trigger. If unspecified, then the
+	// condition will trigger if the comparison is true for any of the
+	// time series that have been identified by `filter` and `aggregations`.
+ Trigger *AlertPolicy_Condition_Trigger `protobuf:"bytes,3,opt,name=trigger,proto3" json:"trigger,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AlertPolicy_Condition_MetricAbsence) Reset() { *m = AlertPolicy_Condition_MetricAbsence{} } +func (m *AlertPolicy_Condition_MetricAbsence) String() string { return proto.CompactTextString(m) } +func (*AlertPolicy_Condition_MetricAbsence) ProtoMessage() {} +func (*AlertPolicy_Condition_MetricAbsence) Descriptor() ([]byte, []int) { + return fileDescriptor_alert_9e390b6dbd40ea14, []int{0, 1, 2} +} +func (m *AlertPolicy_Condition_MetricAbsence) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AlertPolicy_Condition_MetricAbsence.Unmarshal(m, b) +} +func (m *AlertPolicy_Condition_MetricAbsence) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AlertPolicy_Condition_MetricAbsence.Marshal(b, m, deterministic) +} +func (dst *AlertPolicy_Condition_MetricAbsence) XXX_Merge(src proto.Message) { + xxx_messageInfo_AlertPolicy_Condition_MetricAbsence.Merge(dst, src) +} +func (m *AlertPolicy_Condition_MetricAbsence) XXX_Size() int { + return xxx_messageInfo_AlertPolicy_Condition_MetricAbsence.Size(m) +} +func (m *AlertPolicy_Condition_MetricAbsence) XXX_DiscardUnknown() { + xxx_messageInfo_AlertPolicy_Condition_MetricAbsence.DiscardUnknown(m) +} + +var xxx_messageInfo_AlertPolicy_Condition_MetricAbsence proto.InternalMessageInfo + +func (m *AlertPolicy_Condition_MetricAbsence) GetFilter() string { + if m != nil { + return m.Filter + } + return "" +} + +func (m *AlertPolicy_Condition_MetricAbsence) GetAggregations() []*Aggregation { + if m != nil { + return m.Aggregations + } + return nil +} + +func (m *AlertPolicy_Condition_MetricAbsence) GetDuration() *duration.Duration { + if m != nil { + return m.Duration + } + return nil +} + +func (m *AlertPolicy_Condition_MetricAbsence) GetTrigger() *AlertPolicy_Condition_Trigger { + if m != nil { + return m.Trigger + } + return nil +} + +func init() { + proto.RegisterType((*AlertPolicy)(nil), "google.monitoring.v3.AlertPolicy") + proto.RegisterMapType((map[string]string)(nil), "google.monitoring.v3.AlertPolicy.UserLabelsEntry") + proto.RegisterType((*AlertPolicy_Documentation)(nil), "google.monitoring.v3.AlertPolicy.Documentation") + proto.RegisterType((*AlertPolicy_Condition)(nil), "google.monitoring.v3.AlertPolicy.Condition") + proto.RegisterType((*AlertPolicy_Condition_Trigger)(nil), "google.monitoring.v3.AlertPolicy.Condition.Trigger") + proto.RegisterType((*AlertPolicy_Condition_MetricThreshold)(nil), "google.monitoring.v3.AlertPolicy.Condition.MetricThreshold") + proto.RegisterType((*AlertPolicy_Condition_MetricAbsence)(nil), "google.monitoring.v3.AlertPolicy.Condition.MetricAbsence") + proto.RegisterEnum("google.monitoring.v3.AlertPolicy_ConditionCombinerType", AlertPolicy_ConditionCombinerType_name, AlertPolicy_ConditionCombinerType_value) +} + +func init() { + proto.RegisterFile("google/monitoring/v3/alert.proto", fileDescriptor_alert_9e390b6dbd40ea14) +} + +var fileDescriptor_alert_9e390b6dbd40ea14 = []byte{ + // 941 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0xeb, 0x6e, 0xe3, 0x44, + 0x14, 0xae, 0x93, 0xe6, 0x76, 0xd2, 0x36, 0xd9, 0xd9, 0xee, 0xae, 0x31, 0x68, 0x95, 0xae, 0x90, + 0x88, 0x40, 0x38, 0x22, 0x01, 0x71, 0x59, 0x81, 0x94, 0x5b, 0x37, 0x11, 0x24, 0xad, 0xa6, 0x69, + 0x91, 0x50, 0x25, 0xcb, 0x71, 
0xa6, 0xae, 0x85, 0x3d, 0x63, 0x4d, 0xec, 0xa2, 0xbc, 0x0e, 0x3f, + 0x79, 0x14, 0x1e, 0x81, 0x7f, 0xbc, 0x02, 0xe2, 0x01, 0x90, 0xc7, 0x63, 0xc7, 0xe9, 0xa6, 0xbb, + 0x64, 0xf7, 0x5f, 0xce, 0x9c, 0xef, 0x7c, 0xe7, 0xf6, 0xcd, 0x38, 0xd0, 0xb0, 0x19, 0xb3, 0x5d, + 0xd2, 0xf2, 0x18, 0x75, 0x02, 0xc6, 0x1d, 0x6a, 0xb7, 0xee, 0x3a, 0x2d, 0xd3, 0x25, 0x3c, 0xd0, + 0x7d, 0xce, 0x02, 0x86, 0x8e, 0x63, 0x84, 0xbe, 0x46, 0xe8, 0x77, 0x1d, 0xed, 0x23, 0x19, 0x67, + 0xfa, 0x4e, 0xcb, 0xa4, 0x94, 0x05, 0x66, 0xe0, 0x30, 0xba, 0x8c, 0x63, 0xb4, 0x93, 0xad, 0xac, + 0x16, 0xf3, 0x3c, 0x46, 0x25, 0xe4, 0xd3, 0xad, 0x10, 0x2f, 0x8c, 0x89, 0x0c, 0x4e, 0x2c, 0xc6, + 0x17, 0x12, 0xfb, 0x5c, 0x62, 0x85, 0x35, 0x0f, 0x6f, 0x5a, 0x8b, 0x90, 0x0b, 0xd8, 0x43, 0xfe, + 0xdf, 0xb8, 0xe9, 0xfb, 0x84, 0xcb, 0x72, 0x5e, 0xfc, 0x5d, 0x83, 0x6a, 0x37, 0x6a, 0xe9, 0x9c, + 0xb9, 0x8e, 0xb5, 0x42, 0x08, 0xf6, 0xa9, 0xe9, 0x11, 0x55, 0x69, 0x28, 0xcd, 0x0a, 0x16, 0xbf, + 0xd1, 0x09, 0x1c, 0x2c, 0x9c, 0xa5, 0xef, 0x9a, 0x2b, 0x43, 0xf8, 0x72, 0xc2, 0x57, 0x95, 0x67, + 0xd3, 0x08, 0x72, 0x09, 0x87, 0x0b, 0x66, 0x85, 0x1e, 0xa1, 0x71, 0x91, 0xea, 0x61, 0x43, 0x69, + 0x56, 0xdb, 0x2d, 0x7d, 0xdb, 0x84, 0xf4, 0x4c, 0x42, 0x7d, 0x90, 0x0d, 0xc3, 0x9b, 0x2c, 0x08, + 0x43, 0x35, 0x5c, 0x12, 0x6e, 0xb8, 0xe6, 0x9c, 0xb8, 0x4b, 0xb5, 0xde, 0xc8, 0x37, 0xab, 0xed, + 0x2f, 0xde, 0x4e, 0x7a, 0xb9, 0x24, 0xfc, 0x27, 0x11, 0x33, 0xa4, 0x01, 0x5f, 0x61, 0x08, 0xd3, + 0x03, 0xf4, 0x23, 0x80, 0xc5, 0xe8, 0xc2, 0x11, 0x4b, 0x51, 0x0f, 0x04, 0xe5, 0x67, 0x6f, 0xa7, + 0xec, 0x27, 0x31, 0x38, 0x13, 0x8e, 0x2e, 0xa0, 0x6c, 0x31, 0x6f, 0xee, 0x50, 0xc2, 0xd5, 0x62, + 0x43, 0x69, 0x1e, 0xb5, 0xbf, 0xde, 0x81, 0xaa, 0x2f, 0x43, 0x67, 0x2b, 0x9f, 0xe0, 0x94, 0x08, + 0x7d, 0x09, 0x25, 0x42, 0xcd, 0xb9, 0x4b, 0x16, 0xea, 0x23, 0x31, 0x46, 0x2d, 0xe1, 0x4c, 0xb6, + 0xa8, 0xf7, 0x18, 0x73, 0xaf, 0x4c, 0x37, 0x24, 0x38, 0x81, 0xa2, 0x0e, 0x3c, 0xa1, 0x2c, 0x70, + 0x6e, 0x1c, 0x2b, 0x96, 0x89, 0x75, 0x6b, 0x52, 0x1a, 0x4d, 0xed, 0xa8, 0x91, 0x6f, 0x56, 0xf0, + 0x71, 0xd6, 0xd9, 0x97, 0x3e, 0x34, 0x81, 0x9a, 0xc5, 0x49, 0x56, 0x57, 0x2a, 0x88, 0x94, 0x1f, + 0x6f, 0x6f, 0x63, 0x22, 0x45, 0x88, 0x05, 0x16, 0x1f, 0x25, 0xc1, 0xb1, 0x1d, 0xd1, 0xdd, 0x93, + 0xa9, 0x5a, 0xdd, 0x85, 0xce, 0xdb, 0xb0, 0xb5, 0x53, 0x38, 0xdc, 0x90, 0x07, 0x52, 0xa1, 0x64, + 0x31, 0x1a, 0x10, 0x1a, 0x48, 0x81, 0x26, 0x26, 0xfa, 0x10, 0x2a, 0x9e, 0xe3, 0x11, 0x23, 0x58, + 0xf9, 0x89, 0x40, 0xcb, 0xd1, 0x41, 0x34, 0x5a, 0xed, 0xaf, 0x32, 0x54, 0xd2, 0xa1, 0xa7, 0x12, + 0x3f, 0x78, 0x83, 0xc4, 0x8b, 0xaf, 0x4b, 0x9c, 0xc2, 0xe3, 0x74, 0xf1, 0x46, 0x70, 0xcb, 0xc9, + 0xf2, 0x96, 0xb9, 0x0b, 0x51, 0x47, 0xb5, 0xfd, 0x72, 0x87, 0xad, 0xeb, 0x13, 0x12, 0x70, 0xc7, + 0x9a, 0x25, 0x14, 0xa3, 0x3d, 0x8c, 0x52, 0xe6, 0xf4, 0x14, 0xdd, 0x40, 0x7d, 0x9d, 0xcf, 0x9c, + 0x2f, 0xa3, 0xa6, 0x73, 0x22, 0xd9, 0xb7, 0xbb, 0x27, 0xeb, 0x46, 0xf1, 0x16, 0x19, 0xed, 0xe1, + 0x5a, 0x4a, 0x2a, 0xce, 0x02, 0x6d, 0x08, 0xa5, 0x19, 0x77, 0x6c, 0x9b, 0x70, 0xf4, 0x14, 0x0a, + 0x16, 0x0b, 0xe5, 0x70, 0x0b, 0xa3, 0x3d, 0x1c, 0x9b, 0x48, 0x83, 0x92, 0x4f, 0xb8, 0x95, 0x54, + 0xa0, 0x8c, 0xf6, 0x70, 0x72, 0xd0, 0x2b, 0xc2, 0x7e, 0x34, 0x73, 0xed, 0x9f, 0x3c, 0xd4, 0xee, + 0x35, 0x86, 0x9e, 0x42, 0xf1, 0xc6, 0x71, 0x03, 0xc2, 0xe5, 0x46, 0xa4, 0x85, 0x86, 0x70, 0x60, + 0xda, 0x36, 0x27, 0x76, 0xfc, 0x32, 0xaa, 0x65, 0x71, 0x09, 0x4f, 0x1e, 0x68, 0x6b, 0x8d, 0xc4, + 0x1b, 0x61, 0xe8, 0x73, 0x40, 0x0b, 0x42, 0x99, 0xe7, 0x50, 0x33, 0x60, 0xdc, 0x90, 0xa9, 0x2a, + 0x22, 0xd5, 0xa3, 0x8c, 0xe7, 0x34, 0xce, 0x7a, 0x0d, 
0x6a, 0x16, 0xbe, 0x51, 0x01, 0xfc, 0xdf, + 0x0a, 0x9e, 0x65, 0x28, 0xba, 0xd9, 0x62, 0x06, 0xd1, 0xb3, 0xe2, 0xf9, 0x26, 0x77, 0x96, 0x8c, + 0xaa, 0xfb, 0xe2, 0x2d, 0x78, 0x40, 0xf5, 0xfd, 0x14, 0x27, 0x2e, 0x7e, 0x26, 0x0e, 0x7d, 0x02, + 0xb5, 0x54, 0x5a, 0xc6, 0x5d, 0x74, 0xc1, 0xd5, 0x42, 0x34, 0x71, 0x7c, 0x94, 0x1e, 0x8b, 0x6b, + 0x8f, 0xbe, 0x82, 0x72, 0xf2, 0xd2, 0x0b, 0xb1, 0x56, 0xdb, 0x1f, 0xbc, 0xf6, 0x48, 0x0c, 0x24, + 0x00, 0xa7, 0x50, 0x34, 0x81, 0x52, 0x10, 0x2f, 0x5b, 0x2d, 0x89, 0xa8, 0xce, 0x2e, 0x5a, 0x92, + 0x3a, 0xc1, 0x09, 0x87, 0xf6, 0xaf, 0x02, 0x87, 0x1b, 0x02, 0xcb, 0xac, 0x5c, 0x79, 0xe3, 0xca, + 0x0b, 0xef, 0xb6, 0xf2, 0x6c, 0xdb, 0xb9, 0x77, 0x6a, 0x3b, 0xff, 0xfe, 0x6d, 0xf7, 0xaa, 0x50, + 0x49, 0x6f, 0x91, 0xf6, 0x3d, 0xd4, 0xee, 0x7d, 0x6e, 0x50, 0x1d, 0xf2, 0xbf, 0x92, 0x95, 0x9c, + 0x40, 0xf4, 0x13, 0x1d, 0x43, 0x21, 0xde, 0x66, 0x7c, 0x11, 0x62, 0xe3, 0xbb, 0xdc, 0x37, 0xca, + 0x0b, 0x13, 0x9e, 0x6c, 0xfd, 0x1e, 0xa0, 0x67, 0xf0, 0xb8, 0x7f, 0x36, 0xe9, 0x8d, 0xa7, 0x43, + 0xe3, 0x72, 0x7a, 0x71, 0x3e, 0xec, 0x8f, 0x4f, 0xc7, 0xc3, 0x41, 0x7d, 0x0f, 0x95, 0x20, 0xdf, + 0x9d, 0x0e, 0xea, 0x0a, 0x2a, 0x42, 0xee, 0x0c, 0xd7, 0x73, 0xe8, 0x39, 0x68, 0xdd, 0xe9, 0xc0, + 0xf8, 0x79, 0x3c, 0x1b, 0x19, 0x93, 0xee, 0xac, 0x3f, 0x1a, 0x4f, 0x5f, 0x19, 0x78, 0x78, 0x71, + 0x76, 0x89, 0xfb, 0xc3, 0x7a, 0xbe, 0xf7, 0xbb, 0x02, 0xaa, 0xc5, 0xbc, 0xad, 0x2d, 0xf7, 0x20, + 0xee, 0x39, 0x1a, 0xde, 0xb9, 0xf2, 0xcb, 0x0f, 0x12, 0x63, 0x33, 0xd7, 0xa4, 0xb6, 0xce, 0xb8, + 0xdd, 0xb2, 0x09, 0x15, 0xa3, 0x6d, 0xc5, 0x2e, 0xd3, 0x77, 0x96, 0x9b, 0xff, 0x4c, 0x5e, 0xae, + 0xad, 0x3f, 0x72, 0xda, 0xab, 0x98, 0xa0, 0xef, 0xb2, 0x70, 0xa1, 0x4f, 0xd6, 0xa9, 0xae, 0x3a, + 0x7f, 0x26, 0xce, 0x6b, 0xe1, 0xbc, 0x5e, 0x3b, 0xaf, 0xaf, 0x3a, 0xf3, 0xa2, 0x48, 0xd2, 0xf9, + 0x2f, 0x00, 0x00, 0xff, 0xff, 0x66, 0xb5, 0x16, 0x64, 0x76, 0x09, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/alert_service.pb.go b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/alert_service.pb.go new file mode 100644 index 000000000..ad711c915 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/alert_service.pb.go @@ -0,0 +1,667 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/monitoring/v3/alert_service.proto + +package monitoring // import "google.golang.org/genproto/googleapis/monitoring/v3" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import empty "github.com/golang/protobuf/ptypes/empty" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import field_mask "google.golang.org/genproto/protobuf/field_mask" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// The protocol for the `CreateAlertPolicy` request. +type CreateAlertPolicyRequest struct { + // The project in which to create the alerting policy. The format is + // `projects/[PROJECT_ID]`. 
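+	// (Illustratively, a hypothetical project ID `my-project` gives the
+	// parent name `projects/my-project`.)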
+ // + // Note that this field names the parent container in which the alerting + // policy will be written, not the name of the created policy. The alerting + // policy that is returned will have a name that contains a normalized + // representation of this name as a prefix but adds a suffix of the form + // `/alertPolicies/[POLICY_ID]`, identifying the policy in the container. + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + // The requested alerting policy. You should omit the `name` field in this + // policy. The name will be returned in the new policy, including + // a new [ALERT_POLICY_ID] value. + AlertPolicy *AlertPolicy `protobuf:"bytes,2,opt,name=alert_policy,json=alertPolicy,proto3" json:"alert_policy,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateAlertPolicyRequest) Reset() { *m = CreateAlertPolicyRequest{} } +func (m *CreateAlertPolicyRequest) String() string { return proto.CompactTextString(m) } +func (*CreateAlertPolicyRequest) ProtoMessage() {} +func (*CreateAlertPolicyRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_alert_service_4a748eee76dab644, []int{0} +} +func (m *CreateAlertPolicyRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateAlertPolicyRequest.Unmarshal(m, b) +} +func (m *CreateAlertPolicyRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateAlertPolicyRequest.Marshal(b, m, deterministic) +} +func (dst *CreateAlertPolicyRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateAlertPolicyRequest.Merge(dst, src) +} +func (m *CreateAlertPolicyRequest) XXX_Size() int { + return xxx_messageInfo_CreateAlertPolicyRequest.Size(m) +} +func (m *CreateAlertPolicyRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CreateAlertPolicyRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateAlertPolicyRequest proto.InternalMessageInfo + +func (m *CreateAlertPolicyRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *CreateAlertPolicyRequest) GetAlertPolicy() *AlertPolicy { + if m != nil { + return m.AlertPolicy + } + return nil +} + +// The protocol for the `GetAlertPolicy` request. +type GetAlertPolicyRequest struct { + // The alerting policy to retrieve. 
The format is + // + // projects/[PROJECT_ID]/alertPolicies/[ALERT_POLICY_ID] + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetAlertPolicyRequest) Reset() { *m = GetAlertPolicyRequest{} } +func (m *GetAlertPolicyRequest) String() string { return proto.CompactTextString(m) } +func (*GetAlertPolicyRequest) ProtoMessage() {} +func (*GetAlertPolicyRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_alert_service_4a748eee76dab644, []int{1} +} +func (m *GetAlertPolicyRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetAlertPolicyRequest.Unmarshal(m, b) +} +func (m *GetAlertPolicyRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetAlertPolicyRequest.Marshal(b, m, deterministic) +} +func (dst *GetAlertPolicyRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetAlertPolicyRequest.Merge(dst, src) +} +func (m *GetAlertPolicyRequest) XXX_Size() int { + return xxx_messageInfo_GetAlertPolicyRequest.Size(m) +} +func (m *GetAlertPolicyRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetAlertPolicyRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetAlertPolicyRequest proto.InternalMessageInfo + +func (m *GetAlertPolicyRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// The protocol for the `ListAlertPolicies` request. +type ListAlertPoliciesRequest struct { + // The project whose alert policies are to be listed. The format is + // + // projects/[PROJECT_ID] + // + // Note that this field names the parent container in which the alerting + // policies to be listed are stored. To retrieve a single alerting policy + // by name, use the + // [GetAlertPolicy][google.monitoring.v3.AlertPolicyService.GetAlertPolicy] + // operation, instead. + Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` + // If provided, this field specifies the criteria that must be met by + // alert policies to be included in the response. + // + // For more details, see [sorting and + // filtering](/monitoring/api/v3/sorting-and-filtering). + Filter string `protobuf:"bytes,5,opt,name=filter,proto3" json:"filter,omitempty"` + // A comma-separated list of fields by which to sort the result. Supports + // the same set of field references as the `filter` field. Entries can be + // prefixed with a minus sign to sort by the field in descending order. + // + // For more details, see [sorting and + // filtering](/monitoring/api/v3/sorting-and-filtering). + OrderBy string `protobuf:"bytes,6,opt,name=order_by,json=orderBy,proto3" json:"order_by,omitempty"` + // The maximum number of results to return in a single response. + PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // If this field is not empty then it must contain the `nextPageToken` value + // returned by a previous call to this method. Using this field causes the + // method to return more results from the previous method call. 
+ PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListAlertPoliciesRequest) Reset() { *m = ListAlertPoliciesRequest{} } +func (m *ListAlertPoliciesRequest) String() string { return proto.CompactTextString(m) } +func (*ListAlertPoliciesRequest) ProtoMessage() {} +func (*ListAlertPoliciesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_alert_service_4a748eee76dab644, []int{2} +} +func (m *ListAlertPoliciesRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListAlertPoliciesRequest.Unmarshal(m, b) +} +func (m *ListAlertPoliciesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListAlertPoliciesRequest.Marshal(b, m, deterministic) +} +func (dst *ListAlertPoliciesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListAlertPoliciesRequest.Merge(dst, src) +} +func (m *ListAlertPoliciesRequest) XXX_Size() int { + return xxx_messageInfo_ListAlertPoliciesRequest.Size(m) +} +func (m *ListAlertPoliciesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListAlertPoliciesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListAlertPoliciesRequest proto.InternalMessageInfo + +func (m *ListAlertPoliciesRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *ListAlertPoliciesRequest) GetFilter() string { + if m != nil { + return m.Filter + } + return "" +} + +func (m *ListAlertPoliciesRequest) GetOrderBy() string { + if m != nil { + return m.OrderBy + } + return "" +} + +func (m *ListAlertPoliciesRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListAlertPoliciesRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +// The protocol for the `ListAlertPolicies` response. +type ListAlertPoliciesResponse struct { + // The returned alert policies. + AlertPolicies []*AlertPolicy `protobuf:"bytes,3,rep,name=alert_policies,json=alertPolicies,proto3" json:"alert_policies,omitempty"` + // If there might be more results than were returned, then this field is set + // to a non-empty value. To see the additional results, + // use that value as `pageToken` in the next call to this method. 
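+	// (Illustrative pagination loop: call `ListAlertPolicies`, and while
+	// the response carries a non-empty `next_page_token`, repeat the request
+	// with `page_token` set to that value.)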
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListAlertPoliciesResponse) Reset() { *m = ListAlertPoliciesResponse{} } +func (m *ListAlertPoliciesResponse) String() string { return proto.CompactTextString(m) } +func (*ListAlertPoliciesResponse) ProtoMessage() {} +func (*ListAlertPoliciesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_alert_service_4a748eee76dab644, []int{3} +} +func (m *ListAlertPoliciesResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListAlertPoliciesResponse.Unmarshal(m, b) +} +func (m *ListAlertPoliciesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListAlertPoliciesResponse.Marshal(b, m, deterministic) +} +func (dst *ListAlertPoliciesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListAlertPoliciesResponse.Merge(dst, src) +} +func (m *ListAlertPoliciesResponse) XXX_Size() int { + return xxx_messageInfo_ListAlertPoliciesResponse.Size(m) +} +func (m *ListAlertPoliciesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListAlertPoliciesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListAlertPoliciesResponse proto.InternalMessageInfo + +func (m *ListAlertPoliciesResponse) GetAlertPolicies() []*AlertPolicy { + if m != nil { + return m.AlertPolicies + } + return nil +} + +func (m *ListAlertPoliciesResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +// The protocol for the `UpdateAlertPolicy` request. +type UpdateAlertPolicyRequest struct { + // Optional. A list of alerting policy field names. If this field is not + // empty, each listed field in the existing alerting policy is set to the + // value of the corresponding field in the supplied policy (`alert_policy`), + // or to the field's default value if the field is not in the supplied + // alerting policy. Fields not listed retain their previous value. + // + // Examples of valid field masks include `display_name`, `documentation`, + // `documentation.content`, `documentation.mime_type`, `user_labels`, + // `user_label.nameofkey`, `enabled`, `conditions`, `combiner`, etc. + // + // If this field is empty, then the supplied alerting policy replaces the + // existing policy. It is the same as deleting the existing policy and + // adding the supplied policy, except for the following: + // + // + The new policy will have the same `[ALERT_POLICY_ID]` as the former + // policy. This gives you continuity with the former policy in your + // notifications and incidents. + // + Conditions in the new policy will keep their former `[CONDITION_ID]` if + // the supplied condition includes the `name` field with that + // `[CONDITION_ID]`. If the supplied condition omits the `name` field, + // then a new `[CONDITION_ID]` is created. + UpdateMask *field_mask.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` + // Required. The updated alerting policy or the updated values for the + // fields listed in `update_mask`. + // If `update_mask` is not empty, any fields in this policy that are + // not in `update_mask` are ignored. 
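+	// For example (illustrative only): to rename a policy, set `update_mask`
+	// to `display_name` and supply an `alert_policy` whose `display_name`
+	// holds the new value; all other fields then keep their previous values.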
+ AlertPolicy *AlertPolicy `protobuf:"bytes,3,opt,name=alert_policy,json=alertPolicy,proto3" json:"alert_policy,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpdateAlertPolicyRequest) Reset() { *m = UpdateAlertPolicyRequest{} } +func (m *UpdateAlertPolicyRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateAlertPolicyRequest) ProtoMessage() {} +func (*UpdateAlertPolicyRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_alert_service_4a748eee76dab644, []int{4} +} +func (m *UpdateAlertPolicyRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UpdateAlertPolicyRequest.Unmarshal(m, b) +} +func (m *UpdateAlertPolicyRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UpdateAlertPolicyRequest.Marshal(b, m, deterministic) +} +func (dst *UpdateAlertPolicyRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdateAlertPolicyRequest.Merge(dst, src) +} +func (m *UpdateAlertPolicyRequest) XXX_Size() int { + return xxx_messageInfo_UpdateAlertPolicyRequest.Size(m) +} +func (m *UpdateAlertPolicyRequest) XXX_DiscardUnknown() { + xxx_messageInfo_UpdateAlertPolicyRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_UpdateAlertPolicyRequest proto.InternalMessageInfo + +func (m *UpdateAlertPolicyRequest) GetUpdateMask() *field_mask.FieldMask { + if m != nil { + return m.UpdateMask + } + return nil +} + +func (m *UpdateAlertPolicyRequest) GetAlertPolicy() *AlertPolicy { + if m != nil { + return m.AlertPolicy + } + return nil +} + +// The protocol for the `DeleteAlertPolicy` request. +type DeleteAlertPolicyRequest struct { + // The alerting policy to delete. The format is: + // + // projects/[PROJECT_ID]/alertPolicies/[ALERT_POLICY_ID] + // + // For more information, see [AlertPolicy][google.monitoring.v3.AlertPolicy]. 
+ Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteAlertPolicyRequest) Reset() { *m = DeleteAlertPolicyRequest{} } +func (m *DeleteAlertPolicyRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteAlertPolicyRequest) ProtoMessage() {} +func (*DeleteAlertPolicyRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_alert_service_4a748eee76dab644, []int{5} +} +func (m *DeleteAlertPolicyRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteAlertPolicyRequest.Unmarshal(m, b) +} +func (m *DeleteAlertPolicyRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteAlertPolicyRequest.Marshal(b, m, deterministic) +} +func (dst *DeleteAlertPolicyRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteAlertPolicyRequest.Merge(dst, src) +} +func (m *DeleteAlertPolicyRequest) XXX_Size() int { + return xxx_messageInfo_DeleteAlertPolicyRequest.Size(m) +} +func (m *DeleteAlertPolicyRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteAlertPolicyRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteAlertPolicyRequest proto.InternalMessageInfo + +func (m *DeleteAlertPolicyRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func init() { + proto.RegisterType((*CreateAlertPolicyRequest)(nil), "google.monitoring.v3.CreateAlertPolicyRequest") + proto.RegisterType((*GetAlertPolicyRequest)(nil), "google.monitoring.v3.GetAlertPolicyRequest") + proto.RegisterType((*ListAlertPoliciesRequest)(nil), "google.monitoring.v3.ListAlertPoliciesRequest") + proto.RegisterType((*ListAlertPoliciesResponse)(nil), "google.monitoring.v3.ListAlertPoliciesResponse") + proto.RegisterType((*UpdateAlertPolicyRequest)(nil), "google.monitoring.v3.UpdateAlertPolicyRequest") + proto.RegisterType((*DeleteAlertPolicyRequest)(nil), "google.monitoring.v3.DeleteAlertPolicyRequest") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// AlertPolicyServiceClient is the client API for AlertPolicyService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type AlertPolicyServiceClient interface { + // Lists the existing alerting policies for the project. + ListAlertPolicies(ctx context.Context, in *ListAlertPoliciesRequest, opts ...grpc.CallOption) (*ListAlertPoliciesResponse, error) + // Gets a single alerting policy. + GetAlertPolicy(ctx context.Context, in *GetAlertPolicyRequest, opts ...grpc.CallOption) (*AlertPolicy, error) + // Creates a new alerting policy. + CreateAlertPolicy(ctx context.Context, in *CreateAlertPolicyRequest, opts ...grpc.CallOption) (*AlertPolicy, error) + // Deletes an alerting policy. + DeleteAlertPolicy(ctx context.Context, in *DeleteAlertPolicyRequest, opts ...grpc.CallOption) (*empty.Empty, error) + // Updates an alerting policy. You can either replace the entire policy with + // a new one or replace only certain fields in the current alerting policy by + // specifying the fields to be updated via `updateMask`. Returns the + // updated alerting policy. 
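+	// A minimal illustrative sketch (`conn`, `ctx`, and `name` are
+	// placeholders supplied by the caller):
+	//
+	//	client := NewAlertPolicyServiceClient(conn)
+	//	updated, err := client.UpdateAlertPolicy(ctx, &UpdateAlertPolicyRequest{
+	//		UpdateMask:  &field_mask.FieldMask{Paths: []string{"display_name"}},
+	//		AlertPolicy: &AlertPolicy{Name: name, DisplayName: "My renamed policy"},
+	//	})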
+ UpdateAlertPolicy(ctx context.Context, in *UpdateAlertPolicyRequest, opts ...grpc.CallOption) (*AlertPolicy, error) +} + +type alertPolicyServiceClient struct { + cc *grpc.ClientConn +} + +func NewAlertPolicyServiceClient(cc *grpc.ClientConn) AlertPolicyServiceClient { + return &alertPolicyServiceClient{cc} +} + +func (c *alertPolicyServiceClient) ListAlertPolicies(ctx context.Context, in *ListAlertPoliciesRequest, opts ...grpc.CallOption) (*ListAlertPoliciesResponse, error) { + out := new(ListAlertPoliciesResponse) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.AlertPolicyService/ListAlertPolicies", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *alertPolicyServiceClient) GetAlertPolicy(ctx context.Context, in *GetAlertPolicyRequest, opts ...grpc.CallOption) (*AlertPolicy, error) { + out := new(AlertPolicy) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.AlertPolicyService/GetAlertPolicy", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *alertPolicyServiceClient) CreateAlertPolicy(ctx context.Context, in *CreateAlertPolicyRequest, opts ...grpc.CallOption) (*AlertPolicy, error) { + out := new(AlertPolicy) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.AlertPolicyService/CreateAlertPolicy", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *alertPolicyServiceClient) DeleteAlertPolicy(ctx context.Context, in *DeleteAlertPolicyRequest, opts ...grpc.CallOption) (*empty.Empty, error) { + out := new(empty.Empty) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.AlertPolicyService/DeleteAlertPolicy", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *alertPolicyServiceClient) UpdateAlertPolicy(ctx context.Context, in *UpdateAlertPolicyRequest, opts ...grpc.CallOption) (*AlertPolicy, error) { + out := new(AlertPolicy) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.AlertPolicyService/UpdateAlertPolicy", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// AlertPolicyServiceServer is the server API for AlertPolicyService service. +type AlertPolicyServiceServer interface { + // Lists the existing alerting policies for the project. + ListAlertPolicies(context.Context, *ListAlertPoliciesRequest) (*ListAlertPoliciesResponse, error) + // Gets a single alerting policy. + GetAlertPolicy(context.Context, *GetAlertPolicyRequest) (*AlertPolicy, error) + // Creates a new alerting policy. + CreateAlertPolicy(context.Context, *CreateAlertPolicyRequest) (*AlertPolicy, error) + // Deletes an alerting policy. + DeleteAlertPolicy(context.Context, *DeleteAlertPolicyRequest) (*empty.Empty, error) + // Updates an alerting policy. You can either replace the entire policy with + // a new one or replace only certain fields in the current alerting policy by + // specifying the fields to be updated via `updateMask`. Returns the + // updated alerting policy. 
+ UpdateAlertPolicy(context.Context, *UpdateAlertPolicyRequest) (*AlertPolicy, error) +} + +func RegisterAlertPolicyServiceServer(s *grpc.Server, srv AlertPolicyServiceServer) { + s.RegisterService(&_AlertPolicyService_serviceDesc, srv) +} + +func _AlertPolicyService_ListAlertPolicies_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListAlertPoliciesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AlertPolicyServiceServer).ListAlertPolicies(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.AlertPolicyService/ListAlertPolicies", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AlertPolicyServiceServer).ListAlertPolicies(ctx, req.(*ListAlertPoliciesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _AlertPolicyService_GetAlertPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetAlertPolicyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AlertPolicyServiceServer).GetAlertPolicy(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.AlertPolicyService/GetAlertPolicy", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AlertPolicyServiceServer).GetAlertPolicy(ctx, req.(*GetAlertPolicyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _AlertPolicyService_CreateAlertPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateAlertPolicyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AlertPolicyServiceServer).CreateAlertPolicy(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.AlertPolicyService/CreateAlertPolicy", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AlertPolicyServiceServer).CreateAlertPolicy(ctx, req.(*CreateAlertPolicyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _AlertPolicyService_DeleteAlertPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteAlertPolicyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AlertPolicyServiceServer).DeleteAlertPolicy(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.AlertPolicyService/DeleteAlertPolicy", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AlertPolicyServiceServer).DeleteAlertPolicy(ctx, req.(*DeleteAlertPolicyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _AlertPolicyService_UpdateAlertPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateAlertPolicyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AlertPolicyServiceServer).UpdateAlertPolicy(ctx, in) + } + info := &grpc.UnaryServerInfo{ 
+ Server: srv, + FullMethod: "/google.monitoring.v3.AlertPolicyService/UpdateAlertPolicy", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AlertPolicyServiceServer).UpdateAlertPolicy(ctx, req.(*UpdateAlertPolicyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _AlertPolicyService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.monitoring.v3.AlertPolicyService", + HandlerType: (*AlertPolicyServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "ListAlertPolicies", + Handler: _AlertPolicyService_ListAlertPolicies_Handler, + }, + { + MethodName: "GetAlertPolicy", + Handler: _AlertPolicyService_GetAlertPolicy_Handler, + }, + { + MethodName: "CreateAlertPolicy", + Handler: _AlertPolicyService_CreateAlertPolicy_Handler, + }, + { + MethodName: "DeleteAlertPolicy", + Handler: _AlertPolicyService_DeleteAlertPolicy_Handler, + }, + { + MethodName: "UpdateAlertPolicy", + Handler: _AlertPolicyService_UpdateAlertPolicy_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/monitoring/v3/alert_service.proto", +} + +func init() { + proto.RegisterFile("google/monitoring/v3/alert_service.proto", fileDescriptor_alert_service_4a748eee76dab644) +} + +var fileDescriptor_alert_service_4a748eee76dab644 = []byte{ + // 656 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x55, 0x41, 0x6f, 0xd3, 0x4c, + 0x10, 0x95, 0x93, 0x36, 0x5f, 0xbb, 0xfd, 0x5a, 0x94, 0x15, 0x54, 0xae, 0x0b, 0x52, 0x30, 0x2a, + 0x54, 0xad, 0xb0, 0xa5, 0xf8, 0x04, 0x15, 0x48, 0xa4, 0x85, 0xf6, 0x40, 0xa5, 0x28, 0x85, 0x1e, + 0x50, 0xa4, 0x68, 0x93, 0x4c, 0xac, 0x25, 0x8e, 0xd7, 0x78, 0x37, 0x11, 0x29, 0xea, 0x85, 0x23, + 0x12, 0xe2, 0xc0, 0x99, 0x03, 0x47, 0x38, 0x20, 0x7e, 0x07, 0x57, 0xfe, 0x02, 0x3f, 0x04, 0x79, + 0xed, 0x34, 0x76, 0x6d, 0xab, 0x16, 0xb7, 0xcc, 0xce, 0xdb, 0x99, 0xb7, 0x6f, 0xde, 0x38, 0x68, + 0xdb, 0x66, 0xcc, 0x76, 0xc0, 0x1c, 0x31, 0x97, 0x0a, 0xe6, 0x53, 0xd7, 0x36, 0x27, 0x96, 0x49, + 0x1c, 0xf0, 0x45, 0x87, 0x83, 0x3f, 0xa1, 0x3d, 0x30, 0x3c, 0x9f, 0x09, 0x86, 0xaf, 0x87, 0x48, + 0x63, 0x8e, 0x34, 0x26, 0x96, 0x76, 0x33, 0xba, 0x4f, 0x3c, 0x6a, 0x12, 0xd7, 0x65, 0x82, 0x08, + 0xca, 0x5c, 0x1e, 0xde, 0xd1, 0x6a, 0xf9, 0xd5, 0x23, 0xc4, 0x66, 0x84, 0x90, 0x51, 0x77, 0x3c, + 0x30, 0x61, 0xe4, 0x89, 0xe9, 0xa5, 0xeb, 0x17, 0xc9, 0x01, 0x05, 0xa7, 0xdf, 0x19, 0x11, 0x3e, + 0x0c, 0x11, 0xba, 0x40, 0xea, 0xbe, 0x0f, 0x44, 0xc0, 0x93, 0xa0, 0x66, 0x93, 0x39, 0xb4, 0x37, + 0x6d, 0xc1, 0x9b, 0x31, 0x70, 0x81, 0x31, 0x5a, 0x70, 0xc9, 0x08, 0xd4, 0x72, 0x4d, 0xd9, 0x5e, + 0x6e, 0xc9, 0xdf, 0xf8, 0x00, 0xfd, 0x1f, 0xbe, 0xcd, 0x93, 0x50, 0xb5, 0x54, 0x53, 0xb6, 0x57, + 0xea, 0xb7, 0x8d, 0xac, 0xb7, 0x19, 0xf1, 0x9a, 0x2b, 0x64, 0x1e, 0xe8, 0xbb, 0xe8, 0xc6, 0x21, + 0x88, 0x62, 0x2d, 0xf5, 0x2f, 0x0a, 0x52, 0x9f, 0x53, 0x1e, 0x83, 0x53, 0xe0, 0x97, 0x2f, 0x2c, + 0xc4, 0x38, 0xae, 0xa3, 0xca, 0x80, 0x3a, 0x02, 0x7c, 0x75, 0x51, 0x9e, 0x46, 0x11, 0xde, 0x40, + 0x4b, 0xcc, 0xef, 0x83, 0xdf, 0xe9, 0x4e, 0xd5, 0x8a, 0xcc, 0xfc, 0x27, 0xe3, 0xc6, 0x14, 0x6f, + 0xa2, 0x65, 0x8f, 0xd8, 0xd0, 0xe1, 0xf4, 0x0c, 0xe4, 0x9b, 0x16, 0x5b, 0x4b, 0xc1, 0xc1, 0x09, + 0x3d, 0x03, 0x7c, 0x0b, 0x21, 0x99, 0x14, 0x6c, 0x08, 0x6e, 0x44, 0x4d, 0xc2, 0x5f, 0x04, 0x07, + 0xfa, 0x47, 0x05, 0x6d, 0x64, 0xf0, 0xe3, 0x1e, 0x73, 0x39, 0xe0, 0x23, 0xb4, 0x16, 0x13, 0x8c, + 0x02, 0x57, 0xcb, 0xb5, 0x72, 0x31, 0xc9, 0x56, 0x49, 0xbc, 0x22, 0xbe, 0x8b, 0xae, 0xb9, 0xf0, + 0x56, 0x74, 0x62, 0x5c, 0x4a, 0x92, 
0xcb, 0x6a, 0x70, 0xdc, 0xbc, 0xe0, 0x13, 0xe8, 0xf5, 0xd2, + 0xeb, 0x67, 0xcf, 0x74, 0x0f, 0xad, 0x8c, 0x65, 0x4e, 0x9a, 0x20, 0x1a, 0x9f, 0x36, 0xe3, 0x32, + 0xf3, 0x89, 0xf1, 0x2c, 0xf0, 0xc9, 0x31, 0xe1, 0xc3, 0x16, 0x0a, 0xe1, 0xc1, 0xef, 0xd4, 0xf0, + 0xcb, 0xff, 0x34, 0x7c, 0x03, 0xa9, 0x07, 0xe0, 0x40, 0x51, 0xcb, 0xd5, 0x7f, 0x54, 0x10, 0x8e, + 0x41, 0x4f, 0xc2, 0xa5, 0xc2, 0x5f, 0x15, 0x54, 0x4d, 0xc9, 0x8e, 0x8d, 0x6c, 0x32, 0x79, 0xfe, + 0xd1, 0xcc, 0xc2, 0xf8, 0x70, 0x9e, 0xfa, 0xee, 0xfb, 0xdf, 0x7f, 0x3e, 0x97, 0xb6, 0xf0, 0x9d, + 0x60, 0x11, 0xdf, 0x05, 0x04, 0x1f, 0x79, 0x3e, 0x7b, 0x0d, 0x3d, 0xc1, 0xcd, 0x9d, 0x73, 0x33, + 0x39, 0xb2, 0x4f, 0x0a, 0x5a, 0x4b, 0x1a, 0x1d, 0xef, 0x66, 0x37, 0xcc, 0x5c, 0x07, 0xed, 0x6a, + 0x69, 0xf5, 0xfb, 0x92, 0xcf, 0x3d, 0xbc, 0x95, 0xc5, 0x27, 0x49, 0xc7, 0xdc, 0x39, 0x97, 0xaa, + 0xa5, 0x16, 0x3e, 0x4f, 0xb5, 0xbc, 0x2f, 0x43, 0x11, 0x5e, 0x0f, 0x24, 0x2f, 0x4b, 0x2f, 0xa2, + 0xd3, 0xc3, 0x84, 0xad, 0xf0, 0x07, 0x05, 0x55, 0x53, 0x0e, 0xc9, 0xe3, 0x98, 0x67, 0x25, 0x6d, + 0x3d, 0x65, 0xea, 0xa7, 0xc1, 0x97, 0x71, 0x26, 0xd8, 0x4e, 0x41, 0xc1, 0x7e, 0x2a, 0xa8, 0x9a, + 0xda, 0xa6, 0x3c, 0x32, 0x79, 0x6b, 0x57, 0x44, 0xb0, 0x23, 0xc9, 0xab, 0x51, 0xaf, 0x4b, 0x5e, + 0x71, 0x41, 0x8c, 0xab, 0x48, 0x26, 0xf5, 0x6b, 0x7c, 0x53, 0x90, 0xda, 0x63, 0xa3, 0xcc, 0x96, + 0x8d, 0xaa, 0xec, 0x19, 0x2d, 0x51, 0x33, 0x90, 0xa6, 0xa9, 0xbc, 0x7a, 0x1c, 0x41, 0x6d, 0xe6, + 0x10, 0xd7, 0x36, 0x98, 0x6f, 0x9b, 0x36, 0xb8, 0x52, 0x38, 0x33, 0x4c, 0x11, 0x8f, 0xf2, 0xe4, + 0xbf, 0xd0, 0xde, 0x3c, 0xfa, 0x5e, 0xd2, 0x0e, 0xc3, 0x02, 0xfb, 0x0e, 0x1b, 0xf7, 0x8d, 0xe3, + 0x79, 0xc7, 0x53, 0xeb, 0xd7, 0x2c, 0xd9, 0x96, 0xc9, 0xf6, 0x3c, 0xd9, 0x3e, 0xb5, 0xba, 0x15, + 0xd9, 0xc4, 0xfa, 0x1b, 0x00, 0x00, 0xff, 0xff, 0x6f, 0x1f, 0xe6, 0xf0, 0x47, 0x07, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/common.pb.go b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/common.pb.go new file mode 100644 index 000000000..d2792b520 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/common.pb.go @@ -0,0 +1,887 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/monitoring/v3/common.proto + +package monitoring // import "google.golang.org/genproto/googleapis/monitoring/v3" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import duration "github.com/golang/protobuf/ptypes/duration" +import timestamp "github.com/golang/protobuf/ptypes/timestamp" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import distribution "google.golang.org/genproto/googleapis/api/distribution" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Specifies an ordering relationship on two arguments, here called left and +// right. +type ComparisonType int32 + +const ( + // No ordering relationship is specified. + ComparisonType_COMPARISON_UNSPECIFIED ComparisonType = 0 + // The left argument is greater than the right argument. 
+ ComparisonType_COMPARISON_GT ComparisonType = 1 + // The left argument is greater than or equal to the right argument. + ComparisonType_COMPARISON_GE ComparisonType = 2 + // The left argument is less than the right argument. + ComparisonType_COMPARISON_LT ComparisonType = 3 + // The left argument is less than or equal to the right argument. + ComparisonType_COMPARISON_LE ComparisonType = 4 + // The left argument is equal to the right argument. + ComparisonType_COMPARISON_EQ ComparisonType = 5 + // The left argument is not equal to the right argument. + ComparisonType_COMPARISON_NE ComparisonType = 6 +) + +var ComparisonType_name = map[int32]string{ + 0: "COMPARISON_UNSPECIFIED", + 1: "COMPARISON_GT", + 2: "COMPARISON_GE", + 3: "COMPARISON_LT", + 4: "COMPARISON_LE", + 5: "COMPARISON_EQ", + 6: "COMPARISON_NE", +} +var ComparisonType_value = map[string]int32{ + "COMPARISON_UNSPECIFIED": 0, + "COMPARISON_GT": 1, + "COMPARISON_GE": 2, + "COMPARISON_LT": 3, + "COMPARISON_LE": 4, + "COMPARISON_EQ": 5, + "COMPARISON_NE": 6, +} + +func (x ComparisonType) String() string { + return proto.EnumName(ComparisonType_name, int32(x)) +} +func (ComparisonType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_common_8becd78c7e1e8452, []int{0} +} + +// The tier of service for a Workspace. Please see the +// [service tiers documentation](https://cloud.google.com/monitoring/workspaces/tiers) +// for more details. +type ServiceTier int32 // Deprecated: Do not use. +const ( + // An invalid sentinel value, used to indicate that a tier has not + // been provided explicitly. + ServiceTier_SERVICE_TIER_UNSPECIFIED ServiceTier = 0 + // The Stackdriver Basic tier, a free tier of service that provides basic + // features, a moderate allotment of logs, and access to built-in metrics. + // A number of features are not available in this tier. For more details, + // see [the service tiers documentation](https://cloud.google.com/monitoring/workspaces/tiers). + ServiceTier_SERVICE_TIER_BASIC ServiceTier = 1 + // The Stackdriver Premium tier, a higher, more expensive tier of service + // that provides access to all Stackdriver features, lets you use Stackdriver + // with AWS accounts, and has a larger allotments for logs and metrics. For + // more details, see [the service tiers documentation](https://cloud.google.com/monitoring/workspaces/tiers). + ServiceTier_SERVICE_TIER_PREMIUM ServiceTier = 2 +) + +var ServiceTier_name = map[int32]string{ + 0: "SERVICE_TIER_UNSPECIFIED", + 1: "SERVICE_TIER_BASIC", + 2: "SERVICE_TIER_PREMIUM", +} +var ServiceTier_value = map[string]int32{ + "SERVICE_TIER_UNSPECIFIED": 0, + "SERVICE_TIER_BASIC": 1, + "SERVICE_TIER_PREMIUM": 2, +} + +func (x ServiceTier) String() string { + return proto.EnumName(ServiceTier_name, int32(x)) +} +func (ServiceTier) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_common_8becd78c7e1e8452, []int{1} +} + +// The Aligner describes how to bring the data points in a single +// time series into temporal alignment. +type Aggregation_Aligner int32 + +const ( + // No alignment. Raw data is returned. Not valid if cross-time + // series reduction is requested. The value type of the result is + // the same as the value type of the input. + Aggregation_ALIGN_NONE Aggregation_Aligner = 0 + // Align and convert to delta metric type. This alignment is valid + // for cumulative metrics and delta metrics. Aligning an existing + // delta metric to a delta metric requires that the alignment + // period be increased. 
The value type of the result is the same + // as the value type of the input. + // + // One can think of this aligner as a rate but without time units; that + // is, the output is conceptually (second_point - first_point). + Aggregation_ALIGN_DELTA Aggregation_Aligner = 1 + // Align and convert to a rate. This alignment is valid for + // cumulative metrics and delta metrics with numeric values. The output is a + // gauge metric with value type + // [DOUBLE][google.api.MetricDescriptor.ValueType.DOUBLE]. + // + // One can think of this aligner as conceptually providing the slope of + // the line that passes through the value at the start and end of the + // window. In other words, this is conceptually ((y1 - y0)/(t1 - t0)), + // and the output unit is one that has a "/time" dimension. + // + // If, by rate, you are looking for percentage change, see the + // `ALIGN_PERCENT_CHANGE` aligner option. + Aggregation_ALIGN_RATE Aggregation_Aligner = 2 + // Align by interpolating between adjacent points around the + // period boundary. This alignment is valid for gauge + // metrics with numeric values. The value type of the result is the same + // as the value type of the input. + Aggregation_ALIGN_INTERPOLATE Aggregation_Aligner = 3 + // Align by shifting the oldest data point before the period + // boundary to the boundary. This alignment is valid for gauge + // metrics. The value type of the result is the same as the + // value type of the input. + Aggregation_ALIGN_NEXT_OLDER Aggregation_Aligner = 4 + // Align time series via aggregation. The resulting data point in + // the alignment period is the minimum of all data points in the + // period. This alignment is valid for gauge and delta metrics with numeric + // values. The value type of the result is the same as the value + // type of the input. + Aggregation_ALIGN_MIN Aggregation_Aligner = 10 + // Align time series via aggregation. The resulting data point in + // the alignment period is the maximum of all data points in the + // period. This alignment is valid for gauge and delta metrics with numeric + // values. The value type of the result is the same as the value + // type of the input. + Aggregation_ALIGN_MAX Aggregation_Aligner = 11 + // Align time series via aggregation. The resulting data point in + // the alignment period is the average or arithmetic mean of all + // data points in the period. This alignment is valid for gauge and delta + // metrics with numeric values. The value type of the output is + // [DOUBLE][google.api.MetricDescriptor.ValueType.DOUBLE]. + Aggregation_ALIGN_MEAN Aggregation_Aligner = 12 + // Align time series via aggregation. The resulting data point in + // the alignment period is the count of all data points in the + // period. This alignment is valid for gauge and delta metrics with numeric + // or Boolean values. The value type of the output is + // [INT64][google.api.MetricDescriptor.ValueType.INT64]. + Aggregation_ALIGN_COUNT Aggregation_Aligner = 13 + // Align time series via aggregation. The resulting data point in + // the alignment period is the sum of all data points in the + // period. This alignment is valid for gauge and delta metrics with numeric + // and distribution values. The value type of the output is the + // same as the value type of the input. + Aggregation_ALIGN_SUM Aggregation_Aligner = 14 + // Align time series via aggregation. The resulting data point in + // the alignment period is the standard deviation of all data + // points in the period. 
This alignment is valid for gauge and delta metrics + // with numeric values. The value type of the output is + // [DOUBLE][google.api.MetricDescriptor.ValueType.DOUBLE]. + Aggregation_ALIGN_STDDEV Aggregation_Aligner = 15 + // Align time series via aggregation. The resulting data point in + // the alignment period is the count of True-valued data points in the + // period. This alignment is valid for gauge metrics with + // Boolean values. The value type of the output is + // [INT64][google.api.MetricDescriptor.ValueType.INT64]. + Aggregation_ALIGN_COUNT_TRUE Aggregation_Aligner = 16 + // Align time series via aggregation. The resulting data point in + // the alignment period is the count of False-valued data points in the + // period. This alignment is valid for gauge metrics with + // Boolean values. The value type of the output is + // [INT64][google.api.MetricDescriptor.ValueType.INT64]. + Aggregation_ALIGN_COUNT_FALSE Aggregation_Aligner = 24 + // Align time series via aggregation. The resulting data point in + // the alignment period is the fraction of True-valued data points in the + // period. This alignment is valid for gauge metrics with Boolean values. + // The output value is in the range [0, 1] and has value type + // [DOUBLE][google.api.MetricDescriptor.ValueType.DOUBLE]. + Aggregation_ALIGN_FRACTION_TRUE Aggregation_Aligner = 17 + // Align time series via aggregation. The resulting data point in + // the alignment period is the 99th percentile of all data + // points in the period. This alignment is valid for gauge and delta metrics + // with distribution values. The output is a gauge metric with value type + // [DOUBLE][google.api.MetricDescriptor.ValueType.DOUBLE]. + Aggregation_ALIGN_PERCENTILE_99 Aggregation_Aligner = 18 + // Align time series via aggregation. The resulting data point in + // the alignment period is the 95th percentile of all data + // points in the period. This alignment is valid for gauge and delta metrics + // with distribution values. The output is a gauge metric with value type + // [DOUBLE][google.api.MetricDescriptor.ValueType.DOUBLE]. + Aggregation_ALIGN_PERCENTILE_95 Aggregation_Aligner = 19 + // Align time series via aggregation. The resulting data point in + // the alignment period is the 50th percentile of all data + // points in the period. This alignment is valid for gauge and delta metrics + // with distribution values. The output is a gauge metric with value type + // [DOUBLE][google.api.MetricDescriptor.ValueType.DOUBLE]. + Aggregation_ALIGN_PERCENTILE_50 Aggregation_Aligner = 20 + // Align time series via aggregation. The resulting data point in + // the alignment period is the 5th percentile of all data + // points in the period. This alignment is valid for gauge and delta metrics + // with distribution values. The output is a gauge metric with value type + // [DOUBLE][google.api.MetricDescriptor.ValueType.DOUBLE]. + Aggregation_ALIGN_PERCENTILE_05 Aggregation_Aligner = 21 + // Align and convert to a percentage change. This alignment is valid for + // gauge and delta metrics with numeric values. This alignment conceptually + // computes the equivalent of "((current - previous)/previous)*100" + // where previous value is determined based on the alignmentPeriod. + // In the event that previous is 0 the calculated value is infinity with the + // exception that if both (current - previous) and previous are 0 the + // calculated value is 0. 
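+ // As an illustrative calculation, a previous value of 50 and a current
+ // value of 60 align to ((60 - 50) / 50) * 100 = 20.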
+ // A 10 minute moving mean is computed at each point of the time window + // prior to the above calculation to smooth the metric and prevent false + // positives from very short lived spikes. + // Only applicable for data that is >= 0. Any values < 0 are treated as + // no data. While delta metrics are accepted by this alignment special care + // should be taken that the values for the metric will always be positive. + // The output is a gauge metric with value type + // [DOUBLE][google.api.MetricDescriptor.ValueType.DOUBLE]. + Aggregation_ALIGN_PERCENT_CHANGE Aggregation_Aligner = 23 +) + +var Aggregation_Aligner_name = map[int32]string{ + 0: "ALIGN_NONE", + 1: "ALIGN_DELTA", + 2: "ALIGN_RATE", + 3: "ALIGN_INTERPOLATE", + 4: "ALIGN_NEXT_OLDER", + 10: "ALIGN_MIN", + 11: "ALIGN_MAX", + 12: "ALIGN_MEAN", + 13: "ALIGN_COUNT", + 14: "ALIGN_SUM", + 15: "ALIGN_STDDEV", + 16: "ALIGN_COUNT_TRUE", + 24: "ALIGN_COUNT_FALSE", + 17: "ALIGN_FRACTION_TRUE", + 18: "ALIGN_PERCENTILE_99", + 19: "ALIGN_PERCENTILE_95", + 20: "ALIGN_PERCENTILE_50", + 21: "ALIGN_PERCENTILE_05", + 23: "ALIGN_PERCENT_CHANGE", +} +var Aggregation_Aligner_value = map[string]int32{ + "ALIGN_NONE": 0, + "ALIGN_DELTA": 1, + "ALIGN_RATE": 2, + "ALIGN_INTERPOLATE": 3, + "ALIGN_NEXT_OLDER": 4, + "ALIGN_MIN": 10, + "ALIGN_MAX": 11, + "ALIGN_MEAN": 12, + "ALIGN_COUNT": 13, + "ALIGN_SUM": 14, + "ALIGN_STDDEV": 15, + "ALIGN_COUNT_TRUE": 16, + "ALIGN_COUNT_FALSE": 24, + "ALIGN_FRACTION_TRUE": 17, + "ALIGN_PERCENTILE_99": 18, + "ALIGN_PERCENTILE_95": 19, + "ALIGN_PERCENTILE_50": 20, + "ALIGN_PERCENTILE_05": 21, + "ALIGN_PERCENT_CHANGE": 23, +} + +func (x Aggregation_Aligner) String() string { + return proto.EnumName(Aggregation_Aligner_name, int32(x)) +} +func (Aggregation_Aligner) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_common_8becd78c7e1e8452, []int{2, 0} +} + +// A Reducer describes how to aggregate data points from multiple +// time series into a single time series. +type Aggregation_Reducer int32 + +const ( + // No cross-time series reduction. The output of the aligner is + // returned. + Aggregation_REDUCE_NONE Aggregation_Reducer = 0 + // Reduce by computing the mean across time series for each + // alignment period. This reducer is valid for delta and + // gauge metrics with numeric or distribution values. The value type of the + // output is [DOUBLE][google.api.MetricDescriptor.ValueType.DOUBLE]. + Aggregation_REDUCE_MEAN Aggregation_Reducer = 1 + // Reduce by computing the minimum across time series for each + // alignment period. This reducer is valid for delta and + // gauge metrics with numeric values. The value type of the output + // is the same as the value type of the input. + Aggregation_REDUCE_MIN Aggregation_Reducer = 2 + // Reduce by computing the maximum across time series for each + // alignment period. This reducer is valid for delta and + // gauge metrics with numeric values. The value type of the output + // is the same as the value type of the input. + Aggregation_REDUCE_MAX Aggregation_Reducer = 3 + // Reduce by computing the sum across time series for each + // alignment period. This reducer is valid for delta and + // gauge metrics with numeric and distribution values. The value type of + // the output is the same as the value type of the input. + Aggregation_REDUCE_SUM Aggregation_Reducer = 4 + // Reduce by computing the standard deviation across time series + // for each alignment period. This reducer is valid for delta + // and gauge metrics with numeric or distribution values. 
The value type of + // the output is [DOUBLE][google.api.MetricDescriptor.ValueType.DOUBLE]. + Aggregation_REDUCE_STDDEV Aggregation_Reducer = 5 + // Reduce by computing the count of data points across time series + // for each alignment period. This reducer is valid for delta + // and gauge metrics of numeric, Boolean, distribution, and string value + // type. The value type of the output is + // [INT64][google.api.MetricDescriptor.ValueType.INT64]. + Aggregation_REDUCE_COUNT Aggregation_Reducer = 6 + // Reduce by computing the count of True-valued data points across time + // series for each alignment period. This reducer is valid for delta + // and gauge metrics of Boolean value type. The value type of + // the output is [INT64][google.api.MetricDescriptor.ValueType.INT64]. + Aggregation_REDUCE_COUNT_TRUE Aggregation_Reducer = 7 + // Reduce by computing the count of False-valued data points across time + // series for each alignment period. This reducer is valid for delta + // and gauge metrics of Boolean value type. The value type of + // the output is [INT64][google.api.MetricDescriptor.ValueType.INT64]. + Aggregation_REDUCE_COUNT_FALSE Aggregation_Reducer = 15 + // Reduce by computing the fraction of True-valued data points across time + // series for each alignment period. This reducer is valid for delta + // and gauge metrics of Boolean value type. The output value is in the + // range [0, 1] and has value type + // [DOUBLE][google.api.MetricDescriptor.ValueType.DOUBLE]. + Aggregation_REDUCE_FRACTION_TRUE Aggregation_Reducer = 8 + // Reduce by computing 99th percentile of data points across time series + // for each alignment period. This reducer is valid for gauge and delta + // metrics of numeric and distribution type. The value of the output is + // [DOUBLE][google.api.MetricDescriptor.ValueType.DOUBLE] + Aggregation_REDUCE_PERCENTILE_99 Aggregation_Reducer = 9 + // Reduce by computing 95th percentile of data points across time series + // for each alignment period. This reducer is valid for gauge and delta + // metrics of numeric and distribution type. The value of the output is + // [DOUBLE][google.api.MetricDescriptor.ValueType.DOUBLE] + Aggregation_REDUCE_PERCENTILE_95 Aggregation_Reducer = 10 + // Reduce by computing 50th percentile of data points across time series + // for each alignment period. This reducer is valid for gauge and delta + // metrics of numeric and distribution type. The value of the output is + // [DOUBLE][google.api.MetricDescriptor.ValueType.DOUBLE] + Aggregation_REDUCE_PERCENTILE_50 Aggregation_Reducer = 11 + // Reduce by computing 5th percentile of data points across time series + // for each alignment period. This reducer is valid for gauge and delta + // metrics of numeric and distribution type. 
The value of the output is + // [DOUBLE][google.api.MetricDescriptor.ValueType.DOUBLE] + Aggregation_REDUCE_PERCENTILE_05 Aggregation_Reducer = 12 +) + +var Aggregation_Reducer_name = map[int32]string{ + 0: "REDUCE_NONE", + 1: "REDUCE_MEAN", + 2: "REDUCE_MIN", + 3: "REDUCE_MAX", + 4: "REDUCE_SUM", + 5: "REDUCE_STDDEV", + 6: "REDUCE_COUNT", + 7: "REDUCE_COUNT_TRUE", + 15: "REDUCE_COUNT_FALSE", + 8: "REDUCE_FRACTION_TRUE", + 9: "REDUCE_PERCENTILE_99", + 10: "REDUCE_PERCENTILE_95", + 11: "REDUCE_PERCENTILE_50", + 12: "REDUCE_PERCENTILE_05", +} +var Aggregation_Reducer_value = map[string]int32{ + "REDUCE_NONE": 0, + "REDUCE_MEAN": 1, + "REDUCE_MIN": 2, + "REDUCE_MAX": 3, + "REDUCE_SUM": 4, + "REDUCE_STDDEV": 5, + "REDUCE_COUNT": 6, + "REDUCE_COUNT_TRUE": 7, + "REDUCE_COUNT_FALSE": 15, + "REDUCE_FRACTION_TRUE": 8, + "REDUCE_PERCENTILE_99": 9, + "REDUCE_PERCENTILE_95": 10, + "REDUCE_PERCENTILE_50": 11, + "REDUCE_PERCENTILE_05": 12, +} + +func (x Aggregation_Reducer) String() string { + return proto.EnumName(Aggregation_Reducer_name, int32(x)) +} +func (Aggregation_Reducer) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_common_8becd78c7e1e8452, []int{2, 1} +} + +// A single strongly-typed value. +type TypedValue struct { + // The typed value field. + // + // Types that are valid to be assigned to Value: + // *TypedValue_BoolValue + // *TypedValue_Int64Value + // *TypedValue_DoubleValue + // *TypedValue_StringValue + // *TypedValue_DistributionValue + Value isTypedValue_Value `protobuf_oneof:"value"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TypedValue) Reset() { *m = TypedValue{} } +func (m *TypedValue) String() string { return proto.CompactTextString(m) } +func (*TypedValue) ProtoMessage() {} +func (*TypedValue) Descriptor() ([]byte, []int) { + return fileDescriptor_common_8becd78c7e1e8452, []int{0} +} +func (m *TypedValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TypedValue.Unmarshal(m, b) +} +func (m *TypedValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TypedValue.Marshal(b, m, deterministic) +} +func (dst *TypedValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_TypedValue.Merge(dst, src) +} +func (m *TypedValue) XXX_Size() int { + return xxx_messageInfo_TypedValue.Size(m) +} +func (m *TypedValue) XXX_DiscardUnknown() { + xxx_messageInfo_TypedValue.DiscardUnknown(m) +} + +var xxx_messageInfo_TypedValue proto.InternalMessageInfo + +type isTypedValue_Value interface { + isTypedValue_Value() +} + +type TypedValue_BoolValue struct { + BoolValue bool `protobuf:"varint,1,opt,name=bool_value,json=boolValue,proto3,oneof"` +} + +type TypedValue_Int64Value struct { + Int64Value int64 `protobuf:"varint,2,opt,name=int64_value,json=int64Value,proto3,oneof"` +} + +type TypedValue_DoubleValue struct { + DoubleValue float64 `protobuf:"fixed64,3,opt,name=double_value,json=doubleValue,proto3,oneof"` +} + +type TypedValue_StringValue struct { + StringValue string `protobuf:"bytes,4,opt,name=string_value,json=stringValue,proto3,oneof"` +} + +type TypedValue_DistributionValue struct { + DistributionValue *distribution.Distribution `protobuf:"bytes,5,opt,name=distribution_value,json=distributionValue,proto3,oneof"` +} + +func (*TypedValue_BoolValue) isTypedValue_Value() {} + +func (*TypedValue_Int64Value) isTypedValue_Value() {} + +func (*TypedValue_DoubleValue) isTypedValue_Value() {} + +func (*TypedValue_StringValue) isTypedValue_Value() {} + +func 
(*TypedValue_DistributionValue) isTypedValue_Value() {} + +func (m *TypedValue) GetValue() isTypedValue_Value { + if m != nil { + return m.Value + } + return nil +} + +func (m *TypedValue) GetBoolValue() bool { + if x, ok := m.GetValue().(*TypedValue_BoolValue); ok { + return x.BoolValue + } + return false +} + +func (m *TypedValue) GetInt64Value() int64 { + if x, ok := m.GetValue().(*TypedValue_Int64Value); ok { + return x.Int64Value + } + return 0 +} + +func (m *TypedValue) GetDoubleValue() float64 { + if x, ok := m.GetValue().(*TypedValue_DoubleValue); ok { + return x.DoubleValue + } + return 0 +} + +func (m *TypedValue) GetStringValue() string { + if x, ok := m.GetValue().(*TypedValue_StringValue); ok { + return x.StringValue + } + return "" +} + +func (m *TypedValue) GetDistributionValue() *distribution.Distribution { + if x, ok := m.GetValue().(*TypedValue_DistributionValue); ok { + return x.DistributionValue + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*TypedValue) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _TypedValue_OneofMarshaler, _TypedValue_OneofUnmarshaler, _TypedValue_OneofSizer, []interface{}{ + (*TypedValue_BoolValue)(nil), + (*TypedValue_Int64Value)(nil), + (*TypedValue_DoubleValue)(nil), + (*TypedValue_StringValue)(nil), + (*TypedValue_DistributionValue)(nil), + } +} + +func _TypedValue_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*TypedValue) + // value + switch x := m.Value.(type) { + case *TypedValue_BoolValue: + t := uint64(0) + if x.BoolValue { + t = 1 + } + b.EncodeVarint(1<<3 | proto.WireVarint) + b.EncodeVarint(t) + case *TypedValue_Int64Value: + b.EncodeVarint(2<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.Int64Value)) + case *TypedValue_DoubleValue: + b.EncodeVarint(3<<3 | proto.WireFixed64) + b.EncodeFixed64(math.Float64bits(x.DoubleValue)) + case *TypedValue_StringValue: + b.EncodeVarint(4<<3 | proto.WireBytes) + b.EncodeStringBytes(x.StringValue) + case *TypedValue_DistributionValue: + b.EncodeVarint(5<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.DistributionValue); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("TypedValue.Value has unexpected type %T", x) + } + return nil +} + +func _TypedValue_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*TypedValue) + switch tag { + case 1: // value.bool_value + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Value = &TypedValue_BoolValue{x != 0} + return true, err + case 2: // value.int64_value + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Value = &TypedValue_Int64Value{int64(x)} + return true, err + case 3: // value.double_value + if wire != proto.WireFixed64 { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeFixed64() + m.Value = &TypedValue_DoubleValue{math.Float64frombits(x)} + return true, err + case 4: // value.string_value + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Value = &TypedValue_StringValue{x} + return true, err + case 5: // value.distribution_value + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(distribution.Distribution) + err := 
b.DecodeMessage(msg) + m.Value = &TypedValue_DistributionValue{msg} + return true, err + default: + return false, nil + } +} + +func _TypedValue_OneofSizer(msg proto.Message) (n int) { + m := msg.(*TypedValue) + // value + switch x := m.Value.(type) { + case *TypedValue_BoolValue: + n += 1 // tag and wire + n += 1 + case *TypedValue_Int64Value: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(x.Int64Value)) + case *TypedValue_DoubleValue: + n += 1 // tag and wire + n += 8 + case *TypedValue_StringValue: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(len(x.StringValue))) + n += len(x.StringValue) + case *TypedValue_DistributionValue: + s := proto.Size(x.DistributionValue) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// A time interval extending just after a start time through an end time. +// If the start time is the same as the end time, then the interval +// represents a single point in time. +type TimeInterval struct { + // Required. The end of the time interval. + EndTime *timestamp.Timestamp `protobuf:"bytes,2,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"` + // Optional. The beginning of the time interval. The default value + // for the start time is the end time. The start time must not be + // later than the end time. + StartTime *timestamp.Timestamp `protobuf:"bytes,1,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TimeInterval) Reset() { *m = TimeInterval{} } +func (m *TimeInterval) String() string { return proto.CompactTextString(m) } +func (*TimeInterval) ProtoMessage() {} +func (*TimeInterval) Descriptor() ([]byte, []int) { + return fileDescriptor_common_8becd78c7e1e8452, []int{1} +} +func (m *TimeInterval) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TimeInterval.Unmarshal(m, b) +} +func (m *TimeInterval) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TimeInterval.Marshal(b, m, deterministic) +} +func (dst *TimeInterval) XXX_Merge(src proto.Message) { + xxx_messageInfo_TimeInterval.Merge(dst, src) +} +func (m *TimeInterval) XXX_Size() int { + return xxx_messageInfo_TimeInterval.Size(m) +} +func (m *TimeInterval) XXX_DiscardUnknown() { + xxx_messageInfo_TimeInterval.DiscardUnknown(m) +} + +var xxx_messageInfo_TimeInterval proto.InternalMessageInfo + +func (m *TimeInterval) GetEndTime() *timestamp.Timestamp { + if m != nil { + return m.EndTime + } + return nil +} + +func (m *TimeInterval) GetStartTime() *timestamp.Timestamp { + if m != nil { + return m.StartTime + } + return nil +} + +// Describes how to combine multiple time series to provide different views of +// the data. Aggregation consists of an alignment step on individual time +// series (`alignment_period` and `per_series_aligner`) followed by an optional +// reduction step of the data across the aligned time series +// (`cross_series_reducer` and `group_by_fields`). For more details, see +// [Aggregation](/monitoring/api/learn_more#aggregation). +type Aggregation struct { + // The alignment period for per-[time series][google.monitoring.v3.TimeSeries] + // alignment. If present, `alignmentPeriod` must be at least 60 + // seconds. After per-time series alignment, each time series will + // contain data points only on the period boundaries. 
If + // `perSeriesAligner` is not specified or equals `ALIGN_NONE`, then + // this field is ignored. If `perSeriesAligner` is specified and + // does not equal `ALIGN_NONE`, then this field must be defined; + // otherwise an error is returned. + AlignmentPeriod *duration.Duration `protobuf:"bytes,1,opt,name=alignment_period,json=alignmentPeriod,proto3" json:"alignment_period,omitempty"` + // The approach to be used to align individual time series. Not all + // alignment functions may be applied to all time series, depending + // on the metric type and value type of the original time + // series. Alignment may change the metric type or the value type of + // the time series. + // + // Time series data must be aligned in order to perform cross-time + // series reduction. If `crossSeriesReducer` is specified, then + // `perSeriesAligner` must be specified and not equal `ALIGN_NONE` + // and `alignmentPeriod` must be specified; otherwise, an error is + // returned. + PerSeriesAligner Aggregation_Aligner `protobuf:"varint,2,opt,name=per_series_aligner,json=perSeriesAligner,proto3,enum=google.monitoring.v3.Aggregation_Aligner" json:"per_series_aligner,omitempty"` + // The approach to be used to combine time series. Not all reducer + // functions may be applied to all time series, depending on the + // metric type and the value type of the original time + // series. Reduction may change the metric type of value type of the + // time series. + // + // Time series data must be aligned in order to perform cross-time + // series reduction. If `crossSeriesReducer` is specified, then + // `perSeriesAligner` must be specified and not equal `ALIGN_NONE` + // and `alignmentPeriod` must be specified; otherwise, an error is + // returned. + CrossSeriesReducer Aggregation_Reducer `protobuf:"varint,4,opt,name=cross_series_reducer,json=crossSeriesReducer,proto3,enum=google.monitoring.v3.Aggregation_Reducer" json:"cross_series_reducer,omitempty"` + // The set of fields to preserve when `crossSeriesReducer` is + // specified. The `groupByFields` determine how the time series are + // partitioned into subsets prior to applying the aggregation + // function. Each subset contains time series that have the same + // value for each of the grouping fields. Each individual time + // series is a member of exactly one subset. The + // `crossSeriesReducer` is applied to each subset of time series. + // It is not possible to reduce across different resource types, so + // this field implicitly contains `resource.type`. Fields not + // specified in `groupByFields` are aggregated away. If + // `groupByFields` is not specified and all the time series have + // the same resource type, then the time series are aggregated into + // a single output time series. If `crossSeriesReducer` is not + // defined, this field is ignored. 
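+ // As an illustrative sketch using only fields of this message (the
+ // label path `resource.zone` is a typical example), summing the aligned
+ // rate of each time series per zone looks like:
+ //
+ //    agg := &Aggregation{
+ //        AlignmentPeriod:    &duration.Duration{Seconds: 60},
+ //        PerSeriesAligner:   Aggregation_ALIGN_RATE,
+ //        CrossSeriesReducer: Aggregation_REDUCE_SUM,
+ //        GroupByFields:      []string{"resource.zone"},
+ //    }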
+ GroupByFields []string `protobuf:"bytes,5,rep,name=group_by_fields,json=groupByFields,proto3" json:"group_by_fields,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Aggregation) Reset() { *m = Aggregation{} } +func (m *Aggregation) String() string { return proto.CompactTextString(m) } +func (*Aggregation) ProtoMessage() {} +func (*Aggregation) Descriptor() ([]byte, []int) { + return fileDescriptor_common_8becd78c7e1e8452, []int{2} +} +func (m *Aggregation) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Aggregation.Unmarshal(m, b) +} +func (m *Aggregation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Aggregation.Marshal(b, m, deterministic) +} +func (dst *Aggregation) XXX_Merge(src proto.Message) { + xxx_messageInfo_Aggregation.Merge(dst, src) +} +func (m *Aggregation) XXX_Size() int { + return xxx_messageInfo_Aggregation.Size(m) +} +func (m *Aggregation) XXX_DiscardUnknown() { + xxx_messageInfo_Aggregation.DiscardUnknown(m) +} + +var xxx_messageInfo_Aggregation proto.InternalMessageInfo + +func (m *Aggregation) GetAlignmentPeriod() *duration.Duration { + if m != nil { + return m.AlignmentPeriod + } + return nil +} + +func (m *Aggregation) GetPerSeriesAligner() Aggregation_Aligner { + if m != nil { + return m.PerSeriesAligner + } + return Aggregation_ALIGN_NONE +} + +func (m *Aggregation) GetCrossSeriesReducer() Aggregation_Reducer { + if m != nil { + return m.CrossSeriesReducer + } + return Aggregation_REDUCE_NONE +} + +func (m *Aggregation) GetGroupByFields() []string { + if m != nil { + return m.GroupByFields + } + return nil +} + +func init() { + proto.RegisterType((*TypedValue)(nil), "google.monitoring.v3.TypedValue") + proto.RegisterType((*TimeInterval)(nil), "google.monitoring.v3.TimeInterval") + proto.RegisterType((*Aggregation)(nil), "google.monitoring.v3.Aggregation") + proto.RegisterEnum("google.monitoring.v3.ComparisonType", ComparisonType_name, ComparisonType_value) + proto.RegisterEnum("google.monitoring.v3.ServiceTier", ServiceTier_name, ServiceTier_value) + proto.RegisterEnum("google.monitoring.v3.Aggregation_Aligner", Aggregation_Aligner_name, Aggregation_Aligner_value) + proto.RegisterEnum("google.monitoring.v3.Aggregation_Reducer", Aggregation_Reducer_name, Aggregation_Reducer_value) +} + +func init() { + proto.RegisterFile("google/monitoring/v3/common.proto", fileDescriptor_common_8becd78c7e1e8452) +} + +var fileDescriptor_common_8becd78c7e1e8452 = []byte{ + // 957 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x95, 0xc1, 0x6e, 0xe3, 0x44, + 0x18, 0xc7, 0xe3, 0x64, 0xdb, 0x34, 0x9f, 0xdb, 0x66, 0x3a, 0xdb, 0xed, 0x86, 0x68, 0x61, 0xb3, + 0x45, 0x42, 0x61, 0x0f, 0x4e, 0xd5, 0x12, 0xa4, 0x0a, 0x09, 0xc9, 0x75, 0xa6, 0xad, 0xa5, 0xc4, + 0x09, 0x13, 0xa7, 0x54, 0x50, 0xc9, 0x72, 0x9a, 0x59, 0xcb, 0x52, 0xe2, 0xb1, 0x6c, 0xa7, 0x52, + 0x6f, 0xdc, 0x79, 0x07, 0x2e, 0xdc, 0xb8, 0xf1, 0x1a, 0x3c, 0x0c, 0x17, 0x5e, 0x00, 0x79, 0xc6, + 0x59, 0x3b, 0x21, 0x08, 0x8e, 0xdf, 0xef, 0xff, 0xff, 0xbe, 0x99, 0xf9, 0x8f, 0x35, 0x86, 0x77, + 0x1e, 0xe7, 0xde, 0x9c, 0x75, 0x16, 0x3c, 0xf0, 0x13, 0x1e, 0xf9, 0x81, 0xd7, 0x79, 0xba, 0xe8, + 0x3c, 0xf2, 0xc5, 0x82, 0x07, 0x5a, 0x18, 0xf1, 0x84, 0xe3, 0x63, 0x69, 0xd1, 0x72, 0x8b, 0xf6, + 0x74, 0xd1, 0x7c, 0x93, 0x35, 0xba, 0xa1, 0xdf, 0x71, 0x83, 0x80, 0x27, 0x6e, 0xe2, 0xf3, 0x20, + 0x96, 0x3d, 0xcd, 0x4f, 0x0b, 0xea, 0xcc, 0x8f, 0x93, 0xc8, 
0x9f, 0x2e, 0x53, 0x3d, 0x93, 0x3f, + 0xcb, 0x64, 0x51, 0x4d, 0x97, 0x1f, 0x3a, 0xb3, 0x65, 0xe4, 0x16, 0xf4, 0xb7, 0x9b, 0x7a, 0xe2, + 0x2f, 0x58, 0x9c, 0xb8, 0x8b, 0x50, 0x1a, 0x4e, 0xff, 0x54, 0x00, 0xec, 0xe7, 0x90, 0xcd, 0xee, + 0xdc, 0xf9, 0x92, 0xe1, 0xb7, 0x00, 0x53, 0xce, 0xe7, 0xce, 0x53, 0x5a, 0x35, 0x94, 0x96, 0xd2, + 0xde, 0xbb, 0x2d, 0xd1, 0x5a, 0xca, 0xa4, 0xe1, 0x1d, 0xa8, 0x7e, 0x90, 0x7c, 0xfd, 0x55, 0xe6, + 0x28, 0xb7, 0x94, 0x76, 0xe5, 0xb6, 0x44, 0x41, 0x40, 0x69, 0xf9, 0x1c, 0xf6, 0x67, 0x7c, 0x39, + 0x9d, 0xb3, 0xcc, 0x53, 0x69, 0x29, 0x6d, 0xe5, 0xb6, 0x44, 0x55, 0x49, 0x3f, 0x9a, 0xd2, 0xc3, + 0x04, 0x5e, 0x66, 0x7a, 0xd1, 0x52, 0xda, 0xb5, 0xd4, 0x24, 0xa9, 0x34, 0x99, 0x80, 0x8b, 0x67, + 0xce, 0xac, 0x3b, 0x2d, 0xa5, 0xad, 0x9e, 0x37, 0xb4, 0x2c, 0x4d, 0x37, 0xf4, 0xb5, 0x5e, 0xc1, + 0x75, 0x5b, 0xa2, 0x47, 0xc5, 0x2e, 0x31, 0xea, 0xaa, 0x0a, 0x3b, 0xa2, 0xfb, 0xf4, 0x27, 0x05, + 0xf6, 0x6d, 0x7f, 0xc1, 0xcc, 0x20, 0x61, 0xd1, 0x93, 0x3b, 0xc7, 0x5d, 0xd8, 0x63, 0xc1, 0xcc, + 0x49, 0x83, 0x11, 0xc7, 0x51, 0xcf, 0x9b, 0xab, 0xd1, 0xab, 0xd4, 0x34, 0x7b, 0x95, 0x1a, 0xad, + 0xb2, 0x60, 0x96, 0x56, 0xf8, 0x12, 0x20, 0x4e, 0xdc, 0x28, 0x91, 0x8d, 0xca, 0x7f, 0x36, 0xd6, + 0x84, 0x3b, 0xad, 0x4f, 0xff, 0xaa, 0x82, 0xaa, 0x7b, 0x5e, 0xc4, 0x3c, 0x71, 0x55, 0xb8, 0x07, + 0xc8, 0x9d, 0xfb, 0x5e, 0xb0, 0x60, 0x41, 0xe2, 0x84, 0x2c, 0xf2, 0xf9, 0x2c, 0x1b, 0xf8, 0xc9, + 0x3f, 0x06, 0xf6, 0xb2, 0xfb, 0xa5, 0xf5, 0x8f, 0x2d, 0x23, 0xd1, 0x81, 0xbf, 0x07, 0x1c, 0xb2, + 0xc8, 0x89, 0x59, 0xe4, 0xb3, 0xd8, 0x11, 0x2a, 0x8b, 0xc4, 0x89, 0x0e, 0xcf, 0xbf, 0xd4, 0xb6, + 0x7d, 0x7a, 0x5a, 0x61, 0x13, 0x9a, 0x2e, 0x1b, 0x28, 0x0a, 0x59, 0x34, 0x16, 0x33, 0x32, 0x82, + 0x7f, 0x84, 0xe3, 0xc7, 0x88, 0xc7, 0xf1, 0x6a, 0x74, 0xc4, 0x66, 0xcb, 0x47, 0x16, 0x89, 0x2b, + 0xfb, 0x5f, 0xa3, 0xa9, 0x6c, 0xa0, 0x58, 0x8c, 0x91, 0xc3, 0x33, 0x86, 0xbf, 0x80, 0xba, 0x17, + 0xf1, 0x65, 0xe8, 0x4c, 0x9f, 0x9d, 0x0f, 0x3e, 0x9b, 0xcf, 0xe2, 0xc6, 0x4e, 0xab, 0xd2, 0xae, + 0xd1, 0x03, 0x81, 0xaf, 0x9e, 0xaf, 0x05, 0x3c, 0xfd, 0xb9, 0x02, 0xd5, 0xd5, 0x86, 0x0e, 0x01, + 0xf4, 0xbe, 0x79, 0x63, 0x39, 0xd6, 0xd0, 0x22, 0xa8, 0x84, 0xeb, 0xa0, 0xca, 0xba, 0x47, 0xfa, + 0xb6, 0x8e, 0x94, 0xdc, 0x40, 0x75, 0x9b, 0xa0, 0x32, 0x7e, 0x05, 0x47, 0xb2, 0x36, 0x2d, 0x9b, + 0xd0, 0xd1, 0xb0, 0x9f, 0xe2, 0x0a, 0x3e, 0x06, 0x94, 0xcd, 0x21, 0xf7, 0xb6, 0x33, 0xec, 0xf7, + 0x08, 0x45, 0x2f, 0xf0, 0x01, 0xd4, 0x24, 0x1d, 0x98, 0x16, 0x82, 0x42, 0xa9, 0xdf, 0x23, 0x35, + 0x1f, 0x3d, 0x20, 0xba, 0x85, 0xf6, 0xf3, 0xb5, 0x8d, 0xe1, 0xc4, 0xb2, 0xd1, 0x41, 0xee, 0x1f, + 0x4f, 0x06, 0xe8, 0x10, 0x23, 0xd8, 0xcf, 0x4a, 0xbb, 0xd7, 0x23, 0x77, 0xa8, 0x9e, 0xaf, 0x2a, + 0x3a, 0x1c, 0x9b, 0x4e, 0x08, 0x42, 0xf9, 0x16, 0x25, 0xbd, 0xd6, 0xfb, 0x63, 0x82, 0x1a, 0xf8, + 0x35, 0xbc, 0x94, 0xf8, 0x9a, 0xea, 0x86, 0x6d, 0x0e, 0x2d, 0xe9, 0x3f, 0xca, 0x85, 0x11, 0xa1, + 0x06, 0xb1, 0x6c, 0xb3, 0x4f, 0x9c, 0xcb, 0x4b, 0x84, 0xb7, 0x0b, 0x5d, 0xf4, 0x72, 0xab, 0xd0, + 0x3d, 0x43, 0xc7, 0x5b, 0x85, 0xb3, 0x2e, 0x7a, 0x85, 0x1b, 0x70, 0xbc, 0x26, 0x38, 0xc6, 0xad, + 0x6e, 0xdd, 0x10, 0xf4, 0xfa, 0xf4, 0xf7, 0x32, 0x54, 0x57, 0x37, 0x58, 0x07, 0x95, 0x92, 0xde, + 0xc4, 0x20, 0x85, 0xeb, 0xc8, 0x80, 0xc8, 0x48, 0x5c, 0xc7, 0x0a, 0x98, 0x16, 0x2a, 0x17, 0x6b, + 0xfd, 0x1e, 0x55, 0x0a, 0x75, 0x9a, 0xd9, 0x0b, 0x7c, 0x04, 0x07, 0xab, 0x5a, 0x86, 0xb6, 0x93, + 0xc6, 0x98, 0x21, 0x99, 0xf3, 0x6e, 0x1a, 0x58, 0x91, 0xc8, 0x5c, 0xaa, 0xf8, 0x04, 0xf0, 0x1a, + 0x96, 0x41, 0xd6, 0xd3, 0xb3, 0x64, 0x7c, 0x3d, 0xc9, 0xbd, 0x82, 0xb2, 0x1e, 0x65, 
0xed, 0x5f, + 0x94, 0x2e, 0x82, 0xed, 0x4a, 0xf7, 0x0c, 0xa9, 0xdb, 0x95, 0xb3, 0x2e, 0xda, 0x7f, 0xff, 0x8b, + 0x02, 0x87, 0x06, 0x5f, 0x84, 0x6e, 0xe4, 0xc7, 0x3c, 0x48, 0xdf, 0x5c, 0xdc, 0x84, 0x13, 0x63, + 0x38, 0x18, 0xe9, 0xd4, 0x1c, 0x0f, 0x2d, 0x67, 0x62, 0x8d, 0x47, 0xc4, 0x30, 0xaf, 0x4d, 0xd2, + 0x43, 0xa5, 0x34, 0x84, 0x82, 0x76, 0x63, 0x23, 0x65, 0x13, 0xa5, 0x5f, 0xf6, 0x3a, 0xea, 0xdb, + 0xa8, 0xb2, 0x89, 0x88, 0x0c, 0xb4, 0x80, 0xc8, 0x77, 0x68, 0x67, 0x03, 0x59, 0x04, 0xed, 0xbe, + 0x77, 0x41, 0x1d, 0xb3, 0xe8, 0xc9, 0x7f, 0x64, 0xb6, 0xcf, 0x22, 0xfc, 0x06, 0x1a, 0x63, 0x42, + 0xef, 0x4c, 0x83, 0x38, 0xb6, 0x49, 0xe8, 0xc6, 0xf6, 0x4e, 0x00, 0xaf, 0xa9, 0x57, 0xfa, 0xd8, + 0x34, 0x90, 0x92, 0x9e, 0x7f, 0x8d, 0x8f, 0x28, 0x19, 0x98, 0x93, 0x01, 0x2a, 0x37, 0xcb, 0x0d, + 0xe5, 0xea, 0x57, 0x05, 0x1a, 0x8f, 0x7c, 0xb1, 0xf5, 0xc9, 0xb8, 0x52, 0x0d, 0xf1, 0xb3, 0x1c, + 0xa5, 0x4f, 0xdd, 0x48, 0xf9, 0xe1, 0xdb, 0xcc, 0xe4, 0xf1, 0xb9, 0x1b, 0x78, 0x1a, 0x8f, 0xbc, + 0x8e, 0xc7, 0x02, 0xf1, 0x10, 0x76, 0xa4, 0xe4, 0x86, 0x7e, 0xbc, 0xfe, 0xbf, 0xfd, 0x26, 0xaf, + 0x7e, 0x2b, 0x37, 0x6f, 0xe4, 0x00, 0x63, 0xce, 0x97, 0x33, 0x6d, 0x90, 0xaf, 0x75, 0x77, 0xf1, + 0xc7, 0x4a, 0x7c, 0x10, 0xe2, 0x43, 0x2e, 0x3e, 0xdc, 0x5d, 0x4c, 0x77, 0xc5, 0x22, 0x17, 0x7f, + 0x07, 0x00, 0x00, 0xff, 0xff, 0x8d, 0x78, 0xd9, 0x96, 0xd3, 0x07, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/dropped_labels.pb.go b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/dropped_labels.pb.go new file mode 100644 index 000000000..04fe18243 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/dropped_labels.pb.go @@ -0,0 +1,101 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/monitoring/v3/dropped_labels.proto + +package monitoring // import "google.golang.org/genproto/googleapis/monitoring/v3" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// A set of (label, value) pairs which were dropped during aggregation, attached +// to google.api.Distribution.Exemplars in google.api.Distribution values during +// aggregation. +// +// These values are used in combination with the label values that remain on the +// aggregated Distribution timeseries to construct the full label set for the +// exemplar values. The resulting full label set may be used to identify the +// specific task/job/instance (for example) which may be contributing to a +// long-tail, while allowing the storage savings of only storing aggregated +// distribution values for a large group. +// +// Note that there are no guarantees on ordering of the labels from +// exemplar-to-exemplar and from distribution-to-distribution in the same +// stream, and there may be duplicates. It is up to clients to resolve any +// ambiguities. +type DroppedLabels struct { + // Map from label to its value, for all labels dropped in any aggregation. 
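+ // For example, after an `instance_id` label is aggregated away, an
+ // exemplar might carry the illustrative entry `{"instance_id": "1234"}`.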
+ Label map[string]string `protobuf:"bytes,1,rep,name=label,proto3" json:"label,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DroppedLabels) Reset() { *m = DroppedLabels{} } +func (m *DroppedLabels) String() string { return proto.CompactTextString(m) } +func (*DroppedLabels) ProtoMessage() {} +func (*DroppedLabels) Descriptor() ([]byte, []int) { + return fileDescriptor_dropped_labels_bce6164464203600, []int{0} +} +func (m *DroppedLabels) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DroppedLabels.Unmarshal(m, b) +} +func (m *DroppedLabels) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DroppedLabels.Marshal(b, m, deterministic) +} +func (dst *DroppedLabels) XXX_Merge(src proto.Message) { + xxx_messageInfo_DroppedLabels.Merge(dst, src) +} +func (m *DroppedLabels) XXX_Size() int { + return xxx_messageInfo_DroppedLabels.Size(m) +} +func (m *DroppedLabels) XXX_DiscardUnknown() { + xxx_messageInfo_DroppedLabels.DiscardUnknown(m) +} + +var xxx_messageInfo_DroppedLabels proto.InternalMessageInfo + +func (m *DroppedLabels) GetLabel() map[string]string { + if m != nil { + return m.Label + } + return nil +} + +func init() { + proto.RegisterType((*DroppedLabels)(nil), "google.monitoring.v3.DroppedLabels") + proto.RegisterMapType((map[string]string)(nil), "google.monitoring.v3.DroppedLabels.LabelEntry") +} + +func init() { + proto.RegisterFile("google/monitoring/v3/dropped_labels.proto", fileDescriptor_dropped_labels_bce6164464203600) +} + +var fileDescriptor_dropped_labels_bce6164464203600 = []byte{ + // 219 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xd2, 0x4c, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0xcf, 0xcd, 0xcf, 0xcb, 0x2c, 0xc9, 0x2f, 0xca, 0xcc, 0x4b, 0xd7, 0x2f, 0x33, + 0xd6, 0x4f, 0x29, 0xca, 0x2f, 0x28, 0x48, 0x4d, 0x89, 0xcf, 0x49, 0x4c, 0x4a, 0xcd, 0x29, 0xd6, + 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x81, 0x28, 0xd5, 0x43, 0x28, 0xd5, 0x2b, 0x33, 0x96, + 0x92, 0x81, 0x1a, 0x90, 0x58, 0x90, 0xa9, 0x9f, 0x98, 0x97, 0x97, 0x5f, 0x92, 0x58, 0x92, 0x99, + 0x9f, 0x07, 0xd5, 0xa3, 0xd4, 0xcf, 0xc8, 0xc5, 0xeb, 0x02, 0x31, 0xcc, 0x07, 0x6c, 0x96, 0x90, + 0x0b, 0x17, 0x2b, 0xd8, 0x54, 0x09, 0x46, 0x05, 0x66, 0x0d, 0x6e, 0x23, 0x3d, 0x3d, 0x6c, 0xa6, + 0xea, 0xa1, 0xe8, 0xd1, 0x03, 0x53, 0xae, 0x79, 0x25, 0x45, 0x95, 0x41, 0x10, 0xcd, 0x52, 0x16, + 0x5c, 0x5c, 0x08, 0x41, 0x21, 0x01, 0x2e, 0xe6, 0xec, 0xd4, 0x4a, 0x09, 0x46, 0x05, 0x46, 0x0d, + 0xce, 0x20, 0x10, 0x53, 0x48, 0x84, 0x8b, 0xb5, 0x2c, 0x31, 0xa7, 0x34, 0x55, 0x82, 0x09, 0x2c, + 0x06, 0xe1, 0x58, 0x31, 0x59, 0x30, 0x3a, 0x39, 0x44, 0xd9, 0x41, 0x6d, 0x4c, 0xcf, 0xcf, 0x49, + 0xcc, 0x4b, 0xd7, 0xcb, 0x2f, 0x4a, 0xd7, 0x4f, 0x4f, 0xcd, 0x03, 0xbb, 0x57, 0x1f, 0x22, 0x95, + 0x58, 0x90, 0x59, 0x8c, 0x1a, 0x22, 0xd6, 0x08, 0x5e, 0x12, 0x1b, 0x58, 0xa9, 0x31, 0x20, 0x00, + 0x00, 0xff, 0xff, 0x7e, 0x29, 0xf8, 0x00, 0x3b, 0x01, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/group.pb.go b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/group.pb.go new file mode 100644 index 000000000..e4f6948a7 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/group.pb.go @@ -0,0 +1,156 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// source: google/monitoring/v3/group.proto + +package monitoring // import "google.golang.org/genproto/googleapis/monitoring/v3" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// The description of a dynamic collection of monitored resources. Each group +// has a filter that is matched against monitored resources and their associated +// metadata. If a group's filter matches an available monitored resource, then +// that resource is a member of that group. Groups can contain any number of +// monitored resources, and each monitored resource can be a member of any +// number of groups. +// +// Groups can be nested in parent-child hierarchies. The `parentName` field +// identifies an optional parent for each group. If a group has a parent, then +// the only monitored resources available to be matched by the group's filter +// are the resources contained in the parent group. In other words, a group +// contains the monitored resources that match its filter and the filters of all +// the group's ancestors. A group without a parent can contain any monitored +// resource. +// +// For example, consider an infrastructure running a set of instances with two +// user-defined tags: `"environment"` and `"role"`. A parent group has a filter, +// `environment="production"`. A child of that parent group has a filter, +// `role="transcoder"`. The parent group contains all instances in the +// production environment, regardless of their roles. The child group contains +// instances that have the transcoder role *and* are in the production +// environment. +// +// The monitored resources contained in a group can change at any moment, +// depending on what resources exist and what filters are associated with the +// group and its ancestors. +type Group struct { + // Output only. The name of this group. The format is + // `"projects/{project_id_or_number}/groups/{group_id}"`. + // When creating a group, this field is ignored and a new name is created + // consisting of the project specified in the call to `CreateGroup` + // and a unique `{group_id}` that is generated automatically. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // A user-assigned name for this group, used only for display purposes. + DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` + // The name of the group's parent, if it has one. + // The format is `"projects/{project_id_or_number}/groups/{group_id}"`. + // For groups with no parent, `parentName` is the empty string, `""`. + ParentName string `protobuf:"bytes,3,opt,name=parent_name,json=parentName,proto3" json:"parent_name,omitempty"` + // The filter used to determine which monitored resources belong to this group. + Filter string `protobuf:"bytes,5,opt,name=filter,proto3" json:"filter,omitempty"` + // If true, the members of this group are considered to be a cluster. + // The system can perform additional analysis on groups that are clusters. 
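+ //
+ // As an illustrative sketch, the production-environment group described
+ // above could be expressed as:
+ //
+ //    g := &Group{
+ //        DisplayName: "Production instances",
+ //        Filter:      `environment="production"`,
+ //        IsCluster:   false,
+ //    }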
+ IsCluster bool `protobuf:"varint,6,opt,name=is_cluster,json=isCluster,proto3" json:"is_cluster,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Group) Reset() { *m = Group{} } +func (m *Group) String() string { return proto.CompactTextString(m) } +func (*Group) ProtoMessage() {} +func (*Group) Descriptor() ([]byte, []int) { + return fileDescriptor_group_da6702aeb6854c5f, []int{0} +} +func (m *Group) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Group.Unmarshal(m, b) +} +func (m *Group) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Group.Marshal(b, m, deterministic) +} +func (dst *Group) XXX_Merge(src proto.Message) { + xxx_messageInfo_Group.Merge(dst, src) +} +func (m *Group) XXX_Size() int { + return xxx_messageInfo_Group.Size(m) +} +func (m *Group) XXX_DiscardUnknown() { + xxx_messageInfo_Group.DiscardUnknown(m) +} + +var xxx_messageInfo_Group proto.InternalMessageInfo + +func (m *Group) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Group) GetDisplayName() string { + if m != nil { + return m.DisplayName + } + return "" +} + +func (m *Group) GetParentName() string { + if m != nil { + return m.ParentName + } + return "" +} + +func (m *Group) GetFilter() string { + if m != nil { + return m.Filter + } + return "" +} + +func (m *Group) GetIsCluster() bool { + if m != nil { + return m.IsCluster + } + return false +} + +func init() { + proto.RegisterType((*Group)(nil), "google.monitoring.v3.Group") +} + +func init() { + proto.RegisterFile("google/monitoring/v3/group.proto", fileDescriptor_group_da6702aeb6854c5f) +} + +var fileDescriptor_group_da6702aeb6854c5f = []byte{ + // 261 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0xcf, 0x4a, 0x2b, 0x31, + 0x14, 0x87, 0x49, 0xef, 0xed, 0x60, 0x4f, 0x5d, 0x0d, 0x22, 0x83, 0x20, 0x8e, 0xae, 0xba, 0xca, + 0x2c, 0xb2, 0x14, 0x5c, 0xb4, 0x8b, 0xae, 0x94, 0xd2, 0x45, 0x17, 0x32, 0x50, 0x62, 0x1b, 0x43, + 0x20, 0x93, 0x13, 0x92, 0x99, 0x82, 0x2f, 0xe2, 0x03, 0xb8, 0xf4, 0x51, 0x7c, 0x2a, 0x99, 0x93, + 0x91, 0x41, 0x70, 0x97, 0xf3, 0xfb, 0x3e, 0x72, 0xfe, 0x40, 0xa9, 0x11, 0xb5, 0x55, 0x55, 0x83, + 0xce, 0xb4, 0x18, 0x8c, 0xd3, 0xd5, 0x49, 0x54, 0x3a, 0x60, 0xe7, 0xb9, 0x0f, 0xd8, 0x62, 0x7e, + 0x91, 0x0c, 0x3e, 0x1a, 0xfc, 0x24, 0xee, 0xde, 0x19, 0x4c, 0xd7, 0xbd, 0x95, 0xe7, 0xf0, 0xdf, + 0xc9, 0x46, 0x15, 0xac, 0x64, 0x8b, 0xd9, 0x96, 0xde, 0xf9, 0x2d, 0x9c, 0x1f, 0x4d, 0xf4, 0x56, + 0xbe, 0xed, 0x89, 0x4d, 0x88, 0xcd, 0x87, 0xec, 0xa9, 0x57, 0x6e, 0x60, 0xee, 0x65, 0x50, 0xae, + 0x4d, 0xc6, 0x3f, 0x32, 0x20, 0x45, 0x24, 0x5c, 0x42, 0xf6, 0x6a, 0x6c, 0xab, 0x42, 0x31, 0x25, + 0x36, 0x54, 0xf9, 0x35, 0x80, 0x89, 0xfb, 0x83, 0xed, 0x62, 0xcf, 0xb2, 0x92, 0x2d, 0xce, 0xb6, + 0x33, 0x13, 0x57, 0x29, 0x58, 0x7e, 0x30, 0x28, 0x0e, 0xd8, 0xf0, 0xbf, 0xa6, 0x5e, 0x02, 0x8d, + 0xbc, 0xe9, 0xf7, 0xda, 0xb0, 0xe7, 0x87, 0xc1, 0xd1, 0x68, 0xa5, 0xd3, 0x1c, 0x83, 0xae, 0xb4, + 0x72, 0xb4, 0x75, 0x95, 0x90, 0xf4, 0x26, 0xfe, 0x3e, 0xcd, 0xfd, 0x58, 0x7d, 0x4e, 0xae, 0xd6, + 0xe9, 0x83, 0x95, 0xc5, 0xee, 0xc8, 0x1f, 0xc7, 0x56, 0x3b, 0xf1, 0xf5, 0x03, 0x6b, 0x82, 0xf5, + 0x08, 0xeb, 0x9d, 0x78, 0xc9, 0xa8, 0x89, 0xf8, 0x0e, 0x00, 0x00, 0xff, 0xff, 0x95, 0xd1, 0xa1, + 0x34, 0x7e, 0x01, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/group_service.pb.go 
b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/group_service.pb.go new file mode 100644 index 000000000..d17da9a79 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/group_service.pb.go @@ -0,0 +1,941 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/monitoring/v3/group_service.proto + +package monitoring // import "google.golang.org/genproto/googleapis/monitoring/v3" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import empty "github.com/golang/protobuf/ptypes/empty" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import monitoredres "google.golang.org/genproto/googleapis/api/monitoredres" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// The `ListGroups` request. +type ListGroupsRequest struct { + // The project whose groups are to be listed. The format is + // `"projects/{project_id_or_number}"`. + Name string `protobuf:"bytes,7,opt,name=name,proto3" json:"name,omitempty"` + // An optional filter consisting of a single group name. The filters limit the + // groups returned based on their parent-child relationship with the specified + // group. If no filter is specified, all groups are returned. + // + // Types that are valid to be assigned to Filter: + // *ListGroupsRequest_ChildrenOfGroup + // *ListGroupsRequest_AncestorsOfGroup + // *ListGroupsRequest_DescendantsOfGroup + Filter isListGroupsRequest_Filter `protobuf_oneof:"filter"` + // A positive number that is the maximum number of results to return. + PageSize int32 `protobuf:"varint,5,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // If this field is not empty then it must contain the `nextPageToken` value + // returned by a previous call to this method. Using this field causes the + // method to return additional results from the previous method call.
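+ //
+ // A minimal pagination sketch (editor's illustration, not part of the
+ // generated API; assumes a GroupServiceClient named `client`, a valid
+ // `ctx`, and hypothetical project and parent-group names):
+ //
+ //    req := &ListGroupsRequest{
+ //        Name:     "projects/my-project",
+ //        Filter:   &ListGroupsRequest_ChildrenOfGroup{ChildrenOfGroup: "projects/my-project/groups/parent"},
+ //        PageSize: 100,
+ //    }
+ //    for {
+ //        resp, err := client.ListGroups(ctx, req)
+ //        if err != nil {
+ //            return err // assumes an enclosing function that returns error
+ //        }
+ //        for _, g := range resp.Group {
+ //            fmt.Println(g.Name)
+ //        }
+ //        if resp.NextPageToken == "" {
+ //            break
+ //        }
+ //        req.PageToken = resp.NextPageToken
+ //    }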
+ PageToken string `protobuf:"bytes,6,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListGroupsRequest) Reset() { *m = ListGroupsRequest{} } +func (m *ListGroupsRequest) String() string { return proto.CompactTextString(m) } +func (*ListGroupsRequest) ProtoMessage() {} +func (*ListGroupsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_group_service_c051ad810375e8ee, []int{0} +} +func (m *ListGroupsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListGroupsRequest.Unmarshal(m, b) +} +func (m *ListGroupsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListGroupsRequest.Marshal(b, m, deterministic) +} +func (dst *ListGroupsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListGroupsRequest.Merge(dst, src) +} +func (m *ListGroupsRequest) XXX_Size() int { + return xxx_messageInfo_ListGroupsRequest.Size(m) +} +func (m *ListGroupsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListGroupsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListGroupsRequest proto.InternalMessageInfo + +func (m *ListGroupsRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +type isListGroupsRequest_Filter interface { + isListGroupsRequest_Filter() +} + +type ListGroupsRequest_ChildrenOfGroup struct { + ChildrenOfGroup string `protobuf:"bytes,2,opt,name=children_of_group,json=childrenOfGroup,proto3,oneof"` +} + +type ListGroupsRequest_AncestorsOfGroup struct { + AncestorsOfGroup string `protobuf:"bytes,3,opt,name=ancestors_of_group,json=ancestorsOfGroup,proto3,oneof"` +} + +type ListGroupsRequest_DescendantsOfGroup struct { + DescendantsOfGroup string `protobuf:"bytes,4,opt,name=descendants_of_group,json=descendantsOfGroup,proto3,oneof"` +} + +func (*ListGroupsRequest_ChildrenOfGroup) isListGroupsRequest_Filter() {} + +func (*ListGroupsRequest_AncestorsOfGroup) isListGroupsRequest_Filter() {} + +func (*ListGroupsRequest_DescendantsOfGroup) isListGroupsRequest_Filter() {} + +func (m *ListGroupsRequest) GetFilter() isListGroupsRequest_Filter { + if m != nil { + return m.Filter + } + return nil +} + +func (m *ListGroupsRequest) GetChildrenOfGroup() string { + if x, ok := m.GetFilter().(*ListGroupsRequest_ChildrenOfGroup); ok { + return x.ChildrenOfGroup + } + return "" +} + +func (m *ListGroupsRequest) GetAncestorsOfGroup() string { + if x, ok := m.GetFilter().(*ListGroupsRequest_AncestorsOfGroup); ok { + return x.AncestorsOfGroup + } + return "" +} + +func (m *ListGroupsRequest) GetDescendantsOfGroup() string { + if x, ok := m.GetFilter().(*ListGroupsRequest_DescendantsOfGroup); ok { + return x.DescendantsOfGroup + } + return "" +} + +func (m *ListGroupsRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListGroupsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*ListGroupsRequest) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _ListGroupsRequest_OneofMarshaler, _ListGroupsRequest_OneofUnmarshaler, _ListGroupsRequest_OneofSizer, []interface{}{ + (*ListGroupsRequest_ChildrenOfGroup)(nil), + (*ListGroupsRequest_AncestorsOfGroup)(nil), + (*ListGroupsRequest_DescendantsOfGroup)(nil), + } +} + +func _ListGroupsRequest_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*ListGroupsRequest) + // filter + switch x := m.Filter.(type) { + case *ListGroupsRequest_ChildrenOfGroup: + b.EncodeVarint(2<<3 | proto.WireBytes) + b.EncodeStringBytes(x.ChildrenOfGroup) + case *ListGroupsRequest_AncestorsOfGroup: + b.EncodeVarint(3<<3 | proto.WireBytes) + b.EncodeStringBytes(x.AncestorsOfGroup) + case *ListGroupsRequest_DescendantsOfGroup: + b.EncodeVarint(4<<3 | proto.WireBytes) + b.EncodeStringBytes(x.DescendantsOfGroup) + case nil: + default: + return fmt.Errorf("ListGroupsRequest.Filter has unexpected type %T", x) + } + return nil +} + +func _ListGroupsRequest_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*ListGroupsRequest) + switch tag { + case 2: // filter.children_of_group + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Filter = &ListGroupsRequest_ChildrenOfGroup{x} + return true, err + case 3: // filter.ancestors_of_group + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Filter = &ListGroupsRequest_AncestorsOfGroup{x} + return true, err + case 4: // filter.descendants_of_group + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Filter = &ListGroupsRequest_DescendantsOfGroup{x} + return true, err + default: + return false, nil + } +} + +func _ListGroupsRequest_OneofSizer(msg proto.Message) (n int) { + m := msg.(*ListGroupsRequest) + // filter + switch x := m.Filter.(type) { + case *ListGroupsRequest_ChildrenOfGroup: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(len(x.ChildrenOfGroup))) + n += len(x.ChildrenOfGroup) + case *ListGroupsRequest_AncestorsOfGroup: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(len(x.AncestorsOfGroup))) + n += len(x.AncestorsOfGroup) + case *ListGroupsRequest_DescendantsOfGroup: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(len(x.DescendantsOfGroup))) + n += len(x.DescendantsOfGroup) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// The `ListGroups` response. +type ListGroupsResponse struct { + // The groups that match the specified filters. + Group []*Group `protobuf:"bytes,1,rep,name=group,proto3" json:"group,omitempty"` + // If there are more results than have been returned, then this field is set + // to a non-empty value. To see the additional results, + // use that value as `pageToken` in the next call to this method. 
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListGroupsResponse) Reset() { *m = ListGroupsResponse{} } +func (m *ListGroupsResponse) String() string { return proto.CompactTextString(m) } +func (*ListGroupsResponse) ProtoMessage() {} +func (*ListGroupsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_group_service_c051ad810375e8ee, []int{1} +} +func (m *ListGroupsResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListGroupsResponse.Unmarshal(m, b) +} +func (m *ListGroupsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListGroupsResponse.Marshal(b, m, deterministic) +} +func (dst *ListGroupsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListGroupsResponse.Merge(dst, src) +} +func (m *ListGroupsResponse) XXX_Size() int { + return xxx_messageInfo_ListGroupsResponse.Size(m) +} +func (m *ListGroupsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListGroupsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListGroupsResponse proto.InternalMessageInfo + +func (m *ListGroupsResponse) GetGroup() []*Group { + if m != nil { + return m.Group + } + return nil +} + +func (m *ListGroupsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +// The `GetGroup` request. +type GetGroupRequest struct { + // The group to retrieve. The format is + // `"projects/{project_id_or_number}/groups/{group_id}"`. + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetGroupRequest) Reset() { *m = GetGroupRequest{} } +func (m *GetGroupRequest) String() string { return proto.CompactTextString(m) } +func (*GetGroupRequest) ProtoMessage() {} +func (*GetGroupRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_group_service_c051ad810375e8ee, []int{2} +} +func (m *GetGroupRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetGroupRequest.Unmarshal(m, b) +} +func (m *GetGroupRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetGroupRequest.Marshal(b, m, deterministic) +} +func (dst *GetGroupRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetGroupRequest.Merge(dst, src) +} +func (m *GetGroupRequest) XXX_Size() int { + return xxx_messageInfo_GetGroupRequest.Size(m) +} +func (m *GetGroupRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetGroupRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetGroupRequest proto.InternalMessageInfo + +func (m *GetGroupRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// The `CreateGroup` request. +type CreateGroupRequest struct { + // The project in which to create the group. The format is + // `"projects/{project_id_or_number}"`. + Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` + // A group definition. It is an error to define the `name` field because + // the system assigns the name. + Group *Group `protobuf:"bytes,2,opt,name=group,proto3" json:"group,omitempty"` + // If true, validate this request but do not create the group. 
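+ //
+ // A validate-only create, sketched by the editor (illustrative; `client`
+ // is a GroupServiceClient, the project name is hypothetical, and the
+ // filter reuses the `environment="production"` example from the Group
+ // documentation above):
+ //
+ //    req := &CreateGroupRequest{
+ //        Name:         "projects/my-project",
+ //        Group:        &Group{DisplayName: "Production", Filter: `environment="production"`},
+ //        ValidateOnly: true, // the server checks the definition but stores nothing
+ //    }
+ //    if _, err := client.CreateGroup(ctx, req); err != nil {
+ //        // the group definition failed validation
+ //    }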
+ ValidateOnly bool `protobuf:"varint,3,opt,name=validate_only,json=validateOnly,proto3" json:"validate_only,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateGroupRequest) Reset() { *m = CreateGroupRequest{} } +func (m *CreateGroupRequest) String() string { return proto.CompactTextString(m) } +func (*CreateGroupRequest) ProtoMessage() {} +func (*CreateGroupRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_group_service_c051ad810375e8ee, []int{3} +} +func (m *CreateGroupRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateGroupRequest.Unmarshal(m, b) +} +func (m *CreateGroupRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateGroupRequest.Marshal(b, m, deterministic) +} +func (dst *CreateGroupRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateGroupRequest.Merge(dst, src) +} +func (m *CreateGroupRequest) XXX_Size() int { + return xxx_messageInfo_CreateGroupRequest.Size(m) +} +func (m *CreateGroupRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CreateGroupRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateGroupRequest proto.InternalMessageInfo + +func (m *CreateGroupRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *CreateGroupRequest) GetGroup() *Group { + if m != nil { + return m.Group + } + return nil +} + +func (m *CreateGroupRequest) GetValidateOnly() bool { + if m != nil { + return m.ValidateOnly + } + return false +} + +// The `UpdateGroup` request. +type UpdateGroupRequest struct { + // The new definition of the group. All fields of the existing group, + // excepting `name`, are replaced with the corresponding fields of this group. + Group *Group `protobuf:"bytes,2,opt,name=group,proto3" json:"group,omitempty"` + // If true, validate this request but do not update the existing group. + ValidateOnly bool `protobuf:"varint,3,opt,name=validate_only,json=validateOnly,proto3" json:"validate_only,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpdateGroupRequest) Reset() { *m = UpdateGroupRequest{} } +func (m *UpdateGroupRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateGroupRequest) ProtoMessage() {} +func (*UpdateGroupRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_group_service_c051ad810375e8ee, []int{4} +} +func (m *UpdateGroupRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UpdateGroupRequest.Unmarshal(m, b) +} +func (m *UpdateGroupRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UpdateGroupRequest.Marshal(b, m, deterministic) +} +func (dst *UpdateGroupRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdateGroupRequest.Merge(dst, src) +} +func (m *UpdateGroupRequest) XXX_Size() int { + return xxx_messageInfo_UpdateGroupRequest.Size(m) +} +func (m *UpdateGroupRequest) XXX_DiscardUnknown() { + xxx_messageInfo_UpdateGroupRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_UpdateGroupRequest proto.InternalMessageInfo + +func (m *UpdateGroupRequest) GetGroup() *Group { + if m != nil { + return m.Group + } + return nil +} + +func (m *UpdateGroupRequest) GetValidateOnly() bool { + if m != nil { + return m.ValidateOnly + } + return false +} + +// The `DeleteGroup` request. You can only delete a group if it has no children. 
+type DeleteGroupRequest struct { + // The group to delete. The format is + // `"projects/{project_id_or_number}/groups/{group_id}"`. + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteGroupRequest) Reset() { *m = DeleteGroupRequest{} } +func (m *DeleteGroupRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteGroupRequest) ProtoMessage() {} +func (*DeleteGroupRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_group_service_c051ad810375e8ee, []int{5} +} +func (m *DeleteGroupRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteGroupRequest.Unmarshal(m, b) +} +func (m *DeleteGroupRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteGroupRequest.Marshal(b, m, deterministic) +} +func (dst *DeleteGroupRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteGroupRequest.Merge(dst, src) +} +func (m *DeleteGroupRequest) XXX_Size() int { + return xxx_messageInfo_DeleteGroupRequest.Size(m) +} +func (m *DeleteGroupRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteGroupRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteGroupRequest proto.InternalMessageInfo + +func (m *DeleteGroupRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// The `ListGroupMembers` request. +type ListGroupMembersRequest struct { + // The group whose members are listed. The format is + // `"projects/{project_id_or_number}/groups/{group_id}"`. + Name string `protobuf:"bytes,7,opt,name=name,proto3" json:"name,omitempty"` + // A positive number that is the maximum number of results to return. + PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // If this field is not empty then it must contain the `nextPageToken` value + // returned by a previous call to this method. Using this field causes the + // method to return additional results from the previous method call. + PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + // An optional [list filter](/monitoring/api/learn_more#filtering) describing + // the members to be returned. The filter may reference the type, labels, and + // metadata of monitored resources that comprise the group. + // For example, to return only resources representing Compute Engine VM + // instances, use this filter: + // + // resource.type = "gce_instance" + Filter string `protobuf:"bytes,5,opt,name=filter,proto3" json:"filter,omitempty"` + // An optional time interval for which results should be returned. Only + // members that were part of the group during the specified interval are + // included in the response. If no interval is provided then the group + // membership over the last minute is returned. 
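+ //
+ // Sketch of an explicit one-hour interval (editor's illustration; assumes
+ // the github.com/golang/protobuf/ptypes and ptypes/timestamp packages are
+ // imported, and that `req` is a *ListGroupMembersRequest):
+ //
+ //    now := ptypes.TimestampNow()
+ //    req.Interval = &TimeInterval{
+ //        StartTime: &timestamp.Timestamp{Seconds: now.Seconds - 3600},
+ //        EndTime:   now,
+ //    }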
+ Interval *TimeInterval `protobuf:"bytes,6,opt,name=interval,proto3" json:"interval,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListGroupMembersRequest) Reset() { *m = ListGroupMembersRequest{} } +func (m *ListGroupMembersRequest) String() string { return proto.CompactTextString(m) } +func (*ListGroupMembersRequest) ProtoMessage() {} +func (*ListGroupMembersRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_group_service_c051ad810375e8ee, []int{6} +} +func (m *ListGroupMembersRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListGroupMembersRequest.Unmarshal(m, b) +} +func (m *ListGroupMembersRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListGroupMembersRequest.Marshal(b, m, deterministic) +} +func (dst *ListGroupMembersRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListGroupMembersRequest.Merge(dst, src) +} +func (m *ListGroupMembersRequest) XXX_Size() int { + return xxx_messageInfo_ListGroupMembersRequest.Size(m) +} +func (m *ListGroupMembersRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListGroupMembersRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListGroupMembersRequest proto.InternalMessageInfo + +func (m *ListGroupMembersRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *ListGroupMembersRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListGroupMembersRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +func (m *ListGroupMembersRequest) GetFilter() string { + if m != nil { + return m.Filter + } + return "" +} + +func (m *ListGroupMembersRequest) GetInterval() *TimeInterval { + if m != nil { + return m.Interval + } + return nil +} + +// The `ListGroupMembers` response. +type ListGroupMembersResponse struct { + // A set of monitored resources in the group. + Members []*monitoredres.MonitoredResource `protobuf:"bytes,1,rep,name=members,proto3" json:"members,omitempty"` + // If there are more results than have been returned, then this field is + // set to a non-empty value. To see the additional results, use that value as + // `pageToken` in the next call to this method. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + // The total number of elements matching this request. 
+ TotalSize int32 `protobuf:"varint,3,opt,name=total_size,json=totalSize,proto3" json:"total_size,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListGroupMembersResponse) Reset() { *m = ListGroupMembersResponse{} } +func (m *ListGroupMembersResponse) String() string { return proto.CompactTextString(m) } +func (*ListGroupMembersResponse) ProtoMessage() {} +func (*ListGroupMembersResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_group_service_c051ad810375e8ee, []int{7} +} +func (m *ListGroupMembersResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListGroupMembersResponse.Unmarshal(m, b) +} +func (m *ListGroupMembersResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListGroupMembersResponse.Marshal(b, m, deterministic) +} +func (dst *ListGroupMembersResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListGroupMembersResponse.Merge(dst, src) +} +func (m *ListGroupMembersResponse) XXX_Size() int { + return xxx_messageInfo_ListGroupMembersResponse.Size(m) +} +func (m *ListGroupMembersResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListGroupMembersResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListGroupMembersResponse proto.InternalMessageInfo + +func (m *ListGroupMembersResponse) GetMembers() []*monitoredres.MonitoredResource { + if m != nil { + return m.Members + } + return nil +} + +func (m *ListGroupMembersResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +func (m *ListGroupMembersResponse) GetTotalSize() int32 { + if m != nil { + return m.TotalSize + } + return 0 +} + +func init() { + proto.RegisterType((*ListGroupsRequest)(nil), "google.monitoring.v3.ListGroupsRequest") + proto.RegisterType((*ListGroupsResponse)(nil), "google.monitoring.v3.ListGroupsResponse") + proto.RegisterType((*GetGroupRequest)(nil), "google.monitoring.v3.GetGroupRequest") + proto.RegisterType((*CreateGroupRequest)(nil), "google.monitoring.v3.CreateGroupRequest") + proto.RegisterType((*UpdateGroupRequest)(nil), "google.monitoring.v3.UpdateGroupRequest") + proto.RegisterType((*DeleteGroupRequest)(nil), "google.monitoring.v3.DeleteGroupRequest") + proto.RegisterType((*ListGroupMembersRequest)(nil), "google.monitoring.v3.ListGroupMembersRequest") + proto.RegisterType((*ListGroupMembersResponse)(nil), "google.monitoring.v3.ListGroupMembersResponse") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// GroupServiceClient is the client API for GroupService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type GroupServiceClient interface { + // Lists the existing groups. + ListGroups(ctx context.Context, in *ListGroupsRequest, opts ...grpc.CallOption) (*ListGroupsResponse, error) + // Gets a single group. + GetGroup(ctx context.Context, in *GetGroupRequest, opts ...grpc.CallOption) (*Group, error) + // Creates a new group. + CreateGroup(ctx context.Context, in *CreateGroupRequest, opts ...grpc.CallOption) (*Group, error) + // Updates an existing group. + // You can change any group attributes except `name`. 
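+ //
+ // Minimal client usage, as an editor's sketch (assumes an already-dialed
+ // *grpc.ClientConn `conn`, a valid `ctx`, an enclosing function returning
+ // error, and hypothetical resource names):
+ //
+ //    client := NewGroupServiceClient(conn)
+ //    g, err := client.GetGroup(ctx, &GetGroupRequest{
+ //        Name: "projects/my-project/groups/my-group",
+ //    })
+ //    if err != nil {
+ //        return err
+ //    }
+ //    g.DisplayName = "Renamed group" // `name` itself cannot change
+ //    if _, err := client.UpdateGroup(ctx, &UpdateGroupRequest{Group: g}); err != nil {
+ //        return err
+ //    }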
+ UpdateGroup(ctx context.Context, in *UpdateGroupRequest, opts ...grpc.CallOption) (*Group, error) + // Deletes an existing group. + DeleteGroup(ctx context.Context, in *DeleteGroupRequest, opts ...grpc.CallOption) (*empty.Empty, error) + // Lists the monitored resources that are members of a group. + ListGroupMembers(ctx context.Context, in *ListGroupMembersRequest, opts ...grpc.CallOption) (*ListGroupMembersResponse, error) +} + +type groupServiceClient struct { + cc *grpc.ClientConn +} + +func NewGroupServiceClient(cc *grpc.ClientConn) GroupServiceClient { + return &groupServiceClient{cc} +} + +func (c *groupServiceClient) ListGroups(ctx context.Context, in *ListGroupsRequest, opts ...grpc.CallOption) (*ListGroupsResponse, error) { + out := new(ListGroupsResponse) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.GroupService/ListGroups", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *groupServiceClient) GetGroup(ctx context.Context, in *GetGroupRequest, opts ...grpc.CallOption) (*Group, error) { + out := new(Group) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.GroupService/GetGroup", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *groupServiceClient) CreateGroup(ctx context.Context, in *CreateGroupRequest, opts ...grpc.CallOption) (*Group, error) { + out := new(Group) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.GroupService/CreateGroup", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *groupServiceClient) UpdateGroup(ctx context.Context, in *UpdateGroupRequest, opts ...grpc.CallOption) (*Group, error) { + out := new(Group) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.GroupService/UpdateGroup", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *groupServiceClient) DeleteGroup(ctx context.Context, in *DeleteGroupRequest, opts ...grpc.CallOption) (*empty.Empty, error) { + out := new(empty.Empty) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.GroupService/DeleteGroup", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *groupServiceClient) ListGroupMembers(ctx context.Context, in *ListGroupMembersRequest, opts ...grpc.CallOption) (*ListGroupMembersResponse, error) { + out := new(ListGroupMembersResponse) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.GroupService/ListGroupMembers", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// GroupServiceServer is the server API for GroupService service. +type GroupServiceServer interface { + // Lists the existing groups. + ListGroups(context.Context, *ListGroupsRequest) (*ListGroupsResponse, error) + // Gets a single group. + GetGroup(context.Context, *GetGroupRequest) (*Group, error) + // Creates a new group. + CreateGroup(context.Context, *CreateGroupRequest) (*Group, error) + // Updates an existing group. + // You can change any group attributes except `name`. + UpdateGroup(context.Context, *UpdateGroupRequest) (*Group, error) + // Deletes an existing group. + DeleteGroup(context.Context, *DeleteGroupRequest) (*empty.Empty, error) + // Lists the monitored resources that are members of a group. 
+ ListGroupMembers(context.Context, *ListGroupMembersRequest) (*ListGroupMembersResponse, error) +} + +func RegisterGroupServiceServer(s *grpc.Server, srv GroupServiceServer) { + s.RegisterService(&_GroupService_serviceDesc, srv) +} + +func _GroupService_ListGroups_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListGroupsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(GroupServiceServer).ListGroups(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.GroupService/ListGroups", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GroupServiceServer).ListGroups(ctx, req.(*ListGroupsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _GroupService_GetGroup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetGroupRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(GroupServiceServer).GetGroup(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.GroupService/GetGroup", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GroupServiceServer).GetGroup(ctx, req.(*GetGroupRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _GroupService_CreateGroup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateGroupRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(GroupServiceServer).CreateGroup(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.GroupService/CreateGroup", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GroupServiceServer).CreateGroup(ctx, req.(*CreateGroupRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _GroupService_UpdateGroup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateGroupRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(GroupServiceServer).UpdateGroup(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.GroupService/UpdateGroup", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GroupServiceServer).UpdateGroup(ctx, req.(*UpdateGroupRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _GroupService_DeleteGroup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteGroupRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(GroupServiceServer).DeleteGroup(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.GroupService/DeleteGroup", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GroupServiceServer).DeleteGroup(ctx, req.(*DeleteGroupRequest)) + } + return interceptor(ctx, in, info, 
handler) +} + +func _GroupService_ListGroupMembers_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListGroupMembersRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(GroupServiceServer).ListGroupMembers(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.GroupService/ListGroupMembers", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GroupServiceServer).ListGroupMembers(ctx, req.(*ListGroupMembersRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _GroupService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.monitoring.v3.GroupService", + HandlerType: (*GroupServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "ListGroups", + Handler: _GroupService_ListGroups_Handler, + }, + { + MethodName: "GetGroup", + Handler: _GroupService_GetGroup_Handler, + }, + { + MethodName: "CreateGroup", + Handler: _GroupService_CreateGroup_Handler, + }, + { + MethodName: "UpdateGroup", + Handler: _GroupService_UpdateGroup_Handler, + }, + { + MethodName: "DeleteGroup", + Handler: _GroupService_DeleteGroup_Handler, + }, + { + MethodName: "ListGroupMembers", + Handler: _GroupService_ListGroupMembers_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/monitoring/v3/group_service.proto", +} + +func init() { + proto.RegisterFile("google/monitoring/v3/group_service.proto", fileDescriptor_group_service_c051ad810375e8ee) +} + +var fileDescriptor_group_service_c051ad810375e8ee = []byte{ + // 826 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x55, 0x4d, 0x6f, 0xd3, 0x4c, + 0x10, 0x7e, 0xdd, 0xa4, 0x69, 0xb2, 0x69, 0xd5, 0x76, 0x55, 0xf5, 0x8d, 0xdc, 0x0f, 0x05, 0xf7, + 0x83, 0xa8, 0x50, 0x5b, 0x24, 0x07, 0x24, 0x10, 0x3d, 0xb4, 0xa0, 0x82, 0x44, 0xd5, 0xca, 0x2d, + 0x3d, 0xa0, 0x4a, 0x91, 0x9b, 0x4c, 0x8c, 0xc1, 0xde, 0x35, 0xf6, 0x26, 0xd0, 0xa2, 0x4a, 0x80, + 0xc4, 0x81, 0x33, 0x37, 0x6e, 0x1c, 0xe1, 0x2f, 0x70, 0xe2, 0xca, 0x95, 0xbf, 0xc0, 0xff, 0x00, + 0x79, 0xbd, 0x9b, 0x38, 0x9f, 0xed, 0x85, 0x5b, 0xb2, 0xf3, 0x8c, 0x9f, 0x67, 0x66, 0x9f, 0x99, + 0x45, 0x25, 0x9b, 0x52, 0xdb, 0x05, 0xc3, 0xa3, 0xc4, 0x61, 0x34, 0x70, 0x88, 0x6d, 0xb4, 0x2a, + 0x86, 0x1d, 0xd0, 0xa6, 0x5f, 0x0d, 0x21, 0x68, 0x39, 0x35, 0xd0, 0xfd, 0x80, 0x32, 0x8a, 0xe7, + 0x62, 0xa4, 0xde, 0x41, 0xea, 0xad, 0x8a, 0xba, 0x28, 0xf2, 0x2d, 0xdf, 0x31, 0x2c, 0x42, 0x28, + 0xb3, 0x98, 0x43, 0x49, 0x18, 0xe7, 0xa8, 0x2b, 0x89, 0xa8, 0xc8, 0x83, 0x7a, 0x35, 0x80, 0x90, + 0x36, 0x03, 0xf9, 0x61, 0xf5, 0xda, 0x40, 0x09, 0x35, 0xea, 0x79, 0x94, 0x08, 0x48, 0x71, 0xb8, + 0x4a, 0x81, 0x58, 0x10, 0x08, 0xfe, 0xef, 0xb4, 0xd9, 0x30, 0xc0, 0xf3, 0xd9, 0x59, 0x1c, 0xd4, + 0xfe, 0x28, 0x68, 0xf6, 0xb1, 0x13, 0xb2, 0xdd, 0x28, 0x21, 0x34, 0xe1, 0x65, 0x13, 0x42, 0x86, + 0x31, 0x4a, 0x13, 0xcb, 0x83, 0xc2, 0x44, 0x51, 0x29, 0xe5, 0x4c, 0xfe, 0x1b, 0xdf, 0x44, 0xb3, + 0xb5, 0x67, 0x8e, 0x5b, 0x0f, 0x80, 0x54, 0x69, 0xa3, 0xca, 0x19, 0x0a, 0x63, 0x11, 0xe0, 0xe1, + 0x7f, 0xe6, 0xb4, 0x0c, 0xed, 0x37, 0xf8, 0x97, 0xb0, 0x8e, 0xb0, 0x45, 0x6a, 0x10, 0x32, 0x1a, + 0x84, 0x1d, 0x78, 0x4a, 0xc0, 0x67, 0xda, 0x31, 0x89, 0x2f, 0xa3, 0xb9, 0x3a, 0x84, 0x35, 0x20, + 0x75, 0x8b, 0xb0, 0x44, 0x46, 0x5a, 0x64, 0xe0, 0x44, 0x54, 0xe6, 0x2c, 0xa0, 0x9c, 0x6f, 0xd9, + 0x50, 0x0d, 0x9d, 0x73, 0x28, 0x8c, 0x17, 0x95, 
0xd2, 0xb8, 0x99, 0x8d, 0x0e, 0x0e, 0x9d, 0x73, + 0xc0, 0x4b, 0x08, 0xf1, 0x20, 0xa3, 0x2f, 0x80, 0x14, 0x32, 0xbc, 0x10, 0x0e, 0x3f, 0x8a, 0x0e, + 0xb6, 0xb3, 0x28, 0xd3, 0x70, 0x5c, 0x06, 0x81, 0x46, 0x11, 0x4e, 0x36, 0x20, 0xf4, 0x29, 0x09, + 0x01, 0xdf, 0x42, 0xe3, 0xb1, 0x00, 0xa5, 0x98, 0x2a, 0xe5, 0xcb, 0x0b, 0xfa, 0xa0, 0x2b, 0xd6, + 0x79, 0x92, 0x19, 0x23, 0xf1, 0x3a, 0x9a, 0x26, 0xf0, 0x9a, 0x55, 0x13, 0xb4, 0xbc, 0x3d, 0xe6, + 0x54, 0x74, 0x7c, 0x20, 0xa9, 0xb5, 0x35, 0x34, 0xbd, 0x0b, 0x31, 0x5f, 0x6f, 0xbf, 0x53, 0x9d, + 0x7e, 0x6b, 0x6f, 0x15, 0x84, 0x77, 0x02, 0xb0, 0x18, 0x0c, 0x84, 0xa6, 0x13, 0x57, 0xd3, 0x16, + 0x1b, 0xf1, 0x5d, 0x4d, 0xec, 0x0a, 0x9a, 0x6a, 0x59, 0xae, 0x53, 0xb7, 0x18, 0x54, 0x29, 0x71, + 0xcf, 0x38, 0x75, 0xd6, 0x9c, 0x94, 0x87, 0xfb, 0xc4, 0x3d, 0xd3, 0x5c, 0x84, 0x9f, 0xf8, 0xf5, + 0x5e, 0x05, 0xff, 0x8a, 0xad, 0x84, 0xf0, 0x7d, 0x70, 0x61, 0x48, 0xbd, 0xc9, 0xd6, 0xfc, 0x50, + 0xd0, 0xff, 0xed, 0x3b, 0xdb, 0x03, 0xef, 0x14, 0x82, 0x91, 0xd6, 0xed, 0x32, 0x4a, 0x6a, 0xa4, + 0x51, 0xd2, 0x3d, 0x46, 0xc1, 0xf3, 0xd2, 0x28, 0xdc, 0x61, 0x39, 0x53, 0xfc, 0xc3, 0x5b, 0x28, + 0xeb, 0x10, 0x06, 0x41, 0xcb, 0x72, 0xb9, 0xbb, 0xf2, 0x65, 0x6d, 0x70, 0x23, 0x8e, 0x1c, 0x0f, + 0x1e, 0x09, 0xa4, 0xd9, 0xce, 0xd1, 0x3e, 0x2b, 0xa8, 0xd0, 0x5f, 0x83, 0x70, 0xdf, 0x6d, 0x34, + 0xe1, 0xc5, 0x47, 0xc2, 0x7f, 0x4b, 0xf2, 0xdb, 0x96, 0xef, 0xe8, 0x7b, 0x72, 0x5d, 0x98, 0x62, + 0x5b, 0x98, 0x12, 0x7d, 0x55, 0x0f, 0x46, 0x45, 0x33, 0xca, 0x2c, 0x37, 0xd9, 0x92, 0x1c, 0x3f, + 0x89, 0x7a, 0x52, 0xfe, 0x9e, 0x41, 0x93, 0x5c, 0xd8, 0x61, 0xbc, 0xe7, 0xf0, 0x07, 0x05, 0xa1, + 0xce, 0x94, 0xe0, 0xeb, 0x83, 0x4b, 0xed, 0x5b, 0x24, 0x6a, 0xe9, 0x72, 0x60, 0x5c, 0xb2, 0xb6, + 0xfa, 0xfe, 0xd7, 0xef, 0x4f, 0x63, 0xcb, 0x78, 0x31, 0x5a, 0x5f, 0x6f, 0xa2, 0x6b, 0xbb, 0xe7, + 0x07, 0xf4, 0x39, 0xd4, 0x58, 0x68, 0x6c, 0x5c, 0xc4, 0x0b, 0x2d, 0xc4, 0x2d, 0x94, 0x95, 0xb3, + 0x83, 0xd7, 0x86, 0x18, 0xaf, 0x7b, 0xb6, 0xd4, 0x51, 0xfe, 0xd4, 0xd6, 0x39, 0x6b, 0x11, 0x2f, + 0x0f, 0x62, 0x15, 0xa4, 0xc6, 0xc6, 0x05, 0x7e, 0xa7, 0xa0, 0x7c, 0x62, 0x18, 0xf1, 0x90, 0xba, + 0xfa, 0xe7, 0x75, 0x34, 0xfd, 0x0d, 0x4e, 0xbf, 0xa6, 0x8d, 0x2c, 0xfa, 0x8e, 0x18, 0xa2, 0x8f, + 0x0a, 0xca, 0x27, 0xc6, 0x71, 0x98, 0x86, 0xfe, 0x89, 0x1d, 0xad, 0xa1, 0xc2, 0x35, 0x6c, 0xaa, + 0xab, 0x5c, 0x43, 0xfc, 0x70, 0x0c, 0x6d, 0x84, 0xd4, 0xf2, 0x0a, 0xe5, 0x13, 0xb3, 0x3a, 0x4c, + 0x4a, 0xff, 0x38, 0xab, 0xf3, 0x12, 0x29, 0x5f, 0x23, 0xfd, 0x41, 0xf4, 0x1a, 0xc9, 0x8b, 0xd8, + 0xb8, 0xec, 0x22, 0xbe, 0x28, 0x68, 0xa6, 0x77, 0x6c, 0xf0, 0xe6, 0x25, 0x2e, 0xeb, 0x5e, 0x11, + 0xaa, 0x7e, 0x55, 0xb8, 0xb0, 0xa6, 0xce, 0xb5, 0x95, 0xf0, 0xfa, 0x68, 0x6d, 0x86, 0x18, 0xc2, + 0xed, 0xaf, 0x0a, 0x2a, 0xd4, 0xa8, 0x37, 0x90, 0x65, 0x7b, 0x36, 0x39, 0x57, 0x07, 0x51, 0x13, + 0x0e, 0x94, 0xa7, 0x5b, 0x02, 0x6a, 0x53, 0xd7, 0x22, 0xb6, 0x4e, 0x03, 0xdb, 0xb0, 0x81, 0xf0, + 0x16, 0x19, 0x71, 0xc8, 0xf2, 0x9d, 0xb0, 0xfb, 0x8d, 0xbf, 0xdb, 0xf9, 0xf7, 0x6d, 0x4c, 0xdd, + 0x8d, 0x3f, 0xb0, 0xe3, 0xd2, 0x66, 0x5d, 0x2e, 0x88, 0x88, 0xf1, 0xb8, 0xf2, 0x53, 0x06, 0x4f, + 0x78, 0xf0, 0xa4, 0x13, 0x3c, 0x39, 0xae, 0x9c, 0x66, 0x38, 0x49, 0xe5, 0x6f, 0x00, 0x00, 0x00, + 0xff, 0xff, 0x86, 0x94, 0xf2, 0xde, 0xed, 0x08, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/metric.pb.go b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/metric.pb.go new file mode 100644 index 000000000..d01dc19d8 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/metric.pb.go @@ 
-0,0 +1,232 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/monitoring/v3/metric.proto + +package monitoring // import "google.golang.org/genproto/googleapis/monitoring/v3" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import _ "google.golang.org/genproto/googleapis/api/label" +import metric "google.golang.org/genproto/googleapis/api/metric" +import monitoredres "google.golang.org/genproto/googleapis/api/monitoredres" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// A single data point in a time series. +type Point struct { + // The time interval to which the data point applies. For `GAUGE` metrics, + // only the end time of the interval is used. For `DELTA` metrics, the start + // and end time should specify a non-zero interval, with subsequent points + // specifying contiguous and non-overlapping intervals. For `CUMULATIVE` + // metrics, the start and end time should specify a non-zero interval, with + // subsequent points specifying the same start time and increasing end times, + // until an event resets the cumulative value to zero and sets a new start + // time for the following points. + Interval *TimeInterval `protobuf:"bytes,1,opt,name=interval,proto3" json:"interval,omitempty"` + // The value of the data point. + Value *TypedValue `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Point) Reset() { *m = Point{} } +func (m *Point) String() string { return proto.CompactTextString(m) } +func (*Point) ProtoMessage() {} +func (*Point) Descriptor() ([]byte, []int) { + return fileDescriptor_metric_27948d2d9b5c61d2, []int{0} +} +func (m *Point) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Point.Unmarshal(m, b) +} +func (m *Point) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Point.Marshal(b, m, deterministic) +} +func (dst *Point) XXX_Merge(src proto.Message) { + xxx_messageInfo_Point.Merge(dst, src) +} +func (m *Point) XXX_Size() int { + return xxx_messageInfo_Point.Size(m) +} +func (m *Point) XXX_DiscardUnknown() { + xxx_messageInfo_Point.DiscardUnknown(m) +} + +var xxx_messageInfo_Point proto.InternalMessageInfo + +func (m *Point) GetInterval() *TimeInterval { + if m != nil { + return m.Interval + } + return nil +} + +func (m *Point) GetValue() *TypedValue { + if m != nil { + return m.Value + } + return nil +} + +// A collection of data points that describes the time-varying values +// of a metric. A time series is identified by a combination of a +// fully-specified monitored resource and a fully-specified metric. +// This type is used for both listing and creating time series. +type TimeSeries struct { + // The associated metric. A fully-specified metric used to identify the time + // series. + Metric *metric.Metric `protobuf:"bytes,1,opt,name=metric,proto3" json:"metric,omitempty"` + // The associated monitored resource. 
Custom metrics can use only certain + // monitored resource types in their time series data. + Resource *monitoredres.MonitoredResource `protobuf:"bytes,2,opt,name=resource,proto3" json:"resource,omitempty"` + // Output only. The associated monitored resource metadata. When reading a + // time series, this field will include metadata labels that are explicitly + // named in the reduction. When creating a time series, this field is ignored. + Metadata *monitoredres.MonitoredResourceMetadata `protobuf:"bytes,7,opt,name=metadata,proto3" json:"metadata,omitempty"` + // The metric kind of the time series. When listing time series, this metric + // kind might be different from the metric kind of the associated metric if + // this time series is an alignment or reduction of other time series. + // + // When creating a time series, this field is optional. If present, it must be + // the same as the metric kind of the associated metric. If the associated + // metric's descriptor must be auto-created, then this field specifies the + // metric kind of the new descriptor and must be either `GAUGE` (the default) + // or `CUMULATIVE`. + MetricKind metric.MetricDescriptor_MetricKind `protobuf:"varint,3,opt,name=metric_kind,json=metricKind,proto3,enum=google.api.MetricDescriptor_MetricKind" json:"metric_kind,omitempty"` + // The value type of the time series. When listing time series, this value + // type might be different from the value type of the associated metric if + // this time series is an alignment or reduction of other time series. + // + // When creating a time series, this field is optional. If present, it must be + // the same as the type of the data in the `points` field. + ValueType metric.MetricDescriptor_ValueType `protobuf:"varint,4,opt,name=value_type,json=valueType,proto3,enum=google.api.MetricDescriptor_ValueType" json:"value_type,omitempty"` + // The data points of this time series. When listing time series, points are + // returned in reverse time order. + // + // When creating a time series, this field must contain exactly one point and + // the point's type must be the same as the value type of the associated + // metric. If the associated metric's descriptor must be auto-created, then + // the value type of the descriptor is determined by the point's type, which + // must be `BOOL`, `INT64`, `DOUBLE`, or `DISTRIBUTION`.
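+ //
+ // Shape of a single-point write, sketched by the editor (illustrative;
+ // TimeInterval and TypedValue are defined in this package's common.pb.go,
+ // the custom metric type is hypothetical, and `end` is an assumed
+ // *timestamp.Timestamp):
+ //
+ //    ts := &TimeSeries{
+ //        Metric: &metric.Metric{Type: "custom.googleapis.com/my_metric"},
+ //        Points: []*Point{{
+ //            Interval: &TimeInterval{EndTime: end}, // GAUGE: end time only
+ //            Value:    &TypedValue{Value: &TypedValue_DoubleValue{DoubleValue: 3.14}},
+ //        }},
+ //    }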
+ Points []*Point `protobuf:"bytes,5,rep,name=points,proto3" json:"points,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TimeSeries) Reset() { *m = TimeSeries{} } +func (m *TimeSeries) String() string { return proto.CompactTextString(m) } +func (*TimeSeries) ProtoMessage() {} +func (*TimeSeries) Descriptor() ([]byte, []int) { + return fileDescriptor_metric_27948d2d9b5c61d2, []int{1} +} +func (m *TimeSeries) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TimeSeries.Unmarshal(m, b) +} +func (m *TimeSeries) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TimeSeries.Marshal(b, m, deterministic) +} +func (dst *TimeSeries) XXX_Merge(src proto.Message) { + xxx_messageInfo_TimeSeries.Merge(dst, src) +} +func (m *TimeSeries) XXX_Size() int { + return xxx_messageInfo_TimeSeries.Size(m) +} +func (m *TimeSeries) XXX_DiscardUnknown() { + xxx_messageInfo_TimeSeries.DiscardUnknown(m) +} + +var xxx_messageInfo_TimeSeries proto.InternalMessageInfo + +func (m *TimeSeries) GetMetric() *metric.Metric { + if m != nil { + return m.Metric + } + return nil +} + +func (m *TimeSeries) GetResource() *monitoredres.MonitoredResource { + if m != nil { + return m.Resource + } + return nil +} + +func (m *TimeSeries) GetMetadata() *monitoredres.MonitoredResourceMetadata { + if m != nil { + return m.Metadata + } + return nil +} + +func (m *TimeSeries) GetMetricKind() metric.MetricDescriptor_MetricKind { + if m != nil { + return m.MetricKind + } + return metric.MetricDescriptor_METRIC_KIND_UNSPECIFIED +} + +func (m *TimeSeries) GetValueType() metric.MetricDescriptor_ValueType { + if m != nil { + return m.ValueType + } + return metric.MetricDescriptor_VALUE_TYPE_UNSPECIFIED +} + +func (m *TimeSeries) GetPoints() []*Point { + if m != nil { + return m.Points + } + return nil +} + +func init() { + proto.RegisterType((*Point)(nil), "google.monitoring.v3.Point") + proto.RegisterType((*TimeSeries)(nil), "google.monitoring.v3.TimeSeries") +} + +func init() { + proto.RegisterFile("google/monitoring/v3/metric.proto", fileDescriptor_metric_27948d2d9b5c61d2) +} + +var fileDescriptor_metric_27948d2d9b5c61d2 = []byte{ + // 441 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x92, 0x51, 0xab, 0xd3, 0x30, + 0x14, 0xc7, 0xe9, 0xae, 0x9b, 0x33, 0x03, 0x1f, 0x82, 0x68, 0x99, 0x0a, 0x73, 0xa2, 0x0e, 0x1f, + 0x5a, 0x58, 0x41, 0x10, 0xe1, 0x82, 0x57, 0x45, 0x45, 0x2e, 0x8c, 0x28, 0x7b, 0x90, 0xc1, 0xc8, + 0x6d, 0x0f, 0x25, 0xd8, 0xe4, 0x84, 0x34, 0x2b, 0xdc, 0x27, 0x3f, 0x8c, 0x6f, 0x7e, 0x14, 0x3f, + 0x93, 0x0f, 0xd2, 0x24, 0xdd, 0x76, 0xb1, 0xf7, 0xbe, 0xb5, 0xf9, 0xff, 0xfe, 0xe7, 0x7f, 0x72, + 0x72, 0xc8, 0x93, 0x12, 0xb1, 0xac, 0x20, 0x95, 0xa8, 0x84, 0x45, 0x23, 0x54, 0x99, 0x36, 0x59, + 0x2a, 0xc1, 0x1a, 0x91, 0x27, 0xda, 0xa0, 0x45, 0x7a, 0xcf, 0x23, 0xc9, 0x01, 0x49, 0x9a, 0x6c, + 0xfa, 0x28, 0x18, 0xb9, 0x16, 0x29, 0x57, 0x0a, 0x2d, 0xb7, 0x02, 0x55, 0xed, 0x3d, 0xd3, 0xfb, + 0x47, 0x6a, 0xc5, 0x2f, 0xa0, 0x0a, 0xe7, 0x0f, 0x8e, 0xce, 0x8f, 0x43, 0xa6, 0x4f, 0x8f, 0x05, + 0x1f, 0x04, 0xc5, 0xd6, 0x40, 0x8d, 0x3b, 0x93, 0x43, 0x80, 0xfa, 0x9b, 0xcd, 0x51, 0x4a, 0x54, + 0x1e, 0x99, 0xff, 0x24, 0xc3, 0x15, 0x0a, 0x65, 0xe9, 0x29, 0x19, 0x0b, 0x65, 0xc1, 0x34, 0xbc, + 0x8a, 0xa3, 0x59, 0xb4, 0x98, 0x2c, 0xe7, 0x49, 0xdf, 0x45, 0x92, 0x6f, 0x42, 0xc2, 0xe7, 0x40, + 0xb2, 0xbd, 0x87, 0xbe, 0x22, 0xc3, 0x86, 0x57, 0x3b, 0x88, 0x07, 0xce, 0x3c, 
0xbb, 0xc6, 0x7c, + 0xa9, 0xa1, 0x58, 0xb7, 0x1c, 0xf3, 0xf8, 0xfc, 0xef, 0x80, 0x90, 0xb6, 0xe4, 0x57, 0x30, 0x02, + 0x6a, 0xfa, 0x92, 0x8c, 0xfc, 0x3d, 0x43, 0x13, 0xb4, 0xab, 0xc3, 0xb5, 0x48, 0xce, 0x9d, 0xc2, + 0x02, 0x41, 0x5f, 0x93, 0x71, 0x77, 0xe1, 0x90, 0xfa, 0xf8, 0x0a, 0xdd, 0x8d, 0x85, 0x05, 0x88, + 0xed, 0x71, 0xfa, 0x96, 0x8c, 0x25, 0x58, 0x5e, 0x70, 0xcb, 0xe3, 0xdb, 0xce, 0xfa, 0xec, 0x46, + 0xeb, 0x79, 0x80, 0xd9, 0xde, 0x46, 0x3f, 0x91, 0x89, 0xef, 0x63, 0xfb, 0x43, 0xa8, 0x22, 0x3e, + 0x99, 0x45, 0x8b, 0xbb, 0xcb, 0x17, 0xff, 0xb7, 0xfb, 0x1e, 0xea, 0xdc, 0x08, 0x6d, 0xd1, 0x84, + 0x83, 0x2f, 0x42, 0x15, 0x8c, 0xc8, 0xfd, 0x37, 0xfd, 0x40, 0x88, 0x9b, 0xc5, 0xd6, 0x5e, 0x6a, + 0x88, 0x6f, 0xb9, 0x42, 0xcf, 0x6f, 0x2c, 0xe4, 0x26, 0xd8, 0xce, 0x92, 0xdd, 0x69, 0xba, 0x4f, + 0x9a, 0x91, 0x91, 0x6e, 0x9f, 0xb2, 0x8e, 0x87, 0xb3, 0x93, 0xc5, 0x64, 0xf9, 0xb0, 0xff, 0x09, + 0xdc, 0x73, 0xb3, 0x80, 0x9e, 0xfd, 0x8a, 0x48, 0x9c, 0xa3, 0xec, 0x45, 0xcf, 0x26, 0x3e, 0x78, + 0xd5, 0x6e, 0xca, 0x2a, 0xfa, 0x7e, 0x1a, 0xa0, 0x12, 0x2b, 0xae, 0xca, 0x04, 0x4d, 0x99, 0x96, + 0xa0, 0xdc, 0x1e, 0xa5, 0x5e, 0xe2, 0x5a, 0xd4, 0x57, 0xb7, 0xed, 0xcd, 0xe1, 0xef, 0xf7, 0x60, + 0xfa, 0xd1, 0x17, 0x78, 0x57, 0xe1, 0xae, 0xe8, 0x86, 0xdc, 0x66, 0xad, 0xb3, 0x3f, 0x9d, 0xb8, + 0x71, 0xe2, 0xe6, 0x20, 0x6e, 0xd6, 0xd9, 0xc5, 0xc8, 0x85, 0x64, 0xff, 0x02, 0x00, 0x00, 0xff, + 0xff, 0x5a, 0x88, 0xc9, 0x0b, 0x7e, 0x03, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/metric_service.pb.go b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/metric_service.pb.go new file mode 100644 index 000000000..38b5c770f --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/metric_service.pb.go @@ -0,0 +1,1208 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/monitoring/v3/metric_service.proto + +package monitoring // import "google.golang.org/genproto/googleapis/monitoring/v3" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/golang/protobuf/ptypes/duration" +import empty "github.com/golang/protobuf/ptypes/empty" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import metric "google.golang.org/genproto/googleapis/api/metric" +import monitoredres "google.golang.org/genproto/googleapis/api/monitoredres" +import status "google.golang.org/genproto/googleapis/rpc/status" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Controls which fields are returned by `ListTimeSeries`. +type ListTimeSeriesRequest_TimeSeriesView int32 + +const ( + // Returns the identity of the metric(s), the time series, + // and the time series data. + ListTimeSeriesRequest_FULL ListTimeSeriesRequest_TimeSeriesView = 0 + // Returns the identity of the metric and the time series resource, + // but not the time series data. 
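+ //
+ // For example (editor's sketch; assumes the `View` field of
+ // ListTimeSeriesRequest defined later in this file):
+ //
+ //    req.View = ListTimeSeriesRequest_HEADERS // discover series without fetching data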
+ ListTimeSeriesRequest_HEADERS ListTimeSeriesRequest_TimeSeriesView = 1 +) + +var ListTimeSeriesRequest_TimeSeriesView_name = map[int32]string{ + 0: "FULL", + 1: "HEADERS", +} +var ListTimeSeriesRequest_TimeSeriesView_value = map[string]int32{ + "FULL": 0, + "HEADERS": 1, +} + +func (x ListTimeSeriesRequest_TimeSeriesView) String() string { + return proto.EnumName(ListTimeSeriesRequest_TimeSeriesView_name, int32(x)) +} +func (ListTimeSeriesRequest_TimeSeriesView) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_metric_service_6b0c3991af8d9a29, []int{8, 0} +} + +// The `ListMonitoredResourceDescriptors` request. +type ListMonitoredResourceDescriptorsRequest struct { + // The project on which to execute the request. The format is + // `"projects/{project_id_or_number}"`. + Name string `protobuf:"bytes,5,opt,name=name,proto3" json:"name,omitempty"` + // An optional [filter](/monitoring/api/v3/filters) describing + // the descriptors to be returned. The filter can reference + // the descriptor's type and labels. For example, the + // following filter returns only Google Compute Engine descriptors + // that have an `id` label: + // + // resource.type = starts_with("gce_") AND resource.label:id + Filter string `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"` + // A positive number that is the maximum number of results to return. + PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // If this field is not empty then it must contain the `nextPageToken` value + // returned by a previous call to this method. Using this field causes the + // method to return additional results from the previous method call. + PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListMonitoredResourceDescriptorsRequest) Reset() { + *m = ListMonitoredResourceDescriptorsRequest{} +} +func (m *ListMonitoredResourceDescriptorsRequest) String() string { return proto.CompactTextString(m) } +func (*ListMonitoredResourceDescriptorsRequest) ProtoMessage() {} +func (*ListMonitoredResourceDescriptorsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_metric_service_6b0c3991af8d9a29, []int{0} +} +func (m *ListMonitoredResourceDescriptorsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListMonitoredResourceDescriptorsRequest.Unmarshal(m, b) +} +func (m *ListMonitoredResourceDescriptorsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListMonitoredResourceDescriptorsRequest.Marshal(b, m, deterministic) +} +func (dst *ListMonitoredResourceDescriptorsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListMonitoredResourceDescriptorsRequest.Merge(dst, src) +} +func (m *ListMonitoredResourceDescriptorsRequest) XXX_Size() int { + return xxx_messageInfo_ListMonitoredResourceDescriptorsRequest.Size(m) +} +func (m *ListMonitoredResourceDescriptorsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListMonitoredResourceDescriptorsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListMonitoredResourceDescriptorsRequest proto.InternalMessageInfo + +func (m *ListMonitoredResourceDescriptorsRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *ListMonitoredResourceDescriptorsRequest) GetFilter() string { + if m != nil { + return m.Filter + } + return "" +} + +func (m 
*ListMonitoredResourceDescriptorsRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListMonitoredResourceDescriptorsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +// The `ListMonitoredResourceDescriptors` response. +type ListMonitoredResourceDescriptorsResponse struct { + // The monitored resource descriptors that are available to this project + // and that match `filter`, if present. + ResourceDescriptors []*monitoredres.MonitoredResourceDescriptor `protobuf:"bytes,1,rep,name=resource_descriptors,json=resourceDescriptors,proto3" json:"resource_descriptors,omitempty"` + // If there are more results than have been returned, then this field is set + // to a non-empty value. To see the additional results, + // use that value as `pageToken` in the next call to this method. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListMonitoredResourceDescriptorsResponse) Reset() { + *m = ListMonitoredResourceDescriptorsResponse{} +} +func (m *ListMonitoredResourceDescriptorsResponse) String() string { return proto.CompactTextString(m) } +func (*ListMonitoredResourceDescriptorsResponse) ProtoMessage() {} +func (*ListMonitoredResourceDescriptorsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_metric_service_6b0c3991af8d9a29, []int{1} +} +func (m *ListMonitoredResourceDescriptorsResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListMonitoredResourceDescriptorsResponse.Unmarshal(m, b) +} +func (m *ListMonitoredResourceDescriptorsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListMonitoredResourceDescriptorsResponse.Marshal(b, m, deterministic) +} +func (dst *ListMonitoredResourceDescriptorsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListMonitoredResourceDescriptorsResponse.Merge(dst, src) +} +func (m *ListMonitoredResourceDescriptorsResponse) XXX_Size() int { + return xxx_messageInfo_ListMonitoredResourceDescriptorsResponse.Size(m) +} +func (m *ListMonitoredResourceDescriptorsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListMonitoredResourceDescriptorsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListMonitoredResourceDescriptorsResponse proto.InternalMessageInfo + +func (m *ListMonitoredResourceDescriptorsResponse) GetResourceDescriptors() []*monitoredres.MonitoredResourceDescriptor { + if m != nil { + return m.ResourceDescriptors + } + return nil +} + +func (m *ListMonitoredResourceDescriptorsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +// The `GetMonitoredResourceDescriptor` request. +type GetMonitoredResourceDescriptorRequest struct { + // The monitored resource descriptor to get. The format is + // `"projects/{project_id_or_number}/monitoredResourceDescriptors/{resource_type}"`. + // The `{resource_type}` is a predefined type, such as + // `cloudsql_database`. 
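+ //
+ // For example (with an assumed project ID, purely illustrative):
+ //
+ //     "projects/my-project/monitoredResourceDescriptors/cloudsql_database"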
+ Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetMonitoredResourceDescriptorRequest) Reset() { *m = GetMonitoredResourceDescriptorRequest{} } +func (m *GetMonitoredResourceDescriptorRequest) String() string { return proto.CompactTextString(m) } +func (*GetMonitoredResourceDescriptorRequest) ProtoMessage() {} +func (*GetMonitoredResourceDescriptorRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_metric_service_6b0c3991af8d9a29, []int{2} +} +func (m *GetMonitoredResourceDescriptorRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetMonitoredResourceDescriptorRequest.Unmarshal(m, b) +} +func (m *GetMonitoredResourceDescriptorRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetMonitoredResourceDescriptorRequest.Marshal(b, m, deterministic) +} +func (dst *GetMonitoredResourceDescriptorRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetMonitoredResourceDescriptorRequest.Merge(dst, src) +} +func (m *GetMonitoredResourceDescriptorRequest) XXX_Size() int { + return xxx_messageInfo_GetMonitoredResourceDescriptorRequest.Size(m) +} +func (m *GetMonitoredResourceDescriptorRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetMonitoredResourceDescriptorRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetMonitoredResourceDescriptorRequest proto.InternalMessageInfo + +func (m *GetMonitoredResourceDescriptorRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// The `ListMetricDescriptors` request. +type ListMetricDescriptorsRequest struct { + // The project on which to execute the request. The format is + // `"projects/{project_id_or_number}"`. + Name string `protobuf:"bytes,5,opt,name=name,proto3" json:"name,omitempty"` + // If this field is empty, all custom and + // system-defined metric descriptors are returned. + // Otherwise, the [filter](/monitoring/api/v3/filters) + // specifies which metric descriptors are to be + // returned. For example, the following filter matches all + // [custom metrics](/monitoring/custom-metrics): + // + // metric.type = starts_with("custom.googleapis.com/") + Filter string `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"` + // A positive number that is the maximum number of results to return. + PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // If this field is not empty then it must contain the `nextPageToken` value + // returned by a previous call to this method. Using this field causes the + // method to return additional results from the previous method call. 
+ PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListMetricDescriptorsRequest) Reset() { *m = ListMetricDescriptorsRequest{} } +func (m *ListMetricDescriptorsRequest) String() string { return proto.CompactTextString(m) } +func (*ListMetricDescriptorsRequest) ProtoMessage() {} +func (*ListMetricDescriptorsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_metric_service_6b0c3991af8d9a29, []int{3} +} +func (m *ListMetricDescriptorsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListMetricDescriptorsRequest.Unmarshal(m, b) +} +func (m *ListMetricDescriptorsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListMetricDescriptorsRequest.Marshal(b, m, deterministic) +} +func (dst *ListMetricDescriptorsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListMetricDescriptorsRequest.Merge(dst, src) +} +func (m *ListMetricDescriptorsRequest) XXX_Size() int { + return xxx_messageInfo_ListMetricDescriptorsRequest.Size(m) +} +func (m *ListMetricDescriptorsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListMetricDescriptorsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListMetricDescriptorsRequest proto.InternalMessageInfo + +func (m *ListMetricDescriptorsRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *ListMetricDescriptorsRequest) GetFilter() string { + if m != nil { + return m.Filter + } + return "" +} + +func (m *ListMetricDescriptorsRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListMetricDescriptorsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +// The `ListMetricDescriptors` response. +type ListMetricDescriptorsResponse struct { + // The metric descriptors that are available to the project + // and that match the value of `filter`, if present. + MetricDescriptors []*metric.MetricDescriptor `protobuf:"bytes,1,rep,name=metric_descriptors,json=metricDescriptors,proto3" json:"metric_descriptors,omitempty"` + // If there are more results than have been returned, then this field is set + // to a non-empty value. To see the additional results, + // use that value as `pageToken` in the next call to this method. 
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListMetricDescriptorsResponse) Reset() { *m = ListMetricDescriptorsResponse{} } +func (m *ListMetricDescriptorsResponse) String() string { return proto.CompactTextString(m) } +func (*ListMetricDescriptorsResponse) ProtoMessage() {} +func (*ListMetricDescriptorsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_metric_service_6b0c3991af8d9a29, []int{4} +} +func (m *ListMetricDescriptorsResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListMetricDescriptorsResponse.Unmarshal(m, b) +} +func (m *ListMetricDescriptorsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListMetricDescriptorsResponse.Marshal(b, m, deterministic) +} +func (dst *ListMetricDescriptorsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListMetricDescriptorsResponse.Merge(dst, src) +} +func (m *ListMetricDescriptorsResponse) XXX_Size() int { + return xxx_messageInfo_ListMetricDescriptorsResponse.Size(m) +} +func (m *ListMetricDescriptorsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListMetricDescriptorsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListMetricDescriptorsResponse proto.InternalMessageInfo + +func (m *ListMetricDescriptorsResponse) GetMetricDescriptors() []*metric.MetricDescriptor { + if m != nil { + return m.MetricDescriptors + } + return nil +} + +func (m *ListMetricDescriptorsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +// The `GetMetricDescriptor` request. +type GetMetricDescriptorRequest struct { + // The metric descriptor on which to execute the request. The format is + // `"projects/{project_id_or_number}/metricDescriptors/{metric_id}"`. + // An example value of `{metric_id}` is + // `"compute.googleapis.com/instance/disk/read_bytes_count"`. 
+ Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetMetricDescriptorRequest) Reset() { *m = GetMetricDescriptorRequest{} } +func (m *GetMetricDescriptorRequest) String() string { return proto.CompactTextString(m) } +func (*GetMetricDescriptorRequest) ProtoMessage() {} +func (*GetMetricDescriptorRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_metric_service_6b0c3991af8d9a29, []int{5} +} +func (m *GetMetricDescriptorRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetMetricDescriptorRequest.Unmarshal(m, b) +} +func (m *GetMetricDescriptorRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetMetricDescriptorRequest.Marshal(b, m, deterministic) +} +func (dst *GetMetricDescriptorRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetMetricDescriptorRequest.Merge(dst, src) +} +func (m *GetMetricDescriptorRequest) XXX_Size() int { + return xxx_messageInfo_GetMetricDescriptorRequest.Size(m) +} +func (m *GetMetricDescriptorRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetMetricDescriptorRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetMetricDescriptorRequest proto.InternalMessageInfo + +func (m *GetMetricDescriptorRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// The `CreateMetricDescriptor` request. +type CreateMetricDescriptorRequest struct { + // The project on which to execute the request. The format is + // `"projects/{project_id_or_number}"`. + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + // The new [custom metric](/monitoring/custom-metrics) + // descriptor. 
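+ //
+ // A minimal construction sketch (the project ID and metric type are
+ // illustrative assumptions; `metric` is the generated
+ // google.golang.org/genproto/googleapis/api/metric package):
+ //
+ //     req := &CreateMetricDescriptorRequest{
+ //         Name: "projects/my-project",
+ //         MetricDescriptor: &metric.MetricDescriptor{
+ //             Type: "custom.googleapis.com/my_test_metric",
+ //         },
+ //     }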
+ MetricDescriptor *metric.MetricDescriptor `protobuf:"bytes,2,opt,name=metric_descriptor,json=metricDescriptor,proto3" json:"metric_descriptor,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateMetricDescriptorRequest) Reset() { *m = CreateMetricDescriptorRequest{} } +func (m *CreateMetricDescriptorRequest) String() string { return proto.CompactTextString(m) } +func (*CreateMetricDescriptorRequest) ProtoMessage() {} +func (*CreateMetricDescriptorRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_metric_service_6b0c3991af8d9a29, []int{6} +} +func (m *CreateMetricDescriptorRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateMetricDescriptorRequest.Unmarshal(m, b) +} +func (m *CreateMetricDescriptorRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateMetricDescriptorRequest.Marshal(b, m, deterministic) +} +func (dst *CreateMetricDescriptorRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateMetricDescriptorRequest.Merge(dst, src) +} +func (m *CreateMetricDescriptorRequest) XXX_Size() int { + return xxx_messageInfo_CreateMetricDescriptorRequest.Size(m) +} +func (m *CreateMetricDescriptorRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CreateMetricDescriptorRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateMetricDescriptorRequest proto.InternalMessageInfo + +func (m *CreateMetricDescriptorRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *CreateMetricDescriptorRequest) GetMetricDescriptor() *metric.MetricDescriptor { + if m != nil { + return m.MetricDescriptor + } + return nil +} + +// The `DeleteMetricDescriptor` request. +type DeleteMetricDescriptorRequest struct { + // The metric descriptor on which to execute the request. The format is + // `"projects/{project_id_or_number}/metricDescriptors/{metric_id}"`. + // An example of `{metric_id}` is: + // `"custom.googleapis.com/my_test_metric"`. 
+ Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteMetricDescriptorRequest) Reset() { *m = DeleteMetricDescriptorRequest{} } +func (m *DeleteMetricDescriptorRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteMetricDescriptorRequest) ProtoMessage() {} +func (*DeleteMetricDescriptorRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_metric_service_6b0c3991af8d9a29, []int{7} +} +func (m *DeleteMetricDescriptorRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteMetricDescriptorRequest.Unmarshal(m, b) +} +func (m *DeleteMetricDescriptorRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteMetricDescriptorRequest.Marshal(b, m, deterministic) +} +func (dst *DeleteMetricDescriptorRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteMetricDescriptorRequest.Merge(dst, src) +} +func (m *DeleteMetricDescriptorRequest) XXX_Size() int { + return xxx_messageInfo_DeleteMetricDescriptorRequest.Size(m) +} +func (m *DeleteMetricDescriptorRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteMetricDescriptorRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteMetricDescriptorRequest proto.InternalMessageInfo + +func (m *DeleteMetricDescriptorRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// The `ListTimeSeries` request. +type ListTimeSeriesRequest struct { + // The project on which to execute the request. The format is + // "projects/{project_id_or_number}". + Name string `protobuf:"bytes,10,opt,name=name,proto3" json:"name,omitempty"` + // A [monitoring filter](/monitoring/api/v3/filters) that specifies which time + // series should be returned. The filter must specify a single metric type, + // and can additionally specify metric labels and other information. For + // example: + // + // metric.type = "compute.googleapis.com/instance/cpu/usage_time" AND + // metric.label.instance_name = "my-instance-name" + Filter string `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"` + // The time interval for which results should be returned. Only time series + // that contain data points in the specified interval are included + // in the response. + Interval *TimeInterval `protobuf:"bytes,4,opt,name=interval,proto3" json:"interval,omitempty"` + // By default, the raw time series data is returned. + // Use this field to combine multiple time series for different + // views of the data. + Aggregation *Aggregation `protobuf:"bytes,5,opt,name=aggregation,proto3" json:"aggregation,omitempty"` + // Unsupported: must be left blank. The points in each time series are + // returned in reverse time order. + OrderBy string `protobuf:"bytes,6,opt,name=order_by,json=orderBy,proto3" json:"order_by,omitempty"` + // Specifies which information is returned about the time series. + View ListTimeSeriesRequest_TimeSeriesView `protobuf:"varint,7,opt,name=view,proto3,enum=google.monitoring.v3.ListTimeSeriesRequest_TimeSeriesView" json:"view,omitempty"` + // A positive number that is the maximum number of results to return. If + // `page_size` is empty or more than 100,000 results, the effective + // `page_size` is 100,000 results. If `view` is set to `FULL`, this is the + // maximum number of `Points` returned. If `view` is set to `HEADERS`, this is + // the maximum number of `TimeSeries` returned. 
+ PageSize int32 `protobuf:"varint,8,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // If this field is not empty then it must contain the `nextPageToken` value + // returned by a previous call to this method. Using this field causes the + // method to return additional results from the previous method call. + PageToken string `protobuf:"bytes,9,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListTimeSeriesRequest) Reset() { *m = ListTimeSeriesRequest{} } +func (m *ListTimeSeriesRequest) String() string { return proto.CompactTextString(m) } +func (*ListTimeSeriesRequest) ProtoMessage() {} +func (*ListTimeSeriesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_metric_service_6b0c3991af8d9a29, []int{8} +} +func (m *ListTimeSeriesRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListTimeSeriesRequest.Unmarshal(m, b) +} +func (m *ListTimeSeriesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListTimeSeriesRequest.Marshal(b, m, deterministic) +} +func (dst *ListTimeSeriesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListTimeSeriesRequest.Merge(dst, src) +} +func (m *ListTimeSeriesRequest) XXX_Size() int { + return xxx_messageInfo_ListTimeSeriesRequest.Size(m) +} +func (m *ListTimeSeriesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListTimeSeriesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListTimeSeriesRequest proto.InternalMessageInfo + +func (m *ListTimeSeriesRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *ListTimeSeriesRequest) GetFilter() string { + if m != nil { + return m.Filter + } + return "" +} + +func (m *ListTimeSeriesRequest) GetInterval() *TimeInterval { + if m != nil { + return m.Interval + } + return nil +} + +func (m *ListTimeSeriesRequest) GetAggregation() *Aggregation { + if m != nil { + return m.Aggregation + } + return nil +} + +func (m *ListTimeSeriesRequest) GetOrderBy() string { + if m != nil { + return m.OrderBy + } + return "" +} + +func (m *ListTimeSeriesRequest) GetView() ListTimeSeriesRequest_TimeSeriesView { + if m != nil { + return m.View + } + return ListTimeSeriesRequest_FULL +} + +func (m *ListTimeSeriesRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListTimeSeriesRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +// The `ListTimeSeries` response. +type ListTimeSeriesResponse struct { + // One or more time series that match the filter included in the request. + TimeSeries []*TimeSeries `protobuf:"bytes,1,rep,name=time_series,json=timeSeries,proto3" json:"time_series,omitempty"` + // If there are more results than have been returned, then this field is set + // to a non-empty value. To see the additional results, + // use that value as `pageToken` in the next call to this method. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + // Query execution errors that may have caused the time series data returned + // to be incomplete. 
+ ExecutionErrors []*status.Status `protobuf:"bytes,3,rep,name=execution_errors,json=executionErrors,proto3" json:"execution_errors,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListTimeSeriesResponse) Reset() { *m = ListTimeSeriesResponse{} } +func (m *ListTimeSeriesResponse) String() string { return proto.CompactTextString(m) } +func (*ListTimeSeriesResponse) ProtoMessage() {} +func (*ListTimeSeriesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_metric_service_6b0c3991af8d9a29, []int{9} +} +func (m *ListTimeSeriesResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListTimeSeriesResponse.Unmarshal(m, b) +} +func (m *ListTimeSeriesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListTimeSeriesResponse.Marshal(b, m, deterministic) +} +func (dst *ListTimeSeriesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListTimeSeriesResponse.Merge(dst, src) +} +func (m *ListTimeSeriesResponse) XXX_Size() int { + return xxx_messageInfo_ListTimeSeriesResponse.Size(m) +} +func (m *ListTimeSeriesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListTimeSeriesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListTimeSeriesResponse proto.InternalMessageInfo + +func (m *ListTimeSeriesResponse) GetTimeSeries() []*TimeSeries { + if m != nil { + return m.TimeSeries + } + return nil +} + +func (m *ListTimeSeriesResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +func (m *ListTimeSeriesResponse) GetExecutionErrors() []*status.Status { + if m != nil { + return m.ExecutionErrors + } + return nil +} + +// The `CreateTimeSeries` request. +type CreateTimeSeriesRequest struct { + // The project on which to execute the request. The format is + // `"projects/{project_id_or_number}"`. + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + // The new data to be added to a list of time series. + // Adds at most one data point to each of several time series. The new data + // point must be more recent than any other point in its time series. Each + // `TimeSeries` value must fully specify a unique time series by supplying + // all label values for the metric and the monitored resource. 
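+ //
+ // A sketch of one series carrying one point (the project ID, metric type,
+ // and value are illustrative assumptions; `ptypes` is
+ // github.com/golang/protobuf/ptypes):
+ //
+ //     req := &CreateTimeSeriesRequest{
+ //         Name: "projects/my-project",
+ //         TimeSeries: []*TimeSeries{{
+ //             Metric: &metric.Metric{Type: "custom.googleapis.com/my_test_metric"},
+ //             Points: []*Point{{
+ //                 Interval: &TimeInterval{EndTime: ptypes.TimestampNow()},
+ //                 Value:    &TypedValue{Value: &TypedValue_DoubleValue{DoubleValue: 1}},
+ //             }},
+ //         }},
+ //     }
+ //
+ // A production write would normally also set Resource on each series to
+ // identify the monitored resource.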
+ TimeSeries []*TimeSeries `protobuf:"bytes,2,rep,name=time_series,json=timeSeries,proto3" json:"time_series,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateTimeSeriesRequest) Reset() { *m = CreateTimeSeriesRequest{} } +func (m *CreateTimeSeriesRequest) String() string { return proto.CompactTextString(m) } +func (*CreateTimeSeriesRequest) ProtoMessage() {} +func (*CreateTimeSeriesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_metric_service_6b0c3991af8d9a29, []int{10} +} +func (m *CreateTimeSeriesRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateTimeSeriesRequest.Unmarshal(m, b) +} +func (m *CreateTimeSeriesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateTimeSeriesRequest.Marshal(b, m, deterministic) +} +func (dst *CreateTimeSeriesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateTimeSeriesRequest.Merge(dst, src) +} +func (m *CreateTimeSeriesRequest) XXX_Size() int { + return xxx_messageInfo_CreateTimeSeriesRequest.Size(m) +} +func (m *CreateTimeSeriesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CreateTimeSeriesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateTimeSeriesRequest proto.InternalMessageInfo + +func (m *CreateTimeSeriesRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *CreateTimeSeriesRequest) GetTimeSeries() []*TimeSeries { + if m != nil { + return m.TimeSeries + } + return nil +} + +// Describes the result of a failed request to write data to a time series. +type CreateTimeSeriesError struct { + // The time series, including the `Metric`, `MonitoredResource`, + // and `Point`s (including timestamp and value) that resulted + // in the error. This field provides all of the context that + // would be needed to retry the operation. + TimeSeries *TimeSeries `protobuf:"bytes,1,opt,name=time_series,json=timeSeries,proto3" json:"time_series,omitempty"` + // The status of the requested write operation. 
+ Status *status.Status `protobuf:"bytes,2,opt,name=status,proto3" json:"status,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateTimeSeriesError) Reset() { *m = CreateTimeSeriesError{} } +func (m *CreateTimeSeriesError) String() string { return proto.CompactTextString(m) } +func (*CreateTimeSeriesError) ProtoMessage() {} +func (*CreateTimeSeriesError) Descriptor() ([]byte, []int) { + return fileDescriptor_metric_service_6b0c3991af8d9a29, []int{11} +} +func (m *CreateTimeSeriesError) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateTimeSeriesError.Unmarshal(m, b) +} +func (m *CreateTimeSeriesError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateTimeSeriesError.Marshal(b, m, deterministic) +} +func (dst *CreateTimeSeriesError) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateTimeSeriesError.Merge(dst, src) +} +func (m *CreateTimeSeriesError) XXX_Size() int { + return xxx_messageInfo_CreateTimeSeriesError.Size(m) +} +func (m *CreateTimeSeriesError) XXX_DiscardUnknown() { + xxx_messageInfo_CreateTimeSeriesError.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateTimeSeriesError proto.InternalMessageInfo + +func (m *CreateTimeSeriesError) GetTimeSeries() *TimeSeries { + if m != nil { + return m.TimeSeries + } + return nil +} + +func (m *CreateTimeSeriesError) GetStatus() *status.Status { + if m != nil { + return m.Status + } + return nil +} + +func init() { + proto.RegisterType((*ListMonitoredResourceDescriptorsRequest)(nil), "google.monitoring.v3.ListMonitoredResourceDescriptorsRequest") + proto.RegisterType((*ListMonitoredResourceDescriptorsResponse)(nil), "google.monitoring.v3.ListMonitoredResourceDescriptorsResponse") + proto.RegisterType((*GetMonitoredResourceDescriptorRequest)(nil), "google.monitoring.v3.GetMonitoredResourceDescriptorRequest") + proto.RegisterType((*ListMetricDescriptorsRequest)(nil), "google.monitoring.v3.ListMetricDescriptorsRequest") + proto.RegisterType((*ListMetricDescriptorsResponse)(nil), "google.monitoring.v3.ListMetricDescriptorsResponse") + proto.RegisterType((*GetMetricDescriptorRequest)(nil), "google.monitoring.v3.GetMetricDescriptorRequest") + proto.RegisterType((*CreateMetricDescriptorRequest)(nil), "google.monitoring.v3.CreateMetricDescriptorRequest") + proto.RegisterType((*DeleteMetricDescriptorRequest)(nil), "google.monitoring.v3.DeleteMetricDescriptorRequest") + proto.RegisterType((*ListTimeSeriesRequest)(nil), "google.monitoring.v3.ListTimeSeriesRequest") + proto.RegisterType((*ListTimeSeriesResponse)(nil), "google.monitoring.v3.ListTimeSeriesResponse") + proto.RegisterType((*CreateTimeSeriesRequest)(nil), "google.monitoring.v3.CreateTimeSeriesRequest") + proto.RegisterType((*CreateTimeSeriesError)(nil), "google.monitoring.v3.CreateTimeSeriesError") + proto.RegisterEnum("google.monitoring.v3.ListTimeSeriesRequest_TimeSeriesView", ListTimeSeriesRequest_TimeSeriesView_name, ListTimeSeriesRequest_TimeSeriesView_value) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// MetricServiceClient is the client API for MetricService service. 
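+//
+// A minimal usage sketch (the dial target, credentials, and request values
+// below are illustrative assumptions, not part of this generated file):
+//
+//     conn, err := grpc.Dial("monitoring.googleapis.com:443", grpc.WithInsecure())
+//     if err != nil {
+//         // handle the error
+//     }
+//     defer conn.Close()
+//     client := NewMetricServiceClient(conn)
+//     resp, err := client.ListTimeSeries(ctx, &ListTimeSeriesRequest{
+//         Name:   "projects/my-project",
+//         Filter: `metric.type = "compute.googleapis.com/instance/cpu/usage_time"`,
+//         View:   ListTimeSeriesRequest_FULL,
+//     })
+//
+// A production call would also set Interval on the request and use
+// authenticated transport credentials rather than grpc.WithInsecure().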
+// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type MetricServiceClient interface { + // Lists monitored resource descriptors that match a filter. This method does not require a Stackdriver account. + ListMonitoredResourceDescriptors(ctx context.Context, in *ListMonitoredResourceDescriptorsRequest, opts ...grpc.CallOption) (*ListMonitoredResourceDescriptorsResponse, error) + // Gets a single monitored resource descriptor. This method does not require a Stackdriver account. + GetMonitoredResourceDescriptor(ctx context.Context, in *GetMonitoredResourceDescriptorRequest, opts ...grpc.CallOption) (*monitoredres.MonitoredResourceDescriptor, error) + // Lists metric descriptors that match a filter. This method does not require a Stackdriver account. + ListMetricDescriptors(ctx context.Context, in *ListMetricDescriptorsRequest, opts ...grpc.CallOption) (*ListMetricDescriptorsResponse, error) + // Gets a single metric descriptor. This method does not require a Stackdriver account. + GetMetricDescriptor(ctx context.Context, in *GetMetricDescriptorRequest, opts ...grpc.CallOption) (*metric.MetricDescriptor, error) + // Creates a new metric descriptor. + // User-created metric descriptors define + // [custom metrics](/monitoring/custom-metrics). + CreateMetricDescriptor(ctx context.Context, in *CreateMetricDescriptorRequest, opts ...grpc.CallOption) (*metric.MetricDescriptor, error) + // Deletes a metric descriptor. Only user-created + // [custom metrics](/monitoring/custom-metrics) can be deleted. + DeleteMetricDescriptor(ctx context.Context, in *DeleteMetricDescriptorRequest, opts ...grpc.CallOption) (*empty.Empty, error) + // Lists time series that match a filter. This method does not require a Stackdriver account. + ListTimeSeries(ctx context.Context, in *ListTimeSeriesRequest, opts ...grpc.CallOption) (*ListTimeSeriesResponse, error) + // Creates or adds data to one or more time series. + // The response is empty if all time series in the request were written. + // If any time series could not be written, a corresponding failure message is + // included in the error response. + CreateTimeSeries(ctx context.Context, in *CreateTimeSeriesRequest, opts ...grpc.CallOption) (*empty.Empty, error) +} + +type metricServiceClient struct { + cc *grpc.ClientConn +} + +func NewMetricServiceClient(cc *grpc.ClientConn) MetricServiceClient { + return &metricServiceClient{cc} +} + +func (c *metricServiceClient) ListMonitoredResourceDescriptors(ctx context.Context, in *ListMonitoredResourceDescriptorsRequest, opts ...grpc.CallOption) (*ListMonitoredResourceDescriptorsResponse, error) { + out := new(ListMonitoredResourceDescriptorsResponse) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.MetricService/ListMonitoredResourceDescriptors", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *metricServiceClient) GetMonitoredResourceDescriptor(ctx context.Context, in *GetMonitoredResourceDescriptorRequest, opts ...grpc.CallOption) (*monitoredres.MonitoredResourceDescriptor, error) { + out := new(monitoredres.MonitoredResourceDescriptor) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.MetricService/GetMonitoredResourceDescriptor", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *metricServiceClient) ListMetricDescriptors(ctx context.Context, in *ListMetricDescriptorsRequest, opts ...grpc.CallOption) (*ListMetricDescriptorsResponse, error) { + out := new(ListMetricDescriptorsResponse) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.MetricService/ListMetricDescriptors", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *metricServiceClient) GetMetricDescriptor(ctx context.Context, in *GetMetricDescriptorRequest, opts ...grpc.CallOption) (*metric.MetricDescriptor, error) { + out := new(metric.MetricDescriptor) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.MetricService/GetMetricDescriptor", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *metricServiceClient) CreateMetricDescriptor(ctx context.Context, in *CreateMetricDescriptorRequest, opts ...grpc.CallOption) (*metric.MetricDescriptor, error) { + out := new(metric.MetricDescriptor) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.MetricService/CreateMetricDescriptor", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *metricServiceClient) DeleteMetricDescriptor(ctx context.Context, in *DeleteMetricDescriptorRequest, opts ...grpc.CallOption) (*empty.Empty, error) { + out := new(empty.Empty) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.MetricService/DeleteMetricDescriptor", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *metricServiceClient) ListTimeSeries(ctx context.Context, in *ListTimeSeriesRequest, opts ...grpc.CallOption) (*ListTimeSeriesResponse, error) { + out := new(ListTimeSeriesResponse) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.MetricService/ListTimeSeries", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *metricServiceClient) CreateTimeSeries(ctx context.Context, in *CreateTimeSeriesRequest, opts ...grpc.CallOption) (*empty.Empty, error) { + out := new(empty.Empty) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.MetricService/CreateTimeSeries", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// MetricServiceServer is the server API for MetricService service. +type MetricServiceServer interface { + // Lists monitored resource descriptors that match a filter. This method does not require a Stackdriver account. + ListMonitoredResourceDescriptors(context.Context, *ListMonitoredResourceDescriptorsRequest) (*ListMonitoredResourceDescriptorsResponse, error) + // Gets a single monitored resource descriptor. This method does not require a Stackdriver account. + GetMonitoredResourceDescriptor(context.Context, *GetMonitoredResourceDescriptorRequest) (*monitoredres.MonitoredResourceDescriptor, error) + // Lists metric descriptors that match a filter. This method does not require a Stackdriver account. + ListMetricDescriptors(context.Context, *ListMetricDescriptorsRequest) (*ListMetricDescriptorsResponse, error) + // Gets a single metric descriptor. This method does not require a Stackdriver account. + GetMetricDescriptor(context.Context, *GetMetricDescriptorRequest) (*metric.MetricDescriptor, error) + // Creates a new metric descriptor. + // User-created metric descriptors define + // [custom metrics](/monitoring/custom-metrics). + CreateMetricDescriptor(context.Context, *CreateMetricDescriptorRequest) (*metric.MetricDescriptor, error) + // Deletes a metric descriptor. 
Only user-created + // [custom metrics](/monitoring/custom-metrics) can be deleted. + DeleteMetricDescriptor(context.Context, *DeleteMetricDescriptorRequest) (*empty.Empty, error) + // Lists time series that match a filter. This method does not require a Stackdriver account. + ListTimeSeries(context.Context, *ListTimeSeriesRequest) (*ListTimeSeriesResponse, error) + // Creates or adds data to one or more time series. + // The response is empty if all time series in the request were written. + // If any time series could not be written, a corresponding failure message is + // included in the error response. + CreateTimeSeries(context.Context, *CreateTimeSeriesRequest) (*empty.Empty, error) +} + +func RegisterMetricServiceServer(s *grpc.Server, srv MetricServiceServer) { + s.RegisterService(&_MetricService_serviceDesc, srv) +} + +func _MetricService_ListMonitoredResourceDescriptors_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListMonitoredResourceDescriptorsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MetricServiceServer).ListMonitoredResourceDescriptors(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.MetricService/ListMonitoredResourceDescriptors", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MetricServiceServer).ListMonitoredResourceDescriptors(ctx, req.(*ListMonitoredResourceDescriptorsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _MetricService_GetMonitoredResourceDescriptor_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetMonitoredResourceDescriptorRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MetricServiceServer).GetMonitoredResourceDescriptor(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.MetricService/GetMonitoredResourceDescriptor", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MetricServiceServer).GetMonitoredResourceDescriptor(ctx, req.(*GetMonitoredResourceDescriptorRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _MetricService_ListMetricDescriptors_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListMetricDescriptorsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MetricServiceServer).ListMetricDescriptors(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.MetricService/ListMetricDescriptors", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MetricServiceServer).ListMetricDescriptors(ctx, req.(*ListMetricDescriptorsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _MetricService_GetMetricDescriptor_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetMetricDescriptorRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MetricServiceServer).GetMetricDescriptor(ctx, in) + } + info := 
&grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.MetricService/GetMetricDescriptor", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MetricServiceServer).GetMetricDescriptor(ctx, req.(*GetMetricDescriptorRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _MetricService_CreateMetricDescriptor_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateMetricDescriptorRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MetricServiceServer).CreateMetricDescriptor(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.MetricService/CreateMetricDescriptor", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MetricServiceServer).CreateMetricDescriptor(ctx, req.(*CreateMetricDescriptorRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _MetricService_DeleteMetricDescriptor_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteMetricDescriptorRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MetricServiceServer).DeleteMetricDescriptor(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.MetricService/DeleteMetricDescriptor", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MetricServiceServer).DeleteMetricDescriptor(ctx, req.(*DeleteMetricDescriptorRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _MetricService_ListTimeSeries_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListTimeSeriesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MetricServiceServer).ListTimeSeries(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.MetricService/ListTimeSeries", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MetricServiceServer).ListTimeSeries(ctx, req.(*ListTimeSeriesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _MetricService_CreateTimeSeries_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateTimeSeriesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MetricServiceServer).CreateTimeSeries(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.MetricService/CreateTimeSeries", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MetricServiceServer).CreateTimeSeries(ctx, req.(*CreateTimeSeriesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _MetricService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.monitoring.v3.MetricService", + HandlerType: (*MetricServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "ListMonitoredResourceDescriptors", + Handler: _MetricService_ListMonitoredResourceDescriptors_Handler, + }, + { + MethodName: 
"GetMonitoredResourceDescriptor", + Handler: _MetricService_GetMonitoredResourceDescriptor_Handler, + }, + { + MethodName: "ListMetricDescriptors", + Handler: _MetricService_ListMetricDescriptors_Handler, + }, + { + MethodName: "GetMetricDescriptor", + Handler: _MetricService_GetMetricDescriptor_Handler, + }, + { + MethodName: "CreateMetricDescriptor", + Handler: _MetricService_CreateMetricDescriptor_Handler, + }, + { + MethodName: "DeleteMetricDescriptor", + Handler: _MetricService_DeleteMetricDescriptor_Handler, + }, + { + MethodName: "ListTimeSeries", + Handler: _MetricService_ListTimeSeries_Handler, + }, + { + MethodName: "CreateTimeSeries", + Handler: _MetricService_CreateTimeSeries_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/monitoring/v3/metric_service.proto", +} + +func init() { + proto.RegisterFile("google/monitoring/v3/metric_service.proto", fileDescriptor_metric_service_6b0c3991af8d9a29) +} + +var fileDescriptor_metric_service_6b0c3991af8d9a29 = []byte{ + // 1049 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x57, 0x4f, 0x6f, 0x1b, 0x45, + 0x14, 0x67, 0xe2, 0x34, 0x71, 0x9e, 0xd5, 0xd4, 0x9d, 0xb6, 0xae, 0xd9, 0x26, 0x95, 0xbb, 0xa8, + 0xc4, 0x75, 0xcb, 0x6e, 0x65, 0x57, 0x1c, 0x92, 0x26, 0x52, 0xfe, 0x51, 0x2a, 0x02, 0x8a, 0xd6, + 0x25, 0x87, 0x2a, 0x92, 0xb5, 0xb1, 0xa7, 0xab, 0x01, 0xef, 0xce, 0x32, 0x3b, 0x76, 0x9b, 0xa2, + 0x70, 0xe0, 0xd0, 0x3b, 0x02, 0x24, 0xf8, 0x0a, 0x39, 0x80, 0xf8, 0x0a, 0x88, 0x13, 0x57, 0xce, + 0xdc, 0xf8, 0x0a, 0xdc, 0xd1, 0xce, 0xee, 0xc6, 0xf6, 0x7a, 0x77, 0x6d, 0x73, 0xe9, 0xcd, 0x3b, + 0xef, 0xcd, 0x7b, 0xbf, 0xf7, 0x9b, 0x79, 0xef, 0x37, 0x86, 0x7b, 0x16, 0x63, 0x56, 0x97, 0xe8, + 0x36, 0x73, 0xa8, 0x60, 0x9c, 0x3a, 0x96, 0xde, 0x6f, 0xe8, 0x36, 0x11, 0x9c, 0xb6, 0x5b, 0x1e, + 0xe1, 0x7d, 0xda, 0x26, 0x9a, 0xcb, 0x99, 0x60, 0xf8, 0x7a, 0xe0, 0xaa, 0x0d, 0x5c, 0xb5, 0x7e, + 0x43, 0x59, 0x09, 0x03, 0x98, 0x2e, 0xd5, 0x4d, 0xc7, 0x61, 0xc2, 0x14, 0x94, 0x39, 0x5e, 0xb0, + 0x47, 0xb9, 0x39, 0x64, 0x0d, 0x82, 0x86, 0x86, 0xf7, 0x86, 0x0d, 0x41, 0x40, 0xd2, 0x69, 0x71, + 0xe2, 0xb1, 0x1e, 0x8f, 0x32, 0x2a, 0x77, 0x12, 0xc1, 0xb5, 0x99, 0x6d, 0x33, 0x27, 0xd3, 0x65, + 0x24, 0xd5, 0xed, 0xd0, 0x45, 0x7e, 0x9d, 0xf4, 0x5e, 0xe8, 0x9d, 0x1e, 0x97, 0x20, 0x43, 0xfb, + 0xad, 0xb8, 0x9d, 0xd8, 0xae, 0x38, 0x8d, 0x15, 0xc0, 0xdd, 0xb6, 0xee, 0x09, 0x53, 0xf4, 0xc2, + 0xca, 0xd4, 0xef, 0x10, 0xac, 0x1d, 0x50, 0x4f, 0x7c, 0x1a, 0x81, 0x37, 0x42, 0xec, 0x7b, 0xc4, + 0x6b, 0x73, 0xea, 0x0a, 0xc6, 0x3d, 0x83, 0x7c, 0xd5, 0x23, 0x9e, 0xc0, 0x18, 0xe6, 0x1d, 0xd3, + 0x26, 0xe5, 0x4b, 0x15, 0x54, 0x5d, 0x32, 0xe4, 0x6f, 0x5c, 0x82, 0x85, 0x17, 0xb4, 0x2b, 0x08, + 0x2f, 0xcf, 0xc9, 0xd5, 0xf0, 0x0b, 0xdf, 0x82, 0x25, 0xd7, 0xb4, 0x48, 0xcb, 0xa3, 0xaf, 0x49, + 0x39, 0x57, 0x41, 0xd5, 0x4b, 0x46, 0xde, 0x5f, 0x68, 0xd2, 0xd7, 0x04, 0xaf, 0x02, 0x48, 0xa3, + 0x60, 0x5f, 0x12, 0xa7, 0x3c, 0x2f, 0x37, 0x4a, 0xf7, 0x67, 0xfe, 0x82, 0xfa, 0x0b, 0x82, 0xea, + 0x64, 0x4c, 0x9e, 0xcb, 0x1c, 0x8f, 0xe0, 0xe7, 0x70, 0x3d, 0xa2, 0xbb, 0xd5, 0x19, 0xd8, 0xcb, + 0xa8, 0x92, 0xab, 0x16, 0xea, 0x6b, 0x5a, 0x78, 0xda, 0xa6, 0x4b, 0xb5, 0x8c, 0x78, 0xc6, 0x35, + 0x3e, 0x9e, 0x03, 0xbf, 0x0f, 0x57, 0x1c, 0xf2, 0x4a, 0xb4, 0x86, 0xc0, 0x06, 0x55, 0x5e, 0xf6, + 0x97, 0x0f, 0x2f, 0x00, 0x6f, 0xc0, 0xdd, 0x27, 0x24, 0x0b, 0x6e, 0x9c, 0xc1, 0xdc, 0x80, 0x41, + 0xf5, 0x0d, 0x82, 0x15, 0x59, 0xad, 0x3c, 0xec, 0xb7, 0x48, 0xfb, 0x0f, 0x08, 0x56, 0x53, 0x80, + 0x84, 0x5c, 0x7f, 0x02, 0x38, 0x6c, 0xa9, 0x71, 
0xa6, 0x57, 0x46, 0x98, 0x8e, 0x85, 0x30, 0xae, + 0xda, 0xf1, 0xa0, 0x53, 0x93, 0xfb, 0x10, 0x14, 0x9f, 0xdc, 0x78, 0xc4, 0x0c, 0x46, 0xbf, 0x81, + 0xd5, 0x5d, 0x4e, 0x4c, 0x41, 0x66, 0xd8, 0x84, 0x9f, 0xc2, 0xd5, 0xb1, 0xda, 0x24, 0xa0, 0x49, + 0xa5, 0x15, 0xe3, 0xa5, 0xa9, 0x0d, 0x58, 0xdd, 0x23, 0x5d, 0x32, 0x53, 0x7e, 0xf5, 0xa7, 0x1c, + 0xdc, 0xf0, 0xd9, 0x7f, 0x46, 0x6d, 0xd2, 0x24, 0x9c, 0x92, 0xb1, 0xf3, 0x87, 0x29, 0xce, 0x7f, + 0x0b, 0xf2, 0xd4, 0x11, 0x84, 0xf7, 0xcd, 0xae, 0x3c, 0xe0, 0x42, 0x5d, 0xd5, 0x92, 0xe6, 0x9d, + 0xe6, 0xa7, 0x79, 0x1a, 0x7a, 0x1a, 0x17, 0x7b, 0xf0, 0x2e, 0x14, 0x4c, 0xcb, 0xe2, 0xc4, 0x92, + 0x93, 0x45, 0x5e, 0xb9, 0x42, 0xfd, 0x4e, 0x72, 0x88, 0xed, 0x81, 0xa3, 0x31, 0xbc, 0x0b, 0xbf, + 0x0b, 0x79, 0xc6, 0x3b, 0x84, 0xb7, 0x4e, 0x4e, 0xcb, 0x0b, 0x12, 0xde, 0xa2, 0xfc, 0xde, 0x39, + 0xc5, 0x9f, 0xc1, 0x7c, 0x9f, 0x92, 0x97, 0xe5, 0xc5, 0x0a, 0xaa, 0x2e, 0xd7, 0xd7, 0x93, 0x03, + 0x27, 0xd2, 0xa0, 0x0d, 0x56, 0x8e, 0x28, 0x79, 0x69, 0xc8, 0x38, 0xa3, 0xf7, 0x3d, 0x9f, 0x79, + 0xdf, 0x97, 0xe2, 0xf7, 0x7d, 0x0d, 0x96, 0x47, 0x63, 0xe2, 0x3c, 0xcc, 0x7f, 0xf4, 0xf9, 0xc1, + 0x41, 0xf1, 0x1d, 0x5c, 0x80, 0xc5, 0x8f, 0xf7, 0xb7, 0xf7, 0xf6, 0x8d, 0x66, 0x11, 0xa9, 0xbf, + 0x23, 0x28, 0xc5, 0x31, 0x85, 0x1d, 0xb1, 0x0d, 0x05, 0x41, 0x6d, 0xe2, 0x4b, 0x0c, 0x25, 0x51, + 0x2b, 0x54, 0xd2, 0x29, 0x0f, 0xb7, 0x83, 0xb8, 0xf8, 0x3d, 0x6d, 0x1f, 0xe0, 0x4d, 0x28, 0x92, + 0x57, 0xa4, 0xdd, 0xf3, 0x29, 0x6e, 0x11, 0xce, 0xfd, 0xd6, 0xcb, 0xc9, 0x7c, 0x38, 0xca, 0xc7, + 0xdd, 0xb6, 0xd6, 0x94, 0xd3, 0xdd, 0xb8, 0x72, 0xe1, 0xbb, 0x2f, 0x5d, 0x55, 0x17, 0x6e, 0x06, + 0x4d, 0x91, 0x7e, 0xc1, 0x86, 0xdb, 0x21, 0x56, 0xd8, 0xdc, 0xec, 0x85, 0xf9, 0x83, 0xed, 0x46, + 0x3c, 0xa5, 0x04, 0x33, 0xce, 0x1a, 0x9a, 0x99, 0xb5, 0x1a, 0x2c, 0x04, 0x3a, 0x16, 0xf6, 0x68, + 0x12, 0x07, 0xa1, 0x47, 0xfd, 0x5f, 0x80, 0xcb, 0x41, 0x2b, 0x36, 0x83, 0x97, 0x00, 0xfe, 0x1b, + 0x41, 0x65, 0x92, 0xc2, 0xe0, 0xcd, 0xf4, 0xdb, 0x39, 0x85, 0x5a, 0x2a, 0x5b, 0xff, 0x77, 0x7b, + 0x70, 0xb5, 0xd4, 0xf5, 0x6f, 0xff, 0xfa, 0xe7, 0xfb, 0xb9, 0x47, 0xb8, 0xee, 0xbf, 0x04, 0xbe, + 0xf6, 0x0f, 0x65, 0xd3, 0xe5, 0xec, 0x0b, 0xd2, 0x16, 0x9e, 0x5e, 0x3b, 0x1b, 0xbc, 0x36, 0x92, + 0xa0, 0xff, 0x81, 0xe0, 0x76, 0xb6, 0x22, 0xe1, 0x8d, 0x64, 0x78, 0x53, 0xe9, 0x98, 0x32, 0xad, + 0xac, 0xaa, 0x8f, 0x65, 0x11, 0x1f, 0xe2, 0x47, 0x49, 0x45, 0x64, 0xd6, 0xa0, 0xd7, 0xce, 0xf0, + 0x6f, 0x28, 0x98, 0x89, 0x63, 0x8a, 0x84, 0xeb, 0x19, 0xe4, 0xa6, 0xe8, 0xa8, 0xd2, 0x98, 0x69, + 0x4f, 0x78, 0x0a, 0xba, 0x2c, 0xe0, 0x1e, 0x5e, 0x4b, 0x39, 0x85, 0x31, 0x64, 0x3f, 0x23, 0xb8, + 0x96, 0xa0, 0x57, 0xf8, 0x61, 0x3a, 0xdf, 0xc9, 0x2a, 0xa1, 0x64, 0xca, 0x8e, 0x5a, 0x97, 0xc0, + 0x1e, 0xe0, 0x5a, 0x32, 0xb3, 0x71, 0x5c, 0x7a, 0xad, 0x76, 0x86, 0x7f, 0x45, 0x50, 0x4a, 0x56, + 0x46, 0x9c, 0x42, 0x4e, 0xa6, 0x8e, 0x4e, 0x40, 0xb8, 0x23, 0x11, 0x3e, 0x56, 0xa7, 0xa5, 0x6e, + 0x7d, 0x5c, 0x80, 0x7d, 0x36, 0x4b, 0xc9, 0x5a, 0x9a, 0x86, 0x38, 0x53, 0x79, 0x95, 0x52, 0xb4, + 0x29, 0x7a, 0x25, 0x6b, 0xfb, 0xfe, 0x2b, 0x39, 0x62, 0xb3, 0x36, 0x0b, 0x9b, 0x3f, 0x22, 0x58, + 0x1e, 0x95, 0x05, 0x7c, 0x7f, 0x06, 0x41, 0x53, 0x1e, 0x4c, 0xe7, 0x1c, 0x5e, 0xc4, 0xaa, 0x44, + 0xa8, 0xe2, 0x4a, 0x32, 0x9b, 0x43, 0xa3, 0xf1, 0x0d, 0x82, 0x62, 0x7c, 0xee, 0xe2, 0x0f, 0xb2, + 0xce, 0x77, 0x1c, 0x5b, 0x1a, 0x4f, 0xf7, 0x25, 0x8a, 0xbb, 0xea, 0x44, 0x14, 0xeb, 0xa8, 0xb6, + 0x73, 0x8e, 0xa0, 0xdc, 0x66, 0x76, 0x62, 0xe6, 0x1d, 0x3c, 0x32, 0x91, 0x0f, 0xfd, 0x34, 0x87, + 0xe8, 0xf9, 0x56, 0xe8, 0x6b, 0xb1, 0xae, 0xe9, 0x58, 0x1a, 0xe3, 0x96, 
0x6e, 0x11, 0x47, 0x82, + 0xd0, 0x03, 0x93, 0xe9, 0x52, 0x6f, 0xf4, 0x6f, 0xd2, 0xc6, 0xe0, 0xeb, 0x7c, 0x4e, 0x79, 0x12, + 0x04, 0xd8, 0xed, 0xb2, 0x5e, 0x27, 0x1a, 0x4d, 0x7e, 0xca, 0xa3, 0xc6, 0x9f, 0x91, 0xf1, 0x58, + 0x1a, 0x8f, 0x07, 0xc6, 0xe3, 0xa3, 0xc6, 0xc9, 0x82, 0x4c, 0xd2, 0xf8, 0x2f, 0x00, 0x00, 0xff, + 0xff, 0x79, 0x2b, 0x3b, 0x90, 0x4a, 0x0e, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/mutation_record.pb.go b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/mutation_record.pb.go new file mode 100644 index 000000000..dbace7899 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/mutation_record.pb.go @@ -0,0 +1,97 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/monitoring/v3/mutation_record.proto + +package monitoring // import "google.golang.org/genproto/googleapis/monitoring/v3" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import timestamp "github.com/golang/protobuf/ptypes/timestamp" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Describes a change made to a configuration. +type MutationRecord struct { + // When the change occurred. + MutateTime *timestamp.Timestamp `protobuf:"bytes,1,opt,name=mutate_time,json=mutateTime,proto3" json:"mutate_time,omitempty"` + // The email address of the user making the change. 
+ MutatedBy string `protobuf:"bytes,2,opt,name=mutated_by,json=mutatedBy,proto3" json:"mutated_by,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MutationRecord) Reset() { *m = MutationRecord{} } +func (m *MutationRecord) String() string { return proto.CompactTextString(m) } +func (*MutationRecord) ProtoMessage() {} +func (*MutationRecord) Descriptor() ([]byte, []int) { + return fileDescriptor_mutation_record_c4e4f383b5193b81, []int{0} +} +func (m *MutationRecord) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MutationRecord.Unmarshal(m, b) +} +func (m *MutationRecord) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MutationRecord.Marshal(b, m, deterministic) +} +func (dst *MutationRecord) XXX_Merge(src proto.Message) { + xxx_messageInfo_MutationRecord.Merge(dst, src) +} +func (m *MutationRecord) XXX_Size() int { + return xxx_messageInfo_MutationRecord.Size(m) +} +func (m *MutationRecord) XXX_DiscardUnknown() { + xxx_messageInfo_MutationRecord.DiscardUnknown(m) +} + +var xxx_messageInfo_MutationRecord proto.InternalMessageInfo + +func (m *MutationRecord) GetMutateTime() *timestamp.Timestamp { + if m != nil { + return m.MutateTime + } + return nil +} + +func (m *MutationRecord) GetMutatedBy() string { + if m != nil { + return m.MutatedBy + } + return "" +} + +func init() { + proto.RegisterType((*MutationRecord)(nil), "google.monitoring.v3.MutationRecord") +} + +func init() { + proto.RegisterFile("google/monitoring/v3/mutation_record.proto", fileDescriptor_mutation_record_c4e4f383b5193b81) +} + +var fileDescriptor_mutation_record_c4e4f383b5193b81 = []byte{ + // 251 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xd2, 0x4a, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0xcf, 0xcd, 0xcf, 0xcb, 0x2c, 0xc9, 0x2f, 0xca, 0xcc, 0x4b, 0xd7, 0x2f, 0x33, + 0xd6, 0xcf, 0x2d, 0x2d, 0x49, 0x2c, 0xc9, 0xcc, 0xcf, 0x8b, 0x2f, 0x4a, 0x4d, 0xce, 0x2f, 0x4a, + 0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x81, 0xa8, 0xd5, 0x43, 0xa8, 0xd5, 0x2b, 0x33, + 0x96, 0x92, 0x87, 0x9a, 0x00, 0x56, 0x93, 0x54, 0x9a, 0xa6, 0x5f, 0x92, 0x99, 0x9b, 0x5a, 0x5c, + 0x92, 0x98, 0x5b, 0x00, 0xd1, 0xa6, 0x94, 0xc3, 0xc5, 0xe7, 0x0b, 0x35, 0x2f, 0x08, 0x6c, 0x9c, + 0x90, 0x35, 0x17, 0x37, 0xd8, 0x86, 0xd4, 0x78, 0x90, 0x5a, 0x09, 0x46, 0x05, 0x46, 0x0d, 0x6e, + 0x23, 0x29, 0x3d, 0xa8, 0xf1, 0x30, 0x83, 0xf4, 0x42, 0x60, 0x06, 0x05, 0x71, 0x41, 0x94, 0x83, + 0x04, 0x84, 0x64, 0xb9, 0xa0, 0xbc, 0x94, 0xf8, 0xa4, 0x4a, 0x09, 0x26, 0x05, 0x46, 0x0d, 0xce, + 0x20, 0x4e, 0xa8, 0x88, 0x53, 0xa5, 0xd3, 0x6a, 0x46, 0x2e, 0x89, 0xe4, 0xfc, 0x5c, 0x3d, 0x6c, + 0x6e, 0x75, 0x12, 0x46, 0x75, 0x48, 0x00, 0xc8, 0xa6, 0x00, 0xc6, 0x28, 0x3b, 0xa8, 0xe2, 0xf4, + 0xfc, 0x9c, 0xc4, 0xbc, 0x74, 0xbd, 0xfc, 0xa2, 0x74, 0xfd, 0xf4, 0xd4, 0x3c, 0xb0, 0x3b, 0xf4, + 0x21, 0x52, 0x89, 0x05, 0x99, 0xc5, 0xa8, 0x61, 0x64, 0x8d, 0xe0, 0xad, 0x62, 0x92, 0x72, 0x87, + 0x18, 0xe0, 0x9c, 0x93, 0x5f, 0x9a, 0xa2, 0xe7, 0x8b, 0xb0, 0x33, 0xcc, 0xf8, 0x14, 0x4c, 0x32, + 0x06, 0x2c, 0x19, 0x83, 0x90, 0x8c, 0x09, 0x33, 0x4e, 0x62, 0x03, 0x5b, 0x62, 0x0c, 0x08, 0x00, + 0x00, 0xff, 0xff, 0x95, 0xa7, 0xf3, 0xbd, 0x87, 0x01, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/notification.pb.go b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/notification.pb.go new file mode 100644 index 000000000..a3d70d842 --- /dev/null +++ 
b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/notification.pb.go @@ -0,0 +1,368 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/monitoring/v3/notification.proto + +package monitoring // import "google.golang.org/genproto/googleapis/monitoring/v3" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import wrappers "github.com/golang/protobuf/ptypes/wrappers" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import label "google.golang.org/genproto/googleapis/api/label" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Indicates whether the channel has been verified or not. It is illegal +// to specify this field in a +// [`CreateNotificationChannel`][google.monitoring.v3.NotificationChannelService.CreateNotificationChannel] +// or an +// [`UpdateNotificationChannel`][google.monitoring.v3.NotificationChannelService.UpdateNotificationChannel] +// operation. +type NotificationChannel_VerificationStatus int32 + +const ( + // Sentinel value used to indicate that the state is unknown, omitted, or + // is not applicable (as in the case of channels that neither support + // nor require verification in order to function). + NotificationChannel_VERIFICATION_STATUS_UNSPECIFIED NotificationChannel_VerificationStatus = 0 + // The channel has yet to be verified and requires verification to function. + // Note that this state also applies to the case where the verification + // process has been initiated by sending a verification code but where + // the verification code has not been submitted to complete the process. + NotificationChannel_UNVERIFIED NotificationChannel_VerificationStatus = 1 + // It has been proven that notifications can be received on this + // notification channel and that someone on the project has access + // to messages that are delivered to that channel. + NotificationChannel_VERIFIED NotificationChannel_VerificationStatus = 2 +) + +var NotificationChannel_VerificationStatus_name = map[int32]string{ + 0: "VERIFICATION_STATUS_UNSPECIFIED", + 1: "UNVERIFIED", + 2: "VERIFIED", +} +var NotificationChannel_VerificationStatus_value = map[string]int32{ + "VERIFICATION_STATUS_UNSPECIFIED": 0, + "UNVERIFIED": 1, + "VERIFIED": 2, +} + +func (x NotificationChannel_VerificationStatus) String() string { + return proto.EnumName(NotificationChannel_VerificationStatus_name, int32(x)) +} +func (NotificationChannel_VerificationStatus) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_notification_5449d40305a71b45, []int{1, 0} +} + +// A description of a notification channel. The descriptor includes +// the properties of the channel and the set of labels or fields that +// must be specified to configure channels of a given type. +type NotificationChannelDescriptor struct { + // The full REST resource name for this descriptor. The syntax is: + // + // projects/[PROJECT_ID]/notificationChannelDescriptors/[TYPE] + // + // In the above, `[TYPE]` is the value of the `type` field. 
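+ //
+ // For example (with an assumed project ID, purely illustrative):
+ //
+ //     projects/my-project/notificationChannelDescriptors/email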
+ Name string `protobuf:"bytes,6,opt,name=name,proto3" json:"name,omitempty"` + // The type of notification channel, such as "email", "sms", etc. + // Notification channel types are globally unique. + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + // A human-readable name for the notification channel type. This + // form of the name is suitable for a user interface. + DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` + // A human-readable description of the notification channel + // type. The description may include a description of the properties + // of the channel and pointers to external documentation. + Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` + // The set of labels that must be defined to identify a particular + // channel of the corresponding type. Each label includes a + // description for how that field should be populated. + Labels []*label.LabelDescriptor `protobuf:"bytes,4,rep,name=labels,proto3" json:"labels,omitempty"` + // The tiers that support this notification channel; the project service tier + // must be one of the supported_tiers. + SupportedTiers []ServiceTier `protobuf:"varint,5,rep,packed,name=supported_tiers,json=supportedTiers,proto3,enum=google.monitoring.v3.ServiceTier" json:"supported_tiers,omitempty"` // Deprecated: Do not use. + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NotificationChannelDescriptor) Reset() { *m = NotificationChannelDescriptor{} } +func (m *NotificationChannelDescriptor) String() string { return proto.CompactTextString(m) } +func (*NotificationChannelDescriptor) ProtoMessage() {} +func (*NotificationChannelDescriptor) Descriptor() ([]byte, []int) { + return fileDescriptor_notification_5449d40305a71b45, []int{0} +} +func (m *NotificationChannelDescriptor) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NotificationChannelDescriptor.Unmarshal(m, b) +} +func (m *NotificationChannelDescriptor) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NotificationChannelDescriptor.Marshal(b, m, deterministic) +} +func (dst *NotificationChannelDescriptor) XXX_Merge(src proto.Message) { + xxx_messageInfo_NotificationChannelDescriptor.Merge(dst, src) +} +func (m *NotificationChannelDescriptor) XXX_Size() int { + return xxx_messageInfo_NotificationChannelDescriptor.Size(m) +} +func (m *NotificationChannelDescriptor) XXX_DiscardUnknown() { + xxx_messageInfo_NotificationChannelDescriptor.DiscardUnknown(m) +} + +var xxx_messageInfo_NotificationChannelDescriptor proto.InternalMessageInfo + +func (m *NotificationChannelDescriptor) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *NotificationChannelDescriptor) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *NotificationChannelDescriptor) GetDisplayName() string { + if m != nil { + return m.DisplayName + } + return "" +} + +func (m *NotificationChannelDescriptor) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *NotificationChannelDescriptor) GetLabels() []*label.LabelDescriptor { + if m != nil { + return m.Labels + } + return nil +} + +// Deprecated: Do not use. 
+func (m *NotificationChannelDescriptor) GetSupportedTiers() []ServiceTier { + if m != nil { + return m.SupportedTiers + } + return nil +} + +// A `NotificationChannel` is a medium through which an alert is +// delivered when a policy violation is detected. Examples of channels +// include email, SMS, and third-party messaging applications. Fields +// containing sensitive information like authentication tokens or +// contact info are only partially populated on retrieval. +type NotificationChannel struct { + // The type of the notification channel. This field matches the + // value of the [NotificationChannelDescriptor.type][google.monitoring.v3.NotificationChannelDescriptor.type] field. + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + // The full REST resource name for this channel. The syntax is: + // + // projects/[PROJECT_ID]/notificationChannels/[CHANNEL_ID] + // + // The `[CHANNEL_ID]` is automatically assigned by the server on creation. + Name string `protobuf:"bytes,6,opt,name=name,proto3" json:"name,omitempty"` + // An optional human-readable name for this notification channel. It is + // recommended that you specify a non-empty and unique name in order to + // make it easier to identify the channels in your project, though this is + // not enforced. The display name is limited to 512 Unicode characters. + DisplayName string `protobuf:"bytes,3,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` + // An optional human-readable description of this notification channel. This + // description may provide additional details, beyond the display + // name, for the channel. This may not exceed 1024 Unicode characters. + Description string `protobuf:"bytes,4,opt,name=description,proto3" json:"description,omitempty"` + // Configuration fields that define the channel and its behavior. The + // permissible and required labels are specified in the + // [NotificationChannelDescriptor.labels][google.monitoring.v3.NotificationChannelDescriptor.labels] of the + // `NotificationChannelDescriptor` corresponding to the `type` field. + Labels map[string]string `protobuf:"bytes,5,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // User-supplied key/value data that does not need to conform to + // the corresponding `NotificationChannelDescriptor`'s schema, unlike + // the `labels` field. This field is intended to be used for organizing + // and identifying the `NotificationChannel` objects. + // + // The field can contain up to 64 entries. Each key and value is limited to + // 63 Unicode characters or 128 bytes, whichever is smaller. Labels and + // values can contain only lowercase letters, numerals, underscores, and + // dashes. Keys must begin with a letter. + UserLabels map[string]string `protobuf:"bytes,8,rep,name=user_labels,json=userLabels,proto3" json:"user_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Indicates whether this channel has been verified or not. On a + // [`ListNotificationChannels`][google.monitoring.v3.NotificationChannelService.ListNotificationChannels] + // or + // [`GetNotificationChannel`][google.monitoring.v3.NotificationChannelService.GetNotificationChannel] + // operation, this field is expected to be populated.
+ // + // If the value is `UNVERIFIED`, then it indicates that the channel is + // non-functioning (it both requires verification and lacks verification); + // otherwise, it is assumed that the channel works. + // + // If the channel is neither `VERIFIED` nor `UNVERIFIED`, it implies that + // the channel is of a type that does not require verification or that + // this specific channel has been exempted from verification because it was + // created prior to verification being required for channels of this type. + // + // This field cannot be modified using a standard + // [`UpdateNotificationChannel`][google.monitoring.v3.NotificationChannelService.UpdateNotificationChannel] + // operation. To change the value of this field, you must call + // [`VerifyNotificationChannel`][google.monitoring.v3.NotificationChannelService.VerifyNotificationChannel]. + VerificationStatus NotificationChannel_VerificationStatus `protobuf:"varint,9,opt,name=verification_status,json=verificationStatus,proto3,enum=google.monitoring.v3.NotificationChannel_VerificationStatus" json:"verification_status,omitempty"` + // Whether notifications are forwarded to the described channel. This makes + // it possible to disable delivery of notifications to a particular channel + // without removing the channel from all alerting policies that reference + // the channel. This is a more convenient approach when the change is + // temporary and you want to receive notifications from the same set + // of alerting policies on the channel at some point in the future. + Enabled *wrappers.BoolValue `protobuf:"bytes,11,opt,name=enabled,proto3" json:"enabled,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NotificationChannel) Reset() { *m = NotificationChannel{} } +func (m *NotificationChannel) String() string { return proto.CompactTextString(m) } +func (*NotificationChannel) ProtoMessage() {} +func (*NotificationChannel) Descriptor() ([]byte, []int) { + return fileDescriptor_notification_5449d40305a71b45, []int{1} +} +func (m *NotificationChannel) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NotificationChannel.Unmarshal(m, b) +} +func (m *NotificationChannel) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NotificationChannel.Marshal(b, m, deterministic) +} +func (dst *NotificationChannel) XXX_Merge(src proto.Message) { + xxx_messageInfo_NotificationChannel.Merge(dst, src) +} +func (m *NotificationChannel) XXX_Size() int { + return xxx_messageInfo_NotificationChannel.Size(m) +} +func (m *NotificationChannel) XXX_DiscardUnknown() { + xxx_messageInfo_NotificationChannel.DiscardUnknown(m) +} + +var xxx_messageInfo_NotificationChannel proto.InternalMessageInfo + +func (m *NotificationChannel) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *NotificationChannel) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *NotificationChannel) GetDisplayName() string { + if m != nil { + return m.DisplayName + } + return "" +} + +func (m *NotificationChannel) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *NotificationChannel) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +func (m *NotificationChannel) GetUserLabels() map[string]string { + if m != nil { + return m.UserLabels + } + return nil +} + +func (m *NotificationChannel) GetVerificationStatus() 
NotificationChannel_VerificationStatus { + if m != nil { + return m.VerificationStatus + } + return NotificationChannel_VERIFICATION_STATUS_UNSPECIFIED +} + +func (m *NotificationChannel) GetEnabled() *wrappers.BoolValue { + if m != nil { + return m.Enabled + } + return nil +} + +func init() { + proto.RegisterType((*NotificationChannelDescriptor)(nil), "google.monitoring.v3.NotificationChannelDescriptor") + proto.RegisterType((*NotificationChannel)(nil), "google.monitoring.v3.NotificationChannel") + proto.RegisterMapType((map[string]string)(nil), "google.monitoring.v3.NotificationChannel.LabelsEntry") + proto.RegisterMapType((map[string]string)(nil), "google.monitoring.v3.NotificationChannel.UserLabelsEntry") + proto.RegisterEnum("google.monitoring.v3.NotificationChannel_VerificationStatus", NotificationChannel_VerificationStatus_name, NotificationChannel_VerificationStatus_value) +} + +func init() { + proto.RegisterFile("google/monitoring/v3/notification.proto", fileDescriptor_notification_5449d40305a71b45) +} + +var fileDescriptor_notification_5449d40305a71b45 = []byte{ + // 602 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0x6d, 0x6b, 0xdb, 0x3c, + 0x14, 0x7d, 0x9c, 0x34, 0x7d, 0x5a, 0xb9, 0xa4, 0x9d, 0x5a, 0x86, 0xf1, 0xde, 0xd2, 0xee, 0xc3, + 0xf2, 0xc9, 0x86, 0x64, 0x83, 0x75, 0x6f, 0xd0, 0xa4, 0xe9, 0x08, 0xac, 0x59, 0xc9, 0xdb, 0xa0, + 0x14, 0x82, 0x92, 0xa8, 0x9e, 0x98, 0x2d, 0x19, 0x49, 0xf6, 0xc8, 0xcf, 0xd8, 0x8f, 0xd8, 0x87, + 0xed, 0xa7, 0xec, 0x57, 0x0d, 0xcb, 0x8a, 0xed, 0xb5, 0x86, 0x75, 0xdf, 0x74, 0xcf, 0x3d, 0xe7, + 0xdc, 0x7b, 0x4f, 0x4c, 0xc0, 0x33, 0x8f, 0x31, 0xcf, 0xc7, 0x6e, 0xc0, 0x28, 0x91, 0x8c, 0x13, + 0xea, 0xb9, 0x71, 0xdb, 0xa5, 0x4c, 0x92, 0x6b, 0xb2, 0x40, 0x92, 0x30, 0xea, 0x84, 0x9c, 0x49, + 0x06, 0x0f, 0x52, 0xa2, 0x93, 0x13, 0x9d, 0xb8, 0x6d, 0x3f, 0xd4, 0x72, 0x14, 0x12, 0x17, 0x51, + 0xca, 0xa4, 0x92, 0x88, 0x54, 0x63, 0xdf, 0x2f, 0x74, 0x7d, 0x34, 0xc7, 0xbe, 0xc6, 0x0f, 0x4b, + 0x87, 0x2e, 0x58, 0x10, 0xac, 0xc7, 0xd9, 0x8f, 0x35, 0x45, 0x55, 0xf3, 0xe8, 0xda, 0xfd, 0xca, + 0x51, 0x18, 0x62, 0xae, 0xad, 0x8f, 0xbe, 0x55, 0xc0, 0xa3, 0x41, 0x61, 0xcb, 0xee, 0x67, 0x44, + 0x29, 0xf6, 0x4f, 0xb1, 0x58, 0x70, 0x12, 0x4a, 0xc6, 0x21, 0x04, 0x1b, 0x14, 0x05, 0xd8, 0xda, + 0x6c, 0x18, 0xcd, 0xed, 0xa1, 0x7a, 0x27, 0x98, 0x5c, 0x85, 0xd8, 0x32, 0x52, 0x2c, 0x79, 0xc3, + 0x43, 0xb0, 0xb3, 0x24, 0x22, 0xf4, 0xd1, 0x6a, 0xa6, 0xf8, 0x15, 0xd5, 0x33, 0x35, 0x36, 0x48, + 0x64, 0x0d, 0x60, 0x2e, 0xb5, 0x31, 0x61, 0xd4, 0xaa, 0x6a, 0x46, 0x0e, 0xc1, 0x36, 0xd8, 0x54, + 0x07, 0x0a, 0x6b, 0xa3, 0x51, 0x6d, 0x9a, 0xad, 0x07, 0x8e, 0x8e, 0x0b, 0x85, 0xc4, 0xf9, 0x90, + 0x74, 0xf2, 0xcd, 0x86, 0x9a, 0x0a, 0x07, 0x60, 0x57, 0x44, 0x61, 0xc8, 0xb8, 0xc4, 0xcb, 0x99, + 0x24, 0x98, 0x0b, 0xab, 0xd6, 0xa8, 0x36, 0xeb, 0xad, 0x43, 0xa7, 0x2c, 0x6c, 0x67, 0x84, 0x79, + 0x4c, 0x16, 0x78, 0x4c, 0x30, 0xef, 0x54, 0x2c, 0x63, 0x58, 0xcf, 0xd4, 0x09, 0x24, 0x8e, 0xbe, + 0xd7, 0xc0, 0x7e, 0x49, 0x26, 0xa5, 0x57, 0x97, 0xa5, 0x73, 0x33, 0x89, 0xea, 0x5f, 0x93, 0xd8, + 0xb8, 0x9d, 0xc4, 0x79, 0x96, 0x44, 0x4d, 0x25, 0xf1, 0xa2, 0xfc, 0x96, 0x92, 0x3d, 0xd3, 0x9c, + 0x44, 0x8f, 0x4a, 0xbe, 0xca, 0x32, 0xba, 0x04, 0x66, 0x24, 0x30, 0x9f, 0x69, 0xcf, 0x2d, 0xe5, + 0x79, 0x7c, 0x77, 0xcf, 0x89, 0xc0, 0xbc, 0xe8, 0x0b, 0xa2, 0x0c, 0x80, 0x01, 0xd8, 0x8f, 0x31, + 0xcf, 0x24, 0x33, 0x21, 0x91, 0x8c, 0x84, 0xb5, 0xdd, 0x30, 0x9a, 0xf5, 0xd6, 0x9b, 0xbb, 0xcf, + 0x98, 0x16, 0x4c, 0x46, 0xca, 0x63, 0x08, 0xe3, 0x5b, 0x18, 
0x7c, 0x0e, 0xfe, 0xc7, 0x14, 0xcd, + 0x7d, 0xbc, 0xb4, 0xcc, 0x86, 0xd1, 0x34, 0x5b, 0xf6, 0x7a, 0xc4, 0xfa, 0x23, 0x77, 0x3a, 0x8c, + 0xf9, 0x53, 0xe4, 0x47, 0x78, 0xb8, 0xa6, 0xda, 0xc7, 0xc0, 0x2c, 0xec, 0x0f, 0xf7, 0x40, 0xf5, + 0x0b, 0x5e, 0xe9, 0x9f, 0x32, 0x79, 0xc2, 0x03, 0x50, 0x8b, 0x13, 0x89, 0xfe, 0x70, 0xd3, 0xe2, + 0x55, 0xe5, 0xa5, 0x61, 0xbf, 0x05, 0xbb, 0x37, 0xce, 0xff, 0x17, 0xf9, 0xd1, 0x27, 0x00, 0x6f, + 0x5f, 0x06, 0x9f, 0x82, 0x27, 0xd3, 0xde, 0xb0, 0x7f, 0xd6, 0xef, 0x9e, 0x8c, 0xfb, 0x1f, 0x07, + 0xb3, 0xd1, 0xf8, 0x64, 0x3c, 0x19, 0xcd, 0x26, 0x83, 0xd1, 0x45, 0xaf, 0xdb, 0x3f, 0xeb, 0xf7, + 0x4e, 0xf7, 0xfe, 0x83, 0x75, 0x00, 0x26, 0x83, 0x94, 0xd6, 0x3b, 0xdd, 0x33, 0xe0, 0x0e, 0xd8, + 0xca, 0xaa, 0x4a, 0xe7, 0x87, 0x01, 0xac, 0x05, 0x0b, 0x4a, 0x03, 0xee, 0xdc, 0x2b, 0x26, 0x7c, + 0x91, 0x04, 0x73, 0x61, 0x5c, 0xbe, 0xd3, 0x54, 0x8f, 0xf9, 0x88, 0x7a, 0x0e, 0xe3, 0x9e, 0xeb, + 0x61, 0xaa, 0x62, 0x73, 0xd3, 0x16, 0x0a, 0x89, 0xf8, 0xf3, 0xff, 0xe4, 0x75, 0x5e, 0xfd, 0xac, + 0xd8, 0xef, 0x53, 0x83, 0xae, 0xcf, 0xa2, 0xa5, 0x73, 0x9e, 0x4f, 0x9c, 0xb6, 0x7f, 0xad, 0x9b, + 0x57, 0xaa, 0x79, 0x95, 0x37, 0xaf, 0xa6, 0xed, 0xf9, 0xa6, 0x1a, 0xd2, 0xfe, 0x1d, 0x00, 0x00, + 0xff, 0xff, 0xf7, 0x1b, 0x09, 0x21, 0x28, 0x05, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/notification_service.pb.go b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/notification_service.pb.go new file mode 100644 index 000000000..4dbcd8c68 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/notification_service.pb.go @@ -0,0 +1,1308 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/monitoring/v3/notification_service.proto + +package monitoring // import "google.golang.org/genproto/googleapis/monitoring/v3" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import empty "github.com/golang/protobuf/ptypes/empty" +import _ "github.com/golang/protobuf/ptypes/struct" +import timestamp "github.com/golang/protobuf/ptypes/timestamp" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import field_mask "google.golang.org/genproto/protobuf/field_mask" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// The `ListNotificationChannelDescriptors` request. +type ListNotificationChannelDescriptorsRequest struct { + // The REST resource name of the parent from which to retrieve + // the notification channel descriptors. The expected syntax is: + // + // projects/[PROJECT_ID] + // + // Note that this names the parent container in which to look for the + // descriptors; to retrieve a single descriptor by name, use the + // [GetNotificationChannelDescriptor][google.monitoring.v3.NotificationChannelService.GetNotificationChannelDescriptor] + // operation, instead. + Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` + // The maximum number of results to return in a single response. 
If + // not set to a positive number, a reasonable value will be chosen by the + // service. + PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // If non-empty, `page_token` must contain a value returned as the + // `next_page_token` in a previous response to request the next set + // of results. + PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListNotificationChannelDescriptorsRequest) Reset() { + *m = ListNotificationChannelDescriptorsRequest{} +} +func (m *ListNotificationChannelDescriptorsRequest) String() string { return proto.CompactTextString(m) } +func (*ListNotificationChannelDescriptorsRequest) ProtoMessage() {} +func (*ListNotificationChannelDescriptorsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_notification_service_e0bdcc277cafcf7d, []int{0} +} +func (m *ListNotificationChannelDescriptorsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListNotificationChannelDescriptorsRequest.Unmarshal(m, b) +} +func (m *ListNotificationChannelDescriptorsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListNotificationChannelDescriptorsRequest.Marshal(b, m, deterministic) +} +func (dst *ListNotificationChannelDescriptorsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListNotificationChannelDescriptorsRequest.Merge(dst, src) +} +func (m *ListNotificationChannelDescriptorsRequest) XXX_Size() int { + return xxx_messageInfo_ListNotificationChannelDescriptorsRequest.Size(m) +} +func (m *ListNotificationChannelDescriptorsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListNotificationChannelDescriptorsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListNotificationChannelDescriptorsRequest proto.InternalMessageInfo + +func (m *ListNotificationChannelDescriptorsRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *ListNotificationChannelDescriptorsRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListNotificationChannelDescriptorsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +// The `ListNotificationChannelDescriptors` response. +type ListNotificationChannelDescriptorsResponse struct { + // The monitored resource descriptors supported for the specified + // project, optionally filtered. + ChannelDescriptors []*NotificationChannelDescriptor `protobuf:"bytes,1,rep,name=channel_descriptors,json=channelDescriptors,proto3" json:"channel_descriptors,omitempty"` + // If not empty, indicates that there may be more results that match + // the request. Use the value in the `page_token` field in a + // subsequent request to fetch the next set of results. If empty, + // all results have been returned. 
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListNotificationChannelDescriptorsResponse) Reset() { + *m = ListNotificationChannelDescriptorsResponse{} +} +func (m *ListNotificationChannelDescriptorsResponse) String() string { + return proto.CompactTextString(m) +} +func (*ListNotificationChannelDescriptorsResponse) ProtoMessage() {} +func (*ListNotificationChannelDescriptorsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_notification_service_e0bdcc277cafcf7d, []int{1} +} +func (m *ListNotificationChannelDescriptorsResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListNotificationChannelDescriptorsResponse.Unmarshal(m, b) +} +func (m *ListNotificationChannelDescriptorsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListNotificationChannelDescriptorsResponse.Marshal(b, m, deterministic) +} +func (dst *ListNotificationChannelDescriptorsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListNotificationChannelDescriptorsResponse.Merge(dst, src) +} +func (m *ListNotificationChannelDescriptorsResponse) XXX_Size() int { + return xxx_messageInfo_ListNotificationChannelDescriptorsResponse.Size(m) +} +func (m *ListNotificationChannelDescriptorsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListNotificationChannelDescriptorsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListNotificationChannelDescriptorsResponse proto.InternalMessageInfo + +func (m *ListNotificationChannelDescriptorsResponse) GetChannelDescriptors() []*NotificationChannelDescriptor { + if m != nil { + return m.ChannelDescriptors + } + return nil +} + +func (m *ListNotificationChannelDescriptorsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +// The `GetNotificationChannelDescriptor` request. +type GetNotificationChannelDescriptorRequest struct { + // The channel type for which to execute the request. The format is + // `projects/[PROJECT_ID]/notificationChannelDescriptors/{channel_type}`.
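// A minimal, illustrative sketch of fetching one descriptor through the
// client defined later in this file; it assumes an already-dialed
// *grpc.ClientConn, the alias monitoring for this vendored package, and a
// placeholder project ID (none of these names are part of the generated API).
import (
	"golang.org/x/net/context"

	monitoring "google.golang.org/genproto/googleapis/monitoring/v3"
)

// fetchEmailDescriptor retrieves the descriptor for one channel type
// ("email" here is illustrative); the descriptor's Labels indicate which
// label keys a channel of that type must populate.
func fetchEmailDescriptor(ctx context.Context, client monitoring.NotificationChannelServiceClient, projectID string) (*monitoring.NotificationChannelDescriptor, error) {
	return client.GetNotificationChannelDescriptor(ctx, &monitoring.GetNotificationChannelDescriptorRequest{
		Name: "projects/" + projectID + "/notificationChannelDescriptors/email",
	})
}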
+ Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetNotificationChannelDescriptorRequest) Reset() { + *m = GetNotificationChannelDescriptorRequest{} +} +func (m *GetNotificationChannelDescriptorRequest) String() string { return proto.CompactTextString(m) } +func (*GetNotificationChannelDescriptorRequest) ProtoMessage() {} +func (*GetNotificationChannelDescriptorRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_notification_service_e0bdcc277cafcf7d, []int{2} +} +func (m *GetNotificationChannelDescriptorRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetNotificationChannelDescriptorRequest.Unmarshal(m, b) +} +func (m *GetNotificationChannelDescriptorRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetNotificationChannelDescriptorRequest.Marshal(b, m, deterministic) +} +func (dst *GetNotificationChannelDescriptorRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetNotificationChannelDescriptorRequest.Merge(dst, src) +} +func (m *GetNotificationChannelDescriptorRequest) XXX_Size() int { + return xxx_messageInfo_GetNotificationChannelDescriptorRequest.Size(m) +} +func (m *GetNotificationChannelDescriptorRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetNotificationChannelDescriptorRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetNotificationChannelDescriptorRequest proto.InternalMessageInfo + +func (m *GetNotificationChannelDescriptorRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// The `CreateNotificationChannel` request. +type CreateNotificationChannelRequest struct { + // The project on which to execute the request. The format is: + // + // projects/[PROJECT_ID] + // + // Note that this names the container into which the channel will be + // written. This does not name the newly created channel. The resulting + // channel's name will have a normalized version of this field as a prefix, + // but will add `/notificationChannels/[CHANNEL_ID]` to identify the channel. + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + // The definition of the `NotificationChannel` to create. 
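// Building on the request type above, a hedged sketch of creating a channel,
// using the same aliases as the previous sketch; the "email_address" label
// key is an assumption drawn from the "email" descriptor, and wrappers is
// github.com/golang/protobuf/ptypes/wrappers as imported at the top of this
// file.

// createEmailChannel writes a new channel into the project container named
// by Name; the server appends the /notificationChannels/[CHANNEL_ID] suffix.
func createEmailChannel(ctx context.Context, client monitoring.NotificationChannelServiceClient, projectID, address string) (*monitoring.NotificationChannel, error) {
	return client.CreateNotificationChannel(ctx, &monitoring.CreateNotificationChannelRequest{
		Name: "projects/" + projectID,
		NotificationChannel: &monitoring.NotificationChannel{
			Type:        "email",
			DisplayName: "on-call",
			Labels:      map[string]string{"email_address": address}, // key assumed from the "email" descriptor
			Enabled:     &wrappers.BoolValue{Value: true},
		},
	})
}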
+ NotificationChannel *NotificationChannel `protobuf:"bytes,2,opt,name=notification_channel,json=notificationChannel,proto3" json:"notification_channel,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateNotificationChannelRequest) Reset() { *m = CreateNotificationChannelRequest{} } +func (m *CreateNotificationChannelRequest) String() string { return proto.CompactTextString(m) } +func (*CreateNotificationChannelRequest) ProtoMessage() {} +func (*CreateNotificationChannelRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_notification_service_e0bdcc277cafcf7d, []int{3} +} +func (m *CreateNotificationChannelRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateNotificationChannelRequest.Unmarshal(m, b) +} +func (m *CreateNotificationChannelRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateNotificationChannelRequest.Marshal(b, m, deterministic) +} +func (dst *CreateNotificationChannelRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateNotificationChannelRequest.Merge(dst, src) +} +func (m *CreateNotificationChannelRequest) XXX_Size() int { + return xxx_messageInfo_CreateNotificationChannelRequest.Size(m) +} +func (m *CreateNotificationChannelRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CreateNotificationChannelRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateNotificationChannelRequest proto.InternalMessageInfo + +func (m *CreateNotificationChannelRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *CreateNotificationChannelRequest) GetNotificationChannel() *NotificationChannel { + if m != nil { + return m.NotificationChannel + } + return nil +} + +// The `ListNotificationChannels` request. +type ListNotificationChannelsRequest struct { + // The project on which to execute the request. The format is + // `projects/[PROJECT_ID]`. That is, this names the container + // in which to look for the notification channels; it does not name a + // specific channel. To query a specific channel by REST resource name, use + // the + // [`GetNotificationChannel`][google.monitoring.v3.NotificationChannelService.GetNotificationChannel] operation. + Name string `protobuf:"bytes,5,opt,name=name,proto3" json:"name,omitempty"` + // If provided, this field specifies the criteria that must be met by + // notification channels to be included in the response. + // + // For more details, see [sorting and + // filtering](/monitoring/api/v3/sorting-and-filtering). + Filter string `protobuf:"bytes,6,opt,name=filter,proto3" json:"filter,omitempty"` + // A comma-separated list of fields by which to sort the result. Supports + // the same set of fields as in `filter`. Entries can be prefixed with + // a minus sign to sort in descending rather than ascending order. + // + // For more details, see [sorting and + // filtering](/monitoring/api/v3/sorting-and-filtering). + OrderBy string `protobuf:"bytes,7,opt,name=order_by,json=orderBy,proto3" json:"order_by,omitempty"` + // The maximum number of results to return in a single response. If + // not set to a positive number, a reasonable value will be chosen by the + // service. + PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // If non-empty, `page_token` must contain a value returned as the + // `next_page_token` in a previous response to request the next set + // of results. 
+ PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListNotificationChannelsRequest) Reset() { *m = ListNotificationChannelsRequest{} } +func (m *ListNotificationChannelsRequest) String() string { return proto.CompactTextString(m) } +func (*ListNotificationChannelsRequest) ProtoMessage() {} +func (*ListNotificationChannelsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_notification_service_e0bdcc277cafcf7d, []int{4} +} +func (m *ListNotificationChannelsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListNotificationChannelsRequest.Unmarshal(m, b) +} +func (m *ListNotificationChannelsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListNotificationChannelsRequest.Marshal(b, m, deterministic) +} +func (dst *ListNotificationChannelsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListNotificationChannelsRequest.Merge(dst, src) +} +func (m *ListNotificationChannelsRequest) XXX_Size() int { + return xxx_messageInfo_ListNotificationChannelsRequest.Size(m) +} +func (m *ListNotificationChannelsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListNotificationChannelsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListNotificationChannelsRequest proto.InternalMessageInfo + +func (m *ListNotificationChannelsRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *ListNotificationChannelsRequest) GetFilter() string { + if m != nil { + return m.Filter + } + return "" +} + +func (m *ListNotificationChannelsRequest) GetOrderBy() string { + if m != nil { + return m.OrderBy + } + return "" +} + +func (m *ListNotificationChannelsRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListNotificationChannelsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +// The `ListNotificationChannels` response. +type ListNotificationChannelsResponse struct { + // The notification channels defined for the specified project. + NotificationChannels []*NotificationChannel `protobuf:"bytes,3,rep,name=notification_channels,json=notificationChannels,proto3" json:"notification_channels,omitempty"` + // If not empty, indicates that there may be more results that match + // the request. Use the value in the `page_token` field in a + // subsequent request to fetch the next set of results. If empty, + // all results have been returned. 
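// A sketch of draining the paged List API described by the fields above:
// keep passing next_page_token back as page_token until it comes back empty
// (same illustrative aliases as the earlier sketches).
func listAllChannels(ctx context.Context, client monitoring.NotificationChannelServiceClient, projectID string) ([]*monitoring.NotificationChannel, error) {
	var all []*monitoring.NotificationChannel
	req := &monitoring.ListNotificationChannelsRequest{
		Name:     "projects/" + projectID,
		PageSize: 100, // a hint; the service may return fewer
	}
	for {
		resp, err := client.ListNotificationChannels(ctx, req)
		if err != nil {
			return nil, err
		}
		all = append(all, resp.NotificationChannels...)
		if resp.NextPageToken == "" {
			return all, nil
		}
		req.PageToken = resp.NextPageToken
	}
}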
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListNotificationChannelsResponse) Reset() { *m = ListNotificationChannelsResponse{} } +func (m *ListNotificationChannelsResponse) String() string { return proto.CompactTextString(m) } +func (*ListNotificationChannelsResponse) ProtoMessage() {} +func (*ListNotificationChannelsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_notification_service_e0bdcc277cafcf7d, []int{5} +} +func (m *ListNotificationChannelsResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListNotificationChannelsResponse.Unmarshal(m, b) +} +func (m *ListNotificationChannelsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListNotificationChannelsResponse.Marshal(b, m, deterministic) +} +func (dst *ListNotificationChannelsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListNotificationChannelsResponse.Merge(dst, src) +} +func (m *ListNotificationChannelsResponse) XXX_Size() int { + return xxx_messageInfo_ListNotificationChannelsResponse.Size(m) +} +func (m *ListNotificationChannelsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListNotificationChannelsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListNotificationChannelsResponse proto.InternalMessageInfo + +func (m *ListNotificationChannelsResponse) GetNotificationChannels() []*NotificationChannel { + if m != nil { + return m.NotificationChannels + } + return nil +} + +func (m *ListNotificationChannelsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +// The `GetNotificationChannel` request. +type GetNotificationChannelRequest struct { + // The channel for which to execute the request. The format is + // `projects/[PROJECT_ID]/notificationChannels/[CHANNEL_ID]`. 
+ Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetNotificationChannelRequest) Reset() { *m = GetNotificationChannelRequest{} } +func (m *GetNotificationChannelRequest) String() string { return proto.CompactTextString(m) } +func (*GetNotificationChannelRequest) ProtoMessage() {} +func (*GetNotificationChannelRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_notification_service_e0bdcc277cafcf7d, []int{6} +} +func (m *GetNotificationChannelRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetNotificationChannelRequest.Unmarshal(m, b) +} +func (m *GetNotificationChannelRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetNotificationChannelRequest.Marshal(b, m, deterministic) +} +func (dst *GetNotificationChannelRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetNotificationChannelRequest.Merge(dst, src) +} +func (m *GetNotificationChannelRequest) XXX_Size() int { + return xxx_messageInfo_GetNotificationChannelRequest.Size(m) +} +func (m *GetNotificationChannelRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetNotificationChannelRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetNotificationChannelRequest proto.InternalMessageInfo + +func (m *GetNotificationChannelRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// The `UpdateNotificationChannel` request. +type UpdateNotificationChannelRequest struct { + // The fields to update. + UpdateMask *field_mask.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` + // A description of the changes to be applied to the specified + // notification channel. The description must provide a definition for + // fields to be updated; the names of these fields should also be + // included in the `update_mask`. 
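// Given the mask semantics just described (only fields named in update_mask
// change), a sketch of renaming a channel in place; field_mask is the alias
// this file imports for google.golang.org/genproto/protobuf/field_mask, and
// the other aliases follow the earlier sketches.
func renameChannel(ctx context.Context, client monitoring.NotificationChannelServiceClient, channelName, newDisplayName string) (*monitoring.NotificationChannel, error) {
	return client.UpdateNotificationChannel(ctx, &monitoring.UpdateNotificationChannelRequest{
		UpdateMask: &field_mask.FieldMask{Paths: []string{"display_name"}},
		NotificationChannel: &monitoring.NotificationChannel{
			Name:        channelName, // full resource name of the channel to update
			DisplayName: newDisplayName,
		},
	})
}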
+ NotificationChannel *NotificationChannel `protobuf:"bytes,3,opt,name=notification_channel,json=notificationChannel,proto3" json:"notification_channel,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpdateNotificationChannelRequest) Reset() { *m = UpdateNotificationChannelRequest{} } +func (m *UpdateNotificationChannelRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateNotificationChannelRequest) ProtoMessage() {} +func (*UpdateNotificationChannelRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_notification_service_e0bdcc277cafcf7d, []int{7} +} +func (m *UpdateNotificationChannelRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UpdateNotificationChannelRequest.Unmarshal(m, b) +} +func (m *UpdateNotificationChannelRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UpdateNotificationChannelRequest.Marshal(b, m, deterministic) +} +func (dst *UpdateNotificationChannelRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdateNotificationChannelRequest.Merge(dst, src) +} +func (m *UpdateNotificationChannelRequest) XXX_Size() int { + return xxx_messageInfo_UpdateNotificationChannelRequest.Size(m) +} +func (m *UpdateNotificationChannelRequest) XXX_DiscardUnknown() { + xxx_messageInfo_UpdateNotificationChannelRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_UpdateNotificationChannelRequest proto.InternalMessageInfo + +func (m *UpdateNotificationChannelRequest) GetUpdateMask() *field_mask.FieldMask { + if m != nil { + return m.UpdateMask + } + return nil +} + +func (m *UpdateNotificationChannelRequest) GetNotificationChannel() *NotificationChannel { + if m != nil { + return m.NotificationChannel + } + return nil +} + +// The `DeleteNotificationChannel` request. +type DeleteNotificationChannelRequest struct { + // The channel for which to execute the request. The format is + // `projects/[PROJECT_ID]/notificationChannels/[CHANNEL_ID]`. + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + // If true, the notification channel will be deleted regardless of its + // use in alert policies (the policies will be updated to remove the + // channel). If false, channels that are still referenced by an existing + // alerting policy will fail to be deleted in a delete operation. 
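// A sketch of the force-delete behavior described above: with Force set, the
// channel is removed even if alerting policies still reference it (aliases
// as in the earlier sketches; the reply type empty.Empty comes from
// github.com/golang/protobuf/ptypes/empty, imported at the top of this file).
func forceDeleteChannel(ctx context.Context, client monitoring.NotificationChannelServiceClient, channelName string) error {
	_, err := client.DeleteNotificationChannel(ctx, &monitoring.DeleteNotificationChannelRequest{
		Name:  channelName,
		Force: true,
	})
	return err // the reply is an *empty.Empty, so only the error matters
}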
+ Force bool `protobuf:"varint,5,opt,name=force,proto3" json:"force,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteNotificationChannelRequest) Reset() { *m = DeleteNotificationChannelRequest{} } +func (m *DeleteNotificationChannelRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteNotificationChannelRequest) ProtoMessage() {} +func (*DeleteNotificationChannelRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_notification_service_e0bdcc277cafcf7d, []int{8} +} +func (m *DeleteNotificationChannelRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteNotificationChannelRequest.Unmarshal(m, b) +} +func (m *DeleteNotificationChannelRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteNotificationChannelRequest.Marshal(b, m, deterministic) +} +func (dst *DeleteNotificationChannelRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteNotificationChannelRequest.Merge(dst, src) +} +func (m *DeleteNotificationChannelRequest) XXX_Size() int { + return xxx_messageInfo_DeleteNotificationChannelRequest.Size(m) +} +func (m *DeleteNotificationChannelRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteNotificationChannelRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteNotificationChannelRequest proto.InternalMessageInfo + +func (m *DeleteNotificationChannelRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *DeleteNotificationChannelRequest) GetForce() bool { + if m != nil { + return m.Force + } + return false +} + +// The `SendNotificationChannelVerificationCode` request. +type SendNotificationChannelVerificationCodeRequest struct { + // The notification channel to which to send a verification code. 
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SendNotificationChannelVerificationCodeRequest) Reset() { + *m = SendNotificationChannelVerificationCodeRequest{} +} +func (m *SendNotificationChannelVerificationCodeRequest) String() string { + return proto.CompactTextString(m) +} +func (*SendNotificationChannelVerificationCodeRequest) ProtoMessage() {} +func (*SendNotificationChannelVerificationCodeRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_notification_service_e0bdcc277cafcf7d, []int{9} +} +func (m *SendNotificationChannelVerificationCodeRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SendNotificationChannelVerificationCodeRequest.Unmarshal(m, b) +} +func (m *SendNotificationChannelVerificationCodeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SendNotificationChannelVerificationCodeRequest.Marshal(b, m, deterministic) +} +func (dst *SendNotificationChannelVerificationCodeRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_SendNotificationChannelVerificationCodeRequest.Merge(dst, src) +} +func (m *SendNotificationChannelVerificationCodeRequest) XXX_Size() int { + return xxx_messageInfo_SendNotificationChannelVerificationCodeRequest.Size(m) +} +func (m *SendNotificationChannelVerificationCodeRequest) XXX_DiscardUnknown() { + xxx_messageInfo_SendNotificationChannelVerificationCodeRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_SendNotificationChannelVerificationCodeRequest proto.InternalMessageInfo + +func (m *SendNotificationChannelVerificationCodeRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// The `GetNotificationChannelVerificationCode` request. +type GetNotificationChannelVerificationCodeRequest struct { + // The notification channel for which a verification code is to be generated + // and retrieved. This must name a channel that is already verified; if + // the specified channel is not verified, the request will fail. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The desired expiration time. If specified, the API will guarantee that + // the returned code will not be valid after the specified timestamp; + // however, the API cannot guarantee that the returned code will be + // valid for at least as long as the requested time (the API puts an upper + // bound on the amount of time for which a code may be valid). If omitted, + // a default expiration will be used, which may be less than the max + // permissible expiration (so specifying an expiration may extend the + // code's lifetime over omitting an expiration, even though the API does + // impose an upper limit on the maximum expiration that is permitted). 
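// Reflecting the expiration caveats above, a sketch of asking an already
// verified channel for a transferable code while requesting (but not being
// guaranteed) a one-hour lifetime; ptypes is
// github.com/golang/protobuf/ptypes, and the snippet also needs the standard
// time package.
func requestTransferableCode(ctx context.Context, client monitoring.NotificationChannelServiceClient, channelName string) (string, error) {
	expiry, err := ptypes.TimestampProto(time.Now().Add(time.Hour))
	if err != nil {
		return "", err
	}
	resp, err := client.GetNotificationChannelVerificationCode(ctx, &monitoring.GetNotificationChannelVerificationCodeRequest{
		Name:       channelName,
		ExpireTime: expiry, // a request; the service may cap it lower
	})
	if err != nil {
		return "", err
	}
	return resp.Code, nil
}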
+ ExpireTime *timestamp.Timestamp `protobuf:"bytes,2,opt,name=expire_time,json=expireTime,proto3" json:"expire_time,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetNotificationChannelVerificationCodeRequest) Reset() { + *m = GetNotificationChannelVerificationCodeRequest{} +} +func (m *GetNotificationChannelVerificationCodeRequest) String() string { + return proto.CompactTextString(m) +} +func (*GetNotificationChannelVerificationCodeRequest) ProtoMessage() {} +func (*GetNotificationChannelVerificationCodeRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_notification_service_e0bdcc277cafcf7d, []int{10} +} +func (m *GetNotificationChannelVerificationCodeRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetNotificationChannelVerificationCodeRequest.Unmarshal(m, b) +} +func (m *GetNotificationChannelVerificationCodeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetNotificationChannelVerificationCodeRequest.Marshal(b, m, deterministic) +} +func (dst *GetNotificationChannelVerificationCodeRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetNotificationChannelVerificationCodeRequest.Merge(dst, src) +} +func (m *GetNotificationChannelVerificationCodeRequest) XXX_Size() int { + return xxx_messageInfo_GetNotificationChannelVerificationCodeRequest.Size(m) +} +func (m *GetNotificationChannelVerificationCodeRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetNotificationChannelVerificationCodeRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetNotificationChannelVerificationCodeRequest proto.InternalMessageInfo + +func (m *GetNotificationChannelVerificationCodeRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *GetNotificationChannelVerificationCodeRequest) GetExpireTime() *timestamp.Timestamp { + if m != nil { + return m.ExpireTime + } + return nil +} + +// The `GetNotificationChannelVerificationCode` response. +type GetNotificationChannelVerificationCodeResponse struct { + // The verification code, which may be used to verify other channels + // that have an equivalent identity (i.e. other channels of the same + // type with the same fingerprint such as other email channels with + // the same email address or other sms channels with the same number). + Code string `protobuf:"bytes,1,opt,name=code,proto3" json:"code,omitempty"` + // The expiration time associated with the code that was returned. If + // an expiration was provided in the request, this is the minimum of the + // requested expiration in the request and the max permitted expiration.
+ ExpireTime *timestamp.Timestamp `protobuf:"bytes,2,opt,name=expire_time,json=expireTime,proto3" json:"expire_time,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetNotificationChannelVerificationCodeResponse) Reset() { + *m = GetNotificationChannelVerificationCodeResponse{} +} +func (m *GetNotificationChannelVerificationCodeResponse) String() string { + return proto.CompactTextString(m) +} +func (*GetNotificationChannelVerificationCodeResponse) ProtoMessage() {} +func (*GetNotificationChannelVerificationCodeResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_notification_service_e0bdcc277cafcf7d, []int{11} +} +func (m *GetNotificationChannelVerificationCodeResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetNotificationChannelVerificationCodeResponse.Unmarshal(m, b) +} +func (m *GetNotificationChannelVerificationCodeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetNotificationChannelVerificationCodeResponse.Marshal(b, m, deterministic) +} +func (dst *GetNotificationChannelVerificationCodeResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetNotificationChannelVerificationCodeResponse.Merge(dst, src) +} +func (m *GetNotificationChannelVerificationCodeResponse) XXX_Size() int { + return xxx_messageInfo_GetNotificationChannelVerificationCodeResponse.Size(m) +} +func (m *GetNotificationChannelVerificationCodeResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetNotificationChannelVerificationCodeResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetNotificationChannelVerificationCodeResponse proto.InternalMessageInfo + +func (m *GetNotificationChannelVerificationCodeResponse) GetCode() string { + if m != nil { + return m.Code + } + return "" +} + +func (m *GetNotificationChannelVerificationCodeResponse) GetExpireTime() *timestamp.Timestamp { + if m != nil { + return m.ExpireTime + } + return nil +} + +// The `VerifyNotificationChannel` request. +type VerifyNotificationChannelRequest struct { + // The notification channel to verify. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The verification code that was delivered to the channel as + // a result of invoking the `SendNotificationChannelVerificationCode` API + // method or that was retrieved from a verified channel via + // `GetNotificationChannelVerificationCode`. For example, one might have + // "G-123456" or "TKNZGhhd2EyN3I1MnRnMjRv" (in general, one is only + // guaranteed that the code is valid UTF-8; one should not + // make any assumptions regarding the structure or format of the code). 
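// Putting the two request types together, a sketch of the verification
// round trip: ask the service to deliver a code to the channel, then prove
// receipt once a human relays it back (same illustrative aliases as the
// earlier sketches).
func sendVerificationCode(ctx context.Context, client monitoring.NotificationChannelServiceClient, channelName string) error {
	_, err := client.SendNotificationChannelVerificationCode(ctx, &monitoring.SendNotificationChannelVerificationCodeRequest{
		Name: channelName,
	})
	return err
}

func verifyChannel(ctx context.Context, client monitoring.NotificationChannelServiceClient, channelName, code string) (*monitoring.NotificationChannel, error) {
	return client.VerifyNotificationChannel(ctx, &monitoring.VerifyNotificationChannelRequest{
		Name: channelName,
		Code: code, // as delivered to the channel, e.g. "G-123456"
	})
}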
+ Code string `protobuf:"bytes,2,opt,name=code,proto3" json:"code,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *VerifyNotificationChannelRequest) Reset() { *m = VerifyNotificationChannelRequest{} } +func (m *VerifyNotificationChannelRequest) String() string { return proto.CompactTextString(m) } +func (*VerifyNotificationChannelRequest) ProtoMessage() {} +func (*VerifyNotificationChannelRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_notification_service_e0bdcc277cafcf7d, []int{12} +} +func (m *VerifyNotificationChannelRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_VerifyNotificationChannelRequest.Unmarshal(m, b) +} +func (m *VerifyNotificationChannelRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_VerifyNotificationChannelRequest.Marshal(b, m, deterministic) +} +func (dst *VerifyNotificationChannelRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_VerifyNotificationChannelRequest.Merge(dst, src) +} +func (m *VerifyNotificationChannelRequest) XXX_Size() int { + return xxx_messageInfo_VerifyNotificationChannelRequest.Size(m) +} +func (m *VerifyNotificationChannelRequest) XXX_DiscardUnknown() { + xxx_messageInfo_VerifyNotificationChannelRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_VerifyNotificationChannelRequest proto.InternalMessageInfo + +func (m *VerifyNotificationChannelRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *VerifyNotificationChannelRequest) GetCode() string { + if m != nil { + return m.Code + } + return "" +} + +func init() { + proto.RegisterType((*ListNotificationChannelDescriptorsRequest)(nil), "google.monitoring.v3.ListNotificationChannelDescriptorsRequest") + proto.RegisterType((*ListNotificationChannelDescriptorsResponse)(nil), "google.monitoring.v3.ListNotificationChannelDescriptorsResponse") + proto.RegisterType((*GetNotificationChannelDescriptorRequest)(nil), "google.monitoring.v3.GetNotificationChannelDescriptorRequest") + proto.RegisterType((*CreateNotificationChannelRequest)(nil), "google.monitoring.v3.CreateNotificationChannelRequest") + proto.RegisterType((*ListNotificationChannelsRequest)(nil), "google.monitoring.v3.ListNotificationChannelsRequest") + proto.RegisterType((*ListNotificationChannelsResponse)(nil), "google.monitoring.v3.ListNotificationChannelsResponse") + proto.RegisterType((*GetNotificationChannelRequest)(nil), "google.monitoring.v3.GetNotificationChannelRequest") + proto.RegisterType((*UpdateNotificationChannelRequest)(nil), "google.monitoring.v3.UpdateNotificationChannelRequest") + proto.RegisterType((*DeleteNotificationChannelRequest)(nil), "google.monitoring.v3.DeleteNotificationChannelRequest") + proto.RegisterType((*SendNotificationChannelVerificationCodeRequest)(nil), "google.monitoring.v3.SendNotificationChannelVerificationCodeRequest") + proto.RegisterType((*GetNotificationChannelVerificationCodeRequest)(nil), "google.monitoring.v3.GetNotificationChannelVerificationCodeRequest") + proto.RegisterType((*GetNotificationChannelVerificationCodeResponse)(nil), "google.monitoring.v3.GetNotificationChannelVerificationCodeResponse") + proto.RegisterType((*VerifyNotificationChannelRequest)(nil), "google.monitoring.v3.VerifyNotificationChannelRequest") +} + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// NotificationChannelServiceClient is the client API for NotificationChannelService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type NotificationChannelServiceClient interface { + // Lists the descriptors for supported channel types. The use of descriptors + // makes it possible for new channel types to be dynamically added. + ListNotificationChannelDescriptors(ctx context.Context, in *ListNotificationChannelDescriptorsRequest, opts ...grpc.CallOption) (*ListNotificationChannelDescriptorsResponse, error) + // Gets a single channel descriptor. The descriptor indicates which fields + // are expected / permitted for a notification channel of the given type. + GetNotificationChannelDescriptor(ctx context.Context, in *GetNotificationChannelDescriptorRequest, opts ...grpc.CallOption) (*NotificationChannelDescriptor, error) + // Lists the notification channels that have been created for the project. + ListNotificationChannels(ctx context.Context, in *ListNotificationChannelsRequest, opts ...grpc.CallOption) (*ListNotificationChannelsResponse, error) + // Gets a single notification channel. The channel includes the relevant + // configuration details with which the channel was created. However, the + // response may truncate or omit passwords, API keys, or other private key + // matter and thus the response may not be 100% identical to the information + // that was supplied in the call to the create method. + GetNotificationChannel(ctx context.Context, in *GetNotificationChannelRequest, opts ...grpc.CallOption) (*NotificationChannel, error) + // Creates a new notification channel, representing a single notification + // endpoint such as an email address, SMS number, or PagerDuty service. + CreateNotificationChannel(ctx context.Context, in *CreateNotificationChannelRequest, opts ...grpc.CallOption) (*NotificationChannel, error) + // Updates a notification channel. Fields not specified in the field mask + // remain unchanged. + UpdateNotificationChannel(ctx context.Context, in *UpdateNotificationChannelRequest, opts ...grpc.CallOption) (*NotificationChannel, error) + // Deletes a notification channel. + DeleteNotificationChannel(ctx context.Context, in *DeleteNotificationChannelRequest, opts ...grpc.CallOption) (*empty.Empty, error) + // Causes a verification code to be delivered to the channel. The code + // can then be supplied in `VerifyNotificationChannel` to verify the channel. + SendNotificationChannelVerificationCode(ctx context.Context, in *SendNotificationChannelVerificationCodeRequest, opts ...grpc.CallOption) (*empty.Empty, error) + // Requests a verification code for an already verified channel that can then + // be used in a call to VerifyNotificationChannel() on a different channel + // with an equivalent identity in the same or in a different project. This + // makes it possible to copy a channel between projects without requiring + // manual reverification of the channel. 
If the channel is not in the + // verified state, this method will fail (in other words, this may only be + // used if the SendNotificationChannelVerificationCode and + // VerifyNotificationChannel paths have already been used to put the given + // channel into the verified state). + // + // There is no guarantee that the verification codes returned by this method + // will be of a similar structure or form as the ones that are delivered + // to the channel via SendNotificationChannelVerificationCode; while + // VerifyNotificationChannel() will recognize both the codes delivered via + // SendNotificationChannelVerificationCode() and returned from + // GetNotificationChannelVerificationCode(), it is typically the case that + // the verification codes delivered via + // SendNotificationChannelVerificationCode() will be shorter and also + // have a shorter expiration (e.g. codes such as "G-123456") whereas + // GetVerificationCode() will typically return a much longer, websafe base + // 64 encoded string that has a longer expiration time. + GetNotificationChannelVerificationCode(ctx context.Context, in *GetNotificationChannelVerificationCodeRequest, opts ...grpc.CallOption) (*GetNotificationChannelVerificationCodeResponse, error) + // Verifies a `NotificationChannel` by proving receipt of the code + // delivered to the channel as a result of calling + // `SendNotificationChannelVerificationCode`. + VerifyNotificationChannel(ctx context.Context, in *VerifyNotificationChannelRequest, opts ...grpc.CallOption) (*NotificationChannel, error) +} + +type notificationChannelServiceClient struct { + cc *grpc.ClientConn +} + +func NewNotificationChannelServiceClient(cc *grpc.ClientConn) NotificationChannelServiceClient { + return &notificationChannelServiceClient{cc} +} + +func (c *notificationChannelServiceClient) ListNotificationChannelDescriptors(ctx context.Context, in *ListNotificationChannelDescriptorsRequest, opts ...grpc.CallOption) (*ListNotificationChannelDescriptorsResponse, error) { + out := new(ListNotificationChannelDescriptorsResponse) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/ListNotificationChannelDescriptors", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *notificationChannelServiceClient) GetNotificationChannelDescriptor(ctx context.Context, in *GetNotificationChannelDescriptorRequest, opts ...grpc.CallOption) (*NotificationChannelDescriptor, error) { + out := new(NotificationChannelDescriptor) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/GetNotificationChannelDescriptor", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *notificationChannelServiceClient) ListNotificationChannels(ctx context.Context, in *ListNotificationChannelsRequest, opts ...grpc.CallOption) (*ListNotificationChannelsResponse, error) { + out := new(ListNotificationChannelsResponse) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/ListNotificationChannels", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *notificationChannelServiceClient) GetNotificationChannel(ctx context.Context, in *GetNotificationChannelRequest, opts ...grpc.CallOption) (*NotificationChannel, error) { + out := new(NotificationChannel) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/GetNotificationChannel", in, out, opts...)
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *notificationChannelServiceClient) CreateNotificationChannel(ctx context.Context, in *CreateNotificationChannelRequest, opts ...grpc.CallOption) (*NotificationChannel, error) { + out := new(NotificationChannel) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/CreateNotificationChannel", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *notificationChannelServiceClient) UpdateNotificationChannel(ctx context.Context, in *UpdateNotificationChannelRequest, opts ...grpc.CallOption) (*NotificationChannel, error) { + out := new(NotificationChannel) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/UpdateNotificationChannel", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *notificationChannelServiceClient) DeleteNotificationChannel(ctx context.Context, in *DeleteNotificationChannelRequest, opts ...grpc.CallOption) (*empty.Empty, error) { + out := new(empty.Empty) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/DeleteNotificationChannel", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *notificationChannelServiceClient) SendNotificationChannelVerificationCode(ctx context.Context, in *SendNotificationChannelVerificationCodeRequest, opts ...grpc.CallOption) (*empty.Empty, error) { + out := new(empty.Empty) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/SendNotificationChannelVerificationCode", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *notificationChannelServiceClient) GetNotificationChannelVerificationCode(ctx context.Context, in *GetNotificationChannelVerificationCodeRequest, opts ...grpc.CallOption) (*GetNotificationChannelVerificationCodeResponse, error) { + out := new(GetNotificationChannelVerificationCodeResponse) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/GetNotificationChannelVerificationCode", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *notificationChannelServiceClient) VerifyNotificationChannel(ctx context.Context, in *VerifyNotificationChannelRequest, opts ...grpc.CallOption) (*NotificationChannel, error) { + out := new(NotificationChannel) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/VerifyNotificationChannel", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// NotificationChannelServiceServer is the server API for NotificationChannelService service. +type NotificationChannelServiceServer interface { + // Lists the descriptors for supported channel types. The use of descriptors + // makes it possible for new channel types to be dynamically added. + ListNotificationChannelDescriptors(context.Context, *ListNotificationChannelDescriptorsRequest) (*ListNotificationChannelDescriptorsResponse, error) + // Gets a single channel descriptor. The descriptor indicates which fields + // are expected / permitted for a notification channel of the given type. + GetNotificationChannelDescriptor(context.Context, *GetNotificationChannelDescriptorRequest) (*NotificationChannelDescriptor, error) + // Lists the notification channels that have been created for the project. 
+ ListNotificationChannels(context.Context, *ListNotificationChannelsRequest) (*ListNotificationChannelsResponse, error) + // Gets a single notification channel. The channel includes the relevant + // configuration details with which the channel was created. However, the + // response may truncate or omit passwords, API keys, or other private key + // matter and thus the response may not be 100% identical to the information + // that was supplied in the call to the create method. + GetNotificationChannel(context.Context, *GetNotificationChannelRequest) (*NotificationChannel, error) + // Creates a new notification channel, representing a single notification + // endpoint such as an email address, SMS number, or PagerDuty service. + CreateNotificationChannel(context.Context, *CreateNotificationChannelRequest) (*NotificationChannel, error) + // Updates a notification channel. Fields not specified in the field mask + // remain unchanged. + UpdateNotificationChannel(context.Context, *UpdateNotificationChannelRequest) (*NotificationChannel, error) + // Deletes a notification channel. + DeleteNotificationChannel(context.Context, *DeleteNotificationChannelRequest) (*empty.Empty, error) + // Causes a verification code to be delivered to the channel. The code + // can then be supplied in `VerifyNotificationChannel` to verify the channel. + SendNotificationChannelVerificationCode(context.Context, *SendNotificationChannelVerificationCodeRequest) (*empty.Empty, error) + // Requests a verification code for an already verified channel that can then + // be used in a call to VerifyNotificationChannel() on a different channel + // with an equivalent identity in the same or in a different project. This + // makes it possible to copy a channel between projects without requiring + // manual reverification of the channel. If the channel is not in the + // verified state, this method will fail (in other words, this may only be + // used if the SendNotificationChannelVerificationCode and + // VerifyNotificationChannel paths have already been used to put the given + // channel into the verified state). + // + // There is no guarantee that the verification codes returned by this method + // will be of a similar structure or form as the ones that are delivered + // to the channel via SendNotificationChannelVerificationCode; while + // VerifyNotificationChannel() will recognize both the codes delivered via + // SendNotificationChannelVerificationCode() and returned from + // GetNotificationChannelVerificationCode(), it is typically the case that + // the verification codes delivered via + // SendNotificationChannelVerificationCode() will be shorter and also + // have a shorter expiration (e.g. codes such as "G-123456") whereas + // GetVerificationCode() will typically return a much longer, websafe base + // 64 encoded string that has a longer expiration time. + GetNotificationChannelVerificationCode(context.Context, *GetNotificationChannelVerificationCodeRequest) (*GetNotificationChannelVerificationCodeResponse, error) + // Verifies a `NotificationChannel` by proving receipt of the code + // delivered to the channel as a result of calling + // `SendNotificationChannelVerificationCode`. 
+ VerifyNotificationChannel(context.Context, *VerifyNotificationChannelRequest) (*NotificationChannel, error) +} + +func RegisterNotificationChannelServiceServer(s *grpc.Server, srv NotificationChannelServiceServer) { + s.RegisterService(&_NotificationChannelService_serviceDesc, srv) +} + +func _NotificationChannelService_ListNotificationChannelDescriptors_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListNotificationChannelDescriptorsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NotificationChannelServiceServer).ListNotificationChannelDescriptors(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.NotificationChannelService/ListNotificationChannelDescriptors", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NotificationChannelServiceServer).ListNotificationChannelDescriptors(ctx, req.(*ListNotificationChannelDescriptorsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _NotificationChannelService_GetNotificationChannelDescriptor_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetNotificationChannelDescriptorRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NotificationChannelServiceServer).GetNotificationChannelDescriptor(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.NotificationChannelService/GetNotificationChannelDescriptor", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NotificationChannelServiceServer).GetNotificationChannelDescriptor(ctx, req.(*GetNotificationChannelDescriptorRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _NotificationChannelService_ListNotificationChannels_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListNotificationChannelsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NotificationChannelServiceServer).ListNotificationChannels(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.NotificationChannelService/ListNotificationChannels", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NotificationChannelServiceServer).ListNotificationChannels(ctx, req.(*ListNotificationChannelsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _NotificationChannelService_GetNotificationChannel_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetNotificationChannelRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NotificationChannelServiceServer).GetNotificationChannel(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.NotificationChannelService/GetNotificationChannel", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NotificationChannelServiceServer).GetNotificationChannel(ctx, req.(*GetNotificationChannelRequest)) + 
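+ // The closure re-asserts req to the concrete request type; this is
+ // safe because the interceptor is always invoked with the message
+ // decoded by dec above.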
} + return interceptor(ctx, in, info, handler) +} + +func _NotificationChannelService_CreateNotificationChannel_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateNotificationChannelRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NotificationChannelServiceServer).CreateNotificationChannel(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.NotificationChannelService/CreateNotificationChannel", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NotificationChannelServiceServer).CreateNotificationChannel(ctx, req.(*CreateNotificationChannelRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _NotificationChannelService_UpdateNotificationChannel_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateNotificationChannelRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NotificationChannelServiceServer).UpdateNotificationChannel(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.NotificationChannelService/UpdateNotificationChannel", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NotificationChannelServiceServer).UpdateNotificationChannel(ctx, req.(*UpdateNotificationChannelRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _NotificationChannelService_DeleteNotificationChannel_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteNotificationChannelRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NotificationChannelServiceServer).DeleteNotificationChannel(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.NotificationChannelService/DeleteNotificationChannel", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NotificationChannelServiceServer).DeleteNotificationChannel(ctx, req.(*DeleteNotificationChannelRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _NotificationChannelService_SendNotificationChannelVerificationCode_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SendNotificationChannelVerificationCodeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NotificationChannelServiceServer).SendNotificationChannelVerificationCode(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.NotificationChannelService/SendNotificationChannelVerificationCode", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NotificationChannelServiceServer).SendNotificationChannelVerificationCode(ctx, req.(*SendNotificationChannelVerificationCodeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _NotificationChannelService_GetNotificationChannelVerificationCode_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor 
grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetNotificationChannelVerificationCodeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NotificationChannelServiceServer).GetNotificationChannelVerificationCode(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.NotificationChannelService/GetNotificationChannelVerificationCode", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NotificationChannelServiceServer).GetNotificationChannelVerificationCode(ctx, req.(*GetNotificationChannelVerificationCodeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _NotificationChannelService_VerifyNotificationChannel_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(VerifyNotificationChannelRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NotificationChannelServiceServer).VerifyNotificationChannel(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.NotificationChannelService/VerifyNotificationChannel", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NotificationChannelServiceServer).VerifyNotificationChannel(ctx, req.(*VerifyNotificationChannelRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _NotificationChannelService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.monitoring.v3.NotificationChannelService", + HandlerType: (*NotificationChannelServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "ListNotificationChannelDescriptors", + Handler: _NotificationChannelService_ListNotificationChannelDescriptors_Handler, + }, + { + MethodName: "GetNotificationChannelDescriptor", + Handler: _NotificationChannelService_GetNotificationChannelDescriptor_Handler, + }, + { + MethodName: "ListNotificationChannels", + Handler: _NotificationChannelService_ListNotificationChannels_Handler, + }, + { + MethodName: "GetNotificationChannel", + Handler: _NotificationChannelService_GetNotificationChannel_Handler, + }, + { + MethodName: "CreateNotificationChannel", + Handler: _NotificationChannelService_CreateNotificationChannel_Handler, + }, + { + MethodName: "UpdateNotificationChannel", + Handler: _NotificationChannelService_UpdateNotificationChannel_Handler, + }, + { + MethodName: "DeleteNotificationChannel", + Handler: _NotificationChannelService_DeleteNotificationChannel_Handler, + }, + { + MethodName: "SendNotificationChannelVerificationCode", + Handler: _NotificationChannelService_SendNotificationChannelVerificationCode_Handler, + }, + { + MethodName: "GetNotificationChannelVerificationCode", + Handler: _NotificationChannelService_GetNotificationChannelVerificationCode_Handler, + }, + { + MethodName: "VerifyNotificationChannel", + Handler: _NotificationChannelService_VerifyNotificationChannel_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/monitoring/v3/notification_service.proto", +} + +func init() { + proto.RegisterFile("google/monitoring/v3/notification_service.proto", fileDescriptor_notification_service_e0bdcc277cafcf7d) +} + +var fileDescriptor_notification_service_e0bdcc277cafcf7d = []byte{ + // 1020 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x57, 0x41, 0x6f, 
0xdc, 0x44, + 0x14, 0xd6, 0xec, 0x26, 0x69, 0xfa, 0x22, 0x04, 0x9a, 0x86, 0xc8, 0xd9, 0xb6, 0xaa, 0xe5, 0x43, + 0x93, 0xae, 0x8a, 0x2d, 0xad, 0x4b, 0x84, 0x52, 0x52, 0xda, 0x64, 0xdb, 0x22, 0x48, 0x51, 0xb4, + 0x29, 0x91, 0x40, 0x11, 0x2b, 0xc7, 0x9e, 0x35, 0x26, 0xbb, 0x33, 0xc6, 0x33, 0x1b, 0x35, 0xad, + 0x2a, 0x15, 0xfe, 0x02, 0xfc, 0x01, 0x24, 0x4e, 0x3d, 0x20, 0xce, 0xa0, 0x72, 0x46, 0x5c, 0x11, + 0x5c, 0xb9, 0xc0, 0xff, 0x40, 0x1e, 0xcf, 0x66, 0x37, 0x9b, 0xf1, 0xae, 0xdd, 0xf6, 0xe6, 0x99, + 0x37, 0xf3, 0xde, 0xf7, 0xbe, 0xf7, 0xf9, 0x3d, 0x1b, 0x9c, 0x90, 0xb1, 0xb0, 0x4b, 0x9c, 0x1e, + 0xa3, 0x91, 0x60, 0x49, 0x44, 0x43, 0xe7, 0xc8, 0x75, 0x28, 0x13, 0x51, 0x27, 0xf2, 0x3d, 0x11, + 0x31, 0xda, 0xe6, 0x24, 0x39, 0x8a, 0x7c, 0x62, 0xc7, 0x09, 0x13, 0x0c, 0x2f, 0x66, 0x17, 0xec, + 0xe1, 0x05, 0xfb, 0xc8, 0xad, 0x5d, 0x52, 0x6e, 0xbc, 0x38, 0x72, 0x3c, 0x4a, 0x99, 0x90, 0x57, + 0x79, 0x76, 0xa7, 0xb6, 0x32, 0x35, 0x88, 0x3a, 0x78, 0x51, 0x1d, 0x94, 0xab, 0x83, 0x7e, 0xc7, + 0x21, 0xbd, 0x58, 0x1c, 0x2b, 0xa3, 0x39, 0x6e, 0xec, 0x44, 0xa4, 0x1b, 0xb4, 0x7b, 0x1e, 0x3f, + 0x54, 0x27, 0x2e, 0x8d, 0x9f, 0xe0, 0x22, 0xe9, 0xfb, 0x42, 0x59, 0xaf, 0x8c, 0x5b, 0x45, 0xd4, + 0x23, 0x5c, 0x78, 0xbd, 0x38, 0x3b, 0x60, 0x3d, 0x81, 0x6b, 0xdb, 0x11, 0x17, 0x9f, 0x8c, 0xe0, + 0xda, 0xfa, 0xd2, 0xa3, 0x94, 0x74, 0x9b, 0x84, 0xfb, 0x49, 0x14, 0x0b, 0x96, 0xf0, 0x16, 0xf9, + 0xba, 0x4f, 0xb8, 0xc0, 0x18, 0x66, 0xa8, 0xd7, 0x23, 0xc6, 0x8c, 0x89, 0x56, 0xcf, 0xb7, 0xe4, + 0x33, 0xbe, 0x08, 0xe7, 0x63, 0x2f, 0x24, 0x6d, 0x1e, 0x3d, 0x26, 0x46, 0xc5, 0x44, 0xab, 0xb3, + 0xad, 0xf9, 0x74, 0x63, 0x37, 0x7a, 0x4c, 0xf0, 0x65, 0x00, 0x69, 0x14, 0xec, 0x90, 0x50, 0xa3, + 0x2a, 0xaf, 0xc9, 0xe3, 0x0f, 0xd3, 0x0d, 0xeb, 0x17, 0x04, 0xf5, 0x22, 0xd1, 0x79, 0xcc, 0x28, + 0x27, 0x38, 0x80, 0x0b, 0x7e, 0x66, 0x6d, 0x07, 0x43, 0xb3, 0x81, 0xcc, 0xea, 0xea, 0x42, 0xc3, + 0xb5, 0x75, 0x45, 0xb2, 0x27, 0xba, 0x6e, 0x61, 0xff, 0x4c, 0x34, 0x7c, 0x15, 0xde, 0xa4, 0xe4, + 0x91, 0x68, 0x8f, 0x00, 0xaf, 0x48, 0xe0, 0x6f, 0xa4, 0xdb, 0x3b, 0x27, 0xe0, 0x37, 0x60, 0xe5, + 0x3e, 0x99, 0x0c, 0x7d, 0x9c, 0xb7, 0xea, 0x90, 0x37, 0xeb, 0x7b, 0x04, 0xe6, 0x56, 0x42, 0x3c, + 0x41, 0x34, 0x2e, 0x26, 0x5c, 0xc4, 0xfb, 0xb0, 0x78, 0x4a, 0xaa, 0x2a, 0x05, 0x09, 0x72, 0xa1, + 0x71, 0xad, 0x30, 0x0d, 0xad, 0x0b, 0xf4, 0xec, 0xa6, 0xf5, 0x23, 0x82, 0x2b, 0x39, 0x25, 0x39, + 0x23, 0x83, 0xd9, 0x11, 0x54, 0x4b, 0x30, 0xd7, 0x89, 0xba, 0x82, 0x24, 0xc6, 0x9c, 0xdc, 0x55, + 0x2b, 0xbc, 0x0c, 0xf3, 0x2c, 0x09, 0x48, 0xd2, 0x3e, 0x38, 0x36, 0xce, 0x49, 0xcb, 0x39, 0xb9, + 0xde, 0x3c, 0x3e, 0xad, 0x9c, 0xea, 0x44, 0xe5, 0xcc, 0x8c, 0x2b, 0xe7, 0x39, 0x02, 0x33, 0x1f, + 0xa6, 0xd2, 0xcb, 0x17, 0xf0, 0xb6, 0x8e, 0x29, 0x6e, 0x54, 0xa5, 0x62, 0x4a, 0x50, 0xb5, 0xa8, + 0xa1, 0xaa, 0xb8, 0x52, 0x5c, 0xb8, 0xac, 0x57, 0xca, 0x24, 0x7d, 0xbc, 0x40, 0x60, 0x7e, 0x1a, + 0x07, 0x93, 0xf5, 0x71, 0x13, 0x16, 0xfa, 0xf2, 0x8c, 0xec, 0x08, 0x4a, 0x02, 0xb5, 0x41, 0x5e, + 0x83, 0x97, 0xde, 0xbe, 0x97, 0x36, 0x8d, 0x07, 0x1e, 0x3f, 0x6c, 0x41, 0x76, 0x3c, 0x7d, 0xce, + 0x15, 0x52, 0xf5, 0xb5, 0x08, 0x69, 0x1b, 0xcc, 0x26, 0xe9, 0x92, 0xd2, 0xf2, 0x5e, 0x84, 0xd9, + 0x0e, 0x4b, 0xfc, 0x4c, 0x5d, 0xf3, 0xad, 0x6c, 0x61, 0x35, 0xc1, 0xde, 0x25, 0x34, 0xd0, 0xf8, + 0xda, 0x23, 0xc9, 0x70, 0x8b, 0x05, 0x64, 0xdc, 0x37, 0x1a, 0xe1, 0xf4, 0x19, 0x82, 0x77, 0xf4, + 0x95, 0x28, 0xe1, 0x25, 0x25, 0x9d, 0x3c, 0x8a, 0xa3, 0x84, 0xb4, 0xd3, 0x66, 0x9a, 0x4b, 0xfa, + 0xc3, 0x41, 0xa7, 0x6d, 0x41, 0x76, 0x3c, 0xdd, 0xb0, 0xbe, 0x41, 0x60, 0x17, 0x85, 0xa0, 0x64, + 0x8c, 0x61, 
0xc6, 0x67, 0xc1, 0x09, 0x86, 0xf4, 0xf9, 0xd5, 0x30, 0x7c, 0x04, 0xa6, 0x0c, 0x76, + 0x5c, 0xa0, 0x34, 0xa3, 0x89, 0x0f, 0x80, 0x54, 0x86, 0x40, 0x1a, 0xbf, 0xbe, 0x05, 0x35, 0x8d, + 0x9b, 0xdd, 0x6c, 0x7e, 0xe2, 0xff, 0x10, 0x58, 0xd3, 0x3b, 0x3c, 0xfe, 0x40, 0x2f, 0xb6, 0xc2, + 0x93, 0xa9, 0x76, 0xfb, 0xe5, 0x1d, 0x64, 0x2c, 0x5b, 0xef, 0x7f, 0xfb, 0xe7, 0xbf, 0xdf, 0x55, + 0xd6, 0xf0, 0x8d, 0x74, 0x4c, 0x3f, 0x49, 0xf3, 0xdd, 0x88, 0x13, 0xf6, 0x15, 0xf1, 0x05, 0x77, + 0xea, 0x4f, 0x1d, 0x3a, 0x39, 0x81, 0xbf, 0x11, 0x98, 0xd3, 0xa6, 0x01, 0xde, 0xd0, 0x83, 0x2c, + 0x38, 0x45, 0x6a, 0x2f, 0x33, 0xe1, 0xac, 0x5b, 0x32, 0xad, 0xf7, 0xf0, 0x9a, 0x2e, 0xad, 0x29, + 0x59, 0x39, 0xf5, 0xa7, 0xf8, 0x05, 0x02, 0x23, 0xaf, 0xd1, 0xe2, 0x77, 0x4b, 0xb1, 0x7e, 0x52, + 0xac, 0xb5, 0xb2, 0xd7, 0x54, 0x89, 0x1a, 0x32, 0x97, 0xeb, 0xb8, 0x5e, 0xb8, 0x44, 0x1c, 0xff, + 0x84, 0x60, 0x49, 0x4f, 0x30, 0x76, 0xcb, 0x94, 0x63, 0x80, 0xbd, 0x78, 0x5b, 0xb4, 0x6e, 0x48, + 0xb8, 0x36, 0xbe, 0x5e, 0x94, 0x7a, 0x49, 0xf8, 0xef, 0x08, 0x96, 0x73, 0xbf, 0x0b, 0x70, 0x0e, + 0x75, 0xd3, 0x3e, 0x24, 0xca, 0xc0, 0xfe, 0x50, 0xc2, 0xde, 0xb4, 0x4a, 0xb0, 0xbc, 0xae, 0x1d, + 0x24, 0xf8, 0x1f, 0x04, 0xcb, 0xb9, 0x23, 0x2c, 0x2f, 0x95, 0x69, 0x33, 0xaf, 0x4c, 0x2a, 0x6d, + 0x99, 0xca, 0x67, 0x8d, 0x3b, 0x59, 0x2a, 0x1a, 0x8c, 0x76, 0xc1, 0xb2, 0xe4, 0x64, 0xf8, 0x03, + 0x82, 0xe5, 0xdc, 0x29, 0x97, 0x97, 0xe1, 0xb4, 0xb1, 0x58, 0x5b, 0x3a, 0xd3, 0xc7, 0xef, 0xa6, + 0xbf, 0x04, 0x03, 0x41, 0xd5, 0xcb, 0x09, 0xea, 0x2f, 0x04, 0x2b, 0x05, 0x67, 0x27, 0x6e, 0xea, + 0x11, 0x97, 0x1b, 0xbd, 0xb9, 0xf8, 0xb7, 0x25, 0xfe, 0x7b, 0xd6, 0x9d, 0x32, 0xf8, 0xd7, 0x39, + 0xa1, 0xc1, 0x78, 0xa4, 0x75, 0x54, 0xc7, 0xcf, 0x2a, 0x70, 0xb5, 0xd8, 0x24, 0xc5, 0x5b, 0x65, + 0xde, 0xf4, 0xbc, 0xac, 0x9a, 0xaf, 0xe6, 0x44, 0xf5, 0xb0, 0x8f, 0x25, 0x07, 0x77, 0xad, 0xdb, + 0xa5, 0x38, 0x08, 0x89, 0xd0, 0x51, 0xf0, 0x1b, 0x82, 0xe5, 0xdc, 0x49, 0x9e, 0x27, 0xbf, 0x69, + 0xa3, 0xbf, 0xcc, 0x0b, 0xa6, 0xa6, 0x8b, 0xe5, 0x96, 0xca, 0xe6, 0x48, 0x22, 0x58, 0x47, 0xf5, + 0xcd, 0x9f, 0x11, 0x18, 0x3e, 0xeb, 0x69, 0x03, 0x6e, 0x1a, 0xa3, 0x11, 0xd5, 0x07, 0xc5, 0x4e, + 0xaa, 0xa8, 0x1d, 0xf4, 0xf9, 0x2d, 0x75, 0x23, 0x64, 0x5d, 0x8f, 0x86, 0x36, 0x4b, 0x42, 0x27, + 0x24, 0x54, 0xea, 0x4d, 0xfd, 0xdd, 0x7b, 0x71, 0xc4, 0x4f, 0xff, 0x7c, 0xdf, 0x1c, 0xae, 0x9e, + 0x57, 0x6a, 0xf7, 0x33, 0x07, 0x5b, 0x5d, 0xd6, 0x0f, 0xec, 0x07, 0xc3, 0xc0, 0x7b, 0xee, 0x1f, + 0x03, 0xe3, 0xbe, 0x34, 0xee, 0x0f, 0x8d, 0xfb, 0x7b, 0xee, 0xc1, 0x9c, 0x0c, 0xe2, 0xfe, 0x1f, + 0x00, 0x00, 0xff, 0xff, 0x57, 0x01, 0xd1, 0x1c, 0x45, 0x10, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/span_context.pb.go b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/span_context.pb.go new file mode 100644 index 000000000..9778aa43e --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/span_context.pb.go @@ -0,0 +1,96 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/monitoring/v3/span_context.proto + +package monitoring // import "google.golang.org/genproto/googleapis/monitoring/v3" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. 
+// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// The context of a span, attached to google.api.Distribution.Exemplars +// in google.api.Distribution values during aggregation. +// +// It contains the name of a span with format: +// projects/[PROJECT_ID]/traces/[TRACE_ID]/spans/[SPAN_ID] +type SpanContext struct { + // The resource name of the span in the following format: + // + // projects/[PROJECT_ID]/traces/[TRACE_ID]/spans/[SPAN_ID] + // + // [TRACE_ID] is a unique identifier for a trace within a project; + // it is a 32-character hexadecimal encoding of a 16-byte array. + // + // [SPAN_ID] is a unique identifier for a span within a trace; it + // is a 16-character hexadecimal encoding of an 8-byte array. + SpanName string `protobuf:"bytes,1,opt,name=span_name,json=spanName,proto3" json:"span_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SpanContext) Reset() { *m = SpanContext{} } +func (m *SpanContext) String() string { return proto.CompactTextString(m) } +func (*SpanContext) ProtoMessage() {} +func (*SpanContext) Descriptor() ([]byte, []int) { + return fileDescriptor_span_context_0c1dbe9c6f1a1a54, []int{0} +} +func (m *SpanContext) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SpanContext.Unmarshal(m, b) +} +func (m *SpanContext) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SpanContext.Marshal(b, m, deterministic) +} +func (dst *SpanContext) XXX_Merge(src proto.Message) { + xxx_messageInfo_SpanContext.Merge(dst, src) +} +func (m *SpanContext) XXX_Size() int { + return xxx_messageInfo_SpanContext.Size(m) +} +func (m *SpanContext) XXX_DiscardUnknown() { + xxx_messageInfo_SpanContext.DiscardUnknown(m) +} + +var xxx_messageInfo_SpanContext proto.InternalMessageInfo + +func (m *SpanContext) GetSpanName() string { + if m != nil { + return m.SpanName + } + return "" +} + +func init() { + proto.RegisterType((*SpanContext)(nil), "google.monitoring.v3.SpanContext") +} + +func init() { + proto.RegisterFile("google/monitoring/v3/span_context.proto", fileDescriptor_span_context_0c1dbe9c6f1a1a54) +} + +var fileDescriptor_span_context_0c1dbe9c6f1a1a54 = []byte{ + // 197 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x4f, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0xcf, 0xcd, 0xcf, 0xcb, 0x2c, 0xc9, 0x2f, 0xca, 0xcc, 0x4b, 0xd7, 0x2f, 0x33, + 0xd6, 0x2f, 0x2e, 0x48, 0xcc, 0x8b, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xad, 0x28, 0xd1, 0x2b, 0x28, + 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x81, 0x28, 0xd4, 0x43, 0x28, 0xd4, 0x2b, 0x33, 0x56, 0xd2, 0xe2, + 0xe2, 0x0e, 0x2e, 0x48, 0xcc, 0x73, 0x86, 0x28, 0x15, 0x92, 0xe6, 0xe2, 0x04, 0x6b, 0xcd, 0x4b, + 0xcc, 0x4d, 0x95, 0x60, 0x54, 0x60, 0xd4, 0xe0, 0x0c, 0xe2, 0x00, 0x09, 0xf8, 0x25, 0xe6, 0xa6, + 0x3a, 0xad, 0x60, 0xe4, 0x92, 0x48, 0xce, 0xcf, 0xd5, 0xc3, 0x66, 0x90, 0x93, 0x00, 0x92, 0x31, + 0x01, 0x20, 0x0b, 0x03, 0x18, 0xa3, 0xec, 0xa0, 0x2a, 0xd3, 0xf3, 0x73, 0x12, 0xf3, 0xd2, 0xf5, + 0xf2, 0x8b, 0xd2, 0xf5, 0xd3, 0x53, 0xf3, 0xc0, 0xce, 0xd1, 0x87, 0x48, 0x25, 0x16, 0x64, 0x16, + 0xa3, 0x3a, 0xdd, 0x1a, 0xc1, 0x5b, 0xc5, 0x24, 0xe5, 0x0e, 0x31, 0xc0, 0x39, 0x27, 0xbf, 0x34, + 0x45, 0xcf, 0x17, 0x61, 0x61, 0x98, 0xf1, 0x29, 0x98, 0x64, 0x0c, 0x58, 0x32, 0x06, 0x21, 0x19, + 0x13, 0x66, 0x9c, 0xc4, 0x06, 0xb6, 0xc4, 0x18, 
0x10, 0x00, 0x00, 0xff, 0xff, 0xbd, 0x19, 0x01, + 0xcb, 0x1e, 0x01, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/uptime.pb.go b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/uptime.pb.go new file mode 100644 index 000000000..9326d663f --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/uptime.pb.go @@ -0,0 +1,969 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/monitoring/v3/uptime.proto + +package monitoring // import "google.golang.org/genproto/googleapis/monitoring/v3" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import duration "github.com/golang/protobuf/ptypes/duration" +import monitoredres "google.golang.org/genproto/googleapis/api/monitoredres" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// The regions from which an uptime check can be run. +type UptimeCheckRegion int32 + +const ( + // Default value if no region is specified. Will result in uptime checks + // running from all regions. + UptimeCheckRegion_REGION_UNSPECIFIED UptimeCheckRegion = 0 + // Allows checks to run from locations within the United States of America. + UptimeCheckRegion_USA UptimeCheckRegion = 1 + // Allows checks to run from locations within the continent of Europe. + UptimeCheckRegion_EUROPE UptimeCheckRegion = 2 + // Allows checks to run from locations within the continent of South + // America. + UptimeCheckRegion_SOUTH_AMERICA UptimeCheckRegion = 3 + // Allows checks to run from locations within the Asia Pacific area (ex: + // Singapore). + UptimeCheckRegion_ASIA_PACIFIC UptimeCheckRegion = 4 +) + +var UptimeCheckRegion_name = map[int32]string{ + 0: "REGION_UNSPECIFIED", + 1: "USA", + 2: "EUROPE", + 3: "SOUTH_AMERICA", + 4: "ASIA_PACIFIC", +} +var UptimeCheckRegion_value = map[string]int32{ + "REGION_UNSPECIFIED": 0, + "USA": 1, + "EUROPE": 2, + "SOUTH_AMERICA": 3, + "ASIA_PACIFIC": 4, +} + +func (x UptimeCheckRegion) String() string { + return proto.EnumName(UptimeCheckRegion_name, int32(x)) +} +func (UptimeCheckRegion) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_uptime_9dda9bdcd2304945, []int{0} +} + +// The supported resource types that can be used as values of +// `group_resource.resource_type`. +// `INSTANCE` includes `gce_instance` and `aws_ec2_instance` resource types. +// The resource types `gae_app` and `uptime_url` are not valid here because +// group checks on App Engine modules and URLs are not allowed. +type GroupResourceType int32 + +const ( + // Default value (not valid). + GroupResourceType_RESOURCE_TYPE_UNSPECIFIED GroupResourceType = 0 + // A group of instances from Google Cloud Platform (GCP) or + // Amazon Web Services (AWS). + GroupResourceType_INSTANCE GroupResourceType = 1 + // A group of Amazon ELB load balancers. 
+ GroupResourceType_AWS_ELB_LOAD_BALANCER GroupResourceType = 2
+)
+
+var GroupResourceType_name = map[int32]string{
+ 0: "RESOURCE_TYPE_UNSPECIFIED",
+ 1: "INSTANCE",
+ 2: "AWS_ELB_LOAD_BALANCER",
+}
+var GroupResourceType_value = map[string]int32{
+ "RESOURCE_TYPE_UNSPECIFIED": 0,
+ "INSTANCE": 1,
+ "AWS_ELB_LOAD_BALANCER": 2,
+}
+
+func (x GroupResourceType) String() string {
+ return proto.EnumName(GroupResourceType_name, int32(x))
+}
+func (GroupResourceType) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_uptime_9dda9bdcd2304945, []int{1}
+}
+
+// An internal checker allows uptime checks to run on private/internal GCP
+// resources.
+type InternalChecker struct {
+ // A unique resource name for this InternalChecker. The format is:
+ //
+ //
+ // `projects/[PROJECT_ID]/internalCheckers/[INTERNAL_CHECKER_ID]`.
+ //
+ // PROJECT_ID is the stackdriver workspace project for the
+ // uptime check config associated with the internal checker.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // The checker's human-readable name. The display name
+ // should be unique within a Stackdriver Workspace in order to make it easier
+ // to identify; however, uniqueness is not enforced.
+ DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"`
+ // The [GCP VPC network](https://cloud.google.com/vpc/docs/vpc) where the
+ // internal resource lives (ex: "default").
+ Network string `protobuf:"bytes,3,opt,name=network,proto3" json:"network,omitempty"`
+ // The GCP zone the uptime check should egress from. Only respected for
+ // internal uptime checks, where internal_network is specified.
+ GcpZone string `protobuf:"bytes,4,opt,name=gcp_zone,json=gcpZone,proto3" json:"gcp_zone,omitempty"`
+ // The GCP project_id where the internal checker lives. Not necessarily
+ // the same as the workspace project.
+ PeerProjectId string `protobuf:"bytes,6,opt,name=peer_project_id,json=peerProjectId,proto3" json:"peer_project_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *InternalChecker) Reset() { *m = InternalChecker{} } +func (m *InternalChecker) String() string { return proto.CompactTextString(m) } +func (*InternalChecker) ProtoMessage() {} +func (*InternalChecker) Descriptor() ([]byte, []int) { + return fileDescriptor_uptime_9dda9bdcd2304945, []int{0} +} +func (m *InternalChecker) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_InternalChecker.Unmarshal(m, b) +} +func (m *InternalChecker) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_InternalChecker.Marshal(b, m, deterministic) +} +func (dst *InternalChecker) XXX_Merge(src proto.Message) { + xxx_messageInfo_InternalChecker.Merge(dst, src) +} +func (m *InternalChecker) XXX_Size() int { + return xxx_messageInfo_InternalChecker.Size(m) +} +func (m *InternalChecker) XXX_DiscardUnknown() { + xxx_messageInfo_InternalChecker.DiscardUnknown(m) +} + +var xxx_messageInfo_InternalChecker proto.InternalMessageInfo + +func (m *InternalChecker) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *InternalChecker) GetDisplayName() string { + if m != nil { + return m.DisplayName + } + return "" +} + +func (m *InternalChecker) GetNetwork() string { + if m != nil { + return m.Network + } + return "" +} + +func (m *InternalChecker) GetGcpZone() string { + if m != nil { + return m.GcpZone + } + return "" +} + +func (m *InternalChecker) GetPeerProjectId() string { + if m != nil { + return m.PeerProjectId + } + return "" +} + +// This message configures which resources and services to monitor for +// availability. +type UptimeCheckConfig struct { + // A unique resource name for this UptimeCheckConfig. The format is: + // + // + // `projects/[PROJECT_ID]/uptimeCheckConfigs/[UPTIME_CHECK_ID]`. + // + // This field should be omitted when creating the uptime check configuration; + // on create, the resource name is assigned by the server and included in the + // response. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // A human-friendly name for the uptime check configuration. The display name + // should be unique within a Stackdriver Workspace in order to make it easier + // to identify; however, uniqueness is not enforced. Required. + DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` + // The resource the check is checking. Required. + // + // Types that are valid to be assigned to Resource: + // *UptimeCheckConfig_MonitoredResource + // *UptimeCheckConfig_ResourceGroup_ + Resource isUptimeCheckConfig_Resource `protobuf_oneof:"resource"` + // The type of uptime check request. + // + // Types that are valid to be assigned to CheckRequestType: + // *UptimeCheckConfig_HttpCheck_ + // *UptimeCheckConfig_TcpCheck_ + CheckRequestType isUptimeCheckConfig_CheckRequestType `protobuf_oneof:"check_request_type"` + // How often, in seconds, the uptime check is performed. + // Currently, the only supported values are `60s` (1 minute), `300s` + // (5 minutes), `600s` (10 minutes), and `900s` (15 minutes). Optional, + // defaults to `300s`. 
+ Period *duration.Duration `protobuf:"bytes,7,opt,name=period,proto3" json:"period,omitempty"` + // The maximum amount of time to wait for the request to complete (must be + // between 1 and 60 seconds). Required. + Timeout *duration.Duration `protobuf:"bytes,8,opt,name=timeout,proto3" json:"timeout,omitempty"` + // The expected content on the page the check is run against. + // Currently, only the first entry in the list is supported, and other entries + // will be ignored. The server will look for an exact match of the string in + // the page response's content. This field is optional and should only be + // specified if a content match is required. + ContentMatchers []*UptimeCheckConfig_ContentMatcher `protobuf:"bytes,9,rep,name=content_matchers,json=contentMatchers,proto3" json:"content_matchers,omitempty"` + // The list of regions from which the check will be run. + // Some regions contain one location, and others contain more than one. + // If this field is specified, enough regions to include a minimum of + // 3 locations must be provided, or an error message is returned. + // Not specifying this field will result in uptime checks running from all + // regions. + SelectedRegions []UptimeCheckRegion `protobuf:"varint,10,rep,packed,name=selected_regions,json=selectedRegions,proto3,enum=google.monitoring.v3.UptimeCheckRegion" json:"selected_regions,omitempty"` + // If this is true, then checks are made only from the 'internal_checkers'. + // If it is false, then checks are made only from the 'selected_regions'. + // It is an error to provide 'selected_regions' when is_internal is true, + // or to provide 'internal_checkers' when is_internal is false. + IsInternal bool `protobuf:"varint,15,opt,name=is_internal,json=isInternal,proto3" json:"is_internal,omitempty"` + // The internal checkers that this check will egress from. If `is_internal` is + // true and this list is empty, the check will egress from all the + // InternalCheckers configured for the project that owns this CheckConfig. 
+ InternalCheckers []*InternalChecker `protobuf:"bytes,14,rep,name=internal_checkers,json=internalCheckers,proto3" json:"internal_checkers,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UptimeCheckConfig) Reset() { *m = UptimeCheckConfig{} } +func (m *UptimeCheckConfig) String() string { return proto.CompactTextString(m) } +func (*UptimeCheckConfig) ProtoMessage() {} +func (*UptimeCheckConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_uptime_9dda9bdcd2304945, []int{1} +} +func (m *UptimeCheckConfig) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UptimeCheckConfig.Unmarshal(m, b) +} +func (m *UptimeCheckConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UptimeCheckConfig.Marshal(b, m, deterministic) +} +func (dst *UptimeCheckConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_UptimeCheckConfig.Merge(dst, src) +} +func (m *UptimeCheckConfig) XXX_Size() int { + return xxx_messageInfo_UptimeCheckConfig.Size(m) +} +func (m *UptimeCheckConfig) XXX_DiscardUnknown() { + xxx_messageInfo_UptimeCheckConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_UptimeCheckConfig proto.InternalMessageInfo + +func (m *UptimeCheckConfig) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *UptimeCheckConfig) GetDisplayName() string { + if m != nil { + return m.DisplayName + } + return "" +} + +type isUptimeCheckConfig_Resource interface { + isUptimeCheckConfig_Resource() +} + +type UptimeCheckConfig_MonitoredResource struct { + MonitoredResource *monitoredres.MonitoredResource `protobuf:"bytes,3,opt,name=monitored_resource,json=monitoredResource,proto3,oneof"` +} + +type UptimeCheckConfig_ResourceGroup_ struct { + ResourceGroup *UptimeCheckConfig_ResourceGroup `protobuf:"bytes,4,opt,name=resource_group,json=resourceGroup,proto3,oneof"` +} + +func (*UptimeCheckConfig_MonitoredResource) isUptimeCheckConfig_Resource() {} + +func (*UptimeCheckConfig_ResourceGroup_) isUptimeCheckConfig_Resource() {} + +func (m *UptimeCheckConfig) GetResource() isUptimeCheckConfig_Resource { + if m != nil { + return m.Resource + } + return nil +} + +func (m *UptimeCheckConfig) GetMonitoredResource() *monitoredres.MonitoredResource { + if x, ok := m.GetResource().(*UptimeCheckConfig_MonitoredResource); ok { + return x.MonitoredResource + } + return nil +} + +func (m *UptimeCheckConfig) GetResourceGroup() *UptimeCheckConfig_ResourceGroup { + if x, ok := m.GetResource().(*UptimeCheckConfig_ResourceGroup_); ok { + return x.ResourceGroup + } + return nil +} + +type isUptimeCheckConfig_CheckRequestType interface { + isUptimeCheckConfig_CheckRequestType() +} + +type UptimeCheckConfig_HttpCheck_ struct { + HttpCheck *UptimeCheckConfig_HttpCheck `protobuf:"bytes,5,opt,name=http_check,json=httpCheck,proto3,oneof"` +} + +type UptimeCheckConfig_TcpCheck_ struct { + TcpCheck *UptimeCheckConfig_TcpCheck `protobuf:"bytes,6,opt,name=tcp_check,json=tcpCheck,proto3,oneof"` +} + +func (*UptimeCheckConfig_HttpCheck_) isUptimeCheckConfig_CheckRequestType() {} + +func (*UptimeCheckConfig_TcpCheck_) isUptimeCheckConfig_CheckRequestType() {} + +func (m *UptimeCheckConfig) GetCheckRequestType() isUptimeCheckConfig_CheckRequestType { + if m != nil { + return m.CheckRequestType + } + return nil +} + +func (m *UptimeCheckConfig) GetHttpCheck() *UptimeCheckConfig_HttpCheck { + if x, ok := m.GetCheckRequestType().(*UptimeCheckConfig_HttpCheck_); ok { + return x.HttpCheck + } + 
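+ // A nil result means the oneof currently holds a different branch (a
+ // TCP check) or is unset, so callers can distinguish check types with
+ // a simple nil test on each getter.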
return nil +} + +func (m *UptimeCheckConfig) GetTcpCheck() *UptimeCheckConfig_TcpCheck { + if x, ok := m.GetCheckRequestType().(*UptimeCheckConfig_TcpCheck_); ok { + return x.TcpCheck + } + return nil +} + +func (m *UptimeCheckConfig) GetPeriod() *duration.Duration { + if m != nil { + return m.Period + } + return nil +} + +func (m *UptimeCheckConfig) GetTimeout() *duration.Duration { + if m != nil { + return m.Timeout + } + return nil +} + +func (m *UptimeCheckConfig) GetContentMatchers() []*UptimeCheckConfig_ContentMatcher { + if m != nil { + return m.ContentMatchers + } + return nil +} + +func (m *UptimeCheckConfig) GetSelectedRegions() []UptimeCheckRegion { + if m != nil { + return m.SelectedRegions + } + return nil +} + +func (m *UptimeCheckConfig) GetIsInternal() bool { + if m != nil { + return m.IsInternal + } + return false +} + +func (m *UptimeCheckConfig) GetInternalCheckers() []*InternalChecker { + if m != nil { + return m.InternalCheckers + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*UptimeCheckConfig) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _UptimeCheckConfig_OneofMarshaler, _UptimeCheckConfig_OneofUnmarshaler, _UptimeCheckConfig_OneofSizer, []interface{}{ + (*UptimeCheckConfig_MonitoredResource)(nil), + (*UptimeCheckConfig_ResourceGroup_)(nil), + (*UptimeCheckConfig_HttpCheck_)(nil), + (*UptimeCheckConfig_TcpCheck_)(nil), + } +} + +func _UptimeCheckConfig_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*UptimeCheckConfig) + // resource + switch x := m.Resource.(type) { + case *UptimeCheckConfig_MonitoredResource: + b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.MonitoredResource); err != nil { + return err + } + case *UptimeCheckConfig_ResourceGroup_: + b.EncodeVarint(4<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ResourceGroup); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("UptimeCheckConfig.Resource has unexpected type %T", x) + } + // check_request_type + switch x := m.CheckRequestType.(type) { + case *UptimeCheckConfig_HttpCheck_: + b.EncodeVarint(5<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.HttpCheck); err != nil { + return err + } + case *UptimeCheckConfig_TcpCheck_: + b.EncodeVarint(6<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.TcpCheck); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("UptimeCheckConfig.CheckRequestType has unexpected type %T", x) + } + return nil +} + +func _UptimeCheckConfig_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*UptimeCheckConfig) + switch tag { + case 3: // resource.monitored_resource + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(monitoredres.MonitoredResource) + err := b.DecodeMessage(msg) + m.Resource = &UptimeCheckConfig_MonitoredResource{msg} + return true, err + case 4: // resource.resource_group + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(UptimeCheckConfig_ResourceGroup) + err := b.DecodeMessage(msg) + m.Resource = &UptimeCheckConfig_ResourceGroup_{msg} + return true, err + case 5: // check_request_type.http_check + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(UptimeCheckConfig_HttpCheck) + err := b.DecodeMessage(msg) + 
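+ // Wrap the freshly decoded HttpCheck in its single-field wrapper
+ // struct; assigning the wrapper to CheckRequestType records which
+ // oneof branch is set.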
m.CheckRequestType = &UptimeCheckConfig_HttpCheck_{msg}
+ return true, err
+ case 6: // check_request_type.tcp_check
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(UptimeCheckConfig_TcpCheck)
+ err := b.DecodeMessage(msg)
+ m.CheckRequestType = &UptimeCheckConfig_TcpCheck_{msg}
+ return true, err
+ default:
+ return false, nil
+ }
+}
+
+func _UptimeCheckConfig_OneofSizer(msg proto.Message) (n int) {
+ m := msg.(*UptimeCheckConfig)
+ // resource
+ switch x := m.Resource.(type) {
+ case *UptimeCheckConfig_MonitoredResource:
+ s := proto.Size(x.MonitoredResource)
+ n += 1 // tag and wire
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case *UptimeCheckConfig_ResourceGroup_:
+ s := proto.Size(x.ResourceGroup)
+ n += 1 // tag and wire
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case nil:
+ default:
+ panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
+ }
+ // check_request_type
+ switch x := m.CheckRequestType.(type) {
+ case *UptimeCheckConfig_HttpCheck_:
+ s := proto.Size(x.HttpCheck)
+ n += 1 // tag and wire
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case *UptimeCheckConfig_TcpCheck_:
+ s := proto.Size(x.TcpCheck)
+ n += 1 // tag and wire
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case nil:
+ default:
+ panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
+ }
+ return n
+}
+
+// The resource submessage for group checks. It can be used instead of a
+// monitored resource, when multiple resources are being monitored.
+type UptimeCheckConfig_ResourceGroup struct {
+ // The group of resources being monitored. Should be only the
+ // group_id, not projects/<project_id>/groups/<group_id>.
+ GroupId string `protobuf:"bytes,1,opt,name=group_id,json=groupId,proto3" json:"group_id,omitempty"`
+ // The resource type of the group members.
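+ // Values come from the GroupResourceType enum declared near the top of
+ // this file; INSTANCE covers both GCP and AWS instance groups.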
+ ResourceType GroupResourceType `protobuf:"varint,2,opt,name=resource_type,json=resourceType,proto3,enum=google.monitoring.v3.GroupResourceType" json:"resource_type,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *UptimeCheckConfig_ResourceGroup) Reset() { *m = UptimeCheckConfig_ResourceGroup{} }
+func (m *UptimeCheckConfig_ResourceGroup) String() string { return proto.CompactTextString(m) }
+func (*UptimeCheckConfig_ResourceGroup) ProtoMessage() {}
+func (*UptimeCheckConfig_ResourceGroup) Descriptor() ([]byte, []int) {
+ return fileDescriptor_uptime_9dda9bdcd2304945, []int{1, 0}
+}
+func (m *UptimeCheckConfig_ResourceGroup) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_UptimeCheckConfig_ResourceGroup.Unmarshal(m, b)
+}
+func (m *UptimeCheckConfig_ResourceGroup) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_UptimeCheckConfig_ResourceGroup.Marshal(b, m, deterministic)
+}
+func (dst *UptimeCheckConfig_ResourceGroup) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_UptimeCheckConfig_ResourceGroup.Merge(dst, src)
+}
+func (m *UptimeCheckConfig_ResourceGroup) XXX_Size() int {
+ return xxx_messageInfo_UptimeCheckConfig_ResourceGroup.Size(m)
+}
+func (m *UptimeCheckConfig_ResourceGroup) XXX_DiscardUnknown() {
+ xxx_messageInfo_UptimeCheckConfig_ResourceGroup.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_UptimeCheckConfig_ResourceGroup proto.InternalMessageInfo
+
+func (m *UptimeCheckConfig_ResourceGroup) GetGroupId() string {
+ if m != nil {
+ return m.GroupId
+ }
+ return ""
+}
+
+func (m *UptimeCheckConfig_ResourceGroup) GetResourceType() GroupResourceType {
+ if m != nil {
+ return m.ResourceType
+ }
+ return GroupResourceType_RESOURCE_TYPE_UNSPECIFIED
+}
+
+// Information involved in an HTTP/HTTPS uptime check request.
+type UptimeCheckConfig_HttpCheck struct {
+ // If true, use HTTPS instead of HTTP to run the check.
+ UseSsl bool `protobuf:"varint,1,opt,name=use_ssl,json=useSsl,proto3" json:"use_ssl,omitempty"`
+ // The path to the page to run the check against. Will be combined with the
+ // host (specified within the MonitoredResource) and port to construct the
+ // full URL. Optional (defaults to "/").
+ Path string `protobuf:"bytes,2,opt,name=path,proto3" json:"path,omitempty"`
+ // The port to the page to run the check against. Will be combined with host
+ // (specified within the MonitoredResource) and path to construct the full
+ // URL. Optional (defaults to 80 without SSL, or 443 with SSL).
+ Port int32 `protobuf:"varint,3,opt,name=port,proto3" json:"port,omitempty"`
+ // The authentication information. Optional when creating an HTTP check;
+ // defaults to empty.
+ AuthInfo *UptimeCheckConfig_HttpCheck_BasicAuthentication `protobuf:"bytes,4,opt,name=auth_info,json=authInfo,proto3" json:"auth_info,omitempty"`
+ // Boolean specifying whether to encrypt the header information.
+ // Encryption should be specified for any headers related to authentication
+ // that you do not wish to be seen when retrieving the configuration. The
+ // server will be responsible for encrypting the headers.
+ // On Get/List calls, if mask_headers is set to True then the headers
+ // will be obscured with ******.
+ MaskHeaders bool `protobuf:"varint,5,opt,name=mask_headers,json=maskHeaders,proto3" json:"mask_headers,omitempty"`
+ // The list of headers to send as part of the uptime check request.
+ // If two headers have the same key and different values, they should + // be entered as a single header, with the value being a comma-separated + // list of all the desired values as described at + // https://www.w3.org/Protocols/rfc2616/rfc2616.txt (page 31). + // Entering two separate headers with the same key in a Create call will + // cause the first to be overwritten by the second. + // The maximum number of headers allowed is 100. + Headers map[string]string `protobuf:"bytes,6,rep,name=headers,proto3" json:"headers,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UptimeCheckConfig_HttpCheck) Reset() { *m = UptimeCheckConfig_HttpCheck{} } +func (m *UptimeCheckConfig_HttpCheck) String() string { return proto.CompactTextString(m) } +func (*UptimeCheckConfig_HttpCheck) ProtoMessage() {} +func (*UptimeCheckConfig_HttpCheck) Descriptor() ([]byte, []int) { + return fileDescriptor_uptime_9dda9bdcd2304945, []int{1, 1} +} +func (m *UptimeCheckConfig_HttpCheck) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UptimeCheckConfig_HttpCheck.Unmarshal(m, b) +} +func (m *UptimeCheckConfig_HttpCheck) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UptimeCheckConfig_HttpCheck.Marshal(b, m, deterministic) +} +func (dst *UptimeCheckConfig_HttpCheck) XXX_Merge(src proto.Message) { + xxx_messageInfo_UptimeCheckConfig_HttpCheck.Merge(dst, src) +} +func (m *UptimeCheckConfig_HttpCheck) XXX_Size() int { + return xxx_messageInfo_UptimeCheckConfig_HttpCheck.Size(m) +} +func (m *UptimeCheckConfig_HttpCheck) XXX_DiscardUnknown() { + xxx_messageInfo_UptimeCheckConfig_HttpCheck.DiscardUnknown(m) +} + +var xxx_messageInfo_UptimeCheckConfig_HttpCheck proto.InternalMessageInfo + +func (m *UptimeCheckConfig_HttpCheck) GetUseSsl() bool { + if m != nil { + return m.UseSsl + } + return false +} + +func (m *UptimeCheckConfig_HttpCheck) GetPath() string { + if m != nil { + return m.Path + } + return "" +} + +func (m *UptimeCheckConfig_HttpCheck) GetPort() int32 { + if m != nil { + return m.Port + } + return 0 +} + +func (m *UptimeCheckConfig_HttpCheck) GetAuthInfo() *UptimeCheckConfig_HttpCheck_BasicAuthentication { + if m != nil { + return m.AuthInfo + } + return nil +} + +func (m *UptimeCheckConfig_HttpCheck) GetMaskHeaders() bool { + if m != nil { + return m.MaskHeaders + } + return false +} + +func (m *UptimeCheckConfig_HttpCheck) GetHeaders() map[string]string { + if m != nil { + return m.Headers + } + return nil +} + +// A type of authentication to perform against the specified resource or URL +// that uses username and password. +// Currently, only Basic authentication is supported in Uptime Monitoring. +type UptimeCheckConfig_HttpCheck_BasicAuthentication struct { + // The username to authenticate. + Username string `protobuf:"bytes,1,opt,name=username,proto3" json:"username,omitempty"` + // The password to authenticate. 
+ Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UptimeCheckConfig_HttpCheck_BasicAuthentication) Reset() { + *m = UptimeCheckConfig_HttpCheck_BasicAuthentication{} +} +func (m *UptimeCheckConfig_HttpCheck_BasicAuthentication) String() string { + return proto.CompactTextString(m) +} +func (*UptimeCheckConfig_HttpCheck_BasicAuthentication) ProtoMessage() {} +func (*UptimeCheckConfig_HttpCheck_BasicAuthentication) Descriptor() ([]byte, []int) { + return fileDescriptor_uptime_9dda9bdcd2304945, []int{1, 1, 0} +} +func (m *UptimeCheckConfig_HttpCheck_BasicAuthentication) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UptimeCheckConfig_HttpCheck_BasicAuthentication.Unmarshal(m, b) +} +func (m *UptimeCheckConfig_HttpCheck_BasicAuthentication) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UptimeCheckConfig_HttpCheck_BasicAuthentication.Marshal(b, m, deterministic) +} +func (dst *UptimeCheckConfig_HttpCheck_BasicAuthentication) XXX_Merge(src proto.Message) { + xxx_messageInfo_UptimeCheckConfig_HttpCheck_BasicAuthentication.Merge(dst, src) +} +func (m *UptimeCheckConfig_HttpCheck_BasicAuthentication) XXX_Size() int { + return xxx_messageInfo_UptimeCheckConfig_HttpCheck_BasicAuthentication.Size(m) +} +func (m *UptimeCheckConfig_HttpCheck_BasicAuthentication) XXX_DiscardUnknown() { + xxx_messageInfo_UptimeCheckConfig_HttpCheck_BasicAuthentication.DiscardUnknown(m) +} + +var xxx_messageInfo_UptimeCheckConfig_HttpCheck_BasicAuthentication proto.InternalMessageInfo + +func (m *UptimeCheckConfig_HttpCheck_BasicAuthentication) GetUsername() string { + if m != nil { + return m.Username + } + return "" +} + +func (m *UptimeCheckConfig_HttpCheck_BasicAuthentication) GetPassword() string { + if m != nil { + return m.Password + } + return "" +} + +// Information required for a TCP uptime check request. +type UptimeCheckConfig_TcpCheck struct { + // The port to the page to run the check against. Will be combined with host + // (specified within the MonitoredResource) to construct the full URL. + // Required. 
+ Port int32 `protobuf:"varint,1,opt,name=port,proto3" json:"port,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UptimeCheckConfig_TcpCheck) Reset() { *m = UptimeCheckConfig_TcpCheck{} } +func (m *UptimeCheckConfig_TcpCheck) String() string { return proto.CompactTextString(m) } +func (*UptimeCheckConfig_TcpCheck) ProtoMessage() {} +func (*UptimeCheckConfig_TcpCheck) Descriptor() ([]byte, []int) { + return fileDescriptor_uptime_9dda9bdcd2304945, []int{1, 2} +} +func (m *UptimeCheckConfig_TcpCheck) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UptimeCheckConfig_TcpCheck.Unmarshal(m, b) +} +func (m *UptimeCheckConfig_TcpCheck) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UptimeCheckConfig_TcpCheck.Marshal(b, m, deterministic) +} +func (dst *UptimeCheckConfig_TcpCheck) XXX_Merge(src proto.Message) { + xxx_messageInfo_UptimeCheckConfig_TcpCheck.Merge(dst, src) +} +func (m *UptimeCheckConfig_TcpCheck) XXX_Size() int { + return xxx_messageInfo_UptimeCheckConfig_TcpCheck.Size(m) +} +func (m *UptimeCheckConfig_TcpCheck) XXX_DiscardUnknown() { + xxx_messageInfo_UptimeCheckConfig_TcpCheck.DiscardUnknown(m) +} + +var xxx_messageInfo_UptimeCheckConfig_TcpCheck proto.InternalMessageInfo + +func (m *UptimeCheckConfig_TcpCheck) GetPort() int32 { + if m != nil { + return m.Port + } + return 0 +} + +// Used to perform string matching. It allows substring and regular +// expressions, together with their negations. +type UptimeCheckConfig_ContentMatcher struct { + // String or regex content to match (max 1024 bytes) + Content string `protobuf:"bytes,1,opt,name=content,proto3" json:"content,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UptimeCheckConfig_ContentMatcher) Reset() { *m = UptimeCheckConfig_ContentMatcher{} } +func (m *UptimeCheckConfig_ContentMatcher) String() string { return proto.CompactTextString(m) } +func (*UptimeCheckConfig_ContentMatcher) ProtoMessage() {} +func (*UptimeCheckConfig_ContentMatcher) Descriptor() ([]byte, []int) { + return fileDescriptor_uptime_9dda9bdcd2304945, []int{1, 3} +} +func (m *UptimeCheckConfig_ContentMatcher) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UptimeCheckConfig_ContentMatcher.Unmarshal(m, b) +} +func (m *UptimeCheckConfig_ContentMatcher) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UptimeCheckConfig_ContentMatcher.Marshal(b, m, deterministic) +} +func (dst *UptimeCheckConfig_ContentMatcher) XXX_Merge(src proto.Message) { + xxx_messageInfo_UptimeCheckConfig_ContentMatcher.Merge(dst, src) +} +func (m *UptimeCheckConfig_ContentMatcher) XXX_Size() int { + return xxx_messageInfo_UptimeCheckConfig_ContentMatcher.Size(m) +} +func (m *UptimeCheckConfig_ContentMatcher) XXX_DiscardUnknown() { + xxx_messageInfo_UptimeCheckConfig_ContentMatcher.DiscardUnknown(m) +} + +var xxx_messageInfo_UptimeCheckConfig_ContentMatcher proto.InternalMessageInfo + +func (m *UptimeCheckConfig_ContentMatcher) GetContent() string { + if m != nil { + return m.Content + } + return "" +} + +// Contains the region, location, and list of IP +// addresses where checkers in the location run from. +type UptimeCheckIp struct { + // A broad region category in which the IP address is located. 
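+ // One of the UptimeCheckRegion values declared above (REGION_UNSPECIFIED,
+ // USA, EUROPE, SOUTH_AMERICA, or ASIA_PACIFIC).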
+ Region UptimeCheckRegion `protobuf:"varint,1,opt,name=region,proto3,enum=google.monitoring.v3.UptimeCheckRegion" json:"region,omitempty"` + // A more specific location within the region that typically encodes + // a particular city/town/metro (and its containing state/province or country) + // within the broader umbrella region category. + Location string `protobuf:"bytes,2,opt,name=location,proto3" json:"location,omitempty"` + // The IP address from which the uptime check originates. This is a full + // IP address (not an IP address range). Most IP addresses, as of this + // publication, are in IPv4 format; however, one should not rely on the + // IP addresses being in IPv4 format indefinitely and should support + // interpreting this field in either IPv4 or IPv6 format. + IpAddress string `protobuf:"bytes,3,opt,name=ip_address,json=ipAddress,proto3" json:"ip_address,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UptimeCheckIp) Reset() { *m = UptimeCheckIp{} } +func (m *UptimeCheckIp) String() string { return proto.CompactTextString(m) } +func (*UptimeCheckIp) ProtoMessage() {} +func (*UptimeCheckIp) Descriptor() ([]byte, []int) { + return fileDescriptor_uptime_9dda9bdcd2304945, []int{2} +} +func (m *UptimeCheckIp) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UptimeCheckIp.Unmarshal(m, b) +} +func (m *UptimeCheckIp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UptimeCheckIp.Marshal(b, m, deterministic) +} +func (dst *UptimeCheckIp) XXX_Merge(src proto.Message) { + xxx_messageInfo_UptimeCheckIp.Merge(dst, src) +} +func (m *UptimeCheckIp) XXX_Size() int { + return xxx_messageInfo_UptimeCheckIp.Size(m) +} +func (m *UptimeCheckIp) XXX_DiscardUnknown() { + xxx_messageInfo_UptimeCheckIp.DiscardUnknown(m) +} + +var xxx_messageInfo_UptimeCheckIp proto.InternalMessageInfo + +func (m *UptimeCheckIp) GetRegion() UptimeCheckRegion { + if m != nil { + return m.Region + } + return UptimeCheckRegion_REGION_UNSPECIFIED +} + +func (m *UptimeCheckIp) GetLocation() string { + if m != nil { + return m.Location + } + return "" +} + +func (m *UptimeCheckIp) GetIpAddress() string { + if m != nil { + return m.IpAddress + } + return "" +} + +func init() { + proto.RegisterType((*InternalChecker)(nil), "google.monitoring.v3.InternalChecker") + proto.RegisterType((*UptimeCheckConfig)(nil), "google.monitoring.v3.UptimeCheckConfig") + proto.RegisterType((*UptimeCheckConfig_ResourceGroup)(nil), "google.monitoring.v3.UptimeCheckConfig.ResourceGroup") + proto.RegisterType((*UptimeCheckConfig_HttpCheck)(nil), "google.monitoring.v3.UptimeCheckConfig.HttpCheck") + proto.RegisterMapType((map[string]string)(nil), "google.monitoring.v3.UptimeCheckConfig.HttpCheck.HeadersEntry") + proto.RegisterType((*UptimeCheckConfig_HttpCheck_BasicAuthentication)(nil), "google.monitoring.v3.UptimeCheckConfig.HttpCheck.BasicAuthentication") + proto.RegisterType((*UptimeCheckConfig_TcpCheck)(nil), "google.monitoring.v3.UptimeCheckConfig.TcpCheck") + proto.RegisterType((*UptimeCheckConfig_ContentMatcher)(nil), "google.monitoring.v3.UptimeCheckConfig.ContentMatcher") + proto.RegisterType((*UptimeCheckIp)(nil), "google.monitoring.v3.UptimeCheckIp") + proto.RegisterEnum("google.monitoring.v3.UptimeCheckRegion", UptimeCheckRegion_name, UptimeCheckRegion_value) + proto.RegisterEnum("google.monitoring.v3.GroupResourceType", GroupResourceType_name, GroupResourceType_value) +} + +func init() { + 
proto.RegisterFile("google/monitoring/v3/uptime.proto", fileDescriptor_uptime_9dda9bdcd2304945) +} + +var fileDescriptor_uptime_9dda9bdcd2304945 = []byte{ + // 1036 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x56, 0xed, 0x6e, 0xe3, 0x44, + 0x17, 0xae, 0x9b, 0x36, 0x1f, 0x27, 0xfd, 0x70, 0xe7, 0xed, 0x0b, 0x6e, 0xa4, 0x2e, 0xdd, 0x22, + 0xa0, 0xea, 0x8f, 0x84, 0x6d, 0x04, 0x42, 0x8b, 0xb4, 0xc8, 0x49, 0x4d, 0x13, 0xa9, 0x4d, 0xa2, + 0x49, 0xb3, 0xc0, 0x52, 0x31, 0x72, 0xed, 0xa9, 0x63, 0x9a, 0x78, 0x8c, 0x67, 0xdc, 0xa5, 0xdc, + 0x02, 0x97, 0x81, 0xf8, 0xc3, 0x15, 0x70, 0x0d, 0x5c, 0x00, 0xd7, 0x83, 0x66, 0xec, 0x49, 0x9b, + 0xb6, 0x68, 0x5b, 0xfe, 0xcd, 0x73, 0x3e, 0x9e, 0x39, 0x73, 0xe6, 0x3c, 0x63, 0xc3, 0xf3, 0x80, + 0xb1, 0x60, 0x42, 0x1b, 0x53, 0x16, 0x85, 0x82, 0x25, 0x61, 0x14, 0x34, 0xae, 0x9a, 0x8d, 0x34, + 0x16, 0xe1, 0x94, 0xd6, 0xe3, 0x84, 0x09, 0x86, 0x36, 0xb3, 0x90, 0xfa, 0x4d, 0x48, 0xfd, 0xaa, + 0x59, 0xfb, 0x30, 0x4f, 0x74, 0xe3, 0x50, 0x27, 0x53, 0x9f, 0x24, 0x94, 0xb3, 0x34, 0xf1, 0xf2, + 0xd4, 0xda, 0xb3, 0x3c, 0x48, 0xa1, 0xf3, 0xf4, 0xa2, 0xe1, 0xa7, 0x89, 0x2b, 0x42, 0x16, 0x65, + 0xfe, 0xdd, 0xdf, 0x0d, 0x58, 0xef, 0x46, 0x82, 0x26, 0x91, 0x3b, 0x69, 0x8f, 0xa9, 0x77, 0x49, + 0x13, 0x84, 0x60, 0x29, 0x72, 0xa7, 0xd4, 0x32, 0x76, 0x8c, 0xbd, 0x0a, 0x56, 0x6b, 0xf4, 0x1c, + 0x56, 0xfc, 0x90, 0xc7, 0x13, 0xf7, 0x9a, 0x28, 0xdf, 0xa2, 0xf2, 0x55, 0x73, 0x5b, 0x4f, 0x86, + 0x58, 0x50, 0x8a, 0xa8, 0x78, 0xcb, 0x92, 0x4b, 0xab, 0xa0, 0xbc, 0x1a, 0xa2, 0x2d, 0x28, 0x07, + 0x5e, 0x4c, 0x7e, 0x61, 0x11, 0xb5, 0x96, 0x32, 0x57, 0xe0, 0xc5, 0x6f, 0x58, 0x44, 0xd1, 0xc7, + 0xb0, 0x1e, 0x53, 0x9a, 0x90, 0x38, 0x61, 0x3f, 0x52, 0x4f, 0x90, 0xd0, 0xb7, 0x8a, 0x2a, 0x62, + 0x55, 0x9a, 0x07, 0x99, 0xb5, 0xeb, 0xef, 0xfe, 0x5d, 0x85, 0x8d, 0x91, 0xea, 0x89, 0xaa, 0xb2, + 0xcd, 0xa2, 0x8b, 0x30, 0xf8, 0xaf, 0x95, 0xf6, 0x00, 0xdd, 0x6f, 0x98, 0x2a, 0xba, 0x7a, 0xb0, + 0x5d, 0xcf, 0x9b, 0xed, 0xc6, 0x61, 0xfd, 0x44, 0x47, 0xe1, 0x3c, 0xa8, 0xb3, 0x80, 0x37, 0xa6, + 0x77, 0x8d, 0xe8, 0x07, 0x58, 0xd3, 0x2c, 0x24, 0x48, 0x58, 0x1a, 0xab, 0x53, 0x56, 0x0f, 0x3e, + 0xab, 0x3f, 0x74, 0x71, 0xf5, 0x7b, 0xe7, 0xa8, 0x6b, 0xa6, 0x23, 0x99, 0xdc, 0x59, 0xc0, 0xab, + 0xc9, 0x6d, 0x03, 0xc2, 0x00, 0x63, 0x21, 0x62, 0xe2, 0xc9, 0x14, 0x6b, 0x59, 0x71, 0xbf, 0x78, + 0x2c, 0x77, 0x47, 0x88, 0x58, 0xe1, 0x8e, 0x81, 0x2b, 0x63, 0x0d, 0x50, 0x1f, 0x2a, 0xc2, 0xd3, + 0x94, 0x45, 0x45, 0xf9, 0xe9, 0x63, 0x29, 0x4f, 0xbd, 0x19, 0x63, 0x59, 0xe4, 0x6b, 0xf4, 0x02, + 0x8a, 0x31, 0x4d, 0x42, 0xe6, 0x5b, 0x25, 0xc5, 0xb6, 0xa5, 0xd9, 0xf4, 0xe8, 0xd5, 0x0f, 0xf3, + 0xd1, 0xc3, 0x79, 0x20, 0x6a, 0x42, 0x49, 0x52, 0xb3, 0x54, 0x58, 0xe5, 0x77, 0xe5, 0xe8, 0x48, + 0xe4, 0x82, 0xe9, 0xb1, 0x48, 0xd0, 0x48, 0x90, 0xa9, 0x2b, 0xbc, 0x31, 0x4d, 0xb8, 0x55, 0xd9, + 0x29, 0xec, 0x55, 0x0f, 0x3e, 0x7f, 0x6c, 0xfd, 0xed, 0x2c, 0xff, 0x24, 0x4b, 0xc7, 0xeb, 0xde, + 0x1c, 0xe6, 0x08, 0x83, 0xc9, 0xe9, 0x84, 0x7a, 0x42, 0x8d, 0x47, 0x10, 0xb2, 0x88, 0x5b, 0xb0, + 0x53, 0xd8, 0x5b, 0x3b, 0xf8, 0xe4, 0x9d, 0x5b, 0x60, 0x15, 0x8f, 0xd7, 0x35, 0x41, 0x86, 0x39, + 0xfa, 0x00, 0xaa, 0x21, 0x27, 0x61, 0x2e, 0x35, 0x6b, 0x7d, 0xc7, 0xd8, 0x2b, 0x63, 0x08, 0xb9, + 0x16, 0x1f, 0xc2, 0xb0, 0xa1, 0xbd, 0xd9, 0xad, 0xc8, 0x83, 0xad, 0xa9, 0x83, 0x7d, 0xf4, 0xf0, + 0xae, 0x77, 0x74, 0x8b, 0xcd, 0x70, 0xde, 0xc0, 0x6b, 0x3f, 0xc3, 0xea, 0xdc, 0x68, 0x29, 0x25, + 0xca, 0x85, 0xd4, 0x99, 0x91, 0x2b, 0x51, 0xe2, 0xae, 0x8f, 0x8e, 0x61, 0x36, 0x75, 0x44, 0x5c, + 0xc7, 0x99, 
0x70, 0xfe, 0xf5, 0xc4, 0x8a, 0x4e, 0x73, 0x9f, 0x5e, 0xc7, 0x14, 0xaf, 0x24, 0xb7, + 0x50, 0xed, 0xcf, 0x02, 0x54, 0x66, 0x93, 0x87, 0xde, 0x87, 0x52, 0xca, 0x29, 0xe1, 0x7c, 0xa2, + 0x76, 0x2d, 0xe3, 0x62, 0xca, 0xe9, 0x90, 0x4f, 0xa4, 0x80, 0x63, 0x57, 0x8c, 0x73, 0x91, 0xaa, + 0xb5, 0xb2, 0xb1, 0x44, 0x28, 0x3d, 0x2e, 0x63, 0xb5, 0x46, 0xe7, 0x50, 0x71, 0x53, 0x31, 0x26, + 0x61, 0x74, 0xc1, 0x72, 0x71, 0x39, 0x4f, 0x16, 0x40, 0xbd, 0xe5, 0xf2, 0xd0, 0xb3, 0x53, 0x31, + 0xa6, 0x91, 0x08, 0xbd, 0x6c, 0xae, 0xca, 0x92, 0xb7, 0x1b, 0x5d, 0x30, 0xf9, 0x70, 0x4c, 0x5d, + 0x7e, 0x49, 0xc6, 0xd4, 0xf5, 0x65, 0xef, 0x97, 0x55, 0xa5, 0x55, 0x69, 0xeb, 0x64, 0x26, 0xf4, + 0x2d, 0x94, 0xb4, 0xb7, 0xa8, 0x6e, 0xe6, 0xd5, 0xd3, 0x8b, 0xc8, 0xb9, 0x9c, 0x48, 0x24, 0xd7, + 0x58, 0xd3, 0xd5, 0x4e, 0xe0, 0x7f, 0x0f, 0x54, 0x87, 0x6a, 0x50, 0x4e, 0xb9, 0xbc, 0xd3, 0xd9, + 0x23, 0x37, 0xc3, 0xd2, 0x17, 0xbb, 0x9c, 0xbf, 0x65, 0x89, 0x9f, 0xf7, 0x6f, 0x86, 0x6b, 0x2f, + 0x61, 0xe5, 0xf6, 0x3e, 0xc8, 0x84, 0xc2, 0x25, 0xbd, 0xce, 0x29, 0xe4, 0x12, 0x6d, 0xc2, 0xf2, + 0x95, 0x3b, 0x49, 0xf5, 0xfb, 0x98, 0x81, 0x97, 0x8b, 0x5f, 0x18, 0xb5, 0x67, 0x50, 0xd6, 0x02, + 0x9f, 0xdd, 0x85, 0x71, 0x73, 0x17, 0xb5, 0x7d, 0x58, 0x9b, 0x17, 0x90, 0x7c, 0xf9, 0x73, 0x09, + 0xe9, 0xa1, 0xca, 0x61, 0x0b, 0xa0, 0xac, 0xc7, 0xa2, 0xb5, 0x09, 0x48, 0xcd, 0x35, 0x49, 0xe8, + 0x4f, 0x29, 0xe5, 0x42, 0x4d, 0xd9, 0xee, 0xaf, 0x06, 0xac, 0xde, 0x6a, 0x57, 0x37, 0x46, 0x5f, + 0x41, 0x31, 0x13, 0x9d, 0x22, 0x7b, 0x82, 0xe6, 0xf2, 0x34, 0xd9, 0x98, 0x09, 0xcb, 0x1a, 0xa8, + 0x1b, 0xa3, 0x31, 0xda, 0x06, 0x08, 0x63, 0xe2, 0xfa, 0x7e, 0x42, 0x39, 0xcf, 0xbf, 0x53, 0x95, + 0x30, 0xb6, 0x33, 0xc3, 0x3e, 0x9d, 0xfb, 0xca, 0x64, 0xbc, 0xe8, 0x3d, 0x40, 0xd8, 0x39, 0xea, + 0xf6, 0x7b, 0x64, 0xd4, 0x1b, 0x0e, 0x9c, 0x76, 0xf7, 0xeb, 0xae, 0x73, 0x68, 0x2e, 0xa0, 0x12, + 0x14, 0x46, 0x43, 0xdb, 0x34, 0x10, 0x40, 0xd1, 0x19, 0xe1, 0xfe, 0xc0, 0x31, 0x17, 0xd1, 0x06, + 0xac, 0x0e, 0xfb, 0xa3, 0xd3, 0x0e, 0xb1, 0x4f, 0x1c, 0xdc, 0x6d, 0xdb, 0x66, 0x01, 0x99, 0xb0, + 0x62, 0x0f, 0xbb, 0x36, 0x19, 0xd8, 0x32, 0xb5, 0x6d, 0x2e, 0xed, 0x7f, 0x0f, 0x1b, 0xf7, 0x04, + 0x84, 0xb6, 0x61, 0x0b, 0x3b, 0xc3, 0xfe, 0x08, 0xb7, 0x1d, 0x72, 0xfa, 0xdd, 0xc0, 0xb9, 0xb3, + 0xdb, 0x0a, 0x94, 0xbb, 0xbd, 0xe1, 0xa9, 0xdd, 0x6b, 0x3b, 0xa6, 0x81, 0xb6, 0xe0, 0xff, 0xf6, + 0x37, 0x43, 0xe2, 0x1c, 0xb7, 0xc8, 0x71, 0xdf, 0x3e, 0x24, 0x2d, 0xfb, 0x58, 0x7a, 0xb0, 0xb9, + 0xd8, 0xfa, 0xcd, 0x00, 0xcb, 0x63, 0xd3, 0x07, 0xbb, 0xd6, 0xaa, 0x66, 0xc7, 0x1b, 0xc8, 0xf7, + 0x75, 0x60, 0xbc, 0x79, 0x95, 0x07, 0x05, 0x6c, 0xe2, 0x46, 0x41, 0x9d, 0x25, 0x41, 0x23, 0xa0, + 0x91, 0x7a, 0x7d, 0x1b, 0x99, 0xcb, 0x8d, 0x43, 0x3e, 0xff, 0x6f, 0xf2, 0xe5, 0x0d, 0xfa, 0x63, + 0xb1, 0x76, 0x94, 0x11, 0xb4, 0x27, 0x2c, 0xf5, 0xf5, 0xf7, 0x52, 0xee, 0xf5, 0xba, 0xf9, 0x97, + 0x76, 0x9e, 0x29, 0xe7, 0xd9, 0x8d, 0xf3, 0xec, 0x75, 0xf3, 0xbc, 0xa8, 0x36, 0x69, 0xfe, 0x13, + 0x00, 0x00, 0xff, 0xff, 0x0a, 0x0a, 0xbb, 0x6b, 0xff, 0x08, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/uptime_service.pb.go b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/uptime_service.pb.go new file mode 100644 index 000000000..5192f4a18 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/uptime_service.pb.go @@ -0,0 +1,786 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// source: google/monitoring/v3/uptime_service.proto + +package monitoring // import "google.golang.org/genproto/googleapis/monitoring/v3" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import empty "github.com/golang/protobuf/ptypes/empty" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import field_mask "google.golang.org/genproto/protobuf/field_mask" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// The protocol for the `ListUptimeCheckConfigs` request. +type ListUptimeCheckConfigsRequest struct { + // The project whose uptime check configurations are listed. The format + // is `projects/[PROJECT_ID]`. + Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` + // The maximum number of results to return in a single response. The server + // may further constrain the maximum number of results returned in a single + // page. If the page_size is <=0, the server will decide the number of results + // to be returned. + PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // If this field is not empty then it must contain the `nextPageToken` value + // returned by a previous call to this method. Using this field causes the + // method to return more results from the previous method call. 
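+ //
+ // A minimal pagination sketch (illustrative only; it assumes a connected
+ // UptimeCheckServiceClient named `client`, a valid `ctx`, and a placeholder
+ // project name, none of which are defined in this file):
+ //
+ //   req := &ListUptimeCheckConfigsRequest{Parent: "projects/my-project"}
+ //   for {
+ //       resp, err := client.ListUptimeCheckConfigs(ctx, req)
+ //       if err != nil {
+ //           break // surface the error to the caller in real code
+ //       }
+ //       for _, cfg := range resp.GetUptimeCheckConfigs() {
+ //           _ = cfg // process each configuration
+ //       }
+ //       if resp.GetNextPageToken() == "" {
+ //           break
+ //       }
+ //       req.PageToken = resp.GetNextPageToken()
+ //   }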
+ PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListUptimeCheckConfigsRequest) Reset() { *m = ListUptimeCheckConfigsRequest{} } +func (m *ListUptimeCheckConfigsRequest) String() string { return proto.CompactTextString(m) } +func (*ListUptimeCheckConfigsRequest) ProtoMessage() {} +func (*ListUptimeCheckConfigsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_uptime_service_c74f83143a8cb5a4, []int{0} +} +func (m *ListUptimeCheckConfigsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListUptimeCheckConfigsRequest.Unmarshal(m, b) +} +func (m *ListUptimeCheckConfigsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListUptimeCheckConfigsRequest.Marshal(b, m, deterministic) +} +func (dst *ListUptimeCheckConfigsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListUptimeCheckConfigsRequest.Merge(dst, src) +} +func (m *ListUptimeCheckConfigsRequest) XXX_Size() int { + return xxx_messageInfo_ListUptimeCheckConfigsRequest.Size(m) +} +func (m *ListUptimeCheckConfigsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListUptimeCheckConfigsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListUptimeCheckConfigsRequest proto.InternalMessageInfo + +func (m *ListUptimeCheckConfigsRequest) GetParent() string { + if m != nil { + return m.Parent + } + return "" +} + +func (m *ListUptimeCheckConfigsRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListUptimeCheckConfigsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +// The protocol for the `ListUptimeCheckConfigs` response. +type ListUptimeCheckConfigsResponse struct { + // The returned uptime check configurations. + UptimeCheckConfigs []*UptimeCheckConfig `protobuf:"bytes,1,rep,name=uptime_check_configs,json=uptimeCheckConfigs,proto3" json:"uptime_check_configs,omitempty"` + // This field represents the pagination token to retrieve the next page of + // results. If the value is empty, it means no further results for the + // request. To retrieve the next page of results, the value of the + // next_page_token is passed to the subsequent List method call (in the + // request message's page_token field). + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + // The total number of uptime check configurations for the project, + // irrespective of any pagination. 
+ TotalSize int32 `protobuf:"varint,3,opt,name=total_size,json=totalSize,proto3" json:"total_size,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListUptimeCheckConfigsResponse) Reset() { *m = ListUptimeCheckConfigsResponse{} } +func (m *ListUptimeCheckConfigsResponse) String() string { return proto.CompactTextString(m) } +func (*ListUptimeCheckConfigsResponse) ProtoMessage() {} +func (*ListUptimeCheckConfigsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_uptime_service_c74f83143a8cb5a4, []int{1} +} +func (m *ListUptimeCheckConfigsResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListUptimeCheckConfigsResponse.Unmarshal(m, b) +} +func (m *ListUptimeCheckConfigsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListUptimeCheckConfigsResponse.Marshal(b, m, deterministic) +} +func (dst *ListUptimeCheckConfigsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListUptimeCheckConfigsResponse.Merge(dst, src) +} +func (m *ListUptimeCheckConfigsResponse) XXX_Size() int { + return xxx_messageInfo_ListUptimeCheckConfigsResponse.Size(m) +} +func (m *ListUptimeCheckConfigsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListUptimeCheckConfigsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListUptimeCheckConfigsResponse proto.InternalMessageInfo + +func (m *ListUptimeCheckConfigsResponse) GetUptimeCheckConfigs() []*UptimeCheckConfig { + if m != nil { + return m.UptimeCheckConfigs + } + return nil +} + +func (m *ListUptimeCheckConfigsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +func (m *ListUptimeCheckConfigsResponse) GetTotalSize() int32 { + if m != nil { + return m.TotalSize + } + return 0 +} + +// The protocol for the `GetUptimeCheckConfig` request. +type GetUptimeCheckConfigRequest struct { + // The uptime check configuration to retrieve. The format + // is `projects/[PROJECT_ID]/uptimeCheckConfigs/[UPTIME_CHECK_ID]`. 
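+ //
+ // For example (the project and check IDs below are placeholders; `fmt` is
+ // already imported by this file):
+ //
+ //   name := fmt.Sprintf("projects/%s/uptimeCheckConfigs/%s",
+ //       "my-project", "my-check-id")
+ //   req := &GetUptimeCheckConfigRequest{Name: name}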
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetUptimeCheckConfigRequest) Reset() { *m = GetUptimeCheckConfigRequest{} } +func (m *GetUptimeCheckConfigRequest) String() string { return proto.CompactTextString(m) } +func (*GetUptimeCheckConfigRequest) ProtoMessage() {} +func (*GetUptimeCheckConfigRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_uptime_service_c74f83143a8cb5a4, []int{2} +} +func (m *GetUptimeCheckConfigRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetUptimeCheckConfigRequest.Unmarshal(m, b) +} +func (m *GetUptimeCheckConfigRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetUptimeCheckConfigRequest.Marshal(b, m, deterministic) +} +func (dst *GetUptimeCheckConfigRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetUptimeCheckConfigRequest.Merge(dst, src) +} +func (m *GetUptimeCheckConfigRequest) XXX_Size() int { + return xxx_messageInfo_GetUptimeCheckConfigRequest.Size(m) +} +func (m *GetUptimeCheckConfigRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetUptimeCheckConfigRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetUptimeCheckConfigRequest proto.InternalMessageInfo + +func (m *GetUptimeCheckConfigRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// The protocol for the `CreateUptimeCheckConfig` request. +type CreateUptimeCheckConfigRequest struct { + // The project in which to create the uptime check. The format + // is `projects/[PROJECT_ID]`. + Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` + // The new uptime check configuration. 
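+ //
+ // A hedged sketch of building a create request (only simple fields are
+ // shown; the parent and display name are placeholders, and the check
+ // target and check type fields are omitted):
+ //
+ //   req := &CreateUptimeCheckConfigRequest{
+ //       Parent: "projects/my-project",
+ //       UptimeCheckConfig: &UptimeCheckConfig{
+ //           DisplayName: "example-check",
+ //       },
+ //   }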
+ UptimeCheckConfig *UptimeCheckConfig `protobuf:"bytes,2,opt,name=uptime_check_config,json=uptimeCheckConfig,proto3" json:"uptime_check_config,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *CreateUptimeCheckConfigRequest) Reset() { *m = CreateUptimeCheckConfigRequest{} }
+func (m *CreateUptimeCheckConfigRequest) String() string { return proto.CompactTextString(m) }
+func (*CreateUptimeCheckConfigRequest) ProtoMessage() {}
+func (*CreateUptimeCheckConfigRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_uptime_service_c74f83143a8cb5a4, []int{3}
+}
+func (m *CreateUptimeCheckConfigRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_CreateUptimeCheckConfigRequest.Unmarshal(m, b)
+}
+func (m *CreateUptimeCheckConfigRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_CreateUptimeCheckConfigRequest.Marshal(b, m, deterministic)
+}
+func (dst *CreateUptimeCheckConfigRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_CreateUptimeCheckConfigRequest.Merge(dst, src)
+}
+func (m *CreateUptimeCheckConfigRequest) XXX_Size() int {
+ return xxx_messageInfo_CreateUptimeCheckConfigRequest.Size(m)
+}
+func (m *CreateUptimeCheckConfigRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_CreateUptimeCheckConfigRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CreateUptimeCheckConfigRequest proto.InternalMessageInfo
+
+func (m *CreateUptimeCheckConfigRequest) GetParent() string {
+ if m != nil {
+ return m.Parent
+ }
+ return ""
+}
+
+func (m *CreateUptimeCheckConfigRequest) GetUptimeCheckConfig() *UptimeCheckConfig {
+ if m != nil {
+ return m.UptimeCheckConfig
+ }
+ return nil
+}
+
+// The protocol for the `UpdateUptimeCheckConfig` request.
+type UpdateUptimeCheckConfigRequest struct {
+ // Optional. If present, only the listed fields in the current uptime check
+ // configuration are updated with values from the new configuration. If this
+ // field is empty, then the current configuration is completely replaced with
+ // the new configuration.
+ UpdateMask *field_mask.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"`
+ // Required. If an `"updateMask"` has been specified, this field gives
+ // the values for the set of fields mentioned in the `"updateMask"`. If an
+ // `"updateMask"` has not been given, this uptime check configuration replaces
+ // the current configuration. If a field is mentioned in `"updateMask"` but
+ // the corresponding field is omitted in this partial uptime check
+ // configuration, it has the effect of deleting/clearing the field from the
+ // configuration on the server.
+ //
+ // The following fields can be updated: `display_name`,
+ // `http_check`, `tcp_check`, `timeout`, `content_matchers`, and
+ // `selected_regions`.
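+ //
+ // A hedged sketch of a partial update that changes only `display_name`
+ // (the `Name` field on UptimeCheckConfig and all values below are
+ // illustrative assumptions, not taken from this file):
+ //
+ //   req := &UpdateUptimeCheckConfigRequest{
+ //       UpdateMask: &field_mask.FieldMask{Paths: []string{"display_name"}},
+ //       UptimeCheckConfig: &UptimeCheckConfig{
+ //           Name:        "projects/my-project/uptimeCheckConfigs/my-check-id",
+ //           DisplayName: "renamed-check",
+ //       },
+ //   }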
+ UptimeCheckConfig *UptimeCheckConfig `protobuf:"bytes,3,opt,name=uptime_check_config,json=uptimeCheckConfig,proto3" json:"uptime_check_config,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpdateUptimeCheckConfigRequest) Reset() { *m = UpdateUptimeCheckConfigRequest{} } +func (m *UpdateUptimeCheckConfigRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateUptimeCheckConfigRequest) ProtoMessage() {} +func (*UpdateUptimeCheckConfigRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_uptime_service_c74f83143a8cb5a4, []int{4} +} +func (m *UpdateUptimeCheckConfigRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UpdateUptimeCheckConfigRequest.Unmarshal(m, b) +} +func (m *UpdateUptimeCheckConfigRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UpdateUptimeCheckConfigRequest.Marshal(b, m, deterministic) +} +func (dst *UpdateUptimeCheckConfigRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdateUptimeCheckConfigRequest.Merge(dst, src) +} +func (m *UpdateUptimeCheckConfigRequest) XXX_Size() int { + return xxx_messageInfo_UpdateUptimeCheckConfigRequest.Size(m) +} +func (m *UpdateUptimeCheckConfigRequest) XXX_DiscardUnknown() { + xxx_messageInfo_UpdateUptimeCheckConfigRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_UpdateUptimeCheckConfigRequest proto.InternalMessageInfo + +func (m *UpdateUptimeCheckConfigRequest) GetUpdateMask() *field_mask.FieldMask { + if m != nil { + return m.UpdateMask + } + return nil +} + +func (m *UpdateUptimeCheckConfigRequest) GetUptimeCheckConfig() *UptimeCheckConfig { + if m != nil { + return m.UptimeCheckConfig + } + return nil +} + +// The protocol for the `DeleteUptimeCheckConfig` request. +type DeleteUptimeCheckConfigRequest struct { + // The uptime check configuration to delete. The format + // is `projects/[PROJECT_ID]/uptimeCheckConfigs/[UPTIME_CHECK_ID]`. 
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteUptimeCheckConfigRequest) Reset() { *m = DeleteUptimeCheckConfigRequest{} } +func (m *DeleteUptimeCheckConfigRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteUptimeCheckConfigRequest) ProtoMessage() {} +func (*DeleteUptimeCheckConfigRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_uptime_service_c74f83143a8cb5a4, []int{5} +} +func (m *DeleteUptimeCheckConfigRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteUptimeCheckConfigRequest.Unmarshal(m, b) +} +func (m *DeleteUptimeCheckConfigRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteUptimeCheckConfigRequest.Marshal(b, m, deterministic) +} +func (dst *DeleteUptimeCheckConfigRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteUptimeCheckConfigRequest.Merge(dst, src) +} +func (m *DeleteUptimeCheckConfigRequest) XXX_Size() int { + return xxx_messageInfo_DeleteUptimeCheckConfigRequest.Size(m) +} +func (m *DeleteUptimeCheckConfigRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteUptimeCheckConfigRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteUptimeCheckConfigRequest proto.InternalMessageInfo + +func (m *DeleteUptimeCheckConfigRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// The protocol for the `ListUptimeCheckIps` request. +type ListUptimeCheckIpsRequest struct { + // The maximum number of results to return in a single response. The server + // may further constrain the maximum number of results returned in a single + // page. If the page_size is <=0, the server will decide the number of results + // to be returned. + // NOTE: this field is not yet implemented + PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // If this field is not empty then it must contain the `nextPageToken` value + // returned by a previous call to this method. Using this field causes the + // method to return more results from the previous method call. 
+ // NOTE: this field is not yet implemented + PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListUptimeCheckIpsRequest) Reset() { *m = ListUptimeCheckIpsRequest{} } +func (m *ListUptimeCheckIpsRequest) String() string { return proto.CompactTextString(m) } +func (*ListUptimeCheckIpsRequest) ProtoMessage() {} +func (*ListUptimeCheckIpsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_uptime_service_c74f83143a8cb5a4, []int{6} +} +func (m *ListUptimeCheckIpsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListUptimeCheckIpsRequest.Unmarshal(m, b) +} +func (m *ListUptimeCheckIpsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListUptimeCheckIpsRequest.Marshal(b, m, deterministic) +} +func (dst *ListUptimeCheckIpsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListUptimeCheckIpsRequest.Merge(dst, src) +} +func (m *ListUptimeCheckIpsRequest) XXX_Size() int { + return xxx_messageInfo_ListUptimeCheckIpsRequest.Size(m) +} +func (m *ListUptimeCheckIpsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListUptimeCheckIpsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListUptimeCheckIpsRequest proto.InternalMessageInfo + +func (m *ListUptimeCheckIpsRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListUptimeCheckIpsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +// The protocol for the `ListUptimeCheckIps` response. +type ListUptimeCheckIpsResponse struct { + // The returned list of IP addresses (including region and location) that the + // checkers run from. + UptimeCheckIps []*UptimeCheckIp `protobuf:"bytes,1,rep,name=uptime_check_ips,json=uptimeCheckIps,proto3" json:"uptime_check_ips,omitempty"` + // This field represents the pagination token to retrieve the next page of + // results. If the value is empty, it means no further results for the + // request. To retrieve the next page of results, the value of the + // next_page_token is passed to the subsequent List method call (in the + // request message's page_token field). 
+ // NOTE: this field is not yet implemented + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListUptimeCheckIpsResponse) Reset() { *m = ListUptimeCheckIpsResponse{} } +func (m *ListUptimeCheckIpsResponse) String() string { return proto.CompactTextString(m) } +func (*ListUptimeCheckIpsResponse) ProtoMessage() {} +func (*ListUptimeCheckIpsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_uptime_service_c74f83143a8cb5a4, []int{7} +} +func (m *ListUptimeCheckIpsResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListUptimeCheckIpsResponse.Unmarshal(m, b) +} +func (m *ListUptimeCheckIpsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListUptimeCheckIpsResponse.Marshal(b, m, deterministic) +} +func (dst *ListUptimeCheckIpsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListUptimeCheckIpsResponse.Merge(dst, src) +} +func (m *ListUptimeCheckIpsResponse) XXX_Size() int { + return xxx_messageInfo_ListUptimeCheckIpsResponse.Size(m) +} +func (m *ListUptimeCheckIpsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListUptimeCheckIpsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListUptimeCheckIpsResponse proto.InternalMessageInfo + +func (m *ListUptimeCheckIpsResponse) GetUptimeCheckIps() []*UptimeCheckIp { + if m != nil { + return m.UptimeCheckIps + } + return nil +} + +func (m *ListUptimeCheckIpsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +func init() { + proto.RegisterType((*ListUptimeCheckConfigsRequest)(nil), "google.monitoring.v3.ListUptimeCheckConfigsRequest") + proto.RegisterType((*ListUptimeCheckConfigsResponse)(nil), "google.monitoring.v3.ListUptimeCheckConfigsResponse") + proto.RegisterType((*GetUptimeCheckConfigRequest)(nil), "google.monitoring.v3.GetUptimeCheckConfigRequest") + proto.RegisterType((*CreateUptimeCheckConfigRequest)(nil), "google.monitoring.v3.CreateUptimeCheckConfigRequest") + proto.RegisterType((*UpdateUptimeCheckConfigRequest)(nil), "google.monitoring.v3.UpdateUptimeCheckConfigRequest") + proto.RegisterType((*DeleteUptimeCheckConfigRequest)(nil), "google.monitoring.v3.DeleteUptimeCheckConfigRequest") + proto.RegisterType((*ListUptimeCheckIpsRequest)(nil), "google.monitoring.v3.ListUptimeCheckIpsRequest") + proto.RegisterType((*ListUptimeCheckIpsResponse)(nil), "google.monitoring.v3.ListUptimeCheckIpsResponse") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// UptimeCheckServiceClient is the client API for UptimeCheckService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type UptimeCheckServiceClient interface { + // Lists the existing valid uptime check configurations for the project, + // leaving out any invalid configurations. + ListUptimeCheckConfigs(ctx context.Context, in *ListUptimeCheckConfigsRequest, opts ...grpc.CallOption) (*ListUptimeCheckConfigsResponse, error) + // Gets a single uptime check configuration. 
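+ //
+ // A hedged usage sketch (the endpoint is a placeholder, `ctx` and `name`
+ // are assumed to exist, and real calls to the Google API endpoint require
+ // transport security and authentication options, omitted here):
+ //
+ //   conn, err := grpc.Dial("monitoring.googleapis.com:443" /* plus credential options */)
+ //   if err != nil {
+ //       // handle the dial error
+ //   }
+ //   defer conn.Close()
+ //   client := NewUptimeCheckServiceClient(conn)
+ //   cfg, err := client.GetUptimeCheckConfig(ctx,
+ //       &GetUptimeCheckConfigRequest{Name: name})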
+ GetUptimeCheckConfig(ctx context.Context, in *GetUptimeCheckConfigRequest, opts ...grpc.CallOption) (*UptimeCheckConfig, error) + // Creates a new uptime check configuration. + CreateUptimeCheckConfig(ctx context.Context, in *CreateUptimeCheckConfigRequest, opts ...grpc.CallOption) (*UptimeCheckConfig, error) + // Updates an uptime check configuration. You can either replace the entire + // configuration with a new one or replace only certain fields in the current + // configuration by specifying the fields to be updated via `"updateMask"`. + // Returns the updated configuration. + UpdateUptimeCheckConfig(ctx context.Context, in *UpdateUptimeCheckConfigRequest, opts ...grpc.CallOption) (*UptimeCheckConfig, error) + // Deletes an uptime check configuration. Note that this method will fail + // if the uptime check configuration is referenced by an alert policy or + // other dependent configs that would be rendered invalid by the deletion. + DeleteUptimeCheckConfig(ctx context.Context, in *DeleteUptimeCheckConfigRequest, opts ...grpc.CallOption) (*empty.Empty, error) + // Returns the list of IPs that checkers run from + ListUptimeCheckIps(ctx context.Context, in *ListUptimeCheckIpsRequest, opts ...grpc.CallOption) (*ListUptimeCheckIpsResponse, error) +} + +type uptimeCheckServiceClient struct { + cc *grpc.ClientConn +} + +func NewUptimeCheckServiceClient(cc *grpc.ClientConn) UptimeCheckServiceClient { + return &uptimeCheckServiceClient{cc} +} + +func (c *uptimeCheckServiceClient) ListUptimeCheckConfigs(ctx context.Context, in *ListUptimeCheckConfigsRequest, opts ...grpc.CallOption) (*ListUptimeCheckConfigsResponse, error) { + out := new(ListUptimeCheckConfigsResponse) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.UptimeCheckService/ListUptimeCheckConfigs", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *uptimeCheckServiceClient) GetUptimeCheckConfig(ctx context.Context, in *GetUptimeCheckConfigRequest, opts ...grpc.CallOption) (*UptimeCheckConfig, error) { + out := new(UptimeCheckConfig) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.UptimeCheckService/GetUptimeCheckConfig", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *uptimeCheckServiceClient) CreateUptimeCheckConfig(ctx context.Context, in *CreateUptimeCheckConfigRequest, opts ...grpc.CallOption) (*UptimeCheckConfig, error) { + out := new(UptimeCheckConfig) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.UptimeCheckService/CreateUptimeCheckConfig", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *uptimeCheckServiceClient) UpdateUptimeCheckConfig(ctx context.Context, in *UpdateUptimeCheckConfigRequest, opts ...grpc.CallOption) (*UptimeCheckConfig, error) { + out := new(UptimeCheckConfig) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.UptimeCheckService/UpdateUptimeCheckConfig", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *uptimeCheckServiceClient) DeleteUptimeCheckConfig(ctx context.Context, in *DeleteUptimeCheckConfigRequest, opts ...grpc.CallOption) (*empty.Empty, error) { + out := new(empty.Empty) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.UptimeCheckService/DeleteUptimeCheckConfig", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *uptimeCheckServiceClient) ListUptimeCheckIps(ctx context.Context, in *ListUptimeCheckIpsRequest, opts ...grpc.CallOption) (*ListUptimeCheckIpsResponse, error) { + out := new(ListUptimeCheckIpsResponse) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.UptimeCheckService/ListUptimeCheckIps", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// UptimeCheckServiceServer is the server API for UptimeCheckService service. +type UptimeCheckServiceServer interface { + // Lists the existing valid uptime check configurations for the project, + // leaving out any invalid configurations. + ListUptimeCheckConfigs(context.Context, *ListUptimeCheckConfigsRequest) (*ListUptimeCheckConfigsResponse, error) + // Gets a single uptime check configuration. + GetUptimeCheckConfig(context.Context, *GetUptimeCheckConfigRequest) (*UptimeCheckConfig, error) + // Creates a new uptime check configuration. + CreateUptimeCheckConfig(context.Context, *CreateUptimeCheckConfigRequest) (*UptimeCheckConfig, error) + // Updates an uptime check configuration. You can either replace the entire + // configuration with a new one or replace only certain fields in the current + // configuration by specifying the fields to be updated via `"updateMask"`. + // Returns the updated configuration. + UpdateUptimeCheckConfig(context.Context, *UpdateUptimeCheckConfigRequest) (*UptimeCheckConfig, error) + // Deletes an uptime check configuration. Note that this method will fail + // if the uptime check configuration is referenced by an alert policy or + // other dependent configs that would be rendered invalid by the deletion. + DeleteUptimeCheckConfig(context.Context, *DeleteUptimeCheckConfigRequest) (*empty.Empty, error) + // Returns the list of IPs that checkers run from + ListUptimeCheckIps(context.Context, *ListUptimeCheckIpsRequest) (*ListUptimeCheckIpsResponse, error) +} + +func RegisterUptimeCheckServiceServer(s *grpc.Server, srv UptimeCheckServiceServer) { + s.RegisterService(&_UptimeCheckService_serviceDesc, srv) +} + +func _UptimeCheckService_ListUptimeCheckConfigs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListUptimeCheckConfigsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(UptimeCheckServiceServer).ListUptimeCheckConfigs(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.UptimeCheckService/ListUptimeCheckConfigs", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(UptimeCheckServiceServer).ListUptimeCheckConfigs(ctx, req.(*ListUptimeCheckConfigsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _UptimeCheckService_GetUptimeCheckConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetUptimeCheckConfigRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(UptimeCheckServiceServer).GetUptimeCheckConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.UptimeCheckService/GetUptimeCheckConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(UptimeCheckServiceServer).GetUptimeCheckConfig(ctx, 
req.(*GetUptimeCheckConfigRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _UptimeCheckService_CreateUptimeCheckConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateUptimeCheckConfigRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(UptimeCheckServiceServer).CreateUptimeCheckConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.UptimeCheckService/CreateUptimeCheckConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(UptimeCheckServiceServer).CreateUptimeCheckConfig(ctx, req.(*CreateUptimeCheckConfigRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _UptimeCheckService_UpdateUptimeCheckConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateUptimeCheckConfigRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(UptimeCheckServiceServer).UpdateUptimeCheckConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.UptimeCheckService/UpdateUptimeCheckConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(UptimeCheckServiceServer).UpdateUptimeCheckConfig(ctx, req.(*UpdateUptimeCheckConfigRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _UptimeCheckService_DeleteUptimeCheckConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteUptimeCheckConfigRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(UptimeCheckServiceServer).DeleteUptimeCheckConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.UptimeCheckService/DeleteUptimeCheckConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(UptimeCheckServiceServer).DeleteUptimeCheckConfig(ctx, req.(*DeleteUptimeCheckConfigRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _UptimeCheckService_ListUptimeCheckIps_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListUptimeCheckIpsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(UptimeCheckServiceServer).ListUptimeCheckIps(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.UptimeCheckService/ListUptimeCheckIps", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(UptimeCheckServiceServer).ListUptimeCheckIps(ctx, req.(*ListUptimeCheckIpsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _UptimeCheckService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.monitoring.v3.UptimeCheckService", + HandlerType: (*UptimeCheckServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "ListUptimeCheckConfigs", + Handler: _UptimeCheckService_ListUptimeCheckConfigs_Handler, + }, + { + MethodName: "GetUptimeCheckConfig", + Handler: _UptimeCheckService_GetUptimeCheckConfig_Handler, + }, + { + 
MethodName: "CreateUptimeCheckConfig", + Handler: _UptimeCheckService_CreateUptimeCheckConfig_Handler, + }, + { + MethodName: "UpdateUptimeCheckConfig", + Handler: _UptimeCheckService_UpdateUptimeCheckConfig_Handler, + }, + { + MethodName: "DeleteUptimeCheckConfig", + Handler: _UptimeCheckService_DeleteUptimeCheckConfig_Handler, + }, + { + MethodName: "ListUptimeCheckIps", + Handler: _UptimeCheckService_ListUptimeCheckIps_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/monitoring/v3/uptime_service.proto", +} + +func init() { + proto.RegisterFile("google/monitoring/v3/uptime_service.proto", fileDescriptor_uptime_service_c74f83143a8cb5a4) +} + +var fileDescriptor_uptime_service_c74f83143a8cb5a4 = []byte{ + // 747 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0xcd, 0x6e, 0xd3, 0x4a, + 0x14, 0xd6, 0x24, 0xbd, 0x55, 0x7b, 0xaa, 0x7b, 0x2f, 0x0c, 0x51, 0x1b, 0x5c, 0x1a, 0x05, 0x23, + 0x41, 0x89, 0x90, 0x4d, 0x93, 0xae, 0xa8, 0xa8, 0x44, 0x03, 0x54, 0x95, 0xa8, 0x54, 0xa5, 0xb4, + 0x15, 0x50, 0x29, 0x72, 0xd3, 0xa9, 0x31, 0x49, 0x3c, 0xc6, 0x33, 0xae, 0xa0, 0xa8, 0x1b, 0xde, + 0x00, 0x75, 0xc9, 0x9e, 0x45, 0x1f, 0x00, 0xd6, 0xb0, 0x41, 0x62, 0x8b, 0x78, 0x03, 0x1e, 0x04, + 0x79, 0x3c, 0x26, 0x7f, 0x63, 0xe3, 0x88, 0x5d, 0x3c, 0xe7, 0xcc, 0x39, 0xdf, 0xf9, 0xfc, 0x9d, + 0x2f, 0x86, 0x9b, 0x36, 0xa5, 0x76, 0x87, 0x98, 0x5d, 0xea, 0x3a, 0x9c, 0xfa, 0x8e, 0x6b, 0x9b, + 0xc7, 0x35, 0x33, 0xf0, 0xb8, 0xd3, 0x25, 0x4d, 0x46, 0xfc, 0x63, 0xa7, 0x45, 0x0c, 0xcf, 0xa7, + 0x9c, 0xe2, 0x42, 0x94, 0x6a, 0xf4, 0x52, 0x8d, 0xe3, 0x9a, 0x76, 0x45, 0x16, 0xb0, 0x3c, 0xc7, + 0xb4, 0x5c, 0x97, 0x72, 0x8b, 0x3b, 0xd4, 0x65, 0xd1, 0x1d, 0xed, 0x6a, 0x4a, 0x79, 0x99, 0x32, + 0x2f, 0x53, 0xc4, 0xd3, 0x41, 0x70, 0x64, 0x92, 0xae, 0xc7, 0x5f, 0xcb, 0x60, 0x79, 0x38, 0x78, + 0xe4, 0x90, 0xce, 0x61, 0xb3, 0x6b, 0xb1, 0x76, 0x94, 0xa1, 0x33, 0x58, 0x78, 0xe4, 0x30, 0xbe, + 0x23, 0x4a, 0xd6, 0x9f, 0x93, 0x56, 0xbb, 0x4e, 0xdd, 0x23, 0xc7, 0x66, 0x0d, 0xf2, 0x32, 0x20, + 0x8c, 0xe3, 0x59, 0x98, 0xf4, 0x2c, 0x9f, 0xb8, 0xbc, 0x88, 0xca, 0x68, 0x71, 0xba, 0x21, 0x9f, + 0xf0, 0x3c, 0x4c, 0x7b, 0x96, 0x4d, 0x9a, 0xcc, 0x39, 0x21, 0xc5, 0x7c, 0x19, 0x2d, 0xfe, 0xd3, + 0x98, 0x0a, 0x0f, 0xb6, 0x9d, 0x13, 0x82, 0x17, 0x00, 0x44, 0x90, 0xd3, 0x36, 0x71, 0x8b, 0x13, + 0xe2, 0xa2, 0x48, 0x7f, 0x1c, 0x1e, 0xe8, 0x5f, 0x10, 0x94, 0x92, 0xba, 0x32, 0x8f, 0xba, 0x8c, + 0xe0, 0x27, 0x50, 0x90, 0x2c, 0xb6, 0xc2, 0x70, 0xb3, 0x15, 0xc5, 0x8b, 0xa8, 0x9c, 0x5f, 0x9c, + 0xa9, 0xde, 0x30, 0x54, 0x64, 0x1a, 0x23, 0xf5, 0x1a, 0x38, 0x18, 0x69, 0x81, 0xaf, 0xc3, 0xff, + 0x2e, 0x79, 0xc5, 0x9b, 0x7d, 0x08, 0x73, 0x02, 0xe1, 0xbf, 0xe1, 0xf1, 0x56, 0x8c, 0x32, 0x1c, + 0x82, 0x53, 0x6e, 0x75, 0xfa, 0x47, 0x9c, 0x16, 0x27, 0xe1, 0x8c, 0xfa, 0x12, 0xcc, 0xaf, 0x93, + 0xd1, 0x11, 0x62, 0xde, 0x30, 0x4c, 0xb8, 0x56, 0x97, 0x48, 0xd6, 0xc4, 0x6f, 0xfd, 0x1d, 0x82, + 0x52, 0xdd, 0x27, 0x16, 0x27, 0x89, 0xd7, 0x92, 0xe8, 0xde, 0x83, 0x4b, 0x0a, 0x3e, 0x04, 0xf0, + 0x31, 0xe8, 0xb8, 0x38, 0x42, 0x87, 0xfe, 0x11, 0x41, 0x69, 0xc7, 0x3b, 0x4c, 0xc3, 0xb4, 0x02, + 0x33, 0x81, 0xc8, 0x10, 0xc2, 0x91, 0x3d, 0xb5, 0xb8, 0x67, 0xac, 0x2d, 0xe3, 0x61, 0xa8, 0xad, + 0x4d, 0x8b, 0xb5, 0x1b, 0x10, 0xa5, 0x87, 0xbf, 0x93, 0x80, 0xe7, 0xff, 0x1a, 0xf8, 0x32, 0x94, + 0xee, 0x93, 0x0e, 0x49, 0xc1, 0xad, 0x7a, 0x05, 0x7b, 0x70, 0x79, 0x48, 0x79, 0x1b, 0xde, 0x6f, + 0xad, 0x0f, 0x68, 0x3a, 0x97, 0xaa, 0xe9, 0xfc, 0xb0, 0xa6, 0xcf, 0x10, 0x68, 0xaa, 0xca, 0x52, + 0xcf, 0x9b, 0x70, 0x61, 
0x80, 0x06, 0xc7, 0x8b, 0xb5, 0x7c, 0xed, 0x8f, 0x1c, 0x6c, 0x78, 0x8d, + 0xff, 0x82, 0x81, 0xb2, 0x59, 0x35, 0x5c, 0xfd, 0x3a, 0x05, 0xb8, 0xaf, 0xd2, 0x76, 0xe4, 0x48, + 0xf8, 0x13, 0x82, 0x59, 0xf5, 0x02, 0xe2, 0x9a, 0x1a, 0x4e, 0xaa, 0x49, 0x68, 0xcb, 0xe3, 0x5d, + 0x8a, 0x38, 0xd1, 0xab, 0x6f, 0xbf, 0xff, 0x3c, 0xcb, 0xdd, 0xc2, 0x95, 0xd0, 0xd4, 0xde, 0x44, + 0x42, 0xbf, 0xeb, 0xf9, 0xf4, 0x05, 0x69, 0x71, 0x66, 0x56, 0x4e, 0x4d, 0xc5, 0xf2, 0x7e, 0x40, + 0x50, 0x50, 0xad, 0x1d, 0x5e, 0x52, 0x43, 0x48, 0x59, 0x51, 0x2d, 0xab, 0xfa, 0x86, 0x80, 0x86, + 0x3a, 0xea, 0x83, 0xa9, 0x40, 0x69, 0x56, 0x4e, 0xf1, 0x67, 0x04, 0x73, 0x09, 0xbb, 0x8e, 0x13, + 0xe8, 0x4a, 0xb7, 0x86, 0xec, 0x70, 0xd7, 0x05, 0xdc, 0x7b, 0xfa, 0x18, 0xbc, 0xde, 0x51, 0x2d, + 0x29, 0xfe, 0x81, 0x60, 0x2e, 0xc1, 0x1b, 0x92, 0x66, 0x48, 0xb7, 0x92, 0xec, 0x33, 0x3c, 0x13, + 0x33, 0xec, 0x54, 0x57, 0xc5, 0x0c, 0x0a, 0x70, 0x46, 0xa6, 0xd7, 0xa0, 0x9e, 0xeb, 0x3d, 0x82, + 0xb9, 0x04, 0xef, 0x48, 0x9a, 0x2b, 0xdd, 0x6a, 0xb4, 0xd9, 0x11, 0x37, 0x7c, 0x10, 0xfe, 0x0d, + 0xc7, 0xca, 0xa9, 0x8c, 0xa3, 0x9c, 0x33, 0x04, 0x78, 0xd4, 0x49, 0xb0, 0x99, 0x69, 0xc7, 0x7a, + 0x6e, 0xa6, 0xdd, 0xce, 0x7e, 0x41, 0x2e, 0xa4, 0x26, 0xd0, 0x16, 0x30, 0xee, 0x7d, 0x65, 0xc4, + 0x39, 0x6b, 0xe7, 0x08, 0x8a, 0x2d, 0xda, 0x55, 0xd6, 0x5c, 0x93, 0x1e, 0x23, 0xed, 0x65, 0x2b, + 0xe4, 0x60, 0x0b, 0x3d, 0x5d, 0x95, 0xb9, 0x36, 0xed, 0x58, 0xae, 0x6d, 0x50, 0xdf, 0x36, 0x6d, + 0xe2, 0x0a, 0x86, 0xcc, 0x28, 0x64, 0x79, 0x0e, 0x1b, 0xfc, 0xb8, 0x59, 0xe9, 0x3d, 0x9d, 0xe7, + 0xb4, 0xf5, 0xa8, 0x40, 0xbd, 0x43, 0x83, 0x43, 0x63, 0xb3, 0xd7, 0x72, 0xb7, 0xf6, 0x2d, 0x0e, + 0xee, 0x8b, 0xe0, 0x7e, 0x2f, 0xb8, 0xbf, 0x5b, 0x3b, 0x98, 0x14, 0x4d, 0x6a, 0xbf, 0x02, 0x00, + 0x00, 0xff, 0xff, 0x27, 0xb8, 0x65, 0x92, 0x9f, 0x09, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go new file mode 100644 index 000000000..7bfe37a3d --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go @@ -0,0 +1,156 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/rpc/status.proto + +package status // import "google.golang.org/genproto/googleapis/rpc/status" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import any "github.com/golang/protobuf/ptypes/any" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// The `Status` type defines a logical error model that is suitable for different +// programming environments, including REST APIs and RPC APIs. It is used by +// [gRPC](https://github.com/grpc). The error model is designed to be: +// +// - Simple to use and understand for most users +// - Flexible enough to meet unexpected needs +// +// # Overview +// +// The `Status` message contains three pieces of data: error code, error message, +// and error details. The error code should be an enum value of +// [google.rpc.Code][google.rpc.Code], but it may accept additional error codes if needed. 
The +// error message should be a developer-facing English message that helps +// developers *understand* and *resolve* the error. If a localized user-facing +// error message is needed, put the localized message in the error details or +// localize it in the client. The optional error details may contain arbitrary +// information about the error. There is a predefined set of error detail types +// in the package `google.rpc` that can be used for common error conditions. +// +// # Language mapping +// +// The `Status` message is the logical representation of the error model, but it +// is not necessarily the actual wire format. When the `Status` message is +// exposed in different client libraries and different wire protocols, it can be +// mapped differently. For example, it will likely be mapped to some exceptions +// in Java, but more likely mapped to some error codes in C. +// +// # Other uses +// +// The error model and the `Status` message can be used in a variety of +// environments, either with or without APIs, to provide a +// consistent developer experience across different environments. +// +// Example uses of this error model include: +// +// - Partial errors. If a service needs to return partial errors to the client, +// it may embed the `Status` in the normal response to indicate the partial +// errors. +// +// - Workflow errors. A typical workflow has multiple steps. Each step may +// have a `Status` message for error reporting. +// +// - Batch operations. If a client uses batch request and batch response, the +// `Status` message should be used directly inside batch response, one for +// each error sub-response. +// +// - Asynchronous operations. If an API call embeds asynchronous operation +// results in its response, the status of those operations should be +// represented directly using the `Status` message. +// +// - Logging. If some API errors are stored in logs, the message `Status` could +// be used directly after any stripping needed for security/privacy reasons. +type Status struct { + // The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code]. + Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` + // A developer-facing error message, which should be in English. Any + // user-facing error message should be localized and sent in the + // [google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client. + Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` + // A list of messages that carry the error details. There is a common set of + // message types for APIs to use. 
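+ //
+ // A hedged sketch of unpacking a detail message (it assumes the helper
+ // package github.com/golang/protobuf/ptypes, which is not imported by this
+ // file, and a *Status value named `st`):
+ //
+ //   for _, detail := range st.GetDetails() {
+ //       var unpacked ptypes.DynamicAny
+ //       if err := ptypes.UnmarshalAny(detail, &unpacked); err == nil {
+ //           _ = unpacked.Message // inspect the concrete detail message
+ //       }
+ //   }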
+ Details []*any.Any `protobuf:"bytes,3,rep,name=details,proto3" json:"details,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Status) Reset() { *m = Status{} } +func (m *Status) String() string { return proto.CompactTextString(m) } +func (*Status) ProtoMessage() {} +func (*Status) Descriptor() ([]byte, []int) { + return fileDescriptor_status_c6e4de62dcdf2edf, []int{0} +} +func (m *Status) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Status.Unmarshal(m, b) +} +func (m *Status) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Status.Marshal(b, m, deterministic) +} +func (dst *Status) XXX_Merge(src proto.Message) { + xxx_messageInfo_Status.Merge(dst, src) +} +func (m *Status) XXX_Size() int { + return xxx_messageInfo_Status.Size(m) +} +func (m *Status) XXX_DiscardUnknown() { + xxx_messageInfo_Status.DiscardUnknown(m) +} + +var xxx_messageInfo_Status proto.InternalMessageInfo + +func (m *Status) GetCode() int32 { + if m != nil { + return m.Code + } + return 0 +} + +func (m *Status) GetMessage() string { + if m != nil { + return m.Message + } + return "" +} + +func (m *Status) GetDetails() []*any.Any { + if m != nil { + return m.Details + } + return nil +} + +func init() { + proto.RegisterType((*Status)(nil), "google.rpc.Status") +} + +func init() { proto.RegisterFile("google/rpc/status.proto", fileDescriptor_status_c6e4de62dcdf2edf) } + +var fileDescriptor_status_c6e4de62dcdf2edf = []byte{ + // 209 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4f, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x2a, 0x48, 0xd6, 0x2f, 0x2e, 0x49, 0x2c, 0x29, 0x2d, 0xd6, 0x2b, 0x28, + 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x82, 0x48, 0xe8, 0x15, 0x15, 0x24, 0x4b, 0x49, 0x42, 0x15, 0x81, + 0x65, 0x92, 0x4a, 0xd3, 0xf4, 0x13, 0xf3, 0x2a, 0x21, 0xca, 0x94, 0xd2, 0xb8, 0xd8, 0x82, 0xc1, + 0xda, 0x84, 0x84, 0xb8, 0x58, 0x92, 0xf3, 0x53, 0x52, 0x25, 0x18, 0x15, 0x18, 0x35, 0x58, 0x83, + 0xc0, 0x6c, 0x21, 0x09, 0x2e, 0xf6, 0xdc, 0xd4, 0xe2, 0xe2, 0xc4, 0xf4, 0x54, 0x09, 0x26, 0x05, + 0x46, 0x0d, 0xce, 0x20, 0x18, 0x57, 0x48, 0x8f, 0x8b, 0x3d, 0x25, 0xb5, 0x24, 0x31, 0x33, 0xa7, + 0x58, 0x82, 0x59, 0x81, 0x59, 0x83, 0xdb, 0x48, 0x44, 0x0f, 0x6a, 0x21, 0xcc, 0x12, 0x3d, 0xc7, + 0xbc, 0xca, 0x20, 0x98, 0x22, 0xa7, 0x38, 0x2e, 0xbe, 0xe4, 0xfc, 0x5c, 0x3d, 0x84, 0xa3, 0x9c, + 0xb8, 0x21, 0xf6, 0x06, 0x80, 0x94, 0x07, 0x30, 0x46, 0x99, 0x43, 0xa5, 0xd2, 0xf3, 0x73, 0x12, + 0xf3, 0xd2, 0xf5, 0xf2, 0x8b, 0xd2, 0xf5, 0xd3, 0x53, 0xf3, 0xc0, 0x86, 0xe9, 0x43, 0xa4, 0x12, + 0x0b, 0x32, 0x8b, 0x91, 0xfc, 0x69, 0x0d, 0xa1, 0x16, 0x31, 0x31, 0x07, 0x05, 0x38, 0x27, 0xb1, + 0x81, 0x55, 0x1a, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0xa4, 0x53, 0xf0, 0x7c, 0x10, 0x01, 0x00, + 0x00, +} diff --git a/vendor/google.golang.org/genproto/protobuf/field_mask/field_mask.pb.go b/vendor/google.golang.org/genproto/protobuf/field_mask/field_mask.pb.go new file mode 100644 index 000000000..86886693f --- /dev/null +++ b/vendor/google.golang.org/genproto/protobuf/field_mask/field_mask.pb.go @@ -0,0 +1,280 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/field_mask.proto + +package field_mask // import "google.golang.org/genproto/protobuf/field_mask" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+// `FieldMask` represents a set of symbolic field paths, for example:
+//
+// paths: "f.a"
+// paths: "f.b.d"
+//
+// Here `f` represents a field in some root message, `a` and `b`
+// fields in the message found in `f`, and `d` a field found in the
+// message in `f.b`.
+//
+// Field masks are used to specify a subset of fields that should be
+// returned by a get operation or modified by an update operation.
+// Field masks also have a custom JSON encoding (see below).
+//
+// # Field Masks in Projections
+//
+// When used in the context of a projection, a response message or
+// sub-message is filtered by the API to only contain those fields as
+// specified in the mask. For example, if the mask in the previous
+// example is applied to a response message as follows:
+//
+// f {
+// a : 22
+// b {
+// d : 1
+// x : 2
+// }
+// y : 13
+// }
+// z: 8
+//
+// The result will not contain specific values for fields x, y, and z
+// (their value will be set to the default, and omitted in proto text
+// output):
+//
+//
+// f {
+// a : 22
+// b {
+// d : 1
+// }
+// }
+//
+// A repeated field is not allowed except at the last position of a
+// paths string.
+//
+// If a FieldMask object is not present in a get operation, the
+// operation applies to all fields (as if a FieldMask of all fields
+// had been specified).
+//
+// Note that a field mask does not necessarily apply to the
+// top-level response message. In case of a REST get operation, the
+// field mask applies directly to the response, but in case of a REST
+// list operation, the mask instead applies to each individual message
+// in the returned resource list. In case of a REST custom method,
+// other definitions may be used. Where the mask applies will be
+// clearly documented together with its declaration in the API. In
+// any case, the effect on the returned resource/resources is required
+// behavior for APIs.
+//
+// # Field Masks in Update Operations
+//
+// A field mask in update operations specifies which fields of the
+// targeted resource are going to be updated. The API is required
+// to only change the values of the fields as specified in the mask
+// and leave the others untouched. If a resource is passed in to
+// describe the updated values, the API ignores the values of all
+// fields not covered by the mask.
+//
+// If a repeated field is specified for an update operation, new values will
+// be appended to the existing repeated field in the target resource. Note that
+// a repeated field is only allowed in the last position of a `paths` string.
+//
+// If a sub-message is specified in the last position of the field mask for an
+// update operation, then the new value will be merged into the existing sub-message
+// in the target resource.
+// +// For example, given the target message: +// +// f { +// b { +// d: 1 +// x: 2 +// } +// c: [1] +// } +// +// And an update message: +// +// f { +// b { +// d: 10 +// } +// c: [2] +// } +// +// then if the field mask is: +// +// paths: ["f.b", "f.c"] +// +// then the result will be: +// +// f { +// b { +// d: 10 +// x: 2 +// } +// c: [1, 2] +// } +// +// An implementation may provide options to override this default behavior for +// repeated and message fields. +// +// In order to reset a field's value to the default, the field must +// be in the mask and set to the default value in the provided resource. +// Hence, in order to reset all fields of a resource, provide a default +// instance of the resource and set all fields in the mask, or do +// not provide a mask as described below. +// +// If a field mask is not present on update, the operation applies to +// all fields (as if a field mask of all fields has been specified). +// Note that in the presence of schema evolution, this may mean that +// fields the client does not know and has therefore not filled into +// the request will be reset to their default. If this is unwanted +// behavior, a specific service may require a client to always specify +// a field mask, producing an error if not. +// +// As with get operations, the location of the resource which +// describes the updated values in the request message depends on the +// operation kind. In any case, the effect of the field mask is +// required to be honored by the API. +// +// ## Considerations for HTTP REST +// +// The HTTP kind of an update operation which uses a field mask must +// be set to PATCH instead of PUT in order to satisfy HTTP semantics +// (PUT must only be used for full updates). +// +// # JSON Encoding of Field Masks +// +// In JSON, a field mask is encoded as a single string where paths are +// separated by a comma. Fields name in each path are converted +// to/from lower-camel naming conventions. +// +// As an example, consider the following message declarations: +// +// message Profile { +// User user = 1; +// Photo photo = 2; +// } +// message User { +// string display_name = 1; +// string address = 2; +// } +// +// In proto a field mask for `Profile` may look as such: +// +// mask { +// paths: "user.display_name" +// paths: "photo" +// } +// +// In JSON, the same mask is represented as below: +// +// { +// mask: "user.displayName,photo" +// } +// +// # Field Masks and Oneof Fields +// +// Field masks treat fields in oneofs just as regular fields. Consider the +// following message: +// +// message SampleMessage { +// oneof test_oneof { +// string name = 4; +// SubMessage sub_message = 9; +// } +// } +// +// The field mask can be: +// +// mask { +// paths: "name" +// } +// +// Or: +// +// mask { +// paths: "sub_message" +// } +// +// Note that oneof type names ("test_oneof" in this case) cannot be used in +// paths. +// +// ## Field Mask Verification +// +// The implementation of any API method which has a FieldMask type field in the +// request should verify the included field paths, and return an +// `INVALID_ARGUMENT` error if any path is duplicated or unmappable. +type FieldMask struct { + // The set of field mask paths. 
+ Paths []string `protobuf:"bytes,1,rep,name=paths,proto3" json:"paths,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FieldMask) Reset() { *m = FieldMask{} } +func (m *FieldMask) String() string { return proto.CompactTextString(m) } +func (*FieldMask) ProtoMessage() {} +func (*FieldMask) Descriptor() ([]byte, []int) { + return fileDescriptor_field_mask_02a8b0c0831edcce, []int{0} +} +func (m *FieldMask) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FieldMask.Unmarshal(m, b) +} +func (m *FieldMask) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FieldMask.Marshal(b, m, deterministic) +} +func (dst *FieldMask) XXX_Merge(src proto.Message) { + xxx_messageInfo_FieldMask.Merge(dst, src) +} +func (m *FieldMask) XXX_Size() int { + return xxx_messageInfo_FieldMask.Size(m) +} +func (m *FieldMask) XXX_DiscardUnknown() { + xxx_messageInfo_FieldMask.DiscardUnknown(m) +} + +var xxx_messageInfo_FieldMask proto.InternalMessageInfo + +func (m *FieldMask) GetPaths() []string { + if m != nil { + return m.Paths + } + return nil +} + +func init() { + proto.RegisterType((*FieldMask)(nil), "google.protobuf.FieldMask") +} + +func init() { + proto.RegisterFile("google/protobuf/field_mask.proto", fileDescriptor_field_mask_02a8b0c0831edcce) +} + +var fileDescriptor_field_mask_02a8b0c0831edcce = []byte{ + // 175 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x48, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcb, 0x4c, 0xcd, + 0x49, 0x89, 0xcf, 0x4d, 0x2c, 0xce, 0xd6, 0x03, 0x8b, 0x09, 0xf1, 0x43, 0x54, 0xe8, 0xc1, 0x54, + 0x28, 0x29, 0x72, 0x71, 0xba, 0x81, 0x14, 0xf9, 0x26, 0x16, 0x67, 0x0b, 0x89, 0x70, 0xb1, 0x16, + 0x24, 0x96, 0x64, 0x14, 0x4b, 0x30, 0x2a, 0x30, 0x6b, 0x70, 0x06, 0x41, 0x38, 0x4e, 0x3d, 0x8c, + 0x5c, 0xc2, 0xc9, 0xf9, 0xb9, 0x7a, 0x68, 0x5a, 0x9d, 0xf8, 0xe0, 0x1a, 0x03, 0x40, 0x42, 0x01, + 0x8c, 0x51, 0x96, 0x50, 0x25, 0xe9, 0xf9, 0x39, 0x89, 0x79, 0xe9, 0x7a, 0xf9, 0x45, 0xe9, 0xfa, + 0xe9, 0xa9, 0x79, 0x60, 0x0d, 0xd8, 0xdc, 0x64, 0x8d, 0x60, 0xfe, 0x60, 0x64, 0x5c, 0xc4, 0xc4, + 0xec, 0x1e, 0xe0, 0xb4, 0x8a, 0x49, 0xce, 0x1d, 0x62, 0x48, 0x00, 0x54, 0x83, 0x5e, 0x78, 0x6a, + 0x4e, 0x8e, 0x77, 0x5e, 0x7e, 0x79, 0x5e, 0x48, 0x65, 0x41, 0x6a, 0x71, 0x12, 0x1b, 0xd8, 0x24, + 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0xfd, 0xda, 0xb7, 0xa8, 0xed, 0x00, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/grpc/AUTHORS b/vendor/google.golang.org/grpc/AUTHORS new file mode 100644 index 000000000..e491a9e7f --- /dev/null +++ b/vendor/google.golang.org/grpc/AUTHORS @@ -0,0 +1 @@ +Google Inc. diff --git a/vendor/google.golang.org/grpc/LICENSE b/vendor/google.golang.org/grpc/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/google.golang.org/grpc/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
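As an aside on the vendored field_mask package above: a minimal sketch of constructing a FieldMask for the update-operation semantics its documentation describes. Only the FieldMask type and its Paths field are taken from the vendored source; the surrounding program is illustrative.

package main

import (
	"fmt"

	field_mask "google.golang.org/genproto/protobuf/field_mask"
)

func main() {
	// Limit an update to two fields; per the package documentation, the
	// server must leave every field not covered by the mask untouched.
	mask := &field_mask.FieldMask{
		Paths: []string{"user.display_name", "photo"},
	}
	// In JSON the same mask is encoded as a single comma-separated string
	// with lowerCamelCase names: "user.displayName,photo".
	fmt.Println(mask.GetPaths())
}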
diff --git a/vendor/google.golang.org/grpc/backoff.go b/vendor/google.golang.org/grpc/backoff.go new file mode 100644 index 000000000..97c6e2568 --- /dev/null +++ b/vendor/google.golang.org/grpc/backoff.go @@ -0,0 +1,38 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// See internal/backoff package for the backoff implementation. This file is +// kept for the exported types and API backward compatibility. + +package grpc + +import ( + "time" +) + +// DefaultBackoffConfig uses values specified for backoff in +// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. +var DefaultBackoffConfig = BackoffConfig{ + MaxDelay: 120 * time.Second, +} + +// BackoffConfig defines the parameters for the default gRPC backoff strategy. +type BackoffConfig struct { + // MaxDelay is the upper bound of backoff delay. + MaxDelay time.Duration +} diff --git a/vendor/google.golang.org/grpc/balancer.go b/vendor/google.golang.org/grpc/balancer.go new file mode 100644 index 000000000..a78e702ba --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer.go @@ -0,0 +1,391 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "context" + "net" + "sync" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/naming" + "google.golang.org/grpc/status" +) + +// Address represents a server the client connects to. +// +// Deprecated: please use package balancer. +type Address struct { + // Addr is the server address on which a connection will be established. + Addr string + // Metadata is the information associated with Addr, which may be used + // to make load balancing decision. + Metadata interface{} +} + +// BalancerConfig specifies the configurations for Balancer. +// +// Deprecated: please use package balancer. +type BalancerConfig struct { + // DialCreds is the transport credential the Balancer implementation can + // use to dial to a remote load balancer server. The Balancer implementations + // can ignore this if it does not need to talk to another party securely. + DialCreds credentials.TransportCredentials + // Dialer is the custom dialer the Balancer implementation can use to dial + // to a remote load balancer server. The Balancer implementations + // can ignore this if it doesn't need to talk to remote balancer. 
+ Dialer func(context.Context, string) (net.Conn, error) +} + +// BalancerGetOptions configures a Get call. +// +// Deprecated: please use package balancer. +type BalancerGetOptions struct { + // BlockingWait specifies whether Get should block when there is no + // connected address. + BlockingWait bool +} + +// Balancer chooses network addresses for RPCs. +// +// Deprecated: please use package balancer. +type Balancer interface { + // Start does the initialization work to bootstrap a Balancer. For example, + // this function may start the name resolution and watch the updates. It will + // be called when dialing. + Start(target string, config BalancerConfig) error + // Up informs the Balancer that gRPC has a connection to the server at + // addr. It returns down which is called once the connection to addr gets + // lost or closed. + // TODO: It is not clear how to construct and take advantage of the meaningful error + // parameter for down. Need realistic demands to guide. + Up(addr Address) (down func(error)) + // Get gets the address of a server for the RPC corresponding to ctx. + // i) If it returns a connected address, gRPC internals issues the RPC on the + // connection to this address; + // ii) If it returns an address on which the connection is under construction + // (initiated by Notify(...)) but not connected, gRPC internals + // * fails RPC if the RPC is fail-fast and connection is in the TransientFailure or + // Shutdown state; + // or + // * issues RPC on the connection otherwise. + // iii) If it returns an address on which the connection does not exist, gRPC + // internals treats it as an error and will fail the corresponding RPC. + // + // Therefore, the following is the recommended rule when writing a custom Balancer. + // If opts.BlockingWait is true, it should return a connected address or + // block if there is no connected address. It should respect the timeout or + // cancellation of ctx when blocking. If opts.BlockingWait is false (for fail-fast + // RPCs), it should return an address it has notified via Notify(...) immediately + // instead of blocking. + // + // The function returns put which is called once the rpc has completed or failed. + // put can collect and report RPC stats to a remote load balancer. + // + // This function should only return the errors Balancer cannot recover by itself. + // gRPC internals will fail the RPC if an error is returned. + Get(ctx context.Context, opts BalancerGetOptions) (addr Address, put func(), err error) + // Notify returns a channel that is used by gRPC internals to watch the addresses + // gRPC needs to connect. The addresses might be from a name resolver or remote + // load balancer. gRPC internals will compare it with the existing connected + // addresses. If the address Balancer notified is not in the existing connected + // addresses, gRPC starts to connect the address. If an address in the existing + // connected addresses is not in the notification list, the corresponding connection + // is shutdown gracefully. Otherwise, there are no operations to take. Note that + // the Address slice must be the full list of the Addresses which should be connected. + // It is NOT delta. + Notify() <-chan []Address + // Close shuts down the balancer. + Close() error +} + +// RoundRobin returns a Balancer that selects addresses round-robin. It uses r to watch +// the name resolution updates and updates the addresses available correspondingly. +// +// Deprecated: please use package balancer/roundrobin. 
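Before the implementation that follows, a minimal usage sketch of this deprecated API, assuming the grpc.WithBalancer dial option from the same era of the library; the target address is a placeholder. With a nil naming.Resolver, Start treats the dial target as the only available address.

package main

import (
	"log"

	"google.golang.org/grpc"
)

func main() {
	// Wire the deprecated round-robin balancer into a dial. Passing a nil
	// naming.Resolver makes the balancer use the target as its sole address.
	conn, err := grpc.Dial(
		"localhost:50051",
		grpc.WithInsecure(),
		grpc.WithBalancer(grpc.RoundRobin(nil)),
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}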
+func RoundRobin(r naming.Resolver) Balancer { + return &roundRobin{r: r} +} + +type addrInfo struct { + addr Address + connected bool +} + +type roundRobin struct { + r naming.Resolver + w naming.Watcher + addrs []*addrInfo // all the addresses the client should potentially connect + mu sync.Mutex + addrCh chan []Address // the channel to notify gRPC internals the list of addresses the client should connect to. + next int // index of the next address to return for Get() + waitCh chan struct{} // the channel to block when there is no connected address available + done bool // The Balancer is closed. +} + +func (rr *roundRobin) watchAddrUpdates() error { + updates, err := rr.w.Next() + if err != nil { + grpclog.Warningf("grpc: the naming watcher stops working due to %v.", err) + return err + } + rr.mu.Lock() + defer rr.mu.Unlock() + for _, update := range updates { + addr := Address{ + Addr: update.Addr, + Metadata: update.Metadata, + } + switch update.Op { + case naming.Add: + var exist bool + for _, v := range rr.addrs { + if addr == v.addr { + exist = true + grpclog.Infoln("grpc: The name resolver wanted to add an existing address: ", addr) + break + } + } + if exist { + continue + } + rr.addrs = append(rr.addrs, &addrInfo{addr: addr}) + case naming.Delete: + for i, v := range rr.addrs { + if addr == v.addr { + copy(rr.addrs[i:], rr.addrs[i+1:]) + rr.addrs = rr.addrs[:len(rr.addrs)-1] + break + } + } + default: + grpclog.Errorln("Unknown update.Op ", update.Op) + } + } + // Make a copy of rr.addrs and write it onto rr.addrCh so that gRPC internals gets notified. + open := make([]Address, len(rr.addrs)) + for i, v := range rr.addrs { + open[i] = v.addr + } + if rr.done { + return ErrClientConnClosing + } + select { + case <-rr.addrCh: + default: + } + rr.addrCh <- open + return nil +} + +func (rr *roundRobin) Start(target string, config BalancerConfig) error { + rr.mu.Lock() + defer rr.mu.Unlock() + if rr.done { + return ErrClientConnClosing + } + if rr.r == nil { + // If there is no name resolver installed, it is not needed to + // do name resolution. In this case, target is added into rr.addrs + // as the only address available and rr.addrCh stays nil. + rr.addrs = append(rr.addrs, &addrInfo{addr: Address{Addr: target}}) + return nil + } + w, err := rr.r.Resolve(target) + if err != nil { + return err + } + rr.w = w + rr.addrCh = make(chan []Address, 1) + go func() { + for { + if err := rr.watchAddrUpdates(); err != nil { + return + } + } + }() + return nil +} + +// Up sets the connected state of addr and sends notification if there are pending +// Get() calls. +func (rr *roundRobin) Up(addr Address) func(error) { + rr.mu.Lock() + defer rr.mu.Unlock() + var cnt int + for _, a := range rr.addrs { + if a.addr == addr { + if a.connected { + return nil + } + a.connected = true + } + if a.connected { + cnt++ + } + } + // addr is only one which is connected. Notify the Get() callers who are blocking. + if cnt == 1 && rr.waitCh != nil { + close(rr.waitCh) + rr.waitCh = nil + } + return func(err error) { + rr.down(addr, err) + } +} + +// down unsets the connected state of addr. +func (rr *roundRobin) down(addr Address, err error) { + rr.mu.Lock() + defer rr.mu.Unlock() + for _, a := range rr.addrs { + if addr == a.addr { + a.connected = false + break + } + } +} + +// Get returns the next addr in the rotation. 
+func (rr *roundRobin) Get(ctx context.Context, opts BalancerGetOptions) (addr Address, put func(), err error) { + var ch chan struct{} + rr.mu.Lock() + if rr.done { + rr.mu.Unlock() + err = ErrClientConnClosing + return + } + + if len(rr.addrs) > 0 { + if rr.next >= len(rr.addrs) { + rr.next = 0 + } + next := rr.next + for { + a := rr.addrs[next] + next = (next + 1) % len(rr.addrs) + if a.connected { + addr = a.addr + rr.next = next + rr.mu.Unlock() + return + } + if next == rr.next { + // Has iterated all the possible address but none is connected. + break + } + } + } + if !opts.BlockingWait { + if len(rr.addrs) == 0 { + rr.mu.Unlock() + err = status.Errorf(codes.Unavailable, "there is no address available") + return + } + // Returns the next addr on rr.addrs for failfast RPCs. + addr = rr.addrs[rr.next].addr + rr.next++ + rr.mu.Unlock() + return + } + // Wait on rr.waitCh for non-failfast RPCs. + if rr.waitCh == nil { + ch = make(chan struct{}) + rr.waitCh = ch + } else { + ch = rr.waitCh + } + rr.mu.Unlock() + for { + select { + case <-ctx.Done(): + err = ctx.Err() + return + case <-ch: + rr.mu.Lock() + if rr.done { + rr.mu.Unlock() + err = ErrClientConnClosing + return + } + + if len(rr.addrs) > 0 { + if rr.next >= len(rr.addrs) { + rr.next = 0 + } + next := rr.next + for { + a := rr.addrs[next] + next = (next + 1) % len(rr.addrs) + if a.connected { + addr = a.addr + rr.next = next + rr.mu.Unlock() + return + } + if next == rr.next { + // Has iterated all the possible address but none is connected. + break + } + } + } + // The newly added addr got removed by Down() again. + if rr.waitCh == nil { + ch = make(chan struct{}) + rr.waitCh = ch + } else { + ch = rr.waitCh + } + rr.mu.Unlock() + } + } +} + +func (rr *roundRobin) Notify() <-chan []Address { + return rr.addrCh +} + +func (rr *roundRobin) Close() error { + rr.mu.Lock() + defer rr.mu.Unlock() + if rr.done { + return errBalancerClosed + } + rr.done = true + if rr.w != nil { + rr.w.Close() + } + if rr.waitCh != nil { + close(rr.waitCh) + rr.waitCh = nil + } + if rr.addrCh != nil { + close(rr.addrCh) + } + return nil +} + +// pickFirst is used to test multi-addresses in one addrConn in which all addresses share the same addrConn. +// It is a wrapper around roundRobin balancer. The logic of all methods works fine because balancer.Get() +// returns the only address Up by resetTransport(). +type pickFirst struct { + *roundRobin +} diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go new file mode 100644 index 000000000..c266f4ec1 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/balancer.go @@ -0,0 +1,364 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package balancer defines APIs for load balancing in gRPC. +// All APIs in this package are experimental. 
+package balancer + +import ( + "context" + "encoding/json" + "errors" + "net" + "strings" + + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" +) + +var ( + // m is a map from name to balancer builder. + m = make(map[string]Builder) +) + +// Register registers the balancer builder to the balancer map. b.Name +// (lowercased) will be used as the name registered with this builder. If the +// Builder implements ConfigParser, ParseConfig will be called when new service +// configs are received by the resolver, and the result will be provided to the +// Balancer in UpdateClientConnState. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple Balancers are +// registered with the same name, the one registered last will take effect. +func Register(b Builder) { + m[strings.ToLower(b.Name())] = b +} + +// unregisterForTesting deletes the balancer with the given name from the +// balancer map. +// +// This function is not thread-safe. +func unregisterForTesting(name string) { + delete(m, name) +} + +func init() { + internal.BalancerUnregister = unregisterForTesting +} + +// Get returns the resolver builder registered with the given name. +// Note that the compare is done in a case-insensitive fashion. +// If no builder is register with the name, nil will be returned. +func Get(name string) Builder { + if b, ok := m[strings.ToLower(name)]; ok { + return b + } + return nil +} + +// SubConn represents a gRPC sub connection. +// Each sub connection contains a list of addresses. gRPC will +// try to connect to them (in sequence), and stop trying the +// remainder once one connection is successful. +// +// The reconnect backoff will be applied on the list, not a single address. +// For example, try_on_all_addresses -> backoff -> try_on_all_addresses. +// +// All SubConns start in IDLE, and will not try to connect. To trigger +// the connecting, Balancers must call Connect. +// When the connection encounters an error, it will reconnect immediately. +// When the connection becomes IDLE, it will not reconnect unless Connect is +// called. +// +// This interface is to be implemented by gRPC. Users should not need a +// brand new implementation of this interface. For the situations like +// testing, the new implementation should embed this interface. This allows +// gRPC to add new methods to this interface. +type SubConn interface { + // UpdateAddresses updates the addresses used in this SubConn. + // gRPC checks if currently-connected address is still in the new list. + // If it's in the list, the connection will be kept. + // If it's not in the list, the connection will gracefully closed, and + // a new connection will be created. + // + // This will trigger a state transition for the SubConn. + UpdateAddresses([]resolver.Address) + // Connect starts the connecting for this SubConn. + Connect() +} + +// NewSubConnOptions contains options to create new SubConn. +type NewSubConnOptions struct { + // CredsBundle is the credentials bundle that will be used in the created + // SubConn. If it's nil, the original creds from grpc DialOptions will be + // used. 
+ CredsBundle credentials.Bundle + // HealthCheckEnabled indicates whether health check service should be + // enabled on this SubConn + HealthCheckEnabled bool +} + +// ClientConn represents a gRPC ClientConn. +// +// This interface is to be implemented by gRPC. Users should not need a +// brand new implementation of this interface. For the situations like +// testing, the new implementation should embed this interface. This allows +// gRPC to add new methods to this interface. +type ClientConn interface { + // NewSubConn is called by balancer to create a new SubConn. + // It doesn't block and wait for the connections to be established. + // Behaviors of the SubConn can be controlled by options. + NewSubConn([]resolver.Address, NewSubConnOptions) (SubConn, error) + // RemoveSubConn removes the SubConn from ClientConn. + // The SubConn will be shutdown. + RemoveSubConn(SubConn) + + // UpdateBalancerState is called by balancer to notify gRPC that some internal + // state in balancer has changed. + // + // gRPC will update the connectivity state of the ClientConn, and will call pick + // on the new picker to pick new SubConn. + UpdateBalancerState(s connectivity.State, p Picker) + + // ResolveNow is called by balancer to notify gRPC to do a name resolving. + ResolveNow(resolver.ResolveNowOption) + + // Target returns the dial target for this ClientConn. + // + // Deprecated: Use the Target field in the BuildOptions instead. + Target() string +} + +// BuildOptions contains additional information for Build. +type BuildOptions struct { + // DialCreds is the transport credential the Balancer implementation can + // use to dial to a remote load balancer server. The Balancer implementations + // can ignore this if it does not need to talk to another party securely. + DialCreds credentials.TransportCredentials + // CredsBundle is the credentials bundle that the Balancer can use. + CredsBundle credentials.Bundle + // Dialer is the custom dialer the Balancer implementation can use to dial + // to a remote load balancer server. The Balancer implementations + // can ignore this if it doesn't need to talk to remote balancer. + Dialer func(context.Context, string) (net.Conn, error) + // ChannelzParentID is the entity parent's channelz unique identification number. + ChannelzParentID int64 + // Target contains the parsed address info of the dial target. It is the same resolver.Target as + // passed to the resolver. + // See the documentation for the resolver.Target type for details about what it contains. + Target resolver.Target +} + +// Builder creates a balancer. +type Builder interface { + // Build creates a new balancer with the ClientConn. + Build(cc ClientConn, opts BuildOptions) Balancer + // Name returns the name of balancers built by this builder. + // It will be used to pick balancers (for example in service config). + Name() string +} + +// ConfigParser parses load balancer configs. +type ConfigParser interface { + // ParseConfig parses the JSON load balancer config provided into an + // internal form or returns an error if the config is invalid. For future + // compatibility reasons, unknown fields in the config should be ignored. + ParseConfig(LoadBalancingConfigJSON json.RawMessage) (serviceconfig.LoadBalancingConfig, error) +} + +// PickOptions contains addition information for the Pick operation. +type PickOptions struct { + // FullMethodName is the method name that NewClientStream() is called + // with. The canonical format is /service/Method. 
+ FullMethodName string +} + +// DoneInfo contains additional information for done. +type DoneInfo struct { + // Err is the rpc error the RPC finished with. It could be nil. + Err error + // Trailer contains the metadata from the RPC's trailer, if present. + Trailer metadata.MD + // BytesSent indicates if any bytes have been sent to the server. + BytesSent bool + // BytesReceived indicates if any byte has been received from the server. + BytesReceived bool + // ServerLoad is the load received from server. It's usually sent as part of + // trailing metadata. + // + // The only supported type now is *orca_v1.LoadReport. + ServerLoad interface{} +} + +var ( + // ErrNoSubConnAvailable indicates no SubConn is available for pick(). + // gRPC will block the RPC until a new picker is available via UpdateBalancerState(). + ErrNoSubConnAvailable = errors.New("no SubConn is available") + // ErrTransientFailure indicates all SubConns are in TransientFailure. + // WaitForReady RPCs will block, non-WaitForReady RPCs will fail. + ErrTransientFailure = errors.New("all SubConns are in TransientFailure") +) + +// Picker is used by gRPC to pick a SubConn to send an RPC. +// Balancer is expected to generate a new picker from its snapshot every time its +// internal state has changed. +// +// The pickers used by gRPC can be updated by ClientConn.UpdateBalancerState(). +type Picker interface { + // Pick returns the SubConn to be used to send the RPC. + // The returned SubConn must be one returned by NewSubConn(). + // + // This functions is expected to return: + // - a SubConn that is known to be READY; + // - ErrNoSubConnAvailable if no SubConn is available, but progress is being + // made (for example, some SubConn is in CONNECTING mode); + // - other errors if no active connecting is happening (for example, all SubConn + // are in TRANSIENT_FAILURE mode). + // + // If a SubConn is returned: + // - If it is READY, gRPC will send the RPC on it; + // - If it is not ready, or becomes not ready after it's returned, gRPC will + // block until UpdateBalancerState() is called and will call pick on the + // new picker. The done function returned from Pick(), if not nil, will be + // called with nil error, no bytes sent and no bytes received. + // + // If the returned error is not nil: + // - If the error is ErrNoSubConnAvailable, gRPC will block until UpdateBalancerState() + // - If the error is ErrTransientFailure: + // - If the RPC is wait-for-ready, gRPC will block until UpdateBalancerState() + // is called to pick again; + // - Otherwise, RPC will fail with unavailable error. + // - Else (error is other non-nil error): + // - The RPC will fail with unavailable error. + // + // The returned done() function will be called once the rpc has finished, + // with the final status of that RPC. If the SubConn returned is not a + // valid SubConn type, done may not be called. done may be nil if balancer + // doesn't care about the RPC status. + Pick(ctx context.Context, opts PickOptions) (conn SubConn, done func(DoneInfo), err error) +} + +// Balancer takes input from gRPC, manages SubConns, and collects and aggregates +// the connectivity states. +// +// It also generates and updates the Picker used by gRPC to pick SubConns for RPCs. +// +// HandleSubConnectionStateChange, HandleResolvedAddrs and Close are guaranteed +// to be called synchronously from the same goroutine. +// There's no guarantee on picker.Pick, it may be called anytime. 
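Before the interface definition below, a sketch of how these pieces compose in practice: a trivial picker registered through Register and the base package (vendored later in this patch). Only Register, PickerBuilder, Picker, and base.NewBalancerBuilder are taken from the vendored sources; the package and the "first_ready" policy name are invented for illustration.

package firstready

import (
	"context"

	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/balancer/base"
	"google.golang.org/grpc/resolver"
)

// Registration must happen at init time, per the Register documentation.
func init() {
	balancer.Register(base.NewBalancerBuilder("first_ready", &pickerBuilder{}))
}

type pickerBuilder struct{}

// Build is handed only READY SubConns by the base balancer.
func (*pickerBuilder) Build(readySCs map[resolver.Address]balancer.SubConn) balancer.Picker {
	scs := make([]balancer.SubConn, 0, len(readySCs))
	for _, sc := range readySCs {
		scs = append(scs, sc)
	}
	return &picker{scs: scs}
}

type picker struct {
	scs []balancer.SubConn
}

// Pick returns an arbitrary ready SubConn, or ErrNoSubConnAvailable so that
// gRPC blocks the RPC until a new picker is produced.
func (p *picker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
	if len(p.scs) == 0 {
		return nil, nil, balancer.ErrNoSubConnAvailable
	}
	return p.scs[0], nil, nil
}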
+type Balancer interface { + // HandleSubConnStateChange is called by gRPC when the connectivity state + // of sc has changed. + // Balancer is expected to aggregate all the state of SubConn and report + // that back to gRPC. + // Balancer should also generate and update Pickers when its internal state has + // been changed by the new state. + // + // Deprecated: if V2Balancer is implemented by the Balancer, + // UpdateSubConnState will be called instead. + HandleSubConnStateChange(sc SubConn, state connectivity.State) + // HandleResolvedAddrs is called by gRPC to send updated resolved addresses to + // balancers. + // Balancer can create new SubConn or remove SubConn with the addresses. + // An empty address slice and a non-nil error will be passed if the resolver returns + // non-nil error to gRPC. + // + // Deprecated: if V2Balancer is implemented by the Balancer, + // UpdateClientConnState will be called instead. + HandleResolvedAddrs([]resolver.Address, error) + // Close closes the balancer. The balancer is not required to call + // ClientConn.RemoveSubConn for its existing SubConns. + Close() +} + +// SubConnState describes the state of a SubConn. +type SubConnState struct { + ConnectivityState connectivity.State + // TODO: add last connection error +} + +// ClientConnState describes the state of a ClientConn relevant to the +// balancer. +type ClientConnState struct { + ResolverState resolver.State + // The parsed load balancing configuration returned by the builder's + // ParseConfig method, if implemented. + BalancerConfig serviceconfig.LoadBalancingConfig +} + +// V2Balancer is defined for documentation purposes. If a Balancer also +// implements V2Balancer, its UpdateClientConnState method will be called +// instead of HandleResolvedAddrs and its UpdateSubConnState will be called +// instead of HandleSubConnStateChange. +type V2Balancer interface { + // UpdateClientConnState is called by gRPC when the state of the ClientConn + // changes. + UpdateClientConnState(ClientConnState) + // UpdateSubConnState is called by gRPC when the state of a SubConn + // changes. + UpdateSubConnState(SubConn, SubConnState) + // Close closes the balancer. The balancer is not required to call + // ClientConn.RemoveSubConn for its existing SubConns. + Close() +} + +// ConnectivityStateEvaluator takes the connectivity states of multiple SubConns +// and returns one aggregated connectivity state. +// +// It's not thread safe. +type ConnectivityStateEvaluator struct { + numReady uint64 // Number of addrConns in ready state. + numConnecting uint64 // Number of addrConns in connecting state. + numTransientFailure uint64 // Number of addrConns in transientFailure. +} + +// RecordTransition records state change happening in subConn and based on that +// it evaluates what aggregated state should be. +// +// - If at least one SubConn in Ready, the aggregated state is Ready; +// - Else if at least one SubConn in Connecting, the aggregated state is Connecting; +// - Else the aggregated state is TransientFailure. +// +// Idle and Shutdown are not considered. +func (cse *ConnectivityStateEvaluator) RecordTransition(oldState, newState connectivity.State) connectivity.State { + // Update counters. + for idx, state := range []connectivity.State{oldState, newState} { + updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new. 
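+ // For idx==0 this expression wraps around the uint64 range; adding the
+ // wrapped value to a counter below is, by modular arithmetic, equivalent
+ // to subtracting one.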
+ switch state { + case connectivity.Ready: + cse.numReady += updateVal + case connectivity.Connecting: + cse.numConnecting += updateVal + case connectivity.TransientFailure: + cse.numTransientFailure += updateVal + } + } + + // Evaluate. + if cse.numReady > 0 { + return connectivity.Ready + } + if cse.numConnecting > 0 { + return connectivity.Connecting + } + return connectivity.TransientFailure +} diff --git a/vendor/google.golang.org/grpc/balancer/base/balancer.go b/vendor/google.golang.org/grpc/balancer/base/balancer.go new file mode 100644 index 000000000..e587d8d11 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/base/balancer.go @@ -0,0 +1,178 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package base + +import ( + "context" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/resolver" +) + +type baseBuilder struct { + name string + pickerBuilder PickerBuilder + config Config +} + +func (bb *baseBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { + return &baseBalancer{ + cc: cc, + pickerBuilder: bb.pickerBuilder, + + subConns: make(map[resolver.Address]balancer.SubConn), + scStates: make(map[balancer.SubConn]connectivity.State), + csEvltr: &balancer.ConnectivityStateEvaluator{}, + // Initialize picker to a picker that always return + // ErrNoSubConnAvailable, because when state of a SubConn changes, we + // may call UpdateBalancerState with this picker. + picker: NewErrPicker(balancer.ErrNoSubConnAvailable), + config: bb.config, + } +} + +func (bb *baseBuilder) Name() string { + return bb.name +} + +type baseBalancer struct { + cc balancer.ClientConn + pickerBuilder PickerBuilder + + csEvltr *balancer.ConnectivityStateEvaluator + state connectivity.State + + subConns map[resolver.Address]balancer.SubConn + scStates map[balancer.SubConn]connectivity.State + picker balancer.Picker + config Config +} + +func (b *baseBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) { + panic("not implemented") +} + +func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) { + // TODO: handle s.ResolverState.Err (log if not nil) once implemented. + // TODO: handle s.ResolverState.ServiceConfig? + grpclog.Infoln("base.baseBalancer: got new ClientConn state: ", s) + // addrsSet is the set converted from addrs, it's used for quick lookup of an address. + addrsSet := make(map[resolver.Address]struct{}) + for _, a := range s.ResolverState.Addresses { + addrsSet[a] = struct{}{} + if _, ok := b.subConns[a]; !ok { + // a is a new address (not existing in b.subConns). 
+ sc, err := b.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{HealthCheckEnabled: b.config.HealthCheck}) + if err != nil { + grpclog.Warningf("base.baseBalancer: failed to create new SubConn: %v", err) + continue + } + b.subConns[a] = sc + b.scStates[sc] = connectivity.Idle + sc.Connect() + } + } + for a, sc := range b.subConns { + // a was removed by resolver. + if _, ok := addrsSet[a]; !ok { + b.cc.RemoveSubConn(sc) + delete(b.subConns, a) + // Keep the state of this sc in b.scStates until sc's state becomes Shutdown. + // The entry will be deleted in HandleSubConnStateChange. + } + } +} + +// regeneratePicker takes a snapshot of the balancer, and generates a picker +// from it. The picker is +// - errPicker with ErrTransientFailure if the balancer is in TransientFailure, +// - built by the pickerBuilder with all READY SubConns otherwise. +func (b *baseBalancer) regeneratePicker() { + if b.state == connectivity.TransientFailure { + b.picker = NewErrPicker(balancer.ErrTransientFailure) + return + } + readySCs := make(map[resolver.Address]balancer.SubConn) + + // Filter out all ready SCs from full subConn map. + for addr, sc := range b.subConns { + if st, ok := b.scStates[sc]; ok && st == connectivity.Ready { + readySCs[addr] = sc + } + } + b.picker = b.pickerBuilder.Build(readySCs) +} + +func (b *baseBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) { + panic("not implemented") +} + +func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + s := state.ConnectivityState + grpclog.Infof("base.baseBalancer: handle SubConn state change: %p, %v", sc, s) + oldS, ok := b.scStates[sc] + if !ok { + grpclog.Infof("base.baseBalancer: got state changes for an unknown SubConn: %p, %v", sc, s) + return + } + b.scStates[sc] = s + switch s { + case connectivity.Idle: + sc.Connect() + case connectivity.Shutdown: + // When an address was removed by resolver, b called RemoveSubConn but + // kept the sc's state in scStates. Remove state for this sc here. + delete(b.scStates, sc) + } + + oldAggrState := b.state + b.state = b.csEvltr.RecordTransition(oldS, s) + + // Regenerate picker when one of the following happens: + // - this sc became ready from not-ready + // - this sc became not-ready from ready + // - the aggregated state of balancer became TransientFailure from non-TransientFailure + // - the aggregated state of balancer became non-TransientFailure from TransientFailure + if (s == connectivity.Ready) != (oldS == connectivity.Ready) || + (b.state == connectivity.TransientFailure) != (oldAggrState == connectivity.TransientFailure) { + b.regeneratePicker() + } + + b.cc.UpdateBalancerState(b.state, b.picker) +} + +// Close is a nop because base balancer doesn't have internal state to clean up, +// and it doesn't need to call RemoveSubConn for the SubConns. +func (b *baseBalancer) Close() { +} + +// NewErrPicker returns a picker that always returns err on Pick(). +func NewErrPicker(err error) balancer.Picker { + return &errPicker{err: err} +} + +type errPicker struct { + err error // Pick() always returns this err. 
+} + +func (p *errPicker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) { + return nil, nil, p.err +} diff --git a/vendor/google.golang.org/grpc/balancer/base/base.go b/vendor/google.golang.org/grpc/balancer/base/base.go new file mode 100644 index 000000000..34b1f2994 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/base/base.go @@ -0,0 +1,64 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package base defines a balancer base that can be used to build balancers with +// different picking algorithms. +// +// The base balancer creates a new SubConn for each resolved address. The +// provided picker will only be notified about READY SubConns. +// +// This package is the base of round_robin balancer, its purpose is to be used +// to build round_robin like balancers with complex picking algorithms. +// Balancers with more complicated logic should try to implement a balancer +// builder from scratch. +// +// All APIs in this package are experimental. +package base + +import ( + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/resolver" +) + +// PickerBuilder creates balancer.Picker. +type PickerBuilder interface { + // Build takes a slice of ready SubConns, and returns a picker that will be + // used by gRPC to pick a SubConn. + Build(readySCs map[resolver.Address]balancer.SubConn) balancer.Picker +} + +// NewBalancerBuilder returns a balancer builder. The balancers +// built by this builder will use the picker builder to build pickers. +func NewBalancerBuilder(name string, pb PickerBuilder) balancer.Builder { + return NewBalancerBuilderWithConfig(name, pb, Config{}) +} + +// Config contains the config info about the base balancer builder. +type Config struct { + // HealthCheck indicates whether health checking should be enabled for this specific balancer. + HealthCheck bool +} + +// NewBalancerBuilderWithConfig returns a base balancer builder configured by the provided config. +func NewBalancerBuilderWithConfig(name string, pb PickerBuilder, config Config) balancer.Builder { + return &baseBuilder{ + name: name, + pickerBuilder: pb, + config: config, + } +} diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go new file mode 100644 index 000000000..78b1c537a --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go @@ -0,0 +1,839 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// source: grpc/lb/v1/load_balancer.proto + +package grpc_lb_v1 // import "google.golang.org/grpc/balancer/grpclb/grpc_lb_v1" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import duration "github.com/golang/protobuf/ptypes/duration" +import timestamp "github.com/golang/protobuf/ptypes/timestamp" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type LoadBalanceRequest struct { + // Types that are valid to be assigned to LoadBalanceRequestType: + // *LoadBalanceRequest_InitialRequest + // *LoadBalanceRequest_ClientStats + LoadBalanceRequestType isLoadBalanceRequest_LoadBalanceRequestType `protobuf_oneof:"load_balance_request_type"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LoadBalanceRequest) Reset() { *m = LoadBalanceRequest{} } +func (m *LoadBalanceRequest) String() string { return proto.CompactTextString(m) } +func (*LoadBalanceRequest) ProtoMessage() {} +func (*LoadBalanceRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_load_balancer_12026aec3f0251ba, []int{0} +} +func (m *LoadBalanceRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_LoadBalanceRequest.Unmarshal(m, b) +} +func (m *LoadBalanceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_LoadBalanceRequest.Marshal(b, m, deterministic) +} +func (dst *LoadBalanceRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_LoadBalanceRequest.Merge(dst, src) +} +func (m *LoadBalanceRequest) XXX_Size() int { + return xxx_messageInfo_LoadBalanceRequest.Size(m) +} +func (m *LoadBalanceRequest) XXX_DiscardUnknown() { + xxx_messageInfo_LoadBalanceRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_LoadBalanceRequest proto.InternalMessageInfo + +type isLoadBalanceRequest_LoadBalanceRequestType interface { + isLoadBalanceRequest_LoadBalanceRequestType() +} + +type LoadBalanceRequest_InitialRequest struct { + InitialRequest *InitialLoadBalanceRequest `protobuf:"bytes,1,opt,name=initial_request,json=initialRequest,proto3,oneof"` +} + +type LoadBalanceRequest_ClientStats struct { + ClientStats *ClientStats `protobuf:"bytes,2,opt,name=client_stats,json=clientStats,proto3,oneof"` +} + +func (*LoadBalanceRequest_InitialRequest) isLoadBalanceRequest_LoadBalanceRequestType() {} + +func (*LoadBalanceRequest_ClientStats) isLoadBalanceRequest_LoadBalanceRequestType() {} + +func (m *LoadBalanceRequest) GetLoadBalanceRequestType() isLoadBalanceRequest_LoadBalanceRequestType { + if m != nil { + return m.LoadBalanceRequestType + } + return nil +} + +func (m *LoadBalanceRequest) GetInitialRequest() *InitialLoadBalanceRequest { + if x, ok := m.GetLoadBalanceRequestType().(*LoadBalanceRequest_InitialRequest); ok { + return x.InitialRequest + } + return nil +} + +func (m *LoadBalanceRequest) GetClientStats() *ClientStats { + if x, ok := m.GetLoadBalanceRequestType().(*LoadBalanceRequest_ClientStats); ok { + return x.ClientStats + } + return nil +} + +// XXX_OneofFuncs is for 
the internal use of the proto package. +func (*LoadBalanceRequest) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _LoadBalanceRequest_OneofMarshaler, _LoadBalanceRequest_OneofUnmarshaler, _LoadBalanceRequest_OneofSizer, []interface{}{ + (*LoadBalanceRequest_InitialRequest)(nil), + (*LoadBalanceRequest_ClientStats)(nil), + } +} + +func _LoadBalanceRequest_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*LoadBalanceRequest) + // load_balance_request_type + switch x := m.LoadBalanceRequestType.(type) { + case *LoadBalanceRequest_InitialRequest: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.InitialRequest); err != nil { + return err + } + case *LoadBalanceRequest_ClientStats: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ClientStats); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("LoadBalanceRequest.LoadBalanceRequestType has unexpected type %T", x) + } + return nil +} + +func _LoadBalanceRequest_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*LoadBalanceRequest) + switch tag { + case 1: // load_balance_request_type.initial_request + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(InitialLoadBalanceRequest) + err := b.DecodeMessage(msg) + m.LoadBalanceRequestType = &LoadBalanceRequest_InitialRequest{msg} + return true, err + case 2: // load_balance_request_type.client_stats + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ClientStats) + err := b.DecodeMessage(msg) + m.LoadBalanceRequestType = &LoadBalanceRequest_ClientStats{msg} + return true, err + default: + return false, nil + } +} + +func _LoadBalanceRequest_OneofSizer(msg proto.Message) (n int) { + m := msg.(*LoadBalanceRequest) + // load_balance_request_type + switch x := m.LoadBalanceRequestType.(type) { + case *LoadBalanceRequest_InitialRequest: + s := proto.Size(x.InitialRequest) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *LoadBalanceRequest_ClientStats: + s := proto.Size(x.ClientStats) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type InitialLoadBalanceRequest struct { + // The name of the load balanced service (e.g., service.googleapis.com). Its + // length should be less than 256 bytes. + // The name might include a port number. How to handle the port number is up + // to the balancer. 
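+ // For example (illustrative only, not from the .proto): a client might send + // "service.example.com" or "service.example.com:443"; whether and how the + // port is honored is left to the balancer implementation.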
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *InitialLoadBalanceRequest) Reset() { *m = InitialLoadBalanceRequest{} } +func (m *InitialLoadBalanceRequest) String() string { return proto.CompactTextString(m) } +func (*InitialLoadBalanceRequest) ProtoMessage() {} +func (*InitialLoadBalanceRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_load_balancer_12026aec3f0251ba, []int{1} +} +func (m *InitialLoadBalanceRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_InitialLoadBalanceRequest.Unmarshal(m, b) +} +func (m *InitialLoadBalanceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_InitialLoadBalanceRequest.Marshal(b, m, deterministic) +} +func (dst *InitialLoadBalanceRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_InitialLoadBalanceRequest.Merge(dst, src) +} +func (m *InitialLoadBalanceRequest) XXX_Size() int { + return xxx_messageInfo_InitialLoadBalanceRequest.Size(m) +} +func (m *InitialLoadBalanceRequest) XXX_DiscardUnknown() { + xxx_messageInfo_InitialLoadBalanceRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_InitialLoadBalanceRequest proto.InternalMessageInfo + +func (m *InitialLoadBalanceRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// Contains the number of calls finished for a particular load balance token. +type ClientStatsPerToken struct { + // See Server.load_balance_token. + LoadBalanceToken string `protobuf:"bytes,1,opt,name=load_balance_token,json=loadBalanceToken,proto3" json:"load_balance_token,omitempty"` + // The total number of RPCs that finished associated with the token. + NumCalls int64 `protobuf:"varint,2,opt,name=num_calls,json=numCalls,proto3" json:"num_calls,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ClientStatsPerToken) Reset() { *m = ClientStatsPerToken{} } +func (m *ClientStatsPerToken) String() string { return proto.CompactTextString(m) } +func (*ClientStatsPerToken) ProtoMessage() {} +func (*ClientStatsPerToken) Descriptor() ([]byte, []int) { + return fileDescriptor_load_balancer_12026aec3f0251ba, []int{2} +} +func (m *ClientStatsPerToken) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ClientStatsPerToken.Unmarshal(m, b) +} +func (m *ClientStatsPerToken) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ClientStatsPerToken.Marshal(b, m, deterministic) +} +func (dst *ClientStatsPerToken) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClientStatsPerToken.Merge(dst, src) +} +func (m *ClientStatsPerToken) XXX_Size() int { + return xxx_messageInfo_ClientStatsPerToken.Size(m) +} +func (m *ClientStatsPerToken) XXX_DiscardUnknown() { + xxx_messageInfo_ClientStatsPerToken.DiscardUnknown(m) +} + +var xxx_messageInfo_ClientStatsPerToken proto.InternalMessageInfo + +func (m *ClientStatsPerToken) GetLoadBalanceToken() string { + if m != nil { + return m.LoadBalanceToken + } + return "" +} + +func (m *ClientStatsPerToken) GetNumCalls() int64 { + if m != nil { + return m.NumCalls + } + return 0 +} + +// Contains client level statistics that are useful to load balancing. Each +// count except the timestamp should be reset to zero after reporting the stats. +type ClientStats struct { + // The timestamp of generating the report. 
+ Timestamp *timestamp.Timestamp `protobuf:"bytes,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + // The total number of RPCs that started. + NumCallsStarted int64 `protobuf:"varint,2,opt,name=num_calls_started,json=numCallsStarted,proto3" json:"num_calls_started,omitempty"` + // The total number of RPCs that finished. + NumCallsFinished int64 `protobuf:"varint,3,opt,name=num_calls_finished,json=numCallsFinished,proto3" json:"num_calls_finished,omitempty"` + // The total number of RPCs that failed to reach a server except dropped RPCs. + NumCallsFinishedWithClientFailedToSend int64 `protobuf:"varint,6,opt,name=num_calls_finished_with_client_failed_to_send,json=numCallsFinishedWithClientFailedToSend,proto3" json:"num_calls_finished_with_client_failed_to_send,omitempty"` + // The total number of RPCs that finished and are known to have been received + // by a server. + NumCallsFinishedKnownReceived int64 `protobuf:"varint,7,opt,name=num_calls_finished_known_received,json=numCallsFinishedKnownReceived,proto3" json:"num_calls_finished_known_received,omitempty"` + // The list of dropped calls. + CallsFinishedWithDrop []*ClientStatsPerToken `protobuf:"bytes,8,rep,name=calls_finished_with_drop,json=callsFinishedWithDrop,proto3" json:"calls_finished_with_drop,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ClientStats) Reset() { *m = ClientStats{} } +func (m *ClientStats) String() string { return proto.CompactTextString(m) } +func (*ClientStats) ProtoMessage() {} +func (*ClientStats) Descriptor() ([]byte, []int) { + return fileDescriptor_load_balancer_12026aec3f0251ba, []int{3} +} +func (m *ClientStats) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ClientStats.Unmarshal(m, b) +} +func (m *ClientStats) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ClientStats.Marshal(b, m, deterministic) +} +func (dst *ClientStats) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClientStats.Merge(dst, src) +} +func (m *ClientStats) XXX_Size() int { + return xxx_messageInfo_ClientStats.Size(m) +} +func (m *ClientStats) XXX_DiscardUnknown() { + xxx_messageInfo_ClientStats.DiscardUnknown(m) +} + +var xxx_messageInfo_ClientStats proto.InternalMessageInfo + +func (m *ClientStats) GetTimestamp() *timestamp.Timestamp { + if m != nil { + return m.Timestamp + } + return nil +} + +func (m *ClientStats) GetNumCallsStarted() int64 { + if m != nil { + return m.NumCallsStarted + } + return 0 +} + +func (m *ClientStats) GetNumCallsFinished() int64 { + if m != nil { + return m.NumCallsFinished + } + return 0 +} + +func (m *ClientStats) GetNumCallsFinishedWithClientFailedToSend() int64 { + if m != nil { + return m.NumCallsFinishedWithClientFailedToSend + } + return 0 +} + +func (m *ClientStats) GetNumCallsFinishedKnownReceived() int64 { + if m != nil { + return m.NumCallsFinishedKnownReceived + } + return 0 +} + +func (m *ClientStats) GetCallsFinishedWithDrop() []*ClientStatsPerToken { + if m != nil { + return m.CallsFinishedWithDrop + } + return nil +} + +type LoadBalanceResponse struct { + // Types that are valid to be assigned to LoadBalanceResponseType: + // *LoadBalanceResponse_InitialResponse + // *LoadBalanceResponse_ServerList + LoadBalanceResponseType isLoadBalanceResponse_LoadBalanceResponseType `protobuf_oneof:"load_balance_response_type"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m 
*LoadBalanceResponse) Reset() { *m = LoadBalanceResponse{} } +func (m *LoadBalanceResponse) String() string { return proto.CompactTextString(m) } +func (*LoadBalanceResponse) ProtoMessage() {} +func (*LoadBalanceResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_load_balancer_12026aec3f0251ba, []int{4} +} +func (m *LoadBalanceResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_LoadBalanceResponse.Unmarshal(m, b) +} +func (m *LoadBalanceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_LoadBalanceResponse.Marshal(b, m, deterministic) +} +func (dst *LoadBalanceResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_LoadBalanceResponse.Merge(dst, src) +} +func (m *LoadBalanceResponse) XXX_Size() int { + return xxx_messageInfo_LoadBalanceResponse.Size(m) +} +func (m *LoadBalanceResponse) XXX_DiscardUnknown() { + xxx_messageInfo_LoadBalanceResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_LoadBalanceResponse proto.InternalMessageInfo + +type isLoadBalanceResponse_LoadBalanceResponseType interface { + isLoadBalanceResponse_LoadBalanceResponseType() +} + +type LoadBalanceResponse_InitialResponse struct { + InitialResponse *InitialLoadBalanceResponse `protobuf:"bytes,1,opt,name=initial_response,json=initialResponse,proto3,oneof"` +} + +type LoadBalanceResponse_ServerList struct { + ServerList *ServerList `protobuf:"bytes,2,opt,name=server_list,json=serverList,proto3,oneof"` +} + +func (*LoadBalanceResponse_InitialResponse) isLoadBalanceResponse_LoadBalanceResponseType() {} + +func (*LoadBalanceResponse_ServerList) isLoadBalanceResponse_LoadBalanceResponseType() {} + +func (m *LoadBalanceResponse) GetLoadBalanceResponseType() isLoadBalanceResponse_LoadBalanceResponseType { + if m != nil { + return m.LoadBalanceResponseType + } + return nil +} + +func (m *LoadBalanceResponse) GetInitialResponse() *InitialLoadBalanceResponse { + if x, ok := m.GetLoadBalanceResponseType().(*LoadBalanceResponse_InitialResponse); ok { + return x.InitialResponse + } + return nil +} + +func (m *LoadBalanceResponse) GetServerList() *ServerList { + if x, ok := m.GetLoadBalanceResponseType().(*LoadBalanceResponse_ServerList); ok { + return x.ServerList + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
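+// The marshaler below writes each oneof field as a standard protobuf tag, +// computed as (field_number<<3 | wire_type): initial_response (field 1, +// WireBytes) encodes as the varint 0x0a, server_list (field 2) as 0x12.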
+func (*LoadBalanceResponse) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _LoadBalanceResponse_OneofMarshaler, _LoadBalanceResponse_OneofUnmarshaler, _LoadBalanceResponse_OneofSizer, []interface{}{ + (*LoadBalanceResponse_InitialResponse)(nil), + (*LoadBalanceResponse_ServerList)(nil), + } +} + +func _LoadBalanceResponse_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*LoadBalanceResponse) + // load_balance_response_type + switch x := m.LoadBalanceResponseType.(type) { + case *LoadBalanceResponse_InitialResponse: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.InitialResponse); err != nil { + return err + } + case *LoadBalanceResponse_ServerList: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ServerList); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("LoadBalanceResponse.LoadBalanceResponseType has unexpected type %T", x) + } + return nil +} + +func _LoadBalanceResponse_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*LoadBalanceResponse) + switch tag { + case 1: // load_balance_response_type.initial_response + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(InitialLoadBalanceResponse) + err := b.DecodeMessage(msg) + m.LoadBalanceResponseType = &LoadBalanceResponse_InitialResponse{msg} + return true, err + case 2: // load_balance_response_type.server_list + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ServerList) + err := b.DecodeMessage(msg) + m.LoadBalanceResponseType = &LoadBalanceResponse_ServerList{msg} + return true, err + default: + return false, nil + } +} + +func _LoadBalanceResponse_OneofSizer(msg proto.Message) (n int) { + m := msg.(*LoadBalanceResponse) + // load_balance_response_type + switch x := m.LoadBalanceResponseType.(type) { + case *LoadBalanceResponse_InitialResponse: + s := proto.Size(x.InitialResponse) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *LoadBalanceResponse_ServerList: + s := proto.Size(x.ServerList) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type InitialLoadBalanceResponse struct { + // This is an application layer redirect that indicates the client should use + // the specified server for load balancing. When this field is non-empty in + // the response, the client should open a separate connection to the + // load_balancer_delegate and call the BalanceLoad method. Its length should + // be less than 64 bytes. + LoadBalancerDelegate string `protobuf:"bytes,1,opt,name=load_balancer_delegate,json=loadBalancerDelegate,proto3" json:"load_balancer_delegate,omitempty"` + // This interval defines how often the client should send the client stats + // to the load balancer. Stats should only be reported when the duration is + // positive. 
+ ClientStatsReportInterval *duration.Duration `protobuf:"bytes,2,opt,name=client_stats_report_interval,json=clientStatsReportInterval,proto3" json:"client_stats_report_interval,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *InitialLoadBalanceResponse) Reset() { *m = InitialLoadBalanceResponse{} } +func (m *InitialLoadBalanceResponse) String() string { return proto.CompactTextString(m) } +func (*InitialLoadBalanceResponse) ProtoMessage() {} +func (*InitialLoadBalanceResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_load_balancer_12026aec3f0251ba, []int{5} +} +func (m *InitialLoadBalanceResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_InitialLoadBalanceResponse.Unmarshal(m, b) +} +func (m *InitialLoadBalanceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_InitialLoadBalanceResponse.Marshal(b, m, deterministic) +} +func (dst *InitialLoadBalanceResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_InitialLoadBalanceResponse.Merge(dst, src) +} +func (m *InitialLoadBalanceResponse) XXX_Size() int { + return xxx_messageInfo_InitialLoadBalanceResponse.Size(m) +} +func (m *InitialLoadBalanceResponse) XXX_DiscardUnknown() { + xxx_messageInfo_InitialLoadBalanceResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_InitialLoadBalanceResponse proto.InternalMessageInfo + +func (m *InitialLoadBalanceResponse) GetLoadBalancerDelegate() string { + if m != nil { + return m.LoadBalancerDelegate + } + return "" +} + +func (m *InitialLoadBalanceResponse) GetClientStatsReportInterval() *duration.Duration { + if m != nil { + return m.ClientStatsReportInterval + } + return nil +} + +type ServerList struct { + // Contains a list of servers selected by the load balancer. The list will + // be updated when server resolutions change or as needed to balance load + // across more servers. The client should consume the server list in order + // unless instructed otherwise via the client_config. + Servers []*Server `protobuf:"bytes,1,rep,name=servers,proto3" json:"servers,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ServerList) Reset() { *m = ServerList{} } +func (m *ServerList) String() string { return proto.CompactTextString(m) } +func (*ServerList) ProtoMessage() {} +func (*ServerList) Descriptor() ([]byte, []int) { + return fileDescriptor_load_balancer_12026aec3f0251ba, []int{6} +} +func (m *ServerList) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ServerList.Unmarshal(m, b) +} +func (m *ServerList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ServerList.Marshal(b, m, deterministic) +} +func (dst *ServerList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServerList.Merge(dst, src) +} +func (m *ServerList) XXX_Size() int { + return xxx_messageInfo_ServerList.Size(m) +} +func (m *ServerList) XXX_DiscardUnknown() { + xxx_messageInfo_ServerList.DiscardUnknown(m) +} + +var xxx_messageInfo_ServerList proto.InternalMessageInfo + +func (m *ServerList) GetServers() []*Server { + if m != nil { + return m.Servers + } + return nil +} + +// Contains server information. When the drop field is not true, use the other +// fields. +type Server struct { + // A resolved address for the server, serialized in network-byte-order. It may + // either be an IPv4 or IPv6 address. 
+ IpAddress []byte `protobuf:"bytes,1,opt,name=ip_address,json=ipAddress,proto3" json:"ip_address,omitempty"` + // A resolved port number for the server. + Port int32 `protobuf:"varint,2,opt,name=port,proto3" json:"port,omitempty"` + // An opaque but printable token for load reporting. The client must include + // the token of the picked server into the initial metadata when it starts a + // call to that server. The token is used by the server to verify the request + // and to allow the server to report load to the gRPC LB system. The token is + // also used in client stats for reporting dropped calls. + // + // Its length can be variable but must be less than 50 bytes. + LoadBalanceToken string `protobuf:"bytes,3,opt,name=load_balance_token,json=loadBalanceToken,proto3" json:"load_balance_token,omitempty"` + // Indicates whether this particular request should be dropped by the client. + // If the request is dropped, there will be a corresponding entry in + // ClientStats.calls_finished_with_drop. + Drop bool `protobuf:"varint,4,opt,name=drop,proto3" json:"drop,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Server) Reset() { *m = Server{} } +func (m *Server) String() string { return proto.CompactTextString(m) } +func (*Server) ProtoMessage() {} +func (*Server) Descriptor() ([]byte, []int) { + return fileDescriptor_load_balancer_12026aec3f0251ba, []int{7} +} +func (m *Server) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Server.Unmarshal(m, b) +} +func (m *Server) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Server.Marshal(b, m, deterministic) +} +func (dst *Server) XXX_Merge(src proto.Message) { + xxx_messageInfo_Server.Merge(dst, src) +} +func (m *Server) XXX_Size() int { + return xxx_messageInfo_Server.Size(m) +} +func (m *Server) XXX_DiscardUnknown() { + xxx_messageInfo_Server.DiscardUnknown(m) +} + +var xxx_messageInfo_Server proto.InternalMessageInfo + +func (m *Server) GetIpAddress() []byte { + if m != nil { + return m.IpAddress + } + return nil +} + +func (m *Server) GetPort() int32 { + if m != nil { + return m.Port + } + return 0 +} + +func (m *Server) GetLoadBalanceToken() string { + if m != nil { + return m.LoadBalanceToken + } + return "" +} + +func (m *Server) GetDrop() bool { + if m != nil { + return m.Drop + } + return false +} + +func init() { + proto.RegisterType((*LoadBalanceRequest)(nil), "grpc.lb.v1.LoadBalanceRequest") + proto.RegisterType((*InitialLoadBalanceRequest)(nil), "grpc.lb.v1.InitialLoadBalanceRequest") + proto.RegisterType((*ClientStatsPerToken)(nil), "grpc.lb.v1.ClientStatsPerToken") + proto.RegisterType((*ClientStats)(nil), "grpc.lb.v1.ClientStats") + proto.RegisterType((*LoadBalanceResponse)(nil), "grpc.lb.v1.LoadBalanceResponse") + proto.RegisterType((*InitialLoadBalanceResponse)(nil), "grpc.lb.v1.InitialLoadBalanceResponse") + proto.RegisterType((*ServerList)(nil), "grpc.lb.v1.ServerList") + proto.RegisterType((*Server)(nil), "grpc.lb.v1.Server") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// LoadBalancerClient is the client API for LoadBalancer service. 
+// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type LoadBalancerClient interface { + // Bidirectional rpc to get a list of servers. + BalanceLoad(ctx context.Context, opts ...grpc.CallOption) (LoadBalancer_BalanceLoadClient, error) +} + +type loadBalancerClient struct { + cc *grpc.ClientConn +} + +func NewLoadBalancerClient(cc *grpc.ClientConn) LoadBalancerClient { + return &loadBalancerClient{cc} +} + +func (c *loadBalancerClient) BalanceLoad(ctx context.Context, opts ...grpc.CallOption) (LoadBalancer_BalanceLoadClient, error) { + stream, err := c.cc.NewStream(ctx, &_LoadBalancer_serviceDesc.Streams[0], "/grpc.lb.v1.LoadBalancer/BalanceLoad", opts...) + if err != nil { + return nil, err + } + x := &loadBalancerBalanceLoadClient{stream} + return x, nil +} + +type LoadBalancer_BalanceLoadClient interface { + Send(*LoadBalanceRequest) error + Recv() (*LoadBalanceResponse, error) + grpc.ClientStream +} + +type loadBalancerBalanceLoadClient struct { + grpc.ClientStream +} + +func (x *loadBalancerBalanceLoadClient) Send(m *LoadBalanceRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *loadBalancerBalanceLoadClient) Recv() (*LoadBalanceResponse, error) { + m := new(LoadBalanceResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// LoadBalancerServer is the server API for LoadBalancer service. +type LoadBalancerServer interface { + // Bidirectional rpc to get a list of servers. + BalanceLoad(LoadBalancer_BalanceLoadServer) error +} + +func RegisterLoadBalancerServer(s *grpc.Server, srv LoadBalancerServer) { + s.RegisterService(&_LoadBalancer_serviceDesc, srv) +} + +func _LoadBalancer_BalanceLoad_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(LoadBalancerServer).BalanceLoad(&loadBalancerBalanceLoadServer{stream}) +} + +type LoadBalancer_BalanceLoadServer interface { + Send(*LoadBalanceResponse) error + Recv() (*LoadBalanceRequest, error) + grpc.ServerStream +} + +type loadBalancerBalanceLoadServer struct { + grpc.ServerStream +} + +func (x *loadBalancerBalanceLoadServer) Send(m *LoadBalanceResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *loadBalancerBalanceLoadServer) Recv() (*LoadBalanceRequest, error) { + m := new(LoadBalanceRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +var _LoadBalancer_serviceDesc = grpc.ServiceDesc{ + ServiceName: "grpc.lb.v1.LoadBalancer", + HandlerType: (*LoadBalancerServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "BalanceLoad", + Handler: _LoadBalancer_BalanceLoad_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "grpc/lb/v1/load_balancer.proto", +} + +func init() { + proto.RegisterFile("grpc/lb/v1/load_balancer.proto", fileDescriptor_load_balancer_12026aec3f0251ba) +} + +var fileDescriptor_load_balancer_12026aec3f0251ba = []byte{ + // 752 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x55, 0xdd, 0x6e, 0x23, 0x35, + 0x14, 0xee, 0x90, 0x69, 0x36, 0x39, 0x29, 0x34, 0xeb, 0x85, 0x65, 0x92, 0xdd, 0x6d, 0x4b, 0x24, + 0x56, 0x11, 0x2a, 0x13, 0x52, 0xb8, 0x00, 0x89, 0x0b, 0x48, 0xab, 0x2a, 0x2d, 0xbd, 0x88, 0x9c, + 0x4a, 0x45, 0x95, 0x90, 0x99, 0xc9, 0xb8, 0xa9, 0x55, 0xc7, 0x1e, 0x3c, 0x4e, 0x2a, 0xae, 0x79, + 0x1f, 0xc4, 0x2b, 0x20, 0x5e, 0x0c, 0x8d, 0xed, 0x49, 0xa6, 0x49, 
0xa3, 0xbd, 0xca, 0xf8, 0x9c, + 0xcf, 0xdf, 0xf9, 0xfd, 0x1c, 0x38, 0x98, 0xaa, 0x74, 0xd2, 0xe3, 0x71, 0x6f, 0xd1, 0xef, 0x71, + 0x19, 0x25, 0x24, 0x8e, 0x78, 0x24, 0x26, 0x54, 0x85, 0xa9, 0x92, 0x5a, 0x22, 0xc8, 0xfd, 0x21, + 0x8f, 0xc3, 0x45, 0xbf, 0x7d, 0x30, 0x95, 0x72, 0xca, 0x69, 0xcf, 0x78, 0xe2, 0xf9, 0x5d, 0x2f, + 0x99, 0xab, 0x48, 0x33, 0x29, 0x2c, 0xb6, 0x7d, 0xb8, 0xee, 0xd7, 0x6c, 0x46, 0x33, 0x1d, 0xcd, + 0x52, 0x0b, 0xe8, 0xfc, 0xeb, 0x01, 0xba, 0x92, 0x51, 0x32, 0xb0, 0x31, 0x30, 0xfd, 0x63, 0x4e, + 0x33, 0x8d, 0x46, 0xb0, 0xcf, 0x04, 0xd3, 0x2c, 0xe2, 0x44, 0x59, 0x53, 0xe0, 0x1d, 0x79, 0xdd, + 0xc6, 0xc9, 0x97, 0xe1, 0x2a, 0x7a, 0x78, 0x61, 0x21, 0x9b, 0xf7, 0x87, 0x3b, 0xf8, 0x13, 0x77, + 0xbf, 0x60, 0xfc, 0x11, 0xf6, 0x26, 0x9c, 0x51, 0xa1, 0x49, 0xa6, 0x23, 0x9d, 0x05, 0x1f, 0x19, + 0xba, 0xcf, 0xcb, 0x74, 0xa7, 0xc6, 0x3f, 0xce, 0xdd, 0xc3, 0x1d, 0xdc, 0x98, 0xac, 0x8e, 0x83, + 0x37, 0xd0, 0x2a, 0xb7, 0xa2, 0x48, 0x8a, 0xe8, 0x3f, 0x53, 0xda, 0xe9, 0x41, 0x6b, 0x6b, 0x26, + 0x08, 0x81, 0x2f, 0xa2, 0x19, 0x35, 0xe9, 0xd7, 0xb1, 0xf9, 0xee, 0xfc, 0x0e, 0xaf, 0x4a, 0xb1, + 0x46, 0x54, 0x5d, 0xcb, 0x07, 0x2a, 0xd0, 0x31, 0xa0, 0x27, 0x41, 0x74, 0x6e, 0x75, 0x17, 0x9b, + 0x7c, 0x45, 0x6d, 0xd1, 0x6f, 0xa0, 0x2e, 0xe6, 0x33, 0x32, 0x89, 0x38, 0xb7, 0xd5, 0x54, 0x70, + 0x4d, 0xcc, 0x67, 0xa7, 0xf9, 0xb9, 0xf3, 0x4f, 0x05, 0x1a, 0xa5, 0x10, 0xe8, 0x7b, 0xa8, 0x2f, + 0x3b, 0xef, 0x3a, 0xd9, 0x0e, 0xed, 0x6c, 0xc2, 0x62, 0x36, 0xe1, 0x75, 0x81, 0xc0, 0x2b, 0x30, + 0xfa, 0x0a, 0x5e, 0x2e, 0xc3, 0xe4, 0xad, 0x53, 0x9a, 0x26, 0x2e, 0xdc, 0x7e, 0x11, 0x6e, 0x6c, + 0xcd, 0x79, 0x01, 0x2b, 0xec, 0x1d, 0x13, 0x2c, 0xbb, 0xa7, 0x49, 0x50, 0x31, 0xe0, 0x66, 0x01, + 0x3e, 0x77, 0x76, 0xf4, 0x1b, 0x7c, 0xbd, 0x89, 0x26, 0x8f, 0x4c, 0xdf, 0x13, 0x37, 0xa9, 0xbb, + 0x88, 0x71, 0x9a, 0x10, 0x2d, 0x49, 0x46, 0x45, 0x12, 0x54, 0x0d, 0xd1, 0xfb, 0x75, 0xa2, 0x1b, + 0xa6, 0xef, 0x6d, 0xad, 0xe7, 0x06, 0x7f, 0x2d, 0xc7, 0x54, 0x24, 0x68, 0x08, 0x5f, 0x3c, 0x43, + 0xff, 0x20, 0xe4, 0xa3, 0x20, 0x8a, 0x4e, 0x28, 0x5b, 0xd0, 0x24, 0x78, 0x61, 0x28, 0xdf, 0xad, + 0x53, 0xfe, 0x92, 0xa3, 0xb0, 0x03, 0xa1, 0x5f, 0x21, 0x78, 0x2e, 0xc9, 0x44, 0xc9, 0x34, 0xa8, + 0x1d, 0x55, 0xba, 0x8d, 0x93, 0xc3, 0x2d, 0x6b, 0x54, 0x8c, 0x16, 0x7f, 0x36, 0x59, 0xcf, 0xf8, + 0x4c, 0xc9, 0xf4, 0xd2, 0xaf, 0xf9, 0xcd, 0xdd, 0x4b, 0xbf, 0xb6, 0xdb, 0xac, 0x76, 0xfe, 0xf3, + 0xe0, 0xd5, 0x93, 0xfd, 0xc9, 0x52, 0x29, 0x32, 0x8a, 0xc6, 0xd0, 0x5c, 0x49, 0xc1, 0xda, 0xdc, + 0x04, 0xdf, 0x7f, 0x48, 0x0b, 0x16, 0x3d, 0xdc, 0xc1, 0xfb, 0x4b, 0x31, 0x38, 0xd2, 0x1f, 0xa0, + 0x91, 0x51, 0xb5, 0xa0, 0x8a, 0x70, 0x96, 0x69, 0x27, 0x86, 0xd7, 0x65, 0xbe, 0xb1, 0x71, 0x5f, + 0x31, 0x23, 0x26, 0xc8, 0x96, 0xa7, 0xc1, 0x5b, 0x68, 0xaf, 0x49, 0xc1, 0x72, 0x5a, 0x2d, 0xfc, + 0xed, 0x41, 0x7b, 0x7b, 0x2a, 0xe8, 0x3b, 0x78, 0xfd, 0xe4, 0x49, 0x21, 0x09, 0xe5, 0x74, 0x1a, + 0xe9, 0x42, 0x1f, 0x9f, 0x96, 0xd6, 0x5c, 0x9d, 0x39, 0x1f, 0xba, 0x85, 0xb7, 0x65, 0xed, 0x12, + 0x45, 0x53, 0xa9, 0x34, 0x61, 0x42, 0x53, 0xb5, 0x88, 0xb8, 0x4b, 0xbf, 0xb5, 0xb1, 0xd0, 0x67, + 0xee, 0x31, 0xc2, 0xad, 0x92, 0x96, 0xb1, 0xb9, 0x7c, 0xe1, 0xee, 0x76, 0x7e, 0x02, 0x58, 0x95, + 0x8a, 0x8e, 0xe1, 0x85, 0x2d, 0x35, 0x0b, 0x3c, 0x33, 0x59, 0xb4, 0xd9, 0x13, 0x5c, 0x40, 0x2e, + 0xfd, 0x5a, 0xa5, 0xe9, 0x77, 0xfe, 0xf2, 0xa0, 0x6a, 0x3d, 0xe8, 0x1d, 0x00, 0x4b, 0x49, 0x94, + 0x24, 0x8a, 0x66, 0x99, 0x29, 0x69, 0x0f, 0xd7, 0x59, 0xfa, 0xb3, 0x35, 0xe4, 0x6f, 0x41, 0x1e, + 0xdb, 0xe4, 0xbb, 0x8b, 0xcd, 0xf7, 0x16, 0xd1, 0x57, 0xb6, 0x88, 0x1e, 0x81, 0x6f, 0xd6, 
0xce, + 0x3f, 0xf2, 0xba, 0x35, 0x6c, 0xbe, 0xed, 0xfa, 0x9c, 0xc4, 0xb0, 0x57, 0x6a, 0xb8, 0x42, 0x18, + 0x1a, 0xee, 0x3b, 0x37, 0xa3, 0x83, 0x72, 0x1d, 0x9b, 0xcf, 0x54, 0xfb, 0x70, 0xab, 0xdf, 0x4e, + 0xae, 0xeb, 0x7d, 0xe3, 0x0d, 0x6e, 0xe0, 0x63, 0x26, 0x4b, 0xc0, 0xc1, 0xcb, 0x72, 0xc8, 0x51, + 0xde, 0xf6, 0x91, 0x77, 0xdb, 0x77, 0x63, 0x98, 0x4a, 0x1e, 0x89, 0x69, 0x28, 0xd5, 0xb4, 0x67, + 0xfe, 0x51, 0x8a, 0x99, 0x9b, 0x13, 0x8f, 0xcd, 0x0f, 0xe1, 0x31, 0x59, 0xf4, 0xe3, 0xaa, 0x19, + 0xd9, 0xb7, 0xff, 0x07, 0x00, 0x00, 0xff, 0xff, 0x81, 0x14, 0xee, 0xd1, 0x7b, 0x06, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go new file mode 100644 index 000000000..d881a9211 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go @@ -0,0 +1,485 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +//go:generate ./regenerate.sh + +// Package grpclb defines a grpclb balancer. +// +// To install grpclb balancer, import this package as: +// import _ "google.golang.org/grpc/balancer/grpclb" +package grpclb + +import ( + "context" + "errors" + "strconv" + "sync" + "time" + + durationpb "github.com/golang/protobuf/ptypes/duration" + "google.golang.org/grpc" + "google.golang.org/grpc/balancer" + lbpb "google.golang.org/grpc/balancer/grpclb/grpc_lb_v1" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/backoff" + "google.golang.org/grpc/resolver" +) + +const ( + lbTokeyKey = "lb-token" + defaultFallbackTimeout = 10 * time.Second + grpclbName = "grpclb" +) + +var ( + // defaultBackoffConfig configures the backoff strategy that's used when the + // init handshake in the RPC is unsuccessful. It's not for the clientconn + // reconnect backoff. + // + // It has the same value as the default grpc.DefaultBackoffConfig. + // + // TODO: make backoff configurable. + defaultBackoffConfig = backoff.Exponential{ + MaxDelay: 120 * time.Second, + } + errServerTerminatedConnection = errors.New("grpclb: failed to recv server list: server terminated connection") +) + +func convertDuration(d *durationpb.Duration) time.Duration { + if d == nil { + return 0 + } + return time.Duration(d.Seconds)*time.Second + time.Duration(d.Nanos)*time.Nanosecond +} + +// Client API for LoadBalancer service. +// Mostly copied from generated pb.go file. +// To avoid circular dependency. +type loadBalancerClient struct { + cc *grpc.ClientConn +} + +func (c *loadBalancerClient) BalanceLoad(ctx context.Context, opts ...grpc.CallOption) (*balanceLoadClientStream, error) { + desc := &grpc.StreamDesc{ + StreamName: "BalanceLoad", + ServerStreams: true, + ClientStreams: true, + } + stream, err := c.cc.NewStream(ctx, desc, "/grpc.lb.v1.LoadBalancer/BalanceLoad", opts...) 
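+ // Note that NewStream only opens the stream; the grpclb handshake (an + // InitialLoadBalanceRequest carrying the target name) is sent by the + // caller, see callRemoteBalancer in grpclb_remote_balancer.go.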
+ if err != nil { + return nil, err + } + x := &balanceLoadClientStream{stream} + return x, nil +} + +type balanceLoadClientStream struct { + grpc.ClientStream +} + +func (x *balanceLoadClientStream) Send(m *lbpb.LoadBalanceRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *balanceLoadClientStream) Recv() (*lbpb.LoadBalanceResponse, error) { + m := new(lbpb.LoadBalanceResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func init() { + balancer.Register(newLBBuilder()) +} + +// newLBBuilder creates a builder for grpclb. +func newLBBuilder() balancer.Builder { + return newLBBuilderWithFallbackTimeout(defaultFallbackTimeout) +} + +// newLBBuilderWithFallbackTimeout creates a grpclb builder with the given +// fallbackTimeout. If no response is received from the remote balancer within +// fallbackTimeout, the backend addresses from the resolved address list will be +// used. +// +// Only call this function when a non-default fallback timeout is needed. +func newLBBuilderWithFallbackTimeout(fallbackTimeout time.Duration) balancer.Builder { + return &lbBuilder{ + fallbackTimeout: fallbackTimeout, + } +} + +type lbBuilder struct { + fallbackTimeout time.Duration +} + +func (b *lbBuilder) Name() string { + return grpclbName +} + +func (b *lbBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { + // This generates a manual resolver builder with a random scheme. This + // scheme will be used to dial to remote LB, so we can send filtered address + // updates to remote LB ClientConn using this manual resolver. + scheme := "grpclb_internal_" + strconv.FormatInt(time.Now().UnixNano(), 36) + r := &lbManualResolver{scheme: scheme, ccb: cc} + + lb := &lbBalancer{ + cc: newLBCacheClientConn(cc), + target: opt.Target.Endpoint, + opt: opt, + fallbackTimeout: b.fallbackTimeout, + doneCh: make(chan struct{}), + + manualResolver: r, + subConns: make(map[resolver.Address]balancer.SubConn), + scStates: make(map[balancer.SubConn]connectivity.State), + picker: &errPicker{err: balancer.ErrNoSubConnAvailable}, + clientStats: newRPCStats(), + backoff: defaultBackoffConfig, // TODO: make backoff configurable. + } + + var err error + if opt.CredsBundle != nil { + lb.grpclbClientConnCreds, err = opt.CredsBundle.NewWithMode(internal.CredsBundleModeBalancer) + if err != nil { + grpclog.Warningf("lbBalancer: client connection creds NewWithMode failed: %v", err) + } + lb.grpclbBackendCreds, err = opt.CredsBundle.NewWithMode(internal.CredsBundleModeBackendFromBalancer) + if err != nil { + grpclog.Warningf("lbBalancer: backend creds NewWithMode failed: %v", err) + } + } + + return lb +} + +type lbBalancer struct { + cc *lbCacheClientConn + target string + opt balancer.BuildOptions + + usePickFirst bool + + // grpclbClientConnCreds is the creds bundle to be used to connect to grpclb + // servers. If it's nil, use the TransportCredentials from BuildOptions + // instead. + grpclbClientConnCreds credentials.Bundle + // grpclbBackendCreds is the creds bundle to be used for addresses that are + // returned by grpclb server. If it's nil, don't set anything when creating + // SubConns. + grpclbBackendCreds credentials.Bundle + + fallbackTimeout time.Duration + doneCh chan struct{} + + // manualResolver is used in the remote LB ClientConn inside grpclb. When + // resolved address updates are received by grpclb, filtered updates will be + // sent to remote LB ClientConn through this resolver.
+ manualResolver *lbManualResolver + // The ClientConn to talk to the remote balancer. + ccRemoteLB *grpc.ClientConn + // backoff for calling remote balancer. + backoff backoff.Strategy + + // Support client side load reporting. Each picker gets a reference to this, + // and will update its content. + clientStats *rpcStats + + mu sync.Mutex // guards everything following. + // The full server list including drops, used to check if the newly received + // serverList contains anything new. Each generated picker will also have + // a reference to this list to do the first layer pick. + fullServerList []*lbpb.Server + // Backend addresses. It's kept so the addresses are available when + // switching between round_robin and pickfirst. + backendAddrs []resolver.Address + // All backend addresses, with metadata set to nil. This list contains all + // backend addresses in the same order and with the same duplicates as in + // serverlist. When generating the picker, a SubConn slice with the same order + // but with only READY SCs will be generated. + backendAddrsWithoutMetadata []resolver.Address + // Roundrobin functionalities. + state connectivity.State + subConns map[resolver.Address]balancer.SubConn // Used to new/remove SubConn. + scStates map[balancer.SubConn]connectivity.State // Used to filter READY SubConns. + picker balancer.Picker + // Support fallback to resolved backend addresses if there's no response + // from remote balancer within fallbackTimeout. + remoteBalancerConnected bool + serverListReceived bool + inFallback bool + // resolvedBackendAddrs is resolvedAddrs minus remote balancers. It's set + // when resolved address updates are received, and read in the goroutine + // handling fallback. + resolvedBackendAddrs []resolver.Address +} + +// regeneratePicker takes a snapshot of the balancer, and generates a picker from +// it. The picker +// - always returns ErrTransientFailure if the balancer is in TransientFailure, +// - does two layer roundrobin pick otherwise. +// Caller must hold lb.mu. +func (lb *lbBalancer) regeneratePicker(resetDrop bool) { + if lb.state == connectivity.TransientFailure { + lb.picker = &errPicker{err: balancer.ErrTransientFailure} + return + } + + if lb.state == connectivity.Connecting { + lb.picker = &errPicker{err: balancer.ErrNoSubConnAvailable} + return + } + + var readySCs []balancer.SubConn + if lb.usePickFirst { + for _, sc := range lb.subConns { + readySCs = append(readySCs, sc) + break + } + } else { + for _, a := range lb.backendAddrsWithoutMetadata { + if sc, ok := lb.subConns[a]; ok { + if st, ok := lb.scStates[sc]; ok && st == connectivity.Ready { + readySCs = append(readySCs, sc) + } + } + } + } + + if len(readySCs) <= 0 { + // If there are no ready SubConns, always re-pick. This is to avoid drops + // unless at least one SubConn is ready. Otherwise we may drop more + // often than wanted because of drops + re-picks (which become re-drops). + // + // This doesn't seem to be necessary after the connecting check above. + // Kept for safety. + lb.picker = &errPicker{err: balancer.ErrNoSubConnAvailable} + return + } + if lb.inFallback { + lb.picker = newRRPicker(readySCs) + return + } + if resetDrop { + lb.picker = newLBPicker(lb.fullServerList, readySCs, lb.clientStats) + return + } + prevLBPicker, ok := lb.picker.(*lbPicker) + if !ok { + lb.picker = newLBPicker(lb.fullServerList, readySCs, lb.clientStats) + return + } + prevLBPicker.updateReadySCs(readySCs) +} + +// aggregateSubConnStates calculates the aggregated state of SubConns in +// lb.SubConns.
These SubConns are subconns in use (when switching between +fallback and grpclb). lb.scStates contains states for all SubConns, including +// those in cache (SubConns are cached for 10 seconds after remove). +// +// The aggregated state is: +// - If at least one SubConn is in Ready, the aggregated state is Ready; +// - Else if at least one SubConn is in Connecting, the aggregated state is Connecting; +// - Else the aggregated state is TransientFailure. +func (lb *lbBalancer) aggregateSubConnStates() connectivity.State { + var numConnecting uint64 + + for _, sc := range lb.subConns { + if state, ok := lb.scStates[sc]; ok { + switch state { + case connectivity.Ready: + return connectivity.Ready + case connectivity.Connecting: + numConnecting++ + } + } + } + if numConnecting > 0 { + return connectivity.Connecting + } + return connectivity.TransientFailure +} + +func (lb *lbBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) { + panic("not used") +} + +func (lb *lbBalancer) UpdateSubConnState(sc balancer.SubConn, scs balancer.SubConnState) { + s := scs.ConnectivityState + if grpclog.V(2) { + grpclog.Infof("lbBalancer: handle SubConn state change: %p, %v", sc, s) + } + lb.mu.Lock() + defer lb.mu.Unlock() + + oldS, ok := lb.scStates[sc] + if !ok { + if grpclog.V(2) { + grpclog.Infof("lbBalancer: got state changes for an unknown SubConn: %p, %v", sc, s) + } + return + } + lb.scStates[sc] = s + switch s { + case connectivity.Idle: + sc.Connect() + case connectivity.Shutdown: + // When an address was removed by resolver, the balancer called RemoveSubConn but + // kept the sc's state in scStates. Remove state for this sc here. + delete(lb.scStates, sc) + } + // Force regenerate picker if + // - this sc became ready from not-ready + // - this sc became not-ready from ready + lb.updateStateAndPicker((oldS == connectivity.Ready) != (s == connectivity.Ready), false) + + // Enter fallback when the aggregated state is not Ready and the connection + // to remote balancer is lost. + if lb.state != connectivity.Ready { + if !lb.inFallback && !lb.remoteBalancerConnected { + // Enter fallback. + lb.refreshSubConns(lb.resolvedBackendAddrs, true, lb.usePickFirst) + } + } +} + +// updateStateAndPicker re-calculates the aggregated state and regenerates the picker +// if the overall state has changed. +// +// If forceRegeneratePicker is true, picker will be regenerated. +func (lb *lbBalancer) updateStateAndPicker(forceRegeneratePicker bool, resetDrop bool) { + oldAggrState := lb.state + lb.state = lb.aggregateSubConnStates() + // Regenerate picker when one of the following happens: + // - caller wants to regenerate + // - the aggregated state changed + if forceRegeneratePicker || (lb.state != oldAggrState) { + lb.regeneratePicker(resetDrop) + } + + lb.cc.UpdateBalancerState(lb.state, lb.picker) +} + +// fallbackToBackendsAfter blocks for fallbackTimeout and falls back to using +// resolved backends (backends received from resolver, not from remote balancer) +// if no connection to remote balancers was successful. +func (lb *lbBalancer) fallbackToBackendsAfter(fallbackTimeout time.Duration) { + timer := time.NewTimer(fallbackTimeout) + defer timer.Stop() + select { + case <-timer.C: + case <-lb.doneCh: + return + } + lb.mu.Lock() + if lb.inFallback || lb.serverListReceived { + lb.mu.Unlock() + return + } + // Enter fallback. + lb.refreshSubConns(lb.resolvedBackendAddrs, true, lb.usePickFirst) + lb.mu.Unlock() +} + +// HandleResolvedAddrs sends the updated remoteLB addresses to remoteLB +// clientConn.
The remoteLB clientConn will handle creating/removing remoteLB +// connections. +func (lb *lbBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) { + panic("not used") +} + +func (lb *lbBalancer) handleServiceConfig(gc *grpclbServiceConfig) { + lb.mu.Lock() + defer lb.mu.Unlock() + + newUsePickFirst := childIsPickFirst(gc) + if lb.usePickFirst == newUsePickFirst { + return + } + if grpclog.V(2) { + grpclog.Infof("lbBalancer: switching mode, new usePickFirst: %+v", newUsePickFirst) + } + lb.refreshSubConns(lb.backendAddrs, lb.inFallback, newUsePickFirst) +} + +func (lb *lbBalancer) UpdateClientConnState(ccs balancer.ClientConnState) { + if grpclog.V(2) { + grpclog.Infof("lbBalancer: UpdateClientConnState: %+v", ccs) + } + gc, _ := ccs.BalancerConfig.(*grpclbServiceConfig) + lb.handleServiceConfig(gc) + + addrs := ccs.ResolverState.Addresses + if len(addrs) <= 0 { + return + } + + var remoteBalancerAddrs, backendAddrs []resolver.Address + for _, a := range addrs { + if a.Type == resolver.GRPCLB { + a.Type = resolver.Backend + remoteBalancerAddrs = append(remoteBalancerAddrs, a) + } else { + backendAddrs = append(backendAddrs, a) + } + } + + if lb.ccRemoteLB == nil { + if len(remoteBalancerAddrs) <= 0 { + grpclog.Errorf("grpclb: no remote balancer address is available, should never happen") + return + } + // First time receiving resolved addresses, create a cc to remote + // balancers. + lb.dialRemoteLB(remoteBalancerAddrs[0].ServerName) + // Start the fallback goroutine. + go lb.fallbackToBackendsAfter(lb.fallbackTimeout) + } + + // cc to remote balancers uses lb.manualResolver. Send the updated remote + // balancer addresses to it through manualResolver. + lb.manualResolver.UpdateState(resolver.State{Addresses: remoteBalancerAddrs}) + + lb.mu.Lock() + lb.resolvedBackendAddrs = backendAddrs + if lb.inFallback { + // This means we received a new list of resolved backends, and we are + // still in fallback mode. Need to update the list of backends we are + // using to the new list of backends. + lb.refreshSubConns(lb.resolvedBackendAddrs, true, lb.usePickFirst) + } + lb.mu.Unlock() +} + +func (lb *lbBalancer) Close() { + select { + case <-lb.doneCh: + return + default: + } + close(lb.doneCh) + if lb.ccRemoteLB != nil { + lb.ccRemoteLB.Close() + } + lb.cc.close() +} diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_config.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_config.go new file mode 100644 index 000000000..aac371963 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_config.go @@ -0,0 +1,66 @@ +/* + * + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package grpclb + +import ( + "encoding/json" + + "google.golang.org/grpc" + "google.golang.org/grpc/balancer/roundrobin" + "google.golang.org/grpc/serviceconfig" +) + +const ( + roundRobinName = roundrobin.Name + pickFirstName = grpc.PickFirstBalancerName +) + +type grpclbServiceConfig struct { + serviceconfig.LoadBalancingConfig + ChildPolicy *[]map[string]json.RawMessage +} + +func (b *lbBuilder) ParseConfig(lbConfig json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + ret := &grpclbServiceConfig{} + if err := json.Unmarshal(lbConfig, ret); err != nil { + return nil, err + } + return ret, nil +} + +func childIsPickFirst(sc *grpclbServiceConfig) bool { + if sc == nil { + return false + } + childConfigs := sc.ChildPolicy + if childConfigs == nil { + return false + } + for _, childC := range *childConfigs { + // If round_robin exists before pick_first, return false + if _, ok := childC[roundRobinName]; ok { + return false + } + // If pick_first is before round_robin, return true + if _, ok := childC[pickFirstName]; ok { + return true + } + } + return false +} diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_picker.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_picker.go new file mode 100644 index 000000000..6f023bc5e --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_picker.go @@ -0,0 +1,195 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpclb + +import ( + "context" + "sync" + "sync/atomic" + + "google.golang.org/grpc/balancer" + lbpb "google.golang.org/grpc/balancer/grpclb/grpc_lb_v1" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/internal/grpcrand" + "google.golang.org/grpc/status" +) + +// rpcStats is the same as lbpb.ClientStats, except that numCallsDropped is a map +// instead of a slice. +type rpcStats struct { + // Only access the following fields atomically. + numCallsStarted int64 + numCallsFinished int64 + numCallsFinishedWithClientFailedToSend int64 + numCallsFinishedKnownReceived int64 + + mu sync.Mutex + // map load_balance_token -> num_calls_dropped + numCallsDropped map[string]int64 +} + +func newRPCStats() *rpcStats { + return &rpcStats{ + numCallsDropped: make(map[string]int64), + } +} + +// toClientStats converts rpcStats to lbpb.ClientStats, and clears rpcStats.
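+// The numeric counters are read-and-reset with atomic.SwapInt64 and the drop +// map is swapped out under the mutex, so each report carries only the deltas +// accumulated since the previous report.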
+func (s *rpcStats) toClientStats() *lbpb.ClientStats { + stats := &lbpb.ClientStats{ + NumCallsStarted: atomic.SwapInt64(&s.numCallsStarted, 0), + NumCallsFinished: atomic.SwapInt64(&s.numCallsFinished, 0), + NumCallsFinishedWithClientFailedToSend: atomic.SwapInt64(&s.numCallsFinishedWithClientFailedToSend, 0), + NumCallsFinishedKnownReceived: atomic.SwapInt64(&s.numCallsFinishedKnownReceived, 0), + } + s.mu.Lock() + dropped := s.numCallsDropped + s.numCallsDropped = make(map[string]int64) + s.mu.Unlock() + for token, count := range dropped { + stats.CallsFinishedWithDrop = append(stats.CallsFinishedWithDrop, &lbpb.ClientStatsPerToken{ + LoadBalanceToken: token, + NumCalls: count, + }) + } + return stats +} + +func (s *rpcStats) drop(token string) { + atomic.AddInt64(&s.numCallsStarted, 1) + s.mu.Lock() + s.numCallsDropped[token]++ + s.mu.Unlock() + atomic.AddInt64(&s.numCallsFinished, 1) +} + +func (s *rpcStats) failedToSend() { + atomic.AddInt64(&s.numCallsStarted, 1) + atomic.AddInt64(&s.numCallsFinishedWithClientFailedToSend, 1) + atomic.AddInt64(&s.numCallsFinished, 1) +} + +func (s *rpcStats) knownReceived() { + atomic.AddInt64(&s.numCallsStarted, 1) + atomic.AddInt64(&s.numCallsFinishedKnownReceived, 1) + atomic.AddInt64(&s.numCallsFinished, 1) +} + +type errPicker struct { + // Pick always returns this err. + err error +} + +func (p *errPicker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) { + return nil, nil, p.err +} + +// rrPicker does roundrobin on subConns. It's typically used when there's no +// response from remote balancer, and grpclb falls back to the resolved +// backends. +// +// It is guaranteed that len(subConns) > 0. +type rrPicker struct { + mu sync.Mutex + subConns []balancer.SubConn // The subConns that were READY when taking the snapshot. + subConnsNext int +} + +func newRRPicker(readySCs []balancer.SubConn) *rrPicker { + return &rrPicker{ + subConns: readySCs, + subConnsNext: grpcrand.Intn(len(readySCs)), + } +} + +func (p *rrPicker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) { + p.mu.Lock() + defer p.mu.Unlock() + sc := p.subConns[p.subConnsNext] + p.subConnsNext = (p.subConnsNext + 1) % len(p.subConns) + return sc, nil, nil +} + +// lbPicker does two layers of picks: +// +// First layer: roundrobin on all servers in serverList, including drops and backends. +// - If it picks a drop, the RPC will fail as being dropped. +// - If it picks a backend, do a second layer pick to pick the real backend. +// +// Second layer: roundrobin on all READY backends. +// +// It's guaranteed that len(serverList) > 0. +type lbPicker struct { + mu sync.Mutex + serverList []*lbpb.Server + serverListNext int + subConns []balancer.SubConn // The subConns that were READY when taking the snapshot. + subConnsNext int + + stats *rpcStats +} + +func newLBPicker(serverList []*lbpb.Server, readySCs []balancer.SubConn, stats *rpcStats) *lbPicker { + return &lbPicker{ + serverList: serverList, + subConns: readySCs, + subConnsNext: grpcrand.Intn(len(readySCs)), + stats: stats, + } +} + +func (p *lbPicker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) { + p.mu.Lock() + defer p.mu.Unlock() + + // Layer one roundrobin on serverList. + s := p.serverList[p.serverListNext] + p.serverListNext = (p.serverListNext + 1) % len(p.serverList) + + // If it's a drop, return an error and fail the RPC.
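+ // The drop is still accounted per token, so it surfaces in the next + // ClientStats report as a CallsFinishedWithDrop entry (see rpcStats.drop + // and toClientStats above).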
+ if s.Drop { + p.stats.drop(s.LoadBalanceToken) + return nil, nil, status.Errorf(codes.Unavailable, "request dropped by grpclb") + } + + // If not a drop but there's no ready subConns. + if len(p.subConns) <= 0 { + return nil, nil, balancer.ErrNoSubConnAvailable + } + + // Return the next ready subConn in the list, also collect rpc stats. + sc := p.subConns[p.subConnsNext] + p.subConnsNext = (p.subConnsNext + 1) % len(p.subConns) + done := func(info balancer.DoneInfo) { + if !info.BytesSent { + p.stats.failedToSend() + } else if info.BytesReceived { + p.stats.knownReceived() + } + } + return sc, done, nil +} + +func (p *lbPicker) updateReadySCs(readySCs []balancer.SubConn) { + p.mu.Lock() + defer p.mu.Unlock() + + p.subConns = readySCs + p.subConnsNext = p.subConnsNext % len(readySCs) +} diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_remote_balancer.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_remote_balancer.go new file mode 100644 index 000000000..86320bff6 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_remote_balancer.go @@ -0,0 +1,353 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpclb + +import ( + "context" + "fmt" + "io" + "net" + "reflect" + "time" + + timestamppb "github.com/golang/protobuf/ptypes/timestamp" + "google.golang.org/grpc" + "google.golang.org/grpc/balancer" + lbpb "google.golang.org/grpc/balancer/grpclb/grpc_lb_v1" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/resolver" +) + +// processServerList updates the balancer's internal state, creates/removes SubConns, +// and regenerates the picker using the received serverList. +func (lb *lbBalancer) processServerList(l *lbpb.ServerList) { + if grpclog.V(2) { + grpclog.Infof("lbBalancer: processing server list: %+v", l) + } + lb.mu.Lock() + defer lb.mu.Unlock() + + // Set serverListReceived to true so fallback will not take effect if it has + // not hit timeout. + lb.serverListReceived = true + + // If the new server list == old server list, do nothing. + if reflect.DeepEqual(lb.fullServerList, l.Servers) { + if grpclog.V(2) { + grpclog.Infof("lbBalancer: new serverlist same as the previous one, ignoring") + } + return + } + lb.fullServerList = l.Servers + + var backendAddrs []resolver.Address + for i, s := range l.Servers { + if s.Drop { + continue + } + + md := metadata.Pairs(lbTokeyKey, s.LoadBalanceToken) + ip := net.IP(s.IpAddress) + ipStr := ip.String() + if ip.To4() == nil { + // Add square brackets to ipv6 addresses, otherwise net.Dial() and + // net.SplitHostPort() will return a "too many colons" error.
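+ // e.g. 2001:db8::1 becomes [2001:db8::1], so that "[2001:db8::1]:443" + // parses cleanly.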
+ ipStr = fmt.Sprintf("[%s]", ipStr) + } + addr := resolver.Address{ + Addr: fmt.Sprintf("%s:%d", ipStr, s.Port), + Metadata: &md, + } + if grpclog.V(2) { + grpclog.Infof("lbBalancer: server list entry[%d]: ipStr:|%s|, port:|%d|, load balancer token:|%v|", + i, ipStr, s.Port, s.LoadBalanceToken) + } + backendAddrs = append(backendAddrs, addr) + } + + // Call refreshSubConns to create/remove SubConns. If we are in fallback, + // this is also exiting fallback. + lb.refreshSubConns(backendAddrs, false, lb.usePickFirst) +} + +// refreshSubConns creates/removes SubConns with backendAddrs, and refreshes +// balancer state and picker. +// +// Caller must hold lb.mu. +func (lb *lbBalancer) refreshSubConns(backendAddrs []resolver.Address, fallback bool, pickFirst bool) { + lb.inFallback = fallback + + opts := balancer.NewSubConnOptions{} + if !fallback { + opts.CredsBundle = lb.grpclbBackendCreds + } + + lb.backendAddrs = backendAddrs + lb.backendAddrsWithoutMetadata = nil + + if lb.usePickFirst != pickFirst { + // Remove all SubConns when switching modes. + for a, sc := range lb.subConns { + if lb.usePickFirst { + lb.cc.cc.RemoveSubConn(sc) + } else { + lb.cc.RemoveSubConn(sc) + } + delete(lb.subConns, a) + } + lb.usePickFirst = pickFirst + } + + if lb.usePickFirst { + var sc balancer.SubConn + for _, sc = range lb.subConns { + break + } + if sc != nil { + sc.UpdateAddresses(backendAddrs) + sc.Connect() + return + } + // This bypasses the cc wrapper with SubConn cache. + sc, err := lb.cc.cc.NewSubConn(backendAddrs, opts) + if err != nil { + grpclog.Warningf("grpclb: failed to create new SubConn: %v", err) + return + } + sc.Connect() + lb.subConns[backendAddrs[0]] = sc + lb.scStates[sc] = connectivity.Idle + return + } + + // addrsSet is the set converted from backendAddrsWithoutMetadata, it's used for quick + // address lookup. + addrsSet := make(map[resolver.Address]struct{}) + // Create new SubConns. + for _, addr := range backendAddrs { + addrWithoutMD := addr + addrWithoutMD.Metadata = nil + addrsSet[addrWithoutMD] = struct{}{} + lb.backendAddrsWithoutMetadata = append(lb.backendAddrsWithoutMetadata, addrWithoutMD) + + if _, ok := lb.subConns[addrWithoutMD]; !ok { + // Use addrWithMD to create the SubConn. + sc, err := lb.cc.NewSubConn([]resolver.Address{addr}, opts) + if err != nil { + grpclog.Warningf("grpclb: failed to create new SubConn: %v", err) + continue + } + lb.subConns[addrWithoutMD] = sc // Use the addr without MD as key for the map. + if _, ok := lb.scStates[sc]; !ok { + // Only set state of new sc to IDLE. The state could already be + // READY for cached SubConns. + lb.scStates[sc] = connectivity.Idle + } + sc.Connect() + } + } + + for a, sc := range lb.subConns { + // a was removed by resolver. + if _, ok := addrsSet[a]; !ok { + lb.cc.RemoveSubConn(sc) + delete(lb.subConns, a) + // Keep the state of this sc in lb.scStates until sc's state becomes Shutdown. + // The entry will be deleted in UpdateSubConnState. + } + } + + // Regenerate and update picker after refreshing subconns because with + // cache, even if SubConn was created/removed, there might be no state + // changes (the subconn will be kept in cache, not actually + // created/removed).
+ lb.updateStateAndPicker(true, true)
+}
+
+func (lb *lbBalancer) readServerList(s *balanceLoadClientStream) error {
+ for {
+ reply, err := s.Recv()
+ if err != nil {
+ if err == io.EOF {
+ return errServerTerminatedConnection
+ }
+ return fmt.Errorf("grpclb: failed to recv server list: %v", err)
+ }
+ if serverList := reply.GetServerList(); serverList != nil {
+ lb.processServerList(serverList)
+ }
+ }
+}
+
+func (lb *lbBalancer) sendLoadReport(s *balanceLoadClientStream, interval time.Duration) {
+ ticker := time.NewTicker(interval)
+ defer ticker.Stop()
+ for {
+ select {
+ case <-ticker.C:
+ case <-s.Context().Done():
+ return
+ }
+ stats := lb.clientStats.toClientStats()
+ t := time.Now()
+ stats.Timestamp = &timestamppb.Timestamp{
+ Seconds: t.Unix(),
+ Nanos: int32(t.Nanosecond()),
+ }
+ if err := s.Send(&lbpb.LoadBalanceRequest{
+ LoadBalanceRequestType: &lbpb.LoadBalanceRequest_ClientStats{
+ ClientStats: stats,
+ },
+ }); err != nil {
+ return
+ }
+ }
+}
+
+func (lb *lbBalancer) callRemoteBalancer() (backoff bool, _ error) {
+ lbClient := &loadBalancerClient{cc: lb.ccRemoteLB}
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+ stream, err := lbClient.BalanceLoad(ctx, grpc.WaitForReady(true))
+ if err != nil {
+ return true, fmt.Errorf("grpclb: failed to perform RPC to the remote balancer %v", err)
+ }
+ lb.mu.Lock()
+ lb.remoteBalancerConnected = true
+ lb.mu.Unlock()
+
+ // grpclb handshake on the stream.
+ initReq := &lbpb.LoadBalanceRequest{
+ LoadBalanceRequestType: &lbpb.LoadBalanceRequest_InitialRequest{
+ InitialRequest: &lbpb.InitialLoadBalanceRequest{
+ Name: lb.target,
+ },
+ },
+ }
+ if err := stream.Send(initReq); err != nil {
+ return true, fmt.Errorf("grpclb: failed to send init request: %v", err)
+ }
+ reply, err := stream.Recv()
+ if err != nil {
+ return true, fmt.Errorf("grpclb: failed to recv init response: %v", err)
+ }
+ initResp := reply.GetInitialResponse()
+ if initResp == nil {
+ return true, fmt.Errorf("grpclb: reply from remote balancer did not include initial response")
+ }
+ if initResp.LoadBalancerDelegate != "" {
+ return true, fmt.Errorf("grpclb: Delegation is not supported")
+ }
+
+ go func() {
+ if d := convertDuration(initResp.ClientStatsReportInterval); d > 0 {
+ lb.sendLoadReport(stream, d)
+ }
+ }()
+ // No backoff if init req/resp handshake was successful.
+ return false, lb.readServerList(stream)
+}
+
+func (lb *lbBalancer) watchRemoteBalancer() {
+ var retryCount int
+ for {
+ doBackoff, err := lb.callRemoteBalancer()
+ select {
+ case <-lb.doneCh:
+ return
+ default:
+ if err != nil {
+ if err == errServerTerminatedConnection {
+ grpclog.Info(err)
+ } else {
+ grpclog.Warning(err)
+ }
+ }
+ }
+ // Trigger a re-resolve when the stream errors.
+ lb.cc.cc.ResolveNow(resolver.ResolveNowOption{})
+
+ lb.mu.Lock()
+ lb.remoteBalancerConnected = false
+ lb.fullServerList = nil
+ // Enter fallback when connection to remote balancer is lost, and the
+ // aggregated state is not Ready.
+ if !lb.inFallback && lb.state != connectivity.Ready {
+ // Entering fallback.
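The sendLoadReport loop above is a compact cancellable periodic reporter: the ticker paces the sends, the stream's context ends the goroutine, and a failed Send returns so the caller can redial. A stripped-down sketch of the same shape, with a hypothetical send callback standing in for stream.Send:

package main

import (
	"context"
	"fmt"
	"time"
)

// report emits one observation per tick until ctx is done or a send fails.
func report(ctx context.Context, interval time.Duration, send func(time.Time) error) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
		case <-ctx.Done():
			return
		}
		if err := send(time.Now()); err != nil {
			return // stream broken; the caller's watch loop would redial
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 350*time.Millisecond)
	defer cancel()
	report(ctx, 100*time.Millisecond, func(t time.Time) error {
		fmt.Println("report at", t.Format(time.RFC3339Nano))
		return nil
	})
}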
+ lb.refreshSubConns(lb.resolvedBackendAddrs, true, lb.usePickFirst) + } + lb.mu.Unlock() + + if !doBackoff { + retryCount = 0 + continue + } + + timer := time.NewTimer(lb.backoff.Backoff(retryCount)) + select { + case <-timer.C: + case <-lb.doneCh: + timer.Stop() + return + } + retryCount++ + } +} + +func (lb *lbBalancer) dialRemoteLB(remoteLBName string) { + var dopts []grpc.DialOption + if creds := lb.opt.DialCreds; creds != nil { + if err := creds.OverrideServerName(remoteLBName); err == nil { + dopts = append(dopts, grpc.WithTransportCredentials(creds)) + } else { + grpclog.Warningf("grpclb: failed to override the server name in the credentials: %v, using Insecure", err) + dopts = append(dopts, grpc.WithInsecure()) + } + } else if bundle := lb.grpclbClientConnCreds; bundle != nil { + dopts = append(dopts, grpc.WithCredentialsBundle(bundle)) + } else { + dopts = append(dopts, grpc.WithInsecure()) + } + if lb.opt.Dialer != nil { + dopts = append(dopts, grpc.WithContextDialer(lb.opt.Dialer)) + } + // Explicitly set pickfirst as the balancer. + dopts = append(dopts, grpc.WithBalancerName(grpc.PickFirstBalancerName)) + wrb := internal.WithResolverBuilder.(func(resolver.Builder) grpc.DialOption) + dopts = append(dopts, wrb(lb.manualResolver)) + if channelz.IsOn() { + dopts = append(dopts, grpc.WithChannelzParentID(lb.opt.ChannelzParentID)) + } + + // DialContext using manualResolver.Scheme, which is a random scheme + // generated when init grpclb. The target scheme here is not important. + // + // The grpc dial target will be used by the creds (ALTS) as the authority, + // so it has to be set to remoteLBName that comes from resolver. + cc, err := grpc.DialContext(context.Background(), remoteLBName, dopts...) + if err != nil { + grpclog.Fatalf("failed to dial: %v", err) + } + lb.ccRemoteLB = cc + go lb.watchRemoteBalancer() +} diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_util.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_util.go new file mode 100644 index 000000000..2663c37e3 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_util.go @@ -0,0 +1,209 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpclb + +import ( + "fmt" + "sync" + "time" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/resolver" +) + +// The parent ClientConn should re-resolve when grpclb loses connection to the +// remote balancer. When the ClientConn inside grpclb gets a TransientFailure, +// it calls lbManualResolver.ResolveNow(), which calls parent ClientConn's +// ResolveNow, and eventually results in re-resolve happening in parent +// ClientConn's resolver (DNS for example). 
+// +// parent +// ClientConn +// +-----------------------------------------------------------------+ +// | parent +---------------------------------+ | +// | DNS ClientConn | grpclb | | +// | resolver balancerWrapper | | | +// | + + | grpclb grpclb | | +// | | | | ManualResolver ClientConn | | +// | | | | + + | | +// | | | | | | Transient | | +// | | | | | | Failure | | +// | | | | | <--------- | | | +// | | | <--------------- | ResolveNow | | | +// | | <--------- | ResolveNow | | | | | +// | | ResolveNow | | | | | | +// | | | | | | | | +// | + + | + + | | +// | +---------------------------------+ | +// +-----------------------------------------------------------------+ + +// lbManualResolver is used by the ClientConn inside grpclb. It's a manual +// resolver with a special ResolveNow() function. +// +// When ResolveNow() is called, it calls ResolveNow() on the parent ClientConn, +// so when grpclb client lose contact with remote balancers, the parent +// ClientConn's resolver will re-resolve. +type lbManualResolver struct { + scheme string + ccr resolver.ClientConn + + ccb balancer.ClientConn +} + +func (r *lbManualResolver) Build(_ resolver.Target, cc resolver.ClientConn, _ resolver.BuildOption) (resolver.Resolver, error) { + r.ccr = cc + return r, nil +} + +func (r *lbManualResolver) Scheme() string { + return r.scheme +} + +// ResolveNow calls resolveNow on the parent ClientConn. +func (r *lbManualResolver) ResolveNow(o resolver.ResolveNowOption) { + r.ccb.ResolveNow(o) +} + +// Close is a noop for Resolver. +func (*lbManualResolver) Close() {} + +// UpdateState calls cc.UpdateState. +func (r *lbManualResolver) UpdateState(s resolver.State) { + r.ccr.UpdateState(s) +} + +const subConnCacheTime = time.Second * 10 + +// lbCacheClientConn is a wrapper balancer.ClientConn with a SubConn cache. +// SubConns will be kept in cache for subConnCacheTime before being removed. +// +// Its new and remove methods are updated to do cache first. +type lbCacheClientConn struct { + cc balancer.ClientConn + timeout time.Duration + + mu sync.Mutex + // subConnCache only keeps subConns that are being deleted. + subConnCache map[resolver.Address]*subConnCacheEntry + subConnToAddr map[balancer.SubConn]resolver.Address +} + +type subConnCacheEntry struct { + sc balancer.SubConn + + cancel func() + abortDeleting bool +} + +func newLBCacheClientConn(cc balancer.ClientConn) *lbCacheClientConn { + return &lbCacheClientConn{ + cc: cc, + timeout: subConnCacheTime, + subConnCache: make(map[resolver.Address]*subConnCacheEntry), + subConnToAddr: make(map[balancer.SubConn]resolver.Address), + } +} + +func (ccc *lbCacheClientConn) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { + if len(addrs) != 1 { + return nil, fmt.Errorf("grpclb calling NewSubConn with addrs of length %v", len(addrs)) + } + addrWithoutMD := addrs[0] + addrWithoutMD.Metadata = nil + + ccc.mu.Lock() + defer ccc.mu.Unlock() + if entry, ok := ccc.subConnCache[addrWithoutMD]; ok { + // If entry is in subConnCache, the SubConn was being deleted. + // cancel function will never be nil. 
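The cache machinery above delays SubConn teardown rather than doing it immediately, so a server list update that removes and quickly re-adds an address reuses the READY connection. A miniature of the same delayed-delete pattern, including an abort flag that (like abortDeleting further down) covers the race where the timer has fired but its callback is still blocked on the mutex; every name here is hypothetical, not part of the vendored code:

package main

import (
	"fmt"
	"sync"
	"time"
)

type delayedCache struct {
	mu      sync.Mutex
	entries map[string]string
	pending map[string]*pendingDelete
	ttl     time.Duration
}

type pendingDelete struct {
	timer *time.Timer
	abort bool
}

// remove schedules the delete instead of performing it.
func (c *delayedCache) remove(key string) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if _, ok := c.pending[key]; ok {
		return
	}
	p := &pendingDelete{}
	p.timer = time.AfterFunc(c.ttl, func() {
		c.mu.Lock()
		defer c.mu.Unlock()
		if p.abort {
			return // a re-add won the race; keep the entry
		}
		delete(c.entries, key)
		delete(c.pending, key)
	})
	c.pending[key] = p
}

// add cancels a pending delete if one exists, reusing the live entry.
func (c *delayedCache) add(key, val string) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if p, ok := c.pending[key]; ok {
		if !p.timer.Stop() {
			p.abort = true // timer fired; its callback will see abort and bail
		}
		delete(c.pending, key)
		return
	}
	c.entries[key] = val
}

func main() {
	c := &delayedCache{
		entries: map[string]string{"a": "1"},
		pending: map[string]*pendingDelete{},
		ttl:     50 * time.Millisecond,
	}
	c.remove("a")
	c.add("a", "1") // within ttl: cancels the pending delete
	time.Sleep(100 * time.Millisecond)
	fmt.Println(len(c.entries)) // 1
}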
+ entry.cancel() + delete(ccc.subConnCache, addrWithoutMD) + return entry.sc, nil + } + + scNew, err := ccc.cc.NewSubConn(addrs, opts) + if err != nil { + return nil, err + } + + ccc.subConnToAddr[scNew] = addrWithoutMD + return scNew, nil +} + +func (ccc *lbCacheClientConn) RemoveSubConn(sc balancer.SubConn) { + ccc.mu.Lock() + defer ccc.mu.Unlock() + addr, ok := ccc.subConnToAddr[sc] + if !ok { + return + } + + if entry, ok := ccc.subConnCache[addr]; ok { + if entry.sc != sc { + // This could happen if NewSubConn was called multiple times for the + // same address, and those SubConns are all removed. We remove sc + // immediately here. + delete(ccc.subConnToAddr, sc) + ccc.cc.RemoveSubConn(sc) + } + return + } + + entry := &subConnCacheEntry{ + sc: sc, + } + ccc.subConnCache[addr] = entry + + timer := time.AfterFunc(ccc.timeout, func() { + ccc.mu.Lock() + if entry.abortDeleting { + return + } + ccc.cc.RemoveSubConn(sc) + delete(ccc.subConnToAddr, sc) + delete(ccc.subConnCache, addr) + ccc.mu.Unlock() + }) + entry.cancel = func() { + if !timer.Stop() { + // If stop was not successful, the timer has fired (this can only + // happen in a race). But the deleting function is blocked on ccc.mu + // because the mutex was held by the caller of this function. + // + // Set abortDeleting to true to abort the deleting function. When + // the lock is released, the deleting function will acquire the + // lock, check the value of abortDeleting and return. + entry.abortDeleting = true + } + } +} + +func (ccc *lbCacheClientConn) UpdateBalancerState(s connectivity.State, p balancer.Picker) { + ccc.cc.UpdateBalancerState(s, p) +} + +func (ccc *lbCacheClientConn) close() { + ccc.mu.Lock() + // Only cancel all existing timers. There's no need to remove SubConns. + for _, entry := range ccc.subConnCache { + entry.cancel() + } + ccc.mu.Unlock() +} diff --git a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go new file mode 100644 index 000000000..29f7a4ddd --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go @@ -0,0 +1,83 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package roundrobin defines a roundrobin balancer. Roundrobin balancer is +// installed as one of the default balancers in gRPC, users don't need to +// explicitly install this balancer. +package roundrobin + +import ( + "context" + "sync" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/base" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/grpcrand" + "google.golang.org/grpc/resolver" +) + +// Name is the name of round_robin balancer. +const Name = "round_robin" + +// newBuilder creates a new roundrobin balancer builder. 
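The builder defined next produces pickers that start their rotation at a random offset, so many clients rebuilding pickers at the same moment do not all send their first RPC to the same backend. A tiny standalone selector mirroring rrPicker below (hypothetical names):

package main

import (
	"fmt"
	"math/rand"
)

type rr struct {
	items []string
	next  int
}

// newRR starts at a random index, like rrPicker's grpcrand.Intn(len(scs)).
func newRR(items []string) *rr { return &rr{items: items, next: rand.Intn(len(items))} }

func (r *rr) pick() string {
	v := r.items[r.next]
	r.next = (r.next + 1) % len(r.items)
	return v
}

func main() {
	r := newRR([]string{"a", "b", "c"})
	for i := 0; i < 4; i++ {
		fmt.Print(r.pick(), " ") // cycles a, b, c from a random start
	}
	fmt.Println()
}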
+func newBuilder() balancer.Builder { + return base.NewBalancerBuilderWithConfig(Name, &rrPickerBuilder{}, base.Config{HealthCheck: true}) +} + +func init() { + balancer.Register(newBuilder()) +} + +type rrPickerBuilder struct{} + +func (*rrPickerBuilder) Build(readySCs map[resolver.Address]balancer.SubConn) balancer.Picker { + grpclog.Infof("roundrobinPicker: newPicker called with readySCs: %v", readySCs) + if len(readySCs) == 0 { + return base.NewErrPicker(balancer.ErrNoSubConnAvailable) + } + var scs []balancer.SubConn + for _, sc := range readySCs { + scs = append(scs, sc) + } + return &rrPicker{ + subConns: scs, + // Start at a random index, as the same RR balancer rebuilds a new + // picker when SubConn states change, and we don't want to apply excess + // load to the first server in the list. + next: grpcrand.Intn(len(scs)), + } +} + +type rrPicker struct { + // subConns is the snapshot of the roundrobin balancer when this picker was + // created. The slice is immutable. Each Get() will do a round robin + // selection from it and return the selected SubConn. + subConns []balancer.SubConn + + mu sync.Mutex + next int +} + +func (p *rrPicker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) { + p.mu.Lock() + sc := p.subConns[p.next] + p.next = (p.next + 1) % len(p.subConns) + p.mu.Unlock() + return sc, nil, nil +} diff --git a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go new file mode 100644 index 000000000..7bc6621a5 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go @@ -0,0 +1,318 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "fmt" + "sync" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/resolver" +) + +// scStateUpdate contains the subConn and the new state it changed to. +type scStateUpdate struct { + sc balancer.SubConn + state connectivity.State +} + +// scStateUpdateBuffer is an unbounded channel for scStateChangeTuple. +// TODO make a general purpose buffer that uses interface{}. +type scStateUpdateBuffer struct { + c chan *scStateUpdate + mu sync.Mutex + backlog []*scStateUpdate +} + +func newSCStateUpdateBuffer() *scStateUpdateBuffer { + return &scStateUpdateBuffer{ + c: make(chan *scStateUpdate, 1), + } +} + +func (b *scStateUpdateBuffer) put(t *scStateUpdate) { + b.mu.Lock() + defer b.mu.Unlock() + if len(b.backlog) == 0 { + select { + case b.c <- t: + return + default: + } + } + b.backlog = append(b.backlog, t) +} + +func (b *scStateUpdateBuffer) load() { + b.mu.Lock() + defer b.mu.Unlock() + if len(b.backlog) > 0 { + select { + case b.c <- b.backlog[0]: + b.backlog[0] = nil + b.backlog = b.backlog[1:] + default: + } + } +} + +// get returns the channel that the scStateUpdate will be sent to. 
+// +// Upon receiving, the caller should call load to send another +// scStateChangeTuple onto the channel if there is any. +func (b *scStateUpdateBuffer) get() <-chan *scStateUpdate { + return b.c +} + +// ccBalancerWrapper is a wrapper on top of cc for balancers. +// It implements balancer.ClientConn interface. +type ccBalancerWrapper struct { + cc *ClientConn + balancer balancer.Balancer + stateChangeQueue *scStateUpdateBuffer + ccUpdateCh chan *balancer.ClientConnState + done chan struct{} + + mu sync.Mutex + subConns map[*acBalancerWrapper]struct{} +} + +func newCCBalancerWrapper(cc *ClientConn, b balancer.Builder, bopts balancer.BuildOptions) *ccBalancerWrapper { + ccb := &ccBalancerWrapper{ + cc: cc, + stateChangeQueue: newSCStateUpdateBuffer(), + ccUpdateCh: make(chan *balancer.ClientConnState, 1), + done: make(chan struct{}), + subConns: make(map[*acBalancerWrapper]struct{}), + } + go ccb.watcher() + ccb.balancer = b.Build(ccb, bopts) + return ccb +} + +// watcher balancer functions sequentially, so the balancer can be implemented +// lock-free. +func (ccb *ccBalancerWrapper) watcher() { + for { + select { + case t := <-ccb.stateChangeQueue.get(): + ccb.stateChangeQueue.load() + select { + case <-ccb.done: + ccb.balancer.Close() + return + default: + } + if ub, ok := ccb.balancer.(balancer.V2Balancer); ok { + ub.UpdateSubConnState(t.sc, balancer.SubConnState{ConnectivityState: t.state}) + } else { + ccb.balancer.HandleSubConnStateChange(t.sc, t.state) + } + case s := <-ccb.ccUpdateCh: + select { + case <-ccb.done: + ccb.balancer.Close() + return + default: + } + if ub, ok := ccb.balancer.(balancer.V2Balancer); ok { + ub.UpdateClientConnState(*s) + } else { + ccb.balancer.HandleResolvedAddrs(s.ResolverState.Addresses, nil) + } + case <-ccb.done: + } + + select { + case <-ccb.done: + ccb.balancer.Close() + ccb.mu.Lock() + scs := ccb.subConns + ccb.subConns = nil + ccb.mu.Unlock() + for acbw := range scs { + ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain) + } + ccb.UpdateBalancerState(connectivity.Connecting, nil) + return + default: + } + ccb.cc.firstResolveEvent.Fire() + } +} + +func (ccb *ccBalancerWrapper) close() { + close(ccb.done) +} + +func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State) { + // When updating addresses for a SubConn, if the address in use is not in + // the new addresses, the old ac will be tearDown() and a new ac will be + // created. tearDown() generates a state change with Shutdown state, we + // don't want the balancer to receive this state change. So before + // tearDown() on the old ac, ac.acbw (acWrapper) will be set to nil, and + // this function will be called with (nil, Shutdown). We don't need to call + // balancer method in this case. + if sc == nil { + return + } + ccb.stateChangeQueue.put(&scStateUpdate{ + sc: sc, + state: s, + }) +} + +func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) { + if ccb.cc.curBalancerName != grpclbName { + // Filter any grpclb addresses since we don't have the grpclb balancer. 
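The scStateUpdateBuffer above is an unbounded FIFO built from a one-element channel plus an overflow slice: put never blocks the gRPC callback that produces updates, and the consumer calls load after every receive to pull the next backlog item forward. A single-goroutine miniature of the same put/get/load contract (the mutex is omitted only because this demo has one goroutine; names are hypothetical):

package main

import "fmt"

type unboundedFIFO struct {
	c       chan int
	backlog []int
}

func newUnboundedFIFO() *unboundedFIFO { return &unboundedFIFO{c: make(chan int, 1)} }

// put never blocks: overflow goes to the backlog slice.
func (b *unboundedFIFO) put(v int) {
	if len(b.backlog) == 0 {
		select {
		case b.c <- v:
			return
		default:
		}
	}
	b.backlog = append(b.backlog, v)
}

// load moves the oldest backlog item into the channel, if there is room.
func (b *unboundedFIFO) load() {
	if len(b.backlog) > 0 {
		select {
		case b.c <- b.backlog[0]:
			b.backlog = b.backlog[1:]
		default:
		}
	}
}

func main() {
	b := newUnboundedFIFO()
	for i := 1; i <= 3; i++ {
		b.put(i) // never blocks, unlike a plain buffered channel
	}
	for i := 0; i < 3; i++ {
		v := <-b.c
		b.load() // refill after every receive, as the watcher goroutine does
		fmt.Println(v)
	}
}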
+ s := ccs.ResolverState + for i := 0; i < len(s.Addresses); { + if s.Addresses[i].Type == resolver.GRPCLB { + copy(s.Addresses[i:], s.Addresses[i+1:]) + s.Addresses = s.Addresses[:len(s.Addresses)-1] + continue + } + i++ + } + } + select { + case <-ccb.ccUpdateCh: + default: + } + ccb.ccUpdateCh <- ccs +} + +func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { + if len(addrs) <= 0 { + return nil, fmt.Errorf("grpc: cannot create SubConn with empty address list") + } + ccb.mu.Lock() + defer ccb.mu.Unlock() + if ccb.subConns == nil { + return nil, fmt.Errorf("grpc: ClientConn balancer wrapper was closed") + } + ac, err := ccb.cc.newAddrConn(addrs, opts) + if err != nil { + return nil, err + } + acbw := &acBalancerWrapper{ac: ac} + acbw.ac.mu.Lock() + ac.acbw = acbw + acbw.ac.mu.Unlock() + ccb.subConns[acbw] = struct{}{} + return acbw, nil +} + +func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) { + acbw, ok := sc.(*acBalancerWrapper) + if !ok { + return + } + ccb.mu.Lock() + defer ccb.mu.Unlock() + if ccb.subConns == nil { + return + } + delete(ccb.subConns, acbw) + ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain) +} + +func (ccb *ccBalancerWrapper) UpdateBalancerState(s connectivity.State, p balancer.Picker) { + ccb.mu.Lock() + defer ccb.mu.Unlock() + if ccb.subConns == nil { + return + } + // Update picker before updating state. Even though the ordering here does + // not matter, it can lead to multiple calls of Pick in the common start-up + // case where we wait for ready and then perform an RPC. If the picker is + // updated later, we could call the "connecting" picker when the state is + // updated, and then call the "ready" picker after the picker gets updated. + ccb.cc.blockingpicker.updatePicker(p) + ccb.cc.csMgr.updateState(s) +} + +func (ccb *ccBalancerWrapper) ResolveNow(o resolver.ResolveNowOption) { + ccb.cc.resolveNow(o) +} + +func (ccb *ccBalancerWrapper) Target() string { + return ccb.cc.target +} + +// acBalancerWrapper is a wrapper on top of ac for balancers. +// It implements balancer.SubConn interface. +type acBalancerWrapper struct { + mu sync.Mutex + ac *addrConn +} + +func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) { + acbw.mu.Lock() + defer acbw.mu.Unlock() + if len(addrs) <= 0 { + acbw.ac.tearDown(errConnDrain) + return + } + if !acbw.ac.tryUpdateAddrs(addrs) { + cc := acbw.ac.cc + opts := acbw.ac.scopts + acbw.ac.mu.Lock() + // Set old ac.acbw to nil so the Shutdown state update will be ignored + // by balancer. + // + // TODO(bar) the state transition could be wrong when tearDown() old ac + // and creating new ac, fix the transition. 
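Backing up to updateClientConnState above: draining the one-slot channel before sending is a "latest value wins" mailbox, guaranteeing a slow watcher goroutine observes the newest ClientConnState rather than a queue of stale ones. In miniature (hypothetical names, single producer as in the vendored code):

package main

import "fmt"

type latest struct{ c chan string }

// send replaces any undelivered value instead of queueing behind it.
func (l *latest) send(v string) {
	select {
	case <-l.c: // drop the stale, unread update
	default:
	}
	l.c <- v
}

func main() {
	l := &latest{c: make(chan string, 1)}
	l.send("state-1")
	l.send("state-2") // replaces state-1 before anyone reads it
	fmt.Println(<-l.c) // state-2
}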
+ acbw.ac.acbw = nil + acbw.ac.mu.Unlock() + acState := acbw.ac.getState() + acbw.ac.tearDown(errConnDrain) + + if acState == connectivity.Shutdown { + return + } + + ac, err := cc.newAddrConn(addrs, opts) + if err != nil { + grpclog.Warningf("acBalancerWrapper: UpdateAddresses: failed to newAddrConn: %v", err) + return + } + acbw.ac = ac + ac.mu.Lock() + ac.acbw = acbw + ac.mu.Unlock() + if acState != connectivity.Idle { + ac.connect() + } + } +} + +func (acbw *acBalancerWrapper) Connect() { + acbw.mu.Lock() + defer acbw.mu.Unlock() + acbw.ac.connect() +} + +func (acbw *acBalancerWrapper) getAddrConn() *addrConn { + acbw.mu.Lock() + defer acbw.mu.Unlock() + return acbw.ac +} diff --git a/vendor/google.golang.org/grpc/balancer_v1_wrapper.go b/vendor/google.golang.org/grpc/balancer_v1_wrapper.go new file mode 100644 index 000000000..66e9a44ac --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer_v1_wrapper.go @@ -0,0 +1,334 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "context" + "sync" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/resolver" +) + +type balancerWrapperBuilder struct { + b Balancer // The v1 balancer. +} + +func (bwb *balancerWrapperBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { + bwb.b.Start(opts.Target.Endpoint, BalancerConfig{ + DialCreds: opts.DialCreds, + Dialer: opts.Dialer, + }) + _, pickfirst := bwb.b.(*pickFirst) + bw := &balancerWrapper{ + balancer: bwb.b, + pickfirst: pickfirst, + cc: cc, + targetAddr: opts.Target.Endpoint, + startCh: make(chan struct{}), + conns: make(map[resolver.Address]balancer.SubConn), + connSt: make(map[balancer.SubConn]*scState), + csEvltr: &balancer.ConnectivityStateEvaluator{}, + state: connectivity.Idle, + } + cc.UpdateBalancerState(connectivity.Idle, bw) + go bw.lbWatcher() + return bw +} + +func (bwb *balancerWrapperBuilder) Name() string { + return "wrapper" +} + +type scState struct { + addr Address // The v1 address type. + s connectivity.State + down func(error) +} + +type balancerWrapper struct { + balancer Balancer // The v1 balancer. + pickfirst bool + + cc balancer.ClientConn + targetAddr string // Target without the scheme. + + mu sync.Mutex + conns map[resolver.Address]balancer.SubConn + connSt map[balancer.SubConn]*scState + // This channel is closed when handling the first resolver result. + // lbWatcher blocks until this is closed, to avoid race between + // - NewSubConn is created, cc wants to notify balancer of state changes; + // - Build hasn't return, cc doesn't have access to balancer. + startCh chan struct{} + + // To aggregate the connectivity state. + csEvltr *balancer.ConnectivityStateEvaluator + state connectivity.State +} + +// lbWatcher watches the Notify channel of the balancer and manages +// connections accordingly. 
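lbWatcher, defined next, reconciles each notified address list against the existing connections by computing an add set and a delete set. The underlying set diff, extracted into a hypothetical standalone helper:

package main

import "fmt"

// reconcile returns which keys to add and which to drop so that current
// matches desired — the same diff lbWatcher performs with resolver
// addresses and SubConns.
func reconcile(current map[string]bool, desired []string) (add, del []string) {
	want := make(map[string]bool, len(desired))
	for _, d := range desired {
		want[d] = true
		if !current[d] {
			add = append(add, d)
		}
	}
	for c := range current {
		if !want[c] {
			del = append(del, c)
		}
	}
	return add, del
}

func main() {
	current := map[string]bool{"10.0.0.1:80": true, "10.0.0.2:80": true}
	add, del := reconcile(current, []string{"10.0.0.2:80", "10.0.0.3:80"})
	fmt.Println("add:", add, "del:", del) // add: [10.0.0.3:80] del: [10.0.0.1:80]
}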
+func (bw *balancerWrapper) lbWatcher() { + <-bw.startCh + notifyCh := bw.balancer.Notify() + if notifyCh == nil { + // There's no resolver in the balancer. Connect directly. + a := resolver.Address{ + Addr: bw.targetAddr, + Type: resolver.Backend, + } + sc, err := bw.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{}) + if err != nil { + grpclog.Warningf("Error creating connection to %v. Err: %v", a, err) + } else { + bw.mu.Lock() + bw.conns[a] = sc + bw.connSt[sc] = &scState{ + addr: Address{Addr: bw.targetAddr}, + s: connectivity.Idle, + } + bw.mu.Unlock() + sc.Connect() + } + return + } + + for addrs := range notifyCh { + grpclog.Infof("balancerWrapper: got update addr from Notify: %v", addrs) + if bw.pickfirst { + var ( + oldA resolver.Address + oldSC balancer.SubConn + ) + bw.mu.Lock() + for oldA, oldSC = range bw.conns { + break + } + bw.mu.Unlock() + if len(addrs) <= 0 { + if oldSC != nil { + // Teardown old sc. + bw.mu.Lock() + delete(bw.conns, oldA) + delete(bw.connSt, oldSC) + bw.mu.Unlock() + bw.cc.RemoveSubConn(oldSC) + } + continue + } + + var newAddrs []resolver.Address + for _, a := range addrs { + newAddr := resolver.Address{ + Addr: a.Addr, + Type: resolver.Backend, // All addresses from balancer are all backends. + ServerName: "", + Metadata: a.Metadata, + } + newAddrs = append(newAddrs, newAddr) + } + if oldSC == nil { + // Create new sc. + sc, err := bw.cc.NewSubConn(newAddrs, balancer.NewSubConnOptions{}) + if err != nil { + grpclog.Warningf("Error creating connection to %v. Err: %v", newAddrs, err) + } else { + bw.mu.Lock() + // For pickfirst, there should be only one SubConn, so the + // address doesn't matter. All states updating (up and down) + // and picking should all happen on that only SubConn. + bw.conns[resolver.Address{}] = sc + bw.connSt[sc] = &scState{ + addr: addrs[0], // Use the first address. + s: connectivity.Idle, + } + bw.mu.Unlock() + sc.Connect() + } + } else { + bw.mu.Lock() + bw.connSt[oldSC].addr = addrs[0] + bw.mu.Unlock() + oldSC.UpdateAddresses(newAddrs) + } + } else { + var ( + add []resolver.Address // Addresses need to setup connections. + del []balancer.SubConn // Connections need to tear down. + ) + resAddrs := make(map[resolver.Address]Address) + for _, a := range addrs { + resAddrs[resolver.Address{ + Addr: a.Addr, + Type: resolver.Backend, // All addresses from balancer are all backends. + ServerName: "", + Metadata: a.Metadata, + }] = a + } + bw.mu.Lock() + for a := range resAddrs { + if _, ok := bw.conns[a]; !ok { + add = append(add, a) + } + } + for a, c := range bw.conns { + if _, ok := resAddrs[a]; !ok { + del = append(del, c) + delete(bw.conns, a) + // Keep the state of this sc in bw.connSt until its state becomes Shutdown. + } + } + bw.mu.Unlock() + for _, a := range add { + sc, err := bw.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{}) + if err != nil { + grpclog.Warningf("Error creating connection to %v. 
Err: %v", a, err) + } else { + bw.mu.Lock() + bw.conns[a] = sc + bw.connSt[sc] = &scState{ + addr: resAddrs[a], + s: connectivity.Idle, + } + bw.mu.Unlock() + sc.Connect() + } + } + for _, c := range del { + bw.cc.RemoveSubConn(c) + } + } + } +} + +func (bw *balancerWrapper) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) { + bw.mu.Lock() + defer bw.mu.Unlock() + scSt, ok := bw.connSt[sc] + if !ok { + return + } + if s == connectivity.Idle { + sc.Connect() + } + oldS := scSt.s + scSt.s = s + if oldS != connectivity.Ready && s == connectivity.Ready { + scSt.down = bw.balancer.Up(scSt.addr) + } else if oldS == connectivity.Ready && s != connectivity.Ready { + if scSt.down != nil { + scSt.down(errConnClosing) + } + } + sa := bw.csEvltr.RecordTransition(oldS, s) + if bw.state != sa { + bw.state = sa + } + bw.cc.UpdateBalancerState(bw.state, bw) + if s == connectivity.Shutdown { + // Remove state for this sc. + delete(bw.connSt, sc) + } +} + +func (bw *balancerWrapper) HandleResolvedAddrs([]resolver.Address, error) { + bw.mu.Lock() + defer bw.mu.Unlock() + select { + case <-bw.startCh: + default: + close(bw.startCh) + } + // There should be a resolver inside the balancer. + // All updates here, if any, are ignored. +} + +func (bw *balancerWrapper) Close() { + bw.mu.Lock() + defer bw.mu.Unlock() + select { + case <-bw.startCh: + default: + close(bw.startCh) + } + bw.balancer.Close() +} + +// The picker is the balancerWrapper itself. +// It either blocks or returns error, consistent with v1 balancer Get(). +func (bw *balancerWrapper) Pick(ctx context.Context, opts balancer.PickOptions) (sc balancer.SubConn, done func(balancer.DoneInfo), err error) { + failfast := true // Default failfast is true. + if ss, ok := rpcInfoFromContext(ctx); ok { + failfast = ss.failfast + } + a, p, err := bw.balancer.Get(ctx, BalancerGetOptions{BlockingWait: !failfast}) + if err != nil { + return nil, nil, err + } + if p != nil { + done = func(balancer.DoneInfo) { p() } + defer func() { + if err != nil { + p() + } + }() + } + + bw.mu.Lock() + defer bw.mu.Unlock() + if bw.pickfirst { + // Get the first sc in conns. + for _, sc := range bw.conns { + return sc, done, nil + } + return nil, nil, balancer.ErrNoSubConnAvailable + } + sc, ok1 := bw.conns[resolver.Address{ + Addr: a.Addr, + Type: resolver.Backend, + ServerName: "", + Metadata: a.Metadata, + }] + s, ok2 := bw.connSt[sc] + if !ok1 || !ok2 { + // This can only happen due to a race where Get() returned an address + // that was subsequently removed by Notify. In this case we should + // retry always. + return nil, nil, balancer.ErrNoSubConnAvailable + } + switch s.s { + case connectivity.Ready, connectivity.Idle: + return sc, done, nil + case connectivity.Shutdown, connectivity.TransientFailure: + // If the returned sc has been shut down or is in transient failure, + // return error, and this RPC will fail or wait for another picker (if + // non-failfast). + return nil, nil, balancer.ErrTransientFailure + default: + // For other states (connecting or unknown), the v1 balancer would + // traditionally wait until ready and then issue the RPC. Returning + // ErrNoSubConnAvailable will be a slight improvement in that it will + // allow the balancer to choose another address in case others are + // connected. 
+ return nil, nil, balancer.ErrNoSubConnAvailable + } +} diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go new file mode 100644 index 000000000..f393bb661 --- /dev/null +++ b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go @@ -0,0 +1,900 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: grpc/binarylog/grpc_binarylog_v1/binarylog.proto + +package grpc_binarylog_v1 // import "google.golang.org/grpc/binarylog/grpc_binarylog_v1" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import duration "github.com/golang/protobuf/ptypes/duration" +import timestamp "github.com/golang/protobuf/ptypes/timestamp" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Enumerates the type of event +// Note the terminology is different from the RPC semantics +// definition, but the same meaning is expressed here. +type GrpcLogEntry_EventType int32 + +const ( + GrpcLogEntry_EVENT_TYPE_UNKNOWN GrpcLogEntry_EventType = 0 + // Header sent from client to server + GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER GrpcLogEntry_EventType = 1 + // Header sent from server to client + GrpcLogEntry_EVENT_TYPE_SERVER_HEADER GrpcLogEntry_EventType = 2 + // Message sent from client to server + GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE GrpcLogEntry_EventType = 3 + // Message sent from server to client + GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE GrpcLogEntry_EventType = 4 + // A signal that client is done sending + GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE GrpcLogEntry_EventType = 5 + // Trailer indicates the end of the RPC. + // On client side, this event means a trailer was either received + // from the network or the gRPC library locally generated a status + // to inform the application about a failure. + // On server side, this event means the server application requested + // to send a trailer. Note: EVENT_TYPE_CANCEL may still arrive after + // this due to races on server side. + GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER GrpcLogEntry_EventType = 6 + // A signal that the RPC is cancelled. On client side, this + // indicates the client application requests a cancellation. + // On server side, this indicates that cancellation was detected. + // Note: This marks the end of the RPC. Events may arrive after + // this due to races. For example, on client side a trailer + // may arrive even though the application requested to cancel the RPC. 
+ GrpcLogEntry_EVENT_TYPE_CANCEL GrpcLogEntry_EventType = 7 +) + +var GrpcLogEntry_EventType_name = map[int32]string{ + 0: "EVENT_TYPE_UNKNOWN", + 1: "EVENT_TYPE_CLIENT_HEADER", + 2: "EVENT_TYPE_SERVER_HEADER", + 3: "EVENT_TYPE_CLIENT_MESSAGE", + 4: "EVENT_TYPE_SERVER_MESSAGE", + 5: "EVENT_TYPE_CLIENT_HALF_CLOSE", + 6: "EVENT_TYPE_SERVER_TRAILER", + 7: "EVENT_TYPE_CANCEL", +} +var GrpcLogEntry_EventType_value = map[string]int32{ + "EVENT_TYPE_UNKNOWN": 0, + "EVENT_TYPE_CLIENT_HEADER": 1, + "EVENT_TYPE_SERVER_HEADER": 2, + "EVENT_TYPE_CLIENT_MESSAGE": 3, + "EVENT_TYPE_SERVER_MESSAGE": 4, + "EVENT_TYPE_CLIENT_HALF_CLOSE": 5, + "EVENT_TYPE_SERVER_TRAILER": 6, + "EVENT_TYPE_CANCEL": 7, +} + +func (x GrpcLogEntry_EventType) String() string { + return proto.EnumName(GrpcLogEntry_EventType_name, int32(x)) +} +func (GrpcLogEntry_EventType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_binarylog_264c8c9c551ce911, []int{0, 0} +} + +// Enumerates the entity that generates the log entry +type GrpcLogEntry_Logger int32 + +const ( + GrpcLogEntry_LOGGER_UNKNOWN GrpcLogEntry_Logger = 0 + GrpcLogEntry_LOGGER_CLIENT GrpcLogEntry_Logger = 1 + GrpcLogEntry_LOGGER_SERVER GrpcLogEntry_Logger = 2 +) + +var GrpcLogEntry_Logger_name = map[int32]string{ + 0: "LOGGER_UNKNOWN", + 1: "LOGGER_CLIENT", + 2: "LOGGER_SERVER", +} +var GrpcLogEntry_Logger_value = map[string]int32{ + "LOGGER_UNKNOWN": 0, + "LOGGER_CLIENT": 1, + "LOGGER_SERVER": 2, +} + +func (x GrpcLogEntry_Logger) String() string { + return proto.EnumName(GrpcLogEntry_Logger_name, int32(x)) +} +func (GrpcLogEntry_Logger) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_binarylog_264c8c9c551ce911, []int{0, 1} +} + +type Address_Type int32 + +const ( + Address_TYPE_UNKNOWN Address_Type = 0 + // address is in 1.2.3.4 form + Address_TYPE_IPV4 Address_Type = 1 + // address is in IPv6 canonical form (RFC5952 section 4) + // The scope is NOT included in the address string. + Address_TYPE_IPV6 Address_Type = 2 + // address is UDS string + Address_TYPE_UNIX Address_Type = 3 +) + +var Address_Type_name = map[int32]string{ + 0: "TYPE_UNKNOWN", + 1: "TYPE_IPV4", + 2: "TYPE_IPV6", + 3: "TYPE_UNIX", +} +var Address_Type_value = map[string]int32{ + "TYPE_UNKNOWN": 0, + "TYPE_IPV4": 1, + "TYPE_IPV6": 2, + "TYPE_UNIX": 3, +} + +func (x Address_Type) String() string { + return proto.EnumName(Address_Type_name, int32(x)) +} +func (Address_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_binarylog_264c8c9c551ce911, []int{7, 0} +} + +// Log entry we store in binary logs +type GrpcLogEntry struct { + // The timestamp of the binary log message + Timestamp *timestamp.Timestamp `protobuf:"bytes,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + // Uniquely identifies a call. The value must not be 0 in order to disambiguate + // from an unset value. + // Each call may have several log entries, they will all have the same call_id. + // Nothing is guaranteed about their value other than they are unique across + // different RPCs in the same gRPC process. + CallId uint64 `protobuf:"varint,2,opt,name=call_id,json=callId,proto3" json:"call_id,omitempty"` + // The entry sequence id for this call. The first GrpcLogEntry has a + // value of 1, to disambiguate from an unset value. The purpose of + // this field is to detect missing entries in environments where + // durability or ordering is not guaranteed. 
+ SequenceIdWithinCall uint64 `protobuf:"varint,3,opt,name=sequence_id_within_call,json=sequenceIdWithinCall,proto3" json:"sequence_id_within_call,omitempty"` + Type GrpcLogEntry_EventType `protobuf:"varint,4,opt,name=type,proto3,enum=grpc.binarylog.v1.GrpcLogEntry_EventType" json:"type,omitempty"` + Logger GrpcLogEntry_Logger `protobuf:"varint,5,opt,name=logger,proto3,enum=grpc.binarylog.v1.GrpcLogEntry_Logger" json:"logger,omitempty"` + // The logger uses one of the following fields to record the payload, + // according to the type of the log entry. + // + // Types that are valid to be assigned to Payload: + // *GrpcLogEntry_ClientHeader + // *GrpcLogEntry_ServerHeader + // *GrpcLogEntry_Message + // *GrpcLogEntry_Trailer + Payload isGrpcLogEntry_Payload `protobuf_oneof:"payload"` + // true if payload does not represent the full message or metadata. + PayloadTruncated bool `protobuf:"varint,10,opt,name=payload_truncated,json=payloadTruncated,proto3" json:"payload_truncated,omitempty"` + // Peer address information, will only be recorded on the first + // incoming event. On client side, peer is logged on + // EVENT_TYPE_SERVER_HEADER normally or EVENT_TYPE_SERVER_TRAILER in + // the case of trailers-only. On server side, peer is always + // logged on EVENT_TYPE_CLIENT_HEADER. + Peer *Address `protobuf:"bytes,11,opt,name=peer,proto3" json:"peer,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GrpcLogEntry) Reset() { *m = GrpcLogEntry{} } +func (m *GrpcLogEntry) String() string { return proto.CompactTextString(m) } +func (*GrpcLogEntry) ProtoMessage() {} +func (*GrpcLogEntry) Descriptor() ([]byte, []int) { + return fileDescriptor_binarylog_264c8c9c551ce911, []int{0} +} +func (m *GrpcLogEntry) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GrpcLogEntry.Unmarshal(m, b) +} +func (m *GrpcLogEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GrpcLogEntry.Marshal(b, m, deterministic) +} +func (dst *GrpcLogEntry) XXX_Merge(src proto.Message) { + xxx_messageInfo_GrpcLogEntry.Merge(dst, src) +} +func (m *GrpcLogEntry) XXX_Size() int { + return xxx_messageInfo_GrpcLogEntry.Size(m) +} +func (m *GrpcLogEntry) XXX_DiscardUnknown() { + xxx_messageInfo_GrpcLogEntry.DiscardUnknown(m) +} + +var xxx_messageInfo_GrpcLogEntry proto.InternalMessageInfo + +func (m *GrpcLogEntry) GetTimestamp() *timestamp.Timestamp { + if m != nil { + return m.Timestamp + } + return nil +} + +func (m *GrpcLogEntry) GetCallId() uint64 { + if m != nil { + return m.CallId + } + return 0 +} + +func (m *GrpcLogEntry) GetSequenceIdWithinCall() uint64 { + if m != nil { + return m.SequenceIdWithinCall + } + return 0 +} + +func (m *GrpcLogEntry) GetType() GrpcLogEntry_EventType { + if m != nil { + return m.Type + } + return GrpcLogEntry_EVENT_TYPE_UNKNOWN +} + +func (m *GrpcLogEntry) GetLogger() GrpcLogEntry_Logger { + if m != nil { + return m.Logger + } + return GrpcLogEntry_LOGGER_UNKNOWN +} + +type isGrpcLogEntry_Payload interface { + isGrpcLogEntry_Payload() +} + +type GrpcLogEntry_ClientHeader struct { + ClientHeader *ClientHeader `protobuf:"bytes,6,opt,name=client_header,json=clientHeader,proto3,oneof"` +} + +type GrpcLogEntry_ServerHeader struct { + ServerHeader *ServerHeader `protobuf:"bytes,7,opt,name=server_header,json=serverHeader,proto3,oneof"` +} + +type GrpcLogEntry_Message struct { + Message *Message `protobuf:"bytes,8,opt,name=message,proto3,oneof"` +} + +type GrpcLogEntry_Trailer 
struct { + Trailer *Trailer `protobuf:"bytes,9,opt,name=trailer,proto3,oneof"` +} + +func (*GrpcLogEntry_ClientHeader) isGrpcLogEntry_Payload() {} + +func (*GrpcLogEntry_ServerHeader) isGrpcLogEntry_Payload() {} + +func (*GrpcLogEntry_Message) isGrpcLogEntry_Payload() {} + +func (*GrpcLogEntry_Trailer) isGrpcLogEntry_Payload() {} + +func (m *GrpcLogEntry) GetPayload() isGrpcLogEntry_Payload { + if m != nil { + return m.Payload + } + return nil +} + +func (m *GrpcLogEntry) GetClientHeader() *ClientHeader { + if x, ok := m.GetPayload().(*GrpcLogEntry_ClientHeader); ok { + return x.ClientHeader + } + return nil +} + +func (m *GrpcLogEntry) GetServerHeader() *ServerHeader { + if x, ok := m.GetPayload().(*GrpcLogEntry_ServerHeader); ok { + return x.ServerHeader + } + return nil +} + +func (m *GrpcLogEntry) GetMessage() *Message { + if x, ok := m.GetPayload().(*GrpcLogEntry_Message); ok { + return x.Message + } + return nil +} + +func (m *GrpcLogEntry) GetTrailer() *Trailer { + if x, ok := m.GetPayload().(*GrpcLogEntry_Trailer); ok { + return x.Trailer + } + return nil +} + +func (m *GrpcLogEntry) GetPayloadTruncated() bool { + if m != nil { + return m.PayloadTruncated + } + return false +} + +func (m *GrpcLogEntry) GetPeer() *Address { + if m != nil { + return m.Peer + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*GrpcLogEntry) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _GrpcLogEntry_OneofMarshaler, _GrpcLogEntry_OneofUnmarshaler, _GrpcLogEntry_OneofSizer, []interface{}{ + (*GrpcLogEntry_ClientHeader)(nil), + (*GrpcLogEntry_ServerHeader)(nil), + (*GrpcLogEntry_Message)(nil), + (*GrpcLogEntry_Trailer)(nil), + } +} + +func _GrpcLogEntry_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*GrpcLogEntry) + // payload + switch x := m.Payload.(type) { + case *GrpcLogEntry_ClientHeader: + b.EncodeVarint(6<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ClientHeader); err != nil { + return err + } + case *GrpcLogEntry_ServerHeader: + b.EncodeVarint(7<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ServerHeader); err != nil { + return err + } + case *GrpcLogEntry_Message: + b.EncodeVarint(8<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Message); err != nil { + return err + } + case *GrpcLogEntry_Trailer: + b.EncodeVarint(9<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Trailer); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("GrpcLogEntry.Payload has unexpected type %T", x) + } + return nil +} + +func _GrpcLogEntry_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*GrpcLogEntry) + switch tag { + case 6: // payload.client_header + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ClientHeader) + err := b.DecodeMessage(msg) + m.Payload = &GrpcLogEntry_ClientHeader{msg} + return true, err + case 7: // payload.server_header + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ServerHeader) + err := b.DecodeMessage(msg) + m.Payload = &GrpcLogEntry_ServerHeader{msg} + return true, err + case 8: // payload.message + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Message) + err := b.DecodeMessage(msg) + m.Payload = &GrpcLogEntry_Message{msg} + return true, err + case 9: // 
payload.trailer
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(Trailer)
+ err := b.DecodeMessage(msg)
+ m.Payload = &GrpcLogEntry_Trailer{msg}
+ return true, err
+ default:
+ return false, nil
+ }
+}
+
+func _GrpcLogEntry_OneofSizer(msg proto.Message) (n int) {
+ m := msg.(*GrpcLogEntry)
+ // payload
+ switch x := m.Payload.(type) {
+ case *GrpcLogEntry_ClientHeader:
+ s := proto.Size(x.ClientHeader)
+ n += 1 // tag and wire
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case *GrpcLogEntry_ServerHeader:
+ s := proto.Size(x.ServerHeader)
+ n += 1 // tag and wire
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case *GrpcLogEntry_Message:
+ s := proto.Size(x.Message)
+ n += 1 // tag and wire
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case *GrpcLogEntry_Trailer:
+ s := proto.Size(x.Trailer)
+ n += 1 // tag and wire
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case nil:
+ default:
+ panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
+ }
+ return n
+}
+
+type ClientHeader struct {
+ // This contains only the metadata from the application.
+ Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"`
+ // The name of the RPC method, which looks something like:
+ // /<service>/<method>
+ // Note the leading "/" character.
+ MethodName string `protobuf:"bytes,2,opt,name=method_name,json=methodName,proto3" json:"method_name,omitempty"`
+ // A single process may be used to run multiple virtual
+ // servers with different identities.
+ // The authority is the name of such a server identity.
+ // It is typically a portion of the URI in the form of
+ // <host> or <host>:<port>.
+ Authority string `protobuf:"bytes,3,opt,name=authority,proto3" json:"authority,omitempty"`
+ // the RPC timeout
+ Timeout *duration.Duration `protobuf:"bytes,4,opt,name=timeout,proto3" json:"timeout,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ClientHeader) Reset() { *m = ClientHeader{} }
+func (m *ClientHeader) String() string { return proto.CompactTextString(m) }
+func (*ClientHeader) ProtoMessage() {}
+func (*ClientHeader) Descriptor() ([]byte, []int) {
+ return fileDescriptor_binarylog_264c8c9c551ce911, []int{1}
+}
+func (m *ClientHeader) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_ClientHeader.Unmarshal(m, b)
+}
+func (m *ClientHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_ClientHeader.Marshal(b, m, deterministic)
+}
+func (dst *ClientHeader) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ClientHeader.Merge(dst, src)
+}
+func (m *ClientHeader) XXX_Size() int {
+ return xxx_messageInfo_ClientHeader.Size(m)
+}
+func (m *ClientHeader) XXX_DiscardUnknown() {
+ xxx_messageInfo_ClientHeader.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ClientHeader proto.InternalMessageInfo
+
+func (m *ClientHeader) GetMetadata() *Metadata {
+ if m != nil {
+ return m.Metadata
+ }
+ return nil
+}
+
+func (m *ClientHeader) GetMethodName() string {
+ if m != nil {
+ return m.MethodName
+ }
+ return ""
+}
+
+func (m *ClientHeader) GetAuthority() string {
+ if m != nil {
+ return m.Authority
+ }
+ return ""
+}
+
+func (m *ClientHeader) GetTimeout() *duration.Duration {
+ if m != nil {
+ return m.Timeout
+ }
+ return nil
+}
+
+type ServerHeader struct {
+ // This contains only the metadata from the application.
+ Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ServerHeader) Reset() { *m = ServerHeader{} } +func (m *ServerHeader) String() string { return proto.CompactTextString(m) } +func (*ServerHeader) ProtoMessage() {} +func (*ServerHeader) Descriptor() ([]byte, []int) { + return fileDescriptor_binarylog_264c8c9c551ce911, []int{2} +} +func (m *ServerHeader) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ServerHeader.Unmarshal(m, b) +} +func (m *ServerHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ServerHeader.Marshal(b, m, deterministic) +} +func (dst *ServerHeader) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServerHeader.Merge(dst, src) +} +func (m *ServerHeader) XXX_Size() int { + return xxx_messageInfo_ServerHeader.Size(m) +} +func (m *ServerHeader) XXX_DiscardUnknown() { + xxx_messageInfo_ServerHeader.DiscardUnknown(m) +} + +var xxx_messageInfo_ServerHeader proto.InternalMessageInfo + +func (m *ServerHeader) GetMetadata() *Metadata { + if m != nil { + return m.Metadata + } + return nil +} + +type Trailer struct { + // This contains only the metadata from the application. + Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"` + // The gRPC status code. + StatusCode uint32 `protobuf:"varint,2,opt,name=status_code,json=statusCode,proto3" json:"status_code,omitempty"` + // An original status message before any transport specific + // encoding. + StatusMessage string `protobuf:"bytes,3,opt,name=status_message,json=statusMessage,proto3" json:"status_message,omitempty"` + // The value of the 'grpc-status-details-bin' metadata key. If + // present, this is always an encoded 'google.rpc.Status' message. + StatusDetails []byte `protobuf:"bytes,4,opt,name=status_details,json=statusDetails,proto3" json:"status_details,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Trailer) Reset() { *m = Trailer{} } +func (m *Trailer) String() string { return proto.CompactTextString(m) } +func (*Trailer) ProtoMessage() {} +func (*Trailer) Descriptor() ([]byte, []int) { + return fileDescriptor_binarylog_264c8c9c551ce911, []int{3} +} +func (m *Trailer) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Trailer.Unmarshal(m, b) +} +func (m *Trailer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Trailer.Marshal(b, m, deterministic) +} +func (dst *Trailer) XXX_Merge(src proto.Message) { + xxx_messageInfo_Trailer.Merge(dst, src) +} +func (m *Trailer) XXX_Size() int { + return xxx_messageInfo_Trailer.Size(m) +} +func (m *Trailer) XXX_DiscardUnknown() { + xxx_messageInfo_Trailer.DiscardUnknown(m) +} + +var xxx_messageInfo_Trailer proto.InternalMessageInfo + +func (m *Trailer) GetMetadata() *Metadata { + if m != nil { + return m.Metadata + } + return nil +} + +func (m *Trailer) GetStatusCode() uint32 { + if m != nil { + return m.StatusCode + } + return 0 +} + +func (m *Trailer) GetStatusMessage() string { + if m != nil { + return m.StatusMessage + } + return "" +} + +func (m *Trailer) GetStatusDetails() []byte { + if m != nil { + return m.StatusDetails + } + return nil +} + +// Message payload, used by CLIENT_MESSAGE and SERVER_MESSAGE +type Message struct { + // Length of the message. 
It may not be the same as the length of the + // data field, as the logging payload can be truncated or omitted. + Length uint32 `protobuf:"varint,1,opt,name=length,proto3" json:"length,omitempty"` + // May be truncated or omitted. + Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Message) Reset() { *m = Message{} } +func (m *Message) String() string { return proto.CompactTextString(m) } +func (*Message) ProtoMessage() {} +func (*Message) Descriptor() ([]byte, []int) { + return fileDescriptor_binarylog_264c8c9c551ce911, []int{4} +} +func (m *Message) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Message.Unmarshal(m, b) +} +func (m *Message) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Message.Marshal(b, m, deterministic) +} +func (dst *Message) XXX_Merge(src proto.Message) { + xxx_messageInfo_Message.Merge(dst, src) +} +func (m *Message) XXX_Size() int { + return xxx_messageInfo_Message.Size(m) +} +func (m *Message) XXX_DiscardUnknown() { + xxx_messageInfo_Message.DiscardUnknown(m) +} + +var xxx_messageInfo_Message proto.InternalMessageInfo + +func (m *Message) GetLength() uint32 { + if m != nil { + return m.Length + } + return 0 +} + +func (m *Message) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +// A list of metadata pairs, used in the payload of client header, +// server header, and server trailer. +// Implementations may omit some entries to honor the header limits +// of GRPC_BINARY_LOG_CONFIG. +// +// Header keys added by gRPC are omitted. To be more specific, +// implementations will not log the following entries, and this is +// not to be treated as a truncation: +// - entries handled by grpc that are not user visible, such as those +// that begin with 'grpc-' (with exception of grpc-trace-bin) +// or keys like 'lb-token' +// - transport specific entries, including but not limited to: +// ':path', ':authority', 'content-encoding', 'user-agent', 'te', etc +// - entries added for call credentials +// +// Implementations must always log grpc-trace-bin if it is present. +// Practically speaking it will only be visible on server side because +// grpc-trace-bin is managed by low level client side mechanisms +// inaccessible from the application level. On server side, the +// header is just a normal metadata key. +// The pair will not count towards the size limit. 
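Because the exported surface of this generated file is plain structs, enums, and nil-safe getters, a short sketch of client code assembling one log entry may help when reading the definitions that follow; this is illustrative only and assumes the vendored import path named at the top of the file:

package main

import (
	"fmt"
	"time"

	"github.com/golang/protobuf/ptypes/timestamp"
	binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
)

func main() {
	t := time.Now()
	// A client-message entry; the oneof payload is chosen by the wrapper
	// type, here GrpcLogEntry_Message.
	e := &binlogpb.GrpcLogEntry{
		Timestamp:            &timestamp.Timestamp{Seconds: t.Unix(), Nanos: int32(t.Nanosecond())},
		CallId:               1,
		SequenceIdWithinCall: 1,
		Type:                 binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE,
		Logger:               binlogpb.GrpcLogEntry_LOGGER_CLIENT,
		Payload: &binlogpb.GrpcLogEntry_Message{
			Message: &binlogpb.Message{Length: 5, Data: []byte("hello")},
		},
	}
	// The generated getters are nil-safe and return the zero value.
	fmt.Println(e.GetType(), e.GetMessage().GetLength())
}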
+type Metadata struct { + Entry []*MetadataEntry `protobuf:"bytes,1,rep,name=entry,proto3" json:"entry,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Metadata) Reset() { *m = Metadata{} } +func (m *Metadata) String() string { return proto.CompactTextString(m) } +func (*Metadata) ProtoMessage() {} +func (*Metadata) Descriptor() ([]byte, []int) { + return fileDescriptor_binarylog_264c8c9c551ce911, []int{5} +} +func (m *Metadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Metadata.Unmarshal(m, b) +} +func (m *Metadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Metadata.Marshal(b, m, deterministic) +} +func (dst *Metadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_Metadata.Merge(dst, src) +} +func (m *Metadata) XXX_Size() int { + return xxx_messageInfo_Metadata.Size(m) +} +func (m *Metadata) XXX_DiscardUnknown() { + xxx_messageInfo_Metadata.DiscardUnknown(m) +} + +var xxx_messageInfo_Metadata proto.InternalMessageInfo + +func (m *Metadata) GetEntry() []*MetadataEntry { + if m != nil { + return m.Entry + } + return nil +} + +// A metadata key value pair +type MetadataEntry struct { + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MetadataEntry) Reset() { *m = MetadataEntry{} } +func (m *MetadataEntry) String() string { return proto.CompactTextString(m) } +func (*MetadataEntry) ProtoMessage() {} +func (*MetadataEntry) Descriptor() ([]byte, []int) { + return fileDescriptor_binarylog_264c8c9c551ce911, []int{6} +} +func (m *MetadataEntry) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MetadataEntry.Unmarshal(m, b) +} +func (m *MetadataEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MetadataEntry.Marshal(b, m, deterministic) +} +func (dst *MetadataEntry) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetadataEntry.Merge(dst, src) +} +func (m *MetadataEntry) XXX_Size() int { + return xxx_messageInfo_MetadataEntry.Size(m) +} +func (m *MetadataEntry) XXX_DiscardUnknown() { + xxx_messageInfo_MetadataEntry.DiscardUnknown(m) +} + +var xxx_messageInfo_MetadataEntry proto.InternalMessageInfo + +func (m *MetadataEntry) GetKey() string { + if m != nil { + return m.Key + } + return "" +} + +func (m *MetadataEntry) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +// Address information +type Address struct { + Type Address_Type `protobuf:"varint,1,opt,name=type,proto3,enum=grpc.binarylog.v1.Address_Type" json:"type,omitempty"` + Address string `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"` + // only for TYPE_IPV4 and TYPE_IPV6 + IpPort uint32 `protobuf:"varint,3,opt,name=ip_port,json=ipPort,proto3" json:"ip_port,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Address) Reset() { *m = Address{} } +func (m *Address) String() string { return proto.CompactTextString(m) } +func (*Address) ProtoMessage() {} +func (*Address) Descriptor() ([]byte, []int) { + return fileDescriptor_binarylog_264c8c9c551ce911, []int{7} +} +func (m *Address) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Address.Unmarshal(m, b) +} +func (m *Address) 
XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Address.Marshal(b, m, deterministic) +} +func (dst *Address) XXX_Merge(src proto.Message) { + xxx_messageInfo_Address.Merge(dst, src) +} +func (m *Address) XXX_Size() int { + return xxx_messageInfo_Address.Size(m) +} +func (m *Address) XXX_DiscardUnknown() { + xxx_messageInfo_Address.DiscardUnknown(m) +} + +var xxx_messageInfo_Address proto.InternalMessageInfo + +func (m *Address) GetType() Address_Type { + if m != nil { + return m.Type + } + return Address_TYPE_UNKNOWN +} + +func (m *Address) GetAddress() string { + if m != nil { + return m.Address + } + return "" +} + +func (m *Address) GetIpPort() uint32 { + if m != nil { + return m.IpPort + } + return 0 +} + +func init() { + proto.RegisterType((*GrpcLogEntry)(nil), "grpc.binarylog.v1.GrpcLogEntry") + proto.RegisterType((*ClientHeader)(nil), "grpc.binarylog.v1.ClientHeader") + proto.RegisterType((*ServerHeader)(nil), "grpc.binarylog.v1.ServerHeader") + proto.RegisterType((*Trailer)(nil), "grpc.binarylog.v1.Trailer") + proto.RegisterType((*Message)(nil), "grpc.binarylog.v1.Message") + proto.RegisterType((*Metadata)(nil), "grpc.binarylog.v1.Metadata") + proto.RegisterType((*MetadataEntry)(nil), "grpc.binarylog.v1.MetadataEntry") + proto.RegisterType((*Address)(nil), "grpc.binarylog.v1.Address") + proto.RegisterEnum("grpc.binarylog.v1.GrpcLogEntry_EventType", GrpcLogEntry_EventType_name, GrpcLogEntry_EventType_value) + proto.RegisterEnum("grpc.binarylog.v1.GrpcLogEntry_Logger", GrpcLogEntry_Logger_name, GrpcLogEntry_Logger_value) + proto.RegisterEnum("grpc.binarylog.v1.Address_Type", Address_Type_name, Address_Type_value) +} + +func init() { + proto.RegisterFile("grpc/binarylog/grpc_binarylog_v1/binarylog.proto", fileDescriptor_binarylog_264c8c9c551ce911) +} + +var fileDescriptor_binarylog_264c8c9c551ce911 = []byte{ + // 900 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0x51, 0x6f, 0xe3, 0x44, + 0x10, 0x3e, 0x37, 0x69, 0xdc, 0x4c, 0x92, 0xca, 0x5d, 0x95, 0x3b, 0x5f, 0x29, 0x34, 0xb2, 0x04, + 0x0a, 0x42, 0x72, 0xb9, 0x94, 0xeb, 0xf1, 0x02, 0x52, 0x92, 0xfa, 0xd2, 0x88, 0x5c, 0x1a, 0x6d, + 0x72, 0x3d, 0x40, 0x48, 0xd6, 0x36, 0x5e, 0x1c, 0x0b, 0xc7, 0x6b, 0xd6, 0x9b, 0xa0, 0xfc, 0x2c, + 0xde, 0x90, 0xee, 0x77, 0xf1, 0x8e, 0xbc, 0x6b, 0x27, 0xa6, 0x69, 0x0f, 0x09, 0xde, 0x3c, 0xdf, + 0x7c, 0xf3, 0xcd, 0xee, 0x78, 0x66, 0x16, 0xbe, 0xf2, 0x79, 0x3c, 0x3b, 0xbf, 0x0b, 0x22, 0xc2, + 0xd7, 0x21, 0xf3, 0xcf, 0x53, 0xd3, 0xdd, 0x98, 0xee, 0xea, 0xc5, 0xd6, 0x67, 0xc7, 0x9c, 0x09, + 0x86, 0x8e, 0x52, 0x8a, 0xbd, 0x45, 0x57, 0x2f, 0x4e, 0x3e, 0xf5, 0x19, 0xf3, 0x43, 0x7a, 0x2e, + 0x09, 0x77, 0xcb, 0x5f, 0xce, 0xbd, 0x25, 0x27, 0x22, 0x60, 0x91, 0x0a, 0x39, 0x39, 0xbb, 0xef, + 0x17, 0xc1, 0x82, 0x26, 0x82, 0x2c, 0x62, 0x45, 0xb0, 0xde, 0xeb, 0x50, 0xef, 0xf3, 0x78, 0x36, + 0x64, 0xbe, 0x13, 0x09, 0xbe, 0x46, 0xdf, 0x40, 0x75, 0xc3, 0x31, 0xb5, 0xa6, 0xd6, 0xaa, 0xb5, + 0x4f, 0x6c, 0xa5, 0x62, 0xe7, 0x2a, 0xf6, 0x34, 0x67, 0xe0, 0x2d, 0x19, 0x3d, 0x03, 0x7d, 0x46, + 0xc2, 0xd0, 0x0d, 0x3c, 0x73, 0xaf, 0xa9, 0xb5, 0xca, 0xb8, 0x92, 0x9a, 0x03, 0x0f, 0xbd, 0x84, + 0x67, 0x09, 0xfd, 0x6d, 0x49, 0xa3, 0x19, 0x75, 0x03, 0xcf, 0xfd, 0x3d, 0x10, 0xf3, 0x20, 0x72, + 0x53, 0xa7, 0x59, 0x92, 0xc4, 0xe3, 0xdc, 0x3d, 0xf0, 0xde, 0x49, 0x67, 0x8f, 0x84, 0x21, 0xfa, + 0x16, 0xca, 0x62, 0x1d, 0x53, 0xb3, 0xdc, 0xd4, 0x5a, 0x87, 0xed, 0x2f, 0xec, 0x9d, 0xdb, 0xdb, + 0xc5, 0x83, 0xdb, 0xce, 0x8a, 0x46, 0x62, 0xba, 0x8e, 
0x29, 0x96, 0x61, 0xe8, 0x3b, 0xa8, 0x84, + 0xcc, 0xf7, 0x29, 0x37, 0xf7, 0xa5, 0xc0, 0xe7, 0xff, 0x26, 0x30, 0x94, 0x6c, 0x9c, 0x45, 0xa1, + 0xd7, 0xd0, 0x98, 0x85, 0x01, 0x8d, 0x84, 0x3b, 0xa7, 0xc4, 0xa3, 0xdc, 0xac, 0xc8, 0x62, 0x9c, + 0x3d, 0x20, 0xd3, 0x93, 0xbc, 0x6b, 0x49, 0xbb, 0x7e, 0x82, 0xeb, 0xb3, 0x82, 0x9d, 0xea, 0x24, + 0x94, 0xaf, 0x28, 0xcf, 0x75, 0xf4, 0x47, 0x75, 0x26, 0x92, 0xb7, 0xd5, 0x49, 0x0a, 0x36, 0xba, + 0x04, 0x7d, 0x41, 0x93, 0x84, 0xf8, 0xd4, 0x3c, 0xc8, 0x7f, 0xcb, 0x8e, 0xc2, 0x1b, 0xc5, 0xb8, + 0x7e, 0x82, 0x73, 0x72, 0x1a, 0x27, 0x38, 0x09, 0x42, 0xca, 0xcd, 0xea, 0xa3, 0x71, 0x53, 0xc5, + 0x48, 0xe3, 0x32, 0x32, 0xfa, 0x12, 0x8e, 0x62, 0xb2, 0x0e, 0x19, 0xf1, 0x5c, 0xc1, 0x97, 0xd1, + 0x8c, 0x08, 0xea, 0x99, 0xd0, 0xd4, 0x5a, 0x07, 0xd8, 0xc8, 0x1c, 0xd3, 0x1c, 0x47, 0x36, 0x94, + 0x63, 0x4a, 0xb9, 0x59, 0x7b, 0x34, 0x43, 0xc7, 0xf3, 0x38, 0x4d, 0x12, 0x2c, 0x79, 0xd6, 0x5f, + 0x1a, 0x54, 0x37, 0x3f, 0x0c, 0x3d, 0x05, 0xe4, 0xdc, 0x3a, 0xa3, 0xa9, 0x3b, 0xfd, 0x71, 0xec, + 0xb8, 0x6f, 0x47, 0xdf, 0x8f, 0x6e, 0xde, 0x8d, 0x8c, 0x27, 0xe8, 0x14, 0xcc, 0x02, 0xde, 0x1b, + 0x0e, 0xd2, 0xef, 0x6b, 0xa7, 0x73, 0xe5, 0x60, 0x43, 0xbb, 0xe7, 0x9d, 0x38, 0xf8, 0xd6, 0xc1, + 0xb9, 0x77, 0x0f, 0x7d, 0x02, 0xcf, 0x77, 0x63, 0xdf, 0x38, 0x93, 0x49, 0xa7, 0xef, 0x18, 0xa5, + 0x7b, 0xee, 0x2c, 0x38, 0x77, 0x97, 0x51, 0x13, 0x4e, 0x1f, 0xc8, 0xdc, 0x19, 0xbe, 0x76, 0x7b, + 0xc3, 0x9b, 0x89, 0x63, 0xec, 0x3f, 0x2c, 0x30, 0xc5, 0x9d, 0xc1, 0xd0, 0xc1, 0x46, 0x05, 0x7d, + 0x04, 0x47, 0x45, 0x81, 0xce, 0xa8, 0xe7, 0x0c, 0x0d, 0xdd, 0xea, 0x42, 0x45, 0xb5, 0x19, 0x42, + 0x70, 0x38, 0xbc, 0xe9, 0xf7, 0x1d, 0x5c, 0xb8, 0xef, 0x11, 0x34, 0x32, 0x4c, 0x65, 0x34, 0xb4, + 0x02, 0xa4, 0x52, 0x18, 0x7b, 0xdd, 0x2a, 0xe8, 0x59, 0xfd, 0xad, 0xf7, 0x1a, 0xd4, 0x8b, 0xcd, + 0x87, 0x5e, 0xc1, 0xc1, 0x82, 0x0a, 0xe2, 0x11, 0x41, 0xb2, 0xe1, 0xfd, 0xf8, 0xc1, 0x2e, 0x51, + 0x14, 0xbc, 0x21, 0xa3, 0x33, 0xa8, 0x2d, 0xa8, 0x98, 0x33, 0xcf, 0x8d, 0xc8, 0x82, 0xca, 0x01, + 0xae, 0x62, 0x50, 0xd0, 0x88, 0x2c, 0x28, 0x3a, 0x85, 0x2a, 0x59, 0x8a, 0x39, 0xe3, 0x81, 0x58, + 0xcb, 0xb1, 0xad, 0xe2, 0x2d, 0x80, 0x2e, 0x40, 0x4f, 0x17, 0x01, 0x5b, 0x0a, 0x39, 0xae, 0xb5, + 0xf6, 0xf3, 0x9d, 0x9d, 0x71, 0x95, 0x6d, 0x26, 0x9c, 0x33, 0xad, 0x3e, 0xd4, 0x8b, 0x1d, 0xff, + 0x9f, 0x0f, 0x6f, 0xfd, 0xa1, 0x81, 0x9e, 0x75, 0xf0, 0xff, 0xaa, 0x40, 0x22, 0x88, 0x58, 0x26, + 0xee, 0x8c, 0x79, 0xaa, 0x02, 0x0d, 0x0c, 0x0a, 0xea, 0x31, 0x8f, 0xa2, 0xcf, 0xe0, 0x30, 0x23, + 0xe4, 0x73, 0xa8, 0xca, 0xd0, 0x50, 0x68, 0x36, 0x7a, 0x05, 0x9a, 0x47, 0x05, 0x09, 0xc2, 0x44, + 0x56, 0xa4, 0x9e, 0xd3, 0xae, 0x14, 0x68, 0xbd, 0x04, 0x3d, 0x8f, 0x78, 0x0a, 0x95, 0x90, 0x46, + 0xbe, 0x98, 0xcb, 0x03, 0x37, 0x70, 0x66, 0x21, 0x04, 0x65, 0x79, 0x8d, 0x3d, 0x19, 0x2f, 0xbf, + 0xad, 0x2e, 0x1c, 0xe4, 0x67, 0x47, 0x97, 0xb0, 0x4f, 0xd3, 0xcd, 0x65, 0x6a, 0xcd, 0x52, 0xab, + 0xd6, 0x6e, 0x7e, 0xe0, 0x9e, 0x72, 0xc3, 0x61, 0x45, 0xb7, 0x5e, 0x41, 0xe3, 0x1f, 0x38, 0x32, + 0xa0, 0xf4, 0x2b, 0x5d, 0xcb, 0xec, 0x55, 0x9c, 0x7e, 0xa2, 0x63, 0xd8, 0x5f, 0x91, 0x70, 0x49, + 0xb3, 0xdc, 0xca, 0xb0, 0xfe, 0xd4, 0x40, 0xcf, 0xe6, 0x18, 0x5d, 0x64, 0xdb, 0x59, 0x93, 0xcb, + 0xf5, 0xec, 0xf1, 0x89, 0xb7, 0x0b, 0x3b, 0xd9, 0x04, 0x9d, 0x28, 0x34, 0xeb, 0xb0, 0xdc, 0x4c, + 0x1f, 0x8f, 0x20, 0x76, 0x63, 0xc6, 0x85, 0xac, 0x6a, 0x03, 0x57, 0x82, 0x78, 0xcc, 0xb8, 0xb0, + 0x1c, 0x28, 0xcb, 0x1d, 0x61, 0x40, 0xfd, 0xde, 0x76, 0x68, 0x40, 0x55, 0x22, 0x83, 0xf1, 0xed, + 0xd7, 0x86, 0x56, 0x34, 0x2f, 0x8d, 0xbd, 0x8d, 0xf9, 0x76, 0x34, 0xf8, 0xc1, 
0x28, 0x75, 0x7f, + 0x86, 0xe3, 0x80, 0xed, 0x1e, 0xb2, 0x7b, 0xd8, 0x95, 0xd6, 0x90, 0xf9, 0xe3, 0xb4, 0x51, 0xc7, + 0xda, 0x4f, 0xed, 0xac, 0x71, 0x7d, 0x16, 0x92, 0xc8, 0xb7, 0x19, 0x57, 0x4f, 0xf3, 0x87, 0x5e, + 0xea, 0xbb, 0x8a, 0xec, 0xf2, 0x8b, 0xbf, 0x03, 0x00, 0x00, 0xff, 0xff, 0xe7, 0xf6, 0x4b, 0x50, + 0xd4, 0x07, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/grpc/call.go b/vendor/google.golang.org/grpc/call.go new file mode 100644 index 000000000..9e20e4d38 --- /dev/null +++ b/vendor/google.golang.org/grpc/call.go @@ -0,0 +1,74 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "context" +) + +// Invoke sends the RPC request on the wire and returns after response is +// received. This is typically called by generated code. +// +// All errors returned by Invoke are compatible with the status package. +func (cc *ClientConn) Invoke(ctx context.Context, method string, args, reply interface{}, opts ...CallOption) error { + // allow interceptor to see all applicable call options, which means those + // configured as defaults from dial option as well as per-call options + opts = combine(cc.dopts.callOptions, opts) + + if cc.dopts.unaryInt != nil { + return cc.dopts.unaryInt(ctx, method, args, reply, cc, invoke, opts...) + } + return invoke(ctx, method, args, reply, cc, opts...) +} + +func combine(o1 []CallOption, o2 []CallOption) []CallOption { + // we don't use append because o1 could have extra capacity whose + // elements would be overwritten, which could cause inadvertent + // sharing (and race conditions) between concurrent calls + if len(o1) == 0 { + return o2 + } else if len(o2) == 0 { + return o1 + } + ret := make([]CallOption, len(o1)+len(o2)) + copy(ret, o1) + copy(ret[len(o1):], o2) + return ret +} + +// Invoke sends the RPC request on the wire and returns after response is +// received. This is typically called by generated code. +// +// DEPRECATED: Use ClientConn.Invoke instead. +func Invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) error { + return cc.Invoke(ctx, method, args, reply, opts...) +} + +var unaryStreamDesc = &StreamDesc{ServerStreams: false, ClientStreams: false} + +func invoke(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error { + cs, err := newClientStream(ctx, unaryStreamDesc, cc, method, opts...) + if err != nil { + return err + } + if err := cs.SendMsg(req); err != nil { + return err + } + return cs.RecvMsg(reply) +} diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go new file mode 100644 index 000000000..84e31a267 --- /dev/null +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -0,0 +1,1433 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "context" + "errors" + "fmt" + "math" + "net" + "reflect" + "strings" + "sync" + "sync/atomic" + "time" + + "google.golang.org/grpc/balancer" + _ "google.golang.org/grpc/balancer/roundrobin" // To register roundrobin. + "google.golang.org/grpc/codes" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/backoff" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/envconfig" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/resolver" + _ "google.golang.org/grpc/resolver/dns" // To register dns resolver. + _ "google.golang.org/grpc/resolver/passthrough" // To register passthrough resolver. + "google.golang.org/grpc/serviceconfig" + "google.golang.org/grpc/status" +) + +const ( + // minimum time to give a connection to complete + minConnectTimeout = 20 * time.Second + // must match grpclbName in grpclb/grpclb.go + grpclbName = "grpclb" +) + +var ( + // ErrClientConnClosing indicates that the operation is illegal because + // the ClientConn is closing. + // + // Deprecated: this error should not be relied upon by users; use the status + // code of Canceled instead. + ErrClientConnClosing = status.Error(codes.Canceled, "grpc: the client connection is closing") + // errConnDrain indicates that the connection starts to be drained and does not accept any new RPCs. + errConnDrain = errors.New("grpc: the connection is drained") + // errConnClosing indicates that the connection is closing. + errConnClosing = errors.New("grpc: the connection is closing") + // errBalancerClosed indicates that the balancer is closed. + errBalancerClosed = errors.New("grpc: balancer is closed") + // invalidDefaultServiceConfigErrPrefix is used to prefix the json parsing error for the default + // service config. + invalidDefaultServiceConfigErrPrefix = "grpc: the provided default service config is invalid" +) + +// The following errors are returned from Dial and DialContext +var ( + // errNoTransportSecurity indicates that there is no transport security + // being set for ClientConn. Users should either set one or explicitly + // call WithInsecure DialOption to disable security. + errNoTransportSecurity = errors.New("grpc: no transport security set (use grpc.WithInsecure() explicitly or set credentials)") + // errTransportCredsAndBundle indicates that creds bundle is used together + // with other individual Transport Credentials. + errTransportCredsAndBundle = errors.New("grpc: credentials.Bundle may not be used with individual TransportCredentials") + // errTransportCredentialsMissing indicates that users want to transmit security + // information (e.g., OAuth2 token) which requires secure connection on an insecure + // connection. 
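To stay clear of the transport-security errors defined here, a caller either supplies transport credentials or opts out explicitly; a minimal sketch (the CA file path and targets are placeholders):

    creds, err := credentials.NewClientTLSFromFile("ca.pem", "")
    if err != nil {
        log.Fatal(err)
    }
    conn, err := grpc.Dial("example.com:443", grpc.WithTransportCredentials(creds))
    // ...or explicitly insecure; combining both options yields errCredentialsConflict:
    // conn, err := grpc.Dial("example.com:80", grpc.WithInsecure())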
+ errTransportCredentialsMissing = errors.New("grpc: the credentials require transport level security (use grpc.WithTransportCredentials() to set)") + // errCredentialsConflict indicates that grpc.WithTransportCredentials() + // and grpc.WithInsecure() are both called for a connection. + errCredentialsConflict = errors.New("grpc: transport credentials are set for an insecure connection (grpc.WithTransportCredentials() and grpc.WithInsecure() are both called)") +) + +const ( + defaultClientMaxReceiveMessageSize = 1024 * 1024 * 4 + defaultClientMaxSendMessageSize = math.MaxInt32 + // http2IOBufSize specifies the buffer size for sending frames. + defaultWriteBufSize = 32 * 1024 + defaultReadBufSize = 32 * 1024 +) + +// Dial creates a client connection to the given target. +func Dial(target string, opts ...DialOption) (*ClientConn, error) { + return DialContext(context.Background(), target, opts...) +} + +// DialContext creates a client connection to the given target. By default, it's +// a non-blocking dial (the function won't wait for connections to be +// established, and connecting happens in the background). To make it a blocking +// dial, use WithBlock() dial option. +// +// In the non-blocking case, the ctx does not act against the connection. It +// only controls the setup steps. +// +// In the blocking case, ctx can be used to cancel or expire the pending +// connection. Once this function returns, the cancellation and expiration of +// ctx will be noop. Users should call ClientConn.Close to terminate all the +// pending operations after this function returns. +// +// The target name syntax is defined in +// https://github.com/grpc/grpc/blob/master/doc/naming.md. +// e.g. to use dns resolver, a "dns:///" prefix should be applied to the target. 
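Per the DialContext contract described above, a bounded blocking dial looks like this (a sketch; the target is a placeholder):

    ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
    defer cancel()
    conn, err := grpc.DialContext(ctx, "dns:///example.com:50051",
        grpc.WithInsecure(),
        grpc.WithBlock(), // block until READY or ctx expires
    )
    if err != nil {
        log.Fatalf("dial: %v", err)
    }
    defer conn.Close()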
+func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) { + cc := &ClientConn{ + target: target, + csMgr: &connectivityStateManager{}, + conns: make(map[*addrConn]struct{}), + dopts: defaultDialOptions(), + blockingpicker: newPickerWrapper(), + czData: new(channelzData), + firstResolveEvent: grpcsync.NewEvent(), + } + cc.retryThrottler.Store((*retryThrottler)(nil)) + cc.ctx, cc.cancel = context.WithCancel(context.Background()) + + for _, opt := range opts { + opt.apply(&cc.dopts) + } + + chainUnaryClientInterceptors(cc) + chainStreamClientInterceptors(cc) + + defer func() { + if err != nil { + cc.Close() + } + }() + + if channelz.IsOn() { + if cc.dopts.channelzParentID != 0 { + cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, cc.dopts.channelzParentID, target) + channelz.AddTraceEvent(cc.channelzID, &channelz.TraceEventDesc{ + Desc: "Channel Created", + Severity: channelz.CtINFO, + Parent: &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Nested Channel(id:%d) created", cc.channelzID), + Severity: channelz.CtINFO, + }, + }) + } else { + cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, 0, target) + channelz.AddTraceEvent(cc.channelzID, &channelz.TraceEventDesc{ + Desc: "Channel Created", + Severity: channelz.CtINFO, + }) + } + cc.csMgr.channelzID = cc.channelzID + } + + if !cc.dopts.insecure { + if cc.dopts.copts.TransportCredentials == nil && cc.dopts.copts.CredsBundle == nil { + return nil, errNoTransportSecurity + } + if cc.dopts.copts.TransportCredentials != nil && cc.dopts.copts.CredsBundle != nil { + return nil, errTransportCredsAndBundle + } + } else { + if cc.dopts.copts.TransportCredentials != nil || cc.dopts.copts.CredsBundle != nil { + return nil, errCredentialsConflict + } + for _, cd := range cc.dopts.copts.PerRPCCredentials { + if cd.RequireTransportSecurity() { + return nil, errTransportCredentialsMissing + } + } + } + + if cc.dopts.defaultServiceConfigRawJSON != nil { + sc, err := parseServiceConfig(*cc.dopts.defaultServiceConfigRawJSON) + if err != nil { + return nil, fmt.Errorf("%s: %v", invalidDefaultServiceConfigErrPrefix, err) + } + cc.dopts.defaultServiceConfig = sc + } + cc.mkp = cc.dopts.copts.KeepaliveParams + + if cc.dopts.copts.Dialer == nil { + cc.dopts.copts.Dialer = newProxyDialer( + func(ctx context.Context, addr string) (net.Conn, error) { + network, addr := parseDialTarget(addr) + return (&net.Dialer{}).DialContext(ctx, network, addr) + }, + ) + } + + if cc.dopts.copts.UserAgent != "" { + cc.dopts.copts.UserAgent += " " + grpcUA + } else { + cc.dopts.copts.UserAgent = grpcUA + } + + if cc.dopts.timeout > 0 { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, cc.dopts.timeout) + defer cancel() + } + defer func() { + select { + case <-ctx.Done(): + conn, err = nil, ctx.Err() + default: + } + }() + + scSet := false + if cc.dopts.scChan != nil { + // Try to get an initial service config. + select { + case sc, ok := <-cc.dopts.scChan: + if ok { + cc.sc = &sc + scSet = true + } + default: + } + } + if cc.dopts.bs == nil { + cc.dopts.bs = backoff.Exponential{ + MaxDelay: DefaultBackoffConfig.MaxDelay, + } + } + if cc.dopts.resolverBuilder == nil { + // Only try to parse target when resolver builder is not already set. 
+ cc.parsedTarget = parseTarget(cc.target) + grpclog.Infof("parsed scheme: %q", cc.parsedTarget.Scheme) + cc.dopts.resolverBuilder = resolver.Get(cc.parsedTarget.Scheme) + if cc.dopts.resolverBuilder == nil { + // If resolver builder is still nil, the parsed target's scheme is + // not registered. Fallback to default resolver and set Endpoint to + // the original target. + grpclog.Infof("scheme %q not registered, fallback to default scheme", cc.parsedTarget.Scheme) + cc.parsedTarget = resolver.Target{ + Scheme: resolver.GetDefaultScheme(), + Endpoint: target, + } + cc.dopts.resolverBuilder = resolver.Get(cc.parsedTarget.Scheme) + } + } else { + cc.parsedTarget = resolver.Target{Endpoint: target} + } + creds := cc.dopts.copts.TransportCredentials + if creds != nil && creds.Info().ServerName != "" { + cc.authority = creds.Info().ServerName + } else if cc.dopts.insecure && cc.dopts.authority != "" { + cc.authority = cc.dopts.authority + } else { + // Use endpoint from "scheme://authority/endpoint" as the default + // authority for ClientConn. + cc.authority = cc.parsedTarget.Endpoint + } + + if cc.dopts.scChan != nil && !scSet { + // Blocking wait for the initial service config. + select { + case sc, ok := <-cc.dopts.scChan: + if ok { + cc.sc = &sc + } + case <-ctx.Done(): + return nil, ctx.Err() + } + } + if cc.dopts.scChan != nil { + go cc.scWatcher() + } + + var credsClone credentials.TransportCredentials + if creds := cc.dopts.copts.TransportCredentials; creds != nil { + credsClone = creds.Clone() + } + cc.balancerBuildOpts = balancer.BuildOptions{ + DialCreds: credsClone, + CredsBundle: cc.dopts.copts.CredsBundle, + Dialer: cc.dopts.copts.Dialer, + ChannelzParentID: cc.channelzID, + Target: cc.parsedTarget, + } + + // Build the resolver. + rWrapper, err := newCCResolverWrapper(cc) + if err != nil { + return nil, fmt.Errorf("failed to build resolver: %v", err) + } + + cc.mu.Lock() + cc.resolverWrapper = rWrapper + cc.mu.Unlock() + // A blocking dial blocks until the clientConn is ready. + if cc.dopts.block { + for { + s := cc.GetState() + if s == connectivity.Ready { + break + } else if cc.dopts.copts.FailOnNonTempDialError && s == connectivity.TransientFailure { + if err = cc.blockingpicker.connectionError(); err != nil { + terr, ok := err.(interface { + Temporary() bool + }) + if ok && !terr.Temporary() { + return nil, err + } + } + } + if !cc.WaitForStateChange(ctx, s) { + // ctx got timeout or canceled. + return nil, ctx.Err() + } + } + } + + return cc, nil +} + +// chainUnaryClientInterceptors chains all unary client interceptors into one. +func chainUnaryClientInterceptors(cc *ClientConn) { + interceptors := cc.dopts.chainUnaryInts + // Prepend dopts.unaryInt to the chaining interceptors if it exists, since unaryInt will + // be executed before any other chained interceptors. + if cc.dopts.unaryInt != nil { + interceptors = append([]UnaryClientInterceptor{cc.dopts.unaryInt}, interceptors...) + } + var chainedInt UnaryClientInterceptor + if len(interceptors) == 0 { + chainedInt = nil + } else if len(interceptors) == 1 { + chainedInt = interceptors[0] + } else { + chainedInt = func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error { + return interceptors[0](ctx, method, req, reply, cc, getChainUnaryInvoker(interceptors, 0, invoker), opts...) + } + } + cc.dopts.unaryInt = chainedInt +} + +// getChainUnaryInvoker recursively generate the chained unary invoker. 
+func getChainUnaryInvoker(interceptors []UnaryClientInterceptor, curr int, finalInvoker UnaryInvoker) UnaryInvoker { + if curr == len(interceptors)-1 { + return finalInvoker + } + return func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error { + return interceptors[curr+1](ctx, method, req, reply, cc, getChainUnaryInvoker(interceptors, curr+1, finalInvoker), opts...) + } +} + +// chainStreamClientInterceptors chains all stream client interceptors into one. +func chainStreamClientInterceptors(cc *ClientConn) { + interceptors := cc.dopts.chainStreamInts + // Prepend dopts.streamInt to the chaining interceptors if it exists, since streamInt will + // be executed before any other chained interceptors. + if cc.dopts.streamInt != nil { + interceptors = append([]StreamClientInterceptor{cc.dopts.streamInt}, interceptors...) + } + var chainedInt StreamClientInterceptor + if len(interceptors) == 0 { + chainedInt = nil + } else if len(interceptors) == 1 { + chainedInt = interceptors[0] + } else { + chainedInt = func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, streamer Streamer, opts ...CallOption) (ClientStream, error) { + return interceptors[0](ctx, desc, cc, method, getChainStreamer(interceptors, 0, streamer), opts...) + } + } + cc.dopts.streamInt = chainedInt +} + +// getChainStreamer recursively generate the chained client stream constructor. +func getChainStreamer(interceptors []StreamClientInterceptor, curr int, finalStreamer Streamer) Streamer { + if curr == len(interceptors)-1 { + return finalStreamer + } + return func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) { + return interceptors[curr+1](ctx, desc, cc, method, getChainStreamer(interceptors, curr+1, finalStreamer), opts...) + } +} + +// connectivityStateManager keeps the connectivity.State of ClientConn. +// This struct will eventually be exported so the balancers can access it. +type connectivityStateManager struct { + mu sync.Mutex + state connectivity.State + notifyChan chan struct{} + channelzID int64 +} + +// updateState updates the connectivity.State of ClientConn. +// If there's a change it notifies goroutines waiting on state change to +// happen. +func (csm *connectivityStateManager) updateState(state connectivity.State) { + csm.mu.Lock() + defer csm.mu.Unlock() + if csm.state == connectivity.Shutdown { + return + } + if csm.state == state { + return + } + csm.state = state + if channelz.IsOn() { + channelz.AddTraceEvent(csm.channelzID, &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Channel Connectivity change to %v", state), + Severity: channelz.CtINFO, + }) + } + if csm.notifyChan != nil { + // There are other goroutines waiting on this channel. + close(csm.notifyChan) + csm.notifyChan = nil + } +} + +func (csm *connectivityStateManager) getState() connectivity.State { + csm.mu.Lock() + defer csm.mu.Unlock() + return csm.state +} + +func (csm *connectivityStateManager) getNotifyChan() <-chan struct{} { + csm.mu.Lock() + defer csm.mu.Unlock() + if csm.notifyChan == nil { + csm.notifyChan = make(chan struct{}) + } + return csm.notifyChan +} + +// ClientConn represents a client connection to an RPC server. 
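The connectivityStateManager above backs ClientConn.GetState and WaitForStateChange, defined below; a typical readiness loop built on them (a sketch):

    for {
        s := conn.GetState()
        if s == connectivity.Ready {
            break
        }
        if !conn.WaitForStateChange(ctx, s) {
            break // ctx expired or was canceled
        }
    }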
+type ClientConn struct { + ctx context.Context + cancel context.CancelFunc + + target string + parsedTarget resolver.Target + authority string + dopts dialOptions + csMgr *connectivityStateManager + + balancerBuildOpts balancer.BuildOptions + blockingpicker *pickerWrapper + + mu sync.RWMutex + resolverWrapper *ccResolverWrapper + sc *ServiceConfig + conns map[*addrConn]struct{} + // Keepalive parameter can be updated if a GoAway is received. + mkp keepalive.ClientParameters + curBalancerName string + balancerWrapper *ccBalancerWrapper + retryThrottler atomic.Value + + firstResolveEvent *grpcsync.Event + + channelzID int64 // channelz unique identification number + czData *channelzData +} + +// WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or +// ctx expires. A true value is returned in former case and false in latter. +// This is an EXPERIMENTAL API. +func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState connectivity.State) bool { + ch := cc.csMgr.getNotifyChan() + if cc.csMgr.getState() != sourceState { + return true + } + select { + case <-ctx.Done(): + return false + case <-ch: + return true + } +} + +// GetState returns the connectivity.State of ClientConn. +// This is an EXPERIMENTAL API. +func (cc *ClientConn) GetState() connectivity.State { + return cc.csMgr.getState() +} + +func (cc *ClientConn) scWatcher() { + for { + select { + case sc, ok := <-cc.dopts.scChan: + if !ok { + return + } + cc.mu.Lock() + // TODO: load balance policy runtime change is ignored. + // We may revisit this decision in the future. + cc.sc = &sc + cc.mu.Unlock() + case <-cc.ctx.Done(): + return + } + } +} + +// waitForResolvedAddrs blocks until the resolver has provided addresses or the +// context expires. Returns nil unless the context expires first; otherwise +// returns a status error based on the context. +func (cc *ClientConn) waitForResolvedAddrs(ctx context.Context) error { + // This is on the RPC path, so we use a fast path to avoid the + // more-expensive "select" below after the resolver has returned once. + if cc.firstResolveEvent.HasFired() { + return nil + } + select { + case <-cc.firstResolveEvent.Done(): + return nil + case <-ctx.Done(): + return status.FromContextError(ctx.Err()).Err() + case <-cc.ctx.Done(): + return ErrClientConnClosing + } +} + +func (cc *ClientConn) updateResolverState(s resolver.State) error { + cc.mu.Lock() + defer cc.mu.Unlock() + // Check if the ClientConn is already closed. Some fields (e.g. + // balancerWrapper) are set to nil when closing the ClientConn, and could + // cause nil pointer panic if we don't have this check. + if cc.conns == nil { + return nil + } + + if cc.dopts.disableServiceConfig || s.ServiceConfig == nil { + if cc.dopts.defaultServiceConfig != nil && cc.sc == nil { + cc.applyServiceConfig(cc.dopts.defaultServiceConfig) + } + } else if sc, ok := s.ServiceConfig.(*ServiceConfig); ok { + cc.applyServiceConfig(sc) + } + + var balCfg serviceconfig.LoadBalancingConfig + if cc.dopts.balancerBuilder == nil { + // Only look at balancer types and switch balancer if balancer dial + // option is not set. 
+ var newBalancerName string + if cc.sc != nil && cc.sc.lbConfig != nil { + newBalancerName = cc.sc.lbConfig.name + balCfg = cc.sc.lbConfig.cfg + } else { + var isGRPCLB bool + for _, a := range s.Addresses { + if a.Type == resolver.GRPCLB { + isGRPCLB = true + break + } + } + if isGRPCLB { + newBalancerName = grpclbName + } else if cc.sc != nil && cc.sc.LB != nil { + newBalancerName = *cc.sc.LB + } else { + newBalancerName = PickFirstBalancerName + } + } + cc.switchBalancer(newBalancerName) + } else if cc.balancerWrapper == nil { + // Balancer dial option was set, and this is the first time handling + // resolved addresses. Build a balancer with dopts.balancerBuilder. + cc.curBalancerName = cc.dopts.balancerBuilder.Name() + cc.balancerWrapper = newCCBalancerWrapper(cc, cc.dopts.balancerBuilder, cc.balancerBuildOpts) + } + + cc.balancerWrapper.updateClientConnState(&balancer.ClientConnState{ResolverState: s, BalancerConfig: balCfg}) + return nil +} + +// switchBalancer starts the switching from current balancer to the balancer +// with the given name. +// +// It will NOT send the current address list to the new balancer. If needed, +// caller of this function should send address list to the new balancer after +// this function returns. +// +// Caller must hold cc.mu. +func (cc *ClientConn) switchBalancer(name string) { + if strings.EqualFold(cc.curBalancerName, name) { + return + } + + grpclog.Infof("ClientConn switching balancer to %q", name) + if cc.dopts.balancerBuilder != nil { + grpclog.Infoln("ignoring balancer switching: Balancer DialOption used instead") + return + } + if cc.balancerWrapper != nil { + cc.balancerWrapper.close() + } + + builder := balancer.Get(name) + if channelz.IsOn() { + if builder == nil { + channelz.AddTraceEvent(cc.channelzID, &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Channel switches to new LB policy %q due to fallback from invalid balancer name", PickFirstBalancerName), + Severity: channelz.CtWarning, + }) + } else { + channelz.AddTraceEvent(cc.channelzID, &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Channel switches to new LB policy %q", name), + Severity: channelz.CtINFO, + }) + } + } + if builder == nil { + grpclog.Infof("failed to get balancer builder for: %v, using pick_first instead", name) + builder = newPickfirstBuilder() + } + + cc.curBalancerName = builder.Name() + cc.balancerWrapper = newCCBalancerWrapper(cc, builder, cc.balancerBuildOpts) +} + +func (cc *ClientConn) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State) { + cc.mu.Lock() + if cc.conns == nil { + cc.mu.Unlock() + return + } + // TODO(bar switching) send updates to all balancer wrappers when balancer + // gracefully switching is supported. + cc.balancerWrapper.handleSubConnStateChange(sc, s) + cc.mu.Unlock() +} + +// newAddrConn creates an addrConn for addrs and adds it to cc.conns. +// +// Caller needs to make sure len(addrs) > 0. +func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (*addrConn, error) { + ac := &addrConn{ + cc: cc, + addrs: addrs, + scopts: opts, + dopts: cc.dopts, + czData: new(channelzData), + resetBackoff: make(chan struct{}), + } + ac.ctx, ac.cancel = context.WithCancel(cc.ctx) + // Track ac in cc. This needs to be done before any getTransport(...) is called. 
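As switchBalancer above notes, setting the balancer through a dial option pins the policy and disables this switching; for example (a sketch):

    import "google.golang.org/grpc/balancer/roundrobin"

    conn, err := grpc.Dial(target, grpc.WithInsecure(),
        grpc.WithBalancerName(roundrobin.Name)) // fixed policy; resolver-driven switching is ignored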
+ cc.mu.Lock() + if cc.conns == nil { + cc.mu.Unlock() + return nil, ErrClientConnClosing + } + if channelz.IsOn() { + ac.channelzID = channelz.RegisterSubChannel(ac, cc.channelzID, "") + channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{ + Desc: "Subchannel Created", + Severity: channelz.CtINFO, + Parent: &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Subchannel(id:%d) created", ac.channelzID), + Severity: channelz.CtINFO, + }, + }) + } + cc.conns[ac] = struct{}{} + cc.mu.Unlock() + return ac, nil +} + +// removeAddrConn removes the addrConn in the subConn from clientConn. +// It also tears down the ac with the given error. +func (cc *ClientConn) removeAddrConn(ac *addrConn, err error) { + cc.mu.Lock() + if cc.conns == nil { + cc.mu.Unlock() + return + } + delete(cc.conns, ac) + cc.mu.Unlock() + ac.tearDown(err) +} + +func (cc *ClientConn) channelzMetric() *channelz.ChannelInternalMetric { + return &channelz.ChannelInternalMetric{ + State: cc.GetState(), + Target: cc.target, + CallsStarted: atomic.LoadInt64(&cc.czData.callsStarted), + CallsSucceeded: atomic.LoadInt64(&cc.czData.callsSucceeded), + CallsFailed: atomic.LoadInt64(&cc.czData.callsFailed), + LastCallStartedTimestamp: time.Unix(0, atomic.LoadInt64(&cc.czData.lastCallStartedTime)), + } +} + +// Target returns the target string of the ClientConn. +// This is an EXPERIMENTAL API. +func (cc *ClientConn) Target() string { + return cc.target +} + +func (cc *ClientConn) incrCallsStarted() { + atomic.AddInt64(&cc.czData.callsStarted, 1) + atomic.StoreInt64(&cc.czData.lastCallStartedTime, time.Now().UnixNano()) +} + +func (cc *ClientConn) incrCallsSucceeded() { + atomic.AddInt64(&cc.czData.callsSucceeded, 1) +} + +func (cc *ClientConn) incrCallsFailed() { + atomic.AddInt64(&cc.czData.callsFailed, 1) +} + +// connect starts creating a transport. +// It does nothing if the ac is not IDLE. +// TODO(bar) Move this to the addrConn section. +func (ac *addrConn) connect() error { + ac.mu.Lock() + if ac.state == connectivity.Shutdown { + ac.mu.Unlock() + return errConnClosing + } + if ac.state != connectivity.Idle { + ac.mu.Unlock() + return nil + } + // Update connectivity state within the lock to prevent subsequent or + // concurrent calls from resetting the transport more than once. + ac.updateConnectivityState(connectivity.Connecting) + ac.mu.Unlock() + + // Start a goroutine connecting to the server asynchronously. + go ac.resetTransport() + return nil +} + +// tryUpdateAddrs tries to update ac.addrs with the new addresses list. +// +// If ac is Connecting, it returns false. The caller should tear down the ac and +// create a new one. Note that the backoff will be reset when this happens. +// +// If ac is TransientFailure, it updates ac.addrs and returns true. The updated +// addresses will be picked up by retry in the next iteration after backoff. +// +// If ac is Shutdown or Idle, it updates ac.addrs and returns true. +// +// If ac is Ready, it checks whether current connected address of ac is in the +// new addrs list. +// - If true, it updates ac.addrs and returns true. The ac will keep using +// the existing connection. +// - If false, it does nothing and returns false. 
+func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool { + ac.mu.Lock() + defer ac.mu.Unlock() + grpclog.Infof("addrConn: tryUpdateAddrs curAddr: %v, addrs: %v", ac.curAddr, addrs) + if ac.state == connectivity.Shutdown || + ac.state == connectivity.TransientFailure || + ac.state == connectivity.Idle { + ac.addrs = addrs + return true + } + + if ac.state == connectivity.Connecting { + return false + } + + // ac.state is Ready, try to find the connected address. + var curAddrFound bool + for _, a := range addrs { + if reflect.DeepEqual(ac.curAddr, a) { + curAddrFound = true + break + } + } + grpclog.Infof("addrConn: tryUpdateAddrs curAddrFound: %v", curAddrFound) + if curAddrFound { + ac.addrs = addrs + } + + return curAddrFound +} + +// GetMethodConfig gets the method config of the input method. +// If there's an exact match for input method (i.e. /service/method), we return +// the corresponding MethodConfig. +// If there isn't an exact match for the input method, we look for the default config +// under the service (i.e /service/). If there is a default MethodConfig for +// the service, we return it. +// Otherwise, we return an empty MethodConfig. +func (cc *ClientConn) GetMethodConfig(method string) MethodConfig { + // TODO: Avoid the locking here. + cc.mu.RLock() + defer cc.mu.RUnlock() + if cc.sc == nil { + return MethodConfig{} + } + m, ok := cc.sc.Methods[method] + if !ok { + i := strings.LastIndex(method, "/") + m = cc.sc.Methods[method[:i+1]] + } + return m +} + +func (cc *ClientConn) healthCheckConfig() *healthCheckConfig { + cc.mu.RLock() + defer cc.mu.RUnlock() + if cc.sc == nil { + return nil + } + return cc.sc.healthCheckConfig +} + +func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, func(balancer.DoneInfo), error) { + t, done, err := cc.blockingpicker.pick(ctx, failfast, balancer.PickOptions{ + FullMethodName: method, + }) + if err != nil { + return nil, nil, toRPCErr(err) + } + return t, done, nil +} + +func (cc *ClientConn) applyServiceConfig(sc *ServiceConfig) error { + if sc == nil { + // should never reach here. + return fmt.Errorf("got nil pointer for service config") + } + cc.sc = sc + + if cc.sc.retryThrottling != nil { + newThrottler := &retryThrottler{ + tokens: cc.sc.retryThrottling.MaxTokens, + max: cc.sc.retryThrottling.MaxTokens, + thresh: cc.sc.retryThrottling.MaxTokens / 2, + ratio: cc.sc.retryThrottling.TokenRatio, + } + cc.retryThrottler.Store(newThrottler) + } else { + cc.retryThrottler.Store((*retryThrottler)(nil)) + } + + return nil +} + +func (cc *ClientConn) resolveNow(o resolver.ResolveNowOption) { + cc.mu.RLock() + r := cc.resolverWrapper + cc.mu.RUnlock() + if r == nil { + return + } + go r.resolveNow(o) +} + +// ResetConnectBackoff wakes up all subchannels in transient failure and causes +// them to attempt another connection immediately. It also resets the backoff +// times used for subsequent attempts regardless of the current state. +// +// In general, this function should not be used. Typical service or network +// outages result in a reasonable client reconnection strategy by default. +// However, if a previously unavailable network becomes available, this may be +// used to trigger an immediate reconnect. +// +// This API is EXPERIMENTAL. +func (cc *ClientConn) ResetConnectBackoff() { + cc.mu.Lock() + defer cc.mu.Unlock() + for ac := range cc.conns { + ac.resetConnectBackoff() + } +} + +// Close tears down the ClientConn and all underlying connections. 
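GetMethodConfig's lookup order above can be seen with a hypothetical method name (a sketch):

    mc := conn.GetMethodConfig("/helloworld.Greeter/SayHello")
    // exact match "/helloworld.Greeter/SayHello" wins; otherwise the service
    // default "/helloworld.Greeter/" applies; otherwise mc is the zero MethodConfig
    _ = mc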
+func (cc *ClientConn) Close() error { + defer cc.cancel() + + cc.mu.Lock() + if cc.conns == nil { + cc.mu.Unlock() + return ErrClientConnClosing + } + conns := cc.conns + cc.conns = nil + cc.csMgr.updateState(connectivity.Shutdown) + + rWrapper := cc.resolverWrapper + cc.resolverWrapper = nil + bWrapper := cc.balancerWrapper + cc.balancerWrapper = nil + cc.mu.Unlock() + + cc.blockingpicker.close() + + if rWrapper != nil { + rWrapper.close() + } + if bWrapper != nil { + bWrapper.close() + } + + for ac := range conns { + ac.tearDown(ErrClientConnClosing) + } + if channelz.IsOn() { + ted := &channelz.TraceEventDesc{ + Desc: "Channel Deleted", + Severity: channelz.CtINFO, + } + if cc.dopts.channelzParentID != 0 { + ted.Parent = &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Nested channel(id:%d) deleted", cc.channelzID), + Severity: channelz.CtINFO, + } + } + channelz.AddTraceEvent(cc.channelzID, ted) + // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add trace reference to + // the entity being deleted, and thus prevent it from being deleted right away. + channelz.RemoveEntry(cc.channelzID) + } + return nil +} + +// addrConn is a network connection to a given address. +type addrConn struct { + ctx context.Context + cancel context.CancelFunc + + cc *ClientConn + dopts dialOptions + acbw balancer.SubConn + scopts balancer.NewSubConnOptions + + // transport is set when there's a viable transport (note: ac state may not be READY as LB channel + // health checking may require server to report healthy to set ac to READY), and is reset + // to nil when the current transport should no longer be used to create a stream (e.g. after GoAway + // is received, transport is closed, ac has been torn down). + transport transport.ClientTransport // The current transport. + + mu sync.Mutex + curAddr resolver.Address // The current address. + addrs []resolver.Address // All addresses that the resolver resolved to. + + // Use updateConnectivityState for updating addrConn's connectivity state. + state connectivity.State + + backoffIdx int // Needs to be stateful for resetConnectBackoff. + resetBackoff chan struct{} + + channelzID int64 // channelz unique identification number. + czData *channelzData +} + +// Note: this requires a lock on ac.mu. +func (ac *addrConn) updateConnectivityState(s connectivity.State) { + if ac.state == s { + return + } + + updateMsg := fmt.Sprintf("Subchannel Connectivity change to %v", s) + ac.state = s + if channelz.IsOn() { + channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{ + Desc: updateMsg, + Severity: channelz.CtINFO, + }) + } + ac.cc.handleSubConnStateChange(ac.acbw, s) +} + +// adjustParams updates parameters used to create transports upon +// receiving a GoAway. +func (ac *addrConn) adjustParams(r transport.GoAwayReason) { + switch r { + case transport.GoAwayTooManyPings: + v := 2 * ac.dopts.copts.KeepaliveParams.Time + ac.cc.mu.Lock() + if v > ac.cc.mkp.Time { + ac.cc.mkp.Time = v + } + ac.cc.mu.Unlock() + } +} + +func (ac *addrConn) resetTransport() { + for i := 0; ; i++ { + if i > 0 { + ac.cc.resolveNow(resolver.ResolveNowOption{}) + } + + ac.mu.Lock() + if ac.state == connectivity.Shutdown { + ac.mu.Unlock() + return + } + + addrs := ac.addrs + backoffFor := ac.dopts.bs.Backoff(ac.backoffIdx) + // This will be the duration that dial gets to finish. 
+ dialDuration := minConnectTimeout + if ac.dopts.minConnectTimeout != nil { + dialDuration = ac.dopts.minConnectTimeout() + } + + if dialDuration < backoffFor { + // Give dial more time as we keep failing to connect. + dialDuration = backoffFor + } + // We can potentially spend all the time trying the first address, and + // if the server accepts the connection and then hangs, the following + // addresses will never be tried. + // + // The spec doesn't mention what should be done for multiple addresses. + // https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md#proposed-backoff-algorithm + connectDeadline := time.Now().Add(dialDuration) + + ac.updateConnectivityState(connectivity.Connecting) + ac.transport = nil + ac.mu.Unlock() + + newTr, addr, reconnect, err := ac.tryAllAddrs(addrs, connectDeadline) + if err != nil { + // After exhausting all addresses, the addrConn enters + // TRANSIENT_FAILURE. + ac.mu.Lock() + if ac.state == connectivity.Shutdown { + ac.mu.Unlock() + return + } + ac.updateConnectivityState(connectivity.TransientFailure) + + // Backoff. + b := ac.resetBackoff + ac.mu.Unlock() + + timer := time.NewTimer(backoffFor) + select { + case <-timer.C: + ac.mu.Lock() + ac.backoffIdx++ + ac.mu.Unlock() + case <-b: + timer.Stop() + case <-ac.ctx.Done(): + timer.Stop() + return + } + continue + } + + ac.mu.Lock() + if ac.state == connectivity.Shutdown { + newTr.Close() + ac.mu.Unlock() + return + } + ac.curAddr = addr + ac.transport = newTr + ac.backoffIdx = 0 + + hctx, hcancel := context.WithCancel(ac.ctx) + ac.startHealthCheck(hctx) + ac.mu.Unlock() + + // Block until the created transport is down. And when this happens, + // we restart from the top of the addr list. + <-reconnect.Done() + hcancel() + + // Need to reconnect after a READY, the addrConn enters + // TRANSIENT_FAILURE. + // + // This will set addrConn to TRANSIENT_FAILURE for a very short period + // of time, and then turns CONNECTING. It seems reasonable to skip this, but + // READY-CONNECTING is not a valid transition. + ac.mu.Lock() + if ac.state == connectivity.Shutdown { + ac.mu.Unlock() + return + } + ac.updateConnectivityState(connectivity.TransientFailure) + ac.mu.Unlock() + } +} + +// tryAllAddrs tries to create a connection to each of the addresses, stopping at the +// first successful one. It returns the transport, the address and an Event in +// the successful case. The Event fires when the returned transport disconnects. +func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.Time) (transport.ClientTransport, resolver.Address, *grpcsync.Event, error) { + for _, addr := range addrs { + ac.mu.Lock() + if ac.state == connectivity.Shutdown { + ac.mu.Unlock() + return nil, resolver.Address{}, nil, errConnClosing + } + + ac.cc.mu.RLock() + ac.dopts.copts.KeepaliveParams = ac.cc.mkp + ac.cc.mu.RUnlock() + + copts := ac.dopts.copts + if ac.scopts.CredsBundle != nil { + copts.CredsBundle = ac.scopts.CredsBundle + } + ac.mu.Unlock() + + if channelz.IsOn() { + channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Subchannel picks a new address %q to connect", addr.Addr), + Severity: channelz.CtINFO, + }) + } + + newTr, reconnect, err := ac.createTransport(addr, copts, connectDeadline) + if err == nil { + return newTr, addr, reconnect, nil + } + ac.cc.blockingpicker.updateConnectionError(err) + } + + // Couldn't connect to any address.
+ return nil, resolver.Address{}, nil, fmt.Errorf("couldn't connect to any address") +} + +// createTransport creates a connection to addr. It returns the transport and a +// Event in the successful case. The Event fires when the returned transport +// disconnects. +func (ac *addrConn) createTransport(addr resolver.Address, copts transport.ConnectOptions, connectDeadline time.Time) (transport.ClientTransport, *grpcsync.Event, error) { + prefaceReceived := make(chan struct{}) + onCloseCalled := make(chan struct{}) + reconnect := grpcsync.NewEvent() + + target := transport.TargetInfo{ + Addr: addr.Addr, + Metadata: addr.Metadata, + Authority: ac.cc.authority, + } + + onGoAway := func(r transport.GoAwayReason) { + ac.mu.Lock() + ac.adjustParams(r) + ac.mu.Unlock() + reconnect.Fire() + } + + onClose := func() { + close(onCloseCalled) + reconnect.Fire() + } + + onPrefaceReceipt := func() { + close(prefaceReceived) + } + + connectCtx, cancel := context.WithDeadline(ac.ctx, connectDeadline) + defer cancel() + if channelz.IsOn() { + copts.ChannelzParentID = ac.channelzID + } + + newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, target, copts, onPrefaceReceipt, onGoAway, onClose) + if err != nil { + // newTr is either nil, or closed. + grpclog.Warningf("grpc: addrConn.createTransport failed to connect to %v. Err :%v. Reconnecting...", addr, err) + return nil, nil, err + } + + if ac.dopts.reqHandshake == envconfig.RequireHandshakeOn { + select { + case <-time.After(connectDeadline.Sub(time.Now())): + // We didn't get the preface in time. + newTr.Close() + grpclog.Warningf("grpc: addrConn.createTransport failed to connect to %v: didn't receive server preface in time. Reconnecting...", addr) + return nil, nil, errors.New("timed out waiting for server handshake") + case <-prefaceReceived: + // We got the preface - huzzah! things are good. + case <-onCloseCalled: + // The transport has already closed - noop. + return nil, nil, errors.New("connection closed") + // TODO(deklerk) this should bail on ac.ctx.Done(). Add a test and fix. + } + } + return newTr, reconnect, nil +} + +// startHealthCheck starts the health checking stream (RPC) to watch the health +// stats of this connection if health checking is requested and configured. +// +// LB channel health checking is enabled when all requirements below are met: +// 1. it is not disabled by the user with the WithDisableHealthCheck DialOption +// 2. internal.HealthCheckFunc is set by importing the grpc/healthcheck package +// 3. a service config with non-empty healthCheckConfig field is provided +// 4. the load balancer requests it +// +// It sets addrConn to READY if the health checking stream is not started. +// +// Caller must hold ac.mu. +func (ac *addrConn) startHealthCheck(ctx context.Context) { + var healthcheckManagingState bool + defer func() { + if !healthcheckManagingState { + ac.updateConnectivityState(connectivity.Ready) + } + }() + + if ac.cc.dopts.disableHealthCheck { + return + } + healthCheckConfig := ac.cc.healthCheckConfig() + if healthCheckConfig == nil { + return + } + if !ac.scopts.HealthCheckEnabled { + return + } + healthCheckFunc := ac.cc.dopts.healthCheckFunc + if healthCheckFunc == nil { + // The health package is not imported to set health check function. + // + // TODO: add a link to the health check doc in the error message. + grpclog.Error("Health check is requested but health check function is not set.") + return + } + + healthcheckManagingState = true + + // Set up the health check helper functions. 
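Meeting the four health-checking requirements listed above from the client side might look like this (a sketch; the service name is a placeholder, and it assumes the registering package in this snapshot is google.golang.org/grpc/health, which the comment refers to as grpc/healthcheck):

    import _ "google.golang.org/grpc/health" // requirement 2: sets internal.HealthCheckFunc

    conn, err := grpc.Dial(target, grpc.WithInsecure(),
        // requirement 3: a service config with a non-empty healthCheckConfig
        grpc.WithDefaultServiceConfig(`{"healthCheckConfig":{"serviceName":"my.service.Name"}}`),
    )
    // requirement 1 is met by not passing grpc.WithDisableHealthCheck()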
+ currentTr := ac.transport + newStream := func(method string) (interface{}, error) { + ac.mu.Lock() + if ac.transport != currentTr { + ac.mu.Unlock() + return nil, status.Error(codes.Canceled, "the provided transport is no longer valid to use") + } + ac.mu.Unlock() + return newNonRetryClientStream(ctx, &StreamDesc{ServerStreams: true}, method, currentTr, ac) + } + setConnectivityState := func(s connectivity.State) { + ac.mu.Lock() + defer ac.mu.Unlock() + if ac.transport != currentTr { + return + } + ac.updateConnectivityState(s) + } + // Start the health checking stream. + go func() { + err := ac.cc.dopts.healthCheckFunc(ctx, newStream, setConnectivityState, healthCheckConfig.ServiceName) + if err != nil { + if status.Code(err) == codes.Unimplemented { + if channelz.IsOn() { + channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{ + Desc: "Subchannel health check is unimplemented at server side, thus health check is disabled", + Severity: channelz.CtError, + }) + } + grpclog.Error("Subchannel health check is unimplemented at server side, thus health check is disabled") + } else { + grpclog.Errorf("HealthCheckFunc exits with unexpected error %v", err) + } + } + }() +} + +func (ac *addrConn) resetConnectBackoff() { + ac.mu.Lock() + close(ac.resetBackoff) + ac.backoffIdx = 0 + ac.resetBackoff = make(chan struct{}) + ac.mu.Unlock() +} + +// getReadyTransport returns the transport if ac's state is READY. +// Otherwise it returns nil, false. +// If ac's state is IDLE, it will trigger ac to connect. +func (ac *addrConn) getReadyTransport() (transport.ClientTransport, bool) { + ac.mu.Lock() + if ac.state == connectivity.Ready && ac.transport != nil { + t := ac.transport + ac.mu.Unlock() + return t, true + } + var idle bool + if ac.state == connectivity.Idle { + idle = true + } + ac.mu.Unlock() + // Trigger idle ac to connect. + if idle { + ac.connect() + } + return nil, false +} + +// tearDown starts to tear down the addrConn. +// TODO(zhaoq): Make this synchronous to avoid unbounded memory consumption in +// some edge cases (e.g., the caller opens and closes many addrConn's in a +// tight loop). +// tearDown doesn't remove ac from ac.cc.conns. +func (ac *addrConn) tearDown(err error) { + ac.mu.Lock() + if ac.state == connectivity.Shutdown { + ac.mu.Unlock() + return + } + curTr := ac.transport + ac.transport = nil + // We have to set the state to Shutdown before anything else to prevent races + // between setting the state and logic that waits on context cancelation / etc. + ac.updateConnectivityState(connectivity.Shutdown) + ac.cancel() + ac.curAddr = resolver.Address{} + if err == errConnDrain && curTr != nil { + // GracefulClose(...) may be executed multiple times when + // i) receiving multiple GoAway frames from the server; or + // ii) there are concurrent name resolver/Balancer triggered + // address removal and GoAway. + // We have to unlock and re-lock here because GracefulClose => Close => onClose, which requires locking ac.mu. + ac.mu.Unlock() + curTr.GracefulClose() + ac.mu.Lock() + } + if channelz.IsOn() { + channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{ + Desc: "Subchannel Deleted", + Severity: channelz.CtINFO, + Parent: &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Subchannel(id:%d) deleted", ac.channelzID), + Severity: channelz.CtINFO, + }, + }) + // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add trace reference to + // the entity being deleted, and thus prevent it from being deleted right away.
+ channelz.RemoveEntry(ac.channelzID) + } + ac.mu.Unlock() +} + +func (ac *addrConn) getState() connectivity.State { + ac.mu.Lock() + defer ac.mu.Unlock() + return ac.state +} + +func (ac *addrConn) ChannelzMetric() *channelz.ChannelInternalMetric { + ac.mu.Lock() + addr := ac.curAddr.Addr + ac.mu.Unlock() + return &channelz.ChannelInternalMetric{ + State: ac.getState(), + Target: addr, + CallsStarted: atomic.LoadInt64(&ac.czData.callsStarted), + CallsSucceeded: atomic.LoadInt64(&ac.czData.callsSucceeded), + CallsFailed: atomic.LoadInt64(&ac.czData.callsFailed), + LastCallStartedTimestamp: time.Unix(0, atomic.LoadInt64(&ac.czData.lastCallStartedTime)), + } +} + +func (ac *addrConn) incrCallsStarted() { + atomic.AddInt64(&ac.czData.callsStarted, 1) + atomic.StoreInt64(&ac.czData.lastCallStartedTime, time.Now().UnixNano()) +} + +func (ac *addrConn) incrCallsSucceeded() { + atomic.AddInt64(&ac.czData.callsSucceeded, 1) +} + +func (ac *addrConn) incrCallsFailed() { + atomic.AddInt64(&ac.czData.callsFailed, 1) +} + +type retryThrottler struct { + max float64 + thresh float64 + ratio float64 + + mu sync.Mutex + tokens float64 // TODO(dfawley): replace with atomic and remove lock. +} + +// throttle subtracts a retry token from the pool and returns whether a retry +// should be throttled (disallowed) based upon the retry throttling policy in +// the service config. +func (rt *retryThrottler) throttle() bool { + if rt == nil { + return false + } + rt.mu.Lock() + defer rt.mu.Unlock() + rt.tokens-- + if rt.tokens < 0 { + rt.tokens = 0 + } + return rt.tokens <= rt.thresh +} + +func (rt *retryThrottler) successfulRPC() { + if rt == nil { + return + } + rt.mu.Lock() + defer rt.mu.Unlock() + rt.tokens += rt.ratio + if rt.tokens > rt.max { + rt.tokens = rt.max + } +} + +type channelzChannel struct { + cc *ClientConn +} + +func (c *channelzChannel) ChannelzMetric() *channelz.ChannelInternalMetric { + return c.cc.channelzMetric() +} + +// ErrClientConnTimeout indicates that the ClientConn cannot establish the +// underlying connections within the specified timeout. +// +// Deprecated: This error is never returned by grpc and should not be +// referenced by users. +var ErrClientConnTimeout = errors.New("grpc: timed out when dialing") diff --git a/vendor/google.golang.org/grpc/codec.go b/vendor/google.golang.org/grpc/codec.go new file mode 100644 index 000000000..129776547 --- /dev/null +++ b/vendor/google.golang.org/grpc/codec.go @@ -0,0 +1,50 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "google.golang.org/grpc/encoding" + _ "google.golang.org/grpc/encoding/proto" // to register the Codec for "proto" +) + +// baseCodec contains the functionality of both Codec and encoding.Codec, but +// omits the name/string, which vary between the two and are not needed for +// anything besides the registry in the encoding package. 
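New codecs are registered through the encoding package rather than the deprecated Codec interface below; a minimal JSON sketch (assumes encoding/json is imported; the name "json" is illustrative):

    type jsonCodec struct{}

    func (jsonCodec) Marshal(v interface{}) ([]byte, error)      { return json.Marshal(v) }
    func (jsonCodec) Unmarshal(data []byte, v interface{}) error { return json.Unmarshal(data, v) }
    func (jsonCodec) Name() string                               { return "json" }

    func init() { encoding.RegisterCodec(jsonCodec{}) }

A caller would then select it per RPC with grpc.CallContentSubtype("json").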
+type baseCodec interface { + Marshal(v interface{}) ([]byte, error) + Unmarshal(data []byte, v interface{}) error +} + +var _ baseCodec = Codec(nil) +var _ baseCodec = encoding.Codec(nil) + +// Codec defines the interface gRPC uses to encode and decode messages. +// Note that implementations of this interface must be thread safe; +// a Codec's methods can be called from concurrent goroutines. +// +// Deprecated: use encoding.Codec instead. +type Codec interface { + // Marshal returns the wire format of v. + Marshal(v interface{}) ([]byte, error) + // Unmarshal parses the wire format into v. + Unmarshal(data []byte, v interface{}) error + // String returns the name of the Codec implementation. This is unused by + // gRPC. + String() string +} diff --git a/vendor/google.golang.org/grpc/codes/code_string.go b/vendor/google.golang.org/grpc/codes/code_string.go new file mode 100644 index 000000000..0b206a578 --- /dev/null +++ b/vendor/google.golang.org/grpc/codes/code_string.go @@ -0,0 +1,62 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package codes + +import "strconv" + +func (c Code) String() string { + switch c { + case OK: + return "OK" + case Canceled: + return "Canceled" + case Unknown: + return "Unknown" + case InvalidArgument: + return "InvalidArgument" + case DeadlineExceeded: + return "DeadlineExceeded" + case NotFound: + return "NotFound" + case AlreadyExists: + return "AlreadyExists" + case PermissionDenied: + return "PermissionDenied" + case ResourceExhausted: + return "ResourceExhausted" + case FailedPrecondition: + return "FailedPrecondition" + case Aborted: + return "Aborted" + case OutOfRange: + return "OutOfRange" + case Unimplemented: + return "Unimplemented" + case Internal: + return "Internal" + case Unavailable: + return "Unavailable" + case DataLoss: + return "DataLoss" + case Unauthenticated: + return "Unauthenticated" + default: + return "Code(" + strconv.FormatInt(int64(c), 10) + ")" + } +} diff --git a/vendor/google.golang.org/grpc/codes/codes.go b/vendor/google.golang.org/grpc/codes/codes.go new file mode 100644 index 000000000..02738839d --- /dev/null +++ b/vendor/google.golang.org/grpc/codes/codes.go @@ -0,0 +1,198 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package codes defines the canonical error codes used by gRPC. It is +// consistent across various languages. 
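In application code these codes typically travel inside status errors; for example (a sketch):

    err := status.Error(codes.NotFound, "user not found")
    if status.Code(err) == codes.NotFound {
        // handle the missing resource
    }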
+package codes // import "google.golang.org/grpc/codes" + +import ( + "fmt" + "strconv" +) + +// A Code is an unsigned 32-bit error code as defined in the gRPC spec. +type Code uint32 + +const ( + // OK is returned on success. + OK Code = 0 + + // Canceled indicates the operation was canceled (typically by the caller). + Canceled Code = 1 + + // Unknown error. An example of where this error may be returned is + // if a Status value received from another address space belongs to + // an error-space that is not known in this address space. Also + // errors raised by APIs that do not return enough error information + // may be converted to this error. + Unknown Code = 2 + + // InvalidArgument indicates client specified an invalid argument. + // Note that this differs from FailedPrecondition. It indicates arguments + // that are problematic regardless of the state of the system + // (e.g., a malformed file name). + InvalidArgument Code = 3 + + // DeadlineExceeded means operation expired before completion. + // For operations that change the state of the system, this error may be + // returned even if the operation has completed successfully. For + // example, a successful response from a server could have been delayed + // long enough for the deadline to expire. + DeadlineExceeded Code = 4 + + // NotFound means some requested entity (e.g., file or directory) was + // not found. + NotFound Code = 5 + + // AlreadyExists means an attempt to create an entity failed because one + // already exists. + AlreadyExists Code = 6 + + // PermissionDenied indicates the caller does not have permission to + // execute the specified operation. It must not be used for rejections + // caused by exhausting some resource (use ResourceExhausted + // instead for those errors). It must not be + // used if the caller cannot be identified (use Unauthenticated + // instead for those errors). + PermissionDenied Code = 7 + + // ResourceExhausted indicates some resource has been exhausted, perhaps + // a per-user quota, or perhaps the entire file system is out of space. + ResourceExhausted Code = 8 + + // FailedPrecondition indicates operation was rejected because the + // system is not in a state required for the operation's execution. + // For example, directory to be deleted may be non-empty, an rmdir + // operation is applied to a non-directory, etc. + // + // A litmus test that may help a service implementor in deciding + // between FailedPrecondition, Aborted, and Unavailable: + // (a) Use Unavailable if the client can retry just the failing call. + // (b) Use Aborted if the client should retry at a higher-level + // (e.g., restarting a read-modify-write sequence). + // (c) Use FailedPrecondition if the client should not retry until + // the system state has been explicitly fixed. E.g., if an "rmdir" + // fails because the directory is non-empty, FailedPrecondition + // should be returned since the client should not retry unless + // they have first fixed up the directory by deleting files from it. + // (d) Use FailedPrecondition if the client performs conditional + // REST Get/Update/Delete on a resource and the resource on the + // server does not match the condition. E.g., conflicting + // read-modify-write on the same resource. + FailedPrecondition Code = 9 + + // Aborted indicates the operation was aborted, typically due to a + // concurrency issue like sequencer check failures, transaction aborts, + // etc. + // + // See litmus test above for deciding between FailedPrecondition, + // Aborted, and Unavailable. 
+	Aborted Code = 10
+
+	// OutOfRange means operation was attempted past the valid range.
+	// E.g., seeking or reading past end of file.
+	//
+	// Unlike InvalidArgument, this error indicates a problem that may
+	// be fixed if the system state changes. For example, a 32-bit file
+	// system will generate InvalidArgument if asked to read at an
+	// offset that is not in the range [0,2^32-1], but it will generate
+	// OutOfRange if asked to read from an offset past the current
+	// file size.
+	//
+	// There is a fair bit of overlap between FailedPrecondition and
+	// OutOfRange. We recommend using OutOfRange (the more specific
+	// error) when it applies so that callers who are iterating through
+	// a space can easily look for an OutOfRange error to detect when
+	// they are done.
+	OutOfRange Code = 11
+
+	// Unimplemented indicates operation is not implemented or not
+	// supported/enabled in this service.
+	Unimplemented Code = 12
+
+	// Internal errors. Means some invariants expected by the underlying
+	// system have been broken. If you see one of these errors,
+	// something is very broken.
+	Internal Code = 13
+
+	// Unavailable indicates the service is currently unavailable.
+	// This is most likely a transient condition and may be corrected
+	// by retrying with a backoff. Note that it is not always safe to retry
+	// non-idempotent operations.
+	//
+	// See litmus test above for deciding between FailedPrecondition,
+	// Aborted, and Unavailable.
+	Unavailable Code = 14
+
+	// DataLoss indicates unrecoverable data loss or corruption.
+	DataLoss Code = 15
+
+	// Unauthenticated indicates the request does not have valid
+	// authentication credentials for the operation.
+	Unauthenticated Code = 16
+
+	_maxCode = 17
+)
+
+var strToCode = map[string]Code{
+	`"OK"`: OK,
+	`"CANCELLED"`:/* [sic] */ Canceled,
+	`"UNKNOWN"`:             Unknown,
+	`"INVALID_ARGUMENT"`:    InvalidArgument,
+	`"DEADLINE_EXCEEDED"`:   DeadlineExceeded,
+	`"NOT_FOUND"`:           NotFound,
+	`"ALREADY_EXISTS"`:      AlreadyExists,
+	`"PERMISSION_DENIED"`:   PermissionDenied,
+	`"RESOURCE_EXHAUSTED"`:  ResourceExhausted,
+	`"FAILED_PRECONDITION"`: FailedPrecondition,
+	`"ABORTED"`:             Aborted,
+	`"OUT_OF_RANGE"`:        OutOfRange,
+	`"UNIMPLEMENTED"`:       Unimplemented,
+	`"INTERNAL"`:            Internal,
+	`"UNAVAILABLE"`:         Unavailable,
+	`"DATA_LOSS"`:           DataLoss,
+	`"UNAUTHENTICATED"`:     Unauthenticated,
+}
+
+// UnmarshalJSON unmarshals b into the Code.
+func (c *Code) UnmarshalJSON(b []byte) error {
+	// From json.Unmarshaler: By convention, to approximate the behavior of
+	// Unmarshal itself, Unmarshalers implement UnmarshalJSON([]byte("null")) as
+	// a no-op.
+	if string(b) == "null" {
+		return nil
+	}
+	if c == nil {
+		return fmt.Errorf("nil receiver passed to UnmarshalJSON")
+	}
+
+	if ci, err := strconv.ParseUint(string(b), 10, 32); err == nil {
+		if ci >= _maxCode {
+			return fmt.Errorf("invalid code: %q", ci)
+		}
+
+		*c = Code(ci)
+		return nil
+	}
+
+	if jc, ok := strToCode[string(b)]; ok {
+		*c = jc
+		return nil
+	}
+	return fmt.Errorf("invalid code: %q", string(b))
+}
diff --git a/vendor/google.golang.org/grpc/connectivity/connectivity.go b/vendor/google.golang.org/grpc/connectivity/connectivity.go
new file mode 100644
index 000000000..34ec36fbf
--- /dev/null
+++ b/vendor/google.golang.org/grpc/connectivity/connectivity.go
@@ -0,0 +1,73 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package connectivity defines connectivity semantics.
+// For details, see https://github.com/grpc/grpc/blob/master/doc/connectivity-semantics-and-api.md.
+// All APIs in this package are experimental.
+package connectivity
+
+import (
+	"context"
+
+	"google.golang.org/grpc/grpclog"
+)
+
+// State indicates the state of connectivity.
+// It can be the state of a ClientConn or SubConn.
+type State int
+
+func (s State) String() string {
+	switch s {
+	case Idle:
+		return "IDLE"
+	case Connecting:
+		return "CONNECTING"
+	case Ready:
+		return "READY"
+	case TransientFailure:
+		return "TRANSIENT_FAILURE"
+	case Shutdown:
+		return "SHUTDOWN"
+	default:
+		grpclog.Errorf("unknown connectivity state: %d", s)
+		return "Invalid-State"
+	}
+}
+
+const (
+	// Idle indicates the ClientConn is idle.
+	Idle State = iota
+	// Connecting indicates the ClientConn is connecting.
+	Connecting
+	// Ready indicates the ClientConn is ready for work.
+	Ready
+	// TransientFailure indicates the ClientConn has seen a failure but expects to recover.
+	TransientFailure
+	// Shutdown indicates the ClientConn has started shutting down.
+	Shutdown
+)
+
+// Reporter reports the connectivity states.
+type Reporter interface {
+	// CurrentState returns the current state of the reporter.
+	CurrentState() State
+	// WaitForStateChange blocks until the reporter's state is different from the given state,
+	// and returns true.
+	// It returns false if <-ctx.Done() can proceed (the ctx timed out or was canceled).
+	WaitForStateChange(context.Context, State) bool
+}
diff --git a/vendor/google.golang.org/grpc/credentials/alts/alts.go b/vendor/google.golang.org/grpc/credentials/alts/alts.go
new file mode 100644
index 000000000..72c7f0b23
--- /dev/null
+++ b/vendor/google.golang.org/grpc/credentials/alts/alts.go
@@ -0,0 +1,330 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package alts implements ALTS credential support for the gRPC library. It
+// encapsulates all the state needed by a client to authenticate with a server
+// using ALTS and make various assertions, e.g., about the client's identity,
+// role, or whether it is authorized to make a particular call.
+// This package is experimental.
+package alts
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"net"
+	"sync"
+	"time"
+
+	"google.golang.org/grpc/credentials"
+	core "google.golang.org/grpc/credentials/alts/internal"
+	"google.golang.org/grpc/credentials/alts/internal/handshaker"
+	"google.golang.org/grpc/credentials/alts/internal/handshaker/service"
+	altspb "google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp"
+	"google.golang.org/grpc/grpclog"
+)
+
+const (
+	// hypervisorHandshakerServiceAddress represents the default ALTS gRPC
+	// handshaker service address in the hypervisor.
+	hypervisorHandshakerServiceAddress = "metadata.google.internal:8080"
+	// defaultTimeout specifies the server handshake timeout.
+	defaultTimeout = 30.0 * time.Second
+	// The following constants specify the minimum and maximum acceptable
+	// protocol versions.
+	protocolVersionMaxMajor = 2
+	protocolVersionMaxMinor = 1
+	protocolVersionMinMajor = 2
+	protocolVersionMinMinor = 1
+)
+
+var (
+	once sync.Once
+	maxRPCVersion = &altspb.RpcProtocolVersions_Version{
+		Major: protocolVersionMaxMajor,
+		Minor: protocolVersionMaxMinor,
+	}
+	minRPCVersion = &altspb.RpcProtocolVersions_Version{
+		Major: protocolVersionMinMajor,
+		Minor: protocolVersionMinMinor,
+	}
+	// ErrUntrustedPlatform is returned from ClientHandshake and
+	// ServerHandshake when running on a platform where the trustworthiness
+	// of the handshaker service is not guaranteed.
+	ErrUntrustedPlatform = errors.New("ALTS: untrusted platform. ALTS is only supported on GCP")
+)
+
+// AuthInfo exposes security information from the ALTS handshake to the
+// application. This interface is to be implemented by ALTS. Users should not
+// need a brand new implementation of this interface. For situations like
+// testing, any new implementation should embed this interface. This allows
+// ALTS to add new methods to this interface.
+type AuthInfo interface {
+	// ApplicationProtocol returns the application protocol negotiated for
+	// the ALTS connection.
+	ApplicationProtocol() string
+	// RecordProtocol returns the record protocol negotiated for the ALTS
+	// connection.
+	RecordProtocol() string
+	// SecurityLevel returns the security level of the created ALTS secure
+	// channel.
+	SecurityLevel() altspb.SecurityLevel
+	// PeerServiceAccount returns the peer service account.
+	PeerServiceAccount() string
+	// LocalServiceAccount returns the local service account.
+	LocalServiceAccount() string
+	// PeerRPCVersions returns the RPC version supported by the peer.
+	PeerRPCVersions() *altspb.RpcProtocolVersions
+}
+
+// ClientOptions contains the client-side options of an ALTS channel. These
+// options will be passed to the underlying ALTS handshaker.
+type ClientOptions struct {
+	// TargetServiceAccounts contains a list of expected target service
+	// accounts.
+	TargetServiceAccounts []string
+	// HandshakerServiceAddress represents the ALTS handshaker gRPC service
+	// address to connect to.
+	HandshakerServiceAddress string
+}
+
+// DefaultClientOptions creates a new ClientOptions object with the default
+// values.
+func DefaultClientOptions() *ClientOptions {
+	return &ClientOptions{
+		HandshakerServiceAddress: hypervisorHandshakerServiceAddress,
+	}
+}
+
+// ServerOptions contains the server-side options of an ALTS channel. These
+// options will be passed to the underlying ALTS handshaker.
+type ServerOptions struct {
+	// HandshakerServiceAddress represents the ALTS handshaker gRPC service
+	// address to connect to.
+	HandshakerServiceAddress string
+}
+
+// DefaultServerOptions creates a new ServerOptions object with the default
+// values.
+func DefaultServerOptions() *ServerOptions {
+	return &ServerOptions{
+		HandshakerServiceAddress: hypervisorHandshakerServiceAddress,
+	}
+}
+
+// altsTC is the credentials required for authenticating a connection using ALTS.
+// It implements the credentials.TransportCredentials interface.
+type altsTC struct {
+	info      *credentials.ProtocolInfo
+	side      core.Side
+	accounts  []string
+	hsAddress string
+}
+
+// NewClientCreds constructs a client-side ALTS TransportCredentials object.
+func NewClientCreds(opts *ClientOptions) credentials.TransportCredentials {
+	return newALTS(core.ClientSide, opts.TargetServiceAccounts, opts.HandshakerServiceAddress)
+}
+
+// NewServerCreds constructs a server-side ALTS TransportCredentials object.
+func NewServerCreds(opts *ServerOptions) credentials.TransportCredentials {
+	return newALTS(core.ServerSide, nil, opts.HandshakerServiceAddress)
+}
+
+func newALTS(side core.Side, accounts []string, hsAddress string) credentials.TransportCredentials {
+	once.Do(func() {
+		vmOnGCP = isRunningOnGCP()
+	})
+
+	if hsAddress == "" {
+		hsAddress = hypervisorHandshakerServiceAddress
+	}
+	return &altsTC{
+		info: &credentials.ProtocolInfo{
+			SecurityProtocol: "alts",
+			SecurityVersion:  "1.0",
+		},
+		side:      side,
+		accounts:  accounts,
+		hsAddress: hsAddress,
+	}
+}
+
+// ClientHandshake implements the client side handshake protocol.
+func (g *altsTC) ClientHandshake(ctx context.Context, addr string, rawConn net.Conn) (_ net.Conn, _ credentials.AuthInfo, err error) {
+	if !vmOnGCP {
+		return nil, nil, ErrUntrustedPlatform
+	}
+
+	// Connecting to ALTS handshaker service.
+	hsConn, err := service.Dial(g.hsAddress)
+	if err != nil {
+		return nil, nil, err
+	}
+	// Do not close hsConn since it is shared with other handshakes.
+
+	// Possible context leak:
+	// The cancel function for the child context we create will only be
+	// called when a non-nil error is returned.
+	var cancel context.CancelFunc
+	ctx, cancel = context.WithCancel(ctx)
+	defer func() {
+		if err != nil {
+			cancel()
+		}
+	}()
+
+	opts := handshaker.DefaultClientHandshakerOptions()
+	opts.TargetName = addr
+	opts.TargetServiceAccounts = g.accounts
+	opts.RPCVersions = &altspb.RpcProtocolVersions{
+		MaxRpcVersion: maxRPCVersion,
+		MinRpcVersion: minRPCVersion,
+	}
+	chs, err := handshaker.NewClientHandshaker(ctx, hsConn, rawConn, opts)
+	if err != nil {
+		return nil, nil, err
+	}
+	defer func() {
+		if err != nil {
+			chs.Close()
+		}
+	}()
+	secConn, authInfo, err := chs.ClientHandshake(ctx)
+	if err != nil {
+		return nil, nil, err
+	}
+	altsAuthInfo, ok := authInfo.(AuthInfo)
+	if !ok {
+		return nil, nil, errors.New("client-side auth info is not of type alts.AuthInfo")
+	}
+	match, _ := checkRPCVersions(opts.RPCVersions, altsAuthInfo.PeerRPCVersions())
+	if !match {
+		return nil, nil, fmt.Errorf("server-side RPC versions are not compatible with this client, local versions: %v, peer versions: %v", opts.RPCVersions, altsAuthInfo.PeerRPCVersions())
+	}
+	return secConn, authInfo, nil
+}
+
+// ServerHandshake implements the server side ALTS handshaker.
+func (g *altsTC) ServerHandshake(rawConn net.Conn) (_ net.Conn, _ credentials.AuthInfo, err error) {
+	if !vmOnGCP {
+		return nil, nil, ErrUntrustedPlatform
+	}
+	// Connecting to ALTS handshaker service.
+	hsConn, err := service.Dial(g.hsAddress)
+	if err != nil {
+		return nil, nil, err
+	}
+	// Do not close hsConn since it's shared with other handshakes.
+
+	ctx, cancel := context.WithTimeout(context.Background(), defaultTimeout)
+	defer cancel()
+	opts := handshaker.DefaultServerHandshakerOptions()
+	opts.RPCVersions = &altspb.RpcProtocolVersions{
+		MaxRpcVersion: maxRPCVersion,
+		MinRpcVersion: minRPCVersion,
+	}
+	shs, err := handshaker.NewServerHandshaker(ctx, hsConn, rawConn, opts)
+	if err != nil {
+		return nil, nil, err
+	}
+	defer func() {
+		if err != nil {
+			shs.Close()
+		}
+	}()
+	secConn, authInfo, err := shs.ServerHandshake(ctx)
+	if err != nil {
+		return nil, nil, err
+	}
+	altsAuthInfo, ok := authInfo.(AuthInfo)
+	if !ok {
+		return nil, nil, errors.New("server-side auth info is not of type alts.AuthInfo")
+	}
+	match, _ := checkRPCVersions(opts.RPCVersions, altsAuthInfo.PeerRPCVersions())
+	if !match {
+		return nil, nil, fmt.Errorf("client-side RPC versions are not compatible with this server, local versions: %v, peer versions: %v", opts.RPCVersions, altsAuthInfo.PeerRPCVersions())
+	}
+	return secConn, authInfo, nil
+}
+
+func (g *altsTC) Info() credentials.ProtocolInfo {
+	return *g.info
+}
+
+func (g *altsTC) Clone() credentials.TransportCredentials {
+	info := *g.info
+	var accounts []string
+	if g.accounts != nil {
+		accounts = make([]string, len(g.accounts))
+		copy(accounts, g.accounts)
+	}
+	return &altsTC{
+		info:      &info,
+		side:      g.side,
+		hsAddress: g.hsAddress,
+		accounts:  accounts,
+	}
+}
+
+func (g *altsTC) OverrideServerName(serverNameOverride string) error {
+	g.info.ServerName = serverNameOverride
+	return nil
+}
+
+// compareRPCVersions returns 0 if v1 == v2, 1 if v1 > v2 and -1 if v1 < v2.
+func compareRPCVersions(v1, v2 *altspb.RpcProtocolVersions_Version) int {
+	switch {
+	case v1.GetMajor() > v2.GetMajor(),
+		v1.GetMajor() == v2.GetMajor() && v1.GetMinor() > v2.GetMinor():
+		return 1
+	case v1.GetMajor() < v2.GetMajor(),
+		v1.GetMajor() == v2.GetMajor() && v1.GetMinor() < v2.GetMinor():
+		return -1
+	}
+	return 0
+}
+
+// checkRPCVersions performs a version check between local and peer RPC protocol
+// versions. This function returns true if the check passes, which means both
+// parties agreed on a common RPC protocol to use, and false otherwise. The
+// function also returns the highest common RPC protocol version both parties
+// agreed on.
+func checkRPCVersions(local, peer *altspb.RpcProtocolVersions) (bool, *altspb.RpcProtocolVersions_Version) {
+	if local == nil || peer == nil {
+		grpclog.Error("invalid checkRPCVersions argument, either local or peer is nil.")
+		return false, nil
+	}
+
+	// maxCommonVersion is MIN(local.max, peer.max).
+	maxCommonVersion := local.GetMaxRpcVersion()
+	if compareRPCVersions(local.GetMaxRpcVersion(), peer.GetMaxRpcVersion()) > 0 {
+		maxCommonVersion = peer.GetMaxRpcVersion()
+	}
+
+	// minCommonVersion is MAX(local.min, peer.min).
+	minCommonVersion := peer.GetMinRpcVersion()
+	if compareRPCVersions(local.GetMinRpcVersion(), peer.GetMinRpcVersion()) > 0 {
+		minCommonVersion = local.GetMinRpcVersion()
+	}
+
+	if compareRPCVersions(maxCommonVersion, minCommonVersion) < 0 {
+		return false, nil
+	}
+	return true, maxCommonVersion
+}
diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/authinfo/authinfo.go b/vendor/google.golang.org/grpc/credentials/alts/internal/authinfo/authinfo.go
new file mode 100644
index 000000000..ed628dc7c
--- /dev/null
+++ b/vendor/google.golang.org/grpc/credentials/alts/internal/authinfo/authinfo.go
@@ -0,0 +1,87 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package authinfo provides authentication information returned by handshakers.
+package authinfo
+
+import (
+	"google.golang.org/grpc/credentials"
+	altspb "google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp"
+)
+
+var _ credentials.AuthInfo = (*altsAuthInfo)(nil)
+
+// altsAuthInfo exposes security information from the ALTS handshake to the
+// application. altsAuthInfo is immutable and implements credentials.AuthInfo.
+type altsAuthInfo struct {
+	p *altspb.AltsContext
+}
+
+// New returns a new altsAuthInfo object given handshaker results.
+func New(result *altspb.HandshakerResult) credentials.AuthInfo {
+	return newAuthInfo(result)
+}
+
+func newAuthInfo(result *altspb.HandshakerResult) *altsAuthInfo {
+	return &altsAuthInfo{
+		p: &altspb.AltsContext{
+			ApplicationProtocol: result.GetApplicationProtocol(),
+			RecordProtocol:      result.GetRecordProtocol(),
+			// TODO: assign security level from result.
+			SecurityLevel:       altspb.SecurityLevel_INTEGRITY_AND_PRIVACY,
+			PeerServiceAccount:  result.GetPeerIdentity().GetServiceAccount(),
+			LocalServiceAccount: result.GetLocalIdentity().GetServiceAccount(),
+			PeerRpcVersions:     result.GetPeerRpcVersions(),
+		},
+	}
+}
+
+// AuthType identifies the context as providing ALTS authentication information.
+func (s *altsAuthInfo) AuthType() string {
+	return "alts"
+}
+
+// ApplicationProtocol returns the context's application protocol.
+func (s *altsAuthInfo) ApplicationProtocol() string {
+	return s.p.GetApplicationProtocol()
+}
+
+// RecordProtocol returns the context's record protocol.
+func (s *altsAuthInfo) RecordProtocol() string {
+	return s.p.GetRecordProtocol()
+}
+
+// SecurityLevel returns the context's security level.
+func (s *altsAuthInfo) SecurityLevel() altspb.SecurityLevel {
+	return s.p.GetSecurityLevel()
+}
+
+// PeerServiceAccount returns the context's peer service account.
+func (s *altsAuthInfo) PeerServiceAccount() string {
+	return s.p.GetPeerServiceAccount()
+}
+
+// LocalServiceAccount returns the context's local service account.
+func (s *altsAuthInfo) LocalServiceAccount() string {
+	return s.p.GetLocalServiceAccount()
+}
+
+// PeerRPCVersions returns the context's peer RPC versions.
+func (s *altsAuthInfo) PeerRPCVersions() *altspb.RpcProtocolVersions {
+	return s.p.GetPeerRpcVersions()
+}
diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/common.go b/vendor/google.golang.org/grpc/credentials/alts/internal/common.go
new file mode 100644
index 000000000..33fba8123
--- /dev/null
+++ b/vendor/google.golang.org/grpc/credentials/alts/internal/common.go
@@ -0,0 +1,69 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+//go:generate ./regenerate.sh
+
+// Package internal contains common core functionality for ALTS.
+package internal
+
+import (
+	"context"
+	"net"
+
+	"google.golang.org/grpc/credentials"
+)
+
+const (
+	// ClientSide identifies the client in this communication.
+	ClientSide Side = iota
+	// ServerSide identifies the server in this communication.
+	ServerSide
+)
+
+// PeerNotRespondingError is returned when a peer server is not responding
+// after a channel has been established. It is treated as a temporary connection
+// error and re-connection to the server should be attempted.
+var PeerNotRespondingError = &peerNotRespondingError{}
+
+// Side identifies the party's role: client or server.
+type Side int
+
+type peerNotRespondingError struct{}
+
+// Error returns an error message for the purpose of logging.
+func (e *peerNotRespondingError) Error() string {
+	return "peer server is not responding and re-connection should be attempted."
+}
+
+// Temporary indicates if this connection error is temporary or fatal.
+func (e *peerNotRespondingError) Temporary() bool {
+	return true
+}
+
+// Handshaker defines an ALTS handshaker interface.
+type Handshaker interface {
+	// ClientHandshake starts and completes a client-side handshake and
+	// returns a secure connection and corresponding auth information.
+	ClientHandshake(ctx context.Context) (net.Conn, credentials.AuthInfo, error)
+	// ServerHandshake starts and completes a server-side handshake and
+	// returns a secure connection and corresponding auth information.
+	ServerHandshake(ctx context.Context) (net.Conn, credentials.AuthInfo, error)
+	// Close terminates the Handshaker. It should be called when the caller
+	// obtains the secure connection.
+	Close()
+}
diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aeadrekey.go b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aeadrekey.go
new file mode 100644
index 000000000..43726e877
--- /dev/null
+++ b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aeadrekey.go
@@ -0,0 +1,131 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package conn
+
+import (
+	"bytes"
+	"crypto/aes"
+	"crypto/cipher"
+	"crypto/hmac"
+	"crypto/sha256"
+	"encoding/binary"
+	"fmt"
+	"strconv"
+)
+
+// rekeyAEAD holds the necessary information for an AEAD based on
+// AES-GCM that performs nonce-based key derivation and XORs the
+// nonce with a random mask.
+type rekeyAEAD struct {
+	kdfKey     []byte
+	kdfCounter []byte
+	nonceMask  []byte
+	nonceBuf   []byte
+	gcmAEAD    cipher.AEAD
+}
+
+// KeySizeError signals that the given key does not have the correct size.
+type KeySizeError int
+
+func (k KeySizeError) Error() string {
+	return "alts/conn: invalid key size " + strconv.Itoa(int(k))
+}
+
+// newRekeyAEAD creates a new instance of aes128gcm with rekeying.
+// The key argument should be 44 bytes; the first 32 bytes are used as a key
+// for HKDF-expand and the remaining 12 bytes are used as a random mask for
+// the counter.
+func newRekeyAEAD(key []byte) (*rekeyAEAD, error) {
+	k := len(key)
+	if k != kdfKeyLen+nonceLen {
+		return nil, KeySizeError(k)
+	}
+	return &rekeyAEAD{
+		kdfKey:     key[:kdfKeyLen],
+		kdfCounter: make([]byte, kdfCounterLen),
+		nonceMask:  key[kdfKeyLen:],
+		nonceBuf:   make([]byte, nonceLen),
+		gcmAEAD:    nil,
+	}, nil
+}
+
+// Seal rekeys if nonce[2:8] is different than in the last call, masks the nonce,
+// and calls Seal for aes128gcm.
+func (s *rekeyAEAD) Seal(dst, nonce, plaintext, additionalData []byte) []byte {
+	if err := s.rekeyIfRequired(nonce); err != nil {
+		panic(fmt.Sprintf("Rekeying failed with: %s", err.Error()))
+	}
+	maskNonce(s.nonceBuf, nonce, s.nonceMask)
+	return s.gcmAEAD.Seal(dst, s.nonceBuf, plaintext, additionalData)
+}
+
+// Open rekeys if nonce[2:8] is different than in the last call, masks the nonce,
+// and calls Open for aes128gcm.
+func (s *rekeyAEAD) Open(dst, nonce, ciphertext, additionalData []byte) ([]byte, error) {
+	if err := s.rekeyIfRequired(nonce); err != nil {
+		return nil, err
+	}
+	maskNonce(s.nonceBuf, nonce, s.nonceMask)
+	return s.gcmAEAD.Open(dst, s.nonceBuf, ciphertext, additionalData)
+}
+
+// rekeyIfRequired creates a new aes128gcm AEAD if the existing AEAD is nil
+// or cannot be used with given nonce.
+func (s *rekeyAEAD) rekeyIfRequired(nonce []byte) error {
+	newKdfCounter := nonce[kdfCounterOffset : kdfCounterOffset+kdfCounterLen]
+	if s.gcmAEAD != nil && bytes.Equal(newKdfCounter, s.kdfCounter) {
+		return nil
+	}
+	copy(s.kdfCounter, newKdfCounter)
+	a, err := aes.NewCipher(hkdfExpand(s.kdfKey, s.kdfCounter))
+	if err != nil {
+		return err
+	}
+	s.gcmAEAD, err = cipher.NewGCM(a)
+	return err
+}
+
+// maskNonce XORs the given nonce with the mask and stores the result in dst.
+func maskNonce(dst, nonce, mask []byte) {
+	nonce1 := binary.LittleEndian.Uint64(nonce[:sizeUint64])
+	nonce2 := binary.LittleEndian.Uint32(nonce[sizeUint64:])
+	mask1 := binary.LittleEndian.Uint64(mask[:sizeUint64])
+	mask2 := binary.LittleEndian.Uint32(mask[sizeUint64:])
+	binary.LittleEndian.PutUint64(dst[:sizeUint64], nonce1^mask1)
+	binary.LittleEndian.PutUint32(dst[sizeUint64:], nonce2^mask2)
+}
+
+// NonceSize returns the required nonce size.
+func (s *rekeyAEAD) NonceSize() int { + return s.gcmAEAD.NonceSize() +} + +// Overhead returns the ciphertext overhead. +func (s *rekeyAEAD) Overhead() int { + return s.gcmAEAD.Overhead() +} + +// hkdfExpand computes the first 16 bytes of the HKDF-expand function +// defined in RFC5869. +func hkdfExpand(key, info []byte) []byte { + mac := hmac.New(sha256.New, key) + mac.Write(info) + mac.Write([]byte{0x01}[:]) + return mac.Sum(nil)[:aeadKeyLen] +} diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aes128gcm.go b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aes128gcm.go new file mode 100644 index 000000000..04e0adb6c --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aes128gcm.go @@ -0,0 +1,105 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package conn + +import ( + "crypto/aes" + "crypto/cipher" + + core "google.golang.org/grpc/credentials/alts/internal" +) + +const ( + // Overflow length n in bytes, never encrypt more than 2^(n*8) frames (in + // each direction). + overflowLenAES128GCM = 5 +) + +// aes128gcm is the struct that holds necessary information for ALTS record. +// The counter value is NOT included in the payload during the encryption and +// decryption operations. +type aes128gcm struct { + // inCounter is used in ALTS record to check that incoming counters are + // as expected, since ALTS record guarantees that messages are unwrapped + // in the same order that the peer wrapped them. + inCounter Counter + outCounter Counter + aead cipher.AEAD +} + +// NewAES128GCM creates an instance that uses aes128gcm for ALTS record. +func NewAES128GCM(side core.Side, key []byte) (ALTSRecordCrypto, error) { + c, err := aes.NewCipher(key) + if err != nil { + return nil, err + } + a, err := cipher.NewGCM(c) + if err != nil { + return nil, err + } + return &aes128gcm{ + inCounter: NewInCounter(side, overflowLenAES128GCM), + outCounter: NewOutCounter(side, overflowLenAES128GCM), + aead: a, + }, nil +} + +// Encrypt is the encryption function. dst can contain bytes at the beginning of +// the ciphertext that will not be encrypted but will be authenticated. If dst +// has enough capacity to hold these bytes, the ciphertext and the tag, no +// allocation and copy operations will be performed. dst and plaintext do not +// overlap. +func (s *aes128gcm) Encrypt(dst, plaintext []byte) ([]byte, error) { + // If we need to allocate an output buffer, we want to include space for + // GCM tag to avoid forcing ALTS record to reallocate as well. + dlen := len(dst) + dst, out := SliceForAppend(dst, len(plaintext)+GcmTagSize) + seq, err := s.outCounter.Value() + if err != nil { + return nil, err + } + data := out[:len(plaintext)] + copy(data, plaintext) // data may alias plaintext + + // Seal appends the ciphertext and the tag to its first argument and + // returns the updated slice. 
However, SliceForAppend above ensures that
+	// dst has enough capacity to avoid a reallocation and copy due to the
+	// append.
+	dst = s.aead.Seal(dst[:dlen], seq, data, nil)
+	s.outCounter.Inc()
+	return dst, nil
+}
+
+func (s *aes128gcm) EncryptionOverhead() int {
+	return GcmTagSize
+}
+
+func (s *aes128gcm) Decrypt(dst, ciphertext []byte) ([]byte, error) {
+	seq, err := s.inCounter.Value()
+	if err != nil {
+		return nil, err
+	}
+	// If dst is equal to ciphertext[:0], ciphertext storage is reused.
+	plaintext, err := s.aead.Open(dst, seq, ciphertext, nil)
+	if err != nil {
+		return nil, ErrAuth
+	}
+	s.inCounter.Inc()
+	return plaintext, nil
+}
diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aes128gcmrekey.go b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aes128gcmrekey.go
new file mode 100644
index 000000000..6a9035ea2
--- /dev/null
+++ b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aes128gcmrekey.go
@@ -0,0 +1,116 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package conn
+
+import (
+	"crypto/cipher"
+
+	core "google.golang.org/grpc/credentials/alts/internal"
+)
+
+const (
+	// Overflow length n in bytes, never encrypt more than 2^(n*8) frames (in
+	// each direction).
+	overflowLenAES128GCMRekey = 8
+	nonceLen                  = 12
+	aeadKeyLen                = 16
+	kdfKeyLen                 = 32
+	kdfCounterOffset          = 2
+	kdfCounterLen             = 6
+	sizeUint64                = 8
+)
+
+// aes128gcmRekey is the struct that holds necessary information for ALTS record.
+// The counter value is NOT included in the payload during the encryption and
+// decryption operations.
+type aes128gcmRekey struct {
+	// inCounter is used in ALTS record to check that incoming counters are
+	// as expected, since ALTS record guarantees that messages are unwrapped
+	// in the same order that the peer wrapped them.
+	inCounter  Counter
+	outCounter Counter
+	inAEAD     cipher.AEAD
+	outAEAD    cipher.AEAD
+}
+
+// NewAES128GCMRekey creates an instance that uses aes128gcm with rekeying
+// for ALTS record. The key argument should be 44 bytes; the first 32 bytes
+// are used as a key for HKDF-expand and the remaining 12 bytes are used
+// as a random mask for the counter.
+func NewAES128GCMRekey(side core.Side, key []byte) (ALTSRecordCrypto, error) {
+	inCounter := NewInCounter(side, overflowLenAES128GCMRekey)
+	outCounter := NewOutCounter(side, overflowLenAES128GCMRekey)
+	inAEAD, err := newRekeyAEAD(key)
+	if err != nil {
+		return nil, err
+	}
+	outAEAD, err := newRekeyAEAD(key)
+	if err != nil {
+		return nil, err
+	}
+	return &aes128gcmRekey{
+		inCounter,
+		outCounter,
+		inAEAD,
+		outAEAD,
+	}, nil
+}
+
+// Encrypt is the encryption function. dst can contain bytes at the beginning of
+// the ciphertext that will not be encrypted but will be authenticated. If dst
+// has enough capacity to hold these bytes, the ciphertext and the tag, no
+// allocation and copy operations will be performed. dst and plaintext do not
+// overlap.
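+//
+// A hedged usage sketch (hdrLen is an illustrative name, not defined in this
+// package): a caller can keep a header in front of the ciphertext and avoid a
+// reallocation by pre-sizing dst,
+//
+//	buf := make([]byte, hdrLen, hdrLen+len(plaintext)+GcmTagSize)
+//	out, err := crypto.Encrypt(buf, plaintext)
+//
+// where out[:hdrLen] still holds the original header bytes untouched.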
+func (s *aes128gcmRekey) Encrypt(dst, plaintext []byte) ([]byte, error) {
+	// If we need to allocate an output buffer, we want to include space for
+	// GCM tag to avoid forcing ALTS record to reallocate as well.
+	dlen := len(dst)
+	dst, out := SliceForAppend(dst, len(plaintext)+GcmTagSize)
+	seq, err := s.outCounter.Value()
+	if err != nil {
+		return nil, err
+	}
+	data := out[:len(plaintext)]
+	copy(data, plaintext) // data may alias plaintext
+
+	// Seal appends the ciphertext and the tag to its first argument and
+	// returns the updated slice. However, SliceForAppend above ensures that
+	// dst has enough capacity to avoid a reallocation and copy due to the
+	// append.
+	dst = s.outAEAD.Seal(dst[:dlen], seq, data, nil)
+	s.outCounter.Inc()
+	return dst, nil
+}
+
+func (s *aes128gcmRekey) EncryptionOverhead() int {
+	return GcmTagSize
+}
+
+func (s *aes128gcmRekey) Decrypt(dst, ciphertext []byte) ([]byte, error) {
+	seq, err := s.inCounter.Value()
+	if err != nil {
+		return nil, err
+	}
+	plaintext, err := s.inAEAD.Open(dst, seq, ciphertext, nil)
+	if err != nil {
+		return nil, ErrAuth
+	}
+	s.inCounter.Inc()
+	return plaintext, nil
+}
diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/conn/common.go b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/common.go
new file mode 100644
index 000000000..1795d0c9e
--- /dev/null
+++ b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/common.go
@@ -0,0 +1,70 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package conn
+
+import (
+	"encoding/binary"
+	"errors"
+	"fmt"
+)
+
+const (
+	// GcmTagSize is the GCM tag size, which is the difference in length
+	// between the plaintext and the ciphertext. From crypto/cipher/gcm.go
+	// in the Go crypto library.
+	GcmTagSize = 16
+)
+
+// ErrAuth occurs on authentication failure.
+var ErrAuth = errors.New("message authentication failed")
+
+// SliceForAppend takes a slice and a requested number of bytes. It returns a
+// slice with the contents of the given slice followed by that many bytes and a
+// second slice that aliases into it and contains only the extra bytes. If the
+// original slice has sufficient capacity then no allocation is performed.
+func SliceForAppend(in []byte, n int) (head, tail []byte) {
+	if total := len(in) + n; cap(in) >= total {
+		head = in[:total]
+	} else {
+		head = make([]byte, total)
+		copy(head, in)
+	}
+	tail = head[len(in):]
+	return head, tail
+}
+
+// ParseFramedMsg parses the provided buffer and returns a frame of the format
+// msgLength+msg and any remaining bytes in that buffer.
+func ParseFramedMsg(b []byte, maxLen uint32) ([]byte, []byte, error) {
+	// If the size field is not complete, return the provided buffer as
+	// remaining buffer.
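+	// As a worked illustration: a frame body of 7 bytes is preceded by the
+	// 4-byte little-endian length field 07 00 00 00, so the full frame
+	// occupies 11 bytes; everything after those 11 bytes is returned as the
+	// remaining buffer.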
+ if len(b) < MsgLenFieldSize { + return nil, b, nil + } + msgLenField := b[:MsgLenFieldSize] + length := binary.LittleEndian.Uint32(msgLenField) + if length > maxLen { + return nil, nil, fmt.Errorf("received the frame length %d larger than the limit %d", length, maxLen) + } + if len(b) < int(length)+4 { // account for the first 4 msg length bytes. + // Frame is not complete yet. + return nil, b, nil + } + return b[:MsgLenFieldSize+length], b[MsgLenFieldSize+length:], nil +} diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/conn/counter.go b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/counter.go new file mode 100644 index 000000000..9f00aca0b --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/counter.go @@ -0,0 +1,62 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package conn + +import ( + "errors" +) + +const counterLen = 12 + +var ( + errInvalidCounter = errors.New("invalid counter") +) + +// Counter is a 96-bit, little-endian counter. +type Counter struct { + value [counterLen]byte + invalid bool + overflowLen int +} + +// Value returns the current value of the counter as a byte slice. +func (c *Counter) Value() ([]byte, error) { + if c.invalid { + return nil, errInvalidCounter + } + return c.value[:], nil +} + +// Inc increments the counter and checks for overflow. +func (c *Counter) Inc() { + // If the counter is already invalid, there is no need to increase it. + if c.invalid { + return + } + i := 0 + for ; i < c.overflowLen; i++ { + c.value[i]++ + if c.value[i] != 0 { + break + } + } + if i == c.overflowLen { + c.invalid = true + } +} diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/conn/record.go b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/record.go new file mode 100644 index 000000000..fd5a53d9a --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/record.go @@ -0,0 +1,271 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package conn contains an implementation of a secure channel created by gRPC +// handshakers. +package conn + +import ( + "encoding/binary" + "fmt" + "math" + "net" + + core "google.golang.org/grpc/credentials/alts/internal" +) + +// ALTSRecordCrypto is the interface for gRPC ALTS record protocol. 
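+//
+// A minimal sketch of how the two directions pair up, assuming a 16-byte key
+// for the plain AES-128-GCM protocol (error handling elided):
+//
+//	client, _ := NewAES128GCM(core.ClientSide, key)
+//	server, _ := NewAES128GCM(core.ServerSide, key)
+//	ct, _ := client.Encrypt(nil, msg)   // advances the client out-counter
+//	pt, _ := server.Decrypt(ct[:0], ct) // matches the server in-counter
+//
+// This works because NewOutCounter on one side and NewInCounter on the other
+// start from the same sequence number.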
+type ALTSRecordCrypto interface {
+	// Encrypt encrypts the plaintext and computes the tag (if any) of dst
+	// and plaintext. dst and plaintext do not overlap.
+	Encrypt(dst, plaintext []byte) ([]byte, error)
+	// EncryptionOverhead returns the tag size (if any) in bytes.
+	EncryptionOverhead() int
+	// Decrypt decrypts ciphertext and verifies the tag (if any). dst and
+	// ciphertext may alias exactly or not at all. To reuse ciphertext's
+	// storage for the decrypted output, use ciphertext[:0] as dst.
+	Decrypt(dst, ciphertext []byte) ([]byte, error)
+}
+
+// ALTSRecordFunc is a function type for factory functions that create
+// ALTSRecordCrypto instances.
+type ALTSRecordFunc func(s core.Side, keyData []byte) (ALTSRecordCrypto, error)
+
+const (
+	// MsgLenFieldSize is the byte size of the frame length field of a
+	// framed message.
+	MsgLenFieldSize = 4
+	// The byte size of the message type field of a framed message.
+	msgTypeFieldSize = 4
+	// The byte size limit for an ALTS record message.
+	altsRecordLengthLimit = 1024 * 1024 // 1 MiB
+	// The default byte size of an ALTS record message.
+	altsRecordDefaultLength = 4 * 1024 // 4KiB
+	// Message type value included in ALTS record framing.
+	altsRecordMsgType = uint32(0x06)
+	// The initial write buffer size.
+	altsWriteBufferInitialSize = 32 * 1024 // 32KiB
+	// The maximum write buffer size. This *must* be multiple of
+	// altsRecordDefaultLength.
+	altsWriteBufferMaxSize = 512 * 1024 // 512KiB
+)
+
+var (
+	protocols = make(map[string]ALTSRecordFunc)
+)
+
+// RegisterProtocol registers an ALTS record encryption protocol.
+func RegisterProtocol(protocol string, f ALTSRecordFunc) error {
+	if _, ok := protocols[protocol]; ok {
+		return fmt.Errorf("protocol %v is already registered", protocol)
+	}
+	protocols[protocol] = f
+	return nil
+}
+
+// conn represents a secured connection. It implements the net.Conn interface.
+type conn struct {
+	net.Conn
+	crypto ALTSRecordCrypto
+	// buf holds data that has been read from the connection and decrypted,
+	// but has not yet been returned by Read.
+	buf                []byte
+	payloadLengthLimit int
+	// protected holds data read from the network but has not yet been
+	// decrypted. This data might not compose a complete frame.
+	protected []byte
+	// writeBuf is a buffer used to contain encrypted frames before being
+	// written to the network.
+	writeBuf []byte
+	// nextFrame stores the next frame (in protected buffer) info.
+	nextFrame []byte
+	// overhead is the calculated overhead of each frame.
+	overhead int
+}
+
+// NewConn creates a new secure channel instance given the other party's role
+// and handshaking result.
+func NewConn(c net.Conn, side core.Side, recordProtocol string, key []byte, protected []byte) (net.Conn, error) {
+	newCrypto := protocols[recordProtocol]
+	if newCrypto == nil {
+		return nil, fmt.Errorf("negotiated unknown next_protocol %q", recordProtocol)
+	}
+	crypto, err := newCrypto(side, key)
+	if err != nil {
+		return nil, fmt.Errorf("protocol %q: %v", recordProtocol, err)
+	}
+	overhead := MsgLenFieldSize + msgTypeFieldSize + crypto.EncryptionOverhead()
+	payloadLengthLimit := altsRecordDefaultLength - overhead
+	if protected == nil {
+		// We pre-allocate protected to be of size
+		// 2*altsRecordDefaultLength-1 during initialization. We only
+		// read from the network into protected when protected does not
+		// contain a complete frame, which is at most
+		// altsRecordDefaultLength-1 (bytes). And we read at most
+		// altsRecordDefaultLength (bytes) of data into protected at one
+		// time. Therefore, 2*altsRecordDefaultLength-1 is large enough
+		// to buffer data read from the network.
+		protected = make([]byte, 0, 2*altsRecordDefaultLength-1)
+	}
+
+	altsConn := &conn{
+		Conn:               c,
+		crypto:             crypto,
+		payloadLengthLimit: payloadLengthLimit,
+		protected:          protected,
+		writeBuf:           make([]byte, altsWriteBufferInitialSize),
+		nextFrame:          protected,
+		overhead:           overhead,
+	}
+	return altsConn, nil
+}
+
+// Read reads and decrypts a frame from the underlying connection, and copies the
+// decrypted payload into b. If the size of the payload is greater than len(b),
+// Read retains the remaining bytes in an internal buffer, and subsequent calls
+// to Read will read from this buffer until it is exhausted.
+func (p *conn) Read(b []byte) (n int, err error) {
+	if len(p.buf) == 0 {
+		var framedMsg []byte
+		framedMsg, p.nextFrame, err = ParseFramedMsg(p.nextFrame, altsRecordLengthLimit)
+		if err != nil {
+			return n, err
+		}
+		// Check whether the next frame to be decrypted has been
+		// completely received yet.
+		if len(framedMsg) == 0 {
+			copy(p.protected, p.nextFrame)
+			p.protected = p.protected[:len(p.nextFrame)]
+			// Always copy next incomplete frame to the beginning of
+			// the protected buffer and reset nextFrame to it.
+			p.nextFrame = p.protected
+		}
+		// Check whether a complete frame has been received yet.
+		for len(framedMsg) == 0 {
+			if len(p.protected) == cap(p.protected) {
+				tmp := make([]byte, len(p.protected), cap(p.protected)+altsRecordDefaultLength)
+				copy(tmp, p.protected)
+				p.protected = tmp
+			}
+			n, err = p.Conn.Read(p.protected[len(p.protected):min(cap(p.protected), len(p.protected)+altsRecordDefaultLength)])
+			if err != nil {
+				return 0, err
+			}
+			p.protected = p.protected[:len(p.protected)+n]
+			framedMsg, p.nextFrame, err = ParseFramedMsg(p.protected, altsRecordLengthLimit)
+			if err != nil {
+				return 0, err
+			}
+		}
+		// Now we have a complete frame; decrypt it.
+		msg := framedMsg[MsgLenFieldSize:]
+		msgType := binary.LittleEndian.Uint32(msg[:msgTypeFieldSize])
+		if msgType&0xff != altsRecordMsgType {
+			return 0, fmt.Errorf("received frame with incorrect message type %v, expected lower byte %v",
+				msgType, altsRecordMsgType)
+		}
+		ciphertext := msg[msgTypeFieldSize:]
+
+		// Decrypt requires that if the dst and ciphertext alias, they
+		// must alias exactly. Code here used to use msg[:0], but msg
+		// starts MsgLenFieldSize+msgTypeFieldSize bytes earlier than
+		// ciphertext, so they alias inexactly. Using ciphertext[:0]
+		// arranges the appropriate aliasing without needing to copy
+		// ciphertext or use a separate destination buffer. For more info
+		// check: https://golang.org/pkg/crypto/cipher/#AEAD.
+		p.buf, err = p.crypto.Decrypt(ciphertext[:0], ciphertext)
+		if err != nil {
+			return 0, err
+		}
+	}
+
+	n = copy(b, p.buf)
+	p.buf = p.buf[n:]
+	return n, nil
+}
+
+// Write encrypts, frames, and writes bytes from b to the underlying connection.
+func (p *conn) Write(b []byte) (n int, err error) {
+	n = len(b)
+	// Calculate the output buffer size with framing and encryption overhead.
+	numOfFrames := int(math.Ceil(float64(len(b)) / float64(p.payloadLengthLimit)))
+	size := len(b) + numOfFrames*p.overhead
+	// If writeBuf is too small, increase its size up to the maximum size.
+ partialBSize := len(b) + if size > altsWriteBufferMaxSize { + size = altsWriteBufferMaxSize + const numOfFramesInMaxWriteBuf = altsWriteBufferMaxSize / altsRecordDefaultLength + partialBSize = numOfFramesInMaxWriteBuf * p.payloadLengthLimit + } + if len(p.writeBuf) < size { + p.writeBuf = make([]byte, size) + } + + for partialBStart := 0; partialBStart < len(b); partialBStart += partialBSize { + partialBEnd := partialBStart + partialBSize + if partialBEnd > len(b) { + partialBEnd = len(b) + } + partialB := b[partialBStart:partialBEnd] + writeBufIndex := 0 + for len(partialB) > 0 { + payloadLen := len(partialB) + if payloadLen > p.payloadLengthLimit { + payloadLen = p.payloadLengthLimit + } + buf := partialB[:payloadLen] + partialB = partialB[payloadLen:] + + // Write buffer contains: length, type, payload, and tag + // if any. + + // 1. Fill in type field. + msg := p.writeBuf[writeBufIndex+MsgLenFieldSize:] + binary.LittleEndian.PutUint32(msg, altsRecordMsgType) + + // 2. Encrypt the payload and create a tag if any. + msg, err = p.crypto.Encrypt(msg[:msgTypeFieldSize], buf) + if err != nil { + return n, err + } + + // 3. Fill in the size field. + binary.LittleEndian.PutUint32(p.writeBuf[writeBufIndex:], uint32(len(msg))) + + // 4. Increase writeBufIndex. + writeBufIndex += len(buf) + p.overhead + } + nn, err := p.Conn.Write(p.writeBuf[:writeBufIndex]) + if err != nil { + // We need to calculate the actual data size that was + // written. This means we need to remove header, + // encryption overheads, and any partially-written + // frame data. + numOfWrittenFrames := int(math.Floor(float64(nn) / float64(altsRecordDefaultLength))) + return partialBStart + numOfWrittenFrames*p.payloadLengthLimit, err + } + } + return n, nil +} + +func min(a, b int) int { + if a < b { + return a + } + return b +} diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/conn/utils.go b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/utils.go new file mode 100644 index 000000000..84821fa25 --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/utils.go @@ -0,0 +1,63 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package conn + +import core "google.golang.org/grpc/credentials/alts/internal" + +// NewOutCounter returns an outgoing counter initialized to the starting sequence +// number for the client/server side of a connection. +func NewOutCounter(s core.Side, overflowLen int) (c Counter) { + c.overflowLen = overflowLen + if s == core.ServerSide { + // Server counters in ALTS record have the little-endian high bit + // set. + c.value[counterLen-1] = 0x80 + } + return +} + +// NewInCounter returns an incoming counter initialized to the starting sequence +// number for the client/server side of a connection. This is used in ALTS record +// to check that incoming counters are as expected, since ALTS record guarantees +// that messages are unwrapped in the same order that the peer wrapped them. 
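+//
+// For example, with counterLen = 12, a client's in-counter starts at the
+// 96-bit little-endian value whose most significant byte is 0x80 (the
+// server's starting sequence number), while its out-counter starts at all
+// zeros, so the two directions never share a sequence number.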
+func NewInCounter(s core.Side, overflowLen int) (c Counter) {
+	c.overflowLen = overflowLen
+	if s == core.ClientSide {
+		// Server counters in ALTS record have the little-endian high bit
+		// set.
+		c.value[counterLen-1] = 0x80
+	}
+	return
+}
+
+// CounterFromValue creates a new counter given an initial value.
+func CounterFromValue(value []byte, overflowLen int) (c Counter) {
+	c.overflowLen = overflowLen
+	copy(c.value[:], value)
+	return
+}
+
+// CounterSide returns the connection side (client/server) a sequence counter is
+// associated with.
+func CounterSide(c []byte) core.Side {
+	if c[counterLen-1]&0x80 == 0x80 {
+		return core.ServerSide
+	}
+	return core.ClientSide
+}
diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/handshaker.go b/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/handshaker.go
new file mode 100644
index 000000000..49c22c1e8
--- /dev/null
+++ b/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/handshaker.go
@@ -0,0 +1,365 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package handshaker provides ALTS handshaking functionality for GCP.
+package handshaker
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"io"
+	"net"
+	"sync"
+
+	grpc "google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/credentials"
+	core "google.golang.org/grpc/credentials/alts/internal"
+	"google.golang.org/grpc/credentials/alts/internal/authinfo"
+	"google.golang.org/grpc/credentials/alts/internal/conn"
+	altsgrpc "google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp"
+	altspb "google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp"
+)
+
+const (
+	// The maximum byte size of receive frames.
+	frameLimit              = 64 * 1024 // 64 KB
+	rekeyRecordProtocolName = "ALTSRP_GCM_AES128_REKEY"
+	// maxPendingHandshakes represents the maximum number of concurrent
+	// handshakes.
+	maxPendingHandshakes = 100
+)
+
+var (
+	hsProtocol      = altspb.HandshakeProtocol_ALTS
+	appProtocols    = []string{"grpc"}
+	recordProtocols = []string{rekeyRecordProtocolName}
+	keyLength       = map[string]int{
+		rekeyRecordProtocolName: 44,
+	}
+	altsRecordFuncs = map[string]conn.ALTSRecordFunc{
+		// ALTS handshaker protocols.
+		rekeyRecordProtocolName: func(s core.Side, keyData []byte) (conn.ALTSRecordCrypto, error) {
+			return conn.NewAES128GCMRekey(s, keyData)
+		},
+	}
+	// mu controls the number of concurrently created (but not closed)
+	// handshakers.
+	mu                   sync.Mutex
+	concurrentHandshakes = int64(0)
+	// errDropped occurs when maxPendingHandshakes is reached.
+
+func init() {
+    for protocol, f := range altsRecordFuncs {
+        if err := conn.RegisterProtocol(protocol, f); err != nil {
+            panic(err)
+        }
+    }
+}
+
+func acquire(n int64) bool {
+    mu.Lock()
+    success := maxPendingHandshakes-concurrentHandshakes >= n
+    if success {
+        concurrentHandshakes += n
+    }
+    mu.Unlock()
+    return success
+}
+
+func release(n int64) {
+    mu.Lock()
+    concurrentHandshakes -= n
+    if concurrentHandshakes < 0 {
+        mu.Unlock()
+        panic("bad release")
+    }
+    mu.Unlock()
+}
+
+// ClientHandshakerOptions contains the client handshaker options that can be
+// provided by the caller.
+type ClientHandshakerOptions struct {
+    // ClientIdentity is the handshaker client local identity.
+    ClientIdentity *altspb.Identity
+    // TargetName is the server service account name for secure name
+    // checking.
+    TargetName string
+    // TargetServiceAccounts contains a list of expected target service
+    // accounts. One of these accounts should match one of the accounts in
+    // the handshaker results. Otherwise, the handshake fails.
+    TargetServiceAccounts []string
+    // RPCVersions specifies the gRPC versions accepted by the client.
+    RPCVersions *altspb.RpcProtocolVersions
+}
+
+// ServerHandshakerOptions contains the server handshaker options that can be
+// provided by the caller.
+type ServerHandshakerOptions struct {
+    // RPCVersions specifies the gRPC versions accepted by the server.
+    RPCVersions *altspb.RpcProtocolVersions
+}
+
+// DefaultClientHandshakerOptions returns the default client handshaker options.
+func DefaultClientHandshakerOptions() *ClientHandshakerOptions {
+    return &ClientHandshakerOptions{}
+}
+
+// DefaultServerHandshakerOptions returns the default server handshaker options.
+func DefaultServerHandshakerOptions() *ServerHandshakerOptions {
+    return &ServerHandshakerOptions{}
+}
+
+// TODO: add support for local and remote endpoints in both client options and
+// server options (the server options struct does not carry endpoints yet; add
+// them once callers can provide endpoints).
+
+// altsHandshaker is used to complete an ALTS handshake between client and
+// server. This handshaker talks to the ALTS handshaker service in the metadata
+// server.
+type altsHandshaker struct {
+    // RPC stream used to access the ALTS Handshaker service.
+    stream altsgrpc.HandshakerService_DoHandshakeClient
+    // the connection to the peer.
+    conn net.Conn
+    // client handshake options.
+    clientOpts *ClientHandshakerOptions
+    // server handshake options.
+    serverOpts *ServerHandshakerOptions
+    // defines the side doing the handshake, client or server.
+    side core.Side
+}
+
+// NewClientHandshaker creates an ALTS handshaker for GCP which contains an RPC
+// stub created using the passed conn and used to talk to the ALTS Handshaker
+// service in the metadata server.
+func NewClientHandshaker(ctx context.Context, conn *grpc.ClientConn, c net.Conn, opts *ClientHandshakerOptions) (core.Handshaker, error) {
+    stream, err := altsgrpc.NewHandshakerServiceClient(conn).DoHandshake(ctx, grpc.WaitForReady(true))
+    if err != nil {
+        return nil, err
+    }
+    return &altsHandshaker{
+        stream:     stream,
+        conn:       c,
+        clientOpts: opts,
+        side:       core.ClientSide,
+    }, nil
+}
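+
+// exampleClientHandshakerSetup is an illustrative sketch, not upstream
+// code: it shows how a caller is expected to combine the options above
+// with NewClientHandshaker. The service-account string is a hypothetical
+// placeholder.
+func exampleClientHandshakerSetup(ctx context.Context, hsConn *grpc.ClientConn, rawConn net.Conn) (core.Handshaker, error) {
+    opts := DefaultClientHandshakerOptions()
+    // Restrict which server identities the handshake may authenticate.
+    opts.TargetServiceAccounts = []string{"server-identity@example.iam.gserviceaccount.com"}
+    return NewClientHandshaker(ctx, hsConn, rawConn, opts)
+}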
+
+// NewServerHandshaker creates an ALTS handshaker for GCP which contains an RPC
+// stub created using the passed conn and used to talk to the ALTS Handshaker
+// service in the metadata server.
+func NewServerHandshaker(ctx context.Context, conn *grpc.ClientConn, c net.Conn, opts *ServerHandshakerOptions) (core.Handshaker, error) {
+    stream, err := altsgrpc.NewHandshakerServiceClient(conn).DoHandshake(ctx, grpc.WaitForReady(true))
+    if err != nil {
+        return nil, err
+    }
+    return &altsHandshaker{
+        stream:     stream,
+        conn:       c,
+        serverOpts: opts,
+        side:       core.ServerSide,
+    }, nil
+}
+
+// ClientHandshake starts and completes a client-side ALTS handshake for GCP.
+// Once done, ClientHandshake returns a secure connection.
+func (h *altsHandshaker) ClientHandshake(ctx context.Context) (net.Conn, credentials.AuthInfo, error) {
+    if !acquire(1) {
+        return nil, nil, errDropped
+    }
+    defer release(1)
+
+    if h.side != core.ClientSide {
+        return nil, nil, errors.New("only handshakers created using NewClientHandshaker can perform a client handshake")
+    }
+
+    // Create target identities from the service account list.
+    targetIdentities := make([]*altspb.Identity, 0, len(h.clientOpts.TargetServiceAccounts))
+    for _, account := range h.clientOpts.TargetServiceAccounts {
+        targetIdentities = append(targetIdentities, &altspb.Identity{
+            IdentityOneof: &altspb.Identity_ServiceAccount{
+                ServiceAccount: account,
+            },
+        })
+    }
+    req := &altspb.HandshakerReq{
+        ReqOneof: &altspb.HandshakerReq_ClientStart{
+            ClientStart: &altspb.StartClientHandshakeReq{
+                HandshakeSecurityProtocol: hsProtocol,
+                ApplicationProtocols:      appProtocols,
+                RecordProtocols:           recordProtocols,
+                TargetIdentities:          targetIdentities,
+                LocalIdentity:             h.clientOpts.ClientIdentity,
+                TargetName:                h.clientOpts.TargetName,
+                RpcVersions:               h.clientOpts.RPCVersions,
+            },
+        },
+    }
+
+    conn, result, err := h.doHandshake(req)
+    if err != nil {
+        return nil, nil, err
+    }
+    authInfo := authinfo.New(result)
+    return conn, authInfo, nil
+}
+
+// ServerHandshake starts and completes a server-side ALTS handshake for GCP.
+// Once done, ServerHandshake returns a secure connection.
+func (h *altsHandshaker) ServerHandshake(ctx context.Context) (net.Conn, credentials.AuthInfo, error) {
+    if !acquire(1) {
+        return nil, nil, errDropped
+    }
+    defer release(1)
+
+    if h.side != core.ServerSide {
+        return nil, nil, errors.New("only handshakers created using NewServerHandshaker can perform a server handshake")
+    }
+
+    p := make([]byte, frameLimit)
+    n, err := h.conn.Read(p)
+    if err != nil {
+        return nil, nil, err
+    }
+
+    // Prepare server parameters.
+    // TODO: currently only ALTS parameters are provided. Might need to use
+    // more options in the future.
+    params := make(map[int32]*altspb.ServerHandshakeParameters)
+    params[int32(altspb.HandshakeProtocol_ALTS)] = &altspb.ServerHandshakeParameters{
+        RecordProtocols: recordProtocols,
+    }
+    req := &altspb.HandshakerReq{
+        ReqOneof: &altspb.HandshakerReq_ServerStart{
+            ServerStart: &altspb.StartServerHandshakeReq{
+                ApplicationProtocols: appProtocols,
+                HandshakeParameters:  params,
+                InBytes:              p[:n],
+                RpcVersions:          h.serverOpts.RPCVersions,
+            },
+        },
+    }
+
+    conn, result, err := h.doHandshake(req)
+    if err != nil {
+        return nil, nil, err
+    }
+    authInfo := authinfo.New(result)
+    return conn, authInfo, nil
+}
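+
+// exampleServerHandshake is an illustrative sketch, not upstream code: it
+// shows the intended lifecycle of a handshaker created by
+// NewServerHandshaker: run the handshake, then Close the handshaker
+// stream once a secure connection (or an error) has been obtained, per
+// the Close contract at the end of this file.
+func exampleServerHandshake(ctx context.Context, hs core.Handshaker) (net.Conn, credentials.AuthInfo, error) {
+    defer hs.Close()
+    return hs.ServerHandshake(ctx)
+}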
+
+func (h *altsHandshaker) doHandshake(req *altspb.HandshakerReq) (net.Conn, *altspb.HandshakerResult, error) {
+    resp, err := h.accessHandshakerService(req)
+    if err != nil {
+        return nil, nil, err
+    }
+    // Check whether the returned status is an error.
+    if resp.GetStatus() != nil {
+        if got, want := resp.GetStatus().Code, uint32(codes.OK); got != want {
+            return nil, nil, fmt.Errorf("%v", resp.GetStatus().Details)
+        }
+    }
+
+    var extra []byte
+    if req.GetServerStart() != nil {
+        extra = req.GetServerStart().GetInBytes()[resp.GetBytesConsumed():]
+    }
+    result, extra, err := h.processUntilDone(resp, extra)
+    if err != nil {
+        return nil, nil, err
+    }
+    // The handshaker returns a 128-byte key, which must be truncated based
+    // on the returned record protocol.
+    keyLen, ok := keyLength[result.RecordProtocol]
+    if !ok {
+        return nil, nil, fmt.Errorf("unknown record protocol %v in result", result.RecordProtocol)
+    }
+    sc, err := conn.NewConn(h.conn, h.side, result.GetRecordProtocol(), result.KeyData[:keyLen], extra)
+    if err != nil {
+        return nil, nil, err
+    }
+    return sc, result, nil
+}
+
+func (h *altsHandshaker) accessHandshakerService(req *altspb.HandshakerReq) (*altspb.HandshakerResp, error) {
+    if err := h.stream.Send(req); err != nil {
+        return nil, err
+    }
+    resp, err := h.stream.Recv()
+    if err != nil {
+        return nil, err
+    }
+    return resp, nil
+}
+
+// processUntilDone processes the handshake until the handshaker service
+// returns the results. The handshaker service takes care of frame parsing, so
+// we read whatever is received from the network and send it to the handshaker
+// service.
+func (h *altsHandshaker) processUntilDone(resp *altspb.HandshakerResp, extra []byte) (*altspb.HandshakerResult, []byte, error) {
+    for {
+        if len(resp.OutFrames) > 0 {
+            if _, err := h.conn.Write(resp.OutFrames); err != nil {
+                return nil, nil, err
+            }
+        }
+        if resp.Result != nil {
+            return resp.Result, extra, nil
+        }
+        buf := make([]byte, frameLimit)
+        n, err := h.conn.Read(buf)
+        if err != nil && err != io.EOF {
+            return nil, nil, err
+        }
+        // If there is nothing to send to the handshaker service, and
+        // nothing is received from the peer, then we are stuck.
+        // This covers the case when the peer is not responding. Note
+        // that handshaker service connection issues are caught in
+        // accessHandshakerService before we even get here.
+        if len(resp.OutFrames) == 0 && n == 0 {
+            return nil, nil, core.PeerNotRespondingError
+        }
+        // Prepend the extra bytes left over from the previous interaction
+        // with the handshaker service to the current buffer read from conn.
+        p := append(extra, buf[:n]...)
+        resp, err = h.accessHandshakerService(&altspb.HandshakerReq{
+            ReqOneof: &altspb.HandshakerReq_Next{
+                Next: &altspb.NextHandshakeMessageReq{
+                    InBytes: p,
+                },
+            },
+        })
+        if err != nil {
+            return nil, nil, err
+        }
+        // Set extra based on the handshaker service response.
+        if n == 0 {
+            extra = nil
+        } else {
+            extra = buf[resp.GetBytesConsumed():n]
+        }
+    }
+}
+
+// Close terminates the Handshaker. It should be called when the caller obtains
+// the secure connection.
+func (h *altsHandshaker) Close() {
+    h.stream.CloseSend()
+}
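+
+// End-to-end usage, as an illustrative comment-only sketch (not upstream
+// code; it would require importing the service package defined in the
+// next file, and the handshaker-service address below is an assumption):
+//
+//	hsConn, err := service.Dial("metadata.google.internal:8080")
+//	if err != nil { /* handle error */ }
+//	hs, err := NewClientHandshaker(ctx, hsConn, rawConn, DefaultClientHandshakerOptions())
+//	if err != nil { /* handle error */ }
+//	defer hs.Close()
+//	secureConn, authInfo, err := hs.ClientHandshake(ctx)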
diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/service/service.go b/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/service/service.go
new file mode 100644
index 000000000..0c7b56835
--- /dev/null
+++ b/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/service/service.go
@@ -0,0 +1,54 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package service manages connections between the VM application and the ALTS
+// handshaker service.
+package service
+
+import (
+    "sync"
+
+    grpc "google.golang.org/grpc"
+)
+
+var (
+    // hsConn represents a connection to the hypervisor handshaker service.
+    hsConn *grpc.ClientConn
+    mu     sync.Mutex
+    // hsDialer will be reassigned in tests.
+    hsDialer = grpc.Dial
+)
+
+// Dial dials the handshake service in the hypervisor. If a connection has
+// already been established, this function returns it. Otherwise, a new
+// connection is created.
+func Dial(hsAddress string) (*grpc.ClientConn, error) {
+    mu.Lock()
+    defer mu.Unlock()
+
+    if hsConn == nil {
+        // Create a new connection to the handshaker service. Note that
+        // this connection stays open until the application is closed.
+        var err error
+        hsConn, err = hsDialer(hsAddress, grpc.WithInsecure())
+        if err != nil {
+            return nil, err
+        }
+    }
+    return hsConn, nil
+}
diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go
new file mode 100644
index 000000000..d1793073d
--- /dev/null
+++ b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go
@@ -0,0 +1,151 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: grpc/gcp/altscontext.proto
+
+package grpc_gcp // import "google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp"
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+type AltsContext struct {
+    // The application protocol negotiated for this connection.
+    ApplicationProtocol string `protobuf:"bytes,1,opt,name=application_protocol,json=applicationProtocol,proto3" json:"application_protocol,omitempty"`
+    // The record protocol negotiated for this connection.
+    RecordProtocol string `protobuf:"bytes,2,opt,name=record_protocol,json=recordProtocol,proto3" json:"record_protocol,omitempty"`
+    // The security level of the created secure channel.
+    SecurityLevel SecurityLevel `protobuf:"varint,3,opt,name=security_level,json=securityLevel,proto3,enum=grpc.gcp.SecurityLevel" json:"security_level,omitempty"`
+    // The peer service account.
+    PeerServiceAccount string `protobuf:"bytes,4,opt,name=peer_service_account,json=peerServiceAccount,proto3" json:"peer_service_account,omitempty"`
+    // The local service account.
+    LocalServiceAccount string `protobuf:"bytes,5,opt,name=local_service_account,json=localServiceAccount,proto3" json:"local_service_account,omitempty"`
+    // The RPC protocol versions supported by the peer.
+ PeerRpcVersions *RpcProtocolVersions `protobuf:"bytes,6,opt,name=peer_rpc_versions,json=peerRpcVersions,proto3" json:"peer_rpc_versions,omitempty"` + // Additional attributes of the peer. + PeerAttributes map[string]string `protobuf:"bytes,7,rep,name=peer_attributes,json=peerAttributes,proto3" json:"peer_attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AltsContext) Reset() { *m = AltsContext{} } +func (m *AltsContext) String() string { return proto.CompactTextString(m) } +func (*AltsContext) ProtoMessage() {} +func (*AltsContext) Descriptor() ([]byte, []int) { + return fileDescriptor_altscontext_f6b7868f9a30497f, []int{0} +} +func (m *AltsContext) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AltsContext.Unmarshal(m, b) +} +func (m *AltsContext) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AltsContext.Marshal(b, m, deterministic) +} +func (dst *AltsContext) XXX_Merge(src proto.Message) { + xxx_messageInfo_AltsContext.Merge(dst, src) +} +func (m *AltsContext) XXX_Size() int { + return xxx_messageInfo_AltsContext.Size(m) +} +func (m *AltsContext) XXX_DiscardUnknown() { + xxx_messageInfo_AltsContext.DiscardUnknown(m) +} + +var xxx_messageInfo_AltsContext proto.InternalMessageInfo + +func (m *AltsContext) GetApplicationProtocol() string { + if m != nil { + return m.ApplicationProtocol + } + return "" +} + +func (m *AltsContext) GetRecordProtocol() string { + if m != nil { + return m.RecordProtocol + } + return "" +} + +func (m *AltsContext) GetSecurityLevel() SecurityLevel { + if m != nil { + return m.SecurityLevel + } + return SecurityLevel_SECURITY_NONE +} + +func (m *AltsContext) GetPeerServiceAccount() string { + if m != nil { + return m.PeerServiceAccount + } + return "" +} + +func (m *AltsContext) GetLocalServiceAccount() string { + if m != nil { + return m.LocalServiceAccount + } + return "" +} + +func (m *AltsContext) GetPeerRpcVersions() *RpcProtocolVersions { + if m != nil { + return m.PeerRpcVersions + } + return nil +} + +func (m *AltsContext) GetPeerAttributes() map[string]string { + if m != nil { + return m.PeerAttributes + } + return nil +} + +func init() { + proto.RegisterType((*AltsContext)(nil), "grpc.gcp.AltsContext") + proto.RegisterMapType((map[string]string)(nil), "grpc.gcp.AltsContext.PeerAttributesEntry") +} + +func init() { + proto.RegisterFile("grpc/gcp/altscontext.proto", fileDescriptor_altscontext_f6b7868f9a30497f) +} + +var fileDescriptor_altscontext_f6b7868f9a30497f = []byte{ + // 411 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x92, 0x4d, 0x6f, 0x13, 0x31, + 0x10, 0x86, 0xb5, 0x0d, 0x2d, 0xe0, 0x88, 0xb4, 0xb8, 0xa9, 0x58, 0x45, 0x42, 0x8a, 0xb8, 0xb0, + 0x5c, 0x76, 0x21, 0x5c, 0x10, 0x07, 0x50, 0x8a, 0x38, 0x20, 0x71, 0x88, 0xb6, 0x12, 0x07, 0x2e, + 0x2b, 0x77, 0x3a, 0xb2, 0x2c, 0x5c, 0x8f, 0x35, 0x76, 0x22, 0xf2, 0xb3, 0xf9, 0x07, 0x68, 0xed, + 0xcd, 0x07, 0x1f, 0xb7, 0x9d, 0x79, 0x9f, 0x19, 0xbf, 0xb3, 0x33, 0x62, 0xa6, 0xd9, 0x43, 0xa3, + 0xc1, 0x37, 0xca, 0xc6, 0x00, 0xe4, 0x22, 0xfe, 0x8c, 0xb5, 0x67, 0x8a, 0x24, 0x1f, 0xf5, 0x5a, + 0xad, 0xc1, 0xcf, 0xaa, 0x3d, 0x15, 0x59, 0xb9, 0xe0, 0x89, 0x63, 0x17, 0x10, 0xd6, 0x6c, 0xe2, + 0xb6, 0x03, 0xba, 0xbf, 0x27, 0x97, 0x6b, 0x5e, 0xfc, 0x1a, 0x89, 0xf1, 0xd2, 0xc6, 0xf0, 0x29, + 0x77, 0x92, 0x6f, 0xc4, 0x54, 
0x79, 0x6f, 0x0d, 0xa8, 0x68, 0xc8, 0x75, 0x09, 0x02, 0xb2, 0x65, + 0x31, 0x2f, 0xaa, 0xc7, 0xed, 0xe5, 0x91, 0xb6, 0x1a, 0x24, 0xf9, 0x52, 0x9c, 0x33, 0x02, 0xf1, + 0xdd, 0x81, 0x3e, 0x49, 0xf4, 0x24, 0xa7, 0xf7, 0xe0, 0x07, 0x31, 0xd9, 0x9b, 0xb0, 0xb8, 0x41, + 0x5b, 0x8e, 0xe6, 0x45, 0x35, 0x59, 0x3c, 0xab, 0x77, 0xc6, 0xeb, 0x9b, 0x41, 0xff, 0xda, 0xcb, + 0xed, 0x93, 0x70, 0x1c, 0xca, 0xd7, 0x62, 0xea, 0x11, 0xb9, 0x0b, 0xc8, 0x1b, 0x03, 0xd8, 0x29, + 0x00, 0x5a, 0xbb, 0x58, 0x3e, 0x48, 0xaf, 0xc9, 0x5e, 0xbb, 0xc9, 0xd2, 0x32, 0x2b, 0x72, 0x21, + 0xae, 0x2c, 0x81, 0xb2, 0xff, 0x94, 0x9c, 0xe6, 0x71, 0x92, 0xf8, 0x57, 0xcd, 0x17, 0xf1, 0x34, + 0xbd, 0xc2, 0x1e, 0xba, 0x0d, 0x72, 0x30, 0xe4, 0x42, 0x79, 0x36, 0x2f, 0xaa, 0xf1, 0xe2, 0xf9, + 0xc1, 0x68, 0xeb, 0x61, 0x37, 0xd7, 0xb7, 0x01, 0x6a, 0xcf, 0xfb, 0xba, 0xd6, 0xc3, 0x2e, 0x21, + 0x5b, 0x91, 0x52, 0x9d, 0x8a, 0x91, 0xcd, 0xed, 0x3a, 0x62, 0x28, 0x1f, 0xce, 0x47, 0xd5, 0x78, + 0xf1, 0xea, 0xd0, 0xe8, 0xe8, 0xe7, 0xd7, 0x2b, 0x44, 0x5e, 0xee, 0xd9, 0xcf, 0x2e, 0xf2, 0xb6, + 0x9d, 0xf8, 0x3f, 0x92, 0xb3, 0xa5, 0xb8, 0xfc, 0x0f, 0x26, 0x2f, 0xc4, 0xe8, 0x07, 0x6e, 0x87, + 0x35, 0xf5, 0x9f, 0x72, 0x2a, 0x4e, 0x37, 0xca, 0xae, 0x71, 0x58, 0x46, 0x0e, 0xde, 0x9f, 0xbc, + 0x2b, 0xae, 0xad, 0xb8, 0x32, 0x94, 0x1d, 0xf4, 0x47, 0x54, 0x1b, 0x17, 0x91, 0x9d, 0xb2, 0xd7, + 0x17, 0x47, 0x66, 0xd2, 0x74, 0xab, 0xe2, 0xfb, 0x47, 0x4d, 0xa4, 0x2d, 0xd6, 0x9a, 0xac, 0x72, + 0xba, 0x26, 0xd6, 0x4d, 0x3a, 0x2e, 0x60, 0xbc, 0x43, 0x17, 0x8d, 0xb2, 0x21, 0x9d, 0x62, 0xb3, + 0xeb, 0xd2, 0xa4, 0x2b, 0x48, 0x50, 0xa7, 0xc1, 0xdf, 0x9e, 0xa5, 0xf8, 0xed, 0xef, 0x00, 0x00, + 0x00, 0xff, 0xff, 0x9b, 0x8c, 0xe4, 0x6a, 0xba, 0x02, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go new file mode 100644 index 000000000..0c37ba2ab --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go @@ -0,0 +1,1196 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: grpc/gcp/handshaker.proto + +package grpc_gcp // import "google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type HandshakeProtocol int32 + +const ( + // Default value. + HandshakeProtocol_HANDSHAKE_PROTOCOL_UNSPECIFIED HandshakeProtocol = 0 + // TLS handshake protocol. + HandshakeProtocol_TLS HandshakeProtocol = 1 + // Application Layer Transport Security handshake protocol. 
+ HandshakeProtocol_ALTS HandshakeProtocol = 2 +) + +var HandshakeProtocol_name = map[int32]string{ + 0: "HANDSHAKE_PROTOCOL_UNSPECIFIED", + 1: "TLS", + 2: "ALTS", +} +var HandshakeProtocol_value = map[string]int32{ + "HANDSHAKE_PROTOCOL_UNSPECIFIED": 0, + "TLS": 1, + "ALTS": 2, +} + +func (x HandshakeProtocol) String() string { + return proto.EnumName(HandshakeProtocol_name, int32(x)) +} +func (HandshakeProtocol) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_handshaker_1dfe659b12ea825e, []int{0} +} + +type NetworkProtocol int32 + +const ( + NetworkProtocol_NETWORK_PROTOCOL_UNSPECIFIED NetworkProtocol = 0 + NetworkProtocol_TCP NetworkProtocol = 1 + NetworkProtocol_UDP NetworkProtocol = 2 +) + +var NetworkProtocol_name = map[int32]string{ + 0: "NETWORK_PROTOCOL_UNSPECIFIED", + 1: "TCP", + 2: "UDP", +} +var NetworkProtocol_value = map[string]int32{ + "NETWORK_PROTOCOL_UNSPECIFIED": 0, + "TCP": 1, + "UDP": 2, +} + +func (x NetworkProtocol) String() string { + return proto.EnumName(NetworkProtocol_name, int32(x)) +} +func (NetworkProtocol) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_handshaker_1dfe659b12ea825e, []int{1} +} + +type Endpoint struct { + // IP address. It should contain an IPv4 or IPv6 string literal, e.g. + // "192.168.0.1" or "2001:db8::1". + IpAddress string `protobuf:"bytes,1,opt,name=ip_address,json=ipAddress,proto3" json:"ip_address,omitempty"` + // Port number. + Port int32 `protobuf:"varint,2,opt,name=port,proto3" json:"port,omitempty"` + // Network protocol (e.g., TCP, UDP) associated with this endpoint. + Protocol NetworkProtocol `protobuf:"varint,3,opt,name=protocol,proto3,enum=grpc.gcp.NetworkProtocol" json:"protocol,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Endpoint) Reset() { *m = Endpoint{} } +func (m *Endpoint) String() string { return proto.CompactTextString(m) } +func (*Endpoint) ProtoMessage() {} +func (*Endpoint) Descriptor() ([]byte, []int) { + return fileDescriptor_handshaker_1dfe659b12ea825e, []int{0} +} +func (m *Endpoint) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Endpoint.Unmarshal(m, b) +} +func (m *Endpoint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Endpoint.Marshal(b, m, deterministic) +} +func (dst *Endpoint) XXX_Merge(src proto.Message) { + xxx_messageInfo_Endpoint.Merge(dst, src) +} +func (m *Endpoint) XXX_Size() int { + return xxx_messageInfo_Endpoint.Size(m) +} +func (m *Endpoint) XXX_DiscardUnknown() { + xxx_messageInfo_Endpoint.DiscardUnknown(m) +} + +var xxx_messageInfo_Endpoint proto.InternalMessageInfo + +func (m *Endpoint) GetIpAddress() string { + if m != nil { + return m.IpAddress + } + return "" +} + +func (m *Endpoint) GetPort() int32 { + if m != nil { + return m.Port + } + return 0 +} + +func (m *Endpoint) GetProtocol() NetworkProtocol { + if m != nil { + return m.Protocol + } + return NetworkProtocol_NETWORK_PROTOCOL_UNSPECIFIED +} + +type Identity struct { + // Types that are valid to be assigned to IdentityOneof: + // *Identity_ServiceAccount + // *Identity_Hostname + IdentityOneof isIdentity_IdentityOneof `protobuf_oneof:"identity_oneof"` + // Additional attributes of the identity. 
+ Attributes map[string]string `protobuf:"bytes,3,rep,name=attributes,proto3" json:"attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Identity) Reset() { *m = Identity{} } +func (m *Identity) String() string { return proto.CompactTextString(m) } +func (*Identity) ProtoMessage() {} +func (*Identity) Descriptor() ([]byte, []int) { + return fileDescriptor_handshaker_1dfe659b12ea825e, []int{1} +} +func (m *Identity) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Identity.Unmarshal(m, b) +} +func (m *Identity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Identity.Marshal(b, m, deterministic) +} +func (dst *Identity) XXX_Merge(src proto.Message) { + xxx_messageInfo_Identity.Merge(dst, src) +} +func (m *Identity) XXX_Size() int { + return xxx_messageInfo_Identity.Size(m) +} +func (m *Identity) XXX_DiscardUnknown() { + xxx_messageInfo_Identity.DiscardUnknown(m) +} + +var xxx_messageInfo_Identity proto.InternalMessageInfo + +type isIdentity_IdentityOneof interface { + isIdentity_IdentityOneof() +} + +type Identity_ServiceAccount struct { + ServiceAccount string `protobuf:"bytes,1,opt,name=service_account,json=serviceAccount,proto3,oneof"` +} + +type Identity_Hostname struct { + Hostname string `protobuf:"bytes,2,opt,name=hostname,proto3,oneof"` +} + +func (*Identity_ServiceAccount) isIdentity_IdentityOneof() {} + +func (*Identity_Hostname) isIdentity_IdentityOneof() {} + +func (m *Identity) GetIdentityOneof() isIdentity_IdentityOneof { + if m != nil { + return m.IdentityOneof + } + return nil +} + +func (m *Identity) GetServiceAccount() string { + if x, ok := m.GetIdentityOneof().(*Identity_ServiceAccount); ok { + return x.ServiceAccount + } + return "" +} + +func (m *Identity) GetHostname() string { + if x, ok := m.GetIdentityOneof().(*Identity_Hostname); ok { + return x.Hostname + } + return "" +} + +func (m *Identity) GetAttributes() map[string]string { + if m != nil { + return m.Attributes + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*Identity) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Identity_OneofMarshaler, _Identity_OneofUnmarshaler, _Identity_OneofSizer, []interface{}{ + (*Identity_ServiceAccount)(nil), + (*Identity_Hostname)(nil), + } +} + +func _Identity_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Identity) + // identity_oneof + switch x := m.IdentityOneof.(type) { + case *Identity_ServiceAccount: + b.EncodeVarint(1<<3 | proto.WireBytes) + b.EncodeStringBytes(x.ServiceAccount) + case *Identity_Hostname: + b.EncodeVarint(2<<3 | proto.WireBytes) + b.EncodeStringBytes(x.Hostname) + case nil: + default: + return fmt.Errorf("Identity.IdentityOneof has unexpected type %T", x) + } + return nil +} + +func _Identity_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Identity) + switch tag { + case 1: // identity_oneof.service_account + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.IdentityOneof = &Identity_ServiceAccount{x} + return true, err + case 2: // identity_oneof.hostname + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.IdentityOneof = &Identity_Hostname{x} + return true, err + default: + return false, nil + } +} + +func _Identity_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Identity) + // identity_oneof + switch x := m.IdentityOneof.(type) { + case *Identity_ServiceAccount: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(len(x.ServiceAccount))) + n += len(x.ServiceAccount) + case *Identity_Hostname: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(len(x.Hostname))) + n += len(x.Hostname) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type StartClientHandshakeReq struct { + // Handshake security protocol requested by the client. + HandshakeSecurityProtocol HandshakeProtocol `protobuf:"varint,1,opt,name=handshake_security_protocol,json=handshakeSecurityProtocol,proto3,enum=grpc.gcp.HandshakeProtocol" json:"handshake_security_protocol,omitempty"` + // The application protocols supported by the client, e.g., "h2" (for http2), + // "grpc". + ApplicationProtocols []string `protobuf:"bytes,2,rep,name=application_protocols,json=applicationProtocols,proto3" json:"application_protocols,omitempty"` + // The record protocols supported by the client, e.g., + // "ALTSRP_GCM_AES128". + RecordProtocols []string `protobuf:"bytes,3,rep,name=record_protocols,json=recordProtocols,proto3" json:"record_protocols,omitempty"` + // (Optional) Describes which server identities are acceptable by the client. + // If target identities are provided and none of them matches the peer + // identity of the server, handshake will fail. + TargetIdentities []*Identity `protobuf:"bytes,4,rep,name=target_identities,json=targetIdentities,proto3" json:"target_identities,omitempty"` + // (Optional) Application may specify a local identity. Otherwise, the + // handshaker chooses a default local identity. + LocalIdentity *Identity `protobuf:"bytes,5,opt,name=local_identity,json=localIdentity,proto3" json:"local_identity,omitempty"` + // (Optional) Local endpoint information of the connection to the server, + // such as local IP address, port number, and network protocol. 
+ LocalEndpoint *Endpoint `protobuf:"bytes,6,opt,name=local_endpoint,json=localEndpoint,proto3" json:"local_endpoint,omitempty"` + // (Optional) Endpoint information of the remote server, such as IP address, + // port number, and network protocol. + RemoteEndpoint *Endpoint `protobuf:"bytes,7,opt,name=remote_endpoint,json=remoteEndpoint,proto3" json:"remote_endpoint,omitempty"` + // (Optional) If target name is provided, a secure naming check is performed + // to verify that the peer authenticated identity is indeed authorized to run + // the target name. + TargetName string `protobuf:"bytes,8,opt,name=target_name,json=targetName,proto3" json:"target_name,omitempty"` + // (Optional) RPC protocol versions supported by the client. + RpcVersions *RpcProtocolVersions `protobuf:"bytes,9,opt,name=rpc_versions,json=rpcVersions,proto3" json:"rpc_versions,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StartClientHandshakeReq) Reset() { *m = StartClientHandshakeReq{} } +func (m *StartClientHandshakeReq) String() string { return proto.CompactTextString(m) } +func (*StartClientHandshakeReq) ProtoMessage() {} +func (*StartClientHandshakeReq) Descriptor() ([]byte, []int) { + return fileDescriptor_handshaker_1dfe659b12ea825e, []int{2} +} +func (m *StartClientHandshakeReq) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StartClientHandshakeReq.Unmarshal(m, b) +} +func (m *StartClientHandshakeReq) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StartClientHandshakeReq.Marshal(b, m, deterministic) +} +func (dst *StartClientHandshakeReq) XXX_Merge(src proto.Message) { + xxx_messageInfo_StartClientHandshakeReq.Merge(dst, src) +} +func (m *StartClientHandshakeReq) XXX_Size() int { + return xxx_messageInfo_StartClientHandshakeReq.Size(m) +} +func (m *StartClientHandshakeReq) XXX_DiscardUnknown() { + xxx_messageInfo_StartClientHandshakeReq.DiscardUnknown(m) +} + +var xxx_messageInfo_StartClientHandshakeReq proto.InternalMessageInfo + +func (m *StartClientHandshakeReq) GetHandshakeSecurityProtocol() HandshakeProtocol { + if m != nil { + return m.HandshakeSecurityProtocol + } + return HandshakeProtocol_HANDSHAKE_PROTOCOL_UNSPECIFIED +} + +func (m *StartClientHandshakeReq) GetApplicationProtocols() []string { + if m != nil { + return m.ApplicationProtocols + } + return nil +} + +func (m *StartClientHandshakeReq) GetRecordProtocols() []string { + if m != nil { + return m.RecordProtocols + } + return nil +} + +func (m *StartClientHandshakeReq) GetTargetIdentities() []*Identity { + if m != nil { + return m.TargetIdentities + } + return nil +} + +func (m *StartClientHandshakeReq) GetLocalIdentity() *Identity { + if m != nil { + return m.LocalIdentity + } + return nil +} + +func (m *StartClientHandshakeReq) GetLocalEndpoint() *Endpoint { + if m != nil { + return m.LocalEndpoint + } + return nil +} + +func (m *StartClientHandshakeReq) GetRemoteEndpoint() *Endpoint { + if m != nil { + return m.RemoteEndpoint + } + return nil +} + +func (m *StartClientHandshakeReq) GetTargetName() string { + if m != nil { + return m.TargetName + } + return "" +} + +func (m *StartClientHandshakeReq) GetRpcVersions() *RpcProtocolVersions { + if m != nil { + return m.RpcVersions + } + return nil +} + +type ServerHandshakeParameters struct { + // The record protocols supported by the server, e.g., + // "ALTSRP_GCM_AES128". 
+ RecordProtocols []string `protobuf:"bytes,1,rep,name=record_protocols,json=recordProtocols,proto3" json:"record_protocols,omitempty"` + // (Optional) A list of local identities supported by the server, if + // specified. Otherwise, the handshaker chooses a default local identity. + LocalIdentities []*Identity `protobuf:"bytes,2,rep,name=local_identities,json=localIdentities,proto3" json:"local_identities,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ServerHandshakeParameters) Reset() { *m = ServerHandshakeParameters{} } +func (m *ServerHandshakeParameters) String() string { return proto.CompactTextString(m) } +func (*ServerHandshakeParameters) ProtoMessage() {} +func (*ServerHandshakeParameters) Descriptor() ([]byte, []int) { + return fileDescriptor_handshaker_1dfe659b12ea825e, []int{3} +} +func (m *ServerHandshakeParameters) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ServerHandshakeParameters.Unmarshal(m, b) +} +func (m *ServerHandshakeParameters) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ServerHandshakeParameters.Marshal(b, m, deterministic) +} +func (dst *ServerHandshakeParameters) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServerHandshakeParameters.Merge(dst, src) +} +func (m *ServerHandshakeParameters) XXX_Size() int { + return xxx_messageInfo_ServerHandshakeParameters.Size(m) +} +func (m *ServerHandshakeParameters) XXX_DiscardUnknown() { + xxx_messageInfo_ServerHandshakeParameters.DiscardUnknown(m) +} + +var xxx_messageInfo_ServerHandshakeParameters proto.InternalMessageInfo + +func (m *ServerHandshakeParameters) GetRecordProtocols() []string { + if m != nil { + return m.RecordProtocols + } + return nil +} + +func (m *ServerHandshakeParameters) GetLocalIdentities() []*Identity { + if m != nil { + return m.LocalIdentities + } + return nil +} + +type StartServerHandshakeReq struct { + // The application protocols supported by the server, e.g., "h2" (for http2), + // "grpc". + ApplicationProtocols []string `protobuf:"bytes,1,rep,name=application_protocols,json=applicationProtocols,proto3" json:"application_protocols,omitempty"` + // Handshake parameters (record protocols and local identities supported by + // the server) mapped by the handshake protocol. Each handshake security + // protocol (e.g., TLS or ALTS) has its own set of record protocols and local + // identities. Since protobuf does not support enum as key to the map, the key + // to handshake_parameters is the integer value of HandshakeProtocol enum. + HandshakeParameters map[int32]*ServerHandshakeParameters `protobuf:"bytes,2,rep,name=handshake_parameters,json=handshakeParameters,proto3" json:"handshake_parameters,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Bytes in out_frames returned from the peer's HandshakerResp. It is possible + // that the peer's out_frames are split into multiple HandshakReq messages. + InBytes []byte `protobuf:"bytes,3,opt,name=in_bytes,json=inBytes,proto3" json:"in_bytes,omitempty"` + // (Optional) Local endpoint information of the connection to the client, + // such as local IP address, port number, and network protocol. + LocalEndpoint *Endpoint `protobuf:"bytes,4,opt,name=local_endpoint,json=localEndpoint,proto3" json:"local_endpoint,omitempty"` + // (Optional) Endpoint information of the remote client, such as IP address, + // port number, and network protocol. 
+ RemoteEndpoint *Endpoint `protobuf:"bytes,5,opt,name=remote_endpoint,json=remoteEndpoint,proto3" json:"remote_endpoint,omitempty"` + // (Optional) RPC protocol versions supported by the server. + RpcVersions *RpcProtocolVersions `protobuf:"bytes,6,opt,name=rpc_versions,json=rpcVersions,proto3" json:"rpc_versions,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StartServerHandshakeReq) Reset() { *m = StartServerHandshakeReq{} } +func (m *StartServerHandshakeReq) String() string { return proto.CompactTextString(m) } +func (*StartServerHandshakeReq) ProtoMessage() {} +func (*StartServerHandshakeReq) Descriptor() ([]byte, []int) { + return fileDescriptor_handshaker_1dfe659b12ea825e, []int{4} +} +func (m *StartServerHandshakeReq) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StartServerHandshakeReq.Unmarshal(m, b) +} +func (m *StartServerHandshakeReq) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StartServerHandshakeReq.Marshal(b, m, deterministic) +} +func (dst *StartServerHandshakeReq) XXX_Merge(src proto.Message) { + xxx_messageInfo_StartServerHandshakeReq.Merge(dst, src) +} +func (m *StartServerHandshakeReq) XXX_Size() int { + return xxx_messageInfo_StartServerHandshakeReq.Size(m) +} +func (m *StartServerHandshakeReq) XXX_DiscardUnknown() { + xxx_messageInfo_StartServerHandshakeReq.DiscardUnknown(m) +} + +var xxx_messageInfo_StartServerHandshakeReq proto.InternalMessageInfo + +func (m *StartServerHandshakeReq) GetApplicationProtocols() []string { + if m != nil { + return m.ApplicationProtocols + } + return nil +} + +func (m *StartServerHandshakeReq) GetHandshakeParameters() map[int32]*ServerHandshakeParameters { + if m != nil { + return m.HandshakeParameters + } + return nil +} + +func (m *StartServerHandshakeReq) GetInBytes() []byte { + if m != nil { + return m.InBytes + } + return nil +} + +func (m *StartServerHandshakeReq) GetLocalEndpoint() *Endpoint { + if m != nil { + return m.LocalEndpoint + } + return nil +} + +func (m *StartServerHandshakeReq) GetRemoteEndpoint() *Endpoint { + if m != nil { + return m.RemoteEndpoint + } + return nil +} + +func (m *StartServerHandshakeReq) GetRpcVersions() *RpcProtocolVersions { + if m != nil { + return m.RpcVersions + } + return nil +} + +type NextHandshakeMessageReq struct { + // Bytes in out_frames returned from the peer's HandshakerResp. It is possible + // that the peer's out_frames are split into multiple NextHandshakerMessageReq + // messages. 
+ InBytes []byte `protobuf:"bytes,1,opt,name=in_bytes,json=inBytes,proto3" json:"in_bytes,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NextHandshakeMessageReq) Reset() { *m = NextHandshakeMessageReq{} } +func (m *NextHandshakeMessageReq) String() string { return proto.CompactTextString(m) } +func (*NextHandshakeMessageReq) ProtoMessage() {} +func (*NextHandshakeMessageReq) Descriptor() ([]byte, []int) { + return fileDescriptor_handshaker_1dfe659b12ea825e, []int{5} +} +func (m *NextHandshakeMessageReq) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NextHandshakeMessageReq.Unmarshal(m, b) +} +func (m *NextHandshakeMessageReq) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NextHandshakeMessageReq.Marshal(b, m, deterministic) +} +func (dst *NextHandshakeMessageReq) XXX_Merge(src proto.Message) { + xxx_messageInfo_NextHandshakeMessageReq.Merge(dst, src) +} +func (m *NextHandshakeMessageReq) XXX_Size() int { + return xxx_messageInfo_NextHandshakeMessageReq.Size(m) +} +func (m *NextHandshakeMessageReq) XXX_DiscardUnknown() { + xxx_messageInfo_NextHandshakeMessageReq.DiscardUnknown(m) +} + +var xxx_messageInfo_NextHandshakeMessageReq proto.InternalMessageInfo + +func (m *NextHandshakeMessageReq) GetInBytes() []byte { + if m != nil { + return m.InBytes + } + return nil +} + +type HandshakerReq struct { + // Types that are valid to be assigned to ReqOneof: + // *HandshakerReq_ClientStart + // *HandshakerReq_ServerStart + // *HandshakerReq_Next + ReqOneof isHandshakerReq_ReqOneof `protobuf_oneof:"req_oneof"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *HandshakerReq) Reset() { *m = HandshakerReq{} } +func (m *HandshakerReq) String() string { return proto.CompactTextString(m) } +func (*HandshakerReq) ProtoMessage() {} +func (*HandshakerReq) Descriptor() ([]byte, []int) { + return fileDescriptor_handshaker_1dfe659b12ea825e, []int{6} +} +func (m *HandshakerReq) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_HandshakerReq.Unmarshal(m, b) +} +func (m *HandshakerReq) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_HandshakerReq.Marshal(b, m, deterministic) +} +func (dst *HandshakerReq) XXX_Merge(src proto.Message) { + xxx_messageInfo_HandshakerReq.Merge(dst, src) +} +func (m *HandshakerReq) XXX_Size() int { + return xxx_messageInfo_HandshakerReq.Size(m) +} +func (m *HandshakerReq) XXX_DiscardUnknown() { + xxx_messageInfo_HandshakerReq.DiscardUnknown(m) +} + +var xxx_messageInfo_HandshakerReq proto.InternalMessageInfo + +type isHandshakerReq_ReqOneof interface { + isHandshakerReq_ReqOneof() +} + +type HandshakerReq_ClientStart struct { + ClientStart *StartClientHandshakeReq `protobuf:"bytes,1,opt,name=client_start,json=clientStart,proto3,oneof"` +} + +type HandshakerReq_ServerStart struct { + ServerStart *StartServerHandshakeReq `protobuf:"bytes,2,opt,name=server_start,json=serverStart,proto3,oneof"` +} + +type HandshakerReq_Next struct { + Next *NextHandshakeMessageReq `protobuf:"bytes,3,opt,name=next,proto3,oneof"` +} + +func (*HandshakerReq_ClientStart) isHandshakerReq_ReqOneof() {} + +func (*HandshakerReq_ServerStart) isHandshakerReq_ReqOneof() {} + +func (*HandshakerReq_Next) isHandshakerReq_ReqOneof() {} + +func (m *HandshakerReq) GetReqOneof() isHandshakerReq_ReqOneof { + if m != nil { + return m.ReqOneof + } + return nil +} + +func 
(m *HandshakerReq) GetClientStart() *StartClientHandshakeReq { + if x, ok := m.GetReqOneof().(*HandshakerReq_ClientStart); ok { + return x.ClientStart + } + return nil +} + +func (m *HandshakerReq) GetServerStart() *StartServerHandshakeReq { + if x, ok := m.GetReqOneof().(*HandshakerReq_ServerStart); ok { + return x.ServerStart + } + return nil +} + +func (m *HandshakerReq) GetNext() *NextHandshakeMessageReq { + if x, ok := m.GetReqOneof().(*HandshakerReq_Next); ok { + return x.Next + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*HandshakerReq) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _HandshakerReq_OneofMarshaler, _HandshakerReq_OneofUnmarshaler, _HandshakerReq_OneofSizer, []interface{}{ + (*HandshakerReq_ClientStart)(nil), + (*HandshakerReq_ServerStart)(nil), + (*HandshakerReq_Next)(nil), + } +} + +func _HandshakerReq_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*HandshakerReq) + // req_oneof + switch x := m.ReqOneof.(type) { + case *HandshakerReq_ClientStart: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ClientStart); err != nil { + return err + } + case *HandshakerReq_ServerStart: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ServerStart); err != nil { + return err + } + case *HandshakerReq_Next: + b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Next); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("HandshakerReq.ReqOneof has unexpected type %T", x) + } + return nil +} + +func _HandshakerReq_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*HandshakerReq) + switch tag { + case 1: // req_oneof.client_start + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(StartClientHandshakeReq) + err := b.DecodeMessage(msg) + m.ReqOneof = &HandshakerReq_ClientStart{msg} + return true, err + case 2: // req_oneof.server_start + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(StartServerHandshakeReq) + err := b.DecodeMessage(msg) + m.ReqOneof = &HandshakerReq_ServerStart{msg} + return true, err + case 3: // req_oneof.next + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(NextHandshakeMessageReq) + err := b.DecodeMessage(msg) + m.ReqOneof = &HandshakerReq_Next{msg} + return true, err + default: + return false, nil + } +} + +func _HandshakerReq_OneofSizer(msg proto.Message) (n int) { + m := msg.(*HandshakerReq) + // req_oneof + switch x := m.ReqOneof.(type) { + case *HandshakerReq_ClientStart: + s := proto.Size(x.ClientStart) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *HandshakerReq_ServerStart: + s := proto.Size(x.ServerStart) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *HandshakerReq_Next: + s := proto.Size(x.Next) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type HandshakerResult struct { + // The application protocol negotiated for this connection. 
+ ApplicationProtocol string `protobuf:"bytes,1,opt,name=application_protocol,json=applicationProtocol,proto3" json:"application_protocol,omitempty"` + // The record protocol negotiated for this connection. + RecordProtocol string `protobuf:"bytes,2,opt,name=record_protocol,json=recordProtocol,proto3" json:"record_protocol,omitempty"` + // Cryptographic key data. The key data may be more than the key length + // required for the record protocol, thus the client of the handshaker + // service needs to truncate the key data into the right key length. + KeyData []byte `protobuf:"bytes,3,opt,name=key_data,json=keyData,proto3" json:"key_data,omitempty"` + // The authenticated identity of the peer. + PeerIdentity *Identity `protobuf:"bytes,4,opt,name=peer_identity,json=peerIdentity,proto3" json:"peer_identity,omitempty"` + // The local identity used in the handshake. + LocalIdentity *Identity `protobuf:"bytes,5,opt,name=local_identity,json=localIdentity,proto3" json:"local_identity,omitempty"` + // Indicate whether the handshaker service client should keep the channel + // between the handshaker service open, e.g., in order to handle + // post-handshake messages in the future. + KeepChannelOpen bool `protobuf:"varint,6,opt,name=keep_channel_open,json=keepChannelOpen,proto3" json:"keep_channel_open,omitempty"` + // The RPC protocol versions supported by the peer. + PeerRpcVersions *RpcProtocolVersions `protobuf:"bytes,7,opt,name=peer_rpc_versions,json=peerRpcVersions,proto3" json:"peer_rpc_versions,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *HandshakerResult) Reset() { *m = HandshakerResult{} } +func (m *HandshakerResult) String() string { return proto.CompactTextString(m) } +func (*HandshakerResult) ProtoMessage() {} +func (*HandshakerResult) Descriptor() ([]byte, []int) { + return fileDescriptor_handshaker_1dfe659b12ea825e, []int{7} +} +func (m *HandshakerResult) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_HandshakerResult.Unmarshal(m, b) +} +func (m *HandshakerResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_HandshakerResult.Marshal(b, m, deterministic) +} +func (dst *HandshakerResult) XXX_Merge(src proto.Message) { + xxx_messageInfo_HandshakerResult.Merge(dst, src) +} +func (m *HandshakerResult) XXX_Size() int { + return xxx_messageInfo_HandshakerResult.Size(m) +} +func (m *HandshakerResult) XXX_DiscardUnknown() { + xxx_messageInfo_HandshakerResult.DiscardUnknown(m) +} + +var xxx_messageInfo_HandshakerResult proto.InternalMessageInfo + +func (m *HandshakerResult) GetApplicationProtocol() string { + if m != nil { + return m.ApplicationProtocol + } + return "" +} + +func (m *HandshakerResult) GetRecordProtocol() string { + if m != nil { + return m.RecordProtocol + } + return "" +} + +func (m *HandshakerResult) GetKeyData() []byte { + if m != nil { + return m.KeyData + } + return nil +} + +func (m *HandshakerResult) GetPeerIdentity() *Identity { + if m != nil { + return m.PeerIdentity + } + return nil +} + +func (m *HandshakerResult) GetLocalIdentity() *Identity { + if m != nil { + return m.LocalIdentity + } + return nil +} + +func (m *HandshakerResult) GetKeepChannelOpen() bool { + if m != nil { + return m.KeepChannelOpen + } + return false +} + +func (m *HandshakerResult) GetPeerRpcVersions() *RpcProtocolVersions { + if m != nil { + return m.PeerRpcVersions + } + return nil +} + +type HandshakerStatus struct { + // The status code. 
This could be the gRPC status code. + Code uint32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` + // The status details. + Details string `protobuf:"bytes,2,opt,name=details,proto3" json:"details,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *HandshakerStatus) Reset() { *m = HandshakerStatus{} } +func (m *HandshakerStatus) String() string { return proto.CompactTextString(m) } +func (*HandshakerStatus) ProtoMessage() {} +func (*HandshakerStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_handshaker_1dfe659b12ea825e, []int{8} +} +func (m *HandshakerStatus) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_HandshakerStatus.Unmarshal(m, b) +} +func (m *HandshakerStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_HandshakerStatus.Marshal(b, m, deterministic) +} +func (dst *HandshakerStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_HandshakerStatus.Merge(dst, src) +} +func (m *HandshakerStatus) XXX_Size() int { + return xxx_messageInfo_HandshakerStatus.Size(m) +} +func (m *HandshakerStatus) XXX_DiscardUnknown() { + xxx_messageInfo_HandshakerStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_HandshakerStatus proto.InternalMessageInfo + +func (m *HandshakerStatus) GetCode() uint32 { + if m != nil { + return m.Code + } + return 0 +} + +func (m *HandshakerStatus) GetDetails() string { + if m != nil { + return m.Details + } + return "" +} + +type HandshakerResp struct { + // Frames to be given to the peer for the NextHandshakeMessageReq. May be + // empty if no out_frames have to be sent to the peer or if in_bytes in the + // HandshakerReq are incomplete. All the non-empty out frames must be sent to + // the peer even if the handshaker status is not OK as these frames may + // contain the alert frames. + OutFrames []byte `protobuf:"bytes,1,opt,name=out_frames,json=outFrames,proto3" json:"out_frames,omitempty"` + // Number of bytes in the in_bytes consumed by the handshaker. It is possible + // that part of in_bytes in HandshakerReq was unrelated to the handshake + // process. + BytesConsumed uint32 `protobuf:"varint,2,opt,name=bytes_consumed,json=bytesConsumed,proto3" json:"bytes_consumed,omitempty"` + // This is set iff the handshake was successful. out_frames may still be set + // to frames that needs to be forwarded to the peer. + Result *HandshakerResult `protobuf:"bytes,3,opt,name=result,proto3" json:"result,omitempty"` + // Status of the handshaker. 
+ Status *HandshakerStatus `protobuf:"bytes,4,opt,name=status,proto3" json:"status,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *HandshakerResp) Reset() { *m = HandshakerResp{} } +func (m *HandshakerResp) String() string { return proto.CompactTextString(m) } +func (*HandshakerResp) ProtoMessage() {} +func (*HandshakerResp) Descriptor() ([]byte, []int) { + return fileDescriptor_handshaker_1dfe659b12ea825e, []int{9} +} +func (m *HandshakerResp) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_HandshakerResp.Unmarshal(m, b) +} +func (m *HandshakerResp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_HandshakerResp.Marshal(b, m, deterministic) +} +func (dst *HandshakerResp) XXX_Merge(src proto.Message) { + xxx_messageInfo_HandshakerResp.Merge(dst, src) +} +func (m *HandshakerResp) XXX_Size() int { + return xxx_messageInfo_HandshakerResp.Size(m) +} +func (m *HandshakerResp) XXX_DiscardUnknown() { + xxx_messageInfo_HandshakerResp.DiscardUnknown(m) +} + +var xxx_messageInfo_HandshakerResp proto.InternalMessageInfo + +func (m *HandshakerResp) GetOutFrames() []byte { + if m != nil { + return m.OutFrames + } + return nil +} + +func (m *HandshakerResp) GetBytesConsumed() uint32 { + if m != nil { + return m.BytesConsumed + } + return 0 +} + +func (m *HandshakerResp) GetResult() *HandshakerResult { + if m != nil { + return m.Result + } + return nil +} + +func (m *HandshakerResp) GetStatus() *HandshakerStatus { + if m != nil { + return m.Status + } + return nil +} + +func init() { + proto.RegisterType((*Endpoint)(nil), "grpc.gcp.Endpoint") + proto.RegisterType((*Identity)(nil), "grpc.gcp.Identity") + proto.RegisterMapType((map[string]string)(nil), "grpc.gcp.Identity.AttributesEntry") + proto.RegisterType((*StartClientHandshakeReq)(nil), "grpc.gcp.StartClientHandshakeReq") + proto.RegisterType((*ServerHandshakeParameters)(nil), "grpc.gcp.ServerHandshakeParameters") + proto.RegisterType((*StartServerHandshakeReq)(nil), "grpc.gcp.StartServerHandshakeReq") + proto.RegisterMapType((map[int32]*ServerHandshakeParameters)(nil), "grpc.gcp.StartServerHandshakeReq.HandshakeParametersEntry") + proto.RegisterType((*NextHandshakeMessageReq)(nil), "grpc.gcp.NextHandshakeMessageReq") + proto.RegisterType((*HandshakerReq)(nil), "grpc.gcp.HandshakerReq") + proto.RegisterType((*HandshakerResult)(nil), "grpc.gcp.HandshakerResult") + proto.RegisterType((*HandshakerStatus)(nil), "grpc.gcp.HandshakerStatus") + proto.RegisterType((*HandshakerResp)(nil), "grpc.gcp.HandshakerResp") + proto.RegisterEnum("grpc.gcp.HandshakeProtocol", HandshakeProtocol_name, HandshakeProtocol_value) + proto.RegisterEnum("grpc.gcp.NetworkProtocol", NetworkProtocol_name, NetworkProtocol_value) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// HandshakerServiceClient is the client API for HandshakerService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type HandshakerServiceClient interface { + // Handshaker service accepts a stream of handshaker request, returning a + // stream of handshaker response. 
Client is expected to send exactly one + // message with either client_start or server_start followed by one or more + // messages with next. Each time client sends a request, the handshaker + // service expects to respond. Client does not have to wait for service's + // response before sending next request. + DoHandshake(ctx context.Context, opts ...grpc.CallOption) (HandshakerService_DoHandshakeClient, error) +} + +type handshakerServiceClient struct { + cc *grpc.ClientConn +} + +func NewHandshakerServiceClient(cc *grpc.ClientConn) HandshakerServiceClient { + return &handshakerServiceClient{cc} +} + +func (c *handshakerServiceClient) DoHandshake(ctx context.Context, opts ...grpc.CallOption) (HandshakerService_DoHandshakeClient, error) { + stream, err := c.cc.NewStream(ctx, &_HandshakerService_serviceDesc.Streams[0], "/grpc.gcp.HandshakerService/DoHandshake", opts...) + if err != nil { + return nil, err + } + x := &handshakerServiceDoHandshakeClient{stream} + return x, nil +} + +type HandshakerService_DoHandshakeClient interface { + Send(*HandshakerReq) error + Recv() (*HandshakerResp, error) + grpc.ClientStream +} + +type handshakerServiceDoHandshakeClient struct { + grpc.ClientStream +} + +func (x *handshakerServiceDoHandshakeClient) Send(m *HandshakerReq) error { + return x.ClientStream.SendMsg(m) +} + +func (x *handshakerServiceDoHandshakeClient) Recv() (*HandshakerResp, error) { + m := new(HandshakerResp) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// HandshakerServiceServer is the server API for HandshakerService service. +type HandshakerServiceServer interface { + // Handshaker service accepts a stream of handshaker request, returning a + // stream of handshaker response. Client is expected to send exactly one + // message with either client_start or server_start followed by one or more + // messages with next. Each time client sends a request, the handshaker + // service expects to respond. Client does not have to wait for service's + // response before sending next request. 
+ DoHandshake(HandshakerService_DoHandshakeServer) error +} + +func RegisterHandshakerServiceServer(s *grpc.Server, srv HandshakerServiceServer) { + s.RegisterService(&_HandshakerService_serviceDesc, srv) +} + +func _HandshakerService_DoHandshake_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(HandshakerServiceServer).DoHandshake(&handshakerServiceDoHandshakeServer{stream}) +} + +type HandshakerService_DoHandshakeServer interface { + Send(*HandshakerResp) error + Recv() (*HandshakerReq, error) + grpc.ServerStream +} + +type handshakerServiceDoHandshakeServer struct { + grpc.ServerStream +} + +func (x *handshakerServiceDoHandshakeServer) Send(m *HandshakerResp) error { + return x.ServerStream.SendMsg(m) +} + +func (x *handshakerServiceDoHandshakeServer) Recv() (*HandshakerReq, error) { + m := new(HandshakerReq) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +var _HandshakerService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "grpc.gcp.HandshakerService", + HandlerType: (*HandshakerServiceServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "DoHandshake", + Handler: _HandshakerService_DoHandshake_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "grpc/gcp/handshaker.proto", +} + +func init() { + proto.RegisterFile("grpc/gcp/handshaker.proto", fileDescriptor_handshaker_1dfe659b12ea825e) +} + +var fileDescriptor_handshaker_1dfe659b12ea825e = []byte{ + // 1168 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0xdf, 0x6e, 0x1a, 0xc7, + 0x17, 0xf6, 0x02, 0xb6, 0xf1, 0xc1, 0xfc, 0xf1, 0xc4, 0x51, 0xd6, 0x4e, 0xf2, 0xfb, 0x51, 0xaa, + 0xaa, 0x24, 0x17, 0xd0, 0x92, 0x56, 0x69, 0x52, 0x45, 0x09, 0x60, 0x2c, 0xdc, 0xa4, 0x18, 0x2d, + 0x4e, 0x2b, 0x35, 0x17, 0xab, 0xc9, 0x32, 0xc1, 0x2b, 0x96, 0x99, 0xf5, 0xcc, 0xe0, 0x86, 0x07, + 0xe8, 0xe3, 0xf4, 0x15, 0xfa, 0x36, 0x95, 0xfa, 0x00, 0xbd, 0x6f, 0xb5, 0xb3, 0xb3, 0x7f, 0xc0, + 0x10, 0x25, 0xea, 0xdd, 0xee, 0x99, 0xef, 0x3b, 0x7b, 0xe6, 0x3b, 0xdf, 0x9c, 0x1d, 0x38, 0x9a, + 0x70, 0xdf, 0x69, 0x4e, 0x1c, 0xbf, 0x79, 0x89, 0xe9, 0x58, 0x5c, 0xe2, 0x29, 0xe1, 0x0d, 0x9f, + 0x33, 0xc9, 0x50, 0x3e, 0x58, 0x6a, 0x4c, 0x1c, 0xff, 0xb8, 0x1e, 0x83, 0x24, 0xc7, 0x54, 0xf8, + 0x8c, 0x4b, 0x5b, 0x10, 0x67, 0xce, 0x5d, 0xb9, 0xb0, 0x1d, 0x36, 0x9b, 0x31, 0x1a, 0x72, 0x6a, + 0x12, 0xf2, 0x3d, 0x3a, 0xf6, 0x99, 0x4b, 0x25, 0xba, 0x0f, 0xe0, 0xfa, 0x36, 0x1e, 0x8f, 0x39, + 0x11, 0xc2, 0x34, 0xaa, 0x46, 0x7d, 0xcf, 0xda, 0x73, 0xfd, 0x76, 0x18, 0x40, 0x08, 0x72, 0x41, + 0x22, 0x33, 0x53, 0x35, 0xea, 0xdb, 0x96, 0x7a, 0x46, 0xdf, 0x42, 0x5e, 0xe5, 0x71, 0x98, 0x67, + 0x66, 0xab, 0x46, 0xbd, 0xd4, 0x3a, 0x6a, 0x44, 0x55, 0x34, 0x06, 0x44, 0xfe, 0xca, 0xf8, 0x74, + 0xa8, 0x01, 0x56, 0x0c, 0xad, 0xfd, 0x65, 0x40, 0xfe, 0x6c, 0x4c, 0xa8, 0x74, 0xe5, 0x02, 0x3d, + 0x80, 0xb2, 0x20, 0xfc, 0xda, 0x75, 0x88, 0x8d, 0x1d, 0x87, 0xcd, 0xa9, 0x0c, 0xbf, 0xdd, 0xdf, + 0xb2, 0x4a, 0x7a, 0xa1, 0x1d, 0xc6, 0xd1, 0x3d, 0xc8, 0x5f, 0x32, 0x21, 0x29, 0x9e, 0x11, 0x55, + 0x46, 0x80, 0x89, 0x23, 0xa8, 0x03, 0x80, 0xa5, 0xe4, 0xee, 0xdb, 0xb9, 0x24, 0xc2, 0xcc, 0x56, + 0xb3, 0xf5, 0x42, 0xab, 0x96, 0x94, 0x13, 0x7d, 0xb0, 0xd1, 0x8e, 0x41, 0x3d, 0x2a, 0xf9, 0xc2, + 0x4a, 0xb1, 0x8e, 0x9f, 0x41, 0x79, 0x65, 0x19, 0x55, 0x20, 0x3b, 0x25, 0x0b, 0xad, 0x47, 0xf0, + 0x88, 0x0e, 0x61, 0xfb, 0x1a, 0x7b, 0x73, 0x5d, 0x83, 0x15, 0xbe, 0x3c, 0xcd, 0x7c, 0x67, 0x74, + 0x2a, 0x50, 0x72, 0xf5, 0x67, 
0x6c, 0x46, 0x09, 0x7b, 0x57, 0xfb, 0x3d, 0x07, 0x77, 0x46, 0x12, + 0x73, 0xd9, 0xf5, 0x5c, 0x42, 0x65, 0x3f, 0x6a, 0x9a, 0x45, 0xae, 0xd0, 0x1b, 0xb8, 0x1b, 0x37, + 0x31, 0xe9, 0x4f, 0x2c, 0xa8, 0xa1, 0x04, 0xbd, 0x9b, 0xec, 0x20, 0x26, 0xc7, 0x92, 0x1e, 0xc5, + 0xfc, 0x91, 0xa6, 0x47, 0x4b, 0xe8, 0x11, 0xdc, 0xc6, 0xbe, 0xef, 0xb9, 0x0e, 0x96, 0x2e, 0xa3, + 0x71, 0x56, 0x61, 0x66, 0xaa, 0xd9, 0xfa, 0x9e, 0x75, 0x98, 0x5a, 0x8c, 0x38, 0x02, 0x3d, 0x80, + 0x0a, 0x27, 0x0e, 0xe3, 0xe3, 0x14, 0x3e, 0xab, 0xf0, 0xe5, 0x30, 0x9e, 0x40, 0x9f, 0xc3, 0x81, + 0xc4, 0x7c, 0x42, 0xa4, 0xad, 0x77, 0xec, 0x12, 0x61, 0xe6, 0x94, 0xe8, 0xe8, 0xa6, 0xe8, 0x56, + 0x25, 0x04, 0x9f, 0xc5, 0x58, 0xf4, 0x04, 0x4a, 0x1e, 0x73, 0xb0, 0x17, 0xf1, 0x17, 0xe6, 0x76, + 0xd5, 0xd8, 0xc0, 0x2e, 0x2a, 0x64, 0x6c, 0x99, 0x98, 0x4a, 0xb4, 0x77, 0xcd, 0x9d, 0x55, 0x6a, + 0xe4, 0x6a, 0x4d, 0x8d, 0x4d, 0xfe, 0x3d, 0x94, 0x39, 0x99, 0x31, 0x49, 0x12, 0xee, 0xee, 0x46, + 0x6e, 0x29, 0x84, 0xc6, 0xe4, 0xff, 0x43, 0x41, 0xef, 0x59, 0x59, 0x30, 0xaf, 0xda, 0x0f, 0x61, + 0x68, 0x10, 0x58, 0xf0, 0x05, 0xec, 0x73, 0xdf, 0xb1, 0xaf, 0x09, 0x17, 0x2e, 0xa3, 0xc2, 0xdc, + 0x53, 0xa9, 0xef, 0x27, 0xa9, 0x2d, 0xdf, 0x89, 0x24, 0xfc, 0x49, 0x83, 0xac, 0x02, 0xf7, 0x9d, + 0xe8, 0xa5, 0xf6, 0x9b, 0x01, 0x47, 0x23, 0xc2, 0xaf, 0x09, 0x4f, 0xba, 0x8d, 0x39, 0x9e, 0x11, + 0x49, 0xf8, 0xfa, 0xfe, 0x18, 0xeb, 0xfb, 0xf3, 0x0c, 0x2a, 0x4b, 0xf2, 0x06, 0xed, 0xc9, 0x6c, + 0x6c, 0x4f, 0x39, 0x2d, 0xb0, 0x4b, 0x44, 0xed, 0x9f, 0xac, 0xf6, 0xed, 0x4a, 0x31, 0x81, 0x6f, + 0x37, 0x5a, 0xcb, 0xf8, 0x80, 0xb5, 0x66, 0x70, 0x98, 0x98, 0xdd, 0x8f, 0xb7, 0xa4, 0x6b, 0x7a, + 0x9a, 0xd4, 0xb4, 0xe1, 0xab, 0x8d, 0x35, 0x7a, 0x84, 0xe7, 0xf7, 0xd6, 0xe5, 0x1a, 0xa5, 0x8e, + 0x20, 0xef, 0x52, 0xfb, 0xed, 0x22, 0x1c, 0x05, 0x46, 0x7d, 0xdf, 0xda, 0x75, 0x69, 0x27, 0x78, + 0x5d, 0xe3, 0x9e, 0xdc, 0x7f, 0x70, 0xcf, 0xf6, 0x47, 0xbb, 0x67, 0xd5, 0x1c, 0x3b, 0x9f, 0x6a, + 0x8e, 0xe3, 0x29, 0x98, 0x9b, 0x54, 0x48, 0x8f, 0xa9, 0xed, 0x70, 0x4c, 0x3d, 0x49, 0x8f, 0xa9, + 0x42, 0xeb, 0xf3, 0x94, 0xc4, 0x9b, 0x0c, 0x96, 0x9a, 0x65, 0xb5, 0x6f, 0xe0, 0xce, 0x80, 0xbc, + 0x4f, 0x26, 0xd6, 0x8f, 0x44, 0x08, 0x3c, 0x51, 0x06, 0x48, 0x8b, 0x6b, 0x2c, 0x89, 0x5b, 0xfb, + 0xd3, 0x80, 0x62, 0x4c, 0xe1, 0x01, 0xf8, 0x14, 0xf6, 0x1d, 0x35, 0xfb, 0x6c, 0x11, 0x74, 0x56, + 0x11, 0x0a, 0xad, 0xcf, 0x56, 0x1a, 0x7e, 0x73, 0x3c, 0xf6, 0xb7, 0xac, 0x42, 0x48, 0x54, 0x80, + 0x20, 0x8f, 0x50, 0x75, 0xeb, 0x3c, 0x99, 0xb5, 0x79, 0x6e, 0x1a, 0x27, 0xc8, 0x13, 0x12, 0xc3, + 0x3c, 0x8f, 0x21, 0x47, 0xc9, 0x7b, 0xa9, 0x5c, 0xb1, 0xc4, 0xdf, 0xb0, 0xdb, 0xfe, 0x96, 0xa5, + 0x08, 0x9d, 0x02, 0xec, 0x71, 0x72, 0xa5, 0xe7, 0xfa, 0xdf, 0x19, 0xa8, 0xa4, 0xf7, 0x29, 0xe6, + 0x9e, 0x44, 0x5f, 0xc3, 0xe1, 0xba, 0x83, 0xa1, 0xff, 0x1d, 0xb7, 0xd6, 0x9c, 0x0b, 0xf4, 0x25, + 0x94, 0x57, 0x4e, 0xb4, 0xfe, 0xab, 0x94, 0x96, 0x0f, 0x74, 0xa0, 0xf9, 0x94, 0x2c, 0xec, 0x31, + 0x96, 0x38, 0x32, 0xf4, 0x94, 0x2c, 0x4e, 0xb0, 0xc4, 0xe8, 0x31, 0x14, 0x7d, 0x42, 0x78, 0x32, + 0x48, 0x73, 0x1b, 0x07, 0xe9, 0x7e, 0x00, 0xbc, 0x39, 0x47, 0x3f, 0x7d, 0x04, 0x3f, 0x84, 0x83, + 0x29, 0x21, 0xbe, 0xed, 0x5c, 0x62, 0x4a, 0x89, 0x67, 0x33, 0x9f, 0x50, 0xe5, 0xe8, 0xbc, 0x55, + 0x0e, 0x16, 0xba, 0x61, 0xfc, 0xdc, 0x27, 0x14, 0x9d, 0xc1, 0x81, 0xaa, 0x6f, 0xc9, 0xfd, 0xbb, + 0x1f, 0xe3, 0xfe, 0x72, 0xc0, 0xb3, 0x52, 0xe3, 0xf1, 0x45, 0x5a, 0xf5, 0x91, 0xc4, 0x72, 0xae, + 0x2e, 0x26, 0x0e, 0x1b, 0x13, 0xa5, 0x72, 0xd1, 0x52, 0xcf, 0xc8, 0x84, 0xdd, 0x31, 0x91, 0xd8, + 0x55, 0xff, 0xbb, 0x40, 0xce, 0xe8, 0xb5, 0xf6, 0x87, 
0x01, 0xa5, 0xa5, 0xc6, 0xf9, 0xc1, 0xc5, + 0x87, 0xcd, 0xa5, 0xfd, 0x2e, 0x38, 0x05, 0x91, 0xa1, 0xf7, 0xd8, 0x5c, 0x9e, 0xaa, 0x00, 0xfa, + 0x02, 0x4a, 0xca, 0xea, 0xb6, 0xc3, 0xa8, 0x98, 0xcf, 0xc8, 0x58, 0xa5, 0x2c, 0x5a, 0x45, 0x15, + 0xed, 0xea, 0x20, 0x6a, 0xc1, 0x0e, 0x57, 0x36, 0xd0, 0xce, 0x3a, 0x5e, 0xf3, 0xe3, 0xd6, 0x46, + 0xb1, 0x34, 0x32, 0xe0, 0x08, 0xb5, 0x09, 0xdd, 0xb2, 0xb5, 0x9c, 0x70, 0x9b, 0x96, 0x46, 0x3e, + 0xfc, 0x01, 0x0e, 0x6e, 0x5c, 0x04, 0x50, 0x0d, 0xfe, 0xd7, 0x6f, 0x0f, 0x4e, 0x46, 0xfd, 0xf6, + 0xcb, 0x9e, 0x3d, 0xb4, 0xce, 0x2f, 0xce, 0xbb, 0xe7, 0xaf, 0xec, 0xd7, 0x83, 0xd1, 0xb0, 0xd7, + 0x3d, 0x3b, 0x3d, 0xeb, 0x9d, 0x54, 0xb6, 0xd0, 0x2e, 0x64, 0x2f, 0x5e, 0x8d, 0x2a, 0x06, 0xca, + 0x43, 0xae, 0xfd, 0xea, 0x62, 0x54, 0xc9, 0x3c, 0xec, 0x41, 0x79, 0xe5, 0x96, 0x86, 0xaa, 0x70, + 0x6f, 0xd0, 0xbb, 0xf8, 0xf9, 0xdc, 0x7a, 0xf9, 0xa1, 0x3c, 0xdd, 0x61, 0xc5, 0x08, 0x1e, 0x5e, + 0x9f, 0x0c, 0x2b, 0x99, 0xd6, 0x9b, 0x54, 0x49, 0x7c, 0x14, 0xde, 0xd9, 0xd0, 0x29, 0x14, 0x4e, + 0x58, 0x1c, 0x46, 0x77, 0xd6, 0xcb, 0x71, 0x75, 0x6c, 0x6e, 0xd0, 0xc9, 0xaf, 0x6d, 0xd5, 0x8d, + 0xaf, 0x8c, 0xce, 0x14, 0x6e, 0xbb, 0x2c, 0xc4, 0x60, 0x4f, 0x8a, 0x86, 0x4b, 0x25, 0xe1, 0x14, + 0x7b, 0x9d, 0x72, 0x02, 0x57, 0xd5, 0x0f, 0x8d, 0x5f, 0x9e, 0x4f, 0x18, 0x9b, 0x78, 0xa4, 0x31, + 0x61, 0x1e, 0xa6, 0x93, 0x06, 0xe3, 0x93, 0xa6, 0xba, 0x0a, 0x3b, 0x9c, 0x28, 0xe3, 0x62, 0x4f, + 0x34, 0x83, 0x24, 0xcd, 0x28, 0x49, 0x53, 0x9d, 0x3a, 0x05, 0xb2, 0x27, 0x8e, 0xff, 0x76, 0x47, + 0xbd, 0x3f, 0xfa, 0x37, 0x00, 0x00, 0xff, 0xff, 0x6e, 0x37, 0x34, 0x9b, 0x67, 0x0b, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go new file mode 100644 index 000000000..27510d4de --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go @@ -0,0 +1,178 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: grpc/gcp/transport_security_common.proto + +package grpc_gcp // import "google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// The security level of the created channel. The list is sorted in increasing +// level of security. This order must always be maintained. 
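+//
+// Because the numeric values are ordered by strength, levels can be compared
+// directly; for example (illustrative):
+//
+//	if level >= SecurityLevel_INTEGRITY_ONLY {
+//		// the channel provides at least integrity protection
+//	}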
+type SecurityLevel int32 + +const ( + SecurityLevel_SECURITY_NONE SecurityLevel = 0 + SecurityLevel_INTEGRITY_ONLY SecurityLevel = 1 + SecurityLevel_INTEGRITY_AND_PRIVACY SecurityLevel = 2 +) + +var SecurityLevel_name = map[int32]string{ + 0: "SECURITY_NONE", + 1: "INTEGRITY_ONLY", + 2: "INTEGRITY_AND_PRIVACY", +} +var SecurityLevel_value = map[string]int32{ + "SECURITY_NONE": 0, + "INTEGRITY_ONLY": 1, + "INTEGRITY_AND_PRIVACY": 2, +} + +func (x SecurityLevel) String() string { + return proto.EnumName(SecurityLevel_name, int32(x)) +} +func (SecurityLevel) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_transport_security_common_71945991f2c3b4a6, []int{0} +} + +// Max and min supported RPC protocol versions. +type RpcProtocolVersions struct { + // Maximum supported RPC version. + MaxRpcVersion *RpcProtocolVersions_Version `protobuf:"bytes,1,opt,name=max_rpc_version,json=maxRpcVersion,proto3" json:"max_rpc_version,omitempty"` + // Minimum supported RPC version. + MinRpcVersion *RpcProtocolVersions_Version `protobuf:"bytes,2,opt,name=min_rpc_version,json=minRpcVersion,proto3" json:"min_rpc_version,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RpcProtocolVersions) Reset() { *m = RpcProtocolVersions{} } +func (m *RpcProtocolVersions) String() string { return proto.CompactTextString(m) } +func (*RpcProtocolVersions) ProtoMessage() {} +func (*RpcProtocolVersions) Descriptor() ([]byte, []int) { + return fileDescriptor_transport_security_common_71945991f2c3b4a6, []int{0} +} +func (m *RpcProtocolVersions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RpcProtocolVersions.Unmarshal(m, b) +} +func (m *RpcProtocolVersions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RpcProtocolVersions.Marshal(b, m, deterministic) +} +func (dst *RpcProtocolVersions) XXX_Merge(src proto.Message) { + xxx_messageInfo_RpcProtocolVersions.Merge(dst, src) +} +func (m *RpcProtocolVersions) XXX_Size() int { + return xxx_messageInfo_RpcProtocolVersions.Size(m) +} +func (m *RpcProtocolVersions) XXX_DiscardUnknown() { + xxx_messageInfo_RpcProtocolVersions.DiscardUnknown(m) +} + +var xxx_messageInfo_RpcProtocolVersions proto.InternalMessageInfo + +func (m *RpcProtocolVersions) GetMaxRpcVersion() *RpcProtocolVersions_Version { + if m != nil { + return m.MaxRpcVersion + } + return nil +} + +func (m *RpcProtocolVersions) GetMinRpcVersion() *RpcProtocolVersions_Version { + if m != nil { + return m.MinRpcVersion + } + return nil +} + +// RPC version contains a major version and a minor version. 
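+//
+// For example (illustrative values), a supported range of 2.0 through 2.1
+// would be expressed as:
+//
+//	versions := &RpcProtocolVersions{
+//		MaxRpcVersion: &RpcProtocolVersions_Version{Major: 2, Minor: 1},
+//		MinRpcVersion: &RpcProtocolVersions_Version{Major: 2, Minor: 0},
+//	}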
+type RpcProtocolVersions_Version struct { + Major uint32 `protobuf:"varint,1,opt,name=major,proto3" json:"major,omitempty"` + Minor uint32 `protobuf:"varint,2,opt,name=minor,proto3" json:"minor,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RpcProtocolVersions_Version) Reset() { *m = RpcProtocolVersions_Version{} } +func (m *RpcProtocolVersions_Version) String() string { return proto.CompactTextString(m) } +func (*RpcProtocolVersions_Version) ProtoMessage() {} +func (*RpcProtocolVersions_Version) Descriptor() ([]byte, []int) { + return fileDescriptor_transport_security_common_71945991f2c3b4a6, []int{0, 0} +} +func (m *RpcProtocolVersions_Version) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RpcProtocolVersions_Version.Unmarshal(m, b) +} +func (m *RpcProtocolVersions_Version) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RpcProtocolVersions_Version.Marshal(b, m, deterministic) +} +func (dst *RpcProtocolVersions_Version) XXX_Merge(src proto.Message) { + xxx_messageInfo_RpcProtocolVersions_Version.Merge(dst, src) +} +func (m *RpcProtocolVersions_Version) XXX_Size() int { + return xxx_messageInfo_RpcProtocolVersions_Version.Size(m) +} +func (m *RpcProtocolVersions_Version) XXX_DiscardUnknown() { + xxx_messageInfo_RpcProtocolVersions_Version.DiscardUnknown(m) +} + +var xxx_messageInfo_RpcProtocolVersions_Version proto.InternalMessageInfo + +func (m *RpcProtocolVersions_Version) GetMajor() uint32 { + if m != nil { + return m.Major + } + return 0 +} + +func (m *RpcProtocolVersions_Version) GetMinor() uint32 { + if m != nil { + return m.Minor + } + return 0 +} + +func init() { + proto.RegisterType((*RpcProtocolVersions)(nil), "grpc.gcp.RpcProtocolVersions") + proto.RegisterType((*RpcProtocolVersions_Version)(nil), "grpc.gcp.RpcProtocolVersions.Version") + proto.RegisterEnum("grpc.gcp.SecurityLevel", SecurityLevel_name, SecurityLevel_value) +} + +func init() { + proto.RegisterFile("grpc/gcp/transport_security_common.proto", fileDescriptor_transport_security_common_71945991f2c3b4a6) +} + +var fileDescriptor_transport_security_common_71945991f2c3b4a6 = []byte{ + // 323 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x91, 0x41, 0x4b, 0x3b, 0x31, + 0x10, 0xc5, 0xff, 0x5b, 0xf8, 0xab, 0x44, 0x56, 0xeb, 0x6a, 0x41, 0xc5, 0x83, 0x08, 0x42, 0xf1, + 0x90, 0x05, 0xc5, 0xb3, 0xb4, 0xb5, 0x48, 0xa1, 0x6e, 0xeb, 0xb6, 0x16, 0xea, 0x25, 0xc4, 0x18, + 0x42, 0x24, 0x9b, 0x09, 0xb3, 0xb1, 0xd4, 0xaf, 0xec, 0xa7, 0x90, 0x4d, 0xbb, 0x14, 0xc1, 0x8b, + 0xb7, 0xbc, 0xc7, 0xcc, 0x6f, 0x32, 0xf3, 0x48, 0x5b, 0xa1, 0x13, 0xa9, 0x12, 0x2e, 0xf5, 0xc8, + 0x6d, 0xe9, 0x00, 0x3d, 0x2b, 0xa5, 0xf8, 0x40, 0xed, 0x3f, 0x99, 0x80, 0xa2, 0x00, 0x4b, 0x1d, + 0x82, 0x87, 0x64, 0xa7, 0xaa, 0xa4, 0x4a, 0xb8, 0x8b, 0xaf, 0x88, 0x1c, 0xe6, 0x4e, 0x8c, 0x2b, + 0x5b, 0x80, 0x99, 0x49, 0x2c, 0x35, 0xd8, 0x32, 0x79, 0x24, 0xfb, 0x05, 0x5f, 0x32, 0x74, 0x82, + 0x2d, 0x56, 0xde, 0x71, 0x74, 0x1e, 0xb5, 0x77, 0xaf, 0x2f, 0x69, 0xdd, 0x4b, 0x7f, 0xe9, 0xa3, + 0xeb, 0x47, 0x1e, 0x17, 0x7c, 0x99, 0x3b, 0xb1, 0x96, 0x01, 0xa7, 0xed, 0x0f, 0x5c, 0xe3, 0x6f, + 0x38, 0x6d, 0x37, 0xb8, 0xd3, 0x5b, 0xb2, 0x5d, 0x93, 0x8f, 0xc8, 0xff, 0x82, 0xbf, 0x03, 0x86, + 0xef, 0xc5, 0xf9, 0x4a, 0x04, 0x57, 0x5b, 0xc0, 0x30, 0xa5, 0x72, 0x2b, 0x71, 0xf5, 0x44, 0xe2, + 0xc9, 0xfa, 0x1e, 0x43, 0xb9, 0x90, 0x26, 0x39, 0x20, 0xf1, 0xa4, 0xdf, 0x7b, 0xce, 0x07, 0xd3, + 
0x39, 0xcb, 0x46, 0x59, 0xbf, 0xf9, 0x2f, 0x49, 0xc8, 0xde, 0x20, 0x9b, 0xf6, 0x1f, 0x82, 0x37, + 0xca, 0x86, 0xf3, 0x66, 0x94, 0x9c, 0x90, 0xd6, 0xc6, 0xeb, 0x64, 0xf7, 0x6c, 0x9c, 0x0f, 0x66, + 0x9d, 0xde, 0xbc, 0xd9, 0xe8, 0x2e, 0x49, 0x4b, 0xc3, 0x6a, 0x07, 0x6e, 0x7c, 0x49, 0xb5, 0xf5, + 0x12, 0x2d, 0x37, 0xdd, 0xb3, 0x69, 0x9d, 0x41, 0x3d, 0xb2, 0x17, 0x12, 0x08, 0x2b, 0x8e, 0xa3, + 0x97, 0x3b, 0x05, 0xa0, 0x8c, 0xa4, 0x0a, 0x0c, 0xb7, 0x8a, 0x02, 0xaa, 0x34, 0xc4, 0x27, 0x50, + 0xbe, 0x49, 0xeb, 0x35, 0x37, 0x65, 0x5a, 0x11, 0xd3, 0x9a, 0x98, 0x86, 0xe8, 0x42, 0x11, 0x53, + 0xc2, 0xbd, 0x6e, 0x05, 0x7d, 0xf3, 0x1d, 0x00, 0x00, 0xff, 0xff, 0x31, 0x14, 0xb4, 0x11, 0xf6, + 0x01, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/grpc/credentials/alts/utils.go b/vendor/google.golang.org/grpc/credentials/alts/utils.go new file mode 100644 index 000000000..4ed27c605 --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/alts/utils.go @@ -0,0 +1,141 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package alts + +import ( + "context" + "errors" + "fmt" + "io" + "io/ioutil" + "log" + "os" + "os/exec" + "regexp" + "runtime" + "strings" + + "google.golang.org/grpc/peer" +) + +const ( + linuxProductNameFile = "/sys/class/dmi/id/product_name" + windowsCheckCommand = "powershell.exe" + windowsCheckCommandArgs = "Get-WmiObject -Class Win32_BIOS" + powershellOutputFilter = "Manufacturer" + windowsManufacturerRegex = ":(.*)" +) + +type platformError string + +func (k platformError) Error() string { + return fmt.Sprintf("%s is not supported", string(k)) +} + +var ( + // The following two variables will be reassigned in tests. + runningOS = runtime.GOOS + manufacturerReader = func() (io.Reader, error) { + switch runningOS { + case "linux": + return os.Open(linuxProductNameFile) + case "windows": + cmd := exec.Command(windowsCheckCommand, windowsCheckCommandArgs) + out, err := cmd.Output() + if err != nil { + return nil, err + } + + for _, line := range strings.Split(strings.TrimSuffix(string(out), "\n"), "\n") { + if strings.HasPrefix(line, powershellOutputFilter) { + re := regexp.MustCompile(windowsManufacturerRegex) + name := re.FindString(line) + name = strings.TrimLeft(name, ":") + return strings.NewReader(name), nil + } + } + + return nil, errors.New("cannot determine the machine's manufacturer") + default: + return nil, platformError(runningOS) + } + } + vmOnGCP bool +) + +// isRunningOnGCP checks whether the local system, without doing a network request is +// running on GCP. 
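+//
+// It does so by reading the machine's manufacturer string (the DMI product
+// name file on Linux, Win32_BIOS via PowerShell on Windows) rather than by
+// querying the GCE metadata server.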
+func isRunningOnGCP() bool { + manufacturer, err := readManufacturer() + if err != nil { + log.Fatalf("failure to read manufacturer information: %v", err) + } + name := string(manufacturer) + switch runningOS { + case "linux": + name = strings.TrimSpace(name) + return name == "Google" || name == "Google Compute Engine" + case "windows": + name = strings.Replace(name, " ", "", -1) + name = strings.Replace(name, "\n", "", -1) + name = strings.Replace(name, "\r", "", -1) + return name == "Google" + default: + log.Fatal(platformError(runningOS)) + } + return false +} + +func readManufacturer() ([]byte, error) { + reader, err := manufacturerReader() + if err != nil { + return nil, err + } + if reader == nil { + return nil, errors.New("got nil reader") + } + manufacturer, err := ioutil.ReadAll(reader) + if err != nil { + return nil, fmt.Errorf("failed reading %v: %v", linuxProductNameFile, err) + } + return manufacturer, nil +} + +// AuthInfoFromContext extracts the alts.AuthInfo object from the given context, +// if it exists. This API should be used by gRPC server RPC handlers to get +// information about the communicating peer. For client-side, use grpc.Peer() +// CallOption. +func AuthInfoFromContext(ctx context.Context) (AuthInfo, error) { + p, ok := peer.FromContext(ctx) + if !ok { + return nil, errors.New("no Peer found in Context") + } + return AuthInfoFromPeer(p) +} + +// AuthInfoFromPeer extracts the alts.AuthInfo object from the given peer, if it +// exists. This API should be used by gRPC clients after obtaining a peer object +// using the grpc.Peer() CallOption. +func AuthInfoFromPeer(p *peer.Peer) (AuthInfo, error) { + altsAuthInfo, ok := p.AuthInfo.(AuthInfo) + if !ok { + return nil, errors.New("no alts.AuthInfo found in Peer") + } + return altsAuthInfo, nil +} diff --git a/vendor/google.golang.org/grpc/credentials/credentials.go b/vendor/google.golang.org/grpc/credentials/credentials.go new file mode 100644 index 000000000..8ea3d4a1d --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/credentials.go @@ -0,0 +1,336 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package credentials implements various credentials supported by gRPC library, +// which encapsulate all the state needed by a client to authenticate with a +// server and make various assertions, e.g., about the client's identity, role, +// or whether it is authorized to make a particular call. +package credentials // import "google.golang.org/grpc/credentials" + +import ( + "context" + "crypto/tls" + "crypto/x509" + "errors" + "fmt" + "io/ioutil" + "net" + "strings" + + "github.com/golang/protobuf/proto" + "google.golang.org/grpc/credentials/internal" +) + +// PerRPCCredentials defines the common interface for the credentials which need to +// attach security information to every RPC (e.g., oauth2). +type PerRPCCredentials interface { + // GetRequestMetadata gets the current request metadata, refreshing + // tokens if required. 
This should be called by the transport layer on + // each request, and the data should be populated in headers or other + // context. If a status code is returned, it will be used as the status + // for the RPC. uri is the URI of the entry point for the request. + // When supported by the underlying implementation, ctx can be used for + // timeout and cancellation. + // TODO(zhaoq): Define the set of the qualified keys instead of leaving + // it as an arbitrary string. + GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) + // RequireTransportSecurity indicates whether the credentials requires + // transport security. + RequireTransportSecurity() bool +} + +// ProtocolInfo provides information regarding the gRPC wire protocol version, +// security protocol, security protocol version in use, server name, etc. +type ProtocolInfo struct { + // ProtocolVersion is the gRPC wire protocol version. + ProtocolVersion string + // SecurityProtocol is the security protocol in use. + SecurityProtocol string + // SecurityVersion is the security protocol version. + SecurityVersion string + // ServerName is the user-configured server name. + ServerName string +} + +// AuthInfo defines the common interface for the auth information the users are interested in. +type AuthInfo interface { + AuthType() string +} + +// ErrConnDispatched indicates that rawConn has been dispatched out of gRPC +// and the caller should not close rawConn. +var ErrConnDispatched = errors.New("credentials: rawConn is dispatched out of gRPC") + +// TransportCredentials defines the common interface for all the live gRPC wire +// protocols and supported transport security protocols (e.g., TLS, SSL). +type TransportCredentials interface { + // ClientHandshake does the authentication handshake specified by the corresponding + // authentication protocol on rawConn for clients. It returns the authenticated + // connection and the corresponding auth information about the connection. + // Implementations must use the provided context to implement timely cancellation. + // gRPC will try to reconnect if the error returned is a temporary error + // (io.EOF, context.DeadlineExceeded or err.Temporary() == true). + // If the returned error is a wrapper error, implementations should make sure that + // the error implements Temporary() to have the correct retry behaviors. + // + // If the returned net.Conn is closed, it MUST close the net.Conn provided. + ClientHandshake(context.Context, string, net.Conn) (net.Conn, AuthInfo, error) + // ServerHandshake does the authentication handshake for servers. It returns + // the authenticated connection and the corresponding auth information about + // the connection. + // + // If the returned net.Conn is closed, it MUST close the net.Conn provided. + ServerHandshake(net.Conn) (net.Conn, AuthInfo, error) + // Info provides the ProtocolInfo of this TransportCredentials. + Info() ProtocolInfo + // Clone makes a copy of this TransportCredentials. + Clone() TransportCredentials + // OverrideServerName overrides the server name used to verify the hostname on the returned certificates from the server. + // gRPC internals also use it to override the virtual hosting name if it is set. + // It must be called before dialing. Currently, this is only used by grpclb. + OverrideServerName(string) error +} + +// Bundle is a combination of TransportCredentials and PerRPCCredentials. 
+// +// It also contains a mode switching method, so it can be used as a combination +// of different credential policies. +// +// Bundle cannot be used together with individual TransportCredentials. +// PerRPCCredentials from Bundle will be appended to other PerRPCCredentials. +// +// This API is experimental. +type Bundle interface { + TransportCredentials() TransportCredentials + PerRPCCredentials() PerRPCCredentials + // NewWithMode should make a copy of Bundle, and switch mode. Modifying the + // existing Bundle may cause races. + // + // NewWithMode returns nil if the requested mode is not supported. + NewWithMode(mode string) (Bundle, error) +} + +// TLSInfo contains the auth information for a TLS authenticated connection. +// It implements the AuthInfo interface. +type TLSInfo struct { + State tls.ConnectionState +} + +// AuthType returns the type of TLSInfo as a string. +func (t TLSInfo) AuthType() string { + return "tls" +} + +// GetSecurityValue returns security info requested by channelz. +func (t TLSInfo) GetSecurityValue() ChannelzSecurityValue { + v := &TLSChannelzSecurityValue{ + StandardName: cipherSuiteLookup[t.State.CipherSuite], + } + // Currently there's no way to get LocalCertificate info from tls package. + if len(t.State.PeerCertificates) > 0 { + v.RemoteCertificate = t.State.PeerCertificates[0].Raw + } + return v +} + +// tlsCreds is the credentials required for authenticating a connection using TLS. +type tlsCreds struct { + // TLS configuration + config *tls.Config +} + +func (c tlsCreds) Info() ProtocolInfo { + return ProtocolInfo{ + SecurityProtocol: "tls", + SecurityVersion: "1.2", + ServerName: c.config.ServerName, + } +} + +func (c *tlsCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (_ net.Conn, _ AuthInfo, err error) { + // use local cfg to avoid clobbering ServerName if using multiple endpoints + cfg := cloneTLSConfig(c.config) + if cfg.ServerName == "" { + colonPos := strings.LastIndex(authority, ":") + if colonPos == -1 { + colonPos = len(authority) + } + cfg.ServerName = authority[:colonPos] + } + conn := tls.Client(rawConn, cfg) + errChannel := make(chan error, 1) + go func() { + errChannel <- conn.Handshake() + }() + select { + case err := <-errChannel: + if err != nil { + return nil, nil, err + } + case <-ctx.Done(): + return nil, nil, ctx.Err() + } + return internal.WrapSyscallConn(rawConn, conn), TLSInfo{conn.ConnectionState()}, nil +} + +func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) { + conn := tls.Server(rawConn, c.config) + if err := conn.Handshake(); err != nil { + return nil, nil, err + } + return internal.WrapSyscallConn(rawConn, conn), TLSInfo{conn.ConnectionState()}, nil +} + +func (c *tlsCreds) Clone() TransportCredentials { + return NewTLS(c.config) +} + +func (c *tlsCreds) OverrideServerName(serverNameOverride string) error { + c.config.ServerName = serverNameOverride + return nil +} + +const alpnProtoStrH2 = "h2" + +func appendH2ToNextProtos(ps []string) []string { + for _, p := range ps { + if p == alpnProtoStrH2 { + return ps + } + } + ret := make([]string, 0, len(ps)+1) + ret = append(ret, ps...) + return append(ret, alpnProtoStrH2) +} + +// NewTLS uses c to construct a TransportCredentials based on TLS. +func NewTLS(c *tls.Config) TransportCredentials { + tc := &tlsCreds{cloneTLSConfig(c)} + tc.config.NextProtos = appendH2ToNextProtos(tc.config.NextProtos) + return tc +} + +// NewClientTLSFromCert constructs TLS credentials from the input certificate for client. 
+// serverNameOverride is for testing only. If set to a non empty string, +// it will override the virtual host name of authority (e.g. :authority header field) in requests. +func NewClientTLSFromCert(cp *x509.CertPool, serverNameOverride string) TransportCredentials { + return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp}) +} + +// NewClientTLSFromFile constructs TLS credentials from the input certificate file for client. +// serverNameOverride is for testing only. If set to a non empty string, +// it will override the virtual host name of authority (e.g. :authority header field) in requests. +func NewClientTLSFromFile(certFile, serverNameOverride string) (TransportCredentials, error) { + b, err := ioutil.ReadFile(certFile) + if err != nil { + return nil, err + } + cp := x509.NewCertPool() + if !cp.AppendCertsFromPEM(b) { + return nil, fmt.Errorf("credentials: failed to append certificates") + } + return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp}), nil +} + +// NewServerTLSFromCert constructs TLS credentials from the input certificate for server. +func NewServerTLSFromCert(cert *tls.Certificate) TransportCredentials { + return NewTLS(&tls.Config{Certificates: []tls.Certificate{*cert}}) +} + +// NewServerTLSFromFile constructs TLS credentials from the input certificate file and key +// file for server. +func NewServerTLSFromFile(certFile, keyFile string) (TransportCredentials, error) { + cert, err := tls.LoadX509KeyPair(certFile, keyFile) + if err != nil { + return nil, err + } + return NewTLS(&tls.Config{Certificates: []tls.Certificate{cert}}), nil +} + +// ChannelzSecurityInfo defines the interface that security protocols should implement +// in order to provide security info to channelz. +type ChannelzSecurityInfo interface { + GetSecurityValue() ChannelzSecurityValue +} + +// ChannelzSecurityValue defines the interface that GetSecurityValue() return value +// should satisfy. This interface should only be satisfied by *TLSChannelzSecurityValue +// and *OtherChannelzSecurityValue. +type ChannelzSecurityValue interface { + isChannelzSecurityValue() +} + +// TLSChannelzSecurityValue defines the struct that TLS protocol should return +// from GetSecurityValue(), containing security info like cipher and certificate used. +type TLSChannelzSecurityValue struct { + ChannelzSecurityValue + StandardName string + LocalCertificate []byte + RemoteCertificate []byte +} + +// OtherChannelzSecurityValue defines the struct that non-TLS protocol should return +// from GetSecurityValue(), which contains protocol specific security info. Note +// the Value field will be sent to users of channelz requesting channel info, and +// thus sensitive info should better be avoided. 
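+//
+// For example (illustrative; fooAuthInfo and fooMsg are hypothetical), a
+// non-TLS protocol's AuthInfo could report:
+//
+//	func (i fooAuthInfo) GetSecurityValue() ChannelzSecurityValue {
+//		return &OtherChannelzSecurityValue{Name: "foo", Value: fooMsg}
+//	}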
+type OtherChannelzSecurityValue struct { + ChannelzSecurityValue + Name string + Value proto.Message +} + +var cipherSuiteLookup = map[uint16]string{ + tls.TLS_RSA_WITH_RC4_128_SHA: "TLS_RSA_WITH_RC4_128_SHA", + tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA: "TLS_RSA_WITH_3DES_EDE_CBC_SHA", + tls.TLS_RSA_WITH_AES_128_CBC_SHA: "TLS_RSA_WITH_AES_128_CBC_SHA", + tls.TLS_RSA_WITH_AES_256_CBC_SHA: "TLS_RSA_WITH_AES_256_CBC_SHA", + tls.TLS_RSA_WITH_AES_128_GCM_SHA256: "TLS_RSA_WITH_AES_128_GCM_SHA256", + tls.TLS_RSA_WITH_AES_256_GCM_SHA384: "TLS_RSA_WITH_AES_256_GCM_SHA384", + tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA: "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA", + tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", + tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA: "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA", + tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA: "TLS_ECDHE_RSA_WITH_RC4_128_SHA", + tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA: "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA", + tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", + tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", + tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256: "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", + tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", + tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", + tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", + tls.TLS_FALLBACK_SCSV: "TLS_FALLBACK_SCSV", + tls.TLS_RSA_WITH_AES_128_CBC_SHA256: "TLS_RSA_WITH_AES_128_CBC_SHA256", + tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", + tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", + tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", + tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305", +} + +// cloneTLSConfig returns a shallow clone of the exported +// fields of cfg, ignoring the unexported sync.Once, which +// contains a mutex and must not be copied. +// +// If cfg is nil, a new zero tls.Config is returned. +// +// TODO: inline this function if possible. +func cloneTLSConfig(cfg *tls.Config) *tls.Config { + if cfg == nil { + return &tls.Config{} + } + + return cfg.Clone() +} diff --git a/vendor/google.golang.org/grpc/credentials/google/google.go b/vendor/google.golang.org/grpc/credentials/google/google.go new file mode 100644 index 000000000..04b349abc --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/google/google.go @@ -0,0 +1,125 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package google defines credentials for google cloud services. 
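+//
+// A typical use (illustrative; target is a placeholder) is to dial with the
+// returned bundle:
+//
+//	conn, err := grpc.Dial(target,
+//		grpc.WithCredentialsBundle(google.NewDefaultCredentials()))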
+package google + +import ( + "context" + "fmt" + "time" + + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/alts" + "google.golang.org/grpc/credentials/oauth" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal" +) + +const tokenRequestTimeout = 30 * time.Second + +// NewDefaultCredentials returns a credentials bundle that is configured to work +// with google services. +// +// This API is experimental. +func NewDefaultCredentials() credentials.Bundle { + c := &creds{ + newPerRPCCreds: func() credentials.PerRPCCredentials { + ctx, cancel := context.WithTimeout(context.Background(), tokenRequestTimeout) + defer cancel() + perRPCCreds, err := oauth.NewApplicationDefault(ctx) + if err != nil { + grpclog.Warningf("google default creds: failed to create application oauth: %v", err) + } + return perRPCCreds + }, + } + bundle, err := c.NewWithMode(internal.CredsBundleModeFallback) + if err != nil { + grpclog.Warningf("google default creds: failed to create new creds: %v", err) + } + return bundle +} + +// NewComputeEngineCredentials returns a credentials bundle that is configured to work +// with google services. This API must only be used when running on GCE. Authentication configured +// by this API represents the GCE VM's default service account. +// +// This API is experimental. +func NewComputeEngineCredentials() credentials.Bundle { + c := &creds{ + newPerRPCCreds: func() credentials.PerRPCCredentials { + return oauth.NewComputeEngine() + }, + } + bundle, err := c.NewWithMode(internal.CredsBundleModeFallback) + if err != nil { + grpclog.Warningf("compute engine creds: failed to create new creds: %v", err) + } + return bundle +} + +// creds implements credentials.Bundle. +type creds struct { + // Supported modes are defined in internal/internal.go. + mode string + // The transport credentials associated with this bundle. + transportCreds credentials.TransportCredentials + // The per RPC credentials associated with this bundle. + perRPCCreds credentials.PerRPCCredentials + // Creates new per RPC credentials + newPerRPCCreds func() credentials.PerRPCCredentials +} + +func (c *creds) TransportCredentials() credentials.TransportCredentials { + return c.transportCreds +} + +func (c *creds) PerRPCCredentials() credentials.PerRPCCredentials { + if c == nil { + return nil + } + return c.perRPCCreds +} + +// NewWithMode should make a copy of Bundle, and switch mode. Modifying the +// existing Bundle may cause races. +func (c *creds) NewWithMode(mode string) (credentials.Bundle, error) { + newCreds := &creds{ + mode: mode, + newPerRPCCreds: c.newPerRPCCreds, + } + + // Create transport credentials. + switch mode { + case internal.CredsBundleModeFallback: + newCreds.transportCreds = credentials.NewTLS(nil) + case internal.CredsBundleModeBackendFromBalancer, internal.CredsBundleModeBalancer: + // Only the clients can use google default credentials, so we only need + // to create new ALTS client creds here. 
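+ // ALTS client creds are only expected to succeed when the process
+ // runs on GCP (see isRunningOnGCP in the vendored alts package above).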
+ newCreds.transportCreds = alts.NewClientCreds(alts.DefaultClientOptions()) + default: + return nil, fmt.Errorf("unsupported mode: %v", mode) + } + + if mode == internal.CredsBundleModeFallback || mode == internal.CredsBundleModeBackendFromBalancer { + newCreds.perRPCCreds = newCreds.newPerRPCCreds() + } + + return newCreds, nil +} diff --git a/vendor/google.golang.org/grpc/credentials/internal/syscallconn.go b/vendor/google.golang.org/grpc/credentials/internal/syscallconn.go new file mode 100644 index 000000000..2f4472bec --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/internal/syscallconn.go @@ -0,0 +1,61 @@ +// +build !appengine + +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package internal contains credentials-internal code. +package internal + +import ( + "net" + "syscall" +) + +type sysConn = syscall.Conn + +// syscallConn keeps reference of rawConn to support syscall.Conn for channelz. +// SyscallConn() (the method in interface syscall.Conn) is explicitly +// implemented on this type, +// +// Interface syscall.Conn is implemented by most net.Conn implementations (e.g. +// TCPConn, UnixConn), but is not part of net.Conn interface. So wrapper conns +// that embed net.Conn don't implement syscall.Conn. (Side note: tls.Conn +// doesn't embed net.Conn, so even if syscall.Conn is part of net.Conn, it won't +// help here). +type syscallConn struct { + net.Conn + // sysConn is a type alias of syscall.Conn. It's necessary because the name + // `Conn` collides with `net.Conn`. + sysConn +} + +// WrapSyscallConn tries to wrap rawConn and newConn into a net.Conn that +// implements syscall.Conn. rawConn will be used to support syscall, and newConn +// will be used for read/write. +// +// This function returns newConn if rawConn doesn't implement syscall.Conn. +func WrapSyscallConn(rawConn, newConn net.Conn) net.Conn { + sysConn, ok := rawConn.(syscall.Conn) + if !ok { + return newConn + } + return &syscallConn{ + Conn: newConn, + sysConn: sysConn, + } +} diff --git a/vendor/google.golang.org/grpc/credentials/internal/syscallconn_appengine.go b/vendor/google.golang.org/grpc/credentials/internal/syscallconn_appengine.go new file mode 100644 index 000000000..d4346e9ea --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/internal/syscallconn_appengine.go @@ -0,0 +1,30 @@ +// +build appengine + +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package internal + +import ( + "net" +) + +// WrapSyscallConn returns newConn on appengine. +func WrapSyscallConn(rawConn, newConn net.Conn) net.Conn { + return newConn +} diff --git a/vendor/google.golang.org/grpc/credentials/oauth/oauth.go b/vendor/google.golang.org/grpc/credentials/oauth/oauth.go new file mode 100644 index 000000000..e0e74d815 --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/oauth/oauth.go @@ -0,0 +1,173 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package oauth implements gRPC credentials using OAuth. +package oauth + +import ( + "context" + "fmt" + "io/ioutil" + "sync" + + "golang.org/x/oauth2" + "golang.org/x/oauth2/google" + "golang.org/x/oauth2/jwt" + "google.golang.org/grpc/credentials" +) + +// TokenSource supplies PerRPCCredentials from an oauth2.TokenSource. +type TokenSource struct { + oauth2.TokenSource +} + +// GetRequestMetadata gets the request metadata as a map from a TokenSource. +func (ts TokenSource) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { + token, err := ts.Token() + if err != nil { + return nil, err + } + return map[string]string{ + "authorization": token.Type() + " " + token.AccessToken, + }, nil +} + +// RequireTransportSecurity indicates whether the credentials requires transport security. +func (ts TokenSource) RequireTransportSecurity() bool { + return true +} + +type jwtAccess struct { + jsonKey []byte +} + +// NewJWTAccessFromFile creates PerRPCCredentials from the given keyFile. +func NewJWTAccessFromFile(keyFile string) (credentials.PerRPCCredentials, error) { + jsonKey, err := ioutil.ReadFile(keyFile) + if err != nil { + return nil, fmt.Errorf("credentials: failed to read the service account key file: %v", err) + } + return NewJWTAccessFromKey(jsonKey) +} + +// NewJWTAccessFromKey creates PerRPCCredentials from the given jsonKey. +func NewJWTAccessFromKey(jsonKey []byte) (credentials.PerRPCCredentials, error) { + return jwtAccess{jsonKey}, nil +} + +func (j jwtAccess) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { + ts, err := google.JWTAccessTokenSourceFromJSON(j.jsonKey, uri[0]) + if err != nil { + return nil, err + } + token, err := ts.Token() + if err != nil { + return nil, err + } + return map[string]string{ + "authorization": token.Type() + " " + token.AccessToken, + }, nil +} + +func (j jwtAccess) RequireTransportSecurity() bool { + return true +} + +// oauthAccess supplies PerRPCCredentials from a given token. +type oauthAccess struct { + token oauth2.Token +} + +// NewOauthAccess constructs the PerRPCCredentials using a given token. 
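+//
+// For example (illustrative; target and the token value are placeholders):
+//
+//	creds := oauth.NewOauthAccess(&oauth2.Token{AccessToken: "..."})
+//	conn, err := grpc.Dial(target, grpc.WithPerRPCCredentials(creds))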
+func NewOauthAccess(token *oauth2.Token) credentials.PerRPCCredentials { + return oauthAccess{token: *token} +} + +func (oa oauthAccess) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { + return map[string]string{ + "authorization": oa.token.Type() + " " + oa.token.AccessToken, + }, nil +} + +func (oa oauthAccess) RequireTransportSecurity() bool { + return true +} + +// NewComputeEngine constructs the PerRPCCredentials that fetches access tokens from +// Google Compute Engine (GCE)'s metadata server. It is only valid to use this +// if your program is running on a GCE instance. +// TODO(dsymonds): Deprecate and remove this. +func NewComputeEngine() credentials.PerRPCCredentials { + return TokenSource{google.ComputeTokenSource("")} +} + +// serviceAccount represents PerRPCCredentials via JWT signing key. +type serviceAccount struct { + mu sync.Mutex + config *jwt.Config + t *oauth2.Token +} + +func (s *serviceAccount) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { + s.mu.Lock() + defer s.mu.Unlock() + if !s.t.Valid() { + var err error + s.t, err = s.config.TokenSource(ctx).Token() + if err != nil { + return nil, err + } + } + return map[string]string{ + "authorization": s.t.Type() + " " + s.t.AccessToken, + }, nil +} + +func (s *serviceAccount) RequireTransportSecurity() bool { + return true +} + +// NewServiceAccountFromKey constructs the PerRPCCredentials using the JSON key slice +// from a Google Developers service account. +func NewServiceAccountFromKey(jsonKey []byte, scope ...string) (credentials.PerRPCCredentials, error) { + config, err := google.JWTConfigFromJSON(jsonKey, scope...) + if err != nil { + return nil, err + } + return &serviceAccount{config: config}, nil +} + +// NewServiceAccountFromFile constructs the PerRPCCredentials using the JSON key file +// of a Google Developers service account. +func NewServiceAccountFromFile(keyFile string, scope ...string) (credentials.PerRPCCredentials, error) { + jsonKey, err := ioutil.ReadFile(keyFile) + if err != nil { + return nil, fmt.Errorf("credentials: failed to read the service account key file: %v", err) + } + return NewServiceAccountFromKey(jsonKey, scope...) +} + +// NewApplicationDefault returns "Application Default Credentials". For more +// detail, see https://developers.google.com/accounts/docs/application-default-credentials. +func NewApplicationDefault(ctx context.Context, scope ...string) (credentials.PerRPCCredentials, error) { + t, err := google.DefaultTokenSource(ctx, scope...) + if err != nil { + return nil, err + } + return TokenSource{t}, nil +} diff --git a/vendor/google.golang.org/grpc/credentials/tls13.go b/vendor/google.golang.org/grpc/credentials/tls13.go new file mode 100644 index 000000000..ccbf35b33 --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/tls13.go @@ -0,0 +1,30 @@ +// +build go1.12 + +/* + * + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package credentials + +import "crypto/tls" + +// This init function adds cipher suite constants only defined in Go 1.12. +func init() { + cipherSuiteLookup[tls.TLS_AES_128_GCM_SHA256] = "TLS_AES_128_GCM_SHA256" + cipherSuiteLookup[tls.TLS_AES_256_GCM_SHA384] = "TLS_AES_256_GCM_SHA384" + cipherSuiteLookup[tls.TLS_CHACHA20_POLY1305_SHA256] = "TLS_CHACHA20_POLY1305_SHA256" +} diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go new file mode 100644 index 000000000..69c003159 --- /dev/null +++ b/vendor/google.golang.org/grpc/dialoptions.go @@ -0,0 +1,558 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "context" + "fmt" + "net" + "time" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/backoff" + "google.golang.org/grpc/internal/envconfig" + "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/stats" +) + +// dialOptions configure a Dial call. dialOptions are set by the DialOption +// values passed to Dial. +type dialOptions struct { + unaryInt UnaryClientInterceptor + streamInt StreamClientInterceptor + + chainUnaryInts []UnaryClientInterceptor + chainStreamInts []StreamClientInterceptor + + cp Compressor + dc Decompressor + bs backoff.Strategy + block bool + insecure bool + timeout time.Duration + scChan <-chan ServiceConfig + authority string + copts transport.ConnectOptions + callOptions []CallOption + // This is used by v1 balancer dial option WithBalancer to support v1 + // balancer, and also by WithBalancerName dial option. + balancerBuilder balancer.Builder + // This is to support grpclb. + resolverBuilder resolver.Builder + reqHandshake envconfig.RequireHandshakeSetting + channelzParentID int64 + disableServiceConfig bool + disableRetry bool + disableHealthCheck bool + healthCheckFunc internal.HealthChecker + minConnectTimeout func() time.Duration + defaultServiceConfig *ServiceConfig // defaultServiceConfig is parsed from defaultServiceConfigRawJSON. + defaultServiceConfigRawJSON *string +} + +// DialOption configures how we set up the connection. +type DialOption interface { + apply(*dialOptions) +} + +// EmptyDialOption does not alter the dial configuration. It can be embedded in +// another structure to build custom dial options. +// +// This API is EXPERIMENTAL. +type EmptyDialOption struct{} + +func (EmptyDialOption) apply(*dialOptions) {} + +// funcDialOption wraps a function that modifies dialOptions into an +// implementation of the DialOption interface. 
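+//
+// This is the usual functional-options pattern: each exported WithXxx
+// constructor below captures its argument in a closure that mutates the
+// dialOptions struct when apply is called.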
+type funcDialOption struct { + f func(*dialOptions) +} + +func (fdo *funcDialOption) apply(do *dialOptions) { + fdo.f(do) +} + +func newFuncDialOption(f func(*dialOptions)) *funcDialOption { + return &funcDialOption{ + f: f, + } +} + +// WithWaitForHandshake blocks until the initial settings frame is received from +// the server before assigning RPCs to the connection. +// +// Deprecated: this is the default behavior, and this option will be removed +// after the 1.18 release. +func WithWaitForHandshake() DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.reqHandshake = envconfig.RequireHandshakeOn + }) +} + +// WithWriteBufferSize determines how much data can be batched before doing a +// write on the wire. The corresponding memory allocation for this buffer will +// be twice the size to keep syscalls low. The default value for this buffer is +// 32KB. +// +// Zero will disable the write buffer such that each write will be on underlying +// connection. Note: A Send call may not directly translate to a write. +func WithWriteBufferSize(s int) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.WriteBufferSize = s + }) +} + +// WithReadBufferSize lets you set the size of read buffer, this determines how +// much data can be read at most for each read syscall. +// +// The default value for this buffer is 32KB. Zero will disable read buffer for +// a connection so data framer can access the underlying conn directly. +func WithReadBufferSize(s int) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.ReadBufferSize = s + }) +} + +// WithInitialWindowSize returns a DialOption which sets the value for initial +// window size on a stream. The lower bound for window size is 64K and any value +// smaller than that will be ignored. +func WithInitialWindowSize(s int32) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.InitialWindowSize = s + }) +} + +// WithInitialConnWindowSize returns a DialOption which sets the value for +// initial window size on a connection. The lower bound for window size is 64K +// and any value smaller than that will be ignored. +func WithInitialConnWindowSize(s int32) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.InitialConnWindowSize = s + }) +} + +// WithMaxMsgSize returns a DialOption which sets the maximum message size the +// client can receive. +// +// Deprecated: use WithDefaultCallOptions(MaxCallRecvMsgSize(s)) instead. +func WithMaxMsgSize(s int) DialOption { + return WithDefaultCallOptions(MaxCallRecvMsgSize(s)) +} + +// WithDefaultCallOptions returns a DialOption which sets the default +// CallOptions for calls over the connection. +func WithDefaultCallOptions(cos ...CallOption) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.callOptions = append(o.callOptions, cos...) + }) +} + +// WithCodec returns a DialOption which sets a codec for message marshaling and +// unmarshaling. +// +// Deprecated: use WithDefaultCallOptions(ForceCodec(_)) instead. +func WithCodec(c Codec) DialOption { + return WithDefaultCallOptions(CallCustomCodec(c)) +} + +// WithCompressor returns a DialOption which sets a Compressor to use for +// message compression. It has lower priority than the compressor set by the +// UseCompressor CallOption. +// +// Deprecated: use UseCompressor instead. 
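+//
+// The recommended replacement (illustrative; target is a placeholder, and
+// gzip.Name comes from google.golang.org/grpc/encoding/gzip) is the
+// per-call option:
+//
+//	conn, err := grpc.Dial(target,
+//		grpc.WithDefaultCallOptions(grpc.UseCompressor(gzip.Name)))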
+func WithCompressor(cp Compressor) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.cp = cp + }) +} + +// WithDecompressor returns a DialOption which sets a Decompressor to use for +// incoming message decompression. If incoming response messages are encoded +// using the decompressor's Type(), it will be used. Otherwise, the message +// encoding will be used to look up the compressor registered via +// encoding.RegisterCompressor, which will then be used to decompress the +// message. If no compressor is registered for the encoding, an Unimplemented +// status error will be returned. +// +// Deprecated: use encoding.RegisterCompressor instead. +func WithDecompressor(dc Decompressor) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.dc = dc + }) +} + +// WithBalancer returns a DialOption which sets a load balancer with the v1 API. +// Name resolver will be ignored if this DialOption is specified. +// +// Deprecated: use the new balancer APIs in balancer package and +// WithBalancerName. +func WithBalancer(b Balancer) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.balancerBuilder = &balancerWrapperBuilder{ + b: b, + } + }) +} + +// WithBalancerName sets the balancer that the ClientConn will be initialized +// with. Balancer registered with balancerName will be used. This function +// panics if no balancer was registered by balancerName. +// +// The balancer cannot be overridden by balancer option specified by service +// config. +// +// This is an EXPERIMENTAL API. +func WithBalancerName(balancerName string) DialOption { + builder := balancer.Get(balancerName) + if builder == nil { + panic(fmt.Sprintf("grpc.WithBalancerName: no balancer is registered for name %v", balancerName)) + } + return newFuncDialOption(func(o *dialOptions) { + o.balancerBuilder = builder + }) +} + +// withResolverBuilder is only for grpclb. +func withResolverBuilder(b resolver.Builder) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.resolverBuilder = b + }) +} + +// WithServiceConfig returns a DialOption which has a channel to read the +// service configuration. +// +// Deprecated: service config should be received through name resolver, as +// specified here. +// https://github.com/grpc/grpc/blob/master/doc/service_config.md +func WithServiceConfig(c <-chan ServiceConfig) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.scChan = c + }) +} + +// WithBackoffMaxDelay configures the dialer to use the provided maximum delay +// when backing off after failed connection attempts. +func WithBackoffMaxDelay(md time.Duration) DialOption { + return WithBackoffConfig(BackoffConfig{MaxDelay: md}) +} + +// WithBackoffConfig configures the dialer to use the provided backoff +// parameters after connection failures. +// +// Use WithBackoffMaxDelay until more parameters on BackoffConfig are opened up +// for use. +func WithBackoffConfig(b BackoffConfig) DialOption { + return withBackoff(backoff.Exponential{ + MaxDelay: b.MaxDelay, + }) +} + +// withBackoff sets the backoff strategy used for connectRetryNum after a failed +// connection attempt. +// +// This can be exported if arbitrary backoff strategies are allowed by gRPC. +func withBackoff(bs backoff.Strategy) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.bs = bs + }) +} + +// WithBlock returns a DialOption which makes caller of Dial blocks until the +// underlying connection is up. 
Without this, Dial returns immediately and +// connecting the server happens in background. +func WithBlock() DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.block = true + }) +} + +// WithInsecure returns a DialOption which disables transport security for this +// ClientConn. Note that transport security is required unless WithInsecure is +// set. +func WithInsecure() DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.insecure = true + }) +} + +// WithTransportCredentials returns a DialOption which configures a connection +// level security credentials (e.g., TLS/SSL). This should not be used together +// with WithCredentialsBundle. +func WithTransportCredentials(creds credentials.TransportCredentials) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.TransportCredentials = creds + }) +} + +// WithPerRPCCredentials returns a DialOption which sets credentials and places +// auth state on each outbound RPC. +func WithPerRPCCredentials(creds credentials.PerRPCCredentials) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.PerRPCCredentials = append(o.copts.PerRPCCredentials, creds) + }) +} + +// WithCredentialsBundle returns a DialOption to set a credentials bundle for +// the ClientConn.WithCreds. This should not be used together with +// WithTransportCredentials. +// +// This API is experimental. +func WithCredentialsBundle(b credentials.Bundle) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.CredsBundle = b + }) +} + +// WithTimeout returns a DialOption that configures a timeout for dialing a +// ClientConn initially. This is valid if and only if WithBlock() is present. +// +// Deprecated: use DialContext and context.WithTimeout instead. +func WithTimeout(d time.Duration) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.timeout = d + }) +} + +// WithContextDialer returns a DialOption that sets a dialer to create +// connections. If FailOnNonTempDialError() is set to true, and an error is +// returned by f, gRPC checks the error's Temporary() method to decide if it +// should try to reconnect to the network address. +func WithContextDialer(f func(context.Context, string) (net.Conn, error)) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.Dialer = f + }) +} + +func init() { + internal.WithResolverBuilder = withResolverBuilder + internal.WithHealthCheckFunc = withHealthCheckFunc +} + +// WithDialer returns a DialOption that specifies a function to use for dialing +// network addresses. If FailOnNonTempDialError() is set to true, and an error +// is returned by f, gRPC checks the error's Temporary() method to decide if it +// should try to reconnect to the network address. +// +// Deprecated: use WithContextDialer instead +func WithDialer(f func(string, time.Duration) (net.Conn, error)) DialOption { + return WithContextDialer( + func(ctx context.Context, addr string) (net.Conn, error) { + if deadline, ok := ctx.Deadline(); ok { + return f(addr, time.Until(deadline)) + } + return f(addr, 0) + }) +} + +// WithStatsHandler returns a DialOption that specifies the stats handler for +// all the RPCs and underlying network connections in this ClientConn. +func WithStatsHandler(h stats.Handler) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.StatsHandler = h + }) +} + +// FailOnNonTempDialError returns a DialOption that specifies if gRPC fails on +// non-temporary dial errors. 
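// Illustrative sketch (not part of this patch): the recommended replacement for
// the deprecated WithTimeout, combined with TLS credentials and a custom dialer.
// The file name and target here are hypothetical.
//
//	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
//	defer cancel()
//	creds, err := credentials.NewClientTLSFromFile("ca.pem", "")
//	if err != nil {
//		log.Fatalf("credentials: %v", err)
//	}
//	conn, err := grpc.DialContext(ctx, "example.com:443",
//		grpc.WithTransportCredentials(creds),
//		grpc.WithBlock(), // DialContext waits; the ctx timeout bounds the wait
//		grpc.WithContextDialer(func(ctx context.Context, addr string) (net.Conn, error) {
//			return (&net.Dialer{}).DialContext(ctx, "tcp", addr)
//		}),
//	)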
If f is true, and the dialer returns a non-temporary
+// error, gRPC will fail the connection to the network address and won't try to
+// reconnect. The default value of FailOnNonTempDialError is false.
+//
+// FailOnNonTempDialError only affects the initial dial, and does not do
+// anything useful unless you are also using WithBlock().
+//
+// This is an EXPERIMENTAL API.
+func FailOnNonTempDialError(f bool) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.copts.FailOnNonTempDialError = f
+	})
+}
+
+// WithUserAgent returns a DialOption that specifies a user agent string for all
+// the RPCs.
+func WithUserAgent(s string) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.copts.UserAgent = s
+	})
+}
+
+// WithKeepaliveParams returns a DialOption that specifies keepalive parameters
+// for the client transport.
+func WithKeepaliveParams(kp keepalive.ClientParameters) DialOption {
+	if kp.Time < internal.KeepaliveMinPingTime {
+		grpclog.Warningf("Adjusting keepalive ping interval to minimum period of %v", internal.KeepaliveMinPingTime)
+		kp.Time = internal.KeepaliveMinPingTime
+	}
+	return newFuncDialOption(func(o *dialOptions) {
+		o.copts.KeepaliveParams = kp
+	})
+}
+
+// WithUnaryInterceptor returns a DialOption that specifies the interceptor for
+// unary RPCs.
+func WithUnaryInterceptor(f UnaryClientInterceptor) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.unaryInt = f
+	})
+}
+
+// WithChainUnaryInterceptor returns a DialOption that specifies the chained
+// interceptor for unary RPCs. The first interceptor will be the outermost,
+// while the last interceptor will be the innermost wrapper around the real call.
+// All interceptors added by this method will be chained, and the interceptor
+// defined by WithUnaryInterceptor will always be prepended to the chain.
+func WithChainUnaryInterceptor(interceptors ...UnaryClientInterceptor) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.chainUnaryInts = append(o.chainUnaryInts, interceptors...)
+	})
+}
+
+// WithStreamInterceptor returns a DialOption that specifies the interceptor for
+// streaming RPCs.
+func WithStreamInterceptor(f StreamClientInterceptor) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.streamInt = f
+	})
+}
+
+// WithChainStreamInterceptor returns a DialOption that specifies the chained
+// interceptor for streaming RPCs. The first interceptor will be the outermost,
+// while the last interceptor will be the innermost wrapper around the real call.
+// All interceptors added by this method will be chained, and the interceptor
+// defined by WithStreamInterceptor will always be prepended to the chain.
+func WithChainStreamInterceptor(interceptors ...StreamClientInterceptor) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.chainStreamInts = append(o.chainStreamInts, interceptors...)
+	})
+}
+
+// WithAuthority returns a DialOption that specifies the value to be used as the
+// :authority pseudo-header. This value only works with WithInsecure and has no
+// effect if TransportCredentials are present.
+func WithAuthority(a string) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.authority = a
+	})
+}
+
+// WithChannelzParentID returns a DialOption that specifies the channelz ID of
+// the current ClientConn's parent. This function is used in nested channel
+// creation (e.g. grpclb dial).
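// Illustrative sketch (not part of this patch): a minimal UnaryClientInterceptor
// that times each RPC, installed with the chaining option above. logUnary and
// target are hypothetical names.
//
//	func logUnary(ctx context.Context, method string, req, reply interface{},
//		cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
//		start := time.Now()
//		err := invoker(ctx, method, req, reply, cc, opts...)
//		log.Printf("rpc %s took %v err=%v", method, time.Since(start), err)
//		return err
//	}
//
//	conn, err := grpc.Dial(target, grpc.WithInsecure(),
//		grpc.WithChainUnaryInterceptor(logUnary))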
+func WithChannelzParentID(id int64) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.channelzParentID = id
+	})
+}
+
+// WithDisableServiceConfig returns a DialOption that causes gRPC to ignore any
+// service config provided by the resolver and provides a hint to the resolver
+// to not fetch service configs.
+//
+// Note that this dial option only disables service config from the resolver. If
+// a default service config is provided, gRPC will use the default service config.
+func WithDisableServiceConfig() DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.disableServiceConfig = true
+	})
+}
+
+// WithDefaultServiceConfig returns a DialOption that configures the default
+// service config, which will be used in cases where:
+// 1. WithDisableServiceConfig is called.
+// 2. The resolver does not return a service config, or returns an invalid one.
+//
+// This API is EXPERIMENTAL.
+func WithDefaultServiceConfig(s string) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.defaultServiceConfigRawJSON = &s
+	})
+}
+
+// WithDisableRetry returns a DialOption that disables retries, even if the
+// service config enables them. This does not impact transparent retries, which
+// will happen automatically if no data is written to the wire or if the RPC is
+// unprocessed by the remote server.
+//
+// Retry support is currently disabled by default, but will be enabled by
+// default in the future. Until then, it may be enabled by setting the
+// environment variable "GRPC_GO_RETRY" to "on".
+//
+// This API is EXPERIMENTAL.
+func WithDisableRetry() DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.disableRetry = true
+	})
+}
+
+// WithMaxHeaderListSize returns a DialOption that specifies the maximum
+// (uncompressed) size of header list that the client is prepared to accept.
+func WithMaxHeaderListSize(s uint32) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.copts.MaxHeaderListSize = &s
+	})
+}
+
+// WithDisableHealthCheck disables the LB channel health checking for all
+// SubConns of this ClientConn.
+//
+// This API is EXPERIMENTAL.
+func WithDisableHealthCheck() DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.disableHealthCheck = true
+	})
+}
+
+// withHealthCheckFunc replaces the default health check function with the
+// provided one. It makes it easier for tests to change the health check
+// function.
+//
+// For testing purpose only.
+func withHealthCheckFunc(f internal.HealthChecker) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.healthCheckFunc = f
+	})
+}
+
+func defaultDialOptions() dialOptions {
+	return dialOptions{
+		disableRetry:    !envconfig.Retry,
+		reqHandshake:    envconfig.RequireHandshake,
+		healthCheckFunc: internal.HealthCheckFunc,
+		copts: transport.ConnectOptions{
+			WriteBufferSize: defaultWriteBufSize,
+			ReadBufferSize:  defaultReadBufSize,
+		},
+	}
+}
+
+// withMinConnectDeadline specifies the function that clientconn uses to
+// get minConnectDeadline. This can be used to make connection attempts happen
+// faster/slower.
+//
+// For testing purpose only.
+func withMinConnectDeadline(f func() time.Duration) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.minConnectTimeout = f
+	})
+}
diff --git a/vendor/google.golang.org/grpc/doc.go b/vendor/google.golang.org/grpc/doc.go
new file mode 100644
index 000000000..187adbb11
--- /dev/null
+++ b/vendor/google.golang.org/grpc/doc.go
@@ -0,0 +1,24 @@
+/*
+ *
+ * Copyright 2015 gRPC authors.
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +/* +Package grpc implements an RPC system called gRPC. + +See grpc.io for more information about gRPC. +*/ +package grpc // import "google.golang.org/grpc" diff --git a/vendor/google.golang.org/grpc/encoding/encoding.go b/vendor/google.golang.org/grpc/encoding/encoding.go new file mode 100644 index 000000000..30a75da99 --- /dev/null +++ b/vendor/google.golang.org/grpc/encoding/encoding.go @@ -0,0 +1,118 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package encoding defines the interface for the compressor and codec, and +// functions to register and retrieve compressors and codecs. +// +// This package is EXPERIMENTAL. +package encoding + +import ( + "io" + "strings" +) + +// Identity specifies the optional encoding for uncompressed streams. +// It is intended for grpc internal use only. +const Identity = "identity" + +// Compressor is used for compressing and decompressing when sending or +// receiving messages. +type Compressor interface { + // Compress writes the data written to wc to w after compressing it. If an + // error occurs while initializing the compressor, that error is returned + // instead. + Compress(w io.Writer) (io.WriteCloser, error) + // Decompress reads data from r, decompresses it, and provides the + // uncompressed data via the returned io.Reader. If an error occurs while + // initializing the decompressor, that error is returned instead. + Decompress(r io.Reader) (io.Reader, error) + // Name is the name of the compression codec and is used to set the content + // coding header. The result must be static; the result cannot change + // between calls. + Name() string +} + +var registeredCompressor = make(map[string]Compressor) + +// RegisterCompressor registers the compressor with gRPC by its name. It can +// be activated when sending an RPC via grpc.UseCompressor(). It will be +// automatically accessed when receiving a message based on the content coding +// header. Servers also use it to send a response with the same encoding as +// the request. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple Compressors are +// registered with the same name, the one registered last will take effect. +func RegisterCompressor(c Compressor) { + registeredCompressor[c.Name()] = c +} + +// GetCompressor returns Compressor for the given compressor name. 
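// Illustrative sketch (not part of this patch): a gzip Compressor registered at
// init time, mirroring what grpc's own encoding/gzip package provides.
//
//	type gzipCompressor struct{}
//
//	func (gzipCompressor) Compress(w io.Writer) (io.WriteCloser, error) {
//		return gzip.NewWriter(w), nil
//	}
//
//	func (gzipCompressor) Decompress(r io.Reader) (io.Reader, error) {
//		return gzip.NewReader(r)
//	}
//
//	func (gzipCompressor) Name() string { return "gzip" }
//
//	func init() { encoding.RegisterCompressor(gzipCompressor{}) }
//
// A client can then opt in per call with grpc.UseCompressor("gzip").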
+func GetCompressor(name string) Compressor { + return registeredCompressor[name] +} + +// Codec defines the interface gRPC uses to encode and decode messages. Note +// that implementations of this interface must be thread safe; a Codec's +// methods can be called from concurrent goroutines. +type Codec interface { + // Marshal returns the wire format of v. + Marshal(v interface{}) ([]byte, error) + // Unmarshal parses the wire format into v. + Unmarshal(data []byte, v interface{}) error + // Name returns the name of the Codec implementation. The returned string + // will be used as part of content type in transmission. The result must be + // static; the result cannot change between calls. + Name() string +} + +var registeredCodecs = make(map[string]Codec) + +// RegisterCodec registers the provided Codec for use with all gRPC clients and +// servers. +// +// The Codec will be stored and looked up by result of its Name() method, which +// should match the content-subtype of the encoding handled by the Codec. This +// is case-insensitive, and is stored and looked up as lowercase. If the +// result of calling Name() is an empty string, RegisterCodec will panic. See +// Content-Type on +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple Compressors are +// registered with the same name, the one registered last will take effect. +func RegisterCodec(codec Codec) { + if codec == nil { + panic("cannot register a nil Codec") + } + if codec.Name() == "" { + panic("cannot register Codec with empty string result for Name()") + } + contentSubtype := strings.ToLower(codec.Name()) + registeredCodecs[contentSubtype] = codec +} + +// GetCodec gets a registered Codec by content-subtype, or nil if no Codec is +// registered for the content-subtype. +// +// The content-subtype is expected to be lowercase. +func GetCodec(contentSubtype string) Codec { + return registeredCodecs[contentSubtype] +} diff --git a/vendor/google.golang.org/grpc/encoding/proto/proto.go b/vendor/google.golang.org/grpc/encoding/proto/proto.go new file mode 100644 index 000000000..66b97a6f6 --- /dev/null +++ b/vendor/google.golang.org/grpc/encoding/proto/proto.go @@ -0,0 +1,110 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package proto defines the protobuf codec. Importing this package will +// register the codec. +package proto + +import ( + "math" + "sync" + + "github.com/golang/protobuf/proto" + "google.golang.org/grpc/encoding" +) + +// Name is the name registered for the proto compressor. +const Name = "proto" + +func init() { + encoding.RegisterCodec(codec{}) +} + +// codec is a Codec implementation with protobuf. It is the default codec for gRPC. 
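// Illustrative sketch (not part of this patch): registering an alternative Codec
// keyed by content-subtype; "json" here is an assumption, not a codec this patch
// ships.
//
//	type jsonCodec struct{}
//
//	func (jsonCodec) Marshal(v interface{}) ([]byte, error)      { return json.Marshal(v) }
//	func (jsonCodec) Unmarshal(data []byte, v interface{}) error { return json.Unmarshal(data, v) }
//	func (jsonCodec) Name() string                               { return "json" }
//
//	func init() { encoding.RegisterCodec(jsonCodec{}) }
//
// Callers can then select it per RPC with grpc.CallContentSubtype("json").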
+type codec struct{} + +type cachedProtoBuffer struct { + lastMarshaledSize uint32 + proto.Buffer +} + +func capToMaxInt32(val int) uint32 { + if val > math.MaxInt32 { + return uint32(math.MaxInt32) + } + return uint32(val) +} + +func marshal(v interface{}, cb *cachedProtoBuffer) ([]byte, error) { + protoMsg := v.(proto.Message) + newSlice := make([]byte, 0, cb.lastMarshaledSize) + + cb.SetBuf(newSlice) + cb.Reset() + if err := cb.Marshal(protoMsg); err != nil { + return nil, err + } + out := cb.Bytes() + cb.lastMarshaledSize = capToMaxInt32(len(out)) + return out, nil +} + +func (codec) Marshal(v interface{}) ([]byte, error) { + if pm, ok := v.(proto.Marshaler); ok { + // object can marshal itself, no need for buffer + return pm.Marshal() + } + + cb := protoBufferPool.Get().(*cachedProtoBuffer) + out, err := marshal(v, cb) + + // put back buffer and lose the ref to the slice + cb.SetBuf(nil) + protoBufferPool.Put(cb) + return out, err +} + +func (codec) Unmarshal(data []byte, v interface{}) error { + protoMsg := v.(proto.Message) + protoMsg.Reset() + + if pu, ok := protoMsg.(proto.Unmarshaler); ok { + // object can unmarshal itself, no need for buffer + return pu.Unmarshal(data) + } + + cb := protoBufferPool.Get().(*cachedProtoBuffer) + cb.SetBuf(data) + err := cb.Unmarshal(protoMsg) + cb.SetBuf(nil) + protoBufferPool.Put(cb) + return err +} + +func (codec) Name() string { + return Name +} + +var protoBufferPool = &sync.Pool{ + New: func() interface{} { + return &cachedProtoBuffer{ + Buffer: proto.Buffer{}, + lastMarshaledSize: 16, + } + }, +} diff --git a/vendor/google.golang.org/grpc/grpclog/grpclog.go b/vendor/google.golang.org/grpc/grpclog/grpclog.go new file mode 100644 index 000000000..51bb9457c --- /dev/null +++ b/vendor/google.golang.org/grpc/grpclog/grpclog.go @@ -0,0 +1,126 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package grpclog defines logging for grpc. +// +// All logs in transport and grpclb packages only go to verbose level 2. +// All logs in other packages in grpc are logged in spite of the verbosity level. +// +// In the default logger, +// severity level can be set by environment variable GRPC_GO_LOG_SEVERITY_LEVEL, +// verbosity level can be set by GRPC_GO_LOG_VERBOSITY_LEVEL. +package grpclog // import "google.golang.org/grpc/grpclog" + +import "os" + +var logger = newLoggerV2() + +// V reports whether verbosity level l is at least the requested verbose level. +func V(l int) bool { + return logger.V(l) +} + +// Info logs to the INFO log. +func Info(args ...interface{}) { + logger.Info(args...) +} + +// Infof logs to the INFO log. Arguments are handled in the manner of fmt.Printf. +func Infof(format string, args ...interface{}) { + logger.Infof(format, args...) +} + +// Infoln logs to the INFO log. Arguments are handled in the manner of fmt.Println. +func Infoln(args ...interface{}) { + logger.Infoln(args...) +} + +// Warning logs to the WARNING log. 
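// Illustrative sketch (not part of this patch): gating verbose logs on the
// levels configured above; run with GRPC_GO_LOG_SEVERITY_LEVEL=info and
// GRPC_GO_LOG_VERBOSITY_LEVEL=2 to see the output. addr is a hypothetical
// variable.
//
//	if grpclog.V(2) {
//		grpclog.Infof("picked address %q", addr)
//	}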
+func Warning(args ...interface{}) {
+	logger.Warning(args...)
+}
+
+// Warningf logs to the WARNING log. Arguments are handled in the manner of fmt.Printf.
+func Warningf(format string, args ...interface{}) {
+	logger.Warningf(format, args...)
+}
+
+// Warningln logs to the WARNING log. Arguments are handled in the manner of fmt.Println.
+func Warningln(args ...interface{}) {
+	logger.Warningln(args...)
+}
+
+// Error logs to the ERROR log.
+func Error(args ...interface{}) {
+	logger.Error(args...)
+}
+
+// Errorf logs to the ERROR log. Arguments are handled in the manner of fmt.Printf.
+func Errorf(format string, args ...interface{}) {
+	logger.Errorf(format, args...)
+}
+
+// Errorln logs to the ERROR log. Arguments are handled in the manner of fmt.Println.
+func Errorln(args ...interface{}) {
+	logger.Errorln(args...)
+}
+
+// Fatal logs to the FATAL log. Arguments are handled in the manner of fmt.Print.
+// It calls os.Exit() with exit code 1.
+func Fatal(args ...interface{}) {
+	logger.Fatal(args...)
+	// Make sure fatal logs will exit.
+	os.Exit(1)
+}
+
+// Fatalf logs to the FATAL log. Arguments are handled in the manner of fmt.Printf.
+// It calls os.Exit() with exit code 1.
+func Fatalf(format string, args ...interface{}) {
+	logger.Fatalf(format, args...)
+	// Make sure fatal logs will exit.
+	os.Exit(1)
+}
+
+// Fatalln logs to the FATAL log. Arguments are handled in the manner of fmt.Println.
+// It calls os.Exit() with exit code 1.
+func Fatalln(args ...interface{}) {
+	logger.Fatalln(args...)
+	// Make sure fatal logs will exit.
+	os.Exit(1)
+}
+
+// Print prints to the logger. Arguments are handled in the manner of fmt.Print.
+//
+// Deprecated: use Info.
+func Print(args ...interface{}) {
+	logger.Info(args...)
+}
+
+// Printf prints to the logger. Arguments are handled in the manner of fmt.Printf.
+//
+// Deprecated: use Infof.
+func Printf(format string, args ...interface{}) {
+	logger.Infof(format, args...)
+}
+
+// Println prints to the logger. Arguments are handled in the manner of fmt.Println.
+//
+// Deprecated: use Infoln.
+func Println(args ...interface{}) {
+	logger.Infoln(args...)
+}
diff --git a/vendor/google.golang.org/grpc/grpclog/logger.go b/vendor/google.golang.org/grpc/grpclog/logger.go
new file mode 100644
index 000000000..097494f71
--- /dev/null
+++ b/vendor/google.golang.org/grpc/grpclog/logger.go
@@ -0,0 +1,85 @@
+/*
+ *
+ * Copyright 2015 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpclog
+
+// Logger mimics golang's standard Logger as an interface.
+//
+// Deprecated: use LoggerV2.
+type Logger interface {
+	Fatal(args ...interface{})
+	Fatalf(format string, args ...interface{})
+	Fatalln(args ...interface{})
+	Print(args ...interface{})
+	Printf(format string, args ...interface{})
+	Println(args ...interface{})
+}
+
+// SetLogger sets the logger that is used in grpc. Call only from
+// init() functions.
+//
+// Deprecated: use SetLoggerV2.
+func SetLogger(l Logger) { + logger = &loggerWrapper{Logger: l} +} + +// loggerWrapper wraps Logger into a LoggerV2. +type loggerWrapper struct { + Logger +} + +func (g *loggerWrapper) Info(args ...interface{}) { + g.Logger.Print(args...) +} + +func (g *loggerWrapper) Infoln(args ...interface{}) { + g.Logger.Println(args...) +} + +func (g *loggerWrapper) Infof(format string, args ...interface{}) { + g.Logger.Printf(format, args...) +} + +func (g *loggerWrapper) Warning(args ...interface{}) { + g.Logger.Print(args...) +} + +func (g *loggerWrapper) Warningln(args ...interface{}) { + g.Logger.Println(args...) +} + +func (g *loggerWrapper) Warningf(format string, args ...interface{}) { + g.Logger.Printf(format, args...) +} + +func (g *loggerWrapper) Error(args ...interface{}) { + g.Logger.Print(args...) +} + +func (g *loggerWrapper) Errorln(args ...interface{}) { + g.Logger.Println(args...) +} + +func (g *loggerWrapper) Errorf(format string, args ...interface{}) { + g.Logger.Printf(format, args...) +} + +func (g *loggerWrapper) V(l int) bool { + // Returns true for all verbose level. + return true +} diff --git a/vendor/google.golang.org/grpc/grpclog/loggerv2.go b/vendor/google.golang.org/grpc/grpclog/loggerv2.go new file mode 100644 index 000000000..d49325776 --- /dev/null +++ b/vendor/google.golang.org/grpc/grpclog/loggerv2.go @@ -0,0 +1,195 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpclog + +import ( + "io" + "io/ioutil" + "log" + "os" + "strconv" +) + +// LoggerV2 does underlying logging work for grpclog. +type LoggerV2 interface { + // Info logs to INFO log. Arguments are handled in the manner of fmt.Print. + Info(args ...interface{}) + // Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println. + Infoln(args ...interface{}) + // Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf. + Infof(format string, args ...interface{}) + // Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print. + Warning(args ...interface{}) + // Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println. + Warningln(args ...interface{}) + // Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf. + Warningf(format string, args ...interface{}) + // Error logs to ERROR log. Arguments are handled in the manner of fmt.Print. + Error(args ...interface{}) + // Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println. + Errorln(args ...interface{}) + // Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. + Errorf(format string, args ...interface{}) + // Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print. + // gRPC ensures that all Fatal logs will exit with os.Exit(1). + // Implementations may also call os.Exit() with a non-zero exit code. + Fatal(args ...interface{}) + // Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println. 
+ // gRPC ensures that all Fatal logs will exit with os.Exit(1). + // Implementations may also call os.Exit() with a non-zero exit code. + Fatalln(args ...interface{}) + // Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. + // gRPC ensures that all Fatal logs will exit with os.Exit(1). + // Implementations may also call os.Exit() with a non-zero exit code. + Fatalf(format string, args ...interface{}) + // V reports whether verbosity level l is at least the requested verbose level. + V(l int) bool +} + +// SetLoggerV2 sets logger that is used in grpc to a V2 logger. +// Not mutex-protected, should be called before any gRPC functions. +func SetLoggerV2(l LoggerV2) { + logger = l +} + +const ( + // infoLog indicates Info severity. + infoLog int = iota + // warningLog indicates Warning severity. + warningLog + // errorLog indicates Error severity. + errorLog + // fatalLog indicates Fatal severity. + fatalLog +) + +// severityName contains the string representation of each severity. +var severityName = []string{ + infoLog: "INFO", + warningLog: "WARNING", + errorLog: "ERROR", + fatalLog: "FATAL", +} + +// loggerT is the default logger used by grpclog. +type loggerT struct { + m []*log.Logger + v int +} + +// NewLoggerV2 creates a loggerV2 with the provided writers. +// Fatal logs will be written to errorW, warningW, infoW, followed by exit(1). +// Error logs will be written to errorW, warningW and infoW. +// Warning logs will be written to warningW and infoW. +// Info logs will be written to infoW. +func NewLoggerV2(infoW, warningW, errorW io.Writer) LoggerV2 { + return NewLoggerV2WithVerbosity(infoW, warningW, errorW, 0) +} + +// NewLoggerV2WithVerbosity creates a loggerV2 with the provided writers and +// verbosity level. +func NewLoggerV2WithVerbosity(infoW, warningW, errorW io.Writer, v int) LoggerV2 { + var m []*log.Logger + m = append(m, log.New(infoW, severityName[infoLog]+": ", log.LstdFlags)) + m = append(m, log.New(io.MultiWriter(infoW, warningW), severityName[warningLog]+": ", log.LstdFlags)) + ew := io.MultiWriter(infoW, warningW, errorW) // ew will be used for error and fatal. + m = append(m, log.New(ew, severityName[errorLog]+": ", log.LstdFlags)) + m = append(m, log.New(ew, severityName[fatalLog]+": ", log.LstdFlags)) + return &loggerT{m: m, v: v} +} + +// newLoggerV2 creates a loggerV2 to be used as default logger. +// All logs are written to stderr. +func newLoggerV2() LoggerV2 { + errorW := ioutil.Discard + warningW := ioutil.Discard + infoW := ioutil.Discard + + logLevel := os.Getenv("GRPC_GO_LOG_SEVERITY_LEVEL") + switch logLevel { + case "", "ERROR", "error": // If env is unset, set level to ERROR. + errorW = os.Stderr + case "WARNING", "warning": + warningW = os.Stderr + case "INFO", "info": + infoW = os.Stderr + } + + var v int + vLevel := os.Getenv("GRPC_GO_LOG_VERBOSITY_LEVEL") + if vl, err := strconv.Atoi(vLevel); err == nil { + v = vl + } + return NewLoggerV2WithVerbosity(infoW, warningW, errorW, v) +} + +func (g *loggerT) Info(args ...interface{}) { + g.m[infoLog].Print(args...) +} + +func (g *loggerT) Infoln(args ...interface{}) { + g.m[infoLog].Println(args...) +} + +func (g *loggerT) Infof(format string, args ...interface{}) { + g.m[infoLog].Printf(format, args...) +} + +func (g *loggerT) Warning(args ...interface{}) { + g.m[warningLog].Print(args...) +} + +func (g *loggerT) Warningln(args ...interface{}) { + g.m[warningLog].Println(args...) 
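// Illustrative sketch (not part of this patch): replacing the default logger
// built above, sending info and warning output to stdout, errors to stderr,
// at verbosity level 2.
//
//	func init() {
//		grpclog.SetLoggerV2(grpclog.NewLoggerV2WithVerbosity(os.Stdout, os.Stdout, os.Stderr, 2))
//	}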
+} + +func (g *loggerT) Warningf(format string, args ...interface{}) { + g.m[warningLog].Printf(format, args...) +} + +func (g *loggerT) Error(args ...interface{}) { + g.m[errorLog].Print(args...) +} + +func (g *loggerT) Errorln(args ...interface{}) { + g.m[errorLog].Println(args...) +} + +func (g *loggerT) Errorf(format string, args ...interface{}) { + g.m[errorLog].Printf(format, args...) +} + +func (g *loggerT) Fatal(args ...interface{}) { + g.m[fatalLog].Fatal(args...) + // No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit(). +} + +func (g *loggerT) Fatalln(args ...interface{}) { + g.m[fatalLog].Fatalln(args...) + // No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit(). +} + +func (g *loggerT) Fatalf(format string, args ...interface{}) { + g.m[fatalLog].Fatalf(format, args...) + // No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit(). +} + +func (g *loggerT) V(l int) bool { + return l <= g.v +} diff --git a/vendor/google.golang.org/grpc/interceptor.go b/vendor/google.golang.org/grpc/interceptor.go new file mode 100644 index 000000000..8b7350022 --- /dev/null +++ b/vendor/google.golang.org/grpc/interceptor.go @@ -0,0 +1,77 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "context" +) + +// UnaryInvoker is called by UnaryClientInterceptor to complete RPCs. +type UnaryInvoker func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error + +// UnaryClientInterceptor intercepts the execution of a unary RPC on the client. invoker is the handler to complete the RPC +// and it is the responsibility of the interceptor to call it. +// This is an EXPERIMENTAL API. +type UnaryClientInterceptor func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error + +// Streamer is called by StreamClientInterceptor to create a ClientStream. +type Streamer func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) + +// StreamClientInterceptor intercepts the creation of ClientStream. It may return a custom ClientStream to intercept all I/O +// operations. streamer is the handler to create a ClientStream and it is the responsibility of the interceptor to call it. +// This is an EXPERIMENTAL API. +type StreamClientInterceptor func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, streamer Streamer, opts ...CallOption) (ClientStream, error) + +// UnaryServerInfo consists of various information about a unary RPC on +// server side. All per-rpc information may be mutated by the interceptor. +type UnaryServerInfo struct { + // Server is the service implementation the user provides. This is read-only. + Server interface{} + // FullMethod is the full RPC method string, i.e., /package.service/method. 
+	FullMethod string
+}
+
+// UnaryHandler defines the handler invoked by UnaryServerInterceptor to complete the normal
+// execution of a unary RPC. If a UnaryHandler returns an error, it should be produced by the
+// status package, or else gRPC will use codes.Unknown as the status code and err.Error() as
+// the status message of the RPC.
+type UnaryHandler func(ctx context.Context, req interface{}) (interface{}, error)
+
+// UnaryServerInterceptor provides a hook to intercept the execution of a unary RPC on the server.
+// info contains all the information of this RPC the interceptor can operate on, and handler is the
+// wrapper of the service method implementation. It is the responsibility of the interceptor to
+// invoke handler to complete the RPC.
+type UnaryServerInterceptor func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (resp interface{}, err error)
+
+// StreamServerInfo consists of various information about a streaming RPC on
+// the server side. All per-RPC information may be mutated by the interceptor.
+type StreamServerInfo struct {
+	// FullMethod is the full RPC method string, i.e., /package.service/method.
+	FullMethod string
+	// IsClientStream indicates whether the RPC is a client streaming RPC.
+	IsClientStream bool
+	// IsServerStream indicates whether the RPC is a server streaming RPC.
+	IsServerStream bool
+}
+
+// StreamServerInterceptor provides a hook to intercept the execution of a streaming RPC on the
+// server. info contains all the information of this RPC the interceptor can operate on, and
+// handler is the service method implementation. It is the responsibility of the interceptor to
+// invoke handler to complete the RPC.
+type StreamServerInterceptor func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error
diff --git a/vendor/google.golang.org/grpc/internal/backoff/backoff.go b/vendor/google.golang.org/grpc/internal/backoff/backoff.go
new file mode 100644
index 000000000..1bd0cce5a
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/backoff/backoff.go
@@ -0,0 +1,78 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package backoff implements the backoff strategy for gRPC.
+//
+// This is kept in internal until the gRPC project decides whether or not to
+// allow alternative backoff strategies.
+package backoff
+
+import (
+	"time"
+
+	"google.golang.org/grpc/internal/grpcrand"
+)
+
+// Strategy defines the methodology for backing off after a grpc connection
+// failure.
+type Strategy interface {
+	// Backoff returns the amount of time to wait before the next retry given
+	// the number of consecutive failures.
+	Backoff(retries int) time.Duration
+}
+
+const (
+	// baseDelay is the amount of time to wait before retrying after the first
+	// failure.
+	baseDelay = 1.0 * time.Second
+	// factor is applied to the backoff after each retry.
+	factor = 1.6
+	// jitter provides a range to randomize backoff delays.
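// Illustrative sketch (not part of this patch): a minimal UnaryServerInterceptor
// of the shape defined in interceptor.go above; authUnary and the header name
// are hypothetical.
//
//	func authUnary(ctx context.Context, req interface{},
//		info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
//		md, ok := metadata.FromIncomingContext(ctx)
//		if !ok || len(md["authorization"]) == 0 {
//			return nil, status.Error(codes.Unauthenticated, "missing credentials")
//		}
//		return handler(ctx, req)
//	}
//
//	s := grpc.NewServer(grpc.UnaryInterceptor(authUnary))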
+	jitter = 0.2
+)
+
+// Exponential implements the exponential backoff algorithm as defined in
+// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
+type Exponential struct {
+	// MaxDelay is the upper bound of backoff delay.
+	MaxDelay time.Duration
+}
+
+// Backoff returns the amount of time to wait before the next retry given the
+// number of retries.
+func (bc Exponential) Backoff(retries int) time.Duration {
+	if retries == 0 {
+		return baseDelay
+	}
+	backoff, max := float64(baseDelay), float64(bc.MaxDelay)
+	for backoff < max && retries > 0 {
+		backoff *= factor
+		retries--
+	}
+	if backoff > max {
+		backoff = max
+	}
+	// Randomize backoff delays so that if a cluster of requests start at
+	// the same time, they won't operate in lockstep.
+	backoff *= 1 + jitter*(grpcrand.Float64()*2-1)
+	if backoff < 0 {
+		return 0
+	}
+	return time.Duration(backoff)
+}
diff --git a/vendor/google.golang.org/grpc/internal/balancerload/load.go b/vendor/google.golang.org/grpc/internal/balancerload/load.go
new file mode 100644
index 000000000..3a905d966
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/balancerload/load.go
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2019 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Package balancerload defines APIs to parse server loads in trailers. The
+// parsed loads are sent to balancers in DoneInfo.
+package balancerload
+
+import (
+	"google.golang.org/grpc/metadata"
+)
+
+// Parser converts loads from metadata into a concrete type.
+type Parser interface {
+	// Parse parses loads from metadata.
+	Parse(md metadata.MD) interface{}
+}
+
+var parser Parser
+
+// SetParser sets the load parser.
+//
+// Not mutex-protected, should be called before any gRPC functions.
+func SetParser(lr Parser) {
+	parser = lr
+}
+
+// Parse calls parser.Parse().
+func Parse(md metadata.MD) interface{} {
+	if parser == nil {
+		return nil
+	}
+	return parser.Parse(md)
+}
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go b/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go
new file mode 100644
index 000000000..fee6aecd0
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go
@@ -0,0 +1,167 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package binarylog implements binary logging as defined in
+// https://github.com/grpc/proposal/blob/master/A16-binary-logging.md.
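// Illustrative sketch (not part of this patch): the delay schedule produced by
// the Exponential strategy above, with jitter omitted, since the internal
// backoff package cannot be imported from outside the grpc module.
//
//	func delay(retries int, maxDelay time.Duration) time.Duration {
//		const factor = 1.6
//		baseDelay := time.Second
//		if retries == 0 {
//			return baseDelay
//		}
//		backoff, max := float64(baseDelay), float64(maxDelay)
//		for backoff < max && retries > 0 {
//			backoff *= factor
//			retries--
//		}
//		if backoff > max {
//			backoff = max
//		}
//		return time.Duration(backoff)
//	}
//
// delay(0) = 1s, delay(1) = 1.6s, delay(2) = 2.56s, ..., capped at maxDelay;
// the real implementation then applies a further ±20% jitter.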
+package binarylog
+
+import (
+	"fmt"
+	"os"
+
+	"google.golang.org/grpc/grpclog"
+)
+
+// Logger is the global binary logger. It can be used to get a binary logger for
+// each method.
+type Logger interface {
+	getMethodLogger(methodName string) *MethodLogger
+}
+
+// binLogger is the global binary logger for the binary. One of these should be
+// built at init time from the configuration (environment variable or flags).
+//
+// It is used to get a methodLogger for each individual method.
+var binLogger Logger
+
+// SetLogger sets the binary logger.
+//
+// Only call this at init time.
+func SetLogger(l Logger) {
+	binLogger = l
+}
+
+// GetMethodLogger returns the methodLogger for the given methodName.
+//
+// methodName should be in the format of "/service/method".
+//
+// Each methodLogger returned by this method is a new instance. This is to
+// generate sequence id within the call.
+func GetMethodLogger(methodName string) *MethodLogger {
+	if binLogger == nil {
+		return nil
+	}
+	return binLogger.getMethodLogger(methodName)
+}
+
+func init() {
+	const envStr = "GRPC_BINARY_LOG_FILTER"
+	configStr := os.Getenv(envStr)
+	binLogger = NewLoggerFromConfigString(configStr)
+}
+
+type methodLoggerConfig struct {
+	// Max length of header and message.
+	hdr, msg uint64
+}
+
+type logger struct {
+	all      *methodLoggerConfig
+	services map[string]*methodLoggerConfig
+	methods  map[string]*methodLoggerConfig
+
+	blacklist map[string]struct{}
+}
+
+// newEmptyLogger creates an empty logger. The map fields need to be filled in
+// using the set* functions.
+func newEmptyLogger() *logger {
+	return &logger{}
+}
+
+// Set method logger for "*".
+func (l *logger) setDefaultMethodLogger(ml *methodLoggerConfig) error {
+	if l.all != nil {
+		return fmt.Errorf("conflicting global rules found")
+	}
+	l.all = ml
+	return nil
+}
+
+// Set method logger for "service/*".
+//
+// New methodLogger with same service overrides the old one.
+func (l *logger) setServiceMethodLogger(service string, ml *methodLoggerConfig) error {
+	if _, ok := l.services[service]; ok {
+		return fmt.Errorf("conflicting rules for service %v found", service)
+	}
+	if l.services == nil {
+		l.services = make(map[string]*methodLoggerConfig)
+	}
+	l.services[service] = ml
+	return nil
+}
+
+// Set method logger for "service/method".
+//
+// New methodLogger with same method overrides the old one.
+func (l *logger) setMethodMethodLogger(method string, ml *methodLoggerConfig) error {
+	if _, ok := l.blacklist[method]; ok {
+		return fmt.Errorf("conflicting rules for method %v found", method)
+	}
+	if _, ok := l.methods[method]; ok {
+		return fmt.Errorf("conflicting rules for method %v found", method)
+	}
+	if l.methods == nil {
+		l.methods = make(map[string]*methodLoggerConfig)
+	}
+	l.methods[method] = ml
+	return nil
+}
+
+// Set blacklist method for "-service/method".
+func (l *logger) setBlacklist(method string) error {
+	if _, ok := l.blacklist[method]; ok {
+		return fmt.Errorf("conflicting rules for method %v found", method)
+	}
+	if _, ok := l.methods[method]; ok {
+		return fmt.Errorf("conflicting rules for method %v found", method)
+	}
+	if l.blacklist == nil {
+		l.blacklist = make(map[string]struct{})
+	}
+	l.blacklist[method] = struct{}{}
+	return nil
+}
+
+// getMethodLogger returns the methodLogger for the given methodName.
+//
+// methodName should be in the format of "/service/method".
+//
+// Each methodLogger returned by this method is a new instance. This is to
+// generate sequence id within the call.
+func (l *logger) getMethodLogger(methodName string) *MethodLogger { + s, m, err := parseMethodName(methodName) + if err != nil { + grpclog.Infof("binarylogging: failed to parse %q: %v", methodName, err) + return nil + } + if ml, ok := l.methods[s+"/"+m]; ok { + return newMethodLogger(ml.hdr, ml.msg) + } + if _, ok := l.blacklist[s+"/"+m]; ok { + return nil + } + if ml, ok := l.services[s]; ok { + return newMethodLogger(ml.hdr, ml.msg) + } + if l.all == nil { + return nil + } + return newMethodLogger(l.all.hdr, l.all.msg) +} diff --git a/vendor/google.golang.org/grpc/internal/binarylog/binarylog_testutil.go b/vendor/google.golang.org/grpc/internal/binarylog/binarylog_testutil.go new file mode 100644 index 000000000..1ee00a39a --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/binarylog/binarylog_testutil.go @@ -0,0 +1,42 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// This file contains exported variables/functions that are exported for testing +// only. +// +// An ideal way for this would be to put those in a *_test.go but in binarylog +// package. But this doesn't work with staticcheck with go module. Error was: +// "MdToMetadataProto not declared by package binarylog". This could be caused +// by the way staticcheck looks for files for a certain package, which doesn't +// support *_test.go files. +// +// Move those to binary_test.go when staticcheck is fixed. + +package binarylog + +var ( + // AllLogger is a logger that logs all headers/messages for all RPCs. It's + // for testing only. + AllLogger = NewLoggerFromConfigString("*") + // MdToMetadataProto converts metadata to a binary logging proto message. + // It's for testing only. + MdToMetadataProto = mdToMetadataProto + // AddrToProto converts an address to a binary logging proto message. It's + // for testing only. + AddrToProto = addrToProto +) diff --git a/vendor/google.golang.org/grpc/internal/binarylog/env_config.go b/vendor/google.golang.org/grpc/internal/binarylog/env_config.go new file mode 100644 index 000000000..4cc2525df --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/binarylog/env_config.go @@ -0,0 +1,210 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package binarylog + +import ( + "errors" + "fmt" + "regexp" + "strconv" + "strings" + + "google.golang.org/grpc/grpclog" +) + +// NewLoggerFromConfigString reads the string and build a logger. 
It can be used
+// to build a new logger and assign it to binarylog.Logger.
+//
+// Example filter config strings:
+//  - "" Nothing will be logged
+//  - "*" All headers and messages will be fully logged.
+//  - "*{h}" Only headers will be logged.
+//  - "*{m:256}" Only the first 256 bytes of each message will be logged.
+//  - "Foo/*" Logs every method in service Foo
+//  - "Foo/*,-Foo/Bar" Logs every method in service Foo except method /Foo/Bar
+//  - "Foo/*,Foo/Bar{m:256}" Logs the first 256 bytes of each message in method
+//    /Foo/Bar, logs all headers and messages in every other method in service
+//    Foo.
+//
+// If two configs exist for one certain method or service, the one specified
+// later overrides the previous config.
+func NewLoggerFromConfigString(s string) Logger {
+	if s == "" {
+		return nil
+	}
+	l := newEmptyLogger()
+	methods := strings.Split(s, ",")
+	for _, method := range methods {
+		if err := l.fillMethodLoggerWithConfigString(method); err != nil {
+			grpclog.Warningf("failed to parse binary log config: %v", err)
+			return nil
+		}
+	}
+	return l
+}
+
+// fillMethodLoggerWithConfigString parses config, creates methodLogger and adds
+// it to the right map in the logger.
+func (l *logger) fillMethodLoggerWithConfigString(config string) error {
+	// "" is invalid.
+	if config == "" {
+		return errors.New("empty string is not a valid method binary logging config")
+	}
+
+	// "-service/method", blacklist, no * or {} allowed.
+	if config[0] == '-' {
+		s, m, suffix, err := parseMethodConfigAndSuffix(config[1:])
+		if err != nil {
+			return fmt.Errorf("invalid config: %q, %v", config, err)
+		}
+		if m == "*" {
+			return fmt.Errorf("invalid config: %q, %v", config, "* not allowed in blacklist config")
+		}
+		if suffix != "" {
+			return fmt.Errorf("invalid config: %q, %v", config, "header/message limit not allowed in blacklist config")
+		}
+		if err := l.setBlacklist(s + "/" + m); err != nil {
+			return fmt.Errorf("invalid config: %v", err)
+		}
+		return nil
+	}
+
+	// "*{h:256;m:256}"
+	if config[0] == '*' {
+		hdr, msg, err := parseHeaderMessageLengthConfig(config[1:])
+		if err != nil {
+			return fmt.Errorf("invalid config: %q, %v", config, err)
+		}
+		if err := l.setDefaultMethodLogger(&methodLoggerConfig{hdr: hdr, msg: msg}); err != nil {
+			return fmt.Errorf("invalid config: %v", err)
+		}
+		return nil
+	}
+
+	s, m, suffix, err := parseMethodConfigAndSuffix(config)
+	if err != nil {
+		return fmt.Errorf("invalid config: %q, %v", config, err)
+	}
+	hdr, msg, err := parseHeaderMessageLengthConfig(suffix)
+	if err != nil {
+		return fmt.Errorf("invalid header/message length config: %q, %v", suffix, err)
+	}
+	if m == "*" {
+		if err := l.setServiceMethodLogger(s, &methodLoggerConfig{hdr: hdr, msg: msg}); err != nil {
+			return fmt.Errorf("invalid config: %v", err)
+		}
+	} else {
+		if err := l.setMethodMethodLogger(s+"/"+m, &methodLoggerConfig{hdr: hdr, msg: msg}); err != nil {
+			return fmt.Errorf("invalid config: %v", err)
+		}
+	}
+	return nil
+}
+
+const (
+	// TODO: this const is only used by env_config now. But could be useful for
+	// other config. Move to binarylog.go if necessary.
+	maxUInt = ^uint64(0)
+
+	// For "p.s/m" plus any suffix. Suffix will be parsed again. See test for
+	// expected output.
+	longMethodConfigRegexpStr = `^([\w./]+)/((?:\w+)|[*])(.+)?$`
+
+	// For suffix from above, "{h:123,m:123}". See test for expected output.
+	optionalLengthRegexpStr = `(?::(\d+))?` // Optional ":123".
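// Illustrative sketch (not part of this patch): how the regexp above splits a
// filter entry; the names here are local stand-ins for the unexported ones.
//
//	var longMethodConfig = regexp.MustCompile(`^([\w./]+)/((?:\w+)|[*])(.+)?$`)
//
//	func splitConfig(c string) (service, method, suffix string, ok bool) {
//		m := longMethodConfig.FindStringSubmatch(c)
//		if m == nil {
//			return "", "", "", false
//		}
//		return m[1], m[2], m[3], true
//	}
//
// splitConfig("p.s/m{h:123;m:456}") returns "p.s", "m", "{h:123;m:456}", true.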
+ headerConfigRegexpStr = `^{h` + optionalLengthRegexpStr + `}$` + messageConfigRegexpStr = `^{m` + optionalLengthRegexpStr + `}$` + headerMessageConfigRegexpStr = `^{h` + optionalLengthRegexpStr + `;m` + optionalLengthRegexpStr + `}$` +) + +var ( + longMethodConfigRegexp = regexp.MustCompile(longMethodConfigRegexpStr) + headerConfigRegexp = regexp.MustCompile(headerConfigRegexpStr) + messageConfigRegexp = regexp.MustCompile(messageConfigRegexpStr) + headerMessageConfigRegexp = regexp.MustCompile(headerMessageConfigRegexpStr) +) + +// Turn "service/method{h;m}" into "service", "method", "{h;m}". +func parseMethodConfigAndSuffix(c string) (service, method, suffix string, _ error) { + // Regexp result: + // + // in: "p.s/m{h:123,m:123}", + // out: []string{"p.s/m{h:123,m:123}", "p.s", "m", "{h:123,m:123}"}, + match := longMethodConfigRegexp.FindStringSubmatch(c) + if match == nil { + return "", "", "", fmt.Errorf("%q contains invalid substring", c) + } + service = match[1] + method = match[2] + suffix = match[3] + return +} + +// Turn "{h:123;m:345}" into 123, 345. +// +// Return maxUInt if length is unspecified. +func parseHeaderMessageLengthConfig(c string) (hdrLenStr, msgLenStr uint64, err error) { + if c == "" { + return maxUInt, maxUInt, nil + } + // Header config only. + if match := headerConfigRegexp.FindStringSubmatch(c); match != nil { + if s := match[1]; s != "" { + hdrLenStr, err = strconv.ParseUint(s, 10, 64) + if err != nil { + return 0, 0, fmt.Errorf("failed to convert %q to uint", s) + } + return hdrLenStr, 0, nil + } + return maxUInt, 0, nil + } + + // Message config only. + if match := messageConfigRegexp.FindStringSubmatch(c); match != nil { + if s := match[1]; s != "" { + msgLenStr, err = strconv.ParseUint(s, 10, 64) + if err != nil { + return 0, 0, fmt.Errorf("failed to convert %q to uint", s) + } + return 0, msgLenStr, nil + } + return 0, maxUInt, nil + } + + // Header and message config both. + if match := headerMessageConfigRegexp.FindStringSubmatch(c); match != nil { + // Both hdr and msg are specified, but one or two of them might be empty. + hdrLenStr = maxUInt + msgLenStr = maxUInt + if s := match[1]; s != "" { + hdrLenStr, err = strconv.ParseUint(s, 10, 64) + if err != nil { + return 0, 0, fmt.Errorf("failed to convert %q to uint", s) + } + } + if s := match[2]; s != "" { + msgLenStr, err = strconv.ParseUint(s, 10, 64) + if err != nil { + return 0, 0, fmt.Errorf("failed to convert %q to uint", s) + } + } + return hdrLenStr, msgLenStr, nil + } + return 0, 0, fmt.Errorf("%q contains invalid substring", c) +} diff --git a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go new file mode 100644 index 000000000..160f6e861 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go @@ -0,0 +1,423 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package binarylog + +import ( + "net" + "strings" + "sync/atomic" + "time" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +type callIDGenerator struct { + id uint64 +} + +func (g *callIDGenerator) next() uint64 { + id := atomic.AddUint64(&g.id, 1) + return id +} + +// reset is for testing only, and doesn't need to be thread safe. +func (g *callIDGenerator) reset() { + g.id = 0 +} + +var idGen callIDGenerator + +// MethodLogger is the sub-logger for each method. +type MethodLogger struct { + headerMaxLen, messageMaxLen uint64 + + callID uint64 + idWithinCallGen *callIDGenerator + + sink Sink // TODO(blog): make this plugable. +} + +func newMethodLogger(h, m uint64) *MethodLogger { + return &MethodLogger{ + headerMaxLen: h, + messageMaxLen: m, + + callID: idGen.next(), + idWithinCallGen: &callIDGenerator{}, + + sink: defaultSink, // TODO(blog): make it plugable. + } +} + +// Log creates a proto binary log entry, and logs it to the sink. +func (ml *MethodLogger) Log(c LogEntryConfig) { + m := c.toProto() + timestamp, _ := ptypes.TimestampProto(time.Now()) + m.Timestamp = timestamp + m.CallId = ml.callID + m.SequenceIdWithinCall = ml.idWithinCallGen.next() + + switch pay := m.Payload.(type) { + case *pb.GrpcLogEntry_ClientHeader: + m.PayloadTruncated = ml.truncateMetadata(pay.ClientHeader.GetMetadata()) + case *pb.GrpcLogEntry_ServerHeader: + m.PayloadTruncated = ml.truncateMetadata(pay.ServerHeader.GetMetadata()) + case *pb.GrpcLogEntry_Message: + m.PayloadTruncated = ml.truncateMessage(pay.Message) + } + + ml.sink.Write(m) +} + +func (ml *MethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) { + if ml.headerMaxLen == maxUInt { + return false + } + var ( + bytesLimit = ml.headerMaxLen + index int + ) + // At the end of the loop, index will be the first entry where the total + // size is greater than the limit: + // + // len(entry[:index]) <= ml.hdr && len(entry[:index+1]) > ml.hdr. + for ; index < len(mdPb.Entry); index++ { + entry := mdPb.Entry[index] + if entry.Key == "grpc-trace-bin" { + // "grpc-trace-bin" is a special key. It's kept in the log entry, + // but not counted towards the size limit. + continue + } + currentEntryLen := uint64(len(entry.Value)) + if currentEntryLen > bytesLimit { + break + } + bytesLimit -= currentEntryLen + } + truncated = index < len(mdPb.Entry) + mdPb.Entry = mdPb.Entry[:index] + return truncated +} + +func (ml *MethodLogger) truncateMessage(msgPb *pb.Message) (truncated bool) { + if ml.messageMaxLen == maxUInt { + return false + } + if ml.messageMaxLen >= uint64(len(msgPb.Data)) { + return false + } + msgPb.Data = msgPb.Data[:ml.messageMaxLen] + return true +} + +// LogEntryConfig represents the configuration for binary log entry. +type LogEntryConfig interface { + toProto() *pb.GrpcLogEntry +} + +// ClientHeader configs the binary log entry to be a ClientHeader entry. +type ClientHeader struct { + OnClientSide bool + Header metadata.MD + MethodName string + Authority string + Timeout time.Duration + // PeerAddr is required only when it's on server side. + PeerAddr net.Addr +} + +func (c *ClientHeader) toProto() *pb.GrpcLogEntry { + // This function doesn't need to set all the fields (e.g. seq ID). The Log + // function will set the fields when necessary. 
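// Illustrative sketch (not part of this patch): the message-truncation rule
// implemented by truncateMessage above, as a standalone helper.
//
//	func truncate(data []byte, limit uint64) ([]byte, bool) {
//		if limit >= uint64(len(data)) {
//			return data, false // nothing to cut
//		}
//		return data[:limit], true // keep the first limit bytes, mark truncated
//	}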
+	clientHeader := &pb.ClientHeader{
+		Metadata:   mdToMetadataProto(c.Header),
+		MethodName: c.MethodName,
+		Authority:  c.Authority,
+	}
+	if c.Timeout > 0 {
+		clientHeader.Timeout = ptypes.DurationProto(c.Timeout)
+	}
+	ret := &pb.GrpcLogEntry{
+		Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER,
+		Payload: &pb.GrpcLogEntry_ClientHeader{
+			ClientHeader: clientHeader,
+		},
+	}
+	if c.OnClientSide {
+		ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
+	} else {
+		ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
+	}
+	if c.PeerAddr != nil {
+		ret.Peer = addrToProto(c.PeerAddr)
+	}
+	return ret
+}
+
+// ServerHeader configures the binary log entry to be a ServerHeader entry.
+type ServerHeader struct {
+	OnClientSide bool
+	Header       metadata.MD
+	// PeerAddr is required only when it's on client side.
+	PeerAddr net.Addr
+}
+
+func (c *ServerHeader) toProto() *pb.GrpcLogEntry {
+	ret := &pb.GrpcLogEntry{
+		Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_HEADER,
+		Payload: &pb.GrpcLogEntry_ServerHeader{
+			ServerHeader: &pb.ServerHeader{
+				Metadata: mdToMetadataProto(c.Header),
+			},
+		},
+	}
+	if c.OnClientSide {
+		ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
+	} else {
+		ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
+	}
+	if c.PeerAddr != nil {
+		ret.Peer = addrToProto(c.PeerAddr)
+	}
+	return ret
+}
+
+// ClientMessage configures the binary log entry to be a ClientMessage entry.
+type ClientMessage struct {
+	OnClientSide bool
+	// Message can be a proto.Message or []byte. Other message formats are not
+	// supported.
+	Message interface{}
+}
+
+func (c *ClientMessage) toProto() *pb.GrpcLogEntry {
+	var (
+		data []byte
+		err  error
+	)
+	if m, ok := c.Message.(proto.Message); ok {
+		data, err = proto.Marshal(m)
+		if err != nil {
+			grpclog.Infof("binarylogging: failed to marshal proto message: %v", err)
+		}
+	} else if b, ok := c.Message.([]byte); ok {
+		data = b
+	} else {
+		grpclog.Infof("binarylogging: message to log is neither proto.Message nor []byte")
+	}
+	ret := &pb.GrpcLogEntry{
+		Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE,
+		Payload: &pb.GrpcLogEntry_Message{
+			Message: &pb.Message{
+				Length: uint32(len(data)),
+				Data:   data,
+			},
+		},
+	}
+	if c.OnClientSide {
+		ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
+	} else {
+		ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
+	}
+	return ret
+}
+
+// ServerMessage configures the binary log entry to be a ServerMessage entry.
+type ServerMessage struct {
+	OnClientSide bool
+	// Message can be a proto.Message or []byte. Other message formats are not
+	// supported.
+	Message interface{}
+}
+
+func (c *ServerMessage) toProto() *pb.GrpcLogEntry {
+	var (
+		data []byte
+		err  error
+	)
+	if m, ok := c.Message.(proto.Message); ok {
+		data, err = proto.Marshal(m)
+		if err != nil {
+			grpclog.Infof("binarylogging: failed to marshal proto message: %v", err)
+		}
+	} else if b, ok := c.Message.([]byte); ok {
+		data = b
+	} else {
+		grpclog.Infof("binarylogging: message to log is neither proto.Message nor []byte")
+	}
+	ret := &pb.GrpcLogEntry{
+		Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE,
+		Payload: &pb.GrpcLogEntry_Message{
+			Message: &pb.Message{
+				Length: uint32(len(data)),
+				Data:   data,
+			},
+		},
+	}
+	if c.OnClientSide {
+		ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
+	} else {
+		ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
+	}
+	return ret
+}
+
+// ClientHalfClose configures the binary log entry to be a ClientHalfClose entry.
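The payload handling in ClientMessage.toProto and ServerMessage.toProto reduces to one type switch. A self-contained sketch of that switch (the function name is hypothetical; the proto package is the same github.com/golang/protobuf/proto vendored by this change):

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

// messageBytes mirrors the switch above: proto messages are marshaled, raw
// byte slices pass through, and anything else is reported and logged empty.
func messageBytes(msg interface{}) []byte {
	switch m := msg.(type) {
	case proto.Message:
		b, err := proto.Marshal(m)
		if err != nil {
			fmt.Printf("failed to marshal proto message: %v\n", err)
		}
		return b
	case []byte:
		return m
	default:
		fmt.Println("message is neither proto.Message nor []byte")
		return nil
	}
}

func main() {
	fmt.Println(len(messageBytes([]byte("abc")))) // 3
}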
+type ClientHalfClose struct {
+	OnClientSide bool
+}
+
+func (c *ClientHalfClose) toProto() *pb.GrpcLogEntry {
+	ret := &pb.GrpcLogEntry{
+		Type:    pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE,
+		Payload: nil, // No payload here.
+	}
+	if c.OnClientSide {
+		ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
+	} else {
+		ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
+	}
+	return ret
+}
+
+// ServerTrailer configures the binary log entry to be a ServerTrailer entry.
+type ServerTrailer struct {
+	OnClientSide bool
+	Trailer      metadata.MD
+	// Err is the status error.
+	Err error
+	// PeerAddr is required only when it's on client side and the RPC is trailer
+	// only.
+	PeerAddr net.Addr
+}
+
+func (c *ServerTrailer) toProto() *pb.GrpcLogEntry {
+	st, ok := status.FromError(c.Err)
+	if !ok {
+		grpclog.Info("binarylogging: error in trailer is not a status error")
+	}
+	var (
+		detailsBytes []byte
+		err          error
+	)
+	stProto := st.Proto()
+	if stProto != nil && len(stProto.Details) != 0 {
+		detailsBytes, err = proto.Marshal(stProto)
+		if err != nil {
+			grpclog.Infof("binarylogging: failed to marshal status proto: %v", err)
+		}
+	}
+	ret := &pb.GrpcLogEntry{
+		Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER,
+		Payload: &pb.GrpcLogEntry_Trailer{
+			Trailer: &pb.Trailer{
+				Metadata:      mdToMetadataProto(c.Trailer),
+				StatusCode:    uint32(st.Code()),
+				StatusMessage: st.Message(),
+				StatusDetails: detailsBytes,
+			},
+		},
+	}
+	if c.OnClientSide {
+		ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
+	} else {
+		ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
+	}
+	if c.PeerAddr != nil {
+		ret.Peer = addrToProto(c.PeerAddr)
+	}
+	return ret
+}
+
+// Cancel configures the binary log entry to be a Cancel entry.
+type Cancel struct {
+	OnClientSide bool
+}
+
+func (c *Cancel) toProto() *pb.GrpcLogEntry {
+	ret := &pb.GrpcLogEntry{
+		Type:    pb.GrpcLogEntry_EVENT_TYPE_CANCEL,
+		Payload: nil,
+	}
+	if c.OnClientSide {
+		ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
+	} else {
+		ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
+	}
+	return ret
+}
+
+// metadataKeyOmit returns whether the metadata entry with this key should be
+// omitted.
+func metadataKeyOmit(key string) bool {
+	switch key {
+	case "lb-token", ":path", ":authority", "content-encoding", "content-type", "user-agent", "te":
+		return true
+	case "grpc-trace-bin": // grpc-trace-bin is special because it's visible to users.
+		return false
+	}
+	return strings.HasPrefix(key, "grpc-")
+}
+
+func mdToMetadataProto(md metadata.MD) *pb.Metadata {
+	ret := &pb.Metadata{}
+	for k, vv := range md {
+		if metadataKeyOmit(k) {
+			continue
+		}
+		for _, v := range vv {
+			ret.Entry = append(ret.Entry,
+				&pb.MetadataEntry{
+					Key:   k,
+					Value: []byte(v),
+				},
+			)
+		}
+	}
+	return ret
+}
+
+func addrToProto(addr net.Addr) *pb.Address {
+	ret := &pb.Address{}
+	switch a := addr.(type) {
+	case *net.TCPAddr:
+		if a.IP.To4() != nil {
+			ret.Type = pb.Address_TYPE_IPV4
+		} else if a.IP.To16() != nil {
+			ret.Type = pb.Address_TYPE_IPV6
+		} else {
+			ret.Type = pb.Address_TYPE_UNKNOWN
+			// Do not set address and port fields.
+			break
+		}
+		ret.Address = a.IP.String()
+		ret.IpPort = uint32(a.Port)
+	case *net.UnixAddr:
+		ret.Type = pb.Address_TYPE_UNIX
+		ret.Address = a.String()
+	default:
+		ret.Type = pb.Address_TYPE_UNKNOWN
+	}
+	return ret
+}
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/sink.go b/vendor/google.golang.org/grpc/internal/binarylog/sink.go
new file mode 100644
index 000000000..20d044f0f
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/binarylog/sink.go
@@ -0,0 +1,162 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package binarylog
+
+import (
+	"bufio"
+	"encoding/binary"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"sync"
+	"time"
+
+	"github.com/golang/protobuf/proto"
+	pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
+	"google.golang.org/grpc/grpclog"
+)
+
+var (
+	defaultSink Sink = &noopSink{} // TODO(blog): change this default (file in /tmp).
+)
+
+// SetDefaultSink sets the sink to which binary logs will be written.
+//
+// Not thread safe. Only set during initialization.
+func SetDefaultSink(s Sink) {
+	if defaultSink != nil {
+		defaultSink.Close()
+	}
+	defaultSink = s
+}
+
+// Sink writes log entries into the binary log sink.
+type Sink interface {
+	// Write will be called to write the log entry into the sink.
+	//
+	// It should be thread-safe so it can be called in parallel.
+	Write(*pb.GrpcLogEntry) error
+	// Close will be called when the Sink is replaced by a new Sink.
+	Close() error
+}
+
+type noopSink struct{}
+
+func (ns *noopSink) Write(*pb.GrpcLogEntry) error { return nil }
+func (ns *noopSink) Close() error                 { return nil }
+
+// newWriterSink creates a binary log sink with the given writer.
+//
+// Write() marshals the proto message and writes it to the given writer. Each
+// message is prefixed with a 4 byte big endian unsigned integer as the length.
+//
+// No buffering is done; Close() doesn't try to close the writer.
+func newWriterSink(w io.Writer) *writerSink {
+	return &writerSink{out: w}
+}
+
+type writerSink struct {
+	out io.Writer
+}
+
+func (ws *writerSink) Write(e *pb.GrpcLogEntry) error {
+	b, err := proto.Marshal(e)
+	if err != nil {
+		grpclog.Infof("binary logging: failed to marshal proto message: %v", err)
+	}
+	hdr := make([]byte, 4)
+	binary.BigEndian.PutUint32(hdr, uint32(len(b)))
+	if _, err := ws.out.Write(hdr); err != nil {
+		return err
+	}
+	if _, err := ws.out.Write(b); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (ws *writerSink) Close() error { return nil }
+
+type bufWriteCloserSink struct {
+	mu     sync.Mutex
+	closer io.Closer
+	out    *writerSink   // out is built on buf.
+	buf    *bufio.Writer // buf is kept for flush.
+
+	writeStartOnce sync.Once
+	writeTicker    *time.Ticker
+}
+
+func (fs *bufWriteCloserSink) Write(e *pb.GrpcLogEntry) error {
+	// Start the write loop when Write is called.
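Given the framing writerSink.Write emits (a 4-byte big-endian length prefix per marshaled entry), a matching reader can be sketched with the standard library alone; the payload is left as opaque bytes here rather than decoded into GrpcLogEntry protos:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
)

// readFrames decodes a stream of length-prefixed records: each record is a
// 4-byte big-endian length followed by that many payload bytes.
func readFrames(r io.Reader) ([][]byte, error) {
	var out [][]byte
	for {
		var hdr [4]byte
		if _, err := io.ReadFull(r, hdr[:]); err == io.EOF {
			return out, nil // clean end of stream
		} else if err != nil {
			return nil, err
		}
		buf := make([]byte, binary.BigEndian.Uint32(hdr[:]))
		if _, err := io.ReadFull(r, buf); err != nil {
			return nil, err
		}
		out = append(out, buf)
	}
}

func main() {
	var b bytes.Buffer
	b.Write([]byte{0, 0, 0, 3}) // one frame of length 3
	b.WriteString("abc")
	frames, _ := readFrames(&b)
	fmt.Println(len(frames), string(frames[0])) // 1 abc
}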
+ fs.writeStartOnce.Do(fs.startFlushGoroutine) + fs.mu.Lock() + if err := fs.out.Write(e); err != nil { + fs.mu.Unlock() + return err + } + fs.mu.Unlock() + return nil +} + +const ( + bufFlushDuration = 60 * time.Second +) + +func (fs *bufWriteCloserSink) startFlushGoroutine() { + fs.writeTicker = time.NewTicker(bufFlushDuration) + go func() { + for range fs.writeTicker.C { + fs.mu.Lock() + fs.buf.Flush() + fs.mu.Unlock() + } + }() +} + +func (fs *bufWriteCloserSink) Close() error { + if fs.writeTicker != nil { + fs.writeTicker.Stop() + } + fs.mu.Lock() + fs.buf.Flush() + fs.closer.Close() + fs.out.Close() + fs.mu.Unlock() + return nil +} + +func newBufWriteCloserSink(o io.WriteCloser) Sink { + bufW := bufio.NewWriter(o) + return &bufWriteCloserSink{ + closer: o, + out: newWriterSink(bufW), + buf: bufW, + } +} + +// NewTempFileSink creates a temp file and returns a Sink that writes to this +// file. +func NewTempFileSink() (Sink, error) { + tempFile, err := ioutil.TempFile("/tmp", "grpcgo_binarylog_*.txt") + if err != nil { + return nil, fmt.Errorf("failed to create temp file: %v", err) + } + return newBufWriteCloserSink(tempFile), nil +} diff --git a/vendor/google.golang.org/grpc/internal/binarylog/util.go b/vendor/google.golang.org/grpc/internal/binarylog/util.go new file mode 100644 index 000000000..15dc7803d --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/binarylog/util.go @@ -0,0 +1,41 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package binarylog + +import ( + "errors" + "strings" +) + +// parseMethodName splits service and method from the input. It expects format +// "/service/method". +// +// TODO: move to internal/grpcutil. +func parseMethodName(methodName string) (service, method string, _ error) { + if !strings.HasPrefix(methodName, "/") { + return "", "", errors.New("invalid method name: should start with /") + } + methodName = methodName[1:] + + pos := strings.LastIndex(methodName, "/") + if pos < 0 { + return "", "", errors.New("invalid method name: suffix /method is missing") + } + return methodName[:pos], methodName[pos+1:], nil +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/funcs.go b/vendor/google.golang.org/grpc/internal/channelz/funcs.go new file mode 100644 index 000000000..f0744f993 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/funcs.go @@ -0,0 +1,727 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
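The buffered sink above pairs a mutex-guarded bufio.Writer with a flush ticker so buffered entries are not held indefinitely. The same pattern in a self-contained sketch (the type name and the shortened interval are made up for demonstration):

package main

import (
	"bufio"
	"fmt"
	"os"
	"sync"
	"time"
)

// flushingWriter sketches the bufWriteCloserSink pattern: writes go through a
// bufio.Writer under a mutex, and a ticker goroutine flushes periodically.
type flushingWriter struct {
	mu  sync.Mutex
	buf *bufio.Writer
}

func (w *flushingWriter) start(interval time.Duration) *time.Ticker {
	t := time.NewTicker(interval)
	go func() {
		for range t.C {
			w.mu.Lock()
			w.buf.Flush()
			w.mu.Unlock()
		}
	}()
	return t
}

func main() {
	w := &flushingWriter{buf: bufio.NewWriter(os.Stdout)}
	t := w.start(10 * time.Millisecond)
	defer t.Stop()
	w.mu.Lock()
	fmt.Fprintln(w.buf, "buffered entry")
	w.mu.Unlock()
	time.Sleep(20 * time.Millisecond) // let the ticker flush once
}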
+ *
+ */
+
+// Package channelz defines APIs for enabling channelz service, entry
+// registration/deletion, and accessing channelz data. It also defines channelz
+// metric struct formats.
+//
+// All APIs in this package are experimental.
+package channelz
+
+import (
+	"fmt"
+	"sort"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"google.golang.org/grpc/grpclog"
+)
+
+const (
+	defaultMaxTraceEntry int32 = 30
+)
+
+var (
+	db    dbWrapper
+	idGen idGenerator
+	// EntryPerPage defines the number of channelz entries to be shown on a web page.
+	EntryPerPage  = int64(50)
+	curState      int32
+	maxTraceEntry = defaultMaxTraceEntry
+)
+
+// TurnOn turns on channelz data collection.
+func TurnOn() {
+	if !IsOn() {
+		NewChannelzStorage()
+		atomic.StoreInt32(&curState, 1)
+	}
+}
+
+// IsOn returns whether channelz data collection is on.
+func IsOn() bool {
+	return atomic.CompareAndSwapInt32(&curState, 1, 1)
+}
+
+// SetMaxTraceEntry sets the maximum number of trace entries per entity (i.e. channel/subchannel).
+// Setting it to 0 will disable channel tracing.
+func SetMaxTraceEntry(i int32) {
+	atomic.StoreInt32(&maxTraceEntry, i)
+}
+
+// ResetMaxTraceEntryToDefault resets the maximum number of trace entries per entity to the default.
+func ResetMaxTraceEntryToDefault() {
+	atomic.StoreInt32(&maxTraceEntry, defaultMaxTraceEntry)
+}
+
+func getMaxTraceEntry() int {
+	i := atomic.LoadInt32(&maxTraceEntry)
+	return int(i)
+}
+
+// dbWrapper wraps around a reference to internal channelz data storage, and
+// provides synchronized functionality to set and get the reference.
+type dbWrapper struct {
+	mu sync.RWMutex
+	DB *channelMap
+}
+
+func (d *dbWrapper) set(db *channelMap) {
+	d.mu.Lock()
+	d.DB = db
+	d.mu.Unlock()
+}
+
+func (d *dbWrapper) get() *channelMap {
+	d.mu.RLock()
+	defer d.mu.RUnlock()
+	return d.DB
+}
+
+// NewChannelzStorage initializes channelz data storage and id generator.
+//
+// This function returns a cleanup function to wait for all channelz state to be reset by the
+// grpc goroutines when those entities get closed. Using this cleanup function makes sure tests
+// don't interfere with each other: a lingering goroutine from a previous test doing entity removal
+// could otherwise remove an entity just registered by the new test, since the id space is shared.
+//
+// Note: This function is exported for testing purposes only. Users should not call
+// it in most cases.
+func NewChannelzStorage() (cleanup func() error) {
+	db.set(&channelMap{
+		topLevelChannels: make(map[int64]struct{}),
+		channels:         make(map[int64]*channel),
+		listenSockets:    make(map[int64]*listenSocket),
+		normalSockets:    make(map[int64]*normalSocket),
+		servers:          make(map[int64]*server),
+		subChannels:      make(map[int64]*subChannel),
+	})
+	idGen.reset()
+	return func() error {
+		var err error
+		cm := db.get()
+		if cm == nil {
+			return nil
+		}
+		for i := 0; i < 1000; i++ {
+			cm.mu.Lock()
+			if len(cm.topLevelChannels) == 0 && len(cm.servers) == 0 && len(cm.channels) == 0 && len(cm.subChannels) == 0 && len(cm.listenSockets) == 0 && len(cm.normalSockets) == 0 {
+				cm.mu.Unlock()
+				// all things stored in the channelz map have been cleared.
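A hypothetical test-harness use of NewChannelzStorage and its cleanup function (channelz is an internal package, so this only compiles from within the google.golang.org/grpc module):

func TestWithChannelz(t *testing.T) {
	czCleanup := channelz.NewChannelzStorage()
	defer func() {
		// Fails if entries linger past the ~10s grace period described above.
		if err := czCleanup(); err != nil {
			t.Error(err)
		}
	}()
	// ... exercise code that registers and removes channelz entries ...
}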
+ return nil + } + cm.mu.Unlock() + time.Sleep(10 * time.Millisecond) + } + + cm.mu.Lock() + err = fmt.Errorf("after 10s the channelz map has not been cleaned up yet, topchannels: %d, servers: %d, channels: %d, subchannels: %d, listen sockets: %d, normal sockets: %d", len(cm.topLevelChannels), len(cm.servers), len(cm.channels), len(cm.subChannels), len(cm.listenSockets), len(cm.normalSockets)) + cm.mu.Unlock() + return err + } +} + +// GetTopChannels returns a slice of top channel's ChannelMetric, along with a +// boolean indicating whether there's more top channels to be queried for. +// +// The arg id specifies that only top channel with id at or above it will be included +// in the result. The returned slice is up to a length of the arg maxResults or +// EntryPerPage if maxResults is zero, and is sorted in ascending id order. +func GetTopChannels(id int64, maxResults int64) ([]*ChannelMetric, bool) { + return db.get().GetTopChannels(id, maxResults) +} + +// GetServers returns a slice of server's ServerMetric, along with a +// boolean indicating whether there's more servers to be queried for. +// +// The arg id specifies that only server with id at or above it will be included +// in the result. The returned slice is up to a length of the arg maxResults or +// EntryPerPage if maxResults is zero, and is sorted in ascending id order. +func GetServers(id int64, maxResults int64) ([]*ServerMetric, bool) { + return db.get().GetServers(id, maxResults) +} + +// GetServerSockets returns a slice of server's (identified by id) normal socket's +// SocketMetric, along with a boolean indicating whether there's more sockets to +// be queried for. +// +// The arg startID specifies that only sockets with id at or above it will be +// included in the result. The returned slice is up to a length of the arg maxResults +// or EntryPerPage if maxResults is zero, and is sorted in ascending id order. +func GetServerSockets(id int64, startID int64, maxResults int64) ([]*SocketMetric, bool) { + return db.get().GetServerSockets(id, startID, maxResults) +} + +// GetChannel returns the ChannelMetric for the channel (identified by id). +func GetChannel(id int64) *ChannelMetric { + return db.get().GetChannel(id) +} + +// GetSubChannel returns the SubChannelMetric for the subchannel (identified by id). +func GetSubChannel(id int64) *SubChannelMetric { + return db.get().GetSubChannel(id) +} + +// GetSocket returns the SocketInternalMetric for the socket (identified by id). +func GetSocket(id int64) *SocketMetric { + return db.get().GetSocket(id) +} + +// GetServer returns the ServerMetric for the server (identified by id). +func GetServer(id int64) *ServerMetric { + return db.get().GetServer(id) +} + +// RegisterChannel registers the given channel c in channelz database with ref +// as its reference name, and add it to the child list of its parent (identified +// by pid). pid = 0 means no parent. It returns the unique channelz tracking id +// assigned to this channel. 
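A hypothetical pagination loop over all top channels, using the id-based cursor semantics documented above; results come back sorted by id ascending, so the next page starts just past the last id seen (internal package; shown for shape only):

func dumpTopChannels() {
	id := int64(0)
	for {
		metrics, end := channelz.GetTopChannels(id, 0) // maxResults 0 -> up to EntryPerPage
		for _, m := range metrics {
			fmt.Printf("channel %d (%s)\n", m.ID, m.RefName)
		}
		if end || len(metrics) == 0 {
			break
		}
		id = metrics[len(metrics)-1].ID + 1
	}
}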
+func RegisterChannel(c Channel, pid int64, ref string) int64 {
+	id := idGen.genID()
+	cn := &channel{
+		refName:     ref,
+		c:           c,
+		subChans:    make(map[int64]string),
+		nestedChans: make(map[int64]string),
+		id:          id,
+		pid:         pid,
+		trace:       &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())},
+	}
+	if pid == 0 {
+		db.get().addChannel(id, cn, true, pid, ref)
+	} else {
+		db.get().addChannel(id, cn, false, pid, ref)
+	}
+	return id
+}
+
+// RegisterSubChannel registers the given channel c in channelz database with ref
+// as its reference name, and adds it to the child list of its parent (identified
+// by pid). It returns the unique channelz tracking id assigned to this subchannel.
+func RegisterSubChannel(c Channel, pid int64, ref string) int64 {
+	if pid == 0 {
+		grpclog.Error("a SubChannel's parent id cannot be 0")
+		return 0
+	}
+	id := idGen.genID()
+	sc := &subChannel{
+		refName: ref,
+		c:       c,
+		sockets: make(map[int64]string),
+		id:      id,
+		pid:     pid,
+		trace:   &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())},
+	}
+	db.get().addSubChannel(id, sc, pid, ref)
+	return id
+}
+
+// RegisterServer registers the given server s in channelz database. It returns
+// the unique channelz tracking id assigned to this server.
+func RegisterServer(s Server, ref string) int64 {
+	id := idGen.genID()
+	svr := &server{
+		refName:       ref,
+		s:             s,
+		sockets:       make(map[int64]string),
+		listenSockets: make(map[int64]string),
+		id:            id,
+	}
+	db.get().addServer(id, svr)
+	return id
+}
+
+// RegisterListenSocket registers the given listen socket s in channelz database
+// with ref as its reference name, and adds it to the child list of its parent
+// (identified by pid). It returns the unique channelz tracking id assigned to
+// this listen socket.
+func RegisterListenSocket(s Socket, pid int64, ref string) int64 {
+	if pid == 0 {
+		grpclog.Error("a ListenSocket's parent id cannot be 0")
+		return 0
+	}
+	id := idGen.genID()
+	ls := &listenSocket{refName: ref, s: s, id: id, pid: pid}
+	db.get().addListenSocket(id, ls, pid, ref)
+	return id
+}
+
+// RegisterNormalSocket registers the given normal socket s in channelz database
+// with ref as its reference name, and adds it to the child list of its parent
+// (identified by pid). It returns the unique channelz tracking id assigned to
+// this normal socket.
+func RegisterNormalSocket(s Socket, pid int64, ref string) int64 {
+	if pid == 0 {
+		grpclog.Error("a NormalSocket's parent id cannot be 0")
+		return 0
+	}
+	id := idGen.genID()
+	ns := &normalSocket{refName: ref, s: s, id: id, pid: pid}
+	db.get().addNormalSocket(id, ns, pid, ref)
+	return id
+}
+
+// RemoveEntry removes the entry with the given unique channelz tracking id from
+// the channelz database.
+func RemoveEntry(id int64) {
+	db.get().removeEntry(id)
+}
+
+// TraceEventDesc is what the caller of AddTraceEvent should provide to describe the event to be added
+// to the channel trace.
+// The Parent field is optional. It is used for an event that will also be recorded in the entity's
+// parent trace.
+type TraceEventDesc struct {
+	Desc     string
+	Severity Severity
+	Parent   *TraceEventDesc
+}
+
+// AddTraceEvent adds a trace event related to the entity with the specified id, using the provided
+// TraceEventDesc.
+func AddTraceEvent(id int64, desc *TraceEventDesc) {
+	if getMaxTraceEntry() == 0 {
+		return
+	}
+	db.get().traceEvent(id, desc)
+}
+
+// channelMap is the storage data structure for channelz.
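A hypothetical registration of a channel, subchannel, and socket hierarchy, mirroring what grpc's connection code does internally; myChan and mySock are assumed implementors of this package's Channel and Socket interfaces:

topID := channelz.RegisterChannel(myChan, 0, "example-channel") // pid 0 => top-level
subID := channelz.RegisterSubChannel(myChan, topID, "example-subchannel")
sockID := channelz.RegisterNormalSocket(mySock, subID, "example-conn")
// Entries are removed bottom-up as the corresponding objects close:
channelz.RemoveEntry(sockID)
channelz.RemoveEntry(subID)
channelz.RemoveEntry(topID)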
+// Methods of channelMap can be divided into two categories with respect to locking.
+// 1. Methods that acquire the global lock.
+// 2. Methods that can only be called when the global lock is held.
+// Methods of the second type must always be called from within a method of the first type.
+type channelMap struct {
+	mu               sync.RWMutex
+	topLevelChannels map[int64]struct{}
+	servers          map[int64]*server
+	channels         map[int64]*channel
+	subChannels      map[int64]*subChannel
+	listenSockets    map[int64]*listenSocket
+	normalSockets    map[int64]*normalSocket
+}
+
+func (c *channelMap) addServer(id int64, s *server) {
+	c.mu.Lock()
+	s.cm = c
+	c.servers[id] = s
+	c.mu.Unlock()
+}
+
+func (c *channelMap) addChannel(id int64, cn *channel, isTopChannel bool, pid int64, ref string) {
+	c.mu.Lock()
+	cn.cm = c
+	cn.trace.cm = c
+	c.channels[id] = cn
+	if isTopChannel {
+		c.topLevelChannels[id] = struct{}{}
+	} else {
+		c.findEntry(pid).addChild(id, cn)
+	}
+	c.mu.Unlock()
+}
+
+func (c *channelMap) addSubChannel(id int64, sc *subChannel, pid int64, ref string) {
+	c.mu.Lock()
+	sc.cm = c
+	sc.trace.cm = c
+	c.subChannels[id] = sc
+	c.findEntry(pid).addChild(id, sc)
+	c.mu.Unlock()
+}
+
+func (c *channelMap) addListenSocket(id int64, ls *listenSocket, pid int64, ref string) {
+	c.mu.Lock()
+	ls.cm = c
+	c.listenSockets[id] = ls
+	c.findEntry(pid).addChild(id, ls)
+	c.mu.Unlock()
+}
+
+func (c *channelMap) addNormalSocket(id int64, ns *normalSocket, pid int64, ref string) {
+	c.mu.Lock()
+	ns.cm = c
+	c.normalSockets[id] = ns
+	c.findEntry(pid).addChild(id, ns)
+	c.mu.Unlock()
+}
+
+// removeEntry triggers the removal of an entry, which may not immediately delete the entry if it
+// has to wait on the deletion of its children, or until no other entity's channel trace references
+// it. It may lead to a chain of entry deletion. For example, deleting the last socket of a
+// gracefully shutting down server will lead to the server being also deleted.
+func (c *channelMap) removeEntry(id int64) {
+	c.mu.Lock()
+	c.findEntry(id).triggerDelete()
+	c.mu.Unlock()
+}
+
+// c.mu must be held by the caller
+func (c *channelMap) decrTraceRefCount(id int64) {
+	e := c.findEntry(id)
+	if v, ok := e.(tracedChannel); ok {
+		v.decrTraceRefCount()
+		e.deleteSelfIfReady()
+	}
+}
+
+// c.mu must be held by the caller.
+func (c *channelMap) findEntry(id int64) entry {
+	var v entry
+	var ok bool
+	if v, ok = c.channels[id]; ok {
+		return v
+	}
+	if v, ok = c.subChannels[id]; ok {
+		return v
+	}
+	if v, ok = c.servers[id]; ok {
+		return v
+	}
+	if v, ok = c.listenSockets[id]; ok {
+		return v
+	}
+	if v, ok = c.normalSockets[id]; ok {
+		return v
+	}
+	return &dummyEntry{idNotFound: id}
+}
+
+// c.mu must be held by the caller
+// deleteEntry simply deletes an entry from the channelMap. Before calling this
+// method, the caller must check this entry is ready to be deleted, i.e. removeEntry()
+// has been called on it, and no children still exist.
+// Conditionals are ordered by the expected frequency of deletion of each entity
+// type, in order to optimize performance.
+func (c *channelMap) deleteEntry(id int64) { + var ok bool + if _, ok = c.normalSockets[id]; ok { + delete(c.normalSockets, id) + return + } + if _, ok = c.subChannels[id]; ok { + delete(c.subChannels, id) + return + } + if _, ok = c.channels[id]; ok { + delete(c.channels, id) + delete(c.topLevelChannels, id) + return + } + if _, ok = c.listenSockets[id]; ok { + delete(c.listenSockets, id) + return + } + if _, ok = c.servers[id]; ok { + delete(c.servers, id) + return + } +} + +func (c *channelMap) traceEvent(id int64, desc *TraceEventDesc) { + c.mu.Lock() + child := c.findEntry(id) + childTC, ok := child.(tracedChannel) + if !ok { + c.mu.Unlock() + return + } + childTC.getChannelTrace().append(&TraceEvent{Desc: desc.Desc, Severity: desc.Severity, Timestamp: time.Now()}) + if desc.Parent != nil { + parent := c.findEntry(child.getParentID()) + var chanType RefChannelType + switch child.(type) { + case *channel: + chanType = RefChannel + case *subChannel: + chanType = RefSubChannel + } + if parentTC, ok := parent.(tracedChannel); ok { + parentTC.getChannelTrace().append(&TraceEvent{ + Desc: desc.Parent.Desc, + Severity: desc.Parent.Severity, + Timestamp: time.Now(), + RefID: id, + RefName: childTC.getRefName(), + RefType: chanType, + }) + childTC.incrTraceRefCount() + } + } + c.mu.Unlock() +} + +type int64Slice []int64 + +func (s int64Slice) Len() int { return len(s) } +func (s int64Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s int64Slice) Less(i, j int) bool { return s[i] < s[j] } + +func copyMap(m map[int64]string) map[int64]string { + n := make(map[int64]string) + for k, v := range m { + n[k] = v + } + return n +} + +func min(a, b int64) int64 { + if a < b { + return a + } + return b +} + +func (c *channelMap) GetTopChannels(id int64, maxResults int64) ([]*ChannelMetric, bool) { + if maxResults <= 0 { + maxResults = EntryPerPage + } + c.mu.RLock() + l := int64(len(c.topLevelChannels)) + ids := make([]int64, 0, l) + cns := make([]*channel, 0, min(l, maxResults)) + + for k := range c.topLevelChannels { + ids = append(ids, k) + } + sort.Sort(int64Slice(ids)) + idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id }) + count := int64(0) + var end bool + var t []*ChannelMetric + for i, v := range ids[idx:] { + if count == maxResults { + break + } + if cn, ok := c.channels[v]; ok { + cns = append(cns, cn) + t = append(t, &ChannelMetric{ + NestedChans: copyMap(cn.nestedChans), + SubChans: copyMap(cn.subChans), + }) + count++ + } + if i == len(ids[idx:])-1 { + end = true + break + } + } + c.mu.RUnlock() + if count == 0 { + end = true + } + + for i, cn := range cns { + t[i].ChannelData = cn.c.ChannelzMetric() + t[i].ID = cn.id + t[i].RefName = cn.refName + t[i].Trace = cn.trace.dumpData() + } + return t, end +} + +func (c *channelMap) GetServers(id, maxResults int64) ([]*ServerMetric, bool) { + if maxResults <= 0 { + maxResults = EntryPerPage + } + c.mu.RLock() + l := int64(len(c.servers)) + ids := make([]int64, 0, l) + ss := make([]*server, 0, min(l, maxResults)) + for k := range c.servers { + ids = append(ids, k) + } + sort.Sort(int64Slice(ids)) + idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id }) + count := int64(0) + var end bool + var s []*ServerMetric + for i, v := range ids[idx:] { + if count == maxResults { + break + } + if svr, ok := c.servers[v]; ok { + ss = append(ss, svr) + s = append(s, &ServerMetric{ + ListenSockets: copyMap(svr.listenSockets), + }) + count++ + } + if i == len(ids[idx:])-1 { + end = true + break + } + } + c.mu.RUnlock() 
+ if count == 0 { + end = true + } + + for i, svr := range ss { + s[i].ServerData = svr.s.ChannelzMetric() + s[i].ID = svr.id + s[i].RefName = svr.refName + } + return s, end +} + +func (c *channelMap) GetServerSockets(id int64, startID int64, maxResults int64) ([]*SocketMetric, bool) { + if maxResults <= 0 { + maxResults = EntryPerPage + } + var svr *server + var ok bool + c.mu.RLock() + if svr, ok = c.servers[id]; !ok { + // server with id doesn't exist. + c.mu.RUnlock() + return nil, true + } + svrskts := svr.sockets + l := int64(len(svrskts)) + ids := make([]int64, 0, l) + sks := make([]*normalSocket, 0, min(l, maxResults)) + for k := range svrskts { + ids = append(ids, k) + } + sort.Sort(int64Slice(ids)) + idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= startID }) + count := int64(0) + var end bool + for i, v := range ids[idx:] { + if count == maxResults { + break + } + if ns, ok := c.normalSockets[v]; ok { + sks = append(sks, ns) + count++ + } + if i == len(ids[idx:])-1 { + end = true + break + } + } + c.mu.RUnlock() + if count == 0 { + end = true + } + var s []*SocketMetric + for _, ns := range sks { + sm := &SocketMetric{} + sm.SocketData = ns.s.ChannelzMetric() + sm.ID = ns.id + sm.RefName = ns.refName + s = append(s, sm) + } + return s, end +} + +func (c *channelMap) GetChannel(id int64) *ChannelMetric { + cm := &ChannelMetric{} + var cn *channel + var ok bool + c.mu.RLock() + if cn, ok = c.channels[id]; !ok { + // channel with id doesn't exist. + c.mu.RUnlock() + return nil + } + cm.NestedChans = copyMap(cn.nestedChans) + cm.SubChans = copyMap(cn.subChans) + // cn.c can be set to &dummyChannel{} when deleteSelfFromMap is called. Save a copy of cn.c when + // holding the lock to prevent potential data race. + chanCopy := cn.c + c.mu.RUnlock() + cm.ChannelData = chanCopy.ChannelzMetric() + cm.ID = cn.id + cm.RefName = cn.refName + cm.Trace = cn.trace.dumpData() + return cm +} + +func (c *channelMap) GetSubChannel(id int64) *SubChannelMetric { + cm := &SubChannelMetric{} + var sc *subChannel + var ok bool + c.mu.RLock() + if sc, ok = c.subChannels[id]; !ok { + // subchannel with id doesn't exist. + c.mu.RUnlock() + return nil + } + cm.Sockets = copyMap(sc.sockets) + // sc.c can be set to &dummyChannel{} when deleteSelfFromMap is called. Save a copy of sc.c when + // holding the lock to prevent potential data race. 
+ chanCopy := sc.c + c.mu.RUnlock() + cm.ChannelData = chanCopy.ChannelzMetric() + cm.ID = sc.id + cm.RefName = sc.refName + cm.Trace = sc.trace.dumpData() + return cm +} + +func (c *channelMap) GetSocket(id int64) *SocketMetric { + sm := &SocketMetric{} + c.mu.RLock() + if ls, ok := c.listenSockets[id]; ok { + c.mu.RUnlock() + sm.SocketData = ls.s.ChannelzMetric() + sm.ID = ls.id + sm.RefName = ls.refName + return sm + } + if ns, ok := c.normalSockets[id]; ok { + c.mu.RUnlock() + sm.SocketData = ns.s.ChannelzMetric() + sm.ID = ns.id + sm.RefName = ns.refName + return sm + } + c.mu.RUnlock() + return nil +} + +func (c *channelMap) GetServer(id int64) *ServerMetric { + sm := &ServerMetric{} + var svr *server + var ok bool + c.mu.RLock() + if svr, ok = c.servers[id]; !ok { + c.mu.RUnlock() + return nil + } + sm.ListenSockets = copyMap(svr.listenSockets) + c.mu.RUnlock() + sm.ID = svr.id + sm.RefName = svr.refName + sm.ServerData = svr.s.ChannelzMetric() + return sm +} + +type idGenerator struct { + id int64 +} + +func (i *idGenerator) reset() { + atomic.StoreInt64(&i.id, 0) +} + +func (i *idGenerator) genID() int64 { + return atomic.AddInt64(&i.id, 1) +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/types.go b/vendor/google.golang.org/grpc/internal/channelz/types.go new file mode 100644 index 000000000..17c2274cb --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/types.go @@ -0,0 +1,702 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +import ( + "net" + "sync" + "sync/atomic" + "time" + + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/grpclog" +) + +// entry represents a node in the channelz database. +type entry interface { + // addChild adds a child e, whose channelz id is id to child list + addChild(id int64, e entry) + // deleteChild deletes a child with channelz id to be id from child list + deleteChild(id int64) + // triggerDelete tries to delete self from channelz database. However, if child + // list is not empty, then deletion from the database is on hold until the last + // child is deleted from database. + triggerDelete() + // deleteSelfIfReady check whether triggerDelete() has been called before, and whether child + // list is now empty. If both conditions are met, then delete self from database. + deleteSelfIfReady() + // getParentID returns parent ID of the entry. 0 value parent ID means no parent. + getParentID() int64 +} + +// dummyEntry is a fake entry to handle entry not found case. +type dummyEntry struct { + idNotFound int64 +} + +func (d *dummyEntry) addChild(id int64, e entry) { + // Note: It is possible for a normal program to reach here under race condition. + // For example, there could be a race between ClientConn.Close() info being propagated + // to addrConn and http2Client. ClientConn.Close() cancel the context and result + // in http2Client to error. 
The error info is then caught by the transport monitor before addrConn.tearDown()
+	// is called inside ClientConn.Close(). Therefore, the addrConn will create a new
+	// transport. And when registering the new transport in channelz, its parent
+	// addrConn could have already been torn down and deleted from channelz tracking,
+	// and thus reach the code here.
+	grpclog.Infof("attempt to add child of type %T with id %d to a parent (id=%d) that doesn't currently exist", e, id, d.idNotFound)
+}
+
+func (d *dummyEntry) deleteChild(id int64) {
+	// It is possible for a normal program to reach here under race condition.
+	// Refer to the example described in addChild().
+	grpclog.Infof("attempt to delete child with id %d from a parent (id=%d) that doesn't currently exist", id, d.idNotFound)
+}
+
+func (d *dummyEntry) triggerDelete() {
+	grpclog.Warningf("attempt to delete an entry (id=%d) that doesn't currently exist", d.idNotFound)
+}
+
+func (*dummyEntry) deleteSelfIfReady() {
+	// code should not reach here. deleteSelfIfReady is always called on an existing entry.
+}
+
+func (*dummyEntry) getParentID() int64 {
+	return 0
+}
+
+// ChannelMetric defines the info channelz provides for a specific Channel, which
+// includes ChannelInternalMetric and channelz-specific data, such as channelz id,
+// child list, etc.
+type ChannelMetric struct {
+	// ID is the channelz id of this channel.
+	ID int64
+	// RefName is the human readable reference string of this channel.
+	RefName string
+	// ChannelData contains channel internal metric reported by the channel through
+	// ChannelzMetric().
+	ChannelData *ChannelInternalMetric
+	// NestedChans tracks the nested channel type children of this channel in the format of
+	// a map from nested channel channelz id to corresponding reference string.
+	NestedChans map[int64]string
+	// SubChans tracks the subchannel type children of this channel in the format of a
+	// map from subchannel channelz id to corresponding reference string.
+	SubChans map[int64]string
+	// Sockets tracks the socket type children of this channel in the format of a map
+	// from socket channelz id to corresponding reference string.
+	// Note the current grpc implementation doesn't allow a channel to have sockets directly;
+	// therefore, this field is unused.
+	Sockets map[int64]string
+	// Trace contains the most recent traced events.
+	Trace *ChannelTrace
+}
+
+// SubChannelMetric defines the info channelz provides for a specific SubChannel,
+// which includes ChannelInternalMetric and channelz-specific data, such as
+// channelz id, child list, etc.
+type SubChannelMetric struct {
+	// ID is the channelz id of this subchannel.
+	ID int64
+	// RefName is the human readable reference string of this subchannel.
+	RefName string
+	// ChannelData contains subchannel internal metric reported by the subchannel
+	// through ChannelzMetric().
+	ChannelData *ChannelInternalMetric
+	// NestedChans tracks the nested channel type children of this subchannel in the format of
+	// a map from nested channel channelz id to corresponding reference string.
+	// Note the current grpc implementation doesn't allow a subchannel to have nested channels
+	// as children; therefore, this field is unused.
+	NestedChans map[int64]string
+	// SubChans tracks the subchannel type children of this subchannel in the format of a
+	// map from subchannel channelz id to corresponding reference string.
+	// Note the current grpc implementation doesn't allow a subchannel to have subchannels
+	// as children; therefore, this field is unused.
+	SubChans map[int64]string
+	// Sockets tracks the socket type children of this subchannel in the format of a map
+	// from socket channelz id to corresponding reference string.
+	Sockets map[int64]string
+	// Trace contains the most recent traced events.
+	Trace *ChannelTrace
+}
+
+// ChannelInternalMetric defines the struct that the implementor of Channel interface
+// should return from ChannelzMetric().
+type ChannelInternalMetric struct {
+	// current connectivity state of the channel.
+	State connectivity.State
+	// The target this channel originally tried to connect to. May be absent.
+	Target string
+	// The number of calls started on the channel.
+	CallsStarted int64
+	// The number of calls that have completed with an OK status.
+	CallsSucceeded int64
+	// The number of calls that have completed with a non-OK status.
+	CallsFailed int64
+	// The last time a call was started on the channel.
+	LastCallStartedTimestamp time.Time
+}
+
+// ChannelTrace stores traced events on a channel/subchannel and related info.
+type ChannelTrace struct {
+	// EventNum is the number of events that ever got traced (i.e. including those that have been deleted)
+	EventNum int64
+	// CreationTime is the creation time of the trace.
+	CreationTime time.Time
+	// Events stores the most recent trace events (up to $maxTraceEntry; newer events overwrite the
+	// oldest one)
+	Events []*TraceEvent
+}
+
+// TraceEvent represents a single trace event
+type TraceEvent struct {
+	// Desc is a simple description of the trace event.
+	Desc string
+	// Severity states the severity of this trace event.
+	Severity Severity
+	// Timestamp is the event time.
+	Timestamp time.Time
+	// RefID is the id of the entity that gets referenced in the event. RefID is 0 if no other entity is
+	// involved in this event.
+	// e.g. SubChannel (id: 4[]) Created. --> RefID = 4, RefName = "" (inside [])
+	RefID int64
+	// RefName is the reference name for the entity that gets referenced in the event.
+	RefName string
+	// RefType indicates the referenced entity type, i.e. Channel or SubChannel.
+	RefType RefChannelType
+}
+
+// Channel is the interface that should be satisfied in order to be tracked by
+// channelz as Channel or SubChannel.
+type Channel interface {
+	ChannelzMetric() *ChannelInternalMetric
+}
+
+type dummyChannel struct{}
+
+func (d *dummyChannel) ChannelzMetric() *ChannelInternalMetric {
+	return &ChannelInternalMetric{}
+}
+
+type channel struct {
+	refName     string
+	c           Channel
+	closeCalled bool
+	nestedChans map[int64]string
+	subChans    map[int64]string
+	id          int64
+	pid         int64
+	cm          *channelMap
+	trace       *channelTrace
+	// traceRefCount is the number of trace events that reference this channel.
+	// Non-zero traceRefCount means the trace of this channel cannot be deleted.
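A minimal hypothetical implementor of the Channel interface above; the type, field, and values are made up for illustration, and the imports assumed are sync/atomic plus the connectivity and channelz packages:

type fakeChannel struct {
	calls int64 // incremented with sync/atomic by the fake's call path
}

func (f *fakeChannel) ChannelzMetric() *channelz.ChannelInternalMetric {
	return &channelz.ChannelInternalMetric{
		State:        connectivity.Ready,
		Target:       "example.test:443", // hypothetical target
		CallsStarted: atomic.LoadInt64(&f.calls),
	}
}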
+ traceRefCount int32 +} + +func (c *channel) addChild(id int64, e entry) { + switch v := e.(type) { + case *subChannel: + c.subChans[id] = v.refName + case *channel: + c.nestedChans[id] = v.refName + default: + grpclog.Errorf("cannot add a child (id = %d) of type %T to a channel", id, e) + } +} + +func (c *channel) deleteChild(id int64) { + delete(c.subChans, id) + delete(c.nestedChans, id) + c.deleteSelfIfReady() +} + +func (c *channel) triggerDelete() { + c.closeCalled = true + c.deleteSelfIfReady() +} + +func (c *channel) getParentID() int64 { + return c.pid +} + +// deleteSelfFromTree tries to delete the channel from the channelz entry relation tree, which means +// deleting the channel reference from its parent's child list. +// +// In order for a channel to be deleted from the tree, it must meet the criteria that, removal of the +// corresponding grpc object has been invoked, and the channel does not have any children left. +// +// The returned boolean value indicates whether the channel has been successfully deleted from tree. +func (c *channel) deleteSelfFromTree() (deleted bool) { + if !c.closeCalled || len(c.subChans)+len(c.nestedChans) != 0 { + return false + } + // not top channel + if c.pid != 0 { + c.cm.findEntry(c.pid).deleteChild(c.id) + } + return true +} + +// deleteSelfFromMap checks whether it is valid to delete the channel from the map, which means +// deleting the channel from channelz's tracking entirely. Users can no longer use id to query the +// channel, and its memory will be garbage collected. +// +// The trace reference count of the channel must be 0 in order to be deleted from the map. This is +// specified in the channel tracing gRFC that as long as some other trace has reference to an entity, +// the trace of the referenced entity must not be deleted. In order to release the resource allocated +// by grpc, the reference to the grpc object is reset to a dummy object. +// +// deleteSelfFromMap must be called after deleteSelfFromTree returns true. +// +// It returns a bool to indicate whether the channel can be safely deleted from map. +func (c *channel) deleteSelfFromMap() (delete bool) { + if c.getTraceRefCount() != 0 { + c.c = &dummyChannel{} + return false + } + return true +} + +// deleteSelfIfReady tries to delete the channel itself from the channelz database. +// The delete process includes two steps: +// 1. delete the channel from the entry relation tree, i.e. delete the channel reference from its +// parent's child list. +// 2. delete the channel from the map, i.e. delete the channel entirely from channelz. Lookup by id +// will return entry not found error. 
+func (c *channel) deleteSelfIfReady() { + if !c.deleteSelfFromTree() { + return + } + if !c.deleteSelfFromMap() { + return + } + c.cm.deleteEntry(c.id) + c.trace.clear() +} + +func (c *channel) getChannelTrace() *channelTrace { + return c.trace +} + +func (c *channel) incrTraceRefCount() { + atomic.AddInt32(&c.traceRefCount, 1) +} + +func (c *channel) decrTraceRefCount() { + atomic.AddInt32(&c.traceRefCount, -1) +} + +func (c *channel) getTraceRefCount() int { + i := atomic.LoadInt32(&c.traceRefCount) + return int(i) +} + +func (c *channel) getRefName() string { + return c.refName +} + +type subChannel struct { + refName string + c Channel + closeCalled bool + sockets map[int64]string + id int64 + pid int64 + cm *channelMap + trace *channelTrace + traceRefCount int32 +} + +func (sc *subChannel) addChild(id int64, e entry) { + if v, ok := e.(*normalSocket); ok { + sc.sockets[id] = v.refName + } else { + grpclog.Errorf("cannot add a child (id = %d) of type %T to a subChannel", id, e) + } +} + +func (sc *subChannel) deleteChild(id int64) { + delete(sc.sockets, id) + sc.deleteSelfIfReady() +} + +func (sc *subChannel) triggerDelete() { + sc.closeCalled = true + sc.deleteSelfIfReady() +} + +func (sc *subChannel) getParentID() int64 { + return sc.pid +} + +// deleteSelfFromTree tries to delete the subchannel from the channelz entry relation tree, which +// means deleting the subchannel reference from its parent's child list. +// +// In order for a subchannel to be deleted from the tree, it must meet the criteria that, removal of +// the corresponding grpc object has been invoked, and the subchannel does not have any children left. +// +// The returned boolean value indicates whether the channel has been successfully deleted from tree. +func (sc *subChannel) deleteSelfFromTree() (deleted bool) { + if !sc.closeCalled || len(sc.sockets) != 0 { + return false + } + sc.cm.findEntry(sc.pid).deleteChild(sc.id) + return true +} + +// deleteSelfFromMap checks whether it is valid to delete the subchannel from the map, which means +// deleting the subchannel from channelz's tracking entirely. Users can no longer use id to query +// the subchannel, and its memory will be garbage collected. +// +// The trace reference count of the subchannel must be 0 in order to be deleted from the map. This is +// specified in the channel tracing gRFC that as long as some other trace has reference to an entity, +// the trace of the referenced entity must not be deleted. In order to release the resource allocated +// by grpc, the reference to the grpc object is reset to a dummy object. +// +// deleteSelfFromMap must be called after deleteSelfFromTree returns true. +// +// It returns a bool to indicate whether the channel can be safely deleted from map. +func (sc *subChannel) deleteSelfFromMap() (delete bool) { + if sc.getTraceRefCount() != 0 { + // free the grpc struct (i.e. addrConn) + sc.c = &dummyChannel{} + return false + } + return true +} + +// deleteSelfIfReady tries to delete the subchannel itself from the channelz database. +// The delete process includes two steps: +// 1. delete the subchannel from the entry relation tree, i.e. delete the subchannel reference from +// its parent's child list. +// 2. delete the subchannel from the map, i.e. delete the subchannel entirely from channelz. Lookup +// by id will return entry not found error. 
+func (sc *subChannel) deleteSelfIfReady() { + if !sc.deleteSelfFromTree() { + return + } + if !sc.deleteSelfFromMap() { + return + } + sc.cm.deleteEntry(sc.id) + sc.trace.clear() +} + +func (sc *subChannel) getChannelTrace() *channelTrace { + return sc.trace +} + +func (sc *subChannel) incrTraceRefCount() { + atomic.AddInt32(&sc.traceRefCount, 1) +} + +func (sc *subChannel) decrTraceRefCount() { + atomic.AddInt32(&sc.traceRefCount, -1) +} + +func (sc *subChannel) getTraceRefCount() int { + i := atomic.LoadInt32(&sc.traceRefCount) + return int(i) +} + +func (sc *subChannel) getRefName() string { + return sc.refName +} + +// SocketMetric defines the info channelz provides for a specific Socket, which +// includes SocketInternalMetric and channelz-specific data, such as channelz id, etc. +type SocketMetric struct { + // ID is the channelz id of this socket. + ID int64 + // RefName is the human readable reference string of this socket. + RefName string + // SocketData contains socket internal metric reported by the socket through + // ChannelzMetric(). + SocketData *SocketInternalMetric +} + +// SocketInternalMetric defines the struct that the implementor of Socket interface +// should return from ChannelzMetric(). +type SocketInternalMetric struct { + // The number of streams that have been started. + StreamsStarted int64 + // The number of streams that have ended successfully: + // On client side, receiving frame with eos bit set. + // On server side, sending frame with eos bit set. + StreamsSucceeded int64 + // The number of streams that have ended unsuccessfully: + // On client side, termination without receiving frame with eos bit set. + // On server side, termination without sending frame with eos bit set. + StreamsFailed int64 + // The number of messages successfully sent on this socket. + MessagesSent int64 + MessagesReceived int64 + // The number of keep alives sent. This is typically implemented with HTTP/2 + // ping messages. + KeepAlivesSent int64 + // The last time a stream was created by this endpoint. Usually unset for + // servers. + LastLocalStreamCreatedTimestamp time.Time + // The last time a stream was created by the remote endpoint. Usually unset + // for clients. + LastRemoteStreamCreatedTimestamp time.Time + // The last time a message was sent by this endpoint. + LastMessageSentTimestamp time.Time + // The last time a message was received by this endpoint. + LastMessageReceivedTimestamp time.Time + // The amount of window, granted to the local endpoint by the remote endpoint. + // This may be slightly out of date due to network latency. This does NOT + // include stream level or TCP level flow control info. + LocalFlowControlWindow int64 + // The amount of window, granted to the remote endpoint by the local endpoint. + // This may be slightly out of date due to network latency. This does NOT + // include stream level or TCP level flow control info. + RemoteFlowControlWindow int64 + // The locally bound address. + LocalAddr net.Addr + // The remote bound address. May be absent. + RemoteAddr net.Addr + // Optional, represents the name of the remote endpoint, if different than + // the original target name. + RemoteName string + SocketOptions *SocketOptionData + Security credentials.ChannelzSecurityValue +} + +// Socket is the interface that should be satisfied in order to be tracked by +// channelz as Socket. 
+type Socket interface {
+	ChannelzMetric() *SocketInternalMetric
+}
+
+type listenSocket struct {
+	refName string
+	s       Socket
+	id      int64
+	pid     int64
+	cm      *channelMap
+}
+
+func (ls *listenSocket) addChild(id int64, e entry) {
+	grpclog.Errorf("cannot add a child (id = %d) of type %T to a listen socket", id, e)
+}
+
+func (ls *listenSocket) deleteChild(id int64) {
+	grpclog.Errorf("cannot delete a child (id = %d) from a listen socket", id)
+}
+
+func (ls *listenSocket) triggerDelete() {
+	ls.cm.deleteEntry(ls.id)
+	ls.cm.findEntry(ls.pid).deleteChild(ls.id)
+}
+
+func (ls *listenSocket) deleteSelfIfReady() {
+	grpclog.Errorf("cannot call deleteSelfIfReady on a listen socket")
+}
+
+func (ls *listenSocket) getParentID() int64 {
+	return ls.pid
+}
+
+type normalSocket struct {
+	refName string
+	s       Socket
+	id      int64
+	pid     int64
+	cm      *channelMap
+}
+
+func (ns *normalSocket) addChild(id int64, e entry) {
+	grpclog.Errorf("cannot add a child (id = %d) of type %T to a normal socket", id, e)
+}
+
+func (ns *normalSocket) deleteChild(id int64) {
+	grpclog.Errorf("cannot delete a child (id = %d) from a normal socket", id)
+}
+
+func (ns *normalSocket) triggerDelete() {
+	ns.cm.deleteEntry(ns.id)
+	ns.cm.findEntry(ns.pid).deleteChild(ns.id)
+}
+
+func (ns *normalSocket) deleteSelfIfReady() {
+	grpclog.Errorf("cannot call deleteSelfIfReady on a normal socket")
+}
+
+func (ns *normalSocket) getParentID() int64 {
+	return ns.pid
+}
+
+// ServerMetric defines the info channelz provides for a specific Server, which
+// includes ServerInternalMetric and channelz-specific data, such as channelz id,
+// child list, etc.
+type ServerMetric struct {
+	// ID is the channelz id of this server.
+	ID int64
+	// RefName is the human readable reference string of this server.
+	RefName string
+	// ServerData contains server internal metric reported by the server through
+	// ChannelzMetric().
+	ServerData *ServerInternalMetric
+	// ListenSockets tracks the listener socket type children of this server in the
+	// format of a map from socket channelz id to corresponding reference string.
+	ListenSockets map[int64]string
+}
+
+// ServerInternalMetric defines the struct that the implementor of Server interface
+// should return from ChannelzMetric().
+type ServerInternalMetric struct {
+	// The number of incoming calls started on the server.
+	CallsStarted int64
+	// The number of incoming calls that have completed with an OK status.
+	CallsSucceeded int64
+	// The number of incoming calls that have completed with a non-OK status.
+	CallsFailed int64
+	// The last time a call was started on the server.
+	LastCallStartedTimestamp time.Time
+}
+
+// Server is the interface to be satisfied in order to be tracked by channelz as
+// Server.
+type Server interface { + ChannelzMetric() *ServerInternalMetric +} + +type server struct { + refName string + s Server + closeCalled bool + sockets map[int64]string + listenSockets map[int64]string + id int64 + cm *channelMap +} + +func (s *server) addChild(id int64, e entry) { + switch v := e.(type) { + case *normalSocket: + s.sockets[id] = v.refName + case *listenSocket: + s.listenSockets[id] = v.refName + default: + grpclog.Errorf("cannot add a child (id = %d) of type %T to a server", id, e) + } +} + +func (s *server) deleteChild(id int64) { + delete(s.sockets, id) + delete(s.listenSockets, id) + s.deleteSelfIfReady() +} + +func (s *server) triggerDelete() { + s.closeCalled = true + s.deleteSelfIfReady() +} + +func (s *server) deleteSelfIfReady() { + if !s.closeCalled || len(s.sockets)+len(s.listenSockets) != 0 { + return + } + s.cm.deleteEntry(s.id) +} + +func (s *server) getParentID() int64 { + return 0 +} + +type tracedChannel interface { + getChannelTrace() *channelTrace + incrTraceRefCount() + decrTraceRefCount() + getRefName() string +} + +type channelTrace struct { + cm *channelMap + createdTime time.Time + eventCount int64 + mu sync.Mutex + events []*TraceEvent +} + +func (c *channelTrace) append(e *TraceEvent) { + c.mu.Lock() + if len(c.events) == getMaxTraceEntry() { + del := c.events[0] + c.events = c.events[1:] + if del.RefID != 0 { + // start recursive cleanup in a goroutine to not block the call originated from grpc. + go func() { + // need to acquire c.cm.mu lock to call the unlocked attemptCleanup func. + c.cm.mu.Lock() + c.cm.decrTraceRefCount(del.RefID) + c.cm.mu.Unlock() + }() + } + } + e.Timestamp = time.Now() + c.events = append(c.events, e) + c.eventCount++ + c.mu.Unlock() +} + +func (c *channelTrace) clear() { + c.mu.Lock() + for _, e := range c.events { + if e.RefID != 0 { + // caller should have already held the c.cm.mu lock. + c.cm.decrTraceRefCount(e.RefID) + } + } + c.mu.Unlock() +} + +// Severity is the severity level of a trace event. +// The canonical enumeration of all valid values is here: +// https://github.com/grpc/grpc-proto/blob/9b13d199cc0d4703c7ea26c9c330ba695866eb23/grpc/channelz/v1/channelz.proto#L126. +type Severity int + +const ( + // CtUNKNOWN indicates unknown severity of a trace event. + CtUNKNOWN Severity = iota + // CtINFO indicates info level severity of a trace event. + CtINFO + // CtWarning indicates warning level severity of a trace event. + CtWarning + // CtError indicates error level severity of a trace event. + CtError +) + +// RefChannelType is the type of the entity being referenced in a trace event. +type RefChannelType int + +const ( + // RefChannel indicates the referenced entity is a Channel. + RefChannel RefChannelType = iota + // RefSubChannel indicates the referenced entity is a SubChannel. + RefSubChannel +) + +func (c *channelTrace) dumpData() *ChannelTrace { + c.mu.Lock() + ct := &ChannelTrace{EventNum: c.eventCount, CreationTime: c.createdTime} + ct.Events = c.events[:len(c.events)] + c.mu.Unlock() + return ct +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/types_linux.go b/vendor/google.golang.org/grpc/internal/channelz/types_linux.go new file mode 100644 index 000000000..692dd6181 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/types_linux.go @@ -0,0 +1,53 @@ +// +build !appengine + +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
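A hypothetical use of AddTraceEvent (defined in funcs.go above), assuming subID came from an earlier RegisterSubChannel call; the Parent desc is mirrored into the parent channel's trace with a back-reference, as traceEvent implements:

channelz.AddTraceEvent(subID, &channelz.TraceEventDesc{
	Desc:     "Subchannel created",
	Severity: channelz.CtINFO,
	Parent: &channelz.TraceEventDesc{
		Desc:     "Child subchannel created",
		Severity: channelz.CtINFO,
	},
})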
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +import ( + "syscall" + + "golang.org/x/sys/unix" +) + +// SocketOptionData defines the struct to hold socket option data, and related +// getter function to obtain info from fd. +type SocketOptionData struct { + Linger *unix.Linger + RecvTimeout *unix.Timeval + SendTimeout *unix.Timeval + TCPInfo *unix.TCPInfo +} + +// Getsockopt defines the function to get socket options requested by channelz. +// It is to be passed to syscall.RawConn.Control(). +func (s *SocketOptionData) Getsockopt(fd uintptr) { + if v, err := unix.GetsockoptLinger(int(fd), syscall.SOL_SOCKET, syscall.SO_LINGER); err == nil { + s.Linger = v + } + if v, err := unix.GetsockoptTimeval(int(fd), syscall.SOL_SOCKET, syscall.SO_RCVTIMEO); err == nil { + s.RecvTimeout = v + } + if v, err := unix.GetsockoptTimeval(int(fd), syscall.SOL_SOCKET, syscall.SO_SNDTIMEO); err == nil { + s.SendTimeout = v + } + if v, err := unix.GetsockoptTCPInfo(int(fd), syscall.SOL_TCP, syscall.TCP_INFO); err == nil { + s.TCPInfo = v + } +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go b/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go new file mode 100644 index 000000000..79edbefc4 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go @@ -0,0 +1,44 @@ +// +build !linux appengine + +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +import ( + "sync" + + "google.golang.org/grpc/grpclog" +) + +var once sync.Once + +// SocketOptionData defines the struct to hold socket option data, and related +// getter function to obtain info from fd. +// Windows OS doesn't support Socket Option +type SocketOptionData struct { +} + +// Getsockopt defines the function to get socket options requested by channelz. +// It is to be passed to syscall.RawConn.Control(). +// Windows OS doesn't support Socket Option +func (s *SocketOptionData) Getsockopt(fd uintptr) { + once.Do(func() { + grpclog.Warningln("Channelz: socket options are not supported on non-linux os and appengine.") + }) +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/util_linux.go b/vendor/google.golang.org/grpc/internal/channelz/util_linux.go new file mode 100644 index 000000000..fdf409d55 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/util_linux.go @@ -0,0 +1,39 @@ +// +build linux,!appengine + +/* + * + * Copyright 2018 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +import ( + "syscall" +) + +// GetSocketOption gets the socket option info of the conn. +func GetSocketOption(socket interface{}) *SocketOptionData { + c, ok := socket.(syscall.Conn) + if !ok { + return nil + } + data := &SocketOptionData{} + if rawConn, err := c.SyscallConn(); err == nil { + rawConn.Control(data.Getsockopt) + return data + } + return nil +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go b/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go new file mode 100644 index 000000000..8864a0811 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go @@ -0,0 +1,26 @@ +// +build !linux appengine + +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +// GetSocketOption gets the socket option info of the conn. +func GetSocketOption(c interface{}) *SocketOptionData { + return nil +} diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go new file mode 100644 index 000000000..11be7cd08 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go @@ -0,0 +1,64 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package envconfig contains grpc settings configured by environment variables. +package envconfig + +import ( + "os" + "strings" +) + +const ( + prefix = "GRPC_GO_" + retryStr = prefix + "RETRY" + requireHandshakeStr = prefix + "REQUIRE_HANDSHAKE" +) + +// RequireHandshakeSetting describes the settings for handshaking. +type RequireHandshakeSetting int + +const ( + // RequireHandshakeOn indicates to wait for handshake before considering a + // connection ready/successful. 
+ RequireHandshakeOn RequireHandshakeSetting = iota + // RequireHandshakeOff indicates to not wait for handshake before + // considering a connection ready/successful. + RequireHandshakeOff +) + +var ( + // Retry is set if retry is explicitly enabled via "GRPC_GO_RETRY=on". + Retry = strings.EqualFold(os.Getenv(retryStr), "on") + // RequireHandshake is set based upon the GRPC_GO_REQUIRE_HANDSHAKE + // environment variable. + // + // Will be removed after the 1.18 release. + RequireHandshake = RequireHandshakeOn +) + +func init() { + switch strings.ToLower(os.Getenv(requireHandshakeStr)) { + case "on": + fallthrough + default: + RequireHandshake = RequireHandshakeOn + case "off": + RequireHandshake = RequireHandshakeOff + } +} diff --git a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go new file mode 100644 index 000000000..200b115ca --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go @@ -0,0 +1,56 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package grpcrand implements math/rand functions in a concurrent-safe way +// with a global random source, independent of math/rand's global source. +package grpcrand + +import ( + "math/rand" + "sync" + "time" +) + +var ( + r = rand.New(rand.NewSource(time.Now().UnixNano())) + mu sync.Mutex +) + +// Int63n implements rand.Int63n on the grpcrand global source. +func Int63n(n int64) int64 { + mu.Lock() + res := r.Int63n(n) + mu.Unlock() + return res +} + +// Intn implements rand.Intn on the grpcrand global source. +func Intn(n int) int { + mu.Lock() + res := r.Intn(n) + mu.Unlock() + return res +} + +// Float64 implements rand.Float64 on the grpcrand global source. +func Float64() float64 { + mu.Lock() + res := r.Float64() + mu.Unlock() + return res +} diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/event.go b/vendor/google.golang.org/grpc/internal/grpcsync/event.go new file mode 100644 index 000000000..fbe697c37 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcsync/event.go @@ -0,0 +1,61 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package grpcsync implements additional synchronization primitives built upon +// the sync package. +package grpcsync + +import ( + "sync" + "sync/atomic" +) + +// Event represents a one-time event that may occur in the future. 
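+//
+// A minimal usage sketch (illustrative, not part of the upstream file):
+//
+//	e := NewEvent()
+//	go func() { e.Fire() }() // Fire is safe to call from any number of goroutines
+//	<-e.Done()               // unblocks once the event has fired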
+type Event struct {
+	fired int32
+	c     chan struct{}
+	o     sync.Once
+}
+
+// Fire causes e to complete. It is safe to call multiple times, and
+// concurrently. It returns true iff this call to Fire caused the signaling
+// channel returned by Done to close.
+func (e *Event) Fire() bool {
+	ret := false
+	e.o.Do(func() {
+		atomic.StoreInt32(&e.fired, 1)
+		close(e.c)
+		ret = true
+	})
+	return ret
+}
+
+// Done returns a channel that will be closed when Fire is called.
+func (e *Event) Done() <-chan struct{} {
+	return e.c
+}
+
+// HasFired returns true if Fire has been called.
+func (e *Event) HasFired() bool {
+	return atomic.LoadInt32(&e.fired) == 1
+}
+
+// NewEvent returns a new, ready-to-use Event.
+func NewEvent() *Event {
+	return &Event{c: make(chan struct{})}
+}
diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go
new file mode 100644
index 000000000..bc1f99ac8
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/internal.go
@@ -0,0 +1,71 @@
+/*
+ * Copyright 2016 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package internal contains gRPC-internal code, to avoid polluting
+// the godoc of the top-level grpc package. It must not import any grpc
+// symbols to avoid circular dependencies.
+package internal
+
+import (
+	"context"
+	"time"
+
+	"google.golang.org/grpc/connectivity"
+)
+
+var (
+	// WithResolverBuilder is exported by dialoptions.go
+	WithResolverBuilder interface{} // func (resolver.Builder) grpc.DialOption
+	// WithHealthCheckFunc is not exported by dialoptions.go
+	WithHealthCheckFunc interface{} // func (HealthChecker) DialOption
+	// HealthCheckFunc is used to provide client-side LB channel health checking
+	HealthCheckFunc HealthChecker
+	// BalancerUnregister is exported by package balancer to unregister a balancer.
+	BalancerUnregister func(name string)
+	// KeepaliveMinPingTime is the minimum ping interval. This must be 10s by
+	// default, but tests may wish to set it lower for convenience.
+	KeepaliveMinPingTime = 10 * time.Second
+	// ParseServiceConfig is a function to parse JSON service configs into
+	// opaque data structures.
+	ParseServiceConfig func(sc string) (interface{}, error)
+	// StatusRawProto is exported by status/status.go. This func returns a
+	// pointer to the wrapped Status proto for a given status.Status without a
+	// call to proto.Clone(). The returned Status proto should not be mutated by
+	// the caller.
+	StatusRawProto interface{} // func (*status.Status) *spb.Status
+)
+
+// HealthChecker defines the signature of the client-side LB channel health checking function.
+//
+// The implementation is expected to create a health checking RPC stream by
+// calling newStream(), watch for the health status of serviceName, and report
+// its health back by calling setConnectivityState().
+//
+// The health checking protocol is defined at:
+// https://github.com/grpc/grpc/blob/master/doc/health-checking.md
+type HealthChecker func(ctx context.Context, newStream func(string) (interface{}, error), setConnectivityState func(connectivity.State), serviceName string) error
+
+const (
+	// CredsBundleModeFallback switches GoogleDefaultCreds to fallback mode.
+	CredsBundleModeFallback = "fallback"
+	// CredsBundleModeBalancer switches GoogleDefaultCreds to grpclb balancer
+	// mode.
+	CredsBundleModeBalancer = "balancer"
+	// CredsBundleModeBackendFromBalancer switches GoogleDefaultCreds to mode
+	// that supports backend returned by grpclb balancer.
+	CredsBundleModeBackendFromBalancer = "backend-from-balancer"
+)
diff --git a/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go b/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go
new file mode 100644
index 000000000..43281a3e0
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go
@@ -0,0 +1,114 @@
+// +build !appengine
+
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package syscall provides functionalities that grpc uses to get low-level operating system
+// stats/info.
+package syscall
+
+import (
+	"fmt"
+	"net"
+	"syscall"
+	"time"
+
+	"golang.org/x/sys/unix"
+	"google.golang.org/grpc/grpclog"
+)
+
+// GetCPUTime returns how much CPU time has passed since the start of this process.
+func GetCPUTime() int64 {
+	var ts unix.Timespec
+	if err := unix.ClockGettime(unix.CLOCK_PROCESS_CPUTIME_ID, &ts); err != nil {
+		grpclog.Fatal(err)
+	}
+	return ts.Nano()
+}
+
+// Rusage is an alias for syscall.Rusage under linux non-appengine environment.
+type Rusage syscall.Rusage
+
+// GetRusage returns the resource usage of current process.
+func GetRusage() (rusage *Rusage) {
+	rusage = new(Rusage)
+	syscall.Getrusage(syscall.RUSAGE_SELF, (*syscall.Rusage)(rusage))
+	return
+}
+
+// CPUTimeDiff returns the differences of user CPU time and system CPU time used
+// between two Rusage structs.
+func CPUTimeDiff(first *Rusage, latest *Rusage) (float64, float64) {
+	f := (*syscall.Rusage)(first)
+	l := (*syscall.Rusage)(latest)
+	var (
+		utimeDiffs  = l.Utime.Sec - f.Utime.Sec
+		utimeDiffus = l.Utime.Usec - f.Utime.Usec
+		stimeDiffs  = l.Stime.Sec - f.Stime.Sec
+		stimeDiffus = l.Stime.Usec - f.Stime.Usec
+	)
+
+	uTimeElapsed := float64(utimeDiffs) + float64(utimeDiffus)*1.0e-6
+	sTimeElapsed := float64(stimeDiffs) + float64(stimeDiffus)*1.0e-6
+
+	return uTimeElapsed, sTimeElapsed
+}
+
+// SetTCPUserTimeout sets the TCP user timeout on a connection's socket.
+func SetTCPUserTimeout(conn net.Conn, timeout time.Duration) error {
+	tcpconn, ok := conn.(*net.TCPConn)
+	if !ok {
+		// not a TCP connection. exit early
+		return nil
+	}
+	rawConn, err := tcpconn.SyscallConn()
+	if err != nil {
+		return fmt.Errorf("error getting raw connection: %v", err)
+	}
+	err = rawConn.Control(func(fd uintptr) {
+		err = syscall.SetsockoptInt(int(fd), syscall.IPPROTO_TCP, unix.TCP_USER_TIMEOUT, int(timeout/time.Millisecond))
+	})
+	if err != nil {
+		return fmt.Errorf("error setting option on socket: %v", err)
+	}
+
+	return nil
+}
+
+// GetTCPUserTimeout gets the TCP user timeout on a connection's socket.
+func GetTCPUserTimeout(conn net.Conn) (opt int, err error) {
+	tcpconn, ok := conn.(*net.TCPConn)
+	if !ok {
+		err = fmt.Errorf("conn is not *net.TCPConn. got %T", conn)
+		return
+	}
+	rawConn, err := tcpconn.SyscallConn()
+	if err != nil {
+		err = fmt.Errorf("error getting raw connection: %v", err)
+		return
+	}
+	err = rawConn.Control(func(fd uintptr) {
+		opt, err = syscall.GetsockoptInt(int(fd), syscall.IPPROTO_TCP, unix.TCP_USER_TIMEOUT)
+	})
+	if err != nil {
+		err = fmt.Errorf("error getting option on socket: %v", err)
+		return
+	}
+
+	return
+}
diff --git a/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go b/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go
new file mode 100644
index 000000000..d3fd9dab3
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go
@@ -0,0 +1,73 @@
+// +build !linux appengine
+
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package syscall
+
+import (
+	"net"
+	"sync"
+	"time"
+
+	"google.golang.org/grpc/grpclog"
+)
+
+var once sync.Once
+
+func log() {
+	once.Do(func() {
+		grpclog.Info("CPU time info is unavailable on non-linux or appengine environment.")
+	})
+}
+
+// GetCPUTime returns how much CPU time has passed since the start of this process.
+// It always returns 0 under non-linux or appengine environment.
+func GetCPUTime() int64 {
+	log()
+	return 0
+}
+
+// Rusage is an empty struct under non-linux or appengine environment.
+type Rusage struct{}
+
+// GetRusage is a no-op function under non-linux or appengine environment.
+func GetRusage() (rusage *Rusage) {
+	log()
+	return nil
+}
+
+// CPUTimeDiff returns the differences of user CPU time and system CPU time used
+// between two Rusage structs. It is a no-op function for non-linux or appengine environment.
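+// It always returns (0, 0).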
+func CPUTimeDiff(first *Rusage, latest *Rusage) (float64, float64) {
+	log()
+	return 0, 0
+}
+
+// SetTCPUserTimeout is a no-op function under non-linux or appengine environments.
+func SetTCPUserTimeout(conn net.Conn, timeout time.Duration) error {
+	log()
+	return nil
+}
+
+// GetTCPUserTimeout is a no-op function under non-linux or appengine environments.
+// A negative return value indicates the operation is not supported.
+func GetTCPUserTimeout(conn net.Conn) (int, error) {
+	log()
+	return -1, nil
+}
diff --git a/vendor/google.golang.org/grpc/internal/transport/bdp_estimator.go b/vendor/google.golang.org/grpc/internal/transport/bdp_estimator.go
new file mode 100644
index 000000000..070680edb
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/transport/bdp_estimator.go
@@ -0,0 +1,141 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package transport
+
+import (
+	"sync"
+	"time"
+)
+
+const (
+	// bdpLimit is the maximum value the flow control windows will be increased
+	// to. TCP typically limits this to 4MB, but some systems go up to 16MB.
+	// Since this is only a limit, it is safe to make it optimistic.
+	bdpLimit = (1 << 20) * 16
+	// alpha is a constant factor used to keep a moving average
+	// of RTTs.
+	alpha = 0.9
+	// If the current bdp sample is greater than or equal to
+	// our beta * our estimated bdp and the current bandwidth
+	// sample is the maximum bandwidth observed so far, we
+	// increase our bdp estimate by a factor of gamma.
+	beta = 0.66
+	// To put our bdp to be smaller than or equal to twice the real BDP,
+	// we should multiply our current sample with 4/3, however to round things out
+	// we use 2 as the multiplication factor.
+	gamma = 2
+)
+
+// Adding arbitrary data to ping so that its ack can be identified.
+// Easter-egg: what does the ping message say?
+var bdpPing = &ping{data: [8]byte{2, 4, 16, 16, 9, 14, 7, 7}}
+
+type bdpEstimator struct {
+	// sentAt is the time when the ping was sent.
+	sentAt time.Time
+
+	mu sync.Mutex
+	// bdp is the current bdp estimate.
+	bdp uint32
+	// sample is the number of bytes received in one measurement cycle.
+	sample uint32
+	// bwMax is the maximum bandwidth noted so far (bytes/sec).
+	bwMax float64
+	// bool to keep track of the beginning of a new measurement cycle.
+	isSent bool
+	// Callback to update the window sizes.
+	updateFlowControl func(n uint32)
+	// sampleCount is the number of samples taken so far.
+	sampleCount uint64
+	// round trip time (seconds)
+	rtt float64
+}
+
+// timesnap registers the time bdp ping was sent out so that
+// network rtt can be calculated when its ack is received.
+// It is called (by controller) when the bdpPing is
+// being written on the wire.
+func (b *bdpEstimator) timesnap(d [8]byte) {
+	if bdpPing.data != d {
+		return
+	}
+	b.sentAt = time.Now()
+}
+
+// add adds bytes to the current sample for calculating bdp.
+// It returns true only if a ping must be sent. This can be used
+// by the caller (handleData) to make a decision about batching
+// a window update with it.
+func (b *bdpEstimator) add(n uint32) bool {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+	if b.bdp == bdpLimit {
+		return false
+	}
+	if !b.isSent {
+		b.isSent = true
+		b.sample = n
+		b.sentAt = time.Time{}
+		b.sampleCount++
+		return true
+	}
+	b.sample += n
+	return false
+}
+
+// calculate is called when an ack for a bdp ping is received.
+// Here we calculate the current bdp and bandwidth sample and
+// decide if the flow control windows should go up.
+func (b *bdpEstimator) calculate(d [8]byte) {
+	// Check if the ping acked for was the bdp ping.
+	if bdpPing.data != d {
+		return
+	}
+	b.mu.Lock()
+	rttSample := time.Since(b.sentAt).Seconds()
+	if b.sampleCount < 10 {
+		// Bootstrap rtt with an average of first 10 rtt samples.
+		b.rtt += (rttSample - b.rtt) / float64(b.sampleCount)
+	} else {
+		// Give more weight to the recent past.
+		b.rtt += (rttSample - b.rtt) * float64(alpha)
+	}
+	b.isSent = false
+	// The number of bytes accumulated so far in the sample is smaller
+	// than or equal to 1.5 times the real BDP on a saturated connection.
+	bwCurrent := float64(b.sample) / (b.rtt * float64(1.5))
+	if bwCurrent > b.bwMax {
+		b.bwMax = bwCurrent
+	}
+	// If the current sample (which is smaller than or equal to the 1.5 times the real BDP) is
+	// greater than or equal to 2/3rd our perceived bdp AND this is the maximum bandwidth seen so far, we
+	// should update our perception of the network BDP.
+	if float64(b.sample) >= beta*float64(b.bdp) && bwCurrent == b.bwMax && b.bdp != bdpLimit {
+		sampleFloat := float64(b.sample)
+		b.bdp = uint32(gamma * sampleFloat)
+		if b.bdp > bdpLimit {
+			b.bdp = bdpLimit
+		}
+		bdp := b.bdp
+		b.mu.Unlock()
+		b.updateFlowControl(bdp)
+		return
+	}
+	b.mu.Unlock()
+}
diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go
new file mode 100644
index 000000000..204ba1588
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go
@@ -0,0 +1,852 @@
+/*
+ *
+ * Copyright 2014 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package transport
+
+import (
+	"bytes"
+	"fmt"
+	"runtime"
+	"sync"
+
+	"golang.org/x/net/http2"
+	"golang.org/x/net/http2/hpack"
+)
+
+var updateHeaderTblSize = func(e *hpack.Encoder, v uint32) {
+	e.SetMaxDynamicTableSizeLimit(v)
+}
+
+type itemNode struct {
+	it   interface{}
+	next *itemNode
+}
+
+type itemList struct {
+	head *itemNode
+	tail *itemNode
+}
+
+func (il *itemList) enqueue(i interface{}) {
+	n := &itemNode{it: i}
+	if il.tail == nil {
+		il.head, il.tail = n, n
+		return
+	}
+	il.tail.next = n
+	il.tail = n
+}
+
+// peek returns the first item in the list without removing it from the
+// list.
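+// It assumes a non-empty list: il.head is dereferenced unconditionally, so
+// callers must check isEmpty() first.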
+func (il *itemList) peek() interface{} { + return il.head.it +} + +func (il *itemList) dequeue() interface{} { + if il.head == nil { + return nil + } + i := il.head.it + il.head = il.head.next + if il.head == nil { + il.tail = nil + } + return i +} + +func (il *itemList) dequeueAll() *itemNode { + h := il.head + il.head, il.tail = nil, nil + return h +} + +func (il *itemList) isEmpty() bool { + return il.head == nil +} + +// The following defines various control items which could flow through +// the control buffer of transport. They represent different aspects of +// control tasks, e.g., flow control, settings, streaming resetting, etc. + +// registerStream is used to register an incoming stream with loopy writer. +type registerStream struct { + streamID uint32 + wq *writeQuota +} + +// headerFrame is also used to register stream on the client-side. +type headerFrame struct { + streamID uint32 + hf []hpack.HeaderField + endStream bool // Valid on server side. + initStream func(uint32) (bool, error) // Used only on the client side. + onWrite func() + wq *writeQuota // write quota for the stream created. + cleanup *cleanupStream // Valid on the server side. + onOrphaned func(error) // Valid on client-side +} + +type cleanupStream struct { + streamID uint32 + rst bool + rstCode http2.ErrCode + onWrite func() +} + +type dataFrame struct { + streamID uint32 + endStream bool + h []byte + d []byte + // onEachWrite is called every time + // a part of d is written out. + onEachWrite func() +} + +type incomingWindowUpdate struct { + streamID uint32 + increment uint32 +} + +type outgoingWindowUpdate struct { + streamID uint32 + increment uint32 +} + +type incomingSettings struct { + ss []http2.Setting +} + +type outgoingSettings struct { + ss []http2.Setting +} + +type incomingGoAway struct { +} + +type goAway struct { + code http2.ErrCode + debugData []byte + headsUp bool + closeConn bool +} + +type ping struct { + ack bool + data [8]byte +} + +type outFlowControlSizeRequest struct { + resp chan uint32 +} + +type outStreamState int + +const ( + active outStreamState = iota + empty + waitingOnStreamQuota +) + +type outStream struct { + id uint32 + state outStreamState + itl *itemList + bytesOutStanding int + wq *writeQuota + + next *outStream + prev *outStream +} + +func (s *outStream) deleteSelf() { + if s.prev != nil { + s.prev.next = s.next + } + if s.next != nil { + s.next.prev = s.prev + } + s.next, s.prev = nil, nil +} + +type outStreamList struct { + // Following are sentinel objects that mark the + // beginning and end of the list. They do not + // contain any item lists. All valid objects are + // inserted in between them. + // This is needed so that an outStream object can + // deleteSelf() in O(1) time without knowing which + // list it belongs to. + head *outStream + tail *outStream +} + +func newOutStreamList() *outStreamList { + head, tail := new(outStream), new(outStream) + head.next = tail + tail.prev = head + return &outStreamList{ + head: head, + tail: tail, + } +} + +func (l *outStreamList) enqueue(s *outStream) { + e := l.tail.prev + e.next = s + s.prev = e + s.next = l.tail + l.tail.prev = s +} + +// remove from the beginning of the list. +func (l *outStreamList) dequeue() *outStream { + b := l.head.next + if b == l.tail { + return nil + } + b.deleteSelf() + return b +} + +// controlBuffer is a way to pass information to loopy. +// Information is passed as specific struct types called control frames. 
+// A control frame not only represents data, messages or headers to be sent out
+// but can also be used to instruct loopy to update its internal state.
+// It shouldn't be confused with an HTTP2 frame, although some of the control frames
+// like dataFrame and headerFrame do go out on wire as HTTP2 frames.
+type controlBuffer struct {
+	ch              chan struct{}
+	done            <-chan struct{}
+	mu              sync.Mutex
+	consumerWaiting bool
+	list            *itemList
+	err             error
+}
+
+func newControlBuffer(done <-chan struct{}) *controlBuffer {
+	return &controlBuffer{
+		ch:   make(chan struct{}, 1),
+		list: &itemList{},
+		done: done,
+	}
+}
+
+func (c *controlBuffer) put(it interface{}) error {
+	_, err := c.executeAndPut(nil, it)
+	return err
+}
+
+func (c *controlBuffer) executeAndPut(f func(it interface{}) bool, it interface{}) (bool, error) {
+	var wakeUp bool
+	c.mu.Lock()
+	if c.err != nil {
+		c.mu.Unlock()
+		return false, c.err
+	}
+	if f != nil {
+		if !f(it) { // f wasn't successful
+			c.mu.Unlock()
+			return false, nil
+		}
+	}
+	if c.consumerWaiting {
+		wakeUp = true
+		c.consumerWaiting = false
+	}
+	c.list.enqueue(it)
+	c.mu.Unlock()
+	if wakeUp {
+		select {
+		case c.ch <- struct{}{}:
+		default:
+		}
+	}
+	return true, nil
+}
+
+// Note argument f should never be nil.
+func (c *controlBuffer) execute(f func(it interface{}) bool, it interface{}) (bool, error) {
+	c.mu.Lock()
+	if c.err != nil {
+		c.mu.Unlock()
+		return false, c.err
+	}
+	if !f(it) { // f wasn't successful
+		c.mu.Unlock()
+		return false, nil
+	}
+	c.mu.Unlock()
+	return true, nil
+}
+
+func (c *controlBuffer) get(block bool) (interface{}, error) {
+	for {
+		c.mu.Lock()
+		if c.err != nil {
+			c.mu.Unlock()
+			return nil, c.err
+		}
+		if !c.list.isEmpty() {
+			h := c.list.dequeue()
+			c.mu.Unlock()
+			return h, nil
+		}
+		if !block {
+			c.mu.Unlock()
+			return nil, nil
+		}
+		c.consumerWaiting = true
+		c.mu.Unlock()
+		select {
+		case <-c.ch:
+		case <-c.done:
+			c.finish()
+			return nil, ErrConnClosing
+		}
+	}
+}
+
+func (c *controlBuffer) finish() {
+	c.mu.Lock()
+	if c.err != nil {
+		c.mu.Unlock()
+		return
+	}
+	c.err = ErrConnClosing
+	// There may be headers for streams in the control buffer.
+	// These streams need to be cleaned out since the transport
+	// is still not aware of these yet.
+	for head := c.list.dequeueAll(); head != nil; head = head.next {
+		hdr, ok := head.it.(*headerFrame)
+		if !ok {
+			continue
+		}
+		if hdr.onOrphaned != nil { // It will be nil on the server-side.
+			hdr.onOrphaned(ErrConnClosing)
+		}
+	}
+	c.mu.Unlock()
+}
+
+type side int
+
+const (
+	clientSide side = iota
+	serverSide
+)
+
+// Loopy receives frames from the control buffer.
+// Each frame is handled individually; most of the work done by loopy goes
+// into handling data frames. Loopy maintains a queue of active streams, and each
+// stream maintains a queue of data frames; as loopy receives data frames
+// they are added to the queue of the relevant stream.
+// Loopy goes over this list of active streams by processing one node every iteration,
+// thereby closely resembling round-robin scheduling over all streams. While
+// processing a stream, loopy writes out data bytes from this stream capped by the min
+// of http2MaxFrameLen, connection-level flow control and stream-level flow control.
+type loopyWriter struct {
+	side      side
+	cbuf      *controlBuffer
+	sendQuota uint32
+	oiws      uint32 // outbound initial window size.
+	// estdStreams is a map of all established streams that are not cleaned-up yet.
+	// On client-side, this is all streams whose headers were sent out.
+ // On server-side, this is all streams whose headers were received. + estdStreams map[uint32]*outStream // Established streams. + // activeStreams is a linked-list of all streams that have data to send and some + // stream-level flow control quota. + // Each of these streams internally have a list of data items(and perhaps trailers + // on the server-side) to be sent out. + activeStreams *outStreamList + framer *framer + hBuf *bytes.Buffer // The buffer for HPACK encoding. + hEnc *hpack.Encoder // HPACK encoder. + bdpEst *bdpEstimator + draining bool + + // Side-specific handlers + ssGoAwayHandler func(*goAway) (bool, error) +} + +func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator) *loopyWriter { + var buf bytes.Buffer + l := &loopyWriter{ + side: s, + cbuf: cbuf, + sendQuota: defaultWindowSize, + oiws: defaultWindowSize, + estdStreams: make(map[uint32]*outStream), + activeStreams: newOutStreamList(), + framer: fr, + hBuf: &buf, + hEnc: hpack.NewEncoder(&buf), + bdpEst: bdpEst, + } + return l +} + +const minBatchSize = 1000 + +// run should be run in a separate goroutine. +// It reads control frames from controlBuf and processes them by: +// 1. Updating loopy's internal state, or/and +// 2. Writing out HTTP2 frames on the wire. +// +// Loopy keeps all active streams with data to send in a linked-list. +// All streams in the activeStreams linked-list must have both: +// 1. Data to send, and +// 2. Stream level flow control quota available. +// +// In each iteration of run loop, other than processing the incoming control +// frame, loopy calls processData, which processes one node from the activeStreams linked-list. +// This results in writing of HTTP2 frames into an underlying write buffer. +// When there's no more control frames to read from controlBuf, loopy flushes the write buffer. +// As an optimization, to increase the batch size for each flush, loopy yields the processor, once +// if the batch size is too low to give stream goroutines a chance to fill it up. +func (l *loopyWriter) run() (err error) { + defer func() { + if err == ErrConnClosing { + // Don't log ErrConnClosing as error since it happens + // 1. When the connection is closed by some other known issue. + // 2. User closed the connection. + // 3. A graceful close of connection. + infof("transport: loopyWriter.run returning. %v", err) + err = nil + } + }() + for { + it, err := l.cbuf.get(true) + if err != nil { + return err + } + if err = l.handle(it); err != nil { + return err + } + if _, err = l.processData(); err != nil { + return err + } + gosched := true + hasdata: + for { + it, err := l.cbuf.get(false) + if err != nil { + return err + } + if it != nil { + if err = l.handle(it); err != nil { + return err + } + if _, err = l.processData(); err != nil { + return err + } + continue hasdata + } + isEmpty, err := l.processData() + if err != nil { + return err + } + if !isEmpty { + continue hasdata + } + if gosched { + gosched = false + if l.framer.writer.offset < minBatchSize { + runtime.Gosched() + continue hasdata + } + } + l.framer.writer.Flush() + break hasdata + + } + } +} + +func (l *loopyWriter) outgoingWindowUpdateHandler(w *outgoingWindowUpdate) error { + return l.framer.fr.WriteWindowUpdate(w.streamID, w.increment) +} + +func (l *loopyWriter) incomingWindowUpdateHandler(w *incomingWindowUpdate) error { + // Otherwise update the quota. + if w.streamID == 0 { + l.sendQuota += w.increment + return nil + } + // Find the stream and update it. 
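+	// An update for an established stream replenishes its outbound quota; a
+	// stream that was blocked on stream-level quota becomes active again.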
+ if str, ok := l.estdStreams[w.streamID]; ok { + str.bytesOutStanding -= int(w.increment) + if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota > 0 && str.state == waitingOnStreamQuota { + str.state = active + l.activeStreams.enqueue(str) + return nil + } + } + return nil +} + +func (l *loopyWriter) outgoingSettingsHandler(s *outgoingSettings) error { + return l.framer.fr.WriteSettings(s.ss...) +} + +func (l *loopyWriter) incomingSettingsHandler(s *incomingSettings) error { + if err := l.applySettings(s.ss); err != nil { + return err + } + return l.framer.fr.WriteSettingsAck() +} + +func (l *loopyWriter) registerStreamHandler(h *registerStream) error { + str := &outStream{ + id: h.streamID, + state: empty, + itl: &itemList{}, + wq: h.wq, + } + l.estdStreams[h.streamID] = str + return nil +} + +func (l *loopyWriter) headerHandler(h *headerFrame) error { + if l.side == serverSide { + str, ok := l.estdStreams[h.streamID] + if !ok { + warningf("transport: loopy doesn't recognize the stream: %d", h.streamID) + return nil + } + // Case 1.A: Server is responding back with headers. + if !h.endStream { + return l.writeHeader(h.streamID, h.endStream, h.hf, h.onWrite) + } + // else: Case 1.B: Server wants to close stream. + + if str.state != empty { // either active or waiting on stream quota. + // add it str's list of items. + str.itl.enqueue(h) + return nil + } + if err := l.writeHeader(h.streamID, h.endStream, h.hf, h.onWrite); err != nil { + return err + } + return l.cleanupStreamHandler(h.cleanup) + } + // Case 2: Client wants to originate stream. + str := &outStream{ + id: h.streamID, + state: empty, + itl: &itemList{}, + wq: h.wq, + } + str.itl.enqueue(h) + return l.originateStream(str) +} + +func (l *loopyWriter) originateStream(str *outStream) error { + hdr := str.itl.dequeue().(*headerFrame) + sendPing, err := hdr.initStream(str.id) + if err != nil { + if err == ErrConnClosing { + return err + } + // Other errors(errStreamDrain) need not close transport. + return nil + } + if err = l.writeHeader(str.id, hdr.endStream, hdr.hf, hdr.onWrite); err != nil { + return err + } + l.estdStreams[str.id] = str + if sendPing { + return l.pingHandler(&ping{data: [8]byte{}}) + } + return nil +} + +func (l *loopyWriter) writeHeader(streamID uint32, endStream bool, hf []hpack.HeaderField, onWrite func()) error { + if onWrite != nil { + onWrite() + } + l.hBuf.Reset() + for _, f := range hf { + if err := l.hEnc.WriteField(f); err != nil { + warningf("transport: loopyWriter.writeHeader encountered error while encoding headers:", err) + } + } + var ( + err error + endHeaders, first bool + ) + first = true + for !endHeaders { + size := l.hBuf.Len() + if size > http2MaxFrameLen { + size = http2MaxFrameLen + } else { + endHeaders = true + } + if first { + first = false + err = l.framer.fr.WriteHeaders(http2.HeadersFrameParam{ + StreamID: streamID, + BlockFragment: l.hBuf.Next(size), + EndStream: endStream, + EndHeaders: endHeaders, + }) + } else { + err = l.framer.fr.WriteContinuation( + streamID, + endHeaders, + l.hBuf.Next(size), + ) + } + if err != nil { + return err + } + } + return nil +} + +func (l *loopyWriter) preprocessData(df *dataFrame) error { + str, ok := l.estdStreams[df.streamID] + if !ok { + return nil + } + // If we got data for a stream it means that + // stream was originated and the headers were sent out. 
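+	// Queue the frame on the stream and, if the stream was idle, mark it
+	// active so that processData will pick it up.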
+ str.itl.enqueue(df) + if str.state == empty { + str.state = active + l.activeStreams.enqueue(str) + } + return nil +} + +func (l *loopyWriter) pingHandler(p *ping) error { + if !p.ack { + l.bdpEst.timesnap(p.data) + } + return l.framer.fr.WritePing(p.ack, p.data) + +} + +func (l *loopyWriter) outFlowControlSizeRequestHandler(o *outFlowControlSizeRequest) error { + o.resp <- l.sendQuota + return nil +} + +func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error { + c.onWrite() + if str, ok := l.estdStreams[c.streamID]; ok { + // On the server side it could be a trailers-only response or + // a RST_STREAM before stream initialization thus the stream might + // not be established yet. + delete(l.estdStreams, c.streamID) + str.deleteSelf() + } + if c.rst { // If RST_STREAM needs to be sent. + if err := l.framer.fr.WriteRSTStream(c.streamID, c.rstCode); err != nil { + return err + } + } + if l.side == clientSide && l.draining && len(l.estdStreams) == 0 { + return ErrConnClosing + } + return nil +} + +func (l *loopyWriter) incomingGoAwayHandler(*incomingGoAway) error { + if l.side == clientSide { + l.draining = true + if len(l.estdStreams) == 0 { + return ErrConnClosing + } + } + return nil +} + +func (l *loopyWriter) goAwayHandler(g *goAway) error { + // Handling of outgoing GoAway is very specific to side. + if l.ssGoAwayHandler != nil { + draining, err := l.ssGoAwayHandler(g) + if err != nil { + return err + } + l.draining = draining + } + return nil +} + +func (l *loopyWriter) handle(i interface{}) error { + switch i := i.(type) { + case *incomingWindowUpdate: + return l.incomingWindowUpdateHandler(i) + case *outgoingWindowUpdate: + return l.outgoingWindowUpdateHandler(i) + case *incomingSettings: + return l.incomingSettingsHandler(i) + case *outgoingSettings: + return l.outgoingSettingsHandler(i) + case *headerFrame: + return l.headerHandler(i) + case *registerStream: + return l.registerStreamHandler(i) + case *cleanupStream: + return l.cleanupStreamHandler(i) + case *incomingGoAway: + return l.incomingGoAwayHandler(i) + case *dataFrame: + return l.preprocessData(i) + case *ping: + return l.pingHandler(i) + case *goAway: + return l.goAwayHandler(i) + case *outFlowControlSizeRequest: + return l.outFlowControlSizeRequestHandler(i) + default: + return fmt.Errorf("transport: unknown control message type %T", i) + } +} + +func (l *loopyWriter) applySettings(ss []http2.Setting) error { + for _, s := range ss { + switch s.ID { + case http2.SettingInitialWindowSize: + o := l.oiws + l.oiws = s.Val + if o < l.oiws { + // If the new limit is greater make all depleted streams active. + for _, stream := range l.estdStreams { + if stream.state == waitingOnStreamQuota { + stream.state = active + l.activeStreams.enqueue(stream) + } + } + } + case http2.SettingHeaderTableSize: + updateHeaderTblSize(l.hEnc, s.Val) + } + } + return nil +} + +// processData removes the first stream from active streams, writes out at most 16KB +// of its data and then puts it at the end of activeStreams if there's still more data +// to be sent and stream has some stream-level flow control. +func (l *loopyWriter) processData() (bool, error) { + if l.sendQuota == 0 { + return true, nil + } + str := l.activeStreams.dequeue() // Remove the first stream. + if str == nil { + return true, nil + } + dataItem := str.itl.peek().(*dataFrame) // Peek at the first data item this stream. + // A data item is represented by a dataFrame, since it later translates into + // multiple HTTP2 data frames. 
+	// Every dataFrame has two buffers; h that keeps grpc-message header and d that is actual data.
+	// As an optimization to keep wire traffic low, data from d is copied to h to make a frame as
+	// big as the maximum possible HTTP2 frame size.
+
+	if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // Empty data frame
+		// Client sends out empty data frame with endStream = true
+		if err := l.framer.fr.WriteData(dataItem.streamID, dataItem.endStream, nil); err != nil {
+			return false, err
+		}
+		str.itl.dequeue() // remove the empty data item from stream
+		if str.itl.isEmpty() {
+			str.state = empty
+		} else if trailer, ok := str.itl.peek().(*headerFrame); ok { // the next item is trailers.
+			if err := l.writeHeader(trailer.streamID, trailer.endStream, trailer.hf, trailer.onWrite); err != nil {
+				return false, err
+			}
+			if err := l.cleanupStreamHandler(trailer.cleanup); err != nil {
+				return false, err
+			}
+		} else {
+			l.activeStreams.enqueue(str)
+		}
+		return false, nil
+	}
+	var (
+		idx int
+		buf []byte
+	)
+	if len(dataItem.h) != 0 { // data header has not been written out yet.
+		buf = dataItem.h
+	} else {
+		idx = 1
+		buf = dataItem.d
+	}
+	size := http2MaxFrameLen
+	if len(buf) < size {
+		size = len(buf)
+	}
+	if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota <= 0 { // stream-level flow control.
+		str.state = waitingOnStreamQuota
+		return false, nil
+	} else if strQuota < size {
+		size = strQuota
+	}
+
+	if l.sendQuota < uint32(size) { // connection-level flow control.
+		size = int(l.sendQuota)
+	}
+	// Now that outgoing flow controls are checked we can replenish str's write quota
+	str.wq.replenish(size)
+	var endStream bool
+	// If this is the last data message on this stream and all of it can be written in this iteration.
+	if dataItem.endStream && size == len(buf) {
+		// buf contains either data or it contains header but data is empty.
+		if idx == 1 || len(dataItem.d) == 0 {
+			endStream = true
+		}
+	}
+	if dataItem.onEachWrite != nil {
+		dataItem.onEachWrite()
+	}
+	if err := l.framer.fr.WriteData(dataItem.streamID, endStream, buf[:size]); err != nil {
+		return false, err
+	}
+	buf = buf[size:]
+	str.bytesOutStanding += size
+	l.sendQuota -= uint32(size)
+	if idx == 0 {
+		dataItem.h = buf
+	} else {
+		dataItem.d = buf
+	}
+
+	if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // All the data from that message was written out.
+		str.itl.dequeue()
+	}
+	if str.itl.isEmpty() {
+		str.state = empty
+	} else if trailer, ok := str.itl.peek().(*headerFrame); ok { // The next item is trailers.
+		if err := l.writeHeader(trailer.streamID, trailer.endStream, trailer.hf, trailer.onWrite); err != nil {
+			return false, err
+		}
+		if err := l.cleanupStreamHandler(trailer.cleanup); err != nil {
+			return false, err
+		}
+	} else if int(l.oiws)-str.bytesOutStanding <= 0 { // Ran out of stream quota.
+		str.state = waitingOnStreamQuota
+	} else { // Otherwise add it back to the list of active streams.
+		l.activeStreams.enqueue(str)
+	}
+	return false, nil
+}
diff --git a/vendor/google.golang.org/grpc/internal/transport/defaults.go b/vendor/google.golang.org/grpc/internal/transport/defaults.go
new file mode 100644
index 000000000..9fa306b2e
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/transport/defaults.go
@@ -0,0 +1,49 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package transport + +import ( + "math" + "time" +) + +const ( + // The default value of flow control window size in HTTP2 spec. + defaultWindowSize = 65535 + // The initial window size for flow control. + initialWindowSize = defaultWindowSize // for an RPC + infinity = time.Duration(math.MaxInt64) + defaultClientKeepaliveTime = infinity + defaultClientKeepaliveTimeout = 20 * time.Second + defaultMaxStreamsClient = 100 + defaultMaxConnectionIdle = infinity + defaultMaxConnectionAge = infinity + defaultMaxConnectionAgeGrace = infinity + defaultServerKeepaliveTime = 2 * time.Hour + defaultServerKeepaliveTimeout = 20 * time.Second + defaultKeepalivePolicyMinTime = 5 * time.Minute + // max window limit set by HTTP2 Specs. + maxWindowSize = math.MaxInt32 + // defaultWriteQuota is the default value for number of data + // bytes that each stream can schedule before some of it being + // flushed out. + defaultWriteQuota = 64 * 1024 + defaultClientMaxHeaderListSize = uint32(16 << 20) + defaultServerMaxHeaderListSize = uint32(16 << 20) +) diff --git a/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go b/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go new file mode 100644 index 000000000..5ea997a7e --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go @@ -0,0 +1,218 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package transport + +import ( + "fmt" + "math" + "sync" + "sync/atomic" +) + +// writeQuota is a soft limit on the amount of data a stream can +// schedule before some of it is written out. +type writeQuota struct { + quota int32 + // get waits on read from when quota goes less than or equal to zero. + // replenish writes on it when quota goes positive again. + ch chan struct{} + // done is triggered in error case. + done <-chan struct{} + // replenish is called by loopyWriter to give quota back to. + // It is implemented as a field so that it can be updated + // by tests. 
+	replenish func(n int)
+}
+
+func newWriteQuota(sz int32, done <-chan struct{}) *writeQuota {
+	w := &writeQuota{
+		quota: sz,
+		ch:    make(chan struct{}, 1),
+		done:  done,
+	}
+	w.replenish = w.realReplenish
+	return w
+}
+
+func (w *writeQuota) get(sz int32) error {
+	for {
+		if atomic.LoadInt32(&w.quota) > 0 {
+			atomic.AddInt32(&w.quota, -sz)
+			return nil
+		}
+		select {
+		case <-w.ch:
+			continue
+		case <-w.done:
+			return errStreamDone
+		}
+	}
+}
+
+func (w *writeQuota) realReplenish(n int) {
+	sz := int32(n)
+	a := atomic.AddInt32(&w.quota, sz)
+	b := a - sz
+	if b <= 0 && a > 0 {
+		select {
+		case w.ch <- struct{}{}:
+		default:
+		}
+	}
+}
+
+type trInFlow struct {
+	limit               uint32
+	unacked             uint32
+	effectiveWindowSize uint32
+}
+
+func (f *trInFlow) newLimit(n uint32) uint32 {
+	d := n - f.limit
+	f.limit = n
+	f.updateEffectiveWindowSize()
+	return d
+}
+
+func (f *trInFlow) onData(n uint32) uint32 {
+	f.unacked += n
+	if f.unacked >= f.limit/4 {
+		w := f.unacked
+		f.unacked = 0
+		f.updateEffectiveWindowSize()
+		return w
+	}
+	f.updateEffectiveWindowSize()
+	return 0
+}
+
+func (f *trInFlow) reset() uint32 {
+	w := f.unacked
+	f.unacked = 0
+	f.updateEffectiveWindowSize()
+	return w
+}
+
+func (f *trInFlow) updateEffectiveWindowSize() {
+	atomic.StoreUint32(&f.effectiveWindowSize, f.limit-f.unacked)
+}
+
+func (f *trInFlow) getSize() uint32 {
+	return atomic.LoadUint32(&f.effectiveWindowSize)
+}
+
+// TODO(mmukhi): Simplify this code.
+// inFlow deals with inbound flow control
+type inFlow struct {
+	mu sync.Mutex
+	// The inbound flow control limit for pending data.
+	limit uint32
+	// pendingData is the overall data which have been received but not been
+	// consumed by applications.
+	pendingData uint32
+	// The amount of data the application has consumed but grpc has not sent
+	// window update for them. Used to reduce window update frequency.
+	pendingUpdate uint32
+	// delta is the extra window update given by receiver when an application
+	// is reading data bigger in size than the inFlow limit.
+	delta uint32
+}
+
+// newLimit updates the inflow window to a new value n.
+// It assumes that n is always greater than the old limit.
+func (f *inFlow) newLimit(n uint32) uint32 {
+	f.mu.Lock()
+	d := n - f.limit
+	f.limit = n
+	f.mu.Unlock()
+	return d
+}
+
+func (f *inFlow) maybeAdjust(n uint32) uint32 {
+	if n > uint32(math.MaxInt32) {
+		n = uint32(math.MaxInt32)
+	}
+	f.mu.Lock()
+	// estSenderQuota is the receiver's view of the maximum number of bytes the sender
+	// can send without a window update.
+	estSenderQuota := int32(f.limit - (f.pendingData + f.pendingUpdate))
+	// estUntransmittedData is the maximum number of bytes the sender might not have put
+	// on the wire yet. A value of 0 or less means that we have already received all or
+	// more bytes than the application is requesting to read.
+	estUntransmittedData := int32(n - f.pendingData) // Casting into int32 since it could be negative.
+	// This implies that unless we send a window update, the sender won't be able to send all the bytes
+	// for this message. Therefore we must send an update over the limit since there's an active read
+	// request from the application.
+	if estUntransmittedData > estSenderQuota {
+		// Sender's window shouldn't go more than 2^31 - 1 as specified in the HTTP spec.
+		if f.limit+n > maxWindowSize {
+			f.delta = maxWindowSize - f.limit
+		} else {
+			// Send a window update for the whole message and not just the difference between
+			// estUntransmittedData and estSenderQuota.
This will be helpful in case the message + // is padded; We will fallback on the current available window(at least a 1/4th of the limit). + f.delta = n + } + f.mu.Unlock() + return f.delta + } + f.mu.Unlock() + return 0 +} + +// onData is invoked when some data frame is received. It updates pendingData. +func (f *inFlow) onData(n uint32) error { + f.mu.Lock() + f.pendingData += n + if f.pendingData+f.pendingUpdate > f.limit+f.delta { + limit := f.limit + rcvd := f.pendingData + f.pendingUpdate + f.mu.Unlock() + return fmt.Errorf("received %d-bytes data exceeding the limit %d bytes", rcvd, limit) + } + f.mu.Unlock() + return nil +} + +// onRead is invoked when the application reads the data. It returns the window size +// to be sent to the peer. +func (f *inFlow) onRead(n uint32) uint32 { + f.mu.Lock() + if f.pendingData == 0 { + f.mu.Unlock() + return 0 + } + f.pendingData -= n + if n > f.delta { + n -= f.delta + f.delta = 0 + } else { + f.delta -= n + n = 0 + } + f.pendingUpdate += n + if f.pendingUpdate >= f.limit/4 { + wu := f.pendingUpdate + f.pendingUpdate = 0 + f.mu.Unlock() + return wu + } + f.mu.Unlock() + return 0 +} diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go new file mode 100644 index 000000000..78f9ddc3d --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go @@ -0,0 +1,431 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// This file is the implementation of a gRPC server using HTTP/2 which +// uses the standard Go http2 Server implementation (via the +// http.Handler interface), rather than speaking low-level HTTP/2 +// frames itself. It is the implementation of *grpc.Server.ServeHTTP. + +package transport + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "net" + "net/http" + "strings" + "sync" + "time" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/http2" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" +) + +// NewServerHandlerTransport returns a ServerTransport handling gRPC +// from inside an http.Handler. It requires that the http Server +// supports HTTP/2. +func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats stats.Handler) (ServerTransport, error) { + if r.ProtoMajor != 2 { + return nil, errors.New("gRPC requires HTTP/2") + } + if r.Method != "POST" { + return nil, errors.New("invalid gRPC request method") + } + contentType := r.Header.Get("Content-Type") + // TODO: do we assume contentType is lowercase? 
we did before + contentSubtype, validContentType := contentSubtype(contentType) + if !validContentType { + return nil, errors.New("invalid gRPC request content-type") + } + if _, ok := w.(http.Flusher); !ok { + return nil, errors.New("gRPC requires a ResponseWriter supporting http.Flusher") + } + + st := &serverHandlerTransport{ + rw: w, + req: r, + closedCh: make(chan struct{}), + writes: make(chan func()), + contentType: contentType, + contentSubtype: contentSubtype, + stats: stats, + } + + if v := r.Header.Get("grpc-timeout"); v != "" { + to, err := decodeTimeout(v) + if err != nil { + return nil, status.Errorf(codes.Internal, "malformed time-out: %v", err) + } + st.timeoutSet = true + st.timeout = to + } + + metakv := []string{"content-type", contentType} + if r.Host != "" { + metakv = append(metakv, ":authority", r.Host) + } + for k, vv := range r.Header { + k = strings.ToLower(k) + if isReservedHeader(k) && !isWhitelistedHeader(k) { + continue + } + for _, v := range vv { + v, err := decodeMetadataHeader(k, v) + if err != nil { + return nil, status.Errorf(codes.Internal, "malformed binary metadata: %v", err) + } + metakv = append(metakv, k, v) + } + } + st.headerMD = metadata.Pairs(metakv...) + + return st, nil +} + +// serverHandlerTransport is an implementation of ServerTransport +// which replies to exactly one gRPC request (exactly one HTTP request), +// using the net/http.Handler interface. This http.Handler is guaranteed +// at this point to be speaking over HTTP/2, so it's able to speak valid +// gRPC. +type serverHandlerTransport struct { + rw http.ResponseWriter + req *http.Request + timeoutSet bool + timeout time.Duration + didCommonHeaders bool + + headerMD metadata.MD + + closeOnce sync.Once + closedCh chan struct{} // closed on Close + + // writes is a channel of code to run serialized in the + // ServeHTTP (HandleStreams) goroutine. The channel is closed + // when WriteStatus is called. + writes chan func() + + // block concurrent WriteStatus calls + // e.g. grpc/(*serverStream).SendMsg/RecvMsg + writeStatusMu sync.Mutex + + // we just mirror the request content-type + contentType string + // we store both contentType and contentSubtype so we don't keep recreating them + // TODO make sure this is consistent across handler_server and http2_server + contentSubtype string + + stats stats.Handler +} + +func (ht *serverHandlerTransport) Close() error { + ht.closeOnce.Do(ht.closeCloseChanOnce) + return nil +} + +func (ht *serverHandlerTransport) closeCloseChanOnce() { close(ht.closedCh) } + +func (ht *serverHandlerTransport) RemoteAddr() net.Addr { return strAddr(ht.req.RemoteAddr) } + +// strAddr is a net.Addr backed by either a TCP "ip:port" string, or +// the empty string if unknown. +type strAddr string + +func (a strAddr) Network() string { + if a != "" { + // Per the documentation on net/http.Request.RemoteAddr, if this is + // set, it's set to the IP:port of the peer (hence, TCP): + // https://golang.org/pkg/net/http/#Request + // + // If we want to support Unix sockets later, we can + // add our own grpc-specific convention within the + // grpc codebase to set RemoteAddr to a different + // format, or probably better: we can attach it to the + // context and use that from serverHandlerTransport.RemoteAddr. + return "tcp" + } + return "" +} + +func (a strAddr) String() string { return string(a) } + +// do runs fn in the ServeHTTP goroutine. 
+func (ht *serverHandlerTransport) do(fn func()) error { + select { + case <-ht.closedCh: + return ErrConnClosing + case ht.writes <- fn: + return nil + } +} + +func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) error { + ht.writeStatusMu.Lock() + defer ht.writeStatusMu.Unlock() + + err := ht.do(func() { + ht.writeCommonHeaders(s) + + // And flush, in case no header or body has been sent yet. + // This forces a separation of headers and trailers if this is the + // first call (for example, in end2end tests's TestNoService). + ht.rw.(http.Flusher).Flush() + + h := ht.rw.Header() + h.Set("Grpc-Status", fmt.Sprintf("%d", st.Code())) + if m := st.Message(); m != "" { + h.Set("Grpc-Message", encodeGrpcMessage(m)) + } + + if p := st.Proto(); p != nil && len(p.Details) > 0 { + stBytes, err := proto.Marshal(p) + if err != nil { + // TODO: return error instead, when callers are able to handle it. + panic(err) + } + + h.Set("Grpc-Status-Details-Bin", encodeBinHeader(stBytes)) + } + + if md := s.Trailer(); len(md) > 0 { + for k, vv := range md { + // Clients don't tolerate reading restricted headers after some non restricted ones were sent. + if isReservedHeader(k) { + continue + } + for _, v := range vv { + // http2 ResponseWriter mechanism to send undeclared Trailers after + // the headers have possibly been written. + h.Add(http2.TrailerPrefix+k, encodeMetadataHeader(k, v)) + } + } + } + }) + + if err == nil { // transport has not been closed + if ht.stats != nil { + ht.stats.HandleRPC(s.Context(), &stats.OutTrailer{}) + } + } + ht.Close() + return err +} + +// writeCommonHeaders sets common headers on the first write +// call (Write, WriteHeader, or WriteStatus). +func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) { + if ht.didCommonHeaders { + return + } + ht.didCommonHeaders = true + + h := ht.rw.Header() + h["Date"] = nil // suppress Date to make tests happy; TODO: restore + h.Set("Content-Type", ht.contentType) + + // Predeclare trailers we'll set later in WriteStatus (after the body). + // This is a SHOULD in the HTTP RFC, and the way you add (known) + // Trailers per the net/http.ResponseWriter contract. + // See https://golang.org/pkg/net/http/#ResponseWriter + // and https://golang.org/pkg/net/http/#example_ResponseWriter_trailers + h.Add("Trailer", "Grpc-Status") + h.Add("Trailer", "Grpc-Message") + h.Add("Trailer", "Grpc-Status-Details-Bin") + + if s.sendCompress != "" { + h.Set("Grpc-Encoding", s.sendCompress) + } +} + +func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { + return ht.do(func() { + ht.writeCommonHeaders(s) + ht.rw.Write(hdr) + ht.rw.Write(data) + ht.rw.(http.Flusher).Flush() + }) +} + +func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error { + err := ht.do(func() { + ht.writeCommonHeaders(s) + h := ht.rw.Header() + for k, vv := range md { + // Clients don't tolerate reading restricted headers after some non restricted ones were sent. + if isReservedHeader(k) { + continue + } + for _, v := range vv { + v = encodeMetadataHeader(k, v) + h.Add(k, v) + } + } + ht.rw.WriteHeader(200) + ht.rw.(http.Flusher).Flush() + }) + + if err == nil { + if ht.stats != nil { + ht.stats.HandleRPC(s.Context(), &stats.OutHeader{}) + } + } + return err +} + +func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), traceCtx func(context.Context, string) context.Context) { + // With this transport type there will be exactly 1 stream: this HTTP request. 
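The writes channel declared above is the core trick of this transport: every mutation of the http.ResponseWriter is funneled, via do, onto the single ServeHTTP goroutine that runStream occupies, so WriteStatus, Write, and WriteHeader need no lock around the writer itself. A minimal, self-contained sketch of that serialize-onto-one-goroutine pattern (the serializer type and all of its names are illustrative, not part of this patch):

package main

import "fmt"

// serializer runs submitted functions one at a time on a single
// goroutine, mirroring the do/runStream pair in this file.
type serializer struct {
	work chan func()
	done chan struct{}
}

func newSerializer() *serializer {
	s := &serializer{work: make(chan func()), done: make(chan struct{})}
	go s.run()
	return s
}

// run is the only goroutine that ever executes submitted functions.
func (s *serializer) run() {
	for {
		select {
		case fn := <-s.work:
			fn()
		case <-s.done:
			return
		}
	}
}

// do schedules fn unless the serializer has been shut down.
func (s *serializer) do(fn func()) error {
	select {
	case <-s.done:
		return fmt.Errorf("serializer closed")
	case s.work <- fn:
		return nil
	}
}

func main() {
	s := newSerializer()
	ran := make(chan struct{})
	s.do(func() { fmt.Println("runs on the run goroutine"); close(ran) })
	<-ran
	close(s.done)
}

Closing done here plays the role of closedCh above: a do call that loses the race returns an error instead of blocking forever.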
+ + ctx := ht.req.Context() + var cancel context.CancelFunc + if ht.timeoutSet { + ctx, cancel = context.WithTimeout(ctx, ht.timeout) + } else { + ctx, cancel = context.WithCancel(ctx) + } + + // requestOver is closed when the status has been written via WriteStatus. + requestOver := make(chan struct{}) + go func() { + select { + case <-requestOver: + case <-ht.closedCh: + case <-ht.req.Context().Done(): + } + cancel() + ht.Close() + }() + + req := ht.req + + s := &Stream{ + id: 0, // irrelevant + requestRead: func(int) {}, + cancel: cancel, + buf: newRecvBuffer(), + st: ht, + method: req.URL.Path, + recvCompress: req.Header.Get("grpc-encoding"), + contentSubtype: ht.contentSubtype, + } + pr := &peer.Peer{ + Addr: ht.RemoteAddr(), + } + if req.TLS != nil { + pr.AuthInfo = credentials.TLSInfo{State: *req.TLS} + } + ctx = metadata.NewIncomingContext(ctx, ht.headerMD) + s.ctx = peer.NewContext(ctx, pr) + if ht.stats != nil { + s.ctx = ht.stats.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method}) + inHeader := &stats.InHeader{ + FullMethod: s.method, + RemoteAddr: ht.RemoteAddr(), + Compression: s.recvCompress, + } + ht.stats.HandleRPC(s.ctx, inHeader) + } + s.trReader = &transportReader{ + reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf, freeBuffer: func(*bytes.Buffer) {}}, + windowHandler: func(int) {}, + } + + // readerDone is closed when the Body.Read-ing goroutine exits. + readerDone := make(chan struct{}) + go func() { + defer close(readerDone) + + // TODO: minimize garbage, optimize recvBuffer code/ownership + const readSize = 8196 + for buf := make([]byte, readSize); ; { + n, err := req.Body.Read(buf) + if n > 0 { + s.buf.put(recvMsg{buffer: bytes.NewBuffer(buf[:n:n])}) + buf = buf[n:] + } + if err != nil { + s.buf.put(recvMsg{err: mapRecvMsgError(err)}) + return + } + if len(buf) == 0 { + buf = make([]byte, readSize) + } + } + }() + + // startStream is provided by the *grpc.Server's serveStreams. + // It starts a goroutine serving s and exits immediately. + // The goroutine that is started is the one that then calls + // into ht, calling WriteHeader, Write, WriteStatus, Close, etc. + startStream(s) + + ht.runStream() + close(requestOver) + + // Wait for reading goroutine to finish. + req.Body.Close() + <-readerDone +} + +func (ht *serverHandlerTransport) runStream() { + for { + select { + case fn := <-ht.writes: + fn() + case <-ht.closedCh: + return + } + } +} + +func (ht *serverHandlerTransport) IncrMsgSent() {} + +func (ht *serverHandlerTransport) IncrMsgRecv() {} + +func (ht *serverHandlerTransport) Drain() { + panic("Drain() is not implemented") +} + +// mapRecvMsgError returns the non-nil err into the appropriate +// error value as expected by callers of *grpc.parser.recvMsg. 
+// In particular, it can only be: +// * io.EOF +// * io.ErrUnexpectedEOF +// * of type transport.ConnectionError +// * an error from the status package +func mapRecvMsgError(err error) error { + if err == io.EOF || err == io.ErrUnexpectedEOF { + return err + } + if se, ok := err.(http2.StreamError); ok { + if code, ok := http2ErrConvTab[se.Code]; ok { + return status.Error(code, se.Error()) + } + } + if strings.Contains(err.Error(), "body closed by handler") { + return status.Error(codes.Canceled, err.Error()) + } + return connectionErrorf(true, err, err.Error()) +} diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go new file mode 100644 index 000000000..c96178d74 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -0,0 +1,1403 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package transport + +import ( + "context" + "fmt" + "io" + "math" + "net" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" + + "golang.org/x/net/http2" + "golang.org/x/net/http2/hpack" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/syscall" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" +) + +// http2Client implements the ClientTransport interface with HTTP2. +type http2Client struct { + ctx context.Context + cancel context.CancelFunc + ctxDone <-chan struct{} // Cache the ctx.Done() chan. + userAgent string + md interface{} + conn net.Conn // underlying communication channel + loopy *loopyWriter + remoteAddr net.Addr + localAddr net.Addr + authInfo credentials.AuthInfo // auth info about the connection + + readerDone chan struct{} // sync point to enable testing. + writerDone chan struct{} // sync point to enable testing. + // goAway is closed to notify the upper layer (i.e., addrConn.transportMonitor) + // that the server sent GoAway on this transport. + goAway chan struct{} + // awakenKeepalive is used to wake up keepalive after it has gone dormant. + awakenKeepalive chan struct{} + + framer *framer + // controlBuf delivers all the control related tasks (e.g., window + // updates, reset streams, and various settings) to the controller. + controlBuf *controlBuffer + fc *trInFlow + // The scheme used: https if TLS is on, http otherwise. + scheme string + + isSecure bool + + perRPCCreds []credentials.PerRPCCredentials + + // Boolean to keep track of reading activity on transport. + // 1 is true and 0 is false. + activity uint32 // Accessed atomically. + kp keepalive.ClientParameters + keepaliveEnabled bool + + statsHandler stats.Handler + + initialWindowSize int32 + + // configured by peer through SETTINGS_MAX_HEADER_LIST_SIZE + maxSendHeaderListSize *uint32 + + bdpEst *bdpEstimator + // onPrefaceReceipt is a callback that client transport calls upon + // receiving server preface to signal that a successful HTTP2 + // connection was established. + onPrefaceReceipt func() + + maxConcurrentStreams uint32 + streamQuota int64 + streamsQuotaAvailable chan struct{} + waitingStreams uint32 + nextID uint32 + + mu sync.Mutex // guard the following variables + state transportState + activeStreams map[uint32]*Stream + // prevGoAwayID records the Last-Stream-ID in the previous GoAway frame. + prevGoAwayID uint32 + // goAwayReason records the http2.ErrCode and debug data received with the + // GoAway frame. + goAwayReason GoAwayReason + + // Fields below are for channelz metric collection. + channelzID int64 // channelz unique identification number + czData *channelzData + + onGoAway func(GoAwayReason) + onClose func() + + bufferPool *bufferPool +} + +func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error), addr string) (net.Conn, error) { + if fn != nil { + return fn(ctx, addr) + } + return (&net.Dialer{}).DialContext(ctx, "tcp", addr) +} + +func isTemporary(err error) bool { + switch err := err.(type) { + case interface { + Temporary() bool + }: + return err.Temporary() + case interface { + Timeout() bool + }: + // Timeouts may be resolved upon retry, and are thus treated as + // temporary. + return err.Timeout() + } + return true +} + +// newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2 +// and starts to receive messages on it. Non-nil error returns if construction +// fails. +func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (_ *http2Client, err error) { + scheme := "http" + ctx, cancel := context.WithCancel(ctx) + defer func() { + if err != nil { + cancel() + } + }() + + conn, err := dial(connectCtx, opts.Dialer, addr.Addr) + if err != nil { + if opts.FailOnNonTempDialError { + return nil, connectionErrorf(isTemporary(err), err, "transport: error while dialing: %v", err) + } + return nil, connectionErrorf(true, err, "transport: Error while dialing %v", err) + } + // Any further errors will close the underlying connection + defer func(conn net.Conn) { + if err != nil { + conn.Close() + } + }(conn) + kp := opts.KeepaliveParams + // Validate keepalive parameters.
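The isTemporary helper above is worth pausing on: it classifies errors by interface shape (Temporary/Timeout methods) rather than by concrete type, and defaults to treating unknown errors as retryable. A standalone sketch of the same check, using a context with an already-tight deadline to produce a real net error (retryable is an illustrative name; 192.0.2.1 is a TEST-NET address chosen to be unreachable):

package main

import (
	"context"
	"fmt"
	"net"
	"time"
)

// retryable mirrors isTemporary: sniff for Temporary/Timeout methods,
// and treat unknown errors as transient by default.
func retryable(err error) bool {
	switch err := err.(type) {
	case interface{ Temporary() bool }:
		return err.Temporary()
	case interface{ Timeout() bool }:
		// Timeouts may be resolved on retry.
		return err.Timeout()
	}
	return true
}

func main() {
	// A 1ns deadline guarantees the dial fails with a timeout error.
	ctx, cancel := context.WithTimeout(context.Background(), time.Nanosecond)
	defer cancel()
	var d net.Dialer
	_, err := d.DialContext(ctx, "tcp", "192.0.2.1:443")
	fmt.Println(retryable(err)) // true: timeouts count as transient
}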
+ if kp.Time == 0 { + kp.Time = defaultClientKeepaliveTime + } + if kp.Timeout == 0 { + kp.Timeout = defaultClientKeepaliveTimeout + } + keepaliveEnabled := false + if kp.Time != infinity { + if err = syscall.SetTCPUserTimeout(conn, kp.Timeout); err != nil { + return nil, connectionErrorf(false, err, "transport: failed to set TCP_USER_TIMEOUT: %v", err) + } + keepaliveEnabled = true + } + var ( + isSecure bool + authInfo credentials.AuthInfo + ) + transportCreds := opts.TransportCredentials + perRPCCreds := opts.PerRPCCredentials + + if b := opts.CredsBundle; b != nil { + if t := b.TransportCredentials(); t != nil { + transportCreds = t + } + if t := b.PerRPCCredentials(); t != nil { + perRPCCreds = append(perRPCCreds, t) + } + } + if transportCreds != nil { + scheme = "https" + conn, authInfo, err = transportCreds.ClientHandshake(connectCtx, addr.Authority, conn) + if err != nil { + return nil, connectionErrorf(isTemporary(err), err, "transport: authentication handshake failed: %v", err) + } + isSecure = true + } + dynamicWindow := true + icwz := int32(initialWindowSize) + if opts.InitialConnWindowSize >= defaultWindowSize { + icwz = opts.InitialConnWindowSize + dynamicWindow = false + } + writeBufSize := opts.WriteBufferSize + readBufSize := opts.ReadBufferSize + maxHeaderListSize := defaultClientMaxHeaderListSize + if opts.MaxHeaderListSize != nil { + maxHeaderListSize = *opts.MaxHeaderListSize + } + t := &http2Client{ + ctx: ctx, + ctxDone: ctx.Done(), // Cache Done chan. + cancel: cancel, + userAgent: opts.UserAgent, + md: addr.Metadata, + conn: conn, + remoteAddr: conn.RemoteAddr(), + localAddr: conn.LocalAddr(), + authInfo: authInfo, + readerDone: make(chan struct{}), + writerDone: make(chan struct{}), + goAway: make(chan struct{}), + awakenKeepalive: make(chan struct{}, 1), + framer: newFramer(conn, writeBufSize, readBufSize, maxHeaderListSize), + fc: &trInFlow{limit: uint32(icwz)}, + scheme: scheme, + activeStreams: make(map[uint32]*Stream), + isSecure: isSecure, + perRPCCreds: perRPCCreds, + kp: kp, + statsHandler: opts.StatsHandler, + initialWindowSize: initialWindowSize, + onPrefaceReceipt: onPrefaceReceipt, + nextID: 1, + maxConcurrentStreams: defaultMaxStreamsClient, + streamQuota: defaultMaxStreamsClient, + streamsQuotaAvailable: make(chan struct{}, 1), + czData: new(channelzData), + onGoAway: onGoAway, + onClose: onClose, + keepaliveEnabled: keepaliveEnabled, + bufferPool: newBufferPool(), + } + t.controlBuf = newControlBuffer(t.ctxDone) + if opts.InitialWindowSize >= defaultWindowSize { + t.initialWindowSize = opts.InitialWindowSize + dynamicWindow = false + } + if dynamicWindow { + t.bdpEst = &bdpEstimator{ + bdp: initialWindowSize, + updateFlowControl: t.updateFlowControl, + } + } + // Make sure awakenKeepalive can't be written upon. + // keepalive routine will make it writable, if need be. + t.awakenKeepalive <- struct{}{} + if t.statsHandler != nil { + t.ctx = t.statsHandler.TagConn(t.ctx, &stats.ConnTagInfo{ + RemoteAddr: t.remoteAddr, + LocalAddr: t.localAddr, + }) + connBegin := &stats.ConnBegin{ + Client: true, + } + t.statsHandler.HandleConn(t.ctx, connBegin) + } + if channelz.IsOn() { + t.channelzID = channelz.RegisterNormalSocket(t, opts.ChannelzParentID, fmt.Sprintf("%s -> %s", t.localAddr, t.remoteAddr)) + } + if t.keepaliveEnabled { + go t.keepalive() + } + // Start the reader goroutine for incoming message. Each transport has + // a dedicated goroutine which reads HTTP2 frame from network. Then it + // dispatches the frame to the corresponding stream entity. 
+ go t.reader() + + // Send connection preface to server. + n, err := t.conn.Write(clientPreface) + if err != nil { + t.Close() + return nil, connectionErrorf(true, err, "transport: failed to write client preface: %v", err) + } + if n != len(clientPreface) { + t.Close() + return nil, connectionErrorf(true, err, "transport: preface mismatch, wrote %d bytes; want %d", n, len(clientPreface)) + } + var ss []http2.Setting + + if t.initialWindowSize != defaultWindowSize { + ss = append(ss, http2.Setting{ + ID: http2.SettingInitialWindowSize, + Val: uint32(t.initialWindowSize), + }) + } + if opts.MaxHeaderListSize != nil { + ss = append(ss, http2.Setting{ + ID: http2.SettingMaxHeaderListSize, + Val: *opts.MaxHeaderListSize, + }) + } + err = t.framer.fr.WriteSettings(ss...) + if err != nil { + t.Close() + return nil, connectionErrorf(true, err, "transport: failed to write initial settings frame: %v", err) + } + // Adjust the connection flow control window if needed. + if delta := uint32(icwz - defaultWindowSize); delta > 0 { + if err := t.framer.fr.WriteWindowUpdate(0, delta); err != nil { + t.Close() + return nil, connectionErrorf(true, err, "transport: failed to write window update: %v", err) + } + } + + if err := t.framer.writer.Flush(); err != nil { + return nil, err + } + go func() { + t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst) + err := t.loopy.run() + if err != nil { + errorf("transport: loopyWriter.run returning. Err: %v", err) + } + // If it's a connection error, let reader goroutine handle it + // since there might be data in the buffers. + if _, ok := err.(net.Error); !ok { + t.conn.Close() + } + close(t.writerDone) + }() + return t, nil +} + +func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream { + // TODO(zhaoq): Handle uint32 overflow of Stream.id. + s := &Stream{ + done: make(chan struct{}), + method: callHdr.Method, + sendCompress: callHdr.SendCompress, + buf: newRecvBuffer(), + headerChan: make(chan struct{}), + contentSubtype: callHdr.ContentSubtype, + } + s.wq = newWriteQuota(defaultWriteQuota, s.done) + s.requestRead = func(n int) { + t.adjustWindow(s, uint32(n)) + } + // The client side stream context should have exactly the same life cycle with the user provided context. + // That means, s.ctx should be read-only. And s.ctx is done iff ctx is done. + // So we use the original context here instead of creating a copy. + s.ctx = ctx + s.trReader = &transportReader{ + reader: &recvBufferReader{ + ctx: s.ctx, + ctxDone: s.ctx.Done(), + recv: s.buf, + closeStream: func(err error) { + t.CloseStream(s, err) + }, + freeBuffer: t.bufferPool.put, + }, + windowHandler: func(n int) { + t.updateWindow(s, uint32(n)) + }, + } + return s +} + +func (t *http2Client) getPeer() *peer.Peer { + pr := &peer.Peer{ + Addr: t.remoteAddr, + } + // Attach Auth info if there is any. + if t.authInfo != nil { + pr.AuthInfo = t.authInfo + } + return pr +} + +func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) ([]hpack.HeaderField, error) { + aud := t.createAudience(callHdr) + authData, err := t.getTrAuthData(ctx, aud) + if err != nil { + return nil, err + } + callAuthData, err := t.getCallAuthData(ctx, aud, callHdr) + if err != nil { + return nil, err + } + // TODO(mmukhi): Benchmark if the performance gets better if count the metadata and other header fields + // first and create a slice of that exact size. + // Make the slice of certain predictable size to reduce allocations made by append. 
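The pre-sizing strategy described in the comment above, and the fixed set of pseudo-headers it counts, can be seen in isolation in the following sketch of the header-field assembly (values are illustrative; hpack is golang.org/x/net/http2/hpack, which this file already imports):

package main

import (
	"fmt"

	"golang.org/x/net/http2/hpack"
)

// buildRequestHeaders mirrors the shape of createHeaderFields: HTTP/2
// pseudo-headers first, then the fixed gRPC request headers.
func buildRequestHeaders(scheme, authority, method string) []hpack.HeaderField {
	hf := make([]hpack.HeaderField, 0, 7) // pre-size, as the comment above suggests
	hf = append(hf,
		hpack.HeaderField{Name: ":method", Value: "POST"},
		hpack.HeaderField{Name: ":scheme", Value: scheme},
		hpack.HeaderField{Name: ":path", Value: method},
		hpack.HeaderField{Name: ":authority", Value: authority},
		hpack.HeaderField{Name: "content-type", Value: "application/grpc"},
		hpack.HeaderField{Name: "user-agent", Value: "example-client/0.1"},
		hpack.HeaderField{Name: "te", Value: "trailers"},
	)
	return hf
}

func main() {
	for _, f := range buildRequestHeaders("https", "example.com", "/pkg.Service/Method") {
		fmt.Printf("%s: %s\n", f.Name, f.Value)
	}
}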
+ hfLen := 7 // :method, :scheme, :path, :authority, content-type, user-agent, te + hfLen += len(authData) + len(callAuthData) + headerFields := make([]hpack.HeaderField, 0, hfLen) + headerFields = append(headerFields, hpack.HeaderField{Name: ":method", Value: "POST"}) + headerFields = append(headerFields, hpack.HeaderField{Name: ":scheme", Value: t.scheme}) + headerFields = append(headerFields, hpack.HeaderField{Name: ":path", Value: callHdr.Method}) + headerFields = append(headerFields, hpack.HeaderField{Name: ":authority", Value: callHdr.Host}) + headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: contentType(callHdr.ContentSubtype)}) + headerFields = append(headerFields, hpack.HeaderField{Name: "user-agent", Value: t.userAgent}) + headerFields = append(headerFields, hpack.HeaderField{Name: "te", Value: "trailers"}) + if callHdr.PreviousAttempts > 0 { + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-previous-rpc-attempts", Value: strconv.Itoa(callHdr.PreviousAttempts)}) + } + + if callHdr.SendCompress != "" { + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: callHdr.SendCompress}) + } + if dl, ok := ctx.Deadline(); ok { + // Send out timeout regardless its value. The server can detect timeout context by itself. + // TODO(mmukhi): Perhaps this field should be updated when actually writing out to the wire. + timeout := time.Until(dl) + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-timeout", Value: encodeTimeout(timeout)}) + } + for k, v := range authData { + headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) + } + for k, v := range callAuthData { + headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) + } + if b := stats.OutgoingTags(ctx); b != nil { + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-tags-bin", Value: encodeBinHeader(b)}) + } + if b := stats.OutgoingTrace(ctx); b != nil { + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-trace-bin", Value: encodeBinHeader(b)}) + } + + if md, added, ok := metadata.FromOutgoingContextRaw(ctx); ok { + var k string + for k, vv := range md { + // HTTP doesn't allow you to set pseudoheaders after non pseudoheaders were set. + if isReservedHeader(k) { + continue + } + for _, v := range vv { + headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) + } + } + for _, vv := range added { + for i, v := range vv { + if i%2 == 0 { + k = v + continue + } + // HTTP doesn't allow you to set pseudoheaders after non pseudoheaders were set. + if isReservedHeader(k) { + continue + } + headerFields = append(headerFields, hpack.HeaderField{Name: strings.ToLower(k), Value: encodeMetadataHeader(k, v)}) + } + } + } + if md, ok := t.md.(*metadata.MD); ok { + for k, vv := range *md { + if isReservedHeader(k) { + continue + } + for _, v := range vv { + headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) + } + } + } + return headerFields, nil +} + +func (t *http2Client) createAudience(callHdr *CallHdr) string { + // Create an audience string only if needed. + if len(t.perRPCCreds) == 0 && callHdr.Creds == nil { + return "" + } + // Construct URI required to get auth request metadata. + // Omit port if it is the default one. 
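The audience derivation that the comments above introduce continues in the vendored lines below; as a freestanding version with a sample input (the audience function name is hypothetical):

package main

import (
	"fmt"
	"strings"
)

// audience strips a default :443 port from the host and drops the
// method name from the path, keeping only the service portion.
func audience(host, method string) string {
	host = strings.TrimSuffix(host, ":443")
	pos := strings.LastIndex(method, "/")
	if pos == -1 {
		pos = len(method)
	}
	return "https://" + host + method[:pos]
}

func main() {
	// Prints "https://example.com/pkg.Service".
	fmt.Println(audience("example.com:443", "/pkg.Service/Method"))
}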
+ host := strings.TrimSuffix(callHdr.Host, ":443") + pos := strings.LastIndex(callHdr.Method, "/") + if pos == -1 { + pos = len(callHdr.Method) + } + return "https://" + host + callHdr.Method[:pos] +} + +func (t *http2Client) getTrAuthData(ctx context.Context, audience string) (map[string]string, error) { + authData := map[string]string{} + for _, c := range t.perRPCCreds { + data, err := c.GetRequestMetadata(ctx, audience) + if err != nil { + if _, ok := status.FromError(err); ok { + return nil, err + } + + return nil, status.Errorf(codes.Unauthenticated, "transport: %v", err) + } + for k, v := range data { + // Capital header names are illegal in HTTP/2. + k = strings.ToLower(k) + authData[k] = v + } + } + return authData, nil +} + +func (t *http2Client) getCallAuthData(ctx context.Context, audience string, callHdr *CallHdr) (map[string]string, error) { + callAuthData := map[string]string{} + // Check if credentials.PerRPCCredentials were provided via call options. + // Note: if these credentials are provided both via dial options and call + // options, then both sets of credentials will be applied. + if callCreds := callHdr.Creds; callCreds != nil { + if !t.isSecure && callCreds.RequireTransportSecurity() { + return nil, status.Error(codes.Unauthenticated, "transport: cannot send secure credentials on an insecure connection") + } + data, err := callCreds.GetRequestMetadata(ctx, audience) + if err != nil { + return nil, status.Errorf(codes.Internal, "transport: %v", err) + } + for k, v := range data { + // Capital header names are illegal in HTTP/2 + k = strings.ToLower(k) + callAuthData[k] = v + } + } + return callAuthData, nil +} + +// NewStream creates a stream and registers it into the transport as "active" +// streams. +func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Stream, err error) { + ctx = peer.NewContext(ctx, t.getPeer()) + headerFields, err := t.createHeaderFields(ctx, callHdr) + if err != nil { + return nil, err + } + s := t.newStream(ctx, callHdr) + cleanup := func(err error) { + if s.swapState(streamDone) == streamDone { + // If it was already done, return. + return + } + // The stream was unprocessed by the server. + atomic.StoreUint32(&s.unprocessed, 1) + s.write(recvMsg{err: err}) + close(s.done) + // If headerChan isn't closed, then close it. + if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) { + close(s.headerChan) + } + + } + hdr := &headerFrame{ + hf: headerFields, + endStream: false, + initStream: func(id uint32) (bool, error) { + t.mu.Lock() + if state := t.state; state != reachable { + t.mu.Unlock() + // Do a quick cleanup. + err := error(errStreamDrain) + if state == closing { + err = ErrConnClosing + } + cleanup(err) + return false, err + } + t.activeStreams[id] = s + if channelz.IsOn() { + atomic.AddInt64(&t.czData.streamsStarted, 1) + atomic.StoreInt64(&t.czData.lastStreamCreatedTime, time.Now().UnixNano()) + } + var sendPing bool + // If the number of active streams change from 0 to 1, then check if keepalive + // has gone dormant. If so, wake it up. + if len(t.activeStreams) == 1 && t.keepaliveEnabled { + select { + case t.awakenKeepalive <- struct{}{}: + sendPing = true + // Fill the awakenKeepalive channel again as this channel must be + // kept non-writable except at the point that the keepalive() + // goroutine is waiting either to be awaken or shutdown. 
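The refill described in the comment above happens on the vendored line that follows. The underlying idiom, a 1-buffered channel kept full to mean "nobody is waiting" and drained to arm a wakeup, can be sketched on its own (all names here are illustrative):

package main

import "fmt"

func main() {
	// A 1-buffered channel used as a latch, like awakenKeepalive:
	// keeping it full means "not waiting"; draining it arms the wakeup.
	awaken := make(chan struct{}, 1)
	awaken <- struct{}{} // start full: no one is waiting

	<-awaken // waiter drains the latch before sleeping
	done := make(chan struct{})
	go func() {
		awaken <- struct{}{} // waker: this send succeeds only while someone waits
		close(done)
	}()
	<-awaken // waiter wakes up
	<-done
	fmt.Println("woken")
}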
+ t.awakenKeepalive <- struct{}{} + default: + } + } + t.mu.Unlock() + return sendPing, nil + }, + onOrphaned: cleanup, + wq: s.wq, + } + firstTry := true + var ch chan struct{} + checkForStreamQuota := func(it interface{}) bool { + if t.streamQuota <= 0 { // Can go negative if server decreases it. + if firstTry { + t.waitingStreams++ + } + ch = t.streamsQuotaAvailable + return false + } + if !firstTry { + t.waitingStreams-- + } + t.streamQuota-- + h := it.(*headerFrame) + h.streamID = t.nextID + t.nextID += 2 + s.id = h.streamID + s.fc = &inFlow{limit: uint32(t.initialWindowSize)} + if t.streamQuota > 0 && t.waitingStreams > 0 { + select { + case t.streamsQuotaAvailable <- struct{}{}: + default: + } + } + return true + } + var hdrListSizeErr error + checkForHeaderListSize := func(it interface{}) bool { + if t.maxSendHeaderListSize == nil { + return true + } + hdrFrame := it.(*headerFrame) + var sz int64 + for _, f := range hdrFrame.hf { + if sz += int64(f.Size()); sz > int64(*t.maxSendHeaderListSize) { + hdrListSizeErr = status.Errorf(codes.Internal, "header list size to send violates the maximum size (%d bytes) set by server", *t.maxSendHeaderListSize) + return false + } + } + return true + } + for { + success, err := t.controlBuf.executeAndPut(func(it interface{}) bool { + if !checkForStreamQuota(it) { + return false + } + if !checkForHeaderListSize(it) { + return false + } + return true + }, hdr) + if err != nil { + return nil, err + } + if success { + break + } + if hdrListSizeErr != nil { + return nil, hdrListSizeErr + } + firstTry = false + select { + case <-ch: + case <-s.ctx.Done(): + return nil, ContextErr(s.ctx.Err()) + case <-t.goAway: + return nil, errStreamDrain + case <-t.ctx.Done(): + return nil, ErrConnClosing + } + } + if t.statsHandler != nil { + outHeader := &stats.OutHeader{ + Client: true, + FullMethod: callHdr.Method, + RemoteAddr: t.remoteAddr, + LocalAddr: t.localAddr, + Compression: callHdr.SendCompress, + } + t.statsHandler.HandleRPC(s.ctx, outHeader) + } + return s, nil +} + +// CloseStream clears the footprint of a stream when the stream is not needed any more. +// This must not be executed in reader's goroutine. +func (t *http2Client) CloseStream(s *Stream, err error) { + var ( + rst bool + rstCode http2.ErrCode + ) + if err != nil { + rst = true + rstCode = http2.ErrCodeCancel + } + t.closeStream(s, err, rst, rstCode, status.Convert(err), nil, false) +} + +func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2.ErrCode, st *status.Status, mdata map[string][]string, eosReceived bool) { + // Set stream status to done. + if s.swapState(streamDone) == streamDone { + // If it was already done, return. If multiple closeStream calls + // happen simultaneously, wait for the first to finish. + <-s.done + return + } + // status and trailers can be updated here without any synchronization because the stream goroutine will + // only read it after it sees an io.EOF error from read or write and we'll write those errors + // only after updating this. + s.status = st + if len(mdata) > 0 { + s.trailer = mdata + } + if err != nil { + // This will unblock reads eventually. + s.write(recvMsg{err: err}) + } + // If headerChan isn't closed, then close it. 
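The vendored line below guards close(s.headerChan) with a compare-and-swap so that concurrent closers cannot close the channel twice. The idiom in isolation (closeOnce is an illustrative name):

package main

import (
	"fmt"
	"sync/atomic"
)

// closeOnce closes ch at most once, guarded by a CAS on a flag word,
// the same idiom applied to headerChan below.
func closeOnce(flag *uint32, ch chan struct{}) {
	if atomic.CompareAndSwapUint32(flag, 0, 1) {
		close(ch)
	}
}

func main() {
	var closed uint32
	ch := make(chan struct{})
	closeOnce(&closed, ch)
	closeOnce(&closed, ch) // safe: the CAS fails the second time
	_, ok := <-ch
	fmt.Println(ok) // false: the channel is closed
}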
+ if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) { + s.noHeaders = true + close(s.headerChan) + } + cleanup := &cleanupStream{ + streamID: s.id, + onWrite: func() { + t.mu.Lock() + if t.activeStreams != nil { + delete(t.activeStreams, s.id) + } + t.mu.Unlock() + if channelz.IsOn() { + if eosReceived { + atomic.AddInt64(&t.czData.streamsSucceeded, 1) + } else { + atomic.AddInt64(&t.czData.streamsFailed, 1) + } + } + }, + rst: rst, + rstCode: rstCode, + } + addBackStreamQuota := func(interface{}) bool { + t.streamQuota++ + if t.streamQuota > 0 && t.waitingStreams > 0 { + select { + case t.streamsQuotaAvailable <- struct{}{}: + default: + } + } + return true + } + t.controlBuf.executeAndPut(addBackStreamQuota, cleanup) + // This will unblock write. + close(s.done) +} + +// Close kicks off the shutdown process of the transport. This should be called +// only once on a transport. Once it is called, the transport should not be +// accessed any more. +// +// This method blocks until the addrConn that initiated this transport is +// re-connected. This happens because t.onClose() begins reconnect logic at the +// addrConn level and blocks until the addrConn is successfully connected. +func (t *http2Client) Close() error { + t.mu.Lock() + // Make sure we only Close once. + if t.state == closing { + t.mu.Unlock() + return nil + } + t.state = closing + streams := t.activeStreams + t.activeStreams = nil + t.mu.Unlock() + t.controlBuf.finish() + t.cancel() + err := t.conn.Close() + if channelz.IsOn() { + channelz.RemoveEntry(t.channelzID) + } + // Notify all active streams. + for _, s := range streams { + t.closeStream(s, ErrConnClosing, false, http2.ErrCodeNo, status.New(codes.Unavailable, ErrConnClosing.Desc), nil, false) + } + if t.statsHandler != nil { + connEnd := &stats.ConnEnd{ + Client: true, + } + t.statsHandler.HandleConn(t.ctx, connEnd) + } + t.onClose() + return err +} + +// GracefulClose sets the state to draining, which prevents new streams from +// being created and causes the transport to be closed when the last active +// stream is closed. If there are no active streams, the transport is closed +// immediately. This does nothing if the transport is already draining or +// closing. +func (t *http2Client) GracefulClose() { + t.mu.Lock() + // Make sure we move to draining only from active. + if t.state == draining || t.state == closing { + t.mu.Unlock() + return + } + t.state = draining + active := len(t.activeStreams) + t.mu.Unlock() + if active == 0 { + t.Close() + return + } + t.controlBuf.put(&incomingGoAway{}) +} + +// Write formats the data into HTTP2 data frame(s) and sends it out. The caller +// should proceed only if Write returns nil. +func (t *http2Client) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { + if opts.Last { + // If it's the last message, update stream state. + if !s.compareAndSwapState(streamActive, streamWriteDone) { + return errStreamDone + } + } else if s.getState() != streamActive { + return errStreamDone + } + df := &dataFrame{ + streamID: s.id, + endStream: opts.Last, + } + if hdr != nil || data != nil { // If it's not an empty data frame. + // Add some data to grpc message header so that we can equally + // distribute bytes across frames. + emptyLen := http2MaxFrameLen - len(hdr) + if emptyLen > len(data) { + emptyLen = len(data) + } + hdr = append(hdr, data[:emptyLen]...) + data = data[emptyLen:] + df.h, df.d = hdr, data + // TODO(mmukhi): The above logic in this if can be moved to loopyWriter's data handler. 
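The split performed in Write above moves just enough payload into the gRPC message-header slice that the first HTTP/2 DATA frame comes out full. As a pure function with sample sizes (splitForFirstFrame is an illustrative name; 16384 matches the http2MaxFrameLen constant this package uses):

package main

import "fmt"

const http2MaxFrameLen = 16384 // default HTTP/2 max frame size

// splitForFirstFrame moves just enough of data into hdr that the first
// DATA frame is filled evenly, mirroring the logic in Write above.
// Note the returned first slice may alias hdr's backing array.
func splitForFirstFrame(hdr, data []byte) (first, rest []byte) {
	n := http2MaxFrameLen - len(hdr)
	if n > len(data) {
		n = len(data)
	}
	return append(hdr, data[:n]...), data[n:]
}

func main() {
	hdr := make([]byte, 5)      // gRPC message header
	data := make([]byte, 20000) // payload larger than one frame
	first, rest := splitForFirstFrame(hdr, data)
	fmt.Println(len(first), len(rest)) // 16384 and 3621
}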
+ if err := s.wq.get(int32(len(hdr) + len(data))); err != nil { + return err + } + } + return t.controlBuf.put(df) +} + +func (t *http2Client) getStream(f http2.Frame) (*Stream, bool) { + t.mu.Lock() + defer t.mu.Unlock() + s, ok := t.activeStreams[f.Header().StreamID] + return s, ok +} + +// adjustWindow sends out extra window update over the initial window size +// of stream if the application is requesting data larger in size than +// the window. +func (t *http2Client) adjustWindow(s *Stream, n uint32) { + if w := s.fc.maybeAdjust(n); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w}) + } +} + +// updateWindow adjusts the inbound quota for the stream. +// Window updates will be sent out when the cumulative quota +// exceeds the corresponding threshold. +func (t *http2Client) updateWindow(s *Stream, n uint32) { + if w := s.fc.onRead(n); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w}) + } +} + +// updateFlowControl updates the incoming flow control windows +// for the transport and the stream based on the current bdp +// estimation. +func (t *http2Client) updateFlowControl(n uint32) { + t.mu.Lock() + for _, s := range t.activeStreams { + s.fc.newLimit(n) + } + t.mu.Unlock() + updateIWS := func(interface{}) bool { + t.initialWindowSize = int32(n) + return true + } + t.controlBuf.executeAndPut(updateIWS, &outgoingWindowUpdate{streamID: 0, increment: t.fc.newLimit(n)}) + t.controlBuf.put(&outgoingSettings{ + ss: []http2.Setting{ + { + ID: http2.SettingInitialWindowSize, + Val: n, + }, + }, + }) +} + +func (t *http2Client) handleData(f *http2.DataFrame) { + size := f.Header().Length + var sendBDPPing bool + if t.bdpEst != nil { + sendBDPPing = t.bdpEst.add(size) + } + // Decouple connection's flow control from application's read. + // An update on connection's flow control should not depend on + // whether user application has read the data or not. Such a + // restriction is already imposed on the stream's flow control, + // and therefore the sender will be blocked anyways. + // Decoupling the connection flow control will prevent other + // active(fast) streams from starving in presence of slow or + // inactive streams. + // + if w := t.fc.onData(size); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{ + streamID: 0, + increment: w, + }) + } + if sendBDPPing { + // Avoid excessive ping detection (e.g. in an L7 proxy) + // by sending a window update prior to the BDP ping. + + if w := t.fc.reset(); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{ + streamID: 0, + increment: w, + }) + } + + t.controlBuf.put(bdpPing) + } + // Select the right stream to dispatch. + s, ok := t.getStream(f) + if !ok { + return + } + if size > 0 { + if err := s.fc.onData(size); err != nil { + t.closeStream(s, io.EOF, true, http2.ErrCodeFlowControl, status.New(codes.Internal, err.Error()), nil, false) + return + } + if f.Header().Flags.Has(http2.FlagDataPadded) { + if w := s.fc.onRead(size - uint32(len(f.Data()))); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{s.id, w}) + } + } + // TODO(bradfitz, zhaoq): A copy is required here because there is no + // guarantee f.Data() is consumed before the arrival of next frame. + // Can this copy be eliminated? + if len(f.Data()) > 0 { + buffer := t.bufferPool.get() + buffer.Reset() + buffer.Write(f.Data()) + s.write(recvMsg{buffer: buffer}) + } + } + // The server has closed the stream without sending trailers. Record that + // the read direction is closed, and set the status appropriately. 
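Both trInFlow.onData earlier in this patch and the updateWindow/onRead paths used above share one heuristic: acknowledge received bytes with a WINDOW_UPDATE only once a quarter of the window is pending, to keep update frames infrequent. A reduced sketch (ackWindow is an illustrative name):

package main

import "fmt"

// ackWindow accumulates received bytes and releases a window update
// once a quarter of the limit is pending, like trInFlow.onData.
type ackWindow struct {
	limit   uint32
	unacked uint32
}

func (a *ackWindow) onData(n uint32) (update uint32) {
	a.unacked += n
	if a.unacked >= a.limit/4 {
		update = a.unacked
		a.unacked = 0
	}
	return update
}

func main() {
	a := &ackWindow{limit: 64 * 1024}
	fmt.Println(a.onData(8 * 1024)) // 0: below the 16 KiB threshold
	fmt.Println(a.onData(8 * 1024)) // 16384: threshold reached, send the update
}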
+ if f.FrameHeader.Flags.Has(http2.FlagDataEndStream) { + t.closeStream(s, io.EOF, false, http2.ErrCodeNo, status.New(codes.Internal, "server closed the stream without sending trailers"), nil, true) + } +} + +func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) { + s, ok := t.getStream(f) + if !ok { + return + } + if f.ErrCode == http2.ErrCodeRefusedStream { + // The stream was unprocessed by the server. + atomic.StoreUint32(&s.unprocessed, 1) + } + statusCode, ok := http2ErrConvTab[f.ErrCode] + if !ok { + warningf("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error %v", f.ErrCode) + statusCode = codes.Unknown + } + if statusCode == codes.Canceled { + // Our deadline was already exceeded, and that was likely the cause of + // this cancelation. Alter the status code accordingly. + if d, ok := s.ctx.Deadline(); ok && d.After(time.Now()) { + statusCode = codes.DeadlineExceeded + } + } + t.closeStream(s, io.EOF, false, http2.ErrCodeNo, status.Newf(statusCode, "stream terminated by RST_STREAM with error code: %v", f.ErrCode), nil, false) +} + +func (t *http2Client) handleSettings(f *http2.SettingsFrame, isFirst bool) { + if f.IsAck() { + return + } + var maxStreams *uint32 + var ss []http2.Setting + var updateFuncs []func() + f.ForeachSetting(func(s http2.Setting) error { + switch s.ID { + case http2.SettingMaxConcurrentStreams: + maxStreams = new(uint32) + *maxStreams = s.Val + case http2.SettingMaxHeaderListSize: + updateFuncs = append(updateFuncs, func() { + t.maxSendHeaderListSize = new(uint32) + *t.maxSendHeaderListSize = s.Val + }) + default: + ss = append(ss, s) + } + return nil + }) + if isFirst && maxStreams == nil { + maxStreams = new(uint32) + *maxStreams = math.MaxUint32 + } + sf := &incomingSettings{ + ss: ss, + } + if maxStreams != nil { + updateStreamQuota := func() { + delta := int64(*maxStreams) - int64(t.maxConcurrentStreams) + t.maxConcurrentStreams = *maxStreams + t.streamQuota += delta + if delta > 0 && t.waitingStreams > 0 { + close(t.streamsQuotaAvailable) // wake all of them up. + t.streamsQuotaAvailable = make(chan struct{}, 1) + } + } + updateFuncs = append(updateFuncs, updateStreamQuota) + } + t.controlBuf.executeAndPut(func(interface{}) bool { + for _, f := range updateFuncs { + f() + } + return true + }, sf) +} + +func (t *http2Client) handlePing(f *http2.PingFrame) { + if f.IsAck() { + // Maybe it's a BDP ping. + if t.bdpEst != nil { + t.bdpEst.calculate(f.Data) + } + return + } + pingAck := &ping{ack: true} + copy(pingAck.data[:], f.Data[:]) + t.controlBuf.put(pingAck) +} + +func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { + t.mu.Lock() + if t.state == closing { + t.mu.Unlock() + return + } + if f.ErrCode == http2.ErrCodeEnhanceYourCalm { + infof("Client received GoAway with http2.ErrCodeEnhanceYourCalm.") + } + id := f.LastStreamID + if id > 0 && id%2 != 1 { + t.mu.Unlock() + t.Close() + return + } + // A client can receive multiple GoAways from the server (see + // https://github.com/grpc/grpc-go/issues/1387). The idea is that the first + // GoAway will be sent with an ID of MaxInt32 and the second GoAway will be + // sent after an RTT delay with the ID of the last stream the server will + // process. + // + // Therefore, when we get the first GoAway we don't necessarily close any + // streams. While in case of second GoAway we close all streams created after + // the GoAwayId. This way streams that were in-flight while the GoAway from + // server was being sent don't get killed. 
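The two-GOAWAY scheme described above boils down to a range computation: streams with IDs above the new GOAWAY's last-stream ID, up to the previous GOAWAY's ID (or all of them, the first time), are the ones to kill. A sketch of just that bookkeeping (killRange is an illustrative name; 2147483647 is MaxInt32, matching the first-GOAWAY convention described above):

package main

import "fmt"

// killRange reports the inclusive range of stream IDs to close after a
// GOAWAY carrying lastID, given the previous GOAWAY's ID (0 if none),
// mirroring the upperLimit bookkeeping below.
func killRange(prevID, lastID uint32) (lo, hi uint32) {
	hi = prevID
	if hi == 0 {
		hi = ^uint32(0) // first GOAWAY: kill everything above lastID
	}
	return lastID + 1, hi
}

func main() {
	lo, hi := killRange(0, 2147483647) // first GOAWAY: nothing above MaxInt32
	fmt.Println(lo, hi)
	lo, hi = killRange(2147483647, 7) // second GOAWAY: kill streams 8..MaxInt32
	fmt.Println(lo, hi)
}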
+ select { + case <-t.goAway: // t.goAway has been closed (i.e., multiple GoAways). + // If there are multiple GoAways the first one should always have an ID greater than the following ones. + if id > t.prevGoAwayID { + t.mu.Unlock() + t.Close() + return + } + default: + t.setGoAwayReason(f) + close(t.goAway) + t.state = draining + t.controlBuf.put(&incomingGoAway{}) + + // This has to be a new goroutine because we're still using the current goroutine to read in the transport. + t.onGoAway(t.goAwayReason) + } + // All streams with IDs greater than the GoAwayId + // and smaller than the previous GoAway ID should be killed. + upperLimit := t.prevGoAwayID + if upperLimit == 0 { // This is the first GoAway Frame. + upperLimit = math.MaxUint32 // Kill all streams after the GoAway ID. + } + for streamID, stream := range t.activeStreams { + if streamID > id && streamID <= upperLimit { + // The stream was unprocessed by the server. + atomic.StoreUint32(&stream.unprocessed, 1) + t.closeStream(stream, errStreamDrain, false, http2.ErrCodeNo, statusGoAway, nil, false) + } + } + t.prevGoAwayID = id + active := len(t.activeStreams) + t.mu.Unlock() + if active == 0 { + t.Close() + } +} + +// setGoAwayReason sets the value of t.goAwayReason based +// on the GoAway frame received. +// It expects a lock on transport's mutex to be held by +// the caller. +func (t *http2Client) setGoAwayReason(f *http2.GoAwayFrame) { + t.goAwayReason = GoAwayNoReason + switch f.ErrCode { + case http2.ErrCodeEnhanceYourCalm: + if string(f.DebugData()) == "too_many_pings" { + t.goAwayReason = GoAwayTooManyPings + } + } +} + +func (t *http2Client) GetGoAwayReason() GoAwayReason { + t.mu.Lock() + defer t.mu.Unlock() + return t.goAwayReason +} + +func (t *http2Client) handleWindowUpdate(f *http2.WindowUpdateFrame) { + t.controlBuf.put(&incomingWindowUpdate{ + streamID: f.Header().StreamID, + increment: f.Increment, + }) +} + +// operateHeaders takes action on the decoded headers. +func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { + s, ok := t.getStream(frame) + if !ok { + return + } + endStream := frame.StreamEnded() + atomic.StoreUint32(&s.bytesReceived, 1) + initialHeader := atomic.LoadUint32(&s.headerChanClosed) == 0 + + if !initialHeader && !endStream { + // As specified by gRPC over HTTP2, a HEADERS frame (and associated CONTINUATION frames) can only appear at the start or end of a stream. Therefore, second HEADERS frame must have EOS bit set. + st := status.New(codes.Internal, "a HEADERS frame cannot appear in the middle of a stream") + t.closeStream(s, st.Err(), true, http2.ErrCodeProtocol, st, nil, false) + return + } + + state := &decodeState{} + // Initialize isGRPC value to be !initialHeader, since if a gRPC Response-Headers has already been received, then it means that the peer is speaking gRPC and we are in gRPC mode.
+ state.data.isGRPC = !initialHeader + if err := state.decodeHeader(frame); err != nil { + t.closeStream(s, err, true, http2.ErrCodeProtocol, status.Convert(err), nil, endStream) + return + } + + isHeader := false + defer func() { + if t.statsHandler != nil { + if isHeader { + inHeader := &stats.InHeader{ + Client: true, + WireLength: int(frame.Header().Length), + } + t.statsHandler.HandleRPC(s.ctx, inHeader) + } else { + inTrailer := &stats.InTrailer{ + Client: true, + WireLength: int(frame.Header().Length), + } + t.statsHandler.HandleRPC(s.ctx, inTrailer) + } + } + }() + + // If headerChan hasn't been closed yet + if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) { + if !endStream { + // HEADERS frame block carries a Response-Headers. + isHeader = true + // These values can be set without any synchronization because + // stream goroutine will read it only after seeing a closed + // headerChan which we'll close after setting this. + s.recvCompress = state.data.encoding + if len(state.data.mdata) > 0 { + s.header = state.data.mdata + } + } else { + // HEADERS frame block carries a Trailers-Only. + s.noHeaders = true + } + close(s.headerChan) + } + + if !endStream { + return + } + + // if client received END_STREAM from server while stream was still active, send RST_STREAM + rst := s.getState() == streamActive + t.closeStream(s, io.EOF, rst, http2.ErrCodeNo, state.status(), state.data.mdata, true) +} + +// reader runs as a separate goroutine in charge of reading data from network +// connection. +// +// TODO(zhaoq): currently one reader per transport. Investigate whether this is +// optimal. +// TODO(zhaoq): Check the validity of the incoming frame sequence. +func (t *http2Client) reader() { + defer close(t.readerDone) + // Check the validity of server preface. + frame, err := t.framer.fr.ReadFrame() + if err != nil { + t.Close() // this kicks off resetTransport, so must be last before return + return + } + t.conn.SetReadDeadline(time.Time{}) // reset deadline once we get the settings frame (we didn't time out, yay!) + if t.keepaliveEnabled { + atomic.CompareAndSwapUint32(&t.activity, 0, 1) + } + sf, ok := frame.(*http2.SettingsFrame) + if !ok { + t.Close() // this kicks off resetTransport, so must be last before return + return + } + t.onPrefaceReceipt() + t.handleSettings(sf, true) + + // loop to keep reading incoming messages on this transport. + for { + frame, err := t.framer.fr.ReadFrame() + if t.keepaliveEnabled { + atomic.CompareAndSwapUint32(&t.activity, 0, 1) + } + if err != nil { + // Abort an active stream if the http2.Framer returns a + // http2.StreamError. This can happen only if the server's response + // is malformed http2. + if se, ok := err.(http2.StreamError); ok { + t.mu.Lock() + s := t.activeStreams[se.StreamID] + t.mu.Unlock() + if s != nil { + // use error detail to provide better err message + code := http2ErrConvTab[se.Code] + msg := t.framer.fr.ErrorDetail().Error() + t.closeStream(s, status.Error(code, msg), true, http2.ErrCodeProtocol, status.New(code, msg), nil, false) + } + continue + } else { + // Transport error. 
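The reader's frame-dispatch switch that the transport-error branch below belongs to has a simple shape: read a frame, then branch on its concrete type. A runnable reduction using the same golang.org/x/net/http2 Framer (the dispatch function and the self-fed buffer are illustrative):

package main

import (
	"bytes"
	"fmt"
	"io"

	"golang.org/x/net/http2"
)

// dispatch reads frames until an error and routes them by concrete
// type, the same shape as the reader loop in this file.
func dispatch(r io.Reader) error {
	fr := http2.NewFramer(io.Discard, r)
	for {
		frame, err := fr.ReadFrame()
		if err != nil {
			return err
		}
		switch f := frame.(type) {
		case *http2.SettingsFrame:
			fmt.Println("settings, ack:", f.IsAck())
		case *http2.PingFrame:
			fmt.Println("ping, ack:", f.IsAck())
		default:
			fmt.Printf("unhandled frame type %T\n", f)
		}
	}
}

func main() {
	// Write two frames into a buffer and read them back.
	var buf bytes.Buffer
	w := http2.NewFramer(&buf, nil)
	w.WriteSettings()
	w.WritePing(false, [8]byte{})
	fmt.Println(dispatch(&buf)) // prints both frames, then io.EOF
}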
+ t.Close() + return + } + } + switch frame := frame.(type) { + case *http2.MetaHeadersFrame: + t.operateHeaders(frame) + case *http2.DataFrame: + t.handleData(frame) + case *http2.RSTStreamFrame: + t.handleRSTStream(frame) + case *http2.SettingsFrame: + t.handleSettings(frame, false) + case *http2.PingFrame: + t.handlePing(frame) + case *http2.GoAwayFrame: + t.handleGoAway(frame) + case *http2.WindowUpdateFrame: + t.handleWindowUpdate(frame) + default: + errorf("transport: http2Client.reader got unhandled frame type %v.", frame) + } + } +} + +// keepalive running in a separate goroutine makes sure the connection is alive by sending pings. +func (t *http2Client) keepalive() { + p := &ping{data: [8]byte{}} + timer := time.NewTimer(t.kp.Time) + for { + select { + case <-timer.C: + if atomic.CompareAndSwapUint32(&t.activity, 1, 0) { + timer.Reset(t.kp.Time) + continue + } + // Check if keepalive should go dormant. + t.mu.Lock() + if len(t.activeStreams) < 1 && !t.kp.PermitWithoutStream { + // Make awakenKeepalive writable. + <-t.awakenKeepalive + t.mu.Unlock() + select { + case <-t.awakenKeepalive: + // If control gets here, a ping has been sent; + // the timer needs to be reset with keepalive.Timeout. + case <-t.ctx.Done(): + return + } + } else { + t.mu.Unlock() + if channelz.IsOn() { + atomic.AddInt64(&t.czData.kpCount, 1) + } + // Send ping. + t.controlBuf.put(p) + } + + // By the time control gets here a ping has been sent one way or the other. + timer.Reset(t.kp.Timeout) + select { + case <-timer.C: + if atomic.CompareAndSwapUint32(&t.activity, 1, 0) { + timer.Reset(t.kp.Time) + continue + } + t.Close() + return + case <-t.ctx.Done(): + if !timer.Stop() { + <-timer.C + } + return + } + case <-t.ctx.Done(): + if !timer.Stop() { + <-timer.C + } + return + } + } +} + +func (t *http2Client) Error() <-chan struct{} { + return t.ctx.Done() +} + +func (t *http2Client) GoAway() <-chan struct{} { + return t.goAway +} + +func (t *http2Client) ChannelzMetric() *channelz.SocketInternalMetric { + s := channelz.SocketInternalMetric{ + StreamsStarted: atomic.LoadInt64(&t.czData.streamsStarted), + StreamsSucceeded: atomic.LoadInt64(&t.czData.streamsSucceeded), + StreamsFailed: atomic.LoadInt64(&t.czData.streamsFailed), + MessagesSent: atomic.LoadInt64(&t.czData.msgSent), + MessagesReceived: atomic.LoadInt64(&t.czData.msgRecv), + KeepAlivesSent: atomic.LoadInt64(&t.czData.kpCount), + LastLocalStreamCreatedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastStreamCreatedTime)), + LastMessageSentTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgSentTime)), + LastMessageReceivedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgRecvTime)), + LocalFlowControlWindow: int64(t.fc.getSize()), + SocketOptions: channelz.GetSocketOption(t.conn), + LocalAddr: t.localAddr, + RemoteAddr: t.remoteAddr, + // RemoteName : + } + if au, ok := t.authInfo.(credentials.ChannelzSecurityInfo); ok { + s.Security = au.GetSecurityValue() + } + s.RemoteFlowControlWindow = t.getOutFlowWindow() + return &s +} + +func (t *http2Client) RemoteAddr() net.Addr { return t.remoteAddr } + +func (t *http2Client) IncrMsgSent() { + atomic.AddInt64(&t.czData.msgSent, 1) + atomic.StoreInt64(&t.czData.lastMsgSentTime, time.Now().UnixNano()) +} + +func (t *http2Client) IncrMsgRecv() { + atomic.AddInt64(&t.czData.msgRecv, 1) + atomic.StoreInt64(&t.czData.lastMsgRecvTime, time.Now().UnixNano()) +} + +func (t *http2Client) getOutFlowWindow() int64 { + resp := make(chan uint32, 1) + timer := time.NewTimer(time.Second) + defer timer.Stop() + t.controlBuf.put(&outFlowControlSizeRequest{resp}) + select { + case sz := <-resp: + return int64(sz) + case <-t.ctxDone: + return -1 + case <-timer.C: + return -2 + } +} diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go new file mode 100644 index 000000000..150b73e46 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -0,0 +1,1219 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package transport + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "math" + "net" + "strconv" + "sync" + "sync/atomic" + "time" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/http2" + "golang.org/x/net/http2/hpack" + + spb "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/grpcrand" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" + "google.golang.org/grpc/tap" +) + +var ( + // ErrIllegalHeaderWrite indicates that setting header is illegal because of + // the stream's state. + ErrIllegalHeaderWrite = errors.New("transport: the stream is done or WriteHeader was already called") + // ErrHeaderListSizeLimitViolation indicates that the header list size is larger + // than the limit set by peer. + ErrHeaderListSizeLimitViolation = errors.New("transport: trying to send header list size larger than the limit set by peer") + // statusRawProto is a function to get to the raw status proto wrapped in a + // status.Status without a proto.Clone(). + statusRawProto = internal.StatusRawProto.(func(*status.Status) *spb.Status) +) + +// http2Server implements the ServerTransport interface with HTTP2. +type http2Server struct { + ctx context.Context + ctxDone <-chan struct{} // Cache the context.Done() chan + cancel context.CancelFunc + conn net.Conn + loopy *loopyWriter + readerDone chan struct{} // sync point to enable testing. + writerDone chan struct{} // sync point to enable testing. + remoteAddr net.Addr + localAddr net.Addr + maxStreamID uint32 // max stream ID ever seen + authInfo credentials.AuthInfo // auth info about the connection + inTapHandle tap.ServerInHandle + framer *framer + // The max number of concurrent streams. + maxStreams uint32 + // controlBuf delivers all the control related tasks (e.g., window + // updates, reset streams, and various settings) to the controller. + controlBuf *controlBuffer + fc *trInFlow + stats stats.Handler + // Flag to keep track of reading activity on transport. + // 1 is true and 0 is false. + activity uint32 // Accessed atomically. + // Keepalive and max-age parameters for the server.
+ kp keepalive.ServerParameters + + // Keepalive enforcement policy. + kep keepalive.EnforcementPolicy + // The time instance last ping was received. + lastPingAt time.Time + // Number of times the client has violated keepalive ping policy so far. + pingStrikes uint8 + // Flag to signify that number of ping strikes should be reset to 0. + // This is set whenever data or header frames are sent. + // 1 means yes. + resetPingStrikes uint32 // Accessed atomically. + initialWindowSize int32 + bdpEst *bdpEstimator + maxSendHeaderListSize *uint32 + + mu sync.Mutex // guard the following + + // drainChan is initialized when drain(...) is called the first time. + // After which the server writes out the first GoAway(with ID 2^31-1) frame. + // Then an independent goroutine will be launched to later send the second GoAway. + // During this time we don't want to write another first GoAway(with ID 2^31 -1) frame. + // Thus call to drain(...) will be a no-op if drainChan is already initialized since draining is + // already underway. + drainChan chan struct{} + state transportState + activeStreams map[uint32]*Stream + // idle is the time instant when the connection went idle. + // This is either the beginning of the connection or when the number of + // RPCs go down to 0. + // When the connection is busy, this value is set to 0. + idle time.Time + + // Fields below are for channelz metric collection. + channelzID int64 // channelz unique identification number + czData *channelzData + bufferPool *bufferPool +} + +// newHTTP2Server constructs a ServerTransport based on HTTP2. ConnectionError is +// returned if something goes wrong. +func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err error) { + writeBufSize := config.WriteBufferSize + readBufSize := config.ReadBufferSize + maxHeaderListSize := defaultServerMaxHeaderListSize + if config.MaxHeaderListSize != nil { + maxHeaderListSize = *config.MaxHeaderListSize + } + framer := newFramer(conn, writeBufSize, readBufSize, maxHeaderListSize) + // Send initial settings as connection preface to client. + var isettings []http2.Setting + // TODO(zhaoq): Have a better way to signal "no limit" because 0 is + // permitted in the HTTP2 spec. + maxStreams := config.MaxStreams + if maxStreams == 0 { + maxStreams = math.MaxUint32 + } else { + isettings = append(isettings, http2.Setting{ + ID: http2.SettingMaxConcurrentStreams, + Val: maxStreams, + }) + } + dynamicWindow := true + iwz := int32(initialWindowSize) + if config.InitialWindowSize >= defaultWindowSize { + iwz = config.InitialWindowSize + dynamicWindow = false + } + icwz := int32(initialWindowSize) + if config.InitialConnWindowSize >= defaultWindowSize { + icwz = config.InitialConnWindowSize + dynamicWindow = false + } + if iwz != defaultWindowSize { + isettings = append(isettings, http2.Setting{ + ID: http2.SettingInitialWindowSize, + Val: uint32(iwz)}) + } + if config.MaxHeaderListSize != nil { + isettings = append(isettings, http2.Setting{ + ID: http2.SettingMaxHeaderListSize, + Val: *config.MaxHeaderListSize, + }) + } + if err := framer.fr.WriteSettings(isettings...); err != nil { + return nil, connectionErrorf(false, err, "transport: %v", err) + } + // Adjust the connection flow control window if needed. 
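The window adjustment the comment above announces is carried out in the vendored line below: the HTTP/2 connection window starts at 65535 bytes, so raising it to the configured size requires a WINDOW_UPDATE for the difference. The arithmetic in isolation (connWindowDelta is an illustrative name; 65535 matches this package's defaultWindowSize):

package main

import "fmt"

const defaultWindowSize = 65535 // HTTP/2 initial connection window

// connWindowDelta computes the WINDOW_UPDATE increment needed to raise
// the connection window from the spec default to the configured size,
// mirroring the intent of the delta computation below.
func connWindowDelta(configured int32) uint32 {
	if d := configured - defaultWindowSize; d > 0 {
		return uint32(d)
	}
	return 0
}

func main() {
	fmt.Println(connWindowDelta(1 << 20)) // 983041 for a 1 MiB window
}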
+ if delta := uint32(icwz - defaultWindowSize); delta > 0 { + if err := framer.fr.WriteWindowUpdate(0, delta); err != nil { + return nil, connectionErrorf(false, err, "transport: %v", err) + } + } + kp := config.KeepaliveParams + if kp.MaxConnectionIdle == 0 { + kp.MaxConnectionIdle = defaultMaxConnectionIdle + } + if kp.MaxConnectionAge == 0 { + kp.MaxConnectionAge = defaultMaxConnectionAge + } + // Add a jitter to MaxConnectionAge. + kp.MaxConnectionAge += getJitter(kp.MaxConnectionAge) + if kp.MaxConnectionAgeGrace == 0 { + kp.MaxConnectionAgeGrace = defaultMaxConnectionAgeGrace + } + if kp.Time == 0 { + kp.Time = defaultServerKeepaliveTime + } + if kp.Timeout == 0 { + kp.Timeout = defaultServerKeepaliveTimeout + } + kep := config.KeepalivePolicy + if kep.MinTime == 0 { + kep.MinTime = defaultKeepalivePolicyMinTime + } + ctx, cancel := context.WithCancel(context.Background()) + t := &http2Server{ + ctx: ctx, + cancel: cancel, + ctxDone: ctx.Done(), + conn: conn, + remoteAddr: conn.RemoteAddr(), + localAddr: conn.LocalAddr(), + authInfo: config.AuthInfo, + framer: framer, + readerDone: make(chan struct{}), + writerDone: make(chan struct{}), + maxStreams: maxStreams, + inTapHandle: config.InTapHandle, + fc: &trInFlow{limit: uint32(icwz)}, + state: reachable, + activeStreams: make(map[uint32]*Stream), + stats: config.StatsHandler, + kp: kp, + idle: time.Now(), + kep: kep, + initialWindowSize: iwz, + czData: new(channelzData), + bufferPool: newBufferPool(), + } + t.controlBuf = newControlBuffer(t.ctxDone) + if dynamicWindow { + t.bdpEst = &bdpEstimator{ + bdp: initialWindowSize, + updateFlowControl: t.updateFlowControl, + } + } + if t.stats != nil { + t.ctx = t.stats.TagConn(t.ctx, &stats.ConnTagInfo{ + RemoteAddr: t.remoteAddr, + LocalAddr: t.localAddr, + }) + connBegin := &stats.ConnBegin{} + t.stats.HandleConn(t.ctx, connBegin) + } + if channelz.IsOn() { + t.channelzID = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.remoteAddr, t.localAddr)) + } + t.framer.writer.Flush() + + defer func() { + if err != nil { + t.Close() + } + }() + + // Check the validity of client preface. + preface := make([]byte, len(clientPreface)) + if _, err := io.ReadFull(t.conn, preface); err != nil { + return nil, connectionErrorf(false, err, "transport: http2Server.HandleStreams failed to receive the preface from client: %v", err) + } + if !bytes.Equal(preface, clientPreface) { + return nil, connectionErrorf(false, nil, "transport: http2Server.HandleStreams received bogus greeting from client: %q", preface) + } + + frame, err := t.framer.fr.ReadFrame() + if err == io.EOF || err == io.ErrUnexpectedEOF { + return nil, err + } + if err != nil { + return nil, connectionErrorf(false, err, "transport: http2Server.HandleStreams failed to read initial settings frame: %v", err) + } + atomic.StoreUint32(&t.activity, 1) + sf, ok := frame.(*http2.SettingsFrame) + if !ok { + return nil, connectionErrorf(false, nil, "transport: http2Server.HandleStreams saw invalid preface type %T from client", frame) + } + t.handleSettings(sf) + + go func() { + t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst) + t.loopy.ssGoAwayHandler = t.outgoingGoAwayHandler + if err := t.loopy.run(); err != nil { + errorf("transport: loopyWriter.run returning. Err: %v", err) + } + t.conn.Close() + close(t.writerDone) + }() + go t.keepalive() + return t, nil +} + +// operateHeader takes action on the decoded headers. 
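+// It returns true only for connection-fatal conditions (an illegal stream ID);
+// every other failure resets just the offending stream and returns false so the
+// connection keeps serving. (Editorial summary inferred from the body below.)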
+func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) (fatal bool) { + streamID := frame.Header().StreamID + state := &decodeState{ + serverSide: true, + } + if err := state.decodeHeader(frame); err != nil { + if se, ok := status.FromError(err); ok { + t.controlBuf.put(&cleanupStream{ + streamID: streamID, + rst: true, + rstCode: statusCodeConvTab[se.Code()], + onWrite: func() {}, + }) + } + return false + } + + buf := newRecvBuffer() + s := &Stream{ + id: streamID, + st: t, + buf: buf, + fc: &inFlow{limit: uint32(t.initialWindowSize)}, + recvCompress: state.data.encoding, + method: state.data.method, + contentSubtype: state.data.contentSubtype, + } + if frame.StreamEnded() { + // s is just created by the caller. No lock needed. + s.state = streamReadDone + } + if state.data.timeoutSet { + s.ctx, s.cancel = context.WithTimeout(t.ctx, state.data.timeout) + } else { + s.ctx, s.cancel = context.WithCancel(t.ctx) + } + pr := &peer.Peer{ + Addr: t.remoteAddr, + } + // Attach Auth info if there is any. + if t.authInfo != nil { + pr.AuthInfo = t.authInfo + } + s.ctx = peer.NewContext(s.ctx, pr) + // Attach the received metadata to the context. + if len(state.data.mdata) > 0 { + s.ctx = metadata.NewIncomingContext(s.ctx, state.data.mdata) + } + if state.data.statsTags != nil { + s.ctx = stats.SetIncomingTags(s.ctx, state.data.statsTags) + } + if state.data.statsTrace != nil { + s.ctx = stats.SetIncomingTrace(s.ctx, state.data.statsTrace) + } + if t.inTapHandle != nil { + var err error + info := &tap.Info{ + FullMethodName: state.data.method, + } + s.ctx, err = t.inTapHandle(s.ctx, info) + if err != nil { + warningf("transport: http2Server.operateHeaders got an error from InTapHandle: %v", err) + t.controlBuf.put(&cleanupStream{ + streamID: s.id, + rst: true, + rstCode: http2.ErrCodeRefusedStream, + onWrite: func() {}, + }) + return false + } + } + t.mu.Lock() + if t.state != reachable { + t.mu.Unlock() + return false + } + if uint32(len(t.activeStreams)) >= t.maxStreams { + t.mu.Unlock() + t.controlBuf.put(&cleanupStream{ + streamID: streamID, + rst: true, + rstCode: http2.ErrCodeRefusedStream, + onWrite: func() {}, + }) + return false + } + if streamID%2 != 1 || streamID <= t.maxStreamID { + t.mu.Unlock() + // illegal gRPC stream id. + errorf("transport: http2Server.HandleStreams received an illegal stream id: %v", streamID) + return true + } + t.maxStreamID = streamID + t.activeStreams[streamID] = s + if len(t.activeStreams) == 1 { + t.idle = time.Time{} + } + t.mu.Unlock() + if channelz.IsOn() { + atomic.AddInt64(&t.czData.streamsStarted, 1) + atomic.StoreInt64(&t.czData.lastStreamCreatedTime, time.Now().UnixNano()) + } + s.requestRead = func(n int) { + t.adjustWindow(s, uint32(n)) + } + s.ctx = traceCtx(s.ctx, s.method) + if t.stats != nil { + s.ctx = t.stats.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method}) + inHeader := &stats.InHeader{ + FullMethod: s.method, + RemoteAddr: t.remoteAddr, + LocalAddr: t.localAddr, + Compression: s.recvCompress, + WireLength: int(frame.Header().Length), + } + t.stats.HandleRPC(s.ctx, inHeader) + } + s.ctxDone = s.ctx.Done() + s.wq = newWriteQuota(defaultWriteQuota, s.ctxDone) + s.trReader = &transportReader{ + reader: &recvBufferReader{ + ctx: s.ctx, + ctxDone: s.ctxDone, + recv: s.buf, + freeBuffer: t.bufferPool.put, + }, + windowHandler: func(n int) { + t.updateWindow(s, uint32(n)) + }, + } + // Register the stream with loopy. 
+	t.controlBuf.put(&registerStream{
+		streamID: s.id,
+		wq:       s.wq,
+	})
+	handle(s)
+	return false
+}
+
+// HandleStreams receives incoming streams using the given handler. This is
+// typically run in a separate goroutine.
+// traceCtx attaches trace to ctx and returns the new context.
+func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.Context, string) context.Context) {
+	defer close(t.readerDone)
+	for {
+		frame, err := t.framer.fr.ReadFrame()
+		atomic.StoreUint32(&t.activity, 1)
+		if err != nil {
+			if se, ok := err.(http2.StreamError); ok {
+				warningf("transport: http2Server.HandleStreams encountered http2.StreamError: %v", se)
+				t.mu.Lock()
+				s := t.activeStreams[se.StreamID]
+				t.mu.Unlock()
+				if s != nil {
+					t.closeStream(s, true, se.Code, false)
+				} else {
+					t.controlBuf.put(&cleanupStream{
+						streamID: se.StreamID,
+						rst:      true,
+						rstCode:  se.Code,
+						onWrite:  func() {},
+					})
+				}
+				continue
+			}
+			if err == io.EOF || err == io.ErrUnexpectedEOF {
+				t.Close()
+				return
+			}
+			warningf("transport: http2Server.HandleStreams failed to read frame: %v", err)
+			t.Close()
+			return
+		}
+		switch frame := frame.(type) {
+		case *http2.MetaHeadersFrame:
+			if t.operateHeaders(frame, handle, traceCtx) {
+				t.Close()
+				break
+			}
+		case *http2.DataFrame:
+			t.handleData(frame)
+		case *http2.RSTStreamFrame:
+			t.handleRSTStream(frame)
+		case *http2.SettingsFrame:
+			t.handleSettings(frame)
+		case *http2.PingFrame:
+			t.handlePing(frame)
+		case *http2.WindowUpdateFrame:
+			t.handleWindowUpdate(frame)
+		case *http2.GoAwayFrame:
+			// TODO: Handle GoAway from the client appropriately.
+		default:
+			errorf("transport: http2Server.HandleStreams found unhandled frame type %v.", frame)
+		}
+	}
+}
+
+func (t *http2Server) getStream(f http2.Frame) (*Stream, bool) {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+	if t.activeStreams == nil {
+		// The transport is closing.
+		return nil, false
+	}
+	s, ok := t.activeStreams[f.Header().StreamID]
+	if !ok {
+		// The stream is already done.
+		return nil, false
+	}
+	return s, true
+}
+
+// adjustWindow sends an extra window update beyond the stream's initial window
+// size if the application is requesting more data than the window allows.
+func (t *http2Server) adjustWindow(s *Stream, n uint32) {
+	if w := s.fc.maybeAdjust(n); w > 0 {
+		t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w})
+	}
+}
+
+// updateWindow adjusts the inbound quota for the stream and the transport.
+// Window updates are delivered to the controller for sending when
+// the cumulative quota exceeds the corresponding threshold.
+func (t *http2Server) updateWindow(s *Stream, n uint32) {
+	if w := s.fc.onRead(n); w > 0 {
+		t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id,
+			increment: w,
+		})
+	}
+}
+
+// updateFlowControl updates the incoming flow control windows
+// for the transport and the stream based on the current bdp
+// estimation.
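+// (Editorial note: n is the window produced by the BDP estimator; the same
+// limit is applied to every active stream, to the transport-level window, and
+// is advertised to the peer via SETTINGS_INITIAL_WINDOW_SIZE below.)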
+func (t *http2Server) updateFlowControl(n uint32) { + t.mu.Lock() + for _, s := range t.activeStreams { + s.fc.newLimit(n) + } + t.initialWindowSize = int32(n) + t.mu.Unlock() + t.controlBuf.put(&outgoingWindowUpdate{ + streamID: 0, + increment: t.fc.newLimit(n), + }) + t.controlBuf.put(&outgoingSettings{ + ss: []http2.Setting{ + { + ID: http2.SettingInitialWindowSize, + Val: n, + }, + }, + }) + +} + +func (t *http2Server) handleData(f *http2.DataFrame) { + size := f.Header().Length + var sendBDPPing bool + if t.bdpEst != nil { + sendBDPPing = t.bdpEst.add(size) + } + // Decouple connection's flow control from application's read. + // An update on connection's flow control should not depend on + // whether user application has read the data or not. Such a + // restriction is already imposed on the stream's flow control, + // and therefore the sender will be blocked anyways. + // Decoupling the connection flow control will prevent other + // active(fast) streams from starving in presence of slow or + // inactive streams. + if w := t.fc.onData(size); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{ + streamID: 0, + increment: w, + }) + } + if sendBDPPing { + // Avoid excessive ping detection (e.g. in an L7 proxy) + // by sending a window update prior to the BDP ping. + if w := t.fc.reset(); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{ + streamID: 0, + increment: w, + }) + } + t.controlBuf.put(bdpPing) + } + // Select the right stream to dispatch. + s, ok := t.getStream(f) + if !ok { + return + } + if size > 0 { + if err := s.fc.onData(size); err != nil { + t.closeStream(s, true, http2.ErrCodeFlowControl, false) + return + } + if f.Header().Flags.Has(http2.FlagDataPadded) { + if w := s.fc.onRead(size - uint32(len(f.Data()))); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{s.id, w}) + } + } + // TODO(bradfitz, zhaoq): A copy is required here because there is no + // guarantee f.Data() is consumed before the arrival of next frame. + // Can this copy be eliminated? + if len(f.Data()) > 0 { + buffer := t.bufferPool.get() + buffer.Reset() + buffer.Write(f.Data()) + s.write(recvMsg{buffer: buffer}) + } + } + if f.Header().Flags.Has(http2.FlagDataEndStream) { + // Received the end of stream from the client. + s.compareAndSwapState(streamActive, streamReadDone) + s.write(recvMsg{err: io.EOF}) + } +} + +func (t *http2Server) handleRSTStream(f *http2.RSTStreamFrame) { + // If the stream is not deleted from the transport's active streams map, then do a regular close stream. + if s, ok := t.getStream(f); ok { + t.closeStream(s, false, 0, false) + return + } + // If the stream is already deleted from the active streams map, then put a cleanupStream item into controlbuf to delete the stream from loopy writer's established streams map. 
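+	// (Editorial note: loopy keeps its own map of established streams, so even
+	// a stream this transport has already forgotten must be evicted there.)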
+	t.controlBuf.put(&cleanupStream{
+		streamID: f.Header().StreamID,
+		rst:      false,
+		rstCode:  0,
+		onWrite:  func() {},
+	})
+}
+
+func (t *http2Server) handleSettings(f *http2.SettingsFrame) {
+	if f.IsAck() {
+		return
+	}
+	var ss []http2.Setting
+	var updateFuncs []func()
+	f.ForeachSetting(func(s http2.Setting) error {
+		switch s.ID {
+		case http2.SettingMaxHeaderListSize:
+			updateFuncs = append(updateFuncs, func() {
+				t.maxSendHeaderListSize = new(uint32)
+				*t.maxSendHeaderListSize = s.Val
+			})
+		default:
+			ss = append(ss, s)
+		}
+		return nil
+	})
+	t.controlBuf.executeAndPut(func(interface{}) bool {
+		for _, f := range updateFuncs {
+			f()
+		}
+		return true
+	}, &incomingSettings{
+		ss: ss,
+	})
+}
+
+const (
+	maxPingStrikes     = 2
+	defaultPingTimeout = 2 * time.Hour
+)
+
+func (t *http2Server) handlePing(f *http2.PingFrame) {
+	if f.IsAck() {
+		if f.Data == goAwayPing.data && t.drainChan != nil {
+			close(t.drainChan)
+			return
+		}
+		// Maybe it's a BDP ping.
+		if t.bdpEst != nil {
+			t.bdpEst.calculate(f.Data)
+		}
+		return
+	}
+	pingAck := &ping{ack: true}
+	copy(pingAck.data[:], f.Data[:])
+	t.controlBuf.put(pingAck)
+
+	now := time.Now()
+	defer func() {
+		t.lastPingAt = now
+	}()
+	// A reset of the ping strikes means that we don't need to check for policy
+	// violation for this ping and the pingStrikes counter should be set
+	// to 0.
+	if atomic.CompareAndSwapUint32(&t.resetPingStrikes, 1, 0) {
+		t.pingStrikes = 0
+		return
+	}
+	t.mu.Lock()
+	ns := len(t.activeStreams)
+	t.mu.Unlock()
+	if ns < 1 && !t.kep.PermitWithoutStream {
+		// Keepalive shouldn't be active; thus, this new ping should
+		// have come after at least defaultPingTimeout.
+		if t.lastPingAt.Add(defaultPingTimeout).After(now) {
+			t.pingStrikes++
+		}
+	} else {
+		// Check if keepalive policy is respected.
+		if t.lastPingAt.Add(t.kep.MinTime).After(now) {
+			t.pingStrikes++
+		}
+	}
+
+	if t.pingStrikes > maxPingStrikes {
+		// Send goaway and close the connection.
+		errorf("transport: Got too many pings from the client, closing the connection.")
+		t.controlBuf.put(&goAway{code: http2.ErrCodeEnhanceYourCalm, debugData: []byte("too_many_pings"), closeConn: true})
+	}
+}
+
+func (t *http2Server) handleWindowUpdate(f *http2.WindowUpdateFrame) {
+	t.controlBuf.put(&incomingWindowUpdate{
+		streamID:  f.Header().StreamID,
+		increment: f.Increment,
+	})
+}
+
+func appendHeaderFieldsFromMD(headerFields []hpack.HeaderField, md metadata.MD) []hpack.HeaderField {
+	for k, vv := range md {
+		if isReservedHeader(k) {
+			// Clients don't tolerate reading restricted headers after some non-restricted ones were sent.
+			continue
+		}
+		for _, v := range vv {
+			headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
+		}
+	}
+	return headerFields
+}
+
+func (t *http2Server) checkForHeaderListSize(it interface{}) bool {
+	if t.maxSendHeaderListSize == nil {
+		return true
+	}
+	hdrFrame := it.(*headerFrame)
+	var sz int64
+	for _, f := range hdrFrame.hf {
+		if sz += int64(f.Size()); sz > int64(*t.maxSendHeaderListSize) {
+			errorf("header list size to send violates the maximum size (%d bytes) set by client", *t.maxSendHeaderListSize)
+			return false
+		}
+	}
+	return true
+}
+
+// WriteHeader sends the header metadata md back to the client.
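+// Headers may be written at most once per stream; a second call, or a call
+// after the stream is done, fails with ErrIllegalHeaderWrite. (Editorial
+// summary of the check below.)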
+func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error {
+	if s.updateHeaderSent() || s.getState() == streamDone {
+		return ErrIllegalHeaderWrite
+	}
+	s.hdrMu.Lock()
+	if md.Len() > 0 {
+		if s.header.Len() > 0 {
+			s.header = metadata.Join(s.header, md)
+		} else {
+			s.header = md
+		}
+	}
+	if err := t.writeHeaderLocked(s); err != nil {
+		s.hdrMu.Unlock()
+		return err
+	}
+	s.hdrMu.Unlock()
+	return nil
+}
+
+func (t *http2Server) writeHeaderLocked(s *Stream) error {
+	// TODO(mmukhi): Benchmark if the performance gets better if we count the metadata and other header fields
+	// first and create a slice of that exact size.
+	headerFields := make([]hpack.HeaderField, 0, 2) // at least :status, content-type will be there if none else.
+	headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"})
+	headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: contentType(s.contentSubtype)})
+	if s.sendCompress != "" {
+		headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress})
+	}
+	headerFields = appendHeaderFieldsFromMD(headerFields, s.header)
+	success, err := t.controlBuf.executeAndPut(t.checkForHeaderListSize, &headerFrame{
+		streamID:  s.id,
+		hf:        headerFields,
+		endStream: false,
+		onWrite: func() {
+			atomic.StoreUint32(&t.resetPingStrikes, 1)
+		},
+	})
+	if !success {
+		if err != nil {
+			return err
+		}
+		t.closeStream(s, true, http2.ErrCodeInternal, false)
+		return ErrHeaderListSizeLimitViolation
+	}
+	if t.stats != nil {
+		// Note: WireLength is not set in outHeader.
+		// TODO(mmukhi): Revisit this later, if needed.
+		outHeader := &stats.OutHeader{}
+		t.stats.HandleRPC(s.Context(), outHeader)
+	}
+	return nil
+}
+
+// WriteStatus sends stream status to the client and terminates the stream.
+// No further I/O operations can be performed on this stream.
+// TODO(zhaoq): Now it indicates the end of entire stream. Revisit if early
+// OK is adopted.
+func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error {
+	if s.getState() == streamDone {
+		return nil
+	}
+	s.hdrMu.Lock()
+	// TODO(mmukhi): Benchmark if the performance gets better if we count the metadata and other header fields
+	// first and create a slice of that exact size.
+	headerFields := make([]hpack.HeaderField, 0, 2) // grpc-status and grpc-message will be there if none else.
+	if !s.updateHeaderSent() { // No headers have been sent.
+		if len(s.header) > 0 { // Send a separate header frame.
+			if err := t.writeHeaderLocked(s); err != nil {
+				s.hdrMu.Unlock()
+				return err
+			}
+		} else { // Send a trailer only response.
+			headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"})
+			headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: contentType(s.contentSubtype)})
+		}
+	}
+	headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status", Value: strconv.Itoa(int(st.Code()))})
+	headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-message", Value: encodeGrpcMessage(st.Message())})
+
+	if p := statusRawProto(st); p != nil && len(p.Details) > 0 {
+		stBytes, err := proto.Marshal(p)
+		if err != nil {
+			// TODO: return error instead, when callers are able to handle it.
+			grpclog.Errorf("transport: failed to marshal rpc status: %v, error: %v", p, err)
+		} else {
+			headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status-details-bin", Value: encodeBinHeader(stBytes)})
+		}
+	}
+
+	// Attach the trailer metadata.
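+	// (Editorial note: application trailers, e.g. those set through
+	// grpc.SetTrailer, live in s.trailer and follow the mandatory
+	// grpc-status/grpc-message pair appended above.)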
+	headerFields = appendHeaderFieldsFromMD(headerFields, s.trailer)
+	trailingHeader := &headerFrame{
+		streamID:  s.id,
+		hf:        headerFields,
+		endStream: true,
+		onWrite: func() {
+			atomic.StoreUint32(&t.resetPingStrikes, 1)
+		},
+	}
+	s.hdrMu.Unlock()
+	success, err := t.controlBuf.execute(t.checkForHeaderListSize, trailingHeader)
+	if !success {
+		if err != nil {
+			return err
+		}
+		t.closeStream(s, true, http2.ErrCodeInternal, false)
+		return ErrHeaderListSizeLimitViolation
+	}
+	// Send a RST_STREAM after the trailers if the client has not already half-closed.
+	rst := s.getState() == streamActive
+	t.finishStream(s, rst, http2.ErrCodeNo, trailingHeader, true)
+	if t.stats != nil {
+		t.stats.HandleRPC(s.Context(), &stats.OutTrailer{})
+	}
+	return nil
+}
+
+// Write converts the data into an HTTP2 data frame and sends it out. A non-nil
+// error is returned if it fails (e.g., framing error, transport error).
+func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) error {
+	if !s.isHeaderSent() { // Headers haven't been written yet.
+		if err := t.WriteHeader(s, nil); err != nil {
+			if _, ok := err.(ConnectionError); ok {
+				return err
+			}
+			// TODO(mmukhi, dfawley): Make sure this is the right code to return.
+			return status.Errorf(codes.Internal, "transport: %v", err)
+		}
+	} else {
+		// Writing headers checks for this condition.
+		if s.getState() == streamDone {
+			// TODO(mmukhi, dfawley): Should the server write also return io.EOF?
+			s.cancel()
+			select {
+			case <-t.ctx.Done():
+				return ErrConnClosing
+			default:
+			}
+			return ContextErr(s.ctx.Err())
+		}
+	}
+	// Add some data to header frame so that we can equally distribute bytes across frames.
+	emptyLen := http2MaxFrameLen - len(hdr)
+	if emptyLen > len(data) {
+		emptyLen = len(data)
+	}
+	hdr = append(hdr, data[:emptyLen]...)
+	data = data[emptyLen:]
+	df := &dataFrame{
+		streamID: s.id,
+		h:        hdr,
+		d:        data,
+		onEachWrite: func() {
+			atomic.StoreUint32(&t.resetPingStrikes, 1)
+		},
+	}
+	if err := s.wq.get(int32(len(hdr) + len(data))); err != nil {
+		select {
+		case <-t.ctx.Done():
+			return ErrConnClosing
+		default:
+		}
+		return ContextErr(s.ctx.Err())
+	}
+	return t.controlBuf.put(df)
+}
+
+// keepalive running in a separate goroutine does the following:
+// 1. Gracefully closes an idle connection after a duration of keepalive.MaxConnectionIdle.
+// 2. Gracefully closes any connection after a duration of keepalive.MaxConnectionAge.
+// 3. Forcibly closes a connection after an additive period of keepalive.MaxConnectionAgeGrace over keepalive.MaxConnectionAge.
+// 4. Makes sure a connection is alive by sending pings with a frequency of keepalive.Time and closes a non-responsive connection
+// after an additional duration of keepalive.Timeout.
+func (t *http2Server) keepalive() {
+	p := &ping{}
+	var pingSent bool
+	maxIdle := time.NewTimer(t.kp.MaxConnectionIdle)
+	maxAge := time.NewTimer(t.kp.MaxConnectionAge)
+	keepalive := time.NewTimer(t.kp.Time)
+	// NOTE: All exit paths of this function should reset their
+	// respective timers. A failure to do so will cause the
+	// following clean-up to deadlock and eventually leak.
+	defer func() {
+		if !maxIdle.Stop() {
+			<-maxIdle.C
+		}
+		if !maxAge.Stop() {
+			<-maxAge.C
+		}
+		if !keepalive.Stop() {
+			<-keepalive.C
+		}
+	}()
+	for {
+		select {
+		case <-maxIdle.C:
+			t.mu.Lock()
+			idle := t.idle
+			if idle.IsZero() { // The connection is non-idle.
+ t.mu.Unlock() + maxIdle.Reset(t.kp.MaxConnectionIdle) + continue + } + val := t.kp.MaxConnectionIdle - time.Since(idle) + t.mu.Unlock() + if val <= 0 { + // The connection has been idle for a duration of keepalive.MaxConnectionIdle or more. + // Gracefully close the connection. + t.drain(http2.ErrCodeNo, []byte{}) + // Resetting the timer so that the clean-up doesn't deadlock. + maxIdle.Reset(infinity) + return + } + maxIdle.Reset(val) + case <-maxAge.C: + t.drain(http2.ErrCodeNo, []byte{}) + maxAge.Reset(t.kp.MaxConnectionAgeGrace) + select { + case <-maxAge.C: + // Close the connection after grace period. + t.Close() + // Resetting the timer so that the clean-up doesn't deadlock. + maxAge.Reset(infinity) + case <-t.ctx.Done(): + } + return + case <-keepalive.C: + if atomic.CompareAndSwapUint32(&t.activity, 1, 0) { + pingSent = false + keepalive.Reset(t.kp.Time) + continue + } + if pingSent { + t.Close() + // Resetting the timer so that the clean-up doesn't deadlock. + keepalive.Reset(infinity) + return + } + pingSent = true + if channelz.IsOn() { + atomic.AddInt64(&t.czData.kpCount, 1) + } + t.controlBuf.put(p) + keepalive.Reset(t.kp.Timeout) + case <-t.ctx.Done(): + return + } + } +} + +// Close starts shutting down the http2Server transport. +// TODO(zhaoq): Now the destruction is not blocked on any pending streams. This +// could cause some resource issue. Revisit this later. +func (t *http2Server) Close() error { + t.mu.Lock() + if t.state == closing { + t.mu.Unlock() + return errors.New("transport: Close() was already called") + } + t.state = closing + streams := t.activeStreams + t.activeStreams = nil + t.mu.Unlock() + t.controlBuf.finish() + t.cancel() + err := t.conn.Close() + if channelz.IsOn() { + channelz.RemoveEntry(t.channelzID) + } + // Cancel all active streams. + for _, s := range streams { + s.cancel() + } + if t.stats != nil { + connEnd := &stats.ConnEnd{} + t.stats.HandleConn(t.ctx, connEnd) + } + return err +} + +// deleteStream deletes the stream s from transport's active streams. +func (t *http2Server) deleteStream(s *Stream, eosReceived bool) { + // In case stream sending and receiving are invoked in separate + // goroutines (e.g., bi-directional streaming), cancel needs to be + // called to interrupt the potential blocking on other goroutines. + s.cancel() + + t.mu.Lock() + if _, ok := t.activeStreams[s.id]; ok { + delete(t.activeStreams, s.id) + if len(t.activeStreams) == 0 { + t.idle = time.Now() + } + } + t.mu.Unlock() + + if channelz.IsOn() { + if eosReceived { + atomic.AddInt64(&t.czData.streamsSucceeded, 1) + } else { + atomic.AddInt64(&t.czData.streamsFailed, 1) + } + } +} + +// finishStream closes the stream and puts the trailing headerFrame into controlbuf. +func (t *http2Server) finishStream(s *Stream, rst bool, rstCode http2.ErrCode, hdr *headerFrame, eosReceived bool) { + oldState := s.swapState(streamDone) + if oldState == streamDone { + // If the stream was already done, return. + return + } + + hdr.cleanup = &cleanupStream{ + streamID: s.id, + rst: rst, + rstCode: rstCode, + onWrite: func() { + t.deleteStream(s, eosReceived) + }, + } + t.controlBuf.put(hdr) +} + +// closeStream clears the footprint of a stream when the stream is not needed any more. 
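+// Unlike finishStream, which piggybacks cleanup on the trailing header frame,
+// this path sends a standalone cleanupStream item to controlbuf. (Editorial
+// comparison, not upstream prose.)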
+func (t *http2Server) closeStream(s *Stream, rst bool, rstCode http2.ErrCode, eosReceived bool) { + s.swapState(streamDone) + t.deleteStream(s, eosReceived) + + t.controlBuf.put(&cleanupStream{ + streamID: s.id, + rst: rst, + rstCode: rstCode, + onWrite: func() {}, + }) +} + +func (t *http2Server) RemoteAddr() net.Addr { + return t.remoteAddr +} + +func (t *http2Server) Drain() { + t.drain(http2.ErrCodeNo, []byte{}) +} + +func (t *http2Server) drain(code http2.ErrCode, debugData []byte) { + t.mu.Lock() + defer t.mu.Unlock() + if t.drainChan != nil { + return + } + t.drainChan = make(chan struct{}) + t.controlBuf.put(&goAway{code: code, debugData: debugData, headsUp: true}) +} + +var goAwayPing = &ping{data: [8]byte{1, 6, 1, 8, 0, 3, 3, 9}} + +// Handles outgoing GoAway and returns true if loopy needs to put itself +// in draining mode. +func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) { + t.mu.Lock() + if t.state == closing { // TODO(mmukhi): This seems unnecessary. + t.mu.Unlock() + // The transport is closing. + return false, ErrConnClosing + } + sid := t.maxStreamID + if !g.headsUp { + // Stop accepting more streams now. + t.state = draining + if len(t.activeStreams) == 0 { + g.closeConn = true + } + t.mu.Unlock() + if err := t.framer.fr.WriteGoAway(sid, g.code, g.debugData); err != nil { + return false, err + } + if g.closeConn { + // Abruptly close the connection following the GoAway (via + // loopywriter). But flush out what's inside the buffer first. + t.framer.writer.Flush() + return false, fmt.Errorf("transport: Connection closing") + } + return true, nil + } + t.mu.Unlock() + // For a graceful close, send out a GoAway with stream ID of MaxUInt32, + // Follow that with a ping and wait for the ack to come back or a timer + // to expire. During this time accept new streams since they might have + // originated before the GoAway reaches the client. + // After getting the ack or timer expiration send out another GoAway this + // time with an ID of the max stream server intends to process. 
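+	// An illustrative timeline of the exchange (editorial, not upstream prose):
+	//
+	//	server -> GOAWAY(stream id math.MaxUint32, per the call below)
+	//	server -> PING(goAwayPing)
+	//	client -> PING ack (or the one-minute timer below fires)
+	//	server -> second GOAWAY carrying t.maxStreamID, queued via controlBuf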
+ if err := t.framer.fr.WriteGoAway(math.MaxUint32, http2.ErrCodeNo, []byte{}); err != nil { + return false, err + } + if err := t.framer.fr.WritePing(false, goAwayPing.data); err != nil { + return false, err + } + go func() { + timer := time.NewTimer(time.Minute) + defer timer.Stop() + select { + case <-t.drainChan: + case <-timer.C: + case <-t.ctx.Done(): + return + } + t.controlBuf.put(&goAway{code: g.code, debugData: g.debugData}) + }() + return false, nil +} + +func (t *http2Server) ChannelzMetric() *channelz.SocketInternalMetric { + s := channelz.SocketInternalMetric{ + StreamsStarted: atomic.LoadInt64(&t.czData.streamsStarted), + StreamsSucceeded: atomic.LoadInt64(&t.czData.streamsSucceeded), + StreamsFailed: atomic.LoadInt64(&t.czData.streamsFailed), + MessagesSent: atomic.LoadInt64(&t.czData.msgSent), + MessagesReceived: atomic.LoadInt64(&t.czData.msgRecv), + KeepAlivesSent: atomic.LoadInt64(&t.czData.kpCount), + LastRemoteStreamCreatedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastStreamCreatedTime)), + LastMessageSentTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgSentTime)), + LastMessageReceivedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgRecvTime)), + LocalFlowControlWindow: int64(t.fc.getSize()), + SocketOptions: channelz.GetSocketOption(t.conn), + LocalAddr: t.localAddr, + RemoteAddr: t.remoteAddr, + // RemoteName : + } + if au, ok := t.authInfo.(credentials.ChannelzSecurityInfo); ok { + s.Security = au.GetSecurityValue() + } + s.RemoteFlowControlWindow = t.getOutFlowWindow() + return &s +} + +func (t *http2Server) IncrMsgSent() { + atomic.AddInt64(&t.czData.msgSent, 1) + atomic.StoreInt64(&t.czData.lastMsgSentTime, time.Now().UnixNano()) +} + +func (t *http2Server) IncrMsgRecv() { + atomic.AddInt64(&t.czData.msgRecv, 1) + atomic.StoreInt64(&t.czData.lastMsgRecvTime, time.Now().UnixNano()) +} + +func (t *http2Server) getOutFlowWindow() int64 { + resp := make(chan uint32, 1) + timer := time.NewTimer(time.Second) + defer timer.Stop() + t.controlBuf.put(&outFlowControlSizeRequest{resp}) + select { + case sz := <-resp: + return int64(sz) + case <-t.ctxDone: + return -1 + case <-timer.C: + return -2 + } +} + +func getJitter(v time.Duration) time.Duration { + if v == infinity { + return 0 + } + // Generate a jitter between +/- 10% of the value. + r := int64(v / 10) + j := grpcrand.Int63n(2*r) - r + return time.Duration(j) +} diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go new file mode 100644 index 000000000..9d212867c --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/http_util.go @@ -0,0 +1,676 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ *
+ */
+
+package transport
+
+import (
+	"bufio"
+	"bytes"
+	"encoding/base64"
+	"fmt"
+	"io"
+	"math"
+	"net"
+	"net/http"
+	"strconv"
+	"strings"
+	"time"
+	"unicode/utf8"
+
+	"github.com/golang/protobuf/proto"
+	"golang.org/x/net/http2"
+	"golang.org/x/net/http2/hpack"
+	spb "google.golang.org/genproto/googleapis/rpc/status"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+)
+
+const (
+	// http2MaxFrameLen specifies the max length of an HTTP2 frame.
+	http2MaxFrameLen = 16384 // 16KB frame
+	// http://http2.github.io/http2-spec/#SettingValues
+	http2InitHeaderTableSize = 4096
+	// baseContentType is the base content-type for gRPC. This is a valid
+	// content-type on its own, but can also include a content-subtype such as
+	// "proto" as a suffix after "+" or ";". See
+	// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests
+	// for more details.
+	baseContentType = "application/grpc"
+)
+
+var (
+	clientPreface   = []byte(http2.ClientPreface)
+	http2ErrConvTab = map[http2.ErrCode]codes.Code{
+		http2.ErrCodeNo:                 codes.Internal,
+		http2.ErrCodeProtocol:           codes.Internal,
+		http2.ErrCodeInternal:           codes.Internal,
+		http2.ErrCodeFlowControl:        codes.ResourceExhausted,
+		http2.ErrCodeSettingsTimeout:    codes.Internal,
+		http2.ErrCodeStreamClosed:       codes.Internal,
+		http2.ErrCodeFrameSize:          codes.Internal,
+		http2.ErrCodeRefusedStream:      codes.Unavailable,
+		http2.ErrCodeCancel:             codes.Canceled,
+		http2.ErrCodeCompression:        codes.Internal,
+		http2.ErrCodeConnect:            codes.Internal,
+		http2.ErrCodeEnhanceYourCalm:    codes.ResourceExhausted,
+		http2.ErrCodeInadequateSecurity: codes.PermissionDenied,
+		http2.ErrCodeHTTP11Required:     codes.Internal,
+	}
+	statusCodeConvTab = map[codes.Code]http2.ErrCode{
+		codes.Internal:          http2.ErrCodeInternal,
+		codes.Canceled:          http2.ErrCodeCancel,
+		codes.Unavailable:       http2.ErrCodeRefusedStream,
+		codes.ResourceExhausted: http2.ErrCodeEnhanceYourCalm,
+		codes.PermissionDenied:  http2.ErrCodeInadequateSecurity,
+	}
+	// HTTPStatusConvTab is the HTTP status code to gRPC error code conversion table.
+	HTTPStatusConvTab = map[int]codes.Code{
+		// 400 Bad Request - INTERNAL.
+		http.StatusBadRequest: codes.Internal,
+		// 401 Unauthorized - UNAUTHENTICATED.
+		http.StatusUnauthorized: codes.Unauthenticated,
+		// 403 Forbidden - PERMISSION_DENIED.
+		http.StatusForbidden: codes.PermissionDenied,
+		// 404 Not Found - UNIMPLEMENTED.
+		http.StatusNotFound: codes.Unimplemented,
+		// 429 Too Many Requests - UNAVAILABLE.
+		http.StatusTooManyRequests: codes.Unavailable,
+		// 502 Bad Gateway - UNAVAILABLE.
+		http.StatusBadGateway: codes.Unavailable,
+		// 503 Service Unavailable - UNAVAILABLE.
+		http.StatusServiceUnavailable: codes.Unavailable,
+		// 504 Gateway timeout - UNAVAILABLE.
+		http.StatusGatewayTimeout: codes.Unavailable,
+	}
+)
+
+type parsedHeaderData struct {
+	encoding string
+	// statusGen caches the stream status received from the trailer the server
+	// sent. Client side only. Do not access directly. After all trailers are
+	// parsed, use the status method to retrieve the status.
+	statusGen *status.Status
+	// rawStatusCode and rawStatusMsg are set from the raw trailer fields and are not
+	// intended for direct access outside of parsing.
+	rawStatusCode *int
+	rawStatusMsg  string
+	httpStatus    *int
+	// Server side only fields.
+	timeoutSet bool
+	timeout    time.Duration
+	method     string
+	// key-value metadata map from the peer.
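+	// (Editorial note: this map becomes the metadata.MD that
+	// metadata.NewIncomingContext attaches to the stream's context in
+	// operateHeaders.)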
+	mdata          map[string][]string
+	statsTags      []byte
+	statsTrace     []byte
+	contentSubtype string
+
+	// isGRPC field indicates whether the peer is speaking gRPC (otherwise HTTP).
+	//
+	// We are in gRPC mode (peer speaking gRPC) if:
+	// 	* We are client side and have already received a HEADER frame that indicates a gRPC peer.
+	// 	* The header contains a valid content-type, i.e. a string that starts with "application/grpc",
+	// and we should handle errors specific to gRPC.
+	//
+	// Otherwise (i.e. a content-type string does not start with "application/grpc", or does not exist), we
+	// are in HTTP fallback mode, and should handle errors specific to HTTP.
+	isGRPC         bool
+	grpcErr        error
+	httpErr        error
+	contentTypeErr string
+}
+
+// decodeState configures decoding criteria and records the decoded data.
+type decodeState struct {
+	// whether decoding on server side or not
+	serverSide bool
+
+	// Records the states during HPACK decoding. It will be filled with info parsed from HTTP HEADERS
+	// frame once decodeHeader function has been invoked and returned.
+	data parsedHeaderData
+}
+
+// isReservedHeader checks whether hdr belongs to HTTP2 headers
+// reserved by gRPC protocol. Any other headers are classified as the
+// user-specified metadata.
+func isReservedHeader(hdr string) bool {
+	if hdr != "" && hdr[0] == ':' {
+		return true
+	}
+	switch hdr {
+	case "content-type",
+		"user-agent",
+		"grpc-message-type",
+		"grpc-encoding",
+		"grpc-message",
+		"grpc-status",
+		"grpc-timeout",
+		"grpc-status-details-bin",
+		// Intentionally exclude grpc-previous-rpc-attempts and
+		// grpc-retry-pushback-ms, which are "reserved", but their API
+		// intentionally works via metadata.
+		"te":
+		return true
+	default:
+		return false
+	}
+}
+
+// isWhitelistedHeader checks whether hdr should be propagated into metadata
+// visible to users, even though it is classified as "reserved", above.
+func isWhitelistedHeader(hdr string) bool {
+	switch hdr {
+	case ":authority", "user-agent":
+		return true
+	default:
+		return false
+	}
+}
+
+// contentSubtype returns the content-subtype for the given content-type. The
+// given content-type must be a valid content-type that starts with
+// "application/grpc". A content-subtype will follow "application/grpc" after a
+// "+" or ";". See
+// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
+// more details.
+//
+// If contentType is not a valid content-type for gRPC, the boolean
+// will be false, otherwise true. If content-type == "application/grpc",
+// "application/grpc+", or "application/grpc;", the boolean will be true,
+// but no content-subtype will be returned.
+//
+// contentType is assumed to be lowercase already.
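+//
+// Illustrative results (editorial, derived from the rules above):
+//
+//	contentSubtype("application/grpc")       == "", true
+//	contentSubtype("application/grpc+proto") == "proto", true
+//	contentSubtype("application/json")       == "", false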
+func contentSubtype(contentType string) (string, bool) { + if contentType == baseContentType { + return "", true + } + if !strings.HasPrefix(contentType, baseContentType) { + return "", false + } + // guaranteed since != baseContentType and has baseContentType prefix + switch contentType[len(baseContentType)] { + case '+', ';': + // this will return true for "application/grpc+" or "application/grpc;" + // which the previous validContentType function tested to be valid, so we + // just say that no content-subtype is specified in this case + return contentType[len(baseContentType)+1:], true + default: + return "", false + } +} + +// contentSubtype is assumed to be lowercase +func contentType(contentSubtype string) string { + if contentSubtype == "" { + return baseContentType + } + return baseContentType + "+" + contentSubtype +} + +func (d *decodeState) status() *status.Status { + if d.data.statusGen == nil { + // No status-details were provided; generate status using code/msg. + d.data.statusGen = status.New(codes.Code(int32(*(d.data.rawStatusCode))), d.data.rawStatusMsg) + } + return d.data.statusGen +} + +const binHdrSuffix = "-bin" + +func encodeBinHeader(v []byte) string { + return base64.RawStdEncoding.EncodeToString(v) +} + +func decodeBinHeader(v string) ([]byte, error) { + if len(v)%4 == 0 { + // Input was padded, or padding was not necessary. + return base64.StdEncoding.DecodeString(v) + } + return base64.RawStdEncoding.DecodeString(v) +} + +func encodeMetadataHeader(k, v string) string { + if strings.HasSuffix(k, binHdrSuffix) { + return encodeBinHeader(([]byte)(v)) + } + return v +} + +func decodeMetadataHeader(k, v string) (string, error) { + if strings.HasSuffix(k, binHdrSuffix) { + b, err := decodeBinHeader(v) + return string(b), err + } + return v, nil +} + +func (d *decodeState) decodeHeader(frame *http2.MetaHeadersFrame) error { + // frame.Truncated is set to true when framer detects that the current header + // list size hits MaxHeaderListSize limit. + if frame.Truncated { + return status.Error(codes.Internal, "peer header list size exceeded limit") + } + + for _, hf := range frame.Fields { + d.processHeaderField(hf) + } + + if d.data.isGRPC { + if d.data.grpcErr != nil { + return d.data.grpcErr + } + if d.serverSide { + return nil + } + if d.data.rawStatusCode == nil && d.data.statusGen == nil { + // gRPC status doesn't exist. + // Set rawStatusCode to be unknown and return nil error. + // So that, if the stream has ended this Unknown status + // will be propagated to the user. + // Otherwise, it will be ignored. In which case, status from + // a later trailer, that has StreamEnded flag set, is propagated. + code := int(codes.Unknown) + d.data.rawStatusCode = &code + } + return nil + } + + // HTTP fallback mode + if d.data.httpErr != nil { + return d.data.httpErr + } + + var ( + code = codes.Internal // when header does not include HTTP status, return INTERNAL + ok bool + ) + + if d.data.httpStatus != nil { + code, ok = HTTPStatusConvTab[*(d.data.httpStatus)] + if !ok { + code = codes.Unknown + } + } + + return status.Error(code, d.constructHTTPErrMsg()) +} + +// constructErrMsg constructs error message to be returned in HTTP fallback mode. +// Format: HTTP status code and its corresponding message + content-type error message. 
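+// A hypothetical 502 response with no content-type would, per the code below,
+// produce: "Bad Gateway: HTTP status code 502; transport: missing content-type
+// field". (Editorial example.)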
+func (d *decodeState) constructHTTPErrMsg() string { + var errMsgs []string + + if d.data.httpStatus == nil { + errMsgs = append(errMsgs, "malformed header: missing HTTP status") + } else { + errMsgs = append(errMsgs, fmt.Sprintf("%s: HTTP status code %d", http.StatusText(*(d.data.httpStatus)), *d.data.httpStatus)) + } + + if d.data.contentTypeErr == "" { + errMsgs = append(errMsgs, "transport: missing content-type field") + } else { + errMsgs = append(errMsgs, d.data.contentTypeErr) + } + + return strings.Join(errMsgs, "; ") +} + +func (d *decodeState) addMetadata(k, v string) { + if d.data.mdata == nil { + d.data.mdata = make(map[string][]string) + } + d.data.mdata[k] = append(d.data.mdata[k], v) +} + +func (d *decodeState) processHeaderField(f hpack.HeaderField) { + switch f.Name { + case "content-type": + contentSubtype, validContentType := contentSubtype(f.Value) + if !validContentType { + d.data.contentTypeErr = fmt.Sprintf("transport: received the unexpected content-type %q", f.Value) + return + } + d.data.contentSubtype = contentSubtype + // TODO: do we want to propagate the whole content-type in the metadata, + // or come up with a way to just propagate the content-subtype if it was set? + // ie {"content-type": "application/grpc+proto"} or {"content-subtype": "proto"} + // in the metadata? + d.addMetadata(f.Name, f.Value) + d.data.isGRPC = true + case "grpc-encoding": + d.data.encoding = f.Value + case "grpc-status": + code, err := strconv.Atoi(f.Value) + if err != nil { + d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-status: %v", err) + return + } + d.data.rawStatusCode = &code + case "grpc-message": + d.data.rawStatusMsg = decodeGrpcMessage(f.Value) + case "grpc-status-details-bin": + v, err := decodeBinHeader(f.Value) + if err != nil { + d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-status-details-bin: %v", err) + return + } + s := &spb.Status{} + if err := proto.Unmarshal(v, s); err != nil { + d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-status-details-bin: %v", err) + return + } + d.data.statusGen = status.FromProto(s) + case "grpc-timeout": + d.data.timeoutSet = true + var err error + if d.data.timeout, err = decodeTimeout(f.Value); err != nil { + d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed time-out: %v", err) + } + case ":path": + d.data.method = f.Value + case ":status": + code, err := strconv.Atoi(f.Value) + if err != nil { + d.data.httpErr = status.Errorf(codes.Internal, "transport: malformed http-status: %v", err) + return + } + d.data.httpStatus = &code + case "grpc-tags-bin": + v, err := decodeBinHeader(f.Value) + if err != nil { + d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-tags-bin: %v", err) + return + } + d.data.statsTags = v + d.addMetadata(f.Name, string(v)) + case "grpc-trace-bin": + v, err := decodeBinHeader(f.Value) + if err != nil { + d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-trace-bin: %v", err) + return + } + d.data.statsTrace = v + d.addMetadata(f.Name, string(v)) + default: + if isReservedHeader(f.Name) && !isWhitelistedHeader(f.Name) { + break + } + v, err := decodeMetadataHeader(f.Name, f.Value) + if err != nil { + errorf("Failed to decode metadata header (%q, %q): %v", f.Name, f.Value, err) + return + } + d.addMetadata(f.Name, v) + } +} + +type timeoutUnit uint8 + +const ( + hour timeoutUnit = 'H' + minute timeoutUnit = 'M' + second timeoutUnit = 'S' + millisecond timeoutUnit = 
'm' + microsecond timeoutUnit = 'u' + nanosecond timeoutUnit = 'n' +) + +func timeoutUnitToDuration(u timeoutUnit) (d time.Duration, ok bool) { + switch u { + case hour: + return time.Hour, true + case minute: + return time.Minute, true + case second: + return time.Second, true + case millisecond: + return time.Millisecond, true + case microsecond: + return time.Microsecond, true + case nanosecond: + return time.Nanosecond, true + default: + } + return +} + +const maxTimeoutValue int64 = 100000000 - 1 + +// div does integer division and round-up the result. Note that this is +// equivalent to (d+r-1)/r but has less chance to overflow. +func div(d, r time.Duration) int64 { + if m := d % r; m > 0 { + return int64(d/r + 1) + } + return int64(d / r) +} + +// TODO(zhaoq): It is the simplistic and not bandwidth efficient. Improve it. +func encodeTimeout(t time.Duration) string { + if t <= 0 { + return "0n" + } + if d := div(t, time.Nanosecond); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "n" + } + if d := div(t, time.Microsecond); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "u" + } + if d := div(t, time.Millisecond); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "m" + } + if d := div(t, time.Second); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "S" + } + if d := div(t, time.Minute); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "M" + } + // Note that maxTimeoutValue * time.Hour > MaxInt64. + return strconv.FormatInt(div(t, time.Hour), 10) + "H" +} + +func decodeTimeout(s string) (time.Duration, error) { + size := len(s) + if size < 2 { + return 0, fmt.Errorf("transport: timeout string is too short: %q", s) + } + if size > 9 { + // Spec allows for 8 digits plus the unit. + return 0, fmt.Errorf("transport: timeout string is too long: %q", s) + } + unit := timeoutUnit(s[size-1]) + d, ok := timeoutUnitToDuration(unit) + if !ok { + return 0, fmt.Errorf("transport: timeout unit is not recognized: %q", s) + } + t, err := strconv.ParseInt(s[:size-1], 10, 64) + if err != nil { + return 0, err + } + const maxHours = math.MaxInt64 / int64(time.Hour) + if d == time.Hour && t > maxHours { + // This timeout would overflow math.MaxInt64; clamp it. + return time.Duration(math.MaxInt64), nil + } + return d * time.Duration(t), nil +} + +const ( + spaceByte = ' ' + tildeByte = '~' + percentByte = '%' +) + +// encodeGrpcMessage is used to encode status code in header field +// "grpc-message". It does percent encoding and also replaces invalid utf-8 +// characters with Unicode replacement character. +// +// It checks to see if each individual byte in msg is an allowable byte, and +// then either percent encoding or passing it through. When percent encoding, +// the byte is converted into hexadecimal notation with a '%' prepended. +func encodeGrpcMessage(msg string) string { + if msg == "" { + return "" + } + lenMsg := len(msg) + for i := 0; i < lenMsg; i++ { + c := msg[i] + if !(c >= spaceByte && c <= tildeByte && c != percentByte) { + return encodeGrpcMessageUnchecked(msg) + } + } + return msg +} + +func encodeGrpcMessageUnchecked(msg string) string { + var buf bytes.Buffer + for len(msg) > 0 { + r, size := utf8.DecodeRuneInString(msg) + for _, b := range []byte(string(r)) { + if size > 1 { + // If size > 1, r is not ascii. Always do percent encoding. + buf.WriteString(fmt.Sprintf("%%%02X", b)) + continue + } + + // The for loop is necessary even if size == 1. r could be + // utf8.RuneError. 
+ // + // fmt.Sprintf("%%%02X", utf8.RuneError) gives "%FFFD". + if b >= spaceByte && b <= tildeByte && b != percentByte { + buf.WriteByte(b) + } else { + buf.WriteString(fmt.Sprintf("%%%02X", b)) + } + } + msg = msg[size:] + } + return buf.String() +} + +// decodeGrpcMessage decodes the msg encoded by encodeGrpcMessage. +func decodeGrpcMessage(msg string) string { + if msg == "" { + return "" + } + lenMsg := len(msg) + for i := 0; i < lenMsg; i++ { + if msg[i] == percentByte && i+2 < lenMsg { + return decodeGrpcMessageUnchecked(msg) + } + } + return msg +} + +func decodeGrpcMessageUnchecked(msg string) string { + var buf bytes.Buffer + lenMsg := len(msg) + for i := 0; i < lenMsg; i++ { + c := msg[i] + if c == percentByte && i+2 < lenMsg { + parsed, err := strconv.ParseUint(msg[i+1:i+3], 16, 8) + if err != nil { + buf.WriteByte(c) + } else { + buf.WriteByte(byte(parsed)) + i += 2 + } + } else { + buf.WriteByte(c) + } + } + return buf.String() +} + +type bufWriter struct { + buf []byte + offset int + batchSize int + conn net.Conn + err error + + onFlush func() +} + +func newBufWriter(conn net.Conn, batchSize int) *bufWriter { + return &bufWriter{ + buf: make([]byte, batchSize*2), + batchSize: batchSize, + conn: conn, + } +} + +func (w *bufWriter) Write(b []byte) (n int, err error) { + if w.err != nil { + return 0, w.err + } + if w.batchSize == 0 { // Buffer has been disabled. + return w.conn.Write(b) + } + for len(b) > 0 { + nn := copy(w.buf[w.offset:], b) + b = b[nn:] + w.offset += nn + n += nn + if w.offset >= w.batchSize { + err = w.Flush() + } + } + return n, err +} + +func (w *bufWriter) Flush() error { + if w.err != nil { + return w.err + } + if w.offset == 0 { + return nil + } + if w.onFlush != nil { + w.onFlush() + } + _, w.err = w.conn.Write(w.buf[:w.offset]) + w.offset = 0 + return w.err +} + +type framer struct { + writer *bufWriter + fr *http2.Framer +} + +func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, maxHeaderListSize uint32) *framer { + if writeBufferSize < 0 { + writeBufferSize = 0 + } + var r io.Reader = conn + if readBufferSize > 0 { + r = bufio.NewReaderSize(r, readBufferSize) + } + w := newBufWriter(conn, writeBufferSize) + f := &framer{ + writer: w, + fr: http2.NewFramer(w, r), + } + // Opt-in to Frame reuse API on framer to reduce garbage. + // Frames aren't safe to read from after a subsequent call to ReadFrame. + f.fr.SetReuseFrames() + f.fr.MaxHeaderListSize = maxHeaderListSize + f.fr.ReadMetaHeaders = hpack.NewDecoder(http2InitHeaderTableSize, nil) + return f +} diff --git a/vendor/google.golang.org/grpc/internal/transport/log.go b/vendor/google.golang.org/grpc/internal/transport/log.go new file mode 100644 index 000000000..879df80c4 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/log.go @@ -0,0 +1,44 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// This file contains wrappers for grpclog functions. 
+// The transport package only logs to verbose level 2 by default. + +package transport + +import "google.golang.org/grpc/grpclog" + +const logLevel = 2 + +func infof(format string, args ...interface{}) { + if grpclog.V(logLevel) { + grpclog.Infof(format, args...) + } +} + +func warningf(format string, args ...interface{}) { + if grpclog.V(logLevel) { + grpclog.Warningf(format, args...) + } +} + +func errorf(format string, args ...interface{}) { + if grpclog.V(logLevel) { + grpclog.Errorf(format, args...) + } +} diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go new file mode 100644 index 000000000..0f33c9ca8 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -0,0 +1,795 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package transport defines and implements message oriented communication +// channel to complete various transactions (e.g., an RPC). It is meant for +// grpc-internal usage and is not intended to be imported directly by users. +package transport + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "net" + "sync" + "sync/atomic" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" + "google.golang.org/grpc/tap" +) + +type bufferPool struct { + pool sync.Pool +} + +func newBufferPool() *bufferPool { + return &bufferPool{ + pool: sync.Pool{ + New: func() interface{} { + return new(bytes.Buffer) + }, + }, + } +} + +func (p *bufferPool) get() *bytes.Buffer { + return p.pool.Get().(*bytes.Buffer) +} + +func (p *bufferPool) put(b *bytes.Buffer) { + p.pool.Put(b) +} + +// recvMsg represents the received msg from the transport. All transport +// protocol specific info has been removed. +type recvMsg struct { + buffer *bytes.Buffer + // nil: received some data + // io.EOF: stream is completed. data is nil. + // other non-nil error: transport failure. data is nil. + err error +} + +// recvBuffer is an unbounded channel of recvMsg structs. +// Note recvBuffer differs from controlBuffer only in that recvBuffer +// holds a channel of only recvMsg structs instead of objects implementing "item" interface. +// recvBuffer is written to much more often than +// controlBuffer and using strict recvMsg structs helps avoid allocation in "recvBuffer.put" +type recvBuffer struct { + c chan recvMsg + mu sync.Mutex + backlog []recvMsg + err error +} + +func newRecvBuffer() *recvBuffer { + b := &recvBuffer{ + c: make(chan recvMsg, 1), + } + return b +} + +func (b *recvBuffer) put(r recvMsg) { + b.mu.Lock() + if b.err != nil { + b.mu.Unlock() + // An error had occurred earlier, don't accept more + // data or errors. 
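+		// (Editorial note: the first error recorded in b.err is sticky; every
+		// later put is dropped here, so readers never see data past an error.)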
+		return
+	}
+	b.err = r.err
+	if len(b.backlog) == 0 {
+		select {
+		case b.c <- r:
+			b.mu.Unlock()
+			return
+		default:
+		}
+	}
+	b.backlog = append(b.backlog, r)
+	b.mu.Unlock()
+}
+
+func (b *recvBuffer) load() {
+	b.mu.Lock()
+	if len(b.backlog) > 0 {
+		select {
+		case b.c <- b.backlog[0]:
+			b.backlog[0] = recvMsg{}
+			b.backlog = b.backlog[1:]
+		default:
+		}
+	}
+	b.mu.Unlock()
+}
+
+// get returns the channel that receives a recvMsg in the buffer.
+//
+// Upon receipt of a recvMsg, the caller should call load to send another
+// recvMsg onto the channel if there is any.
+func (b *recvBuffer) get() <-chan recvMsg {
+	return b.c
+}
+
+// recvBufferReader implements io.Reader interface to read the data from
+// recvBuffer.
+type recvBufferReader struct {
+	closeStream func(error) // Closes the client transport stream with the given error and nil trailer metadata.
+	ctx         context.Context
+	ctxDone     <-chan struct{} // cache of ctx.Done() (for performance).
+	recv        *recvBuffer
+	last        *bytes.Buffer // Stores the remaining data in the previous calls.
+	err         error
+	freeBuffer  func(*bytes.Buffer)
+}
+
+// Read reads the next len(p) bytes from last. If last is drained, it tries to
+// read additional data from recv. It blocks if there is no additional data
+// available in recv. If Read returns any non-nil error, it will continue to
+// return that error.
+func (r *recvBufferReader) Read(p []byte) (n int, err error) {
+	if r.err != nil {
+		return 0, r.err
+	}
+	if r.last != nil {
+		// Read remaining data left in last call.
+		copied, _ := r.last.Read(p)
+		if r.last.Len() == 0 {
+			r.freeBuffer(r.last)
+			r.last = nil
+		}
+		return copied, nil
+	}
+	if r.closeStream != nil {
+		n, r.err = r.readClient(p)
+	} else {
+		n, r.err = r.read(p)
+	}
+	return n, r.err
+}
+
+func (r *recvBufferReader) read(p []byte) (n int, err error) {
+	select {
+	case <-r.ctxDone:
+		return 0, ContextErr(r.ctx.Err())
+	case m := <-r.recv.get():
+		return r.readAdditional(m, p)
+	}
+}
+
+func (r *recvBufferReader) readClient(p []byte) (n int, err error) {
+	// If the context is canceled, close the stream with nil metadata.
+	// closeStream writes its error parameter to r.recv as a recvMsg.
+	// r.readAdditional acts on that message and returns the necessary error.
+	select {
+	case <-r.ctxDone:
+		r.closeStream(ContextErr(r.ctx.Err()))
+		m := <-r.recv.get()
+		return r.readAdditional(m, p)
+	case m := <-r.recv.get():
+		return r.readAdditional(m, p)
+	}
+}
+
+func (r *recvBufferReader) readAdditional(m recvMsg, p []byte) (n int, err error) {
+	r.recv.load()
+	if m.err != nil {
+		return 0, m.err
+	}
+	copied, _ := m.buffer.Read(p)
+	if m.buffer.Len() == 0 {
+		r.freeBuffer(m.buffer)
+		r.last = nil
+	} else {
+		r.last = m.buffer
+	}
+	return copied, nil
+}
+
+type streamState uint32
+
+const (
+	streamActive    streamState = iota
+	streamWriteDone // EndStream sent
+	streamReadDone  // EndStream received
+	streamDone      // the entire stream is finished.
+)
+
+// Stream represents an RPC in the transport layer.
+type Stream struct {
+	id      uint32
+	st      ServerTransport    // nil for client side Stream
+	ctx     context.Context    // the associated context of the stream
+	cancel  context.CancelFunc // always nil for client side Stream
+	done    chan struct{}      // closed at the end of stream to unblock writers. On the client side.
+	ctxDone <-chan struct{}    // same as done chan but for server side. Cache of ctx.Done() (for performance)
Cache of ctx.Done() (for performance)
+	method       string // the associated RPC method of the stream
+	recvCompress string
+	sendCompress string
+	buf          *recvBuffer
+	trReader     io.Reader
+	fc           *inFlow
+	wq           *writeQuota
+
+	// Callback to state application's intentions to read data. This
+	// is used to adjust flow control, if needed.
+	requestRead func(int)
+
+	headerChan       chan struct{} // closed to indicate the end of header metadata.
+	headerChanClosed uint32        // set when headerChan is closed. Used to avoid closing headerChan multiple times.
+
+	// hdrMu protects header and trailer metadata on the server-side.
+	hdrMu sync.Mutex
+	// On client side, header keeps the received header metadata.
+	//
+	// On server side, header keeps the header set by SetHeader(). The complete
+	// header will be merged into this after t.WriteHeader() is called.
+	header  metadata.MD
+	trailer metadata.MD // the key-value map of trailer metadata.
+
+	noHeaders bool // set if the client never received headers (set only after the stream is done).
+
+	// On the server-side, headerSent is atomically set to 1 when the headers are sent out.
+	headerSent uint32
+
+	state streamState
+
+	// On client-side it is the status error received from the server.
+	// On server-side it is unused.
+	status *status.Status
+
+	bytesReceived uint32 // indicates whether any bytes have been received on this stream
+	unprocessed   uint32 // set if the server sends a refused stream or GOAWAY including this stream
+
+	// contentSubtype is the content-subtype for requests.
+	// this must be lowercase or the behavior is undefined.
+	contentSubtype string
+}
+
+// isHeaderSent is only valid on the server-side.
+func (s *Stream) isHeaderSent() bool {
+	return atomic.LoadUint32(&s.headerSent) == 1
+}
+
+// updateHeaderSent updates headerSent and returns true
+// if it was already set. It is valid only on server-side.
+func (s *Stream) updateHeaderSent() bool {
+	return atomic.SwapUint32(&s.headerSent, 1) == 1
+}
+
+func (s *Stream) swapState(st streamState) streamState {
+	return streamState(atomic.SwapUint32((*uint32)(&s.state), uint32(st)))
+}
+
+func (s *Stream) compareAndSwapState(oldState, newState streamState) bool {
+	return atomic.CompareAndSwapUint32((*uint32)(&s.state), uint32(oldState), uint32(newState))
+}
+
+func (s *Stream) getState() streamState {
+	return streamState(atomic.LoadUint32((*uint32)(&s.state)))
+}
+
+func (s *Stream) waitOnHeader() error {
+	if s.headerChan == nil {
+		// On the server headerChan is always nil since a stream originates
+		// only after having received headers.
+		return nil
+	}
+	select {
+	case <-s.ctx.Done():
+		return ContextErr(s.ctx.Err())
+	case <-s.headerChan:
+		return nil
+	}
+}
+
+// RecvCompress returns the compression algorithm applied to the inbound
+// message. It is an empty string if no compression is applied.
+func (s *Stream) RecvCompress() string {
+	if err := s.waitOnHeader(); err != nil {
+		return ""
+	}
+	return s.recvCompress
+}
+
+// SetSendCompress sets the compression algorithm to the stream.
+func (s *Stream) SetSendCompress(str string) {
+	s.sendCompress = str
+}
+
+// Done returns a channel which is closed when it receives the final status
+// from the server.
+func (s *Stream) Done() <-chan struct{} {
+	return s.done
+}
+
+// Header returns the header metadata of the stream.
+//
+// On client side, it acquires the key-value pairs of header metadata once it is
+// available.
It blocks until i) the metadata is ready or ii) there is no header
+// metadata or iii) the stream is canceled/expired.
+//
+// On server side, it returns the out header after t.WriteHeader is called.
+func (s *Stream) Header() (metadata.MD, error) {
+	if s.headerChan == nil && s.header != nil {
+		// On server side, return the header in stream. It will be the out
+		// header after t.WriteHeader is called.
+		return s.header.Copy(), nil
+	}
+	err := s.waitOnHeader()
+	// Even if the stream is closed, header is returned if available.
+	select {
+	case <-s.headerChan:
+		if s.header == nil {
+			return nil, nil
+		}
+		return s.header.Copy(), nil
+	default:
+	}
+	return nil, err
+}
+
+// TrailersOnly blocks until a header or trailers-only frame is received and
+// then returns true if the stream was trailers-only. If the stream ends
+// before headers are received, returns true, nil. If a context error happens
+// first, returns it as a status error. Client-side only.
+func (s *Stream) TrailersOnly() (bool, error) {
+	err := s.waitOnHeader()
+	if err != nil {
+		return false, err
+	}
+	return s.noHeaders, nil
+}
+
+// Trailer returns the cached trailer metadata. Note that if it is not called
+// after the entire stream is done, it could return an empty MD. Client
+// side only.
+// It can be read safely only after the stream has ended, that is, after
+// either read or write has returned io.EOF.
+func (s *Stream) Trailer() metadata.MD {
+	c := s.trailer.Copy()
+	return c
+}
+
+// ContentSubtype returns the content-subtype for a request. For example, a
+// content-subtype of "proto" will result in a content-type of
+// "application/grpc+proto". This will always be lowercase. See
+// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
+// more details.
+func (s *Stream) ContentSubtype() string {
+	return s.contentSubtype
+}
+
+// Context returns the context of the stream.
+func (s *Stream) Context() context.Context {
+	return s.ctx
+}
+
+// Method returns the method for the stream.
+func (s *Stream) Method() string {
+	return s.method
+}
+
+// Status returns the status received from the server.
+// Status can be read safely only after the stream has ended,
+// that is, after Done() is closed.
+func (s *Stream) Status() *status.Status {
+	return s.status
+}
+
+// SetHeader sets the header metadata. This can be called multiple times.
+// Server side only.
+// This should not be called in parallel to other data writes.
+func (s *Stream) SetHeader(md metadata.MD) error {
+	if md.Len() == 0 {
+		return nil
+	}
+	if s.isHeaderSent() || s.getState() == streamDone {
+		return ErrIllegalHeaderWrite
+	}
+	s.hdrMu.Lock()
+	s.header = metadata.Join(s.header, md)
+	s.hdrMu.Unlock()
+	return nil
+}
+
+// SendHeader sends the given header metadata. The given metadata is
+// combined with any metadata set by previous calls to SetHeader and
+// then written to the transport stream.
+func (s *Stream) SendHeader(md metadata.MD) error {
+	return s.st.WriteHeader(s, md)
+}
+
+// SetTrailer sets the trailer metadata which will be sent with the RPC status
+// by the server. This can be called multiple times. Server side only.
+// This should not be called in parallel to other data writes.
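+//
+// Illustrative sketch (editor's addition, not upstream code; assumes a
+// server-side stream s and a precomputed checksum string):
+//
+//	_ = s.SetTrailer(metadata.Pairs("x-checksum", checksum))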
+func (s *Stream) SetTrailer(md metadata.MD) error {
+	if md.Len() == 0 {
+		return nil
+	}
+	if s.getState() == streamDone {
+		return ErrIllegalHeaderWrite
+	}
+	s.hdrMu.Lock()
+	s.trailer = metadata.Join(s.trailer, md)
+	s.hdrMu.Unlock()
+	return nil
+}
+
+func (s *Stream) write(m recvMsg) {
+	s.buf.put(m)
+}
+
+// Read reads all p bytes from the wire for this stream.
+func (s *Stream) Read(p []byte) (n int, err error) {
+	// Don't request a read if there was an error earlier
+	if er := s.trReader.(*transportReader).er; er != nil {
+		return 0, er
+	}
+	s.requestRead(len(p))
+	return io.ReadFull(s.trReader, p)
+}
+
+// transportReader reads all the data available for this Stream from the transport and
+// passes them into the decoder, which converts them into a gRPC message stream.
+// The error is io.EOF when the stream is done or another non-nil error if
+// the stream broke.
+type transportReader struct {
+	reader io.Reader
+	// The handler to control the window update procedure for both this
+	// particular stream and the associated transport.
+	windowHandler func(int)
+	er            error
+}
+
+func (t *transportReader) Read(p []byte) (n int, err error) {
+	n, err = t.reader.Read(p)
+	if err != nil {
+		t.er = err
+		return
+	}
+	t.windowHandler(n)
+	return
+}
+
+// BytesReceived indicates whether any bytes have been received on this stream.
+func (s *Stream) BytesReceived() bool {
+	return atomic.LoadUint32(&s.bytesReceived) == 1
+}
+
+// Unprocessed indicates whether the server did not process this stream --
+// i.e. it sent a refused stream or GOAWAY including this stream ID.
+func (s *Stream) Unprocessed() bool {
+	return atomic.LoadUint32(&s.unprocessed) == 1
+}
+
+// GoString is implemented by Stream so context.String() won't
+// race when printing %#v.
+func (s *Stream) GoString() string {
+	return fmt.Sprintf("<stream: %p, %v>", s, s.method)
+}
+
+// state of transport
+type transportState int
+
+const (
+	reachable transportState = iota
+	closing
+	draining
+)
+
+// ServerConfig consists of all the configurations to establish a server transport.
+type ServerConfig struct {
+	MaxStreams            uint32
+	AuthInfo              credentials.AuthInfo
+	InTapHandle           tap.ServerInHandle
+	StatsHandler          stats.Handler
+	KeepaliveParams       keepalive.ServerParameters
+	KeepalivePolicy       keepalive.EnforcementPolicy
+	InitialWindowSize     int32
+	InitialConnWindowSize int32
+	WriteBufferSize       int
+	ReadBufferSize        int
+	ChannelzParentID      int64
+	MaxHeaderListSize     *uint32
+}
+
+// NewServerTransport creates a ServerTransport with conn or a non-nil error
+// if it fails.
+func NewServerTransport(protocol string, conn net.Conn, config *ServerConfig) (ServerTransport, error) {
+	return newHTTP2Server(conn, config)
+}
+
+// ConnectOptions covers all relevant options for communicating with the server.
+type ConnectOptions struct {
+	// UserAgent is the application user agent.
+	UserAgent string
+	// Dialer specifies how to dial a network address.
+	Dialer func(context.Context, string) (net.Conn, error)
+	// FailOnNonTempDialError specifies if gRPC fails on non-temporary dial errors.
+	FailOnNonTempDialError bool
+	// PerRPCCredentials stores the PerRPCCredentials required to issue RPCs.
+	PerRPCCredentials []credentials.PerRPCCredentials
+	// TransportCredentials stores the Authenticator required to setup a client
+	// connection. Only one of TransportCredentials and CredsBundle is non-nil.
+	TransportCredentials credentials.TransportCredentials
+	// CredsBundle is the credentials bundle to be used.
Only one of + // TransportCredentials and CredsBundle is non-nil. + CredsBundle credentials.Bundle + // KeepaliveParams stores the keepalive parameters. + KeepaliveParams keepalive.ClientParameters + // StatsHandler stores the handler for stats. + StatsHandler stats.Handler + // InitialWindowSize sets the initial window size for a stream. + InitialWindowSize int32 + // InitialConnWindowSize sets the initial window size for a connection. + InitialConnWindowSize int32 + // WriteBufferSize sets the size of write buffer which in turn determines how much data can be batched before it's written on the wire. + WriteBufferSize int + // ReadBufferSize sets the size of read buffer, which in turn determines how much data can be read at most for one read syscall. + ReadBufferSize int + // ChannelzParentID sets the addrConn id which initiate the creation of this client transport. + ChannelzParentID int64 + // MaxHeaderListSize sets the max (uncompressed) size of header list that is prepared to be received. + MaxHeaderListSize *uint32 +} + +// TargetInfo contains the information of the target such as network address and metadata. +type TargetInfo struct { + Addr string + Metadata interface{} + Authority string +} + +// NewClientTransport establishes the transport with the required ConnectOptions +// and returns it to the caller. +func NewClientTransport(connectCtx, ctx context.Context, target TargetInfo, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (ClientTransport, error) { + return newHTTP2Client(connectCtx, ctx, target, opts, onPrefaceReceipt, onGoAway, onClose) +} + +// Options provides additional hints and information for message +// transmission. +type Options struct { + // Last indicates whether this write is the last piece for + // this stream. + Last bool +} + +// CallHdr carries the information of a particular RPC. +type CallHdr struct { + // Host specifies the peer's host. + Host string + + // Method specifies the operation to perform. + Method string + + // SendCompress specifies the compression algorithm applied on + // outbound message. + SendCompress string + + // Creds specifies credentials.PerRPCCredentials for a call. + Creds credentials.PerRPCCredentials + + // ContentSubtype specifies the content-subtype for a request. For example, a + // content-subtype of "proto" will result in a content-type of + // "application/grpc+proto". The value of ContentSubtype must be all + // lowercase, otherwise the behavior is undefined. See + // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests + // for more details. + ContentSubtype string + + PreviousAttempts int // value of grpc-previous-rpc-attempts header to set +} + +// ClientTransport is the common interface for all gRPC client-side transport +// implementations. +type ClientTransport interface { + // Close tears down this transport. Once it returns, the transport + // should not be accessed any more. The caller must make sure this + // is called only once. + Close() error + + // GracefulClose starts to tear down the transport: the transport will stop + // accepting new RPCs and NewStream will return error. Once all streams are + // finished, the transport will close. + // + // It does not block. + GracefulClose() + + // Write sends the data for the given stream. A nil stream indicates + // the write is to be performed on the transport as a whole. + Write(s *Stream, hdr []byte, data []byte, opts *Options) error + + // NewStream creates a Stream for an RPC. 
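	//
	// A minimal call sketch (editor's illustration; the host and method
	// values are made up, and t is assumed to be a ClientTransport):
	//
	//	s, err := t.NewStream(ctx, &CallHdr{Host: "example.com", Method: "/pkg.Service/Do"})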
+ NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error) + + // CloseStream clears the footprint of a stream when the stream is + // not needed any more. The err indicates the error incurred when + // CloseStream is called. Must be called when a stream is finished + // unless the associated transport is closing. + CloseStream(stream *Stream, err error) + + // Error returns a channel that is closed when some I/O error + // happens. Typically the caller should have a goroutine to monitor + // this in order to take action (e.g., close the current transport + // and create a new one) in error case. It should not return nil + // once the transport is initiated. + Error() <-chan struct{} + + // GoAway returns a channel that is closed when ClientTransport + // receives the draining signal from the server (e.g., GOAWAY frame in + // HTTP/2). + GoAway() <-chan struct{} + + // GetGoAwayReason returns the reason why GoAway frame was received. + GetGoAwayReason() GoAwayReason + + // RemoteAddr returns the remote network address. + RemoteAddr() net.Addr + + // IncrMsgSent increments the number of message sent through this transport. + IncrMsgSent() + + // IncrMsgRecv increments the number of message received through this transport. + IncrMsgRecv() +} + +// ServerTransport is the common interface for all gRPC server-side transport +// implementations. +// +// Methods may be called concurrently from multiple goroutines, but +// Write methods for a given Stream will be called serially. +type ServerTransport interface { + // HandleStreams receives incoming streams using the given handler. + HandleStreams(func(*Stream), func(context.Context, string) context.Context) + + // WriteHeader sends the header metadata for the given stream. + // WriteHeader may not be called on all streams. + WriteHeader(s *Stream, md metadata.MD) error + + // Write sends the data for the given stream. + // Write may not be called on all streams. + Write(s *Stream, hdr []byte, data []byte, opts *Options) error + + // WriteStatus sends the status of a stream to the client. WriteStatus is + // the final call made on a stream and always occurs. + WriteStatus(s *Stream, st *status.Status) error + + // Close tears down the transport. Once it is called, the transport + // should not be accessed any more. All the pending streams and their + // handlers will be terminated asynchronously. + Close() error + + // RemoteAddr returns the remote network address. + RemoteAddr() net.Addr + + // Drain notifies the client this ServerTransport stops accepting new RPCs. + Drain() + + // IncrMsgSent increments the number of message sent through this transport. + IncrMsgSent() + + // IncrMsgRecv increments the number of message received through this transport. + IncrMsgRecv() +} + +// connectionErrorf creates an ConnectionError with the specified error description. +func connectionErrorf(temp bool, e error, format string, a ...interface{}) ConnectionError { + return ConnectionError{ + Desc: fmt.Sprintf(format, a...), + temp: temp, + err: e, + } +} + +// ConnectionError is an error that results in the termination of the +// entire connection and the retry of all the active streams. +type ConnectionError struct { + Desc string + temp bool + err error +} + +func (e ConnectionError) Error() string { + return fmt.Sprintf("connection error: desc = %q", e.Desc) +} + +// Temporary indicates if this connection error is temporary or fatal. 
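+//
+// A hedged usage sketch (editor's illustration, not upstream code):
+//
+//	if ce, ok := err.(ConnectionError); ok && ce.Temporary() {
+//		// transient failure: reconnect rather than fail permanently
+//	}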
+func (e ConnectionError) Temporary() bool {
+	return e.temp
+}
+
+// Origin returns the original error of this connection error.
+func (e ConnectionError) Origin() error {
+	// Never return nil error here.
+	// If the original error is nil, return itself.
+	if e.err == nil {
+		return e
+	}
+	return e.err
+}
+
+var (
+	// ErrConnClosing indicates that the transport is closing.
+	ErrConnClosing = connectionErrorf(true, nil, "transport is closing")
+	// errStreamDrain indicates that the stream is rejected because the
+	// connection is draining. This could be caused by goaway or balancer
+	// removing the address.
+	errStreamDrain = status.Error(codes.Unavailable, "the connection is draining")
+	// errStreamDone is returned from write at the client side to indicate
+	// to the application layer that the stream is done.
+	errStreamDone = errors.New("the stream is done")
+	// statusGoAway indicates that the server sent a GOAWAY that included this
+	// stream's ID in unprocessed RPCs.
+	statusGoAway = status.New(codes.Unavailable, "the stream is rejected because server is draining the connection")
+)
+
+// GoAwayReason contains the reason for the GoAway frame received.
+type GoAwayReason uint8
+
+const (
+	// GoAwayInvalid indicates that no GoAway frame is received.
+	GoAwayInvalid GoAwayReason = 0
+	// GoAwayNoReason is the default value when GoAway frame is received.
+	GoAwayNoReason GoAwayReason = 1
+	// GoAwayTooManyPings indicates that a GoAway frame with
+	// ErrCodeEnhanceYourCalm was received and that the debug data said
+	// "too_many_pings".
+	GoAwayTooManyPings GoAwayReason = 2
+)
+
+// channelzData is used to store channelz related data for http2Client and http2Server.
+// These fields cannot be embedded in the original structs (e.g. http2Client), since to do an
+// atomic operation on an int64 variable on a 32-bit machine, the user is responsible for
+// enforcing memory alignment. Here, by grouping those int64 fields inside a struct, we are
+// enforcing the alignment.
+type channelzData struct {
+	kpCount int64
+	// The number of streams that have started, including already finished ones.
+	streamsStarted int64
+	// Client side: The number of streams that have ended successfully by receiving
+	// EoS bit set frame from server.
+	// Server side: The number of streams that have ended successfully by sending
+	// frame with EoS bit set.
+	streamsSucceeded int64
+	streamsFailed    int64
+	// lastStreamCreatedTime stores the timestamp that the last stream gets created. It is of int64 type
+	// instead of time.Time since it's more costly to atomically update time.Time variable than int64
+	// variable. The same goes for lastMsgSentTime and lastMsgRecvTime.
+	lastStreamCreatedTime int64
+	msgSent               int64
+	msgRecv               int64
+	lastMsgSentTime       int64
+	lastMsgRecvTime       int64
+}
+
+// ContextErr converts the error from context package into a status error.
+func ContextErr(err error) error {
+	switch err {
+	case context.DeadlineExceeded:
+		return status.Error(codes.DeadlineExceeded, err.Error())
+	case context.Canceled:
+		return status.Error(codes.Canceled, err.Error())
+	}
+	return status.Errorf(codes.Internal, "Unexpected error from context package: %v", err)
+}
diff --git a/vendor/google.golang.org/grpc/keepalive/keepalive.go b/vendor/google.golang.org/grpc/keepalive/keepalive.go
new file mode 100644
index 000000000..34d31b5e7
--- /dev/null
+++ b/vendor/google.golang.org/grpc/keepalive/keepalive.go
@@ -0,0 +1,85 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package keepalive defines configurable parameters for point-to-point +// healthcheck. +package keepalive + +import ( + "time" +) + +// ClientParameters is used to set keepalive parameters on the client-side. +// These configure how the client will actively probe to notice when a +// connection is broken and send pings so intermediaries will be aware of the +// liveness of the connection. Make sure these parameters are set in +// coordination with the keepalive policy on the server, as incompatible +// settings can result in closing of connection. +type ClientParameters struct { + // After a duration of this time if the client doesn't see any activity it + // pings the server to see if the transport is still alive. + // If set below 10s, a minimum value of 10s will be used instead. + Time time.Duration // The current default value is infinity. + // After having pinged for keepalive check, the client waits for a duration + // of Timeout and if no activity is seen even after that the connection is + // closed. + Timeout time.Duration // The current default value is 20 seconds. + // If true, client sends keepalive pings even with no active RPCs. If false, + // when there are no active RPCs, Time and Timeout will be ignored and no + // keepalive pings will be sent. + PermitWithoutStream bool // false by default. +} + +// ServerParameters is used to set keepalive and max-age parameters on the +// server-side. +type ServerParameters struct { + // MaxConnectionIdle is a duration for the amount of time after which an + // idle connection would be closed by sending a GoAway. Idleness duration is + // defined since the most recent time the number of outstanding RPCs became + // zero or the connection establishment. + MaxConnectionIdle time.Duration // The current default value is infinity. + // MaxConnectionAge is a duration for the maximum amount of time a + // connection may exist before it will be closed by sending a GoAway. A + // random jitter of +/-10% will be added to MaxConnectionAge to spread out + // connection storms. + MaxConnectionAge time.Duration // The current default value is infinity. + // MaxConnectionAgeGrace is an additive period after MaxConnectionAge after + // which the connection will be forcibly closed. + MaxConnectionAgeGrace time.Duration // The current default value is infinity. + // After a duration of this time if the server doesn't see any activity it + // pings the client to see if the transport is still alive. + // If set below 1s, a minimum value of 1s will be used instead. + Time time.Duration // The current default value is 2 hours. + // After having pinged for keepalive check, the server waits for a duration + // of Timeout and if no activity is seen even after that the connection is + // closed. + Timeout time.Duration // The current default value is 20 seconds. +} + +// EnforcementPolicy is used to set keepalive enforcement policy on the +// server-side. 
Server will close the connection with a client that violates this
+// policy.
+type EnforcementPolicy struct {
+	// MinTime is the minimum amount of time a client should wait before sending
+	// a keepalive ping.
+	MinTime time.Duration // The current default value is 5 minutes.
+	// If true, server allows keepalive pings even when there are no active
+	// streams (RPCs). If false, and client sends ping when there are no active
+	// streams, server will send GOAWAY and close the connection.
+	PermitWithoutStream bool // false by default.
+}
diff --git a/vendor/google.golang.org/grpc/metadata/metadata.go b/vendor/google.golang.org/grpc/metadata/metadata.go
new file mode 100644
index 000000000..cf6d1b947
--- /dev/null
+++ b/vendor/google.golang.org/grpc/metadata/metadata.go
@@ -0,0 +1,209 @@
+/*
+ *
+ * Copyright 2014 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package metadata defines the structure of the metadata supported by the gRPC library.
+// Please refer to https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md
+// for more information about custom-metadata.
+package metadata // import "google.golang.org/grpc/metadata"
+
+import (
+	"context"
+	"fmt"
+	"strings"
+)
+
+// DecodeKeyValue returns k, v, nil.
+//
+// Deprecated: use k and v directly instead.
+func DecodeKeyValue(k, v string) (string, string, error) {
+	return k, v, nil
+}
+
+// MD is a mapping from metadata keys to values. Users should use the following
+// two convenience functions New and Pairs to generate MD.
+type MD map[string][]string
+
+// New creates an MD from a given key-value map.
+//
+// Only the following ASCII characters are allowed in keys:
+// - digits: 0-9
+// - uppercase letters: A-Z (normalized to lower)
+// - lowercase letters: a-z
+// - special characters: -_.
+// Uppercase letters are automatically converted to lowercase.
+//
+// Keys beginning with "grpc-" are reserved for grpc-internal use only and may
+// result in errors if set in metadata.
+func New(m map[string]string) MD {
+	md := MD{}
+	for k, val := range m {
+		key := strings.ToLower(k)
+		md[key] = append(md[key], val)
+	}
+	return md
+}
+
+// Pairs returns an MD formed by the mapping of key, value ...
+// Pairs panics if len(kv) is odd.
+//
+// Only the following ASCII characters are allowed in keys:
+// - digits: 0-9
+// - uppercase letters: A-Z (normalized to lower)
+// - lowercase letters: a-z
+// - special characters: -_.
+// Uppercase letters are automatically converted to lowercase.
+//
+// Keys beginning with "grpc-" are reserved for grpc-internal use only and may
+// result in errors if set in metadata.
+func Pairs(kv ...string) MD {
+	if len(kv)%2 == 1 {
+		panic(fmt.Sprintf("metadata: Pairs got the odd number of input pairs for metadata: %d", len(kv)))
+	}
+	md := MD{}
+	var key string
+	for i, s := range kv {
+		if i%2 == 0 {
+			key = strings.ToLower(s)
+			continue
+		}
+		md[key] = append(md[key], s)
+	}
+	return md
+}
+
+// Len returns the number of items in md.
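+//
+// For illustration (editor's sketch, from a caller's perspective): keys are
+// lowercased on construction and values accumulate per key, so
+//
+//	md := metadata.Pairs("K1", "v1", "k1", "v2")
+//	// md.Len() == 1 and md.Get("k1") == []string{"v1", "v2"}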
+func (md MD) Len() int { + return len(md) +} + +// Copy returns a copy of md. +func (md MD) Copy() MD { + return Join(md) +} + +// Get obtains the values for a given key. +func (md MD) Get(k string) []string { + k = strings.ToLower(k) + return md[k] +} + +// Set sets the value of a given key with a slice of values. +func (md MD) Set(k string, vals ...string) { + if len(vals) == 0 { + return + } + k = strings.ToLower(k) + md[k] = vals +} + +// Append adds the values to key k, not overwriting what was already stored at that key. +func (md MD) Append(k string, vals ...string) { + if len(vals) == 0 { + return + } + k = strings.ToLower(k) + md[k] = append(md[k], vals...) +} + +// Join joins any number of mds into a single MD. +// The order of values for each key is determined by the order in which +// the mds containing those values are presented to Join. +func Join(mds ...MD) MD { + out := MD{} + for _, md := range mds { + for k, v := range md { + out[k] = append(out[k], v...) + } + } + return out +} + +type mdIncomingKey struct{} +type mdOutgoingKey struct{} + +// NewIncomingContext creates a new context with incoming md attached. +func NewIncomingContext(ctx context.Context, md MD) context.Context { + return context.WithValue(ctx, mdIncomingKey{}, md) +} + +// NewOutgoingContext creates a new context with outgoing md attached. If used +// in conjunction with AppendToOutgoingContext, NewOutgoingContext will +// overwrite any previously-appended metadata. +func NewOutgoingContext(ctx context.Context, md MD) context.Context { + return context.WithValue(ctx, mdOutgoingKey{}, rawMD{md: md}) +} + +// AppendToOutgoingContext returns a new context with the provided kv merged +// with any existing metadata in the context. Please refer to the +// documentation of Pairs for a description of kv. +func AppendToOutgoingContext(ctx context.Context, kv ...string) context.Context { + if len(kv)%2 == 1 { + panic(fmt.Sprintf("metadata: AppendToOutgoingContext got an odd number of input pairs for metadata: %d", len(kv))) + } + md, _ := ctx.Value(mdOutgoingKey{}).(rawMD) + added := make([][]string, len(md.added)+1) + copy(added, md.added) + added[len(added)-1] = make([]string, len(kv)) + copy(added[len(added)-1], kv) + return context.WithValue(ctx, mdOutgoingKey{}, rawMD{md: md.md, added: added}) +} + +// FromIncomingContext returns the incoming metadata in ctx if it exists. The +// returned MD should not be modified. Writing to it may cause races. +// Modification should be made to copies of the returned MD. +func FromIncomingContext(ctx context.Context) (md MD, ok bool) { + md, ok = ctx.Value(mdIncomingKey{}).(MD) + return +} + +// FromOutgoingContextRaw returns the un-merged, intermediary contents +// of rawMD. Remember to perform strings.ToLower on the keys. The returned +// MD should not be modified. Writing to it may cause races. Modification +// should be made to copies of the returned MD. +// +// This is intended for gRPC-internal use ONLY. +func FromOutgoingContextRaw(ctx context.Context) (MD, [][]string, bool) { + raw, ok := ctx.Value(mdOutgoingKey{}).(rawMD) + if !ok { + return nil, nil, false + } + + return raw.md, raw.added, true +} + +// FromOutgoingContext returns the outgoing metadata in ctx if it exists. The +// returned MD should not be modified. Writing to it may cause races. +// Modification should be made to copies of the returned MD. 
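+//
+// Illustrative round trip (editor's sketch, from a caller's perspective;
+// key and values are made up):
+//
+//	ctx := metadata.NewOutgoingContext(context.Background(), metadata.Pairs("k", "v1"))
+//	ctx = metadata.AppendToOutgoingContext(ctx, "k", "v2")
+//	md, _ := metadata.FromOutgoingContext(ctx) // md["k"] == []string{"v1", "v2"}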
+func FromOutgoingContext(ctx context.Context) (MD, bool) { + raw, ok := ctx.Value(mdOutgoingKey{}).(rawMD) + if !ok { + return nil, false + } + + mds := make([]MD, 0, len(raw.added)+1) + mds = append(mds, raw.md) + for _, vv := range raw.added { + mds = append(mds, Pairs(vv...)) + } + return Join(mds...), ok +} + +type rawMD struct { + md MD + added [][]string +} diff --git a/vendor/google.golang.org/grpc/naming/dns_resolver.go b/vendor/google.golang.org/grpc/naming/dns_resolver.go new file mode 100644 index 000000000..c9f79dc53 --- /dev/null +++ b/vendor/google.golang.org/grpc/naming/dns_resolver.go @@ -0,0 +1,293 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package naming + +import ( + "context" + "errors" + "fmt" + "net" + "strconv" + "time" + + "google.golang.org/grpc/grpclog" +) + +const ( + defaultPort = "443" + defaultFreq = time.Minute * 30 +) + +var ( + errMissingAddr = errors.New("missing address") + errWatcherClose = errors.New("watcher has been closed") + + lookupHost = net.DefaultResolver.LookupHost + lookupSRV = net.DefaultResolver.LookupSRV +) + +// NewDNSResolverWithFreq creates a DNS Resolver that can resolve DNS names, and +// create watchers that poll the DNS server using the frequency set by freq. +func NewDNSResolverWithFreq(freq time.Duration) (Resolver, error) { + return &dnsResolver{freq: freq}, nil +} + +// NewDNSResolver creates a DNS Resolver that can resolve DNS names, and create +// watchers that poll the DNS server using the default frequency defined by defaultFreq. +func NewDNSResolver() (Resolver, error) { + return NewDNSResolverWithFreq(defaultFreq) +} + +// dnsResolver handles name resolution for names following the DNS scheme +type dnsResolver struct { + // frequency of polling the DNS server that the watchers created by this resolver will use. + freq time.Duration +} + +// formatIP returns ok = false if addr is not a valid textual representation of an IP address. +// If addr is an IPv4 address, return the addr and ok = true. +// If addr is an IPv6 address, return the addr enclosed in square brackets and ok = true. +func formatIP(addr string) (addrIP string, ok bool) { + ip := net.ParseIP(addr) + if ip == nil { + return "", false + } + if ip.To4() != nil { + return addr, true + } + return "[" + addr + "]", true +} + +// parseTarget takes the user input target string, returns formatted host and port info. +// If target doesn't specify a port, set the port to be the defaultPort. +// If target is in IPv6 format and host-name is enclosed in square brackets, brackets +// are stripped when setting the host. 
+// examples: +// target: "www.google.com" returns host: "www.google.com", port: "443" +// target: "ipv4-host:80" returns host: "ipv4-host", port: "80" +// target: "[ipv6-host]" returns host: "ipv6-host", port: "443" +// target: ":80" returns host: "localhost", port: "80" +// target: ":" returns host: "localhost", port: "443" +func parseTarget(target string) (host, port string, err error) { + if target == "" { + return "", "", errMissingAddr + } + + if ip := net.ParseIP(target); ip != nil { + // target is an IPv4 or IPv6(without brackets) address + return target, defaultPort, nil + } + if host, port, err := net.SplitHostPort(target); err == nil { + // target has port, i.e ipv4-host:port, [ipv6-host]:port, host-name:port + if host == "" { + // Keep consistent with net.Dial(): If the host is empty, as in ":80", the local system is assumed. + host = "localhost" + } + if port == "" { + // If the port field is empty(target ends with colon), e.g. "[::1]:", defaultPort is used. + port = defaultPort + } + return host, port, nil + } + if host, port, err := net.SplitHostPort(target + ":" + defaultPort); err == nil { + // target doesn't have port + return host, port, nil + } + return "", "", fmt.Errorf("invalid target address %v", target) +} + +// Resolve creates a watcher that watches the name resolution of the target. +func (r *dnsResolver) Resolve(target string) (Watcher, error) { + host, port, err := parseTarget(target) + if err != nil { + return nil, err + } + + if net.ParseIP(host) != nil { + ipWatcher := &ipWatcher{ + updateChan: make(chan *Update, 1), + } + host, _ = formatIP(host) + ipWatcher.updateChan <- &Update{Op: Add, Addr: host + ":" + port} + return ipWatcher, nil + } + + ctx, cancel := context.WithCancel(context.Background()) + return &dnsWatcher{ + r: r, + host: host, + port: port, + ctx: ctx, + cancel: cancel, + t: time.NewTimer(0), + }, nil +} + +// dnsWatcher watches for the name resolution update for a specific target +type dnsWatcher struct { + r *dnsResolver + host string + port string + // The latest resolved address set + curAddrs map[string]*Update + ctx context.Context + cancel context.CancelFunc + t *time.Timer +} + +// ipWatcher watches for the name resolution update for an IP address. +type ipWatcher struct { + updateChan chan *Update +} + +// Next returns the address resolution Update for the target. For IP address, +// the resolution is itself, thus polling name server is unnecessary. Therefore, +// Next() will return an Update the first time it is called, and will be blocked +// for all following calls as no Update exists until watcher is closed. +func (i *ipWatcher) Next() ([]*Update, error) { + u, ok := <-i.updateChan + if !ok { + return nil, errWatcherClose + } + return []*Update{u}, nil +} + +// Close closes the ipWatcher. +func (i *ipWatcher) Close() { + close(i.updateChan) +} + +// AddressType indicates the address type returned by name resolution. +type AddressType uint8 + +const ( + // Backend indicates the server is a backend server. + Backend AddressType = iota + // GRPCLB indicates the server is a grpclb load balancer. + GRPCLB +) + +// AddrMetadataGRPCLB contains the information the name resolver for grpclb should provide. The +// name resolver used by the grpclb balancer is required to provide this type of metadata in +// its address updates. +type AddrMetadataGRPCLB struct { + // AddrType is the type of server (grpc load balancer or backend). + AddrType AddressType + // ServerName is the name of the grpc load balancer. Used for authentication. 
+ ServerName string +} + +// compileUpdate compares the old resolved addresses and newly resolved addresses, +// and generates an update list +func (w *dnsWatcher) compileUpdate(newAddrs map[string]*Update) []*Update { + var res []*Update + for a, u := range w.curAddrs { + if _, ok := newAddrs[a]; !ok { + u.Op = Delete + res = append(res, u) + } + } + for a, u := range newAddrs { + if _, ok := w.curAddrs[a]; !ok { + res = append(res, u) + } + } + return res +} + +func (w *dnsWatcher) lookupSRV() map[string]*Update { + newAddrs := make(map[string]*Update) + _, srvs, err := lookupSRV(w.ctx, "grpclb", "tcp", w.host) + if err != nil { + grpclog.Infof("grpc: failed dns SRV record lookup due to %v.\n", err) + return nil + } + for _, s := range srvs { + lbAddrs, err := lookupHost(w.ctx, s.Target) + if err != nil { + grpclog.Warningf("grpc: failed load balancer address dns lookup due to %v.\n", err) + continue + } + for _, a := range lbAddrs { + a, ok := formatIP(a) + if !ok { + grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err) + continue + } + addr := a + ":" + strconv.Itoa(int(s.Port)) + newAddrs[addr] = &Update{Addr: addr, + Metadata: AddrMetadataGRPCLB{AddrType: GRPCLB, ServerName: s.Target}} + } + } + return newAddrs +} + +func (w *dnsWatcher) lookupHost() map[string]*Update { + newAddrs := make(map[string]*Update) + addrs, err := lookupHost(w.ctx, w.host) + if err != nil { + grpclog.Warningf("grpc: failed dns A record lookup due to %v.\n", err) + return nil + } + for _, a := range addrs { + a, ok := formatIP(a) + if !ok { + grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err) + continue + } + addr := a + ":" + w.port + newAddrs[addr] = &Update{Addr: addr} + } + return newAddrs +} + +func (w *dnsWatcher) lookup() []*Update { + newAddrs := w.lookupSRV() + if newAddrs == nil { + // If failed to get any balancer address (either no corresponding SRV for the + // target, or caused by failure during resolution/parsing of the balancer target), + // return any A record info available. + newAddrs = w.lookupHost() + } + result := w.compileUpdate(newAddrs) + w.curAddrs = newAddrs + return result +} + +// Next returns the resolved address update(delta) for the target. If there's no +// change, it will sleep for 30 mins and try to resolve again after that. +func (w *dnsWatcher) Next() ([]*Update, error) { + for { + select { + case <-w.ctx.Done(): + return nil, errWatcherClose + case <-w.t.C: + } + result := w.lookup() + // Next lookup should happen after an interval defined by w.r.freq. + w.t.Reset(w.r.freq) + if len(result) > 0 { + return result, nil + } + } +} + +func (w *dnsWatcher) Close() { + w.cancel() +} diff --git a/vendor/google.golang.org/grpc/naming/naming.go b/vendor/google.golang.org/grpc/naming/naming.go new file mode 100644 index 000000000..f4c1c8b68 --- /dev/null +++ b/vendor/google.golang.org/grpc/naming/naming.go @@ -0,0 +1,68 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +// Package naming defines the naming API and related data structures for gRPC. +// +// This package is deprecated: please use package resolver instead. +package naming + +// Operation defines the corresponding operations for a name resolution change. +// +// Deprecated: please use package resolver. +type Operation uint8 + +const ( + // Add indicates a new address is added. + Add Operation = iota + // Delete indicates an existing address is deleted. + Delete +) + +// Update defines a name resolution update. Notice that it is not valid having both +// empty string Addr and nil Metadata in an Update. +// +// Deprecated: please use package resolver. +type Update struct { + // Op indicates the operation of the update. + Op Operation + // Addr is the updated address. It is empty string if there is no address update. + Addr string + // Metadata is the updated metadata. It is nil if there is no metadata update. + // Metadata is not required for a custom naming implementation. + Metadata interface{} +} + +// Resolver creates a Watcher for a target to track its resolution changes. +// +// Deprecated: please use package resolver. +type Resolver interface { + // Resolve creates a Watcher for target. + Resolve(target string) (Watcher, error) +} + +// Watcher watches for the updates on the specified target. +// +// Deprecated: please use package resolver. +type Watcher interface { + // Next blocks until an update or error happens. It may return one or more + // updates. The first call should get the full set of the results. It should + // return an error if and only if Watcher cannot recover. + Next() ([]*Update, error) + // Close closes the Watcher. + Close() +} diff --git a/vendor/google.golang.org/grpc/peer/peer.go b/vendor/google.golang.org/grpc/peer/peer.go new file mode 100644 index 000000000..e01d219ff --- /dev/null +++ b/vendor/google.golang.org/grpc/peer/peer.go @@ -0,0 +1,51 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package peer defines various peer information associated with RPCs and +// corresponding utils. +package peer + +import ( + "context" + "net" + + "google.golang.org/grpc/credentials" +) + +// Peer contains the information of the peer for an RPC, such as the address +// and authentication information. +type Peer struct { + // Addr is the peer address. + Addr net.Addr + // AuthInfo is the authentication information of the transport. + // It is nil if there is no transport security being used. + AuthInfo credentials.AuthInfo +} + +type peerKey struct{} + +// NewContext creates a new context with peer information attached. +func NewContext(ctx context.Context, p *Peer) context.Context { + return context.WithValue(ctx, peerKey{}, p) +} + +// FromContext returns the peer information in ctx if it exists. 
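+//
+// Typical use (editor's sketch, not upstream code) inside a server handler:
+//
+//	if p, ok := peer.FromContext(ctx); ok {
+//		log.Printf("request from %v", p.Addr)
+//	}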
+func FromContext(ctx context.Context) (p *Peer, ok bool) { + p, ok = ctx.Value(peerKey{}).(*Peer) + return +} diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go new file mode 100644 index 000000000..45baa2ae1 --- /dev/null +++ b/vendor/google.golang.org/grpc/picker_wrapper.go @@ -0,0 +1,197 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "context" + "io" + "sync" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/status" +) + +// pickerWrapper is a wrapper of balancer.Picker. It blocks on certain pick +// actions and unblock when there's a picker update. +type pickerWrapper struct { + mu sync.Mutex + done bool + blockingCh chan struct{} + picker balancer.Picker + + // The latest connection happened. + connErrMu sync.Mutex + connErr error +} + +func newPickerWrapper() *pickerWrapper { + bp := &pickerWrapper{blockingCh: make(chan struct{})} + return bp +} + +func (bp *pickerWrapper) updateConnectionError(err error) { + bp.connErrMu.Lock() + bp.connErr = err + bp.connErrMu.Unlock() +} + +func (bp *pickerWrapper) connectionError() error { + bp.connErrMu.Lock() + err := bp.connErr + bp.connErrMu.Unlock() + return err +} + +// updatePicker is called by UpdateBalancerState. It unblocks all blocked pick. +func (bp *pickerWrapper) updatePicker(p balancer.Picker) { + bp.mu.Lock() + if bp.done { + bp.mu.Unlock() + return + } + bp.picker = p + // bp.blockingCh should never be nil. + close(bp.blockingCh) + bp.blockingCh = make(chan struct{}) + bp.mu.Unlock() +} + +func doneChannelzWrapper(acw *acBalancerWrapper, done func(balancer.DoneInfo)) func(balancer.DoneInfo) { + acw.mu.Lock() + ac := acw.ac + acw.mu.Unlock() + ac.incrCallsStarted() + return func(b balancer.DoneInfo) { + if b.Err != nil && b.Err != io.EOF { + ac.incrCallsFailed() + } else { + ac.incrCallsSucceeded() + } + if done != nil { + done(b) + } + } +} + +// pick returns the transport that will be used for the RPC. +// It may block in the following cases: +// - there's no picker +// - the current picker returns ErrNoSubConnAvailable +// - the current picker returns other errors and failfast is false. +// - the subConn returned by the current picker is not READY +// When one of these situations happens, pick blocks until the picker gets updated. 
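+//
+// In short (editor's summary): pick loops, waiting on bp.blockingCh between
+// attempts; updatePicker closes that channel, so every blocked RPC wakes up
+// and repicks against the new picker.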
+func (bp *pickerWrapper) pick(ctx context.Context, failfast bool, opts balancer.PickOptions) (transport.ClientTransport, func(balancer.DoneInfo), error) { + var ch chan struct{} + + for { + bp.mu.Lock() + if bp.done { + bp.mu.Unlock() + return nil, nil, ErrClientConnClosing + } + + if bp.picker == nil { + ch = bp.blockingCh + } + if ch == bp.blockingCh { + // This could happen when either: + // - bp.picker is nil (the previous if condition), or + // - has called pick on the current picker. + bp.mu.Unlock() + select { + case <-ctx.Done(): + if connectionErr := bp.connectionError(); connectionErr != nil { + switch ctx.Err() { + case context.DeadlineExceeded: + return nil, nil, status.Errorf(codes.DeadlineExceeded, "latest connection error: %v", connectionErr) + case context.Canceled: + return nil, nil, status.Errorf(codes.Canceled, "latest connection error: %v", connectionErr) + } + } + return nil, nil, ctx.Err() + case <-ch: + } + continue + } + + ch = bp.blockingCh + p := bp.picker + bp.mu.Unlock() + + subConn, done, err := p.Pick(ctx, opts) + + if err != nil { + switch err { + case balancer.ErrNoSubConnAvailable: + continue + case balancer.ErrTransientFailure: + if !failfast { + continue + } + return nil, nil, status.Errorf(codes.Unavailable, "%v, latest connection error: %v", err, bp.connectionError()) + case context.DeadlineExceeded: + return nil, nil, status.Error(codes.DeadlineExceeded, err.Error()) + case context.Canceled: + return nil, nil, status.Error(codes.Canceled, err.Error()) + default: + if _, ok := status.FromError(err); ok { + return nil, nil, err + } + // err is some other error. + return nil, nil, status.Error(codes.Unknown, err.Error()) + } + } + + acw, ok := subConn.(*acBalancerWrapper) + if !ok { + grpclog.Error("subconn returned from pick is not *acBalancerWrapper") + continue + } + if t, ok := acw.getAddrConn().getReadyTransport(); ok { + if channelz.IsOn() { + return t, doneChannelzWrapper(acw, done), nil + } + return t, done, nil + } + if done != nil { + // Calling done with nil error, no bytes sent and no bytes received. + // DoneInfo with default value works. + done(balancer.DoneInfo{}) + } + grpclog.Infof("blockingPicker: the picked transport is not ready, loop back to repick") + // If ok == false, ac.state is not READY. + // A valid picker always returns READY subConn. This means the state of ac + // just changed, and picker will be updated shortly. + // continue back to the beginning of the for loop to repick. + } +} + +func (bp *pickerWrapper) close() { + bp.mu.Lock() + defer bp.mu.Unlock() + if bp.done { + return + } + bp.done = true + close(bp.blockingCh) +} diff --git a/vendor/google.golang.org/grpc/pickfirst.go b/vendor/google.golang.org/grpc/pickfirst.go new file mode 100644 index 000000000..d1e38aad7 --- /dev/null +++ b/vendor/google.golang.org/grpc/pickfirst.go @@ -0,0 +1,110 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package grpc + +import ( + "context" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/resolver" +) + +// PickFirstBalancerName is the name of the pick_first balancer. +const PickFirstBalancerName = "pick_first" + +func newPickfirstBuilder() balancer.Builder { + return &pickfirstBuilder{} +} + +type pickfirstBuilder struct{} + +func (*pickfirstBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { + return &pickfirstBalancer{cc: cc} +} + +func (*pickfirstBuilder) Name() string { + return PickFirstBalancerName +} + +type pickfirstBalancer struct { + cc balancer.ClientConn + sc balancer.SubConn +} + +func (b *pickfirstBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) { + if err != nil { + grpclog.Infof("pickfirstBalancer: HandleResolvedAddrs called with error %v", err) + return + } + if b.sc == nil { + b.sc, err = b.cc.NewSubConn(addrs, balancer.NewSubConnOptions{}) + if err != nil { + //TODO(yuxuanli): why not change the cc state to Idle? + grpclog.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err) + return + } + b.cc.UpdateBalancerState(connectivity.Idle, &picker{sc: b.sc}) + b.sc.Connect() + } else { + b.sc.UpdateAddresses(addrs) + b.sc.Connect() + } +} + +func (b *pickfirstBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) { + grpclog.Infof("pickfirstBalancer: HandleSubConnStateChange: %p, %v", sc, s) + if b.sc != sc { + grpclog.Infof("pickfirstBalancer: ignored state change because sc is not recognized") + return + } + if s == connectivity.Shutdown { + b.sc = nil + return + } + + switch s { + case connectivity.Ready, connectivity.Idle: + b.cc.UpdateBalancerState(s, &picker{sc: sc}) + case connectivity.Connecting: + b.cc.UpdateBalancerState(s, &picker{err: balancer.ErrNoSubConnAvailable}) + case connectivity.TransientFailure: + b.cc.UpdateBalancerState(s, &picker{err: balancer.ErrTransientFailure}) + } +} + +func (b *pickfirstBalancer) Close() { +} + +type picker struct { + err error + sc balancer.SubConn +} + +func (p *picker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) { + if p.err != nil { + return nil, nil, p.err + } + return p.sc, nil, nil +} + +func init() { + balancer.Register(newPickfirstBuilder()) +} diff --git a/vendor/google.golang.org/grpc/preloader.go b/vendor/google.golang.org/grpc/preloader.go new file mode 100644 index 000000000..76acbbcc9 --- /dev/null +++ b/vendor/google.golang.org/grpc/preloader.go @@ -0,0 +1,64 @@ +/* + * + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// PreparedMsg is responsible for creating a Marshalled and Compressed object. +// +// This API is EXPERIMENTAL. 
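+//
+// Hedged usage sketch (editor's illustration; assumes a server stream ss
+// whose SendMsg accepts a *PreparedMsg, and a reply message to send):
+//
+//	var pm PreparedMsg
+//	if err := pm.Encode(ss, reply); err != nil {
+//		return err
+//	}
+//	return ss.SendMsg(&pm)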
+type PreparedMsg struct { + // Struct for preparing msg before sending them + encodedData []byte + hdr []byte + payload []byte +} + +// Encode marshalls and compresses the message using the codec and compressor for the stream. +func (p *PreparedMsg) Encode(s Stream, msg interface{}) error { + ctx := s.Context() + rpcInfo, ok := rpcInfoFromContext(ctx) + if !ok { + return status.Errorf(codes.Internal, "grpc: unable to get rpcInfo") + } + + // check if the context has the relevant information to prepareMsg + if rpcInfo.preloaderInfo == nil { + return status.Errorf(codes.Internal, "grpc: rpcInfo.preloaderInfo is nil") + } + if rpcInfo.preloaderInfo.codec == nil { + return status.Errorf(codes.Internal, "grpc: rpcInfo.preloaderInfo.codec is nil") + } + + // prepare the msg + data, err := encode(rpcInfo.preloaderInfo.codec, msg) + if err != nil { + return err + } + p.encodedData = data + compData, err := compress(data, rpcInfo.preloaderInfo.cp, rpcInfo.preloaderInfo.comp) + if err != nil { + return err + } + p.hdr, p.payload = msgHeader(data, compData) + return nil +} diff --git a/vendor/google.golang.org/grpc/proxy.go b/vendor/google.golang.org/grpc/proxy.go new file mode 100644 index 000000000..f8f69bfb7 --- /dev/null +++ b/vendor/google.golang.org/grpc/proxy.go @@ -0,0 +1,152 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "bufio" + "context" + "encoding/base64" + "errors" + "fmt" + "io" + "net" + "net/http" + "net/http/httputil" + "net/url" +) + +const proxyAuthHeaderKey = "Proxy-Authorization" + +var ( + // errDisabled indicates that proxy is disabled for the address. + errDisabled = errors.New("proxy is disabled for the address") + // The following variable will be overwritten in the tests. + httpProxyFromEnvironment = http.ProxyFromEnvironment +) + +func mapAddress(ctx context.Context, address string) (*url.URL, error) { + req := &http.Request{ + URL: &url.URL{ + Scheme: "https", + Host: address, + }, + } + url, err := httpProxyFromEnvironment(req) + if err != nil { + return nil, err + } + if url == nil { + return nil, errDisabled + } + return url, nil +} + +// To read a response from a net.Conn, http.ReadResponse() takes a bufio.Reader. +// It's possible that this reader reads more than what's need for the response and stores +// those bytes in the buffer. +// bufConn wraps the original net.Conn and the bufio.Reader to make sure we don't lose the +// bytes in the buffer. 
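+// Reads must therefore go through r (the bufio.Reader) first; reading from
+// the embedded net.Conn directly would silently drop any bytes the reader
+// buffered past the CONNECT response.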
+type bufConn struct {
+	net.Conn
+	r io.Reader
+}
+
+func (c *bufConn) Read(b []byte) (int, error) {
+	return c.r.Read(b)
+}
+
+func basicAuth(username, password string) string {
+	auth := username + ":" + password
+	return base64.StdEncoding.EncodeToString([]byte(auth))
+}
+
+func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, backendAddr string, proxyURL *url.URL) (_ net.Conn, err error) {
+	defer func() {
+		if err != nil {
+			conn.Close()
+		}
+	}()
+
+	req := &http.Request{
+		Method: http.MethodConnect,
+		URL:    &url.URL{Host: backendAddr},
+		Header: map[string][]string{"User-Agent": {grpcUA}},
+	}
+	if t := proxyURL.User; t != nil {
+		u := t.Username()
+		p, _ := t.Password()
+		req.Header.Add(proxyAuthHeaderKey, "Basic "+basicAuth(u, p))
+	}
+
+	if err := sendHTTPRequest(ctx, req, conn); err != nil {
+		return nil, fmt.Errorf("failed to write the HTTP request: %v", err)
+	}
+
+	r := bufio.NewReader(conn)
+	resp, err := http.ReadResponse(r, req)
+	if err != nil {
+		return nil, fmt.Errorf("reading server HTTP response: %v", err)
+	}
+	defer resp.Body.Close()
+	if resp.StatusCode != http.StatusOK {
+		dump, err := httputil.DumpResponse(resp, true)
+		if err != nil {
+			return nil, fmt.Errorf("failed to do connect handshake, status code: %s", resp.Status)
+		}
+		return nil, fmt.Errorf("failed to do connect handshake, response: %q", dump)
+	}
+
+	return &bufConn{Conn: conn, r: r}, nil
+}
+
+// newProxyDialer returns a dialer that connects to the proxy first if necessary.
+// The returned dialer checks whether a proxy is required, dials the proxy with the
+// provided dialer, performs the HTTP CONNECT handshake, and returns the connection.
+func newProxyDialer(dialer func(context.Context, string) (net.Conn, error)) func(context.Context, string) (net.Conn, error) {
+	return func(ctx context.Context, addr string) (conn net.Conn, err error) {
+		var newAddr string
+		proxyURL, err := mapAddress(ctx, addr)
+		if err != nil {
+			if err != errDisabled {
+				return nil, err
+			}
+			newAddr = addr
+		} else {
+			newAddr = proxyURL.Host
+		}
+
+		conn, err = dialer(ctx, newAddr)
+		if err != nil {
+			return
+		}
+		if proxyURL != nil {
+			// proxy is disabled if proxyURL is nil.
+			conn, err = doHTTPConnectHandshake(ctx, conn, addr, proxyURL)
+		}
+		return
+	}
+}
+
+func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error {
+	req = req.WithContext(ctx)
+	if err := req.Write(conn); err != nil {
+		return fmt.Errorf("failed to write the HTTP request: %v", err)
+	}
+	return nil
+}
diff --git a/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go
new file mode 100644
index 000000000..297492e87
--- /dev/null
+++ b/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go
@@ -0,0 +1,457 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package dns implements a DNS resolver to be installed as the default resolver
+// in gRPC.
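+//
+// Illustrative dial targets handled by this resolver (sketch):
+//
+//	dns:///foo.example.com:50051     // resolve via the system resolver
+//	dns://8.8.8.8:53/foo.example.com // resolve via a specific DNS authority
+//
+// Service config, when enabled, is read from a TXT record named
+// "_grpc_config.<host>" whose value starts with "grpc_config=".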
+package dns
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"net"
+	"os"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/internal/backoff"
+	"google.golang.org/grpc/internal/grpcrand"
+	"google.golang.org/grpc/resolver"
+)
+
+func init() {
+	resolver.Register(NewBuilder())
+}
+
+const (
+	defaultPort       = "443"
+	defaultFreq       = time.Minute * 30
+	defaultDNSSvrPort = "53"
+	golang            = "GO"
+	// txtPrefix is the prefix string to be prepended to the host name for txt record lookup.
+	txtPrefix = "_grpc_config."
+	// In DNS, service config is encoded in a TXT record via the mechanism
+	// described in RFC-1464 using the attribute name grpc_config.
+	txtAttribute = "grpc_config="
+)
+
+var (
+	errMissingAddr = errors.New("dns resolver: missing address")
+
+	// Addresses ending with a colon that is supposed to be the separator
+	// between host and port are not allowed. E.g. "::" is a valid address as
+	// it is an IPv6 address (host only), and "[::]:" is invalid as it ends
+	// with a colon acting as the host-port separator.
+	errEndsWithColon = errors.New("dns resolver: missing port after port-separator colon")
+)
+
+var (
+	defaultResolver netResolver = net.DefaultResolver
+	// To prevent excessive re-resolution, we enforce a rate limit on DNS
+	// resolution requests.
+	minDNSResRate = 30 * time.Second
+)
+
+var customAuthorityDialler = func(authority string) func(ctx context.Context, network, address string) (net.Conn, error) {
+	return func(ctx context.Context, network, address string) (net.Conn, error) {
+		var dialer net.Dialer
+		return dialer.DialContext(ctx, network, authority)
+	}
+}
+
+var customAuthorityResolver = func(authority string) (netResolver, error) {
+	host, port, err := parseTarget(authority, defaultDNSSvrPort)
+	if err != nil {
+		return nil, err
+	}
+
+	authorityWithPort := net.JoinHostPort(host, port)
+
+	return &net.Resolver{
+		PreferGo: true,
+		Dial:     customAuthorityDialler(authorityWithPort),
+	}, nil
+}
+
+// NewBuilder creates a dnsBuilder which is used to create DNS resolvers.
+func NewBuilder() resolver.Builder {
+	return &dnsBuilder{minFreq: defaultFreq}
+}
+
+type dnsBuilder struct {
+	// minimum frequency of polling the DNS server.
+	minFreq time.Duration
+}
+
+// Build creates and starts a DNS resolver that watches the name resolution of the target.
+func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOption) (resolver.Resolver, error) {
+	host, port, err := parseTarget(target.Endpoint, defaultPort)
+	if err != nil {
+		return nil, err
+	}
+
+	// IP address.
+	if net.ParseIP(host) != nil {
+		host, _ = formatIP(host)
+		addr := []resolver.Address{{Addr: host + ":" + port}}
+		i := &ipResolver{
+			cc: cc,
+			ip: addr,
+			rn: make(chan struct{}, 1),
+			q:  make(chan struct{}),
+		}
+		cc.NewAddress(addr)
+		go i.watcher()
+		return i, nil
+	}
+
+	// DNS address (non-IP).
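+	// Construct the polling resolver: the timer fires immediately so the
+	// first lookup happens right away, the exponential backoff (capped at
+	// b.minFreq) governs retries on empty results, and the cancelable
+	// context lets Close() stop the watcher goroutine.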
+	ctx, cancel := context.WithCancel(context.Background())
+	d := &dnsResolver{
+		freq:                 b.minFreq,
+		backoff:              backoff.Exponential{MaxDelay: b.minFreq},
+		host:                 host,
+		port:                 port,
+		ctx:                  ctx,
+		cancel:               cancel,
+		cc:                   cc,
+		t:                    time.NewTimer(0),
+		rn:                   make(chan struct{}, 1),
+		disableServiceConfig: opts.DisableServiceConfig,
+	}
+
+	if target.Authority == "" {
+		d.resolver = defaultResolver
+	} else {
+		d.resolver, err = customAuthorityResolver(target.Authority)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	d.wg.Add(1)
+	go d.watcher()
+	return d, nil
+}
+
+// Scheme returns the naming scheme of this resolver builder, which is "dns".
+func (b *dnsBuilder) Scheme() string {
+	return "dns"
+}
+
+type netResolver interface {
+	LookupHost(ctx context.Context, host string) (addrs []string, err error)
+	LookupSRV(ctx context.Context, service, proto, name string) (cname string, addrs []*net.SRV, err error)
+	LookupTXT(ctx context.Context, name string) (txts []string, err error)
+}
+
+// ipResolver watches for the name resolution update for an IP address.
+type ipResolver struct {
+	cc resolver.ClientConn
+	ip []resolver.Address
+	// rn channel is used by ResolveNow() to force an immediate resolution of the target.
+	rn chan struct{}
+	q  chan struct{}
+}
+
+// ResolveNow resends the address it stores; no resolution is needed.
+func (i *ipResolver) ResolveNow(opt resolver.ResolveNowOption) {
+	select {
+	case i.rn <- struct{}{}:
+	default:
+	}
+}
+
+// Close closes the ipResolver.
+func (i *ipResolver) Close() {
+	close(i.q)
+}
+
+func (i *ipResolver) watcher() {
+	for {
+		select {
+		case <-i.rn:
+			i.cc.NewAddress(i.ip)
+		case <-i.q:
+			return
+		}
+	}
+}
+
+// dnsResolver watches for the name resolution update for a non-IP target.
+type dnsResolver struct {
+	freq       time.Duration
+	backoff    backoff.Exponential
+	retryCount int
+	host       string
+	port       string
+	resolver   netResolver
+	ctx        context.Context
+	cancel     context.CancelFunc
+	cc         resolver.ClientConn
+	// rn channel is used by ResolveNow() to force an immediate resolution of the target.
+	rn chan struct{}
+	t  *time.Timer
+	// wg is used to enforce that Close() returns only after the watcher()
+	// goroutine has finished; otherwise a data race would be possible.
+	// [Race Example] In dns_resolver_test we replace the real lookup
+	// functions with mocked ones to facilitate testing. If Close() didn't
+	// wait for the watcher() goroutine to finish, the race detector would
+	// sometimes warn that the lookup reads inside the watcher() goroutine
+	// race with the writes performed by replaceNetFunc.
+	wg                   sync.WaitGroup
+	disableServiceConfig bool
+}
+
+// ResolveNow invokes an immediate resolution of the target that this dnsResolver watches.
+func (d *dnsResolver) ResolveNow(opt resolver.ResolveNowOption) {
+	select {
+	case d.rn <- struct{}{}:
+	default:
+	}
+}
+
+// Close closes the dnsResolver.
+func (d *dnsResolver) Close() {
+	d.cancel()
+	d.wg.Wait()
+	d.t.Stop()
+}
+
+func (d *dnsResolver) watcher() {
+	defer d.wg.Done()
+	for {
+		select {
+		case <-d.ctx.Done():
+			return
+		case <-d.t.C:
+		case <-d.rn:
+			if !d.t.Stop() {
+				// Before resetting a timer, it should be stopped to prevent racing with
+				// reads on its channel.
+				<-d.t.C
+			}
+		}
+
+		result, sc := d.lookup()
+		// The next lookup should happen within an interval defined by d.freq. It may
+		// happen more often due to exponential retry on an empty address list.
+ if len(result) == 0 { + d.retryCount++ + d.t.Reset(d.backoff.Backoff(d.retryCount)) + } else { + d.retryCount = 0 + d.t.Reset(d.freq) + } + d.cc.NewServiceConfig(sc) + d.cc.NewAddress(result) + + // Sleep to prevent excessive re-resolutions. Incoming resolution requests + // will be queued in d.rn. + t := time.NewTimer(minDNSResRate) + select { + case <-t.C: + case <-d.ctx.Done(): + t.Stop() + return + } + } +} + +func (d *dnsResolver) lookupSRV() []resolver.Address { + var newAddrs []resolver.Address + _, srvs, err := d.resolver.LookupSRV(d.ctx, "grpclb", "tcp", d.host) + if err != nil { + grpclog.Infof("grpc: failed dns SRV record lookup due to %v.\n", err) + return nil + } + for _, s := range srvs { + lbAddrs, err := d.resolver.LookupHost(d.ctx, s.Target) + if err != nil { + grpclog.Infof("grpc: failed load balancer address dns lookup due to %v.\n", err) + continue + } + for _, a := range lbAddrs { + a, ok := formatIP(a) + if !ok { + grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err) + continue + } + addr := a + ":" + strconv.Itoa(int(s.Port)) + newAddrs = append(newAddrs, resolver.Address{Addr: addr, Type: resolver.GRPCLB, ServerName: s.Target}) + } + } + return newAddrs +} + +func (d *dnsResolver) lookupTXT() string { + ss, err := d.resolver.LookupTXT(d.ctx, txtPrefix+d.host) + if err != nil { + grpclog.Infof("grpc: failed dns TXT record lookup due to %v.\n", err) + return "" + } + var res string + for _, s := range ss { + res += s + } + + // TXT record must have "grpc_config=" attribute in order to be used as service config. + if !strings.HasPrefix(res, txtAttribute) { + grpclog.Warningf("grpc: TXT record %v missing %v attribute", res, txtAttribute) + return "" + } + return strings.TrimPrefix(res, txtAttribute) +} + +func (d *dnsResolver) lookupHost() []resolver.Address { + var newAddrs []resolver.Address + addrs, err := d.resolver.LookupHost(d.ctx, d.host) + if err != nil { + grpclog.Warningf("grpc: failed dns A record lookup due to %v.\n", err) + return nil + } + for _, a := range addrs { + a, ok := formatIP(a) + if !ok { + grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err) + continue + } + addr := a + ":" + d.port + newAddrs = append(newAddrs, resolver.Address{Addr: addr}) + } + return newAddrs +} + +func (d *dnsResolver) lookup() ([]resolver.Address, string) { + newAddrs := d.lookupSRV() + // Support fallback to non-balancer address. + newAddrs = append(newAddrs, d.lookupHost()...) + if d.disableServiceConfig { + return newAddrs, "" + } + sc := d.lookupTXT() + return newAddrs, canaryingSC(sc) +} + +// formatIP returns ok = false if addr is not a valid textual representation of an IP address. +// If addr is an IPv4 address, return the addr and ok = true. +// If addr is an IPv6 address, return the addr enclosed in square brackets and ok = true. +func formatIP(addr string) (addrIP string, ok bool) { + ip := net.ParseIP(addr) + if ip == nil { + return "", false + } + if ip.To4() != nil { + return addr, true + } + return "[" + addr + "]", true +} + +// parseTarget takes the user input target string and default port, returns formatted host and port info. +// If target doesn't specify a port, set the port to be the defaultPort. +// If target is in IPv6 format and host-name is enclosed in square brackets, brackets +// are stripped when setting the host. 
+// examples: +// target: "www.google.com" defaultPort: "443" returns host: "www.google.com", port: "443" +// target: "ipv4-host:80" defaultPort: "443" returns host: "ipv4-host", port: "80" +// target: "[ipv6-host]" defaultPort: "443" returns host: "ipv6-host", port: "443" +// target: ":80" defaultPort: "443" returns host: "localhost", port: "80" +func parseTarget(target, defaultPort string) (host, port string, err error) { + if target == "" { + return "", "", errMissingAddr + } + if ip := net.ParseIP(target); ip != nil { + // target is an IPv4 or IPv6(without brackets) address + return target, defaultPort, nil + } + if host, port, err = net.SplitHostPort(target); err == nil { + if port == "" { + // If the port field is empty (target ends with colon), e.g. "[::1]:", this is an error. + return "", "", errEndsWithColon + } + // target has port, i.e ipv4-host:port, [ipv6-host]:port, host-name:port + if host == "" { + // Keep consistent with net.Dial(): If the host is empty, as in ":80", the local system is assumed. + host = "localhost" + } + return host, port, nil + } + if host, port, err = net.SplitHostPort(target + ":" + defaultPort); err == nil { + // target doesn't have port + return host, port, nil + } + return "", "", fmt.Errorf("invalid target address %v, error info: %v", target, err) +} + +type rawChoice struct { + ClientLanguage *[]string `json:"clientLanguage,omitempty"` + Percentage *int `json:"percentage,omitempty"` + ClientHostName *[]string `json:"clientHostName,omitempty"` + ServiceConfig *json.RawMessage `json:"serviceConfig,omitempty"` +} + +func containsString(a *[]string, b string) bool { + if a == nil { + return true + } + for _, c := range *a { + if c == b { + return true + } + } + return false +} + +func chosenByPercentage(a *int) bool { + if a == nil { + return true + } + return grpcrand.Intn(100)+1 <= *a +} + +func canaryingSC(js string) string { + if js == "" { + return "" + } + var rcs []rawChoice + err := json.Unmarshal([]byte(js), &rcs) + if err != nil { + grpclog.Warningf("grpc: failed to parse service config json string due to %v.\n", err) + return "" + } + cliHostname, err := os.Hostname() + if err != nil { + grpclog.Warningf("grpc: failed to get client hostname due to %v.\n", err) + return "" + } + var sc string + for _, c := range rcs { + if !containsString(c.ClientLanguage, golang) || + !chosenByPercentage(c.Percentage) || + !containsString(c.ClientHostName, cliHostname) || + c.ServiceConfig == nil { + continue + } + sc = string(*c.ServiceConfig) + break + } + return sc +} diff --git a/vendor/google.golang.org/grpc/resolver/passthrough/passthrough.go b/vendor/google.golang.org/grpc/resolver/passthrough/passthrough.go new file mode 100644 index 000000000..893d5d12c --- /dev/null +++ b/vendor/google.golang.org/grpc/resolver/passthrough/passthrough.go @@ -0,0 +1,57 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package passthrough implements a pass-through resolver. 
It sends the target
+// name without scheme back to gRPC as the resolved address.
+package passthrough
+
+import "google.golang.org/grpc/resolver"
+
+const scheme = "passthrough"
+
+type passthroughBuilder struct{}
+
+func (*passthroughBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOption) (resolver.Resolver, error) {
+	r := &passthroughResolver{
+		target: target,
+		cc:     cc,
+	}
+	r.start()
+	return r, nil
+}
+
+func (*passthroughBuilder) Scheme() string {
+	return scheme
+}
+
+type passthroughResolver struct {
+	target resolver.Target
+	cc     resolver.ClientConn
+}
+
+func (r *passthroughResolver) start() {
+	r.cc.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: r.target.Endpoint}}})
+}
+
+func (*passthroughResolver) ResolveNow(o resolver.ResolveNowOption) {}
+
+func (*passthroughResolver) Close() {}
+
+func init() {
+	resolver.Register(&passthroughBuilder{})
+}
diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go
new file mode 100644
index 000000000..e83da346a
--- /dev/null
+++ b/vendor/google.golang.org/grpc/resolver/resolver.go
@@ -0,0 +1,193 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package resolver defines APIs for name resolution in gRPC.
+// All APIs in this package are experimental.
+package resolver
+
+import (
+	"google.golang.org/grpc/serviceconfig"
+)
+
+var (
+	// m is a map from scheme to resolver builder.
+	m = make(map[string]Builder)
+	// defaultScheme is the default scheme to use.
+	defaultScheme = "passthrough"
+)
+
+// TODO(bar) install dns resolver in init(){}.
+
+// Register registers the resolver builder to the resolver map. b.Scheme will be
+// used as the scheme registered with this builder.
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. If multiple Resolvers are
+// registered with the same name, the one registered last will take effect.
+func Register(b Builder) {
+	m[b.Scheme()] = b
+}
+
+// Get returns the resolver builder registered with the given scheme.
+//
+// If no builder is registered with the scheme, nil will be returned.
+func Get(scheme string) Builder {
+	if b, ok := m[scheme]; ok {
+		return b
+	}
+	return nil
+}
+
+// SetDefaultScheme sets the default scheme that will be used. The default
+// scheme is initially "passthrough".
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. The scheme set last overrides
+// previously set values.
+func SetDefaultScheme(scheme string) {
+	defaultScheme = scheme
+}
+
+// GetDefaultScheme gets the default scheme that will be used.
+func GetDefaultScheme() string {
+	return defaultScheme
+}
+
+// AddressType indicates the address type returned by name resolution.
+type AddressType uint8
+
+const (
+	// Backend indicates the address is for a backend server.
+ Backend AddressType = iota + // GRPCLB indicates the address is for a grpclb load balancer. + GRPCLB +) + +// Address represents a server the client connects to. +// This is the EXPERIMENTAL API and may be changed or extended in the future. +type Address struct { + // Addr is the server address on which a connection will be established. + Addr string + // Type is the type of this address. + Type AddressType + // ServerName is the name of this address. + // + // e.g. if Type is GRPCLB, ServerName should be the name of the remote load + // balancer, not the name of the backend. + ServerName string + // Metadata is the information associated with Addr, which may be used + // to make load balancing decision. + Metadata interface{} +} + +// BuildOption includes additional information for the builder to create +// the resolver. +type BuildOption struct { + // DisableServiceConfig indicates whether resolver should fetch service config data. + DisableServiceConfig bool +} + +// State contains the current Resolver state relevant to the ClientConn. +type State struct { + Addresses []Address // Resolved addresses for the target + // ServiceConfig is the parsed service config; obtained from + // serviceconfig.Parse. + ServiceConfig serviceconfig.Config + + // TODO: add Err error +} + +// ClientConn contains the callbacks for resolver to notify any updates +// to the gRPC ClientConn. +// +// This interface is to be implemented by gRPC. Users should not need a +// brand new implementation of this interface. For the situations like +// testing, the new implementation should embed this interface. This allows +// gRPC to add new methods to this interface. +type ClientConn interface { + // UpdateState updates the state of the ClientConn appropriately. + UpdateState(State) + // NewAddress is called by resolver to notify ClientConn a new list + // of resolved addresses. + // The address list should be the complete list of resolved addresses. + // + // Deprecated: Use UpdateState instead. + NewAddress(addresses []Address) + // NewServiceConfig is called by resolver to notify ClientConn a new + // service config. The service config should be provided as a json string. + // + // Deprecated: Use UpdateState instead. + NewServiceConfig(serviceConfig string) +} + +// Target represents a target for gRPC, as specified in: +// https://github.com/grpc/grpc/blob/master/doc/naming.md. +// It is parsed from the target string that gets passed into Dial or DialContext by the user. And +// grpc passes it to the resolver and the balancer. +// +// If the target follows the naming spec, and the parsed scheme is registered with grpc, we will +// parse the target string according to the spec. e.g. "dns://some_authority/foo.bar" will be parsed +// into &Target{Scheme: "dns", Authority: "some_authority", Endpoint: "foo.bar"} +// +// If the target does not contain a scheme, we will apply the default scheme, and set the Target to +// be the full target string. e.g. "foo.bar" will be parsed into +// &Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "foo.bar"}. +// +// If the parsed scheme is not registered (i.e. no corresponding resolver available to resolve the +// endpoint), we set the Scheme to be the default scheme, and set the Endpoint to be the full target +// string. e.g. target string "unknown_scheme://authority/endpoint" will be parsed into +// &Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "unknown_scheme://authority/endpoint"}. 
+type Target struct {
+	Scheme    string
+	Authority string
+	Endpoint  string
+}
+
+// Builder creates a resolver that will be used to watch name resolution updates.
+type Builder interface {
+	// Build creates a new resolver for the given target.
+	//
+	// gRPC dial calls Build synchronously, and fails if the returned error is
+	// not nil.
+	Build(target Target, cc ClientConn, opts BuildOption) (Resolver, error)
+	// Scheme returns the scheme supported by this resolver.
+	// Scheme is defined at https://github.com/grpc/grpc/blob/master/doc/naming.md.
+	Scheme() string
+}
+
+// ResolveNowOption includes additional information for ResolveNow.
+type ResolveNowOption struct{}
+
+// Resolver watches for the updates on the specified target.
+// Updates include address updates and service config updates.
+type Resolver interface {
+	// ResolveNow will be called by gRPC to try to resolve the target name
+	// again. It's just a hint; the resolver can ignore it if it's not necessary.
+	//
+	// It could be called multiple times concurrently.
+	ResolveNow(ResolveNowOption)
+	// Close closes the resolver.
+	Close()
+}
+
+// UnregisterForTesting removes the resolver builder with the given scheme from the
+// resolver map.
+// This function is for testing only.
+func UnregisterForTesting(scheme string) {
+	delete(m, scheme)
+}
diff --git a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go
new file mode 100644
index 000000000..6934905b0
--- /dev/null
+++ b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go
@@ -0,0 +1,168 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import (
+	"fmt"
+	"strings"
+	"sync/atomic"
+
+	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/internal/channelz"
+	"google.golang.org/grpc/resolver"
+)
+
+// ccResolverWrapper is a wrapper on top of cc for resolvers.
+// It implements the resolver.ClientConn interface.
+type ccResolverWrapper struct {
+	cc       *ClientConn
+	resolver resolver.Resolver
+	addrCh   chan []resolver.Address
+	scCh     chan string
+	done     uint32 // accessed atomically; set to 1 when closed.
+	curState resolver.State
+}
+
+// split2 returns the values from strings.SplitN(s, sep, 2).
+// If sep is not found, it returns ("", "", false) instead.
+func split2(s, sep string) (string, string, bool) {
+	spl := strings.SplitN(s, sep, 2)
+	if len(spl) < 2 {
+		return "", "", false
+	}
+	return spl[0], spl[1], true
+}
+
+// parseTarget splits target into a struct containing scheme, authority and
+// endpoint.
+//
+// If target is not a valid scheme://authority/endpoint, it returns {Endpoint:
+// target}.
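+//
+// For example (sketch):
+//
+//	parseTarget("dns://8.8.8.8/foo.bar") // {Scheme: "dns", Authority: "8.8.8.8", Endpoint: "foo.bar"}
+//	parseTarget("dns:///foo.bar")        // {Scheme: "dns", Authority: "", Endpoint: "foo.bar"}
+//	parseTarget("foo.bar:1234")          // {Endpoint: "foo.bar:1234"}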
+func parseTarget(target string) (ret resolver.Target) { + var ok bool + ret.Scheme, ret.Endpoint, ok = split2(target, "://") + if !ok { + return resolver.Target{Endpoint: target} + } + ret.Authority, ret.Endpoint, ok = split2(ret.Endpoint, "/") + if !ok { + return resolver.Target{Endpoint: target} + } + return ret +} + +// newCCResolverWrapper parses cc.target for scheme and gets the resolver +// builder for this scheme and builds the resolver. The monitoring goroutine +// for it is not started yet and can be created by calling start(). +// +// If withResolverBuilder dial option is set, the specified resolver will be +// used instead. +func newCCResolverWrapper(cc *ClientConn) (*ccResolverWrapper, error) { + rb := cc.dopts.resolverBuilder + if rb == nil { + return nil, fmt.Errorf("could not get resolver for scheme: %q", cc.parsedTarget.Scheme) + } + + ccr := &ccResolverWrapper{ + cc: cc, + addrCh: make(chan []resolver.Address, 1), + scCh: make(chan string, 1), + } + + var err error + ccr.resolver, err = rb.Build(cc.parsedTarget, ccr, resolver.BuildOption{DisableServiceConfig: cc.dopts.disableServiceConfig}) + if err != nil { + return nil, err + } + return ccr, nil +} + +func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOption) { + ccr.resolver.ResolveNow(o) +} + +func (ccr *ccResolverWrapper) close() { + ccr.resolver.Close() + atomic.StoreUint32(&ccr.done, 1) +} + +func (ccr *ccResolverWrapper) isDone() bool { + return atomic.LoadUint32(&ccr.done) == 1 +} + +func (ccr *ccResolverWrapper) UpdateState(s resolver.State) { + if ccr.isDone() { + return + } + grpclog.Infof("ccResolverWrapper: sending update to cc: %v", s) + if channelz.IsOn() { + ccr.addChannelzTraceEvent(s) + } + ccr.cc.updateResolverState(s) + ccr.curState = s +} + +// NewAddress is called by the resolver implementation to send addresses to gRPC. +func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) { + if ccr.isDone() { + return + } + grpclog.Infof("ccResolverWrapper: sending new addresses to cc: %v", addrs) + if channelz.IsOn() { + ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig}) + } + ccr.curState.Addresses = addrs + ccr.cc.updateResolverState(ccr.curState) +} + +// NewServiceConfig is called by the resolver implementation to send service +// configs to gRPC. 
+func (ccr *ccResolverWrapper) NewServiceConfig(sc string) { + if ccr.isDone() { + return + } + grpclog.Infof("ccResolverWrapper: got new service config: %v", sc) + c, err := parseServiceConfig(sc) + if err != nil { + return + } + if channelz.IsOn() { + ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: c}) + } + ccr.curState.ServiceConfig = c + ccr.cc.updateResolverState(ccr.curState) +} + +func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) { + var updates []string + oldSC, oldOK := ccr.curState.ServiceConfig.(*ServiceConfig) + newSC, newOK := s.ServiceConfig.(*ServiceConfig) + if oldOK != newOK || (oldOK && newOK && oldSC.rawJSONString != newSC.rawJSONString) { + updates = append(updates, "service config updated") + } + if len(ccr.curState.Addresses) > 0 && len(s.Addresses) == 0 { + updates = append(updates, "resolver returned an empty address list") + } else if len(ccr.curState.Addresses) == 0 && len(s.Addresses) > 0 { + updates = append(updates, "resolver returned new addresses") + } + channelz.AddTraceEvent(ccr.cc.channelzID, &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Resolver state updated: %+v (%v)", s, strings.Join(updates, "; ")), + Severity: channelz.CtINFO, + }) +} diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go new file mode 100644 index 000000000..088c3f1b2 --- /dev/null +++ b/vendor/google.golang.org/grpc/rpc_util.go @@ -0,0 +1,863 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "bytes" + "compress/gzip" + "context" + "encoding/binary" + "fmt" + "io" + "io/ioutil" + "math" + "net/url" + "strings" + "sync" + "time" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/encoding" + "google.golang.org/grpc/encoding/proto" + "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" +) + +// Compressor defines the interface gRPC uses to compress a message. +// +// Deprecated: use package encoding. +type Compressor interface { + // Do compresses p into w. + Do(w io.Writer, p []byte) error + // Type returns the compression algorithm the Compressor uses. + Type() string +} + +type gzipCompressor struct { + pool sync.Pool +} + +// NewGZIPCompressor creates a Compressor based on GZIP. +// +// Deprecated: use package encoding/gzip. +func NewGZIPCompressor() Compressor { + c, _ := NewGZIPCompressorWithLevel(gzip.DefaultCompression) + return c +} + +// NewGZIPCompressorWithLevel is like NewGZIPCompressor but specifies the gzip compression level instead +// of assuming DefaultCompression. +// +// The error returned will be nil if the level is valid. +// +// Deprecated: use package encoding/gzip. 
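+//
+// For example (sketch of this deprecated API):
+//
+//	c, err := NewGZIPCompressorWithLevel(gzip.BestSpeed)
+//	if err != nil {
+//		// handle invalid level
+//	}
+//	conn, err := Dial(target, WithCompressor(c))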
+func NewGZIPCompressorWithLevel(level int) (Compressor, error) {
+	if level < gzip.DefaultCompression || level > gzip.BestCompression {
+		return nil, fmt.Errorf("grpc: invalid compression level: %d", level)
+	}
+	return &gzipCompressor{
+		pool: sync.Pool{
+			New: func() interface{} {
+				w, err := gzip.NewWriterLevel(ioutil.Discard, level)
+				if err != nil {
+					panic(err)
+				}
+				return w
+			},
+		},
+	}, nil
+}
+
+func (c *gzipCompressor) Do(w io.Writer, p []byte) error {
+	z := c.pool.Get().(*gzip.Writer)
+	defer c.pool.Put(z)
+	z.Reset(w)
+	if _, err := z.Write(p); err != nil {
+		return err
+	}
+	return z.Close()
+}
+
+func (c *gzipCompressor) Type() string {
+	return "gzip"
+}
+
+// Decompressor defines the interface gRPC uses to decompress a message.
+//
+// Deprecated: use package encoding.
+type Decompressor interface {
+	// Do reads the data from r and uncompresses it.
+	Do(r io.Reader) ([]byte, error)
+	// Type returns the compression algorithm the Decompressor uses.
+	Type() string
+}
+
+type gzipDecompressor struct {
+	pool sync.Pool
+}
+
+// NewGZIPDecompressor creates a Decompressor based on GZIP.
+//
+// Deprecated: use package encoding/gzip.
+func NewGZIPDecompressor() Decompressor {
+	return &gzipDecompressor{}
+}
+
+func (d *gzipDecompressor) Do(r io.Reader) ([]byte, error) {
+	var z *gzip.Reader
+	switch maybeZ := d.pool.Get().(type) {
+	case nil:
+		newZ, err := gzip.NewReader(r)
+		if err != nil {
+			return nil, err
+		}
+		z = newZ
+	case *gzip.Reader:
+		z = maybeZ
+		if err := z.Reset(r); err != nil {
+			d.pool.Put(z)
+			return nil, err
+		}
+	}
+
+	defer func() {
+		z.Close()
+		d.pool.Put(z)
+	}()
+	return ioutil.ReadAll(z)
+}
+
+func (d *gzipDecompressor) Type() string {
+	return "gzip"
+}
+
+// callInfo contains all related configuration and information about an RPC.
+type callInfo struct {
+	compressorType        string
+	failFast              bool
+	stream                ClientStream
+	maxReceiveMessageSize *int
+	maxSendMessageSize    *int
+	creds                 credentials.PerRPCCredentials
+	contentSubtype        string
+	codec                 baseCodec
+	maxRetryRPCBufferSize int
+}
+
+func defaultCallInfo() *callInfo {
+	return &callInfo{
+		failFast:              true,
+		maxRetryRPCBufferSize: 256 * 1024, // 256KB
+	}
+}
+
+// CallOption configures a Call before it starts or extracts information from
+// a Call after it completes.
+type CallOption interface {
+	// before is called before the call is sent to any server. If before
+	// returns a non-nil error, the RPC fails with that error.
+	before(*callInfo) error
+
+	// after is called after the call has completed. after cannot return an
+	// error, so any failures should be reported via output parameters.
+	after(*callInfo)
+}
+
+// EmptyCallOption does not alter the Call configuration.
+// It can be embedded in another structure to carry satellite data for use
+// by interceptors.
+type EmptyCallOption struct{}
+
+func (EmptyCallOption) before(*callInfo) error { return nil }
+func (EmptyCallOption) after(*callInfo)        {}
+
+// Header returns a CallOption that retrieves the header metadata
+// for a unary RPC.
+func Header(md *metadata.MD) CallOption {
+	return HeaderCallOption{HeaderAddr: md}
+}
+
+// HeaderCallOption is a CallOption for collecting response header metadata.
+// The metadata field will be populated *after* the RPC completes.
+// This is an EXPERIMENTAL API.
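+//
+// For example (sketch), collecting headers from a unary call:
+//
+//	var hdr metadata.MD
+//	err := conn.Invoke(ctx, "/service/method", req, reply, Header(&hdr))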
+type HeaderCallOption struct {
+	HeaderAddr *metadata.MD
+}
+
+func (o HeaderCallOption) before(c *callInfo) error { return nil }
+func (o HeaderCallOption) after(c *callInfo) {
+	if c.stream != nil {
+		*o.HeaderAddr, _ = c.stream.Header()
+	}
+}
+
+// Trailer returns a CallOption that retrieves the trailer metadata
+// for a unary RPC.
+func Trailer(md *metadata.MD) CallOption {
+	return TrailerCallOption{TrailerAddr: md}
+}
+
+// TrailerCallOption is a CallOption for collecting response trailer metadata.
+// The metadata field will be populated *after* the RPC completes.
+// This is an EXPERIMENTAL API.
+type TrailerCallOption struct {
+	TrailerAddr *metadata.MD
+}
+
+func (o TrailerCallOption) before(c *callInfo) error { return nil }
+func (o TrailerCallOption) after(c *callInfo) {
+	if c.stream != nil {
+		*o.TrailerAddr = c.stream.Trailer()
+	}
+}
+
+// Peer returns a CallOption that retrieves peer information for a unary RPC.
+// The peer field will be populated *after* the RPC completes.
+func Peer(p *peer.Peer) CallOption {
+	return PeerCallOption{PeerAddr: p}
+}
+
+// PeerCallOption is a CallOption for collecting the identity of the remote
+// peer. The peer field will be populated *after* the RPC completes.
+// This is an EXPERIMENTAL API.
+type PeerCallOption struct {
+	PeerAddr *peer.Peer
+}
+
+func (o PeerCallOption) before(c *callInfo) error { return nil }
+func (o PeerCallOption) after(c *callInfo) {
+	if c.stream != nil {
+		if x, ok := peer.FromContext(c.stream.Context()); ok {
+			*o.PeerAddr = *x
+		}
+	}
+}
+
+// WaitForReady configures the action to take when an RPC is attempted on broken
+// connections or unreachable servers. If waitForReady is false, the RPC will fail
+// immediately. Otherwise, the RPC client will block the call until a
+// connection is available (or the call is canceled or times out) and will
+// retry the call if it fails due to a transient error. gRPC will not retry if
+// data was written to the wire unless the server indicates it did not process
+// the data. Please refer to
+// https://github.com/grpc/grpc/blob/master/doc/wait-for-ready.md.
+//
+// By default, RPCs don't "wait for ready".
+func WaitForReady(waitForReady bool) CallOption {
+	return FailFastCallOption{FailFast: !waitForReady}
+}
+
+// FailFast is the opposite of WaitForReady.
+//
+// Deprecated: use WaitForReady.
+func FailFast(failFast bool) CallOption {
+	return FailFastCallOption{FailFast: failFast}
+}
+
+// FailFastCallOption is a CallOption for indicating whether an RPC should fail
+// fast or not.
+// This is an EXPERIMENTAL API.
+type FailFastCallOption struct {
+	FailFast bool
+}
+
+func (o FailFastCallOption) before(c *callInfo) error {
+	c.failFast = o.FailFast
+	return nil
+}
+func (o FailFastCallOption) after(c *callInfo) {}
+
+// MaxCallRecvMsgSize returns a CallOption which sets the maximum message size the client can receive.
+func MaxCallRecvMsgSize(s int) CallOption {
+	return MaxRecvMsgSizeCallOption{MaxRecvMsgSize: s}
+}
+
+// MaxRecvMsgSizeCallOption is a CallOption that indicates the maximum message
+// size the client can receive.
+// This is an EXPERIMENTAL API.
+type MaxRecvMsgSizeCallOption struct {
+	MaxRecvMsgSize int
+}
+
+func (o MaxRecvMsgSizeCallOption) before(c *callInfo) error {
+	c.maxReceiveMessageSize = &o.MaxRecvMsgSize
+	return nil
+}
+func (o MaxRecvMsgSizeCallOption) after(c *callInfo) {}
+
+// MaxCallSendMsgSize returns a CallOption which sets the maximum message size the client can send.
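+//
+// For example (sketch), permitting a single oversized request:
+//
+//	err := conn.Invoke(ctx, "/service/method", req, reply, MaxCallSendMsgSize(16*1024*1024))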
+func MaxCallSendMsgSize(s int) CallOption { + return MaxSendMsgSizeCallOption{MaxSendMsgSize: s} +} + +// MaxSendMsgSizeCallOption is a CallOption that indicates the maximum message +// size the client can send. +// This is an EXPERIMENTAL API. +type MaxSendMsgSizeCallOption struct { + MaxSendMsgSize int +} + +func (o MaxSendMsgSizeCallOption) before(c *callInfo) error { + c.maxSendMessageSize = &o.MaxSendMsgSize + return nil +} +func (o MaxSendMsgSizeCallOption) after(c *callInfo) {} + +// PerRPCCredentials returns a CallOption that sets credentials.PerRPCCredentials +// for a call. +func PerRPCCredentials(creds credentials.PerRPCCredentials) CallOption { + return PerRPCCredsCallOption{Creds: creds} +} + +// PerRPCCredsCallOption is a CallOption that indicates the per-RPC +// credentials to use for the call. +// This is an EXPERIMENTAL API. +type PerRPCCredsCallOption struct { + Creds credentials.PerRPCCredentials +} + +func (o PerRPCCredsCallOption) before(c *callInfo) error { + c.creds = o.Creds + return nil +} +func (o PerRPCCredsCallOption) after(c *callInfo) {} + +// UseCompressor returns a CallOption which sets the compressor used when +// sending the request. If WithCompressor is also set, UseCompressor has +// higher priority. +// +// This API is EXPERIMENTAL. +func UseCompressor(name string) CallOption { + return CompressorCallOption{CompressorType: name} +} + +// CompressorCallOption is a CallOption that indicates the compressor to use. +// This is an EXPERIMENTAL API. +type CompressorCallOption struct { + CompressorType string +} + +func (o CompressorCallOption) before(c *callInfo) error { + c.compressorType = o.CompressorType + return nil +} +func (o CompressorCallOption) after(c *callInfo) {} + +// CallContentSubtype returns a CallOption that will set the content-subtype +// for a call. For example, if content-subtype is "json", the Content-Type over +// the wire will be "application/grpc+json". The content-subtype is converted +// to lowercase before being included in Content-Type. See Content-Type on +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. +// +// If ForceCodec is not also used, the content-subtype will be used to look up +// the Codec to use in the registry controlled by RegisterCodec. See the +// documentation on RegisterCodec for details on registration. The lookup of +// content-subtype is case-insensitive. If no such Codec is found, the call +// will result in an error with code codes.Internal. +// +// If ForceCodec is also used, that Codec will be used for all request and +// response messages, with the content-subtype set to the given contentSubtype +// here for requests. +func CallContentSubtype(contentSubtype string) CallOption { + return ContentSubtypeCallOption{ContentSubtype: strings.ToLower(contentSubtype)} +} + +// ContentSubtypeCallOption is a CallOption that indicates the content-subtype +// used for marshaling messages. +// This is an EXPERIMENTAL API. +type ContentSubtypeCallOption struct { + ContentSubtype string +} + +func (o ContentSubtypeCallOption) before(c *callInfo) error { + c.contentSubtype = o.ContentSubtype + return nil +} +func (o ContentSubtypeCallOption) after(c *callInfo) {} + +// ForceCodec returns a CallOption that will set the given Codec to be +// used for all request and response messages for a call. The result of calling +// String() will be used as the content-subtype in a case-insensitive manner. 
+// +// See Content-Type on +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. Also see the documentation on RegisterCodec and +// CallContentSubtype for more details on the interaction between Codec and +// content-subtype. +// +// This function is provided for advanced users; prefer to use only +// CallContentSubtype to select a registered codec instead. +// +// This is an EXPERIMENTAL API. +func ForceCodec(codec encoding.Codec) CallOption { + return ForceCodecCallOption{Codec: codec} +} + +// ForceCodecCallOption is a CallOption that indicates the codec used for +// marshaling messages. +// +// This is an EXPERIMENTAL API. +type ForceCodecCallOption struct { + Codec encoding.Codec +} + +func (o ForceCodecCallOption) before(c *callInfo) error { + c.codec = o.Codec + return nil +} +func (o ForceCodecCallOption) after(c *callInfo) {} + +// CallCustomCodec behaves like ForceCodec, but accepts a grpc.Codec instead of +// an encoding.Codec. +// +// Deprecated: use ForceCodec instead. +func CallCustomCodec(codec Codec) CallOption { + return CustomCodecCallOption{Codec: codec} +} + +// CustomCodecCallOption is a CallOption that indicates the codec used for +// marshaling messages. +// +// This is an EXPERIMENTAL API. +type CustomCodecCallOption struct { + Codec Codec +} + +func (o CustomCodecCallOption) before(c *callInfo) error { + c.codec = o.Codec + return nil +} +func (o CustomCodecCallOption) after(c *callInfo) {} + +// MaxRetryRPCBufferSize returns a CallOption that limits the amount of memory +// used for buffering this RPC's requests for retry purposes. +// +// This API is EXPERIMENTAL. +func MaxRetryRPCBufferSize(bytes int) CallOption { + return MaxRetryRPCBufferSizeCallOption{bytes} +} + +// MaxRetryRPCBufferSizeCallOption is a CallOption indicating the amount of +// memory to be used for caching this RPC for retry purposes. +// This is an EXPERIMENTAL API. +type MaxRetryRPCBufferSizeCallOption struct { + MaxRetryRPCBufferSize int +} + +func (o MaxRetryRPCBufferSizeCallOption) before(c *callInfo) error { + c.maxRetryRPCBufferSize = o.MaxRetryRPCBufferSize + return nil +} +func (o MaxRetryRPCBufferSizeCallOption) after(c *callInfo) {} + +// The format of the payload: compressed or not? +type payloadFormat uint8 + +const ( + compressionNone payloadFormat = 0 // no compression + compressionMade payloadFormat = 1 // compressed +) + +// parser reads complete gRPC messages from the underlying reader. +type parser struct { + // r is the underlying reader. + // See the comment on recvMsg for the permissible + // error types. + r io.Reader + + // The header of a gRPC message. Find more detail at + // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md + header [5]byte +} + +// recvMsg reads a complete gRPC message from the stream. +// +// It returns the message and its payload (compression/encoding) +// format. The caller owns the returned msg memory. +// +// If there is an error, possible values are: +// * io.EOF, when no messages remain +// * io.ErrUnexpectedEOF +// * of type transport.ConnectionError +// * an error from the status package +// No other error values or types must be returned, which also means +// that the underlying io.Reader must not return an incompatible +// error. 
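+//
+// For reference, the 5-byte prefix it reads is one compressed-flag byte
+// (0 = uncompressed, 1 = compressed) followed by a 4-byte big-endian
+// message length, per PROTOCOL-HTTP2.md.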
+func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byte, err error) { + if _, err := p.r.Read(p.header[:]); err != nil { + return 0, nil, err + } + + pf = payloadFormat(p.header[0]) + length := binary.BigEndian.Uint32(p.header[1:]) + + if length == 0 { + return pf, nil, nil + } + if int64(length) > int64(maxInt) { + return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max length allowed on current machine (%d vs. %d)", length, maxInt) + } + if int(length) > maxReceiveMessageSize { + return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", length, maxReceiveMessageSize) + } + // TODO(bradfitz,zhaoq): garbage. reuse buffer after proto decoding instead + // of making it for each message: + msg = make([]byte, int(length)) + if _, err := p.r.Read(msg); err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return 0, nil, err + } + return pf, msg, nil +} + +// encode serializes msg and returns a buffer containing the message, or an +// error if it is too large to be transmitted by grpc. If msg is nil, it +// generates an empty message. +func encode(c baseCodec, msg interface{}) ([]byte, error) { + if msg == nil { // NOTE: typed nils will not be caught by this check + return nil, nil + } + b, err := c.Marshal(msg) + if err != nil { + return nil, status.Errorf(codes.Internal, "grpc: error while marshaling: %v", err.Error()) + } + if uint(len(b)) > math.MaxUint32 { + return nil, status.Errorf(codes.ResourceExhausted, "grpc: message too large (%d bytes)", len(b)) + } + return b, nil +} + +// compress returns the input bytes compressed by compressor or cp. If both +// compressors are nil, returns nil. +// +// TODO(dfawley): eliminate cp parameter by wrapping Compressor in an encoding.Compressor. +func compress(in []byte, cp Compressor, compressor encoding.Compressor) ([]byte, error) { + if compressor == nil && cp == nil { + return nil, nil + } + wrapErr := func(err error) error { + return status.Errorf(codes.Internal, "grpc: error while compressing: %v", err.Error()) + } + cbuf := &bytes.Buffer{} + if compressor != nil { + z, err := compressor.Compress(cbuf) + if err != nil { + return nil, wrapErr(err) + } + if _, err := z.Write(in); err != nil { + return nil, wrapErr(err) + } + if err := z.Close(); err != nil { + return nil, wrapErr(err) + } + } else { + if err := cp.Do(cbuf, in); err != nil { + return nil, wrapErr(err) + } + } + return cbuf.Bytes(), nil +} + +const ( + payloadLen = 1 + sizeLen = 4 + headerLen = payloadLen + sizeLen +) + +// msgHeader returns a 5-byte header for the message being transmitted and the +// payload, which is compData if non-nil or data otherwise. 
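+//
+// For example, a 5-byte uncompressed payload produces the header
+// [0x00 0x00 0x00 0x00 0x05], followed on the wire by the payload itself.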
+func msgHeader(data, compData []byte) (hdr []byte, payload []byte) {
+	hdr = make([]byte, headerLen)
+	if compData != nil {
+		hdr[0] = byte(compressionMade)
+		data = compData
+	} else {
+		hdr[0] = byte(compressionNone)
+	}
+
+	// Write length of payload into buf
+	binary.BigEndian.PutUint32(hdr[payloadLen:], uint32(len(data)))
+	return hdr, data
+}
+
+func outPayload(client bool, msg interface{}, data, payload []byte, t time.Time) *stats.OutPayload {
+	return &stats.OutPayload{
+		Client:     client,
+		Payload:    msg,
+		Data:       data,
+		Length:     len(data),
+		WireLength: len(payload) + headerLen,
+		SentTime:   t,
+	}
+}
+
+func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool) *status.Status {
+	switch pf {
+	case compressionNone:
+	case compressionMade:
+		if recvCompress == "" || recvCompress == encoding.Identity {
+			return status.New(codes.Internal, "grpc: compressed flag set with identity or empty encoding")
+		}
+		if !haveCompressor {
+			return status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress)
+		}
+	default:
+		return status.Newf(codes.Internal, "grpc: received unexpected payload format %d", pf)
+	}
+	return nil
+}
+
+type payloadInfo struct {
+	wireLength        int // The compressed length received from the wire.
+	uncompressedBytes []byte
+}
+
+func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) ([]byte, error) {
+	pf, d, err := p.recvMsg(maxReceiveMessageSize)
+	if err != nil {
+		return nil, err
+	}
+	if payInfo != nil {
+		payInfo.wireLength = len(d)
+	}
+
+	if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil); st != nil {
+		return nil, st.Err()
+	}
+
+	if pf == compressionMade {
+		// To match legacy behavior, if the decompressor is set by WithDecompressor or RPCDecompressor,
+		// use this decompressor as the default.
+		if dc != nil {
+			d, err = dc.Do(bytes.NewReader(d))
+			if err != nil {
+				return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
+			}
+		} else {
+			dcReader, err := compressor.Decompress(bytes.NewReader(d))
+			if err != nil {
+				return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
+			}
+			// Read from LimitReader with limit max+1. So if the underlying
+			// reader is over limit, the result will be bigger than max.
+			d, err = ioutil.ReadAll(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1))
+			if err != nil {
+				return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
+			}
+		}
+	}
+	if len(d) > maxReceiveMessageSize {
+		// TODO: Revisit the error code. Currently keep it consistent with java
+		// implementation.
+		return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", len(d), maxReceiveMessageSize)
+	}
+	return d, nil
+}
+
+// Of the two compressor parameters, at most one should be set, but if both
+// are, dc takes precedence over compressor.
+// TODO(dfawley): wrap the old compressor/decompressor using the new API?
+func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m interface{}, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) error {
+	d, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor)
+	if err != nil {
+		return err
+	}
+	if err := c.Unmarshal(d, m); err != nil {
+		return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message %v", err)
+	}
+	if payInfo != nil {
+		payInfo.uncompressedBytes = d
+	}
+	return nil
+}
+
+// rpcInfo contains information about the RPC.
+type rpcInfo struct {
+	failfast      bool
+	preloaderInfo *compressorInfo
+}
+
+// compressorInfo holds the codec and compressors for the preloader.
+// If a stream's context carries an rpcInfo with non-nil pointers to the
+// codec and compressors, PreparedMsg can be used for async message
+// preparation and the marshalled bytes can be reused.
+type compressorInfo struct {
+	codec baseCodec
+	cp    Compressor
+	comp  encoding.Compressor
+}
+
+type rpcInfoContextKey struct{}
+
+func newContextWithRPCInfo(ctx context.Context, failfast bool, codec baseCodec, cp Compressor, comp encoding.Compressor) context.Context {
+	return context.WithValue(ctx, rpcInfoContextKey{}, &rpcInfo{
+		failfast: failfast,
+		preloaderInfo: &compressorInfo{
+			codec: codec,
+			cp:    cp,
+			comp:  comp,
+		},
+	})
+}
+
+func rpcInfoFromContext(ctx context.Context) (s *rpcInfo, ok bool) {
+	s, ok = ctx.Value(rpcInfoContextKey{}).(*rpcInfo)
+	return
+}
+
+// Code returns the error code for err if it was produced by the rpc system.
+// Otherwise, it returns codes.Unknown.
+//
+// Deprecated: use status.Code instead.
+func Code(err error) codes.Code {
+	return status.Code(err)
+}
+
+// ErrorDesc returns the error description of err if it was produced by the rpc system.
+// Otherwise, it returns err.Error() or empty string when err is nil.
+//
+// Deprecated: use status.Convert and Message method instead.
+func ErrorDesc(err error) string {
+	return status.Convert(err).Message()
+}
+
+// Errorf returns an error containing an error code and a description;
+// Errorf returns nil if c is OK.
+//
+// Deprecated: use status.Errorf instead.
+func Errorf(c codes.Code, format string, a ...interface{}) error {
+	return status.Errorf(c, format, a...)
+}
+
+// toRPCErr converts an error into an error from the status package.
+func toRPCErr(err error) error {
+	if err == nil || err == io.EOF {
+		return err
+	}
+	if err == io.ErrUnexpectedEOF {
+		return status.Error(codes.Internal, err.Error())
+	}
+	if _, ok := status.FromError(err); ok {
+		return err
+	}
+	switch e := err.(type) {
+	case transport.ConnectionError:
+		return status.Error(codes.Unavailable, e.Desc)
+	default:
+		switch err {
+		case context.DeadlineExceeded:
+			return status.Error(codes.DeadlineExceeded, err.Error())
+		case context.Canceled:
+			return status.Error(codes.Canceled, err.Error())
+		}
+	}
+	return status.Error(codes.Unknown, err.Error())
+}
+
+// setCallInfoCodec should only be called after CallOptions have been applied.
+func setCallInfoCodec(c *callInfo) error {
+	if c.codec != nil {
+		// codec was already set by a CallOption; use it.
+		return nil
+	}
+
+	if c.contentSubtype == "" {
+		// No codec specified in CallOptions; use proto by default.
+		c.codec = encoding.GetCodec(proto.Name)
+		return nil
+	}
+
+	// c.contentSubtype is already lowercased in CallContentSubtype
+	c.codec = encoding.GetCodec(c.contentSubtype)
+	if c.codec == nil {
+		return status.Errorf(codes.Internal, "no codec registered for content-subtype %s", c.contentSubtype)
+	}
+	return nil
+}
+
+// parseDialTarget returns the network and address to pass to the dialer.
+func parseDialTarget(target string) (net string, addr string) {
+	net = "tcp"
+
+	m1 := strings.Index(target, ":")
+	m2 := strings.Index(target, ":/")
+
+	// handle unix:addr which will fail with url.Parse
+	if m1 >= 0 && m2 < 0 {
+		if n := target[0:m1]; n == "unix" {
+			net = n
+			addr = target[m1+1:]
+			return net, addr
+		}
+	}
+	if m2 >= 0 {
+		t, err := url.Parse(target)
+		if err != nil {
+			return net, target
+		}
+		scheme := t.Scheme
+		addr = t.Path
+		if scheme == "unix" {
+			net = scheme
+			if addr == "" {
+				addr = t.Host
+			}
+			return net, addr
+		}
+	}
+
+	return net, target
+}
+
+// channelzData is used to store channelz-related data for ClientConn, addrConn and Server.
+// These fields cannot be embedded in the original structs (e.g. ClientConn): atomic
+// operations on an int64 variable on a 32-bit machine require the user to enforce memory
+// alignment, and grouping these int64 fields inside one struct enforces that alignment.
+type channelzData struct {
+	callsStarted   int64
+	callsFailed    int64
+	callsSucceeded int64
+	// lastCallStartedTime stores the timestamp when the last call started. It is of int64 type
+	// instead of time.Time since it's more costly to atomically update a time.Time variable
+	// than an int64 variable.
+	lastCallStartedTime int64
+}
+
+// The SupportPackageIsVersion variables are referenced from generated protocol
+// buffer files to ensure compatibility with the gRPC version used. The latest
+// support package version is 5.
+//
+// Older versions are kept for compatibility. They may be removed if
+// compatibility cannot be maintained.
+//
+// These constants should not be referenced from any other code.
+const (
+	SupportPackageIsVersion3 = true
+	SupportPackageIsVersion4 = true
+	SupportPackageIsVersion5 = true
+)
+
+const grpcUA = "grpc-go/" + Version
diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go
new file mode 100644
index 000000000..176613625
--- /dev/null
+++ b/vendor/google.golang.org/grpc/server.go
@@ -0,0 +1,1524 @@
+/*
+ *
+ * Copyright 2014 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * + */ + +package grpc + +import ( + "context" + "errors" + "fmt" + "io" + "math" + "net" + "net/http" + "reflect" + "runtime" + "strings" + "sync" + "sync/atomic" + "time" + + "golang.org/x/net/trace" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/encoding" + "google.golang.org/grpc/encoding/proto" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/binarylog" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" + "google.golang.org/grpc/tap" +) + +const ( + defaultServerMaxReceiveMessageSize = 1024 * 1024 * 4 + defaultServerMaxSendMessageSize = math.MaxInt32 +) + +type methodHandler func(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor UnaryServerInterceptor) (interface{}, error) + +// MethodDesc represents an RPC service's method specification. +type MethodDesc struct { + MethodName string + Handler methodHandler +} + +// ServiceDesc represents an RPC service's specification. +type ServiceDesc struct { + ServiceName string + // The pointer to the service interface. Used to check whether the user + // provided implementation satisfies the interface requirements. + HandlerType interface{} + Methods []MethodDesc + Streams []StreamDesc + Metadata interface{} +} + +// service consists of the information of the server serving this service and +// the methods in this service. +type service struct { + server interface{} // the server for service methods + md map[string]*MethodDesc + sd map[string]*StreamDesc + mdata interface{} +} + +// Server is a gRPC server to serve RPC requests. +type Server struct { + opts serverOptions + + mu sync.Mutex // guards following + lis map[net.Listener]bool + conns map[transport.ServerTransport]bool + serve bool + drain bool + cv *sync.Cond // signaled when connections close for GracefulStop + m map[string]*service // service name -> service info + events trace.EventLog + + quit chan struct{} + done chan struct{} + quitOnce sync.Once + doneOnce sync.Once + channelzRemoveOnce sync.Once + serveWG sync.WaitGroup // counts active Serve goroutines for GracefulStop + + channelzID int64 // channelz unique identification number + czData *channelzData +} + +type serverOptions struct { + creds credentials.TransportCredentials + codec baseCodec + cp Compressor + dc Decompressor + unaryInt UnaryServerInterceptor + streamInt StreamServerInterceptor + inTapHandle tap.ServerInHandle + statsHandler stats.Handler + maxConcurrentStreams uint32 + maxReceiveMessageSize int + maxSendMessageSize int + unknownStreamDesc *StreamDesc + keepaliveParams keepalive.ServerParameters + keepalivePolicy keepalive.EnforcementPolicy + initialWindowSize int32 + initialConnWindowSize int32 + writeBufferSize int + readBufferSize int + connectionTimeout time.Duration + maxHeaderListSize *uint32 +} + +var defaultServerOptions = serverOptions{ + maxReceiveMessageSize: defaultServerMaxReceiveMessageSize, + maxSendMessageSize: defaultServerMaxSendMessageSize, + connectionTimeout: 120 * time.Second, + writeBufferSize: defaultWriteBufSize, + readBufferSize: defaultReadBufSize, +} + +// A ServerOption sets options such as credentials, codec and keepalive parameters, etc. +type ServerOption interface { + apply(*serverOptions) +} + +// EmptyServerOption does not alter the server configuration. 
It can be embedded
+// in another structure to build custom server options.
+//
+// This API is EXPERIMENTAL.
+type EmptyServerOption struct{}
+
+func (EmptyServerOption) apply(*serverOptions) {}
+
+// funcServerOption wraps a function that modifies serverOptions into an
+// implementation of the ServerOption interface.
+type funcServerOption struct {
+ f func(*serverOptions)
+}
+
+func (fdo *funcServerOption) apply(do *serverOptions) {
+ fdo.f(do)
+}
+
+func newFuncServerOption(f func(*serverOptions)) *funcServerOption {
+ return &funcServerOption{
+ f: f,
+ }
+}
+
+// WriteBufferSize determines how much data can be batched before doing a write on the wire.
+// The corresponding memory allocation for this buffer will be twice the size to keep syscalls low.
+// The default value for this buffer is 32KB.
+// Zero will disable the write buffer such that each write will be made directly on the underlying connection.
+// Note: A Send call may not directly translate to a write.
+func WriteBufferSize(s int) ServerOption {
+ return newFuncServerOption(func(o *serverOptions) {
+ o.writeBufferSize = s
+ })
+}
+
+// ReadBufferSize lets you set the size of the read buffer; this determines how much data can
+// be read at most for one read syscall.
+// The default value for this buffer is 32KB.
+// Zero will disable the read buffer for a connection so the data framer can access the
+// underlying conn directly.
+func ReadBufferSize(s int) ServerOption {
+ return newFuncServerOption(func(o *serverOptions) {
+ o.readBufferSize = s
+ })
+}
+
+// InitialWindowSize returns a ServerOption that sets the window size for a stream.
+// The lower bound for window size is 64K and any value smaller than that will be ignored.
+func InitialWindowSize(s int32) ServerOption {
+ return newFuncServerOption(func(o *serverOptions) {
+ o.initialWindowSize = s
+ })
+}
+
+// InitialConnWindowSize returns a ServerOption that sets the window size for a connection.
+// The lower bound for window size is 64K and any value smaller than that will be ignored.
+func InitialConnWindowSize(s int32) ServerOption {
+ return newFuncServerOption(func(o *serverOptions) {
+ o.initialConnWindowSize = s
+ })
+}
+
+// KeepaliveParams returns a ServerOption that sets keepalive and max-age parameters for the server.
+func KeepaliveParams(kp keepalive.ServerParameters) ServerOption {
+ if kp.Time > 0 && kp.Time < time.Second {
+ grpclog.Warning("Adjusting keepalive ping interval to minimum period of 1s")
+ kp.Time = time.Second
+ }
+
+ return newFuncServerOption(func(o *serverOptions) {
+ o.keepaliveParams = kp
+ })
+}
+
+// KeepaliveEnforcementPolicy returns a ServerOption that sets keepalive enforcement policy for the server.
+func KeepaliveEnforcementPolicy(kep keepalive.EnforcementPolicy) ServerOption {
+ return newFuncServerOption(func(o *serverOptions) {
+ o.keepalivePolicy = kep
+ })
+}
+
+// CustomCodec returns a ServerOption that sets a codec for message marshaling and unmarshaling.
+//
+// This will override any lookups by content-subtype for Codecs registered with RegisterCodec.
+func CustomCodec(codec Codec) ServerOption {
+ return newFuncServerOption(func(o *serverOptions) {
+ o.codec = codec
+ })
+}
+
+// RPCCompressor returns a ServerOption that sets a compressor for outbound
+// messages. For backward compatibility, all outbound messages will be sent
+// using this compressor, regardless of incoming message compression. By
+// default, server messages will be sent using the same compressor with which
+// request messages were sent.
+// +// Deprecated: use encoding.RegisterCompressor instead. +func RPCCompressor(cp Compressor) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.cp = cp + }) +} + +// RPCDecompressor returns a ServerOption that sets a decompressor for inbound +// messages. It has higher priority than decompressors registered via +// encoding.RegisterCompressor. +// +// Deprecated: use encoding.RegisterCompressor instead. +func RPCDecompressor(dc Decompressor) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.dc = dc + }) +} + +// MaxMsgSize returns a ServerOption to set the max message size in bytes the server can receive. +// If this is not set, gRPC uses the default limit. +// +// Deprecated: use MaxRecvMsgSize instead. +func MaxMsgSize(m int) ServerOption { + return MaxRecvMsgSize(m) +} + +// MaxRecvMsgSize returns a ServerOption to set the max message size in bytes the server can receive. +// If this is not set, gRPC uses the default 4MB. +func MaxRecvMsgSize(m int) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.maxReceiveMessageSize = m + }) +} + +// MaxSendMsgSize returns a ServerOption to set the max message size in bytes the server can send. +// If this is not set, gRPC uses the default `math.MaxInt32`. +func MaxSendMsgSize(m int) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.maxSendMessageSize = m + }) +} + +// MaxConcurrentStreams returns a ServerOption that will apply a limit on the number +// of concurrent streams to each ServerTransport. +func MaxConcurrentStreams(n uint32) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.maxConcurrentStreams = n + }) +} + +// Creds returns a ServerOption that sets credentials for server connections. +func Creds(c credentials.TransportCredentials) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.creds = c + }) +} + +// UnaryInterceptor returns a ServerOption that sets the UnaryServerInterceptor for the +// server. Only one unary interceptor can be installed. The construction of multiple +// interceptors (e.g., chaining) can be implemented at the caller. +func UnaryInterceptor(i UnaryServerInterceptor) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + if o.unaryInt != nil { + panic("The unary server interceptor was already set and may not be reset.") + } + o.unaryInt = i + }) +} + +// StreamInterceptor returns a ServerOption that sets the StreamServerInterceptor for the +// server. Only one stream interceptor can be installed. +func StreamInterceptor(i StreamServerInterceptor) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + if o.streamInt != nil { + panic("The stream server interceptor was already set and may not be reset.") + } + o.streamInt = i + }) +} + +// InTapHandle returns a ServerOption that sets the tap handle for all the server +// transport to be created. Only one can be installed. +func InTapHandle(h tap.ServerInHandle) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + if o.inTapHandle != nil { + panic("The tap handle was already set and may not be reset.") + } + o.inTapHandle = h + }) +} + +// StatsHandler returns a ServerOption that sets the stats handler for the server. +func StatsHandler(h stats.Handler) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.statsHandler = h + }) +} + +// UnknownServiceHandler returns a ServerOption that allows for adding a custom +// unknown service handler. 
The provided method is a bidi-streaming RPC service +// handler that will be invoked instead of returning the "unimplemented" gRPC +// error whenever a request is received for an unregistered service or method. +// The handling function has full access to the Context of the request and the +// stream, and the invocation bypasses interceptors. +func UnknownServiceHandler(streamHandler StreamHandler) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.unknownStreamDesc = &StreamDesc{ + StreamName: "unknown_service_handler", + Handler: streamHandler, + // We need to assume that the users of the streamHandler will want to use both. + ClientStreams: true, + ServerStreams: true, + } + }) +} + +// ConnectionTimeout returns a ServerOption that sets the timeout for +// connection establishment (up to and including HTTP/2 handshaking) for all +// new connections. If this is not set, the default is 120 seconds. A zero or +// negative value will result in an immediate timeout. +// +// This API is EXPERIMENTAL. +func ConnectionTimeout(d time.Duration) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.connectionTimeout = d + }) +} + +// MaxHeaderListSize returns a ServerOption that sets the max (uncompressed) size +// of header list that the server is prepared to accept. +func MaxHeaderListSize(s uint32) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.maxHeaderListSize = &s + }) +} + +// NewServer creates a gRPC server which has no service registered and has not +// started to accept requests yet. +func NewServer(opt ...ServerOption) *Server { + opts := defaultServerOptions + for _, o := range opt { + o.apply(&opts) + } + s := &Server{ + lis: make(map[net.Listener]bool), + opts: opts, + conns: make(map[transport.ServerTransport]bool), + m: make(map[string]*service), + quit: make(chan struct{}), + done: make(chan struct{}), + czData: new(channelzData), + } + s.cv = sync.NewCond(&s.mu) + if EnableTracing { + _, file, line, _ := runtime.Caller(1) + s.events = trace.NewEventLog("grpc.Server", fmt.Sprintf("%s:%d", file, line)) + } + + if channelz.IsOn() { + s.channelzID = channelz.RegisterServer(&channelzServer{s}, "") + } + return s +} + +// printf records an event in s's event log, unless s has been stopped. +// REQUIRES s.mu is held. +func (s *Server) printf(format string, a ...interface{}) { + if s.events != nil { + s.events.Printf(format, a...) + } +} + +// errorf records an error in s's event log, unless s has been stopped. +// REQUIRES s.mu is held. +func (s *Server) errorf(format string, a ...interface{}) { + if s.events != nil { + s.events.Errorf(format, a...) + } +} + +// RegisterService registers a service and its implementation to the gRPC +// server. It is called from the IDL generated code. This must be called before +// invoking Serve. 
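+//
+// A typical call site lives in protoc-generated code rather than application
+// code; the names below are purely illustrative:
+//
+//	s := grpc.NewServer()
+//	pb.RegisterGreeterServer(s, &greeterServer{}) // calls s.RegisterService(&_Greeter_serviceDesc, srv)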
+func (s *Server) RegisterService(sd *ServiceDesc, ss interface{}) {
+ ht := reflect.TypeOf(sd.HandlerType).Elem()
+ st := reflect.TypeOf(ss)
+ if !st.Implements(ht) {
+ grpclog.Fatalf("grpc: Server.RegisterService found the handler of type %v that does not satisfy %v", st, ht)
+ }
+ s.register(sd, ss)
+}
+
+func (s *Server) register(sd *ServiceDesc, ss interface{}) {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ s.printf("RegisterService(%q)", sd.ServiceName)
+ if s.serve {
+ grpclog.Fatalf("grpc: Server.RegisterService after Server.Serve for %q", sd.ServiceName)
+ }
+ if _, ok := s.m[sd.ServiceName]; ok {
+ grpclog.Fatalf("grpc: Server.RegisterService found duplicate service registration for %q", sd.ServiceName)
+ }
+ srv := &service{
+ server: ss,
+ md: make(map[string]*MethodDesc),
+ sd: make(map[string]*StreamDesc),
+ mdata: sd.Metadata,
+ }
+ for i := range sd.Methods {
+ d := &sd.Methods[i]
+ srv.md[d.MethodName] = d
+ }
+ for i := range sd.Streams {
+ d := &sd.Streams[i]
+ srv.sd[d.StreamName] = d
+ }
+ s.m[sd.ServiceName] = srv
+}
+
+// MethodInfo contains the information of an RPC including its method name and type.
+type MethodInfo struct {
+ // Name is the method name only, without the service name or package name.
+ Name string
+ // IsClientStream indicates whether the RPC is a client streaming RPC.
+ IsClientStream bool
+ // IsServerStream indicates whether the RPC is a server streaming RPC.
+ IsServerStream bool
+}
+
+// ServiceInfo contains unary RPC method info, streaming RPC method info and metadata for a service.
+type ServiceInfo struct {
+ Methods []MethodInfo
+ // Metadata is the metadata specified in ServiceDesc when registering service.
+ Metadata interface{}
+}
+
+// GetServiceInfo returns a map from service names to ServiceInfo.
+// Service names include the package names, in the form of <package>.<service>.
+func (s *Server) GetServiceInfo() map[string]ServiceInfo {
+ ret := make(map[string]ServiceInfo)
+ for n, srv := range s.m {
+ methods := make([]MethodInfo, 0, len(srv.md)+len(srv.sd))
+ for m := range srv.md {
+ methods = append(methods, MethodInfo{
+ Name: m,
+ IsClientStream: false,
+ IsServerStream: false,
+ })
+ }
+ for m, d := range srv.sd {
+ methods = append(methods, MethodInfo{
+ Name: m,
+ IsClientStream: d.ClientStreams,
+ IsServerStream: d.ServerStreams,
+ })
+ }
+
+ ret[n] = ServiceInfo{
+ Methods: methods,
+ Metadata: srv.mdata,
+ }
+ }
+ return ret
+}
+
+// ErrServerStopped indicates that the operation is now illegal because of
+// the server being stopped.
+var ErrServerStopped = errors.New("grpc: the server has been stopped")
+
+func (s *Server) useTransportAuthenticator(rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) {
+ if s.opts.creds == nil {
+ return rawConn, nil, nil
+ }
+ return s.opts.creds.ServerHandshake(rawConn)
+}
+
+type listenSocket struct {
+ net.Listener
+ channelzID int64
+}
+
+func (l *listenSocket) ChannelzMetric() *channelz.SocketInternalMetric {
+ return &channelz.SocketInternalMetric{
+ SocketOptions: channelz.GetSocketOption(l.Listener),
+ LocalAddr: l.Listener.Addr(),
+ }
+}
+
+func (l *listenSocket) Close() error {
+ err := l.Listener.Close()
+ if channelz.IsOn() {
+ channelz.RemoveEntry(l.channelzID)
+ }
+ return err
+}
+
+// Serve accepts incoming connections on the listener lis, creating a new
+// ServerTransport and service goroutine for each. The service goroutines
+// read gRPC requests and then call the registered handlers to reply to them.
+// Serve returns when lis.Accept fails with fatal errors.
lis will be closed when +// this method returns. +// Serve will return a non-nil error unless Stop or GracefulStop is called. +func (s *Server) Serve(lis net.Listener) error { + s.mu.Lock() + s.printf("serving") + s.serve = true + if s.lis == nil { + // Serve called after Stop or GracefulStop. + s.mu.Unlock() + lis.Close() + return ErrServerStopped + } + + s.serveWG.Add(1) + defer func() { + s.serveWG.Done() + select { + // Stop or GracefulStop called; block until done and return nil. + case <-s.quit: + <-s.done + default: + } + }() + + ls := &listenSocket{Listener: lis} + s.lis[ls] = true + + if channelz.IsOn() { + ls.channelzID = channelz.RegisterListenSocket(ls, s.channelzID, lis.Addr().String()) + } + s.mu.Unlock() + + defer func() { + s.mu.Lock() + if s.lis != nil && s.lis[ls] { + ls.Close() + delete(s.lis, ls) + } + s.mu.Unlock() + }() + + var tempDelay time.Duration // how long to sleep on accept failure + + for { + rawConn, err := lis.Accept() + if err != nil { + if ne, ok := err.(interface { + Temporary() bool + }); ok && ne.Temporary() { + if tempDelay == 0 { + tempDelay = 5 * time.Millisecond + } else { + tempDelay *= 2 + } + if max := 1 * time.Second; tempDelay > max { + tempDelay = max + } + s.mu.Lock() + s.printf("Accept error: %v; retrying in %v", err, tempDelay) + s.mu.Unlock() + timer := time.NewTimer(tempDelay) + select { + case <-timer.C: + case <-s.quit: + timer.Stop() + return nil + } + continue + } + s.mu.Lock() + s.printf("done serving; Accept = %v", err) + s.mu.Unlock() + + select { + case <-s.quit: + return nil + default: + } + return err + } + tempDelay = 0 + // Start a new goroutine to deal with rawConn so we don't stall this Accept + // loop goroutine. + // + // Make sure we account for the goroutine so GracefulStop doesn't nil out + // s.conns before this conn can be added. + s.serveWG.Add(1) + go func() { + s.handleRawConn(rawConn) + s.serveWG.Done() + }() + } +} + +// handleRawConn forks a goroutine to handle a just-accepted connection that +// has not had any I/O performed on it yet. +func (s *Server) handleRawConn(rawConn net.Conn) { + rawConn.SetDeadline(time.Now().Add(s.opts.connectionTimeout)) + conn, authInfo, err := s.useTransportAuthenticator(rawConn) + if err != nil { + // ErrConnDispatched means that the connection was dispatched away from + // gRPC; those connections should be left open. + if err != credentials.ErrConnDispatched { + s.mu.Lock() + s.errorf("ServerHandshake(%q) failed: %v", rawConn.RemoteAddr(), err) + s.mu.Unlock() + grpclog.Warningf("grpc: Server.Serve failed to complete security handshake from %q: %v", rawConn.RemoteAddr(), err) + rawConn.Close() + } + rawConn.SetDeadline(time.Time{}) + return + } + + s.mu.Lock() + if s.conns == nil { + s.mu.Unlock() + conn.Close() + return + } + s.mu.Unlock() + + // Finish handshaking (HTTP2) + st := s.newHTTP2Transport(conn, authInfo) + if st == nil { + return + } + + rawConn.SetDeadline(time.Time{}) + if !s.addConn(st) { + return + } + go func() { + s.serveStreams(st) + s.removeConn(st) + }() +} + +// newHTTP2Transport sets up a http/2 transport (using the +// gRPC http2 server transport in transport/http2_server.go). 
+func (s *Server) newHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) transport.ServerTransport { + config := &transport.ServerConfig{ + MaxStreams: s.opts.maxConcurrentStreams, + AuthInfo: authInfo, + InTapHandle: s.opts.inTapHandle, + StatsHandler: s.opts.statsHandler, + KeepaliveParams: s.opts.keepaliveParams, + KeepalivePolicy: s.opts.keepalivePolicy, + InitialWindowSize: s.opts.initialWindowSize, + InitialConnWindowSize: s.opts.initialConnWindowSize, + WriteBufferSize: s.opts.writeBufferSize, + ReadBufferSize: s.opts.readBufferSize, + ChannelzParentID: s.channelzID, + MaxHeaderListSize: s.opts.maxHeaderListSize, + } + st, err := transport.NewServerTransport("http2", c, config) + if err != nil { + s.mu.Lock() + s.errorf("NewServerTransport(%q) failed: %v", c.RemoteAddr(), err) + s.mu.Unlock() + c.Close() + grpclog.Warningln("grpc: Server.Serve failed to create ServerTransport: ", err) + return nil + } + + return st +} + +func (s *Server) serveStreams(st transport.ServerTransport) { + defer st.Close() + var wg sync.WaitGroup + st.HandleStreams(func(stream *transport.Stream) { + wg.Add(1) + go func() { + defer wg.Done() + s.handleStream(st, stream, s.traceInfo(st, stream)) + }() + }, func(ctx context.Context, method string) context.Context { + if !EnableTracing { + return ctx + } + tr := trace.New("grpc.Recv."+methodFamily(method), method) + return trace.NewContext(ctx, tr) + }) + wg.Wait() +} + +var _ http.Handler = (*Server)(nil) + +// ServeHTTP implements the Go standard library's http.Handler +// interface by responding to the gRPC request r, by looking up +// the requested gRPC method in the gRPC server s. +// +// The provided HTTP request must have arrived on an HTTP/2 +// connection. When using the Go standard library's server, +// practically this means that the Request must also have arrived +// over TLS. +// +// To share one port (such as 443 for https) between gRPC and an +// existing http.Handler, use a root http.Handler such as: +// +// if r.ProtoMajor == 2 && strings.HasPrefix( +// r.Header.Get("Content-Type"), "application/grpc") { +// grpcServer.ServeHTTP(w, r) +// } else { +// yourMux.ServeHTTP(w, r) +// } +// +// Note that ServeHTTP uses Go's HTTP/2 server implementation which is totally +// separate from grpc-go's HTTP/2 server. Performance and features may vary +// between the two paths. ServeHTTP does not support some gRPC features +// available through grpc-go's HTTP/2 server, and it is currently EXPERIMENTAL +// and subject to change. +func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { + st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandler) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + if !s.addConn(st) { + return + } + defer s.removeConn(st) + s.serveStreams(st) +} + +// traceInfo returns a traceInfo and associates it with stream, if tracing is enabled. +// If tracing is not enabled, it returns nil. 
+func (s *Server) traceInfo(st transport.ServerTransport, stream *transport.Stream) (trInfo *traceInfo) { + tr, ok := trace.FromContext(stream.Context()) + if !ok { + return nil + } + + trInfo = &traceInfo{ + tr: tr, + firstLine: firstLine{ + client: false, + remoteAddr: st.RemoteAddr(), + }, + } + if dl, ok := stream.Context().Deadline(); ok { + trInfo.firstLine.deadline = time.Until(dl) + } + return trInfo +} + +func (s *Server) addConn(st transport.ServerTransport) bool { + s.mu.Lock() + defer s.mu.Unlock() + if s.conns == nil { + st.Close() + return false + } + if s.drain { + // Transport added after we drained our existing conns: drain it + // immediately. + st.Drain() + } + s.conns[st] = true + return true +} + +func (s *Server) removeConn(st transport.ServerTransport) { + s.mu.Lock() + defer s.mu.Unlock() + if s.conns != nil { + delete(s.conns, st) + s.cv.Broadcast() + } +} + +func (s *Server) channelzMetric() *channelz.ServerInternalMetric { + return &channelz.ServerInternalMetric{ + CallsStarted: atomic.LoadInt64(&s.czData.callsStarted), + CallsSucceeded: atomic.LoadInt64(&s.czData.callsSucceeded), + CallsFailed: atomic.LoadInt64(&s.czData.callsFailed), + LastCallStartedTimestamp: time.Unix(0, atomic.LoadInt64(&s.czData.lastCallStartedTime)), + } +} + +func (s *Server) incrCallsStarted() { + atomic.AddInt64(&s.czData.callsStarted, 1) + atomic.StoreInt64(&s.czData.lastCallStartedTime, time.Now().UnixNano()) +} + +func (s *Server) incrCallsSucceeded() { + atomic.AddInt64(&s.czData.callsSucceeded, 1) +} + +func (s *Server) incrCallsFailed() { + atomic.AddInt64(&s.czData.callsFailed, 1) +} + +func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg interface{}, cp Compressor, opts *transport.Options, comp encoding.Compressor) error { + data, err := encode(s.getCodec(stream.ContentSubtype()), msg) + if err != nil { + grpclog.Errorln("grpc: server failed to encode response: ", err) + return err + } + compData, err := compress(data, cp, comp) + if err != nil { + grpclog.Errorln("grpc: server failed to compress response: ", err) + return err + } + hdr, payload := msgHeader(data, compData) + // TODO(dfawley): should we be checking len(data) instead? + if len(payload) > s.opts.maxSendMessageSize { + return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. 
%d)", len(payload), s.opts.maxSendMessageSize) + } + err = t.Write(stream, hdr, payload, opts) + if err == nil && s.opts.statsHandler != nil { + s.opts.statsHandler.HandleRPC(stream.Context(), outPayload(false, msg, data, payload, time.Now())) + } + return err +} + +func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, md *MethodDesc, trInfo *traceInfo) (err error) { + if channelz.IsOn() { + s.incrCallsStarted() + defer func() { + if err != nil && err != io.EOF { + s.incrCallsFailed() + } else { + s.incrCallsSucceeded() + } + }() + } + sh := s.opts.statsHandler + if sh != nil { + beginTime := time.Now() + begin := &stats.Begin{ + BeginTime: beginTime, + } + sh.HandleRPC(stream.Context(), begin) + defer func() { + end := &stats.End{ + BeginTime: beginTime, + EndTime: time.Now(), + } + if err != nil && err != io.EOF { + end.Error = toRPCErr(err) + } + sh.HandleRPC(stream.Context(), end) + }() + } + if trInfo != nil { + defer trInfo.tr.Finish() + trInfo.tr.LazyLog(&trInfo.firstLine, false) + defer func() { + if err != nil && err != io.EOF { + trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + trInfo.tr.SetError() + } + }() + } + + binlog := binarylog.GetMethodLogger(stream.Method()) + if binlog != nil { + ctx := stream.Context() + md, _ := metadata.FromIncomingContext(ctx) + logEntry := &binarylog.ClientHeader{ + Header: md, + MethodName: stream.Method(), + PeerAddr: nil, + } + if deadline, ok := ctx.Deadline(); ok { + logEntry.Timeout = time.Until(deadline) + if logEntry.Timeout < 0 { + logEntry.Timeout = 0 + } + } + if a := md[":authority"]; len(a) > 0 { + logEntry.Authority = a[0] + } + if peer, ok := peer.FromContext(ctx); ok { + logEntry.PeerAddr = peer.Addr + } + binlog.Log(logEntry) + } + + // comp and cp are used for compression. decomp and dc are used for + // decompression. If comp and decomp are both set, they are the same; + // however they are kept separate to ensure that at most one of the + // compressor/decompressor variable pairs are set for use later. + var comp, decomp encoding.Compressor + var cp Compressor + var dc Decompressor + + // If dc is set and matches the stream's compression, use it. Otherwise, try + // to find a matching registered compressor for decomp. + if rc := stream.RecvCompress(); s.opts.dc != nil && s.opts.dc.Type() == rc { + dc = s.opts.dc + } else if rc != "" && rc != encoding.Identity { + decomp = encoding.GetCompressor(rc) + if decomp == nil { + st := status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", rc) + t.WriteStatus(stream, st) + return st.Err() + } + } + + // If cp is set, use it. Otherwise, attempt to compress the response using + // the incoming message compression method. + // + // NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686. + if s.opts.cp != nil { + cp = s.opts.cp + stream.SetSendCompress(cp.Type()) + } else if rc := stream.RecvCompress(); rc != "" && rc != encoding.Identity { + // Legacy compressor not specified; attempt to respond with same encoding. 
+ comp = encoding.GetCompressor(rc) + if comp != nil { + stream.SetSendCompress(rc) + } + } + + var payInfo *payloadInfo + if sh != nil || binlog != nil { + payInfo = &payloadInfo{} + } + d, err := recvAndDecompress(&parser{r: stream}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp) + if err != nil { + if st, ok := status.FromError(err); ok { + if e := t.WriteStatus(stream, st); e != nil { + grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status %v", e) + } + } + return err + } + if channelz.IsOn() { + t.IncrMsgRecv() + } + df := func(v interface{}) error { + if err := s.getCodec(stream.ContentSubtype()).Unmarshal(d, v); err != nil { + return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err) + } + if sh != nil { + sh.HandleRPC(stream.Context(), &stats.InPayload{ + RecvTime: time.Now(), + Payload: v, + Data: d, + Length: len(d), + }) + } + if binlog != nil { + binlog.Log(&binarylog.ClientMessage{ + Message: d, + }) + } + if trInfo != nil { + trInfo.tr.LazyLog(&payload{sent: false, msg: v}, true) + } + return nil + } + ctx := NewContextWithServerTransportStream(stream.Context(), stream) + reply, appErr := md.Handler(srv.server, ctx, df, s.opts.unaryInt) + if appErr != nil { + appStatus, ok := status.FromError(appErr) + if !ok { + // Convert appErr if it is not a grpc status error. + appErr = status.Error(codes.Unknown, appErr.Error()) + appStatus, _ = status.FromError(appErr) + } + if trInfo != nil { + trInfo.tr.LazyLog(stringer(appStatus.Message()), true) + trInfo.tr.SetError() + } + if e := t.WriteStatus(stream, appStatus); e != nil { + grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status: %v", e) + } + if binlog != nil { + if h, _ := stream.Header(); h.Len() > 0 { + // Only log serverHeader if there was header. Otherwise it can + // be trailer only. + binlog.Log(&binarylog.ServerHeader{ + Header: h, + }) + } + binlog.Log(&binarylog.ServerTrailer{ + Trailer: stream.Trailer(), + Err: appErr, + }) + } + return appErr + } + if trInfo != nil { + trInfo.tr.LazyLog(stringer("OK"), false) + } + opts := &transport.Options{Last: true} + + if err := s.sendResponse(t, stream, reply, cp, opts, comp); err != nil { + if err == io.EOF { + // The entire stream is done (for unary RPC only). + return err + } + if s, ok := status.FromError(err); ok { + if e := t.WriteStatus(stream, s); e != nil { + grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status: %v", e) + } + } else { + switch st := err.(type) { + case transport.ConnectionError: + // Nothing to do here. + default: + panic(fmt.Sprintf("grpc: Unexpected error (%T) from sendResponse: %v", st, st)) + } + } + if binlog != nil { + h, _ := stream.Header() + binlog.Log(&binarylog.ServerHeader{ + Header: h, + }) + binlog.Log(&binarylog.ServerTrailer{ + Trailer: stream.Trailer(), + Err: appErr, + }) + } + return err + } + if binlog != nil { + h, _ := stream.Header() + binlog.Log(&binarylog.ServerHeader{ + Header: h, + }) + binlog.Log(&binarylog.ServerMessage{ + Message: reply, + }) + } + if channelz.IsOn() { + t.IncrMsgSent() + } + if trInfo != nil { + trInfo.tr.LazyLog(&payload{sent: true, msg: reply}, true) + } + // TODO: Should we be logging if writing status failed here, like above? + // Should the logging be in WriteStatus? Should we ignore the WriteStatus + // error or allow the stats handler to see it? 
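+ // WriteStatus sends the trailers carrying the final codes.OK status and
+ // closes the server side of the stream.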
+ err = t.WriteStatus(stream, status.New(codes.OK, "")) + if binlog != nil { + binlog.Log(&binarylog.ServerTrailer{ + Trailer: stream.Trailer(), + Err: appErr, + }) + } + return err +} + +func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, sd *StreamDesc, trInfo *traceInfo) (err error) { + if channelz.IsOn() { + s.incrCallsStarted() + defer func() { + if err != nil && err != io.EOF { + s.incrCallsFailed() + } else { + s.incrCallsSucceeded() + } + }() + } + sh := s.opts.statsHandler + if sh != nil { + beginTime := time.Now() + begin := &stats.Begin{ + BeginTime: beginTime, + } + sh.HandleRPC(stream.Context(), begin) + defer func() { + end := &stats.End{ + BeginTime: beginTime, + EndTime: time.Now(), + } + if err != nil && err != io.EOF { + end.Error = toRPCErr(err) + } + sh.HandleRPC(stream.Context(), end) + }() + } + ctx := NewContextWithServerTransportStream(stream.Context(), stream) + ss := &serverStream{ + ctx: ctx, + t: t, + s: stream, + p: &parser{r: stream}, + codec: s.getCodec(stream.ContentSubtype()), + maxReceiveMessageSize: s.opts.maxReceiveMessageSize, + maxSendMessageSize: s.opts.maxSendMessageSize, + trInfo: trInfo, + statsHandler: sh, + } + + ss.binlog = binarylog.GetMethodLogger(stream.Method()) + if ss.binlog != nil { + md, _ := metadata.FromIncomingContext(ctx) + logEntry := &binarylog.ClientHeader{ + Header: md, + MethodName: stream.Method(), + PeerAddr: nil, + } + if deadline, ok := ctx.Deadline(); ok { + logEntry.Timeout = time.Until(deadline) + if logEntry.Timeout < 0 { + logEntry.Timeout = 0 + } + } + if a := md[":authority"]; len(a) > 0 { + logEntry.Authority = a[0] + } + if peer, ok := peer.FromContext(ss.Context()); ok { + logEntry.PeerAddr = peer.Addr + } + ss.binlog.Log(logEntry) + } + + // If dc is set and matches the stream's compression, use it. Otherwise, try + // to find a matching registered compressor for decomp. + if rc := stream.RecvCompress(); s.opts.dc != nil && s.opts.dc.Type() == rc { + ss.dc = s.opts.dc + } else if rc != "" && rc != encoding.Identity { + ss.decomp = encoding.GetCompressor(rc) + if ss.decomp == nil { + st := status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", rc) + t.WriteStatus(ss.s, st) + return st.Err() + } + } + + // If cp is set, use it. Otherwise, attempt to compress the response using + // the incoming message compression method. + // + // NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686. + if s.opts.cp != nil { + ss.cp = s.opts.cp + stream.SetSendCompress(s.opts.cp.Type()) + } else if rc := stream.RecvCompress(); rc != "" && rc != encoding.Identity { + // Legacy compressor not specified; attempt to respond with same encoding. 
+ ss.comp = encoding.GetCompressor(rc)
+ if ss.comp != nil {
+ stream.SetSendCompress(rc)
+ }
+ }
+
+ if trInfo != nil {
+ trInfo.tr.LazyLog(&trInfo.firstLine, false)
+ defer func() {
+ ss.mu.Lock()
+ if err != nil && err != io.EOF {
+ ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
+ ss.trInfo.tr.SetError()
+ }
+ ss.trInfo.tr.Finish()
+ ss.trInfo.tr = nil
+ ss.mu.Unlock()
+ }()
+ }
+ var appErr error
+ var server interface{}
+ if srv != nil {
+ server = srv.server
+ }
+ if s.opts.streamInt == nil {
+ appErr = sd.Handler(server, ss)
+ } else {
+ info := &StreamServerInfo{
+ FullMethod: stream.Method(),
+ IsClientStream: sd.ClientStreams,
+ IsServerStream: sd.ServerStreams,
+ }
+ appErr = s.opts.streamInt(server, ss, info, sd.Handler)
+ }
+ if appErr != nil {
+ appStatus, ok := status.FromError(appErr)
+ if !ok {
+ appStatus = status.New(codes.Unknown, appErr.Error())
+ appErr = appStatus.Err()
+ }
+ if trInfo != nil {
+ ss.mu.Lock()
+ ss.trInfo.tr.LazyLog(stringer(appStatus.Message()), true)
+ ss.trInfo.tr.SetError()
+ ss.mu.Unlock()
+ }
+ t.WriteStatus(ss.s, appStatus)
+ if ss.binlog != nil {
+ ss.binlog.Log(&binarylog.ServerTrailer{
+ Trailer: ss.s.Trailer(),
+ Err: appErr,
+ })
+ }
+ // TODO: Should we log an error from WriteStatus here and below?
+ return appErr
+ }
+ if trInfo != nil {
+ ss.mu.Lock()
+ ss.trInfo.tr.LazyLog(stringer("OK"), false)
+ ss.mu.Unlock()
+ }
+ err = t.WriteStatus(ss.s, status.New(codes.OK, ""))
+ if ss.binlog != nil {
+ ss.binlog.Log(&binarylog.ServerTrailer{
+ Trailer: ss.s.Trailer(),
+ Err: appErr,
+ })
+ }
+ return err
+}
+
+func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream, trInfo *traceInfo) {
+ sm := stream.Method()
+ if sm != "" && sm[0] == '/' {
+ sm = sm[1:]
+ }
+ pos := strings.LastIndex(sm, "/")
+ if pos == -1 {
+ if trInfo != nil {
+ trInfo.tr.LazyLog(&fmtStringer{"Malformed method name %q", []interface{}{sm}}, true)
+ trInfo.tr.SetError()
+ }
+ errDesc := fmt.Sprintf("malformed method name: %q", stream.Method())
+ if err := t.WriteStatus(stream, status.New(codes.ResourceExhausted, errDesc)); err != nil {
+ if trInfo != nil {
+ trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
+ trInfo.tr.SetError()
+ }
+ grpclog.Warningf("grpc: Server.handleStream failed to write status: %v", err)
+ }
+ if trInfo != nil {
+ trInfo.tr.Finish()
+ }
+ return
+ }
+ service := sm[:pos]
+ method := sm[pos+1:]
+
+ srv, knownService := s.m[service]
+ if knownService {
+ if md, ok := srv.md[method]; ok {
+ s.processUnaryRPC(t, stream, srv, md, trInfo)
+ return
+ }
+ if sd, ok := srv.sd[method]; ok {
+ s.processStreamingRPC(t, stream, srv, sd, trInfo)
+ return
+ }
+ }
+ // Unknown service, or known service but unknown method.
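+ // If the application installed a fallback via UnknownServiceHandler,
+ // delegate to it; otherwise fail the RPC with codes.Unimplemented below.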
+ if unknownDesc := s.opts.unknownStreamDesc; unknownDesc != nil { + s.processStreamingRPC(t, stream, nil, unknownDesc, trInfo) + return + } + var errDesc string + if !knownService { + errDesc = fmt.Sprintf("unknown service %v", service) + } else { + errDesc = fmt.Sprintf("unknown method %v for service %v", method, service) + } + if trInfo != nil { + trInfo.tr.LazyPrintf("%s", errDesc) + trInfo.tr.SetError() + } + if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { + if trInfo != nil { + trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + trInfo.tr.SetError() + } + grpclog.Warningf("grpc: Server.handleStream failed to write status: %v", err) + } + if trInfo != nil { + trInfo.tr.Finish() + } +} + +// The key to save ServerTransportStream in the context. +type streamKey struct{} + +// NewContextWithServerTransportStream creates a new context from ctx and +// attaches stream to it. +// +// This API is EXPERIMENTAL. +func NewContextWithServerTransportStream(ctx context.Context, stream ServerTransportStream) context.Context { + return context.WithValue(ctx, streamKey{}, stream) +} + +// ServerTransportStream is a minimal interface that a transport stream must +// implement. This can be used to mock an actual transport stream for tests of +// handler code that use, for example, grpc.SetHeader (which requires some +// stream to be in context). +// +// See also NewContextWithServerTransportStream. +// +// This API is EXPERIMENTAL. +type ServerTransportStream interface { + Method() string + SetHeader(md metadata.MD) error + SendHeader(md metadata.MD) error + SetTrailer(md metadata.MD) error +} + +// ServerTransportStreamFromContext returns the ServerTransportStream saved in +// ctx. Returns nil if the given context has no stream associated with it +// (which implies it is not an RPC invocation context). +// +// This API is EXPERIMENTAL. +func ServerTransportStreamFromContext(ctx context.Context) ServerTransportStream { + s, _ := ctx.Value(streamKey{}).(ServerTransportStream) + return s +} + +// Stop stops the gRPC server. It immediately closes all open +// connections and listeners. +// It cancels all active RPCs on the server side and the corresponding +// pending RPCs on the client side will get notified by connection +// errors. +func (s *Server) Stop() { + s.quitOnce.Do(func() { + close(s.quit) + }) + + defer func() { + s.serveWG.Wait() + s.doneOnce.Do(func() { + close(s.done) + }) + }() + + s.channelzRemoveOnce.Do(func() { + if channelz.IsOn() { + channelz.RemoveEntry(s.channelzID) + } + }) + + s.mu.Lock() + listeners := s.lis + s.lis = nil + st := s.conns + s.conns = nil + // interrupt GracefulStop if Stop and GracefulStop are called concurrently. + s.cv.Broadcast() + s.mu.Unlock() + + for lis := range listeners { + lis.Close() + } + for c := range st { + c.Close() + } + + s.mu.Lock() + if s.events != nil { + s.events.Finish() + s.events = nil + } + s.mu.Unlock() +} + +// GracefulStop stops the gRPC server gracefully. It stops the server from +// accepting new connections and RPCs and blocks until all the pending RPCs are +// finished. 
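+//
+// A common shutdown pattern (a sketch; how the shutdown signal is delivered
+// is up to the caller):
+//
+//	go func() {
+//		<-shutdownCh // e.g. closed on SIGTERM
+//		s.GracefulStop()
+//	}()
+//	s.Serve(lis) // returns nil once GracefulStop has drained all RPCs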
+func (s *Server) GracefulStop() { + s.quitOnce.Do(func() { + close(s.quit) + }) + + defer func() { + s.doneOnce.Do(func() { + close(s.done) + }) + }() + + s.channelzRemoveOnce.Do(func() { + if channelz.IsOn() { + channelz.RemoveEntry(s.channelzID) + } + }) + s.mu.Lock() + if s.conns == nil { + s.mu.Unlock() + return + } + + for lis := range s.lis { + lis.Close() + } + s.lis = nil + if !s.drain { + for st := range s.conns { + st.Drain() + } + s.drain = true + } + + // Wait for serving threads to be ready to exit. Only then can we be sure no + // new conns will be created. + s.mu.Unlock() + s.serveWG.Wait() + s.mu.Lock() + + for len(s.conns) != 0 { + s.cv.Wait() + } + s.conns = nil + if s.events != nil { + s.events.Finish() + s.events = nil + } + s.mu.Unlock() +} + +// contentSubtype must be lowercase +// cannot return nil +func (s *Server) getCodec(contentSubtype string) baseCodec { + if s.opts.codec != nil { + return s.opts.codec + } + if contentSubtype == "" { + return encoding.GetCodec(proto.Name) + } + codec := encoding.GetCodec(contentSubtype) + if codec == nil { + return encoding.GetCodec(proto.Name) + } + return codec +} + +// SetHeader sets the header metadata. +// When called multiple times, all the provided metadata will be merged. +// All the metadata will be sent out when one of the following happens: +// - grpc.SendHeader() is called; +// - The first response is sent out; +// - An RPC status is sent out (error or success). +func SetHeader(ctx context.Context, md metadata.MD) error { + if md.Len() == 0 { + return nil + } + stream := ServerTransportStreamFromContext(ctx) + if stream == nil { + return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx) + } + return stream.SetHeader(md) +} + +// SendHeader sends header metadata. It may be called at most once. +// The provided md and headers set by SetHeader() will be sent. +func SendHeader(ctx context.Context, md metadata.MD) error { + stream := ServerTransportStreamFromContext(ctx) + if stream == nil { + return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx) + } + if err := stream.SendHeader(md); err != nil { + return toRPCErr(err) + } + return nil +} + +// SetTrailer sets the trailer metadata that will be sent when an RPC returns. +// When called more than once, all the provided metadata will be merged. +func SetTrailer(ctx context.Context, md metadata.MD) error { + if md.Len() == 0 { + return nil + } + stream := ServerTransportStreamFromContext(ctx) + if stream == nil { + return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx) + } + return stream.SetTrailer(md) +} + +// Method returns the method string for the server context. The returned +// string is in the format of "/service/method". +func Method(ctx context.Context) (string, bool) { + s := ServerTransportStreamFromContext(ctx) + if s == nil { + return "", false + } + return s.Method(), true +} + +type channelzServer struct { + s *Server +} + +func (c *channelzServer) ChannelzMetric() *channelz.ServerInternalMetric { + return c.s.channelzMetric() +} diff --git a/vendor/google.golang.org/grpc/service_config.go b/vendor/google.golang.org/grpc/service_config.go new file mode 100644 index 000000000..d0787f1e2 --- /dev/null +++ b/vendor/google.golang.org/grpc/service_config.go @@ -0,0 +1,429 @@ +/* + * + * Copyright 2017 gRPC authors. 
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import (
+ "encoding/json"
+ "fmt"
+ "strconv"
+ "strings"
+ "time"
+
+ "google.golang.org/grpc/balancer"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/grpclog"
+ "google.golang.org/grpc/internal"
+ "google.golang.org/grpc/serviceconfig"
+)
+
+const maxInt = int(^uint(0) >> 1)
+
+// MethodConfig defines the configuration recommended by the service providers for a
+// particular method.
+//
+// Deprecated: Users should not use this struct. Service config should be received
+// through the name resolver, as specified here
+// https://github.com/grpc/grpc/blob/master/doc/service_config.md
+type MethodConfig struct {
+ // WaitForReady indicates whether RPCs sent to this method should wait until
+ // the connection is ready by default (!failfast). The value specified via the
+ // gRPC client API will override the value set here.
+ WaitForReady *bool
+ // Timeout is the default timeout for RPCs sent to this method. The actual
+ // deadline used will be the minimum of the value specified here and the value
+ // set by the application via the gRPC client API. If either one is not set,
+ // then the other will be used. If neither is set, then the RPC has no deadline.
+ Timeout *time.Duration
+ // MaxReqSize is the maximum allowed payload size for an individual request in a
+ // stream (client->server) in bytes. The size which is measured is the serialized
+ // payload after per-message compression (but before stream compression) in bytes.
+ // The actual value used is the minimum of the value specified here and the value set
+ // by the application via the gRPC client API. If either one is not set, then the other
+ // will be used. If neither is set, then the built-in default is used.
+ MaxReqSize *int
+ // MaxRespSize is the maximum allowed payload size for an individual response in a
+ // stream (server->client) in bytes.
+ MaxRespSize *int
+ // RetryPolicy configures retry options for the method.
+ retryPolicy *retryPolicy
+}
+
+type lbConfig struct {
+ name string
+ cfg serviceconfig.LoadBalancingConfig
+}
+
+// ServiceConfig is provided by the service provider and contains parameters for how
+// clients that connect to the service should behave.
+//
+// Deprecated: Users should not use this struct. Service config should be received
+// through the name resolver, as specified here
+// https://github.com/grpc/grpc/blob/master/doc/service_config.md
+type ServiceConfig struct {
+ serviceconfig.Config
+
+ // LB is the load balancer the service provider recommends. The balancer
+ // specified via grpc.WithBalancer will override this. This is deprecated;
+ // lbConfig is preferred. If lbConfig and LB are both present, lbConfig
+ // will be used.
+ LB *string
+
+ // lbConfig is the service config's load balancing configuration. If
+ // lbConfig and LB are both present, lbConfig will be used.
+ lbConfig *lbConfig
+
+ // Methods contains a map for the methods in this service.
If there is an
+ // exact match for a method (i.e. /service/method) in the map, use the
+ // corresponding MethodConfig. If there's no exact match, look for the
+ // default config for the service (/service/) and use the corresponding
+ // MethodConfig if it exists. Otherwise, the method has no MethodConfig to
+ // use.
+ Methods map[string]MethodConfig
+
+ // If a retryThrottlingPolicy is provided, gRPC will automatically throttle
+ // retry attempts and hedged RPCs when the client’s ratio of failures to
+ // successes exceeds a threshold.
+ //
+ // For each server name, the gRPC client will maintain a token_count which is
+ // initially set to maxTokens, and can take values between 0 and maxTokens.
+ //
+ // Every outgoing RPC (regardless of service or method invoked) will change
+ // token_count as follows:
+ //
+ // - Every failed RPC will decrement the token_count by 1.
+ // - Every successful RPC will increment the token_count by tokenRatio.
+ //
+ // If token_count is less than or equal to maxTokens / 2, then RPCs will not
+ // be retried and hedged RPCs will not be sent.
+ retryThrottling *retryThrottlingPolicy
+ // healthCheckConfig must be set as one of the requirements to enable LB channel
+ // health check.
+ healthCheckConfig *healthCheckConfig
+ // rawJSONString stores the service config JSON string that gets parsed into
+ // this service config struct.
+ rawJSONString string
+}
+
+// healthCheckConfig defines the go-native version of the LB channel health check config.
+type healthCheckConfig struct {
+ // serviceName is the service name to use in the health-checking request.
+ ServiceName string
+}
+
+// retryPolicy defines the go-native version of the retry policy defined by the
+// service config here:
+// https://github.com/grpc/proposal/blob/master/A6-client-retries.md#integration-with-service-config
+type retryPolicy struct {
+ // MaxAttempts is the maximum number of attempts, including the original RPC.
+ //
+ // This field is required and must be two or greater.
+ maxAttempts int
+
+ // Exponential backoff parameters. The initial retry attempt will occur at
+ // random(0, initialBackoffMS). In general, the nth attempt will occur at
+ // random(0,
+ // min(initialBackoffMS*backoffMultiplier**(n-1), maxBackoffMS)).
+ //
+ // These fields are required and must be greater than zero.
+ initialBackoff time.Duration
+ maxBackoff time.Duration
+ backoffMultiplier float64
+
+ // The set of status codes which may be retried.
+ //
+ // Status codes are specified as strings, e.g., "UNAVAILABLE".
+ //
+ // This field is required and must be non-empty.
+ // Note: a set is used to store this for easy lookup.
+ retryableStatusCodes map[codes.Code]bool
+}
+
+type jsonRetryPolicy struct {
+ MaxAttempts int
+ InitialBackoff string
+ MaxBackoff string
+ BackoffMultiplier float64
+ RetryableStatusCodes []codes.Code
+}
+
+// retryThrottlingPolicy defines the go-native version of the retry throttling
+// policy defined by the service config here:
+// https://github.com/grpc/proposal/blob/master/A6-client-retries.md#integration-with-service-config
+type retryThrottlingPolicy struct {
+ // The number of tokens starts at maxTokens. The token_count will always be
+ // between 0 and maxTokens.
+ //
+ // This field is required and must be greater than zero.
+ MaxTokens float64
+ // The amount of tokens to add on each successful RPC. Typically this will
+ // be some number between 0 and 1, e.g., 0.1.
+ //
+ // This field is required and must be greater than zero.
Up to 3 decimal + // places are supported. + TokenRatio float64 +} + +func parseDuration(s *string) (*time.Duration, error) { + if s == nil { + return nil, nil + } + if !strings.HasSuffix(*s, "s") { + return nil, fmt.Errorf("malformed duration %q", *s) + } + ss := strings.SplitN((*s)[:len(*s)-1], ".", 3) + if len(ss) > 2 { + return nil, fmt.Errorf("malformed duration %q", *s) + } + // hasDigits is set if either the whole or fractional part of the number is + // present, since both are optional but one is required. + hasDigits := false + var d time.Duration + if len(ss[0]) > 0 { + i, err := strconv.ParseInt(ss[0], 10, 32) + if err != nil { + return nil, fmt.Errorf("malformed duration %q: %v", *s, err) + } + d = time.Duration(i) * time.Second + hasDigits = true + } + if len(ss) == 2 && len(ss[1]) > 0 { + if len(ss[1]) > 9 { + return nil, fmt.Errorf("malformed duration %q", *s) + } + f, err := strconv.ParseInt(ss[1], 10, 64) + if err != nil { + return nil, fmt.Errorf("malformed duration %q: %v", *s, err) + } + for i := 9; i > len(ss[1]); i-- { + f *= 10 + } + d += time.Duration(f) + hasDigits = true + } + if !hasDigits { + return nil, fmt.Errorf("malformed duration %q", *s) + } + + return &d, nil +} + +type jsonName struct { + Service *string + Method *string +} + +func (j jsonName) generatePath() (string, bool) { + if j.Service == nil { + return "", false + } + res := "/" + *j.Service + "/" + if j.Method != nil { + res += *j.Method + } + return res, true +} + +// TODO(lyuxuan): delete this struct after cleaning up old service config implementation. +type jsonMC struct { + Name *[]jsonName + WaitForReady *bool + Timeout *string + MaxRequestMessageBytes *int64 + MaxResponseMessageBytes *int64 + RetryPolicy *jsonRetryPolicy +} + +type loadBalancingConfig map[string]json.RawMessage + +// TODO(lyuxuan): delete this struct after cleaning up old service config implementation. 
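+//
+// jsonSC mirrors the service config JSON wire format. A minimal example of
+// the JSON it decodes (illustrative only):
+//
+//	{
+//	  "loadBalancingPolicy": "round_robin",
+//	  "methodConfig": [{
+//	    "name": [{"service": "foo.Bar"}],
+//	    "waitForReady": true,
+//	    "timeout": "1.5s"
+//	  }]
+//	}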
+type jsonSC struct {
+ LoadBalancingPolicy *string
+ LoadBalancingConfig *[]loadBalancingConfig
+ MethodConfig *[]jsonMC
+ RetryThrottling *retryThrottlingPolicy
+ HealthCheckConfig *healthCheckConfig
+}
+
+func init() {
+ internal.ParseServiceConfig = func(sc string) (interface{}, error) {
+ return parseServiceConfig(sc)
+ }
+}
+
+func parseServiceConfig(js string) (*ServiceConfig, error) {
+ if len(js) == 0 {
+ return nil, fmt.Errorf("no JSON service config provided")
+ }
+ var rsc jsonSC
+ err := json.Unmarshal([]byte(js), &rsc)
+ if err != nil {
+ grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err)
+ return nil, err
+ }
+ sc := ServiceConfig{
+ LB: rsc.LoadBalancingPolicy,
+ Methods: make(map[string]MethodConfig),
+ retryThrottling: rsc.RetryThrottling,
+ healthCheckConfig: rsc.HealthCheckConfig,
+ rawJSONString: js,
+ }
+ if rsc.LoadBalancingConfig != nil {
+ for i, lbcfg := range *rsc.LoadBalancingConfig {
+ if len(lbcfg) != 1 {
+ err := fmt.Errorf("invalid loadBalancingConfig: entry %v does not contain exactly 1 policy/config pair: %q", i, lbcfg)
+ grpclog.Warning(err.Error())
+ return nil, err
+ }
+ var name string
+ var jsonCfg json.RawMessage
+ for name, jsonCfg = range lbcfg {
+ }
+ builder := balancer.Get(name)
+ if builder == nil {
+ continue
+ }
+ sc.lbConfig = &lbConfig{name: name}
+ if parser, ok := builder.(balancer.ConfigParser); ok {
+ var err error
+ sc.lbConfig.cfg, err = parser.ParseConfig(jsonCfg)
+ if err != nil {
+ return nil, fmt.Errorf("error parsing loadBalancingConfig for policy %q: %v", name, err)
+ }
+ } else if string(jsonCfg) != "{}" {
+ grpclog.Warningf("non-empty balancer configuration %q, but balancer does not implement ParseConfig", string(jsonCfg))
+ }
+ break
+ }
+ }
+
+ if rsc.MethodConfig == nil {
+ return &sc, nil
+ }
+ for _, m := range *rsc.MethodConfig {
+ if m.Name == nil {
+ continue
+ }
+ d, err := parseDuration(m.Timeout)
+ if err != nil {
+ grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err)
+ return nil, err
+ }
+
+ mc := MethodConfig{
+ WaitForReady: m.WaitForReady,
+ Timeout: d,
+ }
+ if mc.retryPolicy, err = convertRetryPolicy(m.RetryPolicy); err != nil {
+ grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err)
+ return nil, err
+ }
+ if m.MaxRequestMessageBytes != nil {
+ if *m.MaxRequestMessageBytes > int64(maxInt) {
+ mc.MaxReqSize = newInt(maxInt)
+ } else {
+ mc.MaxReqSize = newInt(int(*m.MaxRequestMessageBytes))
+ }
+ }
+ if m.MaxResponseMessageBytes != nil {
+ if *m.MaxResponseMessageBytes > int64(maxInt) {
+ mc.MaxRespSize = newInt(maxInt)
+ } else {
+ mc.MaxRespSize = newInt(int(*m.MaxResponseMessageBytes))
+ }
+ }
+ for _, n := range *m.Name {
+ if path, valid := n.generatePath(); valid {
+ sc.Methods[path] = mc
+ }
+ }
+ }
+
+ if sc.retryThrottling != nil {
+ if mt := sc.retryThrottling.MaxTokens; mt <= 0 || mt > 1000 {
+ return nil, fmt.Errorf("invalid retry throttling config: maxTokens (%v) out of range (0, 1000]", mt)
+ }
+ if tr := sc.retryThrottling.TokenRatio; tr <= 0 {
+ return nil, fmt.Errorf("invalid retry throttling config: tokenRatio (%v) must be greater than zero", tr)
+ }
+ }
+ return &sc, nil
+}
+
+func convertRetryPolicy(jrp *jsonRetryPolicy) (p *retryPolicy, err error) {
+ if jrp == nil {
+ return nil, nil
+ }
+ ib, err := parseDuration(&jrp.InitialBackoff)
+ if err != nil {
+ return nil, err
+ }
+ mb, err := parseDuration(&jrp.MaxBackoff)
+ if err != nil {
+ return nil, err
+ }
+
+ if jrp.MaxAttempts <= 1 ||
+ *ib
<= 0 || + *mb <= 0 || + jrp.BackoffMultiplier <= 0 || + len(jrp.RetryableStatusCodes) == 0 { + grpclog.Warningf("grpc: ignoring retry policy %v due to illegal configuration", jrp) + return nil, nil + } + + rp := &retryPolicy{ + maxAttempts: jrp.MaxAttempts, + initialBackoff: *ib, + maxBackoff: *mb, + backoffMultiplier: jrp.BackoffMultiplier, + retryableStatusCodes: make(map[codes.Code]bool), + } + if rp.maxAttempts > 5 { + // TODO(retry): Make the max maxAttempts configurable. + rp.maxAttempts = 5 + } + for _, code := range jrp.RetryableStatusCodes { + rp.retryableStatusCodes[code] = true + } + return rp, nil +} + +func min(a, b *int) *int { + if *a < *b { + return a + } + return b +} + +func getMaxSize(mcMax, doptMax *int, defaultVal int) *int { + if mcMax == nil && doptMax == nil { + return &defaultVal + } + if mcMax != nil && doptMax != nil { + return min(mcMax, doptMax) + } + if mcMax != nil { + return mcMax + } + return doptMax +} + +func newInt(b int) *int { + return &b +} diff --git a/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go b/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go new file mode 100644 index 000000000..53b27875a --- /dev/null +++ b/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go @@ -0,0 +1,48 @@ +/* + * + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package serviceconfig defines types and methods for operating on gRPC +// service configs. +// +// This package is EXPERIMENTAL. +package serviceconfig + +import ( + "google.golang.org/grpc/internal" +) + +// Config represents an opaque data structure holding a service config. +type Config interface { + isConfig() +} + +// LoadBalancingConfig represents an opaque data structure holding a load +// balancer config. +type LoadBalancingConfig interface { + isLoadBalancingConfig() +} + +// Parse parses the JSON service config provided into an internal form or +// returns an error if the config is invalid. +func Parse(ServiceConfigJSON string) (Config, error) { + c, err := internal.ParseServiceConfig(ServiceConfigJSON) + if err != nil { + return nil, err + } + return c.(Config), err +} diff --git a/vendor/google.golang.org/grpc/stats/handlers.go b/vendor/google.golang.org/grpc/stats/handlers.go new file mode 100644 index 000000000..dc03731e4 --- /dev/null +++ b/vendor/google.golang.org/grpc/stats/handlers.go @@ -0,0 +1,63 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package stats + +import ( + "context" + "net" +) + +// ConnTagInfo defines the relevant information needed by connection context tagger. +type ConnTagInfo struct { + // RemoteAddr is the remote address of the corresponding connection. + RemoteAddr net.Addr + // LocalAddr is the local address of the corresponding connection. + LocalAddr net.Addr +} + +// RPCTagInfo defines the relevant information needed by RPC context tagger. +type RPCTagInfo struct { + // FullMethodName is the RPC method in the format of /package.service/method. + FullMethodName string + // FailFast indicates if this RPC is failfast. + // This field is only valid on client side, it's always false on server side. + FailFast bool +} + +// Handler defines the interface for the related stats handling (e.g., RPCs, connections). +type Handler interface { + // TagRPC can attach some information to the given context. + // The context used for the rest lifetime of the RPC will be derived from + // the returned context. + TagRPC(context.Context, *RPCTagInfo) context.Context + // HandleRPC processes the RPC stats. + HandleRPC(context.Context, RPCStats) + + // TagConn can attach some information to the given context. + // The returned context will be used for stats handling. + // For conn stats handling, the context used in HandleConn for this + // connection will be derived from the context returned. + // For RPC stats handling, + // - On server side, the context used in HandleRPC for all RPCs on this + // connection will be derived from the context returned. + // - On client side, the context is not derived from the context returned. + TagConn(context.Context, *ConnTagInfo) context.Context + // HandleConn processes the Conn stats. + HandleConn(context.Context, ConnStats) +} diff --git a/vendor/google.golang.org/grpc/stats/stats.go b/vendor/google.golang.org/grpc/stats/stats.go new file mode 100644 index 000000000..f3f593c84 --- /dev/null +++ b/vendor/google.golang.org/grpc/stats/stats.go @@ -0,0 +1,300 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +//go:generate protoc --go_out=plugins=grpc:. grpc_testing/test.proto + +// Package stats is for collecting and reporting various network and RPC stats. +// This package is for monitoring purpose only. All fields are read-only. +// All APIs are experimental. +package stats // import "google.golang.org/grpc/stats" + +import ( + "context" + "net" + "time" + + "google.golang.org/grpc/metadata" +) + +// RPCStats contains stats information about RPCs. +type RPCStats interface { + isRPCStats() + // IsClient returns true if this RPCStats is from client side. + IsClient() bool +} + +// Begin contains stats when an RPC begins. +// FailFast is only valid if this Begin is from client side. +type Begin struct { + // Client is true if this Begin is from client side. + Client bool + // BeginTime is the time when the RPC begins. + BeginTime time.Time + // FailFast indicates if this RPC is failfast. 
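+	// (Per the struct comment above, this field is only meaningful when
+	// Client is true.)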
+ FailFast bool +} + +// IsClient indicates if the stats information is from client side. +func (s *Begin) IsClient() bool { return s.Client } + +func (s *Begin) isRPCStats() {} + +// InPayload contains the information for an incoming payload. +type InPayload struct { + // Client is true if this InPayload is from client side. + Client bool + // Payload is the payload with original type. + Payload interface{} + // Data is the serialized message payload. + Data []byte + // Length is the length of uncompressed data. + Length int + // WireLength is the length of data on wire (compressed, signed, encrypted). + WireLength int + // RecvTime is the time when the payload is received. + RecvTime time.Time +} + +// IsClient indicates if the stats information is from client side. +func (s *InPayload) IsClient() bool { return s.Client } + +func (s *InPayload) isRPCStats() {} + +// InHeader contains stats when a header is received. +type InHeader struct { + // Client is true if this InHeader is from client side. + Client bool + // WireLength is the wire length of header. + WireLength int + + // The following fields are valid only if Client is false. + // FullMethod is the full RPC method string, i.e., /package.service/method. + FullMethod string + // RemoteAddr is the remote address of the corresponding connection. + RemoteAddr net.Addr + // LocalAddr is the local address of the corresponding connection. + LocalAddr net.Addr + // Compression is the compression algorithm used for the RPC. + Compression string +} + +// IsClient indicates if the stats information is from client side. +func (s *InHeader) IsClient() bool { return s.Client } + +func (s *InHeader) isRPCStats() {} + +// InTrailer contains stats when a trailer is received. +type InTrailer struct { + // Client is true if this InTrailer is from client side. + Client bool + // WireLength is the wire length of trailer. + WireLength int +} + +// IsClient indicates if the stats information is from client side. +func (s *InTrailer) IsClient() bool { return s.Client } + +func (s *InTrailer) isRPCStats() {} + +// OutPayload contains the information for an outgoing payload. +type OutPayload struct { + // Client is true if this OutPayload is from client side. + Client bool + // Payload is the payload with original type. + Payload interface{} + // Data is the serialized message payload. + Data []byte + // Length is the length of uncompressed data. + Length int + // WireLength is the length of data on wire (compressed, signed, encrypted). + WireLength int + // SentTime is the time when the payload is sent. + SentTime time.Time +} + +// IsClient indicates if this stats information is from client side. +func (s *OutPayload) IsClient() bool { return s.Client } + +func (s *OutPayload) isRPCStats() {} + +// OutHeader contains stats when a header is sent. +type OutHeader struct { + // Client is true if this OutHeader is from client side. + Client bool + + // The following fields are valid only if Client is true. + // FullMethod is the full RPC method string, i.e., /package.service/method. + FullMethod string + // RemoteAddr is the remote address of the corresponding connection. + RemoteAddr net.Addr + // LocalAddr is the local address of the corresponding connection. + LocalAddr net.Addr + // Compression is the compression algorithm used for the RPC. + Compression string +} + +// IsClient indicates if this stats information is from client side. 
+func (s *OutHeader) IsClient() bool { return s.Client } + +func (s *OutHeader) isRPCStats() {} + +// OutTrailer contains stats when a trailer is sent. +type OutTrailer struct { + // Client is true if this OutTrailer is from client side. + Client bool + // WireLength is the wire length of trailer. + WireLength int +} + +// IsClient indicates if this stats information is from client side. +func (s *OutTrailer) IsClient() bool { return s.Client } + +func (s *OutTrailer) isRPCStats() {} + +// End contains stats when an RPC ends. +type End struct { + // Client is true if this End is from client side. + Client bool + // BeginTime is the time when the RPC began. + BeginTime time.Time + // EndTime is the time when the RPC ends. + EndTime time.Time + // Trailer contains the trailer metadata received from the server. This + // field is only valid if this End is from the client side. + Trailer metadata.MD + // Error is the error the RPC ended with. It is an error generated from + // status.Status and can be converted back to status.Status using + // status.FromError if non-nil. + Error error +} + +// IsClient indicates if this is from client side. +func (s *End) IsClient() bool { return s.Client } + +func (s *End) isRPCStats() {} + +// ConnStats contains stats information about connections. +type ConnStats interface { + isConnStats() + // IsClient returns true if this ConnStats is from client side. + IsClient() bool +} + +// ConnBegin contains the stats of a connection when it is established. +type ConnBegin struct { + // Client is true if this ConnBegin is from client side. + Client bool +} + +// IsClient indicates if this is from client side. +func (s *ConnBegin) IsClient() bool { return s.Client } + +func (s *ConnBegin) isConnStats() {} + +// ConnEnd contains the stats of a connection when it ends. +type ConnEnd struct { + // Client is true if this ConnEnd is from client side. + Client bool +} + +// IsClient indicates if this is from client side. +func (s *ConnEnd) IsClient() bool { return s.Client } + +func (s *ConnEnd) isConnStats() {} + +type incomingTagsKey struct{} +type outgoingTagsKey struct{} + +// SetTags attaches stats tagging data to the context, which will be sent in +// the outgoing RPC with the header grpc-tags-bin. Subsequent calls to +// SetTags will overwrite the values from earlier calls. +// +// NOTE: this is provided only for backward compatibility with existing clients +// and will likely be removed in an upcoming release. New uses should transmit +// this type of data using metadata with a different, non-reserved (i.e. does +// not begin with "grpc-") header name. +func SetTags(ctx context.Context, b []byte) context.Context { + return context.WithValue(ctx, outgoingTagsKey{}, b) +} + +// Tags returns the tags from the context for the inbound RPC. +// +// NOTE: this is provided only for backward compatibility with existing clients +// and will likely be removed in an upcoming release. New uses should transmit +// this type of data using metadata with a different, non-reserved (i.e. does +// not begin with "grpc-") header name. +func Tags(ctx context.Context) []byte { + b, _ := ctx.Value(incomingTagsKey{}).([]byte) + return b +} + +// SetIncomingTags attaches stats tagging data to the context, to be read by +// the application (not sent in outgoing RPCs). +// +// This is intended for gRPC-internal use ONLY. 
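+// Incoming tags are populated from the grpc-tags-bin header by the
+// transport, so applications normally read them with Tags rather than
+// setting them here.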
+func SetIncomingTags(ctx context.Context, b []byte) context.Context { + return context.WithValue(ctx, incomingTagsKey{}, b) +} + +// OutgoingTags returns the tags from the context for the outbound RPC. +// +// This is intended for gRPC-internal use ONLY. +func OutgoingTags(ctx context.Context) []byte { + b, _ := ctx.Value(outgoingTagsKey{}).([]byte) + return b +} + +type incomingTraceKey struct{} +type outgoingTraceKey struct{} + +// SetTrace attaches stats tagging data to the context, which will be sent in +// the outgoing RPC with the header grpc-trace-bin. Subsequent calls to +// SetTrace will overwrite the values from earlier calls. +// +// NOTE: this is provided only for backward compatibility with existing clients +// and will likely be removed in an upcoming release. New uses should transmit +// this type of data using metadata with a different, non-reserved (i.e. does +// not begin with "grpc-") header name. +func SetTrace(ctx context.Context, b []byte) context.Context { + return context.WithValue(ctx, outgoingTraceKey{}, b) +} + +// Trace returns the trace from the context for the inbound RPC. +// +// NOTE: this is provided only for backward compatibility with existing clients +// and will likely be removed in an upcoming release. New uses should transmit +// this type of data using metadata with a different, non-reserved (i.e. does +// not begin with "grpc-") header name. +func Trace(ctx context.Context) []byte { + b, _ := ctx.Value(incomingTraceKey{}).([]byte) + return b +} + +// SetIncomingTrace attaches stats tagging data to the context, to be read by +// the application (not sent in outgoing RPCs). It is intended for +// gRPC-internal use. +func SetIncomingTrace(ctx context.Context, b []byte) context.Context { + return context.WithValue(ctx, incomingTraceKey{}, b) +} + +// OutgoingTrace returns the trace from the context for the outbound RPC. It is +// intended for gRPC-internal use. +func OutgoingTrace(ctx context.Context) []byte { + b, _ := ctx.Value(outgoingTraceKey{}).([]byte) + return b +} diff --git a/vendor/google.golang.org/grpc/status/status.go b/vendor/google.golang.org/grpc/status/status.go new file mode 100644 index 000000000..641c45c6f --- /dev/null +++ b/vendor/google.golang.org/grpc/status/status.go @@ -0,0 +1,217 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package status implements errors returned by gRPC. These errors are +// serialized and transmitted on the wire between server and client, and allow +// for additional data to be transmitted via the Details field in the status +// proto. gRPC service handlers should return an error created by this +// package, and gRPC clients should expect a corresponding error to be +// returned from the RPC call. +// +// This package upholds the invariants that a non-nil error may not +// contain an OK code, and an OK code must result in a nil error. 
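+//
+// As a brief, illustrative sketch of typical usage (the identifiers in the
+// example are hypothetical): a server handler might report a failed lookup
+// with
+//
+//	return nil, status.Errorf(codes.NotFound, "no profile for user %q", id)
+//
+// and a client can recover the code and message with
+//
+//	if st, ok := status.FromError(err); ok {
+//		log.Printf("code = %v, msg = %q", st.Code(), st.Message())
+//	}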
+package status + +import ( + "context" + "errors" + "fmt" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + spb "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/internal" +) + +func init() { + internal.StatusRawProto = statusRawProto +} + +func statusRawProto(s *Status) *spb.Status { return s.s } + +// statusError is an alias of a status proto. It implements error and Status, +// and a nil statusError should never be returned by this package. +type statusError spb.Status + +func (se *statusError) Error() string { + p := (*spb.Status)(se) + return fmt.Sprintf("rpc error: code = %s desc = %s", codes.Code(p.GetCode()), p.GetMessage()) +} + +func (se *statusError) GRPCStatus() *Status { + return &Status{s: (*spb.Status)(se)} +} + +// Status represents an RPC status code, message, and details. It is immutable +// and should be created with New, Newf, or FromProto. +type Status struct { + s *spb.Status +} + +// Code returns the status code contained in s. +func (s *Status) Code() codes.Code { + if s == nil || s.s == nil { + return codes.OK + } + return codes.Code(s.s.Code) +} + +// Message returns the message contained in s. +func (s *Status) Message() string { + if s == nil || s.s == nil { + return "" + } + return s.s.Message +} + +// Proto returns s's status as an spb.Status proto message. +func (s *Status) Proto() *spb.Status { + if s == nil { + return nil + } + return proto.Clone(s.s).(*spb.Status) +} + +// Err returns an immutable error representing s; returns nil if s.Code() is +// OK. +func (s *Status) Err() error { + if s.Code() == codes.OK { + return nil + } + return (*statusError)(s.s) +} + +// New returns a Status representing c and msg. +func New(c codes.Code, msg string) *Status { + return &Status{s: &spb.Status{Code: int32(c), Message: msg}} +} + +// Newf returns New(c, fmt.Sprintf(format, a...)). +func Newf(c codes.Code, format string, a ...interface{}) *Status { + return New(c, fmt.Sprintf(format, a...)) +} + +// Error returns an error representing c and msg. If c is OK, returns nil. +func Error(c codes.Code, msg string) error { + return New(c, msg).Err() +} + +// Errorf returns Error(c, fmt.Sprintf(format, a...)). +func Errorf(c codes.Code, format string, a ...interface{}) error { + return Error(c, fmt.Sprintf(format, a...)) +} + +// ErrorProto returns an error representing s. If s.Code is OK, returns nil. +func ErrorProto(s *spb.Status) error { + return FromProto(s).Err() +} + +// FromProto returns a Status representing s. +func FromProto(s *spb.Status) *Status { + return &Status{s: proto.Clone(s).(*spb.Status)} +} + +// FromError returns a Status representing err if it was produced from this +// package or has a method `GRPCStatus() *Status`. Otherwise, ok is false and a +// Status is returned with codes.Unknown and the original error message. +func FromError(err error) (s *Status, ok bool) { + if err == nil { + return &Status{s: &spb.Status{Code: int32(codes.OK)}}, true + } + if se, ok := err.(interface { + GRPCStatus() *Status + }); ok { + return se.GRPCStatus(), true + } + return New(codes.Unknown, err.Error()), false +} + +// Convert is a convenience function which removes the need to handle the +// boolean return value from FromError. +func Convert(err error) *Status { + s, _ := FromError(err) + return s +} + +// WithDetails returns a new status with the provided details messages appended to the status. 
+// If any errors are encountered, it returns nil and the first error encountered. +func (s *Status) WithDetails(details ...proto.Message) (*Status, error) { + if s.Code() == codes.OK { + return nil, errors.New("no error details for status with code OK") + } + // s.Code() != OK implies that s.Proto() != nil. + p := s.Proto() + for _, detail := range details { + any, err := ptypes.MarshalAny(detail) + if err != nil { + return nil, err + } + p.Details = append(p.Details, any) + } + return &Status{s: p}, nil +} + +// Details returns a slice of details messages attached to the status. +// If a detail cannot be decoded, the error is returned in place of the detail. +func (s *Status) Details() []interface{} { + if s == nil || s.s == nil { + return nil + } + details := make([]interface{}, 0, len(s.s.Details)) + for _, any := range s.s.Details { + detail := &ptypes.DynamicAny{} + if err := ptypes.UnmarshalAny(any, detail); err != nil { + details = append(details, err) + continue + } + details = append(details, detail.Message) + } + return details +} + +// Code returns the Code of the error if it is a Status error, codes.OK if err +// is nil, or codes.Unknown otherwise. +func Code(err error) codes.Code { + // Don't use FromError to avoid allocation of OK status. + if err == nil { + return codes.OK + } + if se, ok := err.(interface { + GRPCStatus() *Status + }); ok { + return se.GRPCStatus().Code() + } + return codes.Unknown +} + +// FromContextError converts a context error into a Status. It returns a +// Status with codes.OK if err is nil, or a Status with codes.Unknown if err is +// non-nil and not a context error. +func FromContextError(err error) *Status { + switch err { + case nil: + return New(codes.OK, "") + case context.DeadlineExceeded: + return New(codes.DeadlineExceeded, err.Error()) + case context.Canceled: + return New(codes.Canceled, err.Error()) + default: + return New(codes.Unknown, err.Error()) + } +} diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go new file mode 100644 index 000000000..db14c3225 --- /dev/null +++ b/vendor/google.golang.org/grpc/stream.go @@ -0,0 +1,1511 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "context" + "errors" + "io" + "math" + "strconv" + "sync" + "time" + + "golang.org/x/net/trace" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/encoding" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/balancerload" + "google.golang.org/grpc/internal/binarylog" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/grpcrand" + "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" +) + +// StreamHandler defines the handler called by gRPC server to complete the +// execution of a streaming RPC. 
If a StreamHandler returns an error, it +// should be produced by the status package, or else gRPC will use +// codes.Unknown as the status code and err.Error() as the status message +// of the RPC. +type StreamHandler func(srv interface{}, stream ServerStream) error + +// StreamDesc represents a streaming RPC service's method specification. +type StreamDesc struct { + StreamName string + Handler StreamHandler + + // At least one of these is true. + ServerStreams bool + ClientStreams bool +} + +// Stream defines the common interface a client or server stream has to satisfy. +// +// Deprecated: See ClientStream and ServerStream documentation instead. +type Stream interface { + // Deprecated: See ClientStream and ServerStream documentation instead. + Context() context.Context + // Deprecated: See ClientStream and ServerStream documentation instead. + SendMsg(m interface{}) error + // Deprecated: See ClientStream and ServerStream documentation instead. + RecvMsg(m interface{}) error +} + +// ClientStream defines the client-side behavior of a streaming RPC. +// +// All errors returned from ClientStream methods are compatible with the +// status package. +type ClientStream interface { + // Header returns the header metadata received from the server if there + // is any. It blocks if the metadata is not ready to read. + Header() (metadata.MD, error) + // Trailer returns the trailer metadata from the server, if there is any. + // It must only be called after stream.CloseAndRecv has returned, or + // stream.Recv has returned a non-nil error (including io.EOF). + Trailer() metadata.MD + // CloseSend closes the send direction of the stream. It closes the stream + // when non-nil error is met. It is also not safe to call CloseSend + // concurrently with SendMsg. + CloseSend() error + // Context returns the context for this stream. + // + // It should not be called until after Header or RecvMsg has returned. Once + // called, subsequent client-side retries are disabled. + Context() context.Context + // SendMsg is generally called by generated code. On error, SendMsg aborts + // the stream. If the error was generated by the client, the status is + // returned directly; otherwise, io.EOF is returned and the status of + // the stream may be discovered using RecvMsg. + // + // SendMsg blocks until: + // - There is sufficient flow control to schedule m with the transport, or + // - The stream is done, or + // - The stream breaks. + // + // SendMsg does not wait until the message is received by the server. An + // untimely stream closure may result in lost messages. To ensure delivery, + // users should ensure the RPC completed successfully using RecvMsg. + // + // It is safe to have a goroutine calling SendMsg and another goroutine + // calling RecvMsg on the same stream at the same time, but it is not safe + // to call SendMsg on the same stream in different goroutines. It is also + // not safe to call CloseSend concurrently with SendMsg. + SendMsg(m interface{}) error + // RecvMsg blocks until it receives a message into m or the stream is + // done. It returns io.EOF when the stream completes successfully. On + // any other error, the stream is aborted and the error contains the RPC + // status. + // + // It is safe to have a goroutine calling SendMsg and another goroutine + // calling RecvMsg on the same stream at the same time, but it is not + // safe to call RecvMsg on the same stream in different goroutines. + RecvMsg(m interface{}) error +} + +// NewStream creates a new Stream for the client side. 
This is typically +// called by generated code. ctx is used for the lifetime of the stream. +// +// To ensure resources are not leaked due to the stream returned, one of the following +// actions must be performed: +// +// 1. Call Close on the ClientConn. +// 2. Cancel the context provided. +// 3. Call RecvMsg until a non-nil error is returned. A protobuf-generated +// client-streaming RPC, for instance, might use the helper function +// CloseAndRecv (note that CloseSend does not Recv, therefore is not +// guaranteed to release all resources). +// 4. Receive a non-nil, non-io.EOF error from Header or SendMsg. +// +// If none of the above happen, a goroutine and a context will be leaked, and grpc +// will not call the optionally-configured stats handler with a stats.End message. +func (cc *ClientConn) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) { + // allow interceptor to see all applicable call options, which means those + // configured as defaults from dial option as well as per-call options + opts = combine(cc.dopts.callOptions, opts) + + if cc.dopts.streamInt != nil { + return cc.dopts.streamInt(ctx, desc, cc, method, newClientStream, opts...) + } + return newClientStream(ctx, desc, cc, method, opts...) +} + +// NewClientStream is a wrapper for ClientConn.NewStream. +func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) { + return cc.NewStream(ctx, desc, method, opts...) +} + +func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) { + if channelz.IsOn() { + cc.incrCallsStarted() + defer func() { + if err != nil { + cc.incrCallsFailed() + } + }() + } + c := defaultCallInfo() + // Provide an opportunity for the first RPC to see the first service config + // provided by the resolver. + if err := cc.waitForResolvedAddrs(ctx); err != nil { + return nil, err + } + mc := cc.GetMethodConfig(method) + if mc.WaitForReady != nil { + c.failFast = !*mc.WaitForReady + } + + // Possible context leak: + // The cancel function for the child context we create will only be called + // when RecvMsg returns a non-nil error, if the ClientConn is closed, or if + // an error is generated by SendMsg. + // https://github.com/grpc/grpc-go/issues/1818. + var cancel context.CancelFunc + if mc.Timeout != nil && *mc.Timeout >= 0 { + ctx, cancel = context.WithTimeout(ctx, *mc.Timeout) + } else { + ctx, cancel = context.WithCancel(ctx) + } + defer func() { + if err != nil { + cancel() + } + }() + + for _, o := range opts { + if err := o.before(c); err != nil { + return nil, toRPCErr(err) + } + } + c.maxSendMessageSize = getMaxSize(mc.MaxReqSize, c.maxSendMessageSize, defaultClientMaxSendMessageSize) + c.maxReceiveMessageSize = getMaxSize(mc.MaxRespSize, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize) + if err := setCallInfoCodec(c); err != nil { + return nil, err + } + + callHdr := &transport.CallHdr{ + Host: cc.authority, + Method: method, + ContentSubtype: c.contentSubtype, + } + + // Set our outgoing compression according to the UseCompressor CallOption, if + // set. In that case, also find the compressor from the encoding package. + // Otherwise, use the compressor configured by the WithCompressor DialOption, + // if set. 
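+	// As a minimal illustration of the first case: a caller can opt in to a
+	// registered compressor per call, e.g.
+	//
+	//	err := cc.Invoke(ctx, method, req, reply, grpc.UseCompressor(gzip.Name))
+	//
+	// where gzip is the google.golang.org/grpc/encoding/gzip package.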
+ var cp Compressor + var comp encoding.Compressor + if ct := c.compressorType; ct != "" { + callHdr.SendCompress = ct + if ct != encoding.Identity { + comp = encoding.GetCompressor(ct) + if comp == nil { + return nil, status.Errorf(codes.Internal, "grpc: Compressor is not installed for requested grpc-encoding %q", ct) + } + } + } else if cc.dopts.cp != nil { + callHdr.SendCompress = cc.dopts.cp.Type() + cp = cc.dopts.cp + } + if c.creds != nil { + callHdr.Creds = c.creds + } + var trInfo *traceInfo + if EnableTracing { + trInfo = &traceInfo{ + tr: trace.New("grpc.Sent."+methodFamily(method), method), + firstLine: firstLine{ + client: true, + }, + } + if deadline, ok := ctx.Deadline(); ok { + trInfo.firstLine.deadline = time.Until(deadline) + } + trInfo.tr.LazyLog(&trInfo.firstLine, false) + ctx = trace.NewContext(ctx, trInfo.tr) + } + ctx = newContextWithRPCInfo(ctx, c.failFast, c.codec, cp, comp) + sh := cc.dopts.copts.StatsHandler + var beginTime time.Time + if sh != nil { + ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: c.failFast}) + beginTime = time.Now() + begin := &stats.Begin{ + Client: true, + BeginTime: beginTime, + FailFast: c.failFast, + } + sh.HandleRPC(ctx, begin) + } + + cs := &clientStream{ + callHdr: callHdr, + ctx: ctx, + methodConfig: &mc, + opts: opts, + callInfo: c, + cc: cc, + desc: desc, + codec: c.codec, + cp: cp, + comp: comp, + cancel: cancel, + beginTime: beginTime, + firstAttempt: true, + } + if !cc.dopts.disableRetry { + cs.retryThrottler = cc.retryThrottler.Load().(*retryThrottler) + } + cs.binlog = binarylog.GetMethodLogger(method) + + cs.callInfo.stream = cs + // Only this initial attempt has stats/tracing. + // TODO(dfawley): move to newAttempt when per-attempt stats are implemented. + if err := cs.newAttemptLocked(sh, trInfo); err != nil { + cs.finish(err) + return nil, err + } + + op := func(a *csAttempt) error { return a.newStream() } + if err := cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) }); err != nil { + cs.finish(err) + return nil, err + } + + if cs.binlog != nil { + md, _ := metadata.FromOutgoingContext(ctx) + logEntry := &binarylog.ClientHeader{ + OnClientSide: true, + Header: md, + MethodName: method, + Authority: cs.cc.authority, + } + if deadline, ok := ctx.Deadline(); ok { + logEntry.Timeout = time.Until(deadline) + if logEntry.Timeout < 0 { + logEntry.Timeout = 0 + } + } + cs.binlog.Log(logEntry) + } + + if desc != unaryStreamDesc { + // Listen on cc and stream contexts to cleanup when the user closes the + // ClientConn or cancels the stream context. In all other cases, an error + // should already be injected into the recv buffer by the transport, which + // the client will eventually receive, and then we will cancel the stream's + // context in clientStream.finish. 
+ go func() { + select { + case <-cc.ctx.Done(): + cs.finish(ErrClientConnClosing) + case <-ctx.Done(): + cs.finish(toRPCErr(ctx.Err())) + } + }() + } + return cs, nil +} + +func (cs *clientStream) newAttemptLocked(sh stats.Handler, trInfo *traceInfo) error { + cs.attempt = &csAttempt{ + cs: cs, + dc: cs.cc.dopts.dc, + statsHandler: sh, + trInfo: trInfo, + } + + if err := cs.ctx.Err(); err != nil { + return toRPCErr(err) + } + t, done, err := cs.cc.getTransport(cs.ctx, cs.callInfo.failFast, cs.callHdr.Method) + if err != nil { + return err + } + if trInfo != nil { + trInfo.firstLine.SetRemoteAddr(t.RemoteAddr()) + } + cs.attempt.t = t + cs.attempt.done = done + return nil +} + +func (a *csAttempt) newStream() error { + cs := a.cs + cs.callHdr.PreviousAttempts = cs.numRetries + s, err := a.t.NewStream(cs.ctx, cs.callHdr) + if err != nil { + return toRPCErr(err) + } + cs.attempt.s = s + cs.attempt.p = &parser{r: s} + return nil +} + +// clientStream implements a client side Stream. +type clientStream struct { + callHdr *transport.CallHdr + opts []CallOption + callInfo *callInfo + cc *ClientConn + desc *StreamDesc + + codec baseCodec + cp Compressor + comp encoding.Compressor + + cancel context.CancelFunc // cancels all attempts + + sentLast bool // sent an end stream + beginTime time.Time + + methodConfig *MethodConfig + + ctx context.Context // the application's context, wrapped by stats/tracing + + retryThrottler *retryThrottler // The throttler active when the RPC began. + + binlog *binarylog.MethodLogger // Binary logger, can be nil. + // serverHeaderBinlogged is a boolean for whether server header has been + // logged. Server header will be logged when the first time one of those + // happens: stream.Header(), stream.Recv(). + // + // It's only read and used by Recv() and Header(), so it doesn't need to be + // synchronized. + serverHeaderBinlogged bool + + mu sync.Mutex + firstAttempt bool // if true, transparent retry is valid + numRetries int // exclusive of transparent retry attempt(s) + numRetriesSincePushback int // retries since pushback; to reset backoff + finished bool // TODO: replace with atomic cmpxchg or sync.Once? + attempt *csAttempt // the active client stream attempt + // TODO(hedging): hedging will have multiple attempts simultaneously. + committed bool // active attempt committed for retry? + buffer []func(a *csAttempt) error // operations to replay on retry + bufferSize int // current size of buffer +} + +// csAttempt implements a single transport stream attempt within a +// clientStream. +type csAttempt struct { + cs *clientStream + t transport.ClientTransport + s *transport.Stream + p *parser + done func(balancer.DoneInfo) + + finished bool + dc Decompressor + decomp encoding.Compressor + decompSet bool + + mu sync.Mutex // guards trInfo.tr + // trInfo may be nil (if EnableTracing is false). + // trInfo.tr is set when created (if EnableTracing is true), + // and cleared when the finish method is called. + trInfo *traceInfo + + statsHandler stats.Handler +} + +func (cs *clientStream) commitAttemptLocked() { + cs.committed = true + cs.buffer = nil +} + +func (cs *clientStream) commitAttempt() { + cs.mu.Lock() + cs.commitAttemptLocked() + cs.mu.Unlock() +} + +// shouldRetry returns nil if the RPC should be retried; otherwise it returns +// the error that should be returned by the operation. 
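+// The decision takes into account, in rough order: transparent-retry
+// eligibility, server pushback via the grpc-retry-pushback-ms trailer, the
+// method's retryPolicy, and the client-side retry throttler.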
+func (cs *clientStream) shouldRetry(err error) error { + if cs.attempt.s == nil && !cs.callInfo.failFast { + // In the event of any error from NewStream (attempt.s == nil), we + // never attempted to write anything to the wire, so we can retry + // indefinitely for non-fail-fast RPCs. + return nil + } + if cs.finished || cs.committed { + // RPC is finished or committed; cannot retry. + return err + } + // Wait for the trailers. + if cs.attempt.s != nil { + <-cs.attempt.s.Done() + } + if cs.firstAttempt && !cs.callInfo.failFast && (cs.attempt.s == nil || cs.attempt.s.Unprocessed()) { + // First attempt, wait-for-ready, stream unprocessed: transparently retry. + cs.firstAttempt = false + return nil + } + cs.firstAttempt = false + if cs.cc.dopts.disableRetry { + return err + } + + pushback := 0 + hasPushback := false + if cs.attempt.s != nil { + if to, toErr := cs.attempt.s.TrailersOnly(); toErr != nil || !to { + return err + } + + // TODO(retry): Move down if the spec changes to not check server pushback + // before considering this a failure for throttling. + sps := cs.attempt.s.Trailer()["grpc-retry-pushback-ms"] + if len(sps) == 1 { + var e error + if pushback, e = strconv.Atoi(sps[0]); e != nil || pushback < 0 { + grpclog.Infof("Server retry pushback specified to abort (%q).", sps[0]) + cs.retryThrottler.throttle() // This counts as a failure for throttling. + return err + } + hasPushback = true + } else if len(sps) > 1 { + grpclog.Warningf("Server retry pushback specified multiple values (%q); not retrying.", sps) + cs.retryThrottler.throttle() // This counts as a failure for throttling. + return err + } + } + + var code codes.Code + if cs.attempt.s != nil { + code = cs.attempt.s.Status().Code() + } else { + code = status.Convert(err).Code() + } + + rp := cs.methodConfig.retryPolicy + if rp == nil || !rp.retryableStatusCodes[code] { + return err + } + + // Note: the ordering here is important; we count this as a failure + // only if the code matched a retryable code. + if cs.retryThrottler.throttle() { + return err + } + if cs.numRetries+1 >= rp.maxAttempts { + return err + } + + var dur time.Duration + if hasPushback { + dur = time.Millisecond * time.Duration(pushback) + cs.numRetriesSincePushback = 0 + } else { + fact := math.Pow(rp.backoffMultiplier, float64(cs.numRetriesSincePushback)) + cur := float64(rp.initialBackoff) * fact + if max := float64(rp.maxBackoff); cur > max { + cur = max + } + dur = time.Duration(grpcrand.Int63n(int64(cur))) + cs.numRetriesSincePushback++ + } + + // TODO(dfawley): we could eagerly fail here if dur puts us past the + // deadline, but unsure if it is worth doing. + t := time.NewTimer(dur) + select { + case <-t.C: + cs.numRetries++ + return nil + case <-cs.ctx.Done(): + t.Stop() + return status.FromContextError(cs.ctx.Err()).Err() + } +} + +// Returns nil if a retry was performed and succeeded; error otherwise. +func (cs *clientStream) retryLocked(lastErr error) error { + for { + cs.attempt.finish(lastErr) + if err := cs.shouldRetry(lastErr); err != nil { + cs.commitAttemptLocked() + return err + } + if err := cs.newAttemptLocked(nil, nil); err != nil { + return err + } + if lastErr = cs.replayBufferLocked(); lastErr == nil { + return nil + } + } +} + +func (cs *clientStream) Context() context.Context { + cs.commitAttempt() + // No need to lock before using attempt, since we know it is committed and + // cannot change. 
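+	// (Committing here is also why the ClientStream documentation says that
+	// calling Context disables subsequent client-side retries.)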
+	return cs.attempt.s.Context()
+}
+
+func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) error {
+	cs.mu.Lock()
+	for {
+		if cs.committed {
+			cs.mu.Unlock()
+			return op(cs.attempt)
+		}
+		a := cs.attempt
+		cs.mu.Unlock()
+		err := op(a)
+		cs.mu.Lock()
+		if a != cs.attempt {
+			// We started another attempt already.
+			continue
+		}
+		if err == io.EOF {
+			<-a.s.Done()
+		}
+		if err == nil || (err == io.EOF && a.s.Status().Code() == codes.OK) {
+			onSuccess()
+			cs.mu.Unlock()
+			return err
+		}
+		if err := cs.retryLocked(err); err != nil {
+			cs.mu.Unlock()
+			return err
+		}
+	}
+}
+
+func (cs *clientStream) Header() (metadata.MD, error) {
+	var m metadata.MD
+	err := cs.withRetry(func(a *csAttempt) error {
+		var err error
+		m, err = a.s.Header()
+		return toRPCErr(err)
+	}, cs.commitAttemptLocked)
+	if err != nil {
+		cs.finish(err)
+		return nil, err
+	}
+	if cs.binlog != nil && !cs.serverHeaderBinlogged {
+		// Only log if binary log is on and header has not been logged.
+		logEntry := &binarylog.ServerHeader{
+			OnClientSide: true,
+			Header:       m,
+			PeerAddr:     nil,
+		}
+		if peer, ok := peer.FromContext(cs.Context()); ok {
+			logEntry.PeerAddr = peer.Addr
+		}
+		cs.binlog.Log(logEntry)
+		cs.serverHeaderBinlogged = true
+	}
+	return m, err
+}
+
+func (cs *clientStream) Trailer() metadata.MD {
+	// On RPC failure, we never need to retry, because valid usage requires
+	// that RecvMsg() has returned a non-nil error before this function is
+	// called. We would have retried earlier if necessary.
+	//
+	// Commit the attempt anyway, just in case users are not following those
+	// directions -- it will prevent races and should not meaningfully impact
+	// performance.
+	cs.commitAttempt()
+	if cs.attempt.s == nil {
+		return nil
+	}
+	return cs.attempt.s.Trailer()
+}
+
+func (cs *clientStream) replayBufferLocked() error {
+	a := cs.attempt
+	for _, f := range cs.buffer {
+		if err := f(a); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (cs *clientStream) bufferForRetryLocked(sz int, op func(a *csAttempt) error) {
+	// Note: we will still buffer if retry is disabled (for transparent retries).
+	if cs.committed {
+		return
+	}
+	cs.bufferSize += sz
+	if cs.bufferSize > cs.callInfo.maxRetryRPCBufferSize {
+		cs.commitAttemptLocked()
+		return
+	}
+	cs.buffer = append(cs.buffer, op)
+}
+
+func (cs *clientStream) SendMsg(m interface{}) (err error) {
+	defer func() {
+		if err != nil && err != io.EOF {
+			// Call finish on the client stream for errors generated by this SendMsg
+			// call, as these indicate problems created by this client. (Transport
+			// errors are converted to an io.EOF error in csAttempt.sendMsg; the real
+			// error will be returned from RecvMsg eventually in that case, or be
+			// retried.)
+			cs.finish(err)
+		}
+	}()
+	if cs.sentLast {
+		return status.Errorf(codes.Internal, "SendMsg called after CloseSend")
+	}
+	if !cs.desc.ClientStreams {
+		cs.sentLast = true
+	}
+
+	// load hdr, payload, data
+	hdr, payload, data, err := prepareMsg(m, cs.codec, cs.cp, cs.comp)
+	if err != nil {
+		return err
+	}
+
+	// TODO(dfawley): should we be checking len(data) instead?
+	if len(payload) > *cs.callInfo.maxSendMessageSize {
+		return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), *cs.callInfo.maxSendMessageSize)
+	}
+	msgBytes := data // Store the pointer before setting to nil. For binary logging.
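+	// Wrap the write in an op so that, when retries are enabled, the same
+	// serialized message can be buffered and replayed on a later attempt (see
+	// bufferForRetryLocked).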
+	op := func(a *csAttempt) error {
+		err := a.sendMsg(m, hdr, payload, data)
+		// nil out the message and data when replaying; they are only needed for
+		// stats, which are disabled for subsequent attempts.
+		m, data = nil, nil
+		return err
+	}
+	err = cs.withRetry(op, func() { cs.bufferForRetryLocked(len(hdr)+len(payload), op) })
+	if cs.binlog != nil && err == nil {
+		cs.binlog.Log(&binarylog.ClientMessage{
+			OnClientSide: true,
+			Message:      msgBytes,
+		})
+	}
+	return
+}
+
+func (cs *clientStream) RecvMsg(m interface{}) error {
+	if cs.binlog != nil && !cs.serverHeaderBinlogged {
+		// Call Header() to binary log header if it's not already logged.
+		cs.Header()
+	}
+	var recvInfo *payloadInfo
+	if cs.binlog != nil {
+		recvInfo = &payloadInfo{}
+	}
+	err := cs.withRetry(func(a *csAttempt) error {
+		return a.recvMsg(m, recvInfo)
+	}, cs.commitAttemptLocked)
+	if cs.binlog != nil && err == nil {
+		cs.binlog.Log(&binarylog.ServerMessage{
+			OnClientSide: true,
+			Message:      recvInfo.uncompressedBytes,
+		})
+	}
+	if err != nil || !cs.desc.ServerStreams {
+		// err != nil or non-server-streaming indicates end of stream.
+		cs.finish(err)
+
+		if cs.binlog != nil {
+			// finish will not log Trailer. Log Trailer here.
+			logEntry := &binarylog.ServerTrailer{
+				OnClientSide: true,
+				Trailer:      cs.Trailer(),
+				Err:          err,
+			}
+			if logEntry.Err == io.EOF {
+				logEntry.Err = nil
+			}
+			if peer, ok := peer.FromContext(cs.Context()); ok {
+				logEntry.PeerAddr = peer.Addr
+			}
+			cs.binlog.Log(logEntry)
+		}
+	}
+	return err
+}
+
+func (cs *clientStream) CloseSend() error {
+	if cs.sentLast {
+		// TODO: return an error and finish the stream instead, due to API misuse?
+		return nil
+	}
+	cs.sentLast = true
+	op := func(a *csAttempt) error {
+		a.t.Write(a.s, nil, nil, &transport.Options{Last: true})
+		// Always return nil; io.EOF is the only error that might make sense
+		// instead, but there is no need to signal the client to call RecvMsg
+		// as the only use left for the stream after CloseSend is to call
+		// RecvMsg. This also matches historical behavior.
+		return nil
+	}
+	cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) })
+	if cs.binlog != nil {
+		cs.binlog.Log(&binarylog.ClientHalfClose{
+			OnClientSide: true,
+		})
+	}
+	// We never return an error here; see the comment inside op above for why.
+	return nil
+}
+
+func (cs *clientStream) finish(err error) {
+	if err == io.EOF {
+		// Ending a stream with EOF indicates a success.
+		err = nil
+	}
+	cs.mu.Lock()
+	if cs.finished {
+		cs.mu.Unlock()
+		return
+	}
+	cs.finished = true
+	cs.commitAttemptLocked()
+	cs.mu.Unlock()
+	// For binary logging: only log cancel in finish (could be caused by RPC ctx
+	// canceled or ClientConn closed). Trailer will be logged in RecvMsg.
+	//
+	// Only one of cancel or trailer needs to be logged. In the cases where
+	// users don't call RecvMsg, users must have already canceled the RPC.
+	if cs.binlog != nil && status.Code(err) == codes.Canceled {
+		cs.binlog.Log(&binarylog.Cancel{
+			OnClientSide: true,
+		})
+	}
+	if err == nil {
+		cs.retryThrottler.successfulRPC()
+	}
+	if channelz.IsOn() {
+		if err != nil {
+			cs.cc.incrCallsFailed()
+		} else {
+			cs.cc.incrCallsSucceeded()
+		}
+	}
+	if cs.attempt != nil {
+		cs.attempt.finish(err)
+	}
+	// after functions all rely upon having a stream.
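+	// (CallOption after funcs, e.g. those installed by grpc.Header and
+	// grpc.Trailer, read from the attempt's transport stream, hence the nil
+	// check below.)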
+	if cs.attempt.s != nil {
+		for _, o := range cs.opts {
+			o.after(cs.callInfo)
+		}
+	}
+	cs.cancel()
+}
+
+func (a *csAttempt) sendMsg(m interface{}, hdr, payld, data []byte) error {
+	cs := a.cs
+	if a.trInfo != nil {
+		a.mu.Lock()
+		if a.trInfo.tr != nil {
+			a.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true)
+		}
+		a.mu.Unlock()
+	}
+	if err := a.t.Write(a.s, hdr, payld, &transport.Options{Last: !cs.desc.ClientStreams}); err != nil {
+		if !cs.desc.ClientStreams {
+			// For non-client-streaming RPCs, we return nil instead of EOF on error
+			// because the generated code requires it. finish is not called; RecvMsg()
+			// will call it with the stream's status independently.
+			return nil
+		}
+		return io.EOF
+	}
+	if a.statsHandler != nil {
+		a.statsHandler.HandleRPC(cs.ctx, outPayload(true, m, data, payld, time.Now()))
+	}
+	if channelz.IsOn() {
+		a.t.IncrMsgSent()
+	}
+	return nil
+}
+
+func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) {
+	cs := a.cs
+	if a.statsHandler != nil && payInfo == nil {
+		payInfo = &payloadInfo{}
+	}
+
+	if !a.decompSet {
+		// Block until we receive headers containing received message encoding.
+		if ct := a.s.RecvCompress(); ct != "" && ct != encoding.Identity {
+			if a.dc == nil || a.dc.Type() != ct {
+				// No configured decompressor, or it does not match the incoming
+				// message encoding; attempt to find a registered compressor that does.
+				a.dc = nil
+				a.decomp = encoding.GetCompressor(ct)
+			}
+		} else {
+			// No compression is used; disable our decompressor.
+			a.dc = nil
+		}
+		// Only initialize this state once per stream.
+		a.decompSet = true
+	}
+	err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, payInfo, a.decomp)
+	if err != nil {
+		if err == io.EOF {
+			if statusErr := a.s.Status().Err(); statusErr != nil {
+				return statusErr
+			}
+			return io.EOF // indicates successful end of stream.
+		}
+		return toRPCErr(err)
+	}
+	if a.trInfo != nil {
+		a.mu.Lock()
+		if a.trInfo.tr != nil {
+			a.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true)
+		}
+		a.mu.Unlock()
+	}
+	if a.statsHandler != nil {
+		a.statsHandler.HandleRPC(cs.ctx, &stats.InPayload{
+			Client:   true,
+			RecvTime: time.Now(),
+			Payload:  m,
+			// TODO truncate large payload.
+			Data:       payInfo.uncompressedBytes,
+			WireLength: payInfo.wireLength,
+			Length:     len(payInfo.uncompressedBytes),
+		})
+	}
+	if channelz.IsOn() {
+		a.t.IncrMsgRecv()
+	}
+	if cs.desc.ServerStreams {
+		// Subsequent messages should be received by subsequent RecvMsg calls.
+		return nil
+	}
+	// Special handling for non-server-stream rpcs.
+	// This recv expects EOF or errors, so we don't collect inPayload.
+	err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, nil, a.decomp)
+	if err == nil {
+		return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>"))
+	}
+	if err == io.EOF {
+		return a.s.Status().Err() // non-server streaming Recv returns nil on success
+	}
+	return toRPCErr(err)
+}
+
+func (a *csAttempt) finish(err error) {
+	a.mu.Lock()
+	if a.finished {
+		a.mu.Unlock()
+		return
+	}
+	a.finished = true
+	if err == io.EOF {
+		// Ending a stream with EOF indicates a success.
+		err = nil
+	}
+	var tr metadata.MD
+	if a.s != nil {
+		a.t.CloseStream(a.s, err)
+		tr = a.s.Trailer()
+	}
+
+	if a.done != nil {
+		br := false
+		if a.s != nil {
+			br = a.s.BytesReceived()
+		}
+		a.done(balancer.DoneInfo{
+			Err:           err,
+			Trailer:       tr,
+			BytesSent:     a.s != nil,
+			BytesReceived: br,
+			ServerLoad:    balancerload.Parse(tr),
+		})
+	}
+	if a.statsHandler != nil {
+		end := &stats.End{
+			Client:    true,
+			BeginTime: a.cs.beginTime,
+			EndTime:   time.Now(),
+			Trailer:   tr,
+			Error:     err,
+		}
+		a.statsHandler.HandleRPC(a.cs.ctx, end)
+	}
+	if a.trInfo != nil && a.trInfo.tr != nil {
+		if err == nil {
+			a.trInfo.tr.LazyPrintf("RPC: [OK]")
+		} else {
+			a.trInfo.tr.LazyPrintf("RPC: [%v]", err)
+			a.trInfo.tr.SetError()
+		}
+		a.trInfo.tr.Finish()
+		a.trInfo.tr = nil
+	}
+	a.mu.Unlock()
+}
+
+// newNonRetryClientStream creates a ClientStream with the specified transport, on the
+// given addrConn.
+//
+// It's expected that the given transport is either the same one in addrConn, or
+// is already closed. To avoid a race, the transport is specified separately, instead
+// of using ac.transport.
+//
+// Main differences between this and ClientConn.NewStream:
+// - no retry
+// - no service config (or wait for service config)
+// - no tracing or stats
+func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method string, t transport.ClientTransport, ac *addrConn, opts ...CallOption) (_ ClientStream, err error) {
+	if t == nil {
+		// TODO: return RPC error here?
+		return nil, errors.New("transport provided is nil")
+	}
+	// defaultCallInfo contains unnecessary info (i.e., failfast and maxRetryRPCBufferSize), so we just initialize an empty struct.
+	c := &callInfo{}
+
+	// Possible context leak:
+	// The cancel function for the child context we create will only be called
+	// when RecvMsg returns a non-nil error, if the ClientConn is closed, or if
+	// an error is generated by SendMsg.
+	// https://github.com/grpc/grpc-go/issues/1818.
+	ctx, cancel := context.WithCancel(ctx)
+	defer func() {
+		if err != nil {
+			cancel()
+		}
+	}()
+
+	for _, o := range opts {
+		if err := o.before(c); err != nil {
+			return nil, toRPCErr(err)
+		}
+	}
+	c.maxReceiveMessageSize = getMaxSize(nil, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize)
+	c.maxSendMessageSize = getMaxSize(nil, c.maxSendMessageSize, defaultServerMaxSendMessageSize)
+	if err := setCallInfoCodec(c); err != nil {
+		return nil, err
+	}
+
+	callHdr := &transport.CallHdr{
+		Host:           ac.cc.authority,
+		Method:         method,
+		ContentSubtype: c.contentSubtype,
+	}
+
+	// Set our outgoing compression according to the UseCompressor CallOption, if
+	// set. In that case, also find the compressor from the encoding package.
+	// Otherwise, use the compressor configured by the WithCompressor DialOption,
+	// if set.
+	var cp Compressor
+	var comp encoding.Compressor
+	if ct := c.compressorType; ct != "" {
+		callHdr.SendCompress = ct
+		if ct != encoding.Identity {
+			comp = encoding.GetCompressor(ct)
+			if comp == nil {
+				return nil, status.Errorf(codes.Internal, "grpc: Compressor is not installed for requested grpc-encoding %q", ct)
+			}
+		}
+	} else if ac.cc.dopts.cp != nil {
+		callHdr.SendCompress = ac.cc.dopts.cp.Type()
+		cp = ac.cc.dopts.cp
+	}
+	if c.creds != nil {
+		callHdr.Creds = c.creds
+	}
+
+	// Use a special addrConnStream to avoid retry.
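+	// addrConnStream implements ClientStream directly on top of the transport
+	// stream, bypassing the retry, tracing, and stats machinery in
+	// clientStream.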
+ as := &addrConnStream{ + callHdr: callHdr, + ac: ac, + ctx: ctx, + cancel: cancel, + opts: opts, + callInfo: c, + desc: desc, + codec: c.codec, + cp: cp, + comp: comp, + t: t, + } + + as.callInfo.stream = as + s, err := as.t.NewStream(as.ctx, as.callHdr) + if err != nil { + err = toRPCErr(err) + return nil, err + } + as.s = s + as.p = &parser{r: s} + ac.incrCallsStarted() + if desc != unaryStreamDesc { + // Listen on cc and stream contexts to cleanup when the user closes the + // ClientConn or cancels the stream context. In all other cases, an error + // should already be injected into the recv buffer by the transport, which + // the client will eventually receive, and then we will cancel the stream's + // context in clientStream.finish. + go func() { + select { + case <-ac.ctx.Done(): + as.finish(status.Error(codes.Canceled, "grpc: the SubConn is closing")) + case <-ctx.Done(): + as.finish(toRPCErr(ctx.Err())) + } + }() + } + return as, nil +} + +type addrConnStream struct { + s *transport.Stream + ac *addrConn + callHdr *transport.CallHdr + cancel context.CancelFunc + opts []CallOption + callInfo *callInfo + t transport.ClientTransport + ctx context.Context + sentLast bool + desc *StreamDesc + codec baseCodec + cp Compressor + comp encoding.Compressor + decompSet bool + dc Decompressor + decomp encoding.Compressor + p *parser + mu sync.Mutex + finished bool +} + +func (as *addrConnStream) Header() (metadata.MD, error) { + m, err := as.s.Header() + if err != nil { + as.finish(toRPCErr(err)) + } + return m, err +} + +func (as *addrConnStream) Trailer() metadata.MD { + return as.s.Trailer() +} + +func (as *addrConnStream) CloseSend() error { + if as.sentLast { + // TODO: return an error and finish the stream instead, due to API misuse? + return nil + } + as.sentLast = true + + as.t.Write(as.s, nil, nil, &transport.Options{Last: true}) + // Always return nil; io.EOF is the only error that might make sense + // instead, but there is no need to signal the client to call RecvMsg + // as the only use left for the stream after CloseSend is to call + // RecvMsg. This also matches historical behavior. + return nil +} + +func (as *addrConnStream) Context() context.Context { + return as.s.Context() +} + +func (as *addrConnStream) SendMsg(m interface{}) (err error) { + defer func() { + if err != nil && err != io.EOF { + // Call finish on the client stream for errors generated by this SendMsg + // call, as these indicate problems created by this client. (Transport + // errors are converted to an io.EOF error in csAttempt.sendMsg; the real + // error will be returned from RecvMsg eventually in that case, or be + // retried.) + as.finish(err) + } + }() + if as.sentLast { + return status.Errorf(codes.Internal, "SendMsg called after CloseSend") + } + if !as.desc.ClientStreams { + as.sentLast = true + } + + // load hdr, payload, data + hdr, payld, _, err := prepareMsg(m, as.codec, as.cp, as.comp) + if err != nil { + return err + } + + // TODO(dfawley): should we be checking len(data) instead? + if len(payld) > *as.callInfo.maxSendMessageSize { + return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payld), *as.callInfo.maxSendMessageSize) + } + + if err := as.t.Write(as.s, hdr, payld, &transport.Options{Last: !as.desc.ClientStreams}); err != nil { + if !as.desc.ClientStreams { + // For non-client-streaming RPCs, we return nil instead of EOF on error + // because the generated code requires it. 
finish is not called; RecvMsg()
+			// will call it with the stream's status independently.
+			return nil
+		}
+		return io.EOF
+	}
+
+	if channelz.IsOn() {
+		as.t.IncrMsgSent()
+	}
+	return nil
+}
+
+func (as *addrConnStream) RecvMsg(m interface{}) (err error) {
+	defer func() {
+		if err != nil || !as.desc.ServerStreams {
+			// err != nil or non-server-streaming indicates end of stream.
+			as.finish(err)
+		}
+	}()
+
+	if !as.decompSet {
+		// Block until we receive headers containing received message encoding.
+		if ct := as.s.RecvCompress(); ct != "" && ct != encoding.Identity {
+			if as.dc == nil || as.dc.Type() != ct {
+				// No configured decompressor, or it does not match the incoming
+				// message encoding; attempt to find a registered compressor that does.
+				as.dc = nil
+				as.decomp = encoding.GetCompressor(ct)
+			}
+		} else {
+			// No compression is used; disable our decompressor.
+			as.dc = nil
+		}
+		// Only initialize this state once per stream.
+		as.decompSet = true
+	}
+	err = recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp)
+	if err != nil {
+		if err == io.EOF {
+			if statusErr := as.s.Status().Err(); statusErr != nil {
+				return statusErr
+			}
+			return io.EOF // indicates successful end of stream.
+		}
+		return toRPCErr(err)
+	}
+
+	if channelz.IsOn() {
+		as.t.IncrMsgRecv()
+	}
+	if as.desc.ServerStreams {
+		// Subsequent messages should be received by subsequent RecvMsg calls.
+		return nil
+	}
+
+	// Special handling for non-server-stream rpcs.
+	// This recv expects EOF or errors, so we don't collect inPayload.
+	err = recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp)
+	if err == nil {
+		return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>"))
+	}
+	if err == io.EOF {
+		return as.s.Status().Err() // non-server streaming Recv returns nil on success
+	}
+	return toRPCErr(err)
+}
+
+func (as *addrConnStream) finish(err error) {
+	as.mu.Lock()
+	if as.finished {
+		as.mu.Unlock()
+		return
+	}
+	as.finished = true
+	if err == io.EOF {
+		// Ending a stream with EOF indicates a success.
+		err = nil
+	}
+	if as.s != nil {
+		as.t.CloseStream(as.s, err)
+	}
+
+	if err != nil {
+		as.ac.incrCallsFailed()
+	} else {
+		as.ac.incrCallsSucceeded()
+	}
+	as.cancel()
+	as.mu.Unlock()
+}
+
+// ServerStream defines the server-side behavior of a streaming RPC.
+//
+// All errors returned from ServerStream methods are compatible with the
+// status package.
+type ServerStream interface {
+	// SetHeader sets the header metadata. It may be called multiple times.
+	// When called multiple times, all the provided metadata will be merged.
+	// All the metadata will be sent out when one of the following happens:
+	//  - ServerStream.SendHeader() is called;
+	//  - The first response is sent out;
+	//  - An RPC status is sent out (error or success).
+	SetHeader(metadata.MD) error
+	// SendHeader sends the header metadata.
+	// The provided md and headers set by SetHeader() will be sent.
+	// It fails if called multiple times.
+	SendHeader(metadata.MD) error
+	// SetTrailer sets the trailer metadata which will be sent with the RPC status.
+	// When called more than once, all the provided metadata will be merged.
+	SetTrailer(metadata.MD)
+	// Context returns the context for this stream.
+	Context() context.Context
+	// SendMsg sends a message. On error, SendMsg aborts the stream and the
+	// error is returned directly.
+ // + // SendMsg blocks until: + // - There is sufficient flow control to schedule m with the transport, or + // - The stream is done, or + // - The stream breaks. + // + // SendMsg does not wait until the message is received by the client. An + // untimely stream closure may result in lost messages. + // + // It is safe to have a goroutine calling SendMsg and another goroutine + // calling RecvMsg on the same stream at the same time, but it is not safe + // to call SendMsg on the same stream in different goroutines. + SendMsg(m interface{}) error + // RecvMsg blocks until it receives a message into m or the stream is + // done. It returns io.EOF when the client has performed a CloseSend. On + // any non-EOF error, the stream is aborted and the error contains the + // RPC status. + // + // It is safe to have a goroutine calling SendMsg and another goroutine + // calling RecvMsg on the same stream at the same time, but it is not + // safe to call RecvMsg on the same stream in different goroutines. + RecvMsg(m interface{}) error +} + +// serverStream implements a server side Stream. +type serverStream struct { + ctx context.Context + t transport.ServerTransport + s *transport.Stream + p *parser + codec baseCodec + + cp Compressor + dc Decompressor + comp encoding.Compressor + decomp encoding.Compressor + + maxReceiveMessageSize int + maxSendMessageSize int + trInfo *traceInfo + + statsHandler stats.Handler + + binlog *binarylog.MethodLogger + // serverHeaderBinlogged indicates whether server header has been logged. It + // will happen when one of the following two happens: stream.SendHeader(), + // stream.Send(). + // + // It's only checked in send and sendHeader, doesn't need to be + // synchronized. + serverHeaderBinlogged bool + + mu sync.Mutex // protects trInfo.tr after the service handler runs. +} + +func (ss *serverStream) Context() context.Context { + return ss.ctx +} + +func (ss *serverStream) SetHeader(md metadata.MD) error { + if md.Len() == 0 { + return nil + } + return ss.s.SetHeader(md) +} + +func (ss *serverStream) SendHeader(md metadata.MD) error { + err := ss.t.WriteHeader(ss.s, md) + if ss.binlog != nil && !ss.serverHeaderBinlogged { + h, _ := ss.s.Header() + ss.binlog.Log(&binarylog.ServerHeader{ + Header: h, + }) + ss.serverHeaderBinlogged = true + } + return err +} + +func (ss *serverStream) SetTrailer(md metadata.MD) { + if md.Len() == 0 { + return + } + ss.s.SetTrailer(md) +} + +func (ss *serverStream) SendMsg(m interface{}) (err error) { + defer func() { + if ss.trInfo != nil { + ss.mu.Lock() + if ss.trInfo.tr != nil { + if err == nil { + ss.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true) + } else { + ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + ss.trInfo.tr.SetError() + } + } + ss.mu.Unlock() + } + if err != nil && err != io.EOF { + st, _ := status.FromError(toRPCErr(err)) + ss.t.WriteStatus(ss.s, st) + // Non-user specified status was sent out. This should be an error + // case (as a server side Cancel maybe). + // + // This is not handled specifically now. User will return a final + // status from the service handler, we will log that error instead. + // This behavior is similar to an interceptor. + } + if channelz.IsOn() && err == nil { + ss.t.IncrMsgSent() + } + }() + + // load hdr, payload, data + hdr, payload, data, err := prepareMsg(m, ss.codec, ss.cp, ss.comp) + if err != nil { + return err + } + + // TODO(dfawley): should we be checking len(data) instead? 
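[Editor's aside — a hedged sketch of a handler exercising the ServerStream contract documented above: SetHeader buffers metadata until the first Send, SetTrailer rides with the final status. Foo_ListServer, ListRequest, Item, and the server struct are assumed generated/user names, not part of this patch.]

// Sketch only: a server-streaming handler using SetHeader/Send/SetTrailer
// per the interface comments above.
func (s *server) List(req *ListRequest, stream Foo_ListServer) error {
	// Header metadata is buffered and flushed on the first Send (or SendHeader).
	if err := stream.SetHeader(metadata.Pairs("cache", "miss")); err != nil {
		return err
	}
	for _, it := range s.items {
		if err := stream.Send(it); err != nil {
			return err // aborts the stream; the error is status-compatible
		}
	}
	// Trailer metadata is sent together with the final RPC status.
	stream.SetTrailer(metadata.Pairs("count", strconv.Itoa(len(s.items))))
	return nil
}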
+ if len(payload) > ss.maxSendMessageSize { + return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), ss.maxSendMessageSize) + } + if err := ss.t.Write(ss.s, hdr, payload, &transport.Options{Last: false}); err != nil { + return toRPCErr(err) + } + if ss.binlog != nil { + if !ss.serverHeaderBinlogged { + h, _ := ss.s.Header() + ss.binlog.Log(&binarylog.ServerHeader{ + Header: h, + }) + ss.serverHeaderBinlogged = true + } + ss.binlog.Log(&binarylog.ServerMessage{ + Message: data, + }) + } + if ss.statsHandler != nil { + ss.statsHandler.HandleRPC(ss.s.Context(), outPayload(false, m, data, payload, time.Now())) + } + return nil +} + +func (ss *serverStream) RecvMsg(m interface{}) (err error) { + defer func() { + if ss.trInfo != nil { + ss.mu.Lock() + if ss.trInfo.tr != nil { + if err == nil { + ss.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true) + } else if err != io.EOF { + ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + ss.trInfo.tr.SetError() + } + } + ss.mu.Unlock() + } + if err != nil && err != io.EOF { + st, _ := status.FromError(toRPCErr(err)) + ss.t.WriteStatus(ss.s, st) + // Non-user specified status was sent out. This should be an error + // case (as a server side Cancel maybe). + // + // This is not handled specifically now. User will return a final + // status from the service handler, we will log that error instead. + // This behavior is similar to an interceptor. + } + if channelz.IsOn() && err == nil { + ss.t.IncrMsgRecv() + } + }() + var payInfo *payloadInfo + if ss.statsHandler != nil || ss.binlog != nil { + payInfo = &payloadInfo{} + } + if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, payInfo, ss.decomp); err != nil { + if err == io.EOF { + if ss.binlog != nil { + ss.binlog.Log(&binarylog.ClientHalfClose{}) + } + return err + } + if err == io.ErrUnexpectedEOF { + err = status.Errorf(codes.Internal, io.ErrUnexpectedEOF.Error()) + } + return toRPCErr(err) + } + if ss.statsHandler != nil { + ss.statsHandler.HandleRPC(ss.s.Context(), &stats.InPayload{ + RecvTime: time.Now(), + Payload: m, + // TODO truncate large payload. + Data: payInfo.uncompressedBytes, + WireLength: payInfo.wireLength, + Length: len(payInfo.uncompressedBytes), + }) + } + if ss.binlog != nil { + ss.binlog.Log(&binarylog.ClientMessage{ + Message: payInfo.uncompressedBytes, + }) + } + return nil +} + +// MethodFromServerStream returns the method string for the input stream. +// The returned string is in the format of "/service/method". +func MethodFromServerStream(stream ServerStream) (string, bool) { + return Method(stream.Context()) +} + +// prepareMsg returns the hdr, payload and data +// using the compressors passed or using the +// passed preparedmsg +func prepareMsg(m interface{}, codec baseCodec, cp Compressor, comp encoding.Compressor) (hdr, payload, data []byte, err error) { + if preparedMsg, ok := m.(*PreparedMsg); ok { + return preparedMsg.hdr, preparedMsg.payload, preparedMsg.encodedData, nil + } + // The input interface is not a prepared msg. 
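[Editor's aside — illustrative sketch of how callers opt into the *PreparedMsg fast path checked just above, so a message reused across many streams is encoded and compressed only once; the stream and msg variables are assumed.]

// Pre-encode once; every subsequent SendMsg(&pm) hits the preparedMsg
// branch above instead of re-running encode/compress.
var pm grpc.PreparedMsg
if err := pm.Encode(stream, msg); err != nil {
	return err
}
if err := stream.SendMsg(&pm); err != nil {
	return err
}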
+ // Marshal and Compress the data at this point + data, err = encode(codec, m) + if err != nil { + return nil, nil, nil, err + } + compData, err := compress(data, cp, comp) + if err != nil { + return nil, nil, nil, err + } + hdr, payload = msgHeader(data, compData) + return hdr, payload, data, nil +} diff --git a/vendor/google.golang.org/grpc/tap/tap.go b/vendor/google.golang.org/grpc/tap/tap.go new file mode 100644 index 000000000..584360f68 --- /dev/null +++ b/vendor/google.golang.org/grpc/tap/tap.go @@ -0,0 +1,51 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package tap defines the function handles which are executed on the transport +// layer of gRPC-Go and related information. Everything here is EXPERIMENTAL. +package tap + +import ( + "context" +) + +// Info defines the relevant information needed by the handles. +type Info struct { + // FullMethodName is the string of grpc method (in the format of + // /package.service/method). + FullMethodName string + // TODO: More to be added. +} + +// ServerInHandle defines the function which runs before a new stream is created +// on the server side. If it returns a non-nil error, the stream will not be +// created and a RST_STREAM will be sent back to the client with REFUSED_STREAM. +// The client will receive an RPC error "code = Unavailable, desc = stream +// terminated by RST_STREAM with error code: REFUSED_STREAM". +// +// It's intended to be used in situations where you don't want to waste the +// resources to accept the new stream (e.g. rate-limiting). And the content of +// the error will be ignored and won't be sent back to the client. For other +// general usages, please use interceptors. +// +// Note that it is executed in the per-connection I/O goroutine(s) instead of +// per-RPC goroutine. Therefore, users should NOT have any +// blocking/time-consuming work in this handle. Otherwise all the RPCs would +// slow down. Also, for the same reason, this handle won't be called +// concurrently by gRPC. +type ServerInHandle func(ctx context.Context, info *Info) (context.Context, error) diff --git a/vendor/google.golang.org/grpc/trace.go b/vendor/google.golang.org/grpc/trace.go new file mode 100644 index 000000000..0a57b9994 --- /dev/null +++ b/vendor/google.golang.org/grpc/trace.go @@ -0,0 +1,126 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
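[Editor's aside — a hedged sketch making the tap.ServerInHandle contract above concrete: a cheap, non-blocking rate-limiting tap. The limiter wiring and function names are assumptions; grpc.InTapHandle is the ServerOption that installs a tap. As documented above, the error's content never reaches the client.]

// Rate-limiting tap per the package comment above. Uses
// golang.org/x/time/rate; it must stay cheap and non-blocking because it
// runs on the connection's I/O goroutine(s).
func rateLimitTap(lim *rate.Limiter) tap.ServerInHandle {
	return func(ctx context.Context, info *tap.Info) (context.Context, error) {
		if !lim.Allow() {
			// The stream is refused with RST_STREAM(REFUSED_STREAM); this
			// error text is ignored and not sent to the client.
			return nil, fmt.Errorf("rate limited: %s", info.FullMethodName)
		}
		return ctx, nil
	}
}

// Wired in at server construction time (limiter values are arbitrary):
// srv := grpc.NewServer(grpc.InTapHandle(rateLimitTap(rate.NewLimiter(100, 10))))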
+ * + */ + +package grpc + +import ( + "bytes" + "fmt" + "io" + "net" + "strings" + "sync" + "time" + + "golang.org/x/net/trace" +) + +// EnableTracing controls whether to trace RPCs using the golang.org/x/net/trace package. +// This should only be set before any RPCs are sent or received by this program. +var EnableTracing bool + +// methodFamily returns the trace family for the given method. +// It turns "/pkg.Service/GetFoo" into "pkg.Service". +func methodFamily(m string) string { + m = strings.TrimPrefix(m, "/") // remove leading slash + if i := strings.Index(m, "/"); i >= 0 { + m = m[:i] // remove everything from second slash + } + if i := strings.LastIndex(m, "."); i >= 0 { + m = m[i+1:] // cut down to last dotted component + } + return m +} + +// traceInfo contains tracing information for an RPC. +type traceInfo struct { + tr trace.Trace + firstLine firstLine +} + +// firstLine is the first line of an RPC trace. +// It may be mutated after construction; remoteAddr specifically may change +// during client-side use. +type firstLine struct { + mu sync.Mutex + client bool // whether this is a client (outgoing) RPC + remoteAddr net.Addr + deadline time.Duration // may be zero +} + +func (f *firstLine) SetRemoteAddr(addr net.Addr) { + f.mu.Lock() + f.remoteAddr = addr + f.mu.Unlock() +} + +func (f *firstLine) String() string { + f.mu.Lock() + defer f.mu.Unlock() + + var line bytes.Buffer + io.WriteString(&line, "RPC: ") + if f.client { + io.WriteString(&line, "to") + } else { + io.WriteString(&line, "from") + } + fmt.Fprintf(&line, " %v deadline:", f.remoteAddr) + if f.deadline != 0 { + fmt.Fprint(&line, f.deadline) + } else { + io.WriteString(&line, "none") + } + return line.String() +} + +const truncateSize = 100 + +func truncate(x string, l int) string { + if l > len(x) { + return x + } + return x[:l] +} + +// payload represents an RPC request or response payload. +type payload struct { + sent bool // whether this is an outgoing payload + msg interface{} // e.g. a proto.Message + // TODO(dsymonds): add stringifying info to codec, and limit how much we hold here? +} + +func (p payload) String() string { + if p.sent { + return truncate(fmt.Sprintf("sent: %v", p.msg), truncateSize) + } + return truncate(fmt.Sprintf("recv: %v", p.msg), truncateSize) +} + +type fmtStringer struct { + format string + a []interface{} +} + +func (f *fmtStringer) String() string { + return fmt.Sprintf(f.format, f.a...) +} + +type stringer string + +func (s stringer) String() string { return string(s) } diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go new file mode 100644 index 000000000..3ab788965 --- /dev/null +++ b/vendor/google.golang.org/grpc/version.go @@ -0,0 +1,22 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +// Version is the current grpc version. 
+const Version = "1.22.0" diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/doc.go b/vendor/k8s.io/api/admissionregistration/v1alpha1/doc.go new file mode 100644 index 000000000..8a5d1fbbb --- /dev/null +++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/doc.go @@ -0,0 +1,25 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:openapi-gen=true + +// Package v1alpha1 is the v1alpha1 version of the API. +// AdmissionConfiguration and AdmissionPluginConfiguration are legacy static admission plugin configuration +// InitializerConfiguration and validatingWebhookConfiguration is for the +// new dynamic admission controller configuration. +// +groupName=admissionregistration.k8s.io +package v1alpha1 // import "k8s.io/api/admissionregistration/v1alpha1" diff --git a/vendor/k8s.io/api/coordination/v1/generated.pb.go b/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.pb.go similarity index 50% rename from vendor/k8s.io/api/coordination/v1/generated.pb.go rename to vendor/k8s.io/api/admissionregistration/v1alpha1/generated.pb.go index 349c68574..b87f74e52 100644 --- a/vendor/k8s.io/api/coordination/v1/generated.pb.go +++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.pb.go @@ -14,28 +14,28 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: k8s.io/kubernetes/vendor/k8s.io/api/coordination/v1/generated.proto +// Code generated by protoc-gen-gogo. +// source: k8s.io/kubernetes/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto +// DO NOT EDIT! /* - Package v1 is a generated protocol buffer package. + Package v1alpha1 is a generated protocol buffer package. It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/coordination/v1/generated.proto + k8s.io/kubernetes/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto It has these top-level messages: - Lease - LeaseList - LeaseSpec + Initializer + InitializerConfiguration + InitializerConfigurationList + Rule */ -package v1 +package v1alpha1 import proto "github.com/gogo/protobuf/proto" import fmt "fmt" import math "math" -import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - import strings "strings" import reflect "reflect" @@ -52,24 +52,33 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *Lease) Reset() { *m = Lease{} } -func (*Lease) ProtoMessage() {} -func (*Lease) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *Initializer) Reset() { *m = Initializer{} } +func (*Initializer) ProtoMessage() {} +func (*Initializer) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + +func (m *InitializerConfiguration) Reset() { *m = InitializerConfiguration{} } +func (*InitializerConfiguration) ProtoMessage() {} +func (*InitializerConfiguration) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{1} +} -func (m *LeaseList) Reset() { *m = LeaseList{} } -func (*LeaseList) ProtoMessage() {} -func (*LeaseList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +func (m *InitializerConfigurationList) Reset() { *m = InitializerConfigurationList{} } +func (*InitializerConfigurationList) ProtoMessage() {} +func (*InitializerConfigurationList) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{2} +} -func (m *LeaseSpec) Reset() { *m = LeaseSpec{} } -func (*LeaseSpec) ProtoMessage() {} -func (*LeaseSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +func (m *Rule) Reset() { *m = Rule{} } +func (*Rule) ProtoMessage() {} +func (*Rule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } func init() { - proto.RegisterType((*Lease)(nil), "k8s.io.api.coordination.v1.Lease") - proto.RegisterType((*LeaseList)(nil), "k8s.io.api.coordination.v1.LeaseList") - proto.RegisterType((*LeaseSpec)(nil), "k8s.io.api.coordination.v1.LeaseSpec") + proto.RegisterType((*Initializer)(nil), "k8s.io.api.admissionregistration.v1alpha1.Initializer") + proto.RegisterType((*InitializerConfiguration)(nil), "k8s.io.api.admissionregistration.v1alpha1.InitializerConfiguration") + proto.RegisterType((*InitializerConfigurationList)(nil), "k8s.io.api.admissionregistration.v1alpha1.InitializerConfigurationList") + proto.RegisterType((*Rule)(nil), "k8s.io.api.admissionregistration.v1alpha1.Rule") } -func (m *Lease) Marshal() (dAtA []byte, err error) { +func (m *Initializer) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) @@ -79,7 +88,41 @@ func (m *Lease) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *Lease) MarshalTo(dAtA []byte) (int, error) { +func (m *Initializer) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + if len(m.Rules) > 0 { + for _, msg := range m.Rules { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *InitializerConfiguration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *InitializerConfiguration) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int @@ -92,18 +135,22 @@ func (m *Lease) MarshalTo(dAtA []byte) (int, error) { return 0, err } i += n1 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Initializers) > 0 { + for _, 
msg := range m.Initializers { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } } - i += n2 return i, nil } -func (m *LeaseList) Marshal() (dAtA []byte, err error) { +func (m *InitializerConfigurationList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) @@ -113,7 +160,7 @@ func (m *LeaseList) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *LeaseList) MarshalTo(dAtA []byte) (int, error) { +func (m *InitializerConfigurationList) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int @@ -121,11 +168,11 @@ func (m *LeaseList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n3, err := m.ListMeta.MarshalTo(dAtA[i:]) + n2, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n3 + i += n2 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -141,7 +188,7 @@ func (m *LeaseList) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func (m *LeaseSpec) Marshal() (dAtA []byte, err error) { +func (m *Rule) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) @@ -151,50 +198,77 @@ func (m *LeaseSpec) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *LeaseSpec) MarshalTo(dAtA []byte) (int, error) { +func (m *Rule) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - if m.HolderIdentity != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.HolderIdentity))) - i += copy(dAtA[i:], *m.HolderIdentity) - } - if m.LeaseDurationSeconds != nil { - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.LeaseDurationSeconds)) - } - if m.AcquireTime != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AcquireTime.Size())) - n4, err := m.AcquireTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.APIGroups) > 0 { + for _, s := range m.APIGroups { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) } - i += n4 - } - if m.RenewTime != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RenewTime.Size())) - n5, err := m.RenewTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + } + if len(m.APIVersions) > 0 { + for _, s := range m.APIVersions { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) } - i += n5 } - if m.LeaseTransitions != nil { - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.LeaseTransitions)) + if len(m.Resources) > 0 { + for _, s := range m.Resources { + dAtA[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } } return i, nil } +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(dAtA []byte, offset int, 
v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -204,17 +278,35 @@ func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { dAtA[offset] = uint8(v) return offset + 1 } -func (m *Lease) Size() (n int) { +func (m *Initializer) Size() (n int) { var l int _ = l - l = m.ObjectMeta.Size() + l = len(m.Name) n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() + if len(m.Rules) > 0 { + for _, e := range m.Rules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *InitializerConfiguration) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() n += 1 + l + sovGenerated(uint64(l)) + if len(m.Initializers) > 0 { + for _, e := range m.Initializers { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } -func (m *LeaseList) Size() (n int) { +func (m *InitializerConfigurationList) Size() (n int) { var l int _ = l l = m.ListMeta.Size() @@ -228,26 +320,26 @@ func (m *LeaseList) Size() (n int) { return n } -func (m *LeaseSpec) Size() (n int) { +func (m *Rule) Size() (n int) { var l int _ = l - if m.HolderIdentity != nil { - l = len(*m.HolderIdentity) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.LeaseDurationSeconds != nil { - n += 1 + sovGenerated(uint64(*m.LeaseDurationSeconds)) - } - if m.AcquireTime != nil { - l = m.AcquireTime.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.APIGroups) > 0 { + for _, s := range m.APIGroups { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } } - if m.RenewTime != nil { - l = m.RenewTime.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.APIVersions) > 0 { + for _, s := range m.APIVersions { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } } - if m.LeaseTransitions != nil { - n += 1 + sovGenerated(uint64(*m.LeaseTransitions)) + if len(m.Resources) > 0 { + for _, s := range m.Resources { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } } return n } @@ -265,38 +357,47 @@ func sovGenerated(x uint64) (n int) { func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } -func (this *Lease) String() string { +func (this *Initializer) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&Lease{`, + s := strings.Join([]string{`&Initializer{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Rules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Rules), "Rule", "Rule", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *InitializerConfiguration) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&InitializerConfiguration{`, `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "LeaseSpec", "LeaseSpec", 1), `&`, ``, 1) + `,`, + `Initializers:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Initializers), "Initializer", "Initializer", 1), `&`, ``, 1) + `,`, `}`, }, "") return s } -func (this *LeaseList) String() string { +func (this *InitializerConfigurationList) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&LeaseList{`, + s := strings.Join([]string{`&InitializerConfigurationList{`, 
`ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Lease", "Lease", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "InitializerConfiguration", "InitializerConfiguration", 1), `&`, ``, 1) + `,`, `}`, }, "") return s } -func (this *LeaseSpec) String() string { +func (this *Rule) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&LeaseSpec{`, - `HolderIdentity:` + valueToStringGenerated(this.HolderIdentity) + `,`, - `LeaseDurationSeconds:` + valueToStringGenerated(this.LeaseDurationSeconds) + `,`, - `AcquireTime:` + strings.Replace(fmt.Sprintf("%v", this.AcquireTime), "MicroTime", "k8s_io_apimachinery_pkg_apis_meta_v1.MicroTime", 1) + `,`, - `RenewTime:` + strings.Replace(fmt.Sprintf("%v", this.RenewTime), "MicroTime", "k8s_io_apimachinery_pkg_apis_meta_v1.MicroTime", 1) + `,`, - `LeaseTransitions:` + valueToStringGenerated(this.LeaseTransitions) + `,`, + s := strings.Join([]string{`&Rule{`, + `APIGroups:` + fmt.Sprintf("%v", this.APIGroups) + `,`, + `APIVersions:` + fmt.Sprintf("%v", this.APIVersions) + `,`, + `Resources:` + fmt.Sprintf("%v", this.Resources) + `,`, `}`, }, "") return s @@ -309,7 +410,7 @@ func valueToStringGenerated(v interface{}) string { pv := reflect.Indirect(rv).Interface() return fmt.Sprintf("*%v", pv) } -func (m *Lease) Unmarshal(dAtA []byte) error { +func (m *Initializer) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -332,17 +433,17 @@ func (m *Lease) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Lease: wiretype end group for non-group") + return fmt.Errorf("proto: Initializer: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Lease: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Initializer: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -352,25 +453,24 @@ func (m *Lease) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -394,7 +494,8 @@ func (m *Lease) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Rules = append(m.Rules, Rule{}) + if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); 
err != nil { return err } iNdEx = postIndex @@ -419,7 +520,7 @@ func (m *Lease) Unmarshal(dAtA []byte) error { } return nil } -func (m *LeaseList) Unmarshal(dAtA []byte) error { +func (m *InitializerConfiguration) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -442,15 +543,15 @@ func (m *LeaseList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: LeaseList: wiretype end group for non-group") + return fmt.Errorf("proto: InitializerConfiguration: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: LeaseList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: InitializerConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -474,13 +575,13 @@ func (m *LeaseList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Initializers", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -504,8 +605,8 @@ func (m *LeaseList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, Lease{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Initializers = append(m.Initializers, Initializer{}) + if err := m.Initializers[len(m.Initializers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -530,7 +631,7 @@ func (m *LeaseList) Unmarshal(dAtA []byte) error { } return nil } -func (m *LeaseSpec) Unmarshal(dAtA []byte) error { +func (m *InitializerConfigurationList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -553,17 +654,17 @@ func (m *LeaseSpec) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: LeaseSpec: wiretype end group for non-group") + return fmt.Errorf("proto: InitializerConfigurationList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: LeaseSpec: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: InitializerConfigurationList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field HolderIdentity", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -573,27 +674,27 @@ func (m *LeaseSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex > l { return 
io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.HolderIdentity = &s + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field LeaseDurationSeconds", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } - var v int32 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -603,17 +704,78 @@ func (m *LeaseSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - m.LeaseDurationSeconds = &v - case 3: + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, InitializerConfiguration{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Rule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Rule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Rule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AcquireTime", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field APIGroups", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -623,30 +785,26 @@ func (m *LeaseSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - if m.AcquireTime == nil { - m.AcquireTime = &k8s_io_apimachinery_pkg_apis_meta_v1.MicroTime{} - } - if err := m.AcquireTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.APIGroups = append(m.APIGroups, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex - case 4: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RenewTime", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field APIVersions", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -656,30 +814,26 @@ func (m *LeaseSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen 
|= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - if m.RenewTime == nil { - m.RenewTime = &k8s_io_apimachinery_pkg_apis_meta_v1.MicroTime{} - } - if err := m.RenewTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.APIVersions = append(m.APIVersions, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field LeaseTransitions", wireType) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) } - var v int32 + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -689,12 +843,21 @@ func (m *LeaseSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - m.LeaseTransitions = &v + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resources = append(m.Resources, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -822,43 +985,43 @@ var ( ) func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/coordination/v1/generated.proto", fileDescriptorGenerated) + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto", fileDescriptorGenerated) } var fileDescriptorGenerated = []byte{ - // 535 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x90, 0xc1, 0x6e, 0xd3, 0x40, - 0x10, 0x86, 0xe3, 0x36, 0x91, 0x9a, 0x0d, 0x2d, 0x91, 0x95, 0x83, 0x95, 0x83, 0x5d, 0x22, 0x21, - 0xe5, 0xc2, 0x2e, 0xa9, 0x10, 0x42, 0x9c, 0xc0, 0x20, 0xa0, 0x52, 0x2a, 0x24, 0xb7, 0x27, 0xd4, - 0x03, 0x1b, 0x7b, 0x70, 0x96, 0xd4, 0x5e, 0xb3, 0xbb, 0x0e, 0xea, 0x8d, 0x47, 0xe0, 0xca, 0x63, - 0xc0, 0x53, 0xe4, 0xd8, 0x63, 0x4f, 0x16, 0x31, 0x2f, 0x82, 0x76, 0x93, 0x36, 0x21, 0x49, 0xd5, - 0x8a, 0xdb, 0xee, 0xcc, 0xfc, 0xdf, 0xfc, 0xf3, 0xa3, 0x57, 0xa3, 0x67, 0x12, 0x33, 0x4e, 0x46, - 0xf9, 0x00, 0x44, 0x0a, 0x0a, 0x24, 0x19, 0x43, 0x1a, 0x71, 0x41, 0xe6, 0x0d, 0x9a, 0x31, 0x12, - 0x72, 0x2e, 0x22, 0x96, 0x52, 0xc5, 0x78, 0x4a, 0xc6, 0x3d, 0x12, 0x43, 0x0a, 0x82, 0x2a, 0x88, - 0x70, 0x26, 0xb8, 0xe2, 0x76, 0x7b, 0x36, 0x8b, 0x69, 0xc6, 0xf0, 0xf2, 0x2c, 0x1e, 0xf7, 0xda, - 0x8f, 0x62, 0xa6, 0x86, 0xf9, 0x00, 0x87, 0x3c, 0x21, 0x31, 0x8f, 0x39, 0x31, 0x92, 0x41, 0xfe, - 0xc9, 0xfc, 0xcc, 0xc7, 0xbc, 0x66, 0xa8, 0xf6, 0x93, 0xc5, 0xda, 0x84, 0x86, 0x43, 0x96, 0x82, - 0x38, 0x27, 0xd9, 0x28, 0xd6, 0x05, 0x49, 0x12, 0x50, 0x74, 0x83, 0x81, 0x36, 0xb9, 0x49, 0x25, - 0xf2, 0x54, 0xb1, 0x04, 0xd6, 0x04, 0x4f, 0x6f, 0x13, 0xc8, 0x70, 0x08, 0x09, 0x5d, 0xd5, 0x75, - 0x7e, 0x59, 0xa8, 0xd6, 0x07, 0x2a, 0xc1, 0xfe, 0x88, 0x76, 0xb4, 0x9b, 0x88, 0x2a, 0xea, 0x58, - 0xfb, 0x56, 0xb7, 0x71, 0xf0, 0x18, 0x2f, 0x62, 0xb8, 0x86, 0xe2, 0x6c, 0x14, 0xeb, 0x82, 0xc4, - 0x7a, 0x1a, 0x8f, 0x7b, 0xf8, 0xfd, 0xe0, 0x33, 0x84, 0xea, 0x08, 0x14, 0xf5, 0xed, 0x49, 0xe1, - 0x55, 0xca, 0xc2, 0x43, 0x8b, 0x5a, 0x70, 0x4d, 0xb5, 0xdf, 0xa2, 0xaa, 0xcc, 0x20, 0x74, 0xb6, - 0x0c, 0xfd, 0x21, 0xbe, 0x39, 
0x64, 0x6c, 0x2c, 0x1d, 0x67, 0x10, 0xfa, 0xf7, 0xe6, 0xc8, 0xaa, - 0xfe, 0x05, 0x06, 0xd0, 0xf9, 0x69, 0xa1, 0xba, 0x99, 0xe8, 0x33, 0xa9, 0xec, 0xd3, 0x35, 0xe3, - 0xf8, 0x6e, 0xc6, 0xb5, 0xda, 0xd8, 0x6e, 0xce, 0x77, 0xec, 0x5c, 0x55, 0x96, 0x4c, 0xbf, 0x41, - 0x35, 0xa6, 0x20, 0x91, 0xce, 0xd6, 0xfe, 0x76, 0xb7, 0x71, 0xf0, 0xe0, 0x56, 0xd7, 0xfe, 0xee, - 0x9c, 0x56, 0x3b, 0xd4, 0xba, 0x60, 0x26, 0xef, 0xfc, 0xd8, 0x9e, 0x7b, 0xd6, 0x77, 0xd8, 0xcf, - 0xd1, 0xde, 0x90, 0x9f, 0x45, 0x20, 0x0e, 0x23, 0x48, 0x15, 0x53, 0xe7, 0xc6, 0x79, 0xdd, 0xb7, - 0xcb, 0xc2, 0xdb, 0x7b, 0xf7, 0x4f, 0x27, 0x58, 0x99, 0xb4, 0xfb, 0xa8, 0x75, 0xa6, 0x41, 0xaf, - 0x73, 0x61, 0x36, 0x1f, 0x43, 0xc8, 0xd3, 0x48, 0x9a, 0x58, 0x6b, 0xbe, 0x53, 0x16, 0x5e, 0xab, - 0xbf, 0xa1, 0x1f, 0x6c, 0x54, 0xd9, 0x03, 0xd4, 0xa0, 0xe1, 0x97, 0x9c, 0x09, 0x38, 0x61, 0x09, - 0x38, 0xdb, 0x26, 0x40, 0x72, 0xb7, 0x00, 0x8f, 0x58, 0x28, 0xb8, 0x96, 0xf9, 0xf7, 0xcb, 0xc2, - 0x6b, 0xbc, 0x5c, 0x70, 0x82, 0x65, 0xa8, 0x7d, 0x8a, 0xea, 0x02, 0x52, 0xf8, 0x6a, 0x36, 0x54, - 0xff, 0x6f, 0xc3, 0x6e, 0x59, 0x78, 0xf5, 0xe0, 0x8a, 0x12, 0x2c, 0x80, 0xf6, 0x0b, 0xd4, 0x34, - 0x97, 0x9d, 0x08, 0x9a, 0x4a, 0xa6, 0x6f, 0x93, 0x4e, 0xcd, 0x64, 0xd1, 0x2a, 0x0b, 0xaf, 0xd9, - 0x5f, 0xe9, 0x05, 0x6b, 0xd3, 0x7e, 0x77, 0x32, 0x75, 0x2b, 0x17, 0x53, 0xb7, 0x72, 0x39, 0x75, - 0x2b, 0xdf, 0x4a, 0xd7, 0x9a, 0x94, 0xae, 0x75, 0x51, 0xba, 0xd6, 0x65, 0xe9, 0x5a, 0xbf, 0x4b, - 0xd7, 0xfa, 0xfe, 0xc7, 0xad, 0x7c, 0xd8, 0x1a, 0xf7, 0xfe, 0x06, 0x00, 0x00, 0xff, 0xff, 0x41, - 0x5e, 0x94, 0x96, 0x5e, 0x04, 0x00, 0x00, + // 531 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x51, 0x4d, 0x8b, 0x13, 0x31, + 0x18, 0x6e, 0x6c, 0x0b, 0x6d, 0xda, 0x45, 0x19, 0x3c, 0x94, 0x22, 0xd3, 0xd2, 0x53, 0x45, 0x4c, + 0xec, 0x22, 0x8b, 0xd7, 0x9d, 0x3d, 0x48, 0xc1, 0x8f, 0x25, 0x88, 0x07, 0xf1, 0x60, 0xda, 0xbe, + 0x3b, 0x8d, 0xed, 0x4c, 0x86, 0x24, 0x53, 0xd0, 0x93, 0x17, 0xef, 0x82, 0x7f, 0xaa, 0xc7, 0x3d, + 0xee, 0xa9, 0xd8, 0x11, 0x3c, 0xfa, 0x1b, 0x24, 0x33, 0x9d, 0x9d, 0x59, 0xeb, 0xe2, 0xea, 0x2d, + 0xef, 0xf3, 0xe6, 0xf9, 0x4a, 0x30, 0x5b, 0x3c, 0xd1, 0x44, 0x48, 0xba, 0x88, 0x27, 0xa0, 0x42, + 0x30, 0xa0, 0xe9, 0x0a, 0xc2, 0x99, 0x54, 0x74, 0xb7, 0xe0, 0x91, 0xa0, 0x7c, 0x16, 0x08, 0xad, + 0x85, 0x0c, 0x15, 0xf8, 0x42, 0x1b, 0xc5, 0x8d, 0x90, 0x21, 0x5d, 0x8d, 0xf8, 0x32, 0x9a, 0xf3, + 0x11, 0xf5, 0x21, 0x04, 0xc5, 0x0d, 0xcc, 0x48, 0xa4, 0xa4, 0x91, 0xce, 0xfd, 0x8c, 0x4a, 0x78, + 0x24, 0xc8, 0x1f, 0xa9, 0x24, 0xa7, 0x76, 0x1f, 0xfa, 0xc2, 0xcc, 0xe3, 0x09, 0x99, 0xca, 0x80, + 0xfa, 0xd2, 0x97, 0x34, 0x55, 0x98, 0xc4, 0x67, 0xe9, 0x94, 0x0e, 0xe9, 0x29, 0x53, 0xee, 0x3e, + 0x2e, 0x42, 0x05, 0x7c, 0x3a, 0x17, 0x21, 0xa8, 0x0f, 0x34, 0x5a, 0xf8, 0x16, 0xd0, 0x34, 0x00, + 0xc3, 0xe9, 0x6a, 0x2f, 0x4f, 0x97, 0x5e, 0xc7, 0x52, 0x71, 0x68, 0x44, 0x00, 0x7b, 0x84, 0xa3, + 0xbf, 0x11, 0xf4, 0x74, 0x0e, 0x01, 0xff, 0x9d, 0x37, 0xf8, 0x8c, 0x70, 0x6b, 0x1c, 0x0a, 0x23, + 0xf8, 0x52, 0x7c, 0x04, 0xe5, 0xf4, 0x71, 0x2d, 0xe4, 0x01, 0x74, 0x50, 0x1f, 0x0d, 0x9b, 0x5e, + 0x7b, 0xbd, 0xe9, 0x55, 0x92, 0x4d, 0xaf, 0xf6, 0x82, 0x07, 0xc0, 0xd2, 0x8d, 0xf3, 0x0a, 0xd7, + 0x55, 0xbc, 0x04, 0xdd, 0xb9, 0xd5, 0xaf, 0x0e, 0x5b, 0x87, 0x94, 0xdc, 0xf8, 0xe9, 0x08, 0x8b, + 0x97, 0xe0, 0x1d, 0xec, 0x34, 0xeb, 0x76, 0xd2, 0x2c, 0x13, 0x1b, 0xfc, 0x44, 0xb8, 0x53, 0xca, + 0x71, 0x22, 0xc3, 0x33, 0xe1, 0xc7, 0x99, 0x80, 0xf3, 0x0e, 0x37, 0xec, 0x43, 0xcd, 0xb8, 0xe1, + 0x69, 0xb0, 0xd6, 0xe1, 0xa3, 0x92, 0xeb, 0x65, 0x5f, 0x12, 
0x2d, 0x7c, 0x0b, 0x68, 0x62, 0x6f, + 0x93, 0xd5, 0x88, 0xbc, 0x9c, 0xbc, 0x87, 0xa9, 0x79, 0x0e, 0x86, 0x7b, 0xce, 0xce, 0x16, 0x17, + 0x18, 0xbb, 0x54, 0x75, 0x22, 0xdc, 0x16, 0x85, 0x7b, 0xde, 0xed, 0xe8, 0x1f, 0xba, 0x95, 0xc2, + 0x7b, 0x77, 0x77, 0x5e, 0xed, 0x12, 0xa8, 0xd9, 0x15, 0x87, 0xc1, 0x0f, 0x84, 0xef, 0x5d, 0x57, + 0xf8, 0x99, 0xd0, 0xc6, 0x79, 0xbb, 0x57, 0x9a, 0xdc, 0xac, 0xb4, 0x65, 0xa7, 0x95, 0xef, 0xec, + 0x62, 0x34, 0x72, 0xa4, 0x54, 0x78, 0x8e, 0xeb, 0xc2, 0x40, 0x90, 0x37, 0x3d, 0xf9, 0xbf, 0xa6, + 0x57, 0x52, 0x17, 0x3f, 0x3b, 0xb6, 0xca, 0x2c, 0x33, 0x18, 0x7c, 0x45, 0xb8, 0x66, 0xbf, 0xda, + 0x79, 0x80, 0x9b, 0x3c, 0x12, 0x4f, 0x95, 0x8c, 0x23, 0xdd, 0x41, 0xfd, 0xea, 0xb0, 0xe9, 0x1d, + 0x24, 0x9b, 0x5e, 0xf3, 0xf8, 0x74, 0x9c, 0x81, 0xac, 0xd8, 0x3b, 0x23, 0xdc, 0xe2, 0x91, 0x78, + 0x0d, 0xca, 0xe6, 0xc8, 0x52, 0x36, 0xbd, 0xdb, 0xc9, 0xa6, 0xd7, 0x3a, 0x3e, 0x1d, 0xe7, 0x30, + 0x2b, 0xdf, 0xb1, 0xfa, 0x0a, 0xb4, 0x8c, 0xd5, 0x14, 0x74, 0xa7, 0x5a, 0xe8, 0xb3, 0x1c, 0x64, + 0xc5, 0xde, 0x23, 0xeb, 0xad, 0x5b, 0x39, 0xdf, 0xba, 0x95, 0x8b, 0xad, 0x5b, 0xf9, 0x94, 0xb8, + 0x68, 0x9d, 0xb8, 0xe8, 0x3c, 0x71, 0xd1, 0x45, 0xe2, 0xa2, 0x6f, 0x89, 0x8b, 0xbe, 0x7c, 0x77, + 0x2b, 0x6f, 0x1a, 0x79, 0xe9, 0x5f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xa2, 0x06, 0xa3, 0xcb, 0x75, + 0x04, 0x00, 0x00, } diff --git a/vendor/k8s.io/api/coordination/v1/register.go b/vendor/k8s.io/api/admissionregistration/v1alpha1/register.go similarity index 86% rename from vendor/k8s.io/api/coordination/v1/register.go rename to vendor/k8s.io/api/admissionregistration/v1alpha1/register.go index 95b987b98..e9a4164c3 100644 --- a/vendor/k8s.io/api/coordination/v1/register.go +++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/register.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Kubernetes Authors. +Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1 +package v1alpha1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -22,11 +22,10 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" ) -// GroupName is the group name use in this package -const GroupName = "coordination.k8s.io" +const GroupName = "admissionregistration.k8s.io" // SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} // Resource takes an unqualified resource and returns a Group qualified GroupResource func Resource(resource string) schema.GroupResource { @@ -41,13 +40,12 @@ var ( AddToScheme = localSchemeBuilder.AddToScheme ) -// Adds the list of known types to api.Scheme. +// Adds the list of known types to scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, - &Lease{}, - &LeaseList{}, + &InitializerConfiguration{}, + &InitializerConfigurationList{}, ) - metav1.AddToGroupVersion(scheme, SchemeGroupVersion) return nil } diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/types.go b/vendor/k8s.io/api/admissionregistration/v1alpha1/types.go new file mode 100644 index 000000000..a245f1e85 --- /dev/null +++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/types.go @@ -0,0 +1,106 @@ +/* +Copyright 2017 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// InitializerConfiguration describes the configuration of initializers. +type InitializerConfiguration struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Initializers is a list of resources and their default initializers + // Order-sensitive. + // When merging multiple InitializerConfigurations, we sort the initializers + // from different InitializerConfigurations by the name of the + // InitializerConfigurations; the order of the initializers from the same + // InitializerConfiguration is preserved. + // +patchMergeKey=name + // +patchStrategy=merge + // +optional + Initializers []Initializer `json:"initializers,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=initializers"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// InitializerConfigurationList is a list of InitializerConfiguration. +type InitializerConfigurationList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // List of InitializerConfiguration. + Items []InitializerConfiguration `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// Initializer describes the name and the failure policy of an initializer, and +// what resources it applies to. +type Initializer struct { + // Name is the identifier of the initializer. It will be added to the + // object that needs to be initialized. + // Name should be fully qualified, e.g., alwayspullimages.kubernetes.io, where + // "alwayspullimages" is the name of the webhook, and kubernetes.io is the name + // of the organization. + // Required + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + + // Rules describes what resources/subresources the initializer cares about. + // The initializer cares about an operation if it matches _any_ Rule. + // Rule.Resources must not include subresources. + Rules []Rule `json:"rules,omitempty" protobuf:"bytes,2,rep,name=rules"` +} + +// Rule is a tuple of APIGroups, APIVersion, and Resources.It is recommended +// to make sure that all the tuple expansions are valid. +type Rule struct { + // APIGroups is the API groups the resources belong to. '*' is all groups. + // If '*' is present, the length of the slice must be one. + // Required. + APIGroups []string `json:"apiGroups,omitempty" protobuf:"bytes,1,rep,name=apiGroups"` + + // APIVersions is the API versions the resources belong to. '*' is all versions. 
+ // If '*' is present, the length of the slice must be one. + // Required. + APIVersions []string `json:"apiVersions,omitempty" protobuf:"bytes,2,rep,name=apiVersions"` + + // Resources is a list of resources this rule applies to. + // + // For example: + // 'pods' means pods. + // 'pods/log' means the log subresource of pods. + // '*' means all resources, but not subresources. + // 'pods/*' means all subresources of pods. + // '*/scale' means all scale subresources. + // '*/*' means all resources and their subresources. + // + // If wildcard is present, the validation rule will ensure resources do not + // overlap with each other. + // + // Depending on the enclosing object, subresources might not be allowed. + // Required. + Resources []string `json:"resources,omitempty" protobuf:"bytes,3,rep,name=resources"` +} diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/admissionregistration/v1alpha1/types_swagger_doc_generated.go new file mode 100644 index 000000000..69e4b7c64 --- /dev/null +++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/types_swagger_doc_generated.go @@ -0,0 +1,71 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_Initializer = map[string]string{ + "": "Initializer describes the name and the failure policy of an initializer, and what resources it applies to.", + "name": "Name is the identifier of the initializer. It will be added to the object that needs to be initialized. Name should be fully qualified, e.g., alwayspullimages.kubernetes.io, where \"alwayspullimages\" is the name of the webhook, and kubernetes.io is the name of the organization. Required", + "rules": "Rules describes what resources/subresources the initializer cares about. The initializer cares about an operation if it matches _any_ Rule. Rule.Resources must not include subresources.", +} + +func (Initializer) SwaggerDoc() map[string]string { + return map_Initializer +} + +var map_InitializerConfiguration = map[string]string{ + "": "InitializerConfiguration describes the configuration of initializers.", + "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata.", + "initializers": "Initializers is a list of resources and their default initializers Order-sensitive. 
When merging multiple InitializerConfigurations, we sort the initializers from different InitializerConfigurations by the name of the InitializerConfigurations; the order of the initializers from the same InitializerConfiguration is preserved.", +} + +func (InitializerConfiguration) SwaggerDoc() map[string]string { + return map_InitializerConfiguration +} + +var map_InitializerConfigurationList = map[string]string{ + "": "InitializerConfigurationList is a list of InitializerConfiguration.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "items": "List of InitializerConfiguration.", +} + +func (InitializerConfigurationList) SwaggerDoc() map[string]string { + return map_InitializerConfigurationList +} + +var map_Rule = map[string]string{ + "": "Rule is a tuple of APIGroups, APIVersion, and Resources.It is recommended to make sure that all the tuple expansions are valid.", + "apiGroups": "APIGroups is the API groups the resources belong to. '*' is all groups. If '*' is present, the length of the slice must be one. Required.", + "apiVersions": "APIVersions is the API versions the resources belong to. '*' is all versions. If '*' is present, the length of the slice must be one. Required.", + "resources": "Resources is a list of resources this rule applies to.\n\nFor example: 'pods' means pods. 'pods/log' means the log subresource of pods. '*' means all resources, but not subresources. 'pods/*' means all subresources of pods. '*/scale' means all scale subresources. '*/*' means all resources and their subresources.\n\nIf wildcard is present, the validation rule will ensure resources do not overlap with each other.\n\nDepending on the enclosing object, subresources might not be allowed. Required.", +} + +func (Rule) SwaggerDoc() map[string]string { + return map_Rule +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/node/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/admissionregistration/v1alpha1/zz_generated.deepcopy.go similarity index 51% rename from vendor/k8s.io/api/node/v1alpha1/zz_generated.deepcopy.go rename to vendor/k8s.io/api/admissionregistration/v1alpha1/zz_generated.deepcopy.go index 91b8d8016..9f636b484 100644 --- a/vendor/k8s.io/api/node/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/zz_generated.deepcopy.go @@ -25,26 +25,55 @@ import ( ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RuntimeClass) DeepCopyInto(out *RuntimeClass) { +func (in *Initializer) DeepCopyInto(out *Initializer) { + *out = *in + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]Rule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Initializer. +func (in *Initializer) DeepCopy() *Initializer { + if in == nil { + return nil + } + out := new(Initializer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
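[Editor's aside — a hedged sketch constructing the v1alpha1 types above in Go, matching the Rule wildcard semantics documented in types.go; the configuration name, initializer name, and rule values are illustrative only.]

// Illustrative: a cluster-scoped InitializerConfiguration whose single
// initializer matches core-group v1 pods (no subresources, per the Rule
// documentation above).
cfg := &v1alpha1.InitializerConfiguration{
	ObjectMeta: metav1.ObjectMeta{Name: "example"},
	Initializers: []v1alpha1.Initializer{{
		// Fully qualified, as required: <plugin name>.<organization>.
		Name: "podimagepolicy.example.com",
		Rules: []v1alpha1.Rule{{
			APIGroups:   []string{""}, // "" is the core group; "*" would have to stand alone
			APIVersions: []string{"v1"},
			Resources:   []string{"pods"},
		}},
	}},
}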
+func (in *InitializerConfiguration) DeepCopyInto(out *InitializerConfiguration) {
 	*out = *in
 	out.TypeMeta = in.TypeMeta
 	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
-	out.Spec = in.Spec
+	if in.Initializers != nil {
+		in, out := &in.Initializers, &out.Initializers
+		*out = make([]Initializer, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
 	return
 }
 
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuntimeClass.
-func (in *RuntimeClass) DeepCopy() *RuntimeClass {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InitializerConfiguration.
+func (in *InitializerConfiguration) DeepCopy() *InitializerConfiguration {
 	if in == nil {
 		return nil
 	}
-	out := new(RuntimeClass)
+	out := new(InitializerConfiguration)
 	in.DeepCopyInto(out)
 	return out
 }
 
 // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *RuntimeClass) DeepCopyObject() runtime.Object {
+func (in *InitializerConfiguration) DeepCopyObject() runtime.Object {
 	if c := in.DeepCopy(); c != nil {
 		return c
 	}
@@ -52,13 +81,13 @@ func (in *RuntimeClass) DeepCopyObject() runtime.Object {
 }
 
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *RuntimeClassList) DeepCopyInto(out *RuntimeClassList) {
+func (in *InitializerConfigurationList) DeepCopyInto(out *InitializerConfigurationList) {
 	*out = *in
 	out.TypeMeta = in.TypeMeta
-	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	out.ListMeta = in.ListMeta
 	if in.Items != nil {
 		in, out := &in.Items, &out.Items
-		*out = make([]RuntimeClass, len(*in))
+		*out = make([]InitializerConfiguration, len(*in))
 		for i := range *in {
 			(*in)[i].DeepCopyInto(&(*out)[i])
 		}
@@ -66,18 +95,18 @@ func (in *RuntimeClassList) DeepCopyInto(out *RuntimeClassList) {
 	return
 }
 
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuntimeClassList.
-func (in *RuntimeClassList) DeepCopy() *RuntimeClassList {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InitializerConfigurationList.
+func (in *InitializerConfigurationList) DeepCopy() *InitializerConfigurationList {
 	if in == nil {
 		return nil
 	}
-	out := new(RuntimeClassList)
+	out := new(InitializerConfigurationList)
 	in.DeepCopyInto(out)
 	return out
 }
 
 // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *RuntimeClassList) DeepCopyObject() runtime.Object {
+func (in *InitializerConfigurationList) DeepCopyObject() runtime.Object {
 	if c := in.DeepCopy(); c != nil {
 		return c
 	}
@@ -85,17 +114,32 @@ func (in *RuntimeClassList) DeepCopyObject() runtime.Object {
 }
 
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *RuntimeClassSpec) DeepCopyInto(out *RuntimeClassSpec) {
+func (in *Rule) DeepCopyInto(out *Rule) {
 	*out = *in
+	if in.APIGroups != nil {
+		in, out := &in.APIGroups, &out.APIGroups
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.APIVersions != nil {
+		in, out := &in.APIVersions, &out.APIVersions
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.Resources != nil {
+		in, out := &in.Resources, &out.Resources
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
 	return
 }
 
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuntimeClassSpec.
-func (in *RuntimeClassSpec) DeepCopy() *RuntimeClassSpec {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Rule.
+func (in *Rule) DeepCopy() *Rule {
 	if in == nil {
 		return nil
 	}
-	out := new(RuntimeClassSpec)
+	out := new(Rule)
 	in.DeepCopyInto(out)
 	return out
 }
diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/doc.go b/vendor/k8s.io/api/admissionregistration/v1beta1/doc.go
index 0a40726fa..afbb3d6d3 100644
--- a/vendor/k8s.io/api/admissionregistration/v1beta1/doc.go
+++ b/vendor/k8s.io/api/admissionregistration/v1beta1/doc.go
@@ -15,12 +15,11 @@ limitations under the License.
 */
 
 // +k8s:deepcopy-gen=package
-// +k8s:protobuf-gen=package
 // +k8s:openapi-gen=true
-// +groupName=admissionregistration.k8s.io
 
 // Package v1beta1 is the v1beta1 version of the API.
 // AdmissionConfiguration and AdmissionPluginConfiguration are legacy static admission plugin configuration
-// MutatingWebhookConfiguration and ValidatingWebhookConfiguration are for the
+// InitializerConfiguration and ValidatingWebhookConfiguration are for the
 // new dynamic admission controller configuration.
+// +groupName=admissionregistration.k8s.io
 package v1beta1 // import "k8s.io/api/admissionregistration/v1beta1"
diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go b/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go
index b7ab68acb..d6c9d958b 100644
--- a/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go
+++ b/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go
@@ -14,8 +14,9 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// Code generated by protoc-gen-gogo.
 // source: k8s.io/kubernetes/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto
+// DO NOT EDIT!
 
 /*
 	Package v1beta1 is a generated protocol buffer package.
@@ -24,15 +25,14 @@ limitations under the License.
 	k8s.io/kubernetes/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto
 
 	It has these top-level messages:
-		MutatingWebhook
 		MutatingWebhookConfiguration
 		MutatingWebhookConfigurationList
 		Rule
 		RuleWithOperations
 		ServiceReference
-		ValidatingWebhook
 		ValidatingWebhookConfiguration
 		ValidatingWebhookConfigurationList
+		Webhook
 		WebhookClientConfig
 */
 package v1beta1
@@ -59,172 +59,61 @@ var _ = math.Inf
 // proto package needs to be updated.
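 //
 // A sketch for orientation, not authoritative protobuf documentation: every
 // hand-rolled Marshal/Unmarshal below frames a field as a varint tag
 // (fieldNumber<<3 | wireType) followed by a varint length or value, encoded
 // 7 bits per byte, low-order bits first, with the high bit marking "more
 // bytes follow". The decode loops below ("for shift := uint(0); ; shift += 7")
 // read that format back. A stand-alone encoder equivalent to the
 // encodeVarintGenerated helper further down (putUvarint is an illustrative
 // name, not part of this file):
 //
 //	func putUvarint(buf []byte, offset int, v uint64) int {
 //		for v >= 1<<7 {
 //			buf[offset] = uint8(v&0x7f | 0x80) // low 7 bits, continuation bit set
 //			v >>= 7
 //			offset++
 //		}
 //		buf[offset] = uint8(v) // last byte: continuation bit clear
 //		return offset + 1
 //	}
 //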
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *MutatingWebhook) Reset() { *m = MutatingWebhook{} } -func (*MutatingWebhook) ProtoMessage() {} -func (*MutatingWebhook) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } - func (m *MutatingWebhookConfiguration) Reset() { *m = MutatingWebhookConfiguration{} } func (*MutatingWebhookConfiguration) ProtoMessage() {} func (*MutatingWebhookConfiguration) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{1} + return fileDescriptorGenerated, []int{0} } func (m *MutatingWebhookConfigurationList) Reset() { *m = MutatingWebhookConfigurationList{} } func (*MutatingWebhookConfigurationList) ProtoMessage() {} func (*MutatingWebhookConfigurationList) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{2} + return fileDescriptorGenerated, []int{1} } func (m *Rule) Reset() { *m = Rule{} } func (*Rule) ProtoMessage() {} -func (*Rule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +func (*Rule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } func (m *RuleWithOperations) Reset() { *m = RuleWithOperations{} } func (*RuleWithOperations) ProtoMessage() {} -func (*RuleWithOperations) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +func (*RuleWithOperations) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } func (m *ServiceReference) Reset() { *m = ServiceReference{} } func (*ServiceReference) ProtoMessage() {} -func (*ServiceReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } - -func (m *ValidatingWebhook) Reset() { *m = ValidatingWebhook{} } -func (*ValidatingWebhook) ProtoMessage() {} -func (*ValidatingWebhook) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } +func (*ServiceReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } func (m *ValidatingWebhookConfiguration) Reset() { *m = ValidatingWebhookConfiguration{} } func (*ValidatingWebhookConfiguration) ProtoMessage() {} func (*ValidatingWebhookConfiguration) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{7} + return fileDescriptorGenerated, []int{5} } func (m *ValidatingWebhookConfigurationList) Reset() { *m = ValidatingWebhookConfigurationList{} } func (*ValidatingWebhookConfigurationList) ProtoMessage() {} func (*ValidatingWebhookConfigurationList) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{8} + return fileDescriptorGenerated, []int{6} } +func (m *Webhook) Reset() { *m = Webhook{} } +func (*Webhook) ProtoMessage() {} +func (*Webhook) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } + func (m *WebhookClientConfig) Reset() { *m = WebhookClientConfig{} } func (*WebhookClientConfig) ProtoMessage() {} -func (*WebhookClientConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } +func (*WebhookClientConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } func init() { - proto.RegisterType((*MutatingWebhook)(nil), "k8s.io.api.admissionregistration.v1beta1.MutatingWebhook") proto.RegisterType((*MutatingWebhookConfiguration)(nil), "k8s.io.api.admissionregistration.v1beta1.MutatingWebhookConfiguration") proto.RegisterType((*MutatingWebhookConfigurationList)(nil), "k8s.io.api.admissionregistration.v1beta1.MutatingWebhookConfigurationList") proto.RegisterType((*Rule)(nil), 
"k8s.io.api.admissionregistration.v1beta1.Rule") proto.RegisterType((*RuleWithOperations)(nil), "k8s.io.api.admissionregistration.v1beta1.RuleWithOperations") proto.RegisterType((*ServiceReference)(nil), "k8s.io.api.admissionregistration.v1beta1.ServiceReference") - proto.RegisterType((*ValidatingWebhook)(nil), "k8s.io.api.admissionregistration.v1beta1.ValidatingWebhook") proto.RegisterType((*ValidatingWebhookConfiguration)(nil), "k8s.io.api.admissionregistration.v1beta1.ValidatingWebhookConfiguration") proto.RegisterType((*ValidatingWebhookConfigurationList)(nil), "k8s.io.api.admissionregistration.v1beta1.ValidatingWebhookConfigurationList") + proto.RegisterType((*Webhook)(nil), "k8s.io.api.admissionregistration.v1beta1.Webhook") proto.RegisterType((*WebhookClientConfig)(nil), "k8s.io.api.admissionregistration.v1beta1.WebhookClientConfig") } -func (m *MutatingWebhook) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MutatingWebhook) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ClientConfig.Size())) - n1, err := m.ClientConfig.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - if len(m.Rules) > 0 { - for _, msg := range m.Rules { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if m.FailurePolicy != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FailurePolicy))) - i += copy(dAtA[i:], *m.FailurePolicy) - } - if m.NamespaceSelector != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NamespaceSelector.Size())) - n2, err := m.NamespaceSelector.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n2 - } - if m.SideEffects != nil { - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SideEffects))) - i += copy(dAtA[i:], *m.SideEffects) - } - if m.TimeoutSeconds != nil { - dAtA[i] = 0x38 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds)) - } - if len(m.AdmissionReviewVersions) > 0 { - for _, s := range m.AdmissionReviewVersions { - dAtA[i] = 0x42 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - if m.MatchPolicy != nil { - dAtA[i] = 0x4a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.MatchPolicy))) - i += copy(dAtA[i:], *m.MatchPolicy) - } - if m.ReinvocationPolicy != nil { - dAtA[i] = 0x52 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ReinvocationPolicy))) - i += copy(dAtA[i:], *m.ReinvocationPolicy) - } - if m.ObjectSelector != nil { - dAtA[i] = 0x5a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectSelector.Size())) - n3, err := m.ObjectSelector.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n3 - } - return i, nil -} - func (m *MutatingWebhookConfiguration) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -243,11 +132,11 @@ func (m *MutatingWebhookConfiguration) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n4, err := 
m.ObjectMeta.MarshalTo(dAtA[i:]) + n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n4 + i += n1 if len(m.Webhooks) > 0 { for _, msg := range m.Webhooks { dAtA[i] = 0x12 @@ -281,11 +170,11 @@ func (m *MutatingWebhookConfigurationList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n5, err := m.ListMeta.MarshalTo(dAtA[i:]) + n2, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n5 + i += n2 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -361,12 +250,6 @@ func (m *Rule) MarshalTo(dAtA []byte) (int, error) { i += copy(dAtA[i:], s) } } - if m.Scope != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Scope))) - i += copy(dAtA[i:], *m.Scope) - } return i, nil } @@ -403,11 +286,11 @@ func (m *RuleWithOperations) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Rule.Size())) - n6, err := m.Rule.MarshalTo(dAtA[i:]) + n3, err := m.Rule.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n6 + i += n3 return i, nil } @@ -440,15 +323,10 @@ func (m *ServiceReference) MarshalTo(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Path))) i += copy(dAtA[i:], *m.Path) } - if m.Port != nil { - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.Port)) - } return i, nil } -func (m *ValidatingWebhook) Marshal() (dAtA []byte, err error) { +func (m *ValidatingWebhookConfiguration) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) @@ -458,26 +336,22 @@ func (m *ValidatingWebhook) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ValidatingWebhook) MarshalTo(dAtA []byte) (int, error) { +func (m *ValidatingWebhookConfiguration) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l dAtA[i] = 0xa i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ClientConfig.Size())) - n7, err := m.ClientConfig.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n4, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n7 - if len(m.Rules) > 0 { - for _, msg := range m.Rules { - dAtA[i] = 0x1a + i += n4 + if len(m.Webhooks) > 0 { + for _, msg := range m.Webhooks { + dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) n, err := msg.MarshalTo(dAtA[i:]) @@ -487,68 +361,10 @@ func (m *ValidatingWebhook) MarshalTo(dAtA []byte) (int, error) { i += n } } - if m.FailurePolicy != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FailurePolicy))) - i += copy(dAtA[i:], *m.FailurePolicy) - } - if m.NamespaceSelector != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NamespaceSelector.Size())) - n8, err := m.NamespaceSelector.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n8 - } - if m.SideEffects != nil { - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SideEffects))) - i += copy(dAtA[i:], *m.SideEffects) - } - if m.TimeoutSeconds != nil { - dAtA[i] = 0x38 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds)) - } - if len(m.AdmissionReviewVersions) > 0 { - for _, s := range m.AdmissionReviewVersions { - dAtA[i] = 0x42 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = 
uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - if m.MatchPolicy != nil { - dAtA[i] = 0x4a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.MatchPolicy))) - i += copy(dAtA[i:], *m.MatchPolicy) - } - if m.ObjectSelector != nil { - dAtA[i] = 0x52 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectSelector.Size())) - n9, err := m.ObjectSelector.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n9 - } return i, nil } -func (m *ValidatingWebhookConfiguration) Marshal() (dAtA []byte, err error) { +func (m *ValidatingWebhookConfigurationList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) @@ -558,21 +374,21 @@ func (m *ValidatingWebhookConfiguration) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ValidatingWebhookConfiguration) MarshalTo(dAtA []byte) (int, error) { +func (m *ValidatingWebhookConfigurationList) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l dAtA[i] = 0xa i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n10, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n5, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n10 - if len(m.Webhooks) > 0 { - for _, msg := range m.Webhooks { + i += n5 + if len(m.Items) > 0 { + for _, msg := range m.Items { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) @@ -586,7 +402,7 @@ func (m *ValidatingWebhookConfiguration) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func (m *ValidatingWebhookConfigurationList) Marshal() (dAtA []byte, err error) { +func (m *Webhook) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) @@ -596,22 +412,26 @@ func (m *ValidatingWebhookConfigurationList) Marshal() (dAtA []byte, err error) return dAtA[:n], nil } -func (m *ValidatingWebhookConfigurationList) MarshalTo(dAtA []byte) (int, error) { +func (m *Webhook) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l dAtA[i] = 0xa i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n11, err := m.ListMeta.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ClientConfig.Size())) + n6, err := m.ClientConfig.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n11 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 + i += n6 + if len(m.Rules) > 0 { + for _, msg := range m.Rules { + dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) n, err := msg.MarshalTo(dAtA[i:]) @@ -621,6 +441,28 @@ func (m *ValidatingWebhookConfigurationList) MarshalTo(dAtA []byte) (int, error) i += n } } + if m.FailurePolicy != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FailurePolicy))) + i += copy(dAtA[i:], *m.FailurePolicy) + } + if m.NamespaceSelector != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.NamespaceSelector.Size())) + n7, err := m.NamespaceSelector.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + } + if m.SideEffects != nil { + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SideEffects))) + i += copy(dAtA[i:], *m.SideEffects) + } return i, nil } @@ -643,11 +485,11 @@ func (m *WebhookClientConfig) 
MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Service.Size())) - n12, err := m.Service.MarshalTo(dAtA[i:]) + n8, err := m.Service.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n12 + i += n8 } if m.CABundle != nil { dAtA[i] = 0x12 @@ -664,6 +506,24 @@ func (m *WebhookClientConfig) MarshalTo(dAtA []byte) (int, error) { return i, nil } +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -673,55 +533,6 @@ func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { dAtA[offset] = uint8(v) return offset + 1 } -func (m *MutatingWebhook) Size() (n int) { - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = m.ClientConfig.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Rules) > 0 { - for _, e := range m.Rules { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.FailurePolicy != nil { - l = len(*m.FailurePolicy) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.NamespaceSelector != nil { - l = m.NamespaceSelector.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.SideEffects != nil { - l = len(*m.SideEffects) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.TimeoutSeconds != nil { - n += 1 + sovGenerated(uint64(*m.TimeoutSeconds)) - } - if len(m.AdmissionReviewVersions) > 0 { - for _, s := range m.AdmissionReviewVersions { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.MatchPolicy != nil { - l = len(*m.MatchPolicy) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.ReinvocationPolicy != nil { - l = len(*m.ReinvocationPolicy) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.ObjectSelector != nil { - l = m.ObjectSelector.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - func (m *MutatingWebhookConfiguration) Size() (n int) { var l int _ = l @@ -771,10 +582,6 @@ func (m *Rule) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } - if m.Scope != nil { - l = len(*m.Scope) - n += 1 + l + sovGenerated(uint64(l)) - } return n } @@ -803,64 +610,30 @@ func (m *ServiceReference) Size() (n int) { l = len(*m.Path) n += 1 + l + sovGenerated(uint64(l)) } - if m.Port != nil { - n += 1 + sovGenerated(uint64(*m.Port)) - } return n } -func (m *ValidatingWebhook) Size() (n int) { +func (m *ValidatingWebhookConfiguration) Size() (n int) { var l int _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = m.ClientConfig.Size() + l = m.ObjectMeta.Size() n += 1 + l + sovGenerated(uint64(l)) - if len(m.Rules) > 0 { - for _, e := range m.Rules { + if len(m.Webhooks) > 0 { + for _, e := range m.Webhooks { l = e.Size() n += 1 + l + sovGenerated(uint64(l)) } } - if m.FailurePolicy != nil { - l = len(*m.FailurePolicy) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.NamespaceSelector != nil { - l = m.NamespaceSelector.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.SideEffects != nil { - l = 
len(*m.SideEffects) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.TimeoutSeconds != nil { - n += 1 + sovGenerated(uint64(*m.TimeoutSeconds)) - } - if len(m.AdmissionReviewVersions) > 0 { - for _, s := range m.AdmissionReviewVersions { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.MatchPolicy != nil { - l = len(*m.MatchPolicy) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.ObjectSelector != nil { - l = m.ObjectSelector.Size() - n += 1 + l + sovGenerated(uint64(l)) - } return n } -func (m *ValidatingWebhookConfiguration) Size() (n int) { +func (m *ValidatingWebhookConfigurationList) Size() (n int) { var l int _ = l - l = m.ObjectMeta.Size() + l = m.ListMeta.Size() n += 1 + l + sovGenerated(uint64(l)) - if len(m.Webhooks) > 0 { - for _, e := range m.Webhooks { + if len(m.Items) > 0 { + for _, e := range m.Items { l = e.Size() n += 1 + l + sovGenerated(uint64(l)) } @@ -868,17 +641,31 @@ func (m *ValidatingWebhookConfiguration) Size() (n int) { return n } -func (m *ValidatingWebhookConfigurationList) Size() (n int) { +func (m *Webhook) Size() (n int) { var l int _ = l - l = m.ListMeta.Size() + l = len(m.Name) n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { + l = m.ClientConfig.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Rules) > 0 { + for _, e := range m.Rules { l = e.Size() n += 1 + l + sovGenerated(uint64(l)) } } + if m.FailurePolicy != nil { + l = len(*m.FailurePolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.NamespaceSelector != nil { + l = m.NamespaceSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.SideEffects != nil { + l = len(*m.SideEffects) + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -913,33 +700,13 @@ func sovGenerated(x uint64) (n int) { func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } -func (this *MutatingWebhook) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&MutatingWebhook{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `ClientConfig:` + strings.Replace(strings.Replace(this.ClientConfig.String(), "WebhookClientConfig", "WebhookClientConfig", 1), `&`, ``, 1) + `,`, - `Rules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Rules), "RuleWithOperations", "RuleWithOperations", 1), `&`, ``, 1) + `,`, - `FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`, - `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `SideEffects:` + valueToStringGenerated(this.SideEffects) + `,`, - `TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`, - `AdmissionReviewVersions:` + fmt.Sprintf("%v", this.AdmissionReviewVersions) + `,`, - `MatchPolicy:` + valueToStringGenerated(this.MatchPolicy) + `,`, - `ReinvocationPolicy:` + valueToStringGenerated(this.ReinvocationPolicy) + `,`, - `ObjectSelector:` + strings.Replace(fmt.Sprintf("%v", this.ObjectSelector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `}`, - }, "") - return s -} func (this *MutatingWebhookConfiguration) String() string { if this == nil { return "nil" } s := strings.Join([]string{`&MutatingWebhookConfiguration{`, `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Webhooks:` + 
strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Webhooks), "MutatingWebhook", "MutatingWebhook", 1), `&`, ``, 1) + `,`, + `Webhooks:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Webhooks), "Webhook", "Webhook", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -963,7 +730,6 @@ func (this *Rule) String() string { `APIGroups:` + fmt.Sprintf("%v", this.APIGroups) + `,`, `APIVersions:` + fmt.Sprintf("%v", this.APIVersions) + `,`, `Resources:` + fmt.Sprintf("%v", this.Resources) + `,`, - `Scope:` + valueToStringGenerated(this.Scope) + `,`, `}`, }, "") return s @@ -987,26 +753,6 @@ func (this *ServiceReference) String() string { `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, `Path:` + valueToStringGenerated(this.Path) + `,`, - `Port:` + valueToStringGenerated(this.Port) + `,`, - `}`, - }, "") - return s -} -func (this *ValidatingWebhook) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ValidatingWebhook{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `ClientConfig:` + strings.Replace(strings.Replace(this.ClientConfig.String(), "WebhookClientConfig", "WebhookClientConfig", 1), `&`, ``, 1) + `,`, - `Rules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Rules), "RuleWithOperations", "RuleWithOperations", 1), `&`, ``, 1) + `,`, - `FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`, - `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `SideEffects:` + valueToStringGenerated(this.SideEffects) + `,`, - `TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`, - `AdmissionReviewVersions:` + fmt.Sprintf("%v", this.AdmissionReviewVersions) + `,`, - `MatchPolicy:` + valueToStringGenerated(this.MatchPolicy) + `,`, - `ObjectSelector:` + strings.Replace(fmt.Sprintf("%v", this.ObjectSelector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, `}`, }, "") return s @@ -1017,7 +763,7 @@ func (this *ValidatingWebhookConfiguration) String() string { } s := strings.Join([]string{`&ValidatingWebhookConfiguration{`, `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Webhooks:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Webhooks), "ValidatingWebhook", "ValidatingWebhook", 1), `&`, ``, 1) + `,`, + `Webhooks:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Webhooks), "Webhook", "Webhook", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -1033,6 +779,21 @@ func (this *ValidatingWebhookConfigurationList) String() string { }, "") return s } +func (this *Webhook) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Webhook{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `ClientConfig:` + strings.Replace(strings.Replace(this.ClientConfig.String(), "WebhookClientConfig", "WebhookClientConfig", 1), `&`, ``, 1) + `,`, + `Rules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Rules), "RuleWithOperations", "RuleWithOperations", 1), `&`, ``, 1) + `,`, + `FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`, + `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `SideEffects:` + valueToStringGenerated(this.SideEffects) + `,`, + `}`, 
+ }, "") + return s +} func (this *WebhookClientConfig) String() string { if this == nil { return "nil" @@ -1046,702 +807,47 @@ func (this *WebhookClientConfig) String() string { return s } func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *MutatingWebhook) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MutatingWebhook: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MutatingWebhook: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ClientConfig", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ClientConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Rules = append(m.Rules, RuleWithOperations{}) - if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FailurePolicy", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if 
postIndex > l { - return io.ErrUnexpectedEOF - } - s := FailurePolicyType(dAtA[iNdEx:postIndex]) - m.FailurePolicy = &s - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NamespaceSelector", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.NamespaceSelector == nil { - m.NamespaceSelector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} - } - if err := m.NamespaceSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SideEffects", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := SideEffectClass(dAtA[iNdEx:postIndex]) - m.SideEffects = &s - iNdEx = postIndex - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.TimeoutSeconds = &v - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AdmissionReviewVersions", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.AdmissionReviewVersions = append(m.AdmissionReviewVersions, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MatchPolicy", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := MatchPolicyType(dAtA[iNdEx:postIndex]) - m.MatchPolicy = &s - iNdEx = postIndex - case 10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ReinvocationPolicy", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := ReinvocationPolicyType(dAtA[iNdEx:postIndex]) - m.ReinvocationPolicy = &s - iNdEx = postIndex - case 11: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectSelector", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ObjectSelector == nil { - m.ObjectSelector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} - } - if err := m.ObjectSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MutatingWebhookConfiguration) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MutatingWebhookConfiguration: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MutatingWebhookConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Webhooks", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Webhooks = append(m.Webhooks, MutatingWebhook{}) - if err := m.Webhooks[len(m.Webhooks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { 
- return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MutatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MutatingWebhookConfigurationList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MutatingWebhookConfigurationList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, MutatingWebhookConfiguration{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Rule) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Rule: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Rule: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field APIGroups", wireType) - } - var stringLen uint64 - for shift := 
uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.APIGroups = append(m.APIGroups, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field APIVersions", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *MutatingWebhookConfiguration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - postIndex := iNdEx + intStringLen - if postIndex > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - m.APIVersions = append(m.APIVersions, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 3: + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MutatingWebhookConfiguration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MutatingWebhookConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -1751,26 +857,27 @@ func (m *Rule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.Resources = append(m.Resources, string(dAtA[iNdEx:postIndex])) + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 4: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Webhooks", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -1780,21 +887,22 @@ func (m *Rule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return 
ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - s := ScopeType(dAtA[iNdEx:postIndex]) - m.Scope = &s + m.Webhooks = append(m.Webhooks, Webhook{}) + if err := m.Webhooks[len(m.Webhooks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -1817,7 +925,7 @@ func (m *Rule) Unmarshal(dAtA []byte) error { } return nil } -func (m *RuleWithOperations) Unmarshal(dAtA []byte) error { +func (m *MutatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1840,17 +948,17 @@ func (m *RuleWithOperations) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RuleWithOperations: wiretype end group for non-group") + return fmt.Errorf("proto: MutatingWebhookConfigurationList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RuleWithOperations: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: MutatingWebhookConfigurationList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Operations", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -1860,24 +968,25 @@ func (m *RuleWithOperations) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.Operations = append(m.Operations, OperationType(dAtA[iNdEx:postIndex])) + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -1901,7 +1010,8 @@ func (m *RuleWithOperations) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Rule.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Items = append(m.Items, MutatingWebhookConfiguration{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -1926,7 +1036,7 @@ func (m *RuleWithOperations) Unmarshal(dAtA []byte) error { } return nil } -func (m *ServiceReference) Unmarshal(dAtA []byte) error { +func (m *Rule) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1949,15 +1059,15 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ServiceReference: wiretype end group for non-group") + return fmt.Errorf("proto: Rule: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ServiceReference: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Rule: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if 
wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field APIGroups", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -1982,11 +1092,11 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Namespace = string(dAtA[iNdEx:postIndex]) + m.APIGroups = append(m.APIGroups, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field APIVersions", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -2011,11 +1121,11 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + m.APIVersions = append(m.APIVersions, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -2040,29 +1150,8 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.Path = &s + m.Resources = append(m.Resources, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Port = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -2084,7 +1173,7 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error { } return nil } -func (m *ValidatingWebhook) Unmarshal(dAtA []byte) error { +func (m *RuleWithOperations) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2107,15 +1196,15 @@ func (m *ValidatingWebhook) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ValidatingWebhook: wiretype end group for non-group") + return fmt.Errorf("proto: RuleWithOperations: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ValidatingWebhook: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RuleWithOperations: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Operations", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -2140,11 +1229,11 @@ func (m *ValidatingWebhook) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + m.Operations = append(m.Operations, OperationType(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ClientConfig", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -2168,107 +1257,63 @@ func (m 
*ValidatingWebhook) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ClientConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Rule.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Rules = append(m.Rules, RuleWithOperations{}) - if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { return err } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FailurePolicy", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { + if skippy < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - s := FailurePolicyType(dAtA[iNdEx:postIndex]) - m.FailurePolicy = &s - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NamespaceSelector", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ServiceReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - postIndex := iNdEx + msglen - if postIndex > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - if m.NamespaceSelector == nil { - m.NamespaceSelector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} - } - if err := m.NamespaceSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 6: + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServiceReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServiceReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SideEffects", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -2293,14 +1338,13 @@ func (m 
*ValidatingWebhook) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - s := SideEffectClass(dAtA[iNdEx:postIndex]) - m.SideEffects = &s + m.Namespace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } - var v int32 + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2310,15 +1354,24 @@ func (m *ValidatingWebhook) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - m.TimeoutSeconds = &v - case 8: + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AdmissionReviewVersions", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -2343,13 +1396,64 @@ func (m *ValidatingWebhook) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.AdmissionReviewVersions = append(m.AdmissionReviewVersions, string(dAtA[iNdEx:postIndex])) + s := string(dAtA[iNdEx:postIndex]) + m.Path = &s iNdEx = postIndex - case 9: + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ValidatingWebhookConfiguration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ValidatingWebhookConfiguration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ValidatingWebhookConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MatchPolicy", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2359,25 +1463,25 @@ func (m *ValidatingWebhook) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - s := MatchPolicyType(dAtA[iNdEx:postIndex]) - m.MatchPolicy = &s + if err := 
m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 10: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectSelector", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Webhooks", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -2401,10 +1505,8 @@ func (m *ValidatingWebhook) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.ObjectSelector == nil { - m.ObjectSelector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} - } - if err := m.ObjectSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Webhooks = append(m.Webhooks, Webhook{}) + if err := m.Webhooks[len(m.Webhooks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -2429,7 +1531,7 @@ func (m *ValidatingWebhook) Unmarshal(dAtA []byte) error { } return nil } -func (m *ValidatingWebhookConfiguration) Unmarshal(dAtA []byte) error { +func (m *ValidatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2452,15 +1554,15 @@ func (m *ValidatingWebhookConfiguration) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ValidatingWebhookConfiguration: wiretype end group for non-group") + return fmt.Errorf("proto: ValidatingWebhookConfigurationList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ValidatingWebhookConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ValidatingWebhookConfigurationList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -2484,13 +1586,13 @@ func (m *ValidatingWebhookConfiguration) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Webhooks", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -2514,8 +1616,8 @@ func (m *ValidatingWebhookConfiguration) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Webhooks = append(m.Webhooks, ValidatingWebhook{}) - if err := m.Webhooks[len(m.Webhooks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Items = append(m.Items, ValidatingWebhookConfiguration{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -2540,7 +1642,7 @@ func (m *ValidatingWebhookConfiguration) Unmarshal(dAtA []byte) error { } return nil } -func (m *ValidatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { +func (m *Webhook) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2563,15 +1665,44 @@ func (m *ValidatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ValidatingWebhookConfigurationList: wiretype end group for 
non-group") + return fmt.Errorf("proto: Webhook: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ValidatingWebhookConfigurationList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Webhook: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientConfig", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -2595,13 +1726,13 @@ func (m *ValidatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ClientConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -2625,11 +1756,104 @@ func (m *ValidatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, ValidatingWebhookConfiguration{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Rules = append(m.Rules, RuleWithOperations{}) + if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FailurePolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := FailurePolicyType(dAtA[iNdEx:postIndex]) + m.FailurePolicy = &s + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NamespaceSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NamespaceSelector == nil { + m.NamespaceSelector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + } + if err := 
m.NamespaceSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SideEffects", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := SideEffectClass(dAtA[iNdEx:postIndex]) + m.SideEffects = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -2905,75 +2129,62 @@ func init() { } var fileDescriptorGenerated = []byte{ - // 1113 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x55, 0x4d, 0x6f, 0x1b, 0xc5, - 0x1b, 0xcf, 0xc6, 0x76, 0x6d, 0x8f, 0x93, 0xa6, 0x99, 0xff, 0x9f, 0xd6, 0x84, 0xca, 0x6b, 0xf9, - 0x80, 0x2c, 0x41, 0x77, 0x9b, 0x80, 0x10, 0x14, 0x10, 0xca, 0x06, 0x0a, 0x91, 0x92, 0x36, 0x4c, - 0xfa, 0x22, 0xf1, 0x22, 0x75, 0xbc, 0x1e, 0xdb, 0x83, 0xed, 0x9d, 0xd5, 0xce, 0xac, 0x43, 0x6e, - 0x7c, 0x04, 0xbe, 0x02, 0x27, 0x3e, 0x05, 0x07, 0x6e, 0xe1, 0xd6, 0x63, 0x2f, 0xac, 0xc8, 0x72, - 0xe2, 0xc0, 0x81, 0x6b, 0x4e, 0x68, 0x66, 0xc7, 0xeb, 0x97, 0x4d, 0x8a, 0x29, 0xa2, 0x17, 0x7a, - 0xdb, 0xf9, 0x3d, 0xf3, 0xfc, 0x9e, 0x97, 0xd9, 0xe7, 0xf9, 0x81, 0x4f, 0xfb, 0x6f, 0x73, 0x8b, - 0x32, 0xbb, 0x1f, 0xb6, 0x48, 0xe0, 0x11, 0x41, 0xb8, 0x3d, 0x22, 0x5e, 0x9b, 0x05, 0xb6, 0x36, - 0x60, 0x9f, 0xda, 0xb8, 0x3d, 0xa4, 0x9c, 0x53, 0xe6, 0x05, 0xa4, 0x4b, 0xb9, 0x08, 0xb0, 0xa0, - 0xcc, 0xb3, 0x47, 0x9b, 0x2d, 0x22, 0xf0, 0xa6, 0xdd, 0x25, 0x1e, 0x09, 0xb0, 0x20, 0x6d, 0xcb, - 0x0f, 0x98, 0x60, 0xb0, 0x99, 0x78, 0x5a, 0xd8, 0xa7, 0xd6, 0xb9, 0x9e, 0x96, 0xf6, 0xdc, 0xb8, - 0xd1, 0xa5, 0xa2, 0x17, 0xb6, 0x2c, 0x97, 0x0d, 0xed, 0x2e, 0xeb, 0x32, 0x5b, 0x11, 0xb4, 0xc2, - 0x8e, 0x3a, 0xa9, 0x83, 0xfa, 0x4a, 0x88, 0x37, 0xde, 0x9c, 0xa4, 0x34, 0xc4, 0x6e, 0x8f, 0x7a, - 0x24, 0x38, 0xb6, 0xfd, 0x7e, 0x57, 0x02, 0xdc, 0x1e, 0x12, 0x81, 0xed, 0x51, 0x26, 0x9d, 0x0d, - 0xfb, 0x22, 0xaf, 0x20, 0xf4, 0x04, 0x1d, 0x92, 0x8c, 0xc3, 0x5b, 0x7f, 0xe5, 0xc0, 0xdd, 0x1e, - 0x19, 0xe2, 0x79, 0xbf, 0xc6, 0x4f, 0x45, 0xb0, 0xb6, 0x1f, 0x0a, 0x2c, 0xa8, 0xd7, 0x7d, 0x48, - 0x5a, 0x3d, 0xc6, 0xfa, 0xb0, 0x0e, 0xf2, 0x1e, 0x1e, 0x92, 0xaa, 0x51, 0x37, 0x9a, 0x65, 0x67, - 0xe5, 0x24, 0x32, 0x97, 0xe2, 0xc8, 0xcc, 0xdf, 0xc1, 0x43, 0x82, 0x94, 0x05, 0x1e, 0x81, 0x15, - 0x77, 0x40, 0x89, 0x27, 0x76, 0x98, 0xd7, 0xa1, 0xdd, 0xea, 0x72, 0xdd, 0x68, 0x56, 0xb6, 0xde, - 0xb7, 0x16, 0x6d, 0xa2, 0xa5, 0x43, 0xed, 0x4c, 0x91, 0x38, 0xff, 0xd7, 0x81, 0x56, 0xa6, 0x51, - 0x34, 0x13, 0x08, 0x62, 0x50, 0x08, 0xc2, 0x01, 0xe1, 0xd5, 0x5c, 0x3d, 0xd7, 0xac, 0x6c, 0xbd, - 0xb7, 0x78, 0x44, 0x14, 0x0e, 0xc8, 0x43, 0x2a, 0x7a, 0x77, 0x7d, 0x92, 0x58, 0xb8, 0xb3, 0xaa, - 0x03, 0x16, 0xa4, 0x8d, 0xa3, 0x84, 0x19, 0xee, 0x81, 0xd5, 0x0e, 0xa6, 0x83, 0x30, 0x20, 0x07, - 0x6c, 0x40, 0xdd, 0xe3, 0x6a, 0x5e, 0xb5, 0xe1, 0xd5, 0x38, 0x32, 0x57, 0x6f, 0x4f, 0x1b, 0xce, - 0x22, 0x73, 0x7d, 0x06, 0xb8, 0x77, 0xec, 0x13, 0x34, 0xeb, 0x0c, 0xbf, 0x06, 0xeb, 0xb2, 0x63, - 0xdc, 0xc7, 0x2e, 0x39, 0x24, 0x03, 0xe2, 0x0a, 0x16, 0x54, 0x0b, 0xaa, 0x5d, 0x6f, 0x4c, 0x25, - 0x9f, 0xbe, 0x99, 0xe5, 0xf7, 0xbb, 0x12, 0xe0, 
0x96, 0xfc, 0x35, 0xac, 0xd1, 0xa6, 0xb5, 0x87, - 0x5b, 0x64, 0x30, 0x76, 0x75, 0x5e, 0x8a, 0x23, 0x73, 0xfd, 0xce, 0x3c, 0x23, 0xca, 0x06, 0x81, - 0x1f, 0x82, 0x0a, 0xa7, 0x6d, 0xf2, 0x51, 0xa7, 0x43, 0x5c, 0xc1, 0xab, 0x97, 0x54, 0x15, 0x8d, - 0x38, 0x32, 0x2b, 0x87, 0x13, 0xf8, 0x2c, 0x32, 0xd7, 0x26, 0xc7, 0x9d, 0x01, 0xe6, 0x1c, 0x4d, - 0xbb, 0xc1, 0x5b, 0xe0, 0xb2, 0xfc, 0x7d, 0x58, 0x28, 0x0e, 0x89, 0xcb, 0xbc, 0x36, 0xaf, 0x16, - 0xeb, 0x46, 0xb3, 0xe0, 0xc0, 0x38, 0x32, 0x2f, 0xdf, 0x9b, 0xb1, 0xa0, 0xb9, 0x9b, 0xf0, 0x3e, - 0xb8, 0x96, 0xbe, 0x09, 0x22, 0x23, 0x4a, 0x8e, 0x1e, 0x90, 0x40, 0x1e, 0x78, 0xb5, 0x54, 0xcf, - 0x35, 0xcb, 0xce, 0x2b, 0x71, 0x64, 0x5e, 0xdb, 0x3e, 0xff, 0x0a, 0xba, 0xc8, 0x57, 0x16, 0x36, - 0xc4, 0xc2, 0xed, 0xe9, 0xe7, 0x29, 0x4f, 0x0a, 0xdb, 0x9f, 0xc0, 0xb2, 0xb0, 0xa9, 0xa3, 0x7a, - 0x9a, 0x69, 0x37, 0xf8, 0x08, 0xc0, 0x80, 0x50, 0x6f, 0xc4, 0x5c, 0xf5, 0x37, 0x68, 0x32, 0xa0, - 0xc8, 0x6e, 0xc6, 0x91, 0x09, 0x51, 0xc6, 0x7a, 0x16, 0x99, 0x57, 0xb3, 0xa8, 0xa2, 0x3e, 0x87, - 0x0b, 0x32, 0x70, 0x99, 0xb5, 0xbe, 0x22, 0xae, 0x48, 0xdf, 0xbd, 0xf2, 0xec, 0xef, 0xae, 0xfa, - 0x7d, 0x77, 0x86, 0x0e, 0xcd, 0xd1, 0x37, 0x7e, 0x36, 0xc0, 0xf5, 0xb9, 0x59, 0x4e, 0xc6, 0x26, - 0x4c, 0xfe, 0x78, 0xf8, 0x08, 0x94, 0x24, 0x7b, 0x1b, 0x0b, 0xac, 0x86, 0xbb, 0xb2, 0x75, 0x73, - 0xb1, 0x5c, 0x92, 0xc0, 0xfb, 0x44, 0x60, 0x07, 0xea, 0xa1, 0x01, 0x13, 0x0c, 0xa5, 0xac, 0xf0, - 0x73, 0x50, 0xd2, 0x91, 0x79, 0x75, 0x59, 0x8d, 0xe8, 0x3b, 0x8b, 0x8f, 0xe8, 0x5c, 0xee, 0x4e, - 0x5e, 0x86, 0x42, 0xa5, 0x23, 0x4d, 0xd8, 0xf8, 0xdd, 0x00, 0xf5, 0xa7, 0xd5, 0xb7, 0x47, 0xb9, - 0x80, 0x5f, 0x64, 0x6a, 0xb4, 0x16, 0xec, 0x37, 0xe5, 0x49, 0x85, 0x57, 0x74, 0x85, 0xa5, 0x31, - 0x32, 0x55, 0x5f, 0x1f, 0x14, 0xa8, 0x20, 0xc3, 0x71, 0x71, 0xb7, 0x9f, 0xb9, 0xb8, 0x99, 0xc4, - 0x27, 0x9b, 0x68, 0x57, 0x92, 0xa3, 0x24, 0x46, 0xe3, 0x47, 0x03, 0xe4, 0xe5, 0x6a, 0x82, 0xaf, - 0x81, 0x32, 0xf6, 0xe9, 0xc7, 0x01, 0x0b, 0x7d, 0x5e, 0x35, 0xd4, 0xe8, 0xac, 0xc6, 0x91, 0x59, - 0xde, 0x3e, 0xd8, 0x4d, 0x40, 0x34, 0xb1, 0xc3, 0x4d, 0x50, 0xc1, 0x3e, 0x4d, 0x27, 0x6d, 0x59, - 0x5d, 0x5f, 0x93, 0xe3, 0xb1, 0x7d, 0xb0, 0x9b, 0x4e, 0xd7, 0xf4, 0x1d, 0xc9, 0x1f, 0x10, 0xce, - 0xc2, 0xc0, 0xd5, 0x9b, 0x55, 0xf3, 0xa3, 0x31, 0x88, 0x26, 0x76, 0xf8, 0x3a, 0x28, 0x70, 0x97, - 0xf9, 0x44, 0xef, 0xc5, 0xab, 0x32, 0xed, 0x43, 0x09, 0x9c, 0x45, 0x66, 0x59, 0x7d, 0xa8, 0x89, - 0x48, 0x2e, 0x35, 0xbe, 0x37, 0x00, 0xcc, 0xae, 0x5e, 0xf8, 0x01, 0x00, 0x2c, 0x3d, 0xe9, 0x92, - 0x4c, 0xf5, 0x57, 0xa5, 0xe8, 0x59, 0x64, 0xae, 0xa6, 0x27, 0x45, 0x39, 0xe5, 0x02, 0x0f, 0x40, - 0x5e, 0xae, 0x6b, 0xad, 0x3c, 0xd6, 0xdf, 0xd3, 0x81, 0x89, 0xa6, 0xc9, 0x13, 0x52, 0x4c, 0x8d, - 0xef, 0x0c, 0x70, 0xe5, 0x90, 0x04, 0x23, 0xea, 0x12, 0x44, 0x3a, 0x24, 0x20, 0x9e, 0x4b, 0xa0, - 0x0d, 0xca, 0xe9, 0x66, 0xd5, 0x7a, 0xb8, 0xae, 0x7d, 0xcb, 0xe9, 0x16, 0x46, 0x93, 0x3b, 0xa9, - 0x76, 0x2e, 0x5f, 0xa8, 0x9d, 0xd7, 0x41, 0xde, 0xc7, 0xa2, 0x57, 0xcd, 0xa9, 0x1b, 0x25, 0x69, - 0x3d, 0xc0, 0xa2, 0x87, 0x14, 0xaa, 0xac, 0x2c, 0x10, 0xaa, 0xb9, 0x05, 0x6d, 0x65, 0x81, 0x40, - 0x0a, 0x6d, 0xfc, 0x76, 0x09, 0xac, 0x3f, 0xc0, 0x03, 0xda, 0x7e, 0xa1, 0xd7, 0x2f, 0xf4, 0xfa, - 0xbf, 0xa5, 0xd7, 0x59, 0x35, 0x05, 0xff, 0xae, 0x9a, 0x9e, 0x1a, 0xa0, 0x96, 0x99, 0xb5, 0xe7, - 0xad, 0xa7, 0x5f, 0x66, 0xf4, 0xf4, 0xdd, 0xc5, 0x47, 0x28, 0x93, 0x7d, 0x46, 0x51, 0xff, 0x30, - 0x40, 0xe3, 0xe9, 0x35, 0x3e, 0x07, 0x4d, 0x1d, 0xce, 0x6a, 0xea, 0x27, 0xff, 0xa0, 0xc0, 0x45, - 0x54, 0xf5, 0x07, 0x03, 0xfc, 0xef, 0x9c, 0x75, 0x06, 0x31, 0x28, 0xf2, 
0x64, 0xfd, 0xeb, 0x1a, - 0x6f, 0x2d, 0x9e, 0xc8, 0xbc, 0x6e, 0x38, 0x95, 0x38, 0x32, 0x8b, 0x63, 0x74, 0xcc, 0x0b, 0x9b, - 0xa0, 0xe4, 0x62, 0x27, 0xf4, 0xda, 0x5a, 0xb8, 0x56, 0x9c, 0x15, 0xd9, 0x93, 0x9d, 0xed, 0x04, - 0x43, 0xa9, 0x15, 0xbe, 0x0c, 0x72, 0x61, 0x30, 0xd0, 0x1a, 0x51, 0x8c, 0x23, 0x33, 0x77, 0x1f, - 0xed, 0x21, 0x89, 0x39, 0x37, 0x4e, 0x4e, 0x6b, 0x4b, 0x8f, 0x4f, 0x6b, 0x4b, 0x4f, 0x4e, 0x6b, - 0x4b, 0xdf, 0xc4, 0x35, 0xe3, 0x24, 0xae, 0x19, 0x8f, 0xe3, 0x9a, 0xf1, 0x24, 0xae, 0x19, 0xbf, - 0xc4, 0x35, 0xe3, 0xdb, 0x5f, 0x6b, 0x4b, 0x9f, 0x15, 0x75, 0x6a, 0x7f, 0x06, 0x00, 0x00, 0xff, - 0xff, 0xc3, 0x6f, 0x8b, 0x7e, 0x2c, 0x0f, 0x00, 0x00, + // 906 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x54, 0xcf, 0x6f, 0xe3, 0x44, + 0x14, 0x8e, 0x37, 0x29, 0x49, 0x26, 0x89, 0x76, 0x3b, 0x80, 0x14, 0xaa, 0x95, 0x1d, 0xe5, 0x80, + 0x22, 0xa1, 0xb5, 0x49, 0x41, 0x08, 0x21, 0x10, 0xaa, 0x0b, 0x0b, 0x95, 0xba, 0xbb, 0x61, 0x0a, + 0xbb, 0x12, 0xe2, 0xc0, 0xc4, 0x79, 0x49, 0x86, 0xf8, 0x97, 0x66, 0xc6, 0x59, 0x7a, 0x43, 0xe2, + 0x1f, 0x40, 0x42, 0xfc, 0x0d, 0xfc, 0x15, 0xdc, 0x7b, 0xdc, 0x0b, 0x62, 0x4f, 0x16, 0x35, 0x67, + 0x0e, 0x5c, 0x7b, 0x42, 0x63, 0x3b, 0x71, 0xd2, 0x6c, 0xbb, 0xe9, 0x85, 0x03, 0x37, 0xcf, 0xf7, + 0xe6, 0xfb, 0xde, 0xfb, 0x9e, 0xdf, 0x1b, 0xf4, 0xc5, 0xec, 0x7d, 0x61, 0xb2, 0xc0, 0x9a, 0x45, + 0x43, 0xe0, 0x3e, 0x48, 0x10, 0xd6, 0x1c, 0xfc, 0x51, 0xc0, 0xad, 0x3c, 0x40, 0x43, 0x66, 0xd1, + 0x91, 0xc7, 0x84, 0x60, 0x81, 0xcf, 0x61, 0xc2, 0x84, 0xe4, 0x54, 0xb2, 0xc0, 0xb7, 0xe6, 0xfd, + 0x21, 0x48, 0xda, 0xb7, 0x26, 0xe0, 0x03, 0xa7, 0x12, 0x46, 0x66, 0xc8, 0x03, 0x19, 0xe0, 0x5e, + 0xc6, 0x34, 0x69, 0xc8, 0xcc, 0x17, 0x32, 0xcd, 0x9c, 0xb9, 0x77, 0x6f, 0xc2, 0xe4, 0x34, 0x1a, + 0x9a, 0x4e, 0xe0, 0x59, 0x93, 0x60, 0x12, 0x58, 0xa9, 0xc0, 0x30, 0x1a, 0xa7, 0xa7, 0xf4, 0x90, + 0x7e, 0x65, 0xc2, 0x7b, 0xef, 0x16, 0x25, 0x79, 0xd4, 0x99, 0x32, 0x1f, 0xf8, 0xa9, 0x15, 0xce, + 0x26, 0x0a, 0x10, 0x96, 0x07, 0x92, 0x5a, 0xf3, 0x8d, 0x72, 0xf6, 0xac, 0xab, 0x58, 0x3c, 0xf2, + 0x25, 0xf3, 0x60, 0x83, 0xf0, 0xde, 0xcb, 0x08, 0xc2, 0x99, 0x82, 0x47, 0x2f, 0xf3, 0xba, 0xbf, + 0x6b, 0xe8, 0xee, 0x83, 0x48, 0x52, 0xc9, 0xfc, 0xc9, 0x13, 0x18, 0x4e, 0x83, 0x60, 0x76, 0x18, + 0xf8, 0x63, 0x36, 0x89, 0x32, 0xdb, 0xf8, 0x5b, 0x54, 0x53, 0x45, 0x8e, 0xa8, 0xa4, 0x6d, 0xad, + 0xa3, 0xf5, 0x1a, 0xfb, 0x6f, 0x9b, 0x45, 0xaf, 0x96, 0xb9, 0xcc, 0x70, 0x36, 0x51, 0x80, 0x30, + 0xd5, 0x6d, 0x73, 0xde, 0x37, 0x1f, 0x0d, 0xbf, 0x03, 0x47, 0x3e, 0x00, 0x49, 0x6d, 0x7c, 0x16, + 0x1b, 0xa5, 0x24, 0x36, 0x50, 0x81, 0x91, 0xa5, 0x2a, 0x3e, 0x41, 0xb5, 0x3c, 0xb3, 0x68, 0xdf, + 0xea, 0x94, 0x7b, 0x8d, 0xfd, 0xbe, 0xb9, 0xed, 0xdf, 0x30, 0x73, 0xa6, 0x5d, 0x51, 0x29, 0x48, + 0xed, 0x69, 0x2e, 0xd4, 0xfd, 0x5b, 0x43, 0x9d, 0xeb, 0x7c, 0x1d, 0x33, 0x21, 0xf1, 0x37, 0x1b, + 0xde, 0xcc, 0xed, 0xbc, 0x29, 0x76, 0xea, 0xec, 0x4e, 0xee, 0xac, 0xb6, 0x40, 0x56, 0x7c, 0xcd, + 0xd0, 0x0e, 0x93, 0xe0, 0x2d, 0x4c, 0xdd, 0xdf, 0xde, 0xd4, 0x75, 0x85, 0xdb, 0xad, 0x3c, 0xe5, + 0xce, 0x91, 0x12, 0x27, 0x59, 0x8e, 0xee, 0xcf, 0x1a, 0xaa, 0x90, 0xc8, 0x05, 0xfc, 0x16, 0xaa, + 0xd3, 0x90, 0x7d, 0xc6, 0x83, 0x28, 0x14, 0x6d, 0xad, 0x53, 0xee, 0xd5, 0xed, 0x56, 0x12, 0x1b, + 0xf5, 0x83, 0xc1, 0x51, 0x06, 0x92, 0x22, 0x8e, 0xfb, 0xa8, 0x41, 0x43, 0xf6, 0x18, 0xb8, 0x2a, + 0x25, 0x2b, 0xb4, 0x6e, 0xdf, 0x4e, 0x62, 0xa3, 0x71, 0x30, 0x38, 0x5a, 0xc0, 0x64, 0xf5, 0x8e, + 0xd2, 0xe7, 0x20, 0x82, 0x88, 0x3b, 0x20, 0xda, 0xe5, 0x42, 0x9f, 0x2c, 0x40, 0x52, 0xc4, 
0xbb, + 0xbf, 0x6a, 0x08, 0xab, 0xaa, 0x9e, 0x30, 0x39, 0x7d, 0x14, 0x42, 0xe6, 0x40, 0xe0, 0x8f, 0x11, + 0x0a, 0x96, 0xa7, 0xbc, 0x48, 0x23, 0x9d, 0x8f, 0x25, 0x7a, 0x11, 0x1b, 0xad, 0xe5, 0xe9, 0xcb, + 0xd3, 0x10, 0xc8, 0x0a, 0x05, 0x0f, 0x50, 0x85, 0x47, 0x2e, 0xb4, 0x6f, 0x6d, 0xfc, 0xb4, 0x97, + 0x74, 0x56, 0x15, 0x63, 0x37, 0xf3, 0x0e, 0xa6, 0x0d, 0x23, 0xa9, 0x52, 0xf7, 0x47, 0x0d, 0xdd, + 0x39, 0x01, 0x3e, 0x67, 0x0e, 0x10, 0x18, 0x03, 0x07, 0xdf, 0x01, 0x6c, 0xa1, 0xba, 0x4f, 0x3d, + 0x10, 0x21, 0x75, 0x20, 0x1d, 0x90, 0xba, 0xbd, 0x9b, 0x73, 0xeb, 0x0f, 0x17, 0x01, 0x52, 0xdc, + 0xc1, 0x1d, 0x54, 0x51, 0x87, 0xb4, 0xae, 0x7a, 0x91, 0x47, 0xdd, 0x25, 0x69, 0x04, 0xdf, 0x45, + 0x95, 0x90, 0xca, 0x69, 0xbb, 0x9c, 0xde, 0xa8, 0xa9, 0xe8, 0x80, 0xca, 0x29, 0x49, 0xd1, 0xee, + 0x1f, 0x1a, 0xd2, 0x1f, 0x53, 0x97, 0x8d, 0xfe, 0x77, 0xfb, 0xf8, 0x8f, 0x86, 0xba, 0xd7, 0x3b, + 0xfb, 0x0f, 0x36, 0xd2, 0x5b, 0xdf, 0xc8, 0xcf, 0xb7, 0xb7, 0x75, 0x7d, 0xe9, 0x57, 0xec, 0xe4, + 0x2f, 0x15, 0x54, 0xcd, 0xaf, 0x2f, 0x27, 0x43, 0xbb, 0x72, 0x32, 0x9e, 0xa2, 0xa6, 0xe3, 0x32, + 0xf0, 0x65, 0x26, 0x9d, 0xcf, 0xf6, 0x47, 0x37, 0x6e, 0xfd, 0xe1, 0x8a, 0x88, 0xfd, 0x5a, 0x9e, + 0xa8, 0xb9, 0x8a, 0x92, 0xb5, 0x44, 0x98, 0xa2, 0x1d, 0xb5, 0x02, 0xd9, 0x36, 0x37, 0xf6, 0x3f, + 0xbc, 0xd9, 0x36, 0xad, 0xaf, 0x76, 0xd1, 0x09, 0x15, 0x13, 0x24, 0x53, 0xc6, 0xc7, 0xa8, 0x35, + 0xa6, 0xcc, 0x8d, 0x38, 0x0c, 0x02, 0x97, 0x39, 0xa7, 0xed, 0x4a, 0xda, 0x86, 0x37, 0x93, 0xd8, + 0x68, 0xdd, 0x5f, 0x0d, 0x5c, 0xc4, 0xc6, 0xee, 0x1a, 0x90, 0xae, 0xfe, 0x3a, 0x19, 0x7f, 0x8f, + 0x76, 0x97, 0x2b, 0x77, 0x02, 0x2e, 0x38, 0x32, 0xe0, 0xed, 0x9d, 0xb4, 0x5d, 0xef, 0x6c, 0x39, + 0x2d, 0x74, 0x08, 0xee, 0x82, 0x6a, 0xbf, 0x9e, 0xc4, 0xc6, 0xee, 0xc3, 0xcb, 0x8a, 0x64, 0x33, + 0x09, 0xfe, 0x04, 0x35, 0x04, 0x1b, 0xc1, 0xa7, 0xe3, 0x31, 0x38, 0x52, 0xb4, 0x5f, 0x49, 0x5d, + 0x74, 0xd5, 0x7b, 0x79, 0x52, 0xc0, 0x17, 0xb1, 0x71, 0xbb, 0x38, 0x1e, 0xba, 0x54, 0x08, 0xb2, + 0x4a, 0xeb, 0xfe, 0xa6, 0xa1, 0x57, 0x5f, 0xf0, 0xb3, 0x30, 0x45, 0x55, 0x91, 0x3d, 0x41, 0xf9, + 0xec, 0x7f, 0xb0, 0xfd, 0xaf, 0xb8, 0xfc, 0x76, 0xd9, 0x8d, 0x24, 0x36, 0xaa, 0x0b, 0x74, 0xa1, + 0x8b, 0x7b, 0xa8, 0xe6, 0x50, 0x3b, 0xf2, 0x47, 0xf9, 0xe3, 0xd9, 0xb4, 0x9b, 0x6a, 0x57, 0x0e, + 0x0f, 0x32, 0x8c, 0x2c, 0xa3, 0xf8, 0x0d, 0x54, 0x8e, 0xb8, 0x9b, 0xbf, 0x53, 0xd5, 0x24, 0x36, + 0xca, 0x5f, 0x91, 0x63, 0xa2, 0x30, 0xfb, 0xde, 0xd9, 0xb9, 0x5e, 0x7a, 0x76, 0xae, 0x97, 0x9e, + 0x9f, 0xeb, 0xa5, 0x1f, 0x12, 0x5d, 0x3b, 0x4b, 0x74, 0xed, 0x59, 0xa2, 0x6b, 0xcf, 0x13, 0x5d, + 0xfb, 0x33, 0xd1, 0xb5, 0x9f, 0xfe, 0xd2, 0x4b, 0x5f, 0x57, 0xf3, 0xd2, 0xfe, 0x0d, 0x00, 0x00, + 0xff, 0xff, 0x85, 0x06, 0x8c, 0x7f, 0xae, 0x09, 0x00, 0x00, } diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/types.go b/vendor/k8s.io/api/admissionregistration/v1beta1/types.go index 6b8c5a23a..0b948ba1d 100644 --- a/vendor/k8s.io/api/admissionregistration/v1beta1/types.go +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/types.go @@ -49,32 +49,8 @@ type Rule struct { // Depending on the enclosing object, subresources might not be allowed. // Required. Resources []string `json:"resources,omitempty" protobuf:"bytes,3,rep,name=resources"` - - // scope specifies the scope of this rule. - // Valid values are "Cluster", "Namespaced", and "*" - // "Cluster" means that only cluster-scoped resources will match this rule. - // Namespace API objects are cluster-scoped. - // "Namespaced" means that only namespaced resources will match this rule. 
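// Editorial aside, not part of the vendored generated.pb.go above: every
// Unmarshal method in that file decodes protobuf field tags and lengths with
// the same hand-rolled base-128 varint loop. A minimal standalone sketch of
// that scheme follows; the names are illustrative, not the generated
// identifiers.
package main

import (
	"errors"
	"fmt"
)

// readVarint mirrors the generated decoding loop: each byte contributes its
// low seven bits, and a clear high bit (b < 0x80) terminates the value. It
// returns the decoded value and the number of bytes consumed.
func readVarint(data []byte) (uint64, int, error) {
	var v uint64
	for i, shift := 0, uint(0); i < len(data); i, shift = i+1, shift+7 {
		if shift >= 64 {
			return 0, 0, errors.New("varint overflows uint64") // ErrIntOverflowGenerated analogue
		}
		b := data[i]
		v |= uint64(b&0x7F) << shift
		if b < 0x80 {
			return v, i + 1, nil
		}
	}
	return 0, 0, errors.New("unexpected EOF") // io.ErrUnexpectedEOF analogue
}

func main() {
	// 0xAC 0x02 decodes to 300: 0x2C | (0x02 << 7) = 44 + 256. The field
	// number and wire type are then recovered as wire>>3 and wire&0x7,
	// exactly as in the generated code above.
	v, n, err := readVarint([]byte{0xAC, 0x02})
	fmt.Println(v, n, err) // 300 2 <nil>
}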
- // "*" means that there are no scope restrictions. - // Subresources match the scope of their parent resource. - // Default is "*". - // - // +optional - Scope *ScopeType `json:"scope,omitempty" protobuf:"bytes,4,rep,name=scope"` } -type ScopeType string - -const ( - // ClusterScope means that scope is limited to cluster-scoped objects. - // Namespace objects are cluster-scoped. - ClusterScope ScopeType = "Cluster" - // NamespacedScope means that scope is limited to namespaced objects. - NamespacedScope ScopeType = "Namespaced" - // AllScopes means that all scopes are included. - AllScopes ScopeType = "*" -) - type FailurePolicyType string const ( @@ -84,16 +60,6 @@ const ( Fail FailurePolicyType = "Fail" ) -// MatchPolicyType specifies the type of match policy -type MatchPolicyType string - -const ( - // Exact means requests should only be sent to the webhook if they exactly match a given rule - Exact MatchPolicyType = "Exact" - // Equivalent means requests should be sent to the webhook if they modify a resource listed in rules via another API group or version. - Equivalent MatchPolicyType = "Equivalent" -) - type SideEffectClass string const ( @@ -124,7 +90,7 @@ type ValidatingWebhookConfiguration struct { // +optional // +patchMergeKey=name // +patchStrategy=merge - Webhooks []ValidatingWebhook `json:"webhooks,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=Webhooks"` + Webhooks []Webhook `json:"webhooks,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=Webhooks"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -154,7 +120,7 @@ type MutatingWebhookConfiguration struct { // +optional // +patchMergeKey=name // +patchStrategy=merge - Webhooks []MutatingWebhook `json:"webhooks,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=Webhooks"` + Webhooks []Webhook `json:"webhooks,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=Webhooks"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -170,140 +136,8 @@ type MutatingWebhookConfigurationList struct { Items []MutatingWebhookConfiguration `json:"items" protobuf:"bytes,2,rep,name=items"` } -// ValidatingWebhook describes an admission webhook and the resources and operations it applies to. -type ValidatingWebhook struct { - // The name of the admission webhook. - // Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where - // "imagepolicy" is the name of the webhook, and kubernetes.io is the name - // of the organization. - // Required. - Name string `json:"name" protobuf:"bytes,1,opt,name=name"` - - // ClientConfig defines how to communicate with the hook. - // Required - ClientConfig WebhookClientConfig `json:"clientConfig" protobuf:"bytes,2,opt,name=clientConfig"` - - // Rules describes what operations on what resources/subresources the webhook cares about. - // The webhook cares about an operation if it matches _any_ Rule. - // However, in order to prevent ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks - // from putting the cluster in a state which cannot be recovered from without completely - // disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called - // on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects. 
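// Editorial sketch of the "_any_ Rule" semantics described in the comment
// above: a webhook cares about an operation as soon as one rule matches.
// The types here are simplified stand-ins, not the vendored API
// (APIVersions is omitted for brevity).
package main

import "fmt"

// rule is a cut-down analogue of RuleWithOperations.
type rule struct {
	Operations []string // e.g. "CREATE", "UPDATE", or "*"
	APIGroups  []string
	Resources  []string
}

// contains reports whether v is listed, treating "*" as a wildcard.
func contains(list []string, v string) bool {
	for _, item := range list {
		if item == "*" || item == v {
			return true
		}
	}
	return false
}

// matchesAny reports whether a request matches at least one rule.
func matchesAny(rules []rule, op, group, resource string) bool {
	for _, r := range rules {
		if contains(r.Operations, op) && contains(r.APIGroups, group) && contains(r.Resources, resource) {
			return true
		}
	}
	return false
}

func main() {
	rules := []rule{{
		Operations: []string{"CREATE"},
		APIGroups:  []string{"apps"},
		Resources:  []string{"deployments"},
	}}
	fmt.Println(matchesAny(rules, "CREATE", "apps", "deployments")) // true
	fmt.Println(matchesAny(rules, "DELETE", "apps", "deployments")) // false
}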
- Rules []RuleWithOperations `json:"rules,omitempty" protobuf:"bytes,3,rep,name=rules"` - - // FailurePolicy defines how unrecognized errors from the admission endpoint are handled - - // allowed values are Ignore or Fail. Defaults to Ignore. - // +optional - FailurePolicy *FailurePolicyType `json:"failurePolicy,omitempty" protobuf:"bytes,4,opt,name=failurePolicy,casttype=FailurePolicyType"` - - // matchPolicy defines how the "rules" list is used to match incoming requests. - // Allowed values are "Exact" or "Equivalent". - // - // - Exact: match a request only if it exactly matches a specified rule. - // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, - // but "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`, - // a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the webhook. - // - // - Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. - // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, - // and "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`, - // a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the webhook. - // - // Defaults to "Exact" - // +optional - MatchPolicy *MatchPolicyType `json:"matchPolicy,omitempty" protobuf:"bytes,9,opt,name=matchPolicy,casttype=MatchPolicyType"` - - // NamespaceSelector decides whether to run the webhook on an object based - // on whether the namespace for that object matches the selector. If the - // object itself is a namespace, the matching is performed on - // object.metadata.labels. If the object is another cluster scoped resource, - // it never skips the webhook. - // - // For example, to run the webhook on any objects whose namespace is not - // associated with "runlevel" of "0" or "1"; you will set the selector as - // follows: - // "namespaceSelector": { - // "matchExpressions": [ - // { - // "key": "runlevel", - // "operator": "NotIn", - // "values": [ - // "0", - // "1" - // ] - // } - // ] - // } - // - // If instead you want to only run the webhook on any objects whose - // namespace is associated with the "environment" of "prod" or "staging"; - // you will set the selector as follows: - // "namespaceSelector": { - // "matchExpressions": [ - // { - // "key": "environment", - // "operator": "In", - // "values": [ - // "prod", - // "staging" - // ] - // } - // ] - // } - // - // See - // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels - // for more examples of label selectors. - // - // Default to the empty LabelSelector, which matches everything. - // +optional - NamespaceSelector *metav1.LabelSelector `json:"namespaceSelector,omitempty" protobuf:"bytes,5,opt,name=namespaceSelector"` - - // ObjectSelector decides whether to run the webhook based on if the - // object has matching labels. objectSelector is evaluated against both - // the oldObject and newObject that would be sent to the webhook, and - // is considered to match if either object matches the selector. A null - // object (oldObject in the case of create, or newObject in the case of - // delete) or an object that cannot have labels (like a - // DeploymentRollback or a PodProxyOptions object) is not considered to - // match. - // Use the object selector only if the webhook is opt-in, because end - // users may skip the admission webhook by setting the labels. 
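// Editorial illustration of the namespaceSelector example documented above
// ("runlevel NotIn [0, 1]"), built with the apimachinery helpers this
// vendor tree already pulls in; assumes the vendored apimachinery exposes
// LabelSelectorAsSelector, as recent releases do.
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	// Run the webhook only on objects whose namespace is not labeled
	// runlevel=0 or runlevel=1.
	sel := &metav1.LabelSelector{
		MatchExpressions: []metav1.LabelSelectorRequirement{{
			Key:      "runlevel",
			Operator: metav1.LabelSelectorOpNotIn,
			Values:   []string{"0", "1"},
		}},
	}
	s, err := metav1.LabelSelectorAsSelector(sel)
	if err != nil {
		panic(err)
	}
	fmt.Println(s.Matches(labels.Set{"runlevel": "2"})) // true: webhook runs
	fmt.Println(s.Matches(labels.Set{"runlevel": "0"})) // false: namespace skipped
}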
- // Default to the empty LabelSelector, which matches everything. - // +optional - ObjectSelector *metav1.LabelSelector `json:"objectSelector,omitempty" protobuf:"bytes,10,opt,name=objectSelector"` - - // SideEffects states whether this webhookk has side effects. - // Acceptable values are: Unknown, None, Some, NoneOnDryRun - // Webhooks with side effects MUST implement a reconciliation system, since a request may be - // rejected by a future step in the admission change and the side effects therefore need to be undone. - // Requests with the dryRun attribute will be auto-rejected if they match a webhook with - // sideEffects == Unknown or Some. Defaults to Unknown. - // +optional - SideEffects *SideEffectClass `json:"sideEffects,omitempty" protobuf:"bytes,6,opt,name=sideEffects,casttype=SideEffectClass"` - - // TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, - // the webhook call will be ignored or the API call will fail based on the - // failure policy. - // The timeout value must be between 1 and 30 seconds. - // Default to 30 seconds. - // +optional - TimeoutSeconds *int32 `json:"timeoutSeconds,omitempty" protobuf:"varint,7,opt,name=timeoutSeconds"` - - // AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` - // versions the Webhook expects. API server will try to use first version in - // the list which it supports. If none of the versions specified in this list - // supported by API server, validation will fail for this object. - // If a persisted webhook configuration specifies allowed versions and does not - // include any versions known to the API Server, calls to the webhook will fail - // and be subject to the failure policy. - // Default to `['v1beta1']`. - // +optional - AdmissionReviewVersions []string `json:"admissionReviewVersions,omitempty" protobuf:"bytes,8,rep,name=admissionReviewVersions"` -} - -// MutatingWebhook describes an admission webhook and the resources and operations it applies to. -type MutatingWebhook struct { +// Webhook describes an admission webhook and the resources and operations it applies to. +type Webhook struct { // The name of the admission webhook. // Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where // "imagepolicy" is the name of the webhook, and kubernetes.io is the name @@ -328,23 +162,6 @@ type MutatingWebhook struct { // +optional FailurePolicy *FailurePolicyType `json:"failurePolicy,omitempty" protobuf:"bytes,4,opt,name=failurePolicy,casttype=FailurePolicyType"` - // matchPolicy defines how the "rules" list is used to match incoming requests. - // Allowed values are "Exact" or "Equivalent". - // - // - Exact: match a request only if it exactly matches a specified rule. - // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, - // but "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`, - // a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the webhook. - // - // - Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. - // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, - // and "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`, - // a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the webhook. 
- // - // Defaults to "Exact" - // +optional - MatchPolicy *MatchPolicyType `json:"matchPolicy,omitempty" protobuf:"bytes,9,opt,name=matchPolicy,casttype=MatchPolicyType"` - // NamespaceSelector decides whether to run the webhook on an object based // on whether the namespace for that object matches the selector. If the // object itself is a namespace, the matching is performed on @@ -391,20 +208,6 @@ type MutatingWebhook struct { // +optional NamespaceSelector *metav1.LabelSelector `json:"namespaceSelector,omitempty" protobuf:"bytes,5,opt,name=namespaceSelector"` - // ObjectSelector decides whether to run the webhook based on if the - // object has matching labels. objectSelector is evaluated against both - // the oldObject and newObject that would be sent to the webhook, and - // is considered to match if either object matches the selector. A null - // object (oldObject in the case of create, or newObject in the case of - // delete) or an object that cannot have labels (like a - // DeploymentRollback or a PodProxyOptions object) is not considered to - // match. - // Use the object selector only if the webhook is opt-in, because end - // users may skip the admission webhook by setting the labels. - // Default to the empty LabelSelector, which matches everything. - // +optional - ObjectSelector *metav1.LabelSelector `json:"objectSelector,omitempty" protobuf:"bytes,11,opt,name=objectSelector"` - // SideEffects states whether this webhookk has side effects. // Acceptable values are: Unknown, None, Some, NoneOnDryRun // Webhooks with side effects MUST implement a reconciliation system, since a request may be @@ -413,58 +216,8 @@ type MutatingWebhook struct { // sideEffects == Unknown or Some. Defaults to Unknown. // +optional SideEffects *SideEffectClass `json:"sideEffects,omitempty" protobuf:"bytes,6,opt,name=sideEffects,casttype=SideEffectClass"` - - // TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, - // the webhook call will be ignored or the API call will fail based on the - // failure policy. - // The timeout value must be between 1 and 30 seconds. - // Default to 30 seconds. - // +optional - TimeoutSeconds *int32 `json:"timeoutSeconds,omitempty" protobuf:"varint,7,opt,name=timeoutSeconds"` - - // AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` - // versions the Webhook expects. API server will try to use first version in - // the list which it supports. If none of the versions specified in this list - // supported by API server, validation will fail for this object. - // If a persisted webhook configuration specifies allowed versions and does not - // include any versions known to the API Server, calls to the webhook will fail - // and be subject to the failure policy. - // Default to `['v1beta1']`. - // +optional - AdmissionReviewVersions []string `json:"admissionReviewVersions,omitempty" protobuf:"bytes,8,rep,name=admissionReviewVersions"` - - // reinvocationPolicy indicates whether this webhook should be called multiple times as part of a single admission evaluation. - // Allowed values are "Never" and "IfNeeded". - // - // Never: the webhook will not be called more than once in a single admission evaluation. - // - // IfNeeded: the webhook will be called at least one additional time as part of the admission evaluation - // if the object being admitted is modified by other admission plugins after the initial webhook call. 
- // Webhooks that specify this option *must* be idempotent, able to process objects they previously admitted. - // Note: - // * the number of additional invocations is not guaranteed to be exactly one. - // * if additional invocations result in further modifications to the object, webhooks are not guaranteed to be invoked again. - // * webhooks that use this option may be reordered to minimize the number of additional invocations. - // * to validate an object after all mutations are guaranteed complete, use a validating admission webhook instead. - // - // Defaults to "Never". - // +optional - ReinvocationPolicy *ReinvocationPolicyType `json:"reinvocationPolicy,omitempty" protobuf:"bytes,10,opt,name=reinvocationPolicy,casttype=ReinvocationPolicyType"` } -// ReinvocationPolicyType specifies what type of policy the admission hook uses. -type ReinvocationPolicyType string - -const ( - // NeverReinvocationPolicy indicates that the webhook must not be called more than once in a - // single admission evaluation. - NeverReinvocationPolicy ReinvocationPolicyType = "Never" - // IfNeededReinvocationPolicy indicates that the webhook may be called at least one - // additional time as part of the admission evaluation if the object being admitted is - // modified by other admission plugins after the initial webhook call. - IfNeededReinvocationPolicy ReinvocationPolicyType = "IfNeeded" -) - // RuleWithOperations is a tuple of Operations and Resources. It is recommended to make // sure that all the tuple expansions are valid. type RuleWithOperations struct { @@ -493,7 +246,7 @@ const ( // connection with the webhook type WebhookClientConfig struct { // `url` gives the location of the webhook, in standard URL form - // (`scheme://host:port/path`). Exactly one of `url` or `service` + // (`[scheme://]host:port/path`). Exactly one of `url` or `service` // must be specified. // // The `host` should not refer to a service running in the cluster; use @@ -526,13 +279,15 @@ type WebhookClientConfig struct { // // If the webhook is running within the cluster, then you should use `service`. // + // Port 443 will be used if it is open, otherwise it is an error. + // // +optional - Service *ServiceReference `json:"service,omitempty" protobuf:"bytes,1,opt,name=service"` + Service *ServiceReference `json:"service" protobuf:"bytes,1,opt,name=service"` - // `caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. - // If unspecified, system trust roots on the apiserver are used. - // +optional - CABundle []byte `json:"caBundle,omitempty" protobuf:"bytes,2,opt,name=caBundle"` + // `caBundle` is a PEM encoded CA bundle which will be used to validate + // the webhook's server certificate. + // Required. + CABundle []byte `json:"caBundle" protobuf:"bytes,2,opt,name=caBundle"` } // ServiceReference holds a reference to Service.legacy.k8s.io @@ -548,10 +303,4 @@ type ServiceReference struct { // this service. // +optional Path *string `json:"path,omitempty" protobuf:"bytes,3,opt,name=path"` - - // If specified, the port on the service that hosting webhook. - // Default to 443 for backward compatibility. - // `port` should be a valid port number (1-65535, inclusive). 
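// Editorial sketch: with the Port field dropped at this revision, a
// service-backed client config implies port 443 and must carry a CA
// bundle. This uses the vendored v1beta1 package; the namespace, name,
// and path below are hypothetical.
package main

import (
	"fmt"

	admissionv1beta1 "k8s.io/api/admissionregistration/v1beta1"
)

func main() {
	path := "/validate" // hypothetical webhook endpoint path
	cfg := admissionv1beta1.WebhookClientConfig{
		Service: &admissionv1beta1.ServiceReference{
			Namespace: "tekton-pipelines", // hypothetical
			Name:      "triggers-webhook", // hypothetical
			Path:      &path,
		},
		// Required at this revision; a real bundle would be PEM-encoded.
		CABundle: []byte("-----BEGIN CERTIFICATE-----\n..."),
	}
	fmt.Printf("%+v\n", cfg)
}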
- // +optional - Port *int32 `json:"port,omitempty" protobuf:"varint,4,opt,name=port"` } diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go index 39e86db97..aab917a40 100644 --- a/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go @@ -27,25 +27,6 @@ package v1beta1 // Those methods can be generated by using hack/update-generated-swagger-docs.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. -var map_MutatingWebhook = map[string]string{ - "": "MutatingWebhook describes an admission webhook and the resources and operations it applies to.", - "name": "The name of the admission webhook. Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where \"imagepolicy\" is the name of the webhook, and kubernetes.io is the name of the organization. Required.", - "clientConfig": "ClientConfig defines how to communicate with the hook. Required", - "rules": "Rules describes what operations on what resources/subresources the webhook cares about. The webhook cares about an operation if it matches _any_ Rule. However, in order to prevent ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks from putting the cluster in a state which cannot be recovered from without completely disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects.", - "failurePolicy": "FailurePolicy defines how unrecognized errors from the admission endpoint are handled - allowed values are Ignore or Fail. Defaults to Ignore.", - "matchPolicy": "matchPolicy defines how the \"rules\" list is used to match incoming requests. Allowed values are \"Exact\" or \"Equivalent\".\n\n- Exact: match a request only if it exactly matches a specified rule. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, but \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the webhook.\n\n- Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, and \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the webhook.\n\nDefaults to \"Exact\"", - "namespaceSelector": "NamespaceSelector decides whether to run the webhook on an object based on whether the namespace for that object matches the selector. If the object itself is a namespace, the matching is performed on object.metadata.labels. 
If the object is another cluster scoped resource, it never skips the webhook.\n\nFor example, to run the webhook on any objects whose namespace is not associated with \"runlevel\" of \"0\" or \"1\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"runlevel\",\n \"operator\": \"NotIn\",\n \"values\": [\n \"0\",\n \"1\"\n ]\n }\n ]\n}\n\nIf instead you want to only run the webhook on any objects whose namespace is associated with the \"environment\" of \"prod\" or \"staging\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"environment\",\n \"operator\": \"In\",\n \"values\": [\n \"prod\",\n \"staging\"\n ]\n }\n ]\n}\n\nSee https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ for more examples of label selectors.\n\nDefault to the empty LabelSelector, which matches everything.", - "objectSelector": "ObjectSelector decides whether to run the webhook based on if the object has matching labels. objectSelector is evaluated against both the oldObject and newObject that would be sent to the webhook, and is considered to match if either object matches the selector. A null object (oldObject in the case of create, or newObject in the case of delete) or an object that cannot have labels (like a DeploymentRollback or a PodProxyOptions object) is not considered to match. Use the object selector only if the webhook is opt-in, because end users may skip the admission webhook by setting the labels. Default to the empty LabelSelector, which matches everything.", - "sideEffects": "SideEffects states whether this webhookk has side effects. Acceptable values are: Unknown, None, Some, NoneOnDryRun Webhooks with side effects MUST implement a reconciliation system, since a request may be rejected by a future step in the admission change and the side effects therefore need to be undone. Requests with the dryRun attribute will be auto-rejected if they match a webhook with sideEffects == Unknown or Some. Defaults to Unknown.", - "timeoutSeconds": "TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, the webhook call will be ignored or the API call will fail based on the failure policy. The timeout value must be between 1 and 30 seconds. Default to 30 seconds.", - "admissionReviewVersions": "AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` versions the Webhook expects. API server will try to use first version in the list which it supports. If none of the versions specified in this list supported by API server, validation will fail for this object. If a persisted webhook configuration specifies allowed versions and does not include any versions known to the API Server, calls to the webhook will fail and be subject to the failure policy. Default to `['v1beta1']`.", - "reinvocationPolicy": "reinvocationPolicy indicates whether this webhook should be called multiple times as part of a single admission evaluation. Allowed values are \"Never\" and \"IfNeeded\".\n\nNever: the webhook will not be called more than once in a single admission evaluation.\n\nIfNeeded: the webhook will be called at least one additional time as part of the admission evaluation if the object being admitted is modified by other admission plugins after the initial webhook call. Webhooks that specify this option *must* be idempotent, able to process objects they previously admitted. Note: * the number of additional invocations is not guaranteed to be exactly one. 
* if additional invocations result in further modifications to the object, webhooks are not guaranteed to be invoked again. * webhooks that use this option may be reordered to minimize the number of additional invocations. * to validate an object after all mutations are guaranteed complete, use a validating admission webhook instead.\n\nDefaults to \"Never\".", -} - -func (MutatingWebhook) SwaggerDoc() map[string]string { - return map_MutatingWebhook -} - var map_MutatingWebhookConfiguration = map[string]string{ "": "MutatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and may change the object.", "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata.", @@ -71,7 +52,6 @@ var map_Rule = map[string]string{ "apiGroups": "APIGroups is the API groups the resources belong to. '*' is all groups. If '*' is present, the length of the slice must be one. Required.", "apiVersions": "APIVersions is the API versions the resources belong to. '*' is all versions. If '*' is present, the length of the slice must be one. Required.", "resources": "Resources is a list of resources this rule applies to.\n\nFor example: 'pods' means pods. 'pods/log' means the log subresource of pods. '*' means all resources, but not subresources. 'pods/*' means all subresources of pods. '*/scale' means all scale subresources. '*/*' means all resources and their subresources.\n\nIf wildcard is present, the validation rule will ensure resources do not overlap with each other.\n\nDepending on the enclosing object, subresources might not be allowed. Required.", - "scope": "scope specifies the scope of this rule. Valid values are \"Cluster\", \"Namespaced\", and \"*\" \"Cluster\" means that only cluster-scoped resources will match this rule. Namespace API objects are cluster-scoped. \"Namespaced\" means that only namespaced resources will match this rule. \"*\" means that there are no scope restrictions. Subresources match the scope of their parent resource. Default is \"*\".", } func (Rule) SwaggerDoc() map[string]string { @@ -92,31 +72,12 @@ var map_ServiceReference = map[string]string{ "namespace": "`namespace` is the namespace of the service. Required", "name": "`name` is the name of the service. Required", "path": "`path` is an optional URL path which will be sent in any request to this service.", - "port": "If specified, the port on the service that hosting webhook. Default to 443 for backward compatibility. `port` should be a valid port number (1-65535, inclusive).", } func (ServiceReference) SwaggerDoc() map[string]string { return map_ServiceReference } -var map_ValidatingWebhook = map[string]string{ - "": "ValidatingWebhook describes an admission webhook and the resources and operations it applies to.", - "name": "The name of the admission webhook. Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where \"imagepolicy\" is the name of the webhook, and kubernetes.io is the name of the organization. Required.", - "clientConfig": "ClientConfig defines how to communicate with the hook. Required", - "rules": "Rules describes what operations on what resources/subresources the webhook cares about. The webhook cares about an operation if it matches _any_ Rule. 
However, in order to prevent ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks from putting the cluster in a state which cannot be recovered from without completely disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects.", - "failurePolicy": "FailurePolicy defines how unrecognized errors from the admission endpoint are handled - allowed values are Ignore or Fail. Defaults to Ignore.", - "matchPolicy": "matchPolicy defines how the \"rules\" list is used to match incoming requests. Allowed values are \"Exact\" or \"Equivalent\".\n\n- Exact: match a request only if it exactly matches a specified rule. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, but \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the webhook.\n\n- Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, and \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the webhook.\n\nDefaults to \"Exact\"", - "namespaceSelector": "NamespaceSelector decides whether to run the webhook on an object based on whether the namespace for that object matches the selector. If the object itself is a namespace, the matching is performed on object.metadata.labels. If the object is another cluster scoped resource, it never skips the webhook.\n\nFor example, to run the webhook on any objects whose namespace is not associated with \"runlevel\" of \"0\" or \"1\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"runlevel\",\n \"operator\": \"NotIn\",\n \"values\": [\n \"0\",\n \"1\"\n ]\n }\n ]\n}\n\nIf instead you want to only run the webhook on any objects whose namespace is associated with the \"environment\" of \"prod\" or \"staging\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"environment\",\n \"operator\": \"In\",\n \"values\": [\n \"prod\",\n \"staging\"\n ]\n }\n ]\n}\n\nSee https://kubernetes.io/docs/concepts/overview/working-with-objects/labels for more examples of label selectors.\n\nDefault to the empty LabelSelector, which matches everything.", - "objectSelector": "ObjectSelector decides whether to run the webhook based on if the object has matching labels. objectSelector is evaluated against both the oldObject and newObject that would be sent to the webhook, and is considered to match if either object matches the selector. A null object (oldObject in the case of create, or newObject in the case of delete) or an object that cannot have labels (like a DeploymentRollback or a PodProxyOptions object) is not considered to match. Use the object selector only if the webhook is opt-in, because end users may skip the admission webhook by setting the labels. Default to the empty LabelSelector, which matches everything.", - "sideEffects": "SideEffects states whether this webhookk has side effects. 
Acceptable values are: Unknown, None, Some, NoneOnDryRun Webhooks with side effects MUST implement a reconciliation system, since a request may be rejected by a future step in the admission change and the side effects therefore need to be undone. Requests with the dryRun attribute will be auto-rejected if they match a webhook with sideEffects == Unknown or Some. Defaults to Unknown.", - "timeoutSeconds": "TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, the webhook call will be ignored or the API call will fail based on the failure policy. The timeout value must be between 1 and 30 seconds. Default to 30 seconds.", - "admissionReviewVersions": "AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` versions the Webhook expects. API server will try to use first version in the list which it supports. If none of the versions specified in this list supported by API server, validation will fail for this object. If a persisted webhook configuration specifies allowed versions and does not include any versions known to the API Server, calls to the webhook will fail and be subject to the failure policy. Default to `['v1beta1']`.", -} - -func (ValidatingWebhook) SwaggerDoc() map[string]string { - return map_ValidatingWebhook -} - var map_ValidatingWebhookConfiguration = map[string]string{ "": "ValidatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and object without changing it.", "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata.", @@ -137,11 +98,25 @@ func (ValidatingWebhookConfigurationList) SwaggerDoc() map[string]string { return map_ValidatingWebhookConfigurationList } +var map_Webhook = map[string]string{ + "": "Webhook describes an admission webhook and the resources and operations it applies to.", + "name": "The name of the admission webhook. Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where \"imagepolicy\" is the name of the webhook, and kubernetes.io is the name of the organization. Required.", + "clientConfig": "ClientConfig defines how to communicate with the hook. Required", + "rules": "Rules describes what operations on what resources/subresources the webhook cares about. The webhook cares about an operation if it matches _any_ Rule. However, in order to prevent ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks from putting the cluster in a state which cannot be recovered from without completely disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects.", + "failurePolicy": "FailurePolicy defines how unrecognized errors from the admission endpoint are handled - allowed values are Ignore or Fail. Defaults to Ignore.", + "namespaceSelector": "NamespaceSelector decides whether to run the webhook on an object based on whether the namespace for that object matches the selector. If the object itself is a namespace, the matching is performed on object.metadata.labels. 
If the object is another cluster scoped resource, it never skips the webhook.\n\nFor example, to run the webhook on any objects whose namespace is not associated with \"runlevel\" of \"0\" or \"1\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"runlevel\",\n \"operator\": \"NotIn\",\n \"values\": [\n \"0\",\n \"1\"\n ]\n }\n ]\n}\n\nIf instead you want to only run the webhook on any objects whose namespace is associated with the \"environment\" of \"prod\" or \"staging\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"environment\",\n \"operator\": \"In\",\n \"values\": [\n \"prod\",\n \"staging\"\n ]\n }\n ]\n}\n\nSee https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ for more examples of label selectors.\n\nDefaults to the empty LabelSelector, which matches everything.", + "sideEffects": "SideEffects states whether this webhook has side effects. Acceptable values are: Unknown, None, Some, NoneOnDryRun. Webhooks with side effects MUST implement a reconciliation system, since a request may be rejected by a future step in the admission chain and the side effects therefore need to be undone. Requests with the dryRun attribute will be auto-rejected if they match a webhook with sideEffects == Unknown or Some. Defaults to Unknown.", +} + +func (Webhook) SwaggerDoc() map[string]string { + return map_Webhook +} + var map_WebhookClientConfig = map[string]string{ "": "WebhookClientConfig contains the information to make a TLS connection with the webhook", - "url": "`url` gives the location of the webhook, in standard URL form (`scheme://host:port/path`). Exactly one of `url` or `service` must be specified.\n\nThe `host` should not refer to a service running in the cluster; use the `service` field instead. The host might be resolved via external DNS in some apiservers (e.g., `kube-apiserver` cannot resolve in-cluster DNS as that would be a layering violation). `host` may also be an IP address.\n\nPlease note that using `localhost` or `127.0.0.1` as a `host` is risky unless you take great care to run this webhook on all hosts which run an apiserver which might need to make calls to this webhook. Such installs are likely to be non-portable, i.e., not easy to turn up in a new cluster.\n\nThe scheme must be \"https\"; the URL must begin with \"https://\".\n\nA path is optional, and if present may be any string permissible in a URL. You may use the path to pass an arbitrary string to the webhook, for example, a cluster identifier.\n\nAttempting to use a user or basic auth e.g. \"user:password@\" is not allowed. Fragments (\"#...\") and query parameters (\"?...\") are not allowed, either.", - "service": "`service` is a reference to the service for this webhook. Either `service` or `url` must be specified.\n\nIf the webhook is running within the cluster, then you should use `service`.", - "caBundle": "`caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. If unspecified, system trust roots on the apiserver are used.", + "url": "`url` gives the location of the webhook, in standard URL form (`[scheme://]host:port/path`). Exactly one of `url` or `service` must be specified.\n\nThe `host` should not refer to a service running in the cluster; use the `service` field instead.
The host might be resolved via external DNS in some apiservers (e.g., `kube-apiserver` cannot resolve in-cluster DNS as that would be a layering violation). `host` may also be an IP address.\n\nPlease note that using `localhost` or `127.0.0.1` as a `host` is risky unless you take great care to run this webhook on all hosts which run an apiserver which might need to make calls to this webhook. Such installs are likely to be non-portable, i.e., not easy to turn up in a new cluster.\n\nThe scheme must be \"https\"; the URL must begin with \"https://\".\n\nA path is optional, and if present may be any string permissible in a URL. You may use the path to pass an arbitrary string to the webhook, for example, a cluster identifier.\n\nAttempting to use a user or basic auth e.g. \"user:password@\" is not allowed. Fragments (\"#...\") and query parameters (\"?...\") are not allowed, either.", + "service": "`service` is a reference to the service for this webhook. Either `service` or `url` must be specified.\n\nIf the webhook is running within the cluster, then you should use `service`.\n\nPort 443 will be used if it is open, otherwise it is an error.", + "caBundle": "`caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. Required.", } func (WebhookClientConfig) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go index c4570d031..c6867be12 100644 --- a/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go @@ -25,70 +25,6 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" ) -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MutatingWebhook) DeepCopyInto(out *MutatingWebhook) { - *out = *in - in.ClientConfig.DeepCopyInto(&out.ClientConfig) - if in.Rules != nil { - in, out := &in.Rules, &out.Rules - *out = make([]RuleWithOperations, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.FailurePolicy != nil { - in, out := &in.FailurePolicy, &out.FailurePolicy - *out = new(FailurePolicyType) - **out = **in - } - if in.MatchPolicy != nil { - in, out := &in.MatchPolicy, &out.MatchPolicy - *out = new(MatchPolicyType) - **out = **in - } - if in.NamespaceSelector != nil { - in, out := &in.NamespaceSelector, &out.NamespaceSelector - *out = new(v1.LabelSelector) - (*in).DeepCopyInto(*out) - } - if in.ObjectSelector != nil { - in, out := &in.ObjectSelector, &out.ObjectSelector - *out = new(v1.LabelSelector) - (*in).DeepCopyInto(*out) - } - if in.SideEffects != nil { - in, out := &in.SideEffects, &out.SideEffects - *out = new(SideEffectClass) - **out = **in - } - if in.TimeoutSeconds != nil { - in, out := &in.TimeoutSeconds, &out.TimeoutSeconds - *out = new(int32) - **out = **in - } - if in.AdmissionReviewVersions != nil { - in, out := &in.AdmissionReviewVersions, &out.AdmissionReviewVersions - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.ReinvocationPolicy != nil { - in, out := &in.ReinvocationPolicy, &out.ReinvocationPolicy - *out = new(ReinvocationPolicyType) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutatingWebhook. 
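To make the WebhookClientConfig documentation above concrete, here is a minimal sketch of registering a webhook against an in-cluster Service under the older v1beta1 API this patch vendors. The names "example-config", "webhook-svc" and the "/validate" path are illustrative only, and the sketch assumes the pre-split API in which ServiceReference carries no Port field, so the API server always dials 443:

package main

import (
	admissionv1beta1 "k8s.io/api/admissionregistration/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// exampleConfig wires a validating webhook to an in-cluster Service.
// caPEM is the PEM-encoded CA bundle that signed the Service's serving cert.
func exampleConfig(caPEM []byte) admissionv1beta1.ValidatingWebhookConfiguration {
	path := "/validate"               // optional; passed through to the webhook
	policy := admissionv1beta1.Ignore // unrecognized errors are ignored
	return admissionv1beta1.ValidatingWebhookConfiguration{
		ObjectMeta: metav1.ObjectMeta{Name: "example-config"},
		Webhooks: []admissionv1beta1.Webhook{{
			Name: "validate.example.io", // must be fully qualified
			ClientConfig: admissionv1beta1.WebhookClientConfig{
				Service: &admissionv1beta1.ServiceReference{
					Namespace: "default",
					Name:      "webhook-svc",
					Path:      &path,
				},
				CABundle: caPEM,
			},
			FailurePolicy: &policy,
		}},
	}
}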
-func (in *MutatingWebhook) DeepCopy() *MutatingWebhook { - if in == nil { - return nil - } - out := new(MutatingWebhook) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *MutatingWebhookConfiguration) DeepCopyInto(out *MutatingWebhookConfiguration) { *out = *in @@ -96,7 +32,7 @@ func (in *MutatingWebhookConfiguration) DeepCopyInto(out *MutatingWebhookConfigu in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) if in.Webhooks != nil { in, out := &in.Webhooks, &out.Webhooks - *out = make([]MutatingWebhook, len(*in)) + *out = make([]Webhook, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -126,7 +62,7 @@ func (in *MutatingWebhookConfiguration) DeepCopyObject() runtime.Object { func (in *MutatingWebhookConfigurationList) DeepCopyInto(out *MutatingWebhookConfigurationList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]MutatingWebhookConfiguration, len(*in)) @@ -173,11 +109,6 @@ func (in *Rule) DeepCopyInto(out *Rule) { *out = make([]string, len(*in)) copy(*out, *in) } - if in.Scope != nil { - in, out := &in.Scope, &out.Scope - *out = new(ScopeType) - **out = **in - } return } @@ -221,11 +152,6 @@ func (in *ServiceReference) DeepCopyInto(out *ServiceReference) { *out = new(string) **out = **in } - if in.Port != nil { - in, out := &in.Port, &out.Port - *out = new(int32) - **out = **in - } return } @@ -239,65 +165,6 @@ func (in *ServiceReference) DeepCopy() *ServiceReference { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ValidatingWebhook) DeepCopyInto(out *ValidatingWebhook) { - *out = *in - in.ClientConfig.DeepCopyInto(&out.ClientConfig) - if in.Rules != nil { - in, out := &in.Rules, &out.Rules - *out = make([]RuleWithOperations, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.FailurePolicy != nil { - in, out := &in.FailurePolicy, &out.FailurePolicy - *out = new(FailurePolicyType) - **out = **in - } - if in.MatchPolicy != nil { - in, out := &in.MatchPolicy, &out.MatchPolicy - *out = new(MatchPolicyType) - **out = **in - } - if in.NamespaceSelector != nil { - in, out := &in.NamespaceSelector, &out.NamespaceSelector - *out = new(v1.LabelSelector) - (*in).DeepCopyInto(*out) - } - if in.ObjectSelector != nil { - in, out := &in.ObjectSelector, &out.ObjectSelector - *out = new(v1.LabelSelector) - (*in).DeepCopyInto(*out) - } - if in.SideEffects != nil { - in, out := &in.SideEffects, &out.SideEffects - *out = new(SideEffectClass) - **out = **in - } - if in.TimeoutSeconds != nil { - in, out := &in.TimeoutSeconds, &out.TimeoutSeconds - *out = new(int32) - **out = **in - } - if in.AdmissionReviewVersions != nil { - in, out := &in.AdmissionReviewVersions, &out.AdmissionReviewVersions - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidatingWebhook. -func (in *ValidatingWebhook) DeepCopy() *ValidatingWebhook { - if in == nil { - return nil - } - out := new(ValidatingWebhook) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ValidatingWebhookConfiguration) DeepCopyInto(out *ValidatingWebhookConfiguration) { *out = *in @@ -305,7 +172,7 @@ func (in *ValidatingWebhookConfiguration) DeepCopyInto(out *ValidatingWebhookCon in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) if in.Webhooks != nil { in, out := &in.Webhooks, &out.Webhooks - *out = make([]ValidatingWebhook, len(*in)) + *out = make([]Webhook, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -335,7 +202,7 @@ func (in *ValidatingWebhookConfiguration) DeepCopyObject() runtime.Object { func (in *ValidatingWebhookConfigurationList) DeepCopyInto(out *ValidatingWebhookConfigurationList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ValidatingWebhookConfiguration, len(*in)) @@ -364,6 +231,45 @@ func (in *ValidatingWebhookConfigurationList) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Webhook) DeepCopyInto(out *Webhook) { + *out = *in + in.ClientConfig.DeepCopyInto(&out.ClientConfig) + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]RuleWithOperations, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FailurePolicy != nil { + in, out := &in.FailurePolicy, &out.FailurePolicy + *out = new(FailurePolicyType) + **out = **in + } + if in.NamespaceSelector != nil { + in, out := &in.NamespaceSelector, &out.NamespaceSelector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.SideEffects != nil { + in, out := &in.SideEffects, &out.SideEffects + *out = new(SideEffectClass) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Webhook. +func (in *Webhook) DeepCopy() *Webhook { + if in == nil { + return nil + } + out := new(Webhook) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *WebhookClientConfig) DeepCopyInto(out *WebhookClientConfig) { *out = *in diff --git a/vendor/k8s.io/api/apps/v1/doc.go b/vendor/k8s.io/api/apps/v1/doc.go index 61dc97bde..1d66c2223 100644 --- a/vendor/k8s.io/api/apps/v1/doc.go +++ b/vendor/k8s.io/api/apps/v1/doc.go @@ -15,7 +15,6 @@ limitations under the License. */ // +k8s:deepcopy-gen=package -// +k8s:protobuf-gen=package // +k8s:openapi-gen=true package v1 // import "k8s.io/api/apps/v1" diff --git a/vendor/k8s.io/api/apps/v1/generated.pb.go b/vendor/k8s.io/api/apps/v1/generated.pb.go index 5b29f4320..eac6ef2a1 100644 --- a/vendor/k8s.io/api/apps/v1/generated.pb.go +++ b/vendor/k8s.io/api/apps/v1/generated.pb.go @@ -14,8 +14,9 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// Code generated by protoc-gen-gogo. // source: k8s.io/kubernetes/vendor/k8s.io/api/apps/v1/generated.proto +// DO NOT EDIT! /* Package v1 is a generated protocol buffer package. 
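The hunk below reintroduces encodeFixed64Generated and encodeFixed32Generated, which write protobuf fixed-width values byte by byte in little-endian order (byte 0 carries the low 8 bits, byte 7 the high 8 bits). A standalone sketch of the same layout, written against the standard library rather than copied from the patch:

package main

import (
	"encoding/binary"
	"fmt"
)

// encodeFixed64 produces the same bytes as the generated helper:
// little-endian, fixed eight bytes, returning the advanced offset.
func encodeFixed64(dst []byte, offset int, v uint64) int {
	binary.LittleEndian.PutUint64(dst[offset:], v)
	return offset + 8
}

func main() {
	buf := make([]byte, 8)
	encodeFixed64(buf, 0, 0x0102030405060708)
	fmt.Printf("% x\n", buf) // 08 07 06 05 04 03 02 01: low byte first
}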
@@ -1439,6 +1440,24 @@ func (m *StatefulSetUpdateStrategy) MarshalTo(dAtA []byte) (int, error) { return i, nil } +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) diff --git a/vendor/k8s.io/api/apps/v1/types.go b/vendor/k8s.io/api/apps/v1/types.go index 2fe857458..4431ca2c3 100644 --- a/vendor/k8s.io/api/apps/v1/types.go +++ b/vendor/k8s.io/api/apps/v1/types.go @@ -32,8 +32,6 @@ const ( ) // +genclient -// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/api/autoscaling/v1.Scale -// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // StatefulSet represents a set of pods with consistent identities. @@ -69,7 +67,7 @@ const ( // ParallelPodManagement will create and delete pods as soon as the stateful set // replica count is changed, and will not wait for pods to be ready or complete // termination. - ParallelPodManagement PodManagementPolicyType = "Parallel" + ParallelPodManagement = "Parallel" ) // StatefulSetUpdateStrategy indicates the strategy that the StatefulSet @@ -246,8 +244,6 @@ type StatefulSetList struct { } // +genclient -// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/api/autoscaling/v1.Scale -// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // Deployment enables declarative updates for Pods and ReplicaSets. @@ -283,8 +279,7 @@ type DeploymentSpec struct { // The deployment strategy to use to replace existing pods with new ones. // +optional - // +patchStrategy=retainKeys - Strategy DeploymentStrategy `json:"strategy,omitempty" patchStrategy:"retainKeys" protobuf:"bytes,4,opt,name=strategy"` + Strategy DeploymentStrategy `json:"strategy,omitempty" protobuf:"bytes,4,opt,name=strategy"` // Minimum number of seconds for which a newly created pod should be ready // without any of its container crashing, for it to be considered available. @@ -658,8 +653,6 @@ type DaemonSetList struct { } // +genclient -// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/api/autoscaling/v1.Scale -// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // ReplicaSet ensures that a specified number of pod replicas are running at any given time. 
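One change worth calling out in the apps/v1 types.go hunks above: ParallelPodManagement drops its explicit PodManagementPolicyType type and becomes an untyped string constant. The difference is only in how strictly the compiler checks assignments; a sketch (the type and constant value mirror the diff, the surrounding names are illustrative):

package main

type PodManagementPolicyType string

const (
	// Typed: usable only where PodManagementPolicyType is expected,
	// unless explicitly converted.
	TypedParallel PodManagementPolicyType = "Parallel"
	// Untyped: implicitly convertible to any string-based type.
	UntypedParallel = "Parallel"
)

func takesPolicy(p PodManagementPolicyType) {}

func main() {
	takesPolicy(TypedParallel)
	takesPolicy(UntypedParallel) // untyped constants convert implicitly

	var s string = UntypedParallel // fine
	// var t string = TypedParallel // would not compile without string(...)
	_ = s
}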
diff --git a/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go index 7e992c584..85fb159dd 100644 --- a/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go @@ -96,7 +96,7 @@ func (DaemonSetSpec) SwaggerDoc() map[string]string { } var map_DaemonSetStatus = map[string]string{ - "": "DaemonSetStatus represents the current status of a daemon set.", + "": "DaemonSetStatus represents the current status of a daemon set.", "currentNumberScheduled": "The number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/", "numberMisscheduled": "The number of nodes that are running the daemon pod, but are not supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/", "desiredNumberScheduled": "The total number of nodes that should be running the daemon pod (including nodes correctly running the daemon pod). More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/", diff --git a/vendor/k8s.io/api/apps/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/apps/v1/zz_generated.deepcopy.go index 7b7ff385c..885203fca 100644 --- a/vendor/k8s.io/api/apps/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/apps/v1/zz_generated.deepcopy.go @@ -58,7 +58,7 @@ func (in *ControllerRevision) DeepCopyObject() runtime.Object { func (in *ControllerRevisionList) DeepCopyInto(out *ControllerRevisionList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ControllerRevision, len(*in)) @@ -136,7 +136,7 @@ func (in *DaemonSetCondition) DeepCopy() *DaemonSetCondition { func (in *DaemonSetList) DeepCopyInto(out *DaemonSetList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]DaemonSet, len(*in)) @@ -292,7 +292,7 @@ func (in *DeploymentCondition) DeepCopy() *DeploymentCondition { func (in *DeploymentList) DeepCopyInto(out *DeploymentList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Deployment, len(*in)) @@ -457,7 +457,7 @@ func (in *ReplicaSetCondition) DeepCopy() *ReplicaSetCondition { func (in *ReplicaSetList) DeepCopyInto(out *ReplicaSetList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ReplicaSet, len(*in)) @@ -653,7 +653,7 @@ func (in *StatefulSetCondition) DeepCopy() *StatefulSetCondition { func (in *StatefulSetList) DeepCopyInto(out *StatefulSetList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]StatefulSet, len(*in)) diff --git a/vendor/k8s.io/api/apps/v1beta1/doc.go b/vendor/k8s.io/api/apps/v1beta1/doc.go index 9072bab69..6047ed501 100644 --- a/vendor/k8s.io/api/apps/v1beta1/doc.go +++ b/vendor/k8s.io/api/apps/v1beta1/doc.go @@ -15,7 +15,6 @@ limitations under the License. 
*/ // +k8s:deepcopy-gen=package -// +k8s:protobuf-gen=package // +k8s:openapi-gen=true package v1beta1 // import "k8s.io/api/apps/v1beta1" diff --git a/vendor/k8s.io/api/apps/v1beta1/generated.pb.go b/vendor/k8s.io/api/apps/v1beta1/generated.pb.go index 935304755..ef9aa8e09 100644 --- a/vendor/k8s.io/api/apps/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/apps/v1beta1/generated.pb.go @@ -14,8 +14,9 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// Code generated by protoc-gen-gogo. // source: k8s.io/kubernetes/vendor/k8s.io/api/apps/v1beta1/generated.proto +// DO NOT EDIT! /* Package v1beta1 is a generated protocol buffer package. @@ -1090,6 +1091,24 @@ func (m *StatefulSetUpdateStrategy) MarshalTo(dAtA []byte) (int, error) { return i, nil } +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -2533,14 +2552,51 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey if m.UpdatedAnnotations == nil { m.UpdatedAnnotations = make(map[string]string) } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 + if iNdEx < postIndex { + var valuekey uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2550,80 +2606,41 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + valuekey |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } 
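The long Unmarshal rewrites above and below all decode protobuf varints with the same shift loop: each byte contributes its low seven bits, least-significant group first, and a set high bit means another byte follows (the generated code additionally guards shift >= 64 against overflow). A self-contained sketch of that loop, checked against the stdlib decoder:

package main

import (
	"encoding/binary"
	"fmt"
)

// decodeVarint mirrors the generated shift loop.
func decodeVarint(data []byte) (v uint64, n int) {
	for shift := uint(0); ; shift += 7 {
		b := data[n]
		n++
		v |= uint64(b&0x7F) << shift // low seven bits of each byte
		if b < 0x80 {                // high bit clear: last byte
			return v, n
		}
	}
}

func main() {
	buf := make([]byte, binary.MaxVarintLen64)
	n := binary.PutUvarint(buf, 300) // encodes as 0xAC 0x02
	v, m := decodeVarint(buf[:n])
	fmt.Println(v, m) // 300 2

	std, k := binary.Uvarint(buf[:n]) // stdlib decoder agrees
	fmt.Println(std, k)               // 300 2
}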
- postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break } - iNdEx += skippy } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + m.UpdatedAnnotations[mapkey] = mapvalue + } else { + var mapvalue string + m.UpdatedAnnotations[mapkey] = mapvalue } - m.UpdatedAnnotations[mapkey] = mapvalue iNdEx = postIndex case 3: if wireType != 2 { @@ -3816,14 +3833,51 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey if m.Selector == nil { m.Selector = make(map[string]string) } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 + if iNdEx < postIndex { + var valuekey uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -3833,80 +3887,41 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + valuekey |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - 
return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break } - iNdEx += skippy } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + m.Selector[mapkey] = mapvalue + } else { + var mapvalue string + m.Selector[mapkey] = mapvalue } - m.Selector[mapkey] = mapvalue iNdEx = postIndex case 3: if wireType != 2 { diff --git a/vendor/k8s.io/api/apps/v1beta1/types.go b/vendor/k8s.io/api/apps/v1beta1/types.go index cf6039df6..d462604d7 100644 --- a/vendor/k8s.io/api/apps/v1beta1/types.go +++ b/vendor/k8s.io/api/apps/v1beta1/types.go @@ -17,7 +17,7 @@ limitations under the License. package v1beta1 import ( - v1 "k8s.io/api/core/v1" + "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/intstr" @@ -55,6 +55,8 @@ type ScaleStatus struct { TargetSelector string `json:"targetSelector,omitempty" protobuf:"bytes,3,opt,name=targetSelector"` } +// +genclient +// +genclient:noVerbs // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // Scale represents a scaling request for a resource. @@ -111,7 +113,7 @@ const ( // ParallelPodManagement will create and delete pods as soon as the stateful set // replica count is changed, and will not wait for pods to be ready or complete // termination. - ParallelPodManagement PodManagementPolicyType = "Parallel" + ParallelPodManagement = "Parallel" ) // StatefulSetUpdateStrategy indicates the strategy that the StatefulSet @@ -321,8 +323,7 @@ type DeploymentSpec struct { // The deployment strategy to use to replace existing pods with new ones. 
// +optional - // +patchStrategy=retainKeys - Strategy DeploymentStrategy `json:"strategy,omitempty" patchStrategy:"retainKeys" protobuf:"bytes,4,opt,name=strategy"` + Strategy DeploymentStrategy `json:"strategy,omitempty" protobuf:"bytes,4,opt,name=strategy"` // Minimum number of seconds for which a newly created pod should be ready // without any of its container crashing, for it to be considered available. @@ -433,7 +434,7 @@ type RollingUpdateDeployment struct { // the rolling update starts, such that the total number of old and new pods do not exceed // 130% of desired pods. Once old pods have been killed, // new ReplicaSet can be scaled up further, ensuring that total number of pods running - // at any time during the update is at most 130% of desired pods. + // at any time during the update is at most 130% of desired pods. // +optional MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty" protobuf:"bytes,2,opt,name=maxSurge"` } diff --git a/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go index da1eb5996..68ebef348 100644 --- a/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go @@ -149,7 +149,7 @@ func (RollbackConfig) SwaggerDoc() map[string]string { var map_RollingUpdateDeployment = map[string]string{ "": "Spec to control the desired behavior of rolling update.", "maxUnavailable": "The maximum number of pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). Absolute number is calculated from percentage by rounding down. This can not be 0 if MaxSurge is 0. Defaults to 25%. Example: when this is set to 30%, the old ReplicaSet can be scaled down to 70% of desired pods immediately when the rolling update starts. Once new pods are ready, old ReplicaSet can be scaled down further, followed by scaling up the new ReplicaSet, ensuring that the total number of pods available at all times during the update is at least 70% of desired pods.", - "maxSurge": "The maximum number of pods that can be scheduled above the desired number of pods. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up. Defaults to 25%. Example: when this is set to 30%, the new ReplicaSet can be scaled up immediately when the rolling update starts, such that the total number of old and new pods do not exceed 130% of desired pods. Once old pods have been killed, new ReplicaSet can be scaled up further, ensuring that total number of pods running at any time during the update is at most 130% of desired pods.", + "maxSurge": "The maximum number of pods that can be scheduled above the desired number of pods. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up. Defaults to 25%. Example: when this is set to 30%, the new ReplicaSet can be scaled up immediately when the rolling update starts, such that the total number of old and new pods do not exceed 130% of desired pods.
Once old pods have been killed, new ReplicaSet can be scaled up further, ensuring that total number of pods running at any time during the update is at most 130% of desired pods.", } func (RollingUpdateDeployment) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/apps/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/apps/v1beta1/zz_generated.deepcopy.go index fb2761241..93892bfd0 100644 --- a/vendor/k8s.io/api/apps/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/apps/v1beta1/zz_generated.deepcopy.go @@ -58,7 +58,7 @@ func (in *ControllerRevision) DeepCopyObject() runtime.Object { func (in *ControllerRevisionList) DeepCopyInto(out *ControllerRevisionList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ControllerRevision, len(*in)) @@ -137,7 +137,7 @@ func (in *DeploymentCondition) DeepCopy() *DeploymentCondition { func (in *DeploymentList) DeepCopyInto(out *DeploymentList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Deployment, len(*in)) @@ -470,7 +470,7 @@ func (in *StatefulSetCondition) DeepCopy() *StatefulSetCondition { func (in *StatefulSetList) DeepCopyInto(out *StatefulSetList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]StatefulSet, len(*in)) diff --git a/vendor/k8s.io/api/apps/v1beta2/doc.go b/vendor/k8s.io/api/apps/v1beta2/doc.go index 9f499869f..e93e164e1 100644 --- a/vendor/k8s.io/api/apps/v1beta2/doc.go +++ b/vendor/k8s.io/api/apps/v1beta2/doc.go @@ -15,7 +15,6 @@ limitations under the License. */ // +k8s:deepcopy-gen=package -// +k8s:protobuf-gen=package // +k8s:openapi-gen=true package v1beta2 // import "k8s.io/api/apps/v1beta2" diff --git a/vendor/k8s.io/api/apps/v1beta2/generated.pb.go b/vendor/k8s.io/api/apps/v1beta2/generated.pb.go index fc1efbc90..72d832c34 100644 --- a/vendor/k8s.io/api/apps/v1beta2/generated.pb.go +++ b/vendor/k8s.io/api/apps/v1beta2/generated.pb.go @@ -14,8 +14,9 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// Code generated by protoc-gen-gogo. // source: k8s.io/kubernetes/vendor/k8s.io/api/apps/v1beta2/generated.proto +// DO NOT EDIT! /* Package v1beta2 is a generated protocol buffer package.
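The maxSurge and maxUnavailable doc strings repeated in the swagger hunks above fix opposite rounding rules: surge percentages round up, unavailable percentages round down. A worked sketch of the arithmetic behind the documented "130% / at least 70%" example:

package main

import (
	"fmt"
	"math"
)

// bounds computes the pod-count envelope for a rolling update:
// surge rounds up, unavailable rounds down, per the docs above.
func bounds(desired int, surgePct, unavailPct float64) (maxTotal, minAvail int) {
	surge := int(math.Ceil(float64(desired) * surgePct / 100))
	unavail := int(math.Floor(float64(desired) * unavailPct / 100))
	return desired + surge, desired - unavail
}

func main() {
	// 10 desired pods, 30% surge, 30% unavailable:
	// at most 13 pods run at once, at least 7 stay available.
	maxTotal, minAvail := bounds(10, 30, 30)
	fmt.Println(maxTotal, minAvail) // 13 7
}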
@@ -1569,6 +1570,24 @@ func (m *StatefulSetUpdateStrategy) MarshalTo(dAtA []byte) (int, error) { return i, nil } +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -6090,14 +6109,51 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey if m.Selector == nil { m.Selector = make(map[string]string) } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 + if iNdEx < postIndex { + var valuekey uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -6107,80 +6163,41 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + valuekey |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - 
postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break } - iNdEx += skippy } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + m.Selector[mapkey] = mapvalue + } else { + var mapvalue string + m.Selector[mapkey] = mapvalue } - m.Selector[mapkey] = mapvalue iNdEx = postIndex case 3: if wireType != 2 { diff --git a/vendor/k8s.io/api/apps/v1beta2/types.go b/vendor/k8s.io/api/apps/v1beta2/types.go index 39e07e278..e5525222a 100644 --- a/vendor/k8s.io/api/apps/v1beta2/types.go +++ b/vendor/k8s.io/api/apps/v1beta2/types.go @@ -17,7 +17,7 @@ limitations under the License. package v1beta2 import ( - v1 "k8s.io/api/core/v1" + "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/intstr" @@ -57,20 +57,22 @@ type ScaleStatus struct { TargetSelector string `json:"targetSelector,omitempty" protobuf:"bytes,3,opt,name=targetSelector"` } +// +genclient +// +genclient:noVerbs // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // Scale represents a scaling request for a resource. type Scale struct { metav1.TypeMeta `json:",inline"` - // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. + // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. // +optional Spec ScaleSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only. + // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. Read-only. // +optional Status ScaleStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -115,7 +117,7 @@ const ( // ParallelPodManagement will create and delete pods as soon as the stateful set // replica count is changed, and will not wait for pods to be ready or complete // termination. - ParallelPodManagement PodManagementPolicyType = "Parallel" + ParallelPodManagement = "Parallel" ) // StatefulSetUpdateStrategy indicates the strategy that the StatefulSet @@ -329,8 +331,7 @@ type DeploymentSpec struct { // The deployment strategy to use to replace existing pods with new ones. 
// +optional - // +patchStrategy=retainKeys - Strategy DeploymentStrategy `json:"strategy,omitempty" patchStrategy:"retainKeys" protobuf:"bytes,4,opt,name=strategy"` + Strategy DeploymentStrategy `json:"strategy,omitempty" protobuf:"bytes,4,opt,name=strategy"` // Minimum number of seconds for which a newly created pod should be ready // without any of its container crashing, for it to be considered available. @@ -413,7 +414,7 @@ type RollingUpdateDeployment struct { // the rolling update starts, such that the total number of old and new pods do not exceed // 130% of desired pods. Once old pods have been killed, // new ReplicaSet can be scaled up further, ensuring that total number of pods running - // at any time during the update is at most 130% of desired pods. + // at any time during the update is at most 130% of desired pods. // +optional MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty" protobuf:"bytes,2,opt,name=maxSurge"` } @@ -666,12 +667,12 @@ type DaemonSetCondition struct { type DaemonSet struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // The desired behavior of this daemon set. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Spec DaemonSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` @@ -679,7 +680,7 @@ type DaemonSet struct { // out of date by some window of time. // Populated by the system. // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Status DaemonSetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -697,7 +698,7 @@ const ( type DaemonSetList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -716,12 +717,12 @@ type ReplicaSet struct { // If the Labels of a ReplicaSet are empty, they are defaulted to // be the same as the Pod(s) that the ReplicaSet manages. - // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec defines the specification of the desired behavior of the ReplicaSet.
- // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Spec ReplicaSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` @@ -729,7 +730,7 @@ type ReplicaSet struct { // This data may be out of date by some window of time. // Populated by the system. // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Status ReplicaSetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -740,7 +741,7 @@ type ReplicaSet struct { type ReplicaSetList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -850,7 +851,7 @@ type ReplicaSetCondition struct { type ControllerRevision struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -867,7 +868,7 @@ type ControllerRevision struct { type ControllerRevisionList struct { metav1.TypeMeta `json:",inline"` - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` diff --git a/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go b/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go index 822158a18..627df3ab7 100644 --- a/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go @@ -29,7 +29,7 @@ package v1beta2 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_ControllerRevision = map[string]string{ "": "DEPRECATED - This group version of ControllerRevision is deprecated by apps/v1/ControllerRevision. See the release notes for more information. ControllerRevision implements an immutable snapshot of state data. Clients are responsible for serializing and deserializing the objects that contain their internal state. Once a ControllerRevision has been successfully created, it can not be updated. The API Server will fail validation of all requests that attempt to mutate the Data field. ControllerRevisions may, however, be deleted. Note that, due to its use by both the DaemonSet and StatefulSet controllers for update and rollback, this object is beta. However, it may be subject to name and representation changes in future releases, and clients should not depend on its stability. It is primarily for internal use by controllers.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "metadata": "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", "data": "Data is the serialized representation of the state.", "revision": "Revision indicates the revision of the state represented by Data.", } @@ -40,7 +40,7 @@ func (ControllerRevision) SwaggerDoc() map[string]string { var map_ControllerRevisionList = map[string]string{ "": "ControllerRevisionList is a resource containing a list of ControllerRevision objects.", - "metadata": "More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "metadata": "More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", "items": "Items is the list of ControllerRevisions", } @@ -50,9 +50,9 @@ func (ControllerRevisionList) SwaggerDoc() map[string]string { var map_DaemonSet = map[string]string{ "": "DEPRECATED - This group version of DaemonSet is deprecated by apps/v1/DaemonSet. See the release notes for more information. DaemonSet represents the configuration of a daemon set.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "The desired behavior of this daemon set. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", - "status": "The current status of this daemon set. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "spec": "The desired behavior of this daemon set. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "status": "The current status of this daemon set. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", } func (DaemonSet) SwaggerDoc() map[string]string { @@ -74,7 +74,7 @@ func (DaemonSetCondition) SwaggerDoc() map[string]string { var map_DaemonSetList = map[string]string{ "": "DaemonSetList is a collection of daemon sets.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", "items": "A list of daemon sets.", } @@ -96,7 +96,7 @@ func (DaemonSetSpec) SwaggerDoc() map[string]string { } var map_DaemonSetStatus = map[string]string{ - "": "DaemonSetStatus represents the current status of a daemon set.", + "": "DaemonSetStatus represents the current status of a daemon set.", "currentNumberScheduled": "The number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/", "numberMisscheduled": "The number of nodes that are running the daemon pod, but are not supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/", "desiredNumberScheduled": "The total number of nodes that should be running the daemon pod (including nodes correctly running the daemon pod). 
More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/", @@ -202,9 +202,9 @@ func (DeploymentStrategy) SwaggerDoc() map[string]string { var map_ReplicaSet = map[string]string{ "": "DEPRECATED - This group version of ReplicaSet is deprecated by apps/v1/ReplicaSet. See the release notes for more information. ReplicaSet ensures that a specified number of pod replicas are running at any given time.", - "metadata": "If the Labels of a ReplicaSet are empty, they are defaulted to be the same as the Pod(s) that the ReplicaSet manages. Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Spec defines the specification of the desired behavior of the ReplicaSet. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", - "status": "Status is the most recently observed status of the ReplicaSet. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "metadata": "If the Labels of a ReplicaSet are empty, they are defaulted to be the same as the Pod(s) that the ReplicaSet manages. Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "spec": "Spec defines the specification of the desired behavior of the ReplicaSet. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "status": "Status is the most recently observed status of the ReplicaSet. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", } func (ReplicaSet) SwaggerDoc() map[string]string { @@ -226,7 +226,7 @@ func (ReplicaSetCondition) SwaggerDoc() map[string]string { var map_ReplicaSetList = map[string]string{ "": "ReplicaSetList is a collection of ReplicaSets.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", "items": "List of ReplicaSets. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller", } @@ -272,7 +272,7 @@ func (RollingUpdateDaemonSet) SwaggerDoc() map[string]string { var map_RollingUpdateDeployment = map[string]string{ "": "Spec to control the desired behavior of rolling update.", "maxUnavailable": "The maximum number of pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). Absolute number is calculated from percentage by rounding down. This can not be 0 if MaxSurge is 0. Defaults to 25%. Example: when this is set to 30%, the old ReplicaSet can be scaled down to 70% of desired pods immediately when the rolling update starts. Once new pods are ready, old ReplicaSet can be scaled down further, followed by scaling up the new ReplicaSet, ensuring that the total number of pods available at all times during the update is at least 70% of desired pods.", - "maxSurge": "The maximum number of pods that can be scheduled above the desired number of pods. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). 
This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up. Defaults to 25%. Example: when this is set to 30%, the new ReplicaSet can be scaled up immediately when the rolling update starts, such that the total number of old and new pods do not exceed 130% of desired pods. Once old pods have been killed, new ReplicaSet can be scaled up further, ensuring that total number of pods running at any time during the update is at most 130% of desired pods.", + "maxSurge": "The maximum number of pods that can be scheduled above the desired number of pods. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up. Defaults to 25%. Example: when this is set to 30%, the new ReplicaSet can be scaled up immediately when the rolling update starts, such that the total number of old and new pods do not exceed 130% of desired pods. Once old pods have been killed, new ReplicaSet can be scaled up further, ensuring that total number of pods running at any time during the update is at most 130% of desired pods.", } func (RollingUpdateDeployment) SwaggerDoc() map[string]string { @@ -290,9 +290,9 @@ func (RollingUpdateStatefulSetStrategy) SwaggerDoc() map[string]string { var map_Scale = map[string]string{ "": "Scale represents a scaling request for a resource.", - "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.", - "spec": "defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status.", - "status": "current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only.", + "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata.", + "spec": "defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status.", + "status": "current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status.
Read-only.", } func (Scale) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/apps/v1beta2/zz_generated.deepcopy.go b/vendor/k8s.io/api/apps/v1beta2/zz_generated.deepcopy.go index 127bf095f..8a0bad22e 100644 --- a/vendor/k8s.io/api/apps/v1beta2/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/apps/v1beta2/zz_generated.deepcopy.go @@ -58,7 +58,7 @@ func (in *ControllerRevision) DeepCopyObject() runtime.Object { func (in *ControllerRevisionList) DeepCopyInto(out *ControllerRevisionList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ControllerRevision, len(*in)) @@ -136,7 +136,7 @@ func (in *DaemonSetCondition) DeepCopy() *DaemonSetCondition { func (in *DaemonSetList) DeepCopyInto(out *DaemonSetList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]DaemonSet, len(*in)) @@ -292,7 +292,7 @@ func (in *DeploymentCondition) DeepCopy() *DeploymentCondition { func (in *DeploymentList) DeepCopyInto(out *DeploymentList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Deployment, len(*in)) @@ -457,7 +457,7 @@ func (in *ReplicaSetCondition) DeepCopy() *ReplicaSetCondition { func (in *ReplicaSetList) DeepCopyInto(out *ReplicaSetList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ReplicaSet, len(*in)) @@ -720,7 +720,7 @@ func (in *StatefulSetCondition) DeepCopy() *StatefulSetCondition { func (in *StatefulSetList) DeepCopyInto(out *StatefulSetList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]StatefulSet, len(*in)) diff --git a/vendor/k8s.io/api/auditregistration/v1alpha1/generated.pb.go b/vendor/k8s.io/api/auditregistration/v1alpha1/generated.pb.go deleted file mode 100644 index f540a0328..000000000 --- a/vendor/k8s.io/api/auditregistration/v1alpha1/generated.pb.go +++ /dev/null @@ -1,1715 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: k8s.io/kubernetes/vendor/k8s.io/api/auditregistration/v1alpha1/generated.proto - -/* - Package v1alpha1 is a generated protocol buffer package. 
- - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/auditregistration/v1alpha1/generated.proto - - It has these top-level messages: - AuditSink - AuditSinkList - AuditSinkSpec - Policy - ServiceReference - Webhook - WebhookClientConfig - WebhookThrottleConfig -*/ -package v1alpha1 - -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" - -import strings "strings" -import reflect "reflect" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package - -func (m *AuditSink) Reset() { *m = AuditSink{} } -func (*AuditSink) ProtoMessage() {} -func (*AuditSink) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } - -func (m *AuditSinkList) Reset() { *m = AuditSinkList{} } -func (*AuditSinkList) ProtoMessage() {} -func (*AuditSinkList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } - -func (m *AuditSinkSpec) Reset() { *m = AuditSinkSpec{} } -func (*AuditSinkSpec) ProtoMessage() {} -func (*AuditSinkSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } - -func (m *Policy) Reset() { *m = Policy{} } -func (*Policy) ProtoMessage() {} -func (*Policy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } - -func (m *ServiceReference) Reset() { *m = ServiceReference{} } -func (*ServiceReference) ProtoMessage() {} -func (*ServiceReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } - -func (m *Webhook) Reset() { *m = Webhook{} } -func (*Webhook) ProtoMessage() {} -func (*Webhook) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } - -func (m *WebhookClientConfig) Reset() { *m = WebhookClientConfig{} } -func (*WebhookClientConfig) ProtoMessage() {} -func (*WebhookClientConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } - -func (m *WebhookThrottleConfig) Reset() { *m = WebhookThrottleConfig{} } -func (*WebhookThrottleConfig) ProtoMessage() {} -func (*WebhookThrottleConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } - -func init() { - proto.RegisterType((*AuditSink)(nil), "k8s.io.api.auditregistration.v1alpha1.AuditSink") - proto.RegisterType((*AuditSinkList)(nil), "k8s.io.api.auditregistration.v1alpha1.AuditSinkList") - proto.RegisterType((*AuditSinkSpec)(nil), "k8s.io.api.auditregistration.v1alpha1.AuditSinkSpec") - proto.RegisterType((*Policy)(nil), "k8s.io.api.auditregistration.v1alpha1.Policy") - proto.RegisterType((*ServiceReference)(nil), "k8s.io.api.auditregistration.v1alpha1.ServiceReference") - proto.RegisterType((*Webhook)(nil), "k8s.io.api.auditregistration.v1alpha1.Webhook") - proto.RegisterType((*WebhookClientConfig)(nil), "k8s.io.api.auditregistration.v1alpha1.WebhookClientConfig") - proto.RegisterType((*WebhookThrottleConfig)(nil), "k8s.io.api.auditregistration.v1alpha1.WebhookThrottleConfig") -} -func (m *AuditSink) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AuditSink) 
MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n2 - return i, nil -} - -func (m *AuditSinkList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AuditSinkList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n3, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n3 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *AuditSinkSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AuditSinkSpec) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Policy.Size())) - n4, err := m.Policy.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n4 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Webhook.Size())) - n5, err := m.Webhook.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n5 - return i, nil -} - -func (m *Policy) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Policy) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Level))) - i += copy(dAtA[i:], m.Level) - if len(m.Stages) > 0 { - for _, s := range m.Stages { - dAtA[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - return i, nil -} - -func (m *ServiceReference) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ServiceReference) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) - i += copy(dAtA[i:], m.Namespace) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - if m.Path != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Path))) - i += copy(dAtA[i:], *m.Path) - } - if m.Port != nil { - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.Port)) - } - return i, nil -} - -func (m *Webhook) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m 
*Webhook) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Throttle != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Throttle.Size())) - n6, err := m.Throttle.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n6 - } - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ClientConfig.Size())) - n7, err := m.ClientConfig.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n7 - return i, nil -} - -func (m *WebhookClientConfig) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *WebhookClientConfig) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.URL != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.URL))) - i += copy(dAtA[i:], *m.URL) - } - if m.Service != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Service.Size())) - n8, err := m.Service.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n8 - } - if m.CABundle != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.CABundle))) - i += copy(dAtA[i:], m.CABundle) - } - return i, nil -} - -func (m *WebhookThrottleConfig) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *WebhookThrottleConfig) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.QPS != nil { - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.QPS)) - } - if m.Burst != nil { - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.Burst)) - } - return i, nil -} - -func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *AuditSink) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *AuditSinkList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *AuditSinkSpec) Size() (n int) { - var l int - _ = l - l = m.Policy.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Webhook.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *Policy) Size() (n int) { - var l int - _ = l - l = len(m.Level) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Stages) > 0 { - for _, s := range m.Stages { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *ServiceReference) Size() (n int) { - var l int - _ = l - l = len(m.Namespace) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - if m.Path != nil { - l = len(*m.Path) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Port != nil { - n += 1 + sovGenerated(uint64(*m.Port)) - } - return n -} - -func (m *Webhook) Size() (n int) { - var l int - _ = l - if m.Throttle != nil { - l = m.Throttle.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - l = m.ClientConfig.Size() - n += 1 + l + sovGenerated(uint64(l)) - 
return n -} - -func (m *WebhookClientConfig) Size() (n int) { - var l int - _ = l - if m.URL != nil { - l = len(*m.URL) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Service != nil { - l = m.Service.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.CABundle != nil { - l = len(m.CABundle) - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *WebhookThrottleConfig) Size() (n int) { - var l int - _ = l - if m.QPS != nil { - n += 1 + sovGenerated(uint64(*m.QPS)) - } - if m.Burst != nil { - n += 1 + sovGenerated(uint64(*m.Burst)) - } - return n -} - -func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *AuditSink) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&AuditSink{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "AuditSinkSpec", "AuditSinkSpec", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *AuditSinkList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&AuditSinkList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "AuditSink", "AuditSink", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *AuditSinkSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&AuditSinkSpec{`, - `Policy:` + strings.Replace(strings.Replace(this.Policy.String(), "Policy", "Policy", 1), `&`, ``, 1) + `,`, - `Webhook:` + strings.Replace(strings.Replace(this.Webhook.String(), "Webhook", "Webhook", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *Policy) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Policy{`, - `Level:` + fmt.Sprintf("%v", this.Level) + `,`, - `Stages:` + fmt.Sprintf("%v", this.Stages) + `,`, - `}`, - }, "") - return s -} -func (this *ServiceReference) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ServiceReference{`, - `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Path:` + valueToStringGenerated(this.Path) + `,`, - `Port:` + valueToStringGenerated(this.Port) + `,`, - `}`, - }, "") - return s -} -func (this *Webhook) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Webhook{`, - `Throttle:` + strings.Replace(fmt.Sprintf("%v", this.Throttle), "WebhookThrottleConfig", "WebhookThrottleConfig", 1) + `,`, - `ClientConfig:` + strings.Replace(strings.Replace(this.ClientConfig.String(), "WebhookClientConfig", "WebhookClientConfig", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *WebhookClientConfig) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&WebhookClientConfig{`, - `URL:` + valueToStringGenerated(this.URL) + `,`, - `Service:` + strings.Replace(fmt.Sprintf("%v", this.Service), "ServiceReference", "ServiceReference", 1) + `,`, - `CABundle:` + valueToStringGenerated(this.CABundle) + `,`, - `}`, - }, "") - return s -} -func (this *WebhookThrottleConfig) 
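// Illustrative sketch (editorial aside, not part of the vendored diff): the
// deleted marshal and size helpers above hand-roll protobuf base-128 varints.
// Each byte carries seven payload bits, with the high bit set as a continuation
// flag; sovGenerated simply counts how many such bytes a value needs. This
// standalone version omits the generated code's overflow and truncation guards.
package main

import "fmt"

// encodeVarint appends v in little-endian base-128 form.
func encodeVarint(dst []byte, v uint64) []byte {
	for v >= 1<<7 {
		dst = append(dst, byte(v&0x7f|0x80)) // low 7 bits, continuation bit set
		v >>= 7
	}
	return append(dst, byte(v)) // final byte, high bit clear
}

// sizeVarint mirrors sovGenerated: the byte length encodeVarint will emit.
func sizeVarint(v uint64) (n int) {
	for {
		n++
		v >>= 7
		if v == 0 {
			return n
		}
	}
}

// decodeVarint reverses encodeVarint (no bounds or overflow checks here).
func decodeVarint(src []byte) (v uint64, n int) {
	for shift := uint(0); ; shift += 7 {
		b := src[n]
		n++
		v |= uint64(b&0x7f) << shift
		if b < 0x80 {
			return v, n
		}
	}
}

func main() {
	buf := encodeVarint(nil, 300)
	fmt.Printf("% x, size %d\n", buf, sizeVarint(300)) // prints: ac 02, size 2
	v, _ := decodeVarint(buf)
	fmt.Println(v) // prints: 300
}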
String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&WebhookThrottleConfig{`, - `QPS:` + valueToStringGenerated(this.QPS) + `,`, - `Burst:` + valueToStringGenerated(this.Burst) + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *AuditSink) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuditSink: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AuditSink: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AuditSinkList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuditSinkList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AuditSinkList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 
{ - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, AuditSink{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AuditSinkSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuditSinkSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AuditSinkSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Policy", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Policy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Webhook", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Webhook.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { 
- return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Policy) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Policy: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Policy: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Level", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Level = Level(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Stages", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Stages = append(m.Stages, Stage(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ServiceReference) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ServiceReference: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ServiceReference: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return 
ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Namespace = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.Path = &s - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Port = &v - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Webhook) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Webhook: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Webhook: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Throttle", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Throttle == nil { - m.Throttle = &WebhookThrottleConfig{} - } - if err := m.Throttle.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - 
return fmt.Errorf("proto: wrong wireType = %d for field ClientConfig", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ClientConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: WebhookClientConfig: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: WebhookClientConfig: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field URL", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.URL = &s - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Service", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Service == nil { - m.Service = &ServiceReference{} - } - if err := m.Service.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CABundle", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.CABundle = append(m.CABundle[:0], dAtA[iNdEx:postIndex]...) 
- if m.CABundle == nil { - m.CABundle = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *WebhookThrottleConfig) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: WebhookThrottleConfig: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: WebhookThrottleConfig: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field QPS", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.QPS = &v - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Burst", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Burst = &v - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenerated(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthGenerated - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if 
b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipGenerated(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") -) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/auditregistration/v1alpha1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 765 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x52, 0x41, 0x6f, 0x13, 0x47, - 0x14, 0xf6, 0xc6, 0x76, 0x6c, 0x4f, 0x9c, 0x36, 0x9d, 0xb4, 0x95, 0x1b, 0x55, 0x6b, 0x6b, 0xa5, - 0x4a, 0x91, 0xda, 0xcc, 0x36, 0x55, 0xd4, 0x56, 0x88, 0x4b, 0x36, 0x27, 0xa4, 0x10, 0xc2, 0x98, - 0x80, 0x40, 0x08, 0x31, 0x5e, 0x3f, 0xef, 0x0e, 0xb6, 0x77, 0x97, 0xdd, 0x59, 0xa3, 0xdc, 0xf8, - 0x09, 0xfc, 0x05, 0xfe, 0x06, 0x37, 0x24, 0x90, 0x72, 0xcc, 0x31, 0xa7, 0x88, 0x98, 0x03, 0xff, - 0x81, 0x13, 0x9a, 0xd9, 0x59, 0xdb, 0xc4, 0x41, 0x38, 0xb7, 0x79, 0xdf, 0x7b, 0xdf, 0xf7, 0xbe, - 0xf7, 0xde, 0xa0, 0x83, 0xfe, 0xff, 0x09, 0xe1, 0xa1, 0xdd, 0x4f, 0x3b, 0x10, 0x07, 0x20, 0x20, - 0xb1, 0x47, 0x10, 0x74, 0xc3, 0xd8, 0xd6, 0x09, 0x16, 0x71, 0x9b, 0xa5, 0x5d, 0x2e, 0x62, 0xf0, - 0x78, 0x22, 0x62, 0x26, 0x78, 0x18, 0xd8, 0xa3, 0x6d, 0x36, 0x88, 0x7c, 0xb6, 0x6d, 0x7b, 0x10, - 0x40, 0xcc, 0x04, 0x74, 0x49, 0x14, 0x87, 0x22, 0xc4, 0x7f, 0x64, 0x34, 0xc2, 0x22, 0x4e, 0xe6, - 0x68, 0x24, 0xa7, 0x6d, 0x6c, 0x79, 0x5c, 0xf8, 0x69, 0x87, 0xb8, 0xe1, 0xd0, 0xf6, 0x42, 0x2f, - 0xb4, 0x15, 0xbb, 0x93, 0xf6, 0x54, 0xa4, 0x02, 0xf5, 0xca, 0x54, 0x37, 0x76, 0xa6, 0x66, 0x86, - 0xcc, 0xf5, 0x79, 0x00, 0xf1, 0xb1, 0x1d, 0xf5, 0x3d, 0x09, 0x24, 0xf6, 0x10, 0x04, 0xb3, 0x47, - 0x73, 0x5e, 0x36, 0xec, 0x6f, 0xb1, 0xe2, 0x34, 0x10, 0x7c, 0x08, 0x73, 0x84, 0x7f, 0xbf, 0x47, - 0x48, 0x5c, 0x1f, 0x86, 0xec, 0x32, 0xcf, 0x7a, 0x6f, 0xa0, 0xda, 0xae, 0x1c, 0xb6, 0xcd, 0x83, - 0x3e, 0x7e, 0x8a, 0xaa, 0xd2, 0x51, 0x97, 0x09, 0xd6, 0x30, 0x5a, 0xc6, 0xe6, 0xca, 0x3f, 0x7f, - 0x93, 0xe9, 0x56, 0x26, 0xc2, 0x24, 0xea, 0x7b, 0x12, 0x48, 0x88, 0xac, 0x26, 0xa3, 0x6d, 0x72, - 0xa7, 0xf3, 0x0c, 0x5c, 0x71, 0x1b, 0x04, 0x73, 0xf0, 0xc9, 0x79, 0xb3, 0x30, 0x3e, 0x6f, 0xa2, - 0x29, 0x46, 0x27, 0xaa, 0xf8, 0x3e, 0x2a, 0x25, 0x11, 0xb8, 0x8d, 0x25, 0xa5, 0xbe, 0x43, 0x16, - 0xda, 0x39, 0x99, 0x38, 0x6c, 0x47, 0xe0, 0x3a, 0x75, 0xdd, 0xa1, 0x24, 0x23, 0xaa, 0xf4, 0xac, - 0x77, 0x06, 0x5a, 0x9d, 0x54, 0xed, 0xf3, 0x44, 0xe0, 0xc7, 0x73, 0xb3, 0x90, 0xc5, 0x66, 0x91, - 0x6c, 0x35, 0xc9, 0x9a, 0xee, 0x53, 0xcd, 0x91, 0x99, 0x39, 0x8e, 0x50, 0x99, 0x0b, 0x18, 0x26, - 0x8d, 0xa5, 0x56, 0xf1, 0xd2, 0x9a, 0x16, 0x1a, 0xc4, 0x59, 0xd5, 0xe2, 0xe5, 0x5b, 0x52, 0x86, - 0x66, 0x6a, 0xd6, 0xdb, 0xd9, 0x31, 0xe4, 0x78, 0xf8, 0x08, 0x2d, 0x47, 0xe1, 0x80, 0xbb, 0xc7, - 0x7a, 0x88, 0xad, 0x05, 0x3b, 0x1d, 0x2a, 0x92, 0xf3, 0x83, 0x6e, 0xb3, 0x9c, 0xc5, 0x54, 0x8b, - 0xe1, 0x87, 0xa8, 0xf2, 0x02, 0x3a, 0x7e, 0x18, 0xf6, 0xf5, 0x29, 0xc8, 0x82, 0xba, 0x0f, 0x32, - 0x96, 0xf3, 0xa3, 0x16, 0xae, 0x68, 0x80, 0xe6, 0x7a, 0x96, 0x8b, 0x74, 0x33, 0xfc, 0x17, 0x2a, - 0x0f, 0x60, 0x04, 0x03, 0x65, 0xbd, 0xe6, 0xfc, 0x9a, 0x8f, 0xbc, 0x2f, 0xc1, 0xcf, 
0xf9, 0x83, - 0x66, 0x45, 0xf8, 0x4f, 0xb4, 0x9c, 0x08, 0xe6, 0x41, 0xb6, 0xd3, 0x9a, 0xb3, 0x2e, 0x6d, 0xb7, - 0x15, 0x22, 0x6b, 0xd5, 0x8b, 0xea, 0x12, 0xeb, 0xb5, 0x81, 0xd6, 0xda, 0x10, 0x8f, 0xb8, 0x0b, - 0x14, 0x7a, 0x10, 0x43, 0xe0, 0x02, 0xb6, 0x51, 0x2d, 0x60, 0x43, 0x48, 0x22, 0xe6, 0x82, 0xee, - 0xf9, 0x93, 0xee, 0x59, 0x3b, 0xc8, 0x13, 0x74, 0x5a, 0x83, 0x5b, 0xa8, 0x24, 0x03, 0xb5, 0x82, - 0xda, 0xf4, 0x5f, 0xc9, 0x5a, 0xaa, 0x32, 0xf8, 0x77, 0x54, 0x8a, 0x98, 0xf0, 0x1b, 0x45, 0x55, - 0x51, 0x95, 0xd9, 0x43, 0x26, 0x7c, 0xaa, 0x50, 0x95, 0x0d, 0x63, 0xd1, 0x28, 0xb5, 0x8c, 0xcd, - 0xb2, 0xce, 0x86, 0xb1, 0xa0, 0x0a, 0xb5, 0x3e, 0x19, 0x28, 0xdf, 0x0e, 0xee, 0xa1, 0xaa, 0xf0, - 0xe3, 0x50, 0x88, 0x01, 0xe8, 0x43, 0xde, 0xbc, 0xde, 0xc2, 0xef, 0x69, 0xf6, 0x5e, 0x18, 0xf4, - 0xb8, 0xe7, 0xd4, 0xe5, 0xbf, 0xcc, 0x31, 0x3a, 0xd1, 0xc6, 0x02, 0xd5, 0xdd, 0x01, 0x87, 0x40, - 0x64, 0x75, 0xfa, 0xb8, 0x37, 0xae, 0xd7, 0x6b, 0x6f, 0x46, 0xc1, 0xf9, 0x59, 0x6f, 0xa5, 0x3e, - 0x8b, 0xd2, 0xaf, 0xba, 0x58, 0x6f, 0x0c, 0xb4, 0x7e, 0x05, 0x17, 0xff, 0x86, 0x8a, 0x69, 0x9c, - 0x9f, 0xbf, 0x32, 0x3e, 0x6f, 0x16, 0x8f, 0xe8, 0x3e, 0x95, 0x18, 0x7e, 0x82, 0x2a, 0x49, 0x76, - 0x3f, 0xed, 0xf1, 0xbf, 0x05, 0x3d, 0x5e, 0xbe, 0xba, 0xb3, 0x22, 0x7f, 0x61, 0x8e, 0xe6, 0xa2, - 0x78, 0x13, 0x55, 0x5d, 0xe6, 0xa4, 0x41, 0x77, 0x00, 0xea, 0x78, 0xf5, 0x6c, 0x65, 0x7b, 0xbb, - 0x19, 0x46, 0x27, 0x59, 0xab, 0x8d, 0x7e, 0xb9, 0x72, 0xc7, 0xd2, 0xfd, 0xf3, 0x28, 0x51, 0xee, - 0x8b, 0x99, 0xfb, 0xbb, 0x87, 0x6d, 0x2a, 0x31, 0xdc, 0x44, 0xe5, 0x4e, 0x1a, 0x27, 0x42, 0x79, - 0x2f, 0x3a, 0x35, 0xf9, 0xab, 0x1d, 0x09, 0xd0, 0x0c, 0x77, 0xc8, 0xc9, 0x85, 0x59, 0x38, 0xbd, - 0x30, 0x0b, 0x67, 0x17, 0x66, 0xe1, 0xe5, 0xd8, 0x34, 0x4e, 0xc6, 0xa6, 0x71, 0x3a, 0x36, 0x8d, - 0xb3, 0xb1, 0x69, 0x7c, 0x18, 0x9b, 0xc6, 0xab, 0x8f, 0x66, 0xe1, 0x51, 0x35, 0x9f, 0xea, 0x4b, - 0x00, 0x00, 0x00, 0xff, 0xff, 0x0a, 0x6c, 0xff, 0x86, 0xcd, 0x06, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/auditregistration/v1alpha1/types.go b/vendor/k8s.io/api/auditregistration/v1alpha1/types.go deleted file mode 100644 index a0fb48c30..000000000 --- a/vendor/k8s.io/api/auditregistration/v1alpha1/types.go +++ /dev/null @@ -1,198 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// +k8s:openapi-gen=true - -package v1alpha1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// Level defines the amount of information logged during auditing -type Level string - -// Valid audit levels -const ( - // LevelNone disables auditing - LevelNone Level = "None" - // LevelMetadata provides the basic level of auditing. - LevelMetadata Level = "Metadata" - // LevelRequest provides Metadata level of auditing, and additionally - // logs the request object (does not apply for non-resource requests). - LevelRequest Level = "Request" - // LevelRequestResponse provides Request level of auditing, and additionally - // logs the response object (does not apply for non-resource requests and watches). 
- LevelRequestResponse Level = "RequestResponse" -) - -// Stage defines the stages in request handling during which audit events may be generated. -type Stage string - -// Valid audit stages. -const ( - // The stage for events generated after the audit handler receives the request, but before it - // is delegated down the handler chain. - StageRequestReceived = "RequestReceived" - // The stage for events generated after the response headers are sent, but before the response body - // is sent. This stage is only generated for long-running requests (e.g. watch). - StageResponseStarted = "ResponseStarted" - // The stage for events generated after the response body has been completed, and no more bytes - // will be sent. - StageResponseComplete = "ResponseComplete" - // The stage for events generated when a panic occurred. - StagePanic = "Panic" -) - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// AuditSink represents a cluster level audit sink -type AuditSink struct { - metav1.TypeMeta `json:",inline"` - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Spec defines the audit configuration spec - Spec AuditSinkSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` -} - -// AuditSinkSpec holds the spec for the audit sink -type AuditSinkSpec struct { - // Policy defines the policy for selecting which events should be sent to the webhook - // required - Policy Policy `json:"policy" protobuf:"bytes,1,opt,name=policy"` - - // Webhook to send events - // required - Webhook Webhook `json:"webhook" protobuf:"bytes,2,opt,name=webhook"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// AuditSinkList is a list of AuditSink items. -type AuditSinkList struct { - metav1.TypeMeta `json:",inline"` - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // List of audit configurations. - Items []AuditSink `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// Policy defines the configuration of how audit events are logged -type Policy struct { - // The Level that all requests are recorded at. - // available options: None, Metadata, Request, RequestResponse - // required - Level Level `json:"level" protobuf:"bytes,1,opt,name=level"` - - // Stages is a list of stages for which events are created. 
- // +optional - Stages []Stage `json:"stages" protobuf:"bytes,2,opt,name=stages"` -} - -// Webhook holds the configuration of the webhook -type Webhook struct { - // Throttle holds the options for throttling the webhook - // +optional - Throttle *WebhookThrottleConfig `json:"throttle,omitempty" protobuf:"bytes,1,opt,name=throttle"` - - // ClientConfig holds the connection parameters for the webhook - // required - ClientConfig WebhookClientConfig `json:"clientConfig" protobuf:"bytes,2,opt,name=clientConfig"` -} - -// WebhookThrottleConfig holds the configuration for throttling events -type WebhookThrottleConfig struct { - // ThrottleQPS maximum number of batches per second - // default 10 QPS - // +optional - QPS *int64 `json:"qps,omitempty" protobuf:"bytes,1,opt,name=qps"` - - // ThrottleBurst is the maximum number of events sent at the same moment - // default 15 QPS - // +optional - Burst *int64 `json:"burst,omitempty" protobuf:"bytes,2,opt,name=burst"` -} - -// WebhookClientConfig contains the information to make a connection with the webhook -type WebhookClientConfig struct { - // `url` gives the location of the webhook, in standard URL form - // (`scheme://host:port/path`). Exactly one of `url` or `service` - // must be specified. - // - // The `host` should not refer to a service running in the cluster; use - // the `service` field instead. The host might be resolved via external - // DNS in some apiservers (e.g., `kube-apiserver` cannot resolve - // in-cluster DNS as that would be a layering violation). `host` may - // also be an IP address. - // - // Please note that using `localhost` or `127.0.0.1` as a `host` is - // risky unless you take great care to run this webhook on all hosts - // which run an apiserver which might need to make calls to this - // webhook. Such installs are likely to be non-portable, i.e., not easy - // to turn up in a new cluster. - // - // The scheme must be "https"; the URL must begin with "https://". - // - // A path is optional, and if present may be any string permissible in - // a URL. You may use the path to pass an arbitrary string to the - // webhook, for example, a cluster identifier. - // - // Attempting to use a user or basic auth e.g. "user:password@" is not - // allowed. Fragments ("#...") and query parameters ("?...") are not - // allowed, either. - // - // +optional - URL *string `json:"url,omitempty" protobuf:"bytes,1,opt,name=url"` - - // `service` is a reference to the service for this webhook. Either - // `service` or `url` must be specified. - // - // If the webhook is running within the cluster, then you should use `service`. - // - // +optional - Service *ServiceReference `json:"service,omitempty" protobuf:"bytes,2,opt,name=service"` - - // `caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. - // If unspecified, system trust roots on the apiserver are used. - // +optional - CABundle []byte `json:"caBundle,omitempty" protobuf:"bytes,3,opt,name=caBundle"` -} - -// ServiceReference holds a reference to Service.legacy.k8s.io -type ServiceReference struct { - // `namespace` is the namespace of the service. - // Required - Namespace string `json:"namespace" protobuf:"bytes,1,opt,name=namespace"` - - // `name` is the name of the service. - // Required - Name string `json:"name" protobuf:"bytes,2,opt,name=name"` - - // `path` is an optional URL path which will be sent in any request to - // this service. 
- // +optional - Path *string `json:"path,omitempty" protobuf:"bytes,3,opt,name=path"` - - // If specified, the port on the service that hosting webhook. - // Default to 443 for backward compatibility. - // `port` should be a valid port number (1-65535, inclusive). - // +optional - Port *int32 `json:"port,omitempty" protobuf:"varint,4,opt,name=port"` -} diff --git a/vendor/k8s.io/api/auditregistration/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/auditregistration/v1alpha1/types_swagger_doc_generated.go deleted file mode 100644 index 1a86f4da5..000000000 --- a/vendor/k8s.io/api/auditregistration/v1alpha1/types_swagger_doc_generated.go +++ /dev/null @@ -1,111 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -// This file contains a collection of methods that can be used from go-restful to -// generate Swagger API documentation for its models. Please read this PR for more -// information on the implementation: https://github.com/emicklei/go-restful/pull/215 -// -// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if -// they are on one line! For multiple line or blocks that you want to ignore use ---. -// Any context after a --- is ignored. -// -// Those methods can be generated by using hack/update-generated-swagger-docs.sh - -// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. -var map_AuditSink = map[string]string{ - "": "AuditSink represents a cluster level audit sink", - "spec": "Spec defines the audit configuration spec", -} - -func (AuditSink) SwaggerDoc() map[string]string { - return map_AuditSink -} - -var map_AuditSinkList = map[string]string{ - "": "AuditSinkList is a list of AuditSink items.", - "items": "List of audit configurations.", -} - -func (AuditSinkList) SwaggerDoc() map[string]string { - return map_AuditSinkList -} - -var map_AuditSinkSpec = map[string]string{ - "": "AuditSinkSpec holds the spec for the audit sink", - "policy": "Policy defines the policy for selecting which events should be sent to the webhook required", - "webhook": "Webhook to send events required", -} - -func (AuditSinkSpec) SwaggerDoc() map[string]string { - return map_AuditSinkSpec -} - -var map_Policy = map[string]string{ - "": "Policy defines the configuration of how audit events are logged", - "level": "The Level that all requests are recorded at. available options: None, Metadata, Request, RequestResponse required", - "stages": "Stages is a list of stages for which events are created.", -} - -func (Policy) SwaggerDoc() map[string]string { - return map_Policy -} - -var map_ServiceReference = map[string]string{ - "": "ServiceReference holds a reference to Service.legacy.k8s.io", - "namespace": "`namespace` is the namespace of the service. Required", - "name": "`name` is the name of the service. Required", - "path": "`path` is an optional URL path which will be sent in any request to this service.", - "port": "If specified, the port on the service that hosting webhook. 
Default to 443 for backward compatibility. `port` should be a valid port number (1-65535, inclusive).", -} - -func (ServiceReference) SwaggerDoc() map[string]string { - return map_ServiceReference -} - -var map_Webhook = map[string]string{ - "": "Webhook holds the configuration of the webhook", - "throttle": "Throttle holds the options for throttling the webhook", - "clientConfig": "ClientConfig holds the connection parameters for the webhook required", -} - -func (Webhook) SwaggerDoc() map[string]string { - return map_Webhook -} - -var map_WebhookClientConfig = map[string]string{ - "": "WebhookClientConfig contains the information to make a connection with the webhook", - "url": "`url` gives the location of the webhook, in standard URL form (`scheme://host:port/path`). Exactly one of `url` or `service` must be specified.\n\nThe `host` should not refer to a service running in the cluster; use the `service` field instead. The host might be resolved via external DNS in some apiservers (e.g., `kube-apiserver` cannot resolve in-cluster DNS as that would be a layering violation). `host` may also be an IP address.\n\nPlease note that using `localhost` or `127.0.0.1` as a `host` is risky unless you take great care to run this webhook on all hosts which run an apiserver which might need to make calls to this webhook. Such installs are likely to be non-portable, i.e., not easy to turn up in a new cluster.\n\nThe scheme must be \"https\"; the URL must begin with \"https://\".\n\nA path is optional, and if present may be any string permissible in a URL. You may use the path to pass an arbitrary string to the webhook, for example, a cluster identifier.\n\nAttempting to use a user or basic auth e.g. \"user:password@\" is not allowed. Fragments (\"#...\") and query parameters (\"?...\") are not allowed, either.", - "service": "`service` is a reference to the service for this webhook. Either `service` or `url` must be specified.\n\nIf the webhook is running within the cluster, then you should use `service`.", - "caBundle": "`caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. If unspecified, system trust roots on the apiserver are used.", -} - -func (WebhookClientConfig) SwaggerDoc() map[string]string { - return map_WebhookClientConfig -} - -var map_WebhookThrottleConfig = map[string]string{ - "": "WebhookThrottleConfig holds the configuration for throttling events", - "qps": "ThrottleQPS maximum number of batches per second default 10 QPS", - "burst": "ThrottleBurst is the maximum number of events sent at the same moment default 15 QPS", -} - -func (WebhookThrottleConfig) SwaggerDoc() map[string]string { - return map_WebhookThrottleConfig -} - -// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/auditregistration/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/auditregistration/v1alpha1/zz_generated.deepcopy.go deleted file mode 100644 index 621a19e83..000000000 --- a/vendor/k8s.io/api/auditregistration/v1alpha1/zz_generated.deepcopy.go +++ /dev/null @@ -1,229 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
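The qps/burst pair documented just above is token-bucket shaped: a sustained rate plus headroom for short spikes. As a sketch of the semantics only (not of how the apiserver enforces it), the documented defaults of 10 QPS and burst 15 map directly onto golang.org/x/time/rate:

package main

import (
	"context"
	"fmt"

	"golang.org/x/time/rate"
)

func main() {
	// Sustained 10 events/sec, at most 15 in a burst — the defaults named
	// in the WebhookThrottleConfig comments above.
	limiter := rate.NewLimiter(rate.Limit(10), 15)

	for i := 0; i < 3; i++ {
		// Wait blocks until a token is available or the context ends.
		if err := limiter.Wait(context.Background()); err != nil {
			fmt.Println("throttle interrupted:", err)
			return
		}
		fmt.Println("sent batch", i)
	}
}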
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by deepcopy-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AuditSink) DeepCopyInto(out *AuditSink) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuditSink. -func (in *AuditSink) DeepCopy() *AuditSink { - if in == nil { - return nil - } - out := new(AuditSink) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *AuditSink) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AuditSinkList) DeepCopyInto(out *AuditSinkList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]AuditSink, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuditSinkList. -func (in *AuditSinkList) DeepCopy() *AuditSinkList { - if in == nil { - return nil - } - out := new(AuditSinkList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *AuditSinkList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AuditSinkSpec) DeepCopyInto(out *AuditSinkSpec) { - *out = *in - in.Policy.DeepCopyInto(&out.Policy) - in.Webhook.DeepCopyInto(&out.Webhook) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuditSinkSpec. -func (in *AuditSinkSpec) DeepCopy() *AuditSinkSpec { - if in == nil { - return nil - } - out := new(AuditSinkSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Policy) DeepCopyInto(out *Policy) { - *out = *in - if in.Stages != nil { - in, out := &in.Stages, &out.Stages - *out = make([]Stage, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Policy. -func (in *Policy) DeepCopy() *Policy { - if in == nil { - return nil - } - out := new(Policy) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ServiceReference) DeepCopyInto(out *ServiceReference) { - *out = *in - if in.Path != nil { - in, out := &in.Path, &out.Path - *out = new(string) - **out = **in - } - if in.Port != nil { - in, out := &in.Port, &out.Port - *out = new(int32) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceReference. -func (in *ServiceReference) DeepCopy() *ServiceReference { - if in == nil { - return nil - } - out := new(ServiceReference) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Webhook) DeepCopyInto(out *Webhook) { - *out = *in - if in.Throttle != nil { - in, out := &in.Throttle, &out.Throttle - *out = new(WebhookThrottleConfig) - (*in).DeepCopyInto(*out) - } - in.ClientConfig.DeepCopyInto(&out.ClientConfig) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Webhook. -func (in *Webhook) DeepCopy() *Webhook { - if in == nil { - return nil - } - out := new(Webhook) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *WebhookClientConfig) DeepCopyInto(out *WebhookClientConfig) { - *out = *in - if in.URL != nil { - in, out := &in.URL, &out.URL - *out = new(string) - **out = **in - } - if in.Service != nil { - in, out := &in.Service, &out.Service - *out = new(ServiceReference) - (*in).DeepCopyInto(*out) - } - if in.CABundle != nil { - in, out := &in.CABundle, &out.CABundle - *out = make([]byte, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookClientConfig. -func (in *WebhookClientConfig) DeepCopy() *WebhookClientConfig { - if in == nil { - return nil - } - out := new(WebhookClientConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *WebhookThrottleConfig) DeepCopyInto(out *WebhookThrottleConfig) { - *out = *in - if in.QPS != nil { - in, out := &in.QPS, &out.QPS - *out = new(int64) - **out = **in - } - if in.Burst != nil { - in, out := &in.Burst, &out.Burst - *out = new(int64) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookThrottleConfig. -func (in *WebhookThrottleConfig) DeepCopy() *WebhookThrottleConfig { - if in == nil { - return nil - } - out := new(WebhookThrottleConfig) - in.DeepCopyInto(out) - return out -} diff --git a/vendor/k8s.io/api/authentication/v1/doc.go b/vendor/k8s.io/api/authentication/v1/doc.go index 1614265bd..2d2ed2ee8 100644 --- a/vendor/k8s.io/api/authentication/v1/doc.go +++ b/vendor/k8s.io/api/authentication/v1/doc.go @@ -15,8 +15,6 @@ limitations under the License. */ // +k8s:deepcopy-gen=package -// +k8s:protobuf-gen=package // +groupName=authentication.k8s.io // +k8s:openapi-gen=true - package v1 // import "k8s.io/api/authentication/v1" diff --git a/vendor/k8s.io/api/authentication/v1/generated.pb.go b/vendor/k8s.io/api/authentication/v1/generated.pb.go index 4e7f28d8c..2ce2e2d78 100644 --- a/vendor/k8s.io/api/authentication/v1/generated.pb.go +++ b/vendor/k8s.io/api/authentication/v1/generated.pb.go @@ -14,8 +14,9 @@ See the License for the specific language governing permissions and limitations under the License. 
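Every DeepCopyInto body above applies the same rule: pointer, slice, and map fields get fresh allocations rather than copied addresses. A small sketch of why a plain struct copy is not enough, using a trimmed stand-in for ServiceReference:

package main

import "fmt"

type Ref struct {
	Port *int32 // pointer field, like ServiceReference.Port above
}

// DeepCopy allocates a new Port so the copy shares no memory with the original.
func (in *Ref) DeepCopy() *Ref {
	out := new(Ref)
	if in.Port != nil {
		out.Port = new(int32)
		*out.Port = *in.Port
	}
	return out
}

func main() {
	p := int32(443)
	a := Ref{Port: &p}

	shallow := a         // copies the struct, but Port still points at p
	deep := a.DeepCopy() // allocates its own int32

	*a.Port = 8443
	fmt.Println(*shallow.Port, *deep.Port) // 8443 443 — only the deep copy is isolated
}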
*/ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// Code generated by protoc-gen-gogo. // source: k8s.io/kubernetes/vendor/k8s.io/api/authentication/v1/generated.proto +// DO NOT EDIT! /* Package v1 is a generated protocol buffer package. @@ -355,21 +356,6 @@ func (m *TokenReviewSpec) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Token))) i += copy(dAtA[i:], m.Token) - if len(m.Audiences) > 0 { - for _, s := range m.Audiences { - dAtA[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } return i, nil } @@ -408,21 +394,6 @@ func (m *TokenReviewStatus) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Error))) i += copy(dAtA[i:], m.Error) - if len(m.Audiences) > 0 { - for _, s := range m.Audiences { - dAtA[i] = 0x22 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } return i, nil } @@ -498,6 +469,24 @@ func (m *UserInfo) MarshalTo(dAtA []byte) (int, error) { return i, nil } +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -591,12 +580,6 @@ func (m *TokenReviewSpec) Size() (n int) { _ = l l = len(m.Token) n += 1 + l + sovGenerated(uint64(l)) - if len(m.Audiences) > 0 { - for _, s := range m.Audiences { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } return n } @@ -608,12 +591,6 @@ func (m *TokenReviewStatus) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) l = len(m.Error) n += 1 + l + sovGenerated(uint64(l)) - if len(m.Audiences) > 0 { - for _, s := range m.Audiences { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } return n } @@ -721,7 +698,6 @@ func (this *TokenReviewSpec) String() string { } s := strings.Join([]string{`&TokenReviewSpec{`, `Token:` + fmt.Sprintf("%v", this.Token) + `,`, - `Audiences:` + fmt.Sprintf("%v", this.Audiences) + `,`, `}`, }, "") return s @@ -734,7 +710,6 @@ func (this *TokenReviewStatus) String() string { `Authenticated:` + fmt.Sprintf("%v", this.Authenticated) + `,`, `User:` + strings.Replace(strings.Replace(this.User.String(), "UserInfo", "UserInfo", 1), `&`, ``, 1) + `,`, `Error:` + fmt.Sprintf("%v", this.Error) + `,`, - `Audiences:` + fmt.Sprintf("%v", this.Audiences) + `,`, `}`, }, "") return s @@ -1594,35 +1569,6 @@ func (m *TokenReviewSpec) Unmarshal(dAtA []byte) error { } m.Token = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Audiences", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if 
b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Audiences = append(m.Audiences, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -1752,35 +1698,6 @@ func (m *TokenReviewStatus) Unmarshal(dAtA []byte) error { } m.Error = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Audiences", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Audiences = append(m.Audiences, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -1944,14 +1861,51 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey if m.Extra == nil { m.Extra = make(map[string]ExtraValue) } - var mapkey string - mapvalue := &ExtraValue{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 + if iNdEx < postIndex { + var valuekey uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -1961,85 +1915,46 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + valuekey |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if 
iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - if postmsgIndex > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - mapvalue = &ExtraValue{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break } - iNdEx += skippy } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &ExtraValue{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + m.Extra[mapkey] = *mapvalue + } else { + var mapvalue ExtraValue + m.Extra[mapkey] = mapvalue } - m.Extra[mapkey] = *mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -2172,62 +2087,61 @@ func init() { } var fileDescriptorGenerated = []byte{ - // 900 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x55, 0xcf, 0x6f, 0xe3, 0x44, - 0x14, 0x8e, 0xf3, 0xa3, 0x4a, 0x26, 0xdb, 0xd2, 0xce, 0xb2, 0x52, 0x54, 0xc0, 0x2e, 0x41, 0x42, - 0x15, 0xb0, 0xf6, 0x26, 0x42, 0xb0, 0x5a, 0x24, 0xa4, 0x9a, 0x46, 0x10, 0x21, 0xd8, 0xd5, 0xec, - 0xb6, 0x20, 0x4e, 0x4c, 0xec, 0xd7, 0xc4, 0x04, 0x8f, 0x8d, 0x3d, 0x0e, 0x9b, 0xdb, 0xfe, 0x09, - 0x1c, 0x41, 0xe2, 0xc0, 0x1f, 0x81, 0xc4, 0xbf, 0xd0, 0xe3, 0x8a, 0xd3, 0x1e, 0x50, 0x44, 0xcd, - 0x95, 0x23, 0x27, 0x4e, 0x68, 0xc6, 0xd3, 0x38, 0x4e, 0xda, 0x34, 0x27, 0x6e, 0x9e, 0xf7, 0xbe, - 0xf7, 0xbd, 0x37, 0xdf, 0x7c, 0x9e, 0x41, 0xbd, 0xf1, 0xfd, 0xd8, 0xf4, 0x02, 0x6b, 0x9c, 0x0c, - 0x20, 0x62, 0xc0, 0x21, 0xb6, 0x26, 0xc0, 0xdc, 0x20, 0xb2, 0x54, 0x82, 0x86, 0x9e, 0x45, 0x13, - 0x3e, 0x02, 0xc6, 0x3d, 0x87, 0x72, 0x2f, 0x60, 0xd6, 0xa4, 0x63, 0x0d, 0x81, 0x41, 0x44, 0x39, - 0xb8, 0x66, 0x18, 0x05, 0x3c, 0xc0, 0xaf, 0x66, 0x68, 0x93, 0x86, 0x9e, 0x59, 0x44, 0x9b, 0x93, - 0xce, 0xfe, 0xdd, 0xa1, 0xc7, 0x47, 0xc9, 0xc0, 0x74, 0x02, 0xdf, 0x1a, 0x06, 0xc3, 0xc0, 0x92, - 0x45, 0x83, 0xe4, 0x4c, 0xae, 0xe4, 0x42, 0x7e, 0x65, 0x64, 0xfb, 0xef, 0xe6, 0xad, 0x7d, 0xea, - 0x8c, 0x3c, 0x06, 0xd1, 0xd4, 0x0a, 0xc7, 0x43, 0x11, 0x88, 0x2d, 0x1f, 0x38, 0xbd, 0x62, 0x84, - 0x7d, 0xeb, 0xba, 0xaa, 0x28, 0x61, 0xdc, 0xf3, 0x61, 0xa5, 0xe0, 0xbd, 0x9b, 0x0a, 0x62, 0x67, - 0x04, 0x3e, 0x5d, 0xae, 0x6b, 0xff, 0xae, 0xa1, 0x97, 0xed, 0x20, 0x61, 0xee, 0xc3, 0xc1, 0x37, - 0xe0, 0x70, 0x02, 0x67, 0x10, 0x01, 0x73, 0x00, 0x1f, 0xa0, 0xea, 0xd8, 0x63, 0x6e, 0x4b, 0x3b, - 0xd0, 0x0e, 0x1b, 0xf6, 0xad, 0xf3, 0x99, 0x51, 0x4a, 0x67, 0x46, 0xf5, 0x53, 0x8f, 0xb9, 0x44, - 0x66, 0x70, 0x17, 0x21, 0xfa, 0xa8, 0x7f, 0x0a, 0x51, 0xec, 0x05, 0xac, 0x55, 0x96, 0x38, 0xac, - 0x70, 0xe8, 0x68, 0x9e, 0x21, 0x0b, 0x28, 0xc1, 0xca, 0xa8, 0x0f, 0xad, 0x4a, 0x91, 0xf5, 0x73, - 0xea, 0x03, 0x91, 0x19, 0x6c, 0xa3, 0x4a, 0xd2, 0x3f, 0x6e, 0x55, 
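The marshal and unmarshal churn in these hunks is hand-rolled protobuf varint framing — the `for l >= 1<<7` loops on the write side and the `stringLen |= (uint64(b) & 0x7F) << shift` loops on the read side both implement seven payload bits per byte with the high bit marking continuation. A self-contained equivalent:

package main

import (
	"errors"
	"fmt"
)

// putUvarint appends v as a protobuf varint, mirroring the generated MarshalTo loops.
func putUvarint(dst []byte, v uint64) []byte {
	for v >= 1<<7 {
		dst = append(dst, byte(v&0x7f|0x80)) // low seven bits, continuation bit set
		v >>= 7
	}
	return append(dst, byte(v)) // final byte, continuation bit clear
}

// uvarint decodes one varint, mirroring the generated Unmarshal loops.
func uvarint(src []byte) (uint64, int, error) {
	var v uint64
	for i, shift := 0, uint(0); i < len(src); i, shift = i+1, shift+7 {
		if shift >= 64 {
			return 0, 0, errors.New("varint overflows uint64")
		}
		b := src[i]
		v |= uint64(b&0x7f) << shift
		if b < 0x80 { // continuation bit clear: done
			return v, i + 1, nil
		}
	}
	return 0, 0, errors.New("unexpected EOF")
}

func main() {
	buf := putUvarint(nil, 300)
	fmt.Printf("% x\n", buf) // ac 02
	v, n, _ := uvarint(buf)
	fmt.Println(v, n) // 300 2
}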
0x25, 0xe0, 0x9e, 0x02, 0x54, - 0x4e, 0xfa, 0xc7, 0xff, 0xce, 0x8c, 0xd7, 0xaf, 0xdb, 0x24, 0x9f, 0x86, 0x10, 0x9b, 0x27, 0xfd, - 0x63, 0x22, 0x8a, 0xdb, 0xef, 0x23, 0xd4, 0x7b, 0xca, 0x23, 0x7a, 0x4a, 0xbf, 0x4d, 0x00, 0x1b, - 0xa8, 0xe6, 0x71, 0xf0, 0xe3, 0x96, 0x76, 0x50, 0x39, 0x6c, 0xd8, 0x8d, 0x74, 0x66, 0xd4, 0xfa, - 0x22, 0x40, 0xb2, 0xf8, 0x83, 0xfa, 0x8f, 0xbf, 0x18, 0xa5, 0x67, 0x7f, 0x1c, 0x94, 0xda, 0x3f, - 0x97, 0xd1, 0xad, 0x27, 0xc1, 0x18, 0x18, 0x81, 0xef, 0x12, 0x88, 0x39, 0xfe, 0x1a, 0xd5, 0xc5, - 0x11, 0xb9, 0x94, 0x53, 0xa9, 0x44, 0xb3, 0x7b, 0xcf, 0xcc, 0xdd, 0x31, 0x1f, 0xc2, 0x0c, 0xc7, - 0x43, 0x11, 0x88, 0x4d, 0x81, 0x36, 0x27, 0x1d, 0x33, 0x93, 0xf3, 0x33, 0xe0, 0x34, 0xd7, 0x24, - 0x8f, 0x91, 0x39, 0x2b, 0x7e, 0x84, 0xaa, 0x71, 0x08, 0x8e, 0xd4, 0xaf, 0xd9, 0x35, 0xcd, 0x75, - 0xde, 0x33, 0x17, 0x67, 0x7b, 0x1c, 0x82, 0x93, 0x2b, 0x28, 0x56, 0x44, 0x32, 0xe1, 0x2f, 0xd1, - 0x56, 0xcc, 0x29, 0x4f, 0x62, 0xa9, 0x72, 0x71, 0xe2, 0x9b, 0x38, 0x65, 0x9d, 0xbd, 0xa3, 0x58, - 0xb7, 0xb2, 0x35, 0x51, 0x7c, 0xed, 0x7f, 0x34, 0xb4, 0xbb, 0x3c, 0x02, 0x7e, 0x1b, 0x35, 0x68, - 0xe2, 0x7a, 0xc2, 0x34, 0x97, 0x12, 0x6f, 0xa7, 0x33, 0xa3, 0x71, 0x74, 0x19, 0x24, 0x79, 0x1e, - 0x33, 0xb4, 0x33, 0x28, 0xb8, 0x4d, 0xcd, 0xd8, 0x5d, 0x3f, 0xe3, 0x55, 0x0e, 0xb5, 0x71, 0x3a, - 0x33, 0x76, 0x8a, 0x19, 0xb2, 0xc4, 0x8e, 0x3f, 0x42, 0x7b, 0xf0, 0x34, 0xf4, 0x22, 0xc9, 0xf4, - 0x18, 0x9c, 0x80, 0xb9, 0xb1, 0xf4, 0x56, 0xc5, 0xbe, 0x93, 0xce, 0x8c, 0xbd, 0xde, 0x72, 0x92, - 0xac, 0xe2, 0xdb, 0xbf, 0x6a, 0x08, 0xaf, 0xaa, 0x84, 0xdf, 0x40, 0x35, 0x2e, 0xa2, 0xea, 0x17, - 0xd9, 0x56, 0xa2, 0xd5, 0x32, 0x68, 0x96, 0xc3, 0x53, 0x74, 0x3b, 0x27, 0x7c, 0xe2, 0xf9, 0x10, - 0x73, 0xea, 0x87, 0xea, 0xb4, 0xdf, 0xda, 0xcc, 0x4b, 0xa2, 0xcc, 0x7e, 0x45, 0xd1, 0xdf, 0xee, - 0xad, 0xd2, 0x91, 0xab, 0x7a, 0xb4, 0x7f, 0x2a, 0xa3, 0xa6, 0x1a, 0x7b, 0xe2, 0xc1, 0xf7, 0xff, - 0x83, 0x97, 0x1f, 0x16, 0xbc, 0x7c, 0x77, 0x23, 0xdf, 0x89, 0xd1, 0xae, 0xb5, 0xf2, 0x17, 0x4b, - 0x56, 0xb6, 0x36, 0xa7, 0x5c, 0xef, 0x64, 0x07, 0xbd, 0xb4, 0xd4, 0x7f, 0xb3, 0xe3, 0x2c, 0x98, - 0xbd, 0xbc, 0xde, 0xec, 0xed, 0xbf, 0x35, 0xb4, 0xb7, 0x32, 0x12, 0xfe, 0x00, 0x6d, 0x2f, 0x4c, - 0x0e, 0xd9, 0x0d, 0x5b, 0xb7, 0xef, 0xa8, 0x7e, 0xdb, 0x47, 0x8b, 0x49, 0x52, 0xc4, 0xe2, 0x4f, - 0x50, 0x35, 0x89, 0x21, 0x52, 0x0a, 0xbf, 0xb9, 0x5e, 0x8e, 0x93, 0x18, 0xa2, 0x3e, 0x3b, 0x0b, - 0x72, 0x69, 0x45, 0x84, 0x48, 0x06, 0xb1, 0x5d, 0x88, 0xa2, 0x20, 0x52, 0x57, 0xf1, 0x7c, 0xbb, - 0x3d, 0x11, 0x24, 0x59, 0xae, 0xb8, 0xdd, 0xea, 0x0d, 0xdb, 0xfd, 0xad, 0x8c, 0xea, 0x97, 0x2d, - 0xf1, 0x3b, 0xa8, 0x2e, 0xda, 0xc8, 0xcb, 0x3e, 0x13, 0x74, 0x57, 0x75, 0x90, 0x18, 0x11, 0x27, - 0x73, 0x04, 0x7e, 0x0d, 0x55, 0x12, 0xcf, 0x55, 0x6f, 0x48, 0x73, 0xe1, 0xd2, 0x27, 0x22, 0x8e, - 0xdb, 0x68, 0x6b, 0x18, 0x05, 0x49, 0x28, 0x6c, 0x20, 0x66, 0x40, 0xe2, 0x44, 0x3f, 0x96, 0x11, - 0xa2, 0x32, 0xf8, 0x14, 0xd5, 0x40, 0xdc, 0xf9, 0x72, 0xcc, 0x66, 0xb7, 0xb3, 0x99, 0x34, 0xa6, - 0x7c, 0x27, 0x7a, 0x8c, 0x47, 0xd3, 0x05, 0x09, 0x44, 0x8c, 0x64, 0x74, 0xfb, 0x03, 0xf5, 0x96, - 0x48, 0x0c, 0xde, 0x45, 0x95, 0x31, 0x4c, 0xb3, 0x1d, 0x11, 0xf1, 0x89, 0x3f, 0x44, 0xb5, 0x89, - 0x78, 0x66, 0xd4, 0x91, 0x1c, 0xae, 0xef, 0x9b, 0x3f, 0x4b, 0x24, 0x2b, 0x7b, 0x50, 0xbe, 0xaf, - 0xd9, 0x87, 0xe7, 0x17, 0x7a, 0xe9, 0xf9, 0x85, 0x5e, 0x7a, 0x71, 0xa1, 0x97, 0x9e, 0xa5, 0xba, - 0x76, 0x9e, 0xea, 0xda, 0xf3, 0x54, 0xd7, 0x5e, 0xa4, 0xba, 0xf6, 0x67, 0xaa, 0x6b, 0x3f, 0xfc, - 0xa5, 0x97, 0xbe, 0x2a, 0x4f, 0x3a, 0xff, 0x05, 0x00, 0x00, 0xff, 0xff, 0x5f, 0x04, 0x81, 
0x6f, - 0xe2, 0x08, 0x00, 0x00, + // 892 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x55, 0xcf, 0x8f, 0xdb, 0x44, + 0x14, 0x8e, 0xf3, 0x63, 0xb5, 0x99, 0x74, 0x97, 0xdd, 0x29, 0x95, 0xa2, 0x05, 0xec, 0x60, 0x24, + 0x14, 0x01, 0xb5, 0x9b, 0x08, 0x95, 0xaa, 0x48, 0x48, 0x6b, 0x36, 0x82, 0x08, 0x41, 0xab, 0x69, + 0x77, 0x41, 0x9c, 0x98, 0xd8, 0x6f, 0xb3, 0x26, 0x78, 0x6c, 0xec, 0x71, 0x68, 0x6e, 0xfd, 0x13, + 0x38, 0x82, 0xc4, 0x81, 0x3f, 0x02, 0x89, 0x23, 0xd7, 0x3d, 0x56, 0x9c, 0x7a, 0x40, 0x11, 0x6b, + 0xfe, 0x05, 0x4e, 0x9c, 0xd0, 0x8c, 0x67, 0xe3, 0xfc, 0xd8, 0x4d, 0x73, 0xea, 0x2d, 0xf3, 0xde, + 0xf7, 0xbe, 0x79, 0xef, 0x9b, 0x2f, 0xcf, 0xa8, 0x37, 0xba, 0x97, 0x58, 0x7e, 0x68, 0x8f, 0xd2, + 0x01, 0xc4, 0x0c, 0x38, 0x24, 0xf6, 0x18, 0x98, 0x17, 0xc6, 0xb6, 0x4a, 0xd0, 0xc8, 0xb7, 0x69, + 0xca, 0xcf, 0x80, 0x71, 0xdf, 0xa5, 0xdc, 0x0f, 0x99, 0x3d, 0xee, 0xd8, 0x43, 0x60, 0x10, 0x53, + 0x0e, 0x9e, 0x15, 0xc5, 0x21, 0x0f, 0xf1, 0xeb, 0x39, 0xda, 0xa2, 0x91, 0x6f, 0x2d, 0xa2, 0xad, + 0x71, 0xe7, 0xe0, 0xf6, 0xd0, 0xe7, 0x67, 0xe9, 0xc0, 0x72, 0xc3, 0xc0, 0x1e, 0x86, 0xc3, 0xd0, + 0x96, 0x45, 0x83, 0xf4, 0x54, 0x9e, 0xe4, 0x41, 0xfe, 0xca, 0xc9, 0x0e, 0xde, 0x2f, 0xae, 0x0e, + 0xa8, 0x7b, 0xe6, 0x33, 0x88, 0x27, 0x76, 0x34, 0x1a, 0x8a, 0x40, 0x62, 0x07, 0xc0, 0xe9, 0x15, + 0x2d, 0x1c, 0xd8, 0xd7, 0x55, 0xc5, 0x29, 0xe3, 0x7e, 0x00, 0x2b, 0x05, 0x77, 0x5f, 0x54, 0x90, + 0xb8, 0x67, 0x10, 0xd0, 0xe5, 0x3a, 0xf3, 0x4f, 0x0d, 0xbd, 0xea, 0x84, 0x29, 0xf3, 0x1e, 0x0c, + 0xbe, 0x05, 0x97, 0x13, 0x38, 0x85, 0x18, 0x98, 0x0b, 0xb8, 0x85, 0xaa, 0x23, 0x9f, 0x79, 0x4d, + 0xad, 0xa5, 0xb5, 0xeb, 0xce, 0x8d, 0xf3, 0xa9, 0x51, 0xca, 0xa6, 0x46, 0xf5, 0x33, 0x9f, 0x79, + 0x44, 0x66, 0x70, 0x17, 0x21, 0xfa, 0xb0, 0x7f, 0x02, 0x71, 0xe2, 0x87, 0xac, 0x59, 0x96, 0x38, + 0xac, 0x70, 0xe8, 0x70, 0x96, 0x21, 0x73, 0x28, 0xc1, 0xca, 0x68, 0x00, 0xcd, 0xca, 0x22, 0xeb, + 0x17, 0x34, 0x00, 0x22, 0x33, 0xd8, 0x41, 0x95, 0xb4, 0x7f, 0xd4, 0xac, 0x4a, 0xc0, 0x1d, 0x05, + 0xa8, 0x1c, 0xf7, 0x8f, 0xfe, 0x9b, 0x1a, 0x6f, 0x5e, 0x37, 0x24, 0x9f, 0x44, 0x90, 0x58, 0xc7, + 0xfd, 0x23, 0x22, 0x8a, 0xcd, 0x0f, 0x10, 0xea, 0x3d, 0xe1, 0x31, 0x3d, 0xa1, 0xdf, 0xa5, 0x80, + 0x0d, 0x54, 0xf3, 0x39, 0x04, 0x49, 0x53, 0x6b, 0x55, 0xda, 0x75, 0xa7, 0x9e, 0x4d, 0x8d, 0x5a, + 0x5f, 0x04, 0x48, 0x1e, 0xbf, 0xbf, 0xfd, 0xd3, 0xaf, 0x46, 0xe9, 0xe9, 0x5f, 0xad, 0x92, 0xf9, + 0x4b, 0x19, 0xdd, 0x78, 0x1c, 0x8e, 0x80, 0x11, 0xf8, 0x3e, 0x85, 0x84, 0xe3, 0x6f, 0xd0, 0xb6, + 0x78, 0x22, 0x8f, 0x72, 0x2a, 0x95, 0x68, 0x74, 0xef, 0x58, 0x85, 0x3b, 0x66, 0x4d, 0x58, 0xd1, + 0x68, 0x28, 0x02, 0x89, 0x25, 0xd0, 0xd6, 0xb8, 0x63, 0xe5, 0x72, 0x7e, 0x0e, 0x9c, 0x16, 0x9a, + 0x14, 0x31, 0x32, 0x63, 0xc5, 0x0f, 0x51, 0x35, 0x89, 0xc0, 0x95, 0xfa, 0x35, 0xba, 0x96, 0xb5, + 0xce, 0x7b, 0xd6, 0x7c, 0x6f, 0x8f, 0x22, 0x70, 0x0b, 0x05, 0xc5, 0x89, 0x48, 0x26, 0xfc, 0x15, + 0xda, 0x4a, 0x38, 0xe5, 0x69, 0x22, 0x55, 0x5e, 0xec, 0xf8, 0x45, 0x9c, 0xb2, 0xce, 0xd9, 0x55, + 0xac, 0x5b, 0xf9, 0x99, 0x28, 0x3e, 0xf3, 0x5f, 0x0d, 0xed, 0x2d, 0xb7, 0x80, 0xdf, 0x45, 0x75, + 0x9a, 0x7a, 0xbe, 0x30, 0xcd, 0xa5, 0xc4, 0x3b, 0xd9, 0xd4, 0xa8, 0x1f, 0x5e, 0x06, 0x49, 0x91, + 0xc7, 0x0c, 0xed, 0x0e, 0x16, 0xdc, 0xa6, 0x7a, 0xec, 0xae, 0xef, 0xf1, 0x2a, 0x87, 0x3a, 0x38, + 0x9b, 0x1a, 0xbb, 0x8b, 0x19, 0xb2, 0xc4, 0x8e, 0x3f, 0x46, 0xfb, 0xf0, 0x24, 0xf2, 0x63, 0xc9, + 0xf4, 0x08, 0xdc, 0x90, 0x79, 0x89, 0xf4, 0x56, 0xc5, 0xb9, 0x95, 0x4d, 0x8d, 0xfd, 0xde, 0x72, + 0x92, 0xac, 0xe2, 0xcd, 0xdf, 0x34, 0x84, 
0x57, 0x55, 0xc2, 0x6f, 0xa1, 0x1a, 0x17, 0x51, 0xf5, + 0x17, 0xd9, 0x51, 0xa2, 0xd5, 0x72, 0x68, 0x9e, 0xc3, 0x13, 0x74, 0xb3, 0x20, 0x7c, 0xec, 0x07, + 0x90, 0x70, 0x1a, 0x44, 0xea, 0xb5, 0xdf, 0xd9, 0xcc, 0x4b, 0xa2, 0xcc, 0x79, 0x4d, 0xd1, 0xdf, + 0xec, 0xad, 0xd2, 0x91, 0xab, 0xee, 0x30, 0x7f, 0x2e, 0xa3, 0x86, 0x6a, 0x7b, 0xec, 0xc3, 0x0f, + 0x2f, 0xc1, 0xcb, 0x0f, 0x16, 0xbc, 0x7c, 0x7b, 0x23, 0xdf, 0x89, 0xd6, 0xae, 0xb5, 0xf2, 0x97, + 0x4b, 0x56, 0xb6, 0x37, 0xa7, 0x5c, 0xef, 0xe4, 0xbb, 0xe8, 0x95, 0xa5, 0xfb, 0x37, 0x7a, 0x4e, + 0xf3, 0x0f, 0x0d, 0xed, 0xaf, 0xdc, 0x82, 0x3f, 0x44, 0x3b, 0x73, 0xcd, 0x40, 0xbe, 0x34, 0xb7, + 0x9d, 0x5b, 0x8a, 0x62, 0xe7, 0x70, 0x3e, 0x49, 0x16, 0xb1, 0xf8, 0x53, 0x54, 0x4d, 0x13, 0x88, + 0x95, 0x68, 0x6f, 0xaf, 0x9f, 0xf0, 0x38, 0x81, 0xb8, 0xcf, 0x4e, 0xc3, 0x42, 0x2d, 0x11, 0x21, + 0x92, 0x41, 0x4c, 0x00, 0x71, 0x1c, 0xc6, 0x6a, 0xbb, 0xce, 0x26, 0xe8, 0x89, 0x20, 0xc9, 0x73, + 0xe6, 0xef, 0x65, 0xb4, 0x7d, 0xc9, 0x82, 0xdf, 0x43, 0xdb, 0xa2, 0x52, 0xae, 0xe4, 0x7c, 0xec, + 0x3d, 0x55, 0x24, 0x31, 0x22, 0x4e, 0x66, 0x08, 0xfc, 0x06, 0xaa, 0xa4, 0xbe, 0xa7, 0x36, 0x7d, + 0x63, 0x6e, 0x35, 0x13, 0x11, 0xc7, 0x26, 0xda, 0x1a, 0xc6, 0x61, 0x1a, 0x89, 0xc7, 0x12, 0x5b, + 0x00, 0x09, 0xdd, 0x3f, 0x91, 0x11, 0xa2, 0x32, 0xf8, 0x04, 0xd5, 0x40, 0x6c, 0xe6, 0x66, 0xb5, + 0x55, 0x69, 0x37, 0xba, 0x9d, 0xcd, 0xa6, 0xb5, 0xe4, 0x36, 0xef, 0x31, 0x1e, 0x4f, 0xe6, 0xa6, + 0x12, 0x31, 0x92, 0xd3, 0x1d, 0x0c, 0xd4, 0xc6, 0x97, 0x18, 0xbc, 0x87, 0x2a, 0x23, 0x98, 0xe4, + 0x13, 0x11, 0xf1, 0x13, 0x7f, 0x84, 0x6a, 0x63, 0xf1, 0x31, 0x50, 0x2a, 0xb7, 0xd7, 0xdf, 0x5b, + 0x7c, 0x3c, 0x48, 0x5e, 0x76, 0xbf, 0x7c, 0x4f, 0x73, 0xda, 0xe7, 0x17, 0x7a, 0xe9, 0xd9, 0x85, + 0x5e, 0x7a, 0x7e, 0xa1, 0x97, 0x9e, 0x66, 0xba, 0x76, 0x9e, 0xe9, 0xda, 0xb3, 0x4c, 0xd7, 0x9e, + 0x67, 0xba, 0xf6, 0x77, 0xa6, 0x6b, 0x3f, 0xfe, 0xa3, 0x97, 0xbe, 0x2e, 0x8f, 0x3b, 0xff, 0x07, + 0x00, 0x00, 0xff, 0xff, 0x5e, 0x8d, 0x94, 0x78, 0x88, 0x08, 0x00, 0x00, } diff --git a/vendor/k8s.io/api/authentication/v1/types.go b/vendor/k8s.io/api/authentication/v1/types.go index c48b03691..723457a3d 100644 --- a/vendor/k8s.io/api/authentication/v1/types.go +++ b/vendor/k8s.io/api/authentication/v1/types.go @@ -64,13 +64,6 @@ type TokenReviewSpec struct { // Token is the opaque bearer token. // +optional Token string `json:"token,omitempty" protobuf:"bytes,1,opt,name=token"` - // Audiences is a list of the identifiers that the resource server presented - // with the token identifies as. Audience-aware token authenticators will - // verify that the token was intended for at least one of the audiences in - // this list. If no audiences are provided, the audience will default to the - // audience of the Kubernetes apiserver. - // +optional - Audiences []string `json:"audiences,omitempty" protobuf:"bytes,2,rep,name=audiences"` } // TokenReviewStatus is the result of the token authentication request. @@ -81,17 +74,6 @@ type TokenReviewStatus struct { // User is the UserInfo associated with the provided token. // +optional User UserInfo `json:"user,omitempty" protobuf:"bytes,2,opt,name=user"` - // Audiences are audience identifiers chosen by the authenticator that are - // compatible with both the TokenReview and token. An identifier is any - // identifier in the intersection of the TokenReviewSpec audiences and the - // token's audiences. 
A client of the TokenReview API that sets the - spec.audiences field should validate that a compatible audience identifier - is returned in the status.audiences field to ensure that the TokenReview - server is audience aware. If a TokenReview returns an empty - status.audience field where status.authenticated is "true", the token is - valid against the audience of the Kubernetes API server. - // +optional - Audiences []string `json:"audiences,omitempty" protobuf:"bytes,4,rep,name=audiences"` // Error indicates that the token couldn't be checked // +optional Error string `json:"error,omitempty" protobuf:"bytes,3,opt,name=error"` @@ -155,10 +137,7 @@ type TokenRequestSpec struct { ExpirationSeconds *int64 `json:"expirationSeconds" protobuf:"varint,4,opt,name=expirationSeconds"` // BoundObjectRef is a reference to an object that the token will be bound to. - // The token will only be valid for as long as the bound object exists. - // NOTE: The API server's TokenReview endpoint will validate the - // BoundObjectRef, but other audiences may not. Keep ExpirationSeconds - // small if you want prompt revocation. + // The token will only be valid for as long as the bound object exists. // +optional BoundObjectRef *BoundObjectReference `json:"boundObjectRef" protobuf:"bytes,3,opt,name=boundObjectRef"` } diff --git a/vendor/k8s.io/api/authentication/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/authentication/v1/types_swagger_doc_generated.go index 09f6b920f..6632a5dd5 100644 --- a/vendor/k8s.io/api/authentication/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/authentication/v1/types_swagger_doc_generated.go @@ -51,7 +51,7 @@ var map_TokenRequestSpec = map[string]string{ "": "TokenRequestSpec contains client provided parameters of a token request.", "audiences": "Audiences are the intendend audiences of the token. A recipient of a token must identitfy themself with an identifier in the list of audiences of the token, and otherwise should reject the token. A token issued for multiple audiences may be used to authenticate against any of the audiences listed but implies a high degree of trust between the target audiences.", "expirationSeconds": "ExpirationSeconds is the requested duration of validity of the request. The token issuer may return a token with a different validity duration so a client needs to check the 'expiration' field in a response.", - "boundObjectRef": "BoundObjectRef is a reference to an object that the token will be bound to. The token will only be valid for as long as the bound object exists. NOTE: The API server's TokenReview endpoint will validate the BoundObjectRef, but other audiences may not. Keep ExpirationSeconds small if you want prompt revocation.", + "boundObjectRef": "BoundObjectRef is a reference to an object that the token will be bound to. The token will only be valid for as long as the bound object exists.", } func (TokenRequestSpec) SwaggerDoc() map[string]string { @@ -79,9 +79,8 @@ func (TokenReview) SwaggerDoc() map[string]string { } var map_TokenReviewSpec = map[string]string{ - "": "TokenReviewSpec is a description of the token authentication request.", - "token": "Token is the opaque bearer token.", - "audiences": "Audiences is a list of the identifiers that the resource server presented with the token identifies as. Audience-aware token authenticators will verify that the token was intended for at least one of the audiences in this list. 
If no audiences are provided, the audience will default to the audience of the Kubernetes apiserver.", + "": "TokenReviewSpec is a description of the token authentication request.", + "token": "Token is the opaque bearer token.", } func (TokenReviewSpec) SwaggerDoc() map[string]string { @@ -92,7 +91,6 @@ var map_TokenReviewStatus = map[string]string{ "": "TokenReviewStatus is the result of the token authentication request.", "authenticated": "Authenticated indicates that the token was associated with a known user.", "user": "User is the UserInfo associated with the provided token.", - "audiences": "Audiences are audience identifiers chosen by the authenticator that are compatible with both the TokenReview and token. An identifier is any identifier in the intersection of the TokenReviewSpec audiences and the token's audiences. A client of the TokenReview API that sets the spec.audiences field should validate that a compatible audience identifier is returned in the status.audiences field to ensure that the TokenReview server is audience aware. If a TokenReview returns an empty status.audience field where status.authenticated is \"true\", the token is valid against the audience of the Kubernetes API server.", "error": "Error indicates that the token couldn't be checked", } diff --git a/vendor/k8s.io/api/authentication/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/authentication/v1/zz_generated.deepcopy.go index aca99c42b..f36c253b2 100644 --- a/vendor/k8s.io/api/authentication/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/authentication/v1/zz_generated.deepcopy.go @@ -141,7 +141,7 @@ func (in *TokenReview) DeepCopyInto(out *TokenReview) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) + out.Spec = in.Spec in.Status.DeepCopyInto(&out.Status) return } @@ -167,11 +167,6 @@ func (in *TokenReview) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *TokenReviewSpec) DeepCopyInto(out *TokenReviewSpec) { *out = *in - if in.Audiences != nil { - in, out := &in.Audiences, &out.Audiences - *out = make([]string, len(*in)) - copy(*out, *in) - } return } @@ -189,11 +184,6 @@ func (in *TokenReviewSpec) DeepCopy() *TokenReviewSpec { func (in *TokenReviewStatus) DeepCopyInto(out *TokenReviewStatus) { *out = *in in.User.DeepCopyInto(&out.User) - if in.Audiences != nil { - in, out := &in.Audiences, &out.Audiences - *out = make([]string, len(*in)) - copy(*out, *in) - } return } diff --git a/vendor/k8s.io/api/authentication/v1beta1/doc.go b/vendor/k8s.io/api/authentication/v1beta1/doc.go index 185a2240f..e0de315d4 100644 --- a/vendor/k8s.io/api/authentication/v1beta1/doc.go +++ b/vendor/k8s.io/api/authentication/v1beta1/doc.go @@ -15,8 +15,6 @@ limitations under the License. */ // +k8s:deepcopy-gen=package -// +k8s:protobuf-gen=package // +groupName=authentication.k8s.io // +k8s:openapi-gen=true - package v1beta1 // import "k8s.io/api/authentication/v1beta1" diff --git a/vendor/k8s.io/api/authentication/v1beta1/generated.pb.go b/vendor/k8s.io/api/authentication/v1beta1/generated.pb.go index 5f34e76a9..8503d212b 100644 --- a/vendor/k8s.io/api/authentication/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/authentication/v1beta1/generated.pb.go @@ -14,8 +14,9 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. DO NOT EDIT. 
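The types.go and swagger hunks above pin the vendored authentication/v1 API back to its pre-1.13 shape, where TokenReviewSpec carries only the opaque bearer token and no audience list. A sketch of a TokenReview round-trip against that shape, assuming an in-cluster client-go clientset of the same vintage (the one-argument Create predates context-aware clients):

package main

import (
	"fmt"

	authv1 "k8s.io/api/authentication/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func main() {
	cfg, err := rest.InClusterConfig() // assumes this runs inside a cluster
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	// With the vendored types above there is no Spec.Audiences field, so the
	// review is evaluated against the apiserver's own audience.
	tr := &authv1.TokenReview{
		Spec: authv1.TokenReviewSpec{Token: "<bearer token to check>"},
	}
	out, err := cs.AuthenticationV1().TokenReviews().Create(tr)
	if err != nil {
		panic(err)
	}
	fmt.Println("authenticated:", out.Status.Authenticated, "user:", out.Status.User.Username)
}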
+// Code generated by protoc-gen-gogo. // source: k8s.io/kubernetes/vendor/k8s.io/api/authentication/v1beta1/generated.proto +// DO NOT EDIT! /* Package v1beta1 is a generated protocol buffer package. @@ -175,21 +176,6 @@ func (m *TokenReviewSpec) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Token))) i += copy(dAtA[i:], m.Token) - if len(m.Audiences) > 0 { - for _, s := range m.Audiences { - dAtA[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } return i, nil } @@ -228,21 +214,6 @@ func (m *TokenReviewStatus) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Error))) i += copy(dAtA[i:], m.Error) - if len(m.Audiences) > 0 { - for _, s := range m.Audiences { - dAtA[i] = 0x22 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } return i, nil } @@ -318,6 +289,24 @@ func (m *UserInfo) MarshalTo(dAtA []byte) (int, error) { return i, nil } +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -356,12 +345,6 @@ func (m *TokenReviewSpec) Size() (n int) { _ = l l = len(m.Token) n += 1 + l + sovGenerated(uint64(l)) - if len(m.Audiences) > 0 { - for _, s := range m.Audiences { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } return n } @@ -373,12 +356,6 @@ func (m *TokenReviewStatus) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) l = len(m.Error) n += 1 + l + sovGenerated(uint64(l)) - if len(m.Audiences) > 0 { - for _, s := range m.Audiences { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } return n } @@ -438,7 +415,6 @@ func (this *TokenReviewSpec) String() string { } s := strings.Join([]string{`&TokenReviewSpec{`, `Token:` + fmt.Sprintf("%v", this.Token) + `,`, - `Audiences:` + fmt.Sprintf("%v", this.Audiences) + `,`, `}`, }, "") return s @@ -451,7 +427,6 @@ func (this *TokenReviewStatus) String() string { `Authenticated:` + fmt.Sprintf("%v", this.Authenticated) + `,`, `User:` + strings.Replace(strings.Replace(this.User.String(), "UserInfo", "UserInfo", 1), `&`, ``, 1) + `,`, `Error:` + fmt.Sprintf("%v", this.Error) + `,`, - `Audiences:` + fmt.Sprintf("%v", this.Audiences) + `,`, `}`, }, "") return s @@ -764,35 +739,6 @@ func (m *TokenReviewSpec) Unmarshal(dAtA []byte) error { } m.Token = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Audiences", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := 
int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Audiences = append(m.Audiences, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -922,35 +868,6 @@ func (m *TokenReviewStatus) Unmarshal(dAtA []byte) error { } m.Error = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Audiences", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Audiences = append(m.Audiences, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -1114,14 +1031,51 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey if m.Extra == nil { m.Extra = make(map[string]ExtraValue) } - var mapkey string - mapvalue := &ExtraValue{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 + if iNdEx < postIndex { + var valuekey uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -1131,85 +1085,46 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + valuekey |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - 
b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - if postmsgIndex > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - mapvalue = &ExtraValue{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break } - iNdEx += skippy } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &ExtraValue{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + m.Extra[mapkey] = *mapvalue + } else { + var mapvalue ExtraValue + m.Extra[mapkey] = mapvalue } - m.Extra[mapkey] = *mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -1342,47 +1257,45 @@ func init() { } var fileDescriptorGenerated = []byte{ - // 663 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x53, 0xcd, 0x4e, 0x14, 0x4d, - 0x14, 0xed, 0x9e, 0x1f, 0xbe, 0x99, 0x9a, 0x6f, 0x14, 0x2b, 0x31, 0x99, 0x4c, 0x62, 0x0f, 0x8e, - 0x1b, 0x12, 0xa4, 0x5a, 0x08, 0x41, 0x82, 0x2b, 0x5a, 0x89, 0xc1, 0x84, 0x98, 0x94, 0xe0, 0x42, - 0x5d, 0x58, 0xd3, 0x73, 0xe9, 0x69, 0xc7, 0xfe, 0x49, 0x55, 0xf5, 0x28, 0x3b, 0x1e, 0xc1, 0xa5, - 0x4b, 0x13, 0x9f, 0xc4, 0x1d, 0x4b, 0x96, 0x2c, 0xcc, 0x44, 0xda, 0x27, 0xf0, 0x0d, 0x4c, 0x55, - 0x17, 0xcc, 0x00, 0x31, 0xc0, 0xae, 0xeb, 0xdc, 0x7b, 0xce, 0x3d, 0xf7, 0x54, 0x17, 0x7a, 0x31, - 0x5c, 0x13, 0x24, 0x4c, 0xdc, 0x61, 0xd6, 0x03, 0x1e, 0x83, 0x04, 0xe1, 0x8e, 0x20, 0xee, 0x27, - 0xdc, 0x35, 0x05, 0x96, 0x86, 0x2e, 0xcb, 0xe4, 0x00, 0x62, 0x19, 0xfa, 0x4c, 0x86, 0x49, 0xec, - 0x8e, 0x96, 0x7a, 0x20, 0xd9, 0x92, 0x1b, 0x40, 0x0c, 0x9c, 0x49, 0xe8, 0x93, 0x94, 0x27, 0x32, - 0xc1, 0xf7, 0x0b, 0x0a, 0x61, 0x69, 0x48, 0xce, 0x53, 0x88, 0xa1, 0xb4, 0x17, 0x83, 0x50, 0x0e, - 0xb2, 0x1e, 0xf1, 0x93, 0xc8, 0x0d, 0x92, 0x20, 0x71, 0x35, 0xb3, 0x97, 0xed, 0xe9, 0x93, 0x3e, - 0xe8, 0xaf, 0x42, 0xb1, 0xbd, 0x32, 0x31, 0x11, 0x31, 0x7f, 0x10, 0xc6, 0xc0, 0xf7, 0xdd, 0x74, - 0x18, 0x28, 0x40, 0xb8, 0x11, 0x48, 0xe6, 0x8e, 0x2e, 0xf9, 0x68, 0xbb, 0xff, 0x62, 0xf1, 0x2c, - 0x96, 0x61, 0x04, 0x97, 0x08, 0xab, 0x57, 0x11, 0x84, 0x3f, 0x80, 0x88, 0x5d, 0xe4, 0x75, 0x1f, - 0x23, 0xb4, 0xf9, 0x59, 0x72, 0xf6, 0x9a, 0x7d, 0xcc, 0x00, 0x77, 0x50, 0x35, 0x94, 0x10, 0x89, - 0x96, 0x3d, 0x57, 0x9e, 0xaf, 0x7b, 0xf5, 0x7c, 0xdc, 0xa9, 0x6e, 0x29, 0x80, 0x16, 0xf8, 0x7a, - 0xed, 0xeb, 0xb7, 0x8e, 0x75, 0xf0, 0x73, 0xce, 0xea, 0x7e, 0x2f, 0xa1, 0xc6, 0x4e, 0x32, 0x84, - 0x98, 0xc2, 0x28, 0x84, 0x4f, 0xf8, 0x3d, 0xaa, 0xa9, 0x65, 0xfa, 0x4c, 0xb2, 0x96, 0x3d, 0x67, - 0xcf, 0x37, 0x96, 0x1f, 0x91, 0x49, 0x98, 0x67, 0x9e, 0x48, 0x3a, 0x0c, 0x14, 0x20, 0x88, 0xea, - 0x26, 0xa3, 0x25, 0xf2, 0xb2, 0xf7, 0x01, 0x7c, 0xb9, 0x0d, 0x92, 0x79, 0xf8, 0x70, 0xdc, 0xb1, - 0xf2, 0x71, 0x07, 
0x4d, 0x30, 0x7a, 0xa6, 0x8a, 0x77, 0x50, 0x45, 0xa4, 0xe0, 0xb7, 0x4a, 0x5a, - 0x7d, 0x99, 0x5c, 0x79, 0x55, 0x64, 0xca, 0xdf, 0xab, 0x14, 0x7c, 0xef, 0x7f, 0xa3, 0x5f, 0x51, - 0x27, 0xaa, 0xd5, 0xf0, 0x3b, 0x34, 0x23, 0x24, 0x93, 0x99, 0x68, 0x95, 0xb5, 0xee, 0xca, 0x0d, - 0x75, 0x35, 0xd7, 0xbb, 0x65, 0x94, 0x67, 0x8a, 0x33, 0x35, 0x9a, 0x5d, 0x1f, 0xdd, 0xbe, 0x60, - 0x02, 0x3f, 0x40, 0x55, 0xa9, 0x20, 0x9d, 0x52, 0xdd, 0x6b, 0x1a, 0x66, 0xb5, 0xe8, 0x2b, 0x6a, - 0x78, 0x01, 0xd5, 0x59, 0xd6, 0x0f, 0x21, 0xf6, 0x41, 0xb4, 0x4a, 0xfa, 0x32, 0x9a, 0xf9, 0xb8, - 0x53, 0xdf, 0x38, 0x05, 0xe9, 0xa4, 0xde, 0xfd, 0x63, 0xa3, 0x3b, 0x97, 0x2c, 0xe1, 0x27, 0xa8, - 0x39, 0x65, 0x1f, 0xfa, 0x7a, 0x5e, 0xcd, 0xbb, 0x6b, 0xe6, 0x35, 0x37, 0xa6, 0x8b, 0xf4, 0x7c, - 0x2f, 0xde, 0x46, 0x95, 0x4c, 0x00, 0x37, 0x59, 0x2f, 0x5c, 0x23, 0x93, 0x5d, 0x01, 0x7c, 0x2b, - 0xde, 0x4b, 0x26, 0x21, 0x2b, 0x84, 0x6a, 0x19, 0xb5, 0x33, 0x70, 0x9e, 0x70, 0x9d, 0xf1, 0xd4, - 0xce, 0x9b, 0x0a, 0xa4, 0x45, 0xed, 0xfc, 0xce, 0x95, 0x2b, 0x76, 0xfe, 0x51, 0x42, 0xb5, 0xd3, - 0x91, 0xf8, 0x21, 0xaa, 0xa9, 0x31, 0x31, 0x8b, 0xc0, 0xa4, 0x3a, 0x6b, 0x26, 0xe8, 0x1e, 0x85, - 0xd3, 0xb3, 0x0e, 0x7c, 0x0f, 0x95, 0xb3, 0xb0, 0xaf, 0x57, 0xab, 0x7b, 0x0d, 0xd3, 0x58, 0xde, - 0xdd, 0x7a, 0x46, 0x15, 0x8e, 0xbb, 0x68, 0x26, 0xe0, 0x49, 0x96, 0xaa, 0x1f, 0x42, 0x79, 0x40, - 0xea, 0x5a, 0x9f, 0x6b, 0x84, 0x9a, 0x0a, 0x7e, 0x8b, 0xaa, 0xa0, 0x5e, 0x8d, 0xb6, 0xd9, 0x58, - 0x5e, 0xbd, 0x41, 0x3e, 0x44, 0x3f, 0xb7, 0xcd, 0x58, 0xf2, 0xfd, 0xa9, 0x1c, 0x14, 0x46, 0x0b, - 0xcd, 0x76, 0x60, 0x9e, 0xa4, 0xee, 0xc1, 0xb3, 0xa8, 0x3c, 0x84, 0xfd, 0x62, 0x2d, 0xaa, 0x3e, - 0xf1, 0x53, 0x54, 0x1d, 0xa9, 0xd7, 0x6a, 0x2e, 0x67, 0xf1, 0x1a, 0xc3, 0x27, 0x4f, 0x9c, 0x16, - 0xdc, 0xf5, 0xd2, 0x9a, 0xed, 0x2d, 0x1e, 0x9e, 0x38, 0xd6, 0xd1, 0x89, 0x63, 0x1d, 0x9f, 0x38, - 0xd6, 0x41, 0xee, 0xd8, 0x87, 0xb9, 0x63, 0x1f, 0xe5, 0x8e, 0x7d, 0x9c, 0x3b, 0xf6, 0xaf, 0xdc, - 0xb1, 0xbf, 0xfc, 0x76, 0xac, 0x37, 0xff, 0x19, 0x91, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, 0xf7, - 0xd6, 0x32, 0x28, 0x68, 0x05, 0x00, 0x00, + // 635 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x53, 0xcf, 0x4f, 0xd4, 0x40, + 0x14, 0x6e, 0xf7, 0x07, 0xee, 0xce, 0x8a, 0xe2, 0x24, 0x26, 0x9b, 0x4d, 0xec, 0xae, 0xeb, 0x85, + 0x44, 0x99, 0x0a, 0x21, 0x48, 0xf0, 0x64, 0x95, 0x18, 0x4c, 0x88, 0xc9, 0x08, 0x1e, 0xd4, 0x83, + 0xb3, 0xdd, 0x47, 0xb7, 0xae, 0xed, 0x34, 0xd3, 0x69, 0x95, 0x1b, 0x7f, 0x82, 0x47, 0x8f, 0x26, + 0xfe, 0x25, 0x26, 0x1e, 0x38, 0x72, 0xe4, 0x60, 0x88, 0xd4, 0x7f, 0xc4, 0xcc, 0x74, 0x64, 0x17, + 0x88, 0x01, 0x6e, 0xf3, 0xbe, 0xf7, 0xbe, 0x6f, 0xde, 0xf7, 0x66, 0x1e, 0x7a, 0x31, 0x5e, 0x4d, + 0x49, 0xc8, 0xdd, 0x71, 0x36, 0x00, 0x11, 0x83, 0x84, 0xd4, 0xcd, 0x21, 0x1e, 0x72, 0xe1, 0x9a, + 0x04, 0x4b, 0x42, 0x97, 0x65, 0x72, 0x04, 0xb1, 0x0c, 0x7d, 0x26, 0x43, 0x1e, 0xbb, 0xf9, 0xe2, + 0x00, 0x24, 0x5b, 0x74, 0x03, 0x88, 0x41, 0x30, 0x09, 0x43, 0x92, 0x08, 0x2e, 0x39, 0xbe, 0x5b, + 0x52, 0x08, 0x4b, 0x42, 0x72, 0x9a, 0x42, 0x0c, 0xa5, 0xb3, 0x10, 0x84, 0x72, 0x94, 0x0d, 0x88, + 0xcf, 0x23, 0x37, 0xe0, 0x01, 0x77, 0x35, 0x73, 0x90, 0xed, 0xe8, 0x48, 0x07, 0xfa, 0x54, 0x2a, + 0x76, 0x96, 0x27, 0x4d, 0x44, 0xcc, 0x1f, 0x85, 0x31, 0x88, 0x5d, 0x37, 0x19, 0x07, 0x0a, 0x48, + 0xdd, 0x08, 0x24, 0x73, 0xf3, 0x73, 0x7d, 0x74, 0xdc, 0xff, 0xb1, 0x44, 0x16, 0xcb, 0x30, 0x82, + 0x73, 0x84, 0x95, 0x8b, 0x08, 0xa9, 0x3f, 0x82, 0x88, 0x9d, 0xe5, 0xf5, 0x1f, 0x21, 0xb4, 0xfe, + 0x59, 0x0a, 0xf6, 0x9a, 0x7d, 0xcc, 0x00, 0x77, 
0x51, 0x3d, 0x94, 0x10, 0xa5, 0x6d, 0xbb, 0x57, + 0x9d, 0x6f, 0x7a, 0xcd, 0xe2, 0xa8, 0x5b, 0xdf, 0x50, 0x00, 0x2d, 0xf1, 0xb5, 0xc6, 0xd7, 0x6f, + 0x5d, 0x6b, 0xef, 0x57, 0xcf, 0xea, 0x7f, 0xaf, 0xa0, 0xd6, 0x16, 0x1f, 0x43, 0x4c, 0x21, 0x0f, + 0xe1, 0x13, 0x7e, 0x8f, 0x1a, 0xca, 0xcc, 0x90, 0x49, 0xd6, 0xb6, 0x7b, 0xf6, 0x7c, 0x6b, 0xe9, + 0x21, 0x99, 0x0c, 0xf3, 0xa4, 0x27, 0x92, 0x8c, 0x03, 0x05, 0xa4, 0x44, 0x55, 0x93, 0x7c, 0x91, + 0xbc, 0x1c, 0x7c, 0x00, 0x5f, 0x6e, 0x82, 0x64, 0x1e, 0xde, 0x3f, 0xea, 0x5a, 0xc5, 0x51, 0x17, + 0x4d, 0x30, 0x7a, 0xa2, 0x8a, 0xb7, 0x50, 0x2d, 0x4d, 0xc0, 0x6f, 0x57, 0xb4, 0xfa, 0x12, 0xb9, + 0xf0, 0xa9, 0xc8, 0x54, 0x7f, 0xaf, 0x12, 0xf0, 0xbd, 0xeb, 0x46, 0xbf, 0xa6, 0x22, 0xaa, 0xd5, + 0xf0, 0x3b, 0x34, 0x93, 0x4a, 0x26, 0xb3, 0xb4, 0x5d, 0xd5, 0xba, 0xcb, 0x57, 0xd4, 0xd5, 0x5c, + 0xef, 0x86, 0x51, 0x9e, 0x29, 0x63, 0x6a, 0x34, 0xfb, 0x2b, 0xe8, 0xe6, 0x99, 0x26, 0xf0, 0x3d, + 0x54, 0x97, 0x0a, 0xd2, 0x53, 0x6a, 0x7a, 0xb3, 0x86, 0x59, 0x2f, 0xeb, 0xca, 0x5c, 0xff, 0xa7, + 0x8d, 0x6e, 0x9d, 0xbb, 0x05, 0x3f, 0x46, 0xb3, 0x53, 0x1d, 0xc1, 0x50, 0x4b, 0x34, 0xbc, 0xdb, + 0x46, 0x62, 0xf6, 0xc9, 0x74, 0x92, 0x9e, 0xae, 0xc5, 0x9b, 0xa8, 0x96, 0xa5, 0x20, 0xcc, 0xf8, + 0xee, 0x5f, 0xc2, 0xe6, 0x76, 0x0a, 0x62, 0x23, 0xde, 0xe1, 0x93, 0xb9, 0x29, 0x84, 0x6a, 0x19, + 0x65, 0x03, 0x84, 0xe0, 0x42, 0x8f, 0x6d, 0xca, 0xc6, 0xba, 0x02, 0x69, 0x99, 0xeb, 0xff, 0xa8, + 0xa0, 0xc6, 0x3f, 0x15, 0xfc, 0x00, 0x35, 0x14, 0x33, 0x66, 0x11, 0x18, 0xef, 0x73, 0x86, 0xa4, + 0x6b, 0x14, 0x4e, 0x4f, 0x2a, 0xf0, 0x1d, 0x54, 0xcd, 0xc2, 0xa1, 0xee, 0xb6, 0xe9, 0xb5, 0x4c, + 0x61, 0x75, 0x7b, 0xe3, 0x19, 0x55, 0x38, 0xee, 0xa3, 0x99, 0x40, 0xf0, 0x2c, 0x51, 0xcf, 0xa6, + 0xbe, 0x2a, 0x52, 0xc3, 0x7f, 0xae, 0x11, 0x6a, 0x32, 0xf8, 0x2d, 0xaa, 0x83, 0xfa, 0xdb, 0xed, + 0x5a, 0xaf, 0x3a, 0xdf, 0x5a, 0x5a, 0xb9, 0x82, 0x65, 0xa2, 0x97, 0x62, 0x3d, 0x96, 0x62, 0x77, + 0xca, 0x9a, 0xc2, 0x68, 0xa9, 0xd9, 0x09, 0xcc, 0xe2, 0xe8, 0x1a, 0x3c, 0x87, 0xaa, 0x63, 0xd8, + 0x2d, 0x6d, 0x51, 0x75, 0xc4, 0x4f, 0x51, 0x3d, 0x57, 0x3b, 0x65, 0xe6, 0xbd, 0x70, 0x89, 0xcb, + 0x27, 0x8b, 0x48, 0x4b, 0xee, 0x5a, 0x65, 0xd5, 0xf6, 0x16, 0xf6, 0x8f, 0x1d, 0xeb, 0xe0, 0xd8, + 0xb1, 0x0e, 0x8f, 0x1d, 0x6b, 0xaf, 0x70, 0xec, 0xfd, 0xc2, 0xb1, 0x0f, 0x0a, 0xc7, 0x3e, 0x2c, + 0x1c, 0xfb, 0x77, 0xe1, 0xd8, 0x5f, 0xfe, 0x38, 0xd6, 0x9b, 0x6b, 0x46, 0xe4, 0x6f, 0x00, 0x00, + 0x00, 0xff, 0xff, 0x39, 0x00, 0xe7, 0xfa, 0x0e, 0x05, 0x00, 0x00, } diff --git a/vendor/k8s.io/api/authentication/v1beta1/types.go b/vendor/k8s.io/api/authentication/v1beta1/types.go index 0b6cba822..a90949dc3 100644 --- a/vendor/k8s.io/api/authentication/v1beta1/types.go +++ b/vendor/k8s.io/api/authentication/v1beta1/types.go @@ -48,13 +48,6 @@ type TokenReviewSpec struct { // Token is the opaque bearer token. // +optional Token string `json:"token,omitempty" protobuf:"bytes,1,opt,name=token"` - // Audiences is a list of the identifiers that the resource server presented - // with the token identifies as. Audience-aware token authenticators will - // verify that the token was intended for at least one of the audiences in - // this list. If no audiences are provided, the audience will default to the - // audience of the Kubernetes apiserver. - // +optional - Audiences []string `json:"audiences,omitempty" protobuf:"bytes,2,rep,name=audiences"` } // TokenReviewStatus is the result of the token authentication request. @@ -65,17 +58,6 @@ type TokenReviewStatus struct { // User is the UserInfo associated with the provided token. 
// +optional User UserInfo `json:"user,omitempty" protobuf:"bytes,2,opt,name=user"` - // Audiences are audience identifiers chosen by the authenticator that are - // compatible with both the TokenReview and token. An identifier is any - // identifier in the intersection of the TokenReviewSpec audiences and the - // token's audiences. A client of the TokenReview API that sets the - // spec.audiences field should validate that a compatible audience identifier - // is returned in the status.audiences field to ensure that the TokenReview - // server is audience aware. If a TokenReview returns an empty - // status.audience field where status.authenticated is "true", the token is - // valid against the audience of the Kubernetes API server. - // +optional - Audiences []string `json:"audiences,omitempty" protobuf:"bytes,4,rep,name=audiences"` // Error indicates that the token couldn't be checked // +optional Error string `json:"error,omitempty" protobuf:"bytes,3,opt,name=error"` diff --git a/vendor/k8s.io/api/authentication/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/authentication/v1beta1/types_swagger_doc_generated.go index 8c9acfb5b..968999d1e 100644 --- a/vendor/k8s.io/api/authentication/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/authentication/v1beta1/types_swagger_doc_generated.go @@ -38,9 +38,8 @@ func (TokenReview) SwaggerDoc() map[string]string { } var map_TokenReviewSpec = map[string]string{ - "": "TokenReviewSpec is a description of the token authentication request.", - "token": "Token is the opaque bearer token.", - "audiences": "Audiences is a list of the identifiers that the resource server presented with the token identifies as. Audience-aware token authenticators will verify that the token was intended for at least one of the audiences in this list. If no audiences are provided, the audience will default to the audience of the Kubernetes apiserver.", + "": "TokenReviewSpec is a description of the token authentication request.", + "token": "Token is the opaque bearer token.", } func (TokenReviewSpec) SwaggerDoc() map[string]string { @@ -51,7 +50,6 @@ var map_TokenReviewStatus = map[string]string{ "": "TokenReviewStatus is the result of the token authentication request.", "authenticated": "Authenticated indicates that the token was associated with a known user.", "user": "User is the UserInfo associated with the provided token.", - "audiences": "Audiences are audience identifiers chosen by the authenticator that are compatible with both the TokenReview and token. An identifier is any identifier in the intersection of the TokenReviewSpec audiences and the token's audiences. A client of the TokenReview API that sets the spec.audiences field should validate that a compatible audience identifier is returned in the status.audiences field to ensure that the TokenReview server is audience aware. 
If a TokenReview returns an empty status.audience field where status.authenticated is \"true\", the token is valid against the audience of the Kubernetes API server.", "error": "Error indicates that the token couldn't be checked", } diff --git a/vendor/k8s.io/api/authentication/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/authentication/v1beta1/zz_generated.deepcopy.go index a5d82a810..3a5f6d5a9 100644 --- a/vendor/k8s.io/api/authentication/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/authentication/v1beta1/zz_generated.deepcopy.go @@ -49,7 +49,7 @@ func (in *TokenReview) DeepCopyInto(out *TokenReview) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) + out.Spec = in.Spec in.Status.DeepCopyInto(&out.Status) return } @@ -75,11 +75,6 @@ func (in *TokenReview) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *TokenReviewSpec) DeepCopyInto(out *TokenReviewSpec) { *out = *in - if in.Audiences != nil { - in, out := &in.Audiences, &out.Audiences - *out = make([]string, len(*in)) - copy(*out, *in) - } return } @@ -97,11 +92,6 @@ func (in *TokenReviewSpec) DeepCopy() *TokenReviewSpec { func (in *TokenReviewStatus) DeepCopyInto(out *TokenReviewStatus) { *out = *in in.User.DeepCopyInto(&out.User) - if in.Audiences != nil { - in, out := &in.Audiences, &out.Audiences - *out = make([]string, len(*in)) - copy(*out, *in) - } return } diff --git a/vendor/k8s.io/api/authorization/v1/doc.go b/vendor/k8s.io/api/authorization/v1/doc.go index cf100e6b7..c06b798df 100644 --- a/vendor/k8s.io/api/authorization/v1/doc.go +++ b/vendor/k8s.io/api/authorization/v1/doc.go @@ -15,9 +15,7 @@ limitations under the License. */ // +k8s:deepcopy-gen=package -// +k8s:protobuf-gen=package // +k8s:openapi-gen=true // +groupName=authorization.k8s.io - package v1 // import "k8s.io/api/authorization/v1" diff --git a/vendor/k8s.io/api/authorization/v1/generated.pb.go b/vendor/k8s.io/api/authorization/v1/generated.pb.go index fc6a25f62..e9145af02 100644 --- a/vendor/k8s.io/api/authorization/v1/generated.pb.go +++ b/vendor/k8s.io/api/authorization/v1/generated.pb.go @@ -14,8 +14,9 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// Code generated by protoc-gen-gogo. // source: k8s.io/kubernetes/vendor/k8s.io/api/authorization/v1/generated.proto +// DO NOT EDIT! /* Package v1 is a generated protocol buffer package. 
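The deleted TokenReviewSpec/TokenReviewStatus deepcopy hunks above drop the codegen's make-and-copy idiom for the removed []string Audiences field. A minimal standalone sketch of that idiom, assuming nothing beyond the standard library (the deepCopyStrings helper is illustrative, not part of the vendored code):

package main

import "fmt"

// deepCopyStrings reproduces the generated idiom for copying a []string
// field: allocate a same-length slice, then copy elementwise.
func deepCopyStrings(in []string) []string {
	if in == nil {
		return nil
	}
	out := make([]string, len(in))
	copy(out, in)
	return out
}

func main() {
	a := []string{"api"}
	b := deepCopyStrings(a)
	b[0] = "changed"
	fmt.Println(a[0]) // "api" — the original slice is untouched
}

A plain assignment would have shared the backing array, so a mutation through the copy would leak back into the original; the generated code therefore allocates and copies whenever the field is non-nil.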
@@ -794,6 +795,24 @@ func (m *SubjectRulesReviewStatus) MarshalTo(dAtA []byte) (int, error) { return i, nil } +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -2869,14 +2888,51 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey if m.Extra == nil { m.Extra = make(map[string]ExtraValue) } - var mapkey string - mapvalue := &ExtraValue{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 + if iNdEx < postIndex { + var valuekey uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2886,85 +2942,46 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + valuekey |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated } - if postmsgIndex > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - mapvalue = &ExtraValue{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break } - iNdEx += skippy } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &ExtraValue{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + m.Extra[mapkey] = *mapvalue + } else { + var mapvalue ExtraValue + m.Extra[mapkey] = mapvalue } - m.Extra[mapkey] = *mapvalue iNdEx = postIndex case 6: if wireType != 2 { diff --git a/vendor/k8s.io/api/authorization/v1beta1/doc.go b/vendor/k8s.io/api/authorization/v1beta1/doc.go index 7046f1110..ea4f802e2 100644 --- a/vendor/k8s.io/api/authorization/v1beta1/doc.go +++ b/vendor/k8s.io/api/authorization/v1beta1/doc.go @@ -15,9 +15,7 @@ limitations under the License. */ // +k8s:deepcopy-gen=package -// +k8s:protobuf-gen=package // +k8s:openapi-gen=true // +groupName=authorization.k8s.io - package v1beta1 // import "k8s.io/api/authorization/v1beta1" diff --git a/vendor/k8s.io/api/authorization/v1beta1/generated.pb.go b/vendor/k8s.io/api/authorization/v1beta1/generated.pb.go index 7cce98eb1..75ee6cf91 100644 --- a/vendor/k8s.io/api/authorization/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/authorization/v1beta1/generated.pb.go @@ -14,8 +14,9 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// Code generated by protoc-gen-gogo. // source: k8s.io/kubernetes/vendor/k8s.io/api/authorization/v1beta1/generated.proto +// DO NOT EDIT! /* Package v1beta1 is a generated protocol buffer package. 
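The reverted codegen in the hunks above re-adds encodeFixed64Generated/encodeFixed32Generated, which write a value out least-significant byte first via eight (or four) shift-and-mask stores. The same layout comes from encoding/binary's little-endian writer; a small sketch under that assumption (encodeFixed64 here is illustrative, not the generated function):

package main

import (
	"encoding/binary"
	"fmt"
)

// encodeFixed64 mirrors the shift-based helper re-added by the older
// codegen: it writes v into dAtA at offset in little-endian byte order
// and returns the offset just past the written bytes.
func encodeFixed64(dAtA []byte, offset int, v uint64) int {
	binary.LittleEndian.PutUint64(dAtA[offset:], v)
	return offset + 8
}

func main() {
	buf := make([]byte, 8)
	encodeFixed64(buf, 0, 0x1122334455667788)
	fmt.Printf("%x\n", buf) // 8877665544332211 — least-significant byte first
}

The shift form and binary.LittleEndian.PutUint64 produce byte-identical output; the generated variant simply avoids an extra import.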
@@ -794,6 +795,24 @@ func (m *SubjectRulesReviewStatus) MarshalTo(dAtA []byte) (int, error) { return i, nil } +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -2869,14 +2888,51 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey if m.Extra == nil { m.Extra = make(map[string]ExtraValue) } - var mapkey string - mapvalue := &ExtraValue{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 + if iNdEx < postIndex { + var valuekey uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2886,85 +2942,46 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + valuekey |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated } - if postmsgIndex > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - mapvalue = &ExtraValue{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break } - iNdEx += skippy } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &ExtraValue{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + m.Extra[mapkey] = *mapvalue + } else { + var mapvalue ExtraValue + m.Extra[mapkey] = mapvalue } - m.Extra[mapkey] = *mapvalue iNdEx = postIndex case 6: if wireType != 2 { diff --git a/vendor/k8s.io/api/autoscaling/v1/doc.go b/vendor/k8s.io/api/autoscaling/v1/doc.go index 8c9c09b5c..9c3be845f 100644 --- a/vendor/k8s.io/api/autoscaling/v1/doc.go +++ b/vendor/k8s.io/api/autoscaling/v1/doc.go @@ -15,7 +15,6 @@ limitations under the License. */ // +k8s:deepcopy-gen=package -// +k8s:protobuf-gen=package // +k8s:openapi-gen=true package v1 // import "k8s.io/api/autoscaling/v1" diff --git a/vendor/k8s.io/api/autoscaling/v1/generated.pb.go b/vendor/k8s.io/api/autoscaling/v1/generated.pb.go index 950e93340..47a46a557 100644 --- a/vendor/k8s.io/api/autoscaling/v1/generated.pb.go +++ b/vendor/k8s.io/api/autoscaling/v1/generated.pb.go @@ -14,8 +14,9 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// Code generated by protoc-gen-gogo. // source: k8s.io/kubernetes/vendor/k8s.io/api/autoscaling/v1/generated.proto +// DO NOT EDIT! /* Package v1 is a generated protocol buffer package. 
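The shift-and-accumulate loops these Unmarshal hunks expand — keykey |= (uint64(b) & 0x7F) << shift, stopping once a byte's high bit is clear — decode protobuf base-128 varints, which is the same wire format encoding/binary's Uvarint functions use. A minimal sketch of that equivalence:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// Encode 300 as a varint: 7 payload bits per byte, continuation
	// bit (0x80) set on every byte except the last.
	buf := make([]byte, binary.MaxVarintLen64)
	n := binary.PutUvarint(buf, 300)
	fmt.Printf("%x\n", buf[:n]) // ac02

	// Decode it with the same shift-and-accumulate loop the
	// generated Unmarshal code uses.
	var v uint64
	data := buf[:n]
	for shift := uint(0); ; shift += 7 {
		b := data[0]
		data = data[1:]
		v |= uint64(b&0x7F) << shift
		if b < 0x80 {
			break
		}
	}
	fmt.Println(v) // 300
}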
@@ -995,6 +996,24 @@ func (m *ScaleStatus) MarshalTo(dAtA []byte) (int, error) { return i, nil } +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) diff --git a/vendor/k8s.io/api/autoscaling/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/autoscaling/v1/types_swagger_doc_generated.go index 72ac97271..a6e874f3d 100644 --- a/vendor/k8s.io/api/autoscaling/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/autoscaling/v1/types_swagger_doc_generated.go @@ -196,8 +196,8 @@ func (PodsMetricStatus) SwaggerDoc() map[string]string { } var map_ResourceMetricSource = map[string]string{ - "": "ResourceMetricSource indicates how to scale on a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). The values will be averaged together before being compared to the target. Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source. Only one \"target\" type should be set.", - "name": "name is the name of the resource in question.", + "": "ResourceMetricSource indicates how to scale on a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). The values will be averaged together before being compared to the target. Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source. Only one \"target\" type should be set.", + "name": "name is the name of the resource in question.", "targetAverageUtilization": "targetAverageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods.", "targetAverageValue": "targetAverageValue is the target value of the average of the resource metric across all relevant pods, as a raw value (instead of as a percentage of the request), similar to the \"pods\" metric source type.", } @@ -207,8 +207,8 @@ func (ResourceMetricSource) SwaggerDoc() map[string]string { } var map_ResourceMetricStatus = map[string]string{ - "": "ResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", - "name": "name is the name of the resource in question.", + "": "ResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). 
Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", + "name": "name is the name of the resource in question.", "currentAverageUtilization": "currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. It will only be present if `targetAverageValue` was set in the corresponding metric specification.", "currentAverageValue": "currentAverageValue is the current value of the average of the resource metric across all relevant pods, as a raw value (instead of as a percentage of the request), similar to the \"pods\" metric source type. It will always be set, regardless of the corresponding metric specification.", } diff --git a/vendor/k8s.io/api/autoscaling/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/autoscaling/v1/zz_generated.deepcopy.go index ddb601128..3fda47d54 100644 --- a/vendor/k8s.io/api/autoscaling/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/autoscaling/v1/zz_generated.deepcopy.go @@ -148,7 +148,7 @@ func (in *HorizontalPodAutoscalerCondition) DeepCopy() *HorizontalPodAutoscalerC func (in *HorizontalPodAutoscalerList) DeepCopyInto(out *HorizontalPodAutoscalerList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]HorizontalPodAutoscaler, len(*in)) diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/doc.go b/vendor/k8s.io/api/autoscaling/v2beta1/doc.go index 2cc9f11ea..da9789e5c 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta1/doc.go +++ b/vendor/k8s.io/api/autoscaling/v2beta1/doc.go @@ -15,7 +15,6 @@ limitations under the License. */ // +k8s:deepcopy-gen=package -// +k8s:protobuf-gen=package // +k8s:openapi-gen=true package v2beta1 // import "k8s.io/api/autoscaling/v2beta1" diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/generated.pb.go b/vendor/k8s.io/api/autoscaling/v2beta1/generated.pb.go index b6a5f3562..bee94129d 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta1/generated.pb.go +++ b/vendor/k8s.io/api/autoscaling/v2beta1/generated.pb.go @@ -14,8 +14,9 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// Code generated by protoc-gen-gogo. // source: k8s.io/kubernetes/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto +// DO NOT EDIT! /* Package v2beta1 is a generated protocol buffer package. 
@@ -915,6 +916,24 @@ func (m *ResourceMetricStatus) MarshalTo(dAtA []byte) (int, error) { return i, nil } +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/autoscaling/v2beta1/types_swagger_doc_generated.go index 589408ace..411b817d0 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/autoscaling/v2beta1/types_swagger_doc_generated.go @@ -197,8 +197,8 @@ func (PodsMetricStatus) SwaggerDoc() map[string]string { } var map_ResourceMetricSource = map[string]string{ - "": "ResourceMetricSource indicates how to scale on a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). The values will be averaged together before being compared to the target. Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source. Only one \"target\" type should be set.", - "name": "name is the name of the resource in question.", + "": "ResourceMetricSource indicates how to scale on a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). The values will be averaged together before being compared to the target. Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source. Only one \"target\" type should be set.", + "name": "name is the name of the resource in question.", "targetAverageUtilization": "targetAverageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods.", "targetAverageValue": "targetAverageValue is the target value of the average of the resource metric across all relevant pods, as a raw value (instead of as a percentage of the request), similar to the \"pods\" metric source type.", } @@ -208,8 +208,8 @@ func (ResourceMetricSource) SwaggerDoc() map[string]string { } var map_ResourceMetricStatus = map[string]string{ - "": "ResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", - "name": "name is the name of the resource in question.", + "": "ResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). 
Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", + "name": "name is the name of the resource in question.", "currentAverageUtilization": "currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. It will only be present if `targetAverageValue` was set in the corresponding metric specification.", "currentAverageValue": "currentAverageValue is the current value of the average of the resource metric across all relevant pods, as a raw value (instead of as a percentage of the request), similar to the \"pods\" metric source type. It will always be set, regardless of the corresponding metric specification.", } diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/autoscaling/v2beta1/zz_generated.deepcopy.go index c51e05b8f..2ec7e6156 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/autoscaling/v2beta1/zz_generated.deepcopy.go @@ -148,7 +148,7 @@ func (in *HorizontalPodAutoscalerCondition) DeepCopy() *HorizontalPodAutoscalerC func (in *HorizontalPodAutoscalerList) DeepCopyInto(out *HorizontalPodAutoscalerList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]HorizontalPodAutoscaler, len(*in)) diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/doc.go b/vendor/k8s.io/api/autoscaling/v2beta2/doc.go index 6d275f6d9..7c7d2b6f1 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta2/doc.go +++ b/vendor/k8s.io/api/autoscaling/v2beta2/doc.go @@ -15,7 +15,6 @@ limitations under the License. */ // +k8s:deepcopy-gen=package -// +k8s:protobuf-gen=package // +k8s:openapi-gen=true package v2beta2 // import "k8s.io/api/autoscaling/v2beta2" diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/generated.pb.go b/vendor/k8s.io/api/autoscaling/v2beta2/generated.pb.go index 816fea9d5..be752a140 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta2/generated.pb.go +++ b/vendor/k8s.io/api/autoscaling/v2beta2/generated.pb.go @@ -14,8 +14,9 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// Code generated by protoc-gen-gogo. // source: k8s.io/kubernetes/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto +// DO NOT EDIT! /* Package v2beta2 is a generated protocol buffer package. 
@@ -965,6 +966,24 @@ func (m *ResourceMetricStatus) MarshalTo(dAtA []byte) (int, error) { return i, nil } +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/zz_generated.deepcopy.go b/vendor/k8s.io/api/autoscaling/v2beta2/zz_generated.deepcopy.go index 2dffa3336..a6a95653a 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta2/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/autoscaling/v2beta2/zz_generated.deepcopy.go @@ -126,7 +126,7 @@ func (in *HorizontalPodAutoscalerCondition) DeepCopy() *HorizontalPodAutoscalerC func (in *HorizontalPodAutoscalerList) DeepCopyInto(out *HorizontalPodAutoscalerList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]HorizontalPodAutoscaler, len(*in)) diff --git a/vendor/k8s.io/api/batch/v1/doc.go b/vendor/k8s.io/api/batch/v1/doc.go index c4a8db6e7..04491807f 100644 --- a/vendor/k8s.io/api/batch/v1/doc.go +++ b/vendor/k8s.io/api/batch/v1/doc.go @@ -15,7 +15,6 @@ limitations under the License. */ // +k8s:deepcopy-gen=package -// +k8s:protobuf-gen=package // +k8s:openapi-gen=true package v1 // import "k8s.io/api/batch/v1" diff --git a/vendor/k8s.io/api/batch/v1/generated.pb.go b/vendor/k8s.io/api/batch/v1/generated.pb.go index 3aa32b578..097a6ff28 100644 --- a/vendor/k8s.io/api/batch/v1/generated.pb.go +++ b/vendor/k8s.io/api/batch/v1/generated.pb.go @@ -14,8 +14,9 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// Code generated by protoc-gen-gogo. // source: k8s.io/kubernetes/vendor/k8s.io/api/batch/v1/generated.proto +// DO NOT EDIT! /* Package v1 is a generated protocol buffer package. 
@@ -342,6 +343,24 @@ func (m *JobStatus) MarshalTo(dAtA []byte) (int, error) { return i, nil } +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) diff --git a/vendor/k8s.io/api/batch/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/batch/v1/zz_generated.deepcopy.go index beba55ace..88cb01678 100644 --- a/vendor/k8s.io/api/batch/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/batch/v1/zz_generated.deepcopy.go @@ -75,7 +75,7 @@ func (in *JobCondition) DeepCopy() *JobCondition { func (in *JobList) DeepCopyInto(out *JobList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Job, len(*in)) diff --git a/vendor/k8s.io/api/batch/v1beta1/doc.go b/vendor/k8s.io/api/batch/v1beta1/doc.go index 258ff028c..43020ed05 100644 --- a/vendor/k8s.io/api/batch/v1beta1/doc.go +++ b/vendor/k8s.io/api/batch/v1beta1/doc.go @@ -15,7 +15,6 @@ limitations under the License. */ // +k8s:deepcopy-gen=package -// +k8s:protobuf-gen=package // +k8s:openapi-gen=true package v1beta1 // import "k8s.io/api/batch/v1beta1" diff --git a/vendor/k8s.io/api/batch/v1beta1/generated.pb.go b/vendor/k8s.io/api/batch/v1beta1/generated.pb.go index 36342a3af..ece2204f9 100644 --- a/vendor/k8s.io/api/batch/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/batch/v1beta1/generated.pb.go @@ -14,8 +14,9 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// Code generated by protoc-gen-gogo. // source: k8s.io/kubernetes/vendor/k8s.io/api/batch/v1beta1/generated.proto +// DO NOT EDIT! /* Package v1beta1 is a generated protocol buffer package. 
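The zz_generated.deepcopy.go hunks in this patch repeatedly replace in.ListMeta.DeepCopyInto(&out.ListMeta) with the plain assignment out.ListMeta = in.ListMeta. That swap is behavior-preserving only while the copied struct contains no pointer, slice, or map fields; the side being reverted deep-copies ListMeta, which suggests the newer ListMeta carries a reference-typed field. A sketch of the distinction, using two hypothetical types:

package main

import "fmt"

type Meta struct{ Name string }     // value fields only: assignment is a full copy
type Spec struct{ Tags []string }   // slice field: assignment shares the backing array

func main() {
	a := Meta{Name: "a"}
	b := a // safe shallow copy, like `out.ListMeta = in.ListMeta`
	b.Name = "b"
	fmt.Println(a.Name) // "a" — unaffected

	s1 := Spec{Tags: []string{"x"}}
	s2 := s1 // shallow copy aliases Tags
	s2.Tags[0] = "y"
	fmt.Println(s1.Tags[0]) // "y" — mutated through the copy; needs a deep copy
}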
@@ -335,6 +336,24 @@ func (m *JobTemplateSpec) MarshalTo(dAtA []byte) (int, error) { return i, nil } +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) diff --git a/vendor/k8s.io/api/batch/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/batch/v1beta1/zz_generated.deepcopy.go index 7c9dcb742..1c8bc4478 100644 --- a/vendor/k8s.io/api/batch/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/batch/v1beta1/zz_generated.deepcopy.go @@ -57,7 +57,7 @@ func (in *CronJob) DeepCopyObject() runtime.Object { func (in *CronJobList) DeepCopyInto(out *CronJobList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]CronJob, len(*in)) diff --git a/vendor/k8s.io/api/batch/v2alpha1/doc.go b/vendor/k8s.io/api/batch/v2alpha1/doc.go index 3044b0c62..f4ed01ad8 100644 --- a/vendor/k8s.io/api/batch/v2alpha1/doc.go +++ b/vendor/k8s.io/api/batch/v2alpha1/doc.go @@ -15,7 +15,6 @@ limitations under the License. */ // +k8s:deepcopy-gen=package -// +k8s:protobuf-gen=package // +k8s:openapi-gen=true package v2alpha1 // import "k8s.io/api/batch/v2alpha1" diff --git a/vendor/k8s.io/api/batch/v2alpha1/generated.pb.go b/vendor/k8s.io/api/batch/v2alpha1/generated.pb.go index 4d9ba5c00..6ab41ebbc 100644 --- a/vendor/k8s.io/api/batch/v2alpha1/generated.pb.go +++ b/vendor/k8s.io/api/batch/v2alpha1/generated.pb.go @@ -14,8 +14,9 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// Code generated by protoc-gen-gogo. // source: k8s.io/kubernetes/vendor/k8s.io/api/batch/v2alpha1/generated.proto +// DO NOT EDIT! /* Package v2alpha1 is a generated protocol buffer package. 
@@ -335,6 +336,24 @@ func (m *JobTemplateSpec) MarshalTo(dAtA []byte) (int, error) { return i, nil } +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) diff --git a/vendor/k8s.io/api/batch/v2alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/batch/v2alpha1/zz_generated.deepcopy.go index 1b03f6745..20d87e7e7 100644 --- a/vendor/k8s.io/api/batch/v2alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/batch/v2alpha1/zz_generated.deepcopy.go @@ -57,7 +57,7 @@ func (in *CronJob) DeepCopyObject() runtime.Object { func (in *CronJobList) DeepCopyInto(out *CronJobList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]CronJob, len(*in)) diff --git a/vendor/k8s.io/api/certificates/v1beta1/doc.go b/vendor/k8s.io/api/certificates/v1beta1/doc.go index 9055248b9..fb23aadb0 100644 --- a/vendor/k8s.io/api/certificates/v1beta1/doc.go +++ b/vendor/k8s.io/api/certificates/v1beta1/doc.go @@ -15,9 +15,7 @@ limitations under the License. */ // +k8s:deepcopy-gen=package -// +k8s:protobuf-gen=package // +k8s:openapi-gen=true // +groupName=certificates.k8s.io - package v1beta1 // import "k8s.io/api/certificates/v1beta1" diff --git a/vendor/k8s.io/api/certificates/v1beta1/generated.pb.go b/vendor/k8s.io/api/certificates/v1beta1/generated.pb.go index 19bf225fa..eda159900 100644 --- a/vendor/k8s.io/api/certificates/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/certificates/v1beta1/generated.pb.go @@ -14,8 +14,9 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// Code generated by protoc-gen-gogo. // source: k8s.io/kubernetes/vendor/k8s.io/api/certificates/v1beta1/generated.proto +// DO NOT EDIT! /* Package v1beta1 is a generated protocol buffer package. 
@@ -377,6 +378,24 @@ func (m ExtraValue) MarshalTo(dAtA []byte) (int, error) { return i, nil } +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -1202,14 +1221,51 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey if m.Extra == nil { m.Extra = make(map[string]ExtraValue) } - var mapkey string - mapvalue := &ExtraValue{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 + if iNdEx < postIndex { + var valuekey uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -1219,85 +1275,46 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + valuekey |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated 
} - if postmsgIndex > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - mapvalue = &ExtraValue{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break } - iNdEx += skippy } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &ExtraValue{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + m.Extra[mapkey] = *mapvalue + } else { + var mapvalue ExtraValue + m.Extra[mapkey] = mapvalue } - m.Extra[mapkey] = *mapvalue iNdEx = postIndex default: iNdEx = preIndex diff --git a/vendor/k8s.io/api/certificates/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/certificates/v1beta1/zz_generated.deepcopy.go index b3e0aeb50..1b103f155 100644 --- a/vendor/k8s.io/api/certificates/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/certificates/v1beta1/zz_generated.deepcopy.go @@ -73,7 +73,7 @@ func (in *CertificateSigningRequestCondition) DeepCopy() *CertificateSigningRequ func (in *CertificateSigningRequestList) DeepCopyInto(out *CertificateSigningRequestList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]CertificateSigningRequest, len(*in)) diff --git a/vendor/k8s.io/api/coordination/v1/types.go b/vendor/k8s.io/api/coordination/v1/types.go deleted file mode 100644 index 8f9f24d04..000000000 --- a/vendor/k8s.io/api/coordination/v1/types.go +++ /dev/null @@ -1,74 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// Lease defines a lease concept. -type Lease struct { - metav1.TypeMeta `json:",inline"` - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Specification of the Lease. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status - // +optional - Spec LeaseSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` -} - -// LeaseSpec is a specification of a Lease. -type LeaseSpec struct { - // holderIdentity contains the identity of the holder of a current lease. 
- // +optional - HolderIdentity *string `json:"holderIdentity,omitempty" protobuf:"bytes,1,opt,name=holderIdentity"` - // leaseDurationSeconds is a duration that candidates for a lease need - // to wait to force acquire it. This is measure against time of last - // observed RenewTime. - // +optional - LeaseDurationSeconds *int32 `json:"leaseDurationSeconds,omitempty" protobuf:"varint,2,opt,name=leaseDurationSeconds"` - // acquireTime is a time when the current lease was acquired. - // +optional - AcquireTime *metav1.MicroTime `json:"acquireTime,omitempty" protobuf:"bytes,3,opt,name=acquireTime"` - // renewTime is a time when the current holder of a lease has last - // updated the lease. - // +optional - RenewTime *metav1.MicroTime `json:"renewTime,omitempty" protobuf:"bytes,4,opt,name=renewTime"` - // leaseTransitions is the number of transitions of a lease between - // holders. - // +optional - LeaseTransitions *int32 `json:"leaseTransitions,omitempty" protobuf:"varint,5,opt,name=leaseTransitions"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// LeaseList is a list of Lease objects. -type LeaseList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Items is a list of schema objects. - Items []Lease `json:"items" protobuf:"bytes,2,rep,name=items"` -} diff --git a/vendor/k8s.io/api/coordination/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/coordination/v1/types_swagger_doc_generated.go deleted file mode 100644 index bd02ad5da..000000000 --- a/vendor/k8s.io/api/coordination/v1/types_swagger_doc_generated.go +++ /dev/null @@ -1,63 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -// This file contains a collection of methods that can be used from go-restful to -// generate Swagger API documentation for its models. Please read this PR for more -// information on the implementation: https://github.com/emicklei/go-restful/pull/215 -// -// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if -// they are on one line! For multiple line or blocks that you want to ignore use ---. -// Any context after a --- is ignored. -// -// Those methods can be generated by using hack/update-generated-swagger-docs.sh - -// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. -var map_Lease = map[string]string{ - "": "Lease defines a lease concept.", - "metadata": "More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "Specification of the Lease. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", -} - -func (Lease) SwaggerDoc() map[string]string { - return map_Lease -} - -var map_LeaseList = map[string]string{ - "": "LeaseList is a list of Lease objects.", - "metadata": "Standard list metadata. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "items": "Items is a list of schema objects.", -} - -func (LeaseList) SwaggerDoc() map[string]string { - return map_LeaseList -} - -var map_LeaseSpec = map[string]string{ - "": "LeaseSpec is a specification of a Lease.", - "holderIdentity": "holderIdentity contains the identity of the holder of a current lease.", - "leaseDurationSeconds": "leaseDurationSeconds is a duration that candidates for a lease need to wait to force acquire it. This is measure against time of last observed RenewTime.", - "acquireTime": "acquireTime is a time when the current lease was acquired.", - "renewTime": "renewTime is a time when the current holder of a lease has last updated the lease.", - "leaseTransitions": "leaseTransitions is the number of transitions of a lease between holders.", -} - -func (LeaseSpec) SwaggerDoc() map[string]string { - return map_LeaseSpec -} - -// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/coordination/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/coordination/v1/zz_generated.deepcopy.go deleted file mode 100644 index 2dd7eddbc..000000000 --- a/vendor/k8s.io/api/coordination/v1/zz_generated.deepcopy.go +++ /dev/null @@ -1,124 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by deepcopy-gen. DO NOT EDIT. - -package v1 - -import ( - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Lease) DeepCopyInto(out *Lease) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Lease. -func (in *Lease) DeepCopy() *Lease { - if in == nil { - return nil - } - out := new(Lease) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Lease) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *LeaseList) DeepCopyInto(out *LeaseList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Lease, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LeaseList. -func (in *LeaseList) DeepCopy() *LeaseList { - if in == nil { - return nil - } - out := new(LeaseList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
-func (in *LeaseList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *LeaseSpec) DeepCopyInto(out *LeaseSpec) { - *out = *in - if in.HolderIdentity != nil { - in, out := &in.HolderIdentity, &out.HolderIdentity - *out = new(string) - **out = **in - } - if in.LeaseDurationSeconds != nil { - in, out := &in.LeaseDurationSeconds, &out.LeaseDurationSeconds - *out = new(int32) - **out = **in - } - if in.AcquireTime != nil { - in, out := &in.AcquireTime, &out.AcquireTime - *out = (*in).DeepCopy() - } - if in.RenewTime != nil { - in, out := &in.RenewTime, &out.RenewTime - *out = (*in).DeepCopy() - } - if in.LeaseTransitions != nil { - in, out := &in.LeaseTransitions, &out.LeaseTransitions - *out = new(int32) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LeaseSpec. -func (in *LeaseSpec) DeepCopy() *LeaseSpec { - if in == nil { - return nil - } - out := new(LeaseSpec) - in.DeepCopyInto(out) - return out -} diff --git a/vendor/k8s.io/api/coordination/v1beta1/doc.go b/vendor/k8s.io/api/coordination/v1beta1/doc.go index 304732d59..fecb513fc 100644 --- a/vendor/k8s.io/api/coordination/v1beta1/doc.go +++ b/vendor/k8s.io/api/coordination/v1beta1/doc.go @@ -15,9 +15,7 @@ limitations under the License. */ // +k8s:deepcopy-gen=package -// +k8s:protobuf-gen=package // +k8s:openapi-gen=true // +groupName=coordination.k8s.io - package v1beta1 // import "k8s.io/api/coordination/v1beta1" diff --git a/vendor/k8s.io/api/coordination/v1beta1/generated.pb.go b/vendor/k8s.io/api/coordination/v1beta1/generated.pb.go index aa57e9dd6..6c2dbd91f 100644 --- a/vendor/k8s.io/api/coordination/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/coordination/v1beta1/generated.pb.go @@ -14,8 +14,9 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// Code generated by protoc-gen-gogo. // source: k8s.io/kubernetes/vendor/k8s.io/api/coordination/v1beta1/generated.proto +// DO NOT EDIT! /* Package v1beta1 is a generated protocol buffer package. 
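The deleted coordination/v1 deepcopy code above shows the generated pattern for optional pointer fields: copy the struct wholesale, then re-allocate every non-nil pointer so the copy never aliases the original. A trimmed-down sketch (the Spec type is a hypothetical analogue of LeaseSpec, not the vendored type):

package main

import "fmt"

// Spec models an API type whose optional fields are pointers.
type Spec struct {
	HolderIdentity       *string
	LeaseDurationSeconds *int32
}

// DeepCopyInto follows the generated pattern: a shallow struct copy
// first, then a fresh allocation for each non-nil pointer field.
func (in *Spec) DeepCopyInto(out *Spec) {
	*out = *in
	if in.HolderIdentity != nil {
		out.HolderIdentity = new(string)
		*out.HolderIdentity = *in.HolderIdentity
	}
	if in.LeaseDurationSeconds != nil {
		out.LeaseDurationSeconds = new(int32)
		*out.LeaseDurationSeconds = *in.LeaseDurationSeconds
	}
}

func main() {
	id := "node-1"
	a := Spec{HolderIdentity: &id}
	var b Spec
	a.DeepCopyInto(&b)
	*b.HolderIdentity = "node-2"
	fmt.Println(*a.HolderIdentity) // "node-1" — the copy does not alias
}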
@@ -195,6 +196,24 @@ func (m *LeaseSpec) MarshalTo(dAtA []byte) (int, error) { return i, nil } +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) diff --git a/vendor/k8s.io/api/coordination/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/coordination/v1beta1/zz_generated.deepcopy.go index de6962137..a628ac19b 100644 --- a/vendor/k8s.io/api/coordination/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/coordination/v1beta1/zz_generated.deepcopy.go @@ -55,7 +55,7 @@ func (in *Lease) DeepCopyObject() runtime.Object { func (in *LeaseList) DeepCopyInto(out *LeaseList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Lease, len(*in)) diff --git a/vendor/k8s.io/api/core/v1/annotation_key_constants.go b/vendor/k8s.io/api/core/v1/annotation_key_constants.go index edc9b4d60..16a0cfced 100644 --- a/vendor/k8s.io/api/core/v1/annotation_key_constants.go +++ b/vendor/k8s.io/api/core/v1/annotation_key_constants.go @@ -78,29 +78,4 @@ const ( // // Not all cloud providers support this annotation, though AWS & GCE do. AnnotationLoadBalancerSourceRangesKey = "service.beta.kubernetes.io/load-balancer-source-ranges" - - // EndpointsLastChangeTriggerTime is the annotation key, set for endpoints objects, that - // represents the timestamp (stored as RFC 3339 date-time string, e.g. '2018-10-22T19:32:52.1Z') - // of the last change, of some Pod or Service object, that triggered the endpoints object change. - // In other words, if a Pod / Service changed at time T0, that change was observed by endpoints - // controller at T1, and the Endpoints object was changed at T2, the - // EndpointsLastChangeTriggerTime would be set to T0. - // - // The "endpoints change trigger" here means any Pod or Service change that resulted in the - // Endpoints object change. - // - // Given the definition of the "endpoints change trigger", please note that this annotation will - // be set ONLY for endpoints object changes triggered by either Pod or Service change. If the - // Endpoints object changes due to other reasons, this annotation won't be set (or updated if it's - // already set). - // - // This annotation will be used to compute the in-cluster network programming latency SLI, see - // https://github.com/kubernetes/community/blob/master/sig-scalability/slos/network_programming_latency.md - EndpointsLastChangeTriggerTime = "endpoints.kubernetes.io/last-change-trigger-time" - - // MigratedPluginsAnnotationKey is the annotation key, set for CSINode objects, that is a comma-separated - // list of in-tree plugins that will be serviced by the CSI backend on the Node represented by CSINode. - // This annotation is used by the Attach Detach Controller to determine whether to use the in-tree or - // CSI Backend for a volume plugin on a specific node. 
- MigratedPluginsAnnotationKey = "storage.alpha.kubernetes.io/migrated-plugins" ) diff --git a/vendor/k8s.io/api/core/v1/doc.go b/vendor/k8s.io/api/core/v1/doc.go index 1bdf0b25b..96994c624 100644 --- a/vendor/k8s.io/api/core/v1/doc.go +++ b/vendor/k8s.io/api/core/v1/doc.go @@ -16,7 +16,6 @@ limitations under the License. // +k8s:openapi-gen=true // +k8s:deepcopy-gen=package -// +k8s:protobuf-gen=package // Package v1 is the v1 version of the core API. package v1 // import "k8s.io/api/core/v1" diff --git a/vendor/k8s.io/api/core/v1/generated.pb.go b/vendor/k8s.io/api/core/v1/generated.pb.go index 79ecb2610..b569ea84d 100644 --- a/vendor/k8s.io/api/core/v1/generated.pb.go +++ b/vendor/k8s.io/api/core/v1/generated.pb.go @@ -14,8 +14,9 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// Code generated by protoc-gen-gogo. // source: k8s.io/kubernetes/vendor/k8s.io/api/core/v1/generated.proto +// DO NOT EDIT! /* Package v1 is a generated protocol buffer package. @@ -33,7 +34,6 @@ limitations under the License. AzureFileVolumeSource Binding CSIPersistentVolumeSource - CSIVolumeSource Capabilities CephFSPersistentVolumeSource CephFSVolumeSource @@ -82,7 +82,6 @@ limitations under the License. FlockerVolumeSource GCEPersistentDiskVolumeSource GitRepoVolumeSource - GlusterfsPersistentVolumeSource GlusterfsVolumeSource HTTPGetAction HTTPHeader @@ -221,7 +220,6 @@ limitations under the License. VolumeSource VsphereVirtualDiskVolumeSource WeightedPodAffinityTerm - WindowsSecurityContextOptions */ package v1 @@ -295,826 +293,810 @@ func (*CSIPersistentVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } -func (m *CSIVolumeSource) Reset() { *m = CSIVolumeSource{} } -func (*CSIVolumeSource) ProtoMessage() {} -func (*CSIVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } - func (m *Capabilities) Reset() { *m = Capabilities{} } func (*Capabilities) ProtoMessage() {} -func (*Capabilities) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } +func (*Capabilities) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } func (m *CephFSPersistentVolumeSource) Reset() { *m = CephFSPersistentVolumeSource{} } func (*CephFSPersistentVolumeSource) ProtoMessage() {} func (*CephFSPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{11} + return fileDescriptorGenerated, []int{10} } func (m *CephFSVolumeSource) Reset() { *m = CephFSVolumeSource{} } func (*CephFSVolumeSource) ProtoMessage() {} -func (*CephFSVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } +func (*CephFSVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } func (m *CinderPersistentVolumeSource) Reset() { *m = CinderPersistentVolumeSource{} } func (*CinderPersistentVolumeSource) ProtoMessage() {} func (*CinderPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{13} + return fileDescriptorGenerated, []int{12} } func (m *CinderVolumeSource) Reset() { *m = CinderVolumeSource{} } func (*CinderVolumeSource) ProtoMessage() {} -func (*CinderVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } +func (*CinderVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } func (m *ClientIPConfig) Reset() { *m = ClientIPConfig{} } func 
(*ClientIPConfig) ProtoMessage() {} -func (*ClientIPConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } +func (*ClientIPConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } func (m *ComponentCondition) Reset() { *m = ComponentCondition{} } func (*ComponentCondition) ProtoMessage() {} -func (*ComponentCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } +func (*ComponentCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } func (m *ComponentStatus) Reset() { *m = ComponentStatus{} } func (*ComponentStatus) ProtoMessage() {} -func (*ComponentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } +func (*ComponentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } func (m *ComponentStatusList) Reset() { *m = ComponentStatusList{} } func (*ComponentStatusList) ProtoMessage() {} -func (*ComponentStatusList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } +func (*ComponentStatusList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } func (m *ConfigMap) Reset() { *m = ConfigMap{} } func (*ConfigMap) ProtoMessage() {} -func (*ConfigMap) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} } +func (*ConfigMap) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } func (m *ConfigMapEnvSource) Reset() { *m = ConfigMapEnvSource{} } func (*ConfigMapEnvSource) ProtoMessage() {} -func (*ConfigMapEnvSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{20} } +func (*ConfigMapEnvSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} } func (m *ConfigMapKeySelector) Reset() { *m = ConfigMapKeySelector{} } func (*ConfigMapKeySelector) ProtoMessage() {} -func (*ConfigMapKeySelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{21} } +func (*ConfigMapKeySelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{20} } func (m *ConfigMapList) Reset() { *m = ConfigMapList{} } func (*ConfigMapList) ProtoMessage() {} -func (*ConfigMapList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{22} } +func (*ConfigMapList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{21} } func (m *ConfigMapNodeConfigSource) Reset() { *m = ConfigMapNodeConfigSource{} } func (*ConfigMapNodeConfigSource) ProtoMessage() {} func (*ConfigMapNodeConfigSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{23} + return fileDescriptorGenerated, []int{22} } func (m *ConfigMapProjection) Reset() { *m = ConfigMapProjection{} } func (*ConfigMapProjection) ProtoMessage() {} -func (*ConfigMapProjection) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{24} } +func (*ConfigMapProjection) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{23} } func (m *ConfigMapVolumeSource) Reset() { *m = ConfigMapVolumeSource{} } func (*ConfigMapVolumeSource) ProtoMessage() {} -func (*ConfigMapVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{25} } +func (*ConfigMapVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{24} } func (m *Container) Reset() { *m = Container{} } func (*Container) ProtoMessage() {} -func (*Container) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{26} } +func (*Container) Descriptor() ([]byte, 
[]int) { return fileDescriptorGenerated, []int{25} } func (m *ContainerImage) Reset() { *m = ContainerImage{} } func (*ContainerImage) ProtoMessage() {} -func (*ContainerImage) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{27} } +func (*ContainerImage) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{26} } func (m *ContainerPort) Reset() { *m = ContainerPort{} } func (*ContainerPort) ProtoMessage() {} -func (*ContainerPort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{28} } +func (*ContainerPort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{27} } func (m *ContainerState) Reset() { *m = ContainerState{} } func (*ContainerState) ProtoMessage() {} -func (*ContainerState) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{29} } +func (*ContainerState) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{28} } func (m *ContainerStateRunning) Reset() { *m = ContainerStateRunning{} } func (*ContainerStateRunning) ProtoMessage() {} -func (*ContainerStateRunning) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{30} } +func (*ContainerStateRunning) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{29} } func (m *ContainerStateTerminated) Reset() { *m = ContainerStateTerminated{} } func (*ContainerStateTerminated) ProtoMessage() {} func (*ContainerStateTerminated) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{31} + return fileDescriptorGenerated, []int{30} } func (m *ContainerStateWaiting) Reset() { *m = ContainerStateWaiting{} } func (*ContainerStateWaiting) ProtoMessage() {} -func (*ContainerStateWaiting) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{32} } +func (*ContainerStateWaiting) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{31} } func (m *ContainerStatus) Reset() { *m = ContainerStatus{} } func (*ContainerStatus) ProtoMessage() {} -func (*ContainerStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{33} } +func (*ContainerStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{32} } func (m *DaemonEndpoint) Reset() { *m = DaemonEndpoint{} } func (*DaemonEndpoint) ProtoMessage() {} -func (*DaemonEndpoint) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{34} } +func (*DaemonEndpoint) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{33} } func (m *DownwardAPIProjection) Reset() { *m = DownwardAPIProjection{} } func (*DownwardAPIProjection) ProtoMessage() {} -func (*DownwardAPIProjection) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{35} } +func (*DownwardAPIProjection) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{34} } func (m *DownwardAPIVolumeFile) Reset() { *m = DownwardAPIVolumeFile{} } func (*DownwardAPIVolumeFile) ProtoMessage() {} -func (*DownwardAPIVolumeFile) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{36} } +func (*DownwardAPIVolumeFile) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{35} } func (m *DownwardAPIVolumeSource) Reset() { *m = DownwardAPIVolumeSource{} } func (*DownwardAPIVolumeSource) ProtoMessage() {} func (*DownwardAPIVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{37} + return fileDescriptorGenerated, []int{36} } func (m *EmptyDirVolumeSource) Reset() { *m = EmptyDirVolumeSource{} } func (*EmptyDirVolumeSource) ProtoMessage() 
{} -func (*EmptyDirVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{38} } +func (*EmptyDirVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{37} } func (m *EndpointAddress) Reset() { *m = EndpointAddress{} } func (*EndpointAddress) ProtoMessage() {} -func (*EndpointAddress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{39} } +func (*EndpointAddress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{38} } func (m *EndpointPort) Reset() { *m = EndpointPort{} } func (*EndpointPort) ProtoMessage() {} -func (*EndpointPort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{40} } +func (*EndpointPort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{39} } func (m *EndpointSubset) Reset() { *m = EndpointSubset{} } func (*EndpointSubset) ProtoMessage() {} -func (*EndpointSubset) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{41} } +func (*EndpointSubset) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{40} } func (m *Endpoints) Reset() { *m = Endpoints{} } func (*Endpoints) ProtoMessage() {} -func (*Endpoints) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{42} } +func (*Endpoints) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{41} } func (m *EndpointsList) Reset() { *m = EndpointsList{} } func (*EndpointsList) ProtoMessage() {} -func (*EndpointsList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{43} } +func (*EndpointsList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{42} } func (m *EnvFromSource) Reset() { *m = EnvFromSource{} } func (*EnvFromSource) ProtoMessage() {} -func (*EnvFromSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{44} } +func (*EnvFromSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{43} } func (m *EnvVar) Reset() { *m = EnvVar{} } func (*EnvVar) ProtoMessage() {} -func (*EnvVar) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{45} } +func (*EnvVar) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{44} } func (m *EnvVarSource) Reset() { *m = EnvVarSource{} } func (*EnvVarSource) ProtoMessage() {} -func (*EnvVarSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{46} } +func (*EnvVarSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{45} } func (m *Event) Reset() { *m = Event{} } func (*Event) ProtoMessage() {} -func (*Event) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{47} } +func (*Event) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{46} } func (m *EventList) Reset() { *m = EventList{} } func (*EventList) ProtoMessage() {} -func (*EventList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{48} } +func (*EventList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{47} } func (m *EventSeries) Reset() { *m = EventSeries{} } func (*EventSeries) ProtoMessage() {} -func (*EventSeries) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{49} } +func (*EventSeries) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{48} } func (m *EventSource) Reset() { *m = EventSource{} } func (*EventSource) ProtoMessage() {} -func (*EventSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{50} } +func (*EventSource) Descriptor() ([]byte, []int) { 
return fileDescriptorGenerated, []int{49} } func (m *ExecAction) Reset() { *m = ExecAction{} } func (*ExecAction) ProtoMessage() {} -func (*ExecAction) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{51} } +func (*ExecAction) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{50} } func (m *FCVolumeSource) Reset() { *m = FCVolumeSource{} } func (*FCVolumeSource) ProtoMessage() {} -func (*FCVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{52} } +func (*FCVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{51} } func (m *FlexPersistentVolumeSource) Reset() { *m = FlexPersistentVolumeSource{} } func (*FlexPersistentVolumeSource) ProtoMessage() {} func (*FlexPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{53} + return fileDescriptorGenerated, []int{52} } func (m *FlexVolumeSource) Reset() { *m = FlexVolumeSource{} } func (*FlexVolumeSource) ProtoMessage() {} -func (*FlexVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{54} } +func (*FlexVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{53} } func (m *FlockerVolumeSource) Reset() { *m = FlockerVolumeSource{} } func (*FlockerVolumeSource) ProtoMessage() {} -func (*FlockerVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{55} } +func (*FlockerVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{54} } func (m *GCEPersistentDiskVolumeSource) Reset() { *m = GCEPersistentDiskVolumeSource{} } func (*GCEPersistentDiskVolumeSource) ProtoMessage() {} func (*GCEPersistentDiskVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{56} + return fileDescriptorGenerated, []int{55} } func (m *GitRepoVolumeSource) Reset() { *m = GitRepoVolumeSource{} } func (*GitRepoVolumeSource) ProtoMessage() {} -func (*GitRepoVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{57} } - -func (m *GlusterfsPersistentVolumeSource) Reset() { *m = GlusterfsPersistentVolumeSource{} } -func (*GlusterfsPersistentVolumeSource) ProtoMessage() {} -func (*GlusterfsPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{58} -} +func (*GitRepoVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{56} } func (m *GlusterfsVolumeSource) Reset() { *m = GlusterfsVolumeSource{} } func (*GlusterfsVolumeSource) ProtoMessage() {} -func (*GlusterfsVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{59} } +func (*GlusterfsVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{57} } func (m *HTTPGetAction) Reset() { *m = HTTPGetAction{} } func (*HTTPGetAction) ProtoMessage() {} -func (*HTTPGetAction) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{60} } +func (*HTTPGetAction) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{58} } func (m *HTTPHeader) Reset() { *m = HTTPHeader{} } func (*HTTPHeader) ProtoMessage() {} -func (*HTTPHeader) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{61} } +func (*HTTPHeader) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{59} } func (m *Handler) Reset() { *m = Handler{} } func (*Handler) ProtoMessage() {} -func (*Handler) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{62} } +func (*Handler) Descriptor() 
([]byte, []int) { return fileDescriptorGenerated, []int{60} } func (m *HostAlias) Reset() { *m = HostAlias{} } func (*HostAlias) ProtoMessage() {} -func (*HostAlias) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{63} } +func (*HostAlias) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{61} } func (m *HostPathVolumeSource) Reset() { *m = HostPathVolumeSource{} } func (*HostPathVolumeSource) ProtoMessage() {} -func (*HostPathVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{64} } +func (*HostPathVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{62} } func (m *ISCSIPersistentVolumeSource) Reset() { *m = ISCSIPersistentVolumeSource{} } func (*ISCSIPersistentVolumeSource) ProtoMessage() {} func (*ISCSIPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{65} + return fileDescriptorGenerated, []int{63} } func (m *ISCSIVolumeSource) Reset() { *m = ISCSIVolumeSource{} } func (*ISCSIVolumeSource) ProtoMessage() {} -func (*ISCSIVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{66} } +func (*ISCSIVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{64} } func (m *KeyToPath) Reset() { *m = KeyToPath{} } func (*KeyToPath) ProtoMessage() {} -func (*KeyToPath) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{67} } +func (*KeyToPath) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{65} } func (m *Lifecycle) Reset() { *m = Lifecycle{} } func (*Lifecycle) ProtoMessage() {} -func (*Lifecycle) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{68} } +func (*Lifecycle) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{66} } func (m *LimitRange) Reset() { *m = LimitRange{} } func (*LimitRange) ProtoMessage() {} -func (*LimitRange) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{69} } +func (*LimitRange) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{67} } func (m *LimitRangeItem) Reset() { *m = LimitRangeItem{} } func (*LimitRangeItem) ProtoMessage() {} -func (*LimitRangeItem) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{70} } +func (*LimitRangeItem) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{68} } func (m *LimitRangeList) Reset() { *m = LimitRangeList{} } func (*LimitRangeList) ProtoMessage() {} -func (*LimitRangeList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{71} } +func (*LimitRangeList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{69} } func (m *LimitRangeSpec) Reset() { *m = LimitRangeSpec{} } func (*LimitRangeSpec) ProtoMessage() {} -func (*LimitRangeSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{72} } +func (*LimitRangeSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{70} } func (m *List) Reset() { *m = List{} } func (*List) ProtoMessage() {} -func (*List) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{73} } +func (*List) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{71} } func (m *LoadBalancerIngress) Reset() { *m = LoadBalancerIngress{} } func (*LoadBalancerIngress) ProtoMessage() {} -func (*LoadBalancerIngress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{74} } +func (*LoadBalancerIngress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, 
[]int{72} } func (m *LoadBalancerStatus) Reset() { *m = LoadBalancerStatus{} } func (*LoadBalancerStatus) ProtoMessage() {} -func (*LoadBalancerStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{75} } +func (*LoadBalancerStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{73} } func (m *LocalObjectReference) Reset() { *m = LocalObjectReference{} } func (*LocalObjectReference) ProtoMessage() {} -func (*LocalObjectReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{76} } +func (*LocalObjectReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{74} } func (m *LocalVolumeSource) Reset() { *m = LocalVolumeSource{} } func (*LocalVolumeSource) ProtoMessage() {} -func (*LocalVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{77} } +func (*LocalVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{75} } func (m *NFSVolumeSource) Reset() { *m = NFSVolumeSource{} } func (*NFSVolumeSource) ProtoMessage() {} -func (*NFSVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{78} } +func (*NFSVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{76} } func (m *Namespace) Reset() { *m = Namespace{} } func (*Namespace) ProtoMessage() {} -func (*Namespace) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{79} } +func (*Namespace) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{77} } func (m *NamespaceList) Reset() { *m = NamespaceList{} } func (*NamespaceList) ProtoMessage() {} -func (*NamespaceList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{80} } +func (*NamespaceList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{78} } func (m *NamespaceSpec) Reset() { *m = NamespaceSpec{} } func (*NamespaceSpec) ProtoMessage() {} -func (*NamespaceSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{81} } +func (*NamespaceSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{79} } func (m *NamespaceStatus) Reset() { *m = NamespaceStatus{} } func (*NamespaceStatus) ProtoMessage() {} -func (*NamespaceStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{82} } +func (*NamespaceStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{80} } func (m *Node) Reset() { *m = Node{} } func (*Node) ProtoMessage() {} -func (*Node) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{83} } +func (*Node) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{81} } func (m *NodeAddress) Reset() { *m = NodeAddress{} } func (*NodeAddress) ProtoMessage() {} -func (*NodeAddress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{84} } +func (*NodeAddress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{82} } func (m *NodeAffinity) Reset() { *m = NodeAffinity{} } func (*NodeAffinity) ProtoMessage() {} -func (*NodeAffinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{85} } +func (*NodeAffinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{83} } func (m *NodeCondition) Reset() { *m = NodeCondition{} } func (*NodeCondition) ProtoMessage() {} -func (*NodeCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{86} } +func (*NodeCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{84} } func 
(m *NodeConfigSource) Reset() { *m = NodeConfigSource{} } func (*NodeConfigSource) ProtoMessage() {} -func (*NodeConfigSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{87} } +func (*NodeConfigSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{85} } func (m *NodeConfigStatus) Reset() { *m = NodeConfigStatus{} } func (*NodeConfigStatus) ProtoMessage() {} -func (*NodeConfigStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{88} } +func (*NodeConfigStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{86} } func (m *NodeDaemonEndpoints) Reset() { *m = NodeDaemonEndpoints{} } func (*NodeDaemonEndpoints) ProtoMessage() {} -func (*NodeDaemonEndpoints) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{89} } +func (*NodeDaemonEndpoints) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{87} } func (m *NodeList) Reset() { *m = NodeList{} } func (*NodeList) ProtoMessage() {} -func (*NodeList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{90} } +func (*NodeList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{88} } func (m *NodeProxyOptions) Reset() { *m = NodeProxyOptions{} } func (*NodeProxyOptions) ProtoMessage() {} -func (*NodeProxyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{91} } +func (*NodeProxyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{89} } func (m *NodeResources) Reset() { *m = NodeResources{} } func (*NodeResources) ProtoMessage() {} -func (*NodeResources) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{92} } +func (*NodeResources) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{90} } func (m *NodeSelector) Reset() { *m = NodeSelector{} } func (*NodeSelector) ProtoMessage() {} -func (*NodeSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{93} } +func (*NodeSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{91} } func (m *NodeSelectorRequirement) Reset() { *m = NodeSelectorRequirement{} } func (*NodeSelectorRequirement) ProtoMessage() {} func (*NodeSelectorRequirement) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{94} + return fileDescriptorGenerated, []int{92} } func (m *NodeSelectorTerm) Reset() { *m = NodeSelectorTerm{} } func (*NodeSelectorTerm) ProtoMessage() {} -func (*NodeSelectorTerm) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{95} } +func (*NodeSelectorTerm) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{93} } func (m *NodeSpec) Reset() { *m = NodeSpec{} } func (*NodeSpec) ProtoMessage() {} -func (*NodeSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{96} } +func (*NodeSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{94} } func (m *NodeStatus) Reset() { *m = NodeStatus{} } func (*NodeStatus) ProtoMessage() {} -func (*NodeStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{97} } +func (*NodeStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{95} } func (m *NodeSystemInfo) Reset() { *m = NodeSystemInfo{} } func (*NodeSystemInfo) ProtoMessage() {} -func (*NodeSystemInfo) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{98} } +func (*NodeSystemInfo) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{96} } func (m *ObjectFieldSelector) 
Reset() { *m = ObjectFieldSelector{} } func (*ObjectFieldSelector) ProtoMessage() {} -func (*ObjectFieldSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{99} } +func (*ObjectFieldSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{97} } func (m *ObjectReference) Reset() { *m = ObjectReference{} } func (*ObjectReference) ProtoMessage() {} -func (*ObjectReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{100} } +func (*ObjectReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{98} } func (m *PersistentVolume) Reset() { *m = PersistentVolume{} } func (*PersistentVolume) ProtoMessage() {} -func (*PersistentVolume) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{101} } +func (*PersistentVolume) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{99} } func (m *PersistentVolumeClaim) Reset() { *m = PersistentVolumeClaim{} } func (*PersistentVolumeClaim) ProtoMessage() {} -func (*PersistentVolumeClaim) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{102} } +func (*PersistentVolumeClaim) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{100} } func (m *PersistentVolumeClaimCondition) Reset() { *m = PersistentVolumeClaimCondition{} } func (*PersistentVolumeClaimCondition) ProtoMessage() {} func (*PersistentVolumeClaimCondition) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{103} + return fileDescriptorGenerated, []int{101} } func (m *PersistentVolumeClaimList) Reset() { *m = PersistentVolumeClaimList{} } func (*PersistentVolumeClaimList) ProtoMessage() {} func (*PersistentVolumeClaimList) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{104} + return fileDescriptorGenerated, []int{102} } func (m *PersistentVolumeClaimSpec) Reset() { *m = PersistentVolumeClaimSpec{} } func (*PersistentVolumeClaimSpec) ProtoMessage() {} func (*PersistentVolumeClaimSpec) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{105} + return fileDescriptorGenerated, []int{103} } func (m *PersistentVolumeClaimStatus) Reset() { *m = PersistentVolumeClaimStatus{} } func (*PersistentVolumeClaimStatus) ProtoMessage() {} func (*PersistentVolumeClaimStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{106} + return fileDescriptorGenerated, []int{104} } func (m *PersistentVolumeClaimVolumeSource) Reset() { *m = PersistentVolumeClaimVolumeSource{} } func (*PersistentVolumeClaimVolumeSource) ProtoMessage() {} func (*PersistentVolumeClaimVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{107} + return fileDescriptorGenerated, []int{105} } func (m *PersistentVolumeList) Reset() { *m = PersistentVolumeList{} } func (*PersistentVolumeList) ProtoMessage() {} -func (*PersistentVolumeList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{108} } +func (*PersistentVolumeList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{106} } func (m *PersistentVolumeSource) Reset() { *m = PersistentVolumeSource{} } func (*PersistentVolumeSource) ProtoMessage() {} func (*PersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{109} + return fileDescriptorGenerated, []int{107} } func (m *PersistentVolumeSpec) Reset() { *m = PersistentVolumeSpec{} } func (*PersistentVolumeSpec) ProtoMessage() {} -func (*PersistentVolumeSpec) Descriptor() ([]byte, []int) { return 
fileDescriptorGenerated, []int{110} } +func (*PersistentVolumeSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{108} } func (m *PersistentVolumeStatus) Reset() { *m = PersistentVolumeStatus{} } func (*PersistentVolumeStatus) ProtoMessage() {} func (*PersistentVolumeStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{111} + return fileDescriptorGenerated, []int{109} } func (m *PhotonPersistentDiskVolumeSource) Reset() { *m = PhotonPersistentDiskVolumeSource{} } func (*PhotonPersistentDiskVolumeSource) ProtoMessage() {} func (*PhotonPersistentDiskVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{112} + return fileDescriptorGenerated, []int{110} } func (m *Pod) Reset() { *m = Pod{} } func (*Pod) ProtoMessage() {} -func (*Pod) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{113} } +func (*Pod) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{111} } func (m *PodAffinity) Reset() { *m = PodAffinity{} } func (*PodAffinity) ProtoMessage() {} -func (*PodAffinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{114} } +func (*PodAffinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{112} } func (m *PodAffinityTerm) Reset() { *m = PodAffinityTerm{} } func (*PodAffinityTerm) ProtoMessage() {} -func (*PodAffinityTerm) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{115} } +func (*PodAffinityTerm) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{113} } func (m *PodAntiAffinity) Reset() { *m = PodAntiAffinity{} } func (*PodAntiAffinity) ProtoMessage() {} -func (*PodAntiAffinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{116} } +func (*PodAntiAffinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{114} } func (m *PodAttachOptions) Reset() { *m = PodAttachOptions{} } func (*PodAttachOptions) ProtoMessage() {} -func (*PodAttachOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{117} } +func (*PodAttachOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{115} } func (m *PodCondition) Reset() { *m = PodCondition{} } func (*PodCondition) ProtoMessage() {} -func (*PodCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{118} } +func (*PodCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{116} } func (m *PodDNSConfig) Reset() { *m = PodDNSConfig{} } func (*PodDNSConfig) ProtoMessage() {} -func (*PodDNSConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{119} } +func (*PodDNSConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{117} } func (m *PodDNSConfigOption) Reset() { *m = PodDNSConfigOption{} } func (*PodDNSConfigOption) ProtoMessage() {} -func (*PodDNSConfigOption) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{120} } +func (*PodDNSConfigOption) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{118} } func (m *PodExecOptions) Reset() { *m = PodExecOptions{} } func (*PodExecOptions) ProtoMessage() {} -func (*PodExecOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{121} } +func (*PodExecOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{119} } func (m *PodList) Reset() { *m = PodList{} } func (*PodList) ProtoMessage() {} -func (*PodList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, 
[]int{122} } +func (*PodList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{120} } func (m *PodLogOptions) Reset() { *m = PodLogOptions{} } func (*PodLogOptions) ProtoMessage() {} -func (*PodLogOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{123} } +func (*PodLogOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{121} } func (m *PodPortForwardOptions) Reset() { *m = PodPortForwardOptions{} } func (*PodPortForwardOptions) ProtoMessage() {} -func (*PodPortForwardOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{124} } +func (*PodPortForwardOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{122} } func (m *PodProxyOptions) Reset() { *m = PodProxyOptions{} } func (*PodProxyOptions) ProtoMessage() {} -func (*PodProxyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{125} } +func (*PodProxyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{123} } func (m *PodReadinessGate) Reset() { *m = PodReadinessGate{} } func (*PodReadinessGate) ProtoMessage() {} -func (*PodReadinessGate) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{126} } +func (*PodReadinessGate) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{124} } func (m *PodSecurityContext) Reset() { *m = PodSecurityContext{} } func (*PodSecurityContext) ProtoMessage() {} -func (*PodSecurityContext) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{127} } +func (*PodSecurityContext) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{125} } func (m *PodSignature) Reset() { *m = PodSignature{} } func (*PodSignature) ProtoMessage() {} -func (*PodSignature) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{128} } +func (*PodSignature) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{126} } func (m *PodSpec) Reset() { *m = PodSpec{} } func (*PodSpec) ProtoMessage() {} -func (*PodSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{129} } +func (*PodSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{127} } func (m *PodStatus) Reset() { *m = PodStatus{} } func (*PodStatus) ProtoMessage() {} -func (*PodStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{130} } +func (*PodStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{128} } func (m *PodStatusResult) Reset() { *m = PodStatusResult{} } func (*PodStatusResult) ProtoMessage() {} -func (*PodStatusResult) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{131} } +func (*PodStatusResult) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{129} } func (m *PodTemplate) Reset() { *m = PodTemplate{} } func (*PodTemplate) ProtoMessage() {} -func (*PodTemplate) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{132} } +func (*PodTemplate) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{130} } func (m *PodTemplateList) Reset() { *m = PodTemplateList{} } func (*PodTemplateList) ProtoMessage() {} -func (*PodTemplateList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{133} } +func (*PodTemplateList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{131} } func (m *PodTemplateSpec) Reset() { *m = PodTemplateSpec{} } func (*PodTemplateSpec) ProtoMessage() {} -func (*PodTemplateSpec) Descriptor() ([]byte, []int) { 
return fileDescriptorGenerated, []int{134} } +func (*PodTemplateSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{132} } func (m *PortworxVolumeSource) Reset() { *m = PortworxVolumeSource{} } func (*PortworxVolumeSource) ProtoMessage() {} -func (*PortworxVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{135} } +func (*PortworxVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{133} } func (m *Preconditions) Reset() { *m = Preconditions{} } func (*Preconditions) ProtoMessage() {} -func (*Preconditions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{136} } +func (*Preconditions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{134} } func (m *PreferAvoidPodsEntry) Reset() { *m = PreferAvoidPodsEntry{} } func (*PreferAvoidPodsEntry) ProtoMessage() {} -func (*PreferAvoidPodsEntry) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{137} } +func (*PreferAvoidPodsEntry) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{135} } func (m *PreferredSchedulingTerm) Reset() { *m = PreferredSchedulingTerm{} } func (*PreferredSchedulingTerm) ProtoMessage() {} func (*PreferredSchedulingTerm) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{138} + return fileDescriptorGenerated, []int{136} } func (m *Probe) Reset() { *m = Probe{} } func (*Probe) ProtoMessage() {} -func (*Probe) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{139} } +func (*Probe) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{137} } func (m *ProjectedVolumeSource) Reset() { *m = ProjectedVolumeSource{} } func (*ProjectedVolumeSource) ProtoMessage() {} -func (*ProjectedVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{140} } +func (*ProjectedVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{138} } func (m *QuobyteVolumeSource) Reset() { *m = QuobyteVolumeSource{} } func (*QuobyteVolumeSource) ProtoMessage() {} -func (*QuobyteVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{141} } +func (*QuobyteVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{139} } func (m *RBDPersistentVolumeSource) Reset() { *m = RBDPersistentVolumeSource{} } func (*RBDPersistentVolumeSource) ProtoMessage() {} func (*RBDPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{142} + return fileDescriptorGenerated, []int{140} } func (m *RBDVolumeSource) Reset() { *m = RBDVolumeSource{} } func (*RBDVolumeSource) ProtoMessage() {} -func (*RBDVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{143} } +func (*RBDVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{141} } func (m *RangeAllocation) Reset() { *m = RangeAllocation{} } func (*RangeAllocation) ProtoMessage() {} -func (*RangeAllocation) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{144} } +func (*RangeAllocation) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{142} } func (m *ReplicationController) Reset() { *m = ReplicationController{} } func (*ReplicationController) ProtoMessage() {} -func (*ReplicationController) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{145} } +func (*ReplicationController) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{143} } func (m 
*ReplicationControllerCondition) Reset() { *m = ReplicationControllerCondition{} } func (*ReplicationControllerCondition) ProtoMessage() {} func (*ReplicationControllerCondition) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{146} + return fileDescriptorGenerated, []int{144} } func (m *ReplicationControllerList) Reset() { *m = ReplicationControllerList{} } func (*ReplicationControllerList) ProtoMessage() {} func (*ReplicationControllerList) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{147} + return fileDescriptorGenerated, []int{145} } func (m *ReplicationControllerSpec) Reset() { *m = ReplicationControllerSpec{} } func (*ReplicationControllerSpec) ProtoMessage() {} func (*ReplicationControllerSpec) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{148} + return fileDescriptorGenerated, []int{146} } func (m *ReplicationControllerStatus) Reset() { *m = ReplicationControllerStatus{} } func (*ReplicationControllerStatus) ProtoMessage() {} func (*ReplicationControllerStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{149} + return fileDescriptorGenerated, []int{147} } func (m *ResourceFieldSelector) Reset() { *m = ResourceFieldSelector{} } func (*ResourceFieldSelector) ProtoMessage() {} -func (*ResourceFieldSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{150} } +func (*ResourceFieldSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{148} } func (m *ResourceQuota) Reset() { *m = ResourceQuota{} } func (*ResourceQuota) ProtoMessage() {} -func (*ResourceQuota) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{151} } +func (*ResourceQuota) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{149} } func (m *ResourceQuotaList) Reset() { *m = ResourceQuotaList{} } func (*ResourceQuotaList) ProtoMessage() {} -func (*ResourceQuotaList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{152} } +func (*ResourceQuotaList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{150} } func (m *ResourceQuotaSpec) Reset() { *m = ResourceQuotaSpec{} } func (*ResourceQuotaSpec) ProtoMessage() {} -func (*ResourceQuotaSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{153} } +func (*ResourceQuotaSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{151} } func (m *ResourceQuotaStatus) Reset() { *m = ResourceQuotaStatus{} } func (*ResourceQuotaStatus) ProtoMessage() {} -func (*ResourceQuotaStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{154} } +func (*ResourceQuotaStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{152} } func (m *ResourceRequirements) Reset() { *m = ResourceRequirements{} } func (*ResourceRequirements) ProtoMessage() {} -func (*ResourceRequirements) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{155} } +func (*ResourceRequirements) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{153} } func (m *SELinuxOptions) Reset() { *m = SELinuxOptions{} } func (*SELinuxOptions) ProtoMessage() {} -func (*SELinuxOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{156} } +func (*SELinuxOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{154} } func (m *ScaleIOPersistentVolumeSource) Reset() { *m = ScaleIOPersistentVolumeSource{} } func (*ScaleIOPersistentVolumeSource) ProtoMessage() {} func 
(*ScaleIOPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{157} + return fileDescriptorGenerated, []int{155} } func (m *ScaleIOVolumeSource) Reset() { *m = ScaleIOVolumeSource{} } func (*ScaleIOVolumeSource) ProtoMessage() {} -func (*ScaleIOVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{158} } +func (*ScaleIOVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{156} } func (m *ScopeSelector) Reset() { *m = ScopeSelector{} } func (*ScopeSelector) ProtoMessage() {} -func (*ScopeSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{159} } +func (*ScopeSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{157} } func (m *ScopedResourceSelectorRequirement) Reset() { *m = ScopedResourceSelectorRequirement{} } func (*ScopedResourceSelectorRequirement) ProtoMessage() {} func (*ScopedResourceSelectorRequirement) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{160} + return fileDescriptorGenerated, []int{158} } func (m *Secret) Reset() { *m = Secret{} } func (*Secret) ProtoMessage() {} -func (*Secret) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{161} } +func (*Secret) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{159} } func (m *SecretEnvSource) Reset() { *m = SecretEnvSource{} } func (*SecretEnvSource) ProtoMessage() {} -func (*SecretEnvSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{162} } +func (*SecretEnvSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{160} } func (m *SecretKeySelector) Reset() { *m = SecretKeySelector{} } func (*SecretKeySelector) ProtoMessage() {} -func (*SecretKeySelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{163} } +func (*SecretKeySelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{161} } func (m *SecretList) Reset() { *m = SecretList{} } func (*SecretList) ProtoMessage() {} -func (*SecretList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{164} } +func (*SecretList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{162} } func (m *SecretProjection) Reset() { *m = SecretProjection{} } func (*SecretProjection) ProtoMessage() {} -func (*SecretProjection) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{165} } +func (*SecretProjection) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{163} } func (m *SecretReference) Reset() { *m = SecretReference{} } func (*SecretReference) ProtoMessage() {} -func (*SecretReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{166} } +func (*SecretReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{164} } func (m *SecretVolumeSource) Reset() { *m = SecretVolumeSource{} } func (*SecretVolumeSource) ProtoMessage() {} -func (*SecretVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{167} } +func (*SecretVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{165} } func (m *SecurityContext) Reset() { *m = SecurityContext{} } func (*SecurityContext) ProtoMessage() {} -func (*SecurityContext) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{168} } +func (*SecurityContext) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{166} } func (m *SerializedReference) Reset() { *m = 
SerializedReference{} } func (*SerializedReference) ProtoMessage() {} -func (*SerializedReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{169} } +func (*SerializedReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{167} } func (m *Service) Reset() { *m = Service{} } func (*Service) ProtoMessage() {} -func (*Service) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{170} } +func (*Service) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{168} } func (m *ServiceAccount) Reset() { *m = ServiceAccount{} } func (*ServiceAccount) ProtoMessage() {} -func (*ServiceAccount) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{171} } +func (*ServiceAccount) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{169} } func (m *ServiceAccountList) Reset() { *m = ServiceAccountList{} } func (*ServiceAccountList) ProtoMessage() {} -func (*ServiceAccountList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{172} } +func (*ServiceAccountList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{170} } func (m *ServiceAccountTokenProjection) Reset() { *m = ServiceAccountTokenProjection{} } func (*ServiceAccountTokenProjection) ProtoMessage() {} func (*ServiceAccountTokenProjection) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{173} + return fileDescriptorGenerated, []int{171} } func (m *ServiceList) Reset() { *m = ServiceList{} } func (*ServiceList) ProtoMessage() {} -func (*ServiceList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{174} } +func (*ServiceList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{172} } func (m *ServicePort) Reset() { *m = ServicePort{} } func (*ServicePort) ProtoMessage() {} -func (*ServicePort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{175} } +func (*ServicePort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{173} } func (m *ServiceProxyOptions) Reset() { *m = ServiceProxyOptions{} } func (*ServiceProxyOptions) ProtoMessage() {} -func (*ServiceProxyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{176} } +func (*ServiceProxyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{174} } func (m *ServiceSpec) Reset() { *m = ServiceSpec{} } func (*ServiceSpec) ProtoMessage() {} -func (*ServiceSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{177} } +func (*ServiceSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{175} } func (m *ServiceStatus) Reset() { *m = ServiceStatus{} } func (*ServiceStatus) ProtoMessage() {} -func (*ServiceStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{178} } +func (*ServiceStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{176} } func (m *SessionAffinityConfig) Reset() { *m = SessionAffinityConfig{} } func (*SessionAffinityConfig) ProtoMessage() {} -func (*SessionAffinityConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{179} } +func (*SessionAffinityConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{177} } func (m *StorageOSPersistentVolumeSource) Reset() { *m = StorageOSPersistentVolumeSource{} } func (*StorageOSPersistentVolumeSource) ProtoMessage() {} func (*StorageOSPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{180} + 
return fileDescriptorGenerated, []int{178} } func (m *StorageOSVolumeSource) Reset() { *m = StorageOSVolumeSource{} } func (*StorageOSVolumeSource) ProtoMessage() {} -func (*StorageOSVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{181} } +func (*StorageOSVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{179} } func (m *Sysctl) Reset() { *m = Sysctl{} } func (*Sysctl) ProtoMessage() {} -func (*Sysctl) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{182} } +func (*Sysctl) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{180} } func (m *TCPSocketAction) Reset() { *m = TCPSocketAction{} } func (*TCPSocketAction) ProtoMessage() {} -func (*TCPSocketAction) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{183} } +func (*TCPSocketAction) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{181} } func (m *Taint) Reset() { *m = Taint{} } func (*Taint) ProtoMessage() {} -func (*Taint) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{184} } +func (*Taint) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{182} } func (m *Toleration) Reset() { *m = Toleration{} } func (*Toleration) ProtoMessage() {} -func (*Toleration) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{185} } +func (*Toleration) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{183} } func (m *TopologySelectorLabelRequirement) Reset() { *m = TopologySelectorLabelRequirement{} } func (*TopologySelectorLabelRequirement) ProtoMessage() {} func (*TopologySelectorLabelRequirement) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{186} + return fileDescriptorGenerated, []int{184} } func (m *TopologySelectorTerm) Reset() { *m = TopologySelectorTerm{} } func (*TopologySelectorTerm) ProtoMessage() {} -func (*TopologySelectorTerm) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{187} } +func (*TopologySelectorTerm) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{185} } func (m *TypedLocalObjectReference) Reset() { *m = TypedLocalObjectReference{} } func (*TypedLocalObjectReference) ProtoMessage() {} func (*TypedLocalObjectReference) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{188} + return fileDescriptorGenerated, []int{186} } func (m *Volume) Reset() { *m = Volume{} } func (*Volume) ProtoMessage() {} -func (*Volume) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{189} } +func (*Volume) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{187} } func (m *VolumeDevice) Reset() { *m = VolumeDevice{} } func (*VolumeDevice) ProtoMessage() {} -func (*VolumeDevice) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{190} } +func (*VolumeDevice) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{188} } func (m *VolumeMount) Reset() { *m = VolumeMount{} } func (*VolumeMount) ProtoMessage() {} -func (*VolumeMount) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{191} } +func (*VolumeMount) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{189} } func (m *VolumeNodeAffinity) Reset() { *m = VolumeNodeAffinity{} } func (*VolumeNodeAffinity) ProtoMessage() {} -func (*VolumeNodeAffinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{192} } +func (*VolumeNodeAffinity) Descriptor() ([]byte, []int) { return 
fileDescriptorGenerated, []int{190} }
 
 func (m *VolumeProjection) Reset() { *m = VolumeProjection{} }
 func (*VolumeProjection) ProtoMessage() {}
-func (*VolumeProjection) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{193} }
+func (*VolumeProjection) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{191} }
 
 func (m *VolumeSource) Reset() { *m = VolumeSource{} }
 func (*VolumeSource) ProtoMessage() {}
-func (*VolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{194} }
+func (*VolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{192} }
 
 func (m *VsphereVirtualDiskVolumeSource) Reset() { *m = VsphereVirtualDiskVolumeSource{} }
 func (*VsphereVirtualDiskVolumeSource) ProtoMessage() {}
 func (*VsphereVirtualDiskVolumeSource) Descriptor() ([]byte, []int) {
-	return fileDescriptorGenerated, []int{195}
+	return fileDescriptorGenerated, []int{193}
 }
 
 func (m *WeightedPodAffinityTerm) Reset() { *m = WeightedPodAffinityTerm{} }
 func (*WeightedPodAffinityTerm) ProtoMessage() {}
 func (*WeightedPodAffinityTerm) Descriptor() ([]byte, []int) {
-	return fileDescriptorGenerated, []int{196}
-}
-
-func (m *WindowsSecurityContextOptions) Reset() { *m = WindowsSecurityContextOptions{} }
-func (*WindowsSecurityContextOptions) ProtoMessage() {}
-func (*WindowsSecurityContextOptions) Descriptor() ([]byte, []int) {
-	return fileDescriptorGenerated, []int{197}
+	return fileDescriptorGenerated, []int{194}
 }
 
 func init() {
@@ -1127,7 +1109,6 @@ func init() {
 	proto.RegisterType((*AzureFileVolumeSource)(nil), "k8s.io.api.core.v1.AzureFileVolumeSource")
 	proto.RegisterType((*Binding)(nil), "k8s.io.api.core.v1.Binding")
 	proto.RegisterType((*CSIPersistentVolumeSource)(nil), "k8s.io.api.core.v1.CSIPersistentVolumeSource")
-	proto.RegisterType((*CSIVolumeSource)(nil), "k8s.io.api.core.v1.CSIVolumeSource")
 	proto.RegisterType((*Capabilities)(nil), "k8s.io.api.core.v1.Capabilities")
 	proto.RegisterType((*CephFSPersistentVolumeSource)(nil), "k8s.io.api.core.v1.CephFSPersistentVolumeSource")
 	proto.RegisterType((*CephFSVolumeSource)(nil), "k8s.io.api.core.v1.CephFSVolumeSource")
@@ -1176,7 +1157,6 @@ func init() {
 	proto.RegisterType((*FlockerVolumeSource)(nil), "k8s.io.api.core.v1.FlockerVolumeSource")
 	proto.RegisterType((*GCEPersistentDiskVolumeSource)(nil), "k8s.io.api.core.v1.GCEPersistentDiskVolumeSource")
 	proto.RegisterType((*GitRepoVolumeSource)(nil), "k8s.io.api.core.v1.GitRepoVolumeSource")
-	proto.RegisterType((*GlusterfsPersistentVolumeSource)(nil), "k8s.io.api.core.v1.GlusterfsPersistentVolumeSource")
 	proto.RegisterType((*GlusterfsVolumeSource)(nil), "k8s.io.api.core.v1.GlusterfsVolumeSource")
 	proto.RegisterType((*HTTPGetAction)(nil), "k8s.io.api.core.v1.HTTPGetAction")
 	proto.RegisterType((*HTTPHeader)(nil), "k8s.io.api.core.v1.HTTPHeader")
@@ -1315,7 +1295,6 @@ func init() {
 	proto.RegisterType((*VolumeSource)(nil), "k8s.io.api.core.v1.VolumeSource")
 	proto.RegisterType((*VsphereVirtualDiskVolumeSource)(nil), "k8s.io.api.core.v1.VsphereVirtualDiskVolumeSource")
 	proto.RegisterType((*WeightedPodAffinityTerm)(nil), "k8s.io.api.core.v1.WeightedPodAffinityTerm")
-	proto.RegisterType((*WindowsSecurityContextOptions)(nil), "k8s.io.api.core.v1.WindowsSecurityContextOptions")
 }
 
 func (m *AWSElasticBlockStoreVolumeSource) Marshal() (dAtA []byte, err error) {
 	size := m.Size()
@@ -1707,86 +1686,6 @@ func (m *CSIPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) {
 		}
 		i += n8
 	}
-	if m.ControllerExpandSecretRef != nil {
-		dAtA[i] = 0x4a
-		i++
-		i = encodeVarintGenerated(dAtA, i, uint64(m.ControllerExpandSecretRef.Size()))
-		n9, err := m.ControllerExpandSecretRef.MarshalTo(dAtA[i:])
-		if err != nil {
-			return 0, err
-		}
-		i += n9
-	}
-	return i, nil
-}
-
-func (m *CSIVolumeSource) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalTo(dAtA)
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *CSIVolumeSource) MarshalTo(dAtA []byte) (int, error) {
-	var i int
-	_ = i
-	var l int
-	_ = l
-	dAtA[i] = 0xa
-	i++
-	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver)))
-	i += copy(dAtA[i:], m.Driver)
-	if m.ReadOnly != nil {
-		dAtA[i] = 0x10
-		i++
-		if *m.ReadOnly {
-			dAtA[i] = 1
-		} else {
-			dAtA[i] = 0
-		}
-		i++
-	}
-	if m.FSType != nil {
-		dAtA[i] = 0x1a
-		i++
-		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FSType)))
-		i += copy(dAtA[i:], *m.FSType)
-	}
-	if len(m.VolumeAttributes) > 0 {
-		keysForVolumeAttributes := make([]string, 0, len(m.VolumeAttributes))
-		for k := range m.VolumeAttributes {
-			keysForVolumeAttributes = append(keysForVolumeAttributes, string(k))
-		}
-		github_com_gogo_protobuf_sortkeys.Strings(keysForVolumeAttributes)
-		for _, k := range keysForVolumeAttributes {
-			dAtA[i] = 0x22
-			i++
-			v := m.VolumeAttributes[string(k)]
-			mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
-			i = encodeVarintGenerated(dAtA, i, uint64(mapSize))
-			dAtA[i] = 0xa
-			i++
-			i = encodeVarintGenerated(dAtA, i, uint64(len(k)))
-			i += copy(dAtA[i:], k)
-			dAtA[i] = 0x12
-			i++
-			i = encodeVarintGenerated(dAtA, i, uint64(len(v)))
-			i += copy(dAtA[i:], v)
-		}
-	}
-	if m.NodePublishSecretRef != nil {
-		dAtA[i] = 0x2a
-		i++
-		i = encodeVarintGenerated(dAtA, i, uint64(m.NodePublishSecretRef.Size()))
-		n10, err := m.NodePublishSecretRef.MarshalTo(dAtA[i:])
-		if err != nil {
-			return 0, err
-		}
-		i += n10
-	}
 	return i, nil
 }
@@ -1884,11 +1783,11 @@ func (m *CephFSPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) {
 		dAtA[i] = 0x2a
 		i++
 		i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size()))
-		n11, err := m.SecretRef.MarshalTo(dAtA[i:])
+		n9, err := m.SecretRef.MarshalTo(dAtA[i:])
 		if err != nil {
 			return 0, err
 		}
-		i += n11
+		i += n9
 	}
 	dAtA[i] = 0x30
 	i++
@@ -1947,11 +1846,11 @@ func (m *CephFSVolumeSource) MarshalTo(dAtA []byte) (int, error) {
 		dAtA[i] = 0x2a
 		i++
 		i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size()))
-		n12, err := m.SecretRef.MarshalTo(dAtA[i:])
+		n10, err := m.SecretRef.MarshalTo(dAtA[i:])
 		if err != nil {
 			return 0, err
 		}
-		i += n12
+		i += n10
 	}
 	dAtA[i] = 0x30
 	i++
@@ -1999,11 +1898,11 @@ func (m *CinderPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) {
 		dAtA[i] = 0x22
 		i++
 		i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size()))
-		n13, err := m.SecretRef.MarshalTo(dAtA[i:])
+		n11, err := m.SecretRef.MarshalTo(dAtA[i:])
 		if err != nil {
 			return 0, err
 		}
-		i += n13
+		i += n11
 	}
 	return i, nil
 }
@@ -2043,11 +1942,11 @@ func (m *CinderVolumeSource) MarshalTo(dAtA []byte) (int, error) {
 		dAtA[i] = 0x22
 		i++
 		i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size()))
-		n14, err := m.SecretRef.MarshalTo(dAtA[i:])
+		n12, err := m.SecretRef.MarshalTo(dAtA[i:])
 		if err != nil {
 			return 0, err
 		}
-		i += n14
+		i += n12
 	}
 	return i, nil
 }
@@ -2127,11 +2026,11 @@ func (m *ComponentStatus) MarshalTo(dAtA []byte) (int, error) {
 	dAtA[i] = 0xa
 	i++
 	i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size()))
-	n15, err := m.ObjectMeta.MarshalTo(dAtA[i:])
+	n13, err :=
m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n15 + i += n13 if len(m.Conditions) > 0 { for _, msg := range m.Conditions { dAtA[i] = 0x12 @@ -2165,11 +2064,11 @@ func (m *ComponentStatusList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n16, err := m.ListMeta.MarshalTo(dAtA[i:]) + n14, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n16 + i += n14 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -2203,11 +2102,11 @@ func (m *ConfigMap) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n17, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n15, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n17 + i += n15 if len(m.Data) > 0 { keysForData := make([]string, 0, len(m.Data)) for k := range m.Data { @@ -2279,11 +2178,11 @@ func (m *ConfigMapEnvSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LocalObjectReference.Size())) - n18, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) + n16, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n18 + i += n16 if m.Optional != nil { dAtA[i] = 0x10 i++ @@ -2315,11 +2214,11 @@ func (m *ConfigMapKeySelector) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LocalObjectReference.Size())) - n19, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) + n17, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n19 + i += n17 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) @@ -2355,11 +2254,11 @@ func (m *ConfigMapList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n20, err := m.ListMeta.MarshalTo(dAtA[i:]) + n18, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n20 + i += n18 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -2431,11 +2330,11 @@ func (m *ConfigMapProjection) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LocalObjectReference.Size())) - n21, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) + n19, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n21 + i += n19 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -2479,11 +2378,11 @@ func (m *ConfigMapVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LocalObjectReference.Size())) - n22, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) + n20, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n22 + i += n20 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -2598,11 +2497,11 @@ func (m *Container) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x42 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Resources.Size())) - n23, err := m.Resources.MarshalTo(dAtA[i:]) + n21, err := m.Resources.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n23 + i += n21 if len(m.VolumeMounts) > 0 { for _, msg := range m.VolumeMounts { dAtA[i] = 0x4a @@ -2619,31 +2518,31 @@ func (m *Container) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x52 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LivenessProbe.Size())) - n24, 
err := m.LivenessProbe.MarshalTo(dAtA[i:]) + n22, err := m.LivenessProbe.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n24 + i += n22 } if m.ReadinessProbe != nil { dAtA[i] = 0x5a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ReadinessProbe.Size())) - n25, err := m.ReadinessProbe.MarshalTo(dAtA[i:]) + n23, err := m.ReadinessProbe.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n25 + i += n23 } if m.Lifecycle != nil { dAtA[i] = 0x62 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Lifecycle.Size())) - n26, err := m.Lifecycle.MarshalTo(dAtA[i:]) + n24, err := m.Lifecycle.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n26 + i += n24 } dAtA[i] = 0x6a i++ @@ -2657,11 +2556,11 @@ func (m *Container) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x7a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SecurityContext.Size())) - n27, err := m.SecurityContext.MarshalTo(dAtA[i:]) + n25, err := m.SecurityContext.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n27 + i += n25 } dAtA[i] = 0x80 i++ @@ -2821,31 +2720,31 @@ func (m *ContainerState) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Waiting.Size())) - n28, err := m.Waiting.MarshalTo(dAtA[i:]) + n26, err := m.Waiting.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n28 + i += n26 } if m.Running != nil { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Running.Size())) - n29, err := m.Running.MarshalTo(dAtA[i:]) + n27, err := m.Running.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n29 + i += n27 } if m.Terminated != nil { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Terminated.Size())) - n30, err := m.Terminated.MarshalTo(dAtA[i:]) + n28, err := m.Terminated.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n30 + i += n28 } return i, nil } @@ -2868,11 +2767,11 @@ func (m *ContainerStateRunning) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.StartedAt.Size())) - n31, err := m.StartedAt.MarshalTo(dAtA[i:]) + n29, err := m.StartedAt.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n31 + i += n29 return i, nil } @@ -2908,19 +2807,19 @@ func (m *ContainerStateTerminated) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x2a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.StartedAt.Size())) - n32, err := m.StartedAt.MarshalTo(dAtA[i:]) + n30, err := m.StartedAt.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n32 + i += n30 dAtA[i] = 0x32 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.FinishedAt.Size())) - n33, err := m.FinishedAt.MarshalTo(dAtA[i:]) + n31, err := m.FinishedAt.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n33 + i += n31 dAtA[i] = 0x3a i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContainerID))) @@ -2976,19 +2875,19 @@ func (m *ContainerStatus) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.State.Size())) - n34, err := m.State.MarshalTo(dAtA[i:]) + n32, err := m.State.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n34 + i += n32 dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LastTerminationState.Size())) - n35, err := m.LastTerminationState.MarshalTo(dAtA[i:]) + n33, err := m.LastTerminationState.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n35 + i += n33 dAtA[i] = 0x20 i++ if m.Ready { @@ -3089,21 +2988,21 @@ func (m *DownwardAPIVolumeFile) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 
0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.FieldRef.Size())) - n36, err := m.FieldRef.MarshalTo(dAtA[i:]) + n34, err := m.FieldRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n36 + i += n34 } if m.ResourceFieldRef != nil { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ResourceFieldRef.Size())) - n37, err := m.ResourceFieldRef.MarshalTo(dAtA[i:]) + n35, err := m.ResourceFieldRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n37 + i += n35 } if m.Mode != nil { dAtA[i] = 0x20 @@ -3171,11 +3070,11 @@ func (m *EmptyDirVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SizeLimit.Size())) - n38, err := m.SizeLimit.MarshalTo(dAtA[i:]) + n36, err := m.SizeLimit.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n38 + i += n36 } return i, nil } @@ -3203,11 +3102,11 @@ func (m *EndpointAddress) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.TargetRef.Size())) - n39, err := m.TargetRef.MarshalTo(dAtA[i:]) + n37, err := m.TargetRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n39 + i += n37 } dAtA[i] = 0x1a i++ @@ -3323,11 +3222,11 @@ func (m *Endpoints) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n40, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n38, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n40 + i += n38 if len(m.Subsets) > 0 { for _, msg := range m.Subsets { dAtA[i] = 0x12 @@ -3361,11 +3260,11 @@ func (m *EndpointsList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n41, err := m.ListMeta.MarshalTo(dAtA[i:]) + n39, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n41 + i += n39 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -3404,21 +3303,21 @@ func (m *EnvFromSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ConfigMapRef.Size())) - n42, err := m.ConfigMapRef.MarshalTo(dAtA[i:]) + n40, err := m.ConfigMapRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n42 + i += n40 } if m.SecretRef != nil { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n43, err := m.SecretRef.MarshalTo(dAtA[i:]) + n41, err := m.SecretRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n43 + i += n41 } return i, nil } @@ -3450,11 +3349,11 @@ func (m *EnvVar) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ValueFrom.Size())) - n44, err := m.ValueFrom.MarshalTo(dAtA[i:]) + n42, err := m.ValueFrom.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n44 + i += n42 } return i, nil } @@ -3478,41 +3377,41 @@ func (m *EnvVarSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.FieldRef.Size())) - n45, err := m.FieldRef.MarshalTo(dAtA[i:]) + n43, err := m.FieldRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n45 + i += n43 } if m.ResourceFieldRef != nil { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ResourceFieldRef.Size())) - n46, err := m.ResourceFieldRef.MarshalTo(dAtA[i:]) + n44, err := m.ResourceFieldRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n46 + i += n44 } if m.ConfigMapKeyRef != nil { dAtA[i] = 0x1a i++ i = 
encodeVarintGenerated(dAtA, i, uint64(m.ConfigMapKeyRef.Size())) - n47, err := m.ConfigMapKeyRef.MarshalTo(dAtA[i:]) + n45, err := m.ConfigMapKeyRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n47 + i += n45 } if m.SecretKeyRef != nil { dAtA[i] = 0x22 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SecretKeyRef.Size())) - n48, err := m.SecretKeyRef.MarshalTo(dAtA[i:]) + n46, err := m.SecretKeyRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n48 + i += n46 } return i, nil } @@ -3535,19 +3434,19 @@ func (m *Event) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n49, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n47, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n49 + i += n47 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.InvolvedObject.Size())) - n50, err := m.InvolvedObject.MarshalTo(dAtA[i:]) + n48, err := m.InvolvedObject.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n50 + i += n48 dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) @@ -3559,27 +3458,27 @@ func (m *Event) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x2a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Source.Size())) - n51, err := m.Source.MarshalTo(dAtA[i:]) + n49, err := m.Source.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n51 + i += n49 dAtA[i] = 0x32 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.FirstTimestamp.Size())) - n52, err := m.FirstTimestamp.MarshalTo(dAtA[i:]) + n50, err := m.FirstTimestamp.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n52 + i += n50 dAtA[i] = 0x3a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LastTimestamp.Size())) - n53, err := m.LastTimestamp.MarshalTo(dAtA[i:]) + n51, err := m.LastTimestamp.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n53 + i += n51 dAtA[i] = 0x40 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Count)) @@ -3590,20 +3489,20 @@ func (m *Event) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x52 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.EventTime.Size())) - n54, err := m.EventTime.MarshalTo(dAtA[i:]) + n52, err := m.EventTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n54 + i += n52 if m.Series != nil { dAtA[i] = 0x5a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Series.Size())) - n55, err := m.Series.MarshalTo(dAtA[i:]) + n53, err := m.Series.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n55 + i += n53 } dAtA[i] = 0x62 i++ @@ -3613,11 +3512,11 @@ func (m *Event) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x6a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Related.Size())) - n56, err := m.Related.MarshalTo(dAtA[i:]) + n54, err := m.Related.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n56 + i += n54 } dAtA[i] = 0x72 i++ @@ -3648,11 +3547,11 @@ func (m *EventList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n57, err := m.ListMeta.MarshalTo(dAtA[i:]) + n55, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n57 + i += n55 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -3689,11 +3588,11 @@ func (m *EventSeries) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LastObservedTime.Size())) - n58, err := m.LastObservedTime.MarshalTo(dAtA[i:]) + n56, err := m.LastObservedTime.MarshalTo(dAtA[i:]) if err != nil { return 0, 
err } - i += n58 + i += n56 dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.State))) @@ -3852,11 +3751,11 @@ func (m *FlexPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n59, err := m.SecretRef.MarshalTo(dAtA[i:]) + n57, err := m.SecretRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n59 + i += n57 } dAtA[i] = 0x20 i++ @@ -3918,11 +3817,11 @@ func (m *FlexVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n60, err := m.SecretRef.MarshalTo(dAtA[i:]) + n58, err := m.SecretRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n60 + i += n58 } dAtA[i] = 0x20 i++ @@ -4050,46 +3949,6 @@ func (m *GitRepoVolumeSource) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func (m *GlusterfsPersistentVolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GlusterfsPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.EndpointsName))) - i += copy(dAtA[i:], m.EndpointsName) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) - i += copy(dAtA[i:], m.Path) - dAtA[i] = 0x18 - i++ - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - if m.EndpointsNamespace != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.EndpointsNamespace))) - i += copy(dAtA[i:], *m.EndpointsNamespace) - } - return i, nil -} - func (m *GlusterfsVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -4146,11 +4005,11 @@ func (m *HTTPGetAction) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Port.Size())) - n61, err := m.Port.MarshalTo(dAtA[i:]) + n59, err := m.Port.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n61 + i += n59 dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Host))) @@ -4219,31 +4078,31 @@ func (m *Handler) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Exec.Size())) - n62, err := m.Exec.MarshalTo(dAtA[i:]) + n60, err := m.Exec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n62 + i += n60 } if m.HTTPGet != nil { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.HTTPGet.Size())) - n63, err := m.HTTPGet.MarshalTo(dAtA[i:]) + n61, err := m.HTTPGet.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n63 + i += n61 } if m.TCPSocket != nil { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.TCPSocket.Size())) - n64, err := m.TCPSocket.MarshalTo(dAtA[i:]) + n62, err := m.TCPSocket.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n64 + i += n62 } return i, nil } @@ -4382,11 +4241,11 @@ func (m *ISCSIPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x52 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n65, err := m.SecretRef.MarshalTo(dAtA[i:]) + n63, err := m.SecretRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n65 + i += n63 } dAtA[i] = 0x58 i++ @@ -4474,11 +4333,11 @@ func (m *ISCSIVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x52 i++ i = 
encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n66, err := m.SecretRef.MarshalTo(dAtA[i:]) + n64, err := m.SecretRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n66 + i += n64 } dAtA[i] = 0x58 i++ @@ -4547,21 +4406,21 @@ func (m *Lifecycle) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.PostStart.Size())) - n67, err := m.PostStart.MarshalTo(dAtA[i:]) + n65, err := m.PostStart.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n67 + i += n65 } if m.PreStop != nil { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.PreStop.Size())) - n68, err := m.PreStop.MarshalTo(dAtA[i:]) + n66, err := m.PreStop.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n68 + i += n66 } return i, nil } @@ -4584,19 +4443,19 @@ func (m *LimitRange) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n69, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n67, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n69 + i += n67 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n70, err := m.Spec.MarshalTo(dAtA[i:]) + n68, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n70 + i += n68 return i, nil } @@ -4643,11 +4502,11 @@ func (m *LimitRangeItem) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n71, err := (&v).MarshalTo(dAtA[i:]) + n69, err := (&v).MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n71 + i += n69 } } if len(m.Min) > 0 { @@ -4674,11 +4533,11 @@ func (m *LimitRangeItem) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n72, err := (&v).MarshalTo(dAtA[i:]) + n70, err := (&v).MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n72 + i += n70 } } if len(m.Default) > 0 { @@ -4705,11 +4564,11 @@ func (m *LimitRangeItem) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n73, err := (&v).MarshalTo(dAtA[i:]) + n71, err := (&v).MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n73 + i += n71 } } if len(m.DefaultRequest) > 0 { @@ -4736,11 +4595,11 @@ func (m *LimitRangeItem) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n74, err := (&v).MarshalTo(dAtA[i:]) + n72, err := (&v).MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n74 + i += n72 } } if len(m.MaxLimitRequestRatio) > 0 { @@ -4767,11 +4626,11 @@ func (m *LimitRangeItem) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n75, err := (&v).MarshalTo(dAtA[i:]) + n73, err := (&v).MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n75 + i += n73 } } return i, nil @@ -4795,11 +4654,11 @@ func (m *LimitRangeList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n76, err := m.ListMeta.MarshalTo(dAtA[i:]) + n74, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n76 + i += n74 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -4863,11 +4722,11 @@ func (m *List) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n77, err := m.ListMeta.MarshalTo(dAtA[i:]) + n75, err := 
m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n77 + i += n75 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -5041,27 +4900,27 @@ func (m *Namespace) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n78, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n76, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n78 + i += n76 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n79, err := m.Spec.MarshalTo(dAtA[i:]) + n77, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n79 + i += n77 dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n80, err := m.Status.MarshalTo(dAtA[i:]) + n78, err := m.Status.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n80 + i += n78 return i, nil } @@ -5083,11 +4942,11 @@ func (m *NamespaceList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n81, err := m.ListMeta.MarshalTo(dAtA[i:]) + n79, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n81 + i += n79 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -5176,27 +5035,27 @@ func (m *Node) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n82, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n80, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n82 + i += n80 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n83, err := m.Spec.MarshalTo(dAtA[i:]) + n81, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n83 + i += n81 dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n84, err := m.Status.MarshalTo(dAtA[i:]) + n82, err := m.Status.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n84 + i += n82 return i, nil } @@ -5245,11 +5104,11 @@ func (m *NodeAffinity) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.RequiredDuringSchedulingIgnoredDuringExecution.Size())) - n85, err := m.RequiredDuringSchedulingIgnoredDuringExecution.MarshalTo(dAtA[i:]) + n83, err := m.RequiredDuringSchedulingIgnoredDuringExecution.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n85 + i += n83 } if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { for _, msg := range m.PreferredDuringSchedulingIgnoredDuringExecution { @@ -5292,19 +5151,19 @@ func (m *NodeCondition) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LastHeartbeatTime.Size())) - n86, err := m.LastHeartbeatTime.MarshalTo(dAtA[i:]) + n84, err := m.LastHeartbeatTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n86 + i += n84 dAtA[i] = 0x22 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n87, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + n85, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n87 + i += n85 dAtA[i] = 0x2a i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) @@ -5335,11 +5194,11 @@ func (m *NodeConfigSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ConfigMap.Size())) - n88, err := m.ConfigMap.MarshalTo(dAtA[i:]) + n86, err := m.ConfigMap.MarshalTo(dAtA[i:]) if err != nil { 
return 0, err } - i += n88 + i += n86 } return i, nil } @@ -5363,31 +5222,31 @@ func (m *NodeConfigStatus) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Assigned.Size())) - n89, err := m.Assigned.MarshalTo(dAtA[i:]) + n87, err := m.Assigned.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n89 + i += n87 } if m.Active != nil { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Active.Size())) - n90, err := m.Active.MarshalTo(dAtA[i:]) + n88, err := m.Active.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n90 + i += n88 } if m.LastKnownGood != nil { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LastKnownGood.Size())) - n91, err := m.LastKnownGood.MarshalTo(dAtA[i:]) + n89, err := m.LastKnownGood.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n91 + i += n89 } dAtA[i] = 0x22 i++ @@ -5414,11 +5273,11 @@ func (m *NodeDaemonEndpoints) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.KubeletEndpoint.Size())) - n92, err := m.KubeletEndpoint.MarshalTo(dAtA[i:]) + n90, err := m.KubeletEndpoint.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n92 + i += n90 return i, nil } @@ -5440,11 +5299,11 @@ func (m *NodeList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n93, err := m.ListMeta.MarshalTo(dAtA[i:]) + n91, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n93 + i += n91 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -5521,11 +5380,11 @@ func (m *NodeResources) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n94, err := (&v).MarshalTo(dAtA[i:]) + n92, err := (&v).MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n94 + i += n92 } } return i, nil @@ -5695,11 +5554,11 @@ func (m *NodeSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x32 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ConfigSource.Size())) - n95, err := m.ConfigSource.MarshalTo(dAtA[i:]) + n93, err := m.ConfigSource.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n95 + i += n93 } return i, nil } @@ -5743,11 +5602,11 @@ func (m *NodeStatus) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n96, err := (&v).MarshalTo(dAtA[i:]) + n94, err := (&v).MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n96 + i += n94 } } if len(m.Allocatable) > 0 { @@ -5774,11 +5633,11 @@ func (m *NodeStatus) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n97, err := (&v).MarshalTo(dAtA[i:]) + n95, err := (&v).MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n97 + i += n95 } } dAtA[i] = 0x1a @@ -5812,19 +5671,19 @@ func (m *NodeStatus) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x32 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.DaemonEndpoints.Size())) - n98, err := m.DaemonEndpoints.MarshalTo(dAtA[i:]) + n96, err := m.DaemonEndpoints.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n98 + i += n96 dAtA[i] = 0x3a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.NodeInfo.Size())) - n99, err := m.NodeInfo.MarshalTo(dAtA[i:]) + n97, err := m.NodeInfo.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n99 + i += n97 if len(m.Images) > 0 { for _, msg := range m.Images { dAtA[i] = 0x42 @@ -5868,11 +5727,11 @@ 
func (m *NodeStatus) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x5a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Config.Size())) - n100, err := m.Config.MarshalTo(dAtA[i:]) + n98, err := m.Config.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n100 + i += n98 } return i, nil } @@ -6025,27 +5884,27 @@ func (m *PersistentVolume) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n101, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n99, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n101 + i += n99 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n102, err := m.Spec.MarshalTo(dAtA[i:]) + n100, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n102 + i += n100 dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n103, err := m.Status.MarshalTo(dAtA[i:]) + n101, err := m.Status.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n103 + i += n101 return i, nil } @@ -6067,27 +5926,27 @@ func (m *PersistentVolumeClaim) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n104, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n102, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n104 + i += n102 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n105, err := m.Spec.MarshalTo(dAtA[i:]) + n103, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n105 + i += n103 dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n106, err := m.Status.MarshalTo(dAtA[i:]) + n104, err := m.Status.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n106 + i += n104 return i, nil } @@ -6117,19 +5976,19 @@ func (m *PersistentVolumeClaimCondition) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LastProbeTime.Size())) - n107, err := m.LastProbeTime.MarshalTo(dAtA[i:]) + n105, err := m.LastProbeTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n107 + i += n105 dAtA[i] = 0x22 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n108, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + n106, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n108 + i += n106 dAtA[i] = 0x2a i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) @@ -6159,11 +6018,11 @@ func (m *PersistentVolumeClaimList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n109, err := m.ListMeta.MarshalTo(dAtA[i:]) + n107, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n109 + i += n107 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -6212,11 +6071,11 @@ func (m *PersistentVolumeClaimSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Resources.Size())) - n110, err := m.Resources.MarshalTo(dAtA[i:]) + n108, err := m.Resources.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n110 + i += n108 dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeName))) @@ -6225,11 +6084,11 @@ func (m *PersistentVolumeClaimSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x22 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n111, err := 
m.Selector.MarshalTo(dAtA[i:]) + n109, err := m.Selector.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n111 + i += n109 } if m.StorageClassName != nil { dAtA[i] = 0x2a @@ -6247,11 +6106,11 @@ func (m *PersistentVolumeClaimSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x3a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.DataSource.Size())) - n112, err := m.DataSource.MarshalTo(dAtA[i:]) + n110, err := m.DataSource.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n112 + i += n110 } return i, nil } @@ -6314,11 +6173,11 @@ func (m *PersistentVolumeClaimStatus) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n113, err := (&v).MarshalTo(dAtA[i:]) + n111, err := (&v).MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n113 + i += n111 } } if len(m.Conditions) > 0 { @@ -6384,11 +6243,11 @@ func (m *PersistentVolumeList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n114, err := m.ListMeta.MarshalTo(dAtA[i:]) + n112, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n114 + i += n112 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -6423,151 +6282,151 @@ func (m *PersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.GCEPersistentDisk.Size())) - n115, err := m.GCEPersistentDisk.MarshalTo(dAtA[i:]) + n113, err := m.GCEPersistentDisk.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n115 + i += n113 } if m.AWSElasticBlockStore != nil { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.AWSElasticBlockStore.Size())) - n116, err := m.AWSElasticBlockStore.MarshalTo(dAtA[i:]) + n114, err := m.AWSElasticBlockStore.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n116 + i += n114 } if m.HostPath != nil { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.HostPath.Size())) - n117, err := m.HostPath.MarshalTo(dAtA[i:]) + n115, err := m.HostPath.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n117 + i += n115 } if m.Glusterfs != nil { dAtA[i] = 0x22 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Glusterfs.Size())) - n118, err := m.Glusterfs.MarshalTo(dAtA[i:]) + n116, err := m.Glusterfs.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n118 + i += n116 } if m.NFS != nil { dAtA[i] = 0x2a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.NFS.Size())) - n119, err := m.NFS.MarshalTo(dAtA[i:]) + n117, err := m.NFS.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n119 + i += n117 } if m.RBD != nil { dAtA[i] = 0x32 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.RBD.Size())) - n120, err := m.RBD.MarshalTo(dAtA[i:]) + n118, err := m.RBD.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n120 + i += n118 } if m.ISCSI != nil { dAtA[i] = 0x3a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ISCSI.Size())) - n121, err := m.ISCSI.MarshalTo(dAtA[i:]) + n119, err := m.ISCSI.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n121 + i += n119 } if m.Cinder != nil { dAtA[i] = 0x42 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Cinder.Size())) - n122, err := m.Cinder.MarshalTo(dAtA[i:]) + n120, err := m.Cinder.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n122 + i += n120 } if m.CephFS != nil { dAtA[i] = 0x4a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.CephFS.Size())) - n123, err := m.CephFS.MarshalTo(dAtA[i:]) + n121, err := 
m.CephFS.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n123 + i += n121 } if m.FC != nil { dAtA[i] = 0x52 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.FC.Size())) - n124, err := m.FC.MarshalTo(dAtA[i:]) + n122, err := m.FC.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n124 + i += n122 } if m.Flocker != nil { dAtA[i] = 0x5a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Flocker.Size())) - n125, err := m.Flocker.MarshalTo(dAtA[i:]) + n123, err := m.Flocker.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n125 + i += n123 } if m.FlexVolume != nil { dAtA[i] = 0x62 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.FlexVolume.Size())) - n126, err := m.FlexVolume.MarshalTo(dAtA[i:]) + n124, err := m.FlexVolume.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n126 + i += n124 } if m.AzureFile != nil { dAtA[i] = 0x6a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.AzureFile.Size())) - n127, err := m.AzureFile.MarshalTo(dAtA[i:]) + n125, err := m.AzureFile.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n127 + i += n125 } if m.VsphereVolume != nil { dAtA[i] = 0x72 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.VsphereVolume.Size())) - n128, err := m.VsphereVolume.MarshalTo(dAtA[i:]) + n126, err := m.VsphereVolume.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n128 + i += n126 } if m.Quobyte != nil { dAtA[i] = 0x7a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Quobyte.Size())) - n129, err := m.Quobyte.MarshalTo(dAtA[i:]) + n127, err := m.Quobyte.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n129 + i += n127 } if m.AzureDisk != nil { dAtA[i] = 0x82 @@ -6575,11 +6434,11 @@ func (m *PersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.AzureDisk.Size())) - n130, err := m.AzureDisk.MarshalTo(dAtA[i:]) + n128, err := m.AzureDisk.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n130 + i += n128 } if m.PhotonPersistentDisk != nil { dAtA[i] = 0x8a @@ -6587,11 +6446,11 @@ func (m *PersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.PhotonPersistentDisk.Size())) - n131, err := m.PhotonPersistentDisk.MarshalTo(dAtA[i:]) + n129, err := m.PhotonPersistentDisk.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n131 + i += n129 } if m.PortworxVolume != nil { dAtA[i] = 0x92 @@ -6599,11 +6458,11 @@ func (m *PersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.PortworxVolume.Size())) - n132, err := m.PortworxVolume.MarshalTo(dAtA[i:]) + n130, err := m.PortworxVolume.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n132 + i += n130 } if m.ScaleIO != nil { dAtA[i] = 0x9a @@ -6611,11 +6470,11 @@ func (m *PersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ScaleIO.Size())) - n133, err := m.ScaleIO.MarshalTo(dAtA[i:]) + n131, err := m.ScaleIO.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n133 + i += n131 } if m.Local != nil { dAtA[i] = 0xa2 @@ -6623,11 +6482,11 @@ func (m *PersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Local.Size())) - n134, err := m.Local.MarshalTo(dAtA[i:]) + n132, err := m.Local.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n134 + i += n132 } if m.StorageOS != nil { dAtA[i] = 0xaa @@ -6635,11 +6494,11 @@ func 
(m *PersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.StorageOS.Size())) - n135, err := m.StorageOS.MarshalTo(dAtA[i:]) + n133, err := m.StorageOS.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n135 + i += n133 } if m.CSI != nil { dAtA[i] = 0xb2 @@ -6647,11 +6506,11 @@ func (m *PersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.CSI.Size())) - n136, err := m.CSI.MarshalTo(dAtA[i:]) + n134, err := m.CSI.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n136 + i += n134 } return i, nil } @@ -6695,21 +6554,21 @@ func (m *PersistentVolumeSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n137, err := (&v).MarshalTo(dAtA[i:]) + n135, err := (&v).MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n137 + i += n135 } } dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.PersistentVolumeSource.Size())) - n138, err := m.PersistentVolumeSource.MarshalTo(dAtA[i:]) + n136, err := m.PersistentVolumeSource.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n138 + i += n136 if len(m.AccessModes) > 0 { for _, s := range m.AccessModes { dAtA[i] = 0x1a @@ -6729,11 +6588,11 @@ func (m *PersistentVolumeSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x22 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ClaimRef.Size())) - n139, err := m.ClaimRef.MarshalTo(dAtA[i:]) + n137, err := m.ClaimRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n139 + i += n137 } dAtA[i] = 0x2a i++ @@ -6768,11 +6627,11 @@ func (m *PersistentVolumeSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x4a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.NodeAffinity.Size())) - n140, err := m.NodeAffinity.MarshalTo(dAtA[i:]) + n138, err := m.NodeAffinity.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n140 + i += n138 } return i, nil } @@ -6851,27 +6710,27 @@ func (m *Pod) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n141, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n139, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n141 + i += n139 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n142, err := m.Spec.MarshalTo(dAtA[i:]) + n140, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n142 + i += n140 dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n143, err := m.Status.MarshalTo(dAtA[i:]) + n141, err := m.Status.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n143 + i += n141 return i, nil } @@ -6936,11 +6795,11 @@ func (m *PodAffinityTerm) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LabelSelector.Size())) - n144, err := m.LabelSelector.MarshalTo(dAtA[i:]) + n142, err := m.LabelSelector.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n144 + i += n142 } if len(m.Namespaces) > 0 { for _, s := range m.Namespaces { @@ -7086,19 +6945,19 @@ func (m *PodCondition) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LastProbeTime.Size())) - n145, err := m.LastProbeTime.MarshalTo(dAtA[i:]) + n143, err := m.LastProbeTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n145 + i += n143 dAtA[i] = 0x22 i++ i = encodeVarintGenerated(dAtA, i, 
uint64(m.LastTransitionTime.Size())) - n146, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + n144, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n146 + i += n144 dAtA[i] = 0x2a i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) @@ -7285,11 +7144,11 @@ func (m *PodList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n147, err := m.ListMeta.MarshalTo(dAtA[i:]) + n145, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n147 + i += n145 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -7349,11 +7208,11 @@ func (m *PodLogOptions) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x2a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SinceTime.Size())) - n148, err := m.SinceTime.MarshalTo(dAtA[i:]) + n146, err := m.SinceTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n148 + i += n146 } dAtA[i] = 0x30 i++ @@ -7464,11 +7323,11 @@ func (m *PodSecurityContext) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SELinuxOptions.Size())) - n149, err := m.SELinuxOptions.MarshalTo(dAtA[i:]) + n147, err := m.SELinuxOptions.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n149 + i += n147 } if m.RunAsUser != nil { dAtA[i] = 0x10 @@ -7514,16 +7373,6 @@ func (m *PodSecurityContext) MarshalTo(dAtA []byte) (int, error) { i += n } } - if m.WindowsOptions != nil { - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.WindowsOptions.Size())) - n150, err := m.WindowsOptions.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n150 - } return i, nil } @@ -7546,11 +7395,11 @@ func (m *PodSignature) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.PodController.Size())) - n151, err := m.PodController.MarshalTo(dAtA[i:]) + n148, err := m.PodController.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n151 + i += n148 } return i, nil } @@ -7674,11 +7523,11 @@ func (m *PodSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x72 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SecurityContext.Size())) - n152, err := m.SecurityContext.MarshalTo(dAtA[i:]) + n149, err := m.SecurityContext.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n152 + i += n149 } if len(m.ImagePullSecrets) > 0 { for _, msg := range m.ImagePullSecrets { @@ -7710,11 +7559,11 @@ func (m *PodSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Affinity.Size())) - n153, err := m.Affinity.MarshalTo(dAtA[i:]) + n150, err := m.Affinity.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n153 + i += n150 } dAtA[i] = 0x9a i++ @@ -7795,11 +7644,11 @@ func (m *PodSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.DNSConfig.Size())) - n154, err := m.DNSConfig.MarshalTo(dAtA[i:]) + n151, err := m.DNSConfig.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n154 + i += n151 } if m.ShareProcessNamespace != nil { dAtA[i] = 0xd8 @@ -7835,26 +7684,6 @@ func (m *PodSpec) MarshalTo(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(len(*m.RuntimeClassName))) i += copy(dAtA[i:], *m.RuntimeClassName) } - if m.EnableServiceLinks != nil { - dAtA[i] = 0xf0 - i++ - dAtA[i] = 0x1 - i++ - if *m.EnableServiceLinks { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - if m.PreemptionPolicy != nil { - 
dAtA[i] = 0xfa - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PreemptionPolicy))) - i += copy(dAtA[i:], *m.PreemptionPolicy) - } return i, nil } @@ -7909,11 +7738,11 @@ func (m *PodStatus) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x3a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.StartTime.Size())) - n155, err := m.StartTime.MarshalTo(dAtA[i:]) + n152, err := m.StartTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n155 + i += n152 } if len(m.ContainerStatuses) > 0 { for _, msg := range m.ContainerStatuses { @@ -7968,19 +7797,19 @@ func (m *PodStatusResult) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n156, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n153, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n156 + i += n153 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n157, err := m.Status.MarshalTo(dAtA[i:]) + n154, err := m.Status.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n157 + i += n154 return i, nil } @@ -8002,19 +7831,19 @@ func (m *PodTemplate) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n158, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n155, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n158 + i += n155 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n159, err := m.Template.MarshalTo(dAtA[i:]) + n156, err := m.Template.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n159 + i += n156 return i, nil } @@ -8036,11 +7865,11 @@ func (m *PodTemplateList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n160, err := m.ListMeta.MarshalTo(dAtA[i:]) + n157, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n160 + i += n157 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -8074,19 +7903,19 @@ func (m *PodTemplateSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n161, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n158, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n161 + i += n158 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n162, err := m.Spec.MarshalTo(dAtA[i:]) + n159, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n162 + i += n159 return i, nil } @@ -8166,19 +7995,19 @@ func (m *PreferAvoidPodsEntry) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.PodSignature.Size())) - n163, err := m.PodSignature.MarshalTo(dAtA[i:]) + n160, err := m.PodSignature.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n163 + i += n160 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.EvictionTime.Size())) - n164, err := m.EvictionTime.MarshalTo(dAtA[i:]) + n161, err := m.EvictionTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n164 + i += n161 dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) @@ -8211,11 +8040,11 @@ func (m *PreferredSchedulingTerm) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Preference.Size())) - n165, err := m.Preference.MarshalTo(dAtA[i:]) + n162, err := 
m.Preference.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n165 + i += n162 return i, nil } @@ -8237,11 +8066,11 @@ func (m *Probe) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Handler.Size())) - n166, err := m.Handler.MarshalTo(dAtA[i:]) + n163, err := m.Handler.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n166 + i += n163 dAtA[i] = 0x10 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.InitialDelaySeconds)) @@ -8334,10 +8163,6 @@ func (m *QuobyteVolumeSource) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) i += copy(dAtA[i:], m.Group) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Tenant))) - i += copy(dAtA[i:], m.Tenant) return i, nil } @@ -8395,11 +8220,11 @@ func (m *RBDPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x3a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n167, err := m.SecretRef.MarshalTo(dAtA[i:]) + n164, err := m.SecretRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n167 + i += n164 } dAtA[i] = 0x40 i++ @@ -8466,11 +8291,11 @@ func (m *RBDVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x3a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n168, err := m.SecretRef.MarshalTo(dAtA[i:]) + n165, err := m.SecretRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n168 + i += n165 } dAtA[i] = 0x40 i++ @@ -8501,11 +8326,11 @@ func (m *RangeAllocation) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n169, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n166, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n169 + i += n166 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Range))) @@ -8537,27 +8362,27 @@ func (m *ReplicationController) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n170, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n167, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n170 + i += n167 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n171, err := m.Spec.MarshalTo(dAtA[i:]) + n168, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n171 + i += n168 dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n172, err := m.Status.MarshalTo(dAtA[i:]) + n169, err := m.Status.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n172 + i += n169 return i, nil } @@ -8587,11 +8412,11 @@ func (m *ReplicationControllerCondition) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n173, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + n170, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n173 + i += n170 dAtA[i] = 0x22 i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) @@ -8621,11 +8446,11 @@ func (m *ReplicationControllerList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n174, err := m.ListMeta.MarshalTo(dAtA[i:]) + n171, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n174 + i += n171 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -8687,11 +8512,11 @@ func (m 
*ReplicationControllerSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n175, err := m.Template.MarshalTo(dAtA[i:]) + n172, err := m.Template.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n175 + i += n172 } dAtA[i] = 0x20 i++ @@ -8770,11 +8595,11 @@ func (m *ResourceFieldSelector) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Divisor.Size())) - n176, err := m.Divisor.MarshalTo(dAtA[i:]) + n173, err := m.Divisor.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n176 + i += n173 return i, nil } @@ -8796,27 +8621,27 @@ func (m *ResourceQuota) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n177, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n174, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n177 + i += n174 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n178, err := m.Spec.MarshalTo(dAtA[i:]) + n175, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n178 + i += n175 dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n179, err := m.Status.MarshalTo(dAtA[i:]) + n176, err := m.Status.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n179 + i += n176 return i, nil } @@ -8838,11 +8663,11 @@ func (m *ResourceQuotaList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n180, err := m.ListMeta.MarshalTo(dAtA[i:]) + n177, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n180 + i += n177 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -8897,11 +8722,11 @@ func (m *ResourceQuotaSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n181, err := (&v).MarshalTo(dAtA[i:]) + n178, err := (&v).MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n181 + i += n178 } } if len(m.Scopes) > 0 { @@ -8923,11 +8748,11 @@ func (m *ResourceQuotaSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ScopeSelector.Size())) - n182, err := m.ScopeSelector.MarshalTo(dAtA[i:]) + n179, err := m.ScopeSelector.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n182 + i += n179 } return i, nil } @@ -8971,11 +8796,11 @@ func (m *ResourceQuotaStatus) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n183, err := (&v).MarshalTo(dAtA[i:]) + n180, err := (&v).MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n183 + i += n180 } } if len(m.Used) > 0 { @@ -9002,11 +8827,11 @@ func (m *ResourceQuotaStatus) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n184, err := (&v).MarshalTo(dAtA[i:]) + n181, err := (&v).MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n184 + i += n181 } } return i, nil @@ -9051,11 +8876,11 @@ func (m *ResourceRequirements) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n185, err := (&v).MarshalTo(dAtA[i:]) + n182, err := (&v).MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n185 + i += n182 } } if len(m.Requests) > 0 { @@ -9082,11 +8907,11 @@ func (m *ResourceRequirements) MarshalTo(dAtA []byte) 
(int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n186, err := (&v).MarshalTo(dAtA[i:]) + n183, err := (&v).MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n186 + i += n183 } } return i, nil @@ -9153,11 +8978,11 @@ func (m *ScaleIOPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n187, err := m.SecretRef.MarshalTo(dAtA[i:]) + n184, err := m.SecretRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n187 + i += n184 } dAtA[i] = 0x20 i++ @@ -9225,11 +9050,11 @@ func (m *ScaleIOVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n188, err := m.SecretRef.MarshalTo(dAtA[i:]) + n185, err := m.SecretRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n188 + i += n185 } dAtA[i] = 0x20 i++ @@ -9359,11 +9184,11 @@ func (m *Secret) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n189, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n186, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n189 + i += n186 if len(m.Data) > 0 { keysForData := make([]string, 0, len(m.Data)) for k := range m.Data { @@ -9439,11 +9264,11 @@ func (m *SecretEnvSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LocalObjectReference.Size())) - n190, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) + n187, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n190 + i += n187 if m.Optional != nil { dAtA[i] = 0x10 i++ @@ -9475,11 +9300,11 @@ func (m *SecretKeySelector) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LocalObjectReference.Size())) - n191, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) + n188, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n191 + i += n188 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) @@ -9515,11 +9340,11 @@ func (m *SecretList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n192, err := m.ListMeta.MarshalTo(dAtA[i:]) + n189, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n192 + i += n189 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -9553,11 +9378,11 @@ func (m *SecretProjection) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LocalObjectReference.Size())) - n193, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) + n190, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n193 + i += n190 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -9677,11 +9502,11 @@ func (m *SecurityContext) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Capabilities.Size())) - n194, err := m.Capabilities.MarshalTo(dAtA[i:]) + n191, err := m.Capabilities.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n194 + i += n191 } if m.Privileged != nil { dAtA[i] = 0x10 @@ -9697,11 +9522,11 @@ func (m *SecurityContext) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SELinuxOptions.Size())) - n195, err := 
m.SELinuxOptions.MarshalTo(dAtA[i:]) + n192, err := m.SELinuxOptions.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n195 + i += n192 } if m.RunAsUser != nil { dAtA[i] = 0x20 @@ -9749,16 +9574,6 @@ func (m *SecurityContext) MarshalTo(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ProcMount))) i += copy(dAtA[i:], *m.ProcMount) } - if m.WindowsOptions != nil { - dAtA[i] = 0x52 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.WindowsOptions.Size())) - n196, err := m.WindowsOptions.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n196 - } return i, nil } @@ -9780,11 +9595,11 @@ func (m *SerializedReference) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Reference.Size())) - n197, err := m.Reference.MarshalTo(dAtA[i:]) + n193, err := m.Reference.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n197 + i += n193 return i, nil } @@ -9806,27 +9621,27 @@ func (m *Service) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n198, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n194, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n198 + i += n194 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n199, err := m.Spec.MarshalTo(dAtA[i:]) + n195, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n199 + i += n195 dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n200, err := m.Status.MarshalTo(dAtA[i:]) + n196, err := m.Status.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n200 + i += n196 return i, nil } @@ -9848,11 +9663,11 @@ func (m *ServiceAccount) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n201, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n197, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n201 + i += n197 if len(m.Secrets) > 0 { for _, msg := range m.Secrets { dAtA[i] = 0x12 @@ -9908,11 +9723,11 @@ func (m *ServiceAccountList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n202, err := m.ListMeta.MarshalTo(dAtA[i:]) + n198, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n202 + i += n198 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -9977,11 +9792,11 @@ func (m *ServiceList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n203, err := m.ListMeta.MarshalTo(dAtA[i:]) + n199, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n203 + i += n199 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -10026,11 +9841,11 @@ func (m *ServicePort) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x22 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.TargetPort.Size())) - n204, err := m.TargetPort.MarshalTo(dAtA[i:]) + n200, err := m.TargetPort.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n204 + i += n200 dAtA[i] = 0x28 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.NodePort)) @@ -10177,11 +9992,11 @@ func (m *ServiceSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x72 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SessionAffinityConfig.Size())) - n205, err := m.SessionAffinityConfig.MarshalTo(dAtA[i:]) + n201, err := 
m.SessionAffinityConfig.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n205 + i += n201 } return i, nil } @@ -10204,11 +10019,11 @@ func (m *ServiceStatus) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LoadBalancer.Size())) - n206, err := m.LoadBalancer.MarshalTo(dAtA[i:]) + n202, err := m.LoadBalancer.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n206 + i += n202 return i, nil } @@ -10231,11 +10046,11 @@ func (m *SessionAffinityConfig) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ClientIP.Size())) - n207, err := m.ClientIP.MarshalTo(dAtA[i:]) + n203, err := m.ClientIP.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n207 + i += n203 } return i, nil } @@ -10279,11 +10094,11 @@ func (m *StorageOSPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x2a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n208, err := m.SecretRef.MarshalTo(dAtA[i:]) + n204, err := m.SecretRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n208 + i += n204 } return i, nil } @@ -10327,11 +10142,11 @@ func (m *StorageOSVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x2a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n209, err := m.SecretRef.MarshalTo(dAtA[i:]) + n205, err := m.SecretRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n209 + i += n205 } return i, nil } @@ -10380,11 +10195,11 @@ func (m *TCPSocketAction) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Port.Size())) - n210, err := m.Port.MarshalTo(dAtA[i:]) + n206, err := m.Port.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n210 + i += n206 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Host))) @@ -10423,11 +10238,11 @@ func (m *Taint) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x22 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.TimeAdded.Size())) - n211, err := m.TimeAdded.MarshalTo(dAtA[i:]) + n207, err := m.TimeAdded.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n211 + i += n207 } return i, nil } @@ -10592,11 +10407,11 @@ func (m *Volume) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.VolumeSource.Size())) - n212, err := m.VolumeSource.MarshalTo(dAtA[i:]) + n208, err := m.VolumeSource.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n212 + i += n208 return i, nil } @@ -10667,10 +10482,6 @@ func (m *VolumeMount) MarshalTo(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(len(*m.MountPropagation))) i += copy(dAtA[i:], *m.MountPropagation) } - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.SubPathExpr))) - i += copy(dAtA[i:], m.SubPathExpr) return i, nil } @@ -10693,11 +10504,11 @@ func (m *VolumeNodeAffinity) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Required.Size())) - n213, err := m.Required.MarshalTo(dAtA[i:]) + n209, err := m.Required.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n213 + i += n209 } return i, nil } @@ -10721,41 +10532,41 @@ func (m *VolumeProjection) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Secret.Size())) - n214, err := m.Secret.MarshalTo(dAtA[i:]) + n210, err := m.Secret.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n214 + i += n210 } if 
m.DownwardAPI != nil { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.DownwardAPI.Size())) - n215, err := m.DownwardAPI.MarshalTo(dAtA[i:]) + n211, err := m.DownwardAPI.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n215 + i += n211 } if m.ConfigMap != nil { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ConfigMap.Size())) - n216, err := m.ConfigMap.MarshalTo(dAtA[i:]) + n212, err := m.ConfigMap.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n216 + i += n212 } if m.ServiceAccountToken != nil { dAtA[i] = 0x22 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ServiceAccountToken.Size())) - n217, err := m.ServiceAccountToken.MarshalTo(dAtA[i:]) + n213, err := m.ServiceAccountToken.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n217 + i += n213 } return i, nil } @@ -10779,151 +10590,151 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.HostPath.Size())) - n218, err := m.HostPath.MarshalTo(dAtA[i:]) + n214, err := m.HostPath.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n218 + i += n214 } if m.EmptyDir != nil { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.EmptyDir.Size())) - n219, err := m.EmptyDir.MarshalTo(dAtA[i:]) + n215, err := m.EmptyDir.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n219 + i += n215 } if m.GCEPersistentDisk != nil { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.GCEPersistentDisk.Size())) - n220, err := m.GCEPersistentDisk.MarshalTo(dAtA[i:]) + n216, err := m.GCEPersistentDisk.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n220 + i += n216 } if m.AWSElasticBlockStore != nil { dAtA[i] = 0x22 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.AWSElasticBlockStore.Size())) - n221, err := m.AWSElasticBlockStore.MarshalTo(dAtA[i:]) + n217, err := m.AWSElasticBlockStore.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n221 + i += n217 } if m.GitRepo != nil { dAtA[i] = 0x2a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.GitRepo.Size())) - n222, err := m.GitRepo.MarshalTo(dAtA[i:]) + n218, err := m.GitRepo.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n222 + i += n218 } if m.Secret != nil { dAtA[i] = 0x32 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Secret.Size())) - n223, err := m.Secret.MarshalTo(dAtA[i:]) + n219, err := m.Secret.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n223 + i += n219 } if m.NFS != nil { dAtA[i] = 0x3a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.NFS.Size())) - n224, err := m.NFS.MarshalTo(dAtA[i:]) + n220, err := m.NFS.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n224 + i += n220 } if m.ISCSI != nil { dAtA[i] = 0x42 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ISCSI.Size())) - n225, err := m.ISCSI.MarshalTo(dAtA[i:]) + n221, err := m.ISCSI.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n225 + i += n221 } if m.Glusterfs != nil { dAtA[i] = 0x4a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Glusterfs.Size())) - n226, err := m.Glusterfs.MarshalTo(dAtA[i:]) + n222, err := m.Glusterfs.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n226 + i += n222 } if m.PersistentVolumeClaim != nil { dAtA[i] = 0x52 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.PersistentVolumeClaim.Size())) - n227, err := m.PersistentVolumeClaim.MarshalTo(dAtA[i:]) + n223, err := m.PersistentVolumeClaim.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n227 + i += n223 } if m.RBD != nil { 
dAtA[i] = 0x5a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.RBD.Size())) - n228, err := m.RBD.MarshalTo(dAtA[i:]) + n224, err := m.RBD.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n228 + i += n224 } if m.FlexVolume != nil { dAtA[i] = 0x62 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.FlexVolume.Size())) - n229, err := m.FlexVolume.MarshalTo(dAtA[i:]) + n225, err := m.FlexVolume.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n229 + i += n225 } if m.Cinder != nil { dAtA[i] = 0x6a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Cinder.Size())) - n230, err := m.Cinder.MarshalTo(dAtA[i:]) + n226, err := m.Cinder.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n230 + i += n226 } if m.CephFS != nil { dAtA[i] = 0x72 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.CephFS.Size())) - n231, err := m.CephFS.MarshalTo(dAtA[i:]) + n227, err := m.CephFS.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n231 + i += n227 } if m.Flocker != nil { dAtA[i] = 0x7a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Flocker.Size())) - n232, err := m.Flocker.MarshalTo(dAtA[i:]) + n228, err := m.Flocker.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n232 + i += n228 } if m.DownwardAPI != nil { dAtA[i] = 0x82 @@ -10931,11 +10742,11 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.DownwardAPI.Size())) - n233, err := m.DownwardAPI.MarshalTo(dAtA[i:]) + n229, err := m.DownwardAPI.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n233 + i += n229 } if m.FC != nil { dAtA[i] = 0x8a @@ -10943,11 +10754,11 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.FC.Size())) - n234, err := m.FC.MarshalTo(dAtA[i:]) + n230, err := m.FC.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n234 + i += n230 } if m.AzureFile != nil { dAtA[i] = 0x92 @@ -10955,11 +10766,11 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.AzureFile.Size())) - n235, err := m.AzureFile.MarshalTo(dAtA[i:]) + n231, err := m.AzureFile.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n235 + i += n231 } if m.ConfigMap != nil { dAtA[i] = 0x9a @@ -10967,11 +10778,11 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ConfigMap.Size())) - n236, err := m.ConfigMap.MarshalTo(dAtA[i:]) + n232, err := m.ConfigMap.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n236 + i += n232 } if m.VsphereVolume != nil { dAtA[i] = 0xa2 @@ -10979,11 +10790,11 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.VsphereVolume.Size())) - n237, err := m.VsphereVolume.MarshalTo(dAtA[i:]) + n233, err := m.VsphereVolume.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n237 + i += n233 } if m.Quobyte != nil { dAtA[i] = 0xaa @@ -10991,11 +10802,11 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Quobyte.Size())) - n238, err := m.Quobyte.MarshalTo(dAtA[i:]) + n234, err := m.Quobyte.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n238 + i += n234 } if m.AzureDisk != nil { dAtA[i] = 0xb2 @@ -11003,11 +10814,11 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, 
uint64(m.AzureDisk.Size())) - n239, err := m.AzureDisk.MarshalTo(dAtA[i:]) + n235, err := m.AzureDisk.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n239 + i += n235 } if m.PhotonPersistentDisk != nil { dAtA[i] = 0xba @@ -11015,11 +10826,11 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.PhotonPersistentDisk.Size())) - n240, err := m.PhotonPersistentDisk.MarshalTo(dAtA[i:]) + n236, err := m.PhotonPersistentDisk.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n240 + i += n236 } if m.PortworxVolume != nil { dAtA[i] = 0xc2 @@ -11027,11 +10838,11 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.PortworxVolume.Size())) - n241, err := m.PortworxVolume.MarshalTo(dAtA[i:]) + n237, err := m.PortworxVolume.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n241 + i += n237 } if m.ScaleIO != nil { dAtA[i] = 0xca @@ -11039,11 +10850,11 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ScaleIO.Size())) - n242, err := m.ScaleIO.MarshalTo(dAtA[i:]) + n238, err := m.ScaleIO.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n242 + i += n238 } if m.Projected != nil { dAtA[i] = 0xd2 @@ -11051,11 +10862,11 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Projected.Size())) - n243, err := m.Projected.MarshalTo(dAtA[i:]) + n239, err := m.Projected.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n243 + i += n239 } if m.StorageOS != nil { dAtA[i] = 0xda @@ -11063,23 +10874,11 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.StorageOS.Size())) - n244, err := m.StorageOS.MarshalTo(dAtA[i:]) + n240, err := m.StorageOS.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n244 - } - if m.CSI != nil { - dAtA[i] = 0xe2 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CSI.Size())) - n245, err := m.CSI.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n245 + i += n240 } return i, nil } @@ -11139,44 +10938,32 @@ func (m *WeightedPodAffinityTerm) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.PodAffinityTerm.Size())) - n246, err := m.PodAffinityTerm.MarshalTo(dAtA[i:]) + n241, err := m.PodAffinityTerm.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n246 + i += n241 return i, nil } -func (m *WindowsSecurityContextOptions) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *WindowsSecurityContextOptions) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.GMSACredentialSpecName != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.GMSACredentialSpecName))) - i += copy(dAtA[i:], *m.GMSACredentialSpecName) - } - if m.GMSACredentialSpec != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.GMSACredentialSpec))) - i += copy(dAtA[i:], *m.GMSACredentialSpec) - } - return i, nil +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 
24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 } - func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -11329,37 +11116,6 @@ func (m *CSIPersistentVolumeSource) Size() (n int) { l = m.NodePublishSecretRef.Size() n += 1 + l + sovGenerated(uint64(l)) } - if m.ControllerExpandSecretRef != nil { - l = m.ControllerExpandSecretRef.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *CSIVolumeSource) Size() (n int) { - var l int - _ = l - l = len(m.Driver) - n += 1 + l + sovGenerated(uint64(l)) - if m.ReadOnly != nil { - n += 2 - } - if m.FSType != nil { - l = len(*m.FSType) - n += 1 + l + sovGenerated(uint64(l)) - } - if len(m.VolumeAttributes) > 0 { - for k, v := range m.VolumeAttributes { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - if m.NodePublishSecretRef != nil { - l = m.NodePublishSecretRef.Size() - n += 1 + l + sovGenerated(uint64(l)) - } return n } @@ -12197,21 +11953,6 @@ func (m *GitRepoVolumeSource) Size() (n int) { return n } -func (m *GlusterfsPersistentVolumeSource) Size() (n int) { - var l int - _ = l - l = len(m.EndpointsName) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Path) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - if m.EndpointsNamespace != nil { - l = len(*m.EndpointsNamespace) - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - func (m *GlusterfsVolumeSource) Size() (n int) { var l int _ = l @@ -13456,10 +13197,6 @@ func (m *PodSecurityContext) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } - if m.WindowsOptions != nil { - l = m.WindowsOptions.Size() - n += 1 + l + sovGenerated(uint64(l)) - } return n } @@ -13578,13 +13315,6 @@ func (m *PodSpec) Size() (n int) { l = len(*m.RuntimeClassName) n += 2 + l + sovGenerated(uint64(l)) } - if m.EnableServiceLinks != nil { - n += 3 - } - if m.PreemptionPolicy != nil { - l = len(*m.PreemptionPolicy) - n += 2 + l + sovGenerated(uint64(l)) - } return n } @@ -13758,8 +13488,6 @@ func (m *QuobyteVolumeSource) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) l = len(m.Group) n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Tenant) - n += 1 + l + sovGenerated(uint64(l)) return n } @@ -14265,10 +13993,6 @@ func (m *SecurityContext) Size() (n int) { l = len(*m.ProcMount) n += 1 + l + sovGenerated(uint64(l)) } - if m.WindowsOptions != nil { - l = m.WindowsOptions.Size() - n += 1 + l + sovGenerated(uint64(l)) - } return n } @@ -14607,8 +14331,6 @@ func (m *VolumeMount) Size() (n int) { l = len(*m.MountPropagation) n += 1 + l + sovGenerated(uint64(l)) } - l = len(m.SubPathExpr) - n += 1 + l + sovGenerated(uint64(l)) return n } @@ -14755,10 +14477,6 @@ func (m *VolumeSource) Size() (n int) { l = m.StorageOS.Size() n += 2 + l + sovGenerated(uint64(l)) } - if m.CSI != nil { - l = m.CSI.Size() - n += 2 + l + sovGenerated(uint64(l)) - } return n } @@ -14785,20 +14503,6 @@ func (m *WeightedPodAffinityTerm) Size() (n int) { return n } -func (m *WindowsSecurityContextOptions) Size() (n int) { - var l int - _ = l - if m.GMSACredentialSpecName != nil { - l 
= len(*m.GMSACredentialSpecName) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.GMSACredentialSpec != nil { - l = len(*m.GMSACredentialSpec) - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - func sovGenerated(x uint64) (n int) { for { n++ @@ -14932,31 +14636,6 @@ func (this *CSIPersistentVolumeSource) String() string { `ControllerPublishSecretRef:` + strings.Replace(fmt.Sprintf("%v", this.ControllerPublishSecretRef), "SecretReference", "SecretReference", 1) + `,`, `NodeStageSecretRef:` + strings.Replace(fmt.Sprintf("%v", this.NodeStageSecretRef), "SecretReference", "SecretReference", 1) + `,`, `NodePublishSecretRef:` + strings.Replace(fmt.Sprintf("%v", this.NodePublishSecretRef), "SecretReference", "SecretReference", 1) + `,`, - `ControllerExpandSecretRef:` + strings.Replace(fmt.Sprintf("%v", this.ControllerExpandSecretRef), "SecretReference", "SecretReference", 1) + `,`, - `}`, - }, "") - return s -} -func (this *CSIVolumeSource) String() string { - if this == nil { - return "nil" - } - keysForVolumeAttributes := make([]string, 0, len(this.VolumeAttributes)) - for k := range this.VolumeAttributes { - keysForVolumeAttributes = append(keysForVolumeAttributes, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForVolumeAttributes) - mapStringForVolumeAttributes := "map[string]string{" - for _, k := range keysForVolumeAttributes { - mapStringForVolumeAttributes += fmt.Sprintf("%v: %v,", k, this.VolumeAttributes[k]) - } - mapStringForVolumeAttributes += "}" - s := strings.Join([]string{`&CSIVolumeSource{`, - `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`, - `ReadOnly:` + valueToStringGenerated(this.ReadOnly) + `,`, - `FSType:` + valueToStringGenerated(this.FSType) + `,`, - `VolumeAttributes:` + mapStringForVolumeAttributes + `,`, - `NodePublishSecretRef:` + strings.Replace(fmt.Sprintf("%v", this.NodePublishSecretRef), "LocalObjectReference", "LocalObjectReference", 1) + `,`, `}`, }, "") return s @@ -15616,19 +15295,6 @@ func (this *GitRepoVolumeSource) String() string { }, "") return s } -func (this *GlusterfsPersistentVolumeSource) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&GlusterfsPersistentVolumeSource{`, - `EndpointsName:` + fmt.Sprintf("%v", this.EndpointsName) + `,`, - `Path:` + fmt.Sprintf("%v", this.Path) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `EndpointsNamespace:` + valueToStringGenerated(this.EndpointsNamespace) + `,`, - `}`, - }, "") - return s -} func (this *GlusterfsVolumeSource) String() string { if this == nil { return "nil" @@ -16344,7 +16010,7 @@ func (this *PersistentVolumeSource) String() string { `GCEPersistentDisk:` + strings.Replace(fmt.Sprintf("%v", this.GCEPersistentDisk), "GCEPersistentDiskVolumeSource", "GCEPersistentDiskVolumeSource", 1) + `,`, `AWSElasticBlockStore:` + strings.Replace(fmt.Sprintf("%v", this.AWSElasticBlockStore), "AWSElasticBlockStoreVolumeSource", "AWSElasticBlockStoreVolumeSource", 1) + `,`, `HostPath:` + strings.Replace(fmt.Sprintf("%v", this.HostPath), "HostPathVolumeSource", "HostPathVolumeSource", 1) + `,`, - `Glusterfs:` + strings.Replace(fmt.Sprintf("%v", this.Glusterfs), "GlusterfsPersistentVolumeSource", "GlusterfsPersistentVolumeSource", 1) + `,`, + `Glusterfs:` + strings.Replace(fmt.Sprintf("%v", this.Glusterfs), "GlusterfsVolumeSource", "GlusterfsVolumeSource", 1) + `,`, `NFS:` + strings.Replace(fmt.Sprintf("%v", this.NFS), "NFSVolumeSource", "NFSVolumeSource", 1) + `,`, `RBD:` + strings.Replace(fmt.Sprintf("%v", this.RBD), 
"RBDPersistentVolumeSource", "RBDPersistentVolumeSource", 1) + `,`, `ISCSI:` + strings.Replace(fmt.Sprintf("%v", this.ISCSI), "ISCSIPersistentVolumeSource", "ISCSIPersistentVolumeSource", 1) + `,`, @@ -16601,7 +16267,6 @@ func (this *PodSecurityContext) String() string { `FSGroup:` + valueToStringGenerated(this.FSGroup) + `,`, `RunAsGroup:` + valueToStringGenerated(this.RunAsGroup) + `,`, `Sysctls:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Sysctls), "Sysctl", "Sysctl", 1), `&`, ``, 1) + `,`, - `WindowsOptions:` + strings.Replace(fmt.Sprintf("%v", this.WindowsOptions), "WindowsSecurityContextOptions", "WindowsSecurityContextOptions", 1) + `,`, `}`, }, "") return s @@ -16660,8 +16325,6 @@ func (this *PodSpec) String() string { `ShareProcessNamespace:` + valueToStringGenerated(this.ShareProcessNamespace) + `,`, `ReadinessGates:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ReadinessGates), "PodReadinessGate", "PodReadinessGate", 1), `&`, ``, 1) + `,`, `RuntimeClassName:` + valueToStringGenerated(this.RuntimeClassName) + `,`, - `EnableServiceLinks:` + valueToStringGenerated(this.EnableServiceLinks) + `,`, - `PreemptionPolicy:` + valueToStringGenerated(this.PreemptionPolicy) + `,`, `}`, }, "") return s @@ -16812,7 +16475,6 @@ func (this *QuobyteVolumeSource) String() string { `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, `User:` + fmt.Sprintf("%v", this.User) + `,`, `Group:` + fmt.Sprintf("%v", this.Group) + `,`, - `Tenant:` + fmt.Sprintf("%v", this.Tenant) + `,`, `}`, }, "") return s @@ -17247,7 +16909,6 @@ func (this *SecurityContext) String() string { `AllowPrivilegeEscalation:` + valueToStringGenerated(this.AllowPrivilegeEscalation) + `,`, `RunAsGroup:` + valueToStringGenerated(this.RunAsGroup) + `,`, `ProcMount:` + valueToStringGenerated(this.ProcMount) + `,`, - `WindowsOptions:` + strings.Replace(fmt.Sprintf("%v", this.WindowsOptions), "WindowsSecurityContextOptions", "WindowsSecurityContextOptions", 1) + `,`, `}`, }, "") return s @@ -17539,7 +17200,6 @@ func (this *VolumeMount) String() string { `MountPath:` + fmt.Sprintf("%v", this.MountPath) + `,`, `SubPath:` + fmt.Sprintf("%v", this.SubPath) + `,`, `MountPropagation:` + valueToStringGenerated(this.MountPropagation) + `,`, - `SubPathExpr:` + fmt.Sprintf("%v", this.SubPathExpr) + `,`, `}`, }, "") return s @@ -17599,7 +17259,6 @@ func (this *VolumeSource) String() string { `ScaleIO:` + strings.Replace(fmt.Sprintf("%v", this.ScaleIO), "ScaleIOVolumeSource", "ScaleIOVolumeSource", 1) + `,`, `Projected:` + strings.Replace(fmt.Sprintf("%v", this.Projected), "ProjectedVolumeSource", "ProjectedVolumeSource", 1) + `,`, `StorageOS:` + strings.Replace(fmt.Sprintf("%v", this.StorageOS), "StorageOSVolumeSource", "StorageOSVolumeSource", 1) + `,`, - `CSI:` + strings.Replace(fmt.Sprintf("%v", this.CSI), "CSIVolumeSource", "CSIVolumeSource", 1) + `,`, `}`, }, "") return s @@ -17628,17 +17287,6 @@ func (this *WeightedPodAffinityTerm) String() string { }, "") return s } -func (this *WindowsSecurityContextOptions) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&WindowsSecurityContextOptions{`, - `GMSACredentialSpecName:` + valueToStringGenerated(this.GMSACredentialSpecName) + `,`, - `GMSACredentialSpec:` + valueToStringGenerated(this.GMSACredentialSpec) + `,`, - `}`, - }, "") - return s -} func valueToStringGenerated(v interface{}) string { rv := reflect.ValueOf(v) if rv.IsNil() { @@ -18909,14 +18557,51 @@ func (m *CSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { if 
postIndex > l { return io.ErrUnexpectedEOF } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey if m.VolumeAttributes == nil { m.VolumeAttributes = make(map[string]string) } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 + if iNdEx < postIndex { + var valuekey uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -18926,80 +18611,41 @@ func (m *CSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + valuekey |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break } - iNdEx += skippy } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := 
string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + m.VolumeAttributes[mapkey] = mapvalue + } else { + var mapvalue string + m.VolumeAttributes[mapkey] = mapvalue } - m.VolumeAttributes[mapkey] = mapvalue iNdEx = postIndex case 6: if wireType != 2 { @@ -19100,39 +18746,6 @@ func (m *CSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ControllerExpandSecretRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ControllerExpandSecretRef == nil { - m.ControllerExpandSecretRef = &SecretReference{} - } - if err := m.ControllerExpandSecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -19154,7 +18767,7 @@ func (m *CSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *CSIVolumeSource) Unmarshal(dAtA []byte) error { +func (m *Capabilities) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -19177,15 +18790,15 @@ func (m *CSIVolumeSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: CSIVolumeSource: wiretype end group for non-group") + return fmt.Errorf("proto: Capabilities: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: CSIVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Capabilities: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Add", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -19210,13 +18823,13 @@ func (m *CSIVolumeSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Driver = string(dAtA[iNdEx:postIndex]) + m.Add = append(m.Add, Capability(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Drop", wireType) } - var v int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -19226,16 +18839,74 @@ func (m *CSIVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - b := bool(v != 0) - m.ReadOnly = &b - case 3: + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Drop = append(m.Drop, Capability(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > 
l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CephFSPersistentVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CephFSPersistentVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CephFSPersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Monitors", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -19260,14 +18931,13 @@ func (m *CSIVolumeSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.FSType = &s + m.Monitors = append(m.Monitors, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex - case 4: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VolumeAttributes", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -19277,115 +18947,26 @@ func (m *CSIVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - if m.VolumeAttributes == nil { - m.VolumeAttributes = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } 
- intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.VolumeAttributes[mapkey] = mapvalue + m.Path = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 5: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NodePublishSecretRef", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -19395,78 +18976,24 @@ func (m *CSIVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - if m.NodePublishSecretRef == nil { - m.NodePublishSecretRef = &LocalObjectReference{} - } - if err := m.NodePublishSecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.User = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Capabilities) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Capabilities: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Capabilities: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Add", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SecretFile", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -19491,13 +19018,13 @@ func (m *Capabilities) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Add = append(m.Add, Capability(dAtA[iNdEx:postIndex])) + m.SecretFile = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Drop", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return 
ErrIntOverflowGenerated @@ -19507,21 +19034,45 @@ func (m *Capabilities) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.Drop = append(m.Drop, Capability(dAtA[iNdEx:postIndex])) + if m.SecretRef == nil { + m.SecretRef = &SecretReference{} + } + if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -19543,7 +19094,7 @@ func (m *Capabilities) Unmarshal(dAtA []byte) error { } return nil } -func (m *CephFSPersistentVolumeSource) Unmarshal(dAtA []byte) error { +func (m *CephFSVolumeSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -19566,10 +19117,10 @@ func (m *CephFSPersistentVolumeSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: CephFSPersistentVolumeSource: wiretype end group for non-group") + return fmt.Errorf("proto: CephFSVolumeSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: CephFSPersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: CephFSVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -19715,7 +19266,7 @@ func (m *CephFSPersistentVolumeSource) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.SecretRef == nil { - m.SecretRef = &SecretReference{} + m.SecretRef = &LocalObjectReference{} } if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -19762,7 +19313,7 @@ func (m *CephFSPersistentVolumeSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *CephFSVolumeSource) Unmarshal(dAtA []byte) error { +func (m *CinderPersistentVolumeSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -19785,15 +19336,15 @@ func (m *CephFSVolumeSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: CephFSVolumeSource: wiretype end group for non-group") + return fmt.Errorf("proto: CinderPersistentVolumeSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: CephFSVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: CinderPersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Monitors", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field VolumeID", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -19818,11 +19369,11 @@ func (m *CephFSVolumeSource) Unmarshal(dAtA []byte) error { if postIndex > l { return 
io.ErrUnexpectedEOF } - m.Monitors = append(m.Monitors, string(dAtA[iNdEx:postIndex])) + m.VolumeID = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -19847,13 +19398,13 @@ func (m *CephFSVolumeSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Path = string(dAtA[iNdEx:postIndex]) + m.FSType = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) } - var stringLen uint64 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -19863,51 +19414,13 @@ func (m *CephFSVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.User = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex + m.ReadOnly = bool(v != 0) case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretFile", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.SecretFile = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) } @@ -19934,32 +19447,12 @@ func (m *CephFSVolumeSource) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.SecretRef == nil { - m.SecretRef = &LocalObjectReference{} + m.SecretRef = &SecretReference{} } if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.ReadOnly = bool(v != 0) default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -19981,7 +19474,7 @@ func (m *CephFSVolumeSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *CinderPersistentVolumeSource) Unmarshal(dAtA []byte) error { +func (m *CinderVolumeSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -20004,10 +19497,10 @@ func (m *CinderPersistentVolumeSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: CinderPersistentVolumeSource: wiretype end group for non-group") + return fmt.Errorf("proto: CinderVolumeSource: 
wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: CinderPersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: CinderVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -20115,7 +19608,7 @@ func (m *CinderPersistentVolumeSource) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.SecretRef == nil { - m.SecretRef = &SecretReference{} + m.SecretRef = &LocalObjectReference{} } if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -20142,7 +19635,7 @@ func (m *CinderPersistentVolumeSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *CinderVolumeSource) Unmarshal(dAtA []byte) error { +func (m *ClientIPConfig) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -20165,15 +19658,85 @@ func (m *CinderVolumeSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: CinderVolumeSource: wiretype end group for non-group") + return fmt.Errorf("proto: ClientIPConfig: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: CinderVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ClientIPConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.TimeoutSeconds = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ComponentCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ComponentCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ComponentCondition: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VolumeID", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -20198,11 +19761,11 @@ func (m *CinderVolumeSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.VolumeID = string(dAtA[iNdEx:postIndex]) + m.Type = ComponentConditionType(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) } var stringLen uint64 for shift := 
uint(0); ; shift += 7 { @@ -20227,13 +19790,13 @@ func (m *CinderVolumeSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.FSType = string(dAtA[iNdEx:postIndex]) + m.Status = ConditionStatus(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) } - var v int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -20243,17 +19806,26 @@ func (m *CinderVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - m.ReadOnly = bool(v != 0) + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -20263,24 +19835,20 @@ func (m *CinderVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - if m.SecretRef == nil { - m.SecretRef = &LocalObjectReference{} - } - if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Error = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -20303,7 +19871,7 @@ func (m *CinderVolumeSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *ClientIPConfig) Unmarshal(dAtA []byte) error { +func (m *ComponentStatus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -20326,17 +19894,17 @@ func (m *ClientIPConfig) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ClientIPConfig: wiretype end group for non-group") + return fmt.Errorf("proto: ComponentStatus: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ClientIPConfig: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ComponentStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } - var v int32 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -20346,12 +19914,53 @@ func (m *ClientIPConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - m.TimeoutSeconds = &v + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { 
+ return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, ComponentCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -20373,7 +19982,7 @@ func (m *ClientIPConfig) Unmarshal(dAtA []byte) error { } return nil } -func (m *ComponentCondition) Unmarshal(dAtA []byte) error { +func (m *ComponentStatusList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -20396,17 +20005,17 @@ func (m *ComponentCondition) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ComponentCondition: wiretype end group for non-group") + return fmt.Errorf("proto: ComponentStatusList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ComponentCondition: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ComponentStatusList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -20416,55 +20025,27 @@ func (m *ComponentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.Type = ComponentConditionType(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - m.Status = ConditionStatus(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 3: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift 
+= 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -20474,49 +20055,22 @@ func (m *ComponentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.Message = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF + m.Items = append(m.Items, ComponentStatus{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - m.Error = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -20539,7 +20093,7 @@ func (m *ComponentCondition) Unmarshal(dAtA []byte) error { } return nil } -func (m *ComponentStatus) Unmarshal(dAtA []byte) error { +func (m *ConfigMap) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -20562,10 +20116,10 @@ func (m *ComponentStatus) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ComponentStatus: wiretype end group for non-group") + return fmt.Errorf("proto: ConfigMap: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ComponentStatus: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ConfigMap: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -20600,7 +20154,7 @@ func (m *ComponentStatus) Unmarshal(dAtA []byte) error { iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -20624,66 +20178,7 @@ func (m *ComponentStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Conditions = append(m.Conditions, ComponentCondition{}) - if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ComponentStatusList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - 
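// Editor's note (illustrative sketch, not part of the patch): the generated
// decoders above all open with the same base-128 varint loop. Each byte
// contributes its low 7 bits, least-significant group first, and a set high
// bit (0x80) means another byte follows; the shift >= 64 and iNdEx >= l
// guards are the overflow and truncation checks. A minimal standalone
// version of that loop:
package main

import (
	"errors"
	"fmt"
)

var (
	errIntOverflow   = errors.New("varint overflows a 64-bit integer")
	errUnexpectedEOF = errors.New("unexpected EOF")
)

// decodeUvarint mirrors the generated "for shift := uint(0); ; shift += 7" loop.
func decodeUvarint(dAtA []byte, iNdEx int) (uint64, int, error) {
	var v uint64
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, errIntOverflow
		}
		if iNdEx >= len(dAtA) {
			return 0, 0, errUnexpectedEOF
		}
		b := dAtA[iNdEx]
		iNdEx++
		v |= (uint64(b) & 0x7F) << shift
		if b < 0x80 {
			break
		}
	}
	return v, iNdEx, nil
}

func main() {
	// 300 encodes as 0xAC 0x02: low group 44, high group 2<<7 = 256.
	v, next, err := decodeUvarint([]byte{0xAC, 0x02}, 0)
	fmt.Println(v, next, err) // 300 2 <nil>
}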
wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ComponentStatusList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ComponentStatusList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int + var keykey uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -20693,27 +20188,12 @@ func (m *ComponentStatusList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + keykey |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int + var stringLenmapkey uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -20723,76 +20203,74 @@ func (m *ComponentStatusList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLenmapkey |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, ComponentStatus{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) > l { + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ConfigMap) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + if m.Data == nil { + m.Data = make(map[string]string) } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return 
io.ErrUnexpectedEOF + } + mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + m.Data[mapkey] = mapvalue + } else { + var mapvalue string + m.Data[mapkey] = mapvalue } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ConfigMap: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ConfigMap: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + iNdEx = postIndex + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field BinaryData", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -20816,15 +20294,7 @@ func (m *ConfigMap) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) - } - var msglen int + var keykey uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -20834,115 +20304,12 @@ func (m *ConfigMap) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + keykey |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Data == nil { - m.Data = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - 
} - iNdEx += skippy - } - } - m.Data[mapkey] = mapvalue - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field BinaryData", wireType) - } - var msglen int + var stringLenmapkey uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -20952,26 +20319,26 @@ func (m *ConfigMap) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLenmapkey |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen - if postIndex > l { + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey if m.BinaryData == nil { m.BinaryData = make(map[string][]byte) } - var mapkey string - mapvalue := []byte{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 + if iNdEx < postIndex { + var valuekey uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -20981,81 +20348,42 @@ func (m *ConfigMap) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + valuekey |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapbyteLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapbyteLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intMapbyteLen := int(mapbyteLen) - if intMapbyteLen < 0 { - return ErrInvalidLengthGenerated + var mapbyteLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - postbytesIndex := iNdEx + intMapbyteLen - if postbytesIndex > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - mapvalue = make([]byte, mapbyteLen) - copy(mapvalue, dAtA[iNdEx:postbytesIndex]) - iNdEx = postbytesIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF + b := dAtA[iNdEx] + iNdEx++ + mapbyteLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break } - iNdEx += skippy } + intMapbyteLen := int(mapbyteLen) + if intMapbyteLen < 0 { + return ErrInvalidLengthGenerated + } + postbytesIndex := iNdEx + intMapbyteLen + if postbytesIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := make([]byte, mapbyteLen) + copy(mapvalue, dAtA[iNdEx:postbytesIndex]) + iNdEx = postbytesIndex + m.BinaryData[mapkey] = mapvalue + } else { + var mapvalue 
[]byte + m.BinaryData[mapkey] = mapvalue } - m.BinaryData[mapkey] = mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -26585,14 +25913,51 @@ func (m *FlexPersistentVolumeSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey if m.Options == nil { m.Options = make(map[string]string) } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 + if iNdEx < postIndex { + var valuekey uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -26602,80 +25967,41 @@ func (m *FlexPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + valuekey |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break } - iNdEx += skippy } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + 
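// Editor's note (illustrative sketch): a protobuf map field such as
// ConfigMap.Data above is wired as a repeated embedded entry message whose
// key is field 1 and whose value is field 2, both length-delimited (wire
// type 2). The generated code parses each entry inline; a compact standalone
// equivalent, which assumes well-formed input and single-byte lengths to
// stay short (the vendored code returns errors instead of panicking):
package main

import "fmt"

// decodeStringMapEntry parses one map<string,string> entry.
func decodeStringMapEntry(entry []byte) (key, value string) {
	i := 0
	readString := func(wantTag byte) string {
		if entry[i] != wantTag {
			panic(fmt.Sprintf("unexpected tag 0x%02x", entry[i]))
		}
		i++
		n := int(entry[i]) // lengths < 128 fit in one varint byte
		i++
		s := string(entry[i : i+n])
		i += n
		return s
	}
	key = readString(0x0A)   // field 1, wire type 2
	value = readString(0x12) // field 2, wire type 2
	return key, value
}

func main() {
	// Entry for {"foo": "bar"}: tag, length, bytes for each field.
	entry := []byte{0x0A, 0x03, 'f', 'o', 'o', 0x12, 0x03, 'b', 'a', 'r'}
	k, v := decodeStringMapEntry(entry)
	fmt.Println(k, v) // foo bar
}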
return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + m.Options[mapkey] = mapvalue + } else { + var mapvalue string + m.Options[mapkey] = mapvalue } - m.Options[mapkey] = mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -26864,14 +26190,51 @@ func (m *FlexVolumeSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey if m.Options == nil { m.Options = make(map[string]string) } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 + if iNdEx < postIndex { + var valuekey uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -26881,80 +26244,41 @@ func (m *FlexVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + valuekey |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return 
ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break } - iNdEx += skippy } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + m.Options[mapkey] = mapvalue + } else { + var mapvalue string + m.Options[mapkey] = mapvalue } - m.Options[mapkey] = mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -27369,7 +26693,7 @@ func (m *GitRepoVolumeSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *GlusterfsPersistentVolumeSource) Unmarshal(dAtA []byte) error { +func (m *GlusterfsVolumeSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -27392,10 +26716,10 @@ func (m *GlusterfsPersistentVolumeSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GlusterfsPersistentVolumeSource: wiretype end group for non-group") + return fmt.Errorf("proto: GlusterfsVolumeSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GlusterfsPersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GlusterfsVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -27476,276 +26800,118 @@ func (m *GlusterfsPersistentVolumeSource) Unmarshal(dAtA []byte) error { } } m.ReadOnly = bool(v != 0) - case 4: + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HTTPGetAction) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HTTPGetAction: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HTTPGetAction: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EndpointsNamespace", wireType) - } - var stringLen uint64 - 
for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.EndpointsNamespace = &s - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GlusterfsVolumeSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GlusterfsVolumeSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GlusterfsVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EndpointsName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.EndpointsName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Path = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.ReadOnly = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m 
*HTTPGetAction) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HTTPGetAction: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HTTPGetAction: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Path = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Port.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Port.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -29439,14 +28605,51 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return 
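// Editor's note (illustrative sketch): every hunk above begins by splitting
// the decoded tag varint into its two halves, fieldNum := int32(wire >> 3)
// and wireType := int(wire & 0x7); the encoder packs them the other way:
package main

import "fmt"

func makeTag(fieldNum int32, wireType int) uint64 {
	return uint64(fieldNum)<<3 | uint64(wireType)
}

func splitTag(wire uint64) (fieldNum int32, wireType int) {
	return int32(wire >> 3), int(wire & 0x7)
}

func main() {
	wire := makeTag(2, 2) // field 2, length-delimited
	f, w := splitTag(wire)
	fmt.Printf("tag=0x%02x field=%d wiretype=%d\n", wire, f, w) // tag=0x12 field=2 wiretype=2
}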
ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey if m.Max == nil { m.Max = make(ResourceList) } - var mapkey ResourceName - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 + if iNdEx < postIndex { + var valuekey uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -29456,85 +28659,46 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + valuekey |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - if postmsgIndex > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break } - iNdEx += skippy } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + m.Max[ResourceName(mapkey)] = *mapvalue + } else { + var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity + m.Max[ResourceName(mapkey)] = mapvalue } - m.Max[ResourceName(mapkey)] = *mapvalue iNdEx = postIndex case 3: if wireType != 2 { @@ -29562,14 +28726,51 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey if m.Min == nil { m.Min = make(ResourceList) } - var mapkey ResourceName - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 + if iNdEx < postIndex { + var valuekey uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -29579,85 +28780,46 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + valuekey |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - if postmsgIndex > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break } - iNdEx += skippy } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + m.Min[ResourceName(mapkey)] = *mapvalue + } else { + var mapvalue 
k8s_io_apimachinery_pkg_api_resource.Quantity + m.Min[ResourceName(mapkey)] = mapvalue } - m.Min[ResourceName(mapkey)] = *mapvalue iNdEx = postIndex case 4: if wireType != 2 { @@ -29685,14 +28847,51 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey if m.Default == nil { m.Default = make(ResourceList) } - var mapkey ResourceName - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 + if iNdEx < postIndex { + var valuekey uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -29702,85 +28901,46 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + valuekey |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - if postmsgIndex > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break } - iNdEx += 
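// Editor's note (illustrative sketch): before slicing out any
// length-delimited payload, the generated code above always applies the same
// two guards: a negative length (a varint that overflowed int) rejects the
// buffer, and an end offset past the input reports truncation. The idiom in
// isolation:
package main

import (
	"errors"
	"fmt"
	"io"
)

var errInvalidLength = errors.New("proto: negative length found during unmarshaling")

// boundsCheck returns the end offset of a length-delimited payload starting
// at iNdEx in a buffer of length l, mirroring the generated checks.
func boundsCheck(l, iNdEx, msglen int) (postIndex int, err error) {
	if msglen < 0 {
		return 0, errInvalidLength
	}
	postIndex = iNdEx + msglen
	if postIndex > l {
		return 0, io.ErrUnexpectedEOF
	}
	return postIndex, nil
}

func main() {
	if _, err := boundsCheck(10, 8, 5); err != nil {
		fmt.Println(err) // unexpected EOF: the payload would run past the buffer
	}
}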
skippy } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + m.Default[ResourceName(mapkey)] = *mapvalue + } else { + var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity + m.Default[ResourceName(mapkey)] = mapvalue } - m.Default[ResourceName(mapkey)] = *mapvalue iNdEx = postIndex case 5: if wireType != 2 { @@ -29808,14 +28968,51 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey if m.DefaultRequest == nil { m.DefaultRequest = make(ResourceList) } - var mapkey ResourceName - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 + if iNdEx < postIndex { + var valuekey uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -29825,85 +29022,46 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + valuekey |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - if postmsgIndex > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - mapvalue = 
&k8s_io_apimachinery_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break } - iNdEx += skippy } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + m.DefaultRequest[ResourceName(mapkey)] = *mapvalue + } else { + var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity + m.DefaultRequest[ResourceName(mapkey)] = mapvalue } - m.DefaultRequest[ResourceName(mapkey)] = *mapvalue iNdEx = postIndex case 6: if wireType != 2 { @@ -29931,14 +29089,51 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey if m.MaxLimitRequestRatio == nil { m.MaxLimitRequestRatio = make(ResourceList) } - var mapkey ResourceName - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 + if iNdEx < postIndex { + var valuekey uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -29948,85 +29143,46 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + valuekey |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - if postmsgIndex > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break } - iNdEx += skippy } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + m.MaxLimitRequestRatio[ResourceName(mapkey)] = *mapvalue + } else { + var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity + m.MaxLimitRequestRatio[ResourceName(mapkey)] = mapvalue } - m.MaxLimitRequestRatio[ResourceName(mapkey)] = *mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -32440,14 +31596,51 @@ func (m *NodeResources) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey if m.Capacity == nil { m.Capacity = make(ResourceList) } - var mapkey ResourceName - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 + if iNdEx < postIndex { + var valuekey uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -32457,85 +31650,46 @@ func (m *NodeResources) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + valuekey |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - 
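// Editor's note (illustrative sketch): the ResourceList hunks above decode
// each map value into a freshly allocated message and then store the
// dereferenced value, because Go map entries are not addressable and the map
// holds Quantity by value. A toy version of that store pattern (Quantity
// here is a stand-in for k8s_io_apimachinery_pkg_api_resource.Quantity):
package main

import "fmt"

type Quantity struct{ s string } // stand-in for resource.Quantity

type ResourceList map[string]Quantity

func main() {
	m := make(ResourceList)
	mapvalue := &Quantity{s: "500m"} // decoded into a pointer...
	m["cpu"] = *mapvalue             // ...but stored by value, as in m.Max[ResourceName(mapkey)] = *mapvalue
	fmt.Println(m["cpu"].s)          // 500m
}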
stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - if postmsgIndex > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break } - iNdEx += skippy } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + m.Capacity[ResourceName(mapkey)] = *mapvalue + } else { + var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity + m.Capacity[ResourceName(mapkey)] = mapvalue } - m.Capacity[ResourceName(mapkey)] = *mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -33081,68 +32235,211 @@ func (m *NodeSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.ConfigSource == nil { - m.ConfigSource = &NodeConfigSource{} - } - if err := m.ConfigSource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *NodeStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: NodeStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: NodeStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return 
fmt.Errorf("proto: wrong wireType = %d for field Capacity", wireType) - } - var msglen int + if m.ConfigSource == nil { + m.ConfigSource = &NodeConfigSource{} + } + if err := m.ConfigSource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Capacity", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + if m.Capacity == nil { + m.Capacity = make(ResourceList) + } + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := 
&k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + m.Capacity[ResourceName(mapkey)] = *mapvalue + } else { + var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity + m.Capacity[ResourceName(mapkey)] = mapvalue + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Allocatable", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -33152,120 +32449,12 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + keykey |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Capacity == nil { - m.Capacity = make(ResourceList) - } - var mapkey ResourceName - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Capacity[ResourceName(mapkey)] = *mapvalue - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field 
Allocatable", wireType) - } - var msglen int + var stringLenmapkey uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -33275,26 +32464,26 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLenmapkey |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen - if postIndex > l { + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } + mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey if m.Allocatable == nil { m.Allocatable = make(ResourceList) } - var mapkey ResourceName - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 + if iNdEx < postIndex { + var valuekey uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -33304,85 +32493,46 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + valuekey |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - if postmsgIndex > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break } - iNdEx += skippy } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + 
m.Allocatable[ResourceName(mapkey)] = *mapvalue + } else { + var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity + m.Allocatable[ResourceName(mapkey)] = mapvalue } - m.Allocatable[ResourceName(mapkey)] = *mapvalue iNdEx = postIndex case 3: if wireType != 2 { @@ -35375,14 +34525,51 @@ func (m *PersistentVolumeClaimStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey if m.Capacity == nil { m.Capacity = make(ResourceList) } - var mapkey ResourceName - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 + if iNdEx < postIndex { + var valuekey uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -35392,85 +34579,46 @@ func (m *PersistentVolumeClaimStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + valuekey |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - if postmsgIndex > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return 
io.ErrUnexpectedEOF + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break } - iNdEx += skippy } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + m.Capacity[ResourceName(mapkey)] = *mapvalue + } else { + var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity + m.Capacity[ResourceName(mapkey)] = mapvalue } - m.Capacity[ResourceName(mapkey)] = *mapvalue iNdEx = postIndex case 4: if wireType != 2 { @@ -35889,7 +35037,7 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Glusterfs == nil { - m.Glusterfs = &GlusterfsPersistentVolumeSource{} + m.Glusterfs = &GlusterfsVolumeSource{} } if err := m.Glusterfs.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -36565,14 +35713,51 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey if m.Capacity == nil { m.Capacity = make(ResourceList) } - var mapkey ResourceName - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 + if iNdEx < postIndex { + var valuekey uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -36582,85 +35767,46 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + valuekey |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - 
iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - if postmsgIndex > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break } - iNdEx += skippy } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + m.Capacity[ResourceName(mapkey)] = *mapvalue + } else { + var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity + m.Capacity[ResourceName(mapkey)] = mapvalue } - m.Capacity[ResourceName(mapkey)] = *mapvalue iNdEx = postIndex case 2: if wireType != 2 { @@ -39345,39 +38491,6 @@ func (m *PodSecurityContext) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field WindowsOptions", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.WindowsOptions == nil { - m.WindowsOptions = &WindowsSecurityContextOptions{} - } - if err := m.WindowsOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -39697,14 +38810,51 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey if m.NodeSelector == nil { m.NodeSelector = 
make(map[string]string) } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 + if iNdEx < postIndex { + var valuekey uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -39714,80 +38864,41 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + valuekey |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break } - iNdEx += skippy } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + m.NodeSelector[mapkey] = mapvalue + } else { + var mapvalue string + m.NodeSelector[mapkey] = mapvalue } - m.NodeSelector[mapkey] = mapvalue iNdEx = postIndex case 8: if wireType != 2 { @@ -40118,174 +39229,11 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.SchedulerName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 20: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field InitContainers", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.InitContainers = 
append(m.InitContainers, Container{}) - if err := m.InitContainers[len(m.InitContainers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 21: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AutomountServiceAccountToken", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.AutomountServiceAccountToken = &b - case 22: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Tolerations", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Tolerations = append(m.Tolerations, Toleration{}) - if err := m.Tolerations[len(m.Tolerations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 23: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field HostAliases", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.HostAliases = append(m.HostAliases, HostAlias{}) - if err := m.HostAliases[len(m.HostAliases)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 24: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PriorityClassName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PriorityClassName = string(dAtA[iNdEx:postIndex]) + m.SchedulerName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 25: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Priority", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Priority = &v - case 26: + case 20: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DNSConfig", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field InitContainers", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -40309,16 +39257,14 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.DNSConfig == nil { - m.DNSConfig = 
&PodDNSConfig{} - } - if err := m.DNSConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.InitContainers = append(m.InitContainers, Container{}) + if err := m.InitContainers[len(m.InitContainers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 27: + case 21: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ShareProcessNamespace", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field AutomountServiceAccountToken", wireType) } var v int for shift := uint(0); ; shift += 7 { @@ -40336,10 +39282,10 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } } b := bool(v != 0) - m.ShareProcessNamespace = &b - case 28: + m.AutomountServiceAccountToken = &b + case 22: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadinessGates", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Tolerations", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -40363,14 +39309,45 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ReadinessGates = append(m.ReadinessGates, PodReadinessGate{}) - if err := m.ReadinessGates[len(m.ReadinessGates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Tolerations = append(m.Tolerations, Toleration{}) + if err := m.Tolerations[len(m.Tolerations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 29: + case 23: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RuntimeClassName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field HostAliases", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.HostAliases = append(m.HostAliases, HostAlias{}) + if err := m.HostAliases[len(m.HostAliases)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 24: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PriorityClassName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -40395,12 +39372,64 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.RuntimeClassName = &s + m.PriorityClassName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 30: + case 25: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field EnableServiceLinks", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Priority", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Priority = &v + case 26: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DNSConfig", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break 
+ } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DNSConfig == nil { + m.DNSConfig = &PodDNSConfig{} + } + if err := m.DNSConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 27: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ShareProcessNamespace", wireType) } var v int for shift := uint(0); ; shift += 7 { @@ -40418,10 +39447,41 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } } b := bool(v != 0) - m.EnableServiceLinks = &b - case 31: + m.ShareProcessNamespace = &b + case 28: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PreemptionPolicy", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ReadinessGates", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ReadinessGates = append(m.ReadinessGates, PodReadinessGate{}) + if err := m.ReadinessGates[len(m.ReadinessGates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 29: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RuntimeClassName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -40446,8 +39506,8 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - s := PreemptionPolicy(dAtA[iNdEx:postIndex]) - m.PreemptionPolicy = &s + s := string(dAtA[iNdEx:postIndex]) + m.RuntimeClassName = &s iNdEx = postIndex default: iNdEx = preIndex @@ -42206,35 +41266,6 @@ func (m *QuobyteVolumeSource) Unmarshal(dAtA []byte) error { } m.Group = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Tenant", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Tenant = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -43472,14 +42503,51 @@ func (m *ReplicationControllerSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if 
intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey if m.Selector == nil { m.Selector = make(map[string]string) } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 + if iNdEx < postIndex { + var valuekey uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -43489,80 +42557,41 @@ func (m *ReplicationControllerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + valuekey |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break } - iNdEx += skippy } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + m.Selector[mapkey] = mapvalue + } else { + var mapvalue string + m.Selector[mapkey] = mapvalue } - m.Selector[mapkey] = mapvalue iNdEx = postIndex case 3: if wireType != 2 { @@ -44257,14 +43286,51 @@ func (m *ResourceQuotaSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for 
shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey if m.Hard == nil { m.Hard = make(ResourceList) } - var mapkey ResourceName - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 + if iNdEx < postIndex { + var valuekey uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -44274,85 +43340,46 @@ func (m *ResourceQuotaSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + valuekey |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - if postmsgIndex > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break } - iNdEx += skippy } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + m.Hard[ResourceName(mapkey)] = *mapvalue + } else { + var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity + m.Hard[ResourceName(mapkey)] = mapvalue } - m.Hard[ResourceName(mapkey)] = *mapvalue iNdEx = 
postIndex case 2: if wireType != 2 { @@ -44492,14 +43519,51 @@ func (m *ResourceQuotaStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey if m.Hard == nil { m.Hard = make(ResourceList) } - var mapkey ResourceName - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 + if iNdEx < postIndex { + var valuekey uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -44509,85 +43573,46 @@ func (m *ResourceQuotaStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + valuekey |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - if postmsgIndex > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break } - iNdEx += skippy } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return 
ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + m.Hard[ResourceName(mapkey)] = *mapvalue + } else { + var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity + m.Hard[ResourceName(mapkey)] = mapvalue } - m.Hard[ResourceName(mapkey)] = *mapvalue iNdEx = postIndex case 2: if wireType != 2 { @@ -44615,14 +43640,51 @@ func (m *ResourceQuotaStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey if m.Used == nil { m.Used = make(ResourceList) } - var mapkey ResourceName - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 + if iNdEx < postIndex { + var valuekey uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -44632,85 +43694,46 @@ func (m *ResourceQuotaStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + valuekey |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - if postmsgIndex > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { 
- iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break } - iNdEx += skippy } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + m.Used[ResourceName(mapkey)] = *mapvalue + } else { + var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity + m.Used[ResourceName(mapkey)] = mapvalue } - m.Used[ResourceName(mapkey)] = *mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -44788,108 +43811,143 @@ func (m *ResourceRequirements) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Limits == nil { - m.Limits = make(ResourceList) - } - var mapkey ResourceName - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + if m.Limits == nil { + m.Limits = make(ResourceList) + } + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + m.Limits[ResourceName(mapkey)] = *mapvalue + } else { + var mapvalue 
k8s_io_apimachinery_pkg_api_resource.Quantity + m.Limits[ResourceName(mapkey)] = mapvalue + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break } } - m.Limits[ResourceName(mapkey)] = *mapvalue - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType) - } - var msglen int + var stringLenmapkey uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -44899,26 +43957,26 @@ func (m *ResourceRequirements) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLenmapkey |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen - if postIndex > l { + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } + mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey if m.Requests == nil { m.Requests = make(ResourceList) } - var mapkey ResourceName - mapvalue := 
&k8s_io_apimachinery_pkg_api_resource.Quantity{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 + if iNdEx < postIndex { + var valuekey uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -44928,85 +43986,46 @@ func (m *ResourceRequirements) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + valuekey |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - if postmsgIndex > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break } - iNdEx += skippy } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + m.Requests[ResourceName(mapkey)] = *mapvalue + } else { + var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity + m.Requests[ResourceName(mapkey)] = mapvalue } - m.Requests[ResourceName(mapkey)] = *mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -46150,14 +45169,51 @@ func (m *Secret) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= 
(uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey if m.Data == nil { m.Data = make(map[string][]byte) } - var mapkey string - mapvalue := []byte{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 + if iNdEx < postIndex { + var valuekey uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -46167,81 +45223,42 @@ func (m *Secret) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + valuekey |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapbyteLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapbyteLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intMapbyteLen := int(mapbyteLen) - if intMapbyteLen < 0 { - return ErrInvalidLengthGenerated + var mapbyteLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - postbytesIndex := iNdEx + intMapbyteLen - if postbytesIndex > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - mapvalue = make([]byte, mapbyteLen) - copy(mapvalue, dAtA[iNdEx:postbytesIndex]) - iNdEx = postbytesIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF + b := dAtA[iNdEx] + iNdEx++ + mapbyteLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break } - iNdEx += skippy } + intMapbyteLen := int(mapbyteLen) + if intMapbyteLen < 0 { + return ErrInvalidLengthGenerated + } + postbytesIndex := iNdEx + intMapbyteLen + if postbytesIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := make([]byte, mapbyteLen) + copy(mapvalue, dAtA[iNdEx:postbytesIndex]) + iNdEx = postbytesIndex + m.Data[mapkey] = mapvalue + } else { + var mapvalue []byte + m.Data[mapkey] = mapvalue } - m.Data[mapkey] = mapvalue iNdEx = postIndex case 3: if wireType != 2 { @@ -46298,14 +45315,51 @@ func (m *Secret) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; 
shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey if m.StringData == nil { m.StringData = make(map[string]string) } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 + if iNdEx < postIndex { + var valuekey uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -46315,80 +45369,41 @@ func (m *Secret) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + valuekey |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break } - iNdEx += skippy } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + m.StringData[mapkey] = mapvalue + } else { + var mapvalue string + m.StringData[mapkey] = mapvalue } - m.StringData[mapkey] = mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -47393,39 +46408,6 @@ func (m *SecurityContext) Unmarshal(dAtA []byte) error { s := ProcMountType(dAtA[iNdEx:postIndex]) m.ProcMount = &s iNdEx = postIndex - 
case 10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field WindowsOptions", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.WindowsOptions == nil { - m.WindowsOptions = &WindowsSecurityContextOptions{} - } - if err := m.WindowsOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -48521,14 +47503,51 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey if m.Selector == nil { m.Selector = make(map[string]string) } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 + if iNdEx < postIndex { + var valuekey uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -48538,80 +47557,41 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + valuekey |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - postStringIndexmapvalue := iNdEx 
+ intStringLenmapvalue - if postStringIndexmapvalue > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break } - iNdEx += skippy } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + m.Selector[mapkey] = mapvalue + } else { + var mapvalue string + m.Selector[mapkey] = mapvalue } - m.Selector[mapkey] = mapvalue iNdEx = postIndex case 3: if wireType != 2 { @@ -50764,35 +49744,6 @@ func (m *VolumeMount) Unmarshal(dAtA []byte) error { s := MountPropagationMode(dAtA[iNdEx:postIndex]) m.MountPropagation = &s iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SubPathExpr", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.SubPathExpr = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -51999,39 +50950,6 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex - case 28: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CSI", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.CSI == nil { - m.CSI = &CSIVolumeSource{} - } - if err := m.CSI.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -52318,116 +51236,6 @@ func (m *WeightedPodAffinityTerm) Unmarshal(dAtA []byte) error { } return nil } -func (m *WindowsSecurityContextOptions) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: WindowsSecurityContextOptions: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: 
WindowsSecurityContextOptions: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field GMSACredentialSpecName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.GMSACredentialSpecName = &s - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field GMSACredentialSpec", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.GMSACredentialSpec = &s - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} func skipGenerated(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 @@ -52538,823 +51346,804 @@ func init() { } var fileDescriptorGenerated = []byte{ - // 13088 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0xbd, 0x7d, 0x70, 0x64, 0x57, - 0x56, 0x18, 0xbe, 0xaf, 0x5b, 0x1f, 0xdd, 0x47, 0xdf, 0x77, 0x3e, 0xac, 0x91, 0x67, 0xa6, 0xc7, - 0xcf, 0xbb, 0xe3, 0xf1, 0xda, 0xd6, 0xac, 0xc7, 0xf6, 0xda, 0xac, 0xbd, 0x06, 0x49, 0x2d, 0xcd, - 0xc8, 0x33, 0xd2, 0xb4, 0x6f, 0x6b, 0x66, 0x76, 0x8d, 0x77, 0xf1, 0x53, 0xbf, 0x2b, 0xe9, 0x59, - 0xad, 0xf7, 0xda, 0xef, 0xbd, 0xd6, 0x8c, 0xfc, 0x83, 0xfa, 0x91, 0x25, 0x10, 0xb6, 0x20, 0xa9, - 0xad, 0x84, 0xca, 0x07, 0x50, 0xa4, 0x8a, 0x90, 0x02, 0x02, 0x49, 0x85, 0x40, 0x80, 0xb0, 0x24, - 0x21, 0x90, 0x54, 0x91, 0xfc, 0xb1, 0x21, 0xa9, 0x4a, 0x2d, 0x55, 0x54, 0x14, 0x10, 0xa9, 0x50, - 0xa4, 0x2a, 0x90, 0x0a, 0xf9, 0x07, 0x85, 0x0a, 0xa9, 0xfb, 0xf9, 0xee, 0x7d, 0xfd, 0x5e, 0x77, - 0x6b, 0xac, 0x91, 0xcd, 0xd6, 0xfe, 0xd7, 0x7d, 0xcf, 0xb9, 0xe7, 0xde, 0x77, 0x3f, 0xcf, 0x39, - 0xf7, 0x7c, 0xc0, 0xab, 0xdb, 0xaf, 0x44, 0xb3, 0x5e, 0x70, 0x75, 0xbb, 0xbd, 0x4e, 0x42, 0x9f, - 0xc4, 0x24, 0xba, 0xba, 0x4b, 0x7c, 0x37, 0x08, 0xaf, 0x0a, 0x80, 0xd3, 0xf2, 0xae, 0x36, 0x82, - 0x90, 0x5c, 0xdd, 0x7d, 0xfe, 0xea, 0x26, 0xf1, 0x49, 0xe8, 0xc4, 0xc4, 0x9d, 0x6d, 0x85, 0x41, - 0x1c, 0x20, 0xc4, 0x71, 0x66, 0x9d, 0x96, 0x37, 0x4b, 0x71, 0x66, 0x77, 0x9f, 0x9f, 0x79, 0x6e, - 0xd3, 0x8b, 0xb7, 0xda, 0xeb, 0xb3, 0x8d, 0x60, 0xe7, 0xea, 0x66, 0xb0, 0x19, 0x5c, 0x65, 0xa8, - 0xeb, 0xed, 0x0d, 0xf6, 0x8f, 0xfd, 0x61, 0xbf, 0x38, 0x89, 0x99, 0x17, 0x93, 0x66, 0x76, 0x9c, - 0xc6, 0x96, 0xe7, 0x93, 0x70, 0xef, 0x6a, 0x6b, 0x7b, 0x93, 0xb5, 0x1b, 0x92, 0x28, 0x68, 0x87, - 
0x0d, 0x92, 0x6e, 0xb8, 0x6b, 0xad, 0xe8, 0xea, 0x0e, 0x89, 0x9d, 0x8c, 0xee, 0xce, 0x5c, 0xcd, - 0xab, 0x15, 0xb6, 0xfd, 0xd8, 0xdb, 0xe9, 0x6c, 0xe6, 0xd3, 0xbd, 0x2a, 0x44, 0x8d, 0x2d, 0xb2, - 0xe3, 0x74, 0xd4, 0x7b, 0x21, 0xaf, 0x5e, 0x3b, 0xf6, 0x9a, 0x57, 0x3d, 0x3f, 0x8e, 0xe2, 0x30, - 0x5d, 0xc9, 0xfe, 0xba, 0x05, 0x97, 0xe6, 0xee, 0xd5, 0x17, 0x9b, 0x4e, 0x14, 0x7b, 0x8d, 0xf9, - 0x66, 0xd0, 0xd8, 0xae, 0xc7, 0x41, 0x48, 0xee, 0x06, 0xcd, 0xf6, 0x0e, 0xa9, 0xb3, 0x81, 0x40, - 0xcf, 0x42, 0x69, 0x97, 0xfd, 0x5f, 0xae, 0x4e, 0x5b, 0x97, 0xac, 0x2b, 0xe5, 0xf9, 0xc9, 0xdf, - 0xdc, 0xaf, 0x7c, 0xec, 0x60, 0xbf, 0x52, 0xba, 0x2b, 0xca, 0xb1, 0xc2, 0x40, 0x97, 0x61, 0x68, - 0x23, 0x5a, 0xdb, 0x6b, 0x91, 0xe9, 0x02, 0xc3, 0x1d, 0x17, 0xb8, 0x43, 0x4b, 0x75, 0x5a, 0x8a, - 0x05, 0x14, 0x5d, 0x85, 0x72, 0xcb, 0x09, 0x63, 0x2f, 0xf6, 0x02, 0x7f, 0xba, 0x78, 0xc9, 0xba, - 0x32, 0x38, 0x3f, 0x25, 0x50, 0xcb, 0x35, 0x09, 0xc0, 0x09, 0x0e, 0xed, 0x46, 0x48, 0x1c, 0xf7, - 0xb6, 0xdf, 0xdc, 0x9b, 0x1e, 0xb8, 0x64, 0x5d, 0x29, 0x25, 0xdd, 0xc0, 0xa2, 0x1c, 0x2b, 0x0c, - 0xfb, 0x87, 0x0b, 0x50, 0x9a, 0xdb, 0xd8, 0xf0, 0x7c, 0x2f, 0xde, 0x43, 0x77, 0x61, 0xd4, 0x0f, - 0x5c, 0x22, 0xff, 0xb3, 0xaf, 0x18, 0xb9, 0x76, 0x69, 0xb6, 0x73, 0x29, 0xcd, 0xae, 0x6a, 0x78, - 0xf3, 0x93, 0x07, 0xfb, 0x95, 0x51, 0xbd, 0x04, 0x1b, 0x74, 0x10, 0x86, 0x91, 0x56, 0xe0, 0x2a, - 0xb2, 0x05, 0x46, 0xb6, 0x92, 0x45, 0xb6, 0x96, 0xa0, 0xcd, 0x4f, 0x1c, 0xec, 0x57, 0x46, 0xb4, - 0x02, 0xac, 0x13, 0x41, 0xeb, 0x30, 0x41, 0xff, 0xfa, 0xb1, 0xa7, 0xe8, 0x16, 0x19, 0xdd, 0x27, - 0xf3, 0xe8, 0x6a, 0xa8, 0xf3, 0xa7, 0x0e, 0xf6, 0x2b, 0x13, 0xa9, 0x42, 0x9c, 0x26, 0x68, 0xbf, - 0x0f, 0xe3, 0x73, 0x71, 0xec, 0x34, 0xb6, 0x88, 0xcb, 0x67, 0x10, 0xbd, 0x08, 0x03, 0xbe, 0xb3, - 0x43, 0xc4, 0xfc, 0x5e, 0x12, 0x03, 0x3b, 0xb0, 0xea, 0xec, 0x90, 0xc3, 0xfd, 0xca, 0xe4, 0x1d, - 0xdf, 0x7b, 0xaf, 0x2d, 0x56, 0x05, 0x2d, 0xc3, 0x0c, 0x1b, 0x5d, 0x03, 0x70, 0xc9, 0xae, 0xd7, - 0x20, 0x35, 0x27, 0xde, 0x12, 0xf3, 0x8d, 0x44, 0x5d, 0xa8, 0x2a, 0x08, 0xd6, 0xb0, 0xec, 0x07, - 0x50, 0x9e, 0xdb, 0x0d, 0x3c, 0xb7, 0x16, 0xb8, 0x11, 0xda, 0x86, 0x89, 0x56, 0x48, 0x36, 0x48, - 0xa8, 0x8a, 0xa6, 0xad, 0x4b, 0xc5, 0x2b, 0x23, 0xd7, 0xae, 0x64, 0x7e, 0xac, 0x89, 0xba, 0xe8, - 0xc7, 0xe1, 0xde, 0xfc, 0x63, 0xa2, 0xbd, 0x89, 0x14, 0x14, 0xa7, 0x29, 0xdb, 0xff, 0xba, 0x00, - 0x67, 0xe6, 0xde, 0x6f, 0x87, 0xa4, 0xea, 0x45, 0xdb, 0xe9, 0x15, 0xee, 0x7a, 0xd1, 0xf6, 0x6a, - 0x32, 0x02, 0x6a, 0x69, 0x55, 0x45, 0x39, 0x56, 0x18, 0xe8, 0x39, 0x18, 0xa6, 0xbf, 0xef, 0xe0, - 0x65, 0xf1, 0xc9, 0xa7, 0x04, 0xf2, 0x48, 0xd5, 0x89, 0x9d, 0x2a, 0x07, 0x61, 0x89, 0x83, 0x56, - 0x60, 0xa4, 0xc1, 0x36, 0xe4, 0xe6, 0x4a, 0xe0, 0x12, 0x36, 0x99, 0xe5, 0xf9, 0x67, 0x28, 0xfa, - 0x42, 0x52, 0x7c, 0xb8, 0x5f, 0x99, 0xe6, 0x7d, 0x13, 0x24, 0x34, 0x18, 0xd6, 0xeb, 0x23, 0x5b, - 0xed, 0xaf, 0x01, 0x46, 0x09, 0x32, 0xf6, 0xd6, 0x15, 0x6d, 0xab, 0x0c, 0xb2, 0xad, 0x32, 0x9a, - 0xbd, 0x4d, 0xd0, 0xf3, 0x30, 0xb0, 0xed, 0xf9, 0xee, 0xf4, 0x10, 0xa3, 0x75, 0x81, 0xce, 0xf9, - 0x4d, 0xcf, 0x77, 0x0f, 0xf7, 0x2b, 0x53, 0x46, 0x77, 0x68, 0x21, 0x66, 0xa8, 0xf6, 0x9f, 0x58, - 0x50, 0x61, 0xb0, 0x25, 0xaf, 0x49, 0x6a, 0x24, 0x8c, 0xbc, 0x28, 0x26, 0x7e, 0x6c, 0x0c, 0xe8, - 0x35, 0x80, 0x88, 0x34, 0x42, 0x12, 0x6b, 0x43, 0xaa, 0x16, 0x46, 0x5d, 0x41, 0xb0, 0x86, 0x45, - 0x0f, 0x84, 0x68, 0xcb, 0x09, 0xd9, 0xfa, 0x12, 0x03, 0xab, 0x0e, 0x84, 0xba, 0x04, 0xe0, 0x04, - 0xc7, 0x38, 0x10, 0x8a, 0xbd, 0x0e, 0x04, 0xf4, 0x59, 0x98, 0x48, 0x1a, 0x8b, 0x5a, 0x4e, 0x43, - 0x0e, 0x20, 0xdb, 0x32, 
0x75, 0x13, 0x84, 0xd3, 0xb8, 0xf6, 0x3f, 0xb0, 0xc4, 0xe2, 0xa1, 0x5f, - 0xfd, 0x11, 0xff, 0x56, 0xfb, 0x97, 0x2d, 0x18, 0x9e, 0xf7, 0x7c, 0xd7, 0xf3, 0x37, 0xd1, 0x3b, - 0x50, 0xa2, 0x77, 0x93, 0xeb, 0xc4, 0x8e, 0x38, 0xf7, 0x3e, 0xa5, 0xed, 0x2d, 0x75, 0x55, 0xcc, - 0xb6, 0xb6, 0x37, 0x69, 0x41, 0x34, 0x4b, 0xb1, 0xe9, 0x6e, 0xbb, 0xbd, 0xfe, 0x2e, 0x69, 0xc4, - 0x2b, 0x24, 0x76, 0x92, 0xcf, 0x49, 0xca, 0xb0, 0xa2, 0x8a, 0x6e, 0xc2, 0x50, 0xec, 0x84, 0x9b, - 0x24, 0x16, 0x07, 0x60, 0xe6, 0x41, 0xc5, 0x6b, 0x62, 0xba, 0x23, 0x89, 0xdf, 0x20, 0xc9, 0xb5, - 0xb0, 0xc6, 0xaa, 0x62, 0x41, 0xc2, 0xfe, 0xab, 0xc3, 0x70, 0x6e, 0xa1, 0xbe, 0x9c, 0xb3, 0xae, - 0x2e, 0xc3, 0x90, 0x1b, 0x7a, 0xbb, 0x24, 0x14, 0xe3, 0xac, 0xa8, 0x54, 0x59, 0x29, 0x16, 0x50, - 0xf4, 0x0a, 0x8c, 0xf2, 0x0b, 0xe9, 0x86, 0xe3, 0xbb, 0x4d, 0x39, 0xc4, 0xa7, 0x05, 0xf6, 0xe8, - 0x5d, 0x0d, 0x86, 0x0d, 0xcc, 0x23, 0x2e, 0xaa, 0xcb, 0xa9, 0xcd, 0x98, 0x77, 0xd9, 0x7d, 0xd9, - 0x82, 0x49, 0xde, 0xcc, 0x5c, 0x1c, 0x87, 0xde, 0x7a, 0x3b, 0x26, 0xd1, 0xf4, 0x20, 0x3b, 0xe9, - 0x16, 0xb2, 0x46, 0x2b, 0x77, 0x04, 0x66, 0xef, 0xa6, 0xa8, 0xf0, 0x43, 0x70, 0x5a, 0xb4, 0x3b, - 0x99, 0x06, 0xe3, 0x8e, 0x66, 0xd1, 0xf7, 0x58, 0x30, 0xd3, 0x08, 0xfc, 0x38, 0x0c, 0x9a, 0x4d, - 0x12, 0xd6, 0xda, 0xeb, 0x4d, 0x2f, 0xda, 0xe2, 0xeb, 0x14, 0x93, 0x0d, 0x76, 0x12, 0xe4, 0xcc, - 0xa1, 0x42, 0x12, 0x73, 0x78, 0xf1, 0x60, 0xbf, 0x32, 0xb3, 0x90, 0x4b, 0x0a, 0x77, 0x69, 0x06, - 0x6d, 0x03, 0xa2, 0x57, 0x69, 0x3d, 0x76, 0x36, 0x49, 0xd2, 0xf8, 0x70, 0xff, 0x8d, 0x9f, 0x3d, - 0xd8, 0xaf, 0xa0, 0xd5, 0x0e, 0x12, 0x38, 0x83, 0x2c, 0x7a, 0x0f, 0x4e, 0xd3, 0xd2, 0x8e, 0x6f, - 0x2d, 0xf5, 0xdf, 0xdc, 0xf4, 0xc1, 0x7e, 0xe5, 0xf4, 0x6a, 0x06, 0x11, 0x9c, 0x49, 0x1a, 0x7d, - 0xb7, 0x05, 0xe7, 0x92, 0xcf, 0x5f, 0x7c, 0xd0, 0x72, 0x7c, 0x37, 0x69, 0xb8, 0xdc, 0x7f, 0xc3, - 0xf4, 0x4c, 0x3e, 0xb7, 0x90, 0x47, 0x09, 0xe7, 0x37, 0x32, 0xb3, 0x00, 0x67, 0x32, 0x57, 0x0b, - 0x9a, 0x84, 0xe2, 0x36, 0xe1, 0x5c, 0x50, 0x19, 0xd3, 0x9f, 0xe8, 0x34, 0x0c, 0xee, 0x3a, 0xcd, - 0xb6, 0xd8, 0x28, 0x98, 0xff, 0xf9, 0x4c, 0xe1, 0x15, 0xcb, 0xfe, 0x37, 0x45, 0x98, 0x58, 0xa8, - 0x2f, 0x3f, 0xd4, 0x2e, 0xd4, 0xaf, 0xa1, 0x42, 0xd7, 0x6b, 0x28, 0xb9, 0xd4, 0x8a, 0xb9, 0x97, - 0xda, 0xff, 0x9f, 0xb1, 0x85, 0x06, 0xd8, 0x16, 0xfa, 0x96, 0x9c, 0x2d, 0x74, 0xcc, 0x1b, 0x67, - 0x37, 0x67, 0x15, 0x0d, 0xb2, 0xc9, 0xcc, 0xe4, 0x58, 0x6e, 0x05, 0x0d, 0xa7, 0x99, 0x3e, 0xfa, - 0x8e, 0xb8, 0x94, 0x8e, 0x67, 0x1e, 0x1b, 0x30, 0xba, 0xe0, 0xb4, 0x9c, 0x75, 0xaf, 0xe9, 0xc5, - 0x1e, 0x89, 0xd0, 0x53, 0x50, 0x74, 0x5c, 0x97, 0x71, 0x5b, 0xe5, 0xf9, 0x33, 0x07, 0xfb, 0x95, - 0xe2, 0x9c, 0x4b, 0xaf, 0x7d, 0x50, 0x58, 0x7b, 0x98, 0x62, 0xa0, 0x4f, 0xc2, 0x80, 0x1b, 0x06, - 0xad, 0xe9, 0x02, 0xc3, 0xa4, 0xbb, 0x6e, 0xa0, 0x1a, 0x06, 0xad, 0x14, 0x2a, 0xc3, 0xb1, 0x7f, - 0xad, 0x00, 0xe7, 0x17, 0x48, 0x6b, 0x6b, 0xa9, 0x9e, 0x73, 0x7e, 0x5f, 0x81, 0xd2, 0x4e, 0xe0, - 0x7b, 0x71, 0x10, 0x46, 0xa2, 0x69, 0xb6, 0x22, 0x56, 0x44, 0x19, 0x56, 0x50, 0x74, 0x09, 0x06, - 0x5a, 0x09, 0x53, 0x39, 0x2a, 0x19, 0x52, 0xc6, 0x4e, 0x32, 0x08, 0xc5, 0x68, 0x47, 0x24, 0x14, - 0x2b, 0x46, 0x61, 0xdc, 0x89, 0x48, 0x88, 0x19, 0x24, 0xb9, 0x99, 0xe9, 0x9d, 0x2d, 0x4e, 0xe8, - 0xd4, 0xcd, 0x4c, 0x21, 0x58, 0xc3, 0x42, 0x35, 0x28, 0x47, 0xa9, 0x99, 0xed, 0x6b, 0x9b, 0x8e, - 0xb1, 0xab, 0x5b, 0xcd, 0x64, 0x42, 0xc4, 0xb8, 0x51, 0x86, 0x7a, 0x5e, 0xdd, 0x5f, 0x2d, 0x00, - 0xe2, 0x43, 0xf8, 0x17, 0x6c, 0xe0, 0xee, 0x74, 0x0e, 0x5c, 0xff, 0x5b, 0xe2, 0xb8, 0x46, 0xef, - 0x7f, 0x5b, 0x70, 0x7e, 0xc1, 0xf3, 0x5d, 0x12, 
0xe6, 0x2c, 0xc0, 0x47, 0x23, 0xcb, 0x1e, 0x8d, - 0x69, 0x30, 0x96, 0xd8, 0xc0, 0x31, 0x2c, 0x31, 0xfb, 0x8f, 0x2d, 0x40, 0xfc, 0xb3, 0x3f, 0x72, - 0x1f, 0x7b, 0xa7, 0xf3, 0x63, 0x8f, 0x61, 0x59, 0xd8, 0xb7, 0x60, 0x7c, 0xa1, 0xe9, 0x11, 0x3f, - 0x5e, 0xae, 0x2d, 0x04, 0xfe, 0x86, 0xb7, 0x89, 0x3e, 0x03, 0xe3, 0xb1, 0xb7, 0x43, 0x82, 0x76, - 0x5c, 0x27, 0x8d, 0xc0, 0x67, 0x92, 0xa4, 0x75, 0x65, 0x70, 0x1e, 0x1d, 0xec, 0x57, 0xc6, 0xd7, - 0x0c, 0x08, 0x4e, 0x61, 0xda, 0xbf, 0x43, 0xc7, 0x2f, 0xd8, 0x69, 0x05, 0x3e, 0xf1, 0xe3, 0x85, - 0xc0, 0x77, 0xb9, 0xc6, 0xe1, 0x33, 0x30, 0x10, 0xd3, 0xf1, 0xe0, 0x63, 0x77, 0x59, 0x6e, 0x14, - 0x3a, 0x0a, 0x87, 0xfb, 0x95, 0xb3, 0x9d, 0x35, 0xd8, 0x38, 0xb1, 0x3a, 0xe8, 0x5b, 0x60, 0x28, - 0x8a, 0x9d, 0xb8, 0x1d, 0x89, 0xd1, 0x7c, 0x42, 0x8e, 0x66, 0x9d, 0x95, 0x1e, 0xee, 0x57, 0x26, - 0x54, 0x35, 0x5e, 0x84, 0x45, 0x05, 0xf4, 0x34, 0x0c, 0xef, 0x90, 0x28, 0x72, 0x36, 0xe5, 0x6d, - 0x38, 0x21, 0xea, 0x0e, 0xaf, 0xf0, 0x62, 0x2c, 0xe1, 0xe8, 0x49, 0x18, 0x24, 0x61, 0x18, 0x84, - 0x62, 0x8f, 0x8e, 0x09, 0xc4, 0xc1, 0x45, 0x5a, 0x88, 0x39, 0xcc, 0xfe, 0xf7, 0x16, 0x4c, 0xa8, - 0xbe, 0xf2, 0xb6, 0x4e, 0x40, 0x2a, 0x78, 0x0b, 0xa0, 0x21, 0x3f, 0x30, 0x62, 0xb7, 0xc7, 0xc8, - 0xb5, 0xcb, 0x99, 0x17, 0x75, 0xc7, 0x30, 0x26, 0x94, 0x55, 0x51, 0x84, 0x35, 0x6a, 0xf6, 0x3f, - 0xb7, 0xe0, 0x54, 0xea, 0x8b, 0x6e, 0x79, 0x51, 0x8c, 0xde, 0xee, 0xf8, 0xaa, 0xd9, 0xfe, 0xbe, - 0x8a, 0xd6, 0x66, 0xdf, 0xa4, 0x96, 0xb2, 0x2c, 0xd1, 0xbe, 0xe8, 0x06, 0x0c, 0x7a, 0x31, 0xd9, - 0x91, 0x1f, 0xf3, 0x64, 0xd7, 0x8f, 0xe1, 0xbd, 0x4a, 0x66, 0x64, 0x99, 0xd6, 0xc4, 0x9c, 0x80, - 0xfd, 0x37, 0x8a, 0x50, 0xe6, 0xcb, 0x76, 0xc5, 0x69, 0x9d, 0xc0, 0x5c, 0x2c, 0xc3, 0x00, 0xa3, - 0xce, 0x3b, 0xfe, 0x54, 0x76, 0xc7, 0x45, 0x77, 0x66, 0xa9, 0xc8, 0xcf, 0x99, 0x23, 0x75, 0x35, - 0xd0, 0x22, 0xcc, 0x48, 0x20, 0x07, 0x60, 0xdd, 0xf3, 0x9d, 0x70, 0x8f, 0x96, 0x4d, 0x17, 0x19, - 0xc1, 0xe7, 0xba, 0x13, 0x9c, 0x57, 0xf8, 0x9c, 0xac, 0xea, 0x6b, 0x02, 0xc0, 0x1a, 0xd1, 0x99, - 0x97, 0xa1, 0xac, 0x90, 0x8f, 0xc2, 0xe3, 0xcc, 0x7c, 0x16, 0x26, 0x52, 0x6d, 0xf5, 0xaa, 0x3e, - 0xaa, 0xb3, 0x48, 0xbf, 0xc2, 0x4e, 0x01, 0xd1, 0xeb, 0x45, 0x7f, 0x57, 0x9c, 0xa2, 0xef, 0xc3, - 0xe9, 0x66, 0xc6, 0xe1, 0x24, 0xa6, 0xaa, 0xff, 0xc3, 0xec, 0xbc, 0xf8, 0xec, 0xd3, 0x59, 0x50, - 0x9c, 0xd9, 0x06, 0xbd, 0xf6, 0x83, 0x16, 0x5d, 0xf3, 0x4e, 0x53, 0xe7, 0xa0, 0x6f, 0x8b, 0x32, - 0xac, 0xa0, 0xf4, 0x08, 0x3b, 0xad, 0x3a, 0x7f, 0x93, 0xec, 0xd5, 0x49, 0x93, 0x34, 0xe2, 0x20, - 0xfc, 0x50, 0xbb, 0x7f, 0x81, 0x8f, 0x3e, 0x3f, 0x01, 0x47, 0x04, 0x81, 0xe2, 0x4d, 0xb2, 0xc7, - 0xa7, 0x42, 0xff, 0xba, 0x62, 0xd7, 0xaf, 0xfb, 0x39, 0x0b, 0xc6, 0xd4, 0xd7, 0x9d, 0xc0, 0x56, - 0x9f, 0x37, 0xb7, 0xfa, 0x85, 0xae, 0x0b, 0x3c, 0x67, 0x93, 0x7f, 0xb5, 0x00, 0xe7, 0x14, 0x0e, - 0x65, 0xf7, 0xf9, 0x1f, 0xb1, 0xaa, 0xae, 0x42, 0xd9, 0x57, 0x8a, 0x28, 0xcb, 0xd4, 0x00, 0x25, - 0x6a, 0xa8, 0x04, 0x87, 0x72, 0x6d, 0x7e, 0xa2, 0x2d, 0x1a, 0xd5, 0x35, 0xb4, 0x42, 0x1b, 0x3b, - 0x0f, 0xc5, 0xb6, 0xe7, 0x8a, 0x3b, 0xe3, 0x53, 0x72, 0xb4, 0xef, 0x2c, 0x57, 0x0f, 0xf7, 0x2b, - 0x4f, 0xe4, 0xbd, 0x0e, 0xd0, 0xcb, 0x2a, 0x9a, 0xbd, 0xb3, 0x5c, 0xc5, 0xb4, 0x32, 0x9a, 0x83, - 0x09, 0xf9, 0x00, 0x72, 0x97, 0x72, 0x50, 0x81, 0x2f, 0xae, 0x16, 0xa5, 0x66, 0xc5, 0x26, 0x18, - 0xa7, 0xf1, 0x51, 0x15, 0x26, 0xb7, 0xdb, 0xeb, 0xa4, 0x49, 0x62, 0xfe, 0xc1, 0x37, 0x09, 0x57, - 0x42, 0x96, 0x13, 0x61, 0xeb, 0x66, 0x0a, 0x8e, 0x3b, 0x6a, 0xd8, 0x7f, 0xce, 0x8e, 0x78, 0x31, - 0x7a, 0xb5, 0x30, 0xa0, 0x0b, 0x8b, 0x52, 0xff, 0x30, 0x97, 0x73, 0x3f, 
0xab, 0xe2, 0x26, 0xd9, - 0x5b, 0x0b, 0x28, 0xb3, 0x9d, 0xbd, 0x2a, 0x8c, 0x35, 0x3f, 0xd0, 0x75, 0xcd, 0xff, 0x42, 0x01, - 0xce, 0xa8, 0x11, 0x30, 0xf8, 0xba, 0xbf, 0xe8, 0x63, 0xf0, 0x3c, 0x8c, 0xb8, 0x64, 0xc3, 0x69, - 0x37, 0x63, 0xa5, 0x11, 0x1f, 0xe4, 0xaf, 0x22, 0xd5, 0xa4, 0x18, 0xeb, 0x38, 0x47, 0x18, 0xb6, - 0x9f, 0x18, 0x61, 0x77, 0x6b, 0xec, 0xd0, 0x35, 0xae, 0x76, 0x8d, 0x95, 0xbb, 0x6b, 0x9e, 0x84, - 0x41, 0x6f, 0x87, 0xf2, 0x5a, 0x05, 0x93, 0x85, 0x5a, 0xa6, 0x85, 0x98, 0xc3, 0xd0, 0x27, 0x60, - 0xb8, 0x11, 0xec, 0xec, 0x38, 0xbe, 0xcb, 0xae, 0xbc, 0xf2, 0xfc, 0x08, 0x65, 0xc7, 0x16, 0x78, - 0x11, 0x96, 0x30, 0x74, 0x1e, 0x06, 0x9c, 0x70, 0x93, 0xab, 0x25, 0xca, 0xf3, 0x25, 0xda, 0xd2, - 0x5c, 0xb8, 0x19, 0x61, 0x56, 0x4a, 0xa5, 0xaa, 0xfb, 0x41, 0xb8, 0xed, 0xf9, 0x9b, 0x55, 0x2f, - 0x14, 0x5b, 0x42, 0xdd, 0x85, 0xf7, 0x14, 0x04, 0x6b, 0x58, 0x68, 0x09, 0x06, 0x5b, 0x41, 0x18, - 0x47, 0xd3, 0x43, 0x6c, 0xb8, 0x9f, 0xc8, 0x39, 0x88, 0xf8, 0xd7, 0xd6, 0x82, 0x30, 0x4e, 0x3e, - 0x80, 0xfe, 0x8b, 0x30, 0xaf, 0x8e, 0xbe, 0x05, 0x8a, 0xc4, 0xdf, 0x9d, 0x1e, 0x66, 0x54, 0x66, - 0xb2, 0xa8, 0x2c, 0xfa, 0xbb, 0x77, 0x9d, 0x30, 0x39, 0xa5, 0x17, 0xfd, 0x5d, 0x4c, 0xeb, 0xa0, - 0xcf, 0x43, 0x59, 0x6e, 0xf1, 0x48, 0x68, 0xcc, 0x32, 0x97, 0x98, 0x3c, 0x18, 0x30, 0x79, 0xaf, - 0xed, 0x85, 0x64, 0x87, 0xf8, 0x71, 0x94, 0x9c, 0x69, 0x12, 0x1a, 0xe1, 0x84, 0x1a, 0xfa, 0xbc, - 0x54, 0xd3, 0xae, 0x04, 0x6d, 0x3f, 0x8e, 0xa6, 0xcb, 0xac, 0x7b, 0x99, 0x0f, 0x68, 0x77, 0x13, - 0xbc, 0xb4, 0x1e, 0x97, 0x57, 0xc6, 0x06, 0x29, 0x84, 0x61, 0xac, 0xe9, 0xed, 0x12, 0x9f, 0x44, - 0x51, 0x2d, 0x0c, 0xd6, 0xc9, 0x34, 0xb0, 0x9e, 0x9f, 0xcb, 0x7e, 0x57, 0x0a, 0xd6, 0xc9, 0xfc, - 0xd4, 0xc1, 0x7e, 0x65, 0xec, 0x96, 0x5e, 0x07, 0x9b, 0x24, 0xd0, 0x1d, 0x18, 0xa7, 0x72, 0x8d, - 0x97, 0x10, 0x1d, 0xe9, 0x45, 0x94, 0x49, 0x1f, 0xd8, 0xa8, 0x84, 0x53, 0x44, 0xd0, 0x1b, 0x50, - 0x6e, 0x7a, 0x1b, 0xa4, 0xb1, 0xd7, 0x68, 0x92, 0xe9, 0x51, 0x46, 0x31, 0x73, 0x5b, 0xdd, 0x92, - 0x48, 0x5c, 0x2e, 0x52, 0x7f, 0x71, 0x52, 0x1d, 0xdd, 0x85, 0xb3, 0x31, 0x09, 0x77, 0x3c, 0xdf, - 0xa1, 0xdb, 0x41, 0xc8, 0x0b, 0xec, 0x75, 0x6e, 0x8c, 0xad, 0xb7, 0x8b, 0x62, 0xe8, 0xce, 0xae, - 0x65, 0x62, 0xe1, 0x9c, 0xda, 0xe8, 0x36, 0x4c, 0xb0, 0x9d, 0x50, 0x6b, 0x37, 0x9b, 0xb5, 0xa0, - 0xe9, 0x35, 0xf6, 0xa6, 0xc7, 0x19, 0xc1, 0x4f, 0xc8, 0x7b, 0x61, 0xd9, 0x04, 0x1f, 0xee, 0x57, - 0x20, 0xf9, 0x87, 0xd3, 0xb5, 0xd1, 0x3a, 0x7b, 0x8e, 0x69, 0x87, 0x5e, 0xbc, 0x47, 0xd7, 0x2f, - 0x79, 0x10, 0x4f, 0x4f, 0x74, 0x15, 0x85, 0x75, 0x54, 0xf5, 0x66, 0xa3, 0x17, 0xe2, 0x34, 0x41, - 0xba, 0xb5, 0xa3, 0xd8, 0xf5, 0xfc, 0xe9, 0x49, 0x76, 0x62, 0xa8, 0x9d, 0x51, 0xa7, 0x85, 0x98, - 0xc3, 0xd8, 0x53, 0x0c, 0xfd, 0x71, 0x9b, 0x9e, 0xa0, 0x53, 0x0c, 0x31, 0x79, 0x8a, 0x91, 0x00, - 0x9c, 0xe0, 0x50, 0xa6, 0x26, 0x8e, 0xf7, 0xa6, 0x11, 0x43, 0x55, 0xdb, 0x65, 0x6d, 0xed, 0xf3, - 0x98, 0x96, 0xa3, 0x5b, 0x30, 0x4c, 0xfc, 0xdd, 0xa5, 0x30, 0xd8, 0x99, 0x3e, 0x95, 0xbf, 0x67, - 0x17, 0x39, 0x0a, 0x3f, 0xd0, 0x13, 0x01, 0x4f, 0x14, 0x63, 0x49, 0x02, 0x3d, 0x80, 0xe9, 0x8c, - 0x19, 0xe1, 0x13, 0x70, 0x9a, 0x4d, 0xc0, 0x6b, 0xa2, 0xee, 0xf4, 0x5a, 0x0e, 0xde, 0x61, 0x17, - 0x18, 0xce, 0xa5, 0x8e, 0xbe, 0x00, 0x63, 0x7c, 0x43, 0xf1, 0x77, 0xdc, 0x68, 0xfa, 0x0c, 0xfb, - 0x9a, 0x4b, 0xf9, 0x9b, 0x93, 0x23, 0xce, 0x9f, 0x11, 0x1d, 0x1a, 0xd3, 0x4b, 0x23, 0x6c, 0x52, - 0xb3, 0xd7, 0x61, 0x5c, 0x9d, 0x5b, 0x6c, 0xe9, 0xa0, 0x0a, 0x0c, 0x32, 0x6e, 0x47, 0xe8, 0xb7, - 0xca, 0x74, 0xa6, 0x18, 0x27, 0x84, 0x79, 0x39, 0x9b, 0x29, 0xef, 0x7d, 0x32, 0xbf, 0x17, 0x13, - 
0x2e, 0x55, 0x17, 0xb5, 0x99, 0x92, 0x00, 0x9c, 0xe0, 0xd8, 0xff, 0x97, 0x73, 0x8d, 0xc9, 0xe1, - 0xd8, 0xc7, 0x75, 0xf0, 0x2c, 0x94, 0xb6, 0x82, 0x28, 0xa6, 0xd8, 0xac, 0x8d, 0xc1, 0x84, 0x4f, - 0xbc, 0x21, 0xca, 0xb1, 0xc2, 0x40, 0xaf, 0xc2, 0x58, 0x43, 0x6f, 0x40, 0xdc, 0x65, 0x6a, 0x08, - 0x8c, 0xd6, 0xb1, 0x89, 0x8b, 0x5e, 0x81, 0x12, 0xb3, 0xc2, 0x68, 0x04, 0x4d, 0xc1, 0x64, 0xc9, - 0x0b, 0xb9, 0x54, 0x13, 0xe5, 0x87, 0xda, 0x6f, 0xac, 0xb0, 0xd1, 0x65, 0x18, 0xa2, 0x5d, 0x58, - 0xae, 0x89, 0x5b, 0x44, 0xa9, 0x6a, 0x6e, 0xb0, 0x52, 0x2c, 0xa0, 0xf6, 0x5f, 0x2f, 0x68, 0xa3, - 0x4c, 0x25, 0x52, 0x82, 0x6a, 0x30, 0x7c, 0xdf, 0xf1, 0x62, 0xcf, 0xdf, 0x14, 0xec, 0xc2, 0xd3, - 0x5d, 0xaf, 0x14, 0x56, 0xe9, 0x1e, 0xaf, 0xc0, 0x2f, 0x3d, 0xf1, 0x07, 0x4b, 0x32, 0x94, 0x62, - 0xd8, 0xf6, 0x7d, 0x4a, 0xb1, 0xd0, 0x2f, 0x45, 0xcc, 0x2b, 0x70, 0x8a, 0xe2, 0x0f, 0x96, 0x64, - 0xd0, 0xdb, 0x00, 0x72, 0x59, 0x12, 0x57, 0x58, 0x3f, 0x3c, 0xdb, 0x9b, 0xe8, 0x9a, 0xaa, 0x33, - 0x3f, 0x4e, 0xaf, 0xd4, 0xe4, 0x3f, 0xd6, 0xe8, 0xd9, 0x31, 0x63, 0xab, 0x3a, 0x3b, 0x83, 0xbe, - 0x9d, 0x9e, 0x04, 0x4e, 0x18, 0x13, 0x77, 0x2e, 0x16, 0x83, 0xf3, 0xc9, 0xfe, 0x64, 0x8a, 0x35, - 0x6f, 0x87, 0xe8, 0xa7, 0x86, 0x20, 0x82, 0x13, 0x7a, 0xf6, 0x2f, 0x15, 0x61, 0x3a, 0xaf, 0xbb, - 0x74, 0xd1, 0x91, 0x07, 0x5e, 0xbc, 0x40, 0xb9, 0x21, 0xcb, 0x5c, 0x74, 0x8b, 0xa2, 0x1c, 0x2b, - 0x0c, 0x3a, 0xfb, 0x91, 0xb7, 0x29, 0x45, 0xc2, 0xc1, 0x64, 0xf6, 0xeb, 0xac, 0x14, 0x0b, 0x28, - 0xc5, 0x0b, 0x89, 0x13, 0x09, 0xf3, 0x1a, 0x6d, 0x95, 0x60, 0x56, 0x8a, 0x05, 0x54, 0xd7, 0x37, - 0x0d, 0xf4, 0xd0, 0x37, 0x19, 0x43, 0x34, 0x78, 0xbc, 0x43, 0x84, 0xbe, 0x08, 0xb0, 0xe1, 0xf9, - 0x5e, 0xb4, 0xc5, 0xa8, 0x0f, 0x1d, 0x99, 0xba, 0xe2, 0xa5, 0x96, 0x14, 0x15, 0xac, 0x51, 0x44, - 0x2f, 0xc1, 0x88, 0xda, 0x80, 0xcb, 0x55, 0xf6, 0xd6, 0xa8, 0xd9, 0x6e, 0x24, 0xa7, 0x51, 0x15, - 0xeb, 0x78, 0xf6, 0xbb, 0xe9, 0xf5, 0x22, 0x76, 0x80, 0x36, 0xbe, 0x56, 0xbf, 0xe3, 0x5b, 0xe8, - 0x3e, 0xbe, 0xf6, 0xaf, 0x17, 0x61, 0xc2, 0x68, 0xac, 0x1d, 0xf5, 0x71, 0x66, 0x5d, 0xa7, 0xf7, - 0x9c, 0x13, 0x13, 0xb1, 0xff, 0xec, 0xde, 0x5b, 0x45, 0xbf, 0x0b, 0xe9, 0x0e, 0xe0, 0xf5, 0xd1, - 0x17, 0xa1, 0xdc, 0x74, 0x22, 0xa6, 0xbb, 0x22, 0x62, 0xdf, 0xf5, 0x43, 0x2c, 0x91, 0x23, 0x9c, - 0x28, 0xd6, 0xae, 0x1a, 0x4e, 0x3b, 0x21, 0x49, 0x2f, 0x64, 0xca, 0xfb, 0x48, 0xfb, 0x2d, 0xd5, - 0x09, 0xca, 0x20, 0xed, 0x61, 0x0e, 0x43, 0xaf, 0xc0, 0x68, 0x48, 0xd8, 0xaa, 0x58, 0xa0, 0xac, - 0x1c, 0x5b, 0x66, 0x83, 0x09, 0xcf, 0x87, 0x35, 0x18, 0x36, 0x30, 0x13, 0x56, 0x7e, 0xa8, 0x0b, - 0x2b, 0xff, 0x34, 0x0c, 0xb3, 0x1f, 0x6a, 0x05, 0xa8, 0xd9, 0x58, 0xe6, 0xc5, 0x58, 0xc2, 0xd3, - 0x0b, 0xa6, 0xd4, 0xe7, 0x82, 0xf9, 0x24, 0x8c, 0x57, 0x1d, 0xb2, 0x13, 0xf8, 0x8b, 0xbe, 0xdb, - 0x0a, 0x3c, 0x3f, 0x46, 0xd3, 0x30, 0xc0, 0x6e, 0x07, 0xbe, 0xb7, 0x07, 0x28, 0x05, 0x3c, 0x40, - 0x19, 0x73, 0x7b, 0x13, 0xce, 0x54, 0x83, 0xfb, 0xfe, 0x7d, 0x27, 0x74, 0xe7, 0x6a, 0xcb, 0x9a, - 0x9c, 0xbb, 0x2a, 0xe5, 0x2c, 0x6e, 0x0f, 0x95, 0x79, 0xa6, 0x6a, 0x35, 0xf9, 0x5d, 0xbb, 0xe4, - 0x35, 0x49, 0x8e, 0x36, 0xe2, 0x6f, 0x15, 0x8c, 0x96, 0x12, 0x7c, 0xf5, 0x60, 0x64, 0xe5, 0x3e, - 0x18, 0xbd, 0x09, 0xa5, 0x0d, 0x8f, 0x34, 0x5d, 0x4c, 0x36, 0xc4, 0x12, 0x7b, 0x2a, 0xdf, 0xc4, - 0x63, 0x89, 0x62, 0x4a, 0xed, 0x13, 0x97, 0xd2, 0x96, 0x44, 0x65, 0xac, 0xc8, 0xa0, 0x6d, 0x98, - 0x94, 0x62, 0x80, 0x84, 0x8a, 0x05, 0xf7, 0x74, 0x37, 0xd9, 0xc2, 0x24, 0x7e, 0xfa, 0x60, 0xbf, - 0x32, 0x89, 0x53, 0x64, 0x70, 0x07, 0x61, 0x2a, 0x96, 0xed, 0xd0, 0xa3, 0x75, 0x80, 0x0d, 0x3f, - 0x13, 0xcb, 0x98, 0x84, 
0xc9, 0x4a, 0xed, 0x1f, 0xb5, 0xe0, 0xb1, 0x8e, 0x91, 0x11, 0x92, 0xf6, - 0x31, 0xcf, 0x42, 0x5a, 0xf2, 0x2d, 0xf4, 0x96, 0x7c, 0xed, 0x9f, 0xb5, 0xe0, 0xf4, 0xe2, 0x4e, - 0x2b, 0xde, 0xab, 0x7a, 0xe6, 0xeb, 0xce, 0xcb, 0x30, 0xb4, 0x43, 0x5c, 0xaf, 0xbd, 0x23, 0x66, - 0xae, 0x22, 0x8f, 0x9f, 0x15, 0x56, 0x7a, 0xb8, 0x5f, 0x19, 0xab, 0xc7, 0x41, 0xe8, 0x6c, 0x12, - 0x5e, 0x80, 0x05, 0x3a, 0x3b, 0xc4, 0xbd, 0xf7, 0xc9, 0x2d, 0x6f, 0xc7, 0x93, 0x26, 0x3b, 0x5d, - 0x75, 0x67, 0xb3, 0x72, 0x40, 0x67, 0xdf, 0x6c, 0x3b, 0x7e, 0xec, 0xc5, 0x7b, 0xe2, 0x61, 0x46, - 0x12, 0xc1, 0x09, 0x3d, 0xfb, 0xeb, 0x16, 0x4c, 0xc8, 0x75, 0x3f, 0xe7, 0xba, 0x21, 0x89, 0x22, - 0x34, 0x03, 0x05, 0xaf, 0x25, 0x7a, 0x09, 0xa2, 0x97, 0x85, 0xe5, 0x1a, 0x2e, 0x78, 0x2d, 0x54, - 0x83, 0x32, 0xb7, 0xfc, 0x49, 0x16, 0x57, 0x5f, 0xf6, 0x43, 0xac, 0x07, 0x6b, 0xb2, 0x26, 0x4e, - 0x88, 0x48, 0x0e, 0x8e, 0x9d, 0x99, 0x45, 0xf3, 0xd5, 0xeb, 0x86, 0x28, 0xc7, 0x0a, 0x03, 0x5d, - 0x81, 0x92, 0x1f, 0xb8, 0xdc, 0x10, 0x8b, 0xdf, 0x7e, 0x6c, 0xc9, 0xae, 0x8a, 0x32, 0xac, 0xa0, - 0xf6, 0x0f, 0x5a, 0x30, 0x2a, 0xbf, 0xac, 0x4f, 0x66, 0x92, 0x6e, 0xad, 0x84, 0x91, 0x4c, 0xb6, - 0x16, 0x65, 0x06, 0x19, 0xc4, 0xe0, 0x01, 0x8b, 0x47, 0xe1, 0x01, 0xed, 0x1f, 0x29, 0xc0, 0xb8, - 0xec, 0x4e, 0xbd, 0xbd, 0x1e, 0x91, 0x18, 0xad, 0x41, 0xd9, 0xe1, 0x43, 0x4e, 0xe4, 0x8a, 0x7d, - 0x32, 0x5b, 0xf8, 0x30, 0xe6, 0x27, 0xb9, 0x96, 0xe7, 0x64, 0x6d, 0x9c, 0x10, 0x42, 0x4d, 0x98, - 0xf2, 0x83, 0x98, 0x1d, 0xd1, 0x0a, 0xde, 0xed, 0x09, 0x24, 0x4d, 0xfd, 0x9c, 0xa0, 0x3e, 0xb5, - 0x9a, 0xa6, 0x82, 0x3b, 0x09, 0xa3, 0x45, 0xa9, 0xf0, 0x28, 0xe6, 0x8b, 0x1b, 0xfa, 0x2c, 0x64, - 0xeb, 0x3b, 0xec, 0x5f, 0xb5, 0xa0, 0x2c, 0xd1, 0x4e, 0xe2, 0xb5, 0x6b, 0x05, 0x86, 0x23, 0x36, - 0x09, 0x72, 0x68, 0xec, 0x6e, 0x1d, 0xe7, 0xf3, 0x95, 0xdc, 0x3c, 0xfc, 0x7f, 0x84, 0x25, 0x0d, - 0xa6, 0xef, 0x56, 0xdd, 0xff, 0x88, 0xe8, 0xbb, 0x55, 0x7f, 0x72, 0x6e, 0x98, 0x3f, 0x60, 0x7d, - 0xd6, 0xc4, 0x5a, 0xca, 0x20, 0xb5, 0x42, 0xb2, 0xe1, 0x3d, 0x48, 0x33, 0x48, 0x35, 0x56, 0x8a, - 0x05, 0x14, 0xbd, 0x0d, 0xa3, 0x0d, 0xa9, 0xe8, 0x4c, 0x8e, 0x81, 0xcb, 0x5d, 0x95, 0xee, 0xea, - 0x7d, 0x86, 0x1b, 0x69, 0x2f, 0x68, 0xf5, 0xb1, 0x41, 0xcd, 0x7c, 0x6e, 0x2f, 0xf6, 0x7a, 0x6e, - 0x4f, 0xe8, 0xe6, 0x3f, 0x3e, 0xff, 0x98, 0x05, 0x43, 0x5c, 0x5d, 0xd6, 0x9f, 0x7e, 0x51, 0x7b, - 0xae, 0x4a, 0xc6, 0xee, 0x2e, 0x2d, 0x14, 0xcf, 0x4f, 0x68, 0x05, 0xca, 0xec, 0x07, 0x53, 0x1b, - 0x14, 0xf3, 0xad, 0xd3, 0x79, 0xab, 0x7a, 0x07, 0xef, 0xca, 0x6a, 0x38, 0xa1, 0x60, 0xff, 0x50, - 0x91, 0x1e, 0x55, 0x09, 0xaa, 0x71, 0x83, 0x5b, 0x8f, 0xee, 0x06, 0x2f, 0x3c, 0xaa, 0x1b, 0x7c, - 0x13, 0x26, 0x1a, 0xda, 0xe3, 0x56, 0x32, 0x93, 0x57, 0xba, 0x2e, 0x12, 0xed, 0x1d, 0x8c, 0xab, - 0x8c, 0x16, 0x4c, 0x22, 0x38, 0x4d, 0x15, 0x7d, 0x3b, 0x8c, 0xf2, 0x79, 0x16, 0xad, 0x70, 0x8b, - 0x85, 0x4f, 0xe4, 0xaf, 0x17, 0xbd, 0x09, 0xb6, 0x12, 0xeb, 0x5a, 0x75, 0x6c, 0x10, 0xb3, 0x7f, - 0xa9, 0x04, 0x83, 0x8b, 0xbb, 0xc4, 0x8f, 0x4f, 0xe0, 0x40, 0x6a, 0xc0, 0xb8, 0xe7, 0xef, 0x06, - 0xcd, 0x5d, 0xe2, 0x72, 0xf8, 0x51, 0x2e, 0xd7, 0xb3, 0x82, 0xf4, 0xf8, 0xb2, 0x41, 0x02, 0xa7, - 0x48, 0x3e, 0x0a, 0x09, 0xf3, 0x3a, 0x0c, 0xf1, 0xb9, 0x17, 0xe2, 0x65, 0xa6, 0x32, 0x98, 0x0d, - 0xa2, 0xd8, 0x05, 0x89, 0xf4, 0xcb, 0xb5, 0xcf, 0xa2, 0x3a, 0x7a, 0x17, 0xc6, 0x37, 0xbc, 0x30, - 0x8a, 0xa9, 0x68, 0x18, 0xc5, 0xce, 0x4e, 0xeb, 0x21, 0x24, 0x4a, 0x35, 0x0e, 0x4b, 0x06, 0x25, - 0x9c, 0xa2, 0x8c, 0x36, 0x61, 0x8c, 0x0a, 0x39, 0x49, 0x53, 0xc3, 0x47, 0x6e, 0x4a, 0xa9, 0x8c, - 0x6e, 0xe9, 0x84, 0xb0, 0x49, 0x97, 0x1e, 0x26, 
0x0d, 0x26, 0x14, 0x95, 0x18, 0x47, 0xa1, 0x0e, - 0x13, 0x2e, 0x0d, 0x71, 0x18, 0x3d, 0x93, 0x98, 0xd9, 0x4a, 0xd9, 0x3c, 0x93, 0x34, 0xe3, 0x94, - 0x77, 0xa0, 0x4c, 0xe8, 0x10, 0x52, 0xc2, 0x42, 0x31, 0x7e, 0xb5, 0xbf, 0xbe, 0xae, 0x78, 0x8d, - 0x30, 0x30, 0x65, 0xf9, 0x45, 0x49, 0x09, 0x27, 0x44, 0xd1, 0x02, 0x0c, 0x45, 0x24, 0xf4, 0x48, - 0x24, 0x54, 0xe4, 0x5d, 0xa6, 0x91, 0xa1, 0x71, 0x8b, 0x4f, 0xfe, 0x1b, 0x8b, 0xaa, 0x74, 0x79, - 0x39, 0x4c, 0x1a, 0x62, 0x5a, 0x71, 0x6d, 0x79, 0xcd, 0xb1, 0x52, 0x2c, 0xa0, 0xe8, 0x0d, 0x18, - 0x0e, 0x49, 0x93, 0x29, 0x8b, 0xc6, 0xfa, 0x5f, 0xe4, 0x5c, 0xf7, 0xc4, 0xeb, 0x61, 0x49, 0x00, - 0xdd, 0x04, 0x14, 0x12, 0xca, 0x43, 0x78, 0xfe, 0xa6, 0x32, 0xe6, 0x10, 0xba, 0xee, 0xc7, 0x45, - 0xfb, 0xa7, 0x70, 0x82, 0x21, 0x8d, 0x6f, 0x71, 0x46, 0x35, 0x74, 0x1d, 0xa6, 0x54, 0xe9, 0xb2, - 0x1f, 0xc5, 0x8e, 0xdf, 0x20, 0x4c, 0xcd, 0x5d, 0x4e, 0xb8, 0x22, 0x9c, 0x46, 0xc0, 0x9d, 0x75, - 0xec, 0x9f, 0xa6, 0xec, 0x0c, 0x1d, 0xad, 0x13, 0xe0, 0x05, 0x5e, 0x37, 0x79, 0x81, 0x73, 0xb9, - 0x33, 0x97, 0xc3, 0x07, 0x1c, 0x58, 0x30, 0xa2, 0xcd, 0x6c, 0xb2, 0x66, 0xad, 0x2e, 0x6b, 0xb6, - 0x0d, 0x93, 0x74, 0xa5, 0xdf, 0x5e, 0x8f, 0x48, 0xb8, 0x4b, 0x5c, 0xb6, 0x30, 0x0b, 0x0f, 0xb7, - 0x30, 0xd5, 0x2b, 0xf3, 0xad, 0x14, 0x41, 0xdc, 0xd1, 0x04, 0x7a, 0x59, 0x6a, 0x4e, 0x8a, 0x86, - 0x91, 0x16, 0xd7, 0x8a, 0x1c, 0xee, 0x57, 0x26, 0xb5, 0x0f, 0xd1, 0x35, 0x25, 0xf6, 0x3b, 0xf2, - 0x1b, 0xd5, 0x6b, 0x7e, 0x43, 0x2d, 0x96, 0xd4, 0x6b, 0xbe, 0x5a, 0x0e, 0x38, 0xc1, 0xa1, 0x7b, - 0x94, 0x8a, 0x20, 0xe9, 0xd7, 0x7c, 0x2a, 0xa0, 0x60, 0x06, 0xb1, 0x5f, 0x00, 0x58, 0x7c, 0x40, - 0x1a, 0x7c, 0xa9, 0xeb, 0x0f, 0x90, 0x56, 0xfe, 0x03, 0xa4, 0xfd, 0x1f, 0x2d, 0x18, 0x5f, 0x5a, - 0x30, 0xc4, 0xc4, 0x59, 0x00, 0x2e, 0x1b, 0xdd, 0xbb, 0xb7, 0x2a, 0x75, 0xeb, 0x5c, 0x3d, 0xaa, - 0x4a, 0xb1, 0x86, 0x81, 0xce, 0x41, 0xb1, 0xd9, 0xf6, 0x85, 0xc8, 0x32, 0x7c, 0xb0, 0x5f, 0x29, - 0xde, 0x6a, 0xfb, 0x98, 0x96, 0x69, 0x16, 0x82, 0xc5, 0xbe, 0x2d, 0x04, 0x7b, 0x7a, 0xea, 0xa1, - 0x0a, 0x0c, 0xde, 0xbf, 0xef, 0xb9, 0xdc, 0x1f, 0x42, 0xe8, 0xfd, 0xef, 0xdd, 0x5b, 0xae, 0x46, - 0x98, 0x97, 0xdb, 0x5f, 0x29, 0xc2, 0xcc, 0x52, 0x93, 0x3c, 0xf8, 0x80, 0x3e, 0x21, 0xfd, 0xda, - 0x37, 0x1e, 0x8d, 0x5f, 0x3c, 0xaa, 0x0d, 0x6b, 0xef, 0xf1, 0xd8, 0x80, 0x61, 0xfe, 0x98, 0x2d, - 0x3d, 0x44, 0x5e, 0xcd, 0x6a, 0x3d, 0x7f, 0x40, 0x66, 0xf9, 0xa3, 0xb8, 0x30, 0x70, 0x57, 0x37, - 0xad, 0x28, 0xc5, 0x92, 0xf8, 0xcc, 0x67, 0x60, 0x54, 0xc7, 0x3c, 0x92, 0x35, 0xf9, 0x5f, 0x2a, - 0xc2, 0x24, 0xed, 0xc1, 0x23, 0x9d, 0x88, 0x3b, 0x9d, 0x13, 0x71, 0xdc, 0x16, 0xc5, 0xbd, 0x67, - 0xe3, 0xed, 0xf4, 0x6c, 0x3c, 0x9f, 0x37, 0x1b, 0x27, 0x3d, 0x07, 0xdf, 0x63, 0xc1, 0xa9, 0xa5, - 0x66, 0xd0, 0xd8, 0x4e, 0x59, 0xfd, 0xbe, 0x04, 0x23, 0xf4, 0x1c, 0x8f, 0x0c, 0x87, 0x34, 0xc3, - 0x45, 0x51, 0x80, 0xb0, 0x8e, 0xa7, 0x55, 0xbb, 0x73, 0x67, 0xb9, 0x9a, 0xe5, 0xd9, 0x28, 0x40, - 0x58, 0xc7, 0xb3, 0xbf, 0x66, 0xc1, 0x85, 0xeb, 0x0b, 0x8b, 0xc9, 0x52, 0xec, 0x70, 0xae, 0xa4, - 0x52, 0xa0, 0xab, 0x75, 0x25, 0x91, 0x02, 0xab, 0xac, 0x17, 0x02, 0xfa, 0x51, 0x71, 0x1c, 0xfe, - 0x29, 0x0b, 0x4e, 0x5d, 0xf7, 0x62, 0x7a, 0x2d, 0xa7, 0xdd, 0xfc, 0xe8, 0xbd, 0x1c, 0x79, 0x71, - 0x10, 0xee, 0xa5, 0xdd, 0xfc, 0xb0, 0x82, 0x60, 0x0d, 0x8b, 0xb7, 0xbc, 0xeb, 0x31, 0x33, 0xaa, - 0x82, 0xa9, 0x8a, 0xc2, 0xa2, 0x1c, 0x2b, 0x0c, 0xfa, 0x61, 0xae, 0x17, 0x32, 0x51, 0x62, 0x4f, - 0x9c, 0xb0, 0xea, 0xc3, 0xaa, 0x12, 0x80, 0x13, 0x1c, 0xfb, 0x8f, 0x2c, 0xa8, 0x5c, 0x6f, 0xb6, - 0xa3, 0x98, 0x84, 0x1b, 0x51, 0xce, 0xe9, 0xf8, 0x02, 0x94, 0x89, 0x14, 
0xdc, 0x45, 0xaf, 0x15, - 0xab, 0xa9, 0x24, 0x7a, 0xee, 0x6d, 0xa8, 0xf0, 0xfa, 0xf0, 0x21, 0x38, 0x9a, 0x11, 0xf8, 0x12, - 0x20, 0xa2, 0xb7, 0xa5, 0xbb, 0x5f, 0x32, 0x3f, 0xae, 0xc5, 0x0e, 0x28, 0xce, 0xa8, 0x61, 0xff, - 0xa8, 0x05, 0x67, 0xd4, 0x07, 0x7f, 0xe4, 0x3e, 0xd3, 0xfe, 0xf9, 0x02, 0x8c, 0xdd, 0x58, 0x5b, - 0xab, 0x5d, 0x27, 0xb1, 0xb8, 0xb6, 0x7b, 0xeb, 0xd6, 0xb1, 0xa6, 0x22, 0xec, 0x26, 0x05, 0xb6, - 0x63, 0xaf, 0x39, 0xcb, 0xbd, 0xf8, 0x67, 0x97, 0xfd, 0xf8, 0x76, 0x58, 0x8f, 0x43, 0xcf, 0xdf, - 0xcc, 0x54, 0x2a, 0x4a, 0xe6, 0xa2, 0x98, 0xc7, 0x5c, 0xa0, 0x17, 0x60, 0x88, 0x85, 0x11, 0x90, - 0x93, 0xf0, 0xb8, 0x12, 0xa2, 0x58, 0xe9, 0xe1, 0x7e, 0xa5, 0x7c, 0x07, 0x2f, 0xf3, 0x3f, 0x58, - 0xa0, 0xa2, 0x3b, 0x30, 0xb2, 0x15, 0xc7, 0xad, 0x1b, 0xc4, 0x71, 0x49, 0x28, 0x8f, 0xc3, 0x8b, - 0x59, 0xc7, 0x21, 0x1d, 0x04, 0x8e, 0x96, 0x9c, 0x20, 0x49, 0x59, 0x84, 0x75, 0x3a, 0x76, 0x1d, - 0x20, 0x81, 0x1d, 0x93, 0x42, 0xc5, 0xfe, 0x7d, 0x0b, 0x86, 0xb9, 0x47, 0x67, 0x88, 0x5e, 0x83, - 0x01, 0xf2, 0x80, 0x34, 0x04, 0xab, 0x9c, 0xd9, 0xe1, 0x84, 0xd3, 0xe2, 0xcf, 0x03, 0xf4, 0x3f, - 0x66, 0xb5, 0xd0, 0x0d, 0x18, 0xa6, 0xbd, 0xbd, 0xae, 0xdc, 0x5b, 0x9f, 0xc8, 0xfb, 0x62, 0x35, - 0xed, 0x9c, 0x39, 0x13, 0x45, 0x58, 0x56, 0x67, 0xaa, 0xee, 0x46, 0xab, 0x4e, 0x4f, 0xec, 0xb8, - 0x1b, 0x63, 0xb1, 0xb6, 0x50, 0xe3, 0x48, 0x82, 0x1a, 0x57, 0x75, 0xcb, 0x42, 0x9c, 0x10, 0xb1, - 0xd7, 0xa0, 0x4c, 0x27, 0x75, 0xae, 0xe9, 0x39, 0xdd, 0xb5, 0xec, 0xcf, 0x40, 0x59, 0x6a, 0xbc, - 0x23, 0xe1, 0xc9, 0xc5, 0xa8, 0x4a, 0x85, 0x78, 0x84, 0x13, 0xb8, 0xbd, 0x01, 0xa7, 0x99, 0xa9, - 0x83, 0x13, 0x6f, 0x19, 0x7b, 0xac, 0xf7, 0x62, 0x7e, 0x56, 0x48, 0x9e, 0x7c, 0x66, 0xa6, 0x35, - 0x67, 0x89, 0x51, 0x49, 0x31, 0x91, 0x42, 0xed, 0x3f, 0x1c, 0x80, 0xc7, 0x97, 0xeb, 0xf9, 0xce, - 0xbe, 0xaf, 0xc0, 0x28, 0xe7, 0x4b, 0xe9, 0xd2, 0x76, 0x9a, 0xa2, 0x5d, 0xf5, 0x10, 0xb8, 0xa6, - 0xc1, 0xb0, 0x81, 0x89, 0x2e, 0x40, 0xd1, 0x7b, 0xcf, 0x4f, 0xdb, 0x1d, 0x2f, 0xbf, 0xb9, 0x8a, - 0x69, 0x39, 0x05, 0x53, 0x16, 0x97, 0xdf, 0x1d, 0x0a, 0xac, 0xd8, 0xdc, 0xd7, 0x61, 0xdc, 0x8b, - 0x1a, 0x91, 0xb7, 0xec, 0xd3, 0x73, 0x46, 0x3b, 0xa9, 0x94, 0x56, 0x84, 0x76, 0x5a, 0x41, 0x71, - 0x0a, 0x5b, 0xbb, 0xc8, 0x06, 0xfb, 0x66, 0x93, 0x7b, 0xba, 0x36, 0x51, 0x09, 0xa0, 0xc5, 0xbe, - 0x2e, 0x62, 0x56, 0x7c, 0x42, 0x02, 0xe0, 0x1f, 0x1c, 0x61, 0x09, 0xa3, 0x22, 0x67, 0x63, 0xcb, - 0x69, 0xcd, 0xb5, 0xe3, 0xad, 0xaa, 0x17, 0x35, 0x82, 0x5d, 0x12, 0xee, 0x31, 0x6d, 0x41, 0x29, - 0x11, 0x39, 0x15, 0x60, 0xe1, 0xc6, 0x5c, 0x8d, 0x62, 0xe2, 0xce, 0x3a, 0x26, 0x1b, 0x0c, 0xc7, - 0xc1, 0x06, 0xcf, 0xc1, 0x84, 0x6c, 0xa6, 0x4e, 0x22, 0x76, 0x29, 0x8e, 0xb0, 0x8e, 0x29, 0xdb, - 0x62, 0x51, 0xac, 0xba, 0x95, 0xc6, 0x47, 0x2f, 0xc3, 0x98, 0xe7, 0x7b, 0xb1, 0xe7, 0xc4, 0x41, - 0xc8, 0x58, 0x0a, 0xae, 0x18, 0x60, 0xa6, 0x7b, 0xcb, 0x3a, 0x00, 0x9b, 0x78, 0xf6, 0x7f, 0x1d, - 0x80, 0x29, 0x36, 0x6d, 0xdf, 0x5c, 0x61, 0x1f, 0x99, 0x15, 0x76, 0xa7, 0x73, 0x85, 0x1d, 0x07, - 0x7f, 0xff, 0x61, 0x2e, 0xb3, 0x77, 0xa1, 0xac, 0x8c, 0x9f, 0xa5, 0xf7, 0x83, 0x95, 0xe3, 0xfd, - 0xd0, 0x9b, 0xfb, 0x90, 0xef, 0xd6, 0xc5, 0xcc, 0x77, 0xeb, 0xbf, 0x63, 0x41, 0x62, 0x03, 0x8a, - 0x6e, 0x40, 0xb9, 0x15, 0x30, 0x3b, 0x8b, 0x50, 0x1a, 0x2f, 0x3d, 0x9e, 0x79, 0x51, 0xf1, 0x4b, - 0x91, 0x8f, 0x5f, 0x4d, 0xd6, 0xc0, 0x49, 0x65, 0x34, 0x0f, 0xc3, 0xad, 0x90, 0xd4, 0x63, 0xe6, - 0xf3, 0xdb, 0x93, 0x0e, 0x5f, 0x23, 0x1c, 0x1f, 0xcb, 0x8a, 0xf6, 0x2f, 0x58, 0x00, 0xfc, 0x69, - 0xd8, 0xf1, 0x37, 0xc9, 0x09, 0xa8, 0xbb, 0xab, 0x30, 0x10, 0xb5, 0x48, 0xa3, 0x9b, 0x05, 0x4c, - 
0xd2, 0x9f, 0x7a, 0x8b, 0x34, 0x92, 0x01, 0xa7, 0xff, 0x30, 0xab, 0x6d, 0x7f, 0x2f, 0xc0, 0x78, - 0x82, 0xb6, 0x1c, 0x93, 0x1d, 0xf4, 0x9c, 0xe1, 0x03, 0x78, 0x2e, 0xe5, 0x03, 0x58, 0x66, 0xd8, - 0x9a, 0x66, 0xf5, 0x5d, 0x28, 0xee, 0x38, 0x0f, 0x84, 0xea, 0xec, 0x99, 0xee, 0xdd, 0xa0, 0xf4, - 0x67, 0x57, 0x9c, 0x07, 0x5c, 0x48, 0x7c, 0x46, 0x2e, 0x90, 0x15, 0xe7, 0xc1, 0x21, 0xb7, 0x73, - 0x61, 0x87, 0xd4, 0x2d, 0x2f, 0x8a, 0xbf, 0xf4, 0x5f, 0x92, 0xff, 0x6c, 0xd9, 0xd1, 0x46, 0x58, - 0x5b, 0x9e, 0x2f, 0x1e, 0x4a, 0xfb, 0x6a, 0xcb, 0xf3, 0xd3, 0x6d, 0x79, 0x7e, 0x1f, 0x6d, 0x79, - 0x3e, 0x7a, 0x1f, 0x86, 0x85, 0x51, 0x82, 0xf0, 0xb9, 0xbf, 0xda, 0x47, 0x7b, 0xc2, 0xa6, 0x81, - 0xb7, 0x79, 0x55, 0x0a, 0xc1, 0xa2, 0xb4, 0x67, 0xbb, 0xb2, 0x41, 0xf4, 0x37, 0x2d, 0x18, 0x17, - 0xbf, 0x31, 0x79, 0xaf, 0x4d, 0xa2, 0x58, 0xf0, 0x9e, 0x9f, 0xee, 0xbf, 0x0f, 0xa2, 0x22, 0xef, - 0xca, 0xa7, 0xe5, 0x31, 0x6b, 0x02, 0x7b, 0xf6, 0x28, 0xd5, 0x0b, 0xf4, 0x8f, 0x2c, 0x38, 0xbd, - 0xe3, 0x3c, 0xe0, 0x2d, 0xf2, 0x32, 0xec, 0xc4, 0x5e, 0x20, 0x8c, 0xf5, 0x5f, 0xeb, 0x6f, 0xfa, - 0x3b, 0xaa, 0xf3, 0x4e, 0x4a, 0xbb, 0xde, 0xd3, 0x59, 0x28, 0x3d, 0xbb, 0x9a, 0xd9, 0xaf, 0x99, - 0x0d, 0x28, 0xc9, 0xf5, 0x96, 0xa1, 0x6a, 0xa8, 0xea, 0x8c, 0xf5, 0x91, 0x6d, 0x42, 0x74, 0x47, - 0x3c, 0xda, 0x8e, 0x58, 0x6b, 0x8f, 0xb4, 0x9d, 0x77, 0x61, 0x54, 0x5f, 0x63, 0x8f, 0xb4, 0xad, - 0xf7, 0xe0, 0x54, 0xc6, 0x5a, 0x7a, 0xa4, 0x4d, 0xde, 0x87, 0x73, 0xb9, 0xeb, 0xe3, 0x51, 0x36, - 0x6c, 0xff, 0xbc, 0xa5, 0x9f, 0x83, 0x27, 0xf0, 0xe6, 0xb0, 0x60, 0xbe, 0x39, 0x5c, 0xec, 0xbe, - 0x73, 0x72, 0x1e, 0x1e, 0xde, 0xd6, 0x3b, 0x4d, 0x4f, 0x75, 0xf4, 0x06, 0x0c, 0x35, 0x69, 0x89, - 0xb4, 0x86, 0xb1, 0x7b, 0xef, 0xc8, 0x84, 0x97, 0x62, 0xe5, 0x11, 0x16, 0x14, 0xec, 0x5f, 0xb6, - 0x60, 0xe0, 0x04, 0x46, 0x02, 0x9b, 0x23, 0xf1, 0x5c, 0x2e, 0x69, 0x11, 0x0e, 0x70, 0x16, 0x3b, - 0xf7, 0x17, 0x1f, 0xc4, 0xc4, 0x8f, 0x98, 0xa8, 0x98, 0x39, 0x30, 0xdf, 0x01, 0xa7, 0x6e, 0x05, - 0x8e, 0x3b, 0xef, 0x34, 0x1d, 0xbf, 0x41, 0xc2, 0x65, 0x7f, 0xb3, 0xa7, 0x59, 0x96, 0x6e, 0x44, - 0x55, 0xe8, 0x65, 0x44, 0x65, 0x6f, 0x01, 0xd2, 0x1b, 0x10, 0x86, 0xab, 0x18, 0x86, 0x3d, 0xde, - 0x94, 0x18, 0xfe, 0xa7, 0xb2, 0xb9, 0xbb, 0x8e, 0x9e, 0x69, 0x26, 0x99, 0xbc, 0x00, 0x4b, 0x42, - 0xf6, 0x2b, 0x90, 0xe9, 0xac, 0xd6, 0x5b, 0x6d, 0x60, 0x7f, 0x1e, 0xa6, 0x58, 0xcd, 0x23, 0x8a, - 0xb4, 0x76, 0x4a, 0x2b, 0x99, 0x11, 0x99, 0xc6, 0xfe, 0xb2, 0x05, 0x13, 0xab, 0xa9, 0x80, 0x1d, - 0x97, 0xd9, 0x03, 0x68, 0x86, 0x32, 0xbc, 0xce, 0x4a, 0xb1, 0x80, 0x1e, 0xbb, 0x0e, 0xea, 0xcf, - 0x2d, 0x48, 0xfc, 0x47, 0x4f, 0x80, 0xf1, 0x5a, 0x30, 0x18, 0xaf, 0x4c, 0xdd, 0x88, 0xea, 0x4e, - 0x1e, 0xdf, 0x85, 0x6e, 0xaa, 0x60, 0x09, 0x5d, 0xd4, 0x22, 0x09, 0x19, 0xee, 0x5a, 0x3f, 0x6e, - 0x46, 0x54, 0x90, 0xe1, 0x13, 0x98, 0xed, 0x94, 0xc2, 0xfd, 0x88, 0xd8, 0x4e, 0xa9, 0xfe, 0xe4, - 0xec, 0xd0, 0x9a, 0xd6, 0x65, 0x76, 0x72, 0x7d, 0x2b, 0xb3, 0x85, 0x77, 0x9a, 0xde, 0xfb, 0x44, - 0x45, 0x7c, 0xa9, 0x08, 0xdb, 0x76, 0x51, 0x7a, 0xb8, 0x5f, 0x19, 0x53, 0xff, 0x78, 0x84, 0xb9, - 0xa4, 0x8a, 0x7d, 0x03, 0x26, 0x52, 0x03, 0x86, 0x5e, 0x82, 0xc1, 0xd6, 0x96, 0x13, 0x91, 0x94, - 0xbd, 0xe8, 0x60, 0x8d, 0x16, 0x1e, 0xee, 0x57, 0xc6, 0x55, 0x05, 0x56, 0x82, 0x39, 0xb6, 0xfd, - 0x3f, 0x2d, 0x18, 0x58, 0x0d, 0xdc, 0x93, 0x58, 0x4c, 0xaf, 0x1b, 0x8b, 0xe9, 0x7c, 0x5e, 0x7c, - 0xce, 0xdc, 0x75, 0xb4, 0x94, 0x5a, 0x47, 0x17, 0x73, 0x29, 0x74, 0x5f, 0x42, 0x3b, 0x30, 0xc2, - 0xa2, 0x7e, 0x0a, 0xfb, 0xd5, 0x17, 0x0c, 0x19, 0xa0, 0x92, 0x92, 0x01, 0x26, 0x34, 0x54, 0x4d, - 0x12, 0x78, 0x1a, 0x86, 
0x85, 0x0d, 0x65, 0xda, 0xea, 0x5f, 0xe0, 0x62, 0x09, 0xb7, 0x7f, 0xac, - 0x08, 0x46, 0x94, 0x51, 0xf4, 0xab, 0x16, 0xcc, 0x86, 0xdc, 0x8d, 0xd2, 0xad, 0xb6, 0x43, 0xcf, - 0xdf, 0xac, 0x37, 0xb6, 0x88, 0xdb, 0x6e, 0x7a, 0xfe, 0xe6, 0xf2, 0xa6, 0x1f, 0xa8, 0xe2, 0xc5, - 0x07, 0xa4, 0xd1, 0x66, 0x0f, 0x21, 0x3d, 0x42, 0x9a, 0x2a, 0x1b, 0xa5, 0x6b, 0x07, 0xfb, 0x95, - 0x59, 0x7c, 0x24, 0xda, 0xf8, 0x88, 0x7d, 0x41, 0x5f, 0xb3, 0xe0, 0x2a, 0x0f, 0xbe, 0xd9, 0x7f, - 0xff, 0xbb, 0x48, 0x4c, 0x35, 0x49, 0x2a, 0x21, 0xb2, 0x46, 0xc2, 0x9d, 0xf9, 0x97, 0xc5, 0x80, - 0x5e, 0xad, 0x1d, 0xad, 0x2d, 0x7c, 0xd4, 0xce, 0xd9, 0xff, 0xaa, 0x08, 0x63, 0xc2, 0x83, 0x5f, - 0x84, 0x86, 0x79, 0xc9, 0x58, 0x12, 0x4f, 0xa4, 0x96, 0xc4, 0x94, 0x81, 0x7c, 0x3c, 0x51, 0x61, - 0x22, 0x98, 0x6a, 0x3a, 0x51, 0x7c, 0x83, 0x38, 0x61, 0xbc, 0x4e, 0x1c, 0x6e, 0xbb, 0x53, 0x3c, - 0xb2, 0x9d, 0x91, 0x52, 0xd1, 0xdc, 0x4a, 0x13, 0xc3, 0x9d, 0xf4, 0xd1, 0x2e, 0x20, 0x66, 0x80, - 0x14, 0x3a, 0x7e, 0xc4, 0xbf, 0xc5, 0x13, 0x6f, 0x06, 0x47, 0x6b, 0x75, 0x46, 0xb4, 0x8a, 0x6e, - 0x75, 0x50, 0xc3, 0x19, 0x2d, 0x68, 0x86, 0x65, 0x83, 0xfd, 0x1a, 0x96, 0x0d, 0xf5, 0x70, 0xad, - 0xf1, 0x61, 0xb2, 0x23, 0x08, 0xc3, 0x5b, 0x50, 0x56, 0x06, 0x80, 0xe2, 0xd0, 0xe9, 0x1e, 0xcb, - 0x24, 0x4d, 0x81, 0xab, 0x51, 0x12, 0xe3, 0xd3, 0x84, 0x9c, 0xfd, 0x8f, 0x0b, 0x46, 0x83, 0x7c, - 0x12, 0x57, 0xa1, 0xe4, 0x44, 0x91, 0xb7, 0xe9, 0x13, 0x57, 0xec, 0xd8, 0x8f, 0xe7, 0xed, 0x58, - 0xa3, 0x19, 0x66, 0x84, 0x39, 0x27, 0x6a, 0x62, 0x45, 0x03, 0xdd, 0xe0, 0x16, 0x52, 0xbb, 0x92, - 0xe7, 0xef, 0x8f, 0x1a, 0x48, 0x1b, 0xaa, 0x5d, 0x82, 0x45, 0x7d, 0xf4, 0x05, 0x6e, 0xc2, 0x76, - 0xd3, 0x0f, 0xee, 0xfb, 0xd7, 0x83, 0x40, 0xba, 0xdd, 0xf5, 0x47, 0x70, 0x4a, 0x1a, 0xae, 0xa9, - 0xea, 0xd8, 0xa4, 0xd6, 0x5f, 0xa0, 0xa2, 0xef, 0x84, 0x53, 0x94, 0xb4, 0xe9, 0x3c, 0x13, 0x21, - 0x02, 0x13, 0x22, 0x3c, 0x84, 0x2c, 0x13, 0x63, 0x97, 0xc9, 0xce, 0x9b, 0xb5, 0x13, 0xa5, 0xdf, - 0x4d, 0x93, 0x04, 0x4e, 0xd3, 0xb4, 0x7f, 0xd2, 0x02, 0x66, 0xf6, 0x7f, 0x02, 0x2c, 0xc3, 0x67, - 0x4d, 0x96, 0x61, 0x3a, 0x6f, 0x90, 0x73, 0xb8, 0x85, 0x17, 0xf9, 0xca, 0xaa, 0x85, 0xc1, 0x83, - 0x3d, 0x61, 0x3e, 0xd0, 0x9b, 0x93, 0xb5, 0xff, 0x8f, 0xc5, 0x0f, 0x31, 0xe5, 0x89, 0x8f, 0xbe, - 0x0b, 0x4a, 0x0d, 0xa7, 0xe5, 0x34, 0x78, 0x48, 0xec, 0x5c, 0xad, 0x8e, 0x51, 0x69, 0x76, 0x41, - 0xd4, 0xe0, 0x5a, 0x0a, 0x19, 0x66, 0xa4, 0x24, 0x8b, 0x7b, 0x6a, 0x26, 0x54, 0x93, 0x33, 0xdb, - 0x30, 0x66, 0x10, 0x7b, 0xa4, 0x22, 0xed, 0x77, 0xf1, 0x2b, 0x56, 0x85, 0xc5, 0xd9, 0x81, 0x29, - 0x5f, 0xfb, 0x4f, 0x2f, 0x14, 0x29, 0xa6, 0x7c, 0xbc, 0xd7, 0x25, 0xca, 0x6e, 0x1f, 0xcd, 0xad, - 0x21, 0x45, 0x06, 0x77, 0x52, 0xb6, 0x7f, 0xdc, 0x82, 0xc7, 0x74, 0x44, 0x2d, 0x48, 0x42, 0x2f, - 0x3d, 0x71, 0x15, 0x4a, 0x41, 0x8b, 0x84, 0x4e, 0x1c, 0x84, 0xe2, 0xd6, 0xb8, 0x22, 0x07, 0xfd, - 0xb6, 0x28, 0x3f, 0x14, 0x01, 0x25, 0x25, 0x75, 0x59, 0x8e, 0x55, 0x4d, 0x2a, 0xc7, 0xb0, 0xc1, - 0x88, 0x44, 0x00, 0x0b, 0x76, 0x06, 0xb0, 0x27, 0xd3, 0x08, 0x0b, 0x88, 0xfd, 0x87, 0x16, 0x5f, - 0x58, 0x7a, 0xd7, 0xd1, 0x7b, 0x30, 0xb9, 0xe3, 0xc4, 0x8d, 0xad, 0xc5, 0x07, 0xad, 0x90, 0xab, - 0xc7, 0xe5, 0x38, 0x3d, 0xd3, 0x6b, 0x9c, 0xb4, 0x8f, 0x4c, 0xac, 0xf2, 0x56, 0x52, 0xc4, 0x70, - 0x07, 0x79, 0xb4, 0x0e, 0x23, 0xac, 0x8c, 0x99, 0x7f, 0x47, 0xdd, 0x58, 0x83, 0xbc, 0xd6, 0xd4, - 0xab, 0xf3, 0x4a, 0x42, 0x07, 0xeb, 0x44, 0xed, 0x2f, 0x15, 0xf9, 0x6e, 0x67, 0xdc, 0xf6, 0xd3, - 0x30, 0xdc, 0x0a, 0xdc, 0x85, 0xe5, 0x2a, 0x16, 0xb3, 0xa0, 0xae, 0x91, 0x1a, 0x2f, 0xc6, 0x12, - 0x8e, 0x5e, 0x05, 0x20, 0x0f, 0x62, 0x12, 0xfa, 
0x4e, 0x53, 0x59, 0xc9, 0x28, 0xbb, 0xd0, 0x6a, - 0xb0, 0x1a, 0xc4, 0x77, 0x22, 0xf2, 0x1d, 0x8b, 0x0a, 0x05, 0x6b, 0xe8, 0xe8, 0x1a, 0x40, 0x2b, - 0x0c, 0x76, 0x3d, 0x97, 0xf9, 0x13, 0x16, 0x4d, 0x1b, 0x92, 0x9a, 0x82, 0x60, 0x0d, 0x0b, 0xbd, - 0x0a, 0x63, 0x6d, 0x3f, 0xe2, 0x1c, 0x8a, 0xb3, 0x2e, 0xc2, 0x31, 0x96, 0x12, 0xeb, 0x86, 0x3b, - 0x3a, 0x10, 0x9b, 0xb8, 0x68, 0x0e, 0x86, 0x62, 0x87, 0xd9, 0x44, 0x0c, 0xe6, 0x1b, 0x73, 0xae, - 0x51, 0x0c, 0x3d, 0x20, 0x33, 0xad, 0x80, 0x45, 0x45, 0xf4, 0x96, 0x74, 0xce, 0xe0, 0x67, 0xbd, - 0xb0, 0xa2, 0xee, 0xef, 0x5e, 0xd0, 0x5c, 0x33, 0x84, 0x75, 0xb6, 0x41, 0xcb, 0xfe, 0x5a, 0x19, - 0x20, 0x61, 0xc7, 0xd1, 0xfb, 0x1d, 0xe7, 0xd1, 0xb3, 0xdd, 0x19, 0xf8, 0xe3, 0x3b, 0x8c, 0xd0, - 0xf7, 0x59, 0x30, 0xe2, 0x34, 0x9b, 0x41, 0xc3, 0x89, 0xd9, 0x28, 0x17, 0xba, 0x9f, 0x87, 0xa2, - 0xfd, 0xb9, 0xa4, 0x06, 0xef, 0xc2, 0x0b, 0x72, 0xe1, 0x69, 0x90, 0x9e, 0xbd, 0xd0, 0x1b, 0x46, - 0x9f, 0x92, 0x52, 0x1a, 0x5f, 0x1e, 0x33, 0x69, 0x29, 0xad, 0xcc, 0x8e, 0x7e, 0x4d, 0x40, 0x43, - 0x77, 0x8c, 0x48, 0x7b, 0x03, 0xf9, 0x41, 0x27, 0x0c, 0xae, 0xb4, 0x57, 0x90, 0x3d, 0x54, 0xd3, - 0xbd, 0xc9, 0x06, 0xf3, 0x23, 0xb3, 0x68, 0xe2, 0x4f, 0x0f, 0x4f, 0xb2, 0x77, 0x61, 0xc2, 0x35, - 0xef, 0x76, 0xb1, 0x9a, 0x9e, 0xca, 0xa3, 0x9b, 0x62, 0x05, 0x92, 0xdb, 0x3c, 0x05, 0xc0, 0x69, - 0xc2, 0xa8, 0xc6, 0xfd, 0xfa, 0x96, 0xfd, 0x8d, 0x40, 0x58, 0xe3, 0xdb, 0xb9, 0x73, 0xb9, 0x17, - 0xc5, 0x64, 0x87, 0x62, 0x26, 0x97, 0xf6, 0xaa, 0xa8, 0x8b, 0x15, 0x15, 0xf4, 0x06, 0x0c, 0x31, - 0xc7, 0xe0, 0x68, 0xba, 0x94, 0xaf, 0x4c, 0x34, 0x63, 0x5a, 0x24, 0x9b, 0x8a, 0xfd, 0x8d, 0xb0, - 0xa0, 0x80, 0x6e, 0xc8, 0xc0, 0x37, 0xd1, 0xb2, 0x7f, 0x27, 0x22, 0x2c, 0xf0, 0x4d, 0x79, 0xfe, - 0xe3, 0x49, 0x4c, 0x1b, 0x5e, 0x9e, 0x99, 0x7a, 0xc1, 0xa8, 0x49, 0x99, 0x23, 0xf1, 0x5f, 0x66, - 0x74, 0x98, 0x86, 0xfc, 0xee, 0x99, 0x59, 0x1f, 0x92, 0xe1, 0xbc, 0x6b, 0x92, 0xc0, 0x69, 0x9a, - 0x94, 0xd1, 0xe4, 0x3b, 0x57, 0xd8, 0xf3, 0xf7, 0xda, 0xff, 0x5c, 0xbe, 0x66, 0x97, 0x0c, 0x2f, - 0xc1, 0xa2, 0xfe, 0x89, 0xde, 0xfa, 0x33, 0x3e, 0x4c, 0xa6, 0xb7, 0xe8, 0x23, 0xe5, 0x32, 0x7e, - 0x7f, 0x00, 0xc6, 0xcd, 0x25, 0x85, 0xae, 0x42, 0x59, 0x10, 0x51, 0x51, 0x58, 0xd5, 0x2e, 0x59, - 0x91, 0x00, 0x9c, 0xe0, 0xb0, 0xe0, 0xbb, 0xac, 0xba, 0x66, 0x87, 0x99, 0x04, 0xdf, 0x55, 0x10, - 0xac, 0x61, 0x51, 0x79, 0x69, 0x3d, 0x08, 0x62, 0x75, 0xa9, 0xa8, 0x75, 0x37, 0xcf, 0x4a, 0xb1, - 0x80, 0xd2, 0xcb, 0x64, 0x9b, 0x84, 0x3e, 0x69, 0x9a, 0xc1, 0xdd, 0xd4, 0x65, 0x72, 0x53, 0x07, - 0x62, 0x13, 0x97, 0xde, 0x92, 0x41, 0xc4, 0x16, 0xb2, 0x90, 0xca, 0x12, 0xbb, 0xd6, 0x3a, 0x77, - 0xb1, 0x97, 0x70, 0xf4, 0x79, 0x78, 0x4c, 0x79, 0xc4, 0x63, 0xae, 0xa8, 0x96, 0x2d, 0x0e, 0x19, - 0x4a, 0x94, 0xc7, 0x16, 0xb2, 0xd1, 0x70, 0x5e, 0x7d, 0xf4, 0x3a, 0x8c, 0x0b, 0xce, 0x5d, 0x52, - 0x1c, 0x36, 0x6d, 0x27, 0x6e, 0x1a, 0x50, 0x9c, 0xc2, 0x96, 0xe1, 0xe9, 0x18, 0xf3, 0x2c, 0x29, - 0x94, 0x3a, 0xc3, 0xd3, 0xe9, 0x70, 0xdc, 0x51, 0x03, 0xcd, 0xc1, 0x04, 0x67, 0xad, 0x3c, 0x7f, - 0x93, 0xcf, 0x89, 0x70, 0xb7, 0x51, 0x5b, 0xea, 0xb6, 0x09, 0xc6, 0x69, 0x7c, 0xf4, 0x0a, 0x8c, - 0x3a, 0x61, 0x63, 0xcb, 0x8b, 0x49, 0x23, 0x6e, 0x87, 0xdc, 0x0f, 0x47, 0x33, 0x3e, 0x99, 0xd3, - 0x60, 0xd8, 0xc0, 0xb4, 0xdf, 0x87, 0x53, 0x19, 0x9e, 0x7a, 0x74, 0xe1, 0x38, 0x2d, 0x4f, 0x7e, - 0x53, 0xca, 0x42, 0x75, 0xae, 0xb6, 0x2c, 0xbf, 0x46, 0xc3, 0xa2, 0xab, 0x93, 0x79, 0xf4, 0x69, - 0x09, 0x5c, 0xd4, 0xea, 0x5c, 0x92, 0x00, 0x9c, 0xe0, 0xd8, 0xff, 0xab, 0x00, 0x13, 0x19, 0xca, - 0x77, 0x96, 0x44, 0x24, 0x25, 0x7b, 0x24, 0x39, 0x43, 0xcc, 0x68, 0x87, 
-	[old gzipped FileDescriptorProto byte array elided]
+	// 12780 bytes of a gzipped FileDescriptorProto
+	[new gzipped FileDescriptorProto byte array elided]
0xff, 0x5f, 0x8b, 0x1f, 0x62, 0xca, 0x31, 0x1d, + 0x7d, 0x0f, 0x14, 0x1b, 0x4e, 0xdb, 0x69, 0xf0, 0xb0, 0xbe, 0x99, 0x5a, 0x1d, 0xa3, 0xd2, 0xdc, + 0x92, 0xa8, 0xc1, 0xb5, 0x14, 0x32, 0xea, 0x46, 0x51, 0x16, 0xf7, 0xd5, 0x4c, 0xa8, 0x26, 0x67, + 0x77, 0x60, 0xdc, 0x20, 0xf6, 0x48, 0x45, 0xda, 0xef, 0xe1, 0x57, 0xac, 0x8a, 0x12, 0xb3, 0x0b, + 0xd3, 0x9e, 0xf6, 0x9f, 0x5e, 0x28, 0x52, 0x4c, 0xf9, 0x78, 0xbf, 0x4b, 0x94, 0xdd, 0x3e, 0x9a, + 0x95, 0x7f, 0x82, 0x0c, 0xee, 0xa6, 0x6c, 0xff, 0x84, 0x05, 0x8f, 0xe9, 0x88, 0x5a, 0xcc, 0x80, + 0x7e, 0x7a, 0xe2, 0x0a, 0x14, 0xfd, 0x36, 0x09, 0x9c, 0xc8, 0x0f, 0xc4, 0xad, 0x71, 0x55, 0x0e, + 0xfa, 0x6d, 0x51, 0x7e, 0x24, 0xe2, 0x2b, 0x4a, 0xea, 0xb2, 0x1c, 0xab, 0x9a, 0x54, 0x8e, 0x61, + 0x83, 0x11, 0x8a, 0x78, 0x0e, 0xec, 0x0c, 0x60, 0x4f, 0xa6, 0x21, 0x16, 0x10, 0xfb, 0x0f, 0x2d, + 0xbe, 0xb0, 0xf4, 0xae, 0xa3, 0xf7, 0x60, 0x6a, 0xd7, 0x89, 0x1a, 0xdb, 0xcb, 0x0f, 0xda, 0x01, + 0x57, 0x8f, 0xcb, 0x71, 0x7a, 0xb6, 0xdf, 0x38, 0x69, 0x1f, 0x19, 0x1b, 0xa9, 0xad, 0x26, 0x88, + 0xe1, 0x2e, 0xf2, 0x68, 0x03, 0x46, 0x59, 0x19, 0xb3, 0x86, 0x0e, 0x7b, 0xb1, 0x06, 0x59, 0xad, + 0xa9, 0x57, 0xe7, 0xd5, 0x98, 0x0e, 0xd6, 0x89, 0xda, 0x5f, 0xce, 0xf3, 0xdd, 0xce, 0xb8, 0xed, + 0x67, 0x60, 0xa4, 0xed, 0x37, 0x97, 0xaa, 0x15, 0x2c, 0x66, 0x41, 0x5d, 0x23, 0x35, 0x5e, 0x8c, + 0x25, 0x1c, 0xbd, 0x06, 0x40, 0x1e, 0x44, 0x24, 0xf0, 0x9c, 0x96, 0x32, 0x1a, 0x51, 0x66, 0x92, + 0x15, 0x7f, 0xcd, 0x8f, 0xee, 0x84, 0xe4, 0xbb, 0x96, 0x15, 0x0a, 0xd6, 0xd0, 0xd1, 0x35, 0x80, + 0x76, 0xe0, 0xef, 0xb9, 0x4d, 0xe6, 0x5e, 0x97, 0x37, 0x4d, 0x2a, 0x6a, 0x0a, 0x82, 0x35, 0x2c, + 0xf4, 0x1a, 0x8c, 0x77, 0xbc, 0x90, 0x73, 0x28, 0xce, 0x86, 0x88, 0x4e, 0x58, 0x8c, 0xad, 0x1b, + 0xee, 0xe8, 0x40, 0x6c, 0xe2, 0xa2, 0x05, 0x18, 0x8e, 0x1c, 0x66, 0x13, 0x31, 0x94, 0x6d, 0xdb, + 0xb8, 0x4e, 0x31, 0xf4, 0xa0, 0xb2, 0xb4, 0x02, 0x16, 0x15, 0xd1, 0xdb, 0xd2, 0x57, 0x81, 0x9f, + 0xf5, 0xc2, 0xa8, 0x78, 0xb0, 0x7b, 0x41, 0xf3, 0x54, 0x10, 0xc6, 0xca, 0x06, 0x2d, 0xfb, 0xeb, + 0x25, 0x80, 0x98, 0x1d, 0x47, 0xef, 0x77, 0x9d, 0x47, 0xcf, 0xf5, 0x66, 0xe0, 0x4f, 0xee, 0x30, + 0x42, 0xdf, 0x6f, 0xc1, 0xa8, 0xd3, 0x6a, 0xf9, 0x0d, 0x27, 0x62, 0xa3, 0x9c, 0xeb, 0x7d, 0x1e, + 0x8a, 0xf6, 0x17, 0xe2, 0x1a, 0xbc, 0x0b, 0x2f, 0xca, 0x85, 0xa7, 0x41, 0xfa, 0xf6, 0x42, 0x6f, + 0x18, 0x7d, 0x4a, 0x4a, 0x69, 0x7c, 0x79, 0xcc, 0x26, 0xa5, 0xb4, 0x12, 0x3b, 0xfa, 0x35, 0x01, + 0x0d, 0xdd, 0x31, 0x02, 0xcf, 0x15, 0xb2, 0x63, 0x30, 0x18, 0x5c, 0x69, 0xbf, 0x98, 0x73, 0xa8, + 0xa6, 0x3b, 0x57, 0x0d, 0x65, 0x07, 0x2a, 0xd1, 0xc4, 0x9f, 0x3e, 0x8e, 0x55, 0xef, 0xc2, 0x64, + 0xd3, 0xbc, 0xdb, 0xc5, 0x6a, 0x7a, 0x3a, 0x8b, 0x6e, 0x82, 0x15, 0x88, 0x6f, 0xf3, 0x04, 0x00, + 0x27, 0x09, 0xa3, 0x1a, 0x77, 0x73, 0xab, 0x7a, 0x9b, 0xbe, 0x30, 0x4e, 0xb7, 0x33, 0xe7, 0x72, + 0x3f, 0x8c, 0xc8, 0x2e, 0xc5, 0x8c, 0x2f, 0xed, 0x35, 0x51, 0x17, 0x2b, 0x2a, 0xe8, 0x4d, 0x18, + 0x66, 0x7e, 0xb2, 0xe1, 0x4c, 0x31, 0x5b, 0x99, 0x68, 0x86, 0x78, 0x88, 0x37, 0x15, 0xfb, 0x1b, + 0x62, 0x41, 0x01, 0xdd, 0x90, 0x71, 0x60, 0xc2, 0xaa, 0x77, 0x27, 0x24, 0x2c, 0x0e, 0x4c, 0x69, + 0xf1, 0xe3, 0x71, 0x88, 0x17, 0x5e, 0x9e, 0x1a, 0x3e, 0xde, 0xa8, 0x49, 0x99, 0x23, 0xf1, 0x5f, + 0x46, 0xa5, 0x9f, 0x81, 0xec, 0xee, 0x99, 0x91, 0xeb, 0xe3, 0xe1, 0xbc, 0x6b, 0x92, 0xc0, 0x49, + 0x9a, 0x94, 0xd1, 0xe4, 0x3b, 0x57, 0x98, 0xb7, 0xf7, 0xdb, 0xff, 0x5c, 0xbe, 0x66, 0x97, 0x0c, + 0x2f, 0xc1, 0xa2, 0xfe, 0xa9, 0xde, 0xfa, 0xb3, 0x1e, 0x4c, 0x25, 0xb7, 0xe8, 0x23, 0xe5, 0x32, + 0xfe, 0xa0, 0x00, 0x13, 0xe6, 0x92, 0x42, 0xf3, 0x50, 0x12, 0x44, 0x54, 
0x50, 0x52, 0xb5, 0x4b, + 0x56, 0x25, 0x00, 0xc7, 0x38, 0x2c, 0x16, 0x2d, 0xab, 0xae, 0x99, 0x25, 0xc6, 0xb1, 0x68, 0x15, + 0x04, 0x6b, 0x58, 0x54, 0x5e, 0xda, 0xf0, 0xfd, 0x48, 0x5d, 0x2a, 0x6a, 0xdd, 0x2d, 0xb2, 0x52, + 0x2c, 0xa0, 0xf4, 0x32, 0xd9, 0x21, 0x81, 0x47, 0x5a, 0x66, 0xac, 0x33, 0x75, 0x99, 0xdc, 0xd4, + 0x81, 0xd8, 0xc4, 0xa5, 0xb7, 0xa4, 0x1f, 0xb2, 0x85, 0x2c, 0xa4, 0xb2, 0xd8, 0xcc, 0xb3, 0xce, + 0x3d, 0xce, 0x25, 0x1c, 0x7d, 0x1e, 0x1e, 0x53, 0x0e, 0xe2, 0x98, 0x2b, 0xaa, 0x65, 0x8b, 0xc3, + 0x86, 0x12, 0xe5, 0xb1, 0xa5, 0x74, 0x34, 0x9c, 0x55, 0x1f, 0xbd, 0x01, 0x13, 0x82, 0x73, 0x97, + 0x14, 0x47, 0x4c, 0xdb, 0x89, 0x9b, 0x06, 0x14, 0x27, 0xb0, 0x65, 0xb4, 0x36, 0xc6, 0x3c, 0x4b, + 0x0a, 0xc5, 0xee, 0x68, 0x6d, 0x3a, 0x1c, 0x77, 0xd5, 0x40, 0x0b, 0x30, 0xc9, 0x59, 0x2b, 0xd7, + 0xdb, 0xe2, 0x73, 0x22, 0xbc, 0x4f, 0xd4, 0x96, 0xba, 0x6d, 0x82, 0x71, 0x12, 0x1f, 0xbd, 0x0a, + 0x63, 0x4e, 0xd0, 0xd8, 0x76, 0x23, 0xd2, 0x88, 0x3a, 0x01, 0x77, 0x4b, 0xd1, 0x8c, 0x4f, 0x16, + 0x34, 0x18, 0x36, 0x30, 0xed, 0xf7, 0xe1, 0x4c, 0x8a, 0xe3, 0x1a, 0x5d, 0x38, 0x4e, 0xdb, 0x95, + 0xdf, 0x94, 0x30, 0xd8, 0x5c, 0xa8, 0x55, 0xe5, 0xd7, 0x68, 0x58, 0x74, 0x75, 0x32, 0x07, 0x37, + 0x2d, 0x09, 0x85, 0x5a, 0x9d, 0x2b, 0x12, 0x80, 0x63, 0x1c, 0xfb, 0x7f, 0xe5, 0x60, 0x32, 0x45, + 0xf9, 0xce, 0x12, 0x21, 0x24, 0x64, 0x8f, 0x38, 0xef, 0x81, 0x19, 0xfc, 0x2f, 0x77, 0x8c, 0xe0, + 0x7f, 0xf9, 0x7e, 0xc1, 0xff, 0x0a, 0x1f, 0x24, 0xf8, 0x9f, 0x39, 0x62, 0x43, 0x03, 0x8d, 0x58, + 0x4a, 0xc0, 0xc0, 0xe1, 0x63, 0x06, 0x0c, 0x34, 0x06, 0x7d, 0x64, 0x80, 0x41, 0xff, 0xe1, 0x1c, + 0x4c, 0x25, 0x8d, 0xe4, 0x4e, 0x41, 0x1d, 0xfb, 0xa6, 0xa1, 0x8e, 0x4d, 0x4f, 0x2b, 0x92, 0x34, + 0xdd, 0xcb, 0x52, 0xcd, 0xe2, 0x84, 0x6a, 0xf6, 0x93, 0x03, 0x51, 0xeb, 0xad, 0xa6, 0xfd, 0x3b, + 0x39, 0x38, 0x97, 0xac, 0xb2, 0xd4, 0x72, 0xdc, 0xdd, 0x53, 0x18, 0x9b, 0xdb, 0xc6, 0xd8, 0x3c, + 0x3f, 0xc8, 0xd7, 0xb0, 0xae, 0x65, 0x0e, 0xd0, 0xbd, 0xc4, 0x00, 0xcd, 0x0f, 0x4e, 0xb2, 0xf7, + 0x28, 0x7d, 0x23, 0x0f, 0x97, 0x52, 0xeb, 0xc5, 0xda, 0xcc, 0x15, 0x43, 0x9b, 0x79, 0x2d, 0xa1, + 0xcd, 0xb4, 0x7b, 0xd7, 0x3e, 0x19, 0xf5, 0xa6, 0xf0, 0x28, 0x64, 0x01, 0xe2, 0x1e, 0x52, 0xb5, + 0x69, 0x78, 0x14, 0x2a, 0x42, 0xd8, 0xa4, 0xfb, 0xcd, 0xa4, 0xd2, 0xfc, 0x57, 0x16, 0x5c, 0x48, + 0x9d, 0x9b, 0x53, 0x50, 0x61, 0xad, 0x99, 0x2a, 0xac, 0x67, 0x06, 0x5e, 0xad, 0x19, 0x3a, 0xad, + 0xdf, 0x2c, 0x64, 0x7c, 0x0b, 0x13, 0xd0, 0x6f, 0xc3, 0xa8, 0xd3, 0x68, 0x90, 0x30, 0x5c, 0xf5, + 0x9b, 0x2a, 0x60, 0xda, 0xf3, 0x4c, 0xce, 0x8a, 0x8b, 0x8f, 0x0e, 0xca, 0xb3, 0x49, 0x12, 0x31, + 0x18, 0xeb, 0x14, 0xcc, 0x18, 0x8f, 0xb9, 0x13, 0x8d, 0xf1, 0x78, 0x0d, 0x60, 0x4f, 0x71, 0xeb, + 0x49, 0x21, 0x5f, 0xe3, 0xe3, 0x35, 0x2c, 0xf4, 0x05, 0x28, 0x86, 0xe2, 0x1a, 0x17, 0x4b, 0xf1, + 0xc5, 0x01, 0xe7, 0xca, 0xd9, 0x20, 0x2d, 0xd3, 0x75, 0x5d, 0xe9, 0x43, 0x14, 0x49, 0xf4, 0x1d, + 0x30, 0x15, 0xf2, 0xc8, 0x28, 0x4b, 0x2d, 0x27, 0x64, 0x7e, 0x10, 0x62, 0x15, 0x32, 0x7f, 0xf4, + 0x7a, 0x02, 0x86, 0xbb, 0xb0, 0xd1, 0x8a, 0xfc, 0x28, 0x16, 0xc6, 0x85, 0x2f, 0xcc, 0x2b, 0xf1, + 0x07, 0x89, 0x34, 0x4c, 0x67, 0x93, 0xc3, 0xcf, 0x06, 0x5e, 0xab, 0x89, 0xbe, 0x00, 0x40, 0x97, + 0x8f, 0xd0, 0x25, 0x8c, 0x64, 0x1f, 0x9e, 0xf4, 0x54, 0x69, 0xa6, 0x5a, 0x7e, 0x32, 0x5f, 0xbe, + 0x8a, 0x22, 0x82, 0x35, 0x82, 0xf6, 0x0f, 0x17, 0xe0, 0xf1, 0x1e, 0x67, 0x24, 0x5a, 0x30, 0x9f, + 0x40, 0x9f, 0x4d, 0x0a, 0xd7, 0xb3, 0xa9, 0x95, 0x0d, 0x69, 0x3b, 0xb1, 0x14, 0x73, 0x1f, 0x78, + 0x29, 0xfe, 0x80, 0xa5, 0xa9, 0x3d, 0xb8, 0x31, 0xdf, 0x67, 0x8f, 0x79, 0xf6, 0x9f, 0xa0, 0x1e, + 
0x64, 0x33, 0x45, 0x99, 0x70, 0x6d, 0xe0, 0xee, 0x0c, 0xac, 0x5d, 0x38, 0x5d, 0xe5, 0xef, 0x97, + 0x2d, 0x78, 0x32, 0xb5, 0xbf, 0x86, 0xc9, 0xc6, 0x3c, 0x94, 0x1a, 0xb4, 0x50, 0x73, 0xdd, 0x8a, + 0x7d, 0x5a, 0x25, 0x00, 0xc7, 0x38, 0x86, 0x65, 0x46, 0xae, 0xaf, 0x65, 0xc6, 0x3f, 0xb7, 0xa0, + 0x6b, 0x7f, 0x9c, 0xc2, 0x41, 0x5d, 0x35, 0x0f, 0xea, 0x8f, 0x0f, 0x32, 0x97, 0x19, 0x67, 0xf4, + 0x7f, 0x9e, 0x84, 0xf3, 0x19, 0xbe, 0x1a, 0x7b, 0x30, 0xbd, 0xd5, 0x20, 0xa6, 0x53, 0x9c, 0xf8, + 0x98, 0x54, 0xff, 0xc1, 0x9e, 0x1e, 0x74, 0x2c, 0x3d, 0xcf, 0x74, 0x17, 0x0a, 0xee, 0x6e, 0x02, + 0x7d, 0xd9, 0x82, 0xb3, 0xce, 0xfd, 0xb0, 0x2b, 0x09, 0xa3, 0x58, 0x33, 0x2f, 0xa5, 0x2a, 0x41, + 0xfa, 0x24, 0x6d, 0xe4, 0xf9, 0x8a, 0xd2, 0xb0, 0x70, 0x6a, 0x5b, 0x08, 0x8b, 0x10, 0x9a, 0x94, + 0x9d, 0xef, 0xe1, 0xb6, 0x99, 0xe6, 0x54, 0xc3, 0x8f, 0x6c, 0x09, 0xc1, 0x8a, 0x0e, 0xba, 0x0b, + 0xa5, 0x2d, 0xe9, 0xe9, 0x26, 0xae, 0x84, 0xd4, 0x3b, 0x36, 0xd5, 0x1d, 0x8e, 0x3f, 0x4b, 0x2a, + 0x10, 0x8e, 0x49, 0xa1, 0x37, 0x20, 0xef, 0x6d, 0x86, 0xbd, 0x12, 0xfd, 0x24, 0x2c, 0x99, 0xb8, + 0x4b, 0xf4, 0xda, 0x4a, 0x1d, 0xd3, 0x8a, 0xe8, 0x06, 0xe4, 0x83, 0x8d, 0xa6, 0xd0, 0xdb, 0xa5, + 0x9e, 0xdc, 0x78, 0xb1, 0x92, 0xbe, 0x48, 0x38, 0x25, 0xbc, 0x58, 0xc1, 0x94, 0x04, 0xaa, 0xc1, + 0x10, 0x73, 0x6b, 0x10, 0xb7, 0x40, 0x2a, 0xbf, 0xdb, 0xc3, 0x3d, 0x88, 0xfb, 0x4d, 0x33, 0x04, + 0xcc, 0x09, 0xa1, 0x75, 0x18, 0x6e, 0xb0, 0xa4, 0x30, 0x22, 0x6a, 0xf3, 0xa7, 0x52, 0x35, 0x74, + 0x3d, 0xb2, 0xe5, 0x08, 0x85, 0x15, 0xc3, 0xc0, 0x82, 0x16, 0xa3, 0x4a, 0xda, 0xdb, 0x9b, 0x21, + 0x93, 0xf0, 0xb3, 0xa8, 0xf6, 0x48, 0x02, 0x25, 0xa8, 0x32, 0x0c, 0x2c, 0x68, 0xa1, 0xcf, 0x40, + 0x6e, 0xb3, 0x21, 0xbc, 0x1e, 0x52, 0x55, 0x75, 0xa6, 0x57, 0xfb, 0xe2, 0xf0, 0xe1, 0x41, 0x39, + 0xb7, 0xb2, 0x84, 0x73, 0x9b, 0x0d, 0xb4, 0x06, 0x23, 0x9b, 0xdc, 0x0f, 0x56, 0x68, 0xe3, 0x9e, + 0x4e, 0x77, 0xd1, 0xed, 0x72, 0x95, 0xe5, 0xd6, 0xfa, 0x02, 0x80, 0x25, 0x11, 0x16, 0x87, 0x52, + 0xf9, 0xf3, 0x8a, 0x80, 0xcc, 0x73, 0xc7, 0xf3, 0xc1, 0xe6, 0xb7, 0x72, 0xec, 0x15, 0x8c, 0x35, + 0x8a, 0xe8, 0x4b, 0x50, 0x72, 0x64, 0xfa, 0x3f, 0x11, 0xb0, 0xe2, 0xc5, 0xd4, 0x8d, 0xd9, 0x3b, + 0x33, 0x22, 0x5f, 0xd5, 0x0a, 0x09, 0xc7, 0x44, 0xd1, 0x0e, 0x8c, 0xef, 0x85, 0xed, 0x6d, 0x22, + 0x37, 0x32, 0x8b, 0x5f, 0x91, 0x71, 0x71, 0xdd, 0x15, 0x88, 0x6e, 0x10, 0x75, 0x9c, 0x56, 0xd7, + 0xd9, 0xc3, 0xde, 0xb2, 0xef, 0xea, 0xc4, 0xb0, 0x49, 0x9b, 0x0e, 0xff, 0x7b, 0x1d, 0x7f, 0x63, + 0x3f, 0x22, 0x22, 0x82, 0x73, 0xea, 0xf0, 0xbf, 0xc5, 0x51, 0xba, 0x87, 0x5f, 0x00, 0xb0, 0x24, + 0x42, 0xb7, 0xba, 0x23, 0x53, 0x6b, 0xb2, 0xc8, 0xcd, 0x19, 0x5b, 0x3d, 0x35, 0xff, 0xa6, 0x36, + 0x28, 0xec, 0x8c, 0x8c, 0x49, 0xb1, 0xb3, 0xb1, 0xbd, 0xed, 0x47, 0xbe, 0x97, 0x38, 0x97, 0xa7, + 0xb3, 0xcf, 0xc6, 0x5a, 0x0a, 0x7e, 0xf7, 0xd9, 0x98, 0x86, 0x85, 0x53, 0xdb, 0x42, 0x4d, 0x98, + 0x68, 0xfb, 0x41, 0x74, 0xdf, 0x0f, 0xe4, 0xfa, 0x42, 0x3d, 0xb4, 0x09, 0x06, 0xa6, 0x68, 0x91, + 0x45, 0x14, 0x37, 0x21, 0x38, 0x41, 0x13, 0x7d, 0x0e, 0x46, 0xc2, 0x86, 0xd3, 0x22, 0xd5, 0xdb, + 0x33, 0x67, 0xb2, 0x2f, 0x9d, 0x3a, 0x47, 0xc9, 0x58, 0x5d, 0x6c, 0x72, 0x04, 0x0a, 0x96, 0xe4, + 0xd0, 0x0a, 0x0c, 0xb1, 0xb4, 0x00, 0x2c, 0xf8, 0x74, 0x46, 0x60, 0xa4, 0x2e, 0xbb, 0x52, 0x7e, + 0x36, 0xb1, 0x62, 0xcc, 0xab, 0xd3, 0x3d, 0x20, 0x98, 0x6a, 0x3f, 0x9c, 0x39, 0x97, 0xbd, 0x07, + 0x04, 0x2f, 0x7e, 0xbb, 0xde, 0x6b, 0x0f, 0x28, 0x24, 0x1c, 0x13, 0xa5, 0x27, 0x33, 0x3d, 0x4d, + 0xcf, 0xf7, 0x30, 0x63, 0xc9, 0x3c, 0x4b, 0xd9, 0xc9, 0x4c, 0x4f, 0x52, 0x4a, 0xc2, 0xfe, 0xbd, + 0x91, 0x6e, 0x4e, 0x85, 
0x89, 0x61, 0x7f, 0xd1, 0xea, 0x7a, 0xa1, 0xfb, 0xf4, 0xa0, 0x5a, 0xa1, + 0x13, 0xe4, 0x51, 0xbf, 0x6c, 0xc1, 0xf9, 0x76, 0xea, 0x87, 0x88, 0x6b, 0x7f, 0x30, 0xe5, 0x12, + 0xff, 0x74, 0x15, 0x20, 0x3e, 0x1d, 0x8e, 0x33, 0x5a, 0x4a, 0xca, 0x01, 0xf9, 0x0f, 0x2c, 0x07, + 0xac, 0x42, 0x91, 0xb1, 0x96, 0x7d, 0x92, 0xa4, 0x25, 0xc5, 0x21, 0xc6, 0x40, 0x2c, 0x89, 0x8a, + 0x58, 0x91, 0x40, 0x3f, 0x68, 0xc1, 0xc5, 0x64, 0xd7, 0x31, 0x61, 0x60, 0x11, 0x4e, 0x9d, 0x4b, + 0x80, 0x2b, 0xe2, 0xfb, 0x2f, 0xd6, 0x7a, 0x21, 0x1f, 0xf5, 0x43, 0xc0, 0xbd, 0x1b, 0x43, 0x95, + 0x14, 0x11, 0x74, 0xd8, 0x54, 0xbb, 0x0f, 0x20, 0x86, 0xbe, 0x04, 0x63, 0xbb, 0x7e, 0xc7, 0x8b, + 0x84, 0xd5, 0x8b, 0xf0, 0x53, 0x64, 0xcf, 0xcc, 0xab, 0x5a, 0x39, 0x36, 0xb0, 0x12, 0xc2, 0x6b, + 0xf1, 0xa1, 0x85, 0xd7, 0x77, 0x12, 0xa9, 0xb0, 0x4b, 0xd9, 0x61, 0xfb, 0x84, 0x9c, 0x7f, 0x8c, + 0x84, 0xd8, 0xa7, 0x2b, 0x11, 0xfd, 0x8c, 0x95, 0xc2, 0xca, 0x73, 0x19, 0xf9, 0x75, 0x53, 0x46, + 0xbe, 0x92, 0x94, 0x91, 0xbb, 0x54, 0xae, 0x86, 0x78, 0x3c, 0x78, 0xec, 0xe7, 0x41, 0x83, 0xa9, + 0xd9, 0x2d, 0xb8, 0xdc, 0xef, 0x5a, 0x62, 0xe6, 0x4f, 0x4d, 0xf5, 0xc0, 0x16, 0x9b, 0x3f, 0x35, + 0xab, 0x15, 0xcc, 0x20, 0x83, 0x46, 0xdb, 0xb0, 0xff, 0x9b, 0x05, 0xf9, 0x9a, 0xdf, 0x3c, 0x05, + 0x15, 0xf2, 0x67, 0x0d, 0x15, 0xf2, 0xe3, 0x19, 0x29, 0xca, 0x33, 0x15, 0xc6, 0xcb, 0x09, 0x85, + 0xf1, 0xc5, 0x2c, 0x02, 0xbd, 0xd5, 0xc3, 0x3f, 0x99, 0x07, 0x3d, 0xa1, 0x3a, 0xfa, 0xcd, 0x87, + 0xb1, 0x3d, 0xce, 0xf7, 0xca, 0xb1, 0x2e, 0x28, 0x33, 0xab, 0x29, 0xe9, 0x7a, 0xf7, 0x67, 0xcc, + 0x04, 0xf9, 0x1e, 0x71, 0xb7, 0xb6, 0x23, 0xd2, 0x4c, 0x7e, 0xce, 0xe9, 0x99, 0x20, 0xff, 0x17, + 0x0b, 0x26, 0x13, 0xad, 0xa3, 0x16, 0x8c, 0xb7, 0x74, 0xfd, 0x9f, 0x58, 0xa7, 0x0f, 0xa5, 0x3a, + 0x14, 0x26, 0x9c, 0x5a, 0x11, 0x36, 0x89, 0xa3, 0x39, 0x00, 0xf5, 0x3e, 0x27, 0xf5, 0x5e, 0x8c, + 0xeb, 0x57, 0x0f, 0x78, 0x21, 0xd6, 0x30, 0xd0, 0xcb, 0x30, 0x1a, 0xf9, 0x6d, 0xbf, 0xe5, 0x6f, + 0xed, 0xdf, 0x24, 0x32, 0xbe, 0x8b, 0x32, 0xcc, 0x5a, 0x8f, 0x41, 0x58, 0xc7, 0xb3, 0x7f, 0x3a, + 0x0f, 0xc9, 0x24, 0xfc, 0xdf, 0x5a, 0x93, 0x1f, 0xcd, 0x35, 0xf9, 0x0d, 0x0b, 0xa6, 0x68, 0xeb, + 0xcc, 0x48, 0x44, 0x5e, 0xb6, 0x2a, 0x07, 0x8d, 0xd5, 0x23, 0x07, 0xcd, 0x15, 0x7a, 0x76, 0x35, + 0xfd, 0x4e, 0x24, 0xf4, 0x66, 0xda, 0xe1, 0x44, 0x4b, 0xb1, 0x80, 0x0a, 0x3c, 0x12, 0x04, 0xc2, + 0xf3, 0x49, 0xc7, 0x23, 0x41, 0x80, 0x05, 0x54, 0xa6, 0xa8, 0x29, 0x64, 0xa4, 0xa8, 0x61, 0xd1, + 0xea, 0x84, 0x39, 0x81, 0x60, 0x7b, 0xb4, 0x68, 0x75, 0xd2, 0xce, 0x20, 0xc6, 0xb1, 0x7f, 0x3e, + 0x0f, 0x63, 0x35, 0xbf, 0x19, 0xbf, 0x90, 0xbd, 0x64, 0xbc, 0x90, 0x5d, 0x4e, 0xbc, 0x90, 0x4d, + 0xe9, 0xb8, 0xdf, 0x7a, 0x0f, 0xfb, 0xb0, 0xde, 0xc3, 0xfe, 0x99, 0xc5, 0x66, 0xad, 0xb2, 0x56, + 0x17, 0x29, 0x72, 0x5f, 0x80, 0x51, 0x76, 0x20, 0x31, 0x57, 0x3b, 0xf9, 0x6c, 0xc4, 0xa2, 0xcf, + 0xaf, 0xc5, 0xc5, 0x58, 0xc7, 0x41, 0x57, 0xa1, 0x18, 0x12, 0x27, 0x68, 0x6c, 0xab, 0x33, 0x4e, + 0x3c, 0xaa, 0xf0, 0x32, 0xac, 0xa0, 0xe8, 0xad, 0x38, 0x50, 0x5a, 0x3e, 0x3b, 0xd9, 0xab, 0xde, + 0x1f, 0xbe, 0x45, 0xb2, 0xa3, 0xa3, 0xd9, 0xf7, 0x00, 0x75, 0xe3, 0x0f, 0x10, 0x12, 0xa9, 0x6c, + 0x86, 0x44, 0x2a, 0x75, 0x85, 0x43, 0xfa, 0x13, 0x0b, 0x26, 0x6a, 0x7e, 0x93, 0x6e, 0xdd, 0x6f, + 0xa6, 0x7d, 0xaa, 0x47, 0x89, 0x1c, 0xee, 0x11, 0x25, 0xf2, 0xef, 0x5a, 0x30, 0x52, 0xf3, 0x9b, + 0xa7, 0xa0, 0x6d, 0x7f, 0xdd, 0xd4, 0xb6, 0x3f, 0x96, 0xb1, 0x24, 0x32, 0x14, 0xec, 0xbf, 0x98, + 0x87, 0x71, 0xda, 0x4f, 0x7f, 0x4b, 0xce, 0x92, 0x31, 0x22, 0xd6, 0x00, 0x23, 0x42, 0xd9, 0x5c, + 0xbf, 0xd5, 0xf2, 0xef, 0x27, 0x67, 0x6c, 0x85, 
0x95, 0x62, 0x01, 0x45, 0xcf, 0x41, 0xb1, 0x1d, + 0x90, 0x3d, 0xd7, 0x17, 0xfc, 0xa3, 0xf6, 0x76, 0x51, 0x13, 0xe5, 0x58, 0x61, 0x50, 0xb9, 0x2b, + 0x74, 0xbd, 0x06, 0x91, 0x99, 0xa6, 0x0b, 0x2c, 0x19, 0x15, 0x0f, 0xff, 0xac, 0x95, 0x63, 0x03, + 0x0b, 0xdd, 0x83, 0x12, 0xfb, 0xcf, 0x4e, 0x94, 0xe3, 0x27, 0xcf, 0x11, 0x39, 0x17, 0x04, 0x01, + 0x1c, 0xd3, 0x42, 0xd7, 0x00, 0x22, 0x19, 0x22, 0x38, 0x14, 0x91, 0x6d, 0x14, 0xaf, 0xad, 0x82, + 0x07, 0x87, 0x58, 0xc3, 0x42, 0xcf, 0x42, 0x29, 0x72, 0xdc, 0xd6, 0x2d, 0xd7, 0x23, 0x21, 0x53, + 0x39, 0xe7, 0x65, 0x4a, 0x05, 0x51, 0x88, 0x63, 0x38, 0xe5, 0x75, 0x98, 0xdb, 0x37, 0x4f, 0xbd, + 0x55, 0x64, 0xd8, 0x8c, 0xd7, 0xb9, 0xa5, 0x4a, 0xb1, 0x86, 0x61, 0xbf, 0x0a, 0xe7, 0x6a, 0x7e, + 0xb3, 0xe6, 0x07, 0xd1, 0x8a, 0x1f, 0xdc, 0x77, 0x82, 0xa6, 0x9c, 0xbf, 0xb2, 0x8c, 0xee, 0x4f, + 0xcf, 0x9e, 0x21, 0xbe, 0x33, 0x8d, 0xb8, 0xfd, 0x2f, 0x32, 0x6e, 0xe7, 0x98, 0xae, 0x1c, 0x0d, + 0x76, 0xef, 0xaa, 0x2c, 0x7b, 0xd7, 0x9d, 0x88, 0xa0, 0xdb, 0x2c, 0x33, 0x57, 0x7c, 0x05, 0x89, + 0xea, 0xcf, 0x68, 0x99, 0xb9, 0x62, 0x60, 0xea, 0x9d, 0x65, 0xd6, 0xb7, 0x7f, 0x2d, 0xcf, 0x4e, + 0xa3, 0x44, 0xd2, 0x39, 0xf4, 0x45, 0x98, 0x08, 0xc9, 0x2d, 0xd7, 0xeb, 0x3c, 0x90, 0x42, 0x78, + 0x0f, 0x67, 0x9c, 0xfa, 0xb2, 0x8e, 0xc9, 0x55, 0x79, 0x66, 0x19, 0x4e, 0x50, 0xa3, 0xf3, 0x14, + 0x74, 0xbc, 0x85, 0xf0, 0x4e, 0x48, 0x02, 0x91, 0xf4, 0x8c, 0xcd, 0x13, 0x96, 0x85, 0x38, 0x86, + 0xd3, 0x75, 0xc9, 0xfe, 0xac, 0xf9, 0x1e, 0xf6, 0xfd, 0x48, 0xae, 0x64, 0x96, 0x36, 0x47, 0x2b, + 0xc7, 0x06, 0x16, 0x5a, 0x01, 0x14, 0x76, 0xda, 0xed, 0x16, 0x7b, 0xce, 0x77, 0x5a, 0xd7, 0x03, + 0xbf, 0xd3, 0xe6, 0x6f, 0x9d, 0xf9, 0xc5, 0xf3, 0xf4, 0x0a, 0xab, 0x77, 0x41, 0x71, 0x4a, 0x0d, + 0x7a, 0xfa, 0x6c, 0x86, 0xec, 0x37, 0x5b, 0xdd, 0x79, 0xa1, 0x5e, 0xaf, 0xb3, 0x22, 0x2c, 0x61, + 0x74, 0x31, 0xb1, 0xe6, 0x39, 0xe6, 0x70, 0xbc, 0x98, 0xb0, 0x2a, 0xc5, 0x1a, 0x06, 0x5a, 0x86, + 0x91, 0x70, 0x3f, 0x6c, 0x44, 0x22, 0x0e, 0x53, 0x46, 0xfa, 0xca, 0x3a, 0x43, 0xd1, 0x52, 0x2a, + 0xf0, 0x2a, 0x58, 0xd6, 0xb5, 0xbf, 0x87, 0x5d, 0x86, 0x2c, 0x45, 0x56, 0xd4, 0x09, 0x08, 0xda, + 0x85, 0xf1, 0x36, 0x9b, 0x72, 0x11, 0xc0, 0x59, 0xcc, 0xdb, 0x4b, 0x03, 0x4a, 0xb5, 0xf7, 0xe9, + 0x41, 0xa3, 0xb4, 0x4e, 0x4c, 0x5c, 0xa8, 0xe9, 0xe4, 0xb0, 0x49, 0xdd, 0xfe, 0xd7, 0xd3, 0xec, + 0xcc, 0xad, 0x73, 0x51, 0x75, 0x44, 0x18, 0x14, 0x0b, 0xbe, 0x7c, 0x36, 0x5b, 0x67, 0x12, 0x7f, + 0x91, 0x30, 0x4a, 0xc6, 0xb2, 0x2e, 0x7a, 0x8b, 0xbd, 0x4d, 0xf3, 0x83, 0xae, 0x5f, 0xa6, 0x62, + 0x8e, 0x65, 0x3c, 0x43, 0x8b, 0x8a, 0x58, 0x23, 0x82, 0x6e, 0xc1, 0xb8, 0xc8, 0xa8, 0x24, 0x94, + 0x62, 0x79, 0x43, 0xe9, 0x31, 0x8e, 0x75, 0xe0, 0x51, 0xb2, 0x00, 0x9b, 0x95, 0xd1, 0x16, 0x5c, + 0xd4, 0xd2, 0x0b, 0x5e, 0x0f, 0x1c, 0xf6, 0x5e, 0xe9, 0xb2, 0x4d, 0xa4, 0x9d, 0x9b, 0x4f, 0x1e, + 0x1e, 0x94, 0x2f, 0xae, 0xf7, 0x42, 0xc4, 0xbd, 0xe9, 0xa0, 0xdb, 0x70, 0x8e, 0xfb, 0xed, 0x55, + 0x88, 0xd3, 0x6c, 0xb9, 0x9e, 0x3a, 0x98, 0xf9, 0x3a, 0xbc, 0x70, 0x78, 0x50, 0x3e, 0xb7, 0x90, + 0x86, 0x80, 0xd3, 0xeb, 0xa1, 0xd7, 0xa1, 0xd4, 0xf4, 0x42, 0x31, 0x06, 0xc3, 0x46, 0xe6, 0xcc, + 0x52, 0x65, 0xad, 0xae, 0xbe, 0x3f, 0xfe, 0x83, 0xe3, 0x0a, 0x68, 0x8b, 0x2b, 0xc6, 0x94, 0x1c, + 0x3a, 0x92, 0x9d, 0x25, 0x5d, 0x2c, 0x09, 0xc3, 0x73, 0x87, 0x6b, 0x84, 0x95, 0xe5, 0xab, 0xe1, + 0xd4, 0x63, 0x10, 0x46, 0x6f, 0x02, 0xa2, 0x8c, 0x9a, 0xdb, 0x20, 0x0b, 0x0d, 0x16, 0x47, 0x9b, + 0xe9, 0x11, 0x8b, 0x86, 0xa7, 0x04, 0xaa, 0x77, 0x61, 0xe0, 0x94, 0x5a, 0xe8, 0x06, 0x3d, 0xc8, + 0xf4, 0x52, 0x61, 0xc1, 0x2b, 0x99, 0xfb, 0x99, 0x0a, 0x69, 0x07, 0xa4, 
0xe1, 0x44, 0xa4, 0x69, + 0x52, 0xc4, 0x89, 0x7a, 0xf4, 0x2e, 0x55, 0x29, 0x75, 0xc0, 0x0c, 0x96, 0xd1, 0x9d, 0x56, 0x87, + 0xca, 0xc5, 0xdb, 0x7e, 0x18, 0xad, 0x91, 0xe8, 0xbe, 0x1f, 0xec, 0x88, 0xd8, 0x64, 0x71, 0x98, + 0xcc, 0x18, 0x84, 0x75, 0x3c, 0xca, 0x07, 0xb3, 0xc7, 0xe1, 0x6a, 0x85, 0xbd, 0xd0, 0x15, 0xe3, + 0x7d, 0x72, 0x83, 0x17, 0x63, 0x09, 0x97, 0xa8, 0xd5, 0xda, 0x12, 0x7b, 0x6d, 0x4b, 0xa0, 0x56, + 0x6b, 0x4b, 0x58, 0xc2, 0x11, 0xe9, 0xce, 0x4a, 0x3a, 0x91, 0xad, 0xd5, 0xec, 0xbe, 0x0e, 0x06, + 0x4c, 0x4c, 0xea, 0xc1, 0x94, 0xca, 0x87, 0xca, 0x83, 0xb6, 0x85, 0x33, 0x93, 0x6c, 0x91, 0x0c, + 0x1e, 0xf1, 0x4d, 0xe9, 0x89, 0xab, 0x09, 0x4a, 0xb8, 0x8b, 0xb6, 0x11, 0xbe, 0x64, 0xaa, 0x6f, + 0x4a, 0xa4, 0x79, 0x28, 0x85, 0x9d, 0x8d, 0xa6, 0xbf, 0xeb, 0xb8, 0x1e, 0x7b, 0x1c, 0xd3, 0x98, + 0xac, 0xba, 0x04, 0xe0, 0x18, 0x07, 0xad, 0x40, 0xd1, 0x91, 0x4a, 0x60, 0x94, 0x1d, 0xab, 0x40, + 0xa9, 0x7e, 0xb9, 0xfb, 0xae, 0x54, 0xfb, 0xaa, 0xba, 0xe8, 0x35, 0x18, 0x17, 0xde, 0x5a, 0x3c, + 0x82, 0x03, 0x7b, 0xbc, 0xd2, 0xcc, 0xf1, 0xeb, 0x3a, 0x10, 0x9b, 0xb8, 0xe8, 0x0b, 0x30, 0x41, + 0xa9, 0xc4, 0x07, 0xdb, 0xcc, 0xd9, 0x41, 0x4e, 0x44, 0x2d, 0xd5, 0x85, 0x5e, 0x19, 0x27, 0x88, + 0xa1, 0x26, 0x3c, 0xe1, 0x74, 0x22, 0x9f, 0x29, 0xd2, 0xcd, 0xf5, 0xbf, 0xee, 0xef, 0x10, 0x8f, + 0xbd, 0x61, 0x15, 0x17, 0x2f, 0x1f, 0x1e, 0x94, 0x9f, 0x58, 0xe8, 0x81, 0x87, 0x7b, 0x52, 0x41, + 0x77, 0x60, 0x34, 0xf2, 0x5b, 0xcc, 0x30, 0x9e, 0xb2, 0x12, 0xe7, 0xb3, 0xc3, 0xff, 0xac, 0x2b, + 0x34, 0x5d, 0x89, 0xa4, 0xaa, 0x62, 0x9d, 0x0e, 0x5a, 0xe7, 0x7b, 0x8c, 0x05, 0x46, 0x25, 0xe1, + 0xcc, 0x63, 0xd9, 0x03, 0xa3, 0xe2, 0xa7, 0x9a, 0x5b, 0x50, 0xd4, 0xc4, 0x3a, 0x19, 0x74, 0x1d, + 0xa6, 0xdb, 0x81, 0xeb, 0xb3, 0x85, 0xad, 0x1e, 0x31, 0x66, 0xcc, 0xec, 0x06, 0xb5, 0x24, 0x02, + 0xee, 0xae, 0x43, 0x85, 0x4c, 0x59, 0x38, 0x73, 0x81, 0xa7, 0xca, 0xe2, 0x8c, 0x37, 0x2f, 0xc3, + 0x0a, 0x8a, 0x56, 0xd9, 0xb9, 0xcc, 0xc5, 0xc1, 0x99, 0xd9, 0xec, 0x18, 0x0f, 0xba, 0xd8, 0xc8, + 0xf9, 0x25, 0xf5, 0x17, 0xc7, 0x14, 0xe8, 0xbd, 0x11, 0x6e, 0x3b, 0x01, 0xa9, 0x05, 0x7e, 0x83, + 0xf0, 0xce, 0x70, 0x9b, 0xfc, 0xc7, 0x79, 0xfc, 0x46, 0x7a, 0x6f, 0xd4, 0xd3, 0x10, 0x70, 0x7a, + 0x3d, 0xd4, 0xd4, 0x32, 0x44, 0x53, 0x36, 0x34, 0x9c, 0x79, 0xa2, 0x87, 0x99, 0x51, 0x82, 0x67, + 0x8d, 0xd7, 0xa2, 0x51, 0x1c, 0xe2, 0x04, 0x4d, 0xf4, 0x1d, 0x30, 0x25, 0xc2, 0x1d, 0xc5, 0xe3, + 0x7e, 0x31, 0xb6, 0x5f, 0xc4, 0x09, 0x18, 0xee, 0xc2, 0x9e, 0xfd, 0x76, 0x98, 0xee, 0xba, 0x71, + 0x8e, 0x15, 0x7c, 0xfc, 0x8f, 0x87, 0xa0, 0xa4, 0x94, 0xe9, 0x68, 0xde, 0x7c, 0x23, 0xb9, 0x90, + 0x7c, 0x23, 0x29, 0x52, 0x9e, 0x5e, 0x7f, 0x16, 0x59, 0x37, 0xcc, 0xea, 0x72, 0xd9, 0xa9, 0xbe, + 0x74, 0xae, 0xbc, 0xaf, 0x8b, 0x9e, 0xa6, 0x1b, 0xc9, 0x0f, 0xfc, 0xd8, 0x52, 0xe8, 0xa9, 0x6e, + 0x19, 0x30, 0xd3, 0x2e, 0x7a, 0x8a, 0x0a, 0x36, 0xcd, 0x6a, 0x2d, 0x99, 0x7a, 0xb2, 0x46, 0x0b, + 0x31, 0x87, 0x31, 0x01, 0x90, 0xb2, 0x47, 0x4c, 0x00, 0x1c, 0x79, 0x48, 0x01, 0x50, 0x12, 0xc0, + 0x31, 0x2d, 0xd4, 0x82, 0xe9, 0x86, 0x99, 0x35, 0x54, 0xb9, 0xe5, 0x3d, 0xd5, 0x37, 0x7f, 0x67, + 0x47, 0x4b, 0xd1, 0xb6, 0x94, 0xa4, 0x82, 0xbb, 0x09, 0xa3, 0xd7, 0xa0, 0xf8, 0x9e, 0x1f, 0xb2, + 0xc5, 0x24, 0x78, 0x04, 0xe9, 0xbe, 0x54, 0x7c, 0xeb, 0x76, 0x9d, 0x95, 0x1f, 0x1d, 0x94, 0x47, + 0x6b, 0x7e, 0x53, 0xfe, 0xc5, 0xaa, 0x02, 0x7a, 0x00, 0xe7, 0x8c, 0x93, 0x55, 0x75, 0x17, 0x06, + 0xef, 0xee, 0x45, 0xd1, 0xdc, 0xb9, 0x6a, 0x1a, 0x25, 0x9c, 0xde, 0x00, 0x3d, 0xae, 0x3c, 0x5f, + 0x64, 0xdc, 0x95, 0x7c, 0x08, 0x63, 0x37, 0x4a, 0xba, 0xf3, 0x7a, 0x02, 0x01, 0x77, 0xd7, 0xb1, + 
0x7f, 0x85, 0xbf, 0x3d, 0x08, 0x0d, 0x25, 0x09, 0x3b, 0xad, 0xd3, 0x48, 0xe8, 0xb4, 0x6c, 0x28, + 0x4f, 0x1f, 0xfa, 0x7d, 0xeb, 0x37, 0x2c, 0xf6, 0xbe, 0xb5, 0x4e, 0x76, 0xdb, 0x2d, 0x2a, 0x27, + 0x3f, 0xfa, 0x8e, 0xbf, 0x05, 0xc5, 0x48, 0xb4, 0xd6, 0x2b, 0x07, 0x95, 0xd6, 0x29, 0xf6, 0xc6, + 0xa7, 0x38, 0x14, 0x59, 0x8a, 0x15, 0x19, 0xfb, 0x9f, 0xf0, 0x19, 0x90, 0x90, 0x53, 0x50, 0x64, + 0x55, 0x4c, 0x45, 0x56, 0xb9, 0xcf, 0x17, 0x64, 0x28, 0xb4, 0xfe, 0xb1, 0xd9, 0x6f, 0x26, 0x0c, + 0x7e, 0xd4, 0x1f, 0x56, 0xed, 0x1f, 0xb1, 0xe0, 0x6c, 0x9a, 0x25, 0x12, 0xe5, 0x2a, 0xb9, 0x28, + 0xaa, 0x1e, 0x9a, 0xd5, 0x08, 0xde, 0x15, 0xe5, 0x58, 0x61, 0x0c, 0x9c, 0xde, 0xe1, 0x78, 0xf1, + 0xdd, 0x6e, 0xc3, 0x78, 0x2d, 0x20, 0xda, 0x1d, 0xf0, 0x06, 0xf7, 0x83, 0xe3, 0xfd, 0x79, 0xee, + 0xd8, 0x3e, 0x70, 0xf6, 0xcf, 0xe6, 0xe0, 0x2c, 0x7f, 0x29, 0x5a, 0xd8, 0xf3, 0xdd, 0x66, 0xcd, + 0x6f, 0x8a, 0xd4, 0x1c, 0x6f, 0xc3, 0x58, 0x5b, 0xd3, 0x1f, 0xf4, 0x8a, 0x30, 0xa5, 0xeb, 0x19, + 0x62, 0x39, 0x4e, 0x2f, 0xc5, 0x06, 0x2d, 0xd4, 0x84, 0x31, 0xb2, 0xe7, 0x36, 0xd4, 0x73, 0x43, + 0xee, 0xd8, 0x77, 0x83, 0x6a, 0x65, 0x59, 0xa3, 0x83, 0x0d, 0xaa, 0x8f, 0x20, 0x5b, 0x9b, 0xfd, + 0xa3, 0x16, 0x3c, 0x96, 0x11, 0x8f, 0x8a, 0x36, 0x77, 0x9f, 0xbd, 0xc9, 0x89, 0xc4, 0x4f, 0xaa, + 0x39, 0xfe, 0x52, 0x87, 0x05, 0x14, 0x7d, 0x0e, 0x80, 0xbf, 0xb4, 0x51, 0xb1, 0xa6, 0x5f, 0xe0, + 0x1e, 0x23, 0xe6, 0x88, 0x16, 0x2b, 0x42, 0xd6, 0xc7, 0x1a, 0x2d, 0xfb, 0xa7, 0xf2, 0x30, 0xc4, + 0x5e, 0x76, 0xd0, 0x0a, 0x8c, 0x6c, 0xf3, 0x08, 0xcd, 0x83, 0x04, 0x83, 0x8e, 0xe5, 0x43, 0x5e, + 0x80, 0x65, 0x65, 0xb4, 0x0a, 0x67, 0x78, 0x84, 0xeb, 0x56, 0x85, 0xb4, 0x9c, 0x7d, 0xa9, 0x66, + 0xe0, 0xc9, 0x92, 0x54, 0xdc, 0x8b, 0x6a, 0x37, 0x0a, 0x4e, 0xab, 0x87, 0xde, 0x80, 0x09, 0xca, + 0x97, 0xf9, 0x9d, 0x48, 0x52, 0xe2, 0xb1, 0xad, 0x15, 0x23, 0xb8, 0x6e, 0x40, 0x71, 0x02, 0x9b, + 0x0a, 0x4c, 0xed, 0x2e, 0x85, 0xca, 0x50, 0x2c, 0x30, 0x99, 0x4a, 0x14, 0x13, 0x97, 0x99, 0x20, + 0x75, 0x98, 0xc1, 0xd5, 0xfa, 0x76, 0x40, 0xc2, 0x6d, 0xbf, 0xd5, 0x14, 0xb9, 0xb6, 0x63, 0x13, + 0xa4, 0x04, 0x1c, 0x77, 0xd5, 0xa0, 0x54, 0x36, 0x1d, 0xb7, 0xd5, 0x09, 0x48, 0x4c, 0x65, 0xd8, + 0xa4, 0xb2, 0x92, 0x80, 0xe3, 0xae, 0x1a, 0x74, 0x1d, 0x9d, 0x13, 0xc9, 0xaf, 0xa5, 0x37, 0xbe, + 0xb2, 0x2b, 0x1b, 0x91, 0x7e, 0x49, 0x3d, 0xc2, 0xd1, 0x08, 0xcb, 0x1b, 0x95, 0x3e, 0x5b, 0xd3, + 0x03, 0x0a, 0x8f, 0x24, 0x49, 0xe5, 0x61, 0x52, 0x30, 0xff, 0x9e, 0x05, 0x67, 0x52, 0xec, 0x57, + 0xf9, 0x51, 0xb5, 0xe5, 0x86, 0x91, 0x4a, 0x08, 0xa3, 0x1d, 0x55, 0xbc, 0x1c, 0x2b, 0x0c, 0xba, + 0x1f, 0xf8, 0x61, 0x98, 0x3c, 0x00, 0x85, 0x7d, 0x98, 0x80, 0x1e, 0xef, 0x00, 0x44, 0x97, 0xa1, + 0xd0, 0x09, 0x89, 0x0c, 0x24, 0xa5, 0xce, 0x6f, 0xa6, 0x19, 0x66, 0x10, 0xca, 0x9a, 0x6e, 0x29, + 0xa5, 0xac, 0xc6, 0x9a, 0x72, 0x4d, 0x2b, 0x87, 0xd9, 0x5f, 0xcd, 0xc3, 0x85, 0x4c, 0x4b, 0x75, + 0xda, 0xa5, 0x5d, 0xdf, 0x73, 0x23, 0x5f, 0xbd, 0x1a, 0xf2, 0x50, 0x26, 0xa4, 0xbd, 0xbd, 0x2a, + 0xca, 0xb1, 0xc2, 0x40, 0x57, 0x64, 0x1a, 0xf6, 0x64, 0xca, 0x9b, 0xc5, 0x8a, 0x91, 0x89, 0x7d, + 0xd0, 0x74, 0x62, 0x4f, 0x41, 0xa1, 0xed, 0xfb, 0xad, 0xe4, 0x61, 0x44, 0xbb, 0xeb, 0xfb, 0x2d, + 0xcc, 0x80, 0xe8, 0x13, 0x62, 0x1c, 0x12, 0xcf, 0x64, 0xd8, 0x69, 0xfa, 0xa1, 0x36, 0x18, 0xcf, + 0xc0, 0xc8, 0x0e, 0xd9, 0x0f, 0x5c, 0x6f, 0x2b, 0xf9, 0x7c, 0x7a, 0x93, 0x17, 0x63, 0x09, 0x37, + 0x33, 0x3e, 0x8c, 0x9c, 0x74, 0x1e, 0xb0, 0x62, 0xdf, 0xab, 0xed, 0x07, 0xf2, 0x30, 0x89, 0x17, + 0x2b, 0xdf, 0x9a, 0x88, 0x3b, 0xdd, 0x13, 0x71, 0xd2, 0x79, 0xc0, 0xfa, 0xcf, 0xc6, 0x2f, 0x5a, + 0x30, 0xc9, 0xa2, 0x22, 
0x8b, 0x00, 0x1a, 0xae, 0xef, 0x9d, 0x02, 0xeb, 0xf6, 0x14, 0x0c, 0x05, + 0xb4, 0xd1, 0x64, 0x72, 0x1f, 0xd6, 0x13, 0xcc, 0x61, 0xe8, 0x09, 0x28, 0xb0, 0x2e, 0xd0, 0xc9, + 0x1b, 0xe3, 0x79, 0x11, 0x2a, 0x4e, 0xe4, 0x60, 0x56, 0xca, 0xbc, 0xc2, 0x31, 0x69, 0xb7, 0x5c, + 0xde, 0xe9, 0xf8, 0x49, 0xe2, 0xa3, 0xe1, 0x15, 0x9e, 0xda, 0xb5, 0x0f, 0xe6, 0x15, 0x9e, 0x4e, + 0xb2, 0xb7, 0x58, 0xf4, 0xdf, 0x73, 0x70, 0x29, 0xb5, 0xde, 0xc0, 0x5e, 0xe1, 0xbd, 0x6b, 0x9f, + 0x8c, 0x15, 0x4c, 0xba, 0x71, 0x4a, 0xfe, 0x14, 0x8d, 0x53, 0x0a, 0x83, 0x72, 0x8e, 0x43, 0x03, + 0x38, 0x6b, 0xa7, 0x0e, 0xd9, 0x47, 0xc4, 0x59, 0x3b, 0xb5, 0x6f, 0x19, 0x62, 0xdd, 0x9f, 0xe6, + 0x32, 0xbe, 0x85, 0x09, 0x78, 0x57, 0xe9, 0x39, 0xc3, 0x80, 0xa1, 0xe0, 0x84, 0xc7, 0xf8, 0x19, + 0xc3, 0xcb, 0xb0, 0x82, 0x22, 0x57, 0x73, 0x7b, 0xce, 0x65, 0xa7, 0x7e, 0xcc, 0x6c, 0x6a, 0xce, + 0x7c, 0x41, 0x52, 0x43, 0x90, 0xe2, 0x02, 0xbd, 0xaa, 0x09, 0xe5, 0xf9, 0xc1, 0x85, 0xf2, 0xb1, + 0x74, 0x81, 0x1c, 0x2d, 0xc0, 0xe4, 0xae, 0xeb, 0xb1, 0x54, 0xfe, 0x26, 0x2b, 0xaa, 0xa2, 0x80, + 0xac, 0x9a, 0x60, 0x9c, 0xc4, 0x9f, 0x7d, 0x0d, 0xc6, 0x1f, 0x5e, 0x1d, 0xf9, 0x8d, 0x3c, 0x3c, + 0xde, 0x63, 0xdb, 0xf3, 0xb3, 0xde, 0x98, 0x03, 0xed, 0xac, 0xef, 0x9a, 0x87, 0x1a, 0x9c, 0xdd, + 0xec, 0xb4, 0x5a, 0xfb, 0xcc, 0xfe, 0x93, 0x34, 0x25, 0x86, 0xe0, 0x15, 0x9f, 0x90, 0x99, 0x28, + 0x56, 0x52, 0x70, 0x70, 0x6a, 0x4d, 0xf4, 0x26, 0x20, 0x5f, 0xe4, 0x9d, 0xbd, 0x4e, 0x3c, 0xa1, + 0x97, 0x67, 0x03, 0x9f, 0x8f, 0x37, 0xe3, 0xed, 0x2e, 0x0c, 0x9c, 0x52, 0x8b, 0x32, 0xfd, 0xf4, + 0x56, 0xda, 0x57, 0xdd, 0x4a, 0x30, 0xfd, 0x58, 0x07, 0x62, 0x13, 0x17, 0x5d, 0x87, 0x69, 0x67, + 0xcf, 0x71, 0x79, 0x74, 0x3c, 0x49, 0x80, 0x73, 0xfd, 0x4a, 0x09, 0xb6, 0x90, 0x44, 0xc0, 0xdd, + 0x75, 0x12, 0x8e, 0xd1, 0xc3, 0xd9, 0x8e, 0xd1, 0xbd, 0xcf, 0xc5, 0x7e, 0x3a, 0x5d, 0xfb, 0x3f, + 0x58, 0xf4, 0xfa, 0x4a, 0xc9, 0x1d, 0x4f, 0xc7, 0x41, 0xe9, 0x26, 0x35, 0x1f, 0xe5, 0x73, 0x9a, + 0x85, 0x47, 0x0c, 0xc4, 0x26, 0x2e, 0x5f, 0x10, 0x61, 0xec, 0x24, 0x63, 0xb0, 0xee, 0x22, 0xc6, + 0x81, 0xc2, 0x40, 0x9f, 0x87, 0x91, 0xa6, 0xbb, 0xe7, 0x86, 0x7e, 0x20, 0x36, 0xcb, 0x31, 0x5d, + 0x0d, 0xe2, 0x73, 0xb0, 0xc2, 0xc9, 0x60, 0x49, 0xcf, 0xfe, 0x81, 0x1c, 0x8c, 0xcb, 0x16, 0xdf, + 0xea, 0xf8, 0x91, 0x73, 0x0a, 0xd7, 0xf2, 0x75, 0xe3, 0x5a, 0xfe, 0x44, 0xaf, 0x40, 0x0f, 0xac, + 0x4b, 0x99, 0xd7, 0xf1, 0xed, 0xc4, 0x75, 0xfc, 0x74, 0x7f, 0x52, 0xbd, 0xaf, 0xe1, 0x7f, 0x6a, + 0xc1, 0xb4, 0x81, 0x7f, 0x0a, 0xb7, 0xc1, 0x8a, 0x79, 0x1b, 0x3c, 0xd9, 0xf7, 0x1b, 0x32, 0x6e, + 0x81, 0xef, 0xcb, 0x27, 0xfa, 0xce, 0x4e, 0xff, 0xf7, 0xa0, 0xb0, 0xed, 0x04, 0xcd, 0x5e, 0x01, + 0x65, 0xbb, 0x2a, 0xcd, 0xdd, 0x70, 0x82, 0x26, 0x3f, 0xc3, 0x9f, 0x53, 0xd9, 0x2a, 0x9d, 0xa0, + 0xd9, 0xd7, 0x27, 0x8c, 0x35, 0x85, 0x5e, 0x85, 0xe1, 0xb0, 0xe1, 0xb7, 0x95, 0xc5, 0xe6, 0x65, + 0x9e, 0xc9, 0x92, 0x96, 0x1c, 0x1d, 0x94, 0x91, 0xd9, 0x1c, 0x2d, 0xc6, 0x02, 0x1f, 0xbd, 0x0d, + 0xe3, 0xec, 0x97, 0xb2, 0x5c, 0xc8, 0x67, 0xa7, 0x31, 0xa8, 0xeb, 0x88, 0xdc, 0x00, 0xc6, 0x28, + 0xc2, 0x26, 0xa9, 0xd9, 0x2d, 0x28, 0xa9, 0xcf, 0x7a, 0xa4, 0xbe, 0x3c, 0xff, 0x36, 0x0f, 0x67, + 0x52, 0xd6, 0x1c, 0x0a, 0x8d, 0x99, 0x78, 0x61, 0xc0, 0xa5, 0xfa, 0x01, 0xe7, 0x22, 0x64, 0xd2, + 0x50, 0x53, 0xac, 0xad, 0x81, 0x1b, 0xbd, 0x13, 0x92, 0x64, 0xa3, 0xb4, 0xa8, 0x7f, 0xa3, 0xb4, + 0xb1, 0x53, 0x1b, 0x6a, 0xda, 0x90, 0xea, 0xe9, 0x23, 0x9d, 0xd3, 0x3f, 0xca, 0xc3, 0xd9, 0xb4, + 0xd8, 0x33, 0xe8, 0xbb, 0x13, 0x29, 0x6d, 0x5e, 0x1a, 0x34, 0x6a, 0x0d, 0xcf, 0x73, 0x23, 0x12, + 0x34, 0xcf, 0x99, 0x49, 0x6e, 0xfa, 0x0e, 0xb3, 
0x68, 0x93, 0x39, 0x80, 0x06, 0x3c, 0x15, 0x91, + 0x3c, 0x3e, 0x3e, 0x3d, 0x70, 0x07, 0x44, 0x0e, 0xa3, 0x30, 0xe1, 0x00, 0x2a, 0x8b, 0xfb, 0x3b, + 0x80, 0xca, 0x96, 0x67, 0x5d, 0x18, 0xd5, 0xbe, 0xe6, 0x91, 0xce, 0xf8, 0x0e, 0xbd, 0xad, 0xb4, + 0x7e, 0x3f, 0xd2, 0x59, 0xff, 0x51, 0x0b, 0x12, 0xe6, 0x91, 0x4a, 0xdd, 0x65, 0x65, 0xaa, 0xbb, + 0x2e, 0x43, 0x21, 0xf0, 0x5b, 0x24, 0x99, 0x41, 0x06, 0xfb, 0x2d, 0x82, 0x19, 0x84, 0x62, 0x44, + 0xb1, 0xb2, 0x63, 0x4c, 0x17, 0xe4, 0x84, 0x88, 0xf6, 0x14, 0x0c, 0xb5, 0xc8, 0x1e, 0x69, 0x25, + 0xc3, 0xb3, 0xdf, 0xa2, 0x85, 0x98, 0xc3, 0xec, 0x5f, 0x2c, 0xc0, 0xc5, 0x9e, 0x2e, 0xd4, 0x54, + 0x1c, 0xda, 0x72, 0x22, 0x72, 0xdf, 0xd9, 0x4f, 0xc6, 0x51, 0xbe, 0xce, 0x8b, 0xb1, 0x84, 0x33, + 0x8b, 0x71, 0x1e, 0x37, 0x31, 0xa1, 0x1c, 0x14, 0xe1, 0x12, 0x05, 0xf4, 0x11, 0x24, 0xa7, 0xbf, + 0x06, 0x10, 0x86, 0xad, 0x65, 0x8f, 0x72, 0x77, 0x4d, 0x61, 0x8a, 0x1e, 0xc7, 0xd7, 0xac, 0xdf, + 0x12, 0x10, 0xac, 0x61, 0xa1, 0x0a, 0x4c, 0xb5, 0x03, 0x3f, 0xe2, 0xba, 0xd6, 0x0a, 0x37, 0x14, + 0x1a, 0x32, 0xbd, 0x57, 0x6b, 0x09, 0x38, 0xee, 0xaa, 0x81, 0x5e, 0x86, 0x51, 0xe1, 0xd1, 0x5a, + 0xf3, 0xfd, 0x96, 0x50, 0x03, 0x29, 0xb3, 0x93, 0x7a, 0x0c, 0xc2, 0x3a, 0x9e, 0x56, 0x8d, 0x29, + 0x70, 0x47, 0x52, 0xab, 0x71, 0x25, 0xae, 0x86, 0x97, 0x88, 0x43, 0x55, 0x1c, 0x28, 0x0e, 0x55, + 0xac, 0x18, 0x2b, 0x0d, 0xfc, 0x66, 0x05, 0x7d, 0x55, 0x49, 0x3f, 0x57, 0x80, 0x33, 0x62, 0xe1, + 0x3c, 0xea, 0xe5, 0xf2, 0x88, 0x52, 0xe8, 0x7f, 0x6b, 0xcd, 0x9c, 0xf6, 0x9a, 0xf9, 0x41, 0x0b, + 0x4c, 0xf6, 0x0a, 0xfd, 0xb9, 0xcc, 0x40, 0xf4, 0x2f, 0x67, 0xb2, 0x6b, 0x4d, 0x79, 0x81, 0x7c, + 0xc0, 0x90, 0xf4, 0xf6, 0xbf, 0xb7, 0xe0, 0xc9, 0xbe, 0x14, 0xd1, 0x32, 0x94, 0x18, 0x0f, 0xa8, + 0x49, 0x67, 0x4f, 0x2b, 0x43, 0x42, 0x09, 0xc8, 0x60, 0x49, 0xe3, 0x9a, 0x68, 0xb9, 0x2b, 0xe2, + 0xff, 0x33, 0x29, 0x11, 0xff, 0xcf, 0x19, 0xc3, 0xf3, 0x90, 0x21, 0xff, 0x7f, 0x25, 0x0f, 0xc3, + 0x7c, 0xc5, 0x9f, 0x82, 0x18, 0xb6, 0x22, 0xf4, 0xb6, 0x3d, 0x22, 0x51, 0xf1, 0xbe, 0xcc, 0x55, + 0x9c, 0xc8, 0xe1, 0x6c, 0x82, 0xba, 0xad, 0x62, 0x0d, 0x2f, 0x9a, 0x33, 0xee, 0xb3, 0xd9, 0x84, + 0x62, 0x12, 0x38, 0x0d, 0xed, 0x76, 0xfb, 0x22, 0x40, 0xc8, 0xb2, 0xe5, 0x53, 0x1a, 0x22, 0xa6, + 0xd9, 0x27, 0x7b, 0xb4, 0x5e, 0x57, 0xc8, 0xbc, 0x0f, 0xf1, 0x4e, 0x57, 0x00, 0xac, 0x51, 0x9c, + 0x7d, 0x05, 0x4a, 0x0a, 0xb9, 0x9f, 0x16, 0x67, 0x4c, 0x67, 0x2e, 0x3e, 0x0b, 0x93, 0x89, 0xb6, + 0x8e, 0xa5, 0x04, 0xfa, 0x25, 0x0b, 0x26, 0x79, 0x97, 0x97, 0xbd, 0x3d, 0x71, 0xa6, 0xbe, 0x0f, + 0x67, 0x5b, 0x29, 0x67, 0x9b, 0x98, 0xd1, 0xc1, 0xcf, 0x42, 0xa5, 0xf4, 0x49, 0x83, 0xe2, 0xd4, + 0x36, 0xd0, 0x55, 0xba, 0x6e, 0xe9, 0xd9, 0xe5, 0xb4, 0x84, 0xf7, 0xd1, 0x18, 0x5f, 0xb3, 0xbc, + 0x0c, 0x2b, 0xa8, 0xfd, 0x3b, 0x16, 0x4c, 0xf3, 0x9e, 0xdf, 0x24, 0xfb, 0x6a, 0x87, 0x7f, 0x98, + 0x7d, 0x17, 0x49, 0x38, 0x72, 0x19, 0x49, 0x38, 0xf4, 0x4f, 0xcb, 0xf7, 0xfc, 0xb4, 0x9f, 0xb5, + 0x40, 0xac, 0xc0, 0x53, 0x10, 0xe5, 0xbf, 0xdd, 0x14, 0xe5, 0x67, 0xb3, 0x17, 0x75, 0x86, 0x0c, + 0xff, 0x27, 0x16, 0x4c, 0x71, 0x84, 0xf8, 0x2d, 0xf9, 0x43, 0x9d, 0x87, 0x41, 0xb2, 0xe9, 0xa9, + 0x14, 0xdb, 0xe9, 0x1f, 0x65, 0x4c, 0x56, 0xa1, 0xe7, 0x64, 0x35, 0xe5, 0x06, 0x3a, 0x46, 0x26, + 0xc9, 0x63, 0x07, 0xb3, 0xb6, 0xff, 0xd0, 0x02, 0xc4, 0x9b, 0x31, 0xd8, 0x1f, 0xca, 0x54, 0xb0, + 0x52, 0xed, 0xba, 0x88, 0x8f, 0x1a, 0x05, 0xc1, 0x1a, 0xd6, 0x89, 0x0c, 0x4f, 0xc2, 0x20, 0x20, + 0xdf, 0xdf, 0x20, 0xe0, 0x18, 0x23, 0xfa, 0x7f, 0x0a, 0x90, 0x74, 0x07, 0x40, 0x77, 0x61, 0xac, + 0xe1, 0xb4, 0x9d, 0x0d, 0xb7, 0xe5, 0x46, 0x2e, 0x09, 0x7b, 0x59, 0x12, 
0x2d, 0x69, 0x78, 0xe2, + 0xa9, 0x57, 0x2b, 0xc1, 0x06, 0x1d, 0x34, 0x07, 0xd0, 0x0e, 0xdc, 0x3d, 0xb7, 0x45, 0xb6, 0x98, + 0xc6, 0x81, 0xf9, 0x3b, 0x72, 0xf3, 0x18, 0x59, 0x8a, 0x35, 0x8c, 0x14, 0xd7, 0xb5, 0xfc, 0xa3, + 0x73, 0x5d, 0x2b, 0x1c, 0xd3, 0x75, 0x6d, 0x68, 0x20, 0xd7, 0x35, 0x0c, 0xe7, 0x25, 0x8b, 0x44, + 0xff, 0xaf, 0xb8, 0x2d, 0x22, 0xf8, 0x62, 0xee, 0x05, 0x39, 0x7b, 0x78, 0x50, 0x3e, 0x8f, 0x53, + 0x31, 0x70, 0x46, 0x4d, 0xf4, 0x39, 0x98, 0x71, 0x5a, 0x2d, 0xff, 0xbe, 0x1a, 0xb5, 0xe5, 0xb0, + 0xe1, 0xb4, 0xb8, 0xc6, 0x7e, 0x84, 0x51, 0x7d, 0xe2, 0xf0, 0xa0, 0x3c, 0xb3, 0x90, 0x81, 0x83, + 0x33, 0x6b, 0x27, 0x3c, 0xdf, 0x8a, 0x7d, 0x3d, 0xdf, 0x5e, 0x87, 0x52, 0x3b, 0xf0, 0x1b, 0xab, + 0x9a, 0x37, 0xce, 0x25, 0x96, 0xa7, 0x5e, 0x16, 0x1e, 0x1d, 0x94, 0xc7, 0xd5, 0x1f, 0x76, 0xc3, + 0xc7, 0x15, 0xec, 0x1d, 0x38, 0x53, 0x27, 0x81, 0xcb, 0x32, 0x60, 0x36, 0xe3, 0x0d, 0xbd, 0x0e, + 0xa5, 0x20, 0x71, 0x84, 0x0d, 0x14, 0x58, 0x49, 0x8b, 0xf2, 0x2b, 0x8f, 0xac, 0x98, 0x90, 0xfd, + 0xc7, 0x16, 0x8c, 0x08, 0x87, 0x86, 0x53, 0xe0, 0x9c, 0x16, 0x0c, 0x05, 0x76, 0x39, 0xfd, 0x98, + 0x67, 0x9d, 0xc9, 0x54, 0x5d, 0x57, 0x13, 0xaa, 0xeb, 0x27, 0x7b, 0x11, 0xe9, 0xad, 0xb4, 0xfe, + 0x9b, 0x79, 0x98, 0x30, 0x9d, 0x39, 0x4e, 0x61, 0x08, 0xd6, 0x60, 0x24, 0x14, 0x9e, 0x43, 0xb9, + 0x6c, 0xcb, 0xe9, 0xe4, 0x24, 0xc6, 0x66, 0x51, 0xc2, 0x57, 0x48, 0x12, 0x49, 0x75, 0x49, 0xca, + 0x3f, 0x42, 0x97, 0xa4, 0x7e, 0xfe, 0x34, 0x85, 0x93, 0xf0, 0xa7, 0xb1, 0xbf, 0xc6, 0xae, 0x1a, + 0xbd, 0xfc, 0x14, 0xb8, 0x90, 0xeb, 0xe6, 0xa5, 0x64, 0xf7, 0x58, 0x59, 0xa2, 0x53, 0x19, 0xdc, + 0xc8, 0x2f, 0x58, 0x70, 0x31, 0xe5, 0xab, 0x34, 0xd6, 0xe4, 0x39, 0x28, 0x3a, 0x9d, 0xa6, 0xab, + 0xf6, 0xb2, 0xf6, 0x8c, 0xb5, 0x20, 0xca, 0xb1, 0xc2, 0x40, 0x4b, 0x30, 0x4d, 0x1e, 0xb4, 0x5d, + 0xfe, 0x8e, 0xa8, 0xdb, 0x2e, 0xe6, 0x79, 0x88, 0xd9, 0xe5, 0x24, 0x10, 0x77, 0xe3, 0x2b, 0x77, + 0xec, 0x7c, 0xa6, 0x3b, 0xf6, 0x3f, 0xb0, 0x60, 0x54, 0x74, 0xfb, 0x14, 0x46, 0xfb, 0x3b, 0xcc, + 0xd1, 0x7e, 0xbc, 0xc7, 0x68, 0x67, 0x0c, 0xf3, 0xdf, 0xce, 0xa9, 0xfe, 0xd6, 0xfc, 0x20, 0x1a, + 0x80, 0xe5, 0x79, 0x15, 0x8a, 0xed, 0xc0, 0x8f, 0xfc, 0x86, 0xdf, 0x12, 0x1c, 0xcf, 0x13, 0x71, + 0xb4, 0x00, 0x5e, 0x7e, 0xa4, 0xfd, 0xc6, 0x0a, 0x9b, 0x8d, 0x9e, 0x1f, 0x44, 0x82, 0xcb, 0x88, + 0x47, 0xcf, 0x0f, 0x22, 0xcc, 0x20, 0xa8, 0x09, 0x10, 0x39, 0xc1, 0x16, 0x89, 0x68, 0x99, 0x08, + 0x3c, 0x92, 0x7d, 0x78, 0x74, 0x22, 0xb7, 0x35, 0xe7, 0x7a, 0x51, 0x18, 0x05, 0x73, 0x55, 0x2f, + 0xba, 0x1d, 0x70, 0x01, 0x4a, 0x73, 0xff, 0x57, 0xb4, 0xb0, 0x46, 0x57, 0xfa, 0x68, 0xb2, 0x36, + 0x86, 0xcc, 0x07, 0xf1, 0x35, 0x51, 0x8e, 0x15, 0x86, 0xfd, 0x0a, 0xbb, 0x4a, 0xd8, 0x00, 0x1d, + 0xcf, 0x33, 0xff, 0xeb, 0x45, 0x35, 0xb4, 0xec, 0x35, 0xac, 0xa2, 0xfb, 0xff, 0xf7, 0x3e, 0xb9, + 0x69, 0xc3, 0xba, 0x1f, 0x4d, 0x1c, 0x24, 0x00, 0x7d, 0x67, 0x97, 0x9d, 0xc4, 0xf3, 0x7d, 0xae, + 0x80, 0x63, 0x58, 0x46, 0xb0, 0xb0, 0xd7, 0x2c, 0x3c, 0x70, 0xb5, 0x26, 0x16, 0xb9, 0x16, 0xf6, + 0x5a, 0x00, 0x70, 0x8c, 0x83, 0xe6, 0x85, 0xf8, 0x5d, 0x30, 0x92, 0xdf, 0x49, 0xf1, 0x5b, 0x7e, + 0xbe, 0x26, 0x7f, 0xbf, 0x00, 0xa3, 0x2a, 0x09, 0x5e, 0x8d, 0xe7, 0x12, 0x13, 0x61, 0x58, 0x96, + 0xe3, 0x62, 0xac, 0xe3, 0xa0, 0x75, 0x98, 0x0c, 0xb9, 0xee, 0x45, 0x45, 0xdb, 0xe3, 0x3a, 0xac, + 0x4f, 0x4a, 0xfb, 0x8a, 0xba, 0x09, 0x3e, 0x62, 0x45, 0xfc, 0xe8, 0x90, 0x8e, 0x96, 0x49, 0x12, + 0xe8, 0x0d, 0x98, 0x68, 0xe9, 0xe9, 0xe6, 0x6b, 0x42, 0xc5, 0xa5, 0xcc, 0x8f, 0x8d, 0x64, 0xf4, + 0x35, 0x9c, 0xc0, 0xa6, 0x9c, 0x92, 0x5e, 0x22, 0x22, 0x44, 0x3a, 0xde, 0x16, 0x09, 0x45, 0x0a, + 
0x2f, 0xc6, 0x29, 0xdd, 0xca, 0xc0, 0xc1, 0x99, 0xb5, 0xd1, 0xab, 0x30, 0x26, 0x3f, 0x5f, 0x73, + 0x23, 0x8e, 0x8d, 0xdc, 0x35, 0x18, 0x36, 0x30, 0xd1, 0x7d, 0x38, 0x27, 0xff, 0xaf, 0x07, 0xce, + 0xe6, 0xa6, 0xdb, 0x10, 0x5e, 0xdc, 0xdc, 0xd3, 0x67, 0x41, 0xba, 0x0e, 0x2d, 0xa7, 0x21, 0x1d, + 0x1d, 0x94, 0x2f, 0x8b, 0x51, 0x4b, 0x85, 0xb3, 0x49, 0x4c, 0xa7, 0x8f, 0x56, 0xe1, 0xcc, 0x36, + 0x71, 0x5a, 0xd1, 0xf6, 0xd2, 0x36, 0x69, 0xec, 0xc8, 0x4d, 0xc4, 0x9c, 0x93, 0x35, 0xd3, 0xf0, + 0x1b, 0xdd, 0x28, 0x38, 0xad, 0x1e, 0x7a, 0x07, 0x66, 0xda, 0x9d, 0x8d, 0x96, 0x1b, 0x6e, 0xaf, + 0xf9, 0x11, 0x33, 0xe9, 0x50, 0x39, 0xe4, 0x84, 0x17, 0xb3, 0x72, 0xcc, 0xae, 0x65, 0xe0, 0xe1, + 0x4c, 0x0a, 0xe8, 0x7d, 0x38, 0x97, 0x58, 0x0c, 0xc2, 0xa7, 0x72, 0x22, 0x3b, 0xde, 0x6e, 0x3d, + 0xad, 0x82, 0xf0, 0x91, 0x4c, 0x03, 0xe1, 0xf4, 0x26, 0x3e, 0x98, 0xa1, 0xcf, 0x7b, 0xb4, 0xb2, + 0xc6, 0x94, 0xa1, 0x2f, 0xc1, 0x98, 0xbe, 0x8a, 0xc4, 0x05, 0x73, 0x25, 0x9d, 0x67, 0xd1, 0x56, + 0x1b, 0x67, 0xe9, 0xd4, 0x8a, 0xd2, 0x61, 0xd8, 0xa0, 0x68, 0x13, 0x48, 0xff, 0x3e, 0x74, 0x0b, + 0x8a, 0x8d, 0x96, 0x4b, 0xbc, 0xa8, 0x5a, 0xeb, 0x15, 0xf4, 0x63, 0x49, 0xe0, 0x88, 0x01, 0x13, + 0x01, 0x4a, 0x79, 0x19, 0x56, 0x14, 0xec, 0x5f, 0xcf, 0x41, 0xb9, 0x4f, 0xb4, 0xdb, 0x84, 0x3e, + 0xda, 0x1a, 0x48, 0x1f, 0xbd, 0x20, 0x33, 0xe2, 0xad, 0x25, 0x84, 0xf4, 0x44, 0xb6, 0xbb, 0x58, + 0x54, 0x4f, 0xe2, 0x0f, 0x6c, 0x1f, 0xac, 0xab, 0xb4, 0x0b, 0x7d, 0x2d, 0xd7, 0x8d, 0xa7, 0xac, + 0xa1, 0xc1, 0x05, 0x91, 0xcc, 0x67, 0x09, 0xfb, 0x6b, 0x39, 0x38, 0xa7, 0x86, 0xf0, 0x9b, 0x77, + 0xe0, 0xee, 0x74, 0x0f, 0xdc, 0x09, 0x3c, 0xea, 0xd8, 0xb7, 0x61, 0x98, 0x07, 0x4d, 0x19, 0x80, + 0x01, 0x7a, 0xca, 0x8c, 0xb0, 0xa5, 0xae, 0x69, 0x23, 0xca, 0xd6, 0x5f, 0xb2, 0x60, 0x72, 0x7d, + 0xa9, 0x56, 0xf7, 0x1b, 0x3b, 0x24, 0x5a, 0xe0, 0x0c, 0x2b, 0x16, 0xfc, 0x8f, 0xf5, 0x90, 0x7c, + 0x4d, 0x1a, 0xc7, 0x74, 0x19, 0x0a, 0xdb, 0x7e, 0x18, 0x25, 0x5f, 0x7c, 0x6f, 0xf8, 0x61, 0x84, + 0x19, 0xc4, 0xfe, 0x5d, 0x0b, 0x86, 0x58, 0x1e, 0xd7, 0x7e, 0xc9, 0x85, 0x07, 0xf9, 0x2e, 0xf4, + 0x32, 0x0c, 0x93, 0xcd, 0x4d, 0xd2, 0x88, 0xc4, 0xac, 0x4a, 0x77, 0xd4, 0xe1, 0x65, 0x56, 0x4a, + 0x2f, 0x7d, 0xd6, 0x18, 0xff, 0x8b, 0x05, 0x32, 0xba, 0x07, 0xa5, 0xc8, 0xdd, 0x25, 0x0b, 0xcd, + 0xa6, 0x78, 0x33, 0x7b, 0x08, 0xef, 0xdf, 0x75, 0x49, 0x00, 0xc7, 0xb4, 0xec, 0xaf, 0xe6, 0x00, + 0x62, 0xd7, 0xff, 0x7e, 0x9f, 0xb8, 0xd8, 0xf5, 0x9a, 0x72, 0x25, 0xe5, 0x35, 0x05, 0xc5, 0x04, + 0x53, 0x9e, 0x52, 0xd4, 0x30, 0xe5, 0x07, 0x1a, 0xa6, 0xc2, 0x71, 0x86, 0x69, 0x09, 0xa6, 0xe3, + 0xd0, 0x05, 0x66, 0x1c, 0x17, 0x26, 0xa4, 0xac, 0x27, 0x81, 0xb8, 0x1b, 0xdf, 0x26, 0x70, 0x59, + 0x46, 0xd4, 0x94, 0x77, 0x0d, 0x33, 0xc9, 0x3c, 0x46, 0x9e, 0xe9, 0xf8, 0xb9, 0x28, 0x97, 0xf9, + 0x5c, 0xf4, 0x13, 0x16, 0x9c, 0x4d, 0xb6, 0xc3, 0x7c, 0xdf, 0xbe, 0x62, 0xc1, 0x39, 0xf6, 0x68, + 0xc6, 0x5a, 0xed, 0x7e, 0xa2, 0x7b, 0x29, 0x3d, 0xa4, 0x43, 0xef, 0x1e, 0xc7, 0x7e, 0xcf, 0xab, + 0x69, 0xa4, 0x71, 0x7a, 0x8b, 0xf6, 0x57, 0x2c, 0xb8, 0x90, 0x99, 0x3e, 0x08, 0x5d, 0x85, 0xa2, + 0xd3, 0x76, 0xb9, 0x46, 0x4a, 0xec, 0x77, 0x26, 0x3d, 0xd6, 0xaa, 0x5c, 0x1f, 0xa5, 0xa0, 0x2a, + 0xad, 0x61, 0x2e, 0x33, 0xad, 0x61, 0xdf, 0x2c, 0x85, 0xf6, 0xf7, 0x5b, 0x20, 0xdc, 0x9d, 0x06, + 0x38, 0x64, 0xde, 0x96, 0x59, 0x61, 0x8d, 0x60, 0xe6, 0x97, 0xb3, 0xfd, 0xbf, 0x44, 0x08, 0x73, + 0x75, 0xa9, 0x1b, 0x81, 0xcb, 0x0d, 0x5a, 0x76, 0x13, 0x04, 0xb4, 0x42, 0x98, 0xce, 0xaa, 0x7f, + 0x6f, 0xae, 0x01, 0x34, 0x19, 0xae, 0x96, 0x1b, 0x52, 0x5d, 0x21, 0x15, 0x05, 0xc1, 0x1a, 0x96, + 0xfd, 0x43, 0x39, 0x18, 
0x95, 0xc1, 0xb3, 0x3b, 0xde, 0x20, 0x92, 0xe5, 0xb1, 0x72, 0xe8, 0xb0, + 0x64, 0xaa, 0x94, 0x70, 0x2d, 0x16, 0xc8, 0xe3, 0x64, 0xaa, 0x12, 0x80, 0x63, 0x1c, 0xf4, 0x0c, + 0x8c, 0x84, 0x9d, 0x0d, 0x86, 0x9e, 0x70, 0xe2, 0xa9, 0xf3, 0x62, 0x2c, 0xe1, 0xe8, 0x73, 0x30, + 0xc5, 0xeb, 0x05, 0x7e, 0xdb, 0xd9, 0xe2, 0xea, 0xcf, 0x21, 0xe5, 0x55, 0x3b, 0xb5, 0x9a, 0x80, + 0x1d, 0x1d, 0x94, 0xcf, 0x26, 0xcb, 0x98, 0xe2, 0xbc, 0x8b, 0x8a, 0xfd, 0x25, 0x40, 0xdd, 0xf1, + 0xc0, 0xd1, 0x9b, 0xdc, 0x94, 0xca, 0x0d, 0x48, 0xb3, 0x97, 0x46, 0x5c, 0x77, 0x02, 0x95, 0x86, + 0xf4, 0xbc, 0x16, 0x56, 0xf5, 0xed, 0xbf, 0x9a, 0x87, 0xa9, 0xa4, 0x4b, 0x20, 0xba, 0x01, 0xc3, + 0xfc, 0xb2, 0x13, 0xe4, 0x7b, 0x3c, 0xb8, 0x6a, 0x8e, 0x84, 0x6c, 0xdb, 0x8b, 0xfb, 0x52, 0xd4, + 0x47, 0xef, 0xc0, 0x68, 0xd3, 0xbf, 0xef, 0xdd, 0x77, 0x82, 0xe6, 0x42, 0xad, 0x2a, 0xd6, 0x65, + 0x2a, 0xcf, 0x5c, 0x89, 0xd1, 0x74, 0xe7, 0x44, 0xf6, 0xb8, 0x10, 0x83, 0xb0, 0x4e, 0x0e, 0xad, + 0xb3, 0x18, 0x87, 0x9b, 0xee, 0xd6, 0xaa, 0xd3, 0xee, 0x65, 0x57, 0xbb, 0x24, 0x91, 0x34, 0xca, + 0xe3, 0x22, 0x10, 0x22, 0x07, 0xe0, 0x98, 0x10, 0xfa, 0x6e, 0x38, 0x13, 0x66, 0xa8, 0xd9, 0xb2, + 0xd2, 0x43, 0xf4, 0xd2, 0x3c, 0x2d, 0x3e, 0x46, 0xa5, 0x99, 0x34, 0x85, 0x5c, 0x5a, 0x33, 0xf6, + 0x97, 0xcf, 0x80, 0xb1, 0x1b, 0x8d, 0x1c, 0x41, 0xd6, 0x09, 0xe5, 0x08, 0xc2, 0x50, 0x24, 0xbb, + 0xed, 0x68, 0xbf, 0xe2, 0x06, 0xbd, 0x72, 0xd8, 0x2d, 0x0b, 0x9c, 0x6e, 0x9a, 0x12, 0x82, 0x15, + 0x9d, 0xf4, 0x44, 0x4e, 0xf9, 0x0f, 0x31, 0x91, 0x53, 0xe1, 0x14, 0x13, 0x39, 0xad, 0xc1, 0xc8, + 0x96, 0x1b, 0x61, 0xd2, 0xf6, 0x05, 0x9b, 0x99, 0xba, 0x0e, 0xaf, 0x73, 0x94, 0xee, 0xe4, 0x21, + 0x02, 0x80, 0x25, 0x11, 0xf4, 0xa6, 0xda, 0x81, 0xc3, 0xd9, 0x52, 0x5a, 0xf7, 0xcb, 0x60, 0xea, + 0x1e, 0x14, 0x89, 0x9b, 0x46, 0x1e, 0x36, 0x71, 0xd3, 0x8a, 0x4c, 0xb7, 0x54, 0xcc, 0x36, 0x82, + 0x67, 0xd9, 0x94, 0xfa, 0x24, 0x59, 0x32, 0x12, 0x53, 0x95, 0x4e, 0x2e, 0x31, 0xd5, 0xf7, 0x5b, + 0x70, 0xae, 0x9d, 0x96, 0xa3, 0x4d, 0x24, 0x49, 0x7a, 0x79, 0xe0, 0x24, 0x74, 0x46, 0x83, 0x4c, + 0x5c, 0x4f, 0x45, 0xc3, 0xe9, 0xcd, 0xd1, 0x81, 0x0e, 0x36, 0x9a, 0x22, 0xb3, 0xd2, 0x53, 0x19, + 0x19, 0xae, 0x7a, 0xe4, 0xb5, 0x5a, 0x4f, 0xc9, 0xa6, 0xf4, 0xf1, 0xac, 0x6c, 0x4a, 0x03, 0xe7, + 0x50, 0x7a, 0x53, 0xe5, 0xb6, 0x1a, 0xcf, 0x5e, 0x4a, 0x3c, 0x73, 0x55, 0xdf, 0x8c, 0x56, 0x6f, + 0xaa, 0x8c, 0x56, 0x3d, 0x62, 0xbd, 0xf1, 0x7c, 0x55, 0x7d, 0xf3, 0x58, 0x69, 0xb9, 0xa8, 0x26, + 0x4f, 0x26, 0x17, 0x95, 0x71, 0xd5, 0xf0, 0x74, 0x48, 0xcf, 0xf6, 0xb9, 0x6a, 0x0c, 0xba, 0xbd, + 0x2f, 0x1b, 0x9e, 0x77, 0x6b, 0xfa, 0xa1, 0xf2, 0x6e, 0xdd, 0xd5, 0xf3, 0x58, 0xa1, 0x3e, 0x89, + 0x9a, 0x28, 0xd2, 0x80, 0xd9, 0xab, 0xee, 0xea, 0x17, 0xe0, 0x99, 0x6c, 0xba, 0xea, 0x9e, 0xeb, + 0xa6, 0x9b, 0x7a, 0x05, 0x76, 0x65, 0xc5, 0x3a, 0x7b, 0x3a, 0x59, 0xb1, 0xce, 0x9d, 0x78, 0x56, + 0xac, 0xf3, 0xa7, 0x90, 0x15, 0xeb, 0xb1, 0x0f, 0x35, 0x2b, 0xd6, 0xcc, 0x23, 0xc8, 0x8a, 0xb5, + 0x16, 0x67, 0xc5, 0xba, 0x90, 0x3d, 0x25, 0x29, 0x96, 0xb9, 0x19, 0xb9, 0xb0, 0xee, 0xb2, 0xe7, + 0x79, 0x1e, 0xb3, 0x42, 0x04, 0xa3, 0x4b, 0xcf, 0xfb, 0x9b, 0x16, 0xd8, 0x82, 0x4f, 0x89, 0x02, + 0xe1, 0x98, 0x14, 0xa5, 0x1b, 0xe7, 0xc6, 0x7a, 0xbc, 0x87, 0x42, 0x36, 0x4d, 0xd5, 0x95, 0x9d, + 0x11, 0xcb, 0xfe, 0xcb, 0x39, 0xb8, 0xd4, 0x7b, 0x5d, 0xc7, 0x7a, 0xb2, 0x5a, 0xfc, 0xae, 0x93, + 0xd0, 0x93, 0x71, 0x21, 0x27, 0xc6, 0x1a, 0x38, 0xb0, 0xcf, 0x75, 0x98, 0x56, 0x26, 0xb9, 0x2d, + 0xb7, 0xb1, 0xaf, 0xe5, 0x03, 0x56, 0xae, 0x87, 0xf5, 0x24, 0x02, 0xee, 0xae, 0x83, 0x16, 0x60, + 0xd2, 0x28, 0xac, 0x56, 0x84, 0x30, 0xa3, 0x14, 
0x73, 0x75, 0x13, 0x8c, 0x93, 0xf8, 0xf6, 0xcf, + 0x58, 0xf0, 0x58, 0x46, 0xc2, 0x88, 0x81, 0xe3, 0xd6, 0x6c, 0xc2, 0x64, 0xdb, 0xac, 0xda, 0x27, + 0xbc, 0x95, 0x91, 0x96, 0x42, 0xf5, 0x35, 0x01, 0xc0, 0x49, 0xa2, 0x8b, 0x57, 0x7f, 0xeb, 0xf7, + 0x2f, 0x7d, 0xec, 0xb7, 0x7f, 0xff, 0xd2, 0xc7, 0x7e, 0xe7, 0xf7, 0x2f, 0x7d, 0xec, 0xcf, 0x1f, + 0x5e, 0xb2, 0x7e, 0xeb, 0xf0, 0x92, 0xf5, 0xdb, 0x87, 0x97, 0xac, 0xdf, 0x39, 0xbc, 0x64, 0xfd, + 0xde, 0xe1, 0x25, 0xeb, 0xab, 0x7f, 0x70, 0xe9, 0x63, 0x6f, 0xe7, 0xf6, 0x5e, 0xf8, 0xff, 0x01, + 0x00, 0x00, 0xff, 0xff, 0x20, 0x56, 0xf9, 0x8e, 0x0d, 0xe7, 0x00, 0x00, } diff --git a/vendor/k8s.io/api/core/v1/types.go b/vendor/k8s.io/api/core/v1/types.go index d014d0baf..d9a57bd06 100644 --- a/vendor/k8s.io/api/core/v1/types.go +++ b/vendor/k8s.io/api/core/v1/types.go @@ -151,9 +151,6 @@ type VolumeSource struct { // StorageOS represents a StorageOS volume attached and mounted on Kubernetes nodes. // +optional StorageOS *StorageOSVolumeSource `json:"storageos,omitempty" protobuf:"bytes,27,opt,name=storageos"` - // CSI (Container Storage Interface) represents storage that is handled by an external CSI driver (Alpha feature). - // +optional - CSI *CSIVolumeSource `json:"csi,omitempty" protobuf:"bytes,28,opt,name=csi"` } // PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace. @@ -194,7 +191,7 @@ type PersistentVolumeSource struct { // exposed to the pod. Provisioned by an admin. // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md // +optional - Glusterfs *GlusterfsPersistentVolumeSource `json:"glusterfs,omitempty" protobuf:"bytes,4,opt,name=glusterfs"` + Glusterfs *GlusterfsVolumeSource `json:"glusterfs,omitempty" protobuf:"bytes,4,opt,name=glusterfs"` // NFS represents an NFS mount on the host. Provisioned by an admin. // More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs // +optional @@ -251,7 +248,7 @@ type PersistentVolumeSource struct { // More info: https://releases.k8s.io/HEAD/examples/volumes/storageos/README.md // +optional StorageOS *StorageOSPersistentVolumeSource `json:"storageos,omitempty" protobuf:"bytes,21,opt,name=storageos"` - // CSI represents storage that is handled by an external CSI driver (Beta feature). + // CSI represents storage that handled by an external CSI driver (Beta feature). // +optional CSI *CSIPersistentVolumeSource `json:"csi,omitempty" protobuf:"bytes,22,opt,name=csi"` } @@ -329,7 +326,7 @@ type PersistentVolumeSpec struct { MountOptions []string `json:"mountOptions,omitempty" protobuf:"bytes,7,opt,name=mountOptions"` // volumeMode defines if a volume is intended to be used with a formatted filesystem // or to remain in raw block state. Value of Filesystem is implied when not included in spec. - // This is a beta feature. + // This is an alpha feature and may change in the future. // +optional VolumeMode *PersistentVolumeMode `json:"volumeMode,omitempty" protobuf:"bytes,8,opt,name=volumeMode,casttype=PersistentVolumeMode"` // NodeAffinity defines constraints that limit what nodes this volume can be accessed from. @@ -458,7 +455,7 @@ type PersistentVolumeClaimSpec struct { StorageClassName *string `json:"storageClassName,omitempty" protobuf:"bytes,5,opt,name=storageClassName"` // volumeMode defines what type of volume is required by the claim. // Value of Filesystem is implied when not included in claim spec. - // This is a beta feature. + // This is an alpha feature and may change in the future. 
// +optional VolumeMode *PersistentVolumeMode `json:"volumeMode,omitempty" protobuf:"bytes,6,opt,name=volumeMode,casttype=PersistentVolumeMode"` // This field requires the VolumeSnapshotDataSource alpha feature gate to be @@ -470,7 +467,7 @@ type PersistentVolumeClaimSpec struct { // In the future, we plan to support more data source types and the behavior // of the provisioner may change. // +optional - DataSource *TypedLocalObjectReference `json:"dataSource,omitempty" protobuf:"bytes,7,opt,name=dataSource"` + DataSource *TypedLocalObjectReference `json:"dataSource" protobuf:"bytes,7,opt,name=dataSource"` } // PersistentVolumeClaimConditionType is a valid value of PersistentVolumeClaimCondition.Type @@ -526,7 +523,7 @@ type PersistentVolumeClaimStatus struct { type PersistentVolumeAccessMode string const ( - // can be mounted in read/write mode to exactly 1 host + // can be mounted read/write mode to exactly 1 host ReadWriteOnce PersistentVolumeAccessMode = "ReadWriteOnce" // can be mounted in read-only mode to many hosts ReadOnlyMany PersistentVolumeAccessMode = "ReadOnlyMany" @@ -639,30 +636,6 @@ type GlusterfsVolumeSource struct { ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"` } -// Represents a Glusterfs mount that lasts the lifetime of a pod. -// Glusterfs volumes do not support ownership management or SELinux relabeling. -type GlusterfsPersistentVolumeSource struct { - // EndpointsName is the endpoint name that details Glusterfs topology. - // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod - EndpointsName string `json:"endpoints" protobuf:"bytes,1,opt,name=endpoints"` - - // Path is the Glusterfs volume path. - // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod - Path string `json:"path" protobuf:"bytes,2,opt,name=path"` - - // ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. - // Defaults to false. - // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod - // +optional - ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"` - - // EndpointsNamespace is the namespace that contains Glusterfs endpoint. - // If this field is empty, the EndpointNamespace defaults to the same namespace as the bound PVC. - // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod - // +optional - EndpointsNamespace *string `json:"endpointsNamespace,omitempty" protobuf:"bytes,4,opt,name=endpointsNamespace"` -} - // Represents a Rados Block Device mount that lasts the lifetime of a pod. // RBD volumes support ownership management and SELinux relabeling. type RBDVolumeSource struct { @@ -958,11 +931,6 @@ type QuobyteVolumeSource struct { // Default is no group // +optional Group string `json:"group,omitempty" protobuf:"bytes,5,opt,name=group"` - - // Tenant owning the given Quobyte volume in the Backend - // Used with dynamically provisioned Quobyte volumes, value is set by the plugin - // +optional - Tenant string `json:"tenant,omitempty" protobuf:"bytes,6,opt,name=tenant"` } // FlexPersistentVolumeSource represents a generic persistent volume resource that is @@ -1094,7 +1062,7 @@ type SecretVolumeSource struct { // mode, like fsGroup, and the result can be other mode bits set. 
// +optional DefaultMode *int32 `json:"defaultMode,omitempty" protobuf:"bytes,3,opt,name=defaultMode"` - // Specify whether the Secret or its keys must be defined + // Specify whether the Secret or it's keys must be defined // +optional Optional *bool `json:"optional,omitempty" protobuf:"varint,4,opt,name=optional"` } @@ -1520,7 +1488,7 @@ type ConfigMapVolumeSource struct { // mode, like fsGroup, and the result can be other mode bits set. // +optional DefaultMode *int32 `json:"defaultMode,omitempty" protobuf:"varint,3,opt,name=defaultMode"` - // Specify whether the ConfigMap or its keys must be defined + // Specify whether the ConfigMap or it's keys must be defined // +optional Optional *bool `json:"optional,omitempty" protobuf:"varint,4,opt,name=optional"` } @@ -1547,7 +1515,7 @@ type ConfigMapProjection struct { // relative and may not contain the '..' path or start with '..'. // +optional Items []KeyToPath `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"` - // Specify whether the ConfigMap or its keys must be defined + // Specify whether the ConfigMap or it's keys must be defined // +optional Optional *bool `json:"optional,omitempty" protobuf:"varint,4,opt,name=optional"` } @@ -1672,7 +1640,7 @@ type CSIPersistentVolumeSource struct { // ControllerPublishSecretRef is a reference to the secret object containing // sensitive information to pass to the CSI driver to complete the CSI // ControllerPublishVolume and ControllerUnpublishVolume calls. - // This field is optional, and may be empty if no secret is required. If the + // This field is optional, and may be empty if no secret is required. If the // secret object contains more than one secret, all secrets are passed. // +optional ControllerPublishSecretRef *SecretReference `json:"controllerPublishSecretRef,omitempty" protobuf:"bytes,6,opt,name=controllerPublishSecretRef"` @@ -1680,7 +1648,7 @@ type CSIPersistentVolumeSource struct { // NodeStageSecretRef is a reference to the secret object containing sensitive // information to pass to the CSI driver to complete the CSI NodeStageVolume // and NodeStageVolume and NodeUnstageVolume calls. - // This field is optional, and may be empty if no secret is required. If the + // This field is optional, and may be empty if no secret is required. If the // secret object contains more than one secret, all secrets are passed. // +optional NodeStageSecretRef *SecretReference `json:"nodeStageSecretRef,omitempty" protobuf:"bytes,7,opt,name=nodeStageSecretRef"` @@ -1688,50 +1656,10 @@ type CSIPersistentVolumeSource struct { // NodePublishSecretRef is a reference to the secret object containing // sensitive information to pass to the CSI driver to complete the CSI // NodePublishVolume and NodeUnpublishVolume calls. - // This field is optional, and may be empty if no secret is required. If the + // This field is optional, and may be empty if no secret is required. If the // secret object contains more than one secret, all secrets are passed. // +optional NodePublishSecretRef *SecretReference `json:"nodePublishSecretRef,omitempty" protobuf:"bytes,8,opt,name=nodePublishSecretRef"` - - // ControllerExpandSecretRef is a reference to the secret object containing - // sensitive information to pass to the CSI driver to complete the CSI - // ControllerExpandVolume call. - // This is an alpha field and requires enabling ExpandCSIVolumes feature gate. - // This field is optional, and may be empty if no secret is required. If the - // secret object contains more than one secret, all secrets are passed. 
- // +optional - ControllerExpandSecretRef *SecretReference `json:"controllerExpandSecretRef,omitempty" protobuf:"bytes,9,opt,name=controllerExpandSecretRef"` -} - -// Represents a source location of a volume to mount, managed by an external CSI driver -type CSIVolumeSource struct { - // Driver is the name of the CSI driver that handles this volume. - // Consult with your admin for the correct name as registered in the cluster. - Driver string `json:"driver" protobuf:"bytes,1,opt,name=driver"` - - // Specifies a read-only configuration for the volume. - // Defaults to false (read/write). - // +optional - ReadOnly *bool `json:"readOnly,omitempty" protobuf:"varint,2,opt,name=readOnly"` - - // Filesystem type to mount. Ex. "ext4", "xfs", "ntfs". - // If not provided, the empty value is passed to the associated CSI driver - // which will determine the default filesystem to apply. - // +optional - FSType *string `json:"fsType,omitempty" protobuf:"bytes,3,opt,name=fsType"` - - // VolumeAttributes stores driver-specific properties that are passed to the CSI - // driver. Consult your driver's documentation for supported values. - // +optional - VolumeAttributes map[string]string `json:"volumeAttributes,omitempty" protobuf:"bytes,4,rep,name=volumeAttributes"` - - // NodePublishSecretRef is a reference to the secret object containing - // sensitive information to pass to the CSI driver to complete the CSI - // NodePublishVolume and NodeUnpublishVolume calls. - // This field is optional, and may be empty if no secret is required. If the - // secret object contains more than one secret, all secret references are passed. - // +optional - NodePublishSecretRef *LocalObjectReference `json:"nodePublishSecretRef,omitempty" protobuf:"bytes,5,opt,name=nodePublishSecretRef"` } // ContainerPort represents a network port in a single container. @@ -1780,13 +1708,6 @@ type VolumeMount struct { // This field is beta in 1.10. // +optional MountPropagation *MountPropagationMode `json:"mountPropagation,omitempty" protobuf:"bytes,5,opt,name=mountPropagation,casttype=MountPropagationMode"` - // Expanded path within the volume from which the container's volume should be mounted. - // Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. - // Defaults to "" (volume's root). - // SubPathExpr and SubPath are mutually exclusive. - // This field is beta in 1.15. - // +optional - SubPathExpr string `json:"subPathExpr,omitempty" protobuf:"bytes,6,opt,name=subPathExpr"` } // MountPropagationMode describes mount propagation. @@ -1889,7 +1810,7 @@ type ConfigMapKeySelector struct { LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"` // The key to select. Key string `json:"key" protobuf:"bytes,2,opt,name=key"` - // Specify whether the ConfigMap or its key must be defined + // Specify whether the ConfigMap or it's key must be defined // +optional Optional *bool `json:"optional,omitempty" protobuf:"varint,3,opt,name=optional"` } @@ -1900,7 +1821,7 @@ type SecretKeySelector struct { LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"` // The key of the secret to select from. Must be a valid secret key. 
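ConfigMapKeySelector and SecretKeySelector, defined above, are what EnvVarSource uses to project a single key into an environment variable. A sketch assuming a hypothetical Secret named "db-creds" with a "password" key:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	optional := false // fail pod startup if the key is absent
	env := corev1.EnvVar{
		Name: "DB_PASSWORD",
		ValueFrom: &corev1.EnvVarSource{
			SecretKeyRef: &corev1.SecretKeySelector{
				LocalObjectReference: corev1.LocalObjectReference{Name: "db-creds"},
				Key:      "password",
				Optional: &optional,
			},
		},
	}
	fmt.Println(env.Name, env.ValueFrom.SecretKeyRef.Key)
}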
Key string `json:"key" protobuf:"bytes,2,opt,name=key"` - // Specify whether the Secret or its key must be defined + // Specify whether the Secret or it's key must be defined // +optional Optional *bool `json:"optional,omitempty" protobuf:"varint,3,opt,name=optional"` } @@ -2046,16 +1967,6 @@ const ( PullIfNotPresent PullPolicy = "IfNotPresent" ) -// PreemptionPolicy describes a policy for if/when to preempt a pod. -type PreemptionPolicy string - -const ( - // PreemptLowerPriority means that pod can preempt other pods with lower priority. - PreemptLowerPriority PreemptionPolicy = "PreemptLowerPriority" - // PreemptNever means that pod never preempts other pods with lower priority. - PreemptNever PreemptionPolicy = "Never" -) - // TerminationMessagePolicy describes how termination messages are retrieved from a container. type TerminationMessagePolicy string @@ -2149,9 +2060,6 @@ type Container struct { // +optional // +patchMergeKey=containerPort // +patchStrategy=merge - // +listType=map - // +listMapKey=containerPort - // +listMapKey=protocol Ports []ContainerPort `json:"ports,omitempty" patchStrategy:"merge" patchMergeKey:"containerPort" protobuf:"bytes,6,rep,name=ports"` // List of sources to populate environment variables in the container. // The keys defined within a source must be a C_IDENTIFIER. All invalid keys @@ -2179,7 +2087,7 @@ type Container struct { // +patchStrategy=merge VolumeMounts []VolumeMount `json:"volumeMounts,omitempty" patchStrategy:"merge" patchMergeKey:"mountPath" protobuf:"bytes,9,rep,name=volumeMounts"` // volumeDevices is the list of block devices to be used by the container. - // This is a beta feature. + // This is an alpha feature and may change in the future. // +patchMergeKey=devicePath // +patchStrategy=merge // +optional @@ -2281,15 +2189,11 @@ type Lifecycle struct { // More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks // +optional PostStart *Handler `json:"postStart,omitempty" protobuf:"bytes,1,opt,name=postStart"` - // PreStop is called immediately before a container is terminated due to an - // API request or management event such as liveness probe failure, - // preemption, resource contention, etc. The handler is not called if the - // container crashes or exits. The reason for termination is passed to the - // handler. The Pod's termination grace period countdown begins before the - // PreStop hooked is executed. Regardless of the outcome of the handler, the - // container will eventually terminate within the Pod's termination grace - // period. Other management of the container blocks until the hook completes - // or until the termination grace period is reached. + // PreStop is called immediately before a container is terminated. + // The container is terminated after the handler completes. + // The reason for termination is passed to the handler. + // Regardless of the outcome of the handler, the container is eventually terminated. + // Other management of the container blocks until the hook completes. // More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks // +optional PreStop *Handler `json:"preStop,omitempty" protobuf:"bytes,2,opt,name=preStop"` @@ -2420,22 +2324,18 @@ type PodConditionType string // These are valid conditions of pod. const ( - // ContainersReady indicates whether all containers in the pod are ready. 
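The PreStop semantics described in the Lifecycle hunk above are wired up through Container.Lifecycle; note that at this vendored revision the hook type is still named Handler. A sketch with an illustrative drain command:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	c := corev1.Container{
		Name:  "web", // illustrative
		Image: "nginx",
		Lifecycle: &corev1.Lifecycle{
			// Give in-flight requests a moment to drain before the
			// container is terminated; management blocks on this hook.
			PreStop: &corev1.Handler{
				Exec: &corev1.ExecAction{
					Command: []string{"/bin/sh", "-c", "sleep 5"},
				},
			},
		},
	}
	fmt.Println(c.Lifecycle.PreStop.Exec.Command)
}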
- ContainersReady PodConditionType = "ContainersReady" - // PodInitialized means that all init containers in the pod have started successfully. - PodInitialized PodConditionType = "Initialized" + // PodScheduled represents status of the scheduling process for this pod. + PodScheduled PodConditionType = "PodScheduled" // PodReady means the pod is able to service requests and should be added to the // load balancing pools of all matching services. PodReady PodConditionType = "Ready" - // PodScheduled represents status of the scheduling process for this pod. - PodScheduled PodConditionType = "PodScheduled" -) - -// These are reasons for a pod's transition to a condition. -const ( + // PodInitialized means that all init containers in the pod have started successfully. + PodInitialized PodConditionType = "Initialized" // PodReasonUnschedulable reason in PodScheduled PodCondition means that the scheduler // can't schedule the pod right now, for example due to insufficient resources in the cluster. PodReasonUnschedulable = "Unschedulable" + // ContainersReady indicates whether all containers in the pod are ready. + ContainersReady PodConditionType = "ContainersReady" ) // PodCondition contains details for the current condition of this pod. @@ -2976,38 +2876,23 @@ type PodSpec struct { // configuration based on DNSPolicy. // +optional DNSConfig *PodDNSConfig `json:"dnsConfig,omitempty" protobuf:"bytes,26,opt,name=dnsConfig"` + // If specified, all readiness gates will be evaluated for pod readiness. // A pod is ready when all its containers are ready AND // all conditions specified in the readiness gates have status equal to "True" - // More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md + // More info: https://github.com/kubernetes/community/blob/master/keps/sig-network/0007-pod-ready%2B%2B.md // +optional ReadinessGates []PodReadinessGate `json:"readinessGates,omitempty" protobuf:"bytes,28,opt,name=readinessGates"` // RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used // to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. // If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an // empty definition that uses the default runtime handler. - // More info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md - // This is a beta feature as of Kubernetes v1.14. + // More info: https://github.com/kubernetes/community/blob/master/keps/sig-node/0014-runtime-class.md + // This is an alpha feature and may change in the future. // +optional RuntimeClassName *string `json:"runtimeClassName,omitempty" protobuf:"bytes,29,opt,name=runtimeClassName"` - // EnableServiceLinks indicates whether information about services should be injected into pod's - // environment variables, matching the syntax of Docker links. - // Optional: Defaults to true. - // +optional - EnableServiceLinks *bool `json:"enableServiceLinks,omitempty" protobuf:"varint,30,opt,name=enableServiceLinks"` - // PreemptionPolicy is the Policy for preempting pods with lower priority. - // One of Never, PreemptLowerPriority. - // Defaults to PreemptLowerPriority if unset. - // This field is alpha-level and is only honored by servers that enable the NonPreemptingPriority feature. - // +optional - PreemptionPolicy *PreemptionPolicy `json:"preemptionPolicy,omitempty" protobuf:"bytes,31,opt,name=preemptionPolicy"` } -const ( - // The default value for enableServiceLinks attribute. 
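The PodConditionType constants rearranged above are normally read back out of PodStatus. A small illustrative helper (isPodReady is not part of the vendored API) that checks the Ready condition:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// isPodReady scans the pod's status conditions for the Ready
// condition and reports whether it is True.
func isPodReady(pod *corev1.Pod) bool {
	for _, cond := range pod.Status.Conditions {
		if cond.Type == corev1.PodReady {
			return cond.Status == corev1.ConditionTrue
		}
	}
	return false
}

func main() {
	pod := &corev1.Pod{
		Status: corev1.PodStatus{
			Conditions: []corev1.PodCondition{
				{Type: corev1.PodReady, Status: corev1.ConditionTrue},
			},
		},
	}
	fmt.Println(isPodReady(pod))
}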
- DefaultEnableServiceLinks = true -) - // HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the // pod's hosts file. type HostAlias struct { @@ -3028,9 +2913,6 @@ type PodSecurityContext struct { // takes precedence for that container. // +optional SELinuxOptions *SELinuxOptions `json:"seLinuxOptions,omitempty" protobuf:"bytes,1,opt,name=seLinuxOptions"` - // Windows security options. - // +optional - WindowsOptions *WindowsSecurityContextOptions `json:"windowsOptions,omitempty" protobuf:"bytes,8,opt,name=windowsOptions"` // The UID to run the entrypoint of the container process. // Defaults to user specified in image metadata if unspecified. // May also be set in SecurityContext. If set in both SecurityContext and @@ -3391,8 +3273,8 @@ type ReplicationControllerCondition struct { } // +genclient -// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/api/autoscaling/v1.Scale -// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale +// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/api/extensions/v1beta1.Scale +// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/extensions/v1beta1.Scale,result=k8s.io/api/extensions/v1beta1.Scale // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // ReplicationController represents the configuration of a replication controller. @@ -3532,9 +3414,6 @@ type ServiceSpec struct { // More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies // +patchMergeKey=port // +patchStrategy=merge - // +listType=map - // +listMapKey=port - // +listMapKey=protocol Ports []ServicePort `json:"ports,omitempty" patchStrategy:"merge" patchMergeKey:"port" protobuf:"bytes,1,rep,name=ports"` // Route service traffic to pods with label keys and values matching this @@ -3571,7 +3450,7 @@ type ServiceSpec struct { // "LoadBalancer" builds on NodePort and creates an // external load-balancer (if supported in the current cloud) which routes // to the clusterIP. - // More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types + // More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services---service-types // +optional Type ServiceType `json:"type,omitempty" protobuf:"bytes,4,opt,name=type,casttype=ServiceType"` @@ -4732,7 +4611,6 @@ type EventSeries struct { // Time of the last occurrence observed LastObservedTime metav1.MicroTime `json:"lastObservedTime,omitempty" protobuf:"bytes,2,name=lastObservedTime"` // State of this Series: Ongoing or Finished - // Deprecated. Planned removal for 1.18 State EventSeriesState `json:"state,omitempty" protobuf:"bytes,3,name=state"` } @@ -5109,10 +4987,6 @@ const ( TLSCertKey = "tls.crt" // TLSPrivateKeyKey is the key for the private key field in a TLS secret. TLSPrivateKeyKey = "tls.key" - // SecretTypeBootstrapToken is used during the automated bootstrap process (first - // implemented by kubeadm). It stores tokens that are used to sign well known - // ConfigMaps. They are used for authn. - SecretTypeBootstrapToken SecretType = "bootstrap.kubernetes.io/token" ) // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -5299,9 +5173,6 @@ type SecurityContext struct { // PodSecurityContext, the value specified in SecurityContext takes precedence. 
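The ServiceSpec selector, ports, and type fields documented above compose as follows; the labels and port numbers are made up, and intstr comes from the vendored apimachinery module:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	spec := corev1.ServiceSpec{
		// Route to pods labeled app=web (illustrative selector).
		Selector: map[string]string{"app": "web"},
		Ports: []corev1.ServicePort{{
			Port:       80,
			TargetPort: intstr.FromInt(8080),
			Protocol:   corev1.ProtocolTCP,
		}},
		// NodePort builds on ClusterIP and opens a port on every node.
		Type: corev1.ServiceTypeNodePort,
	}
	fmt.Println(spec.Type, spec.Ports[0].Port)
}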
// +optional SELinuxOptions *SELinuxOptions `json:"seLinuxOptions,omitempty" protobuf:"bytes,3,opt,name=seLinuxOptions"` - // Windows security options. - // +optional - WindowsOptions *WindowsSecurityContextOptions `json:"windowsOptions,omitempty" protobuf:"bytes,10,opt,name=windowsOptions"` // The UID to run the entrypoint of the container process. // Defaults to user specified in image metadata if unspecified. // May also be set in PodSecurityContext. If set in both SecurityContext and @@ -5372,21 +5243,6 @@ type SELinuxOptions struct { Level string `json:"level,omitempty" protobuf:"bytes,4,opt,name=level"` } -// WindowsSecurityContextOptions contain Windows-specific options and credentials. -type WindowsSecurityContextOptions struct { - // GMSACredentialSpecName is the name of the GMSA credential spec to use. - // This field is alpha-level and is only honored by servers that enable the WindowsGMSA feature flag. - // +optional - GMSACredentialSpecName *string `json:"gmsaCredentialSpecName,omitempty" protobuf:"bytes,1,opt,name=gmsaCredentialSpecName"` - - // GMSACredentialSpec is where the GMSA admission webhook - // (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the - // GMSA credential spec named by the GMSACredentialSpecName field. - // This field is alpha-level and is only honored by servers that enable the WindowsGMSA feature flag. - // +optional - GMSACredentialSpec *string `json:"gmsaCredentialSpec,omitempty" protobuf:"bytes,2,opt,name=gmsaCredentialSpec"` -} - // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // RangeAllocation is not a public type. diff --git a/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go index c0489ca17..c781e5452 100644 --- a/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go @@ -123,29 +123,15 @@ var map_CSIPersistentVolumeSource = map[string]string{ "readOnly": "Optional: The value to pass to ControllerPublishVolumeRequest. Defaults to false (read/write).", "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\".", "volumeAttributes": "Attributes of the volume to publish.", - "controllerPublishSecretRef": "ControllerPublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI ControllerPublishVolume and ControllerUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.", - "nodeStageSecretRef": "NodeStageSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodeStageVolume and NodeStageVolume and NodeUnstageVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.", - "nodePublishSecretRef": "NodePublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodePublishVolume and NodeUnpublishVolume calls. This field is optional, and may be empty if no secret is required. 
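As the SecurityContext comments above state, container-level values take precedence over PodSecurityContext. A hedged sketch of the run-as-non-root pattern with an illustrative UID:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	runAsNonRoot := true
	uid := int64(1000) // illustrative UID
	sc := &corev1.SecurityContext{
		// The kubelet refuses to start the container if the image
		// would run as UID 0 while RunAsNonRoot is true.
		RunAsNonRoot: &runAsNonRoot,
		RunAsUser:    &uid,
	}
	fmt.Println(*sc.RunAsNonRoot, *sc.RunAsUser)
}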
If the secret object contains more than one secret, all secrets are passed.", - "controllerExpandSecretRef": "ControllerExpandSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI ControllerExpandVolume call. This is an alpha field and requires enabling ExpandCSIVolumes feature gate. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.", + "controllerPublishSecretRef": "ControllerPublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI ControllerPublishVolume and ControllerUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.", + "nodeStageSecretRef": "NodeStageSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodeStageVolume and NodeStageVolume and NodeUnstageVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.", + "nodePublishSecretRef": "NodePublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodePublishVolume and NodeUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.", } func (CSIPersistentVolumeSource) SwaggerDoc() map[string]string { return map_CSIPersistentVolumeSource } -var map_CSIVolumeSource = map[string]string{ - "": "Represents a source location of a volume to mount, managed by an external CSI driver", - "driver": "Driver is the name of the CSI driver that handles this volume. Consult with your admin for the correct name as registered in the cluster.", - "readOnly": "Specifies a read-only configuration for the volume. Defaults to false (read/write).", - "fsType": "Filesystem type to mount. Ex. \"ext4\", \"xfs\", \"ntfs\". If not provided, the empty value is passed to the associated CSI driver which will determine the default filesystem to apply.", - "volumeAttributes": "VolumeAttributes stores driver-specific properties that are passed to the CSI driver. Consult your driver's documentation for supported values.", - "nodePublishSecretRef": "NodePublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodePublishVolume and NodeUnpublishVolume calls. This field is optional, and may be empty if no secret is required. 
If the secret object contains more than one secret, all secret references are passed.", -} - -func (CSIVolumeSource) SwaggerDoc() map[string]string { - return map_CSIVolumeSource -} - var map_Capabilities = map[string]string{ "": "Adds and removes POSIX capabilities from running containers.", "add": "Added capabilities", @@ -272,7 +258,7 @@ func (ConfigMapEnvSource) SwaggerDoc() map[string]string { var map_ConfigMapKeySelector = map[string]string{ "": "Selects a key from a ConfigMap.", "key": "The key to select.", - "optional": "Specify whether the ConfigMap or its key must be defined", + "optional": "Specify whether the ConfigMap or it's key must be defined", } func (ConfigMapKeySelector) SwaggerDoc() map[string]string { @@ -305,7 +291,7 @@ func (ConfigMapNodeConfigSource) SwaggerDoc() map[string]string { var map_ConfigMapProjection = map[string]string{ "": "Adapts a ConfigMap into a projected volume.\n\nThe contents of the target ConfigMap's Data field will be presented in a projected volume as files using the keys in the Data field as the file names, unless the items element is populated with specific mappings of keys to paths. Note that this is identical to a configmap volume source without the default mode.", "items": "If unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.", - "optional": "Specify whether the ConfigMap or its keys must be defined", + "optional": "Specify whether the ConfigMap or it's keys must be defined", } func (ConfigMapProjection) SwaggerDoc() map[string]string { @@ -316,7 +302,7 @@ var map_ConfigMapVolumeSource = map[string]string{ "": "Adapts a ConfigMap into a volume.\n\nThe contents of the target ConfigMap's Data field will be presented in a volume as files using the keys in the Data field as the file names, unless the items element is populated with specific mappings of keys to paths. ConfigMap volumes support ownership management and SELinux relabeling.", "items": "If unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.", "defaultMode": "Optional: mode bits to use on created files by default. Must be a value between 0 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", - "optional": "Specify whether the ConfigMap or its keys must be defined", + "optional": "Specify whether the ConfigMap or it's keys must be defined", } func (ConfigMapVolumeSource) SwaggerDoc() map[string]string { @@ -335,7 +321,7 @@ var map_Container = map[string]string{ "env": "List of environment variables to set in the container. 
Cannot be updated.", "resources": "Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/", "volumeMounts": "Pod volumes to mount into the container's filesystem. Cannot be updated.", - "volumeDevices": "volumeDevices is the list of block devices to be used by the container. This is a beta feature.", + "volumeDevices": "volumeDevices is the list of block devices to be used by the container. This is an alpha feature and may change in the future.", "livenessProbe": "Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", "readinessProbe": "Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", "lifecycle": "Actions that the management system should take in response to container lifecycle events. Cannot be updated.", @@ -611,7 +597,7 @@ var map_EventSeries = map[string]string{ "": "EventSeries contain information on series of events, i.e. thing that was/is happening continuously for some time.", "count": "Number of occurrences in this series up to the last heartbeat time", "lastObservedTime": "Time of the last occurrence observed", - "state": "State of this Series: Ongoing or Finished Deprecated. Planned removal for 1.18", + "state": "State of this Series: Ongoing or Finished", } func (EventSeries) SwaggerDoc() map[string]string { @@ -709,18 +695,6 @@ func (GitRepoVolumeSource) SwaggerDoc() map[string]string { return map_GitRepoVolumeSource } -var map_GlusterfsPersistentVolumeSource = map[string]string{ - "": "Represents a Glusterfs mount that lasts the lifetime of a pod. Glusterfs volumes do not support ownership management or SELinux relabeling.", - "endpoints": "EndpointsName is the endpoint name that details Glusterfs topology. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod", - "path": "Path is the Glusterfs volume path. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod", - "readOnly": "ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod", - "endpointsNamespace": "EndpointsNamespace is the namespace that contains Glusterfs endpoint. If this field is empty, the EndpointNamespace defaults to the same namespace as the bound PVC. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod", -} - -func (GlusterfsPersistentVolumeSource) SwaggerDoc() map[string]string { - return map_GlusterfsPersistentVolumeSource -} - var map_GlusterfsVolumeSource = map[string]string{ "": "Represents a Glusterfs mount that lasts the lifetime of a pod. Glusterfs volumes do not support ownership management or SELinux relabeling.", "endpoints": "EndpointsName is the endpoint name that details Glusterfs topology. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod", @@ -838,7 +812,7 @@ func (KeyToPath) SwaggerDoc() map[string]string { var map_Lifecycle = map[string]string{ "": "Lifecycle describes actions that the management system should take in response to container lifecycle events. 
For the PostStart and PreStop lifecycle handlers, management of the container blocks until the action is complete, unless the container process fails, in which case the handler is aborted.", "postStart": "PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks", - "preStop": "PreStop is called immediately before a container is terminated due to an API request or management event such as liveness probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The reason for termination is passed to the handler. The Pod's termination grace period countdown begins before the PreStop hooked is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod's termination grace period. Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks", + "preStop": "PreStop is called immediately before a container is terminated. The container is terminated after the handler completes. The reason for termination is passed to the handler. Regardless of the outcome of the handler, the container is eventually terminated. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks", } func (Lifecycle) SwaggerDoc() map[string]string { @@ -1236,7 +1210,7 @@ var map_PersistentVolumeClaimSpec = map[string]string{ "resources": "Resources represents the minimum resources the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources", "volumeName": "VolumeName is the binding reference to the PersistentVolume backing this claim.", "storageClassName": "Name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1", - "volumeMode": "volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec. This is a beta feature.", + "volumeMode": "volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec. This is an alpha feature and may change in the future.", "dataSource": "This field requires the VolumeSnapshotDataSource alpha feature gate to be enabled and currently VolumeSnapshot is the only supported data source. If the provisioner can support VolumeSnapshot data source, it will create a new volume and data will be restored to the volume at the same time. If the provisioner does not support VolumeSnapshot data source, volume will not be created and the failure will be reported as an event. 
In the future, we plan to support more data source types and the behavior of the provisioner may change.", } @@ -1299,7 +1273,7 @@ var map_PersistentVolumeSource = map[string]string{ "scaleIO": "ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.", "local": "Local represents directly-attached storage with node affinity", "storageos": "StorageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod More info: https://releases.k8s.io/HEAD/examples/volumes/storageos/README.md", - "csi": "CSI represents storage that is handled by an external CSI driver (Beta feature).", + "csi": "CSI represents storage that handled by an external CSI driver (Beta feature).", } func (PersistentVolumeSource) SwaggerDoc() map[string]string { @@ -1314,7 +1288,7 @@ var map_PersistentVolumeSpec = map[string]string{ "persistentVolumeReclaimPolicy": "What happens to a persistent volume when released from its claim. Valid options are Retain (default for manually created PersistentVolumes), Delete (default for dynamically provisioned PersistentVolumes), and Recycle (deprecated). Recycle must be supported by the volume plugin underlying this PersistentVolume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#reclaiming", "storageClassName": "Name of StorageClass to which this persistent volume belongs. Empty value means that this volume does not belong to any StorageClass.", "mountOptions": "A list of mount options, e.g. [\"ro\", \"soft\"]. Not validated - mount will simply fail if one is invalid. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options", - "volumeMode": "volumeMode defines if a volume is intended to be used with a formatted filesystem or to remain in raw block state. Value of Filesystem is implied when not included in spec. This is a beta feature.", + "volumeMode": "volumeMode defines if a volume is intended to be used with a formatted filesystem or to remain in raw block state. Value of Filesystem is implied when not included in spec. This is an alpha feature and may change in the future.", "nodeAffinity": "NodeAffinity defines constraints that limit what nodes this volume can be accessed from. This field influences the scheduling of pods that use this volume.", } @@ -1502,7 +1476,6 @@ func (PodReadinessGate) SwaggerDoc() map[string]string { var map_PodSecurityContext = map[string]string{ "": "PodSecurityContext holds pod-level security attributes and common container settings. Some fields are also present in container.securityContext. Field values of container.securityContext take precedence over field values of PodSecurityContext.", "seLinuxOptions": "The SELinux context to be applied to all containers. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container.", - "windowsOptions": "Windows security options.", "runAsUser": "The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container.", "runAsGroup": "The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. 
If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container.", "runAsNonRoot": "Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", @@ -1553,10 +1526,8 @@ var map_PodSpec = map[string]string{ "priorityClassName": "If specified, indicates the pod's priority. \"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.", "priority": "The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority.", "dnsConfig": "Specifies the DNS parameters of a pod. Parameters specified here will be merged to the generated DNS configuration based on DNSPolicy.", - "readinessGates": "If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \"True\" More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md", - "runtimeClassName": "RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md This is a beta feature as of Kubernetes v1.14.", - "enableServiceLinks": "EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links. Optional: Defaults to true.", - "preemptionPolicy": "PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. This field is alpha-level and is only honored by servers that enable the NonPreemptingPriority feature.", + "readinessGates": "If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \"True\" More info: https://github.com/kubernetes/community/blob/master/keps/sig-network/0007-pod-ready%2B%2B.md", + "runtimeClassName": "RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. 
More info: https://github.com/kubernetes/community/blob/master/keps/sig-node/0014-runtime-class.md This is an alpha feature and may change in the future.", } func (PodSpec) SwaggerDoc() map[string]string { @@ -1665,7 +1636,7 @@ func (PreferredSchedulingTerm) SwaggerDoc() map[string]string { } var map_Probe = map[string]string{ - "": "Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic.", + "": "Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic.", "initialDelaySeconds": "Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", "timeoutSeconds": "Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", "periodSeconds": "How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1.", @@ -1694,7 +1665,6 @@ var map_QuobyteVolumeSource = map[string]string{ "readOnly": "ReadOnly here will force the Quobyte volume to be mounted with read-only permissions. Defaults to false.", "user": "User to map volume access to Defaults to serivceaccount user", "group": "Group to map volume access to Default is no group", - "tenant": "Tenant owning the given Quobyte volume in the Backend Used with dynamically provisioned Quobyte volumes, value is set by the plugin", } func (QuobyteVolumeSource) SwaggerDoc() map[string]string { @@ -1959,7 +1929,7 @@ func (SecretEnvSource) SwaggerDoc() map[string]string { var map_SecretKeySelector = map[string]string{ "": "SecretKeySelector selects a key of a Secret.", "key": "The key of the secret to select from. Must be a valid secret key.", - "optional": "Specify whether the Secret or its key must be defined", + "optional": "Specify whether the Secret or it's key must be defined", } func (SecretKeySelector) SwaggerDoc() map[string]string { @@ -2001,7 +1971,7 @@ var map_SecretVolumeSource = map[string]string{ "secretName": "Name of the secret in the pod's namespace to use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret", "items": "If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.", "defaultMode": "Optional: mode bits to use on created files by default. Must be a value between 0 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", - "optional": "Specify whether the Secret or its keys must be defined", + "optional": "Specify whether the Secret or it's keys must be defined", } func (SecretVolumeSource) SwaggerDoc() map[string]string { @@ -2013,7 +1983,6 @@ var map_SecurityContext = map[string]string{ "capabilities": "The capabilities to add/drop when running containers. 
Defaults to the default set of capabilities granted by the container runtime.", "privileged": "Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false.", "seLinuxOptions": "The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", - "windowsOptions": "Windows security options.", "runAsUser": "The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", "runAsGroup": "The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", "runAsNonRoot": "Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", @@ -2116,7 +2085,7 @@ var map_ServiceSpec = map[string]string{ "ports": "The list of ports that are exposed by this service. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies", "selector": "Route service traffic to pods with label keys and values matching this selector. If empty or not present, the service is assumed to have an external process managing its endpoints, which Kubernetes will not modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if type is ExternalName. More info: https://kubernetes.io/docs/concepts/services-networking/service/", "clusterIP": "clusterIP is the IP address of the service and is usually assigned randomly by the master. If an address is specified manually and is not in use by others, it will be allocated to the service; otherwise, creation of the service will fail. This field can not be changed through updates. Valid values are \"None\", empty string (\"\"), or a valid IP address. \"None\" can be specified for headless services when proxying is not required. Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if type is ExternalName. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies", - "type": "type determines how the Service is exposed. Defaults to ClusterIP. Valid options are ExternalName, ClusterIP, NodePort, and LoadBalancer. \"ExternalName\" maps to the specified externalName. \"ClusterIP\" allocates a cluster-internal IP address for load-balancing to endpoints. Endpoints are determined by the selector or if that is not specified, by manual construction of an Endpoints object. If clusterIP is \"None\", no virtual IP is allocated and the endpoints are published as a set of endpoints rather than a stable IP. \"NodePort\" builds on ClusterIP and allocates a port on every node which routes to the clusterIP. 
\"LoadBalancer\" builds on NodePort and creates an external load-balancer (if supported in the current cloud) which routes to the clusterIP. More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types", + "type": "type determines how the Service is exposed. Defaults to ClusterIP. Valid options are ExternalName, ClusterIP, NodePort, and LoadBalancer. \"ExternalName\" maps to the specified externalName. \"ClusterIP\" allocates a cluster-internal IP address for load-balancing to endpoints. Endpoints are determined by the selector or if that is not specified, by manual construction of an Endpoints object. If clusterIP is \"None\", no virtual IP is allocated and the endpoints are published as a set of endpoints rather than a stable IP. \"NodePort\" builds on ClusterIP and allocates a port on every node which routes to the clusterIP. \"LoadBalancer\" builds on NodePort and creates an external load-balancer (if supported in the current cloud) which routes to the clusterIP. More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services ", "externalIPs": "externalIPs is a list of IP addresses for which nodes in the cluster will also accept traffic for this service. These IPs are not managed by Kubernetes. The user is responsible for ensuring that traffic arrives at a node with this IP. A common example is external load-balancers that are not part of the Kubernetes system.", "sessionAffinity": "Supports \"ClientIP\" and \"None\". Used to maintain session affinity. Enable client IP based session affinity. Must be ClientIP or None. Defaults to None. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies", "loadBalancerIP": "Only applies to Service Type: LoadBalancer LoadBalancer will get created with the IP specified in this field. This feature depends on whether the underlying cloud-provider supports specifying the loadBalancerIP when a load balancer is created. This field will be ignored if the cloud-provider does not support the feature.", @@ -2232,7 +2201,7 @@ func (TopologySelectorLabelRequirement) SwaggerDoc() map[string]string { } var map_TopologySelectorTerm = map[string]string{ - "": "A topology selector term represents the result of label queries. A null or empty topology selector term matches no objects. The requirements of them are ANDed. It provides a subset of functionality as NodeSelectorTerm. This is an alpha feature and may change in the future.", + "": "A topology selector term represents the result of label queries. A null or empty topology selector term matches no objects. The requirements of them are ANDed. It provides a subset of functionality as NodeSelectorTerm. This is an alpha feature and may change in the future.", "matchLabelExpressions": "A list of topology selector requirements by labels.", } @@ -2277,7 +2246,6 @@ var map_VolumeMount = map[string]string{ "mountPath": "Path within the container at which the volume should be mounted. Must not contain ':'.", "subPath": "Path within the volume from which the container's volume should be mounted. Defaults to \"\" (volume's root).", "mountPropagation": "mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10.", - "subPathExpr": "Expanded path within the volume from which the container's volume should be mounted. 
Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to \"\" (volume's root). SubPathExpr and SubPath are mutually exclusive. This field is beta in 1.15.", } func (VolumeMount) SwaggerDoc() map[string]string { @@ -2317,24 +2285,23 @@ var map_VolumeSource = map[string]string{ "iscsi": "ISCSI represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://releases.k8s.io/HEAD/examples/volumes/iscsi/README.md", "glusterfs": "Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md", "persistentVolumeClaim": "PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims", - "rbd": "RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md", - "flexVolume": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.", - "cinder": "Cinder represents a cinder volume attached and mounted on kubelets host machine More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md", - "cephfs": "CephFS represents a Ceph FS mount on the host that shares a pod's lifetime", - "flocker": "Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running", - "downwardAPI": "DownwardAPI represents downward API about the pod that should populate this volume", - "fc": "FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.", - "azureFile": "AzureFile represents an Azure File Service mount on the host and bind mount to the pod.", - "configMap": "ConfigMap represents a configMap that should populate this volume", - "vsphereVolume": "VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine", - "quobyte": "Quobyte represents a Quobyte mount on the host that shares a pod's lifetime", - "azureDisk": "AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.", - "photonPersistentDisk": "PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine", - "projected": "Items for all in one resources secrets, configmaps, and downward API", - "portworxVolume": "PortworxVolume represents a portworx volume attached and mounted on kubelets host machine", - "scaleIO": "ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.", - "storageos": "StorageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.", - "csi": "CSI (Container Storage Interface) represents storage that is handled by an external CSI driver (Alpha feature).", + "rbd": "RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. 
More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md", + "flexVolume": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.", + "cinder": "Cinder represents a cinder volume attached and mounted on kubelets host machine More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md", + "cephfs": "CephFS represents a Ceph FS mount on the host that shares a pod's lifetime", + "flocker": "Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running", + "downwardAPI": "DownwardAPI represents downward API about the pod that should populate this volume", + "fc": "FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.", + "azureFile": "AzureFile represents an Azure File Service mount on the host and bind mount to the pod.", + "configMap": "ConfigMap represents a configMap that should populate this volume", + "vsphereVolume": "VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine", + "quobyte": "Quobyte represents a Quobyte mount on the host that shares a pod's lifetime", + "azureDisk": "AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.", + "photonPersistentDisk": "PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine", + "projected": "Items for all in one resources secrets, configmaps, and downward API", + "portworxVolume": "PortworxVolume represents a portworx volume attached and mounted on kubelets host machine", + "scaleIO": "ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.", + "storageos": "StorageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.", } func (VolumeSource) SwaggerDoc() map[string]string { @@ -2363,14 +2330,4 @@ func (WeightedPodAffinityTerm) SwaggerDoc() map[string]string { return map_WeightedPodAffinityTerm } -var map_WindowsSecurityContextOptions = map[string]string{ - "": "WindowsSecurityContextOptions contain Windows-specific options and credentials.", - "gmsaCredentialSpecName": "GMSACredentialSpecName is the name of the GMSA credential spec to use. This field is alpha-level and is only honored by servers that enable the WindowsGMSA feature flag.", - "gmsaCredentialSpec": "GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field. This field is alpha-level and is only honored by servers that enable the WindowsGMSA feature flag.", -} - -func (WindowsSecurityContextOptions) SwaggerDoc() map[string]string { - return map_WindowsSecurityContextOptions -} - // AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/core/v1/well_known_labels.go b/vendor/k8s.io/api/core/v1/well_known_labels.go deleted file mode 100644 index 4497760d3..000000000 --- a/vendor/k8s.io/api/core/v1/well_known_labels.go +++ /dev/null @@ -1,36 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -const ( - LabelHostname = "kubernetes.io/hostname" - LabelZoneFailureDomain = "failure-domain.beta.kubernetes.io/zone" - LabelZoneRegion = "failure-domain.beta.kubernetes.io/region" - - LabelInstanceType = "beta.kubernetes.io/instance-type" - - LabelOSStable = "kubernetes.io/os" - LabelArchStable = "kubernetes.io/arch" - - // LabelNamespaceSuffixKubelet is an allowed label namespace suffix kubelets can self-set ([*.]kubelet.kubernetes.io/*) - LabelNamespaceSuffixKubelet = "kubelet.kubernetes.io" - // LabelNamespaceSuffixNode is an allowed label namespace suffix kubelets can self-set ([*.]node.kubernetes.io/*) - LabelNamespaceSuffixNode = "node.kubernetes.io" - - // LabelNamespaceNodeRestriction is a forbidden label namespace that kubelets may not self-set when the NodeRestriction admission plugin is enabled - LabelNamespaceNodeRestriction = "node-restriction.kubernetes.io" -) diff --git a/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go index 114e1974c..f8f3471a5 100644 --- a/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go @@ -237,11 +237,6 @@ func (in *CSIPersistentVolumeSource) DeepCopyInto(out *CSIPersistentVolumeSource *out = new(SecretReference) **out = **in } - if in.ControllerExpandSecretRef != nil { - in, out := &in.ControllerExpandSecretRef, &out.ControllerExpandSecretRef - *out = new(SecretReference) - **out = **in - } return } @@ -255,44 +250,6 @@ func (in *CSIPersistentVolumeSource) DeepCopy() *CSIPersistentVolumeSource { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CSIVolumeSource) DeepCopyInto(out *CSIVolumeSource) { - *out = *in - if in.ReadOnly != nil { - in, out := &in.ReadOnly, &out.ReadOnly - *out = new(bool) - **out = **in - } - if in.FSType != nil { - in, out := &in.FSType, &out.FSType - *out = new(string) - **out = **in - } - if in.VolumeAttributes != nil { - in, out := &in.VolumeAttributes, &out.VolumeAttributes - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.NodePublishSecretRef != nil { - in, out := &in.NodePublishSecretRef, &out.NodePublishSecretRef - *out = new(LocalObjectReference) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSIVolumeSource. -func (in *CSIVolumeSource) DeepCopy() *CSIVolumeSource { - if in == nil { - return nil - } - out := new(CSIVolumeSource) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *Capabilities) DeepCopyInto(out *Capabilities) { *out = *in @@ -485,7 +442,7 @@ func (in *ComponentStatus) DeepCopyObject() runtime.Object { func (in *ComponentStatusList) DeepCopyInto(out *ComponentStatusList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ComponentStatus, len(*in)) @@ -610,7 +567,7 @@ func (in *ConfigMapKeySelector) DeepCopy() *ConfigMapKeySelector { func (in *ConfigMapList) DeepCopyInto(out *ConfigMapList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ConfigMap, len(*in)) @@ -1166,7 +1123,7 @@ func (in *Endpoints) DeepCopyObject() runtime.Object { func (in *EndpointsList) DeepCopyInto(out *EndpointsList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Endpoints, len(*in)) @@ -1323,7 +1280,7 @@ func (in *Event) DeepCopyObject() runtime.Object { func (in *EventList) DeepCopyInto(out *EventList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Event, len(*in)) @@ -1541,27 +1498,6 @@ func (in *GitRepoVolumeSource) DeepCopy() *GitRepoVolumeSource { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *GlusterfsPersistentVolumeSource) DeepCopyInto(out *GlusterfsPersistentVolumeSource) { - *out = *in - if in.EndpointsNamespace != nil { - in, out := &in.EndpointsNamespace, &out.EndpointsNamespace - *out = new(string) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GlusterfsPersistentVolumeSource. -func (in *GlusterfsPersistentVolumeSource) DeepCopy() *GlusterfsPersistentVolumeSource { - if in == nil { - return nil - } - out := new(GlusterfsPersistentVolumeSource) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
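The recurring hunks in this file that replace in.ListMeta.DeepCopyInto(&out.ListMeta) with out.ListMeta = in.ListMeta swap a deep copy for plain struct assignment; that is only safe because the older ListMeta pinned here holds nothing but string fields, so assignment cannot alias shared memory. For pointer-bearing fields the generated convention re-allocates instead, as in this sketch on a hypothetical type:

package example

// Example is a hypothetical type; Tag is the pointer field that forces
// the generator to emit real copying code instead of plain assignment.
type Example struct {
	Name string
	Tag  *string
}

// DeepCopyInto follows the generated convention seen throughout this file.
func (in *Example) DeepCopyInto(out *Example) {
	*out = *in // copies Name, but out.Tag still aliases in.Tag here
	if in.Tag != nil {
		in, out := &in.Tag, &out.Tag
		*out = new(string)
		**out = **in // give the copy its own storage
	}
}

// DeepCopy allocates the target, matching the generated wrappers above.
func (in *Example) DeepCopy() *Example {
	if in == nil {
		return nil
	}
	out := new(Example)
	in.DeepCopyInto(out)
	return out
}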
func (in *GlusterfsVolumeSource) DeepCopyInto(out *GlusterfsVolumeSource) { *out = *in @@ -1880,7 +1816,7 @@ func (in *LimitRangeItem) DeepCopy() *LimitRangeItem { func (in *LimitRangeList) DeepCopyInto(out *LimitRangeList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]LimitRange, len(*in)) @@ -1936,7 +1872,7 @@ func (in *LimitRangeSpec) DeepCopy() *LimitRangeSpec { func (in *List) DeepCopyInto(out *List) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]runtime.RawExtension, len(*in)) @@ -2087,7 +2023,7 @@ func (in *Namespace) DeepCopyObject() runtime.Object { func (in *NamespaceList) DeepCopyInto(out *NamespaceList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Namespace, len(*in)) @@ -2316,7 +2252,7 @@ func (in *NodeDaemonEndpoints) DeepCopy() *NodeDaemonEndpoints { func (in *NodeList) DeepCopyInto(out *NodeList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Node, len(*in)) @@ -2695,7 +2631,7 @@ func (in *PersistentVolumeClaimCondition) DeepCopy() *PersistentVolumeClaimCondi func (in *PersistentVolumeClaimList) DeepCopyInto(out *PersistentVolumeClaimList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]PersistentVolumeClaim, len(*in)) @@ -2821,7 +2757,7 @@ func (in *PersistentVolumeClaimVolumeSource) DeepCopy() *PersistentVolumeClaimVo func (in *PersistentVolumeList) DeepCopyInto(out *PersistentVolumeList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]PersistentVolume, len(*in)) @@ -2870,8 +2806,8 @@ func (in *PersistentVolumeSource) DeepCopyInto(out *PersistentVolumeSource) { } if in.Glusterfs != nil { in, out := &in.Glusterfs, &out.Glusterfs - *out = new(GlusterfsPersistentVolumeSource) - (*in).DeepCopyInto(*out) + *out = new(GlusterfsVolumeSource) + **out = **in } if in.NFS != nil { in, out := &in.NFS, &out.NFS @@ -3302,7 +3238,7 @@ func (in *PodExecOptions) DeepCopyObject() runtime.Object { func (in *PodList) DeepCopyInto(out *PodList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Pod, len(*in)) @@ -3454,11 +3390,6 @@ func (in *PodSecurityContext) DeepCopyInto(out *PodSecurityContext) { *out = new(SELinuxOptions) **out = **in } - if in.WindowsOptions != nil { - in, out := &in.WindowsOptions, &out.WindowsOptions - *out = new(WindowsSecurityContextOptions) - (*in).DeepCopyInto(*out) - } if in.RunAsUser != nil { in, out := &in.RunAsUser, &out.RunAsUser *out = new(int64) @@ -3623,16 +3554,6 @@ func (in *PodSpec) DeepCopyInto(out *PodSpec) { *out = new(string) **out = **in } - if in.EnableServiceLinks != nil { - in, out := &in.EnableServiceLinks, &out.EnableServiceLinks - *out = new(bool) - **out = **in - } - if in.PreemptionPolicy != nil { - in, out := &in.PreemptionPolicy, 
&out.PreemptionPolicy - *out = new(PreemptionPolicy) - **out = **in - } return } @@ -3745,7 +3666,7 @@ func (in *PodTemplate) DeepCopyObject() runtime.Object { func (in *PodTemplateList) DeepCopyInto(out *PodTemplateList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]PodTemplate, len(*in)) @@ -4057,7 +3978,7 @@ func (in *ReplicationControllerCondition) DeepCopy() *ReplicationControllerCondi func (in *ReplicationControllerList) DeepCopyInto(out *ReplicationControllerList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ReplicationController, len(*in)) @@ -4213,7 +4134,7 @@ func (in *ResourceQuota) DeepCopyObject() runtime.Object { func (in *ResourceQuotaList) DeepCopyInto(out *ResourceQuotaList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ResourceQuota, len(*in)) @@ -4533,7 +4454,7 @@ func (in *SecretKeySelector) DeepCopy() *SecretKeySelector { func (in *SecretList) DeepCopyInto(out *SecretList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Secret, len(*in)) @@ -4658,11 +4579,6 @@ func (in *SecurityContext) DeepCopyInto(out *SecurityContext) { *out = new(SELinuxOptions) **out = **in } - if in.WindowsOptions != nil { - in, out := &in.WindowsOptions, &out.WindowsOptions - *out = new(WindowsSecurityContextOptions) - (*in).DeepCopyInto(*out) - } if in.RunAsUser != nil { in, out := &in.RunAsUser, &out.RunAsUser *out = new(int64) @@ -4805,7 +4721,7 @@ func (in *ServiceAccount) DeepCopyObject() runtime.Object { func (in *ServiceAccountList) DeepCopyInto(out *ServiceAccountList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ServiceAccount, len(*in)) @@ -4859,7 +4775,7 @@ func (in *ServiceAccountTokenProjection) DeepCopy() *ServiceAccountTokenProjecti func (in *ServiceList) DeepCopyInto(out *ServiceList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Service, len(*in)) @@ -5441,11 +5357,6 @@ func (in *VolumeSource) DeepCopyInto(out *VolumeSource) { *out = new(StorageOSVolumeSource) (*in).DeepCopyInto(*out) } - if in.CSI != nil { - in, out := &in.CSI, &out.CSI - *out = new(CSIVolumeSource) - (*in).DeepCopyInto(*out) - } return } @@ -5491,29 +5402,3 @@ func (in *WeightedPodAffinityTerm) DeepCopy() *WeightedPodAffinityTerm { in.DeepCopyInto(out) return out } - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *WindowsSecurityContextOptions) DeepCopyInto(out *WindowsSecurityContextOptions) { - *out = *in - if in.GMSACredentialSpecName != nil { - in, out := &in.GMSACredentialSpecName, &out.GMSACredentialSpecName - *out = new(string) - **out = **in - } - if in.GMSACredentialSpec != nil { - in, out := &in.GMSACredentialSpec, &out.GMSACredentialSpec - *out = new(string) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsSecurityContextOptions. -func (in *WindowsSecurityContextOptions) DeepCopy() *WindowsSecurityContextOptions { - if in == nil { - return nil - } - out := new(WindowsSecurityContextOptions) - in.DeepCopyInto(out) - return out -} diff --git a/vendor/k8s.io/api/events/v1beta1/doc.go b/vendor/k8s.io/api/events/v1beta1/doc.go index 9bec7b3cc..8b1a3e312 100644 --- a/vendor/k8s.io/api/events/v1beta1/doc.go +++ b/vendor/k8s.io/api/events/v1beta1/doc.go @@ -15,9 +15,7 @@ limitations under the License. */ // +k8s:deepcopy-gen=package -// +k8s:protobuf-gen=package // +k8s:openapi-gen=true // +groupName=events.k8s.io - package v1beta1 // import "k8s.io/api/events/v1beta1" diff --git a/vendor/k8s.io/api/events/v1beta1/generated.pb.go b/vendor/k8s.io/api/events/v1beta1/generated.pb.go index bb0c881b5..e24a82ab1 100644 --- a/vendor/k8s.io/api/events/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/events/v1beta1/generated.pb.go @@ -14,8 +14,9 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// Code generated by protoc-gen-gogo. // source: k8s.io/kubernetes/vendor/k8s.io/api/events/v1beta1/generated.proto +// DO NOT EDIT! /* Package v1beta1 is a generated protocol buffer package. @@ -253,6 +254,24 @@ func (m *EventSeries) MarshalTo(dAtA []byte) (int, error) { return i, nil } +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) diff --git a/vendor/k8s.io/api/events/v1beta1/types.go b/vendor/k8s.io/api/events/v1beta1/types.go index eef456453..dc48ddb06 100644 --- a/vendor/k8s.io/api/events/v1beta1/types.go +++ b/vendor/k8s.io/api/events/v1beta1/types.go @@ -96,7 +96,6 @@ type EventSeries struct { // Time when last Event from the series was seen before last heartbeat. LastObservedTime metav1.MicroTime `json:"lastObservedTime" protobuf:"bytes,2,opt,name=lastObservedTime"` // Information whether this series is ongoing or finished. - // Deprecated. 
Planned removal for 1.18 State EventSeriesState `json:"state" protobuf:"bytes,3,opt,name=state"` } diff --git a/vendor/k8s.io/api/events/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/events/v1beta1/types_swagger_doc_generated.go index bbc91ed9b..a15672c19 100644 --- a/vendor/k8s.io/api/events/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/events/v1beta1/types_swagger_doc_generated.go @@ -63,7 +63,7 @@ var map_EventSeries = map[string]string{ "": "EventSeries contain information on series of events, i.e. thing that was/is happening continuously for some time.", "count": "Number of occurrences in this series up to the last heartbeat time", "lastObservedTime": "Time when last Event from the series was seen before last heartbeat.", - "state": "Information whether this series is ongoing or finished. Deprecated. Planned removal for 1.18", + "state": "Information whether this series is ongoing or finished.", } func (EventSeries) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/events/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/events/v1beta1/zz_generated.deepcopy.go index 779ebaf6e..e52e142c6 100644 --- a/vendor/k8s.io/api/events/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/events/v1beta1/zz_generated.deepcopy.go @@ -70,7 +70,7 @@ func (in *Event) DeepCopyObject() runtime.Object { func (in *EventList) DeepCopyInto(out *EventList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Event, len(*in)) diff --git a/vendor/k8s.io/api/extensions/v1beta1/doc.go b/vendor/k8s.io/api/extensions/v1beta1/doc.go index fa799f302..8ce18304b 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/doc.go +++ b/vendor/k8s.io/api/extensions/v1beta1/doc.go @@ -15,7 +15,6 @@ limitations under the License. */ // +k8s:deepcopy-gen=package -// +k8s:protobuf-gen=package // +k8s:openapi-gen=true package v1beta1 // import "k8s.io/api/extensions/v1beta1" diff --git a/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go b/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go index 4439535dc..72d64db3e 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go @@ -14,8 +14,9 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// Code generated by protoc-gen-gogo. // source: k8s.io/kubernetes/vendor/k8s.io/api/extensions/v1beta1/generated.proto +// DO NOT EDIT! /* Package v1beta1 is a generated protocol buffer package. @@ -24,9 +25,12 @@ limitations under the License. k8s.io/kubernetes/vendor/k8s.io/api/extensions/v1beta1/generated.proto It has these top-level messages: - AllowedCSIDriver AllowedFlexVolume AllowedHostPath + CustomMetricCurrentStatus + CustomMetricCurrentStatusList + CustomMetricTarget + CustomMetricTargetList DaemonSet DaemonSetCondition DaemonSetList @@ -73,9 +77,7 @@ limitations under the License. 
RollbackConfig RollingUpdateDaemonSet RollingUpdateDeployment - RunAsGroupStrategyOptions RunAsUserStrategyOptions - RuntimeClassStrategyOptions SELinuxStrategyOptions Scale ScaleSpec @@ -89,6 +91,7 @@ import fmt "fmt" import math "math" import k8s_io_api_core_v1 "k8s.io/api/core/v1" + import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" import k8s_io_apimachinery_pkg_util_intstr "k8s.io/apimachinery/pkg/util/intstr" @@ -111,254 +114,263 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *AllowedCSIDriver) Reset() { *m = AllowedCSIDriver{} } -func (*AllowedCSIDriver) ProtoMessage() {} -func (*AllowedCSIDriver) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } - func (m *AllowedFlexVolume) Reset() { *m = AllowedFlexVolume{} } func (*AllowedFlexVolume) ProtoMessage() {} -func (*AllowedFlexVolume) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +func (*AllowedFlexVolume) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } func (m *AllowedHostPath) Reset() { *m = AllowedHostPath{} } func (*AllowedHostPath) ProtoMessage() {} -func (*AllowedHostPath) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +func (*AllowedHostPath) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } + +func (m *CustomMetricCurrentStatus) Reset() { *m = CustomMetricCurrentStatus{} } +func (*CustomMetricCurrentStatus) ProtoMessage() {} +func (*CustomMetricCurrentStatus) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{2} +} + +func (m *CustomMetricCurrentStatusList) Reset() { *m = CustomMetricCurrentStatusList{} } +func (*CustomMetricCurrentStatusList) ProtoMessage() {} +func (*CustomMetricCurrentStatusList) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{3} +} + +func (m *CustomMetricTarget) Reset() { *m = CustomMetricTarget{} } +func (*CustomMetricTarget) ProtoMessage() {} +func (*CustomMetricTarget) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } + +func (m *CustomMetricTargetList) Reset() { *m = CustomMetricTargetList{} } +func (*CustomMetricTargetList) ProtoMessage() {} +func (*CustomMetricTargetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } func (m *DaemonSet) Reset() { *m = DaemonSet{} } func (*DaemonSet) ProtoMessage() {} -func (*DaemonSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +func (*DaemonSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } func (m *DaemonSetCondition) Reset() { *m = DaemonSetCondition{} } func (*DaemonSetCondition) ProtoMessage() {} -func (*DaemonSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +func (*DaemonSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } func (m *DaemonSetList) Reset() { *m = DaemonSetList{} } func (*DaemonSetList) ProtoMessage() {} -func (*DaemonSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } +func (*DaemonSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } func (m *DaemonSetSpec) Reset() { *m = DaemonSetSpec{} } func (*DaemonSetSpec) ProtoMessage() {} -func (*DaemonSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } +func (*DaemonSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, 
[]int{9} } func (m *DaemonSetStatus) Reset() { *m = DaemonSetStatus{} } func (*DaemonSetStatus) ProtoMessage() {} -func (*DaemonSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } +func (*DaemonSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } -func (m *DaemonSetUpdateStrategy) Reset() { *m = DaemonSetUpdateStrategy{} } -func (*DaemonSetUpdateStrategy) ProtoMessage() {} -func (*DaemonSetUpdateStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } +func (m *DaemonSetUpdateStrategy) Reset() { *m = DaemonSetUpdateStrategy{} } +func (*DaemonSetUpdateStrategy) ProtoMessage() {} +func (*DaemonSetUpdateStrategy) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{11} +} func (m *Deployment) Reset() { *m = Deployment{} } func (*Deployment) ProtoMessage() {} -func (*Deployment) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } +func (*Deployment) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } func (m *DeploymentCondition) Reset() { *m = DeploymentCondition{} } func (*DeploymentCondition) ProtoMessage() {} -func (*DeploymentCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } +func (*DeploymentCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } func (m *DeploymentList) Reset() { *m = DeploymentList{} } func (*DeploymentList) ProtoMessage() {} -func (*DeploymentList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } +func (*DeploymentList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } func (m *DeploymentRollback) Reset() { *m = DeploymentRollback{} } func (*DeploymentRollback) ProtoMessage() {} -func (*DeploymentRollback) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } +func (*DeploymentRollback) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } func (m *DeploymentSpec) Reset() { *m = DeploymentSpec{} } func (*DeploymentSpec) ProtoMessage() {} -func (*DeploymentSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } +func (*DeploymentSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } func (m *DeploymentStatus) Reset() { *m = DeploymentStatus{} } func (*DeploymentStatus) ProtoMessage() {} -func (*DeploymentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } +func (*DeploymentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } func (m *DeploymentStrategy) Reset() { *m = DeploymentStrategy{} } func (*DeploymentStrategy) ProtoMessage() {} -func (*DeploymentStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } +func (*DeploymentStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } func (m *FSGroupStrategyOptions) Reset() { *m = FSGroupStrategyOptions{} } func (*FSGroupStrategyOptions) ProtoMessage() {} -func (*FSGroupStrategyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } +func (*FSGroupStrategyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} } func (m *HTTPIngressPath) Reset() { *m = HTTPIngressPath{} } func (*HTTPIngressPath) ProtoMessage() {} -func (*HTTPIngressPath) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } +func (*HTTPIngressPath) Descriptor() ([]byte, []int) { return 
fileDescriptorGenerated, []int{20} } func (m *HTTPIngressRuleValue) Reset() { *m = HTTPIngressRuleValue{} } func (*HTTPIngressRuleValue) ProtoMessage() {} -func (*HTTPIngressRuleValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } +func (*HTTPIngressRuleValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{21} } func (m *HostPortRange) Reset() { *m = HostPortRange{} } func (*HostPortRange) ProtoMessage() {} -func (*HostPortRange) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} } +func (*HostPortRange) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{22} } func (m *IDRange) Reset() { *m = IDRange{} } func (*IDRange) ProtoMessage() {} -func (*IDRange) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{20} } +func (*IDRange) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{23} } func (m *IPBlock) Reset() { *m = IPBlock{} } func (*IPBlock) ProtoMessage() {} -func (*IPBlock) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{21} } +func (*IPBlock) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{24} } func (m *Ingress) Reset() { *m = Ingress{} } func (*Ingress) ProtoMessage() {} -func (*Ingress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{22} } +func (*Ingress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{25} } func (m *IngressBackend) Reset() { *m = IngressBackend{} } func (*IngressBackend) ProtoMessage() {} -func (*IngressBackend) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{23} } +func (*IngressBackend) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{26} } func (m *IngressList) Reset() { *m = IngressList{} } func (*IngressList) ProtoMessage() {} -func (*IngressList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{24} } +func (*IngressList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{27} } func (m *IngressRule) Reset() { *m = IngressRule{} } func (*IngressRule) ProtoMessage() {} -func (*IngressRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{25} } +func (*IngressRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{28} } func (m *IngressRuleValue) Reset() { *m = IngressRuleValue{} } func (*IngressRuleValue) ProtoMessage() {} -func (*IngressRuleValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{26} } +func (*IngressRuleValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{29} } func (m *IngressSpec) Reset() { *m = IngressSpec{} } func (*IngressSpec) ProtoMessage() {} -func (*IngressSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{27} } +func (*IngressSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{30} } func (m *IngressStatus) Reset() { *m = IngressStatus{} } func (*IngressStatus) ProtoMessage() {} -func (*IngressStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{28} } +func (*IngressStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{31} } func (m *IngressTLS) Reset() { *m = IngressTLS{} } func (*IngressTLS) ProtoMessage() {} -func (*IngressTLS) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{29} } +func (*IngressTLS) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{32} } func (m *NetworkPolicy) Reset() { *m = NetworkPolicy{} } func (*NetworkPolicy) 
ProtoMessage() {} -func (*NetworkPolicy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{30} } +func (*NetworkPolicy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{33} } func (m *NetworkPolicyEgressRule) Reset() { *m = NetworkPolicyEgressRule{} } func (*NetworkPolicyEgressRule) ProtoMessage() {} func (*NetworkPolicyEgressRule) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{31} + return fileDescriptorGenerated, []int{34} } func (m *NetworkPolicyIngressRule) Reset() { *m = NetworkPolicyIngressRule{} } func (*NetworkPolicyIngressRule) ProtoMessage() {} func (*NetworkPolicyIngressRule) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{32} + return fileDescriptorGenerated, []int{35} } func (m *NetworkPolicyList) Reset() { *m = NetworkPolicyList{} } func (*NetworkPolicyList) ProtoMessage() {} -func (*NetworkPolicyList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{33} } +func (*NetworkPolicyList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{36} } func (m *NetworkPolicyPeer) Reset() { *m = NetworkPolicyPeer{} } func (*NetworkPolicyPeer) ProtoMessage() {} -func (*NetworkPolicyPeer) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{34} } +func (*NetworkPolicyPeer) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{37} } func (m *NetworkPolicyPort) Reset() { *m = NetworkPolicyPort{} } func (*NetworkPolicyPort) ProtoMessage() {} -func (*NetworkPolicyPort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{35} } +func (*NetworkPolicyPort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{38} } func (m *NetworkPolicySpec) Reset() { *m = NetworkPolicySpec{} } func (*NetworkPolicySpec) ProtoMessage() {} -func (*NetworkPolicySpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{36} } +func (*NetworkPolicySpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{39} } func (m *PodSecurityPolicy) Reset() { *m = PodSecurityPolicy{} } func (*PodSecurityPolicy) ProtoMessage() {} -func (*PodSecurityPolicy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{37} } +func (*PodSecurityPolicy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{40} } func (m *PodSecurityPolicyList) Reset() { *m = PodSecurityPolicyList{} } func (*PodSecurityPolicyList) ProtoMessage() {} -func (*PodSecurityPolicyList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{38} } +func (*PodSecurityPolicyList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{41} } func (m *PodSecurityPolicySpec) Reset() { *m = PodSecurityPolicySpec{} } func (*PodSecurityPolicySpec) ProtoMessage() {} -func (*PodSecurityPolicySpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{39} } +func (*PodSecurityPolicySpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{42} } func (m *ReplicaSet) Reset() { *m = ReplicaSet{} } func (*ReplicaSet) ProtoMessage() {} -func (*ReplicaSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{40} } +func (*ReplicaSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{43} } func (m *ReplicaSetCondition) Reset() { *m = ReplicaSetCondition{} } func (*ReplicaSetCondition) ProtoMessage() {} -func (*ReplicaSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{41} } +func (*ReplicaSetCondition) Descriptor() 
([]byte, []int) { return fileDescriptorGenerated, []int{44} } func (m *ReplicaSetList) Reset() { *m = ReplicaSetList{} } func (*ReplicaSetList) ProtoMessage() {} -func (*ReplicaSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{42} } +func (*ReplicaSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{45} } func (m *ReplicaSetSpec) Reset() { *m = ReplicaSetSpec{} } func (*ReplicaSetSpec) ProtoMessage() {} -func (*ReplicaSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{43} } +func (*ReplicaSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{46} } func (m *ReplicaSetStatus) Reset() { *m = ReplicaSetStatus{} } func (*ReplicaSetStatus) ProtoMessage() {} -func (*ReplicaSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{44} } +func (*ReplicaSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{47} } func (m *ReplicationControllerDummy) Reset() { *m = ReplicationControllerDummy{} } func (*ReplicationControllerDummy) ProtoMessage() {} func (*ReplicationControllerDummy) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{45} + return fileDescriptorGenerated, []int{48} } func (m *RollbackConfig) Reset() { *m = RollbackConfig{} } func (*RollbackConfig) ProtoMessage() {} -func (*RollbackConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{46} } +func (*RollbackConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{49} } func (m *RollingUpdateDaemonSet) Reset() { *m = RollingUpdateDaemonSet{} } func (*RollingUpdateDaemonSet) ProtoMessage() {} -func (*RollingUpdateDaemonSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{47} } +func (*RollingUpdateDaemonSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{50} } func (m *RollingUpdateDeployment) Reset() { *m = RollingUpdateDeployment{} } func (*RollingUpdateDeployment) ProtoMessage() {} func (*RollingUpdateDeployment) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{48} -} - -func (m *RunAsGroupStrategyOptions) Reset() { *m = RunAsGroupStrategyOptions{} } -func (*RunAsGroupStrategyOptions) ProtoMessage() {} -func (*RunAsGroupStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{49} + return fileDescriptorGenerated, []int{51} } func (m *RunAsUserStrategyOptions) Reset() { *m = RunAsUserStrategyOptions{} } func (*RunAsUserStrategyOptions) ProtoMessage() {} func (*RunAsUserStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{50} -} - -func (m *RuntimeClassStrategyOptions) Reset() { *m = RuntimeClassStrategyOptions{} } -func (*RuntimeClassStrategyOptions) ProtoMessage() {} -func (*RuntimeClassStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{51} + return fileDescriptorGenerated, []int{52} } func (m *SELinuxStrategyOptions) Reset() { *m = SELinuxStrategyOptions{} } func (*SELinuxStrategyOptions) ProtoMessage() {} -func (*SELinuxStrategyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{52} } +func (*SELinuxStrategyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{53} } func (m *Scale) Reset() { *m = Scale{} } func (*Scale) ProtoMessage() {} -func (*Scale) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{53} } +func (*Scale) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{54} 
} func (m *ScaleSpec) Reset() { *m = ScaleSpec{} } func (*ScaleSpec) ProtoMessage() {} -func (*ScaleSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{54} } +func (*ScaleSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{55} } func (m *ScaleStatus) Reset() { *m = ScaleStatus{} } func (*ScaleStatus) ProtoMessage() {} -func (*ScaleStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{55} } +func (*ScaleStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{56} } func (m *SupplementalGroupsStrategyOptions) Reset() { *m = SupplementalGroupsStrategyOptions{} } func (*SupplementalGroupsStrategyOptions) ProtoMessage() {} func (*SupplementalGroupsStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{56} + return fileDescriptorGenerated, []int{57} } func init() { - proto.RegisterType((*AllowedCSIDriver)(nil), "k8s.io.api.extensions.v1beta1.AllowedCSIDriver") proto.RegisterType((*AllowedFlexVolume)(nil), "k8s.io.api.extensions.v1beta1.AllowedFlexVolume") proto.RegisterType((*AllowedHostPath)(nil), "k8s.io.api.extensions.v1beta1.AllowedHostPath") + proto.RegisterType((*CustomMetricCurrentStatus)(nil), "k8s.io.api.extensions.v1beta1.CustomMetricCurrentStatus") + proto.RegisterType((*CustomMetricCurrentStatusList)(nil), "k8s.io.api.extensions.v1beta1.CustomMetricCurrentStatusList") + proto.RegisterType((*CustomMetricTarget)(nil), "k8s.io.api.extensions.v1beta1.CustomMetricTarget") + proto.RegisterType((*CustomMetricTargetList)(nil), "k8s.io.api.extensions.v1beta1.CustomMetricTargetList") proto.RegisterType((*DaemonSet)(nil), "k8s.io.api.extensions.v1beta1.DaemonSet") proto.RegisterType((*DaemonSetCondition)(nil), "k8s.io.api.extensions.v1beta1.DaemonSetCondition") proto.RegisterType((*DaemonSetList)(nil), "k8s.io.api.extensions.v1beta1.DaemonSetList") @@ -405,16 +417,14 @@ func init() { proto.RegisterType((*RollbackConfig)(nil), "k8s.io.api.extensions.v1beta1.RollbackConfig") proto.RegisterType((*RollingUpdateDaemonSet)(nil), "k8s.io.api.extensions.v1beta1.RollingUpdateDaemonSet") proto.RegisterType((*RollingUpdateDeployment)(nil), "k8s.io.api.extensions.v1beta1.RollingUpdateDeployment") - proto.RegisterType((*RunAsGroupStrategyOptions)(nil), "k8s.io.api.extensions.v1beta1.RunAsGroupStrategyOptions") proto.RegisterType((*RunAsUserStrategyOptions)(nil), "k8s.io.api.extensions.v1beta1.RunAsUserStrategyOptions") - proto.RegisterType((*RuntimeClassStrategyOptions)(nil), "k8s.io.api.extensions.v1beta1.RuntimeClassStrategyOptions") proto.RegisterType((*SELinuxStrategyOptions)(nil), "k8s.io.api.extensions.v1beta1.SELinuxStrategyOptions") proto.RegisterType((*Scale)(nil), "k8s.io.api.extensions.v1beta1.Scale") proto.RegisterType((*ScaleSpec)(nil), "k8s.io.api.extensions.v1beta1.ScaleSpec") proto.RegisterType((*ScaleStatus)(nil), "k8s.io.api.extensions.v1beta1.ScaleStatus") proto.RegisterType((*SupplementalGroupsStrategyOptions)(nil), "k8s.io.api.extensions.v1beta1.SupplementalGroupsStrategyOptions") } -func (m *AllowedCSIDriver) Marshal() (dAtA []byte, err error) { +func (m *AllowedFlexVolume) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) @@ -424,19 +434,19 @@ func (m *AllowedCSIDriver) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *AllowedCSIDriver) MarshalTo(dAtA []byte) (int, error) { +func (m *AllowedFlexVolume) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l dAtA[i] = 0xa i++ 
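The MarshalTo bodies in these hunks write literal tag bytes before each field. A protobuf tag is fieldNumber<<3 | wireType, so the 0xa, 0x12, 0x1a and 0x22 constants seen here select length-delimited fields 1 through 4, and 0x10 is field 2 as a varint (the ReadOnly bool). A small sketch that recomputes them, illustrative only:

package main

import "fmt"

// tag computes a protobuf field tag: the field number shifted past the
// three wire-type bits, OR'd with the wire type.
func tag(fieldNum, wireType int) byte {
	return byte(fieldNum<<3 | wireType)
}

func main() {
	const (
		wireVarint = 0 // bools and ints, e.g. the ReadOnly flag above
		wireBytes  = 2 // length-delimited: strings and nested messages
	)
	fmt.Printf("%#x %#x %#x %#x\n",
		tag(1, wireBytes),  // 0xa:  first length-delimited field
		tag(2, wireBytes),  // 0x12: second length-delimited field
		tag(2, wireVarint), // 0x10: second field as a varint
		tag(3, wireBytes),  // 0x1a: third length-delimited field
	)
}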
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) + i += copy(dAtA[i:], m.Driver) return i, nil } -func (m *AllowedFlexVolume) Marshal() (dAtA []byte, err error) { +func (m *AllowedHostPath) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) @@ -446,19 +456,27 @@ func (m *AllowedFlexVolume) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *AllowedFlexVolume) MarshalTo(dAtA []byte) (int, error) { +func (m *AllowedHostPath) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l dAtA[i] = 0xa i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) - i += copy(dAtA[i:], m.Driver) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PathPrefix))) + i += copy(dAtA[i:], m.PathPrefix) + dAtA[i] = 0x10 + i++ + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ return i, nil } -func (m *AllowedHostPath) Marshal() (dAtA []byte, err error) { +func (m *CustomMetricCurrentStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) @@ -468,23 +486,113 @@ func (m *AllowedHostPath) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *AllowedHostPath) MarshalTo(dAtA []byte) (int, error) { +func (m *CustomMetricCurrentStatus) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l dAtA[i] = 0xa i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.PathPrefix))) - i += copy(dAtA[i:], m.PathPrefix) - dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + dAtA[i] = 0x12 i++ - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentValue.Size())) + n1, err := m.CurrentValue.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + return i, nil +} + +func (m *CustomMetricCurrentStatusList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CustomMetricCurrentStatusList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *CustomMetricTarget) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *CustomMetricTarget) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + dAtA[i] = 0x12 i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.TargetValue.Size())) + n2, err := m.TargetValue.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + return i, nil +} + +func (m *CustomMetricTargetList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CustomMetricTargetList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for _, msg := 
range m.Items { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } return i, nil } @@ -506,27 +614,27 @@ func (m *DaemonSet) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n3, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n1 + i += n3 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(dAtA[i:]) + n4, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n2 + i += n4 dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n3, err := m.Status.MarshalTo(dAtA[i:]) + n5, err := m.Status.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n3 + i += n5 return i, nil } @@ -556,11 +664,11 @@ func (m *DaemonSetCondition) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n4, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + n6, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n4 + i += n6 dAtA[i] = 0x22 i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) @@ -590,11 +698,11 @@ func (m *DaemonSetList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n5, err := m.ListMeta.MarshalTo(dAtA[i:]) + n7, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n5 + i += n7 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -629,28 +737,28 @@ func (m *DaemonSetSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n6, err := m.Selector.MarshalTo(dAtA[i:]) + n8, err := m.Selector.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n6 + i += n8 } dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n7, err := m.Template.MarshalTo(dAtA[i:]) + n9, err := m.Template.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n7 + i += n9 dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.UpdateStrategy.Size())) - n8, err := m.UpdateStrategy.MarshalTo(dAtA[i:]) + n10, err := m.UpdateStrategy.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n8 + i += n10 dAtA[i] = 0x20 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) @@ -747,11 +855,11 @@ func (m *DaemonSetUpdateStrategy) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.RollingUpdate.Size())) - n9, err := m.RollingUpdate.MarshalTo(dAtA[i:]) + n11, err := m.RollingUpdate.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n9 + i += n11 } return i, nil } @@ -774,27 +882,27 @@ func (m *Deployment) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n10, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n12, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n10 + i += n12 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n11, err := m.Spec.MarshalTo(dAtA[i:]) + n13, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n11 + i += n13 dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n12, err := 
m.Status.MarshalTo(dAtA[i:]) + n14, err := m.Status.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n12 + i += n14 return i, nil } @@ -832,19 +940,19 @@ func (m *DeploymentCondition) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x32 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LastUpdateTime.Size())) - n13, err := m.LastUpdateTime.MarshalTo(dAtA[i:]) + n15, err := m.LastUpdateTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n13 + i += n15 dAtA[i] = 0x3a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n14, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + n16, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n14 + i += n16 return i, nil } @@ -866,11 +974,11 @@ func (m *DeploymentList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n15, err := m.ListMeta.MarshalTo(dAtA[i:]) + n17, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n15 + i += n17 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -930,11 +1038,11 @@ func (m *DeploymentRollback) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.RollbackTo.Size())) - n16, err := m.RollbackTo.MarshalTo(dAtA[i:]) + n18, err := m.RollbackTo.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n16 + i += n18 return i, nil } @@ -962,28 +1070,28 @@ func (m *DeploymentSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n17, err := m.Selector.MarshalTo(dAtA[i:]) + n19, err := m.Selector.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n17 + i += n19 } dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n18, err := m.Template.MarshalTo(dAtA[i:]) + n20, err := m.Template.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n18 + i += n20 dAtA[i] = 0x22 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Strategy.Size())) - n19, err := m.Strategy.MarshalTo(dAtA[i:]) + n21, err := m.Strategy.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n19 + i += n21 dAtA[i] = 0x28 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) @@ -1004,11 +1112,11 @@ func (m *DeploymentSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x42 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.RollbackTo.Size())) - n20, err := m.RollbackTo.MarshalTo(dAtA[i:]) + n22, err := m.RollbackTo.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n20 + i += n22 } if m.ProgressDeadlineSeconds != nil { dAtA[i] = 0x48 @@ -1094,11 +1202,11 @@ func (m *DeploymentStrategy) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.RollingUpdate.Size())) - n21, err := m.RollingUpdate.MarshalTo(dAtA[i:]) + n23, err := m.RollingUpdate.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n21 + i += n23 } return i, nil } @@ -1159,11 +1267,11 @@ func (m *HTTPIngressPath) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Backend.Size())) - n22, err := m.Backend.MarshalTo(dAtA[i:]) + n24, err := m.Backend.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n22 + i += n24 return i, nil } @@ -1300,27 +1408,27 @@ func (m *Ingress) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n23, err := 
m.ObjectMeta.MarshalTo(dAtA[i:]) + n25, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n23 + i += n25 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n24, err := m.Spec.MarshalTo(dAtA[i:]) + n26, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n24 + i += n26 dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n25, err := m.Status.MarshalTo(dAtA[i:]) + n27, err := m.Status.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n25 + i += n27 return i, nil } @@ -1346,11 +1454,11 @@ func (m *IngressBackend) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ServicePort.Size())) - n26, err := m.ServicePort.MarshalTo(dAtA[i:]) + n28, err := m.ServicePort.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n26 + i += n28 return i, nil } @@ -1372,11 +1480,11 @@ func (m *IngressList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n27, err := m.ListMeta.MarshalTo(dAtA[i:]) + n29, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n27 + i += n29 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -1414,11 +1522,11 @@ func (m *IngressRule) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.IngressRuleValue.Size())) - n28, err := m.IngressRuleValue.MarshalTo(dAtA[i:]) + n30, err := m.IngressRuleValue.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n28 + i += n30 return i, nil } @@ -1441,11 +1549,11 @@ func (m *IngressRuleValue) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.HTTP.Size())) - n29, err := m.HTTP.MarshalTo(dAtA[i:]) + n31, err := m.HTTP.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n29 + i += n31 } return i, nil } @@ -1469,11 +1577,11 @@ func (m *IngressSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Backend.Size())) - n30, err := m.Backend.MarshalTo(dAtA[i:]) + n32, err := m.Backend.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n30 + i += n32 } if len(m.TLS) > 0 { for _, msg := range m.TLS { @@ -1520,11 +1628,11 @@ func (m *IngressStatus) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LoadBalancer.Size())) - n31, err := m.LoadBalancer.MarshalTo(dAtA[i:]) + n33, err := m.LoadBalancer.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n31 + i += n33 return i, nil } @@ -1583,19 +1691,19 @@ func (m *NetworkPolicy) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n32, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n34, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n32 + i += n34 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n33, err := m.Spec.MarshalTo(dAtA[i:]) + n35, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n33 + i += n35 return i, nil } @@ -1701,11 +1809,11 @@ func (m *NetworkPolicyList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n34, err := m.ListMeta.MarshalTo(dAtA[i:]) + n36, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n34 + i += n36 if len(m.Items) > 0 { for _, msg := 
[vendored diff trimmed for readability: this span is machine-generated gogo-protobuf
marshal/size/string/unmarshal code for the Kubernetes extensions/v1beta1 API types
(DaemonSet, Deployment, NetworkPolicy*, PodSecurityPolicy*, Scale, and their strategy
options), apparently re-vendored at an older API revision. The regenerated copy
renumbers the marshal buffer offsets (n35-n48), drops the newer PodSecurityPolicySpec
fields RunAsGroup, AllowedCSIDrivers, and RuntimeClass together with the
RunAsGroupStrategyOptions, AllowedCSIDriver, and RuntimeClassStrategyOptions types,
restores the legacy CustomMetricTarget/CustomMetricTargetList and
CustomMetricCurrentStatus/CustomMetricCurrentStatusList types, the older per-entry
map decoding in DeploymentRollback.UpdatedAnnotations and ScaleStatus.Selector, and
the encodeFixed64Generated/encodeFixed32Generated helpers, and swaps the gzipped
FileDescriptorProto (3695 -> 3665 bytes).]
0x6b, 0xa4, 0x93, 0x5e, 0x58, 0x03, 0x04, 0x4d, 0x14, 0x47, 0x80, 0xd5, 0x7f, 0x55, 0xe0, 0x66, + 0x4e, 0xe8, 0x40, 0xdf, 0x8a, 0xa5, 0xd8, 0x5f, 0x4e, 0xa4, 0xd8, 0x57, 0x72, 0xc4, 0x22, 0x79, + 0xd6, 0x84, 0x19, 0x87, 0xf7, 0xca, 0xec, 0x79, 0x2c, 0x32, 0x46, 0xbe, 0x3b, 0xa4, 0x1b, 0x38, + 0x2a, 0x13, 0xc6, 0xfc, 0x85, 0xf3, 0xb3, 0xda, 0x4c, 0x8c, 0x86, 0xe3, 0xf0, 0xea, 0x5f, 0x14, + 0x00, 0xd6, 0x89, 0x6d, 0x58, 0x83, 0x3e, 0x31, 0xaf, 0xa2, 0x86, 0xda, 0x8d, 0xd5, 0x50, 0x6f, + 0x0e, 0x9b, 0x9e, 0xc0, 0xb4, 0xdc, 0x22, 0xea, 0xd7, 0x13, 0x45, 0x54, 0x63, 0x74, 0xc8, 0x8b, + 0xab, 0xa8, 0x7f, 0x2f, 0xc2, 0x62, 0xc8, 0x1c, 0x96, 0x51, 0x8f, 0x62, 0x73, 0xfc, 0x4b, 0x89, + 0x39, 0xbe, 0x99, 0x21, 0xf2, 0xd2, 0xea, 0xa8, 0x17, 0x5f, 0xcf, 0xa0, 0x4f, 0x61, 0x96, 0x17, + 0x4e, 0x9e, 0x7b, 0x88, 0xb2, 0x6c, 0x72, 0xec, 0xb2, 0x2c, 0x48, 0xa0, 0x5b, 0x31, 0x24, 0x9c, + 0x40, 0xce, 0x29, 0x03, 0xcb, 0x2f, 0xbb, 0x0c, 0x54, 0x3f, 0x53, 0x60, 0x36, 0x9c, 0xa6, 0x2b, + 0x28, 0xda, 0x76, 0xe2, 0x45, 0xdb, 0xeb, 0x23, 0xbb, 0x68, 0x4e, 0xd5, 0xf6, 0xdf, 0xbc, 0xc0, + 0x0f, 0x98, 0xf8, 0x02, 0x3f, 0xd0, 0x3a, 0xc7, 0x23, 0x6c, 0xff, 0x7e, 0xa8, 0x00, 0x92, 0x59, + 0x60, 0xd5, 0x34, 0x2d, 0xa6, 0x79, 0xb1, 0xd2, 0x33, 0x6b, 0x73, 0x64, 0xb3, 0x7c, 0x8d, 0xf5, + 0xfd, 0x14, 0xd6, 0x63, 0x93, 0x39, 0x83, 0x70, 0x46, 0xd2, 0x0c, 0x38, 0xc3, 0x00, 0xa4, 0x01, + 0x38, 0x12, 0x73, 0xcf, 0x92, 0x0b, 0xf9, 0xcd, 0x11, 0x62, 0x1e, 0x17, 0x58, 0xb3, 0xcc, 0x43, + 0xbd, 0x17, 0x86, 0x1d, 0x1c, 0x00, 0xe1, 0x08, 0xe8, 0xad, 0xc7, 0x70, 0x33, 0xc7, 0x5a, 0x34, + 0x0f, 0xc5, 0x63, 0x32, 0xf0, 0x86, 0x0d, 0xf3, 0x9f, 0x68, 0x29, 0xba, 0x4d, 0x9e, 0x92, 0x3b, + 0xdc, 0x0f, 0x0a, 0xef, 0x2b, 0xea, 0x8f, 0x27, 0xa2, 0xbe, 0x23, 0x2a, 0xe6, 0x7b, 0x50, 0x71, + 0x88, 0x6d, 0xe8, 0x1d, 0x8d, 0xca, 0x42, 0xe8, 0xba, 0x77, 0xa4, 0xe1, 0xb5, 0xe1, 0x80, 0x1a, + 0xab, 0xad, 0x0b, 0x2f, 0xb7, 0xb6, 0x2e, 0xbe, 0x98, 0xda, 0xfa, 0xb7, 0xa0, 0x42, 0xfd, 0xaa, + 0xba, 0x24, 0x20, 0x1f, 0x8c, 0x11, 0x5f, 0x65, 0x41, 0x1d, 0x28, 0x08, 0x4a, 0xe9, 0x00, 0x34, + 0xab, 0x88, 0x9e, 0x18, 0xb3, 0x88, 0x7e, 0xa1, 0x85, 0x2f, 0x8f, 0xa9, 0xb6, 0xe6, 0x52, 0xd2, + 0x15, 0x81, 0xa8, 0x12, 0xc6, 0xd4, 0x96, 0x68, 0xc5, 0x92, 0x8a, 0x3e, 0x8e, 0xb9, 0x6c, 0xe5, + 0x32, 0x2e, 0x3b, 0x9b, 0xef, 0xae, 0x68, 0x1f, 0x6e, 0xda, 0x8e, 0xd5, 0x73, 0x08, 0xa5, 0xeb, + 0x44, 0xeb, 0x1a, 0xba, 0x49, 0xfc, 0xf1, 0xf1, 0x2a, 0xa2, 0x57, 0xce, 0xcf, 0x6a, 0x37, 0x5b, + 0xd9, 0x2c, 0x38, 0x4f, 0x56, 0xfd, 0xbc, 0x04, 0xf3, 0xc9, 0x0c, 0x98, 0x53, 0xa4, 0x2a, 0x97, + 0x2a, 0x52, 0xef, 0x47, 0x16, 0x83, 0x57, 0xc1, 0x47, 0xce, 0xf8, 0x52, 0x0b, 0x62, 0x15, 0xe6, + 0x64, 0x34, 0xf0, 0x89, 0xb2, 0x4c, 0x0f, 0x66, 0x7f, 0x3f, 0x4e, 0xc6, 0x49, 0x7e, 0x5e, 0x7a, + 0x86, 0x15, 0xa5, 0x0f, 0x52, 0x8a, 0x97, 0x9e, 0xab, 0x49, 0x06, 0x9c, 0x96, 0x41, 0xdb, 0xb0, + 0xe8, 0x9a, 0x69, 0x28, 0xcf, 0x1b, 0x5f, 0x91, 0x50, 0x8b, 0xfb, 0x69, 0x16, 0x9c, 0x25, 0x87, + 0x0e, 0x63, 0xd5, 0xe8, 0xa4, 0x88, 0xb0, 0x0f, 0x47, 0x5e, 0x3b, 0x23, 0x97, 0xa3, 0xe8, 0x11, + 0xcc, 0x38, 0x62, 0xdf, 0xe1, 0x1b, 0xec, 0xd5, 0xee, 0x37, 0xa4, 0xd8, 0x0c, 0x8e, 0x12, 0x71, + 0x9c, 0x37, 0xa3, 0xdc, 0xae, 0x8c, 0x5a, 0x6e, 0xab, 0xff, 0xac, 0x44, 0x93, 0x50, 0x50, 0x02, + 0x0f, 0x3b, 0x65, 0x4a, 0x49, 0x44, 0xaa, 0x23, 0x2b, 0xbb, 0xfa, 0x7d, 0x6f, 0xac, 0xea, 0x37, + 0x4c, 0x9e, 0xc3, 0xcb, 0xdf, 0x1f, 0x29, 0xb0, 0xbc, 0xd1, 0x7e, 0xe2, 0x58, 0xae, 0xed, 0x9b, + 0xb3, 0x6b, 0x7b, 0xe3, 0xfa, 0x0d, 0x28, 0x39, 0xae, 0xe1, 0xf7, 0xe3, 0x35, 0xbf, 0x1f, 0xd8, + 0x35, 0x78, 0x3f, 0x16, 
0x13, 0x52, 0x5e, 0x27, 0xb8, 0x00, 0xda, 0x81, 0x49, 0x47, 0x33, 0x7b, + 0xc4, 0x4f, 0xab, 0x77, 0x87, 0x58, 0xbf, 0xb9, 0x8e, 0x39, 0x7b, 0xa4, 0x78, 0x13, 0xd2, 0x58, + 0xa2, 0xa8, 0x7f, 0xa4, 0xc0, 0xdc, 0xd3, 0xbd, 0xbd, 0xd6, 0xa6, 0x29, 0x56, 0xb4, 0x38, 0x7d, + 0xbf, 0x03, 0x25, 0x5b, 0x63, 0x47, 0xc9, 0x4c, 0xcf, 0x69, 0x58, 0x50, 0xd0, 0x77, 0xa1, 0xcc, + 0x23, 0x09, 0x31, 0xbb, 0x23, 0x96, 0xda, 0x12, 0xbe, 0xe9, 0x09, 0x85, 0x15, 0xa2, 0x6c, 0xc0, + 0x3e, 0x9c, 0x7a, 0x0c, 0x4b, 0x11, 0x73, 0xf8, 0x78, 0x88, 0x63, 0x60, 0xd4, 0x86, 0x09, 0xae, + 0xd9, 0x3f, 0xe5, 0x1d, 0x76, 0x98, 0x99, 0xe8, 0x52, 0x58, 0xe9, 0xf0, 0x2f, 0x8a, 0x3d, 0x2c, + 0x75, 0x1b, 0x66, 0xc4, 0x95, 0x83, 0xe5, 0x30, 0x31, 0x2c, 0xe8, 0x36, 0x14, 0xfb, 0xba, 0x29, + 0xf3, 0xec, 0xb4, 0x94, 0x29, 0xf2, 0x1c, 0xc1, 0xdb, 0x05, 0x59, 0x3b, 0x95, 0x91, 0x27, 0x24, + 0x6b, 0xa7, 0x98, 0xb7, 0xab, 0x4f, 0xa0, 0x2c, 0x87, 0x3b, 0x0a, 0x54, 0xbc, 0x18, 0xa8, 0x98, + 0x01, 0xb4, 0x0b, 0xe5, 0xcd, 0x56, 0xd3, 0xb0, 0xbc, 0xaa, 0xab, 0xa3, 0x77, 0x9d, 0xe4, 0x5c, + 0xac, 0x6d, 0xae, 0x63, 0x2c, 0x28, 0x48, 0x85, 0x49, 0x72, 0xda, 0x21, 0x36, 0x13, 0x1e, 0x31, + 0xd5, 0x04, 0x3e, 0xcb, 0x8f, 0x45, 0x0b, 0x96, 0x14, 0xf5, 0x8f, 0x0b, 0x50, 0x96, 0xc3, 0x71, + 0x05, 0xbb, 0xb0, 0xad, 0xd8, 0x2e, 0xec, 0x8d, 0xd1, 0x5c, 0x23, 0x77, 0x0b, 0xb6, 0x97, 0xd8, + 0x82, 0xdd, 0x1f, 0x11, 0xef, 0xe2, 0xfd, 0xd7, 0x3f, 0x28, 0x30, 0x1b, 0x77, 0x4a, 0xf4, 0x2e, + 0x4c, 0xf3, 0x84, 0xa3, 0x77, 0xc8, 0x4e, 0x58, 0xe7, 0x06, 0x87, 0x30, 0xed, 0x90, 0x84, 0xa3, + 0x7c, 0xa8, 0x17, 0x88, 0x71, 0x3f, 0x92, 0x9d, 0xce, 0x1f, 0x52, 0x97, 0xe9, 0x46, 0xdd, 0xbb, + 0x46, 0xab, 0x6f, 0x9a, 0x6c, 0xd7, 0x69, 0x33, 0x47, 0x37, 0x7b, 0x29, 0x45, 0xc2, 0x29, 0xa3, + 0xc8, 0xea, 0x3f, 0x29, 0x30, 0x2d, 0x4d, 0xbe, 0x82, 0x5d, 0xc5, 0xaf, 0xc5, 0x77, 0x15, 0x77, + 0x47, 0x5c, 0xe0, 0xd9, 0x5b, 0x8a, 0xbf, 0x09, 0x4d, 0xe7, 0x4b, 0x9a, 0x7b, 0xf5, 0x91, 0x45, + 0x59, 0xd2, 0xab, 0xf9, 0x62, 0xc4, 0x82, 0x82, 0x5c, 0x98, 0xd7, 0x13, 0x31, 0x40, 0x0e, 0x6d, + 0x63, 0x34, 0x4b, 0x02, 0xb1, 0x66, 0x55, 0xc2, 0xcf, 0x27, 0x29, 0x38, 0xa5, 0x42, 0x25, 0x90, + 0xe2, 0x42, 0x1f, 0x42, 0xe9, 0x88, 0x31, 0x3b, 0xe3, 0xbc, 0x7a, 0x48, 0xe4, 0x09, 0x4d, 0xa8, + 0x88, 0xde, 0xed, 0xed, 0xb5, 0xb0, 0x80, 0x52, 0xff, 0x27, 0x1c, 0x8f, 0xb6, 0xe7, 0xe3, 0x41, + 0x3c, 0x55, 0x2e, 0x13, 0x4f, 0xa7, 0xb3, 0x62, 0x29, 0x7a, 0x0a, 0x45, 0x66, 0x8c, 0xba, 0x2d, + 0x94, 0x88, 0x7b, 0x5b, 0xed, 0x30, 0x20, 0xed, 0x6d, 0xb5, 0x31, 0x87, 0x40, 0xbb, 0x30, 0xc1, + 0xb3, 0x0f, 0x5f, 0x82, 0xc5, 0xd1, 0x97, 0x34, 0xef, 0x7f, 0xe8, 0x10, 0xfc, 0x8b, 0x62, 0x0f, + 0x47, 0xfd, 0x3e, 0xcc, 0xc4, 0xd6, 0x29, 0xfa, 0x04, 0xae, 0x1b, 0x96, 0xd6, 0x6d, 0x6a, 0x86, + 0x66, 0x76, 0x88, 0x7f, 0x39, 0x70, 0x37, 0x6b, 0x87, 0xb1, 0x15, 0xe1, 0x93, 0xab, 0x3c, 0xb8, + 0x4e, 0x8d, 0xd2, 0x70, 0x0c, 0x51, 0xd5, 0x00, 0xc2, 0x3e, 0xa2, 0x1a, 0x4c, 0x70, 0x3f, 0xf3, + 0xf2, 0xc9, 0x54, 0x73, 0x8a, 0x5b, 0xc8, 0xdd, 0x8f, 0x62, 0xaf, 0x1d, 0x3d, 0x04, 0xa0, 0xa4, + 0xe3, 0x10, 0x26, 0x82, 0x41, 0x21, 0x7e, 0x05, 0xdd, 0x0e, 0x28, 0x38, 0xc2, 0xa5, 0xfe, 0x8b, + 0x02, 0x33, 0x3b, 0x84, 0xfd, 0xc0, 0x72, 0x8e, 0x5b, 0x96, 0xa1, 0x77, 0x06, 0x57, 0x10, 0x6c, + 0x71, 0x2c, 0xd8, 0xbe, 0x35, 0x64, 0x66, 0x62, 0xd6, 0xe5, 0x85, 0x5c, 0xf5, 0x33, 0x05, 0x6e, + 0xc6, 0x38, 0x1f, 0x87, 0x4b, 0x77, 0x1f, 0x26, 0x6c, 0xcb, 0x61, 0x7e, 0x22, 0x1e, 0x4b, 0x21, + 0x0f, 0x63, 0x91, 0x54, 0xcc, 0x61, 0xb0, 0x87, 0x86, 0xb6, 0xa0, 0xc0, 0x2c, 0xe9, 0xaa, 0xe3, + 0x61, 0x12, 0xe2, 0x34, 0x41, 0x62, 0x16, 0xf6, 
0x2c, 0x5c, 0x60, 0x16, 0x9f, 0x88, 0x6a, 0x8c, + 0x2b, 0x1a, 0x7c, 0x5e, 0x52, 0x0f, 0x30, 0x94, 0x0e, 0x1d, 0xab, 0x7f, 0xe9, 0x3e, 0x04, 0x13, + 0xb1, 0xe1, 0x58, 0x7d, 0x2c, 0xb0, 0xd4, 0x9f, 0x28, 0xb0, 0x10, 0xe3, 0xbc, 0x82, 0xc0, 0xff, + 0x61, 0x3c, 0xf0, 0xdf, 0x1f, 0xa7, 0x23, 0x39, 0xe1, 0xff, 0x27, 0x85, 0x44, 0x37, 0x78, 0x87, + 0xd1, 0x21, 0x4c, 0xdb, 0x56, 0xb7, 0xfd, 0x02, 0xae, 0x03, 0xe7, 0x78, 0xde, 0x6c, 0x85, 0x58, + 0x38, 0x0a, 0x8c, 0x4e, 0x61, 0xc1, 0xd4, 0xfa, 0x84, 0xda, 0x5a, 0x87, 0xb4, 0x5f, 0xc0, 0x01, + 0xc9, 0x0d, 0x71, 0xdf, 0x90, 0x44, 0xc4, 0x69, 0x25, 0x68, 0x1b, 0xca, 0xba, 0x2d, 0xea, 0x38, + 0x59, 0xbb, 0x0c, 0xcd, 0xa2, 0x5e, 0xd5, 0xe7, 0xc5, 0x73, 0xf9, 0x81, 0x7d, 0x0c, 0xf5, 0x6f, + 0x93, 0xde, 0xc0, 0xfd, 0x0f, 0x3d, 0x81, 0x8a, 0x78, 0x84, 0xd3, 0xb1, 0x0c, 0xff, 0x66, 0x80, + 0xcf, 0x6c, 0x4b, 0xb6, 0x3d, 0x3f, 0xab, 0xbd, 0x92, 0x71, 0xe8, 0xeb, 0x93, 0x71, 0x20, 0x8c, + 0x76, 0xa0, 0x64, 0x7f, 0x95, 0x0a, 0x46, 0x24, 0x39, 0x51, 0xb6, 0x08, 0x1c, 0xf5, 0xf7, 0x8a, + 0x09, 0x73, 0x45, 0xaa, 0xfb, 0xf4, 0x85, 0xcd, 0x7a, 0x50, 0x31, 0xe5, 0xce, 0xfc, 0x01, 0x94, + 0x65, 0x86, 0x97, 0xce, 0xfc, 0x8d, 0x71, 0x9c, 0x39, 0x9a, 0xc5, 0x82, 0x0d, 0x8b, 0xdf, 0xe8, + 0x03, 0xa3, 0xef, 0xc1, 0x24, 0xf1, 0x54, 0x78, 0xb9, 0xf1, 0xbd, 0x71, 0x54, 0x84, 0x71, 0x35, + 0x2c, 0x54, 0x65, 0x9b, 0x44, 0x45, 0xdf, 0xe2, 0xe3, 0xc5, 0x79, 0xf9, 0x26, 0x90, 0x56, 0x4b, + 0x22, 0x5d, 0xdd, 0xf6, 0xba, 0x1d, 0x34, 0x3f, 0x3f, 0xab, 0x41, 0xf8, 0x89, 0xa3, 0x12, 0xea, + 0xbf, 0x29, 0xb0, 0x20, 0x46, 0xa8, 0xe3, 0x3a, 0x3a, 0x1b, 0x5c, 0x59, 0x62, 0x7a, 0x16, 0x4b, + 0x4c, 0xef, 0x0c, 0x19, 0x96, 0x94, 0x85, 0xb9, 0xc9, 0xe9, 0xa7, 0x0a, 0xdc, 0x48, 0x71, 0x5f, + 0x41, 0x5c, 0xdc, 0x8f, 0xc7, 0xc5, 0xb7, 0xc6, 0xed, 0x50, 0x4e, 0x6c, 0xfc, 0xab, 0xb9, 0x8c, + 0xee, 0x88, 0x95, 0xf2, 0x10, 0xc0, 0x76, 0xf4, 0x13, 0xdd, 0x20, 0x3d, 0x79, 0x09, 0x5e, 0x89, + 0x3c, 0x82, 0x0b, 0x28, 0x38, 0xc2, 0x85, 0x28, 0x2c, 0x77, 0xc9, 0xa1, 0xe6, 0x1a, 0x6c, 0xb5, + 0xdb, 0x5d, 0xd3, 0x6c, 0xed, 0x40, 0x37, 0x74, 0xa6, 0xcb, 0xe3, 0x82, 0xa9, 0xe6, 0x23, 0xef, + 0x72, 0x3a, 0x8b, 0xe3, 0xf9, 0x59, 0xed, 0x76, 0xd6, 0xed, 0x90, 0xcf, 0x32, 0xc0, 0x39, 0xd0, + 0x68, 0x00, 0x55, 0x87, 0x7c, 0xdf, 0xd5, 0x1d, 0xd2, 0x5d, 0x77, 0x2c, 0x3b, 0xa6, 0xb6, 0x28, + 0xd4, 0xfe, 0xea, 0xf9, 0x59, 0xad, 0x8a, 0x73, 0x78, 0x86, 0x2b, 0xce, 0x85, 0x47, 0x9f, 0xc2, + 0xa2, 0xe6, 0xbd, 0x1d, 0x8c, 0x69, 0xf5, 0x56, 0xc9, 0xfb, 0xe7, 0x67, 0xb5, 0xc5, 0xd5, 0x34, + 0x79, 0xb8, 0xc2, 0x2c, 0x50, 0xd4, 0x80, 0xf2, 0x89, 0x78, 0xd9, 0x48, 0xab, 0x13, 0x02, 0x9f, + 0x27, 0x82, 0xb2, 0xf7, 0xd8, 0x91, 0x63, 0x4e, 0x6e, 0xb4, 0xc5, 0xea, 0xf3, 0xb9, 0xf8, 0x86, + 0x92, 0xd7, 0x92, 0x72, 0xc5, 0x8b, 0x13, 0xe3, 0x4a, 0x18, 0xb5, 0x9e, 0x86, 0x24, 0x1c, 0xe5, + 0x43, 0x1f, 0xc3, 0xd4, 0x91, 0x3c, 0x95, 0xa0, 0xd5, 0xf2, 0x48, 0x49, 0x38, 0x76, 0x8a, 0xd1, + 0x5c, 0x90, 0x2a, 0xa6, 0xfc, 0x66, 0x8a, 0x43, 0x44, 0xf4, 0x3a, 0x94, 0xc5, 0xc7, 0xe6, 0xba, + 0x38, 0x8e, 0xab, 0x84, 0xb1, 0xed, 0xa9, 0xd7, 0x8c, 0x7d, 0xba, 0xcf, 0xba, 0xd9, 0x5a, 0x13, + 0xc7, 0xc2, 0x09, 0xd6, 0xcd, 0xd6, 0x1a, 0xf6, 0xe9, 0xe8, 0x13, 0x28, 0x53, 0xb2, 0xa5, 0x9b, + 0xee, 0x69, 0x15, 0x46, 0xba, 0x54, 0x6e, 0x3f, 0x16, 0xdc, 0x89, 0x83, 0xb1, 0x50, 0x83, 0xa4, + 0x63, 0x1f, 0x16, 0x1d, 0xc1, 0x94, 0xe3, 0x9a, 0xab, 0x74, 0x9f, 0x12, 0xa7, 0x3a, 0x2d, 0x74, + 0x0c, 0x0b, 0xe7, 0xd8, 0xe7, 0x4f, 0x6a, 0x09, 0x46, 0x28, 0xe0, 0xc0, 0x21, 0x38, 0xfa, 0x43, + 0x05, 0x10, 0x75, 0x6d, 0xdb, 0x20, 0x7d, 0x62, 0x32, 0xcd, 0x10, 0x67, 
0x71, 0xb4, 0x7a, 0x5d, + 0xe8, 0xfc, 0xf6, 0xb0, 0x7e, 0xa5, 0x04, 0x93, 0xca, 0x83, 0x43, 0xef, 0x34, 0x2b, 0xce, 0xd0, + 0xcb, 0x87, 0xf6, 0x90, 0x8a, 0xdf, 0xd5, 0x99, 0x91, 0x86, 0x36, 0xfb, 0xcc, 0x31, 0x1c, 0x5a, + 0x49, 0xc7, 0x3e, 0x2c, 0x7a, 0x06, 0xcb, 0xfe, 0xc3, 0x58, 0x6c, 0x59, 0x6c, 0x43, 0x37, 0x08, + 0x1d, 0x50, 0x46, 0xfa, 0xd5, 0x59, 0x31, 0xed, 0xc1, 0xdb, 0x0f, 0x9c, 0xc9, 0x85, 0x73, 0xa4, + 0x51, 0x1f, 0x6a, 0x7e, 0xc8, 0xe0, 0xeb, 0x29, 0x88, 0x59, 0x8f, 0x69, 0x47, 0x33, 0xbc, 0x7b, + 0x80, 0x39, 0xa1, 0xe0, 0xb5, 0xf3, 0xb3, 0x5a, 0x6d, 0xfd, 0x62, 0x56, 0x3c, 0x0c, 0x0b, 0x7d, + 0x17, 0xaa, 0x5a, 0x9e, 0x9e, 0x79, 0xa1, 0xe7, 0x55, 0x1e, 0x87, 0x72, 0x15, 0xe4, 0x4a, 0x23, + 0x06, 0xf3, 0x5a, 0xfc, 0x89, 0x32, 0xad, 0x2e, 0x8c, 0x74, 0x10, 0x99, 0x78, 0xd9, 0x1c, 0x1e, + 0x46, 0x24, 0x08, 0x14, 0xa7, 0x34, 0xa0, 0xdf, 0x06, 0xa4, 0x25, 0x5f, 0x55, 0xd3, 0x2a, 0x1a, + 0x29, 0xfd, 0xa4, 0x9e, 0x63, 0x87, 0x6e, 0x97, 0x22, 0x51, 0x9c, 0xa1, 0x07, 0x6d, 0xc1, 0x92, + 0x6c, 0xdd, 0x37, 0xa9, 0x76, 0x48, 0xda, 0x03, 0xda, 0x61, 0x06, 0xad, 0x2e, 0x8a, 0xd8, 0x27, + 0x2e, 0xbe, 0x56, 0x33, 0xe8, 0x38, 0x53, 0x0a, 0x7d, 0x1b, 0xe6, 0x0f, 0x2d, 0xe7, 0x40, 0xef, + 0x76, 0x89, 0xe9, 0x23, 0x2d, 0x09, 0xa4, 0x25, 0x3e, 0x1a, 0x1b, 0x09, 0x1a, 0x4e, 0x71, 0x23, + 0x0a, 0x37, 0x24, 0x72, 0xcb, 0xb1, 0x3a, 0xdb, 0x96, 0x6b, 0x32, 0xaf, 0x24, 0xba, 0x11, 0xa4, + 0x98, 0x1b, 0xab, 0x59, 0x0c, 0xcf, 0xcf, 0x6a, 0x77, 0xb2, 0x2b, 0xe0, 0x90, 0x09, 0x67, 0x63, + 0x8b, 0x17, 0x2c, 0xf2, 0x3e, 0xe3, 0x6a, 0x5e, 0x01, 0x8f, 0xf7, 0x82, 0x25, 0x34, 0xed, 0x85, + 0xbd, 0x60, 0x89, 0x40, 0x5e, 0x7c, 0x82, 0xfa, 0x5f, 0x05, 0x58, 0x0c, 0x99, 0x47, 0x7e, 0xc1, + 0x92, 0x21, 0xf2, 0x8b, 0x97, 0xc0, 0xc3, 0x5f, 0x02, 0x7f, 0xa6, 0xc0, 0x6c, 0x38, 0x74, 0xff, + 0xf7, 0x5e, 0x95, 0x84, 0xb6, 0xe5, 0xd4, 0xb9, 0x7f, 0x5f, 0x88, 0x76, 0xe0, 0xff, 0xfd, 0xd3, + 0x86, 0xaf, 0xfe, 0x7c, 0x57, 0xfd, 0x69, 0x11, 0xe6, 0x93, 0xab, 0x31, 0x76, 0x03, 0xae, 0x0c, + 0xbd, 0x01, 0x6f, 0xc1, 0xd2, 0xa1, 0x6b, 0x18, 0x03, 0x31, 0x0c, 0x91, 0x6b, 0x70, 0xef, 0x06, + 0xeb, 0x55, 0x29, 0xb9, 0xb4, 0x91, 0xc1, 0x83, 0x33, 0x25, 0x73, 0x6e, 0xf3, 0x8b, 0x97, 0xba, + 0xcd, 0x4f, 0x5d, 0x2e, 0x97, 0xc6, 0xb8, 0x5c, 0xce, 0xbc, 0x99, 0x9f, 0xb8, 0xc4, 0xcd, 0xfc, + 0x65, 0xae, 0xd2, 0x33, 0x82, 0xd8, 0xd0, 0x97, 0x9d, 0xaf, 0xc2, 0x2d, 0x29, 0xc6, 0xc4, 0x2d, + 0xb7, 0xc9, 0x1c, 0xcb, 0x30, 0x88, 0xb3, 0xee, 0xf6, 0xfb, 0x03, 0xf5, 0x9b, 0x30, 0x1b, 0x7f, + 0xbf, 0xe1, 0xcd, 0xb4, 0xf7, 0x84, 0x44, 0xde, 0x23, 0x46, 0x66, 0xda, 0x6b, 0xc7, 0x01, 0x87, + 0xfa, 0xfb, 0x0a, 0x2c, 0x67, 0xbf, 0xd3, 0x44, 0x06, 0xcc, 0xf6, 0xb5, 0xd3, 0xe8, 0xdb, 0x59, + 0xe5, 0x92, 0x27, 0x3c, 0xe2, 0xe2, 0x7e, 0x3b, 0x86, 0x85, 0x13, 0xd8, 0xea, 0x97, 0x0a, 0xdc, + 0xcc, 0xb9, 0x32, 0xbf, 0x5a, 0x4b, 0xd0, 0x47, 0x50, 0xe9, 0x6b, 0xa7, 0x6d, 0xd7, 0xe9, 0x91, + 0x4b, 0x9f, 0x69, 0x89, 0x88, 0xb1, 0x2d, 0x51, 0x70, 0x80, 0xa7, 0xfe, 0x48, 0x81, 0x6a, 0xde, + 0xee, 0x02, 0xbd, 0x1b, 0xbb, 0xdc, 0xff, 0x7a, 0xe2, 0x72, 0x7f, 0x21, 0x25, 0xf7, 0x92, 0xae, + 0xf6, 0xff, 0x4e, 0x81, 0xe5, 0xec, 0x5d, 0x16, 0x7a, 0x3b, 0x66, 0x61, 0x2d, 0x61, 0xe1, 0x5c, + 0x42, 0x4a, 0xda, 0xf7, 0x3d, 0x98, 0x95, 0x7b, 0x31, 0x09, 0x23, 0x47, 0x55, 0xcd, 0x8a, 0x95, + 0x12, 0xc2, 0xdf, 0x7b, 0x88, 0xf9, 0x8a, 0xb7, 0xe1, 0x04, 0x9a, 0xfa, 0x07, 0x05, 0x98, 0x68, + 0x77, 0x34, 0x83, 0x5c, 0x41, 0x99, 0xf5, 0x9d, 0x58, 0x99, 0x35, 0xec, 0x7f, 0x2e, 0xc2, 0xaa, + 0xdc, 0x0a, 0x0b, 0x27, 0x2a, 0xac, 0x37, 0x46, 0x42, 0xbb, 0xb8, 0xb8, 0xfa, 0x15, 0x98, 0x0a, + 
0x94, 0x8e, 0x17, 0xf3, 0xd5, 0xbf, 0x2e, 0xc0, 0x74, 0x44, 0xc5, 0x98, 0x19, 0xe3, 0x30, 0x96, + 0x69, 0x47, 0xf9, 0x77, 0x61, 0x44, 0x57, 0xdd, 0xcf, 0xad, 0xde, 0x3b, 0xcd, 0xf0, 0x65, 0x5e, + 0x3a, 0xe5, 0x7e, 0x13, 0x66, 0x99, 0xf8, 0xf7, 0x5d, 0x70, 0x12, 0x5c, 0x14, 0xbe, 0x18, 0xbc, + 0xee, 0xdd, 0x8b, 0x51, 0x71, 0x82, 0xfb, 0xd6, 0x23, 0x98, 0x89, 0x29, 0x1b, 0xeb, 0x99, 0xe5, + 0x3f, 0x2a, 0xf0, 0xf5, 0xa1, 0xfb, 0x74, 0xd4, 0x8c, 0x2d, 0x92, 0x7a, 0x62, 0x91, 0xac, 0xe4, + 0x03, 0xbc, 0xbc, 0xe7, 0x3a, 0xcd, 0x37, 0x3f, 0xff, 0x62, 0xe5, 0xda, 0xcf, 0xbe, 0x58, 0xb9, + 0xf6, 0xf3, 0x2f, 0x56, 0xae, 0xfd, 0xee, 0xf9, 0x8a, 0xf2, 0xf9, 0xf9, 0x8a, 0xf2, 0xb3, 0xf3, + 0x15, 0xe5, 0xe7, 0xe7, 0x2b, 0xca, 0x7f, 0x9c, 0xaf, 0x28, 0x7f, 0xf2, 0xe5, 0xca, 0xb5, 0x8f, + 0xca, 0x12, 0xee, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xa7, 0xd7, 0xb5, 0x56, 0x5c, 0x3d, 0x00, + 0x00, } diff --git a/vendor/k8s.io/api/extensions/v1beta1/types.go b/vendor/k8s.io/api/extensions/v1beta1/types.go index 7d802b944..38e112d1e 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/types.go +++ b/vendor/k8s.io/api/extensions/v1beta1/types.go @@ -18,7 +18,8 @@ package v1beta1 import ( appsv1beta1 "k8s.io/api/apps/v1beta1" - v1 "k8s.io/api/core/v1" + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" ) @@ -49,20 +50,22 @@ type ScaleStatus struct { TargetSelector string `json:"targetSelector,omitempty" protobuf:"bytes,3,opt,name=targetSelector"` } +// +genclient +// +genclient:noVerbs // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // represents a scaling request for a resource. type Scale struct { metav1.TypeMeta `json:",inline"` - // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. + // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. // +optional Spec ScaleSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only. + // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. Read-only. // +optional Status ScaleStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -74,6 +77,29 @@ type ReplicationControllerDummy struct { metav1.TypeMeta `json:",inline"` } +// Alpha-level support for Custom Metrics in HPA (as annotations). +type CustomMetricTarget struct { + // Custom Metric name. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // Custom Metric value (average). + TargetValue resource.Quantity `json:"value" protobuf:"bytes,2,opt,name=value"` +} + +type CustomMetricTargetList struct { + Items []CustomMetricTarget `json:"items" protobuf:"bytes,1,rep,name=items"` +} + +type CustomMetricCurrentStatus struct { + // Custom Metric name. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // Custom Metric value (average). 
+ CurrentValue resource.Quantity `json:"value" protobuf:"bytes,2,opt,name=value"` +} + +type CustomMetricCurrentStatusList struct { + Items []CustomMetricCurrentStatus `json:"items" protobuf:"bytes,1,rep,name=items"` +} + // +genclient // +genclient:method=GetScale,verb=get,subresource=scale,result=Scale // +genclient:method=UpdateScale,verb=update,subresource=scale,input=Scale,result=Scale @@ -125,8 +151,6 @@ type DeploymentSpec struct { // The number of old ReplicaSets to retain to allow rollback. // This is a pointer to distinguish between explicit zero and not specified. - // This is set to the max value of int32 (i.e. 2147483647) by default, which - // means "retaining all old RelicaSets". // +optional RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty" protobuf:"varint,6,opt,name=revisionHistoryLimit"` @@ -229,7 +253,7 @@ type RollingUpdateDeployment struct { // the rolling update starts, such that the total number of old and new pods do not exceed // 130% of desired pods. Once old pods have been killed, // new RC can be scaled up further, ensuring that total number of pods running - // at any time during the update is at most 130% of desired pods. + // at any time during the update is atmost 130% of desired pods. // +optional MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty" protobuf:"bytes,2,opt,name=maxSurge"` } @@ -489,12 +513,12 @@ type DaemonSetCondition struct { type DaemonSet struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // The desired behavior of this daemon set. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Spec DaemonSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` @@ -502,7 +526,7 @@ type DaemonSet struct { // out of date by some window of time. // Populated by the system. // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Status DaemonSetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -526,7 +550,7 @@ const ( type DaemonSetList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -541,21 +565,20 @@ type DaemonSetList struct { // endpoints defined by a backend. An Ingress can be configured to give services // externally-reachable urls, load balance traffic, terminate SSL, offer name // based virtual hosting etc. -// DEPRECATED - This group version of Ingress is deprecated by networking.k8s.io/v1beta1 Ingress. See the release notes for more information. type Ingress struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec is the desired state of the Ingress. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Spec IngressSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` // Status is the current state of the Ingress. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Status IngressStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -566,7 +589,7 @@ type Ingress struct { type IngressList struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -717,12 +740,12 @@ type ReplicaSet struct { // If the Labels of a ReplicaSet are empty, they are defaulted to // be the same as the Pod(s) that the ReplicaSet manages. - // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec defines the specification of the desired behavior of the ReplicaSet. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Spec ReplicaSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` @@ -730,7 +753,7 @@ type ReplicaSet struct { // This data may be out of date by some window of time. // Populated by the system. // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Status ReplicaSetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -741,7 +764,7 @@ type ReplicaSet struct { type ReplicaSetList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -845,7 +868,7 @@ type ReplicaSetCondition struct { type PodSecurityPolicy struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -895,11 +918,6 @@ type PodSecurityPolicySpec struct { SELinux SELinuxStrategyOptions `json:"seLinux" protobuf:"bytes,10,opt,name=seLinux"` // runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set. RunAsUser RunAsUserStrategyOptions `json:"runAsUser" protobuf:"bytes,11,opt,name=runAsUser"` - // RunAsGroup is the strategy that will dictate the allowable RunAsGroup values that may be set. - // If this field is omitted, the pod's RunAsGroup can take any value. This field requires the - // RunAsGroup feature gate to be enabled. - // +optional - RunAsGroup *RunAsGroupStrategyOptions `json:"runAsGroup,omitempty" protobuf:"bytes,22,opt,name=runAsGroup"` // supplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext. SupplementalGroups SupplementalGroupsStrategyOptions `json:"supplementalGroups" protobuf:"bytes,12,opt,name=supplementalGroups"` // fsGroup is the strategy that will dictate what fs group is used by the SecurityContext. @@ -928,11 +946,6 @@ type PodSecurityPolicySpec struct { // is allowed in the "volumes" field. // +optional AllowedFlexVolumes []AllowedFlexVolume `json:"allowedFlexVolumes,omitempty" protobuf:"bytes,18,rep,name=allowedFlexVolumes"` - // AllowedCSIDrivers is a whitelist of inline CSI drivers that must be explicitly set to be embedded within a pod spec. - // An empty value indicates that any CSI driver can be used for inline ephemeral volumes. - // This is an alpha field, and is only honored if the API server enables the CSIInlineVolume feature gate. - // +optional - AllowedCSIDrivers []AllowedCSIDriver `json:"allowedCSIDrivers,omitempty" protobuf:"bytes,23,rep,name=allowedCSIDrivers"` // allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. // Each entry is either a plain sysctl name or ends in "*" in which case it is considered // as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. @@ -957,11 +970,6 @@ type PodSecurityPolicySpec struct { // This requires the ProcMountType feature flag to be enabled. // +optional AllowedProcMountTypes []v1.ProcMountType `json:"allowedProcMountTypes,omitempty" protobuf:"bytes,21,opt,name=allowedProcMountTypes"` - // runtimeClass is the strategy that will dictate the allowable RuntimeClasses for a pod. - // If this field is omitted, the pod's runtimeClassName field is unrestricted. - // Enforcement of this field depends on the RuntimeClass feature gate being enabled. - // +optional - RuntimeClass *RuntimeClassStrategyOptions `json:"runtimeClass,omitempty" protobuf:"bytes,24,opt,name=runtimeClass"` } // AllowedHostPath defines the host volume conditions that will be enabled by a policy @@ -1008,7 +1016,6 @@ var ( ConfigMap FSType = "configMap" Quobyte FSType = "quobyte" AzureDisk FSType = "azureDisk" - CSI FSType = "csi" All FSType = "*" ) @@ -1019,12 +1026,6 @@ type AllowedFlexVolume struct { Driver string `json:"driver" protobuf:"bytes,1,opt,name=driver"` } -// AllowedCSIDriver represents a single inline CSI Driver that is allowed to be used. 
-type AllowedCSIDriver struct { - // Name is the registered name of the CSI driver - Name string `json:"name" protobuf:"bytes,1,opt,name=name"` -} - // HostPortRange defines a range of host ports that will be enabled by a policy // for pods to use. It requires both the start and end to be defined. // Deprecated: use HostPortRange from policy API Group instead. @@ -1071,17 +1072,6 @@ type RunAsUserStrategyOptions struct { Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"` } -// RunAsGroupStrategyOptions defines the strategy type and any options used to create the strategy. -// Deprecated: use RunAsGroupStrategyOptions from policy API Group instead. -type RunAsGroupStrategyOptions struct { - // rule is the strategy that will dictate the allowable RunAsGroup values that may be set. - Rule RunAsGroupStrategy `json:"rule" protobuf:"bytes,1,opt,name=rule,casttype=RunAsGroupStrategy"` - // ranges are the allowed ranges of gids that may be used. If you would like to force a single gid - // then supply a single range with the same start and end. Required for MustRunAs. - // +optional - Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"` -} - // IDRange provides a min/max of an allowed range of IDs. // Deprecated: use IDRange from policy API Group instead. type IDRange struct { @@ -1108,23 +1098,6 @@ const ( RunAsUserStrategyRunAsAny RunAsUserStrategy = "RunAsAny" ) -// RunAsGroupStrategy denotes strategy types for generating RunAsGroup values for a -// Security Context. -// Deprecated: use RunAsGroupStrategy from policy API Group instead. -type RunAsGroupStrategy string - -const ( - // RunAsGroupStrategyMayRunAs means that container does not need to run with a particular gid. - // However, when RunAsGroup are specified, they have to fall in the defined range. - RunAsGroupStrategyMayRunAs RunAsGroupStrategy = "MayRunAs" - // RunAsGroupStrategyMustRunAs means that container must run as a particular gid. - // Deprecated: use RunAsGroupStrategyMustRunAs from policy API Group instead. - RunAsGroupStrategyMustRunAs RunAsGroupStrategy = "MustRunAs" - // RunAsGroupStrategyRunAsAny means that container may make requests for any gid. - // Deprecated: use RunAsGroupStrategyRunAsAny from policy API Group instead. - RunAsGroupStrategyRunAsAny RunAsGroupStrategy = "RunAsAny" -) - // FSGroupStrategyOptions defines the strategy type and options used to create the strategy. // Deprecated: use FSGroupStrategyOptions from policy API Group instead. type FSGroupStrategyOptions struct { @@ -1177,25 +1150,6 @@ const ( SupplementalGroupsStrategyRunAsAny SupplementalGroupsStrategyType = "RunAsAny" ) -// RuntimeClassStrategyOptions define the strategy that will dictate the allowable RuntimeClasses -// for a pod. -type RuntimeClassStrategyOptions struct { - // allowedRuntimeClassNames is a whitelist of RuntimeClass names that may be specified on a pod. - // A value of "*" means that any RuntimeClass name is allowed, and must be the only item in the - // list. An empty list requires the RuntimeClassName field to be unset. - AllowedRuntimeClassNames []string `json:"allowedRuntimeClassNames" protobuf:"bytes,1,rep,name=allowedRuntimeClassNames"` - // defaultRuntimeClassName is the default RuntimeClassName to set on the pod. - // The default MUST be allowed by the allowedRuntimeClassNames list. - // A value of nil does not mutate the Pod. 
- // +optional - DefaultRuntimeClassName *string `json:"defaultRuntimeClassName,omitempty" protobuf:"bytes,2,opt,name=defaultRuntimeClassName"` -} - -// AllowAllRuntimeClassNames can be used as a value for the -// RuntimeClassStrategyOptions.AllowedRuntimeClassNames field and means that any RuntimeClassName is -// allowed. -const AllowAllRuntimeClassNames = "*" - // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // PodSecurityPolicyList is a list of PodSecurityPolicy objects. @@ -1203,7 +1157,7 @@ const AllowAllRuntimeClassNames = "*" type PodSecurityPolicyList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -1211,7 +1165,6 @@ type PodSecurityPolicyList struct { Items []PodSecurityPolicy `json:"items" protobuf:"bytes,2,rep,name=items"` } -// +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // DEPRECATED 1.9 - This group version of NetworkPolicy is deprecated by networking/v1/NetworkPolicy. @@ -1219,7 +1172,7 @@ type PodSecurityPolicyList struct { type NetworkPolicy struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -1270,7 +1223,7 @@ type NetworkPolicySpec struct { Egress []NetworkPolicyEgressRule `json:"egress,omitempty" protobuf:"bytes,3,rep,name=egress"` // List of rule types that the NetworkPolicy relates to. - // Valid options are "Ingress", "Egress", or "Ingress,Egress". + // Valid options are Ingress, Egress, or Ingress,Egress. // If this field is not specified, it will default based on the existence of Ingress or Egress rules; // policies that contain an Egress section are assumed to affect Egress, and all policies // (whether or not they contain an Ingress section) are assumed to affect Ingress. @@ -1389,7 +1342,7 @@ type NetworkPolicyPeer struct { type NetworkPolicyList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` diff --git a/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go index 91632260c..cdbc490a5 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go @@ -27,15 +27,6 @@ package v1beta1 // Those methods can be generated by using hack/update-generated-swagger-docs.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. 
-var map_AllowedCSIDriver = map[string]string{ - "": "AllowedCSIDriver represents a single inline CSI Driver that is allowed to be used.", - "name": "Name is the registered name of the CSI driver", -} - -func (AllowedCSIDriver) SwaggerDoc() map[string]string { - return map_AllowedCSIDriver -} - var map_AllowedFlexVolume = map[string]string{ "": "AllowedFlexVolume represents a single Flexvolume that is allowed to be used. Deprecated: use AllowedFlexVolume from policy API Group instead.", "driver": "driver is the name of the Flexvolume driver.", @@ -55,11 +46,30 @@ func (AllowedHostPath) SwaggerDoc() map[string]string { return map_AllowedHostPath } +var map_CustomMetricCurrentStatus = map[string]string{ + "name": "Custom Metric name.", + "value": "Custom Metric value (average).", +} + +func (CustomMetricCurrentStatus) SwaggerDoc() map[string]string { + return map_CustomMetricCurrentStatus +} + +var map_CustomMetricTarget = map[string]string{ + "": "Alpha-level support for Custom Metrics in HPA (as annotations).", + "name": "Custom Metric name.", + "value": "Custom Metric value (average).", +} + +func (CustomMetricTarget) SwaggerDoc() map[string]string { + return map_CustomMetricTarget +} + var map_DaemonSet = map[string]string{ "": "DEPRECATED - This group version of DaemonSet is deprecated by apps/v1beta2/DaemonSet. See the release notes for more information. DaemonSet represents the configuration of a daemon set.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "The desired behavior of this daemon set. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", - "status": "The current status of this daemon set. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "spec": "The desired behavior of this daemon set. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "status": "The current status of this daemon set. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", } func (DaemonSet) SwaggerDoc() map[string]string { @@ -81,7 +91,7 @@ func (DaemonSetCondition) SwaggerDoc() map[string]string { var map_DaemonSetList = map[string]string{ "": "DaemonSetList is a collection of daemon sets.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", "items": "A list of daemon sets.", } @@ -104,7 +114,7 @@ func (DaemonSetSpec) SwaggerDoc() map[string]string { } var map_DaemonSetStatus = map[string]string{ - "": "DaemonSetStatus represents the current status of a daemon set.", + "": "DaemonSetStatus represents the current status of a daemon set.", "currentNumberScheduled": "The number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod. 
More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/", "numberMisscheduled": "The number of nodes that are running the daemon pod, but are not supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/", "desiredNumberScheduled": "The total number of nodes that should be running the daemon pod (including nodes correctly running the daemon pod). More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/", @@ -183,7 +193,7 @@ var map_DeploymentSpec = map[string]string{ "template": "Template describes the pods that will be created.", "strategy": "The deployment strategy to use to replace existing pods with new ones.", "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", - "revisionHistoryLimit": "The number of old ReplicaSets to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified. This is set to the max value of int32 (i.e. 2147483647) by default, which means \"retaining all old RelicaSets\".", + "revisionHistoryLimit": "The number of old ReplicaSets to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified.", "paused": "Indicates that the deployment is paused and will not be processed by the deployment controller.", "rollbackTo": "DEPRECATED. The config this deployment is rolling back to. Will be cleared after rollback is done.", "progressDeadlineSeconds": "The maximum time in seconds for a deployment to make progress before it is considered to be failed. The deployment controller will continue to process failed deployments and a condition with a ProgressDeadlineExceeded reason will be surfaced in the deployment status. Note that progress will not be estimated during the time a deployment is paused. This is set to the max value of int32 (i.e. 2147483647) by default, which means \"no deadline\".", @@ -279,10 +289,10 @@ func (IPBlock) SwaggerDoc() map[string]string { } var map_Ingress = map[string]string{ - "": "Ingress is a collection of rules that allow inbound connections to reach the endpoints defined by a backend. An Ingress can be configured to give services externally-reachable urls, load balance traffic, terminate SSL, offer name based virtual hosting etc. DEPRECATED - This group version of Ingress is deprecated by networking.k8s.io/v1beta1 Ingress. See the release notes for more information.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Spec is the desired state of the Ingress. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", - "status": "Status is the current state of the Ingress. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "": "Ingress is a collection of rules that allow inbound connections to reach the endpoints defined by a backend. An Ingress can be configured to give services externally-reachable urls, load balance traffic, terminate SSL, offer name based virtual hosting etc.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "spec": "Spec is the desired state of the Ingress. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "status": "Status is the current state of the Ingress. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", } func (Ingress) SwaggerDoc() map[string]string { @@ -301,7 +311,7 @@ func (IngressBackend) SwaggerDoc() map[string]string { var map_IngressList = map[string]string{ "": "IngressList is a collection of Ingress.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", "items": "Items is the list of Ingress.", } @@ -358,7 +368,7 @@ func (IngressTLS) SwaggerDoc() map[string]string { var map_NetworkPolicy = map[string]string{ "": "DEPRECATED 1.9 - This group version of NetworkPolicy is deprecated by networking/v1/NetworkPolicy. NetworkPolicy describes what network traffic is allowed for a set of Pods", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", "spec": "Specification of the desired behavior for this NetworkPolicy.", } @@ -388,7 +398,7 @@ func (NetworkPolicyIngressRule) SwaggerDoc() map[string]string { var map_NetworkPolicyList = map[string]string{ "": "DEPRECATED 1.9 - This group version of NetworkPolicyList is deprecated by networking/v1/NetworkPolicyList. Network Policy List is a list of NetworkPolicy objects.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", "items": "Items is a list of schema objects.", } @@ -422,7 +432,7 @@ var map_NetworkPolicySpec = map[string]string{ "podSelector": "Selects the pods to which this NetworkPolicy object applies. The array of ingress rules is applied to any pods selected by this field. Multiple network policies can select the same set of pods. In this case, the ingress rules for each are combined additively. This field is NOT optional and follows standard label selector semantics. An empty podSelector matches all pods in this namespace.", "ingress": "List of ingress rules to be applied to the selected pods. Traffic is allowed to a pod if there are no NetworkPolicies selecting the pod OR if the traffic source is the pod's local node, OR if the traffic matches at least one ingress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy does not allow any traffic (and serves solely to ensure that the pods it selects are isolated by default).", "egress": "List of egress rules to be applied to the selected pods. Outgoing traffic is allowed if there are no NetworkPolicies selecting the pod (and cluster policy otherwise allows the traffic), OR if the traffic matches at least one egress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy limits all outgoing traffic (and serves solely to ensure that the pods it selects are isolated by default). 
This field is beta-level in 1.8", - "policyTypes": "List of rule types that the NetworkPolicy relates to. Valid options are \"Ingress\", \"Egress\", or \"Ingress,Egress\". If this field is not specified, it will default based on the existence of Ingress or Egress rules; policies that contain an Egress section are assumed to affect Egress, and all policies (whether or not they contain an Ingress section) are assumed to affect Ingress. If you want to write an egress-only policy, you must explicitly specify policyTypes [ \"Egress\" ]. Likewise, if you want to write a policy that specifies that no egress is allowed, you must specify a policyTypes value that include \"Egress\" (since such a policy would not include an Egress section and would otherwise default to just [ \"Ingress\" ]). This field is beta-level in 1.8", + "policyTypes": "List of rule types that the NetworkPolicy relates to. Valid options are Ingress, Egress, or Ingress,Egress. If this field is not specified, it will default based on the existence of Ingress or Egress rules; policies that contain an Egress section are assumed to affect Egress, and all policies (whether or not they contain an Ingress section) are assumed to affect Ingress. If you want to write an egress-only policy, you must explicitly specify policyTypes [ \"Egress\" ]. Likewise, if you want to write a policy that specifies that no egress is allowed, you must specify a policyTypes value that include \"Egress\" (since such a policy would not include an Egress section and would otherwise default to just [ \"Ingress\" ]). This field is beta-level in 1.8", } func (NetworkPolicySpec) SwaggerDoc() map[string]string { @@ -431,7 +441,7 @@ func (NetworkPolicySpec) SwaggerDoc() map[string]string { var map_PodSecurityPolicy = map[string]string{ "": "PodSecurityPolicy governs the ability to make requests that affect the Security Context that will be applied to a pod and container. Deprecated: use PodSecurityPolicy from policy API Group instead.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", "spec": "spec defines the policy enforced.", } @@ -441,7 +451,7 @@ func (PodSecurityPolicy) SwaggerDoc() map[string]string { var map_PodSecurityPolicyList = map[string]string{ "": "PodSecurityPolicyList is a list of PodSecurityPolicy objects. Deprecated: use PodSecurityPolicyList from policy API Group instead.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", "items": "items is a list of schema objects.", } @@ -462,7 +472,6 @@ var map_PodSecurityPolicySpec = map[string]string{ "hostIPC": "hostIPC determines if the policy allows the use of HostIPC in the pod spec.", "seLinux": "seLinux is the strategy that will dictate the allowable labels that may be set.", "runAsUser": "runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set.", - "runAsGroup": "RunAsGroup is the strategy that will dictate the allowable RunAsGroup values that may be set. If this field is omitted, the pod's RunAsGroup can take any value. 
This field requires the RunAsGroup feature gate to be enabled.", "supplementalGroups": "supplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext.", "fsGroup": "fsGroup is the strategy that will dictate what fs group is used by the SecurityContext.", "readOnlyRootFilesystem": "readOnlyRootFilesystem when set to true will force containers to run with a read only root file system. If the container specifically requests to run with a non-read only root file system the PSP should deny the pod. If set to false the container may run with a read only root file system if it wishes but it will not be forced to.", @@ -470,11 +479,9 @@ var map_PodSecurityPolicySpec = map[string]string{ "allowPrivilegeEscalation": "allowPrivilegeEscalation determines if a pod can request to allow privilege escalation. If unspecified, defaults to true.", "allowedHostPaths": "allowedHostPaths is a white list of allowed host paths. Empty indicates that all host paths may be used.", "allowedFlexVolumes": "allowedFlexVolumes is a whitelist of allowed Flexvolumes. Empty or nil indicates that all Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes is allowed in the \"volumes\" field.", - "allowedCSIDrivers": "AllowedCSIDrivers is a whitelist of inline CSI drivers that must be explicitly set to be embedded within a pod spec. An empty value indicates that any CSI driver can be used for inline ephemeral volumes. This is an alpha field, and is only honored if the API server enables the CSIInlineVolume feature gate.", "allowedUnsafeSysctls": "allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \"*\" in which case it is considered as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. Kubelet has to whitelist all allowed unsafe sysctls explicitly to avoid rejection.\n\nExamples: e.g. \"foo/*\" allows \"foo/bar\", \"foo/baz\", etc. e.g. \"foo.*\" allows \"foo.bar\", \"foo.baz\", etc.", "forbiddenSysctls": "forbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \"*\" in which case it is considered as a prefix of forbidden sysctls. Single * means all sysctls are forbidden.\n\nExamples: e.g. \"foo/*\" forbids \"foo/bar\", \"foo/baz\", etc. e.g. \"foo.*\" forbids \"foo.bar\", \"foo.baz\", etc.", "allowedProcMountTypes": "AllowedProcMountTypes is a whitelist of allowed ProcMountTypes. Empty or nil indicates that only the DefaultProcMountType may be used. This requires the ProcMountType feature flag to be enabled.", - "runtimeClass": "runtimeClass is the strategy that will dictate the allowable RuntimeClasses for a pod. If this field is omitted, the pod's runtimeClassName field is unrestricted. Enforcement of this field depends on the RuntimeClass feature gate being enabled.", } func (PodSecurityPolicySpec) SwaggerDoc() map[string]string { @@ -483,9 +490,9 @@ func (PodSecurityPolicySpec) SwaggerDoc() map[string]string { var map_ReplicaSet = map[string]string{ "": "DEPRECATED - This group version of ReplicaSet is deprecated by apps/v1beta2/ReplicaSet. See the release notes for more information. ReplicaSet ensures that a specified number of pod replicas are running at any given time.", - "metadata": "If the Labels of a ReplicaSet are empty, they are defaulted to be the same as the Pod(s) that the ReplicaSet manages. Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Spec defines the specification of the desired behavior of the ReplicaSet. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", - "status": "Status is the most recently observed status of the ReplicaSet. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "metadata": "If the Labels of a ReplicaSet are empty, they are defaulted to be the same as the Pod(s) that the ReplicaSet manages. Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "spec": "Spec defines the specification of the desired behavior of the ReplicaSet. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "status": "Status is the most recently observed status of the ReplicaSet. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", } func (ReplicaSet) SwaggerDoc() map[string]string { @@ -507,7 +514,7 @@ func (ReplicaSetCondition) SwaggerDoc() map[string]string { var map_ReplicaSetList = map[string]string{ "": "ReplicaSetList is a collection of ReplicaSets.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", "items": "List of ReplicaSets. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller", } @@ -570,23 +577,13 @@ func (RollingUpdateDaemonSet) SwaggerDoc() map[string]string { var map_RollingUpdateDeployment = map[string]string{ "": "Spec to control the desired behavior of rolling update.", "maxUnavailable": "The maximum number of pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). Absolute number is calculated from percentage by rounding down. This can not be 0 if MaxSurge is 0. By default, a fixed value of 1 is used. Example: when this is set to 30%, the old RC can be scaled down to 70% of desired pods immediately when the rolling update starts. Once new pods are ready, old RC can be scaled down further, followed by scaling up the new RC, ensuring that the total number of pods available at all times during the update is at least 70% of desired pods.", - "maxSurge": "The maximum number of pods that can be scheduled above the desired number of pods. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up. By default, a value of 1 is used. Example: when this is set to 30%, the new RC can be scaled up immediately when the rolling update starts, such that the total number of old and new pods do not exceed 130% of desired pods. Once old pods have been killed, new RC can be scaled up further, ensuring that total number of pods running at any time during the update is at most 130% of desired pods.", + "maxSurge": "The maximum number of pods that can be scheduled above the desired number of pods. 
Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up. By default, a value of 1 is used. Example: when this is set to 30%, the new RC can be scaled up immediately when the rolling update starts, such that the total number of old and new pods do not exceed 130% of desired pods. Once old pods have been killed, new RC can be scaled up further, ensuring that total number of pods running at any time during the update is at most 130% of desired pods.", } func (RollingUpdateDeployment) SwaggerDoc() map[string]string { return map_RollingUpdateDeployment } -var map_RunAsGroupStrategyOptions = map[string]string{ - "": "RunAsGroupStrategyOptions defines the strategy type and any options used to create the strategy. Deprecated: use RunAsGroupStrategyOptions from policy API Group instead.", - "rule": "rule is the strategy that will dictate the allowable RunAsGroup values that may be set.", - "ranges": "ranges are the allowed ranges of gids that may be used. If you would like to force a single gid then supply a single range with the same start and end. Required for MustRunAs.", -} - -func (RunAsGroupStrategyOptions) SwaggerDoc() map[string]string { - return map_RunAsGroupStrategyOptions -} - var map_RunAsUserStrategyOptions = map[string]string{ "": "RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy. Deprecated: use RunAsUserStrategyOptions from policy API Group instead.", "rule": "rule is the strategy that will dictate the allowable RunAsUser values that may be set.", @@ -597,16 +594,6 @@ func (RunAsUserStrategyOptions) SwaggerDoc() map[string]string { return map_RunAsUserStrategyOptions } -var map_RuntimeClassStrategyOptions = map[string]string{ - "": "RuntimeClassStrategyOptions define the strategy that will dictate the allowable RuntimeClasses for a pod.", - "allowedRuntimeClassNames": "allowedRuntimeClassNames is a whitelist of RuntimeClass names that may be specified on a pod. A value of \"*\" means that any RuntimeClass name is allowed, and must be the only item in the list. An empty list requires the RuntimeClassName field to be unset.", - "defaultRuntimeClassName": "defaultRuntimeClassName is the default RuntimeClassName to set on the pod. The default MUST be allowed by the allowedRuntimeClassNames list. A value of nil does not mutate the Pod.", -} - -func (RuntimeClassStrategyOptions) SwaggerDoc() map[string]string { - return map_RuntimeClassStrategyOptions -} - var map_SELinuxStrategyOptions = map[string]string{ "": "SELinuxStrategyOptions defines the strategy type and any options used to create the strategy. Deprecated: use SELinuxStrategyOptions from policy API Group instead.", "rule": "rule is the strategy that will dictate the allowable labels that may be set.", @@ -619,9 +606,9 @@ func (SELinuxStrategyOptions) SwaggerDoc() map[string]string { var map_Scale = map[string]string{ "": "represents a scaling request for a resource.", - "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.", - "spec": "defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status.", - "status": "current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. 
Read-only.", + "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata.", + "spec": "defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status.", + "status": "current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. Read-only.", } func (Scale) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go index cb6101796..65801c23e 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go @@ -28,49 +28,113 @@ import ( ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AllowedCSIDriver) DeepCopyInto(out *AllowedCSIDriver) { +func (in *AllowedFlexVolume) DeepCopyInto(out *AllowedFlexVolume) { *out = *in return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedCSIDriver. -func (in *AllowedCSIDriver) DeepCopy() *AllowedCSIDriver { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedFlexVolume. +func (in *AllowedFlexVolume) DeepCopy() *AllowedFlexVolume { if in == nil { return nil } - out := new(AllowedCSIDriver) + out := new(AllowedFlexVolume) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AllowedFlexVolume) DeepCopyInto(out *AllowedFlexVolume) { +func (in *AllowedHostPath) DeepCopyInto(out *AllowedHostPath) { *out = *in return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedFlexVolume. -func (in *AllowedFlexVolume) DeepCopy() *AllowedFlexVolume { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedHostPath. +func (in *AllowedHostPath) DeepCopy() *AllowedHostPath { if in == nil { return nil } - out := new(AllowedFlexVolume) + out := new(AllowedHostPath) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AllowedHostPath) DeepCopyInto(out *AllowedHostPath) { +func (in *CustomMetricCurrentStatus) DeepCopyInto(out *CustomMetricCurrentStatus) { *out = *in + out.CurrentValue = in.CurrentValue.DeepCopy() return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedHostPath. -func (in *AllowedHostPath) DeepCopy() *AllowedHostPath { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomMetricCurrentStatus. +func (in *CustomMetricCurrentStatus) DeepCopy() *CustomMetricCurrentStatus { if in == nil { return nil } - out := new(AllowedHostPath) + out := new(CustomMetricCurrentStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CustomMetricCurrentStatusList) DeepCopyInto(out *CustomMetricCurrentStatusList) { + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CustomMetricCurrentStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomMetricCurrentStatusList. +func (in *CustomMetricCurrentStatusList) DeepCopy() *CustomMetricCurrentStatusList { + if in == nil { + return nil + } + out := new(CustomMetricCurrentStatusList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomMetricTarget) DeepCopyInto(out *CustomMetricTarget) { + *out = *in + out.TargetValue = in.TargetValue.DeepCopy() + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomMetricTarget. +func (in *CustomMetricTarget) DeepCopy() *CustomMetricTarget { + if in == nil { + return nil + } + out := new(CustomMetricTarget) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomMetricTargetList) DeepCopyInto(out *CustomMetricTargetList) { + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CustomMetricTarget, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomMetricTargetList. +func (in *CustomMetricTargetList) DeepCopy() *CustomMetricTargetList { + if in == nil { + return nil + } + out := new(CustomMetricTargetList) in.DeepCopyInto(out) return out } @@ -124,7 +188,7 @@ func (in *DaemonSetCondition) DeepCopy() *DaemonSetCondition { func (in *DaemonSetList) DeepCopyInto(out *DaemonSetList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]DaemonSet, len(*in)) @@ -280,7 +344,7 @@ func (in *DeploymentCondition) DeepCopy() *DeploymentCondition { func (in *DeploymentList) DeepCopyInto(out *DeploymentList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Deployment, len(*in)) @@ -595,7 +659,7 @@ func (in *IngressBackend) DeepCopy() *IngressBackend { func (in *IngressList) DeepCopyInto(out *IngressList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Ingress, len(*in)) @@ -826,7 +890,7 @@ func (in *NetworkPolicyIngressRule) DeepCopy() *NetworkPolicyIngressRule { func (in *NetworkPolicyList) DeepCopyInto(out *NetworkPolicyList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]NetworkPolicy, len(*in)) @@ -979,7 +1043,7 @@ func (in *PodSecurityPolicy) DeepCopyObject() runtime.Object { func (in *PodSecurityPolicyList) DeepCopyInto(out *PodSecurityPolicyList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out 
= make([]PodSecurityPolicy, len(*in)) @@ -1038,11 +1102,6 @@ func (in *PodSecurityPolicySpec) DeepCopyInto(out *PodSecurityPolicySpec) { } in.SELinux.DeepCopyInto(&out.SELinux) in.RunAsUser.DeepCopyInto(&out.RunAsUser) - if in.RunAsGroup != nil { - in, out := &in.RunAsGroup, &out.RunAsGroup - *out = new(RunAsGroupStrategyOptions) - (*in).DeepCopyInto(*out) - } in.SupplementalGroups.DeepCopyInto(&out.SupplementalGroups) in.FSGroup.DeepCopyInto(&out.FSGroup) if in.DefaultAllowPrivilegeEscalation != nil { @@ -1065,11 +1124,6 @@ func (in *PodSecurityPolicySpec) DeepCopyInto(out *PodSecurityPolicySpec) { *out = make([]AllowedFlexVolume, len(*in)) copy(*out, *in) } - if in.AllowedCSIDrivers != nil { - in, out := &in.AllowedCSIDrivers, &out.AllowedCSIDrivers - *out = make([]AllowedCSIDriver, len(*in)) - copy(*out, *in) - } if in.AllowedUnsafeSysctls != nil { in, out := &in.AllowedUnsafeSysctls, &out.AllowedUnsafeSysctls *out = make([]string, len(*in)) @@ -1085,11 +1139,6 @@ func (in *PodSecurityPolicySpec) DeepCopyInto(out *PodSecurityPolicySpec) { *out = make([]corev1.ProcMountType, len(*in)) copy(*out, *in) } - if in.RuntimeClass != nil { - in, out := &in.RuntimeClass, &out.RuntimeClass - *out = new(RuntimeClassStrategyOptions) - (*in).DeepCopyInto(*out) - } return } @@ -1152,7 +1201,7 @@ func (in *ReplicaSetCondition) DeepCopy() *ReplicaSetCondition { func (in *ReplicaSetList) DeepCopyInto(out *ReplicaSetList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ReplicaSet, len(*in)) @@ -1319,27 +1368,6 @@ func (in *RollingUpdateDeployment) DeepCopy() *RollingUpdateDeployment { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RunAsGroupStrategyOptions) DeepCopyInto(out *RunAsGroupStrategyOptions) { - *out = *in - if in.Ranges != nil { - in, out := &in.Ranges, &out.Ranges - *out = make([]IDRange, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunAsGroupStrategyOptions. -func (in *RunAsGroupStrategyOptions) DeepCopy() *RunAsGroupStrategyOptions { - if in == nil { - return nil - } - out := new(RunAsGroupStrategyOptions) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RunAsUserStrategyOptions) DeepCopyInto(out *RunAsUserStrategyOptions) { *out = *in @@ -1361,32 +1389,6 @@ func (in *RunAsUserStrategyOptions) DeepCopy() *RunAsUserStrategyOptions { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RuntimeClassStrategyOptions) DeepCopyInto(out *RuntimeClassStrategyOptions) { - *out = *in - if in.AllowedRuntimeClassNames != nil { - in, out := &in.AllowedRuntimeClassNames, &out.AllowedRuntimeClassNames - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.DefaultRuntimeClassName != nil { - in, out := &in.DefaultRuntimeClassName, &out.DefaultRuntimeClassName - *out = new(string) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuntimeClassStrategyOptions. 
-func (in *RuntimeClassStrategyOptions) DeepCopy() *RuntimeClassStrategyOptions { - if in == nil { - return nil - } - out := new(RuntimeClassStrategyOptions) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *SELinuxStrategyOptions) DeepCopyInto(out *SELinuxStrategyOptions) { *out = *in diff --git a/vendor/k8s.io/api/networking/v1/doc.go b/vendor/k8s.io/api/networking/v1/doc.go index d3ffd5ed1..ef9ae2ae4 100644 --- a/vendor/k8s.io/api/networking/v1/doc.go +++ b/vendor/k8s.io/api/networking/v1/doc.go @@ -15,8 +15,6 @@ limitations under the License. */ // +k8s:deepcopy-gen=package -// +k8s:protobuf-gen=package // +k8s:openapi-gen=true // +groupName=networking.k8s.io - package v1 // import "k8s.io/api/networking/v1" diff --git a/vendor/k8s.io/api/networking/v1/generated.pb.go b/vendor/k8s.io/api/networking/v1/generated.pb.go index 86bd80c85..7b1c04b29 100644 --- a/vendor/k8s.io/api/networking/v1/generated.pb.go +++ b/vendor/k8s.io/api/networking/v1/generated.pb.go @@ -14,8 +14,9 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// Code generated by protoc-gen-gogo. // source: k8s.io/kubernetes/vendor/k8s.io/api/networking/v1/generated.proto +// DO NOT EDIT! /* Package v1 is a generated protocol buffer package. @@ -445,6 +446,24 @@ func (m *NetworkPolicySpec) MarshalTo(dAtA []byte) (int, error) { return i, nil } +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) diff --git a/vendor/k8s.io/api/networking/v1/types.go b/vendor/k8s.io/api/networking/v1/types.go index 59331111f..ce70448d3 100644 --- a/vendor/k8s.io/api/networking/v1/types.go +++ b/vendor/k8s.io/api/networking/v1/types.go @@ -29,7 +29,7 @@ import ( type NetworkPolicy struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -80,7 +80,7 @@ type NetworkPolicySpec struct { Egress []NetworkPolicyEgressRule `json:"egress,omitempty" protobuf:"bytes,3,rep,name=egress"` // List of rule types that the NetworkPolicy relates to. - // Valid options are "Ingress", "Egress", or "Ingress,Egress". + // Valid options are Ingress, Egress, or Ingress,Egress. // If this field is not specified, it will default based on the existence of Ingress or Egress rules; // policies that contain an Egress section are assumed to affect Egress, and all policies // (whether or not they contain an Ingress section) are assumed to affect Ingress. 
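The policyTypes defaulting described in the hunk above is subtle: a policy meant to restrict only egress must name "Egress" explicitly, because a spec with no Egress section would otherwise default policyTypes to just ["Ingress"]. A minimal Go sketch of such an egress-only policy follows; it is standalone and illustrative, not part of the vendored sources, and the package, function, and object names are invented for the example.

// Illustrative only; not part of the vendored diff.
package example

import (
	networkingv1 "k8s.io/api/networking/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// denyAllEgress builds a NetworkPolicy that blocks all outgoing traffic for
// every pod in the namespace. PolicyTypes lists Egress explicitly: because
// the spec contains no Egress section, an unset policyTypes would default to
// just ["Ingress"] and egress would remain unrestricted.
func denyAllEgress(namespace string) *networkingv1.NetworkPolicy {
	return &networkingv1.NetworkPolicy{
		ObjectMeta: metav1.ObjectMeta{Name: "deny-all-egress", Namespace: namespace},
		Spec: networkingv1.NetworkPolicySpec{
			PodSelector: metav1.LabelSelector{}, // empty selector matches all pods in the namespace
			PolicyTypes: []networkingv1.PolicyType{networkingv1.PolicyTypeEgress},
		},
	}
}

Applied to a namespace, this spec selects every pod (empty podSelector) and, having no egress rules, denies all outgoing traffic, exactly the "no egress is allowed" case the field documentation above calls out.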
@@ -194,7 +194,7 @@ type NetworkPolicyPeer struct { type NetworkPolicyList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` diff --git a/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go index cfcd0c54c..f4363bc09 100644 --- a/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go @@ -39,7 +39,7 @@ func (IPBlock) SwaggerDoc() map[string]string { var map_NetworkPolicy = map[string]string{ "": "NetworkPolicy describes what network traffic is allowed for a set of Pods", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", "spec": "Specification of the desired behavior for this NetworkPolicy.", } @@ -69,7 +69,7 @@ func (NetworkPolicyIngressRule) SwaggerDoc() map[string]string { var map_NetworkPolicyList = map[string]string{ "": "NetworkPolicyList is a list of NetworkPolicy objects.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", "items": "Items is a list of schema objects.", } @@ -103,7 +103,7 @@ var map_NetworkPolicySpec = map[string]string{ "podSelector": "Selects the pods to which this NetworkPolicy object applies. The array of ingress rules is applied to any pods selected by this field. Multiple network policies can select the same set of pods. In this case, the ingress rules for each are combined additively. This field is NOT optional and follows standard label selector semantics. An empty podSelector matches all pods in this namespace.", "ingress": "List of ingress rules to be applied to the selected pods. Traffic is allowed to a pod if there are no NetworkPolicies selecting the pod (and cluster policy otherwise allows the traffic), OR if the traffic source is the pod's local node, OR if the traffic matches at least one ingress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy does not allow any traffic (and serves solely to ensure that the pods it selects are isolated by default)", "egress": "List of egress rules to be applied to the selected pods. Outgoing traffic is allowed if there are no NetworkPolicies selecting the pod (and cluster policy otherwise allows the traffic), OR if the traffic matches at least one egress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy limits all outgoing traffic (and serves solely to ensure that the pods it selects are isolated by default). This field is beta-level in 1.8", - "policyTypes": "List of rule types that the NetworkPolicy relates to. Valid options are \"Ingress\", \"Egress\", or \"Ingress,Egress\". 
If this field is not specified, it will default based on the existence of Ingress or Egress rules; policies that contain an Egress section are assumed to affect Egress, and all policies (whether or not they contain an Ingress section) are assumed to affect Ingress. If you want to write an egress-only policy, you must explicitly specify policyTypes [ \"Egress\" ]. Likewise, if you want to write a policy that specifies that no egress is allowed, you must specify a policyTypes value that include \"Egress\" (since such a policy would not include an Egress section and would otherwise default to just [ \"Ingress\" ]). This field is beta-level in 1.8", + "policyTypes": "List of rule types that the NetworkPolicy relates to. Valid options are Ingress, Egress, or Ingress,Egress. If this field is not specified, it will default based on the existence of Ingress or Egress rules; policies that contain an Egress section are assumed to affect Egress, and all policies (whether or not they contain an Ingress section) are assumed to affect Ingress. If you want to write an egress-only policy, you must explicitly specify policyTypes [ \"Egress\" ]. Likewise, if you want to write a policy that specifies that no egress is allowed, you must specify a policyTypes value that include \"Egress\" (since such a policy would not include an Egress section and would otherwise default to just [ \"Ingress\" ]). This field is beta-level in 1.8", } func (NetworkPolicySpec) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/networking/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/networking/v1/zz_generated.deepcopy.go index 1833e9782..d1e4e8845 100644 --- a/vendor/k8s.io/api/networking/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/networking/v1/zz_generated.deepcopy.go @@ -139,7 +139,7 @@ func (in *NetworkPolicyIngressRule) DeepCopy() *NetworkPolicyIngressRule { func (in *NetworkPolicyList) DeepCopyInto(out *NetworkPolicyList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]NetworkPolicy, len(*in)) diff --git a/vendor/k8s.io/api/networking/v1beta1/generated.pb.go b/vendor/k8s.io/api/networking/v1beta1/generated.pb.go deleted file mode 100644 index 14430cbac..000000000 --- a/vendor/k8s.io/api/networking/v1beta1/generated.pb.go +++ /dev/null @@ -1,1953 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: k8s.io/kubernetes/vendor/k8s.io/api/networking/v1beta1/generated.proto - -/* - Package v1beta1 is a generated protocol buffer package. 
- - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/networking/v1beta1/generated.proto - - It has these top-level messages: - HTTPIngressPath - HTTPIngressRuleValue - Ingress - IngressBackend - IngressList - IngressRule - IngressRuleValue - IngressSpec - IngressStatus - IngressTLS -*/ -package v1beta1 - -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" - -import strings "strings" -import reflect "reflect" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package - -func (m *HTTPIngressPath) Reset() { *m = HTTPIngressPath{} } -func (*HTTPIngressPath) ProtoMessage() {} -func (*HTTPIngressPath) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } - -func (m *HTTPIngressRuleValue) Reset() { *m = HTTPIngressRuleValue{} } -func (*HTTPIngressRuleValue) ProtoMessage() {} -func (*HTTPIngressRuleValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } - -func (m *Ingress) Reset() { *m = Ingress{} } -func (*Ingress) ProtoMessage() {} -func (*Ingress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } - -func (m *IngressBackend) Reset() { *m = IngressBackend{} } -func (*IngressBackend) ProtoMessage() {} -func (*IngressBackend) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } - -func (m *IngressList) Reset() { *m = IngressList{} } -func (*IngressList) ProtoMessage() {} -func (*IngressList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } - -func (m *IngressRule) Reset() { *m = IngressRule{} } -func (*IngressRule) ProtoMessage() {} -func (*IngressRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } - -func (m *IngressRuleValue) Reset() { *m = IngressRuleValue{} } -func (*IngressRuleValue) ProtoMessage() {} -func (*IngressRuleValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } - -func (m *IngressSpec) Reset() { *m = IngressSpec{} } -func (*IngressSpec) ProtoMessage() {} -func (*IngressSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } - -func (m *IngressStatus) Reset() { *m = IngressStatus{} } -func (*IngressStatus) ProtoMessage() {} -func (*IngressStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } - -func (m *IngressTLS) Reset() { *m = IngressTLS{} } -func (*IngressTLS) ProtoMessage() {} -func (*IngressTLS) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } - -func init() { - proto.RegisterType((*HTTPIngressPath)(nil), "k8s.io.api.networking.v1beta1.HTTPIngressPath") - proto.RegisterType((*HTTPIngressRuleValue)(nil), "k8s.io.api.networking.v1beta1.HTTPIngressRuleValue") - proto.RegisterType((*Ingress)(nil), "k8s.io.api.networking.v1beta1.Ingress") - proto.RegisterType((*IngressBackend)(nil), "k8s.io.api.networking.v1beta1.IngressBackend") - proto.RegisterType((*IngressList)(nil), "k8s.io.api.networking.v1beta1.IngressList") - proto.RegisterType((*IngressRule)(nil), "k8s.io.api.networking.v1beta1.IngressRule") - proto.RegisterType((*IngressRuleValue)(nil), 
"k8s.io.api.networking.v1beta1.IngressRuleValue") - proto.RegisterType((*IngressSpec)(nil), "k8s.io.api.networking.v1beta1.IngressSpec") - proto.RegisterType((*IngressStatus)(nil), "k8s.io.api.networking.v1beta1.IngressStatus") - proto.RegisterType((*IngressTLS)(nil), "k8s.io.api.networking.v1beta1.IngressTLS") -} -func (m *HTTPIngressPath) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *HTTPIngressPath) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) - i += copy(dAtA[i:], m.Path) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Backend.Size())) - n1, err := m.Backend.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - return i, nil -} - -func (m *HTTPIngressRuleValue) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *HTTPIngressRuleValue) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Paths) > 0 { - for _, msg := range m.Paths { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *Ingress) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Ingress) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n2, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n2 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n3, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n3 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n4, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n4 - return i, nil -} - -func (m *IngressBackend) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *IngressBackend) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceName))) - i += copy(dAtA[i:], m.ServiceName) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ServicePort.Size())) - n5, err := m.ServicePort.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n5 - return i, nil -} - -func (m *IngressList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *IngressList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n6, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n6 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 
- i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *IngressRule) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *IngressRule) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Host))) - i += copy(dAtA[i:], m.Host) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.IngressRuleValue.Size())) - n7, err := m.IngressRuleValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n7 - return i, nil -} - -func (m *IngressRuleValue) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *IngressRuleValue) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.HTTP != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.HTTP.Size())) - n8, err := m.HTTP.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n8 - } - return i, nil -} - -func (m *IngressSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *IngressSpec) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Backend != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Backend.Size())) - n9, err := m.Backend.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n9 - } - if len(m.TLS) > 0 { - for _, msg := range m.TLS { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if len(m.Rules) > 0 { - for _, msg := range m.Rules { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *IngressStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *IngressStatus) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LoadBalancer.Size())) - n10, err := m.LoadBalancer.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n10 - return i, nil -} - -func (m *IngressTLS) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *IngressTLS) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Hosts) > 0 { - for _, s := range m.Hosts { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.SecretName))) - i += copy(dAtA[i:], m.SecretName) - return i, nil -} - -func encodeVarintGenerated(dAtA 
[]byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *HTTPIngressPath) Size() (n int) { - var l int - _ = l - l = len(m.Path) - n += 1 + l + sovGenerated(uint64(l)) - l = m.Backend.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *HTTPIngressRuleValue) Size() (n int) { - var l int - _ = l - if len(m.Paths) > 0 { - for _, e := range m.Paths { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *Ingress) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *IngressBackend) Size() (n int) { - var l int - _ = l - l = len(m.ServiceName) - n += 1 + l + sovGenerated(uint64(l)) - l = m.ServicePort.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *IngressList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *IngressRule) Size() (n int) { - var l int - _ = l - l = len(m.Host) - n += 1 + l + sovGenerated(uint64(l)) - l = m.IngressRuleValue.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *IngressRuleValue) Size() (n int) { - var l int - _ = l - if m.HTTP != nil { - l = m.HTTP.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *IngressSpec) Size() (n int) { - var l int - _ = l - if m.Backend != nil { - l = m.Backend.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if len(m.TLS) > 0 { - for _, e := range m.TLS { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.Rules) > 0 { - for _, e := range m.Rules { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *IngressStatus) Size() (n int) { - var l int - _ = l - l = m.LoadBalancer.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *IngressTLS) Size() (n int) { - var l int - _ = l - if len(m.Hosts) > 0 { - for _, s := range m.Hosts { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - l = len(m.SecretName) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *HTTPIngressPath) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&HTTPIngressPath{`, - `Path:` + fmt.Sprintf("%v", this.Path) + `,`, - `Backend:` + strings.Replace(strings.Replace(this.Backend.String(), "IngressBackend", "IngressBackend", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *HTTPIngressRuleValue) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&HTTPIngressRuleValue{`, - `Paths:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Paths), "HTTPIngressPath", "HTTPIngressPath", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *Ingress) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Ingress{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", 
"k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "IngressSpec", "IngressSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "IngressStatus", "IngressStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *IngressBackend) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&IngressBackend{`, - `ServiceName:` + fmt.Sprintf("%v", this.ServiceName) + `,`, - `ServicePort:` + strings.Replace(strings.Replace(this.ServicePort.String(), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *IngressList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&IngressList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Ingress", "Ingress", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *IngressRule) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&IngressRule{`, - `Host:` + fmt.Sprintf("%v", this.Host) + `,`, - `IngressRuleValue:` + strings.Replace(strings.Replace(this.IngressRuleValue.String(), "IngressRuleValue", "IngressRuleValue", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *IngressRuleValue) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&IngressRuleValue{`, - `HTTP:` + strings.Replace(fmt.Sprintf("%v", this.HTTP), "HTTPIngressRuleValue", "HTTPIngressRuleValue", 1) + `,`, - `}`, - }, "") - return s -} -func (this *IngressSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&IngressSpec{`, - `Backend:` + strings.Replace(fmt.Sprintf("%v", this.Backend), "IngressBackend", "IngressBackend", 1) + `,`, - `TLS:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.TLS), "IngressTLS", "IngressTLS", 1), `&`, ``, 1) + `,`, - `Rules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Rules), "IngressRule", "IngressRule", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *IngressStatus) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&IngressStatus{`, - `LoadBalancer:` + strings.Replace(strings.Replace(this.LoadBalancer.String(), "LoadBalancerStatus", "k8s_io_api_core_v1.LoadBalancerStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *IngressTLS) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&IngressTLS{`, - `Hosts:` + fmt.Sprintf("%v", this.Hosts) + `,`, - `SecretName:` + fmt.Sprintf("%v", this.SecretName) + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType 
:= int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HTTPIngressPath: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HTTPIngressPath: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Path = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Backend", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Backend.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *HTTPIngressRuleValue) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HTTPIngressRuleValue: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HTTPIngressRuleValue: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Paths", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Paths = append(m.Paths, HTTPIngressPath{}) - if err := m.Paths[len(m.Paths)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m 
*Ingress) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Ingress: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Ingress: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *IngressBackend) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: IngressBackend: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: IngressBackend: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if 
wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ServiceName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ServiceName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ServicePort", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ServicePort.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *IngressList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: IngressList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: IngressList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, Ingress{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = 
postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *IngressRule) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: IngressRule: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: IngressRule: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Host = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IngressRuleValue", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.IngressRuleValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *IngressRuleValue) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: IngressRuleValue: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: IngressRuleValue: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field HTTP", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return 
io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.HTTP == nil { - m.HTTP = &HTTPIngressRuleValue{} - } - if err := m.HTTP.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *IngressSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: IngressSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: IngressSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Backend", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Backend == nil { - m.Backend = &IngressBackend{} - } - if err := m.Backend.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TLS", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.TLS = append(m.TLS, IngressTLS{}) - if err := m.TLS[len(m.TLS)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Rules = append(m.Rules, IngressRule{}) - if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return 
err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *IngressStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: IngressStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: IngressStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LoadBalancer", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.LoadBalancer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *IngressTLS) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: IngressTLS: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: IngressTLS: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Hosts", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Hosts = append(m.Hosts, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 
0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.SecretName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenerated(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthGenerated - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipGenerated(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") -) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/networking/v1beta1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 812 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x55, 0xcf, 0x6e, 0xfb, 0x44, - 0x10, 0x8e, 0xf3, 0xa7, 0x69, 0xd7, 0xfd, 0xa7, 0xa5, 0x87, 0xa8, 0x12, 0x6e, 0xe4, 0x03, 0x2a, - 0x88, 0xae, 0x69, 0x0a, 0x88, 0xb3, 0x0f, 0xa8, 0x15, 0x81, 0x86, 0x75, 0x84, 0x10, 0xe2, 0xd0, - 0x8d, 0xb3, 0x38, 0x26, 0x89, 0x6d, 0x76, 0xd7, 0x41, 0xdc, 0x78, 0x01, 0x04, 0x4f, 0xc1, 0x99, - 0x23, 0x8f, 0xd0, 0x63, 0x8f, 0x3d, 0x55, 0x34, 0xbc, 0x07, 0x42, 0xbb, 0xde, 0xda, 0x4e, 0xd2, - 0xfe, 0x6a, 0xfd, 0x6e, 0xde, 0x9d, 0xf9, 0xbe, 0xd9, 0x99, 0xf9, 0x66, 0x0c, 0x3e, 0x9f, 0x7e, - 0xc6, 0x51, 0x18, 0x3b, 0xd3, 0x74, 0x44, 0x59, 0x44, 0x05, 0xe5, 0xce, 0x82, 0x46, 0xe3, 0x98, - 0x39, 0xda, 0x40, 0x92, 0xd0, 0x89, 0xa8, 0xf8, 0x39, 0x66, 0xd3, 0x30, 0x0a, 0x9c, 
0xc5, 0xf9, - 0x88, 0x0a, 0x72, 0xee, 0x04, 0x34, 0xa2, 0x8c, 0x08, 0x3a, 0x46, 0x09, 0x8b, 0x45, 0x0c, 0xdf, - 0xcd, 0xdc, 0x11, 0x49, 0x42, 0x54, 0xb8, 0x23, 0xed, 0x7e, 0x7c, 0x16, 0x84, 0x62, 0x92, 0x8e, - 0x90, 0x1f, 0xcf, 0x9d, 0x20, 0x0e, 0x62, 0x47, 0xa1, 0x46, 0xe9, 0x0f, 0xea, 0xa4, 0x0e, 0xea, - 0x2b, 0x63, 0x3b, 0xb6, 0x4b, 0xc1, 0xfd, 0x98, 0x51, 0x67, 0xb1, 0x11, 0xf1, 0xf8, 0xe3, 0xc2, - 0x67, 0x4e, 0xfc, 0x49, 0x18, 0x51, 0xf6, 0x8b, 0x93, 0x4c, 0x03, 0x79, 0xc1, 0x9d, 0x39, 0x15, - 0xe4, 0x39, 0x94, 0xf3, 0x12, 0x8a, 0xa5, 0x91, 0x08, 0xe7, 0x74, 0x03, 0xf0, 0xe9, 0x6b, 0x00, - 0xee, 0x4f, 0xe8, 0x9c, 0x6c, 0xe0, 0x2e, 0x5e, 0xc2, 0xa5, 0x22, 0x9c, 0x39, 0x61, 0x24, 0xb8, - 0x60, 0xeb, 0x20, 0xfb, 0x37, 0x03, 0x1c, 0x5c, 0x0e, 0x87, 0x83, 0xab, 0x28, 0x60, 0x94, 0xf3, - 0x01, 0x11, 0x13, 0xd8, 0x05, 0xcd, 0x84, 0x88, 0x49, 0xc7, 0xe8, 0x1a, 0xa7, 0x3b, 0xee, 0xee, - 0xed, 0xc3, 0x49, 0x6d, 0xf9, 0x70, 0xd2, 0x94, 0x36, 0xac, 0x2c, 0xf0, 0x5b, 0xd0, 0x1e, 0x11, - 0x7f, 0x4a, 0xa3, 0x71, 0xa7, 0xde, 0x35, 0x4e, 0xcd, 0xde, 0x19, 0x7a, 0x63, 0x37, 0x90, 0xa6, - 0x77, 0x33, 0x90, 0x7b, 0xa0, 0x39, 0xdb, 0xfa, 0x02, 0x3f, 0xd1, 0xd9, 0x53, 0x70, 0x54, 0x7a, - 0x0e, 0x4e, 0x67, 0xf4, 0x1b, 0x32, 0x4b, 0x29, 0xf4, 0x40, 0x4b, 0x46, 0xe6, 0x1d, 0xa3, 0xdb, - 0x38, 0x35, 0x7b, 0xe8, 0x95, 0x78, 0x6b, 0x29, 0xb9, 0x7b, 0x3a, 0x60, 0x4b, 0x9e, 0x38, 0xce, - 0xb8, 0xec, 0xdf, 0xeb, 0xa0, 0xad, 0xbd, 0xe0, 0x0d, 0xd8, 0x96, 0x1d, 0x1c, 0x13, 0x41, 0x54, - 0xe2, 0x66, 0xef, 0xa3, 0x52, 0x8c, 0xbc, 0xa0, 0x28, 0x99, 0x06, 0xf2, 0x82, 0x23, 0xe9, 0x8d, - 0x16, 0xe7, 0xe8, 0x7a, 0xf4, 0x23, 0xf5, 0xc5, 0x97, 0x54, 0x10, 0x17, 0xea, 0x28, 0xa0, 0xb8, - 0xc3, 0x39, 0x2b, 0xec, 0x83, 0x26, 0x4f, 0xa8, 0xaf, 0x2b, 0xf6, 0x41, 0xb5, 0x8a, 0x79, 0x09, - 0xf5, 0x8b, 0x16, 0xc8, 0x13, 0x56, 0x2c, 0x70, 0x08, 0xb6, 0xb8, 0x20, 0x22, 0xe5, 0x9d, 0x86, - 0xe2, 0xfb, 0xb0, 0x22, 0x9f, 0xc2, 0xb8, 0xfb, 0x9a, 0x71, 0x2b, 0x3b, 0x63, 0xcd, 0x65, 0xff, - 0x65, 0x80, 0xfd, 0xd5, 0x5e, 0xc1, 0x4f, 0x80, 0xc9, 0x29, 0x5b, 0x84, 0x3e, 0xfd, 0x8a, 0xcc, - 0xa9, 0x16, 0xc5, 0x3b, 0x1a, 0x6f, 0x7a, 0x85, 0x09, 0x97, 0xfd, 0x60, 0x90, 0xc3, 0x06, 0x31, - 0x13, 0x3a, 0xe9, 0x97, 0x4b, 0x2a, 0x35, 0x8a, 0x32, 0x8d, 0xa2, 0xab, 0x48, 0x5c, 0x33, 0x4f, - 0xb0, 0x30, 0x0a, 0x36, 0x02, 0x49, 0x32, 0x5c, 0x66, 0xb6, 0xff, 0x36, 0x80, 0xa9, 0x9f, 0xdc, - 0x0f, 0xb9, 0x80, 0xdf, 0x6f, 0x34, 0x12, 0x55, 0x6b, 0xa4, 0x44, 0xab, 0x36, 0x1e, 0xea, 0x98, - 0xdb, 0x4f, 0x37, 0xa5, 0x26, 0x7e, 0x01, 0x5a, 0xa1, 0xa0, 0x73, 0xde, 0xa9, 0x2b, 0x1d, 0xbe, - 0x57, 0x51, 0xf7, 0xb9, 0xfe, 0xae, 0x24, 0x18, 0x67, 0x1c, 0xf6, 0x9f, 0xc5, 0xd3, 0xa5, 0xd2, - 0xe5, 0xe0, 0x4d, 0x62, 0x2e, 0xd6, 0x07, 0xef, 0x32, 0xe6, 0x02, 0x2b, 0x0b, 0x4c, 0xc1, 0x61, - 0xb8, 0x36, 0x1a, 0xba, 0xb4, 0x4e, 0xb5, 0x97, 0xe4, 0x30, 0xb7, 0xa3, 0xe9, 0x0f, 0xd7, 0x2d, - 0x78, 0x23, 0x84, 0x4d, 0xc1, 0x86, 0x17, 0xfc, 0x1a, 0x34, 0x27, 0x42, 0x24, 0xba, 0xc6, 0x17, - 0xd5, 0x07, 0xb2, 0x78, 0xc2, 0xb6, 0xca, 0x6e, 0x38, 0x1c, 0x60, 0x45, 0x65, 0xff, 0x57, 0xd4, - 0xc3, 0xcb, 0x34, 0x9e, 0xaf, 0x19, 0xe3, 0x6d, 0xd6, 0x8c, 0xf9, 0xdc, 0x8a, 0x81, 0x97, 0xa0, - 0x21, 0x66, 0x4f, 0x0d, 0x7c, 0xbf, 0x1a, 0xe3, 0xb0, 0xef, 0xb9, 0xa6, 0x2e, 0x58, 0x63, 0xd8, - 0xf7, 0xb0, 0xa4, 0x80, 0xd7, 0xa0, 0xc5, 0xd2, 0x19, 0x95, 0x23, 0xd8, 0xa8, 0x3e, 0xd2, 0x32, - 0xff, 0x42, 0x10, 0xf2, 0xc4, 0x71, 0xc6, 0x63, 0xff, 0x04, 0xf6, 0x56, 0xe6, 0x14, 0xde, 0x80, - 0xdd, 0x59, 0x4c, 0xc6, 0x2e, 0x99, 0x91, 0xc8, 0xa7, 0x4c, 0x97, 0x61, 0x45, 0x75, 0xf2, 0x6f, - 0xa5, 0xe4, 
0x5b, 0xf2, 0xd3, 0x53, 0x7e, 0xa4, 0x83, 0xec, 0x96, 0x6d, 0x78, 0x85, 0xd1, 0x26, - 0x00, 0x14, 0x39, 0xc2, 0x13, 0xd0, 0x92, 0x3a, 0xcb, 0xd6, 0xec, 0x8e, 0xbb, 0x23, 0x5f, 0x28, - 0xe5, 0xc7, 0x71, 0x76, 0x0f, 0x7b, 0x00, 0x70, 0xea, 0x33, 0x2a, 0xd4, 0x32, 0xa8, 0x2b, 0xa1, - 0xe6, 0x6b, 0xcf, 0xcb, 0x2d, 0xb8, 0xe4, 0xe5, 0x9e, 0xdd, 0x3e, 0x5a, 0xb5, 0xbb, 0x47, 0xab, - 0x76, 0xff, 0x68, 0xd5, 0x7e, 0x5d, 0x5a, 0xc6, 0xed, 0xd2, 0x32, 0xee, 0x96, 0x96, 0x71, 0xbf, - 0xb4, 0x8c, 0x7f, 0x96, 0x96, 0xf1, 0xc7, 0xbf, 0x56, 0xed, 0xbb, 0xb6, 0x2e, 0xd3, 0xff, 0x01, - 0x00, 0x00, 0xff, 0xff, 0xdb, 0x8a, 0xe4, 0xd8, 0x21, 0x08, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/networking/v1beta1/register.go b/vendor/k8s.io/api/networking/v1beta1/register.go deleted file mode 100644 index c046c4901..000000000 --- a/vendor/k8s.io/api/networking/v1beta1/register.go +++ /dev/null @@ -1,56 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -// GroupName is the group name use in this package -const GroupName = "networking.k8s.io" - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} - -// Resource takes an unqualified resource and returns a Group qualified GroupResource -func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -var ( - // SchemeBuilder holds functions that add things to a scheme - // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. - // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - localSchemeBuilder = &SchemeBuilder - - // AddToScheme adds the types of this group into the given scheme. - AddToScheme = localSchemeBuilder.AddToScheme -) - -// Adds the list of known types to the given scheme. -func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &Ingress{}, - &IngressList{}, - ) - // Add the watch version that applies - metav1.AddToGroupVersion(scheme, SchemeGroupVersion) - return nil -} diff --git a/vendor/k8s.io/api/networking/v1beta1/types.go b/vendor/k8s.io/api/networking/v1beta1/types.go deleted file mode 100644 index 63bf2d52a..000000000 --- a/vendor/k8s.io/api/networking/v1beta1/types.go +++ /dev/null @@ -1,192 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
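All of the hand-expanded Unmarshal bodies deleted above reduce to one primitive: read a base-128 varint, then interpret it either as a tag (fieldNum<<3 | wireType) or as a length prefix. A minimal, self-contained sketch of that step — an illustration of the technique, not the generated code itself:

package main

import (
	"errors"
	"fmt"
)

// decodeVarint mirrors the inner loop the generated Unmarshal methods
// inline everywhere: each byte contributes 7 low-order bits, and a set
// high bit means another byte follows.
func decodeVarint(dAtA []byte) (v uint64, n int, err error) {
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, errors.New("proto: integer overflow")
		}
		if n >= len(dAtA) {
			return 0, 0, errors.New("proto: unexpected EOF")
		}
		b := dAtA[n]
		n++
		v |= (uint64(b) & 0x7F) << shift
		if b < 0x80 {
			return v, n, nil
		}
	}
}

func main() {
	// 0xAC 0x02 decodes to 300: 0x2C | 0x02<<7.
	v, _, _ := decodeVarint([]byte{0xAC, 0x02})
	fmt.Println(v) // 300

	// A decoded tag splits exactly as in the code above:
	// fieldNum := wire >> 3, wireType := wire & 0x7.
	wire, _, _ := decodeVarint([]byte{0x12}) // tag for field 2, wire type 2
	fmt.Println(wire>>3, wire&0x7)           // 2 2
}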
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/intstr" -) - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// Ingress is a collection of rules that allow inbound connections to reach the -// endpoints defined by a backend. An Ingress can be configured to give services -// externally-reachable urls, load balance traffic, terminate SSL, offer name -// based virtual hosting etc. -type Ingress struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Spec is the desired state of the Ingress. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status - // +optional - Spec IngressSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - - // Status is the current state of the Ingress. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status - // +optional - Status IngressStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// IngressList is a collection of Ingress. -type IngressList struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Items is the list of Ingress. - Items []Ingress `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// IngressSpec describes the Ingress the user wishes to exist. -type IngressSpec struct { - // A default backend capable of servicing requests that don't match any - // rule. At least one of 'backend' or 'rules' must be specified. This field - // is optional to allow the loadbalancer controller or defaulting logic to - // specify a global default. - // +optional - Backend *IngressBackend `json:"backend,omitempty" protobuf:"bytes,1,opt,name=backend"` - - // TLS configuration. Currently the Ingress only supports a single TLS - // port, 443. If multiple members of this list specify different hosts, they - // will be multiplexed on the same port according to the hostname specified - // through the SNI TLS extension, if the ingress controller fulfilling the - // ingress supports SNI. - // +optional - TLS []IngressTLS `json:"tls,omitempty" protobuf:"bytes,2,rep,name=tls"` - - // A list of host rules used to configure the Ingress. If unspecified, or - // no rule matches, all traffic is sent to the default backend. - // +optional - Rules []IngressRule `json:"rules,omitempty" protobuf:"bytes,3,rep,name=rules"` - // TODO: Add the ability to specify load-balancer IP through claims -} - -// IngressTLS describes the transport layer security associated with an Ingress. 
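The tls comment above notes that multiple hosts share port 443, multiplexed by the SNI extension. As a hedged illustration of what that means for whatever controller fulfills the Ingress — using only crypto/tls, with hypothetical certificate paths and hostnames:

package main

import (
	"crypto/tls"
	"log"
	"net/http"
)

func main() {
	// One certificate per IngressTLS entry; paths are hypothetical.
	certs := map[string]tls.Certificate{}
	for host, pair := range map[string][2]string{
		"foo.example.com": {"foo.crt", "foo.key"},
		"bar.example.com": {"bar.crt", "bar.key"},
	} {
		c, err := tls.LoadX509KeyPair(pair[0], pair[1])
		if err != nil {
			log.Fatal(err)
		}
		certs[host] = c
	}
	srv := &http.Server{
		Addr: ":443",
		TLSConfig: &tls.Config{
			// GetCertificate is invoked once per handshake with the
			// client's SNI server name; that lookup is how several
			// hosts are multiplexed onto the same port.
			GetCertificate: func(hello *tls.ClientHelloInfo) (*tls.Certificate, error) {
				if c, ok := certs[hello.ServerName]; ok {
					return &c, nil
				}
				return nil, nil // fall back to the default certificate, if any
			},
		},
	}
	log.Fatal(srv.ListenAndServeTLS("", ""))
}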
-type IngressTLS struct { - // Hosts are a list of hosts included in the TLS certificate. The values in - // this list must match the name/s used in the tlsSecret. Defaults to the - // wildcard host setting for the loadbalancer controller fulfilling this - // Ingress, if left unspecified. - // +optional - Hosts []string `json:"hosts,omitempty" protobuf:"bytes,1,rep,name=hosts"` - // SecretName is the name of the secret used to terminate SSL traffic on 443. - // Field is left optional to allow SSL routing based on SNI hostname alone. - // If the SNI host in a listener conflicts with the "Host" header field used - // by an IngressRule, the SNI host is used for termination and value of the - // Host header is used for routing. - // +optional - SecretName string `json:"secretName,omitempty" protobuf:"bytes,2,opt,name=secretName"` - // TODO: Consider specifying different modes of termination, protocols etc. -} - -// IngressStatus describe the current state of the Ingress. -type IngressStatus struct { - // LoadBalancer contains the current status of the load-balancer. - // +optional - LoadBalancer v1.LoadBalancerStatus `json:"loadBalancer,omitempty" protobuf:"bytes,1,opt,name=loadBalancer"` -} - -// IngressRule represents the rules mapping the paths under a specified host to -// the related backend services. Incoming requests are first evaluated for a host -// match, then routed to the backend associated with the matching IngressRuleValue. -type IngressRule struct { - // Host is the fully qualified domain name of a network host, as defined - // by RFC 3986. Note the following deviations from the "host" part of the - // URI as defined in the RFC: - // 1. IPs are not allowed. Currently an IngressRuleValue can only apply to the - // IP in the Spec of the parent Ingress. - // 2. The `:` delimiter is not respected because ports are not allowed. - // Currently the port of an Ingress is implicitly :80 for http and - // :443 for https. - // Both these may change in the future. - // Incoming requests are matched against the host before the IngressRuleValue. - // If the host is unspecified, the Ingress routes all traffic based on the - // specified IngressRuleValue. - // +optional - Host string `json:"host,omitempty" protobuf:"bytes,1,opt,name=host"` - // IngressRuleValue represents a rule to route requests for this IngressRule. - // If unspecified, the rule defaults to a http catch-all. Whether that sends - // just traffic matching the host to the default backend or all traffic to the - // default backend, is left to the controller fulfilling the Ingress. Http is - // currently the only supported IngressRuleValue. - // +optional - IngressRuleValue `json:",inline,omitempty" protobuf:"bytes,2,opt,name=ingressRuleValue"` -} - -// IngressRuleValue represents a rule to apply against incoming requests. If the -// rule is satisfied, the request is routed to the specified backend. Currently -// mixing different types of rules in a single Ingress is disallowed, so exactly -// one of the following must be set. -type IngressRuleValue struct { - //TODO: - // 1. Consider renaming this resource and the associated rules so they - // aren't tied to Ingress. They can be used to route intra-cluster traffic. - // 2. Consider adding fields for ingress-type specific global options - // usable by a loadbalancer, like http keep-alive. - - // +optional - HTTP *HTTPIngressRuleValue `json:"http,omitempty" protobuf:"bytes,1,opt,name=http"` -} - -// HTTPIngressRuleValue is a list of http selectors pointing to backends. 
-// In the example: http:///? -> backend where -// where parts of the url correspond to RFC 3986, this resource will be used -// to match against everything after the last '/' and before the first '?' -// or '#'. -type HTTPIngressRuleValue struct { - // A collection of paths that map requests to backends. - Paths []HTTPIngressPath `json:"paths" protobuf:"bytes,1,rep,name=paths"` - // TODO: Consider adding fields for ingress-type specific global - // options usable by a loadbalancer, like http keep-alive. -} - -// HTTPIngressPath associates a path regex with a backend. Incoming urls matching -// the path are forwarded to the backend. -type HTTPIngressPath struct { - // Path is an extended POSIX regex as defined by IEEE Std 1003.1, - // (i.e this follows the egrep/unix syntax, not the perl syntax) - // matched against the path of an incoming request. Currently it can - // contain characters disallowed from the conventional "path" - // part of a URL as defined by RFC 3986. Paths must begin with - // a '/'. If unspecified, the path defaults to a catch all sending - // traffic to the backend. - // +optional - Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"` - - // Backend defines the referenced service endpoint to which the traffic - // will be forwarded to. - Backend IngressBackend `json:"backend" protobuf:"bytes,2,opt,name=backend"` -} - -// IngressBackend describes all endpoints for a given service and port. -type IngressBackend struct { - // Specifies the name of the referenced service. - ServiceName string `json:"serviceName" protobuf:"bytes,1,opt,name=serviceName"` - - // Specifies the port of the referenced service. - ServicePort intstr.IntOrString `json:"servicePort" protobuf:"bytes,2,opt,name=servicePort"` -} diff --git a/vendor/k8s.io/api/networking/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/networking/v1beta1/types_swagger_doc_generated.go deleted file mode 100644 index 9e05b7f1b..000000000 --- a/vendor/k8s.io/api/networking/v1beta1/types_swagger_doc_generated.go +++ /dev/null @@ -1,127 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -// This file contains a collection of methods that can be used from go-restful to -// generate Swagger API documentation for its models. Please read this PR for more -// information on the implementation: https://github.com/emicklei/go-restful/pull/215 -// -// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if -// they are on one line! For multiple line or blocks that you want to ignore use ---. -// Any context after a --- is ignored. -// -// Those methods can be generated by using hack/update-generated-swagger-docs.sh - -// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. -var map_HTTPIngressPath = map[string]string{ - "": "HTTPIngressPath associates a path regex with a backend. 
Incoming urls matching the path are forwarded to the backend.", - "path": "Path is an extended POSIX regex as defined by IEEE Std 1003.1, (i.e this follows the egrep/unix syntax, not the perl syntax) matched against the path of an incoming request. Currently it can contain characters disallowed from the conventional \"path\" part of a URL as defined by RFC 3986. Paths must begin with a '/'. If unspecified, the path defaults to a catch all sending traffic to the backend.", - "backend": "Backend defines the referenced service endpoint to which the traffic will be forwarded to.", -} - -func (HTTPIngressPath) SwaggerDoc() map[string]string { - return map_HTTPIngressPath -} - -var map_HTTPIngressRuleValue = map[string]string{ - "": "HTTPIngressRuleValue is a list of http selectors pointing to backends. In the example: http:///? -> backend where where parts of the url correspond to RFC 3986, this resource will be used to match against everything after the last '/' and before the first '?' or '#'.", - "paths": "A collection of paths that map requests to backends.", -} - -func (HTTPIngressRuleValue) SwaggerDoc() map[string]string { - return map_HTTPIngressRuleValue -} - -var map_Ingress = map[string]string{ - "": "Ingress is a collection of rules that allow inbound connections to reach the endpoints defined by a backend. An Ingress can be configured to give services externally-reachable urls, load balance traffic, terminate SSL, offer name based virtual hosting etc.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "Spec is the desired state of the Ingress. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", - "status": "Status is the current state of the Ingress. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", -} - -func (Ingress) SwaggerDoc() map[string]string { - return map_Ingress -} - -var map_IngressBackend = map[string]string{ - "": "IngressBackend describes all endpoints for a given service and port.", - "serviceName": "Specifies the name of the referenced service.", - "servicePort": "Specifies the port of the referenced service.", -} - -func (IngressBackend) SwaggerDoc() map[string]string { - return map_IngressBackend -} - -var map_IngressList = map[string]string{ - "": "IngressList is a collection of Ingress.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "items": "Items is the list of Ingress.", -} - -func (IngressList) SwaggerDoc() map[string]string { - return map_IngressList -} - -var map_IngressRule = map[string]string{ - "": "IngressRule represents the rules mapping the paths under a specified host to the related backend services. Incoming requests are first evaluated for a host match, then routed to the backend associated with the matching IngressRuleValue.", - "host": "Host is the fully qualified domain name of a network host, as defined by RFC 3986. Note the following deviations from the \"host\" part of the URI as defined in the RFC: 1. IPs are not allowed. Currently an IngressRuleValue can only apply to the\n\t IP in the Spec of the parent Ingress.\n2. The `:` delimiter is not respected because ports are not allowed.\n\t Currently the port of an Ingress is implicitly :80 for http and\n\t :443 for https.\nBoth these may change in the future. 
Incoming requests are matched against the host before the IngressRuleValue. If the host is unspecified, the Ingress routes all traffic based on the specified IngressRuleValue.", -} - -func (IngressRule) SwaggerDoc() map[string]string { - return map_IngressRule -} - -var map_IngressRuleValue = map[string]string{ - "": "IngressRuleValue represents a rule to apply against incoming requests. If the rule is satisfied, the request is routed to the specified backend. Currently mixing different types of rules in a single Ingress is disallowed, so exactly one of the following must be set.", -} - -func (IngressRuleValue) SwaggerDoc() map[string]string { - return map_IngressRuleValue -} - -var map_IngressSpec = map[string]string{ - "": "IngressSpec describes the Ingress the user wishes to exist.", - "backend": "A default backend capable of servicing requests that don't match any rule. At least one of 'backend' or 'rules' must be specified. This field is optional to allow the loadbalancer controller or defaulting logic to specify a global default.", - "tls": "TLS configuration. Currently the Ingress only supports a single TLS port, 443. If multiple members of this list specify different hosts, they will be multiplexed on the same port according to the hostname specified through the SNI TLS extension, if the ingress controller fulfilling the ingress supports SNI.", - "rules": "A list of host rules used to configure the Ingress. If unspecified, or no rule matches, all traffic is sent to the default backend.", -} - -func (IngressSpec) SwaggerDoc() map[string]string { - return map_IngressSpec -} - -var map_IngressStatus = map[string]string{ - "": "IngressStatus describe the current state of the Ingress.", - "loadBalancer": "LoadBalancer contains the current status of the load-balancer.", -} - -func (IngressStatus) SwaggerDoc() map[string]string { - return map_IngressStatus -} - -var map_IngressTLS = map[string]string{ - "": "IngressTLS describes the transport layer security associated with an Ingress.", - "hosts": "Hosts are a list of hosts included in the TLS certificate. The values in this list must match the name/s used in the tlsSecret. Defaults to the wildcard host setting for the loadbalancer controller fulfilling this Ingress, if left unspecified.", - "secretName": "SecretName is the name of the secret used to terminate SSL traffic on 443. Field is left optional to allow SSL routing based on SNI hostname alone. If the SNI host in a listener conflicts with the \"Host\" header field used by an IngressRule, the SNI host is used for termination and value of the Host header is used for routing.", -} - -func (IngressTLS) SwaggerDoc() map[string]string { - return map_IngressTLS -} - -// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/node/v1alpha1/generated.pb.go b/vendor/k8s.io/api/node/v1alpha1/generated.pb.go deleted file mode 100644 index 16f5af929..000000000 --- a/vendor/k8s.io/api/node/v1alpha1/generated.pb.go +++ /dev/null @@ -1,696 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
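The SwaggerDoc methods above follow a simple convention — the empty key documents the type, every other key documents a JSON field — which go-restful's doc generator consumes via a common interface. A small sketch of a consumer, using a local stand-in type rather than the real API types:

package main

import "fmt"

// swaggerDocer is the implicit interface the generated files satisfy:
// the "" key documents the type, other keys document its JSON fields.
type swaggerDocer interface {
	SwaggerDoc() map[string]string
}

// Demo is a local stand-in for a generated API type.
type Demo struct{}

var map_Demo = map[string]string{
	"":      "Demo shows the map-per-type documentation convention.",
	"field": "field is documented under its JSON name.",
}

func (Demo) SwaggerDoc() map[string]string { return map_Demo }

func printDocs(d swaggerDocer) {
	docs := d.SwaggerDoc()
	fmt.Println("type:", docs[""])
	for k, v := range docs {
		if k != "" {
			fmt.Printf("  %s: %s\n", k, v)
		}
	}
}

func main() { printDocs(Demo{}) }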
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: k8s.io/kubernetes/vendor/k8s.io/api/node/v1alpha1/generated.proto - -/* - Package v1alpha1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/node/v1alpha1/generated.proto - - It has these top-level messages: - RuntimeClass - RuntimeClassList - RuntimeClassSpec -*/ -package v1alpha1 - -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" - -import strings "strings" -import reflect "reflect" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package - -func (m *RuntimeClass) Reset() { *m = RuntimeClass{} } -func (*RuntimeClass) ProtoMessage() {} -func (*RuntimeClass) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } - -func (m *RuntimeClassList) Reset() { *m = RuntimeClassList{} } -func (*RuntimeClassList) ProtoMessage() {} -func (*RuntimeClassList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } - -func (m *RuntimeClassSpec) Reset() { *m = RuntimeClassSpec{} } -func (*RuntimeClassSpec) ProtoMessage() {} -func (*RuntimeClassSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } - -func init() { - proto.RegisterType((*RuntimeClass)(nil), "k8s.io.api.node.v1alpha1.RuntimeClass") - proto.RegisterType((*RuntimeClassList)(nil), "k8s.io.api.node.v1alpha1.RuntimeClassList") - proto.RegisterType((*RuntimeClassSpec)(nil), "k8s.io.api.node.v1alpha1.RuntimeClassSpec") -} -func (m *RuntimeClass) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RuntimeClass) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n2 - return i, nil -} - -func (m *RuntimeClassList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RuntimeClassList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n3, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n3 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *RuntimeClassSpec) Marshal() (dAtA []byte, err error) { - size := 
m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RuntimeClassSpec) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.RuntimeHandler))) - i += copy(dAtA[i:], m.RuntimeHandler) - return i, nil -} - -func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *RuntimeClass) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *RuntimeClassList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *RuntimeClassSpec) Size() (n int) { - var l int - _ = l - l = len(m.RuntimeHandler) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *RuntimeClass) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&RuntimeClass{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "RuntimeClassSpec", "RuntimeClassSpec", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *RuntimeClassList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&RuntimeClassList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "RuntimeClass", "RuntimeClass", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *RuntimeClassSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&RuntimeClassSpec{`, - `RuntimeHandler:` + fmt.Sprintf("%v", this.RuntimeHandler) + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *RuntimeClass) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RuntimeClass: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RuntimeClass: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - 
var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RuntimeClassList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RuntimeClassList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RuntimeClassList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, RuntimeClass{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - 
if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RuntimeClassSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RuntimeClassSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RuntimeClassSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RuntimeHandler", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.RuntimeHandler = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenerated(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthGenerated - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipGenerated(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - 
default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") -) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/node/v1alpha1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 421 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x91, 0x41, 0x6b, 0xd4, 0x40, - 0x14, 0xc7, 0x33, 0xb5, 0x85, 0x75, 0x5a, 0x4b, 0xc9, 0x41, 0xc2, 0x1e, 0xa6, 0x65, 0x0f, 0x52, - 0x04, 0x67, 0xdc, 0x22, 0xe2, 0x49, 0x30, 0x5e, 0x14, 0x2b, 0x42, 0xbc, 0x89, 0x07, 0x27, 0xc9, - 0x33, 0x19, 0xb3, 0xc9, 0x0c, 0x99, 0x49, 0xc0, 0x9b, 0x1f, 0xc1, 0x2f, 0xa4, 0xe7, 0x3d, 0xf6, - 0xd8, 0x53, 0x71, 0xe3, 0x17, 0x91, 0x99, 0x64, 0xbb, 0xdb, 0x2e, 0xc5, 0xbd, 0xe5, 0xbd, 0xf9, - 0xff, 0x7f, 0xef, 0xfd, 0x5f, 0xf0, 0xab, 0xe2, 0x85, 0xa6, 0x42, 0xb2, 0xa2, 0x89, 0xa1, 0xae, - 0xc0, 0x80, 0x66, 0x2d, 0x54, 0xa9, 0xac, 0xd9, 0xf0, 0xc0, 0x95, 0x60, 0x95, 0x4c, 0x81, 0xb5, - 0x53, 0x3e, 0x53, 0x39, 0x9f, 0xb2, 0x0c, 0x2a, 0xa8, 0xb9, 0x81, 0x94, 0xaa, 0x5a, 0x1a, 0xe9, - 0x07, 0xbd, 0x92, 0x72, 0x25, 0xa8, 0x55, 0xd2, 0xa5, 0x72, 0xfc, 0x24, 0x13, 0x26, 0x6f, 0x62, - 0x9a, 0xc8, 0x92, 0x65, 0x32, 0x93, 0xcc, 0x19, 0xe2, 0xe6, 0xab, 0xab, 0x5c, 0xe1, 0xbe, 0x7a, - 0xd0, 0xf8, 0xd9, 0x6a, 0x64, 0xc9, 0x93, 0x5c, 0x54, 0x50, 0x7f, 0x67, 0xaa, 0xc8, 0x6c, 0x43, - 0xb3, 0x12, 0x0c, 0x67, 0xed, 0xc6, 0xf8, 0x31, 0xbb, 0xcb, 0x55, 0x37, 0x95, 0x11, 0x25, 0x6c, - 0x18, 0x9e, 0xff, 0xcf, 0xa0, 0x93, 0x1c, 0x4a, 0x7e, 0xdb, 0x37, 0xf9, 0x8d, 0xf0, 0x41, 0xd4, - 0x4b, 0x5e, 0xcf, 0xb8, 0xd6, 0xfe, 0x17, 0x3c, 0xb2, 0x4b, 0xa5, 0xdc, 0xf0, 0x00, 0x9d, 0xa0, - 0xd3, 0xfd, 0xb3, 0xa7, 0x74, 0x75, 0x8b, 0x6b, 0x36, 0x55, 0x45, 0x66, 0x1b, 0x9a, 0x5a, 0x35, - 0x6d, 0xa7, 0xf4, 0x43, 0xfc, 0x0d, 0x12, 0xf3, 0x1e, 0x0c, 0x0f, 0xfd, 0xf9, 0xd5, 0xb1, 0xd7, - 0x5d, 0x1d, 0xe3, 0x55, 0x2f, 0xba, 0xa6, 0xfa, 0xe7, 0x78, 0x57, 0x2b, 0x48, 0x82, 0x1d, 0x47, - 0x7f, 0x4c, 0xef, 0xba, 0x34, 0x5d, 0xdf, 0xeb, 0xa3, 0x82, 0x24, 0x3c, 0x18, 0xb8, 0xbb, 0xb6, - 0x8a, 0x1c, 0x65, 0xf2, 0x0b, 0xe1, 0xa3, 0x75, 0xe1, 0xb9, 0xd0, 0xc6, 0xff, 0xbc, 0x11, 0x82, - 0x6e, 0x17, 0xc2, 0xba, 0x5d, 0x84, 0xa3, 0x61, 0xd4, 0x68, 0xd9, 0x59, 0x0b, 0xf0, 0x0e, 0xef, - 0x09, 0x03, 0xa5, 0x0e, 0x76, 0x4e, 0xee, 0x9d, 0xee, 0x9f, 0x3d, 0xda, 0x2e, 0x41, 0xf8, 0x60, - 0x40, 0xee, 0xbd, 0xb5, 0xe6, 0xa8, 0x67, 0x4c, 0xa2, 0x9b, 0xeb, 0xdb, 0x64, 0xfe, 0x4b, 0x7c, - 0x38, 0xfc, 0xb6, 0x37, 0xbc, 0x4a, 0x67, 0x50, 0xbb, 0x10, 0xf7, 0xc3, 0x87, 0x03, 0xe1, 0x30, - 0xba, 0xf1, 0x1a, 0xdd, 0x52, 0x87, 0x74, 0xbe, 0x20, 0xde, 0xc5, 0x82, 0x78, 0x97, 0x0b, 0xe2, - 0xfd, 0xe8, 0x08, 0x9a, 0x77, 0x04, 0x5d, 0x74, 0x04, 0x5d, 0x76, 0x04, 0xfd, 0xe9, 0x08, 0xfa, - 0xf9, 0x97, 0x78, 0x9f, 0x46, 0xcb, 0x35, 0xff, 0x05, 0x00, 0x00, 0xff, 0xff, 0x94, 0x34, 0x0e, - 0xef, 0x30, 0x03, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/node/v1alpha1/types.go b/vendor/k8s.io/api/node/v1alpha1/types.go deleted file mode 100644 index 2ce67c116..000000000 --- a/vendor/k8s.io/api/node/v1alpha1/types.go +++ /dev/null @@ -1,75 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
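The MarshalTo bodies above are the mirror image of the Unmarshal loops: write a one-byte tag (fieldNum<<3 | wireType — hence the literal 0xa for field 1, wire type 2), then a varint length, then the payload. A standalone sketch of that encode path, valid for field numbers up to 15:

package main

import "fmt"

// putVarint mirrors encodeVarintGenerated: emit 7 bits per byte,
// setting the high bit while more bytes follow.
func putVarint(dst []byte, v uint64) []byte {
	for v >= 1<<7 {
		dst = append(dst, byte(v&0x7F|0x80))
		v >>= 7
	}
	return append(dst, byte(v))
}

// appendStringField mirrors what MarshalTo does for a bytes-typed
// field: tag byte, payload length as a varint, then the payload.
// A single tag byte only covers field numbers 1..15.
func appendStringField(dst []byte, fieldNum int, s string) []byte {
	dst = append(dst, byte(fieldNum<<3|2))
	dst = putVarint(dst, uint64(len(s)))
	return append(dst, s...)
}

func main() {
	// Field 1, wire type 2 => tag 0x0a, exactly the literal
	// `dAtA[i] = 0xa` seen throughout the generated code.
	buf := appendStringField(nil, 1, "runc")
	fmt.Printf("% x\n", buf) // 0a 04 72 75 6e 63
}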
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// RuntimeClass defines a class of container runtime supported in the cluster. -// The RuntimeClass is used to determine which container runtime is used to run -// all containers in a pod. RuntimeClasses are (currently) manually defined by a -// user or cluster provisioner, and referenced in the PodSpec. The Kubelet is -// responsible for resolving the RuntimeClassName reference before running the -// pod. For more details, see -// https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md -type RuntimeClass struct { - metav1.TypeMeta `json:",inline"` - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Specification of the RuntimeClass - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status - Spec RuntimeClassSpec `json:"spec" protobuf:"bytes,2,name=spec"` -} - -// RuntimeClassSpec is a specification of a RuntimeClass. It contains parameters -// that are required to describe the RuntimeClass to the Container Runtime -// Interface (CRI) implementation, as well as any other components that need to -// understand how the pod will be run. The RuntimeClassSpec is immutable. -type RuntimeClassSpec struct { - // RuntimeHandler specifies the underlying runtime and configuration that the - // CRI implementation will use to handle pods of this class. The possible - // values are specific to the node & CRI configuration. It is assumed that - // all handlers are available on every node, and handlers of the same name are - // equivalent on every node. - // For example, a handler called "runc" might specify that the runc OCI - // runtime (using native Linux containers) will be used to run the containers - // in a pod. - // The RuntimeHandler must conform to the DNS Label (RFC 1123) requirements - // and is immutable. - RuntimeHandler string `json:"runtimeHandler" protobuf:"bytes,1,opt,name=runtimeHandler"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// RuntimeClassList is a list of RuntimeClass objects. -type RuntimeClassList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Items is a list of schema objects. - Items []RuntimeClass `json:"items" protobuf:"bytes,2,rep,name=items"` -} diff --git a/vendor/k8s.io/api/node/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/node/v1alpha1/types_swagger_doc_generated.go deleted file mode 100644 index a51fa525d..000000000 --- a/vendor/k8s.io/api/node/v1alpha1/types_swagger_doc_generated.go +++ /dev/null @@ -1,59 +0,0 @@ -/* -Copyright The Kubernetes Authors. 
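The RuntimeHandler doc above requires an RFC 1123 DNS label. A rough approximation of that check follows; the authoritative validation lives in Kubernetes' apimachinery validation helpers, so treat this as a sketch only:

package main

import (
	"fmt"
	"regexp"
)

// RFC 1123 DNS label: lowercase alphanumerics and '-', starting and
// ending with an alphanumeric, at most 63 characters.
var dnsLabel = regexp.MustCompile(`^[a-z0-9]([-a-z0-9]*[a-z0-9])?$`)

// validHandler approximates the constraint stated in the
// RuntimeHandler field's documentation.
func validHandler(h string) bool {
	return len(h) <= 63 && dnsLabel.MatchString(h)
}

func main() {
	for _, h := range []string{"runc", "kata-containers", "Not_Valid"} {
		fmt.Printf("%-16s %v\n", h, validHandler(h))
	}
}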
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -// This file contains a collection of methods that can be used from go-restful to -// generate Swagger API documentation for its models. Please read this PR for more -// information on the implementation: https://github.com/emicklei/go-restful/pull/215 -// -// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if -// they are on one line! For multiple line or blocks that you want to ignore use ---. -// Any context after a --- is ignored. -// -// Those methods can be generated by using hack/update-generated-swagger-docs.sh - -// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. -var map_RuntimeClass = map[string]string{ - "": "RuntimeClass defines a class of container runtime supported in the cluster. The RuntimeClass is used to determine which container runtime is used to run all containers in a pod. RuntimeClasses are (currently) manually defined by a user or cluster provisioner, and referenced in the PodSpec. The Kubelet is responsible for resolving the RuntimeClassName reference before running the pod. For more details, see https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md", - "metadata": "More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "Specification of the RuntimeClass More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", -} - -func (RuntimeClass) SwaggerDoc() map[string]string { - return map_RuntimeClass -} - -var map_RuntimeClassList = map[string]string{ - "": "RuntimeClassList is a list of RuntimeClass objects.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "items": "Items is a list of schema objects.", -} - -func (RuntimeClassList) SwaggerDoc() map[string]string { - return map_RuntimeClassList -} - -var map_RuntimeClassSpec = map[string]string{ - "": "RuntimeClassSpec is a specification of a RuntimeClass. It contains parameters that are required to describe the RuntimeClass to the Container Runtime Interface (CRI) implementation, as well as any other components that need to understand how the pod will be run. The RuntimeClassSpec is immutable.", - "runtimeHandler": "RuntimeHandler specifies the underlying runtime and configuration that the CRI implementation will use to handle pods of this class. The possible values are specific to the node & CRI configuration. It is assumed that all handlers are available on every node, and handlers of the same name are equivalent on every node. For example, a handler called \"runc\" might specify that the runc OCI runtime (using native Linux containers) will be used to run the containers in a pod. 
The RuntimeHandler must conform to the DNS Label (RFC 1123) requirements and is immutable.", -} - -func (RuntimeClassSpec) SwaggerDoc() map[string]string { - return map_RuntimeClassSpec -} - -// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/node/v1beta1/generated.pb.go b/vendor/k8s.io/api/node/v1beta1/generated.pb.go deleted file mode 100644 index 27251a8a8..000000000 --- a/vendor/k8s.io/api/node/v1beta1/generated.pb.go +++ /dev/null @@ -1,564 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: k8s.io/kubernetes/vendor/k8s.io/api/node/v1beta1/generated.proto - -/* - Package v1beta1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/node/v1beta1/generated.proto - - It has these top-level messages: - RuntimeClass - RuntimeClassList -*/ -package v1beta1 - -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" - -import strings "strings" -import reflect "reflect" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package - -func (m *RuntimeClass) Reset() { *m = RuntimeClass{} } -func (*RuntimeClass) ProtoMessage() {} -func (*RuntimeClass) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } - -func (m *RuntimeClassList) Reset() { *m = RuntimeClassList{} } -func (*RuntimeClassList) ProtoMessage() {} -func (*RuntimeClassList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } - -func init() { - proto.RegisterType((*RuntimeClass)(nil), "k8s.io.api.node.v1beta1.RuntimeClass") - proto.RegisterType((*RuntimeClassList)(nil), "k8s.io.api.node.v1beta1.RuntimeClassList") -} -func (m *RuntimeClass) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RuntimeClass) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Handler))) - i += copy(dAtA[i:], m.Handler) - return i, nil -} - -func (m *RuntimeClassList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RuntimeClassList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n2, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n2 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *RuntimeClass) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Handler) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *RuntimeClassList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *RuntimeClass) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&RuntimeClass{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Handler:` + fmt.Sprintf("%v", this.Handler) + `,`, - `}`, - }, "") - return s -} -func (this *RuntimeClassList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&RuntimeClassList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", 
"k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "RuntimeClass", "RuntimeClass", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *RuntimeClass) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RuntimeClass: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RuntimeClass: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Handler", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Handler = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RuntimeClassList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RuntimeClassList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RuntimeClassList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift 
+= 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, RuntimeClass{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenerated(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthGenerated - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipGenerated(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") -) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/node/v1beta1/generated.proto", fileDescriptorGenerated) -} - -var 
fileDescriptorGenerated = []byte{ - // 389 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x92, 0xcd, 0x6a, 0xdb, 0x40, - 0x14, 0x85, 0x35, 0x2e, 0xc6, 0xae, 0xdc, 0x52, 0xa3, 0x4d, 0x8d, 0x17, 0x63, 0x63, 0x28, 0xb8, - 0x0b, 0xcf, 0xd4, 0xa6, 0x94, 0x2e, 0x8b, 0xba, 0x69, 0x4b, 0x4b, 0x41, 0xcb, 0x90, 0x45, 0x46, - 0xd2, 0x8d, 0x34, 0x91, 0xa5, 0x11, 0x9a, 0x91, 0x20, 0xbb, 0x3c, 0x42, 0xf6, 0x79, 0x95, 0x3c, - 0x80, 0x97, 0x5e, 0x7a, 0x65, 0x62, 0xe5, 0x45, 0x82, 0x7e, 0xfc, 0x43, 0x8c, 0x49, 0x76, 0xba, - 0xe7, 0x9e, 0x73, 0xee, 0x87, 0x18, 0xfd, 0x47, 0xf0, 0x5d, 0x12, 0x2e, 0x68, 0x90, 0xda, 0x90, - 0x44, 0xa0, 0x40, 0xd2, 0x0c, 0x22, 0x57, 0x24, 0xb4, 0x5e, 0xb0, 0x98, 0xd3, 0x48, 0xb8, 0x40, - 0xb3, 0xa9, 0x0d, 0x8a, 0x4d, 0xa9, 0x07, 0x11, 0x24, 0x4c, 0x81, 0x4b, 0xe2, 0x44, 0x28, 0x61, - 0x7c, 0xac, 0x8c, 0x84, 0xc5, 0x9c, 0x14, 0x46, 0x52, 0x1b, 0xfb, 0x13, 0x8f, 0x2b, 0x3f, 0xb5, - 0x89, 0x23, 0x42, 0xea, 0x09, 0x4f, 0xd0, 0xd2, 0x6f, 0xa7, 0x97, 0xe5, 0x54, 0x0e, 0xe5, 0x57, - 0xd5, 0xd3, 0xff, 0xba, 0x3f, 0x18, 0x32, 0xc7, 0xe7, 0x11, 0x24, 0xd7, 0x34, 0x0e, 0xbc, 0x42, - 0x90, 0x34, 0x04, 0xc5, 0x68, 0x76, 0x74, 0xbd, 0x4f, 0x4f, 0xa5, 0x92, 0x34, 0x52, 0x3c, 0x84, - 0xa3, 0xc0, 0xb7, 0x97, 0x02, 0xd2, 0xf1, 0x21, 0x64, 0xcf, 0x73, 0xa3, 0x3b, 0xa4, 0xbf, 0xb3, - 0x2a, 0xcb, 0xcf, 0x39, 0x93, 0xd2, 0xb8, 0xd0, 0xdb, 0x05, 0x94, 0xcb, 0x14, 0xeb, 0xa1, 0x21, - 0x1a, 0x77, 0x66, 0x5f, 0xc8, 0xfe, 0x57, 0xec, 0xba, 0x49, 0x1c, 0x78, 0x85, 0x20, 0x49, 0xe1, - 0x26, 0xd9, 0x94, 0xfc, 0xb7, 0xaf, 0xc0, 0x51, 0xff, 0x40, 0x31, 0xd3, 0x58, 0xac, 0x07, 0x5a, - 0xbe, 0x1e, 0xe8, 0x7b, 0xcd, 0xda, 0xb5, 0x1a, 0x9f, 0xf5, 0x96, 0xcf, 0x22, 0x77, 0x0e, 0x49, - 0xaf, 0x31, 0x44, 0xe3, 0xb7, 0xe6, 0x87, 0xda, 0xde, 0xfa, 0x55, 0xc9, 0xd6, 0x76, 0x3f, 0xba, - 0x47, 0x7a, 0xf7, 0x90, 0xee, 0x2f, 0x97, 0xca, 0x38, 0x3f, 0x22, 0x24, 0xaf, 0x23, 0x2c, 0xd2, - 0x25, 0x5f, 0xb7, 0x3e, 0xd8, 0xde, 0x2a, 0x07, 0x74, 0x7f, 0xf4, 0x26, 0x57, 0x10, 0xca, 0x5e, - 0x63, 0xf8, 0x66, 0xdc, 0x99, 0x7d, 0x22, 0x27, 0xde, 0x01, 0x39, 0xe4, 0x32, 0xdf, 0xd7, 0x8d, - 0xcd, 0xdf, 0x45, 0xd6, 0xaa, 0x2a, 0xcc, 0xc9, 0x62, 0x83, 0xb5, 0xe5, 0x06, 0x6b, 0xab, 0x0d, - 0xd6, 0x6e, 0x72, 0x8c, 0x16, 0x39, 0x46, 0xcb, 0x1c, 0xa3, 0x55, 0x8e, 0xd1, 0x43, 0x8e, 0xd1, - 0xed, 0x23, 0xd6, 0xce, 0x5a, 0x75, 0xe3, 0x53, 0x00, 0x00, 0x00, 0xff, 0xff, 0x93, 0x68, 0xe5, - 0x0d, 0xb5, 0x02, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/node/v1beta1/types.go b/vendor/k8s.io/api/node/v1beta1/types.go deleted file mode 100644 index 993c6e506..000000000 --- a/vendor/k8s.io/api/node/v1beta1/types.go +++ /dev/null @@ -1,65 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/
-
-package v1beta1
-
-import (
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-// +genclient
-// +genclient:nonNamespaced
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// RuntimeClass defines a class of container runtime supported in the cluster.
-// The RuntimeClass is used to determine which container runtime is used to run
-// all containers in a pod. RuntimeClasses are (currently) manually defined by a
-// user or cluster provisioner, and referenced in the PodSpec. The Kubelet is
-// responsible for resolving the RuntimeClassName reference before running the
-// pod. For more details, see
-// https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md
-type RuntimeClass struct {
-	metav1.TypeMeta `json:",inline"`
-	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
-	// +optional
-	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
-
-	// Handler specifies the underlying runtime and configuration that the CRI
-	// implementation will use to handle pods of this class. The possible values
-	// are specific to the node & CRI configuration. It is assumed that all
-	// handlers are available on every node, and handlers of the same name are
-	// equivalent on every node.
-	// For example, a handler called "runc" might specify that the runc OCI
-	// runtime (using native Linux containers) will be used to run the containers
-	// in a pod.
-	// The Handler must conform to the DNS Label (RFC 1123) requirements, and is
-	// immutable.
-	Handler string `json:"handler" protobuf:"bytes,2,opt,name=handler"`
-}
-
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// RuntimeClassList is a list of RuntimeClass objects.
-type RuntimeClassList struct {
-	metav1.TypeMeta `json:",inline"`
-	// Standard list metadata.
-	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
-	// +optional
-	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
-
-	// Items is a list of schema objects.
-	Items []RuntimeClass `json:"items" protobuf:"bytes,2,rep,name=items"`
-}
diff --git a/vendor/k8s.io/api/node/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/node/v1beta1/types_swagger_doc_generated.go
deleted file mode 100644
index 8bfa304e7..000000000
--- a/vendor/k8s.io/api/node/v1beta1/types_swagger_doc_generated.go
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1beta1
-
-// This file contains a collection of methods that can be used from go-restful to
-// generate Swagger API documentation for its models. Please read this PR for more
-// information on the implementation: https://github.com/emicklei/go-restful/pull/215
-//
-// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
-// they are on one line! For multiple line or blocks that you want to ignore use ---.
-// Any context after a --- is ignored.
-//
-// Those methods can be generated by using hack/update-generated-swagger-docs.sh
-
-// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
-var map_RuntimeClass = map[string]string{
-	"":         "RuntimeClass defines a class of container runtime supported in the cluster. The RuntimeClass is used to determine which container runtime is used to run all containers in a pod. RuntimeClasses are (currently) manually defined by a user or cluster provisioner, and referenced in the PodSpec. The Kubelet is responsible for resolving the RuntimeClassName reference before running the pod. For more details, see https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md",
-	"metadata": "More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
-	"handler":  "Handler specifies the underlying runtime and configuration that the CRI implementation will use to handle pods of this class. The possible values are specific to the node & CRI configuration. It is assumed that all handlers are available on every node, and handlers of the same name are equivalent on every node. For example, a handler called \"runc\" might specify that the runc OCI runtime (using native Linux containers) will be used to run the containers in a pod. The Handler must conform to the DNS Label (RFC 1123) requirements, and is immutable.",
-}
-
-func (RuntimeClass) SwaggerDoc() map[string]string {
-	return map_RuntimeClass
-}
-
-var map_RuntimeClassList = map[string]string{
-	"":         "RuntimeClassList is a list of RuntimeClass objects.",
-	"metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
-	"items":    "Items is a list of schema objects.",
-}
-
-func (RuntimeClassList) SwaggerDoc() map[string]string {
-	return map_RuntimeClassList
-}
-
-// AUTO-GENERATED FUNCTIONS END HERE
diff --git a/vendor/k8s.io/api/node/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/node/v1beta1/zz_generated.deepcopy.go
deleted file mode 100644
index f211e8499..000000000
--- a/vendor/k8s.io/api/node/v1beta1/zz_generated.deepcopy.go
+++ /dev/null
@@ -1,84 +0,0 @@
-// +build !ignore_autogenerated
-
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by deepcopy-gen. DO NOT EDIT.
-
-package v1beta1
-
-import (
-	runtime "k8s.io/apimachinery/pkg/runtime"
-)
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *RuntimeClass) DeepCopyInto(out *RuntimeClass) {
-	*out = *in
-	out.TypeMeta = in.TypeMeta
-	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuntimeClass.
-func (in *RuntimeClass) DeepCopy() *RuntimeClass {
-	if in == nil {
-		return nil
-	}
-	out := new(RuntimeClass)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *RuntimeClass) DeepCopyObject() runtime.Object {
-	if c := in.DeepCopy(); c != nil {
-		return c
-	}
-	return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *RuntimeClassList) DeepCopyInto(out *RuntimeClassList) {
-	*out = *in
-	out.TypeMeta = in.TypeMeta
-	in.ListMeta.DeepCopyInto(&out.ListMeta)
-	if in.Items != nil {
-		in, out := &in.Items, &out.Items
-		*out = make([]RuntimeClass, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuntimeClassList.
-func (in *RuntimeClassList) DeepCopy() *RuntimeClassList {
-	if in == nil {
-		return nil
-	}
-	out := new(RuntimeClassList)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *RuntimeClassList) DeepCopyObject() runtime.Object {
-	if c := in.DeepCopy(); c != nil {
-		return c
-	}
-	return nil
-}
diff --git a/vendor/k8s.io/api/policy/v1beta1/doc.go b/vendor/k8s.io/api/policy/v1beta1/doc.go
index 05d8332f8..9c456f923 100644
--- a/vendor/k8s.io/api/policy/v1beta1/doc.go
+++ b/vendor/k8s.io/api/policy/v1beta1/doc.go
@@ -15,10 +15,9 @@ limitations under the License.
 */
 
 // +k8s:deepcopy-gen=package
-// +k8s:protobuf-gen=package
-// +k8s:openapi-gen=true
 
 // Package policy is for any kind of policy object. Suitable examples, even if
 // they aren't all here, are PodDisruptionBudget, PodSecurityPolicy,
 // NetworkPolicy, etc.
+// +k8s:openapi-gen=true
 package v1beta1 // import "k8s.io/api/policy/v1beta1"
diff --git a/vendor/k8s.io/api/policy/v1beta1/generated.pb.go b/vendor/k8s.io/api/policy/v1beta1/generated.pb.go
index b0fe972b2..d7d62dd3a 100644
--- a/vendor/k8s.io/api/policy/v1beta1/generated.pb.go
+++ b/vendor/k8s.io/api/policy/v1beta1/generated.pb.go
@@ -14,8 +14,9 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// Code generated by protoc-gen-gogo.
 // source: k8s.io/kubernetes/vendor/k8s.io/api/policy/v1beta1/generated.proto
+// DO NOT EDIT!
 
 /*
 	Package v1beta1 is a generated protocol buffer package.
@@ -24,7 +25,6 @@ limitations under the License.
 	k8s.io/kubernetes/vendor/k8s.io/api/policy/v1beta1/generated.proto
 
 	It has these top-level messages:
-		AllowedCSIDriver
 		AllowedFlexVolume
 		AllowedHostPath
 		Eviction
@@ -38,9 +38,7 @@ limitations under the License.
 		PodSecurityPolicy
 		PodSecurityPolicyList
 		PodSecurityPolicySpec
-		RunAsGroupStrategyOptions
 		RunAsUserStrategyOptions
-		RuntimeClassStrategyOptions
 		SELinuxStrategyOptions
 		SupplementalGroupsStrategyOptions
 */
@@ -73,94 +71,77 @@ var _ = math.Inf
 // proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *AllowedCSIDriver) Reset() { *m = AllowedCSIDriver{} } -func (*AllowedCSIDriver) ProtoMessage() {} -func (*AllowedCSIDriver) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } - func (m *AllowedFlexVolume) Reset() { *m = AllowedFlexVolume{} } func (*AllowedFlexVolume) ProtoMessage() {} -func (*AllowedFlexVolume) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +func (*AllowedFlexVolume) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } func (m *AllowedHostPath) Reset() { *m = AllowedHostPath{} } func (*AllowedHostPath) ProtoMessage() {} -func (*AllowedHostPath) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +func (*AllowedHostPath) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } func (m *Eviction) Reset() { *m = Eviction{} } func (*Eviction) ProtoMessage() {} -func (*Eviction) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +func (*Eviction) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } func (m *FSGroupStrategyOptions) Reset() { *m = FSGroupStrategyOptions{} } func (*FSGroupStrategyOptions) ProtoMessage() {} -func (*FSGroupStrategyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +func (*FSGroupStrategyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } func (m *HostPortRange) Reset() { *m = HostPortRange{} } func (*HostPortRange) ProtoMessage() {} -func (*HostPortRange) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } +func (*HostPortRange) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } func (m *IDRange) Reset() { *m = IDRange{} } func (*IDRange) ProtoMessage() {} -func (*IDRange) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } +func (*IDRange) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } func (m *PodDisruptionBudget) Reset() { *m = PodDisruptionBudget{} } func (*PodDisruptionBudget) ProtoMessage() {} -func (*PodDisruptionBudget) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } +func (*PodDisruptionBudget) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } func (m *PodDisruptionBudgetList) Reset() { *m = PodDisruptionBudgetList{} } func (*PodDisruptionBudgetList) ProtoMessage() {} -func (*PodDisruptionBudgetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } +func (*PodDisruptionBudgetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } func (m *PodDisruptionBudgetSpec) Reset() { *m = PodDisruptionBudgetSpec{} } func (*PodDisruptionBudgetSpec) ProtoMessage() {} -func (*PodDisruptionBudgetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } +func (*PodDisruptionBudgetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } func (m *PodDisruptionBudgetStatus) Reset() { *m = PodDisruptionBudgetStatus{} } func (*PodDisruptionBudgetStatus) ProtoMessage() {} func (*PodDisruptionBudgetStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{10} + return fileDescriptorGenerated, []int{9} } func (m *PodSecurityPolicy) Reset() { *m = PodSecurityPolicy{} } func (*PodSecurityPolicy) ProtoMessage() {} -func (*PodSecurityPolicy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} 
} +func (*PodSecurityPolicy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } func (m *PodSecurityPolicyList) Reset() { *m = PodSecurityPolicyList{} } func (*PodSecurityPolicyList) ProtoMessage() {} -func (*PodSecurityPolicyList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } +func (*PodSecurityPolicyList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } func (m *PodSecurityPolicySpec) Reset() { *m = PodSecurityPolicySpec{} } func (*PodSecurityPolicySpec) ProtoMessage() {} -func (*PodSecurityPolicySpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } - -func (m *RunAsGroupStrategyOptions) Reset() { *m = RunAsGroupStrategyOptions{} } -func (*RunAsGroupStrategyOptions) ProtoMessage() {} -func (*RunAsGroupStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{14} -} +func (*PodSecurityPolicySpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } func (m *RunAsUserStrategyOptions) Reset() { *m = RunAsUserStrategyOptions{} } func (*RunAsUserStrategyOptions) ProtoMessage() {} func (*RunAsUserStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{15} -} - -func (m *RuntimeClassStrategyOptions) Reset() { *m = RuntimeClassStrategyOptions{} } -func (*RuntimeClassStrategyOptions) ProtoMessage() {} -func (*RuntimeClassStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{16} + return fileDescriptorGenerated, []int{13} } func (m *SELinuxStrategyOptions) Reset() { *m = SELinuxStrategyOptions{} } func (*SELinuxStrategyOptions) ProtoMessage() {} -func (*SELinuxStrategyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } +func (*SELinuxStrategyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } func (m *SupplementalGroupsStrategyOptions) Reset() { *m = SupplementalGroupsStrategyOptions{} } func (*SupplementalGroupsStrategyOptions) ProtoMessage() {} func (*SupplementalGroupsStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{18} + return fileDescriptorGenerated, []int{15} } func init() { - proto.RegisterType((*AllowedCSIDriver)(nil), "k8s.io.api.policy.v1beta1.AllowedCSIDriver") proto.RegisterType((*AllowedFlexVolume)(nil), "k8s.io.api.policy.v1beta1.AllowedFlexVolume") proto.RegisterType((*AllowedHostPath)(nil), "k8s.io.api.policy.v1beta1.AllowedHostPath") proto.RegisterType((*Eviction)(nil), "k8s.io.api.policy.v1beta1.Eviction") @@ -174,34 +155,10 @@ func init() { proto.RegisterType((*PodSecurityPolicy)(nil), "k8s.io.api.policy.v1beta1.PodSecurityPolicy") proto.RegisterType((*PodSecurityPolicyList)(nil), "k8s.io.api.policy.v1beta1.PodSecurityPolicyList") proto.RegisterType((*PodSecurityPolicySpec)(nil), "k8s.io.api.policy.v1beta1.PodSecurityPolicySpec") - proto.RegisterType((*RunAsGroupStrategyOptions)(nil), "k8s.io.api.policy.v1beta1.RunAsGroupStrategyOptions") proto.RegisterType((*RunAsUserStrategyOptions)(nil), "k8s.io.api.policy.v1beta1.RunAsUserStrategyOptions") - proto.RegisterType((*RuntimeClassStrategyOptions)(nil), "k8s.io.api.policy.v1beta1.RuntimeClassStrategyOptions") proto.RegisterType((*SELinuxStrategyOptions)(nil), "k8s.io.api.policy.v1beta1.SELinuxStrategyOptions") proto.RegisterType((*SupplementalGroupsStrategyOptions)(nil), "k8s.io.api.policy.v1beta1.SupplementalGroupsStrategyOptions") } -func (m *AllowedCSIDriver) Marshal() (dAtA []byte, err error) { - size := 
m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AllowedCSIDriver) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - return i, nil -} - func (m *AllowedFlexVolume) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -896,78 +853,6 @@ func (m *PodSecurityPolicySpec) MarshalTo(dAtA []byte) (int, error) { i += copy(dAtA[i:], s) } } - if m.RunAsGroup != nil { - dAtA[i] = 0xb2 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RunAsGroup.Size())) - n18, err := m.RunAsGroup.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n18 - } - if len(m.AllowedCSIDrivers) > 0 { - for _, msg := range m.AllowedCSIDrivers { - dAtA[i] = 0xba - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if m.RuntimeClass != nil { - dAtA[i] = 0xc2 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RuntimeClass.Size())) - n19, err := m.RuntimeClass.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n19 - } - return i, nil -} - -func (m *RunAsGroupStrategyOptions) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RunAsGroupStrategyOptions) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) - i += copy(dAtA[i:], m.Rule) - if len(m.Ranges) > 0 { - for _, msg := range m.Ranges { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } return i, nil } @@ -1005,45 +890,6 @@ func (m *RunAsUserStrategyOptions) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func (m *RuntimeClassStrategyOptions) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RuntimeClassStrategyOptions) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.AllowedRuntimeClassNames) > 0 { - for _, s := range m.AllowedRuntimeClassNames { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - if m.DefaultRuntimeClassName != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.DefaultRuntimeClassName))) - i += copy(dAtA[i:], *m.DefaultRuntimeClassName) - } - return i, nil -} - func (m *SELinuxStrategyOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -1067,11 +913,11 @@ func (m *SELinuxStrategyOptions) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SELinuxOptions.Size())) - n20, err := m.SELinuxOptions.MarshalTo(dAtA[i:]) + n18, err := m.SELinuxOptions.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n20 + i += n18 } return i, nil } @@ -1110,6 +956,24 @@ func (m *SupplementalGroupsStrategyOptions) MarshalTo(dAtA []byte) 
(int, error) return i, nil } +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -1119,14 +983,6 @@ func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { dAtA[offset] = uint8(v) return offset + 1 } -func (m *AllowedCSIDriver) Size() (n int) { - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - func (m *AllowedFlexVolume) Size() (n int) { var l int _ = l @@ -1356,34 +1212,6 @@ func (m *PodSecurityPolicySpec) Size() (n int) { n += 2 + l + sovGenerated(uint64(l)) } } - if m.RunAsGroup != nil { - l = m.RunAsGroup.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - if len(m.AllowedCSIDrivers) > 0 { - for _, e := range m.AllowedCSIDrivers { - l = e.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - } - if m.RuntimeClass != nil { - l = m.RuntimeClass.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *RunAsGroupStrategyOptions) Size() (n int) { - var l int - _ = l - l = len(m.Rule) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Ranges) > 0 { - for _, e := range m.Ranges { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } return n } @@ -1401,22 +1229,6 @@ func (m *RunAsUserStrategyOptions) Size() (n int) { return n } -func (m *RuntimeClassStrategyOptions) Size() (n int) { - var l int - _ = l - if len(m.AllowedRuntimeClassNames) > 0 { - for _, s := range m.AllowedRuntimeClassNames { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.DefaultRuntimeClassName != nil { - l = len(*m.DefaultRuntimeClassName) - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - func (m *SELinuxStrategyOptions) Size() (n int) { var l int _ = l @@ -1456,16 +1268,6 @@ func sovGenerated(x uint64) (n int) { func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } -func (this *AllowedCSIDriver) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&AllowedCSIDriver{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `}`, - }, "") - return s -} func (this *AllowedFlexVolume) String() string { if this == nil { return "nil" @@ -1639,20 +1441,6 @@ func (this *PodSecurityPolicySpec) String() string { `AllowedUnsafeSysctls:` + fmt.Sprintf("%v", this.AllowedUnsafeSysctls) + `,`, `ForbiddenSysctls:` + fmt.Sprintf("%v", this.ForbiddenSysctls) + `,`, `AllowedProcMountTypes:` + fmt.Sprintf("%v", this.AllowedProcMountTypes) + `,`, - `RunAsGroup:` + strings.Replace(fmt.Sprintf("%v", this.RunAsGroup), "RunAsGroupStrategyOptions", "RunAsGroupStrategyOptions", 1) + `,`, - `AllowedCSIDrivers:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.AllowedCSIDrivers), "AllowedCSIDriver", "AllowedCSIDriver", 1), `&`, ``, 1) + `,`, - `RuntimeClass:` + strings.Replace(fmt.Sprintf("%v", this.RuntimeClass), "RuntimeClassStrategyOptions", "RuntimeClassStrategyOptions", 1) + `,`, - `}`, - }, "") - return s -} -func (this 
*RunAsGroupStrategyOptions) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&RunAsGroupStrategyOptions{`, - `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, - `Ranges:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ranges), "IDRange", "IDRange", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -1668,17 +1456,6 @@ func (this *RunAsUserStrategyOptions) String() string { }, "") return s } -func (this *RuntimeClassStrategyOptions) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&RuntimeClassStrategyOptions{`, - `AllowedRuntimeClassNames:` + fmt.Sprintf("%v", this.AllowedRuntimeClassNames) + `,`, - `DefaultRuntimeClassName:` + valueToStringGenerated(this.DefaultRuntimeClassName) + `,`, - `}`, - }, "") - return s -} func (this *SELinuxStrategyOptions) String() string { if this == nil { return "nil" @@ -1709,85 +1486,6 @@ func valueToStringGenerated(v interface{}) string { pv := reflect.Indirect(rv).Interface() return fmt.Sprintf("*%v", pv) } -func (m *AllowedCSIDriver) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AllowedCSIDriver: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AllowedCSIDriver: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} func (m *AllowedFlexVolume) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -2839,14 +2537,51 @@ func (m *PodDisruptionBudgetStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.DisruptedPods == nil { - m.DisruptedPods = make(map[string]k8s_io_apimachinery_pkg_apis_meta_v1.Time) - } - var mapkey string - mapvalue := &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + 
} + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + if m.DisruptedPods == nil { + m.DisruptedPods = make(map[string]k8s_io_apimachinery_pkg_apis_meta_v1.Time) + } + if iNdEx < postIndex { + var valuekey uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2856,85 +2591,46 @@ func (m *PodDisruptionBudgetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + valuekey |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - if postmsgIndex > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - mapvalue = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break } - iNdEx += skippy } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + m.DisruptedPods[mapkey] = *mapvalue + } else { + var mapvalue k8s_io_apimachinery_pkg_apis_meta_v1.Time + m.DisruptedPods[mapkey] = mapvalue } - m.DisruptedPods[mapkey] = *mapvalue iNdEx = postIndex case 3: if wireType != 0 { @@ -3841,213 +3537,6 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } m.AllowedProcMountTypes = append(m.AllowedProcMountTypes, 
k8s_io_api_core_v1.ProcMountType(dAtA[iNdEx:postIndex])) iNdEx = postIndex - case 22: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RunAsGroup", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.RunAsGroup == nil { - m.RunAsGroup = &RunAsGroupStrategyOptions{} - } - if err := m.RunAsGroup.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 23: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowedCSIDrivers", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.AllowedCSIDrivers = append(m.AllowedCSIDrivers, AllowedCSIDriver{}) - if err := m.AllowedCSIDrivers[len(m.AllowedCSIDrivers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 24: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RuntimeClass", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.RuntimeClass == nil { - m.RuntimeClass = &RuntimeClassStrategyOptions{} - } - if err := m.RuntimeClass.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RunAsGroupStrategyOptions) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RunAsGroupStrategyOptions: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RunAsGroupStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return 
io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Rule = RunAsGroupStrategy(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ranges", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Ranges = append(m.Ranges, IDRange{}) - if err := m.Ranges[len(m.Ranges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -4179,115 +3668,6 @@ func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { } return nil } -func (m *RuntimeClassStrategyOptions) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RuntimeClassStrategyOptions: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RuntimeClassStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowedRuntimeClassNames", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.AllowedRuntimeClassNames = append(m.AllowedRuntimeClassNames, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DefaultRuntimeClassName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.DefaultRuntimeClassName = &s - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return 
io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} func (m *SELinuxStrategyOptions) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -4620,123 +4000,113 @@ func init() { } var fileDescriptorGenerated = []byte{ - // 1886 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0xdd, 0x8e, 0xdb, 0xc6, - 0x15, 0x5e, 0x5a, 0xfb, 0xa3, 0x9d, 0xfd, 0xf1, 0x6a, 0xf6, 0xc7, 0xf4, 0xa6, 0x16, 0x1d, 0x06, - 0x28, 0xdc, 0x34, 0xa1, 0xe2, 0xb5, 0xe3, 0x1a, 0x4d, 0x5b, 0x64, 0xb9, 0xda, 0xb5, 0x37, 0xf0, - 0x7a, 0xd5, 0x91, 0x1d, 0xb4, 0x85, 0x5b, 0x74, 0x24, 0xce, 0x6a, 0x99, 0xa5, 0x48, 0x76, 0x66, - 0xa8, 0xac, 0xee, 0x7a, 0xd1, 0x8b, 0x5e, 0xf6, 0x05, 0x82, 0x3e, 0x40, 0xd1, 0xab, 0xbe, 0x84, - 0x03, 0x14, 0x41, 0x2e, 0x83, 0x5e, 0x08, 0xb5, 0x8a, 0xbe, 0x84, 0xaf, 0x02, 0x8e, 0x86, 0x94, - 0xf8, 0x27, 0xd9, 0x01, 0xec, 0x3b, 0x72, 0xce, 0xf7, 0x7d, 0x67, 0xe6, 0x9c, 0x99, 0x33, 0x87, - 0x04, 0xe6, 0xc5, 0x7d, 0x66, 0xd8, 0x5e, 0xed, 0x22, 0x68, 0x11, 0xea, 0x12, 0x4e, 0x58, 0xad, - 0x47, 0x5c, 0xcb, 0xa3, 0x35, 0x69, 0xc0, 0xbe, 0x5d, 0xf3, 0x3d, 0xc7, 0x6e, 0xf7, 0x6b, 0xbd, - 0xdb, 0x2d, 0xc2, 0xf1, 0xed, 0x5a, 0x87, 0xb8, 0x84, 0x62, 0x4e, 0x2c, 0xc3, 0xa7, 0x1e, 0xf7, - 0xe0, 0xf5, 0x11, 0xd4, 0xc0, 0xbe, 0x6d, 0x8c, 0xa0, 0x86, 0x84, 0xee, 0x7e, 0xd8, 0xb1, 0xf9, - 0x79, 0xd0, 0x32, 0xda, 0x5e, 0xb7, 0xd6, 0xf1, 0x3a, 0x5e, 0x4d, 0x30, 0x5a, 0xc1, 0x99, 0x78, - 0x13, 0x2f, 0xe2, 0x69, 0xa4, 0xb4, 0xab, 0x4f, 0x38, 0x6d, 0x7b, 0x94, 0xd4, 0x7a, 0x19, 0x6f, - 0xbb, 0x77, 0xc7, 0x98, 0x2e, 0x6e, 0x9f, 0xdb, 0x2e, 0xa1, 0xfd, 0x9a, 0x7f, 0xd1, 0x09, 0x07, - 0x58, 0xad, 0x4b, 0x38, 0xce, 0x63, 0xd5, 0x8a, 0x58, 0x34, 0x70, 0xb9, 0xdd, 0x25, 0x19, 0xc2, - 0xbd, 0x59, 0x04, 0xd6, 0x3e, 0x27, 0x5d, 0x9c, 0xe1, 0xdd, 0x29, 0xe2, 0x05, 0xdc, 0x76, 0x6a, - 0xb6, 0xcb, 0x19, 0xa7, 0x69, 0x92, 0x7e, 0x17, 0x6c, 0xec, 0x3b, 0x8e, 0xf7, 0x25, 0xb1, 0x0e, - 0x9a, 0xc7, 0x75, 0x6a, 0xf7, 0x08, 0x85, 0x37, 0xc1, 0xbc, 0x8b, 0xbb, 0x44, 0x55, 0x6e, 0x2a, - 0xb7, 0x96, 0xcd, 0xd5, 0xe7, 0x03, 0x6d, 0x6e, 0x38, 0xd0, 0xe6, 0x1f, 0xe3, 0x2e, 0x41, 0xc2, - 0xa2, 0x7f, 0x02, 0x2a, 0x92, 0x75, 0xe4, 0x90, 0xcb, 0xcf, 0x3d, 0x27, 0xe8, 0x12, 0xf8, 0x63, - 0xb0, 0x68, 0x09, 0x01, 0x49, 0x5c, 0x97, 0xc4, 0xc5, 0x91, 0x2c, 0x92, 0x56, 0x9d, 0x81, 0xab, - 0x92, 0xfc, 0xd0, 0x63, 0xbc, 0x81, 0xf9, 0x39, 0xdc, 0x03, 0xc0, 0xc7, 0xfc, 0xbc, 0x41, 0xc9, - 0x99, 0x7d, 0x29, 0xe9, 0x50, 0xd2, 0x41, 0x23, 0xb6, 0xa0, 0x09, 0x14, 0xfc, 0x00, 0x94, 0x29, - 0xc1, 0xd6, 0xa9, 0xeb, 0xf4, 0xd5, 0x2b, 0x37, 0x95, 0x5b, 0x65, 0x73, 0x43, 0x32, 0xca, 0x48, - 0x8e, 0xa3, 0x18, 0xa1, 0xff, 0x47, 0x01, 0xe5, 0xc3, 0x9e, 0xdd, 0xe6, 0xb6, 0xe7, 0xc2, 0x3f, - 0x82, 0x72, 0x98, 0x2d, 0x0b, 0x73, 0x2c, 0x9c, 0xad, 0xec, 0x7d, 0x64, 0x8c, 0x77, 0x52, 0x1c, - 0x3c, 0xc3, 0xbf, 0xe8, 0x84, 0x03, 0xcc, 0x08, 0xd1, 0x46, 0xef, 0xb6, 0x71, 0xda, 0xfa, 0x82, - 0xb4, 0xf9, 0x09, 0xe1, 0x78, 0x3c, 0xbd, 0xf1, 0x18, 0x8a, 0x55, 0xa1, 0x03, 0xd6, 0x2c, 0xe2, - 0x10, 0x4e, 0x4e, 0xfd, 0xd0, 0x23, 0x13, 0x33, 0x5c, 0xd9, 0xbb, 0xf3, 0x6a, 0x6e, 0xea, 0x93, - 0x54, 0xb3, 0x32, 0x1c, 0x68, 0x6b, 0x89, 0x21, 0x94, 0x14, 0xd7, 0xbf, 0x52, 0xc0, 0xce, 0x51, - 0xf3, 0x01, 0xf5, 0x02, 0xbf, 0xc9, 0xc3, 0xec, 0x76, 0xfa, 0xd2, 0x04, 0x7f, 0x06, 0xe6, 0x69, - 0xe0, 0x44, 0xb9, 0x7c, 0x2f, 0xca, 0x25, 0x0a, 0x1c, 0xf2, 0x72, 0xa0, 0x6d, 0xa6, 0x58, 0x4f, - 0xfa, 0x3e, 0x41, 0x82, 0x00, 0x3f, 0x03, 0x8b, 0x14, 0xbb, 0x1d, 0x12, 0x4e, 
0xbd, 0x74, 0x6b, - 0x65, 0x4f, 0x37, 0x0a, 0xcf, 0x9a, 0x71, 0x5c, 0x47, 0x21, 0x74, 0x9c, 0x71, 0xf1, 0xca, 0x90, - 0x54, 0xd0, 0x4f, 0xc0, 0x9a, 0x48, 0xb5, 0x47, 0xb9, 0xb0, 0xc0, 0x1b, 0xa0, 0xd4, 0xb5, 0x5d, - 0x31, 0xa9, 0x05, 0x73, 0x45, 0xb2, 0x4a, 0x27, 0xb6, 0x8b, 0xc2, 0x71, 0x61, 0xc6, 0x97, 0x22, - 0x66, 0x93, 0x66, 0x7c, 0x89, 0xc2, 0x71, 0xfd, 0x01, 0x58, 0x92, 0x1e, 0x27, 0x85, 0x4a, 0xd3, - 0x85, 0x4a, 0x39, 0x42, 0xff, 0xb8, 0x02, 0x36, 0x1b, 0x9e, 0x55, 0xb7, 0x19, 0x0d, 0x44, 0xbc, - 0xcc, 0xc0, 0xea, 0x10, 0xfe, 0x16, 0xf6, 0xc7, 0x13, 0x30, 0xcf, 0x7c, 0xd2, 0x96, 0xdb, 0x62, - 0x6f, 0x4a, 0x6c, 0x73, 0xe6, 0xd7, 0xf4, 0x49, 0x7b, 0x7c, 0x2c, 0xc3, 0x37, 0x24, 0xd4, 0xe0, - 0x33, 0xb0, 0xc8, 0x38, 0xe6, 0x01, 0x53, 0x4b, 0x42, 0xf7, 0xee, 0x6b, 0xea, 0x0a, 0xee, 0x38, - 0x8b, 0xa3, 0x77, 0x24, 0x35, 0xf5, 0x7f, 0x2b, 0xe0, 0x5a, 0x0e, 0xeb, 0x91, 0xcd, 0x38, 0x7c, - 0x96, 0x89, 0x98, 0xf1, 0x6a, 0x11, 0x0b, 0xd9, 0x22, 0x5e, 0xf1, 0xe1, 0x8d, 0x46, 0x26, 0xa2, - 0xd5, 0x04, 0x0b, 0x36, 0x27, 0xdd, 0x68, 0x2b, 0x1a, 0xaf, 0xb7, 0x2c, 0x73, 0x4d, 0x4a, 0x2f, - 0x1c, 0x87, 0x22, 0x68, 0xa4, 0xa5, 0x7f, 0x73, 0x25, 0x77, 0x39, 0x61, 0x38, 0xe1, 0x19, 0x58, - 0xed, 0xda, 0xee, 0x7e, 0x0f, 0xdb, 0x0e, 0x6e, 0xc9, 0xd3, 0x33, 0x6d, 0x13, 0x84, 0x15, 0xd6, - 0x18, 0x55, 0x58, 0xe3, 0xd8, 0xe5, 0xa7, 0xb4, 0xc9, 0xa9, 0xed, 0x76, 0xcc, 0x8d, 0xe1, 0x40, - 0x5b, 0x3d, 0x99, 0x50, 0x42, 0x09, 0x5d, 0xf8, 0x7b, 0x50, 0x66, 0xc4, 0x21, 0x6d, 0xee, 0xd1, - 0xd7, 0xab, 0x10, 0x8f, 0x70, 0x8b, 0x38, 0x4d, 0x49, 0x35, 0x57, 0xc3, 0xb8, 0x45, 0x6f, 0x28, - 0x96, 0x84, 0x0e, 0x58, 0xef, 0xe2, 0xcb, 0xa7, 0x2e, 0x8e, 0x17, 0x52, 0xfa, 0x81, 0x0b, 0x81, - 0xc3, 0x81, 0xb6, 0x7e, 0x92, 0xd0, 0x42, 0x29, 0x6d, 0xfd, 0xff, 0xf3, 0xe0, 0x7a, 0xe1, 0xae, - 0x82, 0x9f, 0x01, 0xe8, 0xb5, 0x18, 0xa1, 0x3d, 0x62, 0x3d, 0x18, 0xdd, 0x41, 0xb6, 0x17, 0x1d, - 0xdc, 0x5d, 0x99, 0x20, 0x78, 0x9a, 0x41, 0xa0, 0x1c, 0x16, 0xfc, 0x8b, 0x02, 0xd6, 0xac, 0x91, - 0x1b, 0x62, 0x35, 0x3c, 0x2b, 0xda, 0x18, 0x0f, 0x7e, 0xc8, 0x7e, 0x37, 0xea, 0x93, 0x4a, 0x87, - 0x2e, 0xa7, 0x7d, 0x73, 0x5b, 0x4e, 0x68, 0x2d, 0x61, 0x43, 0x49, 0xa7, 0xf0, 0x04, 0x40, 0x2b, - 0x96, 0x64, 0xf2, 0x4e, 0x13, 0x21, 0x5e, 0x30, 0x6f, 0x48, 0x85, 0xed, 0x84, 0xdf, 0x08, 0x84, - 0x72, 0x88, 0xf0, 0x57, 0x60, 0xbd, 0x1d, 0x50, 0x4a, 0x5c, 0xfe, 0x90, 0x60, 0x87, 0x9f, 0xf7, - 0xd5, 0x79, 0x21, 0xb5, 0x23, 0xa5, 0xd6, 0x0f, 0x12, 0x56, 0x94, 0x42, 0x87, 0x7c, 0x8b, 0x30, - 0x9b, 0x12, 0x2b, 0xe2, 0x2f, 0x24, 0xf9, 0xf5, 0x84, 0x15, 0xa5, 0xd0, 0xf0, 0x3e, 0x58, 0x25, - 0x97, 0x3e, 0x69, 0x47, 0x31, 0x5d, 0x14, 0xec, 0x2d, 0xc9, 0x5e, 0x3d, 0x9c, 0xb0, 0xa1, 0x04, - 0x72, 0xd7, 0x01, 0x30, 0x1b, 0x44, 0xb8, 0x01, 0x4a, 0x17, 0xa4, 0x3f, 0xba, 0x79, 0x50, 0xf8, - 0x08, 0x3f, 0x05, 0x0b, 0x3d, 0xec, 0x04, 0x44, 0xee, 0xf5, 0xf7, 0x5f, 0x6d, 0xaf, 0x3f, 0xb1, - 0xbb, 0x04, 0x8d, 0x88, 0x3f, 0xbf, 0x72, 0x5f, 0xd1, 0xbf, 0x56, 0x40, 0xa5, 0xe1, 0x59, 0x4d, - 0xd2, 0x0e, 0xa8, 0xcd, 0xfb, 0x0d, 0x91, 0xe7, 0xb7, 0x50, 0xb3, 0x51, 0xa2, 0x66, 0x7f, 0x34, - 0x7d, 0xaf, 0x25, 0x67, 0x57, 0x54, 0xb1, 0xf5, 0xe7, 0x0a, 0xd8, 0xce, 0xa0, 0xdf, 0x42, 0x45, - 0xfd, 0x75, 0xb2, 0xa2, 0x7e, 0xf0, 0x3a, 0x8b, 0x29, 0xa8, 0xa7, 0x5f, 0x57, 0x72, 0x96, 0x22, - 0xaa, 0x69, 0xd8, 0xdd, 0x51, 0xbb, 0x67, 0x3b, 0xa4, 0x43, 0x2c, 0xb1, 0x98, 0xf2, 0x44, 0x77, - 0x17, 0x5b, 0xd0, 0x04, 0x0a, 0x32, 0xb0, 0x63, 0x91, 0x33, 0x1c, 0x38, 0x7c, 0xdf, 0xb2, 0x0e, - 0xb0, 0x8f, 0x5b, 0xb6, 0x63, 0x73, 0x5b, 0xb6, 0x23, 0xcb, 0xe6, 0x27, 0xc3, 0x81, 0xb6, 0x53, - 0xcf, 
0x45, 0xbc, 0x1c, 0x68, 0x37, 0xb2, 0xdd, 0xbc, 0x11, 0x43, 0xfa, 0xa8, 0x40, 0x1a, 0xf6, - 0x81, 0x4a, 0xc9, 0x9f, 0x82, 0xf0, 0x50, 0xd4, 0xa9, 0xe7, 0x27, 0xdc, 0x96, 0x84, 0xdb, 0x5f, - 0x0e, 0x07, 0x9a, 0x8a, 0x0a, 0x30, 0xb3, 0x1d, 0x17, 0xca, 0xc3, 0x2f, 0xc0, 0x26, 0x96, 0x7d, - 0xf8, 0xa4, 0xd7, 0x79, 0xe1, 0xf5, 0xfe, 0x70, 0xa0, 0x6d, 0xee, 0x67, 0xcd, 0xb3, 0x1d, 0xe6, - 0x89, 0xc2, 0x1a, 0x58, 0xea, 0x89, 0x96, 0x9d, 0xa9, 0x0b, 0x42, 0x7f, 0x7b, 0x38, 0xd0, 0x96, - 0x46, 0x5d, 0x7c, 0xa8, 0xb9, 0x78, 0xd4, 0x14, 0x8d, 0x60, 0x84, 0x82, 0x1f, 0x83, 0x95, 0x73, - 0x8f, 0xf1, 0xc7, 0x84, 0x7f, 0xe9, 0xd1, 0x0b, 0x51, 0x18, 0xca, 0xe6, 0xa6, 0xcc, 0xe0, 0xca, - 0xc3, 0xb1, 0x09, 0x4d, 0xe2, 0xe0, 0x6f, 0xc1, 0xf2, 0xb9, 0x6c, 0xfb, 0x98, 0xba, 0x24, 0x36, - 0xda, 0xad, 0x29, 0x1b, 0x2d, 0xd1, 0x22, 0x9a, 0x15, 0x29, 0xbf, 0x1c, 0x0d, 0x33, 0x34, 0x56, - 0x83, 0x3f, 0x01, 0x4b, 0xe2, 0xe5, 0xb8, 0xae, 0x96, 0xc5, 0x6c, 0xae, 0x4a, 0xf8, 0xd2, 0xc3, - 0xd1, 0x30, 0x8a, 0xec, 0x11, 0xf4, 0xb8, 0x71, 0xa0, 0x2e, 0x67, 0xa1, 0xc7, 0x8d, 0x03, 0x14, - 0xd9, 0xe1, 0x33, 0xb0, 0xc4, 0xc8, 0x23, 0xdb, 0x0d, 0x2e, 0x55, 0x20, 0x8e, 0xdc, 0xed, 0x29, - 0xd3, 0x6d, 0x1e, 0x0a, 0x64, 0xaa, 0xe1, 0x1e, 0xab, 0x4b, 0x3b, 0x8a, 0x24, 0xa1, 0x05, 0x96, - 0x69, 0xe0, 0xee, 0xb3, 0xa7, 0x8c, 0x50, 0x75, 0x25, 0x73, 0xdb, 0xa7, 0xf5, 0x51, 0x84, 0x4d, - 0x7b, 0x88, 0x23, 0x13, 0x23, 0xd0, 0x58, 0x18, 0xfe, 0x55, 0x01, 0x90, 0x05, 0xbe, 0xef, 0x90, - 0x2e, 0x71, 0x39, 0x76, 0x44, 0x7f, 0xcf, 0xd4, 0x55, 0xe1, 0xef, 0x17, 0xd3, 0xd6, 0x93, 0x21, - 0xa5, 0x1d, 0xc7, 0xd7, 0x74, 0x16, 0x8a, 0x72, 0x7c, 0x86, 0xe1, 0x3c, 0x63, 0xe2, 0x59, 0x5d, - 0x9b, 0x19, 0xce, 0xfc, 0xef, 0x97, 0x71, 0x38, 0xa5, 0x1d, 0x45, 0x92, 0xf0, 0x73, 0xb0, 0x13, - 0x7d, 0xdd, 0x21, 0xcf, 0xe3, 0x47, 0xb6, 0x43, 0x58, 0x9f, 0x71, 0xd2, 0x55, 0xd7, 0x45, 0x9a, - 0xab, 0x92, 0xb9, 0x83, 0x72, 0x51, 0xa8, 0x80, 0x0d, 0xbb, 0x40, 0x8b, 0xca, 0x43, 0x78, 0x76, - 0xe2, 0xfa, 0x74, 0xc8, 0xda, 0xd8, 0x19, 0x75, 0x2d, 0x57, 0x85, 0x83, 0xf7, 0x86, 0x03, 0x4d, - 0xab, 0x4f, 0x87, 0xa2, 0x59, 0x5a, 0xf0, 0x37, 0x40, 0xc5, 0x45, 0x7e, 0x36, 0x84, 0x9f, 0x1f, - 0x85, 0x35, 0xa7, 0xd0, 0x41, 0x21, 0x1b, 0xfa, 0x60, 0x03, 0x27, 0xbf, 0xb3, 0x99, 0x5a, 0x11, - 0xa7, 0xf0, 0xfd, 0x29, 0x79, 0x48, 0x7d, 0x9a, 0x9b, 0xaa, 0x0c, 0xe3, 0x46, 0xca, 0xc0, 0x50, - 0x46, 0x1d, 0x5e, 0x02, 0x88, 0xd3, 0xbf, 0x05, 0x98, 0x0a, 0x67, 0x5e, 0x31, 0x99, 0x7f, 0x09, - 0xe3, 0xad, 0x96, 0x31, 0x31, 0x94, 0xe3, 0x03, 0x3e, 0x02, 0x5b, 0x72, 0xf4, 0xa9, 0xcb, 0xf0, - 0x19, 0x69, 0xf6, 0x59, 0x9b, 0x3b, 0x4c, 0xdd, 0x14, 0xf5, 0x4d, 0x1d, 0x0e, 0xb4, 0xad, 0xfd, - 0x1c, 0x3b, 0xca, 0x65, 0xc1, 0x4f, 0xc1, 0xc6, 0x99, 0x47, 0x5b, 0xb6, 0x65, 0x11, 0x37, 0x52, - 0xda, 0x12, 0x4a, 0x5b, 0x61, 0x24, 0x8e, 0x52, 0x36, 0x94, 0x41, 0x43, 0x06, 0xb6, 0xa5, 0x72, - 0x83, 0x7a, 0xed, 0x13, 0x2f, 0x70, 0x79, 0x58, 0x52, 0x99, 0xba, 0x1d, 0x5f, 0x23, 0xdb, 0xfb, - 0x79, 0x80, 0x97, 0x03, 0xed, 0x66, 0x4e, 0x49, 0x4f, 0x80, 0x50, 0xbe, 0x36, 0xb4, 0x00, 0x10, - 0x75, 0x60, 0x74, 0xe4, 0x76, 0x66, 0x7e, 0x02, 0xa2, 0x18, 0x9c, 0x3e, 0x75, 0xeb, 0xe1, 0xcd, - 0x3c, 0x36, 0xa3, 0x09, 0x5d, 0xc8, 0x41, 0x05, 0xa7, 0xfe, 0x18, 0x31, 0xf5, 0x9a, 0xc8, 0xf1, - 0x4f, 0x67, 0xe7, 0x38, 0xe6, 0x98, 0xd7, 0x65, 0x8a, 0x2b, 0x69, 0x0b, 0x43, 0x59, 0x07, 0xd0, - 0x01, 0xab, 0xf2, 0xf7, 0xd7, 0x81, 0x83, 0x19, 0x53, 0x55, 0xb1, 0xba, 0x7b, 0xd3, 0x57, 0x17, - 0xc3, 0xd3, 0xeb, 0x13, 0xdf, 0x65, 0x93, 0x00, 0x94, 0x50, 0xd7, 0xff, 0xae, 0x80, 0xeb, 0x85, - 0xd1, 0x81, 0xf7, 0x12, 0xff, 
0x54, 0xf4, 0xd4, 0x3f, 0x15, 0x98, 0x25, 0xbe, 0x81, 0x5f, 0x2a, - 0x5f, 0x29, 0x40, 0x2d, 0xba, 0x21, 0xe0, 0xc7, 0x89, 0x09, 0xbe, 0x9b, 0x9a, 0x60, 0x25, 0xc3, - 0x7b, 0x03, 0xf3, 0xfb, 0x46, 0x01, 0xef, 0x4c, 0xc9, 0x40, 0x5c, 0xf6, 0x88, 0x35, 0x89, 0x7a, - 0x8c, 0xc3, 0x82, 0xa1, 0x88, 0x33, 0x32, 0x2e, 0x7b, 0x39, 0x18, 0x54, 0xc8, 0x86, 0x4f, 0xc1, - 0x35, 0x59, 0x73, 0xd3, 0x36, 0xd1, 0xb9, 0x2f, 0x9b, 0xef, 0x0c, 0x07, 0xda, 0xb5, 0x7a, 0x3e, - 0x04, 0x15, 0x71, 0xf5, 0x7f, 0x2a, 0x60, 0x27, 0xff, 0xca, 0x87, 0x77, 0x12, 0xe1, 0xd6, 0x52, - 0xe1, 0xbe, 0x9a, 0x62, 0xc9, 0x60, 0xff, 0x01, 0xac, 0xcb, 0xc6, 0x20, 0xf9, 0x8b, 0x30, 0x11, - 0xf4, 0xf0, 0xf8, 0x87, 0x3d, 0xbd, 0x94, 0x88, 0xb6, 0xaf, 0xf8, 0x1a, 0x4f, 0x8e, 0xa1, 0x94, - 0x9a, 0xfe, 0x2f, 0x05, 0xbc, 0x3b, 0xf3, 0x4a, 0x87, 0x66, 0x62, 0xea, 0x46, 0x6a, 0xea, 0xd5, - 0x62, 0x81, 0x37, 0xf3, 0xa7, 0xd0, 0xfc, 0xf0, 0xf9, 0x8b, 0xea, 0xdc, 0xb7, 0x2f, 0xaa, 0x73, - 0xdf, 0xbd, 0xa8, 0xce, 0xfd, 0x79, 0x58, 0x55, 0x9e, 0x0f, 0xab, 0xca, 0xb7, 0xc3, 0xaa, 0xf2, - 0xdd, 0xb0, 0xaa, 0xfc, 0x77, 0x58, 0x55, 0xfe, 0xf6, 0xbf, 0xea, 0xdc, 0xef, 0x96, 0xa4, 0xdc, - 0xf7, 0x01, 0x00, 0x00, 0xff, 0xff, 0x56, 0x4d, 0xc9, 0x62, 0x44, 0x18, 0x00, 0x00, + // 1715 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0x4f, 0x6f, 0x1b, 0xc7, + 0x15, 0xd7, 0x9a, 0x92, 0x48, 0x8d, 0x24, 0x5a, 0x1a, 0xfd, 0xe9, 0x46, 0xa8, 0xb9, 0x0e, 0x03, + 0x14, 0x6e, 0x90, 0x2c, 0x63, 0x39, 0x69, 0x8d, 0xa6, 0x2d, 0xa2, 0x35, 0x25, 0x5b, 0x81, 0x55, + 0xb1, 0x43, 0x3b, 0x68, 0x0b, 0xb7, 0xe8, 0x70, 0x77, 0x44, 0x4e, 0xb4, 0xdc, 0xdd, 0xce, 0xcc, + 0x32, 0xe4, 0xad, 0x87, 0x1e, 0x7a, 0xec, 0x17, 0xc8, 0x27, 0x28, 0x7a, 0xea, 0x97, 0x50, 0x81, + 0xa2, 0xc8, 0x31, 0xe8, 0x81, 0xa8, 0x59, 0xf4, 0x4b, 0xf8, 0xd2, 0x60, 0x87, 0xb3, 0x24, 0xf7, + 0x0f, 0x29, 0x2b, 0x40, 0x7c, 0xdb, 0x9d, 0xf7, 0xfb, 0xfd, 0xde, 0x9b, 0x37, 0x6f, 0xde, 0xce, + 0x0e, 0xb0, 0x2e, 0x1f, 0x72, 0x93, 0xfa, 0xb5, 0xcb, 0xb0, 0x45, 0x98, 0x47, 0x04, 0xe1, 0xb5, + 0x1e, 0xf1, 0x1c, 0x9f, 0xd5, 0x94, 0x01, 0x07, 0xb4, 0x16, 0xf8, 0x2e, 0xb5, 0x07, 0xb5, 0xde, + 0xfd, 0x16, 0x11, 0xf8, 0x7e, 0xad, 0x4d, 0x3c, 0xc2, 0xb0, 0x20, 0x8e, 0x19, 0x30, 0x5f, 0xf8, + 0xf0, 0xad, 0x31, 0xd4, 0xc4, 0x01, 0x35, 0xc7, 0x50, 0x53, 0x41, 0x0f, 0xde, 0x6f, 0x53, 0xd1, + 0x09, 0x5b, 0xa6, 0xed, 0x77, 0x6b, 0x6d, 0xbf, 0xed, 0xd7, 0x24, 0xa3, 0x15, 0x5e, 0xc8, 0x37, + 0xf9, 0x22, 0x9f, 0xc6, 0x4a, 0x07, 0xd5, 0x19, 0xa7, 0xb6, 0xcf, 0x48, 0xad, 0x97, 0xf1, 0x76, + 0xf0, 0xe1, 0x14, 0xd3, 0xc5, 0x76, 0x87, 0x7a, 0x84, 0x0d, 0x6a, 0xc1, 0x65, 0x3b, 0x1a, 0xe0, + 0xb5, 0x2e, 0x11, 0x38, 0x8f, 0x55, 0x9b, 0xc7, 0x62, 0xa1, 0x27, 0x68, 0x97, 0x64, 0x08, 0x3f, + 0xba, 0x8e, 0xc0, 0xed, 0x0e, 0xe9, 0xe2, 0x0c, 0xef, 0xc1, 0x3c, 0x5e, 0x28, 0xa8, 0x5b, 0xa3, + 0x9e, 0xe0, 0x82, 0xa5, 0x49, 0xd5, 0x8f, 0xc1, 0xf6, 0x91, 0xeb, 0xfa, 0x5f, 0x10, 0xe7, 0xc4, + 0x25, 0xfd, 0xcf, 0x7c, 0x37, 0xec, 0x12, 0xf8, 0x03, 0xb0, 0xea, 0x30, 0xda, 0x23, 0x4c, 0xd7, + 0xee, 0x6a, 0xf7, 0xd6, 0xac, 0xf2, 0xd5, 0xd0, 0x58, 0x1a, 0x0d, 0x8d, 0xd5, 0xba, 0x1c, 0x45, + 0xca, 0x5a, 0xe5, 0xe0, 0xb6, 0x22, 0x3f, 0xf1, 0xb9, 0x68, 0x60, 0xd1, 0x81, 0x87, 0x00, 0x04, + 0x58, 0x74, 0x1a, 0x8c, 0x5c, 0xd0, 0xbe, 0xa2, 0x43, 0x45, 0x07, 0x8d, 0x89, 0x05, 0xcd, 0xa0, + 0xe0, 0x7b, 0xa0, 0xc4, 0x08, 0x76, 0xce, 0x3d, 0x77, 0xa0, 0xdf, 0xba, 0xab, 0xdd, 0x2b, 0x59, + 0x5b, 0x8a, 0x51, 0x42, 0x6a, 0x1c, 0x4d, 0x10, 0xd5, 0x7f, 0x6b, 0xa0, 0x74, 0xdc, 0xa3, 0xb6, + 0xa0, 0xbe, 0x07, 
0x7f, 0x0f, 0x4a, 0x51, 0xde, 0x1d, 0x2c, 0xb0, 0x74, 0xb6, 0x7e, 0xf8, 0x81, + 0x39, 0xad, 0x89, 0x49, 0x1a, 0xcc, 0xe0, 0xb2, 0x1d, 0x0d, 0x70, 0x33, 0x42, 0x9b, 0xbd, 0xfb, + 0xe6, 0x79, 0xeb, 0x73, 0x62, 0x8b, 0x33, 0x22, 0xf0, 0x34, 0xbc, 0xe9, 0x18, 0x9a, 0xa8, 0x42, + 0x17, 0x6c, 0x3a, 0xc4, 0x25, 0x82, 0x9c, 0x07, 0x91, 0x47, 0x2e, 0x23, 0x5c, 0x3f, 0x7c, 0xf0, + 0x7a, 0x6e, 0xea, 0xb3, 0x54, 0x6b, 0x7b, 0x34, 0x34, 0x36, 0x13, 0x43, 0x28, 0x29, 0x5e, 0xfd, + 0x52, 0x03, 0xfb, 0x27, 0xcd, 0xc7, 0xcc, 0x0f, 0x83, 0xa6, 0x88, 0xd6, 0xa9, 0x3d, 0x50, 0x26, + 0xf8, 0x63, 0xb0, 0xcc, 0x42, 0x97, 0xa8, 0x9c, 0xbe, 0xa3, 0x82, 0x5e, 0x46, 0xa1, 0x4b, 0x5e, + 0x0d, 0x8d, 0x9d, 0x14, 0xeb, 0xd9, 0x20, 0x20, 0x48, 0x12, 0xe0, 0xa7, 0x60, 0x95, 0x61, 0xaf, + 0x4d, 0xa2, 0xd0, 0x0b, 0xf7, 0xd6, 0x0f, 0xab, 0xe6, 0xdc, 0x5d, 0x63, 0x9e, 0xd6, 0x51, 0x04, + 0x9d, 0xae, 0xb8, 0x7c, 0xe5, 0x48, 0x29, 0x54, 0xcf, 0xc0, 0xa6, 0x5c, 0x6a, 0x9f, 0x09, 0x69, + 0x81, 0x77, 0x40, 0xa1, 0x4b, 0x3d, 0x19, 0xd4, 0x8a, 0xb5, 0xae, 0x58, 0x85, 0x33, 0xea, 0xa1, + 0x68, 0x5c, 0x9a, 0x71, 0x5f, 0xe6, 0x6c, 0xd6, 0x8c, 0xfb, 0x28, 0x1a, 0xaf, 0x3e, 0x06, 0x45, + 0xe5, 0x71, 0x56, 0xa8, 0xb0, 0x58, 0xa8, 0x90, 0x23, 0xf4, 0xd7, 0x5b, 0x60, 0xa7, 0xe1, 0x3b, + 0x75, 0xca, 0x59, 0x28, 0xf3, 0x65, 0x85, 0x4e, 0x9b, 0x88, 0x37, 0x50, 0x1f, 0xcf, 0xc0, 0x32, + 0x0f, 0x88, 0xad, 0xca, 0xe2, 0x70, 0x41, 0x6e, 0x73, 0xe2, 0x6b, 0x06, 0xc4, 0xb6, 0x36, 0xe2, + 0xa5, 0x8c, 0xde, 0x90, 0x54, 0x83, 0x2f, 0xc0, 0x2a, 0x17, 0x58, 0x84, 0x5c, 0x2f, 0x48, 0xdd, + 0x0f, 0x6f, 0xa8, 0x2b, 0xb9, 0xd3, 0x55, 0x1c, 0xbf, 0x23, 0xa5, 0x59, 0xfd, 0xa7, 0x06, 0xbe, + 0x97, 0xc3, 0x7a, 0x4a, 0xb9, 0x80, 0x2f, 0x32, 0x19, 0x33, 0x5f, 0x2f, 0x63, 0x11, 0x5b, 0xe6, + 0x6b, 0xb2, 0x79, 0xe3, 0x91, 0x99, 0x6c, 0x35, 0xc1, 0x0a, 0x15, 0xa4, 0x1b, 0x97, 0xa2, 0x79, + 0xb3, 0x69, 0x59, 0x9b, 0x4a, 0x7a, 0xe5, 0x34, 0x12, 0x41, 0x63, 0xad, 0xea, 0xbf, 0x6e, 0xe5, + 0x4e, 0x27, 0x4a, 0x27, 0xbc, 0x00, 0x1b, 0x5d, 0xea, 0x1d, 0xf5, 0x30, 0x75, 0x71, 0x4b, 0xed, + 0x9e, 0x45, 0x45, 0x10, 0xf5, 0x4a, 0x73, 0xdc, 0x2b, 0xcd, 0x53, 0x4f, 0x9c, 0xb3, 0xa6, 0x60, + 0xd4, 0x6b, 0x5b, 0x5b, 0xa3, 0xa1, 0xb1, 0x71, 0x36, 0xa3, 0x84, 0x12, 0xba, 0xf0, 0xb7, 0xa0, + 0xc4, 0x89, 0x4b, 0x6c, 0xe1, 0xb3, 0x9b, 0x75, 0x88, 0xa7, 0xb8, 0x45, 0xdc, 0xa6, 0xa2, 0x5a, + 0x1b, 0x51, 0xde, 0xe2, 0x37, 0x34, 0x91, 0x84, 0x2e, 0x28, 0x77, 0x71, 0xff, 0xb9, 0x87, 0x27, + 0x13, 0x29, 0x7c, 0xcb, 0x89, 0xc0, 0xd1, 0xd0, 0x28, 0x9f, 0x25, 0xb4, 0x50, 0x4a, 0xbb, 0xfa, + 0xbf, 0x65, 0xf0, 0xd6, 0xdc, 0xaa, 0x82, 0x9f, 0x02, 0xe8, 0xb7, 0x38, 0x61, 0x3d, 0xe2, 0x3c, + 0x1e, 0x7f, 0x4d, 0xa8, 0x1f, 0x6f, 0xdc, 0x03, 0xb5, 0x40, 0xf0, 0x3c, 0x83, 0x40, 0x39, 0x2c, + 0xf8, 0x27, 0x0d, 0x6c, 0x3a, 0x63, 0x37, 0xc4, 0x69, 0xf8, 0x4e, 0x5c, 0x18, 0x8f, 0xbf, 0x4d, + 0xbd, 0x9b, 0xf5, 0x59, 0xa5, 0x63, 0x4f, 0xb0, 0x81, 0xb5, 0xa7, 0x02, 0xda, 0x4c, 0xd8, 0x50, + 0xd2, 0x29, 0x3c, 0x03, 0xd0, 0x99, 0x48, 0x72, 0xf5, 0x4d, 0x93, 0x29, 0x5e, 0xb1, 0xee, 0x28, + 0x85, 0xbd, 0x84, 0xdf, 0x18, 0x84, 0x72, 0x88, 0xf0, 0xe7, 0xa0, 0x6c, 0x87, 0x8c, 0x11, 0x4f, + 0x3c, 0x21, 0xd8, 0x15, 0x9d, 0x81, 0xbe, 0x2c, 0xa5, 0xf6, 0x95, 0x54, 0xf9, 0x51, 0xc2, 0x8a, + 0x52, 0xe8, 0x88, 0xef, 0x10, 0x4e, 0x19, 0x71, 0x62, 0xfe, 0x4a, 0x92, 0x5f, 0x4f, 0x58, 0x51, + 0x0a, 0x0d, 0x1f, 0x82, 0x0d, 0xd2, 0x0f, 0x88, 0x1d, 0xe7, 0x74, 0x55, 0xb2, 0x77, 0x15, 0x7b, + 0xe3, 0x78, 0xc6, 0x86, 0x12, 0xc8, 0x03, 0x17, 0xc0, 0x6c, 0x12, 0xe1, 0x16, 0x28, 0x5c, 0x92, + 0xc1, 0xf8, 0xcb, 0x83, 0xa2, 0x47, 0xf8, 
0x09, 0x58, 0xe9, 0x61, 0x37, 0x24, 0xaa, 0xd6, 0xdf, + 0x7d, 0xbd, 0x5a, 0x7f, 0x46, 0xbb, 0x04, 0x8d, 0x89, 0x3f, 0xb9, 0xf5, 0x50, 0xab, 0xfe, 0x43, + 0x03, 0xdb, 0x0d, 0xdf, 0x69, 0x12, 0x3b, 0x64, 0x54, 0x0c, 0x1a, 0x72, 0x9d, 0xdf, 0x40, 0xcf, + 0x46, 0x89, 0x9e, 0xfd, 0xc1, 0xe2, 0x5a, 0x4b, 0x46, 0x37, 0xaf, 0x63, 0x57, 0xaf, 0x34, 0xb0, + 0x97, 0x41, 0xbf, 0x81, 0x8e, 0xfa, 0xcb, 0x64, 0x47, 0x7d, 0xef, 0x26, 0x93, 0x99, 0xd3, 0x4f, + 0xff, 0x5f, 0xce, 0x99, 0x8a, 0xec, 0xa6, 0xd1, 0xe9, 0x8e, 0xd1, 0x1e, 0x75, 0x49, 0x9b, 0x38, + 0x72, 0x32, 0xa5, 0x99, 0xd3, 0xdd, 0xc4, 0x82, 0x66, 0x50, 0x90, 0x83, 0x7d, 0x87, 0x5c, 0xe0, + 0xd0, 0x15, 0x47, 0x8e, 0xf3, 0x08, 0x07, 0xb8, 0x45, 0x5d, 0x2a, 0xa8, 0x3a, 0x8e, 0xac, 0x59, + 0x1f, 0x8f, 0x86, 0xc6, 0x7e, 0x3d, 0x17, 0xf1, 0x6a, 0x68, 0xdc, 0xc9, 0x9e, 0xcb, 0xcd, 0x09, + 0x64, 0x80, 0xe6, 0x48, 0xc3, 0x01, 0xd0, 0x19, 0xf9, 0x43, 0x18, 0x6d, 0x8a, 0x3a, 0xf3, 0x83, + 0x84, 0xdb, 0x82, 0x74, 0xfb, 0xb3, 0xd1, 0xd0, 0xd0, 0xd1, 0x1c, 0xcc, 0xf5, 0x8e, 0xe7, 0xca, + 0xc3, 0xcf, 0xc1, 0x0e, 0x1e, 0xf7, 0x81, 0x84, 0xd7, 0x65, 0xe9, 0xf5, 0xe1, 0x68, 0x68, 0xec, + 0x1c, 0x65, 0xcd, 0xd7, 0x3b, 0xcc, 0x13, 0x85, 0x35, 0x50, 0xec, 0xc9, 0x23, 0x3b, 0xd7, 0x57, + 0xa4, 0xfe, 0xde, 0x68, 0x68, 0x14, 0xc7, 0xa7, 0xf8, 0x48, 0x73, 0xf5, 0xa4, 0x29, 0x0f, 0x82, + 0x31, 0x0a, 0x7e, 0x04, 0xd6, 0x3b, 0x3e, 0x17, 0xbf, 0x20, 0xe2, 0x0b, 0x9f, 0x5d, 0xca, 0xc6, + 0x50, 0xb2, 0x76, 0xd4, 0x0a, 0xae, 0x3f, 0x99, 0x9a, 0xd0, 0x2c, 0x0e, 0xfe, 0x1a, 0xac, 0x75, + 0xd4, 0xb1, 0x8f, 0xeb, 0x45, 0x59, 0x68, 0xf7, 0x16, 0x14, 0x5a, 0xe2, 0x88, 0x68, 0x6d, 0x2b, + 0xf9, 0xb5, 0x78, 0x98, 0xa3, 0xa9, 0x1a, 0xfc, 0x21, 0x28, 0xca, 0x97, 0xd3, 0xba, 0x5e, 0x92, + 0xd1, 0xdc, 0x56, 0xf0, 0xe2, 0x93, 0xf1, 0x30, 0x8a, 0xed, 0x31, 0xf4, 0xb4, 0xf1, 0x48, 0x5f, + 0xcb, 0x42, 0x4f, 0x1b, 0x8f, 0x50, 0x6c, 0x87, 0x2f, 0x40, 0x91, 0x93, 0xa7, 0xd4, 0x0b, 0xfb, + 0x3a, 0x90, 0x5b, 0xee, 0xfe, 0x82, 0x70, 0x9b, 0xc7, 0x12, 0x99, 0x3a, 0x70, 0x4f, 0xd5, 0x95, + 0x1d, 0xc5, 0x92, 0xd0, 0x01, 0x6b, 0x2c, 0xf4, 0x8e, 0xf8, 0x73, 0x4e, 0x98, 0xbe, 0x9e, 0xf9, + 0xda, 0xa7, 0xf5, 0x51, 0x8c, 0x4d, 0x7b, 0x98, 0x64, 0x66, 0x82, 0x40, 0x53, 0x61, 0xf8, 0x67, + 0x0d, 0x40, 0x1e, 0x06, 0x81, 0x4b, 0xba, 0xc4, 0x13, 0xd8, 0x95, 0xe7, 0x7b, 0xae, 0x6f, 0x48, + 0x7f, 0x3f, 0x5d, 0x34, 0x9f, 0x0c, 0x29, 0xed, 0x78, 0xf2, 0x99, 0xce, 0x42, 0x51, 0x8e, 0xcf, + 0x28, 0x9d, 0x17, 0x5c, 0x3e, 0xeb, 0x9b, 0xd7, 0xa6, 0x33, 0xff, 0xff, 0x65, 0x9a, 0x4e, 0x65, + 0x47, 0xb1, 0x24, 0xfc, 0x0c, 0xec, 0xc7, 0x7f, 0x77, 0xc8, 0xf7, 0xc5, 0x09, 0x75, 0x09, 0x1f, + 0x70, 0x41, 0xba, 0x7a, 0x59, 0x2e, 0x73, 0x45, 0x31, 0xf7, 0x51, 0x2e, 0x0a, 0xcd, 0x61, 0xc3, + 0x2e, 0x30, 0xe2, 0xf6, 0x10, 0xed, 0x9d, 0x49, 0x7f, 0x3a, 0xe6, 0x36, 0x76, 0xc7, 0xa7, 0x96, + 0xdb, 0xd2, 0xc1, 0x3b, 0xa3, 0xa1, 0x61, 0xd4, 0x17, 0x43, 0xd1, 0x75, 0x5a, 0xf0, 0x57, 0x40, + 0xc7, 0xf3, 0xfc, 0x6c, 0x49, 0x3f, 0xdf, 0x8f, 0x7a, 0xce, 0x5c, 0x07, 0x73, 0xd9, 0x30, 0x00, + 0x5b, 0x38, 0xf9, 0x9f, 0xcd, 0xf5, 0x6d, 0xb9, 0x0b, 0xdf, 0x5d, 0xb0, 0x0e, 0xa9, 0x5f, 0x73, + 0x4b, 0x57, 0x69, 0xdc, 0x4a, 0x19, 0x38, 0xca, 0xa8, 0xc3, 0x3e, 0x80, 0x38, 0x7d, 0x2d, 0xc0, + 0x75, 0x78, 0xed, 0x27, 0x26, 0x73, 0x97, 0x30, 0x2d, 0xb5, 0x8c, 0x89, 0xa3, 0x1c, 0x1f, 0xf0, + 0x29, 0xd8, 0x55, 0xa3, 0xcf, 0x3d, 0x8e, 0x2f, 0x48, 0x73, 0xc0, 0x6d, 0xe1, 0x72, 0x7d, 0x47, + 0xf6, 0x37, 0x7d, 0x34, 0x34, 0x76, 0x8f, 0x72, 0xec, 0x28, 0x97, 0x05, 0x3f, 0x01, 0x5b, 0x17, + 0x3e, 0x6b, 0x51, 0xc7, 0x21, 0x5e, 0xac, 0xb4, 0x2b, 0x95, 0x76, 
0xa3, 0x4c, 0x9c, 0xa4, 0x6c, + 0x28, 0x83, 0x86, 0x1c, 0xec, 0x29, 0xe5, 0x06, 0xf3, 0xed, 0x33, 0x3f, 0xf4, 0x44, 0xd4, 0x52, + 0xb9, 0xbe, 0x37, 0xf9, 0x8c, 0xec, 0x1d, 0xe5, 0x01, 0x5e, 0x0d, 0x8d, 0xbb, 0x39, 0x2d, 0x3d, + 0x01, 0x42, 0xf9, 0xda, 0xd5, 0x2f, 0x35, 0xa0, 0xcf, 0xeb, 0x1a, 0xf0, 0xa3, 0xc4, 0x45, 0xc0, + 0xdb, 0xa9, 0x8b, 0x80, 0xed, 0x0c, 0xef, 0x3b, 0xb8, 0x06, 0xf8, 0x9b, 0x06, 0xf6, 0xf3, 0xbb, + 0x26, 0x7c, 0x90, 0x88, 0xce, 0x48, 0x45, 0x77, 0x3b, 0xc5, 0x52, 0xb1, 0xfd, 0x0e, 0x94, 0x55, + 0x6f, 0x4d, 0xde, 0xb2, 0x24, 0x62, 0x8c, 0x32, 0x18, 0x1d, 0x8b, 0x94, 0x44, 0xdc, 0x57, 0xe4, + 0x0f, 0x4d, 0x72, 0x0c, 0xa5, 0xd4, 0xaa, 0x7f, 0xd7, 0xc0, 0xdb, 0xd7, 0x76, 0x45, 0x68, 0x25, + 0x42, 0x37, 0x53, 0xa1, 0x57, 0xe6, 0x0b, 0x7c, 0x37, 0x97, 0x2d, 0xd6, 0xfb, 0x57, 0x2f, 0x2b, + 0x4b, 0x5f, 0xbd, 0xac, 0x2c, 0x7d, 0xfd, 0xb2, 0xb2, 0xf4, 0xc7, 0x51, 0x45, 0xbb, 0x1a, 0x55, + 0xb4, 0xaf, 0x46, 0x15, 0xed, 0xeb, 0x51, 0x45, 0xfb, 0xcf, 0xa8, 0xa2, 0xfd, 0xe5, 0xbf, 0x95, + 0xa5, 0xdf, 0x14, 0x95, 0xdc, 0x37, 0x01, 0x00, 0x00, 0xff, 0xff, 0xa8, 0xba, 0x23, 0xa4, 0x51, + 0x15, 0x00, 0x00, } diff --git a/vendor/k8s.io/api/policy/v1beta1/types.go b/vendor/k8s.io/api/policy/v1beta1/types.go index a59df9840..c1a272750 100644 --- a/vendor/k8s.io/api/policy/v1beta1/types.go +++ b/vendor/k8s.io/api/policy/v1beta1/types.go @@ -17,7 +17,7 @@ limitations under the License. package v1beta1 import ( - v1 "k8s.io/api/core/v1" + "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" ) @@ -28,19 +28,16 @@ type PodDisruptionBudgetSpec struct { // "selector" will still be available after the eviction, i.e. even in the // absence of the evicted pod. So for example you can prevent all voluntary // evictions by specifying "100%". - // +optional MinAvailable *intstr.IntOrString `json:"minAvailable,omitempty" protobuf:"bytes,1,opt,name=minAvailable"` // Label query over pods whose evictions are managed by the disruption // budget. - // +optional Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,2,opt,name=selector"` // An eviction is allowed if at most "maxUnavailable" pods selected by // "selector" are unavailable after the eviction, i.e. even in absence of // the evicted pod. For example, one can prevent all voluntary evictions // by specifying 0. This is a mutually exclusive setting with "minAvailable". - // +optional MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty" protobuf:"bytes,3,opt,name=maxUnavailable"` } @@ -84,15 +81,12 @@ type PodDisruptionBudgetStatus struct { // PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods type PodDisruptionBudget struct { - metav1.TypeMeta `json:",inline"` - // +optional + metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Specification of the desired behavior of the PodDisruptionBudget. - // +optional Spec PodDisruptionBudgetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` // Most recently observed status of the PodDisruptionBudget. - // +optional Status PodDisruptionBudgetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -101,7 +95,6 @@ type PodDisruptionBudget struct { // PodDisruptionBudgetList is a collection of PodDisruptionBudgets. 
type PodDisruptionBudgetList struct { metav1.TypeMeta `json:",inline"` - // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` Items []PodDisruptionBudget `json:"items" protobuf:"bytes,2,rep,name=items"` } @@ -117,11 +110,9 @@ type Eviction struct { metav1.TypeMeta `json:",inline"` // ObjectMeta describes the pod that is being evicted. - // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // DeleteOptions may be provided - // +optional DeleteOptions *metav1.DeleteOptions `json:"deleteOptions,omitempty" protobuf:"bytes,2,opt,name=deleteOptions"` } @@ -183,11 +174,6 @@ type PodSecurityPolicySpec struct { SELinux SELinuxStrategyOptions `json:"seLinux" protobuf:"bytes,10,opt,name=seLinux"` // runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set. RunAsUser RunAsUserStrategyOptions `json:"runAsUser" protobuf:"bytes,11,opt,name=runAsUser"` - // RunAsGroup is the strategy that will dictate the allowable RunAsGroup values that may be set. - // If this field is omitted, the pod's RunAsGroup can take any value. This field requires the - // RunAsGroup feature gate to be enabled. - // +optional - RunAsGroup *RunAsGroupStrategyOptions `json:"runAsGroup,omitempty" protobuf:"bytes,22,opt,name=runAsGroup"` // supplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext. SupplementalGroups SupplementalGroupsStrategyOptions `json:"supplementalGroups" protobuf:"bytes,12,opt,name=supplementalGroups"` // fsGroup is the strategy that will dictate what fs group is used by the SecurityContext. @@ -216,11 +202,6 @@ type PodSecurityPolicySpec struct { // is allowed in the "volumes" field. // +optional AllowedFlexVolumes []AllowedFlexVolume `json:"allowedFlexVolumes,omitempty" protobuf:"bytes,18,rep,name=allowedFlexVolumes"` - // AllowedCSIDrivers is a whitelist of inline CSI drivers that must be explicitly set to be embedded within a pod spec. - // An empty value indicates that any CSI driver can be used for inline ephemeral volumes. - // This is an alpha field, and is only honored if the API server enables the CSIInlineVolume feature gate. - // +optional - AllowedCSIDrivers []AllowedCSIDriver `json:"allowedCSIDrivers,omitempty" protobuf:"bytes,23,rep,name=allowedCSIDrivers"` // allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. // Each entry is either a plain sysctl name or ends in "*" in which case it is considered // as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. @@ -245,11 +226,6 @@ type PodSecurityPolicySpec struct { // This requires the ProcMountType feature flag to be enabled. // +optional AllowedProcMountTypes []v1.ProcMountType `json:"allowedProcMountTypes,omitempty" protobuf:"bytes,21,opt,name=allowedProcMountTypes"` - // runtimeClass is the strategy that will dictate the allowable RuntimeClasses for a pod. - // If this field is omitted, the pod's runtimeClassName field is unrestricted. - // Enforcement of this field depends on the RuntimeClass feature gate being enabled. 
- // +optional - RuntimeClass *RuntimeClassStrategyOptions `json:"runtimeClass,omitempty" protobuf:"bytes,24,opt,name=runtimeClass"` } // AllowedHostPath defines the host volume conditions that will be enabled by a policy @@ -269,10 +245,6 @@ type AllowedHostPath struct { ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,2,opt,name=readOnly"` } -// AllowAllCapabilities can be used as a value for the PodSecurityPolicy.AllowAllCapabilities -// field and means that any capabilities are allowed to be requested. -var AllowAllCapabilities v1.Capability = "*" - // FSType gives strong typing to different file systems that are used by volumes. type FSType string @@ -296,15 +268,8 @@ var ( DownwardAPI FSType = "downwardAPI" FC FSType = "fc" ConfigMap FSType = "configMap" - VsphereVolume FSType = "vsphereVolume" Quobyte FSType = "quobyte" AzureDisk FSType = "azureDisk" - PhotonPersistentDisk FSType = "photonPersistentDisk" - StorageOS FSType = "storageos" - Projected FSType = "projected" - PortworxVolume FSType = "portworxVolume" - ScaleIO FSType = "scaleIO" - CSI FSType = "csi" All FSType = "*" ) @@ -314,12 +279,6 @@ type AllowedFlexVolume struct { Driver string `json:"driver" protobuf:"bytes,1,opt,name=driver"` } -// AllowedCSIDriver represents a single inline CSI Driver that is allowed to be used. -type AllowedCSIDriver struct { - // Name is the registered name of the CSI driver - Name string `json:"name" protobuf:"bytes,1,opt,name=name"` -} - // HostPortRange defines a range of host ports that will be enabled by a policy // for pods to use. It requires both the start and end to be defined. type HostPortRange struct { @@ -360,16 +319,6 @@ type RunAsUserStrategyOptions struct { Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"` } -// RunAsGroupStrategyOptions defines the strategy type and any options used to create the strategy. -type RunAsGroupStrategyOptions struct { - // rule is the strategy that will dictate the allowable RunAsGroup values that may be set. - Rule RunAsGroupStrategy `json:"rule" protobuf:"bytes,1,opt,name=rule,casttype=RunAsGroupStrategy"` - // ranges are the allowed ranges of gids that may be used. If you would like to force a single gid - // then supply a single range with the same start and end. Required for MustRunAs. - // +optional - Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"` -} - // IDRange provides a min/max of an allowed range of IDs. type IDRange struct { // min is the start of the range, inclusive. @@ -391,20 +340,6 @@ const ( RunAsUserStrategyRunAsAny RunAsUserStrategy = "RunAsAny" ) -// RunAsGroupStrategy denotes strategy types for generating RunAsGroup values for a -// Security Context. -type RunAsGroupStrategy string - -const ( - // RunAsGroupStrategyMayRunAs means that container does not need to run with a particular gid. - // However, when RunAsGroup are specified, they have to fall in the defined range. - RunAsGroupStrategyMayRunAs RunAsGroupStrategy = "MayRunAs" - // RunAsGroupStrategyMustRunAs means that container must run as a particular gid. - RunAsGroupStrategyMustRunAs RunAsGroupStrategy = "MustRunAs" - // RunAsUserStrategyRunAsAny means that container may make requests for any gid. - RunAsGroupStrategyRunAsAny RunAsGroupStrategy = "RunAsAny" -) - // FSGroupStrategyOptions defines the strategy type and options used to create the strategy. type FSGroupStrategyOptions struct { // rule is the strategy that will dictate what FSGroup is used in the SecurityContext. 
@@ -421,9 +356,6 @@ type FSGroupStrategyOptions struct { type FSGroupStrategyType string const ( - // FSGroupStrategyMayRunAs means that container does not need to have FSGroup of X applied. - // However, when FSGroups are specified, they have to fall in the defined range. - FSGroupStrategyMayRunAs FSGroupStrategyType = "MayRunAs" // FSGroupStrategyMustRunAs meant that container must have FSGroup of X applied. FSGroupStrategyMustRunAs FSGroupStrategyType = "MustRunAs" // FSGroupStrategyRunAsAny means that container may make requests for any FSGroup labels. @@ -446,34 +378,12 @@ type SupplementalGroupsStrategyOptions struct { type SupplementalGroupsStrategyType string const ( - // SupplementalGroupsStrategyMayRunAs means that container does not need to run with a particular gid. - // However, when gids are specified, they have to fall in the defined range. - SupplementalGroupsStrategyMayRunAs SupplementalGroupsStrategyType = "MayRunAs" // SupplementalGroupsStrategyMustRunAs means that container must run as a particular gid. SupplementalGroupsStrategyMustRunAs SupplementalGroupsStrategyType = "MustRunAs" // SupplementalGroupsStrategyRunAsAny means that container may make requests for any gid. SupplementalGroupsStrategyRunAsAny SupplementalGroupsStrategyType = "RunAsAny" ) -// RuntimeClassStrategyOptions define the strategy that will dictate the allowable RuntimeClasses -// for a pod. -type RuntimeClassStrategyOptions struct { - // allowedRuntimeClassNames is a whitelist of RuntimeClass names that may be specified on a pod. - // A value of "*" means that any RuntimeClass name is allowed, and must be the only item in the - // list. An empty list requires the RuntimeClassName field to be unset. - AllowedRuntimeClassNames []string `json:"allowedRuntimeClassNames" protobuf:"bytes,1,rep,name=allowedRuntimeClassNames"` - // defaultRuntimeClassName is the default RuntimeClassName to set on the pod. - // The default MUST be allowed by the allowedRuntimeClassNames list. - // A value of nil does not mutate the Pod. - // +optional - DefaultRuntimeClassName *string `json:"defaultRuntimeClassName,omitempty" protobuf:"bytes,2,opt,name=defaultRuntimeClassName"` -} - -// AllowAllRuntimeClassNames can be used as a value for the -// RuntimeClassStrategyOptions.AllowedRuntimeClassNames field and means that any RuntimeClassName is -// allowed. -const AllowAllRuntimeClassNames = "*" - // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // PodSecurityPolicyList is a list of PodSecurityPolicy objects. diff --git a/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go index eb2eec933..df10b2a29 100644 --- a/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go @@ -27,15 +27,6 @@ package v1beta1 // Those methods can be generated by using hack/update-generated-swagger-docs.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. 
-var map_AllowedCSIDriver = map[string]string{ - "": "AllowedCSIDriver represents a single inline CSI Driver that is allowed to be used.", - "name": "Name is the registered name of the CSI driver", -} - -func (AllowedCSIDriver) SwaggerDoc() map[string]string { - return map_AllowedCSIDriver -} - var map_AllowedFlexVolume = map[string]string{ "": "AllowedFlexVolume represents a single Flexvolume that is allowed to be used.", "driver": "driver is the name of the Flexvolume driver.", @@ -171,7 +162,6 @@ var map_PodSecurityPolicySpec = map[string]string{ "hostIPC": "hostIPC determines if the policy allows the use of HostIPC in the pod spec.", "seLinux": "seLinux is the strategy that will dictate the allowable labels that may be set.", "runAsUser": "runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set.", - "runAsGroup": "RunAsGroup is the strategy that will dictate the allowable RunAsGroup values that may be set. If this field is omitted, the pod's RunAsGroup can take any value. This field requires the RunAsGroup feature gate to be enabled.", "supplementalGroups": "supplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext.", "fsGroup": "fsGroup is the strategy that will dictate what fs group is used by the SecurityContext.", "readOnlyRootFilesystem": "readOnlyRootFilesystem when set to true will force containers to run with a read only root file system. If the container specifically requests to run with a non-read only root file system the PSP should deny the pod. If set to false the container may run with a read only root file system if it wishes but it will not be forced to.", @@ -179,27 +169,15 @@ var map_PodSecurityPolicySpec = map[string]string{ "allowPrivilegeEscalation": "allowPrivilegeEscalation determines if a pod can request to allow privilege escalation. If unspecified, defaults to true.", "allowedHostPaths": "allowedHostPaths is a white list of allowed host paths. Empty indicates that all host paths may be used.", "allowedFlexVolumes": "allowedFlexVolumes is a whitelist of allowed Flexvolumes. Empty or nil indicates that all Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes is allowed in the \"volumes\" field.", - "allowedCSIDrivers": "AllowedCSIDrivers is a whitelist of inline CSI drivers that must be explicitly set to be embedded within a pod spec. An empty value indicates that any CSI driver can be used for inline ephemeral volumes. This is an alpha field, and is only honored if the API server enables the CSIInlineVolume feature gate.", "allowedUnsafeSysctls": "allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \"*\" in which case it is considered as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. Kubelet has to whitelist all allowed unsafe sysctls explicitly to avoid rejection.\n\nExamples: e.g. \"foo/*\" allows \"foo/bar\", \"foo/baz\", etc. e.g. \"foo.*\" allows \"foo.bar\", \"foo.baz\", etc.", "forbiddenSysctls": "forbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \"*\" in which case it is considered as a prefix of forbidden sysctls. Single * means all sysctls are forbidden.\n\nExamples: e.g. \"foo/*\" forbids \"foo/bar\", \"foo/baz\", etc. e.g. 
\"foo.*\" forbids \"foo.bar\", \"foo.baz\", etc.", "allowedProcMountTypes": "AllowedProcMountTypes is a whitelist of allowed ProcMountTypes. Empty or nil indicates that only the DefaultProcMountType may be used. This requires the ProcMountType feature flag to be enabled.", - "runtimeClass": "runtimeClass is the strategy that will dictate the allowable RuntimeClasses for a pod. If this field is omitted, the pod's runtimeClassName field is unrestricted. Enforcement of this field depends on the RuntimeClass feature gate being enabled.", } func (PodSecurityPolicySpec) SwaggerDoc() map[string]string { return map_PodSecurityPolicySpec } -var map_RunAsGroupStrategyOptions = map[string]string{ - "": "RunAsGroupStrategyOptions defines the strategy type and any options used to create the strategy.", - "rule": "rule is the strategy that will dictate the allowable RunAsGroup values that may be set.", - "ranges": "ranges are the allowed ranges of gids that may be used. If you would like to force a single gid then supply a single range with the same start and end. Required for MustRunAs.", -} - -func (RunAsGroupStrategyOptions) SwaggerDoc() map[string]string { - return map_RunAsGroupStrategyOptions -} - var map_RunAsUserStrategyOptions = map[string]string{ "": "RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy.", "rule": "rule is the strategy that will dictate the allowable RunAsUser values that may be set.", @@ -210,16 +188,6 @@ func (RunAsUserStrategyOptions) SwaggerDoc() map[string]string { return map_RunAsUserStrategyOptions } -var map_RuntimeClassStrategyOptions = map[string]string{ - "": "RuntimeClassStrategyOptions define the strategy that will dictate the allowable RuntimeClasses for a pod.", - "allowedRuntimeClassNames": "allowedRuntimeClassNames is a whitelist of RuntimeClass names that may be specified on a pod. A value of \"*\" means that any RuntimeClass name is allowed, and must be the only item in the list. An empty list requires the RuntimeClassName field to be unset.", - "defaultRuntimeClassName": "defaultRuntimeClassName is the default RuntimeClassName to set on the pod. The default MUST be allowed by the allowedRuntimeClassNames list. A value of nil does not mutate the Pod.", -} - -func (RuntimeClassStrategyOptions) SwaggerDoc() map[string]string { - return map_RuntimeClassStrategyOptions -} - var map_SELinuxStrategyOptions = map[string]string{ "": "SELinuxStrategyOptions defines the strategy type and any options used to create the strategy.", "rule": "rule is the strategy that will dictate the allowable labels that may be set.", diff --git a/vendor/k8s.io/api/policy/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/policy/v1beta1/zz_generated.deepcopy.go index 75851e124..9af268a43 100644 --- a/vendor/k8s.io/api/policy/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/policy/v1beta1/zz_generated.deepcopy.go @@ -27,22 +27,6 @@ import ( intstr "k8s.io/apimachinery/pkg/util/intstr" ) -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AllowedCSIDriver) DeepCopyInto(out *AllowedCSIDriver) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedCSIDriver. 
-func (in *AllowedCSIDriver) DeepCopy() *AllowedCSIDriver { - if in == nil { - return nil - } - out := new(AllowedCSIDriver) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *AllowedFlexVolume) DeepCopyInto(out *AllowedFlexVolume) { *out = *in @@ -191,7 +175,7 @@ func (in *PodDisruptionBudget) DeepCopyObject() runtime.Object { func (in *PodDisruptionBudgetList) DeepCopyInto(out *PodDisruptionBudgetList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]PodDisruptionBudget, len(*in)) @@ -305,7 +289,7 @@ func (in *PodSecurityPolicy) DeepCopyObject() runtime.Object { func (in *PodSecurityPolicyList) DeepCopyInto(out *PodSecurityPolicyList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]PodSecurityPolicy, len(*in)) @@ -364,11 +348,6 @@ func (in *PodSecurityPolicySpec) DeepCopyInto(out *PodSecurityPolicySpec) { } in.SELinux.DeepCopyInto(&out.SELinux) in.RunAsUser.DeepCopyInto(&out.RunAsUser) - if in.RunAsGroup != nil { - in, out := &in.RunAsGroup, &out.RunAsGroup - *out = new(RunAsGroupStrategyOptions) - (*in).DeepCopyInto(*out) - } in.SupplementalGroups.DeepCopyInto(&out.SupplementalGroups) in.FSGroup.DeepCopyInto(&out.FSGroup) if in.DefaultAllowPrivilegeEscalation != nil { @@ -391,11 +370,6 @@ func (in *PodSecurityPolicySpec) DeepCopyInto(out *PodSecurityPolicySpec) { *out = make([]AllowedFlexVolume, len(*in)) copy(*out, *in) } - if in.AllowedCSIDrivers != nil { - in, out := &in.AllowedCSIDrivers, &out.AllowedCSIDrivers - *out = make([]AllowedCSIDriver, len(*in)) - copy(*out, *in) - } if in.AllowedUnsafeSysctls != nil { in, out := &in.AllowedUnsafeSysctls, &out.AllowedUnsafeSysctls *out = make([]string, len(*in)) @@ -411,11 +385,6 @@ func (in *PodSecurityPolicySpec) DeepCopyInto(out *PodSecurityPolicySpec) { *out = make([]corev1.ProcMountType, len(*in)) copy(*out, *in) } - if in.RuntimeClass != nil { - in, out := &in.RuntimeClass, &out.RuntimeClass - *out = new(RuntimeClassStrategyOptions) - (*in).DeepCopyInto(*out) - } return } @@ -429,27 +398,6 @@ func (in *PodSecurityPolicySpec) DeepCopy() *PodSecurityPolicySpec { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RunAsGroupStrategyOptions) DeepCopyInto(out *RunAsGroupStrategyOptions) { - *out = *in - if in.Ranges != nil { - in, out := &in.Ranges, &out.Ranges - *out = make([]IDRange, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunAsGroupStrategyOptions. -func (in *RunAsGroupStrategyOptions) DeepCopy() *RunAsGroupStrategyOptions { - if in == nil { - return nil - } - out := new(RunAsGroupStrategyOptions) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RunAsUserStrategyOptions) DeepCopyInto(out *RunAsUserStrategyOptions) { *out = *in @@ -471,32 +419,6 @@ func (in *RunAsUserStrategyOptions) DeepCopy() *RunAsUserStrategyOptions { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *RuntimeClassStrategyOptions) DeepCopyInto(out *RuntimeClassStrategyOptions) { - *out = *in - if in.AllowedRuntimeClassNames != nil { - in, out := &in.AllowedRuntimeClassNames, &out.AllowedRuntimeClassNames - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.DefaultRuntimeClassName != nil { - in, out := &in.DefaultRuntimeClassName, &out.DefaultRuntimeClassName - *out = new(string) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuntimeClassStrategyOptions. -func (in *RuntimeClassStrategyOptions) DeepCopy() *RuntimeClassStrategyOptions { - if in == nil { - return nil - } - out := new(RuntimeClassStrategyOptions) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *SELinuxStrategyOptions) DeepCopyInto(out *SELinuxStrategyOptions) { *out = *in diff --git a/vendor/k8s.io/api/rbac/v1/doc.go b/vendor/k8s.io/api/rbac/v1/doc.go index 80f43ce92..28ceb269b 100644 --- a/vendor/k8s.io/api/rbac/v1/doc.go +++ b/vendor/k8s.io/api/rbac/v1/doc.go @@ -15,9 +15,7 @@ limitations under the License. */ // +k8s:deepcopy-gen=package -// +k8s:protobuf-gen=package // +k8s:openapi-gen=true // +groupName=rbac.authorization.k8s.io - package v1 // import "k8s.io/api/rbac/v1" diff --git a/vendor/k8s.io/api/rbac/v1/generated.pb.go b/vendor/k8s.io/api/rbac/v1/generated.pb.go index 708db3276..21010fbee 100644 --- a/vendor/k8s.io/api/rbac/v1/generated.pb.go +++ b/vendor/k8s.io/api/rbac/v1/generated.pb.go @@ -14,8 +14,9 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// Code generated by protoc-gen-gogo. // source: k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1/generated.proto +// DO NOT EDIT! /* Package v1 is a generated protocol buffer package. 
@@ -640,6 +641,24 @@ func (m *Subject) MarshalTo(dAtA []byte) (int, error) { return i, nil } +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) diff --git a/vendor/k8s.io/api/rbac/v1/types.go b/vendor/k8s.io/api/rbac/v1/types.go index 7ba7d0543..17163cbb2 100644 --- a/vendor/k8s.io/api/rbac/v1/types.go +++ b/vendor/k8s.io/api/rbac/v1/types.go @@ -108,7 +108,6 @@ type Role struct { metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Rules holds all the PolicyRules for this Role - // +optional Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"` } @@ -171,7 +170,6 @@ type ClusterRole struct { metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Rules holds all the PolicyRules for this ClusterRole - // +optional Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"` // AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. diff --git a/vendor/k8s.io/api/rbac/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/rbac/v1/types_swagger_doc_generated.go index 83ce310e6..0ec20c88e 100644 --- a/vendor/k8s.io/api/rbac/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/rbac/v1/types_swagger_doc_generated.go @@ -28,7 +28,7 @@ package v1 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_AggregationRule = map[string]string{ - "": "AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole", + "": "AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole", "clusterRoleSelectors": "ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. 
If any of the selectors match, then the ClusterRole's permissions will be added", } diff --git a/vendor/k8s.io/api/rbac/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/rbac/v1/zz_generated.deepcopy.go index 095a5e9c2..07eb321ea 100644 --- a/vendor/k8s.io/api/rbac/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/rbac/v1/zz_generated.deepcopy.go @@ -122,7 +122,7 @@ func (in *ClusterRoleBinding) DeepCopyObject() runtime.Object { func (in *ClusterRoleBindingList) DeepCopyInto(out *ClusterRoleBindingList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ClusterRoleBinding, len(*in)) @@ -155,7 +155,7 @@ func (in *ClusterRoleBindingList) DeepCopyObject() runtime.Object { func (in *ClusterRoleList) DeepCopyInto(out *ClusterRoleList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ClusterRole, len(*in)) @@ -294,7 +294,7 @@ func (in *RoleBinding) DeepCopyObject() runtime.Object { func (in *RoleBindingList) DeepCopyInto(out *RoleBindingList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]RoleBinding, len(*in)) @@ -327,7 +327,7 @@ func (in *RoleBindingList) DeepCopyObject() runtime.Object { func (in *RoleList) DeepCopyInto(out *RoleList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Role, len(*in)) diff --git a/vendor/k8s.io/api/rbac/v1alpha1/doc.go b/vendor/k8s.io/api/rbac/v1alpha1/doc.go index 918b8a337..5236a477f 100644 --- a/vendor/k8s.io/api/rbac/v1alpha1/doc.go +++ b/vendor/k8s.io/api/rbac/v1alpha1/doc.go @@ -15,9 +15,7 @@ limitations under the License. */ // +k8s:deepcopy-gen=package -// +k8s:protobuf-gen=package // +k8s:openapi-gen=true // +groupName=rbac.authorization.k8s.io - package v1alpha1 // import "k8s.io/api/rbac/v1alpha1" diff --git a/vendor/k8s.io/api/rbac/v1alpha1/generated.pb.go b/vendor/k8s.io/api/rbac/v1alpha1/generated.pb.go index e035b331f..71eced8d4 100644 --- a/vendor/k8s.io/api/rbac/v1alpha1/generated.pb.go +++ b/vendor/k8s.io/api/rbac/v1alpha1/generated.pb.go @@ -14,8 +14,9 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// Code generated by protoc-gen-gogo. // source: k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1alpha1/generated.proto +// DO NOT EDIT! /* Package v1alpha1 is a generated protocol buffer package. 
@@ -640,6 +641,24 @@ func (m *Subject) MarshalTo(dAtA []byte) (int, error) { return i, nil } +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) diff --git a/vendor/k8s.io/api/rbac/v1alpha1/types.go b/vendor/k8s.io/api/rbac/v1alpha1/types.go index 521cce4f3..398d6a169 100644 --- a/vendor/k8s.io/api/rbac/v1alpha1/types.go +++ b/vendor/k8s.io/api/rbac/v1alpha1/types.go @@ -110,7 +110,6 @@ type Role struct { metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Rules holds all the PolicyRules for this Role - // +optional Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"` } @@ -173,7 +172,6 @@ type ClusterRole struct { metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Rules holds all the PolicyRules for this ClusterRole - // +optional Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"` // AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. diff --git a/vendor/k8s.io/api/rbac/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/rbac/v1alpha1/types_swagger_doc_generated.go index d7b194ae4..1d6ef30b0 100644 --- a/vendor/k8s.io/api/rbac/v1alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/rbac/v1alpha1/types_swagger_doc_generated.go @@ -28,7 +28,7 @@ package v1alpha1 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_AggregationRule = map[string]string{ - "": "AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole", + "": "AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole", "clusterRoleSelectors": "ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. 
If any of the selectors match, then the ClusterRole's permissions will be added", } diff --git a/vendor/k8s.io/api/rbac/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/rbac/v1alpha1/zz_generated.deepcopy.go index 0358227fa..97f63331e 100644 --- a/vendor/k8s.io/api/rbac/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/rbac/v1alpha1/zz_generated.deepcopy.go @@ -122,7 +122,7 @@ func (in *ClusterRoleBinding) DeepCopyObject() runtime.Object { func (in *ClusterRoleBindingList) DeepCopyInto(out *ClusterRoleBindingList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ClusterRoleBinding, len(*in)) @@ -155,7 +155,7 @@ func (in *ClusterRoleBindingList) DeepCopyObject() runtime.Object { func (in *ClusterRoleList) DeepCopyInto(out *ClusterRoleList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ClusterRole, len(*in)) @@ -294,7 +294,7 @@ func (in *RoleBinding) DeepCopyObject() runtime.Object { func (in *RoleBindingList) DeepCopyInto(out *RoleBindingList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]RoleBinding, len(*in)) @@ -327,7 +327,7 @@ func (in *RoleBindingList) DeepCopyObject() runtime.Object { func (in *RoleList) DeepCopyInto(out *RoleList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Role, len(*in)) diff --git a/vendor/k8s.io/api/rbac/v1beta1/doc.go b/vendor/k8s.io/api/rbac/v1beta1/doc.go index fe7aae975..4b77c9c6b 100644 --- a/vendor/k8s.io/api/rbac/v1beta1/doc.go +++ b/vendor/k8s.io/api/rbac/v1beta1/doc.go @@ -15,9 +15,7 @@ limitations under the License. */ // +k8s:deepcopy-gen=package -// +k8s:protobuf-gen=package // +k8s:openapi-gen=true // +groupName=rbac.authorization.k8s.io - package v1beta1 // import "k8s.io/api/rbac/v1beta1" diff --git a/vendor/k8s.io/api/rbac/v1beta1/generated.pb.go b/vendor/k8s.io/api/rbac/v1beta1/generated.pb.go index 904a6e7a2..71e5799e3 100644 --- a/vendor/k8s.io/api/rbac/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/rbac/v1beta1/generated.pb.go @@ -14,8 +14,9 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// Code generated by protoc-gen-gogo. // source: k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1beta1/generated.proto +// DO NOT EDIT! /* Package v1beta1 is a generated protocol buffer package. 
@@ -640,6 +641,24 @@ func (m *Subject) MarshalTo(dAtA []byte) (int, error) { return i, nil } +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) diff --git a/vendor/k8s.io/api/rbac/v1beta1/types.go b/vendor/k8s.io/api/rbac/v1beta1/types.go index 35843c90d..857b67a6f 100644 --- a/vendor/k8s.io/api/rbac/v1beta1/types.go +++ b/vendor/k8s.io/api/rbac/v1beta1/types.go @@ -109,7 +109,6 @@ type Role struct { metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Rules holds all the PolicyRules for this Role - // +optional Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"` } @@ -172,7 +171,6 @@ type ClusterRole struct { metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Rules holds all the PolicyRules for this ClusterRole - // +optional Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"` // AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. // If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be diff --git a/vendor/k8s.io/api/rbac/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/rbac/v1beta1/types_swagger_doc_generated.go index c80327593..66dba6ca1 100644 --- a/vendor/k8s.io/api/rbac/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/rbac/v1beta1/types_swagger_doc_generated.go @@ -28,7 +28,7 @@ package v1beta1 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_AggregationRule = map[string]string{ - "": "AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole", + "": "AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole", "clusterRoleSelectors": "ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. 
If any of the selectors match, then the ClusterRole's permissions will be added", } diff --git a/vendor/k8s.io/api/rbac/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/rbac/v1beta1/zz_generated.deepcopy.go index 7ffe58106..c085c90b1 100644 --- a/vendor/k8s.io/api/rbac/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/rbac/v1beta1/zz_generated.deepcopy.go @@ -122,7 +122,7 @@ func (in *ClusterRoleBinding) DeepCopyObject() runtime.Object { func (in *ClusterRoleBindingList) DeepCopyInto(out *ClusterRoleBindingList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ClusterRoleBinding, len(*in)) @@ -155,7 +155,7 @@ func (in *ClusterRoleBindingList) DeepCopyObject() runtime.Object { func (in *ClusterRoleList) DeepCopyInto(out *ClusterRoleList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ClusterRole, len(*in)) @@ -294,7 +294,7 @@ func (in *RoleBinding) DeepCopyObject() runtime.Object { func (in *RoleBindingList) DeepCopyInto(out *RoleBindingList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]RoleBinding, len(*in)) @@ -327,7 +327,7 @@ func (in *RoleBindingList) DeepCopyObject() runtime.Object { func (in *RoleList) DeepCopyInto(out *RoleList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Role, len(*in)) diff --git a/vendor/k8s.io/api/scheduling/v1/doc.go b/vendor/k8s.io/api/scheduling/v1/doc.go deleted file mode 100644 index 76c4da002..000000000 --- a/vendor/k8s.io/api/scheduling/v1/doc.go +++ /dev/null @@ -1,23 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// +k8s:deepcopy-gen=package -// +k8s:protobuf-gen=package -// +k8s:openapi-gen=true - -// +groupName=scheduling.k8s.io - -package v1 // import "k8s.io/api/scheduling/v1" diff --git a/vendor/k8s.io/api/scheduling/v1/generated.pb.go b/vendor/k8s.io/api/scheduling/v1/generated.pb.go deleted file mode 100644 index bed5f2f39..000000000 --- a/vendor/k8s.io/api/scheduling/v1/generated.pb.go +++ /dev/null @@ -1,667 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: k8s.io/kubernetes/vendor/k8s.io/api/scheduling/v1/generated.proto - -/* - Package v1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/scheduling/v1/generated.proto - - It has these top-level messages: - PriorityClass - PriorityClassList -*/ -package v1 - -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" - -import k8s_io_api_core_v1 "k8s.io/api/core/v1" - -import strings "strings" -import reflect "reflect" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package - -func (m *PriorityClass) Reset() { *m = PriorityClass{} } -func (*PriorityClass) ProtoMessage() {} -func (*PriorityClass) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } - -func (m *PriorityClassList) Reset() { *m = PriorityClassList{} } -func (*PriorityClassList) ProtoMessage() {} -func (*PriorityClassList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } - -func init() { - proto.RegisterType((*PriorityClass)(nil), "k8s.io.api.scheduling.v1.PriorityClass") - proto.RegisterType((*PriorityClassList)(nil), "k8s.io.api.scheduling.v1.PriorityClassList") -} -func (m *PriorityClass) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PriorityClass) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Value)) - dAtA[i] = 0x18 - i++ - if m.GlobalDefault { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Description))) - i += copy(dAtA[i:], m.Description) - if m.PreemptionPolicy != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PreemptionPolicy))) - i += copy(dAtA[i:], *m.PreemptionPolicy) - } - return i, nil -} - -func (m *PriorityClassList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PriorityClassList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n2, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n2 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - 
} - dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *PriorityClass) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - n += 1 + sovGenerated(uint64(m.Value)) - n += 2 - l = len(m.Description) - n += 1 + l + sovGenerated(uint64(l)) - if m.PreemptionPolicy != nil { - l = len(*m.PreemptionPolicy) - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *PriorityClassList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *PriorityClass) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PriorityClass{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Value:` + fmt.Sprintf("%v", this.Value) + `,`, - `GlobalDefault:` + fmt.Sprintf("%v", this.GlobalDefault) + `,`, - `Description:` + fmt.Sprintf("%v", this.Description) + `,`, - `PreemptionPolicy:` + valueToStringGenerated(this.PreemptionPolicy) + `,`, - `}`, - }, "") - return s -} -func (this *PriorityClassList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PriorityClassList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "PriorityClass", "PriorityClass", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *PriorityClass) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PriorityClass: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PriorityClass: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - 
m.Value = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Value |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field GlobalDefault", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.GlobalDefault = bool(v != 0) - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Description = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PreemptionPolicy", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := k8s_io_api_core_v1.PreemptionPolicy(dAtA[iNdEx:postIndex]) - m.PreemptionPolicy = &s - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PriorityClassList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PriorityClassList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PriorityClassList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := 
m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, PriorityClass{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenerated(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthGenerated - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipGenerated(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") -) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/scheduling/v1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 488 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x93, 0x3f, 0x8f, 0xd3, 0x30, - 0x18, 0xc6, 0xeb, 0x1e, 0x95, 0x0e, 0x57, 0x95, 0x4a, 0x10, 0x52, 0xd4, 0x21, 0xad, 0x7a, 0x03, - 0x59, 0xb0, 0xe9, 0x09, 0x10, 0xd2, 0x4d, 0x84, 0x93, 0x10, 0xd2, 0x21, 0xaa, 0x0c, 
0x0c, 0x88, - 0x01, 0x27, 0x79, 0x2f, 0x35, 0x4d, 0xe2, 0xc8, 0x76, 0x22, 0x75, 0xe3, 0x23, 0xf0, 0x8d, 0x58, - 0x3b, 0xde, 0x78, 0x53, 0x45, 0xc3, 0x47, 0x60, 0x63, 0x42, 0x49, 0xc3, 0xa5, 0x7f, 0xee, 0x04, - 0x5b, 0xfc, 0x3e, 0xcf, 0xef, 0xb1, 0xfd, 0x24, 0xc1, 0xaf, 0xe6, 0x2f, 0x15, 0xe1, 0x82, 0xce, - 0x33, 0x0f, 0x64, 0x02, 0x1a, 0x14, 0xcd, 0x21, 0x09, 0x84, 0xa4, 0xb5, 0xc0, 0x52, 0x4e, 0x95, - 0x3f, 0x83, 0x20, 0x8b, 0x78, 0x12, 0xd2, 0x7c, 0x42, 0x43, 0x48, 0x40, 0x32, 0x0d, 0x01, 0x49, - 0xa5, 0xd0, 0xc2, 0x30, 0x37, 0x4e, 0xc2, 0x52, 0x4e, 0x1a, 0x27, 0xc9, 0x27, 0x83, 0x27, 0x21, - 0xd7, 0xb3, 0xcc, 0x23, 0xbe, 0x88, 0x69, 0x28, 0x42, 0x41, 0x2b, 0xc0, 0xcb, 0x2e, 0xab, 0x55, - 0xb5, 0xa8, 0x9e, 0x36, 0x41, 0x83, 0xf1, 0xd6, 0x96, 0xbe, 0x90, 0x70, 0xcb, 0x66, 0x83, 0x67, - 0x8d, 0x27, 0x66, 0xfe, 0x8c, 0x27, 0x20, 0x17, 0x34, 0x9d, 0x87, 0xe5, 0x40, 0xd1, 0x18, 0x34, - 0xbb, 0x8d, 0xa2, 0x77, 0x51, 0x32, 0x4b, 0x34, 0x8f, 0xe1, 0x00, 0x78, 0xf1, 0x2f, 0xa0, 0xbc, - 0x68, 0xcc, 0xf6, 0xb9, 0xf1, 0xaf, 0x36, 0xee, 0x4d, 0x25, 0x17, 0x92, 0xeb, 0xc5, 0xeb, 0x88, - 0x29, 0x65, 0x7c, 0xc6, 0xc7, 0xe5, 0xa9, 0x02, 0xa6, 0x99, 0x89, 0x46, 0xc8, 0xee, 0x9e, 0x3e, - 0x25, 0x4d, 0x61, 0x37, 0xe1, 0x24, 0x9d, 0x87, 0xe5, 0x40, 0x91, 0xd2, 0x4d, 0xf2, 0x09, 0x79, - 0xef, 0x7d, 0x01, 0x5f, 0xbf, 0x03, 0xcd, 0x1c, 0x63, 0xb9, 0x1a, 0xb6, 0x8a, 0xd5, 0x10, 0x37, - 0x33, 0xf7, 0x26, 0xd5, 0x38, 0xc1, 0x9d, 0x9c, 0x45, 0x19, 0x98, 0xed, 0x11, 0xb2, 0x3b, 0x4e, - 0xaf, 0x36, 0x77, 0x3e, 0x94, 0x43, 0x77, 0xa3, 0x19, 0x67, 0xb8, 0x17, 0x46, 0xc2, 0x63, 0xd1, - 0x39, 0x5c, 0xb2, 0x2c, 0xd2, 0xe6, 0xd1, 0x08, 0xd9, 0xc7, 0xce, 0xa3, 0xda, 0xdc, 0x7b, 0xb3, - 0x2d, 0xba, 0xbb, 0x5e, 0xe3, 0x39, 0xee, 0x06, 0xa0, 0x7c, 0xc9, 0x53, 0xcd, 0x45, 0x62, 0xde, - 0x1b, 0x21, 0xfb, 0xbe, 0xf3, 0xb0, 0x46, 0xbb, 0xe7, 0x8d, 0xe4, 0x6e, 0xfb, 0x8c, 0x10, 0xf7, - 0x53, 0x09, 0x10, 0x57, 0xab, 0xa9, 0x88, 0xb8, 0xbf, 0x30, 0x3b, 0x15, 0x7b, 0x56, 0xac, 0x86, - 0xfd, 0xe9, 0x9e, 0xf6, 0x7b, 0x35, 0x3c, 0x39, 0xfc, 0x02, 0xc8, 0xbe, 0xcd, 0x3d, 0x08, 0x1d, - 0x7f, 0x47, 0xf8, 0xc1, 0x4e, 0xeb, 0x17, 0x5c, 0x69, 0xe3, 0xd3, 0x41, 0xf3, 0xe4, 0xff, 0x9a, - 0x2f, 0xe9, 0xaa, 0xf7, 0x7e, 0x7d, 0xc5, 0xe3, 0xbf, 0x93, 0xad, 0xd6, 0x2f, 0x70, 0x87, 0x6b, - 0x88, 0x95, 0xd9, 0x1e, 0x1d, 0xd9, 0xdd, 0xd3, 0xc7, 0xe4, 0xae, 0xbf, 0x80, 0xec, 0x9c, 0xac, - 0x79, 0x3d, 0x6f, 0x4b, 0xda, 0xdd, 0x84, 0x38, 0xf6, 0x72, 0x6d, 0xb5, 0xae, 0xd6, 0x56, 0xeb, - 0x7a, 0x6d, 0xb5, 0xbe, 0x16, 0x16, 0x5a, 0x16, 0x16, 0xba, 0x2a, 0x2c, 0x74, 0x5d, 0x58, 0xe8, - 0x47, 0x61, 0xa1, 0x6f, 0x3f, 0xad, 0xd6, 0xc7, 0x76, 0x3e, 0xf9, 0x13, 0x00, 0x00, 0xff, 0xff, - 0x53, 0xd9, 0x28, 0x30, 0xb1, 0x03, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/scheduling/v1/register.go b/vendor/k8s.io/api/scheduling/v1/register.go deleted file mode 100644 index 33977fe9a..000000000 --- a/vendor/k8s.io/api/scheduling/v1/register.go +++ /dev/null @@ -1,55 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/
-
-package v1
-
-import (
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/runtime"
-	"k8s.io/apimachinery/pkg/runtime/schema"
-)
-
-// GroupName is the group name use in this package
-const GroupName = "scheduling.k8s.io"
-
-// SchemeGroupVersion is group version used to register these objects
-var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"}
-
-// Resource takes an unqualified resource and returns a Group qualified GroupResource
-func Resource(resource string) schema.GroupResource {
-	return SchemeGroupVersion.WithResource(resource).GroupResource()
-}
-
-var (
-	// TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api.
-	// localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
-
-	// SchemeBuilder is a collection of functions that add things to a scheme.
-	SchemeBuilder      = runtime.NewSchemeBuilder(addKnownTypes)
-	localSchemeBuilder = &SchemeBuilder
-	// AddToScheme applies all the stored functions to the scheme.
-	AddToScheme = localSchemeBuilder.AddToScheme
-)
-
-// Adds the list of known types to the given scheme.
-func addKnownTypes(scheme *runtime.Scheme) error {
-	scheme.AddKnownTypes(SchemeGroupVersion,
-		&PriorityClass{},
-		&PriorityClassList{},
-	)
-	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
-	return nil
-}
diff --git a/vendor/k8s.io/api/scheduling/v1/types.go b/vendor/k8s.io/api/scheduling/v1/types.go
deleted file mode 100644
index e91842ec4..000000000
--- a/vendor/k8s.io/api/scheduling/v1/types.go
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
-Copyright 2019 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1
-
-import (
-	apiv1 "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-// +genclient
-// +genclient:nonNamespaced
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// PriorityClass defines mapping from a priority class name to the priority
-// integer value. The value can be any valid integer.
-type PriorityClass struct {
-	metav1.TypeMeta `json:",inline"`
-	// Standard object's metadata.
-	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
-	// +optional
-	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
-
-	// The value of this priority class. This is the actual priority that pods
-	// receive when they have the name of this class in their pod spec.
-	Value int32 `json:"value" protobuf:"bytes,2,opt,name=value"`
-
-	// globalDefault specifies whether this PriorityClass should be considered as
-	// the default priority for pods that do not have any priority class.
-	// Only one PriorityClass can be marked as `globalDefault`. However, if more than
-	// one PriorityClasses exists with their `globalDefault` field set to true,
-	// the smallest value of such global default PriorityClasses will be used as the default priority.
-	// +optional
-	GlobalDefault bool `json:"globalDefault,omitempty" protobuf:"bytes,3,opt,name=globalDefault"`
-
-	// description is an arbitrary string that usually provides guidelines on
-	// when this priority class should be used.
-	// +optional
-	Description string `json:"description,omitempty" protobuf:"bytes,4,opt,name=description"`
-
-	// PreemptionPolicy is the Policy for preempting pods with lower priority.
-	// One of Never, PreemptLowerPriority.
-	// Defaults to PreemptLowerPriority if unset.
-	// This field is alpha-level and is only honored by servers that enable the NonPreemptingPriority feature.
-	// +optional
-	PreemptionPolicy *apiv1.PreemptionPolicy `json:"preemptionPolicy,omitempty" protobuf:"bytes,5,opt,name=preemptionPolicy"`
-}
-
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// PriorityClassList is a collection of priority classes.
-type PriorityClassList struct {
-	metav1.TypeMeta `json:",inline"`
-	// Standard list metadata
-	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
-	// +optional
-	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
-
-	// items is the list of PriorityClasses
-	Items []PriorityClass `json:"items" protobuf:"bytes,2,rep,name=items"`
-}
diff --git a/vendor/k8s.io/api/scheduling/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/scheduling/v1/types_swagger_doc_generated.go
deleted file mode 100644
index 853f255d5..000000000
--- a/vendor/k8s.io/api/scheduling/v1/types_swagger_doc_generated.go
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1
-
-// This file contains a collection of methods that can be used from go-restful to
-// generate Swagger API documentation for its models. Please read this PR for more
-// information on the implementation: https://github.com/emicklei/go-restful/pull/215
-//
-// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
-// they are on one line! For multiple line or blocks that you want to ignore use ---.
-// Any context after a --- is ignored.
-//
-// Those methods can be generated by using hack/update-generated-swagger-docs.sh
-
-// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
-var map_PriorityClass = map[string]string{
-	"":                 "PriorityClass defines mapping from a priority class name to the priority integer value. The value can be any valid integer.",
-	"metadata":         "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
-	"value":            "The value of this priority class. This is the actual priority that pods receive when they have the name of this class in their pod spec.",
-	"globalDefault":    "globalDefault specifies whether this PriorityClass should be considered as the default priority for pods that do not have any priority class. Only one PriorityClass can be marked as `globalDefault`. However, if more than one PriorityClasses exists with their `globalDefault` field set to true, the smallest value of such global default PriorityClasses will be used as the default priority.",
-	"description":      "description is an arbitrary string that usually provides guidelines on when this priority class should be used.",
-	"preemptionPolicy": "PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. This field is alpha-level and is only honored by servers that enable the NonPreemptingPriority feature.",
-}
-
-func (PriorityClass) SwaggerDoc() map[string]string {
-	return map_PriorityClass
-}
-
-var map_PriorityClassList = map[string]string{
-	"":         "PriorityClassList is a collection of priority classes.",
-	"metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
-	"items":    "items is the list of PriorityClasses",
-}
-
-func (PriorityClassList) SwaggerDoc() map[string]string {
-	return map_PriorityClassList
-}
-
-// AUTO-GENERATED FUNCTIONS END HERE
diff --git a/vendor/k8s.io/api/scheduling/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/scheduling/v1/zz_generated.deepcopy.go
deleted file mode 100644
index 63bfe6404..000000000
--- a/vendor/k8s.io/api/scheduling/v1/zz_generated.deepcopy.go
+++ /dev/null
@@ -1,90 +0,0 @@
-// +build !ignore_autogenerated
-
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by deepcopy-gen. DO NOT EDIT.
-
-package v1
-
-import (
-	corev1 "k8s.io/api/core/v1"
-	runtime "k8s.io/apimachinery/pkg/runtime"
-)
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *PriorityClass) DeepCopyInto(out *PriorityClass) {
-	*out = *in
-	out.TypeMeta = in.TypeMeta
-	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
-	if in.PreemptionPolicy != nil {
-		in, out := &in.PreemptionPolicy, &out.PreemptionPolicy
-		*out = new(corev1.PreemptionPolicy)
-		**out = **in
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityClass.
-func (in *PriorityClass) DeepCopy() *PriorityClass {
-	if in == nil {
-		return nil
-	}
-	out := new(PriorityClass)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *PriorityClass) DeepCopyObject() runtime.Object {
-	if c := in.DeepCopy(); c != nil {
-		return c
-	}
-	return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *PriorityClassList) DeepCopyInto(out *PriorityClassList) {
-	*out = *in
-	out.TypeMeta = in.TypeMeta
-	in.ListMeta.DeepCopyInto(&out.ListMeta)
-	if in.Items != nil {
-		in, out := &in.Items, &out.Items
-		*out = make([]PriorityClass, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityClassList.
-func (in *PriorityClassList) DeepCopy() *PriorityClassList {
-	if in == nil {
-		return nil
-	}
-	out := new(PriorityClassList)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *PriorityClassList) DeepCopyObject() runtime.Object {
-	if c := in.DeepCopy(); c != nil {
-		return c
-	}
-	return nil
-}
diff --git a/vendor/k8s.io/api/scheduling/v1alpha1/doc.go b/vendor/k8s.io/api/scheduling/v1alpha1/doc.go
index cff47e1f4..e10d07ff7 100644
--- a/vendor/k8s.io/api/scheduling/v1alpha1/doc.go
+++ b/vendor/k8s.io/api/scheduling/v1alpha1/doc.go
@@ -15,9 +15,7 @@ limitations under the License.
 */
 
 // +k8s:deepcopy-gen=package
-// +k8s:protobuf-gen=package
 // +k8s:openapi-gen=true
 
 // +groupName=scheduling.k8s.io
-
 package v1alpha1 // import "k8s.io/api/scheduling/v1alpha1"
diff --git a/vendor/k8s.io/api/scheduling/v1alpha1/generated.pb.go b/vendor/k8s.io/api/scheduling/v1alpha1/generated.pb.go
index 3fedb7d60..97c07c984 100644
--- a/vendor/k8s.io/api/scheduling/v1alpha1/generated.pb.go
+++ b/vendor/k8s.io/api/scheduling/v1alpha1/generated.pb.go
@@ -14,8 +14,9 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// Code generated by protoc-gen-gogo.
 // source: k8s.io/kubernetes/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto
+// DO NOT EDIT!
 
 /*
 Package v1alpha1 is a generated protocol buffer package.
@@ -33,8 +34,6 @@ import proto "github.com/gogo/protobuf/proto" import fmt "fmt" import math "math" -import k8s_io_api_core_v1 "k8s.io/api/core/v1" - import strings "strings" import reflect "reflect" @@ -101,12 +100,6 @@ func (m *PriorityClass) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Description))) i += copy(dAtA[i:], m.Description) - if m.PreemptionPolicy != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PreemptionPolicy))) - i += copy(dAtA[i:], *m.PreemptionPolicy) - } return i, nil } @@ -148,6 +141,24 @@ func (m *PriorityClassList) MarshalTo(dAtA []byte) (int, error) { return i, nil } +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -166,10 +177,6 @@ func (m *PriorityClass) Size() (n int) { n += 2 l = len(m.Description) n += 1 + l + sovGenerated(uint64(l)) - if m.PreemptionPolicy != nil { - l = len(*m.PreemptionPolicy) - n += 1 + l + sovGenerated(uint64(l)) - } return n } @@ -209,7 +216,6 @@ func (this *PriorityClass) String() string { `Value:` + fmt.Sprintf("%v", this.Value) + `,`, `GlobalDefault:` + fmt.Sprintf("%v", this.GlobalDefault) + `,`, `Description:` + fmt.Sprintf("%v", this.Description) + `,`, - `PreemptionPolicy:` + valueToStringGenerated(this.PreemptionPolicy) + `,`, `}`, }, "") return s @@ -360,36 +366,6 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { } m.Description = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PreemptionPolicy", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := k8s_io_api_core_v1.PreemptionPolicy(dAtA[iNdEx:postIndex]) - m.PreemptionPolicy = &s - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -632,36 +608,33 @@ func init() { } var fileDescriptorGenerated = []byte{ - // 494 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x93, 0x4f, 0x8b, 0xd3, 0x40, - 0x18, 0xc6, 0x3b, 0x5d, 0x0b, 0x75, 0x4a, 0xa1, 0x46, 0x84, 0xd0, 0xc3, 0xb4, 0x74, 0x2f, 0xbd, - 0xec, 0x8c, 0x5d, 0x54, 0x84, 0xbd, 0xd5, 0x85, 0x45, 0x50, 0x2c, 0x39, 0x78, 0x10, 0x0f, 0x4e, - 0xd3, 0x77, 0xd3, 0xb1, 0x49, 0x26, 0xcc, 0x4c, 0x02, 0xbd, 0xf9, 0x11, 0xfc, 0x52, 0x42, 0x8f, - 0x7b, 0xdc, 0x53, 0xb1, 0xf1, 0x23, 0x78, 0xf3, 0x24, 0x49, 0xd3, 0x4d, 0xdb, 0xf8, 0x67, 0x6f, - 0x99, 0xf7, 0xf9, 0x3d, 0xcf, 0xcc, 0x3c, 0x49, 0xf0, 0xd5, 0xe2, 0xa5, 0xa6, 
0x42, 0xb2, 0x45, - 0x3c, 0x05, 0x15, 0x82, 0x01, 0xcd, 0x12, 0x08, 0x67, 0x52, 0xb1, 0x42, 0xe0, 0x91, 0x60, 0xda, - 0x9d, 0xc3, 0x2c, 0xf6, 0x45, 0xe8, 0xb1, 0x64, 0xc4, 0xfd, 0x68, 0xce, 0x47, 0xcc, 0x83, 0x10, - 0x14, 0x37, 0x30, 0xa3, 0x91, 0x92, 0x46, 0x5a, 0x64, 0xcb, 0x53, 0x1e, 0x09, 0x5a, 0xf2, 0x74, - 0xc7, 0x77, 0xcf, 0x3c, 0x61, 0xe6, 0xf1, 0x94, 0xba, 0x32, 0x60, 0x9e, 0xf4, 0x24, 0xcb, 0x6d, - 0xd3, 0xf8, 0x3a, 0x5f, 0xe5, 0x8b, 0xfc, 0x69, 0x1b, 0xd7, 0x1d, 0xec, 0x6d, 0xef, 0x4a, 0x05, - 0x2c, 0xa9, 0x6c, 0xd9, 0x7d, 0x56, 0x32, 0x01, 0x77, 0xe7, 0x22, 0x04, 0xb5, 0x64, 0xd1, 0xc2, - 0xcb, 0x06, 0x9a, 0x05, 0x60, 0xf8, 0x9f, 0x5c, 0xec, 0x6f, 0x2e, 0x15, 0x87, 0x46, 0x04, 0x50, - 0x31, 0xbc, 0xf8, 0x9f, 0x21, 0xbb, 0x6e, 0xc0, 0x8f, 0x7d, 0x83, 0x9f, 0x75, 0xdc, 0x9e, 0x28, - 0x21, 0x95, 0x30, 0xcb, 0x57, 0x3e, 0xd7, 0xda, 0xfa, 0x84, 0x9b, 0xd9, 0xa9, 0x66, 0xdc, 0x70, - 0x1b, 0xf5, 0xd1, 0xb0, 0x75, 0xfe, 0x94, 0x96, 0xb5, 0xdd, 0x85, 0xd3, 0x68, 0xe1, 0x65, 0x03, - 0x4d, 0x33, 0x9a, 0x26, 0x23, 0xfa, 0x6e, 0xfa, 0x19, 0x5c, 0xf3, 0x16, 0x0c, 0x1f, 0x5b, 0xab, - 0x75, 0xaf, 0x96, 0xae, 0x7b, 0xb8, 0x9c, 0x39, 0x77, 0xa9, 0xd6, 0x29, 0x6e, 0x24, 0xdc, 0x8f, - 0xc1, 0xae, 0xf7, 0xd1, 0xb0, 0x31, 0x6e, 0x17, 0x70, 0xe3, 0x7d, 0x36, 0x74, 0xb6, 0x9a, 0x75, - 0x81, 0xdb, 0x9e, 0x2f, 0xa7, 0xdc, 0xbf, 0x84, 0x6b, 0x1e, 0xfb, 0xc6, 0x3e, 0xe9, 0xa3, 0x61, - 0x73, 0xfc, 0xa4, 0x80, 0xdb, 0x57, 0xfb, 0xa2, 0x73, 0xc8, 0x5a, 0xcf, 0x71, 0x6b, 0x06, 0xda, - 0x55, 0x22, 0x32, 0x42, 0x86, 0xf6, 0x83, 0x3e, 0x1a, 0x3e, 0x1c, 0x3f, 0x2e, 0xac, 0xad, 0xcb, - 0x52, 0x72, 0xf6, 0x39, 0xcb, 0xc3, 0x9d, 0x48, 0x01, 0x04, 0xf9, 0x6a, 0x22, 0x7d, 0xe1, 0x2e, - 0xed, 0x46, 0xee, 0xbd, 0x48, 0xd7, 0xbd, 0xce, 0xe4, 0x48, 0xfb, 0xb5, 0xee, 0x9d, 0x56, 0xbf, - 0x00, 0x7a, 0x8c, 0x39, 0x95, 0xd0, 0xc1, 0x37, 0x84, 0x1f, 0x1d, 0xb4, 0xfe, 0x46, 0x68, 0x63, - 0x7d, 0xac, 0x34, 0x4f, 0xef, 0xd7, 0x7c, 0xe6, 0xce, 0x7b, 0xef, 0x14, 0x57, 0x6c, 0xee, 0x26, - 0x7b, 0xad, 0x3b, 0xb8, 0x21, 0x0c, 0x04, 0xda, 0xae, 0xf7, 0x4f, 0x86, 0xad, 0xf3, 0x33, 0xfa, - 0xef, 0x7f, 0x81, 0x1e, 0x9c, 0xaf, 0x7c, 0x49, 0xaf, 0xb3, 0x0c, 0x67, 0x1b, 0x35, 0xa6, 0xab, - 0x0d, 0xa9, 0xdd, 0x6c, 0x48, 0xed, 0x76, 0x43, 0x6a, 0x5f, 0x52, 0x82, 0x56, 0x29, 0x41, 0x37, - 0x29, 0x41, 0xb7, 0x29, 0x41, 0xdf, 0x53, 0x82, 0xbe, 0xfe, 0x20, 0xb5, 0x0f, 0xcd, 0x5d, 0xe6, - 0xef, 0x00, 0x00, 0x00, 0xff, 0xff, 0x55, 0x5c, 0x1a, 0x39, 0xc9, 0x03, 0x00, 0x00, + // 447 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x92, 0x4f, 0x8b, 0xd3, 0x40, + 0x18, 0xc6, 0x33, 0x5d, 0x0b, 0x75, 0x4a, 0x41, 0x23, 0x42, 0xe8, 0x61, 0x36, 0xac, 0x97, 0x5c, + 0x76, 0xc6, 0x2e, 0x2a, 0x82, 0xb7, 0xb8, 0xb0, 0x08, 0x8a, 0x92, 0x83, 0x07, 0xf1, 0xe0, 0x24, + 0x79, 0x37, 0x1d, 0x9b, 0x64, 0xc2, 0xcc, 0x24, 0xb0, 0x37, 0xcf, 0x9e, 0xfc, 0x52, 0x42, 0x8f, + 0x7b, 0xdc, 0xd3, 0x62, 0xe3, 0x17, 0x91, 0xa4, 0x69, 0xd3, 0x5a, 0xfc, 0x73, 0xcb, 0x3c, 0xef, + 0xef, 0x79, 0xe6, 0xcd, 0xc3, 0xe0, 0x8b, 0xc5, 0x73, 0x4d, 0x85, 0x64, 0x8b, 0x32, 0x04, 0x95, + 0x83, 0x01, 0xcd, 0x2a, 0xc8, 0x63, 0xa9, 0x58, 0x37, 0xe0, 0x85, 0x60, 0x3a, 0x9a, 0x43, 0x5c, + 0xa6, 0x22, 0x4f, 0x58, 0x35, 0xe3, 0x69, 0x31, 0xe7, 0x33, 0x96, 0x40, 0x0e, 0x8a, 0x1b, 0x88, + 0x69, 0xa1, 0xa4, 0x91, 0x36, 0x59, 0xf3, 0x94, 0x17, 0x82, 0xf6, 0x3c, 0xdd, 0xf0, 0xd3, 0xd3, + 0x44, 0x98, 0x79, 0x19, 0xd2, 0x48, 0x66, 0x2c, 0x91, 0x89, 0x64, 0xad, 0x2d, 0x2c, 0x2f, 0xdb, + 0x53, 0x7b, 0x68, 0xbf, 0xd6, 0x71, 0xd3, 0x27, 0xfd, 0xf5, 0x19, 
0x8f, 0xe6, 0x22, 0x07, 0x75, + 0xc5, 0x8a, 0x45, 0xd2, 0x08, 0x9a, 0x65, 0x60, 0x38, 0xab, 0x0e, 0x96, 0x98, 0xb2, 0x3f, 0xb9, + 0x54, 0x99, 0x1b, 0x91, 0xc1, 0x81, 0xe1, 0xd9, 0xbf, 0x0c, 0xcd, 0xaf, 0x64, 0xfc, 0x77, 0xdf, + 0xc9, 0xd7, 0x01, 0x9e, 0xbc, 0x53, 0x42, 0x2a, 0x61, 0xae, 0x5e, 0xa6, 0x5c, 0x6b, 0xfb, 0x13, + 0x1e, 0x35, 0x5b, 0xc5, 0xdc, 0x70, 0x07, 0xb9, 0xc8, 0x1b, 0x9f, 0x3d, 0xa6, 0x7d, 0x25, 0xdb, + 0x70, 0x5a, 0x2c, 0x92, 0x46, 0xd0, 0xb4, 0xa1, 0x69, 0x35, 0xa3, 0x6f, 0xc3, 0xcf, 0x10, 0x99, + 0x37, 0x60, 0xb8, 0x6f, 0x2f, 0x6f, 0x8f, 0xad, 0xfa, 0xf6, 0x18, 0xf7, 0x5a, 0xb0, 0x4d, 0xb5, + 0x1f, 0xe1, 0x61, 0xc5, 0xd3, 0x12, 0x9c, 0x81, 0x8b, 0xbc, 0xa1, 0x3f, 0xe9, 0xe0, 0xe1, 0xfb, + 0x46, 0x0c, 0xd6, 0x33, 0xfb, 0x05, 0x9e, 0x24, 0xa9, 0x0c, 0x79, 0x7a, 0x0e, 0x97, 0xbc, 0x4c, + 0x8d, 0x73, 0xe4, 0x22, 0x6f, 0xe4, 0x3f, 0xec, 0xe0, 0xc9, 0xc5, 0xee, 0x30, 0xd8, 0x67, 0xed, + 0xa7, 0x78, 0x1c, 0x83, 0x8e, 0x94, 0x28, 0x8c, 0x90, 0xb9, 0x73, 0xc7, 0x45, 0xde, 0x5d, 0xff, + 0x41, 0x67, 0x1d, 0x9f, 0xf7, 0xa3, 0x60, 0x97, 0x3b, 0xf9, 0x8e, 0xf0, 0xfd, 0xbd, 0x32, 0x5e, + 0x0b, 0x6d, 0xec, 0x8f, 0x07, 0x85, 0xd0, 0xff, 0x2b, 0xa4, 0x71, 0xb7, 0x75, 0xdc, 0xeb, 0x6e, + 0x1e, 0x6d, 0x94, 0x9d, 0x32, 0x02, 0x3c, 0x14, 0x06, 0x32, 0xed, 0x0c, 0xdc, 0x23, 0x6f, 0x7c, + 0x76, 0x4a, 0xff, 0xfe, 0xfc, 0xe8, 0xde, 0x7e, 0x7d, 0x77, 0xaf, 0x9a, 0x8c, 0x60, 0x1d, 0xe5, + 0xd3, 0xe5, 0x8a, 0x58, 0xd7, 0x2b, 0x62, 0xdd, 0xac, 0x88, 0xf5, 0xa5, 0x26, 0x68, 0x59, 0x13, + 0x74, 0x5d, 0x13, 0x74, 0x53, 0x13, 0xf4, 0xa3, 0x26, 0xe8, 0xdb, 0x4f, 0x62, 0x7d, 0x18, 0x6d, + 0x32, 0x7f, 0x05, 0x00, 0x00, 0xff, 0xff, 0xab, 0x20, 0x12, 0x63, 0x3c, 0x03, 0x00, 0x00, } diff --git a/vendor/k8s.io/api/scheduling/v1alpha1/types.go b/vendor/k8s.io/api/scheduling/v1alpha1/types.go index c1a09bce8..21e3df0af 100644 --- a/vendor/k8s.io/api/scheduling/v1alpha1/types.go +++ b/vendor/k8s.io/api/scheduling/v1alpha1/types.go @@ -17,7 +17,6 @@ limitations under the License. package v1alpha1 import ( - apiv1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -25,7 +24,6 @@ import ( // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// DEPRECATED - This group version of PriorityClass is deprecated by scheduling.k8s.io/v1/PriorityClass. // PriorityClass defines mapping from a priority class name to the priority // integer value. The value can be any valid integer. type PriorityClass struct { @@ -51,13 +49,6 @@ type PriorityClass struct { // when this priority class should be used. // +optional Description string `json:"description,omitempty" protobuf:"bytes,4,opt,name=description"` - - // PreemptionPolicy is the Policy for preempting pods with lower priority. - // One of Never, PreemptLowerPriority. - // Defaults to PreemptLowerPriority if unset. - // This field is alpha-level and is only honored by servers that enable the NonPreemptingPriority feature. 
-	// +optional
-	PreemptionPolicy *apiv1.PreemptionPolicy `json:"preemptionPolicy,omitempty" protobuf:"bytes,5,opt,name=preemptionPolicy"`
 }
 
 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
diff --git a/vendor/k8s.io/api/scheduling/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/scheduling/v1alpha1/types_swagger_doc_generated.go
index f9880922a..f406f4402 100644
--- a/vendor/k8s.io/api/scheduling/v1alpha1/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/scheduling/v1alpha1/types_swagger_doc_generated.go
@@ -28,12 +28,11 @@ package v1alpha1
 
 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
 var map_PriorityClass = map[string]string{
-	"":                 "DEPRECATED - This group version of PriorityClass is deprecated by scheduling.k8s.io/v1/PriorityClass. PriorityClass defines mapping from a priority class name to the priority integer value. The value can be any valid integer.",
-	"metadata":         "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
-	"value":            "The value of this priority class. This is the actual priority that pods receive when they have the name of this class in their pod spec.",
-	"globalDefault":    "globalDefault specifies whether this PriorityClass should be considered as the default priority for pods that do not have any priority class. Only one PriorityClass can be marked as `globalDefault`. However, if more than one PriorityClasses exists with their `globalDefault` field set to true, the smallest value of such global default PriorityClasses will be used as the default priority.",
-	"description":      "description is an arbitrary string that usually provides guidelines on when this priority class should be used.",
-	"preemptionPolicy": "PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. This field is alpha-level and is only honored by servers that enable the NonPreemptingPriority feature.",
+	"":              "PriorityClass defines mapping from a priority class name to the priority integer value. The value can be any valid integer.",
+	"metadata":      "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
+	"value":         "The value of this priority class. This is the actual priority that pods receive when they have the name of this class in their pod spec.",
+	"globalDefault": "globalDefault specifies whether this PriorityClass should be considered as the default priority for pods that do not have any priority class. Only one PriorityClass can be marked as `globalDefault`. However, if more than one PriorityClasses exists with their `globalDefault` field set to true, the smallest value of such global default PriorityClasses will be used as the default priority.",
+	"description":   "description is an arbitrary string that usually provides guidelines on when this priority class should be used.",
 }
 
 func (PriorityClass) SwaggerDoc() map[string]string {
diff --git a/vendor/k8s.io/api/scheduling/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/scheduling/v1alpha1/zz_generated.deepcopy.go
index 039282397..fe0c86040 100644
--- a/vendor/k8s.io/api/scheduling/v1alpha1/zz_generated.deepcopy.go
+++ b/vendor/k8s.io/api/scheduling/v1alpha1/zz_generated.deepcopy.go
@@ -21,7 +21,6 @@ limitations under the License.
 package v1alpha1
 
 import (
-	v1 "k8s.io/api/core/v1"
 	runtime "k8s.io/apimachinery/pkg/runtime"
 )
 
@@ -30,11 +29,6 @@ func (in *PriorityClass) DeepCopyInto(out *PriorityClass) {
 	*out = *in
 	out.TypeMeta = in.TypeMeta
 	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
-	if in.PreemptionPolicy != nil {
-		in, out := &in.PreemptionPolicy, &out.PreemptionPolicy
-		*out = new(v1.PreemptionPolicy)
-		**out = **in
-	}
 	return
 }
 
@@ -60,7 +54,7 @@ func (in *PriorityClass) DeepCopyObject() runtime.Object {
 func (in *PriorityClassList) DeepCopyInto(out *PriorityClassList) {
 	*out = *in
 	out.TypeMeta = in.TypeMeta
-	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	out.ListMeta = in.ListMeta
 	if in.Items != nil {
 		in, out := &in.Items, &out.Items
 		*out = make([]PriorityClass, len(*in))
diff --git a/vendor/k8s.io/api/scheduling/v1beta1/doc.go b/vendor/k8s.io/api/scheduling/v1beta1/doc.go
index e66196898..f2dd1cfac 100644
--- a/vendor/k8s.io/api/scheduling/v1beta1/doc.go
+++ b/vendor/k8s.io/api/scheduling/v1beta1/doc.go
@@ -15,9 +15,7 @@ limitations under the License.
 */
 
 // +k8s:deepcopy-gen=package
-// +k8s:protobuf-gen=package
 // +k8s:openapi-gen=true
 
 // +groupName=scheduling.k8s.io
-
 package v1beta1 // import "k8s.io/api/scheduling/v1beta1"
diff --git a/vendor/k8s.io/api/scheduling/v1beta1/generated.pb.go b/vendor/k8s.io/api/scheduling/v1beta1/generated.pb.go
index 58bbf835d..ea8f8d5e6 100644
--- a/vendor/k8s.io/api/scheduling/v1beta1/generated.pb.go
+++ b/vendor/k8s.io/api/scheduling/v1beta1/generated.pb.go
@@ -14,8 +14,9 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// Code generated by protoc-gen-gogo.
 // source: k8s.io/kubernetes/vendor/k8s.io/api/scheduling/v1beta1/generated.proto
+// DO NOT EDIT!
 
 /*
 Package v1beta1 is a generated protocol buffer package.
@@ -33,8 +34,6 @@ import proto "github.com/gogo/protobuf/proto" import fmt "fmt" import math "math" -import k8s_io_api_core_v1 "k8s.io/api/core/v1" - import strings "strings" import reflect "reflect" @@ -101,12 +100,6 @@ func (m *PriorityClass) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Description))) i += copy(dAtA[i:], m.Description) - if m.PreemptionPolicy != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PreemptionPolicy))) - i += copy(dAtA[i:], *m.PreemptionPolicy) - } return i, nil } @@ -148,6 +141,24 @@ func (m *PriorityClassList) MarshalTo(dAtA []byte) (int, error) { return i, nil } +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -166,10 +177,6 @@ func (m *PriorityClass) Size() (n int) { n += 2 l = len(m.Description) n += 1 + l + sovGenerated(uint64(l)) - if m.PreemptionPolicy != nil { - l = len(*m.PreemptionPolicy) - n += 1 + l + sovGenerated(uint64(l)) - } return n } @@ -209,7 +216,6 @@ func (this *PriorityClass) String() string { `Value:` + fmt.Sprintf("%v", this.Value) + `,`, `GlobalDefault:` + fmt.Sprintf("%v", this.GlobalDefault) + `,`, `Description:` + fmt.Sprintf("%v", this.Description) + `,`, - `PreemptionPolicy:` + valueToStringGenerated(this.PreemptionPolicy) + `,`, `}`, }, "") return s @@ -360,36 +366,6 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { } m.Description = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PreemptionPolicy", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := k8s_io_api_core_v1.PreemptionPolicy(dAtA[iNdEx:postIndex]) - m.PreemptionPolicy = &s - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -632,36 +608,33 @@ func init() { } var fileDescriptorGenerated = []byte{ - // 494 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x93, 0x3f, 0x8f, 0xd3, 0x30, - 0x18, 0xc6, 0xeb, 0x1e, 0x15, 0xc5, 0x55, 0xa5, 0x12, 0x84, 0x14, 0x55, 0x22, 0xad, 0x7a, 0x4b, - 0x07, 0xce, 0xa6, 0x27, 0x40, 0x48, 0xb7, 0x95, 0x13, 0x08, 0x09, 0x44, 0xc9, 0xc0, 0x80, 0x18, - 0x70, 0x92, 0xf7, 0x52, 0xd3, 0x24, 0x8e, 0x6c, 0x27, 0x52, 0x37, 0x3e, 0x02, 0x1f, 0x8a, 0xa1, - 0xe3, 0x8d, 0x37, 0x55, 0x34, 0x7c, 0x04, 0x36, 0x26, 0x94, 0x34, 0x5c, 0xda, 0x86, 0x7f, 0x5b, - 0xfc, 0x3e, 0xbf, 0xe7, 0xb1, 0xfd, 0x24, 0xc1, 0xcf, 0x16, 0x4f, 0x14, 0xe1, 
0x82, 0x2e, 0x12, - 0x07, 0x64, 0x04, 0x1a, 0x14, 0x4d, 0x21, 0xf2, 0x84, 0xa4, 0xa5, 0xc0, 0x62, 0x4e, 0x95, 0x3b, - 0x07, 0x2f, 0x09, 0x78, 0xe4, 0xd3, 0x74, 0xe2, 0x80, 0x66, 0x13, 0xea, 0x43, 0x04, 0x92, 0x69, - 0xf0, 0x48, 0x2c, 0x85, 0x16, 0xc6, 0xbd, 0x2d, 0x4e, 0x58, 0xcc, 0x49, 0x85, 0x93, 0x12, 0xef, - 0x9f, 0xf8, 0x5c, 0xcf, 0x13, 0x87, 0xb8, 0x22, 0xa4, 0xbe, 0xf0, 0x05, 0x2d, 0x5c, 0x4e, 0x72, - 0x51, 0xac, 0x8a, 0x45, 0xf1, 0xb4, 0x4d, 0xeb, 0x8f, 0x76, 0x36, 0x77, 0x85, 0x04, 0x9a, 0xd6, - 0x76, 0xec, 0x3f, 0xac, 0x98, 0x90, 0xb9, 0x73, 0x1e, 0x81, 0x5c, 0xd2, 0x78, 0xe1, 0xe7, 0x03, - 0x45, 0x43, 0xd0, 0xec, 0x77, 0x2e, 0xfa, 0x27, 0x97, 0x4c, 0x22, 0xcd, 0x43, 0xa8, 0x19, 0x1e, - 0xff, 0xcb, 0x90, 0xdf, 0x36, 0x64, 0x87, 0xbe, 0xd1, 0xf7, 0x26, 0xee, 0xce, 0x24, 0x17, 0x92, - 0xeb, 0xe5, 0xd3, 0x80, 0x29, 0x65, 0x7c, 0xc0, 0xed, 0xfc, 0x54, 0x1e, 0xd3, 0xcc, 0x44, 0x43, - 0x34, 0xee, 0x9c, 0x3e, 0x20, 0x55, 0x6b, 0xd7, 0xe1, 0x24, 0x5e, 0xf8, 0xf9, 0x40, 0x91, 0x9c, - 0x26, 0xe9, 0x84, 0xbc, 0x76, 0x3e, 0x82, 0xab, 0x5f, 0x81, 0x66, 0x53, 0x63, 0xb5, 0x1e, 0x34, - 0xb2, 0xf5, 0x00, 0x57, 0x33, 0xfb, 0x3a, 0xd5, 0x38, 0xc6, 0xad, 0x94, 0x05, 0x09, 0x98, 0xcd, - 0x21, 0x1a, 0xb7, 0xa6, 0xdd, 0x12, 0x6e, 0xbd, 0xcd, 0x87, 0xf6, 0x56, 0x33, 0xce, 0x70, 0xd7, - 0x0f, 0x84, 0xc3, 0x82, 0x73, 0xb8, 0x60, 0x49, 0xa0, 0xcd, 0xa3, 0x21, 0x1a, 0xb7, 0xa7, 0x77, - 0x4b, 0xb8, 0xfb, 0x7c, 0x57, 0xb4, 0xf7, 0x59, 0xe3, 0x11, 0xee, 0x78, 0xa0, 0x5c, 0xc9, 0x63, - 0xcd, 0x45, 0x64, 0xde, 0x18, 0xa2, 0xf1, 0xad, 0xe9, 0x9d, 0xd2, 0xda, 0x39, 0xaf, 0x24, 0x7b, - 0x97, 0x33, 0x7c, 0xdc, 0x8b, 0x25, 0x40, 0x58, 0xac, 0x66, 0x22, 0xe0, 0xee, 0xd2, 0x6c, 0x15, - 0xde, 0xb3, 0x6c, 0x3d, 0xe8, 0xcd, 0x0e, 0xb4, 0x1f, 0xeb, 0xc1, 0x71, 0xfd, 0x0b, 0x20, 0x87, - 0x98, 0x5d, 0x0b, 0x1d, 0x7d, 0x41, 0xf8, 0xf6, 0x5e, 0xeb, 0x2f, 0xb9, 0xd2, 0xc6, 0xfb, 0x5a, - 0xf3, 0xe4, 0xff, 0x9a, 0xcf, 0xdd, 0x45, 0xef, 0xbd, 0xf2, 0x8a, 0xed, 0x5f, 0x93, 0x9d, 0xd6, - 0xdf, 0xe0, 0x16, 0xd7, 0x10, 0x2a, 0xb3, 0x39, 0x3c, 0x1a, 0x77, 0x4e, 0xef, 0x93, 0xbf, 0xfe, - 0x0a, 0x64, 0xef, 0x78, 0xd5, 0x3b, 0x7a, 0x91, 0x47, 0xd8, 0xdb, 0xa4, 0xe9, 0xc9, 0x6a, 0x63, - 0x35, 0x2e, 0x37, 0x56, 0xe3, 0x6a, 0x63, 0x35, 0x3e, 0x65, 0x16, 0x5a, 0x65, 0x16, 0xba, 0xcc, - 0x2c, 0x74, 0x95, 0x59, 0xe8, 0x6b, 0x66, 0xa1, 0xcf, 0xdf, 0xac, 0xc6, 0xbb, 0x9b, 0x65, 0xe4, - 0xcf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x1a, 0xc2, 0xc0, 0x1f, 0xc5, 0x03, 0x00, 0x00, + // 448 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x92, 0xc1, 0x8b, 0xd3, 0x40, + 0x18, 0xc5, 0x33, 0x5d, 0x8b, 0x75, 0x4a, 0x41, 0x23, 0x42, 0x28, 0x38, 0x1b, 0xd6, 0x4b, 0x0e, + 0xee, 0x8c, 0x5d, 0x54, 0x04, 0x6f, 0x71, 0x51, 0x04, 0x45, 0xcd, 0xc1, 0x83, 0x78, 0x70, 0x92, + 0x7c, 0x9b, 0x8e, 0x4d, 0x32, 0x61, 0x66, 0x12, 0xd8, 0x9b, 0x67, 0x4f, 0xfe, 0x51, 0x1e, 0x7a, + 0xdc, 0xe3, 0x9e, 0x16, 0x1b, 0xff, 0x11, 0x49, 0x1a, 0x37, 0xad, 0x45, 0xdd, 0x5b, 0xe6, 0x7d, + 0xbf, 0xf7, 0xe6, 0xcb, 0x63, 0xf0, 0xf3, 0xc5, 0x13, 0x4d, 0x85, 0x64, 0x8b, 0x32, 0x04, 0x95, + 0x83, 0x01, 0xcd, 0x2a, 0xc8, 0x63, 0xa9, 0x58, 0x37, 0xe0, 0x85, 0x60, 0x3a, 0x9a, 0x43, 0x5c, + 0xa6, 0x22, 0x4f, 0x58, 0x35, 0x0b, 0xc1, 0xf0, 0x19, 0x4b, 0x20, 0x07, 0xc5, 0x0d, 0xc4, 0xb4, + 0x50, 0xd2, 0x48, 0xfb, 0xee, 0x1a, 0xa7, 0xbc, 0x10, 0xb4, 0xc7, 0x69, 0x87, 0x4f, 0x0f, 0x13, + 0x61, 0xe6, 0x65, 0x48, 0x23, 0x99, 0xb1, 0x44, 0x26, 0x92, 0xb5, 0xae, 0xb0, 0x3c, 0x69, 0x4f, + 0xed, 0xa1, 0xfd, 0x5a, 0xa7, 0x4d, 0x1f, 0xf6, 0x97, 0x67, 0x3c, 
0x9a, 0x8b, 0x1c, 0xd4, 0x29, + 0x2b, 0x16, 0x49, 0x23, 0x68, 0x96, 0x81, 0xe1, 0xac, 0xda, 0xd9, 0x61, 0xca, 0xfe, 0xe6, 0x52, + 0x65, 0x6e, 0x44, 0x06, 0x3b, 0x86, 0xc7, 0xff, 0x33, 0x34, 0x7f, 0x92, 0xf1, 0x3f, 0x7d, 0x07, + 0x5f, 0x07, 0x78, 0xf2, 0x56, 0x09, 0xa9, 0x84, 0x39, 0x7d, 0x96, 0x72, 0xad, 0xed, 0x4f, 0x78, + 0xd4, 0x6c, 0x15, 0x73, 0xc3, 0x1d, 0xe4, 0x22, 0x6f, 0x7c, 0xf4, 0x80, 0xf6, 0x8d, 0x5c, 0x86, + 0xd3, 0x62, 0x91, 0x34, 0x82, 0xa6, 0x0d, 0x4d, 0xab, 0x19, 0x7d, 0x13, 0x7e, 0x86, 0xc8, 0xbc, + 0x06, 0xc3, 0x7d, 0x7b, 0x79, 0xb1, 0x6f, 0xd5, 0x17, 0xfb, 0xb8, 0xd7, 0x82, 0xcb, 0x54, 0xfb, + 0x1e, 0x1e, 0x56, 0x3c, 0x2d, 0xc1, 0x19, 0xb8, 0xc8, 0x1b, 0xfa, 0x93, 0x0e, 0x1e, 0xbe, 0x6f, + 0xc4, 0x60, 0x3d, 0xb3, 0x9f, 0xe2, 0x49, 0x92, 0xca, 0x90, 0xa7, 0xc7, 0x70, 0xc2, 0xcb, 0xd4, + 0x38, 0x7b, 0x2e, 0xf2, 0x46, 0xfe, 0x9d, 0x0e, 0x9e, 0xbc, 0xd8, 0x1c, 0x06, 0xdb, 0xac, 0xfd, + 0x08, 0x8f, 0x63, 0xd0, 0x91, 0x12, 0x85, 0x11, 0x32, 0x77, 0xae, 0xb9, 0xc8, 0xbb, 0xe1, 0xdf, + 0xee, 0xac, 0xe3, 0xe3, 0x7e, 0x14, 0x6c, 0x72, 0x07, 0xdf, 0x11, 0xbe, 0xb5, 0x55, 0xc6, 0x2b, + 0xa1, 0x8d, 0xfd, 0x71, 0xa7, 0x10, 0x7a, 0xb5, 0x42, 0x1a, 0x77, 0x5b, 0xc7, 0xcd, 0xee, 0xe6, + 0xd1, 0x6f, 0x65, 0xa3, 0x8c, 0x77, 0x78, 0x28, 0x0c, 0x64, 0xda, 0x19, 0xb8, 0x7b, 0xde, 0xf8, + 0xe8, 0x3e, 0xfd, 0xe7, 0xeb, 0xa3, 0x5b, 0xeb, 0xf5, 0xd5, 0xbd, 0x6c, 0x22, 0x82, 0x75, 0x92, + 0x7f, 0xb8, 0x5c, 0x11, 0xeb, 0x6c, 0x45, 0xac, 0xf3, 0x15, 0xb1, 0xbe, 0xd4, 0x04, 0x2d, 0x6b, + 0x82, 0xce, 0x6a, 0x82, 0xce, 0x6b, 0x82, 0x7e, 0xd4, 0x04, 0x7d, 0xfb, 0x49, 0xac, 0x0f, 0xd7, + 0xbb, 0xc8, 0x5f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x41, 0x74, 0x8a, 0x60, 0x38, 0x03, 0x00, 0x00, } diff --git a/vendor/k8s.io/api/scheduling/v1beta1/types.go b/vendor/k8s.io/api/scheduling/v1beta1/types.go index f806ecd4c..a9aaa8665 100644 --- a/vendor/k8s.io/api/scheduling/v1beta1/types.go +++ b/vendor/k8s.io/api/scheduling/v1beta1/types.go @@ -17,7 +17,6 @@ limitations under the License. package v1beta1 import ( - apiv1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -25,13 +24,12 @@ import ( // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// DEPRECATED - This group version of PriorityClass is deprecated by scheduling.k8s.io/v1/PriorityClass. // PriorityClass defines mapping from a priority class name to the priority // integer value. The value can be any valid integer. type PriorityClass struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -51,13 +49,6 @@ type PriorityClass struct { // when this priority class should be used. // +optional Description string `json:"description,omitempty" protobuf:"bytes,4,opt,name=description"` - - // PreemptionPolicy is the Policy for preempting pods with lower priority. - // One of Never, PreemptLowerPriority. - // Defaults to PreemptLowerPriority if unset. - // This field is alpha-level and is only honored by servers that enable the NonPreemptingPriority feature. 
- // +optional - PreemptionPolicy *apiv1.PreemptionPolicy `json:"preemptionPolicy,omitempty" protobuf:"bytes,5,opt,name=preemptionPolicy"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -66,7 +57,7 @@ type PriorityClass struct { type PriorityClassList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` diff --git a/vendor/k8s.io/api/scheduling/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/scheduling/v1beta1/types_swagger_doc_generated.go index ffded9df0..c18f54a82 100644 --- a/vendor/k8s.io/api/scheduling/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/scheduling/v1beta1/types_swagger_doc_generated.go @@ -28,12 +28,11 @@ package v1beta1 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_PriorityClass = map[string]string{ - "": "DEPRECATED - This group version of PriorityClass is deprecated by scheduling.k8s.io/v1/PriorityClass. PriorityClass defines mapping from a priority class name to the priority integer value. The value can be any valid integer.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "value": "The value of this priority class. This is the actual priority that pods receive when they have the name of this class in their pod spec.", - "globalDefault": "globalDefault specifies whether this PriorityClass should be considered as the default priority for pods that do not have any priority class. Only one PriorityClass can be marked as `globalDefault`. However, if more than one PriorityClasses exists with their `globalDefault` field set to true, the smallest value of such global default PriorityClasses will be used as the default priority.", - "description": "description is an arbitrary string that usually provides guidelines on when this priority class should be used.", - "preemptionPolicy": "PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. This field is alpha-level and is only honored by servers that enable the NonPreemptingPriority feature.", + "": "PriorityClass defines mapping from a priority class name to the priority integer value. The value can be any valid integer.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "value": "The value of this priority class. This is the actual priority that pods receive when they have the name of this class in their pod spec.", + "globalDefault": "globalDefault specifies whether this PriorityClass should be considered as the default priority for pods that do not have any priority class. Only one PriorityClass can be marked as `globalDefault`. 
However, if more than one PriorityClasses exists with their `globalDefault` field set to true, the smallest value of such global default PriorityClasses will be used as the default priority.", + "description": "description is an arbitrary string that usually provides guidelines on when this priority class should be used.", } func (PriorityClass) SwaggerDoc() map[string]string { @@ -42,7 +41,7 @@ func (PriorityClass) SwaggerDoc() map[string]string { var map_PriorityClassList = map[string]string{ "": "PriorityClassList is a collection of priority classes.", - "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", "items": "items is the list of PriorityClasses", } diff --git a/vendor/k8s.io/api/scheduling/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/scheduling/v1beta1/zz_generated.deepcopy.go index 6e2008578..6f68e4ac5 100644 --- a/vendor/k8s.io/api/scheduling/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/scheduling/v1beta1/zz_generated.deepcopy.go @@ -21,7 +21,6 @@ limitations under the License. package v1beta1 import ( - v1 "k8s.io/api/core/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) @@ -30,11 +29,6 @@ func (in *PriorityClass) DeepCopyInto(out *PriorityClass) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - if in.PreemptionPolicy != nil { - in, out := &in.PreemptionPolicy, &out.PreemptionPolicy - *out = new(v1.PreemptionPolicy) - **out = **in - } return } @@ -60,7 +54,7 @@ func (in *PriorityClass) DeepCopyObject() runtime.Object { func (in *PriorityClassList) DeepCopyInto(out *PriorityClassList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]PriorityClass, len(*in)) diff --git a/vendor/k8s.io/api/settings/v1alpha1/doc.go b/vendor/k8s.io/api/settings/v1alpha1/doc.go index 60066bb6d..05a62c569 100644 --- a/vendor/k8s.io/api/settings/v1alpha1/doc.go +++ b/vendor/k8s.io/api/settings/v1alpha1/doc.go @@ -15,9 +15,7 @@ limitations under the License. */ // +k8s:deepcopy-gen=package -// +k8s:protobuf-gen=package // +k8s:openapi-gen=true // +groupName=settings.k8s.io - package v1alpha1 // import "k8s.io/api/settings/v1alpha1" diff --git a/vendor/k8s.io/api/settings/v1alpha1/generated.pb.go b/vendor/k8s.io/api/settings/v1alpha1/generated.pb.go index c84213105..15285bae5 100644 --- a/vendor/k8s.io/api/settings/v1alpha1/generated.pb.go +++ b/vendor/k8s.io/api/settings/v1alpha1/generated.pb.go @@ -14,8 +14,9 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// Code generated by protoc-gen-gogo. // source: k8s.io/kubernetes/vendor/k8s.io/api/settings/v1alpha1/generated.proto +// DO NOT EDIT! /* Package v1alpha1 is a generated protocol buffer package. 
@@ -215,6 +216,24 @@ func (m *PodPresetSpec) MarshalTo(dAtA []byte) (int, error) { return i, nil } +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) diff --git a/vendor/k8s.io/api/settings/v1alpha1/types.go b/vendor/k8s.io/api/settings/v1alpha1/types.go index 8cc99d440..506aacf4a 100644 --- a/vendor/k8s.io/api/settings/v1alpha1/types.go +++ b/vendor/k8s.io/api/settings/v1alpha1/types.go @@ -61,7 +61,7 @@ type PodPresetSpec struct { type PodPresetList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` diff --git a/vendor/k8s.io/api/settings/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/settings/v1alpha1/types_swagger_doc_generated.go index 0501e0af3..508c452f1 100644 --- a/vendor/k8s.io/api/settings/v1alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/settings/v1alpha1/types_swagger_doc_generated.go @@ -37,7 +37,7 @@ func (PodPreset) SwaggerDoc() map[string]string { var map_PodPresetList = map[string]string{ "": "PodPresetList is a list of PodPreset objects.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", "items": "Items is a list of schema objects.", } diff --git a/vendor/k8s.io/api/settings/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/settings/v1alpha1/zz_generated.deepcopy.go index ed6c31a32..6397a88ab 100644 --- a/vendor/k8s.io/api/settings/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/settings/v1alpha1/zz_generated.deepcopy.go @@ -56,7 +56,7 @@ func (in *PodPreset) DeepCopyObject() runtime.Object { func (in *PodPresetList) DeepCopyInto(out *PodPresetList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]PodPreset, len(*in)) diff --git a/vendor/k8s.io/api/storage/v1/doc.go b/vendor/k8s.io/api/storage/v1/doc.go index 75a6489da..8f4a4045c 100644 --- a/vendor/k8s.io/api/storage/v1/doc.go +++ b/vendor/k8s.io/api/storage/v1/doc.go @@ -15,8 +15,6 @@ limitations under the License. 
*/ // +k8s:deepcopy-gen=package -// +k8s:protobuf-gen=package // +groupName=storage.k8s.io // +k8s:openapi-gen=true - -package v1 // import "k8s.io/api/storage/v1" +package v1 diff --git a/vendor/k8s.io/api/storage/v1/generated.pb.go b/vendor/k8s.io/api/storage/v1/generated.pb.go index 96bba0537..d43a98298 100644 --- a/vendor/k8s.io/api/storage/v1/generated.pb.go +++ b/vendor/k8s.io/api/storage/v1/generated.pb.go @@ -14,8 +14,9 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// Code generated by protoc-gen-gogo. // source: k8s.io/kubernetes/vendor/k8s.io/api/storage/v1/generated.proto +// DO NOT EDIT! /* Package v1 is a generated protocol buffer package. @@ -26,12 +27,6 @@ limitations under the License. It has these top-level messages: StorageClass StorageClassList - VolumeAttachment - VolumeAttachmentList - VolumeAttachmentSource - VolumeAttachmentSpec - VolumeAttachmentStatus - VolumeError */ package v1 @@ -67,39 +62,9 @@ func (m *StorageClassList) Reset() { *m = StorageClassList{} func (*StorageClassList) ProtoMessage() {} func (*StorageClassList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } -func (m *VolumeAttachment) Reset() { *m = VolumeAttachment{} } -func (*VolumeAttachment) ProtoMessage() {} -func (*VolumeAttachment) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } - -func (m *VolumeAttachmentList) Reset() { *m = VolumeAttachmentList{} } -func (*VolumeAttachmentList) ProtoMessage() {} -func (*VolumeAttachmentList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } - -func (m *VolumeAttachmentSource) Reset() { *m = VolumeAttachmentSource{} } -func (*VolumeAttachmentSource) ProtoMessage() {} -func (*VolumeAttachmentSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } - -func (m *VolumeAttachmentSpec) Reset() { *m = VolumeAttachmentSpec{} } -func (*VolumeAttachmentSpec) ProtoMessage() {} -func (*VolumeAttachmentSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } - -func (m *VolumeAttachmentStatus) Reset() { *m = VolumeAttachmentStatus{} } -func (*VolumeAttachmentStatus) ProtoMessage() {} -func (*VolumeAttachmentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } - -func (m *VolumeError) Reset() { *m = VolumeError{} } -func (*VolumeError) ProtoMessage() {} -func (*VolumeError) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } - func init() { proto.RegisterType((*StorageClass)(nil), "k8s.io.api.storage.v1.StorageClass") proto.RegisterType((*StorageClassList)(nil), "k8s.io.api.storage.v1.StorageClassList") - proto.RegisterType((*VolumeAttachment)(nil), "k8s.io.api.storage.v1.VolumeAttachment") - proto.RegisterType((*VolumeAttachmentList)(nil), "k8s.io.api.storage.v1.VolumeAttachmentList") - proto.RegisterType((*VolumeAttachmentSource)(nil), "k8s.io.api.storage.v1.VolumeAttachmentSource") - proto.RegisterType((*VolumeAttachmentSpec)(nil), "k8s.io.api.storage.v1.VolumeAttachmentSpec") - proto.RegisterType((*VolumeAttachmentStatus)(nil), "k8s.io.api.storage.v1.VolumeAttachmentStatus") - proto.RegisterType((*VolumeError)(nil), "k8s.io.api.storage.v1.VolumeError") } func (m *StorageClass) Marshal() (dAtA []byte, err error) { size := m.Size() @@ -240,252 +205,24 @@ func (m *StorageClassList) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func (m *VolumeAttachment) Marshal() (dAtA []byte, err 
error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *VolumeAttachment) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n3, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n3 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n4, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n4 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n5, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n5 - return i, nil -} - -func (m *VolumeAttachmentList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *VolumeAttachmentList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n6, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n6 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *VolumeAttachmentSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *VolumeAttachmentSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.PersistentVolumeName != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PersistentVolumeName))) - i += copy(dAtA[i:], *m.PersistentVolumeName) - } - if m.InlineVolumeSpec != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.InlineVolumeSpec.Size())) - n7, err := m.InlineVolumeSpec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n7 - } - return i, nil -} - -func (m *VolumeAttachmentSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *VolumeAttachmentSpec) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Attacher))) - i += copy(dAtA[i:], m.Attacher) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Source.Size())) - n8, err := m.Source.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n8 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.NodeName))) - i += copy(dAtA[i:], m.NodeName) - return i, nil -} - -func (m *VolumeAttachmentStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *VolumeAttachmentStatus) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0x8 - i++ - if m.Attached { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - if len(m.AttachmentMetadata) > 0 { - 
keysForAttachmentMetadata := make([]string, 0, len(m.AttachmentMetadata)) - for k := range m.AttachmentMetadata { - keysForAttachmentMetadata = append(keysForAttachmentMetadata, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForAttachmentMetadata) - for _, k := range keysForAttachmentMetadata { - dAtA[i] = 0x12 - i++ - v := m.AttachmentMetadata[string(k)] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) - } - } - if m.AttachError != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AttachError.Size())) - n9, err := m.AttachError.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n9 - } - if m.DetachError != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.DetachError.Size())) - n10, err := m.DetachError.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n10 - } - return i, nil -} - -func (m *VolumeError) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *VolumeError) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Time.Size())) - n11, err := m.Time.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n11 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - return i, nil +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 } - func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -550,91 +287,6 @@ func (m *StorageClassList) Size() (n int) { return n } -func (m *VolumeAttachment) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *VolumeAttachmentList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *VolumeAttachmentSource) Size() (n int) { - var l int - _ = l - if m.PersistentVolumeName != nil { - l = len(*m.PersistentVolumeName) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.InlineVolumeSpec != nil { - l = m.InlineVolumeSpec.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *VolumeAttachmentSpec) Size() (n int) { - var l int - _ = l - l = len(m.Attacher) - n += 1 + l + sovGenerated(uint64(l)) - l = 
m.Source.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.NodeName) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *VolumeAttachmentStatus) Size() (n int) { - var l int - _ = l - n += 2 - if len(m.AttachmentMetadata) > 0 { - for k, v := range m.AttachmentMetadata { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - if m.AttachError != nil { - l = m.AttachError.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.DetachError != nil { - l = m.DetachError.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *VolumeError) Size() (n int) { - var l int - _ = l - l = m.Time.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - func sovGenerated(x uint64) (n int) { for { n++ @@ -679,945 +331,22 @@ func (this *StorageClassList) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&StorageClassList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "StorageClass", "StorageClass", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *VolumeAttachment) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&VolumeAttachment{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "VolumeAttachmentSpec", "VolumeAttachmentSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "VolumeAttachmentStatus", "VolumeAttachmentStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *VolumeAttachmentList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&VolumeAttachmentList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "VolumeAttachment", "VolumeAttachment", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *VolumeAttachmentSource) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&VolumeAttachmentSource{`, - `PersistentVolumeName:` + valueToStringGenerated(this.PersistentVolumeName) + `,`, - `InlineVolumeSpec:` + strings.Replace(fmt.Sprintf("%v", this.InlineVolumeSpec), "PersistentVolumeSpec", "k8s_io_api_core_v1.PersistentVolumeSpec", 1) + `,`, - `}`, - }, "") - return s -} -func (this *VolumeAttachmentSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&VolumeAttachmentSpec{`, - `Attacher:` + fmt.Sprintf("%v", this.Attacher) + `,`, - `Source:` + strings.Replace(strings.Replace(this.Source.String(), "VolumeAttachmentSource", "VolumeAttachmentSource", 1), `&`, ``, 1) + `,`, - `NodeName:` + fmt.Sprintf("%v", this.NodeName) + `,`, - `}`, - }, "") - return s -} -func (this *VolumeAttachmentStatus) String() string { - if this == nil { - return "nil" - } - keysForAttachmentMetadata := make([]string, 0, len(this.AttachmentMetadata)) - for k := range 
this.AttachmentMetadata { - keysForAttachmentMetadata = append(keysForAttachmentMetadata, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForAttachmentMetadata) - mapStringForAttachmentMetadata := "map[string]string{" - for _, k := range keysForAttachmentMetadata { - mapStringForAttachmentMetadata += fmt.Sprintf("%v: %v,", k, this.AttachmentMetadata[k]) - } - mapStringForAttachmentMetadata += "}" - s := strings.Join([]string{`&VolumeAttachmentStatus{`, - `Attached:` + fmt.Sprintf("%v", this.Attached) + `,`, - `AttachmentMetadata:` + mapStringForAttachmentMetadata + `,`, - `AttachError:` + strings.Replace(fmt.Sprintf("%v", this.AttachError), "VolumeError", "VolumeError", 1) + `,`, - `DetachError:` + strings.Replace(fmt.Sprintf("%v", this.DetachError), "VolumeError", "VolumeError", 1) + `,`, - `}`, - }, "") - return s -} -func (this *VolumeError) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&VolumeError{`, - `Time:` + strings.Replace(strings.Replace(this.Time.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *StorageClass) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: StorageClass: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: StorageClass: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Provisioner", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Provisioner = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Parameters == nil { - m.Parameters = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Parameters[mapkey] = mapvalue - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ReclaimPolicy", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := k8s_io_api_core_v1.PersistentVolumeReclaimPolicy(dAtA[iNdEx:postIndex]) - m.ReclaimPolicy = &s - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MountOptions", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return 
ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.MountOptions = append(m.MountOptions, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowVolumeExpansion", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.AllowVolumeExpansion = &b - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VolumeBindingMode", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := VolumeBindingMode(dAtA[iNdEx:postIndex]) - m.VolumeBindingMode = &s - iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowedTopologies", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.AllowedTopologies = append(m.AllowedTopologies, k8s_io_api_core_v1.TopologySelectorTerm{}) - if err := m.AllowedTopologies[len(m.AllowedTopologies)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *StorageClassList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: StorageClassList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: StorageClassList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + 
msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, StorageClass{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: VolumeAttachment: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: VolumeAttachment: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := 
iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *VolumeAttachmentList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: VolumeAttachmentList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: VolumeAttachmentList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, VolumeAttachment{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *VolumeAttachmentSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: VolumeAttachmentSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: VolumeAttachmentSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: 
- if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PersistentVolumeName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.PersistentVolumeName = &s - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field InlineVolumeSpec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.InlineVolumeSpec == nil { - m.InlineVolumeSpec = &k8s_io_api_core_v1.PersistentVolumeSpec{} - } - if err := m.InlineVolumeSpec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF + s := strings.Join([]string{`&StorageClassList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "StorageClass", "StorageClass", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" } - return nil + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) } -func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { +func (m *StorageClass) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1640,17 +369,17 @@ func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: VolumeAttachmentSpec: wiretype end group for non-group") + return fmt.Errorf("proto: StorageClass: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: VolumeAttachmentSpec: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: StorageClass: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Attacher", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -1660,26 +389,27 @@ func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - 
if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.Attacher = string(dAtA[iNdEx:postIndex]) + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Provisioner", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -1689,27 +419,26 @@ func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Source.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Provisioner = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NodeName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -1719,76 +448,19 @@ func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.NodeName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: VolumeAttachmentStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: VolumeAttachmentStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Attached", wireType) - } - var v int + var keykey uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -1798,17 +470,12 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + keykey |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - m.Attached 
= bool(v != 0) - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AttachmentMetadata", wireType) - } - var msglen int + var stringLenmapkey uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -1818,26 +485,26 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLenmapkey |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen - if postIndex > l { + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - if m.AttachmentMetadata == nil { - m.AttachmentMetadata = make(map[string]string) + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + if m.Parameters == nil { + m.Parameters = make(map[string]string) } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 + if iNdEx < postIndex { + var valuekey uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -1847,86 +514,47 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + valuekey |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break } - iNdEx += skippy } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue 
:= string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + m.Parameters[mapkey] = mapvalue + } else { + var mapvalue string + m.Parameters[mapkey] = mapvalue } - m.AttachmentMetadata[mapkey] = mapvalue iNdEx = postIndex - case 3: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AttachError", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ReclaimPolicy", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -1936,28 +564,105 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - if m.AttachError == nil { - m.AttachError = &VolumeError{} + s := k8s_io_api_core_v1.PersistentVolumeReclaimPolicy(dAtA[iNdEx:postIndex]) + m.ReclaimPolicy = &s + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MountOptions", wireType) } - if err := m.AttachError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF } + m.MountOptions = append(m.MountOptions, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex - case 4: + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowVolumeExpansion", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.AllowVolumeExpansion = &b + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeBindingMode", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := VolumeBindingMode(dAtA[iNdEx:postIndex]) + m.VolumeBindingMode = &s + iNdEx = postIndex + case 8: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DetachError", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field AllowedTopologies", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -1981,10 +686,8 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.DetachError == nil { - m.DetachError = &VolumeError{} - } - if err := 
m.DetachError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.AllowedTopologies = append(m.AllowedTopologies, k8s_io_api_core_v1.TopologySelectorTerm{}) + if err := m.AllowedTopologies[len(m.AllowedTopologies)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -2009,7 +712,7 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } return nil } -func (m *VolumeError) Unmarshal(dAtA []byte) error { +func (m *StorageClassList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2032,15 +735,15 @@ func (m *VolumeError) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: VolumeError: wiretype end group for non-group") + return fmt.Errorf("proto: StorageClassList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: VolumeError: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: StorageClassList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Time", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -2064,15 +767,15 @@ func (m *VolumeError) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Time.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2082,20 +785,22 @@ func (m *VolumeError) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.Message = string(dAtA[iNdEx:postIndex]) + m.Items = append(m.Items, StorageClass{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -2228,69 +933,46 @@ func init() { } var fileDescriptorGenerated = []byte{ - // 1018 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x56, 0x3d, 0x6f, 0x23, 0xc5, - 0x1b, 0xcf, 0xc6, 0x79, 0x71, 0xc6, 0xc9, 0xff, 0x9c, 0xf9, 0x07, 0x30, 0x2e, 0xec, 0xc8, 0x14, - 0x98, 0x83, 0xdb, 0xbd, 0x84, 0x03, 0x9d, 0x90, 0x40, 0xf2, 0x82, 0x25, 0x4e, 0x8a, 0xef, 0xa2, - 0x49, 0x38, 0x21, 0x44, 0xc1, 0x64, 0xf7, 0x61, 0xb3, 0x67, 0xef, 0xce, 0x32, 0x33, 0x36, 0xa4, - 0xa3, 0xa2, 0x43, 0x82, 0x96, 0x8f, 0x42, 0x49, 0x15, 0xba, 0x13, 0xd5, 0x55, 0x16, 0x59, 0x6a, - 0xbe, 0x40, 0x2a, 0x34, 0xb3, 0x13, 0x7b, 0x63, 0x6f, 0xc0, 0x69, 0xae, 0xf3, 0xf3, 0xf2, 0xfb, - 0x3d, 0xef, 0xb3, 0x46, 0x1f, 0xf5, 0x1f, 0x0a, 0x3b, 0x64, 0x4e, 0x7f, 0x78, 0x02, 0x3c, 0x06, - 0x09, 0xc2, 0x19, 0x41, 0xec, 0x33, 0xee, 0x18, 0x03, 0x4d, 0x42, 0x47, 0x48, 0xc6, 0x69, 0x00, - 0xce, 0x68, 0xcf, 0x09, 0x20, 0x06, 0x4e, 0x25, 0xf8, 0x76, 
0xc2, 0x99, 0x64, 0xf8, 0x95, 0xcc, - 0xcd, 0xa6, 0x49, 0x68, 0x1b, 0x37, 0x7b, 0xb4, 0x57, 0xbf, 0x17, 0x84, 0xf2, 0x74, 0x78, 0x62, - 0x7b, 0x2c, 0x72, 0x02, 0x16, 0x30, 0x47, 0x7b, 0x9f, 0x0c, 0xbf, 0xd6, 0x92, 0x16, 0xf4, 0xaf, - 0x8c, 0xa5, 0xde, 0xca, 0x05, 0xf3, 0x18, 0x2f, 0x8a, 0x54, 0x7f, 0x30, 0xf5, 0x89, 0xa8, 0x77, - 0x1a, 0xc6, 0xc0, 0xcf, 0x9c, 0xa4, 0x1f, 0x28, 0x85, 0x70, 0x22, 0x90, 0xb4, 0x08, 0xe5, 0xdc, - 0x84, 0xe2, 0xc3, 0x58, 0x86, 0x11, 0xcc, 0x01, 0xde, 0xff, 0x2f, 0x80, 0xf0, 0x4e, 0x21, 0xa2, - 0xb3, 0xb8, 0xd6, 0x8f, 0x6b, 0x68, 0xf3, 0x28, 0x6b, 0xc0, 0xc7, 0x03, 0x2a, 0x04, 0xfe, 0x0a, - 0x95, 0x55, 0x52, 0x3e, 0x95, 0xb4, 0x66, 0xed, 0x5a, 0xed, 0xca, 0xfe, 0x7d, 0x7b, 0xda, 0xac, - 0x09, 0xb7, 0x9d, 0xf4, 0x03, 0xa5, 0x10, 0xb6, 0xf2, 0xb6, 0x47, 0x7b, 0xf6, 0x93, 0x93, 0x67, - 0xe0, 0xc9, 0x1e, 0x48, 0xea, 0xe2, 0xf3, 0x71, 0x73, 0x29, 0x1d, 0x37, 0xd1, 0x54, 0x47, 0x26, - 0xac, 0xf8, 0x3d, 0x54, 0x49, 0x38, 0x1b, 0x85, 0x22, 0x64, 0x31, 0xf0, 0xda, 0xf2, 0xae, 0xd5, - 0xde, 0x70, 0xff, 0x6f, 0x20, 0x95, 0xc3, 0xa9, 0x89, 0xe4, 0xfd, 0x70, 0x80, 0x50, 0x42, 0x39, - 0x8d, 0x40, 0x02, 0x17, 0xb5, 0xd2, 0x6e, 0xa9, 0x5d, 0xd9, 0x7f, 0xd7, 0x2e, 0x9c, 0xa3, 0x9d, - 0xaf, 0xc8, 0x3e, 0x9c, 0xa0, 0xba, 0xb1, 0xe4, 0x67, 0xd3, 0xec, 0xa6, 0x06, 0x92, 0xa3, 0xc6, - 0x7d, 0xb4, 0xc5, 0xc1, 0x1b, 0xd0, 0x30, 0x3a, 0x64, 0x83, 0xd0, 0x3b, 0xab, 0xad, 0xe8, 0x0c, - 0xbb, 0xe9, 0xb8, 0xb9, 0x45, 0xf2, 0x86, 0xcb, 0x71, 0xf3, 0xfe, 0xfc, 0x06, 0xd8, 0x87, 0xc0, - 0x45, 0x28, 0x24, 0xc4, 0xf2, 0x29, 0x1b, 0x0c, 0x23, 0xb8, 0x86, 0x21, 0xd7, 0xb9, 0xf1, 0x03, - 0xb4, 0x19, 0xb1, 0x61, 0x2c, 0x9f, 0x24, 0x32, 0x64, 0xb1, 0xa8, 0xad, 0xee, 0x96, 0xda, 0x1b, - 0x6e, 0x35, 0x1d, 0x37, 0x37, 0x7b, 0x39, 0x3d, 0xb9, 0xe6, 0x85, 0x0f, 0xd0, 0x0e, 0x1d, 0x0c, - 0xd8, 0xb7, 0x59, 0x80, 0xee, 0x77, 0x09, 0x8d, 0x55, 0x97, 0x6a, 0x6b, 0xbb, 0x56, 0xbb, 0xec, - 0xd6, 0xd2, 0x71, 0x73, 0xa7, 0x53, 0x60, 0x27, 0x85, 0x28, 0xfc, 0x39, 0xda, 0x1e, 0x69, 0x95, - 0x1b, 0xc6, 0x7e, 0x18, 0x07, 0x3d, 0xe6, 0x43, 0x6d, 0x5d, 0x17, 0x7d, 0x37, 0x1d, 0x37, 0xb7, - 0x9f, 0xce, 0x1a, 0x2f, 0x8b, 0x94, 0x64, 0x9e, 0x04, 0x7f, 0x83, 0xb6, 0x75, 0x44, 0xf0, 0x8f, - 0x59, 0xc2, 0x06, 0x2c, 0x08, 0x41, 0xd4, 0xca, 0x7a, 0x74, 0xed, 0xfc, 0xe8, 0x54, 0xeb, 0xd4, - 0xdc, 0x8c, 0xd7, 0xd9, 0x11, 0x0c, 0xc0, 0x93, 0x8c, 0x1f, 0x03, 0x8f, 0xdc, 0xd7, 0xcd, 0xbc, - 0xb6, 0x3b, 0xb3, 0x54, 0x64, 0x9e, 0xbd, 0xfe, 0x21, 0xba, 0x33, 0x33, 0x70, 0x5c, 0x45, 0xa5, - 0x3e, 0x9c, 0xe9, 0x6d, 0xde, 0x20, 0xea, 0x27, 0xde, 0x41, 0xab, 0x23, 0x3a, 0x18, 0x42, 0xb6, - 0x7c, 0x24, 0x13, 0x3e, 0x58, 0x7e, 0x68, 0xb5, 0x7e, 0xb5, 0x50, 0x35, 0xbf, 0x3d, 0x07, 0xa1, - 0x90, 0xf8, 0xcb, 0xb9, 0x9b, 0xb0, 0x17, 0xbb, 0x09, 0x85, 0xd6, 0x17, 0x51, 0x35, 0x35, 0x94, - 0xaf, 0x34, 0xb9, 0x7b, 0xf8, 0x14, 0xad, 0x86, 0x12, 0x22, 0x51, 0x5b, 0xd6, 0x8d, 0x79, 0x63, - 0x81, 0x9d, 0x76, 0xb7, 0x0c, 0xdf, 0xea, 0x23, 0x85, 0x24, 0x19, 0x41, 0xeb, 0x97, 0x65, 0x54, - 0xcd, 0xe6, 0xd2, 0x91, 0x92, 0x7a, 0xa7, 0x11, 0xc4, 0xf2, 0x25, 0x1c, 0x74, 0x0f, 0xad, 0x88, - 0x04, 0x3c, 0xdd, 0xcc, 0xca, 0xfe, 0xdb, 0x37, 0xe4, 0x3f, 0x9b, 0xd8, 0x51, 0x02, 0x9e, 0xbb, - 0x69, 0x88, 0x57, 0x94, 0x44, 0x34, 0x0d, 0xfe, 0x0c, 0xad, 0x09, 0x49, 0xe5, 0x50, 0x1d, 0xb9, - 0x22, 0xbc, 0xb7, 0x28, 0xa1, 0x06, 0xb9, 0xff, 0x33, 0x94, 0x6b, 0x99, 0x4c, 0x0c, 0x59, 0xeb, - 0x37, 0x0b, 0xed, 0xcc, 0x42, 0x5e, 0xc2, 0x74, 0x0f, 0xae, 0x4f, 0xf7, 0xcd, 0x05, 0x8b, 0xb9, - 0x61, 0xc2, 0x7f, 0x58, 0xe8, 0xd5, 0xb9, 0xba, 0xd9, 0x90, 0x7b, 0xa0, 0xde, 0x84, 
0x64, 0xe6, - 0xe5, 0x79, 0x4c, 0x23, 0xc8, 0xd6, 0x3e, 0x7b, 0x13, 0x0e, 0x0b, 0xec, 0xa4, 0x10, 0x85, 0x9f, - 0xa1, 0x6a, 0x18, 0x0f, 0xc2, 0x18, 0x32, 0xdd, 0xd1, 0x74, 0xbe, 0x85, 0x87, 0x3b, 0xcb, 0xac, - 0x87, 0xbb, 0x93, 0x8e, 0x9b, 0xd5, 0x47, 0x33, 0x2c, 0x64, 0x8e, 0xb7, 0xf5, 0x7b, 0xc1, 0x64, - 0x94, 0x01, 0xbf, 0x83, 0xca, 0x54, 0x6b, 0x80, 0x9b, 0x32, 0x26, 0x9d, 0xee, 0x18, 0x3d, 0x99, - 0x78, 0xe8, 0xbd, 0xd1, 0xad, 0x30, 0x89, 0x2e, 0xbc, 0x37, 0x1a, 0x94, 0xdb, 0x1b, 0x2d, 0x13, - 0x43, 0xa6, 0x92, 0x88, 0x99, 0x9f, 0xf5, 0xb2, 0x74, 0x3d, 0x89, 0xc7, 0x46, 0x4f, 0x26, 0x1e, - 0xad, 0xbf, 0x4b, 0x05, 0x03, 0xd2, 0x0b, 0x98, 0xab, 0xc6, 0xd7, 0xd5, 0x94, 0xe7, 0xaa, 0xf1, - 0x27, 0xd5, 0xf8, 0xf8, 0x67, 0x0b, 0x61, 0x3a, 0xa1, 0xe8, 0x5d, 0x2d, 0x68, 0xb6, 0x45, 0xdd, - 0x5b, 0x9d, 0x84, 0xdd, 0x99, 0xe3, 0xc9, 0xbe, 0x84, 0x75, 0x13, 0x1f, 0xcf, 0x3b, 0x90, 0x82, - 0xe0, 0xd8, 0x47, 0x95, 0x4c, 0xdb, 0xe5, 0x9c, 0x71, 0x73, 0x9e, 0xad, 0x7f, 0xcd, 0x45, 0x7b, - 0xba, 0x0d, 0xf5, 0x65, 0xef, 0x4c, 0xa1, 0x97, 0xe3, 0x66, 0x25, 0x67, 0x27, 0x79, 0x5a, 0x15, - 0xc5, 0x87, 0x69, 0x94, 0x95, 0xdb, 0x45, 0xf9, 0x04, 0x6e, 0x8e, 0x92, 0xa3, 0xad, 0x77, 0xd1, - 0x6b, 0x37, 0xb4, 0xe5, 0x56, 0xdf, 0x8b, 0x1f, 0x2c, 0x94, 0x8f, 0x81, 0x0f, 0xd0, 0x8a, 0xfa, - 0xbb, 0x65, 0x1e, 0x92, 0xbb, 0x8b, 0x3d, 0x24, 0xc7, 0x61, 0x04, 0xd3, 0xa7, 0x50, 0x49, 0x44, - 0xb3, 0xe0, 0xb7, 0xd0, 0x7a, 0x04, 0x42, 0xd0, 0xc0, 0x44, 0x76, 0xef, 0x18, 0xa7, 0xf5, 0x5e, - 0xa6, 0x26, 0x57, 0x76, 0xb7, 0x7d, 0x7e, 0xd1, 0x58, 0x7a, 0x7e, 0xd1, 0x58, 0x7a, 0x71, 0xd1, - 0x58, 0xfa, 0x3e, 0x6d, 0x58, 0xe7, 0x69, 0xc3, 0x7a, 0x9e, 0x36, 0xac, 0x17, 0x69, 0xc3, 0xfa, - 0x33, 0x6d, 0x58, 0x3f, 0xfd, 0xd5, 0x58, 0xfa, 0x62, 0x79, 0xb4, 0xf7, 0x4f, 0x00, 0x00, 0x00, - 0xff, 0xff, 0xe2, 0xd4, 0x42, 0x3d, 0x3c, 0x0b, 0x00, 0x00, + // 656 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x53, 0xcd, 0x6e, 0xd3, 0x4a, + 0x14, 0x8e, 0x93, 0x9b, 0xde, 0x74, 0xd2, 0xea, 0x26, 0xbe, 0xbd, 0x92, 0x6f, 0x16, 0x4e, 0x54, + 0x36, 0x11, 0x12, 0xe3, 0xa6, 0x14, 0x54, 0x21, 0x81, 0x54, 0xa3, 0x4a, 0x20, 0xb5, 0x6a, 0xe4, + 0x56, 0x15, 0x42, 0x2c, 0x98, 0x38, 0x07, 0x77, 0x88, 0xed, 0x31, 0x33, 0x63, 0x43, 0x76, 0xbc, + 0x00, 0x12, 0xcf, 0xc3, 0x13, 0x74, 0xd9, 0x65, 0x57, 0x16, 0x35, 0x6f, 0xd1, 0x15, 0xf2, 0x0f, + 0x8d, 0x9b, 0x04, 0xd1, 0xdd, 0xcc, 0x77, 0xbe, 0xef, 0x3b, 0x33, 0xe7, 0x07, 0x3d, 0x9b, 0xec, + 0x0a, 0x4c, 0x99, 0x31, 0x09, 0x47, 0xc0, 0x7d, 0x90, 0x20, 0x8c, 0x08, 0xfc, 0x31, 0xe3, 0x46, + 0x11, 0x20, 0x01, 0x35, 0x84, 0x64, 0x9c, 0x38, 0x60, 0x44, 0x03, 0xc3, 0x01, 0x1f, 0x38, 0x91, + 0x30, 0xc6, 0x01, 0x67, 0x92, 0xa9, 0xff, 0xe5, 0x34, 0x4c, 0x02, 0x8a, 0x0b, 0x1a, 0x8e, 0x06, + 0x9d, 0x07, 0x0e, 0x95, 0x67, 0xe1, 0x08, 0xdb, 0xcc, 0x33, 0x1c, 0xe6, 0x30, 0x23, 0x63, 0x8f, + 0xc2, 0x77, 0xd9, 0x2d, 0xbb, 0x64, 0xa7, 0xdc, 0xa5, 0xb3, 0x59, 0x4a, 0x66, 0x33, 0xbe, 0x2c, + 0x53, 0x67, 0x67, 0xc6, 0xf1, 0x88, 0x7d, 0x46, 0x7d, 0xe0, 0x53, 0x23, 0x98, 0x38, 0x29, 0x20, + 0x0c, 0x0f, 0x24, 0x59, 0xa6, 0x32, 0x7e, 0xa7, 0xe2, 0xa1, 0x2f, 0xa9, 0x07, 0x0b, 0x82, 0xc7, + 0x7f, 0x12, 0x08, 0xfb, 0x0c, 0x3c, 0x32, 0xaf, 0xdb, 0xfc, 0xb2, 0x82, 0xd6, 0x8e, 0xf3, 0x02, + 0x3c, 0x77, 0x89, 0x10, 0xea, 0x5b, 0xd4, 0x48, 0x1f, 0x35, 0x26, 0x92, 0x68, 0x4a, 0x4f, 0xe9, + 0x37, 0xb7, 0xb7, 0xf0, 0xac, 0x58, 0x37, 0xde, 0x38, 0x98, 0x38, 0x29, 0x20, 0x70, 0xca, 0xc6, + 0xd1, 0x00, 0x1f, 0x8d, 0xde, 0x83, 0x2d, 0x0f, 0x41, 0x12, 0x53, 0x3d, 0x8f, 0xbb, 0x95, 0x24, + 
0xee, 0xa2, 0x19, 0x66, 0xdd, 0xb8, 0xaa, 0x8f, 0x50, 0x33, 0xe0, 0x2c, 0xa2, 0x82, 0x32, 0x1f, + 0xb8, 0x56, 0xed, 0x29, 0xfd, 0x55, 0xf3, 0xdf, 0x42, 0xd2, 0x1c, 0xce, 0x42, 0x56, 0x99, 0xa7, + 0x3a, 0x08, 0x05, 0x84, 0x13, 0x0f, 0x24, 0x70, 0xa1, 0xd5, 0x7a, 0xb5, 0x7e, 0x73, 0xfb, 0x21, + 0x5e, 0xda, 0x47, 0x5c, 0xfe, 0x11, 0x1e, 0xde, 0xa8, 0xf6, 0x7d, 0xc9, 0xa7, 0xb3, 0xd7, 0xcd, + 0x02, 0x56, 0xc9, 0x5a, 0x9d, 0xa0, 0x75, 0x0e, 0xb6, 0x4b, 0xa8, 0x37, 0x64, 0x2e, 0xb5, 0xa7, + 0xda, 0x5f, 0xd9, 0x0b, 0xf7, 0x93, 0xb8, 0xbb, 0x6e, 0x95, 0x03, 0xd7, 0x71, 0x77, 0x6b, 0x71, + 0x02, 0xf0, 0x10, 0xb8, 0xa0, 0x42, 0x82, 0x2f, 0x4f, 0x99, 0x1b, 0x7a, 0x70, 0x4b, 0x63, 0xdd, + 0xf6, 0x56, 0x77, 0xd0, 0x9a, 0xc7, 0x42, 0x5f, 0x1e, 0x05, 0x92, 0x32, 0x5f, 0x68, 0xf5, 0x5e, + 0xad, 0xbf, 0x6a, 0xb6, 0x92, 0xb8, 0xbb, 0x76, 0x58, 0xc2, 0xad, 0x5b, 0x2c, 0xf5, 0x00, 0x6d, + 0x10, 0xd7, 0x65, 0x1f, 0xf3, 0x04, 0xfb, 0x9f, 0x02, 0xe2, 0xa7, 0x55, 0xd2, 0x56, 0x7a, 0x4a, + 0xbf, 0x61, 0x6a, 0x49, 0xdc, 0xdd, 0xd8, 0x5b, 0x12, 0xb7, 0x96, 0xaa, 0xd4, 0x57, 0xa8, 0x1d, + 0x65, 0x90, 0x49, 0xfd, 0x31, 0xf5, 0x9d, 0x43, 0x36, 0x06, 0xed, 0xef, 0xec, 0xd3, 0xf7, 0x93, + 0xb8, 0xdb, 0x3e, 0x9d, 0x0f, 0x5e, 0x2f, 0x03, 0xad, 0x45, 0x13, 0xf5, 0x03, 0x6a, 0x67, 0x19, + 0x61, 0x7c, 0xc2, 0x02, 0xe6, 0x32, 0x87, 0x82, 0xd0, 0x1a, 0x59, 0xeb, 0xfa, 0xe5, 0xd6, 0xa5, + 0xa5, 0x4b, 0xfb, 0x56, 0xb0, 0xa6, 0xc7, 0xe0, 0x82, 0x2d, 0x19, 0x3f, 0x01, 0xee, 0x99, 0xff, + 0x17, 0xfd, 0x6a, 0xef, 0xcd, 0x5b, 0x59, 0x8b, 0xee, 0x9d, 0xa7, 0xe8, 0x9f, 0xb9, 0x86, 0xab, + 0x2d, 0x54, 0x9b, 0xc0, 0x34, 0x9b, 0xe6, 0x55, 0x2b, 0x3d, 0xaa, 0x1b, 0xa8, 0x1e, 0x11, 0x37, + 0x84, 0x7c, 0xf8, 0xac, 0xfc, 0xf2, 0xa4, 0xba, 0xab, 0x6c, 0x7e, 0x53, 0x50, 0xab, 0x3c, 0x3d, + 0x07, 0x54, 0x48, 0xf5, 0xcd, 0xc2, 0x4e, 0xe0, 0xbb, 0xed, 0x44, 0xaa, 0xce, 0x36, 0xa2, 0x55, + 0xfc, 0xa1, 0xf1, 0x0b, 0x29, 0xed, 0xc3, 0x0b, 0x54, 0xa7, 0x12, 0x3c, 0xa1, 0x55, 0xb3, 0xc2, + 0xdc, 0xbb, 0xc3, 0x4c, 0x9b, 0xeb, 0x85, 0x5f, 0xfd, 0x65, 0xaa, 0xb4, 0x72, 0x03, 0xb3, 0x7f, + 0x7e, 0xa5, 0x57, 0x2e, 0xae, 0xf4, 0xca, 0xe5, 0x95, 0x5e, 0xf9, 0x9c, 0xe8, 0xca, 0x79, 0xa2, + 0x2b, 0x17, 0x89, 0xae, 0x5c, 0x26, 0xba, 0xf2, 0x3d, 0xd1, 0x95, 0xaf, 0x3f, 0xf4, 0xca, 0xeb, + 0x6a, 0x34, 0xf8, 0x19, 0x00, 0x00, 0xff, 0xff, 0x0d, 0x64, 0x41, 0x83, 0x40, 0x05, 0x00, 0x00, } diff --git a/vendor/k8s.io/api/storage/v1/register.go b/vendor/k8s.io/api/storage/v1/register.go index 473c68727..c058add84 100644 --- a/vendor/k8s.io/api/storage/v1/register.go +++ b/vendor/k8s.io/api/storage/v1/register.go @@ -46,9 +46,6 @@ func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &StorageClass{}, &StorageClassList{}, - - &VolumeAttachment{}, - &VolumeAttachmentList{}, ) metav1.AddToGroupVersion(scheme, SchemeGroupVersion) diff --git a/vendor/k8s.io/api/storage/v1/types.go b/vendor/k8s.io/api/storage/v1/types.go index 21531c9e1..30e6d6d29 100644 --- a/vendor/k8s.io/api/storage/v1/types.go +++ b/vendor/k8s.io/api/storage/v1/types.go @@ -102,117 +102,3 @@ const ( // binding will occur during Pod scheduing. VolumeBindingWaitForFirstConsumer VolumeBindingMode = "WaitForFirstConsumer" ) - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// VolumeAttachment captures the intent to attach or detach the specified volume -// to/from the specified node. -// -// VolumeAttachment objects are non-namespaced. 
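
An aside on the register.go hunk above: dropping &VolumeAttachment{} and &VolumeAttachmentList{} from addKnownTypes means a client built against this vendored copy can no longer encode or decode those kinds through the storage/v1 scheme. For readers unfamiliar with the registration pattern, here is a minimal sketch of such a register.go (illustrative only, not patch content; it assumes the StorageClass types defined elsewhere in this package):

	package v1

	import (
		metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
		"k8s.io/apimachinery/pkg/runtime"
		"k8s.io/apimachinery/pkg/runtime/schema"
	)

	// GroupName is the API group served by this package.
	const GroupName = "storage.k8s.io"

	// SchemeGroupVersion identifies the group/version these types belong to.
	var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"}

	var (
		SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
		AddToScheme   = SchemeBuilder.AddToScheme
	)

	// addKnownTypes registers the kinds this package serves; after the hunk
	// above, only StorageClass and StorageClassList remain registered in v1.
	func addKnownTypes(scheme *runtime.Scheme) error {
		scheme.AddKnownTypes(SchemeGroupVersion,
			&StorageClass{},
			&StorageClassList{},
		)
		metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
		return nil
	}
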
-type VolumeAttachment struct { - metav1.TypeMeta `json:",inline"` - - // Standard object metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Specification of the desired attach/detach volume behavior. - // Populated by the Kubernetes system. - Spec VolumeAttachmentSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` - - // Status of the VolumeAttachment request. - // Populated by the entity completing the attach or detach - // operation, i.e. the external-attacher. - // +optional - Status VolumeAttachmentStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// VolumeAttachmentList is a collection of VolumeAttachment objects. -type VolumeAttachmentList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Items is the list of VolumeAttachments - Items []VolumeAttachment `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// VolumeAttachmentSpec is the specification of a VolumeAttachment request. -type VolumeAttachmentSpec struct { - // Attacher indicates the name of the volume driver that MUST handle this - // request. This is the name returned by GetPluginName(). - Attacher string `json:"attacher" protobuf:"bytes,1,opt,name=attacher"` - - // Source represents the volume that should be attached. - Source VolumeAttachmentSource `json:"source" protobuf:"bytes,2,opt,name=source"` - - // The node that the volume should be attached to. - NodeName string `json:"nodeName" protobuf:"bytes,3,opt,name=nodeName"` -} - -// VolumeAttachmentSource represents a volume that should be attached. -// Right now only PersistenVolumes can be attached via external attacher, -// in future we may allow also inline volumes in pods. -// Exactly one member can be set. -type VolumeAttachmentSource struct { - // Name of the persistent volume to attach. - // +optional - PersistentVolumeName *string `json:"persistentVolumeName,omitempty" protobuf:"bytes,1,opt,name=persistentVolumeName"` - - // inlineVolumeSpec contains all the information necessary to attach - // a persistent volume defined by a pod's inline VolumeSource. This field - // is populated only for the CSIMigration feature. It contains - // translated fields from a pod's inline VolumeSource to a - // PersistentVolumeSpec. This field is alpha-level and is only - // honored by servers that enabled the CSIMigration feature. - // +optional - InlineVolumeSpec *v1.PersistentVolumeSpec `json:"inlineVolumeSpec,omitempty" protobuf:"bytes,2,opt,name=inlineVolumeSpec"` -} - -// VolumeAttachmentStatus is the status of a VolumeAttachment request. -type VolumeAttachmentStatus struct { - // Indicates the volume is successfully attached. - // This field must only be set by the entity completing the attach - // operation, i.e. the external-attacher. - Attached bool `json:"attached" protobuf:"varint,1,opt,name=attached"` - - // Upon successful attach, this field is populated with any - // information returned by the attach operation that must be passed - // into subsequent WaitForAttach or Mount calls. - // This field must only be set by the entity completing the attach - // operation, i.e. the external-attacher. 
- // +optional - AttachmentMetadata map[string]string `json:"attachmentMetadata,omitempty" protobuf:"bytes,2,rep,name=attachmentMetadata"` - - // The last error encountered during attach operation, if any. - // This field must only be set by the entity completing the attach - // operation, i.e. the external-attacher. - // +optional - AttachError *VolumeError `json:"attachError,omitempty" protobuf:"bytes,3,opt,name=attachError,casttype=VolumeError"` - - // The last error encountered during detach operation, if any. - // This field must only be set by the entity completing the detach - // operation, i.e. the external-attacher. - // +optional - DetachError *VolumeError `json:"detachError,omitempty" protobuf:"bytes,4,opt,name=detachError,casttype=VolumeError"` -} - -// VolumeError captures an error encountered during a volume operation. -type VolumeError struct { - // Time the error was encountered. - // +optional - Time metav1.Time `json:"time,omitempty" protobuf:"bytes,1,opt,name=time"` - - // String detailing the error encountered during Attach or Detach operation. - // This string may be logged, so it should not contain sensitive - // information. - // +optional - Message string `json:"message,omitempty" protobuf:"bytes,2,opt,name=message"` -} diff --git a/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go index e31dd7f71..23b76e28d 100644 --- a/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go @@ -53,67 +53,4 @@ func (StorageClassList) SwaggerDoc() map[string]string { return map_StorageClassList } -var map_VolumeAttachment = map[string]string{ - "": "VolumeAttachment captures the intent to attach or detach the specified volume to/from the specified node.\n\nVolumeAttachment objects are non-namespaced.", - "metadata": "Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "Specification of the desired attach/detach volume behavior. Populated by the Kubernetes system.", - "status": "Status of the VolumeAttachment request. Populated by the entity completing the attach or detach operation, i.e. the external-attacher.", -} - -func (VolumeAttachment) SwaggerDoc() map[string]string { - return map_VolumeAttachment -} - -var map_VolumeAttachmentList = map[string]string{ - "": "VolumeAttachmentList is a collection of VolumeAttachment objects.", - "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "items": "Items is the list of VolumeAttachments", -} - -func (VolumeAttachmentList) SwaggerDoc() map[string]string { - return map_VolumeAttachmentList -} - -var map_VolumeAttachmentSource = map[string]string{ - "": "VolumeAttachmentSource represents a volume that should be attached. Right now only PersistenVolumes can be attached via external attacher, in future we may allow also inline volumes in pods. Exactly one member can be set.", - "persistentVolumeName": "Name of the persistent volume to attach.", -} - -func (VolumeAttachmentSource) SwaggerDoc() map[string]string { - return map_VolumeAttachmentSource -} - -var map_VolumeAttachmentSpec = map[string]string{ - "": "VolumeAttachmentSpec is the specification of a VolumeAttachment request.", - "attacher": "Attacher indicates the name of the volume driver that MUST handle this request. 
This is the name returned by GetPluginName().", - "source": "Source represents the volume that should be attached.", - "nodeName": "The node that the volume should be attached to.", -} - -func (VolumeAttachmentSpec) SwaggerDoc() map[string]string { - return map_VolumeAttachmentSpec -} - -var map_VolumeAttachmentStatus = map[string]string{ - "": "VolumeAttachmentStatus is the status of a VolumeAttachment request.", - "attached": "Indicates the volume is successfully attached. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", - "attachmentMetadata": "Upon successful attach, this field is populated with any information returned by the attach operation that must be passed into subsequent WaitForAttach or Mount calls. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", - "attachError": "The last error encountered during attach operation, if any. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", - "detachError": "The last error encountered during detach operation, if any. This field must only be set by the entity completing the detach operation, i.e. the external-attacher.", -} - -func (VolumeAttachmentStatus) SwaggerDoc() map[string]string { - return map_VolumeAttachmentStatus -} - -var map_VolumeError = map[string]string{ - "": "VolumeError captures an error encountered during a volume operation.", - "time": "Time the error was encountered.", - "message": "String detailing the error encountered during Attach or Detach operation. This string may be logged, so it should not contain sensitive information.", -} - -func (VolumeError) SwaggerDoc() map[string]string { - return map_VolumeError -} - // AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/storage/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/storage/v1/zz_generated.deepcopy.go index eb8626e6e..0e850dc34 100644 --- a/vendor/k8s.io/api/storage/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/storage/v1/zz_generated.deepcopy.go @@ -89,7 +89,7 @@ func (in *StorageClass) DeepCopyObject() runtime.Object { func (in *StorageClassList) DeepCopyInto(out *StorageClassList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]StorageClass, len(*in)) @@ -117,157 +117,3 @@ func (in *StorageClassList) DeepCopyObject() runtime.Object { } return nil } - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VolumeAttachment) DeepCopyInto(out *VolumeAttachment) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttachment. -func (in *VolumeAttachment) DeepCopy() *VolumeAttachment { - if in == nil { - return nil - } - out := new(VolumeAttachment) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *VolumeAttachment) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
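
The types_swagger_doc_generated.go hunk above removes the VolumeAttachment* documentation maps along with their types. As a sketch of the convention those functions follow (wording illustrative, not patch content): each generated map uses the empty-string key for the type-level description and one key per JSON field, and the SwaggerDoc method simply returns the map so OpenAPI tooling can look it up by name.

	var map_StorageClass = map[string]string{
		"":            "StorageClass describes the parameters for a class of storage for which PersistentVolumes can be dynamically provisioned.",
		"provisioner": "Provisioner indicates the type of the provisioner.",
		"parameters":  "Parameters holds the parameters for the provisioner that should create volumes of this storage class.",
	}

	// SwaggerDoc returns the field documentation for StorageClass; OpenAPI
	// generators discover this method by name on each API type.
	func (StorageClass) SwaggerDoc() map[string]string {
		return map_StorageClass
	}
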
-func (in *VolumeAttachmentList) DeepCopyInto(out *VolumeAttachmentList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]VolumeAttachment, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttachmentList. -func (in *VolumeAttachmentList) DeepCopy() *VolumeAttachmentList { - if in == nil { - return nil - } - out := new(VolumeAttachmentList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *VolumeAttachmentList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VolumeAttachmentSource) DeepCopyInto(out *VolumeAttachmentSource) { - *out = *in - if in.PersistentVolumeName != nil { - in, out := &in.PersistentVolumeName, &out.PersistentVolumeName - *out = new(string) - **out = **in - } - if in.InlineVolumeSpec != nil { - in, out := &in.InlineVolumeSpec, &out.InlineVolumeSpec - *out = new(corev1.PersistentVolumeSpec) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttachmentSource. -func (in *VolumeAttachmentSource) DeepCopy() *VolumeAttachmentSource { - if in == nil { - return nil - } - out := new(VolumeAttachmentSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VolumeAttachmentSpec) DeepCopyInto(out *VolumeAttachmentSpec) { - *out = *in - in.Source.DeepCopyInto(&out.Source) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttachmentSpec. -func (in *VolumeAttachmentSpec) DeepCopy() *VolumeAttachmentSpec { - if in == nil { - return nil - } - out := new(VolumeAttachmentSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VolumeAttachmentStatus) DeepCopyInto(out *VolumeAttachmentStatus) { - *out = *in - if in.AttachmentMetadata != nil { - in, out := &in.AttachmentMetadata, &out.AttachmentMetadata - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.AttachError != nil { - in, out := &in.AttachError, &out.AttachError - *out = new(VolumeError) - (*in).DeepCopyInto(*out) - } - if in.DetachError != nil { - in, out := &in.DetachError, &out.DetachError - *out = new(VolumeError) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttachmentStatus. -func (in *VolumeAttachmentStatus) DeepCopy() *VolumeAttachmentStatus { - if in == nil { - return nil - } - out := new(VolumeAttachmentStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
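
The zz_generated.deepcopy.go hunks above show the two copying strategies the generator chooses between: out.ListMeta = in.ListMeta is a plain value assignment, safe only because the older vendored ListMeta carries no pointer, slice, or map fields, while fields with reference semantics get a recursive deep copy. A compact sketch of the resulting StorageClassList functions, mirroring the hunk above:

	// DeepCopyInto copies the receiver into out. The leading *out = *in is a
	// shallow copy; only fields with reference semantics are then deep-copied.
	func (in *StorageClassList) DeepCopyInto(out *StorageClassList) {
		*out = *in
		out.TypeMeta = in.TypeMeta
		out.ListMeta = in.ListMeta // value copy: no reference fields in this ListMeta
		if in.Items != nil {
			in, out := &in.Items, &out.Items
			*out = make([]StorageClass, len(*in))
			for i := range *in {
				(*in)[i].DeepCopyInto(&(*out)[i]) // element-wise deep copy
			}
		}
	}

	// DeepCopy allocates a new StorageClassList and deep-copies into it.
	func (in *StorageClassList) DeepCopy() *StorageClassList {
		if in == nil {
			return nil
		}
		out := new(StorageClassList)
		in.DeepCopyInto(out)
		return out
	}
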
-func (in *VolumeError) DeepCopyInto(out *VolumeError) { - *out = *in - in.Time.DeepCopyInto(&out.Time) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeError. -func (in *VolumeError) DeepCopy() *VolumeError { - if in == nil { - return nil - } - out := new(VolumeError) - in.DeepCopyInto(out) - return out -} diff --git a/vendor/k8s.io/api/storage/v1alpha1/doc.go b/vendor/k8s.io/api/storage/v1alpha1/doc.go index 6f7ad7e73..aa94aff7f 100644 --- a/vendor/k8s.io/api/storage/v1alpha1/doc.go +++ b/vendor/k8s.io/api/storage/v1alpha1/doc.go @@ -14,9 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:deepcopy-gen=package -// +k8s:protobuf-gen=package +// +k8s:deepcopy-gen=package,register // +groupName=storage.k8s.io // +k8s:openapi-gen=true - package v1alpha1 // import "k8s.io/api/storage/v1alpha1" diff --git a/vendor/k8s.io/api/storage/v1alpha1/generated.pb.go b/vendor/k8s.io/api/storage/v1alpha1/generated.pb.go index 3289641bc..507b5c1d5 100644 --- a/vendor/k8s.io/api/storage/v1alpha1/generated.pb.go +++ b/vendor/k8s.io/api/storage/v1alpha1/generated.pb.go @@ -14,8 +14,9 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// Code generated by protoc-gen-gogo. // source: k8s.io/kubernetes/vendor/k8s.io/api/storage/v1alpha1/generated.proto +// DO NOT EDIT! /* Package v1alpha1 is a generated protocol buffer package. @@ -37,8 +38,6 @@ import proto "github.com/gogo/protobuf/proto" import fmt "fmt" import math "math" -import k8s_io_api_core_v1 "k8s.io/api/core/v1" - import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" import strings "strings" @@ -190,16 +189,6 @@ func (m *VolumeAttachmentSource) MarshalTo(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PersistentVolumeName))) i += copy(dAtA[i:], *m.PersistentVolumeName) } - if m.InlineVolumeSpec != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.InlineVolumeSpec.Size())) - n5, err := m.InlineVolumeSpec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n5 - } return i, nil } @@ -225,11 +214,11 @@ func (m *VolumeAttachmentSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Source.Size())) - n6, err := m.Source.MarshalTo(dAtA[i:]) + n5, err := m.Source.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n6 + i += n5 dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.NodeName))) @@ -286,21 +275,21 @@ func (m *VolumeAttachmentStatus) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.AttachError.Size())) - n7, err := m.AttachError.MarshalTo(dAtA[i:]) + n6, err := m.AttachError.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n7 + i += n6 } if m.DetachError != nil { dAtA[i] = 0x22 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.DetachError.Size())) - n8, err := m.DetachError.MarshalTo(dAtA[i:]) + n7, err := m.DetachError.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n8 + i += n7 } return i, nil } @@ -323,11 +312,11 @@ func (m *VolumeError) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Time.Size())) - n9, err := m.Time.MarshalTo(dAtA[i:]) + n8, err := m.Time.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n9 + i += n8 
dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) @@ -335,6 +324,24 @@ func (m *VolumeError) MarshalTo(dAtA []byte) (int, error) { return i, nil } +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -377,10 +384,6 @@ func (m *VolumeAttachmentSource) Size() (n int) { l = len(*m.PersistentVolumeName) n += 1 + l + sovGenerated(uint64(l)) } - if m.InlineVolumeSpec != nil { - l = m.InlineVolumeSpec.Size() - n += 1 + l + sovGenerated(uint64(l)) - } return n } @@ -471,7 +474,6 @@ func (this *VolumeAttachmentSource) String() string { } s := strings.Join([]string{`&VolumeAttachmentSource{`, `PersistentVolumeName:` + valueToStringGenerated(this.PersistentVolumeName) + `,`, - `InlineVolumeSpec:` + strings.Replace(fmt.Sprintf("%v", this.InlineVolumeSpec), "PersistentVolumeSpec", "k8s_io_api_core_v1.PersistentVolumeSpec", 1) + `,`, `}`, }, "") return s @@ -840,39 +842,6 @@ func (m *VolumeAttachmentSource) Unmarshal(dAtA []byte) error { s := string(dAtA[iNdEx:postIndex]) m.PersistentVolumeName = &s iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field InlineVolumeSpec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.InlineVolumeSpec == nil { - m.InlineVolumeSpec = &k8s_io_api_core_v1.PersistentVolumeSpec{} - } - if err := m.InlineVolumeSpec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -1107,14 +1076,51 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey if m.AttachmentMetadata == nil { 
m.AttachmentMetadata = make(map[string]string) } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 + if iNdEx < postIndex { + var valuekey uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -1124,80 +1130,41 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + valuekey |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break } - iNdEx += skippy } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + m.AttachmentMetadata[mapkey] = mapvalue + } else { + var mapvalue string + m.AttachmentMetadata[mapkey] = mapvalue } - m.AttachmentMetadata[mapkey] = mapvalue iNdEx = postIndex case 3: if wireType != 2 { @@ -1505,52 +1472,49 @@ func init() { } var fileDescriptorGenerated = []byte{ - // 745 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x94, 0xcd, 0x6e, 0xd3, 0x40, - 0x10, 0xc7, 0xe3, 0x24, 0x6d, 0xd3, 0x0d, 0x1f, 0xd1, 0x2a, 0x82, 0x28, 0x48, 0x4e, 0x95, 0x53, - 0x40, 0x74, 0x4d, 0x0a, 0x42, 0x15, 0xb7, 0x58, 0xed, 0xa1, 0xa2, 0x2d, 0x68, 0x8b, 0x38, 0x00, - 0x07, 0x36, 0xf6, 0xe2, 0xb8, 0x89, 0x3f, 0xe4, 0x5d, 0x47, 0xea, 0x8d, 0x13, 0x67, 0x6e, 0xbc, - 0x01, 0xcf, 0x92, 0x1b, 0x15, 0xa7, 0x9e, 0x22, 0x6a, 0xde, 0x82, 0x0b, 0x68, 0xd7, 0x9b, 0xc4, - 0x24, 0x29, 0xb4, 0xbd, 0x79, 0x66, 0x67, 0x7e, 0x33, 0xf3, 0xdf, 0xf1, 
0x82, 0x9d, 0xfe, 0x36, - 0x43, 0x6e, 0x60, 0xf4, 0xe3, 0x2e, 0x8d, 0x7c, 0xca, 0x29, 0x33, 0x86, 0xd4, 0xb7, 0x83, 0xc8, - 0x50, 0x07, 0x24, 0x74, 0x0d, 0xc6, 0x83, 0x88, 0x38, 0xd4, 0x18, 0xb6, 0xc9, 0x20, 0xec, 0x91, - 0xb6, 0xe1, 0x50, 0x9f, 0x46, 0x84, 0x53, 0x1b, 0x85, 0x51, 0xc0, 0x03, 0x78, 0x2f, 0x0d, 0x46, - 0x24, 0x74, 0x91, 0x0a, 0x46, 0x93, 0xe0, 0xfa, 0xa6, 0xe3, 0xf2, 0x5e, 0xdc, 0x45, 0x56, 0xe0, - 0x19, 0x4e, 0xe0, 0x04, 0x86, 0xcc, 0xe9, 0xc6, 0x1f, 0xa4, 0x25, 0x0d, 0xf9, 0x95, 0xb2, 0xea, - 0xcd, 0x4c, 0x61, 0x2b, 0x88, 0x44, 0xd5, 0xf9, 0x7a, 0xf5, 0x27, 0xb3, 0x18, 0x8f, 0x58, 0x3d, - 0xd7, 0xa7, 0xd1, 0x89, 0x11, 0xf6, 0x1d, 0xe1, 0x60, 0x86, 0x47, 0x39, 0x59, 0x96, 0x65, 0x5c, - 0x94, 0x15, 0xc5, 0x3e, 0x77, 0x3d, 0xba, 0x90, 0xf0, 0xf4, 0x7f, 0x09, 0xcc, 0xea, 0x51, 0x8f, - 0xcc, 0xe7, 0x35, 0xbf, 0xe6, 0x41, 0xe5, 0x75, 0x30, 0x88, 0x3d, 0xda, 0xe1, 0x9c, 0x58, 0x3d, - 0x8f, 0xfa, 0x1c, 0xbe, 0x07, 0x25, 0xd1, 0x98, 0x4d, 0x38, 0xa9, 0x69, 0x1b, 0x5a, 0xab, 0xbc, - 0xf5, 0x08, 0xcd, 0x64, 0x9b, 0xf2, 0x51, 0xd8, 0x77, 0x84, 0x83, 0x21, 0x11, 0x8d, 0x86, 0x6d, - 0xf4, 0xa2, 0x7b, 0x4c, 0x2d, 0x7e, 0x40, 0x39, 0x31, 0xe1, 0x68, 0xdc, 0xc8, 0x25, 0xe3, 0x06, - 0x98, 0xf9, 0xf0, 0x94, 0x0a, 0x8f, 0x40, 0x91, 0x85, 0xd4, 0xaa, 0xe5, 0x25, 0xbd, 0x8d, 0xfe, - 0x71, 0x29, 0x68, 0xbe, 0xbd, 0xa3, 0x90, 0x5a, 0xe6, 0x0d, 0x85, 0x2f, 0x0a, 0x0b, 0x4b, 0x18, - 0x7c, 0x0b, 0x56, 0x19, 0x27, 0x3c, 0x66, 0xb5, 0x82, 0xc4, 0x3e, 0xbe, 0x1a, 0x56, 0xa6, 0x9a, - 0xb7, 0x14, 0x78, 0x35, 0xb5, 0xb1, 0x42, 0x36, 0x47, 0x1a, 0xa8, 0xce, 0xa7, 0xec, 0xbb, 0x8c, - 0xc3, 0x77, 0x0b, 0x62, 0xa1, 0xcb, 0x89, 0x25, 0xb2, 0xa5, 0x54, 0x15, 0x55, 0xb2, 0x34, 0xf1, - 0x64, 0x84, 0xc2, 0x60, 0xc5, 0xe5, 0xd4, 0x63, 0xb5, 0xfc, 0x46, 0xa1, 0x55, 0xde, 0xda, 0xbc, - 0xd2, 0x48, 0xe6, 0x4d, 0x45, 0x5e, 0xd9, 0x13, 0x0c, 0x9c, 0xa2, 0x9a, 0xdf, 0x35, 0x70, 0x67, - 0x61, 0xfa, 0x20, 0x8e, 0x2c, 0x0a, 0xf7, 0x41, 0x35, 0xa4, 0x11, 0x73, 0x19, 0xa7, 0x3e, 0x4f, - 0x63, 0x0e, 0x89, 0x47, 0xe5, 0x60, 0xeb, 0x66, 0x2d, 0x19, 0x37, 0xaa, 0x2f, 0x97, 0x9c, 0xe3, - 0xa5, 0x59, 0xf0, 0x18, 0x54, 0x5c, 0x7f, 0xe0, 0xfa, 0x34, 0xf5, 0x1d, 0xcd, 0x6e, 0xbc, 0x95, - 0x9d, 0x43, 0xfc, 0x3a, 0x42, 0x90, 0x79, 0xb2, 0xbc, 0xe8, 0x6a, 0x32, 0x6e, 0x54, 0xf6, 0xe6, - 0x28, 0x78, 0x81, 0xdb, 0xfc, 0xb6, 0xe4, 0x7e, 0xc4, 0x01, 0x7c, 0x08, 0x4a, 0x44, 0x7a, 0x68, - 0xa4, 0xc6, 0x98, 0xea, 0xdd, 0x51, 0x7e, 0x3c, 0x8d, 0x90, 0x3b, 0x24, 0xa5, 0x50, 0x8d, 0x5e, - 0x71, 0x87, 0x64, 0x6a, 0x66, 0x87, 0xa4, 0x8d, 0x15, 0x52, 0xb4, 0xe2, 0x07, 0x76, 0xaa, 0x68, - 0xe1, 0xef, 0x56, 0x0e, 0x95, 0x1f, 0x4f, 0x23, 0x9a, 0xbf, 0x0b, 0x4b, 0xae, 0x49, 0x2e, 0x63, - 0x66, 0x26, 0x5b, 0xce, 0x54, 0x5a, 0x98, 0xc9, 0x9e, 0xce, 0x64, 0xc3, 0x2f, 0x1a, 0x80, 0x64, - 0x8a, 0x38, 0x98, 0x2c, 0x6b, 0xba, 0x51, 0xcf, 0xaf, 0xf1, 0x93, 0xa0, 0xce, 0x02, 0x6d, 0xd7, - 0xe7, 0xd1, 0x89, 0x59, 0x57, 0x5d, 0xc0, 0xc5, 0x00, 0xbc, 0xa4, 0x05, 0x78, 0x0c, 0xca, 0xa9, - 0x77, 0x37, 0x8a, 0x82, 0x48, 0xfd, 0xb6, 0xad, 0x4b, 0x74, 0x24, 0xe3, 0x4d, 0x3d, 0x19, 0x37, - 0xca, 0x9d, 0x19, 0xe0, 0xd7, 0xb8, 0x51, 0xce, 0x9c, 0xe3, 0x2c, 0x5c, 0xd4, 0xb2, 0xe9, 0xac, - 0x56, 0xf1, 0x3a, 0xb5, 0x76, 0xe8, 0xc5, 0xb5, 0x32, 0xf0, 0xfa, 0x2e, 0xb8, 0x7b, 0x81, 0x44, - 0xb0, 0x02, 0x0a, 0x7d, 0x7a, 0x92, 0x6e, 0x22, 0x16, 0x9f, 0xb0, 0x0a, 0x56, 0x86, 0x64, 0x10, - 0xa7, 0x1b, 0xb7, 0x8e, 0x53, 0xe3, 0x59, 0x7e, 0x5b, 0x6b, 0x7e, 0xd2, 0x40, 0xb6, 0x06, 0xdc, - 0x07, 0x45, 0xf1, 0x96, 0xab, 0x67, 0xe6, 0xc1, 0xe5, 0x9e, 0x99, 0x57, 0xae, 0x47, 0x67, 0xcf, - 
0xa5, 0xb0, 0xb0, 0xa4, 0xc0, 0xfb, 0x60, 0xcd, 0xa3, 0x8c, 0x11, 0x47, 0x55, 0x36, 0x6f, 0xab, - 0xa0, 0xb5, 0x83, 0xd4, 0x8d, 0x27, 0xe7, 0x26, 0x1a, 0x9d, 0xeb, 0xb9, 0xd3, 0x73, 0x3d, 0x77, - 0x76, 0xae, 0xe7, 0x3e, 0x26, 0xba, 0x36, 0x4a, 0x74, 0xed, 0x34, 0xd1, 0xb5, 0xb3, 0x44, 0xd7, - 0x7e, 0x24, 0xba, 0xf6, 0xf9, 0xa7, 0x9e, 0x7b, 0x53, 0x9a, 0x08, 0xf7, 0x27, 0x00, 0x00, 0xff, - 0xff, 0xe8, 0x45, 0xe3, 0xba, 0xab, 0x07, 0x00, 0x00, + // 704 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x94, 0x4d, 0x6f, 0xd3, 0x4c, + 0x10, 0xc7, 0xe3, 0x24, 0x6d, 0xd3, 0xcd, 0xf3, 0x52, 0xad, 0xa2, 0xe7, 0x89, 0x82, 0xe4, 0x54, + 0x39, 0x15, 0x44, 0xd7, 0xa4, 0x20, 0x54, 0x71, 0x8b, 0xd5, 0x1e, 0x10, 0x6d, 0x41, 0x5b, 0xc4, + 0x01, 0x38, 0xb0, 0xb1, 0xa7, 0x8e, 0x9b, 0xfa, 0x45, 0xbb, 0xeb, 0x48, 0xbd, 0x71, 0xe2, 0xcc, + 0x8d, 0x6f, 0xc0, 0x67, 0xc9, 0x8d, 0x1e, 0x7b, 0x8a, 0xa8, 0xf9, 0x16, 0x5c, 0x40, 0x5e, 0x6f, + 0x5e, 0x68, 0x52, 0x68, 0x7b, 0xf3, 0xcc, 0xce, 0xfc, 0x66, 0xe6, 0xbf, 0xb3, 0x46, 0x3b, 0xfd, + 0x6d, 0x41, 0xfc, 0xc8, 0xea, 0x27, 0x5d, 0xe0, 0x21, 0x48, 0x10, 0xd6, 0x00, 0x42, 0x37, 0xe2, + 0x96, 0x3e, 0x60, 0xb1, 0x6f, 0x09, 0x19, 0x71, 0xe6, 0x81, 0x35, 0x68, 0xb3, 0x93, 0xb8, 0xc7, + 0xda, 0x96, 0x07, 0x21, 0x70, 0x26, 0xc1, 0x25, 0x31, 0x8f, 0x64, 0x84, 0xef, 0xe4, 0xc1, 0x84, + 0xc5, 0x3e, 0xd1, 0xc1, 0x64, 0x1c, 0xdc, 0xd8, 0xf4, 0x7c, 0xd9, 0x4b, 0xba, 0xc4, 0x89, 0x02, + 0xcb, 0x8b, 0xbc, 0xc8, 0x52, 0x39, 0xdd, 0xe4, 0x48, 0x59, 0xca, 0x50, 0x5f, 0x39, 0xab, 0xf1, + 0x68, 0x5a, 0x38, 0x60, 0x4e, 0xcf, 0x0f, 0x81, 0x9f, 0x5a, 0x71, 0xdf, 0xcb, 0x1c, 0xc2, 0x0a, + 0x40, 0x32, 0x6b, 0x30, 0xd7, 0x41, 0xc3, 0xba, 0x2a, 0x8b, 0x27, 0xa1, 0xf4, 0x03, 0x98, 0x4b, + 0x78, 0xfc, 0xa7, 0x04, 0xe1, 0xf4, 0x20, 0x60, 0x97, 0xf3, 0x5a, 0x9f, 0x8b, 0x68, 0xed, 0x55, + 0x74, 0x92, 0x04, 0xd0, 0x91, 0x92, 0x39, 0xbd, 0x00, 0x42, 0x89, 0xdf, 0xa1, 0x4a, 0xd6, 0x98, + 0xcb, 0x24, 0xab, 0x1b, 0xeb, 0xc6, 0x46, 0x75, 0xeb, 0x01, 0x99, 0x4a, 0x32, 0xe1, 0x93, 0xb8, + 0xef, 0x65, 0x0e, 0x41, 0xb2, 0x68, 0x32, 0x68, 0x93, 0xe7, 0xdd, 0x63, 0x70, 0xe4, 0x3e, 0x48, + 0x66, 0xe3, 0xe1, 0xa8, 0x59, 0x48, 0x47, 0x4d, 0x34, 0xf5, 0xd1, 0x09, 0x15, 0x1f, 0xa2, 0xb2, + 0x88, 0xc1, 0xa9, 0x17, 0x15, 0xbd, 0x4d, 0x7e, 0x23, 0x38, 0xb9, 0xdc, 0xde, 0x61, 0x0c, 0x8e, + 0xfd, 0x97, 0xc6, 0x97, 0x33, 0x8b, 0x2a, 0x18, 0x7e, 0x83, 0x96, 0x85, 0x64, 0x32, 0x11, 0xf5, + 0x92, 0xc2, 0x3e, 0xbc, 0x19, 0x56, 0xa5, 0xda, 0xff, 0x68, 0xf0, 0x72, 0x6e, 0x53, 0x8d, 0x6c, + 0x0d, 0x0d, 0x54, 0xbb, 0x9c, 0xb2, 0xe7, 0x0b, 0x89, 0xdf, 0xce, 0x89, 0x45, 0xae, 0x27, 0x56, + 0x96, 0xad, 0xa4, 0x5a, 0xd3, 0x25, 0x2b, 0x63, 0xcf, 0x8c, 0x50, 0x14, 0x2d, 0xf9, 0x12, 0x02, + 0x51, 0x2f, 0xae, 0x97, 0x36, 0xaa, 0x5b, 0x9b, 0x37, 0x1a, 0xc9, 0xfe, 0x5b, 0x93, 0x97, 0x9e, + 0x66, 0x0c, 0x9a, 0xa3, 0x5a, 0x47, 0xe8, 0xbf, 0xb9, 0xe1, 0xa3, 0x84, 0x3b, 0x80, 0xf7, 0x50, + 0x2d, 0x06, 0x2e, 0x7c, 0x21, 0x21, 0x94, 0x79, 0xcc, 0x01, 0x0b, 0x40, 0xcd, 0xb5, 0x6a, 0xd7, + 0xd3, 0x51, 0xb3, 0xf6, 0x62, 0xc1, 0x39, 0x5d, 0x98, 0xd5, 0xfa, 0xb2, 0x40, 0xb2, 0xec, 0xba, + 0xf0, 0x7d, 0x54, 0x61, 0xca, 0x03, 0x5c, 0xa3, 0x27, 0x12, 0x74, 0xb4, 0x9f, 0x4e, 0x22, 0xd4, + 0xb5, 0xaa, 0xf6, 0xf4, 0xb6, 0xdc, 0xf0, 0x5a, 0x55, 0xea, 0xcc, 0xb5, 0x2a, 0x9b, 0x6a, 0x64, + 0xd6, 0x4a, 0x18, 0xb9, 0xf9, 0x94, 0xa5, 0x5f, 0x5b, 0x39, 0xd0, 0x7e, 0x3a, 0x89, 0x68, 0xfd, + 0x28, 0x2d, 0x90, 0x4e, 0xed, 0xc7, 0xcc, 0x4c, 0xae, 0x9a, 0xa9, 0x32, 0x37, 0x93, 0x3b, 0x99, + 0xc9, 0xc5, 0x9f, 
0x0c, 0x84, 0xd9, 0x04, 0xb1, 0x3f, 0xde, 0x9f, 0xfc, 0x92, 0x9f, 0xdd, 0x62, + 0x6f, 0x49, 0x67, 0x8e, 0xb6, 0x1b, 0x4a, 0x7e, 0x6a, 0x37, 0x74, 0x17, 0x78, 0x3e, 0x80, 0x2e, + 0x68, 0x01, 0x1f, 0xa3, 0x6a, 0xee, 0xdd, 0xe5, 0x3c, 0xe2, 0xfa, 0x25, 0x6d, 0x5c, 0xa3, 0x23, + 0x15, 0x6f, 0x9b, 0xe9, 0xa8, 0x59, 0xed, 0x4c, 0x01, 0xdf, 0x47, 0xcd, 0xea, 0xcc, 0x39, 0x9d, + 0x85, 0x67, 0xb5, 0x5c, 0x98, 0xd6, 0x2a, 0xdf, 0xa6, 0xd6, 0x0e, 0x5c, 0x5d, 0x6b, 0x06, 0xde, + 0xd8, 0x45, 0xff, 0x5f, 0x21, 0x11, 0x5e, 0x43, 0xa5, 0x3e, 0x9c, 0xe6, 0x9b, 0x48, 0xb3, 0x4f, + 0x5c, 0x43, 0x4b, 0x03, 0x76, 0x92, 0xe4, 0x1b, 0xb7, 0x4a, 0x73, 0xe3, 0x49, 0x71, 0xdb, 0x68, + 0x7d, 0x30, 0xd0, 0x6c, 0x0d, 0xbc, 0x87, 0xca, 0xd9, 0xef, 0x55, 0xbf, 0xfc, 0x7b, 0xd7, 0x7b, + 0xf9, 0x2f, 0xfd, 0x00, 0xa6, 0x7f, 0xb0, 0xcc, 0xa2, 0x8a, 0x82, 0xef, 0xa2, 0x95, 0x00, 0x84, + 0x60, 0x9e, 0xae, 0x6c, 0xff, 0xab, 0x83, 0x56, 0xf6, 0x73, 0x37, 0x1d, 0x9f, 0xdb, 0x64, 0x78, + 0x61, 0x16, 0xce, 0x2e, 0xcc, 0xc2, 0xf9, 0x85, 0x59, 0x78, 0x9f, 0x9a, 0xc6, 0x30, 0x35, 0x8d, + 0xb3, 0xd4, 0x34, 0xce, 0x53, 0xd3, 0xf8, 0x9a, 0x9a, 0xc6, 0xc7, 0x6f, 0x66, 0xe1, 0x75, 0x65, + 0x2c, 0xdc, 0xcf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x42, 0xba, 0xdb, 0x12, 0x1a, 0x07, 0x00, 0x00, } diff --git a/vendor/k8s.io/api/storage/v1alpha1/types.go b/vendor/k8s.io/api/storage/v1alpha1/types.go index 76ad6dc0d..964bb5f7b 100644 --- a/vendor/k8s.io/api/storage/v1alpha1/types.go +++ b/vendor/k8s.io/api/storage/v1alpha1/types.go @@ -16,10 +16,7 @@ limitations under the License. package v1alpha1 -import ( - "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" // +genclient // +genclient:nonNamespaced @@ -84,14 +81,7 @@ type VolumeAttachmentSource struct { // +optional PersistentVolumeName *string `json:"persistentVolumeName,omitempty" protobuf:"bytes,1,opt,name=persistentVolumeName"` - // inlineVolumeSpec contains all the information necessary to attach - // a persistent volume defined by a pod's inline VolumeSource. This field - // is populated only for the CSIMigration feature. It contains - // translated fields from a pod's inline VolumeSource to a - // PersistentVolumeSpec. This field is alpha-level and is only - // honored by servers that enabled the CSIMigration feature. - // +optional - InlineVolumeSpec *v1.PersistentVolumeSpec `json:"inlineVolumeSpec,omitempty" protobuf:"bytes,2,opt,name=inlineVolumeSpec"` + // Placeholder for *VolumeSource to accommodate inline volumes in pods. } // VolumeAttachmentStatus is the status of a VolumeAttachment request. diff --git a/vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go index 3701b0864..32d7dcc52 100644 --- a/vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go @@ -49,7 +49,7 @@ func (VolumeAttachmentList) SwaggerDoc() map[string]string { } var map_VolumeAttachmentSource = map[string]string{ - "": "VolumeAttachmentSource represents a volume that should be attached. Right now only PersistenVolumes can be attached via external attacher, in future we may allow also inline volumes in pods. Exactly one member can be set.", + "": "VolumeAttachmentSource represents a volume that should be attached. Right now only PersistenVolumes can be attached via external attacher, in future we may allow also inline volumes in pods. 
Exactly one member can be set.", "persistentVolumeName": "Name of the persistent volume to attach.", } diff --git a/vendor/k8s.io/api/storage/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/storage/v1alpha1/zz_generated.deepcopy.go index 3debf9df1..e27c6ff3f 100644 --- a/vendor/k8s.io/api/storage/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/storage/v1alpha1/zz_generated.deepcopy.go @@ -21,7 +21,6 @@ limitations under the License. package v1alpha1 import ( - v1 "k8s.io/api/core/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) @@ -57,7 +56,7 @@ func (in *VolumeAttachment) DeepCopyObject() runtime.Object { func (in *VolumeAttachmentList) DeepCopyInto(out *VolumeAttachmentList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]VolumeAttachment, len(*in)) @@ -94,11 +93,6 @@ func (in *VolumeAttachmentSource) DeepCopyInto(out *VolumeAttachmentSource) { *out = new(string) **out = **in } - if in.InlineVolumeSpec != nil { - in, out := &in.InlineVolumeSpec, &out.InlineVolumeSpec - *out = new(v1.PersistentVolumeSpec) - (*in).DeepCopyInto(*out) - } return } diff --git a/vendor/k8s.io/api/storage/v1beta1/doc.go b/vendor/k8s.io/api/storage/v1beta1/doc.go index e3e3626e2..8957a4cf2 100644 --- a/vendor/k8s.io/api/storage/v1beta1/doc.go +++ b/vendor/k8s.io/api/storage/v1beta1/doc.go @@ -15,8 +15,6 @@ limitations under the License. */ // +k8s:deepcopy-gen=package -// +k8s:protobuf-gen=package // +groupName=storage.k8s.io // +k8s:openapi-gen=true - package v1beta1 // import "k8s.io/api/storage/v1beta1" diff --git a/vendor/k8s.io/api/storage/v1beta1/generated.pb.go b/vendor/k8s.io/api/storage/v1beta1/generated.pb.go index d76a35e65..fed8c7a63 100644 --- a/vendor/k8s.io/api/storage/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/storage/v1beta1/generated.pb.go @@ -14,8 +14,9 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// Code generated by protoc-gen-gogo. // source: k8s.io/kubernetes/vendor/k8s.io/api/storage/v1beta1/generated.proto +// DO NOT EDIT! /* Package v1beta1 is a generated protocol buffer package. @@ -24,13 +25,6 @@ limitations under the License. k8s.io/kubernetes/vendor/k8s.io/api/storage/v1beta1/generated.proto It has these top-level messages: - CSIDriver - CSIDriverList - CSIDriverSpec - CSINode - CSINodeDriver - CSINodeList - CSINodeSpec StorageClass StorageClassList VolumeAttachment @@ -66,74 +60,39 @@ var _ = math.Inf // proto package needs to be updated. 
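
Because the seven CSI messages are dropped from the v1beta1 file descriptor, every surviving message's position in fileDescriptorGenerated shifts down, which is what the index changes in the hunks below reflect (for example, []int{7} becoming []int{0} for StorageClass). A sketch of the gogo/protobuf boilerplate involved, illustrative only:

	// Reset clears the message to its zero value; ProtoMessage is a marker
	// method; Descriptor returns the gzipped file descriptor together with
	// this message's index within it, so removing earlier messages renumbers
	// every later one.
	func (m *StorageClass) Reset()      { *m = StorageClass{} }
	func (*StorageClass) ProtoMessage() {}
	func (*StorageClass) Descriptor() ([]byte, []int) {
		return fileDescriptorGenerated, []int{0} // was []int{7} before the CSI types were removed
	}
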
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *CSIDriver) Reset() { *m = CSIDriver{} } -func (*CSIDriver) ProtoMessage() {} -func (*CSIDriver) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } - -func (m *CSIDriverList) Reset() { *m = CSIDriverList{} } -func (*CSIDriverList) ProtoMessage() {} -func (*CSIDriverList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } - -func (m *CSIDriverSpec) Reset() { *m = CSIDriverSpec{} } -func (*CSIDriverSpec) ProtoMessage() {} -func (*CSIDriverSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } - -func (m *CSINode) Reset() { *m = CSINode{} } -func (*CSINode) ProtoMessage() {} -func (*CSINode) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } - -func (m *CSINodeDriver) Reset() { *m = CSINodeDriver{} } -func (*CSINodeDriver) ProtoMessage() {} -func (*CSINodeDriver) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } - -func (m *CSINodeList) Reset() { *m = CSINodeList{} } -func (*CSINodeList) ProtoMessage() {} -func (*CSINodeList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } - -func (m *CSINodeSpec) Reset() { *m = CSINodeSpec{} } -func (*CSINodeSpec) ProtoMessage() {} -func (*CSINodeSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } - func (m *StorageClass) Reset() { *m = StorageClass{} } func (*StorageClass) ProtoMessage() {} -func (*StorageClass) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } +func (*StorageClass) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } func (m *StorageClassList) Reset() { *m = StorageClassList{} } func (*StorageClassList) ProtoMessage() {} -func (*StorageClassList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } +func (*StorageClassList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } func (m *VolumeAttachment) Reset() { *m = VolumeAttachment{} } func (*VolumeAttachment) ProtoMessage() {} -func (*VolumeAttachment) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } +func (*VolumeAttachment) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } func (m *VolumeAttachmentList) Reset() { *m = VolumeAttachmentList{} } func (*VolumeAttachmentList) ProtoMessage() {} -func (*VolumeAttachmentList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } +func (*VolumeAttachmentList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } func (m *VolumeAttachmentSource) Reset() { *m = VolumeAttachmentSource{} } func (*VolumeAttachmentSource) ProtoMessage() {} -func (*VolumeAttachmentSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } +func (*VolumeAttachmentSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } func (m *VolumeAttachmentSpec) Reset() { *m = VolumeAttachmentSpec{} } func (*VolumeAttachmentSpec) ProtoMessage() {} -func (*VolumeAttachmentSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } +func (*VolumeAttachmentSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } func (m *VolumeAttachmentStatus) Reset() { *m = VolumeAttachmentStatus{} } func (*VolumeAttachmentStatus) ProtoMessage() {} -func (*VolumeAttachmentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } +func 
(*VolumeAttachmentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } func (m *VolumeError) Reset() { *m = VolumeError{} } func (*VolumeError) ProtoMessage() {} -func (*VolumeError) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } +func (*VolumeError) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } func init() { - proto.RegisterType((*CSIDriver)(nil), "k8s.io.api.storage.v1beta1.CSIDriver") - proto.RegisterType((*CSIDriverList)(nil), "k8s.io.api.storage.v1beta1.CSIDriverList") - proto.RegisterType((*CSIDriverSpec)(nil), "k8s.io.api.storage.v1beta1.CSIDriverSpec") - proto.RegisterType((*CSINode)(nil), "k8s.io.api.storage.v1beta1.CSINode") - proto.RegisterType((*CSINodeDriver)(nil), "k8s.io.api.storage.v1beta1.CSINodeDriver") - proto.RegisterType((*CSINodeList)(nil), "k8s.io.api.storage.v1beta1.CSINodeList") - proto.RegisterType((*CSINodeSpec)(nil), "k8s.io.api.storage.v1beta1.CSINodeSpec") proto.RegisterType((*StorageClass)(nil), "k8s.io.api.storage.v1beta1.StorageClass") proto.RegisterType((*StorageClassList)(nil), "k8s.io.api.storage.v1beta1.StorageClassList") proto.RegisterType((*VolumeAttachment)(nil), "k8s.io.api.storage.v1beta1.VolumeAttachment") @@ -143,259 +102,6 @@ func init() { proto.RegisterType((*VolumeAttachmentStatus)(nil), "k8s.io.api.storage.v1beta1.VolumeAttachmentStatus") proto.RegisterType((*VolumeError)(nil), "k8s.io.api.storage.v1beta1.VolumeError") } -func (m *CSIDriver) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CSIDriver) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n2 - return i, nil -} - -func (m *CSIDriverList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CSIDriverList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n3, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n3 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *CSIDriverSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CSIDriverSpec) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.AttachRequired != nil { - dAtA[i] = 0x8 - i++ - if *m.AttachRequired { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - if m.PodInfoOnMount != nil { - dAtA[i] = 0x10 - i++ - if *m.PodInfoOnMount { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - return i, nil -} - -func (m *CSINode) Marshal() (dAtA []byte, err error) { - size := 
m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CSINode) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n4, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n4 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n5, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n5 - return i, nil -} - -func (m *CSINodeDriver) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CSINodeDriver) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.NodeID))) - i += copy(dAtA[i:], m.NodeID) - if len(m.TopologyKeys) > 0 { - for _, s := range m.TopologyKeys { - dAtA[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - return i, nil -} - -func (m *CSINodeList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CSINodeList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n6, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n6 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *CSINodeSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CSINodeSpec) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Drivers) > 0 { - for _, msg := range m.Drivers { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - func (m *StorageClass) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -414,11 +120,11 @@ func (m *StorageClass) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n7, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n7 + i += n1 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Provisioner))) @@ -515,11 +221,11 @@ func (m *StorageClassList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n8, err := m.ListMeta.MarshalTo(dAtA[i:]) + n2, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n8 + i += n2 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] 
= 0x12 @@ -553,27 +259,27 @@ func (m *VolumeAttachment) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n9, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n3, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n9 + i += n3 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n10, err := m.Spec.MarshalTo(dAtA[i:]) + n4, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n10 + i += n4 dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n11, err := m.Status.MarshalTo(dAtA[i:]) + n5, err := m.Status.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n11 + i += n5 return i, nil } @@ -595,11 +301,11 @@ func (m *VolumeAttachmentList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n12, err := m.ListMeta.MarshalTo(dAtA[i:]) + n6, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n12 + i += n6 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -636,16 +342,6 @@ func (m *VolumeAttachmentSource) MarshalTo(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PersistentVolumeName))) i += copy(dAtA[i:], *m.PersistentVolumeName) } - if m.InlineVolumeSpec != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.InlineVolumeSpec.Size())) - n13, err := m.InlineVolumeSpec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n13 - } return i, nil } @@ -671,11 +367,11 @@ func (m *VolumeAttachmentSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Source.Size())) - n14, err := m.Source.MarshalTo(dAtA[i:]) + n7, err := m.Source.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n14 + i += n7 dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.NodeName))) @@ -732,21 +428,21 @@ func (m *VolumeAttachmentStatus) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.AttachError.Size())) - n15, err := m.AttachError.MarshalTo(dAtA[i:]) + n8, err := m.AttachError.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n15 + i += n8 } if m.DetachError != nil { dAtA[i] = 0x22 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.DetachError.Size())) - n16, err := m.DetachError.MarshalTo(dAtA[i:]) + n9, err := m.DetachError.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n16 + i += n9 } return i, nil } @@ -769,11 +465,11 @@ func (m *VolumeError) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Time.Size())) - n17, err := m.Time.MarshalTo(dAtA[i:]) + n10, err := m.Time.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n17 + i += n10 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) @@ -781,6 +477,24 @@ func (m *VolumeError) MarshalTo(dAtA []byte) (int, error) { return i, nil } +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + 
dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -790,100 +504,12 @@ func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { dAtA[offset] = uint8(v) return offset + 1 } -func (m *CSIDriver) Size() (n int) { +func (m *StorageClass) Size() (n int) { var l int _ = l l = m.ObjectMeta.Size() n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *CSIDriverList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *CSIDriverSpec) Size() (n int) { - var l int - _ = l - if m.AttachRequired != nil { - n += 2 - } - if m.PodInfoOnMount != nil { - n += 2 - } - return n -} - -func (m *CSINode) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *CSINodeDriver) Size() (n int) { - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.NodeID) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.TopologyKeys) > 0 { - for _, s := range m.TopologyKeys { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *CSINodeList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *CSINodeSpec) Size() (n int) { - var l int - _ = l - if len(m.Drivers) > 0 { - for _, e := range m.Drivers { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *StorageClass) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Provisioner) + l = len(m.Provisioner) n += 1 + l + sovGenerated(uint64(l)) if len(m.Parameters) > 0 { for k, v := range m.Parameters { @@ -966,10 +592,6 @@ func (m *VolumeAttachmentSource) Size() (n int) { l = len(*m.PersistentVolumeName) n += 1 + l + sovGenerated(uint64(l)) } - if m.InlineVolumeSpec != nil { - l = m.InlineVolumeSpec.Size() - n += 1 + l + sovGenerated(uint64(l)) - } return n } @@ -1031,83 +653,6 @@ func sovGenerated(x uint64) (n int) { func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } -func (this *CSIDriver) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CSIDriver{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "CSIDriverSpec", "CSIDriverSpec", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *CSIDriverList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CSIDriverList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "CSIDriver", "CSIDriver", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func 
(this *CSIDriverSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CSIDriverSpec{`, - `AttachRequired:` + valueToStringGenerated(this.AttachRequired) + `,`, - `PodInfoOnMount:` + valueToStringGenerated(this.PodInfoOnMount) + `,`, - `}`, - }, "") - return s -} -func (this *CSINode) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CSINode{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "CSINodeSpec", "CSINodeSpec", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *CSINodeDriver) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CSINodeDriver{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `NodeID:` + fmt.Sprintf("%v", this.NodeID) + `,`, - `TopologyKeys:` + fmt.Sprintf("%v", this.TopologyKeys) + `,`, - `}`, - }, "") - return s -} -func (this *CSINodeList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CSINodeList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "CSINode", "CSINode", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *CSINodeSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CSINodeSpec{`, - `Drivers:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Drivers), "CSINodeDriver", "CSINodeDriver", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} func (this *StorageClass) String() string { if this == nil { return "nil" @@ -1174,817 +719,64 @@ func (this *VolumeAttachmentSource) String() string { return "nil" } s := strings.Join([]string{`&VolumeAttachmentSource{`, - `PersistentVolumeName:` + valueToStringGenerated(this.PersistentVolumeName) + `,`, - `InlineVolumeSpec:` + strings.Replace(fmt.Sprintf("%v", this.InlineVolumeSpec), "PersistentVolumeSpec", "k8s_io_api_core_v1.PersistentVolumeSpec", 1) + `,`, - `}`, - }, "") - return s -} -func (this *VolumeAttachmentSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&VolumeAttachmentSpec{`, - `Attacher:` + fmt.Sprintf("%v", this.Attacher) + `,`, - `Source:` + strings.Replace(strings.Replace(this.Source.String(), "VolumeAttachmentSource", "VolumeAttachmentSource", 1), `&`, ``, 1) + `,`, - `NodeName:` + fmt.Sprintf("%v", this.NodeName) + `,`, - `}`, - }, "") - return s -} -func (this *VolumeAttachmentStatus) String() string { - if this == nil { - return "nil" - } - keysForAttachmentMetadata := make([]string, 0, len(this.AttachmentMetadata)) - for k := range this.AttachmentMetadata { - keysForAttachmentMetadata = append(keysForAttachmentMetadata, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForAttachmentMetadata) - mapStringForAttachmentMetadata := "map[string]string{" - for _, k := range keysForAttachmentMetadata { - mapStringForAttachmentMetadata += fmt.Sprintf("%v: %v,", k, this.AttachmentMetadata[k]) - } - mapStringForAttachmentMetadata += "}" - s := strings.Join([]string{`&VolumeAttachmentStatus{`, - `Attached:` + fmt.Sprintf("%v", this.Attached) + `,`, - `AttachmentMetadata:` + mapStringForAttachmentMetadata + `,`, - `AttachError:` + strings.Replace(fmt.Sprintf("%v", 
this.AttachError), "VolumeError", "VolumeError", 1) + `,`, - `DetachError:` + strings.Replace(fmt.Sprintf("%v", this.DetachError), "VolumeError", "VolumeError", 1) + `,`, - `}`, - }, "") - return s -} -func (this *VolumeError) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&VolumeError{`, - `Time:` + strings.Replace(strings.Replace(this.Time.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *CSIDriver) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CSIDriver: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CSIDriver: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CSIDriverList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CSIDriverList: wiretype end group for non-group") - } - if fieldNum <= 0 
{ - return fmt.Errorf("proto: CSIDriverList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, CSIDriver{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CSIDriverSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CSIDriverSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CSIDriverSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AttachRequired", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.AttachRequired = &b - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field PodInfoOnMount", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.PodInfoOnMount = &b - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF 
- } - return nil -} -func (m *CSINode) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CSINode: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CSINode: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CSINodeDriver) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CSINodeDriver: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CSINodeDriver: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - 
iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.NodeID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TopologyKeys", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.TopologyKeys = append(m.TopologyKeys, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CSINodeList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CSINodeList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CSINodeList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, CSINode{}) - if err := 
m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } + `PersistentVolumeName:` + valueToStringGenerated(this.PersistentVolumeName) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeAttachmentSpec) String() string { + if this == nil { + return "nil" } - - if iNdEx > l { - return io.ErrUnexpectedEOF + s := strings.Join([]string{`&VolumeAttachmentSpec{`, + `Attacher:` + fmt.Sprintf("%v", this.Attacher) + `,`, + `Source:` + strings.Replace(strings.Replace(this.Source.String(), "VolumeAttachmentSource", "VolumeAttachmentSource", 1), `&`, ``, 1) + `,`, + `NodeName:` + fmt.Sprintf("%v", this.NodeName) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeAttachmentStatus) String() string { + if this == nil { + return "nil" } - return nil + keysForAttachmentMetadata := make([]string, 0, len(this.AttachmentMetadata)) + for k := range this.AttachmentMetadata { + keysForAttachmentMetadata = append(keysForAttachmentMetadata, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAttachmentMetadata) + mapStringForAttachmentMetadata := "map[string]string{" + for _, k := range keysForAttachmentMetadata { + mapStringForAttachmentMetadata += fmt.Sprintf("%v: %v,", k, this.AttachmentMetadata[k]) + } + mapStringForAttachmentMetadata += "}" + s := strings.Join([]string{`&VolumeAttachmentStatus{`, + `Attached:` + fmt.Sprintf("%v", this.Attached) + `,`, + `AttachmentMetadata:` + mapStringForAttachmentMetadata + `,`, + `AttachError:` + strings.Replace(fmt.Sprintf("%v", this.AttachError), "VolumeError", "VolumeError", 1) + `,`, + `DetachError:` + strings.Replace(fmt.Sprintf("%v", this.DetachError), "VolumeError", "VolumeError", 1) + `,`, + `}`, + }, "") + return s } -func (m *CSINodeSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CSINodeSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CSINodeSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Drivers", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Drivers = append(m.Drivers, CSINodeDriver{}) - if err := m.Drivers[len(m.Drivers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - 
return io.ErrUnexpectedEOF - } - iNdEx += skippy - } +func (this *VolumeError) String() string { + if this == nil { + return "nil" } - - if iNdEx > l { - return io.ErrUnexpectedEOF + s := strings.Join([]string{`&VolumeError{`, + `Time:` + strings.Replace(strings.Replace(this.Time.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" } - return nil + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) } func (m *StorageClass) Unmarshal(dAtA []byte) error { l := len(dAtA) @@ -2100,14 +892,51 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey if m.Parameters == nil { m.Parameters = make(map[string]string) } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 + if iNdEx < postIndex { + var valuekey uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2117,80 +946,41 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + valuekey |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { + if iNdEx 
>= l { return io.ErrUnexpectedEOF } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break } - iNdEx += skippy } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + m.Parameters[mapkey] = mapvalue + } else { + var mapvalue string + m.Parameters[mapkey] = mapvalue } - m.Parameters[mapkey] = mapvalue iNdEx = postIndex case 4: if wireType != 2 { @@ -2775,39 +1565,6 @@ func (m *VolumeAttachmentSource) Unmarshal(dAtA []byte) error { s := string(dAtA[iNdEx:postIndex]) m.PersistentVolumeName = &s iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field InlineVolumeSpec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.InlineVolumeSpec == nil { - m.InlineVolumeSpec = &k8s_io_api_core_v1.PersistentVolumeSpec{} - } - if err := m.InlineVolumeSpec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -3042,14 +1799,51 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey if m.AttachmentMetadata == nil { m.AttachmentMetadata = make(map[string]string) } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 + if iNdEx < postIndex { + var valuekey uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -3059,80 +1853,41 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + valuekey |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - fieldNum := 
int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break } - iNdEx += skippy } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + m.AttachmentMetadata[mapkey] = mapvalue + } else { + var mapvalue string + m.AttachmentMetadata[mapkey] = mapvalue } - m.AttachmentMetadata[mapkey] = mapvalue iNdEx = postIndex case 3: if wireType != 2 { @@ -3440,83 +2195,67 @@ func init() { } var fileDescriptorGenerated = []byte{ - // 1247 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x57, 0x4d, 0x6f, 0x1b, 0x45, - 0x18, 0xce, 0xc6, 0xf9, 0x1c, 0x27, 0xad, 0x33, 0x44, 0x60, 0x7c, 0xb0, 0x23, 0x23, 0x68, 0x5a, - 0xb5, 0xeb, 0xb6, 0x2a, 0xa8, 0xaa, 0xc4, 0x21, 0x4e, 0x23, 0xe1, 0xb6, 0x4e, 0xc3, 0x24, 0xaa, - 0x50, 0xc5, 0x81, 0xc9, 0xee, 0x5b, 0x67, 0x1b, 0xef, 0xce, 0x76, 0x76, 0x6c, 0xf0, 0x8d, 0x13, - 0x1c, 0x41, 0x1c, 0xf8, 0x05, 0xfc, 0x05, 0x90, 0xe0, 0xc2, 0x91, 0x9e, 0x50, 0xc5, 0xa9, 0x27, - 0x8b, 0x2e, 0xff, 0xa2, 0xe2, 0x80, 0x66, 0x76, 0xec, 0xfd, 0xb0, 0xdd, 0x38, 0x1c, 0x7c, 0xf3, - 0xbc, 0x1f, 0xcf, 0xfb, 0xf5, 0xcc, 0x3b, 0x6b, 0xb4, 0x7b, 0x7a, 0x3b, 0x30, 0x1d, 0x56, 0x3b, - 0xed, 0x1c, 0x03, 0xf7, 0x40, 0x40, 0x50, 0xeb, 0x82, 0x67, 0x33, 0x5e, 0xd3, 0x0a, 0xea, 0x3b, - 0xb5, 0x40, 0x30, 0x4e, 0x5b, 0x50, 0xeb, 0xde, 0x38, 0x06, 0x41, 0x6f, 0xd4, 0x5a, 0xe0, 0x01, - 0xa7, 0x02, 0x6c, 0xd3, 0xe7, 0x4c, 0x30, 0x5c, 0x8a, 0x6c, 0x4d, 0xea, 0x3b, 0xa6, 0xb6, 0x35, - 0xb5, 0x6d, 0xe9, 0x5a, 0xcb, 0x11, 0x27, 0x9d, 0x63, 0xd3, 0x62, 0x6e, 0xad, 0xc5, 0x5a, 0xac, - 0xa6, 
0x5c, 0x8e, 0x3b, 0x4f, 0xd4, 0x49, 0x1d, 0xd4, 0xaf, 0x08, 0xaa, 0x54, 0x4d, 0x84, 0xb5, - 0x18, 0x97, 0x31, 0xb3, 0xe1, 0x4a, 0xb7, 0x62, 0x1b, 0x97, 0x5a, 0x27, 0x8e, 0x07, 0xbc, 0x57, - 0xf3, 0x4f, 0x5b, 0x52, 0x10, 0xd4, 0x5c, 0x10, 0x74, 0x9c, 0x57, 0x6d, 0x92, 0x17, 0xef, 0x78, - 0xc2, 0x71, 0x61, 0xc4, 0xe1, 0xa3, 0xb3, 0x1c, 0x02, 0xeb, 0x04, 0x5c, 0x9a, 0xf5, 0xab, 0xfe, - 0x66, 0xa0, 0xd5, 0xdd, 0xc3, 0xc6, 0x5d, 0xee, 0x74, 0x81, 0xe3, 0x2f, 0xd0, 0x8a, 0xcc, 0xc8, - 0xa6, 0x82, 0x16, 0x8d, 0x2d, 0x63, 0x3b, 0x7f, 0xf3, 0xba, 0x19, 0xb7, 0x6b, 0x08, 0x6c, 0xfa, - 0xa7, 0x2d, 0x29, 0x08, 0x4c, 0x69, 0x6d, 0x76, 0x6f, 0x98, 0x0f, 0x8f, 0x9f, 0x82, 0x25, 0x9a, - 0x20, 0x68, 0x1d, 0x3f, 0xef, 0x57, 0xe6, 0xc2, 0x7e, 0x05, 0xc5, 0x32, 0x32, 0x44, 0xc5, 0xf7, - 0xd1, 0x42, 0xe0, 0x83, 0x55, 0x9c, 0x57, 0xe8, 0x97, 0xcd, 0xc9, 0xc3, 0x30, 0x87, 0x69, 0x1d, - 0xfa, 0x60, 0xd5, 0xd7, 0x34, 0xec, 0x82, 0x3c, 0x11, 0x05, 0x52, 0xfd, 0xd5, 0x40, 0xeb, 0x43, - 0xab, 0x07, 0x4e, 0x20, 0xf0, 0xe7, 0x23, 0x05, 0x98, 0xd3, 0x15, 0x20, 0xbd, 0x55, 0xfa, 0x05, - 0x1d, 0x67, 0x65, 0x20, 0x49, 0x24, 0x7f, 0x0f, 0x2d, 0x3a, 0x02, 0xdc, 0xa0, 0x38, 0xbf, 0x95, - 0xdb, 0xce, 0xdf, 0x7c, 0x7f, 0xaa, 0xec, 0xeb, 0xeb, 0x1a, 0x71, 0xb1, 0x21, 0x7d, 0x49, 0x04, - 0x51, 0xfd, 0x36, 0x99, 0xbb, 0xac, 0x09, 0xdf, 0x41, 0x17, 0xa8, 0x10, 0xd4, 0x3a, 0x21, 0xf0, - 0xac, 0xe3, 0x70, 0xb0, 0x55, 0x05, 0x2b, 0x75, 0x1c, 0xf6, 0x2b, 0x17, 0x76, 0x52, 0x1a, 0x92, - 0xb1, 0x94, 0xbe, 0x3e, 0xb3, 0x1b, 0xde, 0x13, 0xf6, 0xd0, 0x6b, 0xb2, 0x8e, 0x27, 0x54, 0x83, - 0xb5, 0xef, 0x41, 0x4a, 0x43, 0x32, 0x96, 0xd5, 0x5f, 0x0c, 0xb4, 0xbc, 0x7b, 0xd8, 0xd8, 0x67, - 0x36, 0xcc, 0x80, 0x00, 0x8d, 0x14, 0x01, 0x2e, 0x9d, 0xd1, 0x42, 0x99, 0xd4, 0xc4, 0xf1, 0x7f, - 0x17, 0xb5, 0x50, 0xda, 0x68, 0xfe, 0x6e, 0xa1, 0x05, 0x8f, 0xba, 0xa0, 0x52, 0x5f, 0x8d, 0x7d, - 0xf6, 0xa9, 0x0b, 0x44, 0x69, 0xf0, 0x07, 0x68, 0xc9, 0x63, 0x36, 0x34, 0xee, 0xaa, 0x04, 0x56, - 0xeb, 0x17, 0xb4, 0xcd, 0xd2, 0xbe, 0x92, 0x12, 0xad, 0xc5, 0xb7, 0xd0, 0x9a, 0x60, 0x3e, 0x6b, - 0xb3, 0x56, 0xef, 0x3e, 0xf4, 0x82, 0x62, 0x6e, 0x2b, 0xb7, 0xbd, 0x5a, 0x2f, 0x84, 0xfd, 0xca, - 0xda, 0x51, 0x42, 0x4e, 0x52, 0x56, 0xd5, 0x9f, 0x0d, 0x94, 0xd7, 0x19, 0xcd, 0x80, 0x8e, 0x9f, - 0xa4, 0xe9, 0xf8, 0xde, 0x14, 0xbd, 0x9c, 0x40, 0x46, 0x6b, 0x98, 0xb6, 0x62, 0xe2, 0x11, 0x5a, - 0xb6, 0x55, 0x43, 0x83, 0xa2, 0xa1, 0xa0, 0x2f, 0x4f, 0x01, 0xad, 0xd9, 0x7e, 0x51, 0x07, 0x58, - 0x8e, 0xce, 0x01, 0x19, 0x40, 0x55, 0x7f, 0x58, 0x42, 0x6b, 0x87, 0x91, 0xef, 0x6e, 0x9b, 0x06, - 0xc1, 0x0c, 0xc8, 0xf6, 0x21, 0xca, 0xfb, 0x9c, 0x75, 0x9d, 0xc0, 0x61, 0x1e, 0x70, 0x3d, 0xf2, - 0xb7, 0xb4, 0x4b, 0xfe, 0x20, 0x56, 0x91, 0xa4, 0x1d, 0x6e, 0x23, 0xe4, 0x53, 0x4e, 0x5d, 0x10, - 0xb2, 0x05, 0x39, 0xd5, 0x82, 0xdb, 0x6f, 0x6a, 0x41, 0xb2, 0x2c, 0xf3, 0x60, 0xe8, 0xba, 0xe7, - 0x09, 0xde, 0x8b, 0x53, 0x8c, 0x15, 0x24, 0x81, 0x8f, 0x4f, 0xd1, 0x3a, 0x07, 0xab, 0x4d, 0x1d, - 0xf7, 0x80, 0xb5, 0x1d, 0xab, 0x57, 0x5c, 0x50, 0x69, 0xee, 0x85, 0xfd, 0xca, 0x3a, 0x49, 0x2a, - 0x5e, 0xf7, 0x2b, 0xd7, 0x47, 0x5f, 0x1c, 0xf3, 0x00, 0x78, 0xe0, 0x04, 0x02, 0x3c, 0xf1, 0x88, - 0xb5, 0x3b, 0x2e, 0xa4, 0x7c, 0x48, 0x1a, 0x5b, 0xf2, 0xda, 0x95, 0xb7, 0xfe, 0xa1, 0x2f, 0x1c, - 0xe6, 0x05, 0xc5, 0xc5, 0x98, 0xd7, 0xcd, 0x84, 0x9c, 0xa4, 0xac, 0xf0, 0x03, 0xb4, 0x49, 0xdb, - 0x6d, 0xf6, 0x65, 0x14, 0x60, 0xef, 0x2b, 0x9f, 0x7a, 0xb2, 0x55, 0xc5, 0x25, 0xb5, 0x64, 0x8a, - 0x61, 0xbf, 0xb2, 0xb9, 0x33, 0x46, 0x4f, 0xc6, 0x7a, 0xe1, 0xcf, 0xd0, 0x46, 0x57, 0x89, 0xea, - 0x8e, 0x67, 0x3b, 0x5e, 0xab, 
0xc9, 0x6c, 0x28, 0x2e, 0xab, 0xa2, 0xaf, 0x84, 0xfd, 0xca, 0xc6, - 0xa3, 0xac, 0xf2, 0xf5, 0x38, 0x21, 0x19, 0x05, 0xc1, 0xcf, 0xd0, 0x86, 0x8a, 0x08, 0xb6, 0xbe, - 0xa4, 0x0e, 0x04, 0xc5, 0x15, 0x35, 0xbf, 0xed, 0xe4, 0xfc, 0x64, 0xeb, 0x24, 0x91, 0x06, 0x57, - 0xf9, 0x10, 0xda, 0x60, 0x09, 0xc6, 0x8f, 0x80, 0xbb, 0xf5, 0x77, 0xf5, 0xbc, 0x36, 0x76, 0xb2, - 0x50, 0x64, 0x14, 0xbd, 0xf4, 0x31, 0xba, 0x98, 0x19, 0x38, 0x2e, 0xa0, 0xdc, 0x29, 0xf4, 0xa2, - 0x25, 0x44, 0xe4, 0x4f, 0xbc, 0x89, 0x16, 0xbb, 0xb4, 0xdd, 0x81, 0x88, 0x81, 0x24, 0x3a, 0xdc, - 0x99, 0xbf, 0x6d, 0x54, 0x7f, 0x37, 0x50, 0x21, 0xc9, 0x9e, 0x19, 0xac, 0x8d, 0x66, 0x7a, 0x6d, - 0x6c, 0x4f, 0x4b, 0xec, 0x09, 0xbb, 0xe3, 0xa7, 0x79, 0x54, 0x88, 0x86, 0x13, 0xbd, 0x51, 0x2e, - 0x78, 0x62, 0x06, 0x57, 0x9b, 0xa4, 0xde, 0x91, 0xeb, 0x6f, 0x2a, 0x22, 0x9b, 0xdd, 0xa4, 0x07, - 0x05, 0x3f, 0x46, 0x4b, 0x81, 0xa0, 0xa2, 0x23, 0xef, 0xbc, 0x44, 0xbd, 0x79, 0x2e, 0x54, 0xe5, - 0x19, 0x3f, 0x28, 0xd1, 0x99, 0x68, 0xc4, 0xea, 0x1f, 0x06, 0xda, 0xcc, 0xba, 0xcc, 0x60, 0xd8, - 0x9f, 0xa6, 0x87, 0x7d, 0xf5, 0x3c, 0x15, 0x4d, 0x18, 0xf8, 0x5f, 0x06, 0x7a, 0x7b, 0xa4, 0x78, - 0xd6, 0xe1, 0x16, 0xc8, 0x3d, 0xe1, 0x67, 0xb6, 0xd1, 0x7e, 0xfc, 0x1e, 0xab, 0x3d, 0x71, 0x30, - 0x46, 0x4f, 0xc6, 0x7a, 0xe1, 0xa7, 0xa8, 0xe0, 0x78, 0x6d, 0xc7, 0x83, 0x48, 0x76, 0x18, 0x8f, - 0x7b, 0xec, 0x65, 0xce, 0x22, 0xab, 0x31, 0x6f, 0x86, 0xfd, 0x4a, 0xa1, 0x91, 0x41, 0x21, 0x23, - 0xb8, 0xd5, 0x3f, 0xc7, 0x8c, 0x47, 0xbd, 0x85, 0x57, 0xd1, 0x4a, 0xf4, 0xad, 0x05, 0x5c, 0x97, - 0x31, 0x6c, 0xf7, 0x8e, 0x96, 0x93, 0xa1, 0x85, 0x62, 0x90, 0x6a, 0x85, 0x4e, 0xf4, 0x7c, 0x0c, - 0x52, 0x9e, 0x09, 0x06, 0xa9, 0x33, 0xd1, 0x88, 0x32, 0x13, 0xf9, 0x71, 0xa2, 0x1a, 0x9a, 0x4b, - 0x67, 0xb2, 0xaf, 0xe5, 0x64, 0x68, 0x51, 0xfd, 0x37, 0x37, 0x66, 0x4a, 0x8a, 0x8a, 0x89, 0x92, - 0x06, 0x9f, 0x98, 0xd9, 0x92, 0xec, 0x61, 0x49, 0x36, 0xfe, 0xd1, 0x40, 0x98, 0x0e, 0x21, 0x9a, - 0x03, 0xaa, 0x46, 0x7c, 0xba, 0x77, 0xfe, 0x1b, 0x62, 0xee, 0x8c, 0x80, 0x45, 0xef, 0x64, 0x49, - 0x27, 0x81, 0x47, 0x0d, 0xc8, 0x98, 0x0c, 0xb0, 0x83, 0xf2, 0x91, 0x74, 0x8f, 0x73, 0xc6, 0xf5, - 0x95, 0xbd, 0x74, 0x76, 0x42, 0xca, 0xbc, 0x5e, 0x96, 0x5f, 0x00, 0x3b, 0xb1, 0xff, 0xeb, 0x7e, - 0x25, 0x9f, 0xd0, 0x93, 0x24, 0xb6, 0x0c, 0x65, 0x43, 0x1c, 0x6a, 0xe1, 0x7f, 0x84, 0xba, 0x0b, - 0x93, 0x43, 0x25, 0xb0, 0x4b, 0x7b, 0xe8, 0x9d, 0x09, 0x0d, 0x3a, 0xd7, 0xbb, 0xf2, 0x8d, 0x81, - 0x92, 0x31, 0xf0, 0x03, 0xb4, 0x20, 0xff, 0x06, 0xea, 0x0d, 0x73, 0x65, 0xba, 0x0d, 0x73, 0xe4, - 0xb8, 0x10, 0x2f, 0x4a, 0x79, 0x22, 0x0a, 0x05, 0x5f, 0x46, 0xcb, 0x2e, 0x04, 0x01, 0x6d, 0xe9, - 0xc8, 0xf1, 0x57, 0x5f, 0x33, 0x12, 0x93, 0x81, 0xbe, 0x7e, 0xed, 0xf9, 0xab, 0xf2, 0xdc, 0x8b, - 0x57, 0xe5, 0xb9, 0x97, 0xaf, 0xca, 0x73, 0x5f, 0x87, 0x65, 0xe3, 0x79, 0x58, 0x36, 0x5e, 0x84, - 0x65, 0xe3, 0x65, 0x58, 0x36, 0xfe, 0x0e, 0xcb, 0xc6, 0xf7, 0xff, 0x94, 0xe7, 0x1e, 0x2f, 0xeb, - 0xbe, 0xfd, 0x17, 0x00, 0x00, 0xff, 0xff, 0xf9, 0xfc, 0xf7, 0xf5, 0xe3, 0x0f, 0x00, 0x00, + // 988 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x56, 0x4d, 0x6f, 0x1b, 0x45, + 0x18, 0xce, 0xc6, 0xf9, 0x70, 0xc6, 0x09, 0x4d, 0x86, 0x08, 0x8c, 0x0f, 0x76, 0xe4, 0x0b, 0xa6, + 0x6a, 0x77, 0x9b, 0xa8, 0xa0, 0x08, 0x89, 0x83, 0xb7, 0xe4, 0x00, 0x8a, 0xdb, 0x30, 0x89, 0x2a, + 0x54, 0x71, 0x60, 0xb2, 0xfb, 0x76, 0xb3, 0x78, 0x77, 0x67, 0x99, 0x19, 0x1b, 0x72, 0xe3, 0xc4, + 0x19, 0x71, 0xe0, 0x17, 0xf0, 0x3f, 0x38, 0x92, 0x13, 0xea, 0xb1, 0x27, 0x8b, 0x2c, 0xff, 0x22, + 0xe2, 0x80, 
0x66, 0x76, 0x62, 0xaf, 0xbd, 0x0e, 0x6d, 0x7a, 0xe8, 0xcd, 0xef, 0xc7, 0xf3, 0xbc, + 0xdf, 0xb3, 0x46, 0x8f, 0xfa, 0xfb, 0xc2, 0x0e, 0x99, 0xd3, 0x1f, 0x9c, 0x02, 0x4f, 0x40, 0x82, + 0x70, 0x86, 0x90, 0xf8, 0x8c, 0x3b, 0xc6, 0x40, 0xd3, 0xd0, 0x11, 0x92, 0x71, 0x1a, 0x80, 0x33, + 0xdc, 0x3d, 0x05, 0x49, 0x77, 0x9d, 0x00, 0x12, 0xe0, 0x54, 0x82, 0x6f, 0xa7, 0x9c, 0x49, 0x86, + 0x1b, 0xb9, 0xaf, 0x4d, 0xd3, 0xd0, 0x36, 0xbe, 0xb6, 0xf1, 0x6d, 0xdc, 0x0f, 0x42, 0x79, 0x36, + 0x38, 0xb5, 0x3d, 0x16, 0x3b, 0x01, 0x0b, 0x98, 0xa3, 0x21, 0xa7, 0x83, 0xe7, 0x5a, 0xd2, 0x82, + 0xfe, 0x95, 0x53, 0x35, 0xda, 0x85, 0xb0, 0x1e, 0xe3, 0x2a, 0xe6, 0x6c, 0xb8, 0xc6, 0xc3, 0x89, + 0x4f, 0x4c, 0xbd, 0xb3, 0x30, 0x01, 0x7e, 0xee, 0xa4, 0xfd, 0x40, 0x29, 0x84, 0x13, 0x83, 0xa4, + 0xf3, 0x50, 0xce, 0x4d, 0x28, 0x3e, 0x48, 0x64, 0x18, 0x43, 0x09, 0xf0, 0xc9, 0xab, 0x00, 0xc2, + 0x3b, 0x83, 0x98, 0xce, 0xe2, 0xda, 0xbf, 0xae, 0xa0, 0xf5, 0xe3, 0xbc, 0x0b, 0x8f, 0x22, 0x2a, + 0x04, 0xfe, 0x16, 0x55, 0x55, 0x52, 0x3e, 0x95, 0xb4, 0x6e, 0xed, 0x58, 0x9d, 0xda, 0xde, 0x03, + 0x7b, 0xd2, 0xb1, 0x31, 0xb7, 0x9d, 0xf6, 0x03, 0xa5, 0x10, 0xb6, 0xf2, 0xb6, 0x87, 0xbb, 0xf6, + 0x93, 0xd3, 0xef, 0xc0, 0x93, 0x3d, 0x90, 0xd4, 0xc5, 0x17, 0xa3, 0xd6, 0x42, 0x36, 0x6a, 0xa1, + 0x89, 0x8e, 0x8c, 0x59, 0xf1, 0xc7, 0xa8, 0x96, 0x72, 0x36, 0x0c, 0x45, 0xc8, 0x12, 0xe0, 0xf5, + 0xc5, 0x1d, 0xab, 0xb3, 0xe6, 0xbe, 0x6b, 0x20, 0xb5, 0xa3, 0x89, 0x89, 0x14, 0xfd, 0x70, 0x84, + 0x50, 0x4a, 0x39, 0x8d, 0x41, 0x02, 0x17, 0xf5, 0xca, 0x4e, 0xa5, 0x53, 0xdb, 0xdb, 0xb7, 0x6f, + 0x1e, 0xa6, 0x5d, 0x2c, 0xcb, 0x3e, 0x1a, 0x43, 0x0f, 0x12, 0xc9, 0xcf, 0x27, 0x29, 0x4e, 0x0c, + 0xa4, 0xc0, 0x8f, 0xfb, 0x68, 0x83, 0x83, 0x17, 0xd1, 0x30, 0x3e, 0x62, 0x51, 0xe8, 0x9d, 0xd7, + 0x97, 0x74, 0x9a, 0x07, 0xd9, 0xa8, 0xb5, 0x41, 0x8a, 0x86, 0xab, 0x51, 0xeb, 0x41, 0x79, 0x0d, + 0xec, 0x23, 0xe0, 0x22, 0x14, 0x12, 0x12, 0xf9, 0x94, 0x45, 0x83, 0x18, 0xa6, 0x30, 0x64, 0x9a, + 0x1b, 0x3f, 0x44, 0xeb, 0x31, 0x1b, 0x24, 0xf2, 0x49, 0x2a, 0x43, 0x96, 0x88, 0xfa, 0xf2, 0x4e, + 0xa5, 0xb3, 0xe6, 0x6e, 0x66, 0xa3, 0xd6, 0x7a, 0xaf, 0xa0, 0x27, 0x53, 0x5e, 0xf8, 0x10, 0x6d, + 0xd3, 0x28, 0x62, 0x3f, 0xe4, 0x01, 0x0e, 0x7e, 0x4c, 0x69, 0xa2, 0x5a, 0x55, 0x5f, 0xd9, 0xb1, + 0x3a, 0x55, 0xb7, 0x9e, 0x8d, 0x5a, 0xdb, 0xdd, 0x39, 0x76, 0x32, 0x17, 0x85, 0xbf, 0x46, 0x5b, + 0x43, 0xad, 0x72, 0xc3, 0xc4, 0x0f, 0x93, 0xa0, 0xc7, 0x7c, 0xa8, 0xaf, 0xea, 0xa2, 0xef, 0x66, + 0xa3, 0xd6, 0xd6, 0xd3, 0x59, 0xe3, 0xd5, 0x3c, 0x25, 0x29, 0x93, 0xe0, 0xef, 0xd1, 0x96, 0x8e, + 0x08, 0xfe, 0x09, 0x4b, 0x59, 0xc4, 0x82, 0x10, 0x44, 0xbd, 0xaa, 0xe7, 0xd7, 0x29, 0xce, 0x4f, + 0xb5, 0x4e, 0x2d, 0x92, 0xf1, 0x3a, 0x3f, 0x86, 0x08, 0x3c, 0xc9, 0xf8, 0x09, 0xf0, 0xd8, 0xfd, + 0xc0, 0xcc, 0x6b, 0xab, 0x3b, 0x4b, 0x45, 0xca, 0xec, 0x8d, 0xcf, 0xd0, 0x9d, 0x99, 0x81, 0xe3, + 0x4d, 0x54, 0xe9, 0xc3, 0xb9, 0x5e, 0xe9, 0x35, 0xa2, 0x7e, 0xe2, 0x6d, 0xb4, 0x3c, 0xa4, 0xd1, + 0x00, 0xf2, 0x0d, 0x24, 0xb9, 0xf0, 0xe9, 0xe2, 0xbe, 0xd5, 0xfe, 0xc3, 0x42, 0x9b, 0xc5, 0xed, + 0x39, 0x0c, 0x85, 0xc4, 0xdf, 0x94, 0x0e, 0xc3, 0x7e, 0xbd, 0xc3, 0x50, 0x68, 0x7d, 0x16, 0x9b, + 0xa6, 0x86, 0xea, 0xb5, 0xa6, 0x70, 0x14, 0x3d, 0xb4, 0x1c, 0x4a, 0x88, 0x45, 0x7d, 0xb1, 0xdc, + 0x98, 0xff, 0x5b, 0x6c, 0x77, 0xc3, 0x90, 0x2e, 0x7f, 0xa1, 0xe0, 0x24, 0x67, 0x69, 0xff, 0xbe, + 0x88, 0x36, 0xf3, 0xe1, 0x74, 0xa5, 0xa4, 0xde, 0x59, 0x0c, 0x89, 0x7c, 0x0b, 0xa7, 0x4d, 0xd0, + 0x92, 0x48, 0xc1, 0xd3, 0x1d, 0x9d, 0x66, 0x2f, 0x15, 0x31, 0x9b, 0xdd, 0x71, 0x0a, 0x9e, 0xbb, + 0x6e, 0xd8, 0x97, 0x94, 0x44, 0x34, 
0x17, 0x7e, 0x86, 0x56, 0x84, 0xa4, 0x72, 0xa0, 0x6e, 0x5e, + 0xb1, 0xee, 0xdd, 0x8a, 0x55, 0x23, 0xdd, 0x77, 0x0c, 0xef, 0x4a, 0x2e, 0x13, 0xc3, 0xd8, 0xfe, + 0xd3, 0x42, 0xdb, 0xb3, 0x90, 0xb7, 0x30, 0xec, 0xaf, 0xa6, 0x87, 0x7d, 0xef, 0x36, 0x15, 0xdd, + 0x30, 0xf0, 0xe7, 0xe8, 0xbd, 0x52, 0xed, 0x6c, 0xc0, 0x3d, 0x50, 0xcf, 0x44, 0x3a, 0xf3, 0x18, + 0x3d, 0xa6, 0x31, 0xe4, 0x97, 0x90, 0x3f, 0x13, 0x47, 0x73, 0xec, 0x64, 0x2e, 0xaa, 0xfd, 0xd7, + 0x9c, 0x8e, 0xa9, 0x61, 0xe1, 0x7b, 0xa8, 0x4a, 0xb5, 0x06, 0xb8, 0xa1, 0x1e, 0x77, 0xa0, 0x6b, + 0xf4, 0x64, 0xec, 0xa1, 0x87, 0xaa, 0xd3, 0x33, 0xab, 0x72, 0xbb, 0xa1, 0x6a, 0x64, 0x61, 0xa8, + 0x5a, 0x26, 0x86, 0x51, 0x65, 0x92, 0x30, 0x3f, 0x2f, 0xb2, 0x32, 0x9d, 0xc9, 0x63, 0xa3, 0x27, + 0x63, 0x8f, 0xf6, 0xbf, 0x95, 0x39, 0x9d, 0xd3, 0xdb, 0x51, 0x28, 0xc9, 0xd7, 0x25, 0x55, 0x4b, + 0x25, 0xf9, 0xe3, 0x92, 0x7c, 0xfc, 0x9b, 0x85, 0x30, 0x1d, 0x53, 0xf4, 0xae, 0xb7, 0x27, 0x1f, + 0xf1, 0x97, 0xb7, 0x5f, 0x5a, 0xbb, 0x5b, 0x22, 0xcb, 0x3f, 0x5d, 0x0d, 0x93, 0x04, 0x2e, 0x3b, + 0x90, 0x39, 0x19, 0xe0, 0x10, 0xd5, 0x72, 0xed, 0x01, 0xe7, 0x8c, 0x9b, 0x2b, 0xfa, 0xf0, 0xd5, + 0x09, 0x69, 0x77, 0xb7, 0xa9, 0x3e, 0xca, 0xdd, 0x09, 0xfe, 0x6a, 0xd4, 0xaa, 0x15, 0xec, 0xa4, + 0xc8, 0xad, 0x42, 0xf9, 0x30, 0x09, 0xb5, 0xf4, 0x06, 0xa1, 0x3e, 0x87, 0x9b, 0x43, 0x15, 0xb8, + 0x1b, 0x07, 0xe8, 0xfd, 0x1b, 0x1a, 0x74, 0xab, 0xa7, 0xfe, 0x67, 0x0b, 0x15, 0x63, 0xe0, 0x43, + 0xb4, 0xa4, 0xfe, 0x2e, 0x99, 0xa3, 0xbf, 0xfb, 0x7a, 0x47, 0x7f, 0x12, 0xc6, 0x30, 0x79, 0xbb, + 0x94, 0x44, 0x34, 0x0b, 0xfe, 0x08, 0xad, 0xc6, 0x20, 0x04, 0x0d, 0x4c, 0x64, 0xf7, 0x8e, 0x71, + 0x5a, 0xed, 0xe5, 0x6a, 0x72, 0x6d, 0x77, 0xef, 0x5f, 0x5c, 0x36, 0x17, 0x5e, 0x5c, 0x36, 0x17, + 0x5e, 0x5e, 0x36, 0x17, 0x7e, 0xca, 0x9a, 0xd6, 0x45, 0xd6, 0xb4, 0x5e, 0x64, 0x4d, 0xeb, 0x65, + 0xd6, 0xb4, 0xfe, 0xce, 0x9a, 0xd6, 0x2f, 0xff, 0x34, 0x17, 0x9e, 0xad, 0x9a, 0xbe, 0xfd, 0x17, + 0x00, 0x00, 0xff, 0xff, 0xb4, 0x63, 0x7e, 0xa7, 0x0b, 0x0b, 0x00, 0x00, } diff --git a/vendor/k8s.io/api/storage/v1beta1/register.go b/vendor/k8s.io/api/storage/v1beta1/register.go index c270ace57..06b0f3d52 100644 --- a/vendor/k8s.io/api/storage/v1beta1/register.go +++ b/vendor/k8s.io/api/storage/v1beta1/register.go @@ -49,12 +49,6 @@ func addKnownTypes(scheme *runtime.Scheme) error { &VolumeAttachment{}, &VolumeAttachmentList{}, - - &CSIDriver{}, - &CSIDriverList{}, - - &CSINode{}, - &CSINodeList{}, ) metav1.AddToGroupVersion(scheme, SchemeGroupVersion) diff --git a/vendor/k8s.io/api/storage/v1beta1/types.go b/vendor/k8s.io/api/storage/v1beta1/types.go index cca50d820..5702c21bc 100644 --- a/vendor/k8s.io/api/storage/v1beta1/types.go +++ b/vendor/k8s.io/api/storage/v1beta1/types.go @@ -166,14 +166,7 @@ type VolumeAttachmentSource struct { // +optional PersistentVolumeName *string `json:"persistentVolumeName,omitempty" protobuf:"bytes,1,opt,name=persistentVolumeName"` - // inlineVolumeSpec contains all the information necessary to attach - // a persistent volume defined by a pod's inline VolumeSource. This field - // is populated only for the CSIMigration feature. It contains - // translated fields from a pod's inline VolumeSource to a - // PersistentVolumeSpec. This field is alpha-level and is only - // honored by servers that enabled the CSIMigration feature. - // +optional - InlineVolumeSpec *v1.PersistentVolumeSpec `json:"inlineVolumeSpec,omitempty" protobuf:"bytes,2,opt,name=inlineVolumeSpec"` + // Placeholder for *VolumeSource to accommodate inline volumes in pods. 
} // VolumeAttachmentStatus is the status of a VolumeAttachment request. @@ -211,165 +204,8 @@ type VolumeError struct { Time metav1.Time `json:"time,omitempty" protobuf:"bytes,1,opt,name=time"` // String detailing the error encountered during Attach or Detach operation. - // This string may be logged, so it should not contain sensitive + // This string maybe logged, so it should not contain sensitive // information. // +optional Message string `json:"message,omitempty" protobuf:"bytes,2,opt,name=message"` } - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// CSIDriver captures information about a Container Storage Interface (CSI) -// volume driver deployed on the cluster. -// CSI drivers do not need to create the CSIDriver object directly. Instead they may use the -// cluster-driver-registrar sidecar container. When deployed with a CSI driver it automatically -// creates a CSIDriver object representing the driver. -// Kubernetes attach detach controller uses this object to determine whether attach is required. -// Kubelet uses this object to determine whether pod information needs to be passed on mount. -// CSIDriver objects are non-namespaced. -type CSIDriver struct { - metav1.TypeMeta `json:",inline"` - - // Standard object metadata. - // metadata.Name indicates the name of the CSI driver that this object - // refers to; it MUST be the same name returned by the CSI GetPluginName() - // call for that driver. - // The driver name must be 63 characters or less, beginning and ending with - // an alphanumeric character ([a-z0-9A-Z]) with dashes (-), dots (.), and - // alphanumerics between. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Specification of the CSI Driver. - Spec CSIDriverSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// CSIDriverList is a collection of CSIDriver objects. -type CSIDriverList struct { - metav1.TypeMeta `json:",inline"` - - // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // items is the list of CSIDriver - Items []CSIDriver `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// CSIDriverSpec is the specification of a CSIDriver. -type CSIDriverSpec struct { - // attachRequired indicates this CSI volume driver requires an attach - // operation (because it implements the CSI ControllerPublishVolume() - // method), and that the Kubernetes attach detach controller should call - // the attach volume interface which checks the volumeattachment status - // and waits until the volume is attached before proceeding to mounting. - // The CSI external-attacher coordinates with CSI volume driver and updates - // the volumeattachment status when the attach operation is complete. - // If the CSIDriverRegistry feature gate is enabled and the value is - // specified to false, the attach operation will be skipped. - // Otherwise the attach operation will be called. - // +optional - AttachRequired *bool `json:"attachRequired,omitempty" protobuf:"varint,1,opt,name=attachRequired"` - - // If set to true, podInfoOnMount indicates this CSI volume driver - // requires additional pod information (like podName, podUID, etc.) 
during - // mount operations. - // If set to false, pod information will not be passed on mount. - // Default is false. - // The CSI driver specifies podInfoOnMount as part of driver deployment. - // If true, Kubelet will pass pod information as VolumeContext in the CSI - // NodePublishVolume() calls. - // The CSI driver is responsible for parsing and validating the information - // passed in as VolumeContext. - // The following VolumeConext will be passed if podInfoOnMount is set to true. - // This list might grow, but the prefix will be used. - // "csi.storage.k8s.io/pod.name": pod.Name - // "csi.storage.k8s.io/pod.namespace": pod.Namespace - // "csi.storage.k8s.io/pod.uid": string(pod.UID) - // +optional - PodInfoOnMount *bool `json:"podInfoOnMount,omitempty" protobuf:"bytes,2,opt,name=podInfoOnMount"` -} - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// CSINode holds information about all CSI drivers installed on a node. -// CSI drivers do not need to create the CSINode object directly. As long as -// they use the node-driver-registrar sidecar container, the kubelet will -// automatically populate the CSINode object for the CSI driver as part of -// kubelet plugin registration. -// CSINode has the same name as a node. If the object is missing, it means either -// there are no CSI Drivers available on the node, or the Kubelet version is low -// enough that it doesn't create this object. -// CSINode has an OwnerReference that points to the corresponding node object. -type CSINode struct { - metav1.TypeMeta `json:",inline"` - - // metadata.name must be the Kubernetes node name. - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // spec is the specification of CSINode - Spec CSINodeSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` -} - -// CSINodeSpec holds information about the specification of all CSI drivers installed on a node -type CSINodeSpec struct { - // drivers is a list of information of all CSI Drivers existing on a node. - // If all drivers in the list are uninstalled, this can become empty. - // +patchMergeKey=name - // +patchStrategy=merge - Drivers []CSINodeDriver `json:"drivers" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,1,rep,name=drivers"` -} - -// CSINodeDriver holds information about the specification of one CSI driver installed on a node -type CSINodeDriver struct { - // This is the name of the CSI driver that this object refers to. - // This MUST be the same name returned by the CSI GetPluginName() call for - // that driver. - Name string `json:"name" protobuf:"bytes,1,opt,name=name"` - - // nodeID of the node from the driver point of view. - // This field enables Kubernetes to communicate with storage systems that do - // not share the same nomenclature for nodes. For example, Kubernetes may - // refer to a given node as "node1", but the storage system may refer to - // the same node as "nodeA". When Kubernetes issues a command to the storage - // system to attach a volume to a specific node, it can use this field to - // refer to the node name using the ID that the storage system will - // understand, e.g. "nodeA" instead of "node1". This field is required. - NodeID string `json:"nodeID" protobuf:"bytes,2,opt,name=nodeID"` - - // topologyKeys is the list of keys supported by the driver. - // When a driver is initialized on a cluster, it provides a set of topology - // keys that it understands (e.g. 
"company.com/zone", "company.com/region"). - // When a driver is initialized on a node, it provides the same topology keys - // along with values. Kubelet will expose these topology keys as labels - // on its own node object. - // When Kubernetes does topology aware provisioning, it can use this list to - // determine which labels it should retrieve from the node object and pass - // back to the driver. - // It is possible for different nodes to use different topology keys. - // This can be empty if driver does not support topology. - // +optional - TopologyKeys []string `json:"topologyKeys" protobuf:"bytes,3,rep,name=topologyKeys"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// CSINodeList is a collection of CSINode objects. -type CSINodeList struct { - metav1.TypeMeta `json:",inline"` - - // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // items is the list of CSINode - Items []CSINode `json:"items" protobuf:"bytes,2,rep,name=items"` -} diff --git a/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go index ec741ecf7..044d69f58 100644 --- a/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go @@ -27,76 +27,6 @@ package v1beta1 // Those methods can be generated by using hack/update-generated-swagger-docs.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. -var map_CSIDriver = map[string]string{ - "": "CSIDriver captures information about a Container Storage Interface (CSI) volume driver deployed on the cluster. CSI drivers do not need to create the CSIDriver object directly. Instead they may use the cluster-driver-registrar sidecar container. When deployed with a CSI driver it automatically creates a CSIDriver object representing the driver. Kubernetes attach detach controller uses this object to determine whether attach is required. Kubelet uses this object to determine whether pod information needs to be passed on mount. CSIDriver objects are non-namespaced.", - "metadata": "Standard object metadata. metadata.Name indicates the name of the CSI driver that this object refers to; it MUST be the same name returned by the CSI GetPluginName() call for that driver. The driver name must be 63 characters or less, beginning and ending with an alphanumeric character ([a-z0-9A-Z]) with dashes (-), dots (.), and alphanumerics between. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "Specification of the CSI Driver.", -} - -func (CSIDriver) SwaggerDoc() map[string]string { - return map_CSIDriver -} - -var map_CSIDriverList = map[string]string{ - "": "CSIDriverList is a collection of CSIDriver objects.", - "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "items": "items is the list of CSIDriver", -} - -func (CSIDriverList) SwaggerDoc() map[string]string { - return map_CSIDriverList -} - -var map_CSIDriverSpec = map[string]string{ - "": "CSIDriverSpec is the specification of a CSIDriver.", - "attachRequired": "attachRequired indicates this CSI volume driver requires an attach operation (because it implements the CSI ControllerPublishVolume() method), and that the Kubernetes attach detach controller should call the attach volume interface which checks the volumeattachment status and waits until the volume is attached before proceeding to mounting. The CSI external-attacher coordinates with CSI volume driver and updates the volumeattachment status when the attach operation is complete. If the CSIDriverRegistry feature gate is enabled and the value is specified to false, the attach operation will be skipped. Otherwise the attach operation will be called.", - "podInfoOnMount": "If set to true, podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) during mount operations. If set to false, pod information will not be passed on mount. Default is false. The CSI driver specifies podInfoOnMount as part of driver deployment. If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. The CSI driver is responsible for parsing and validating the information passed in as VolumeContext. The following VolumeContext will be passed if podInfoOnMount is set to true. This list might grow, but the prefix will be used. \"csi.storage.k8s.io/pod.name\": pod.Name \"csi.storage.k8s.io/pod.namespace\": pod.Namespace \"csi.storage.k8s.io/pod.uid\": string(pod.UID)", -} - -func (CSIDriverSpec) SwaggerDoc() map[string]string { - return map_CSIDriverSpec -} - -var map_CSINode = map[string]string{ - "": "CSINode holds information about all CSI drivers installed on a node. CSI drivers do not need to create the CSINode object directly. As long as they use the node-driver-registrar sidecar container, the kubelet will automatically populate the CSINode object for the CSI driver as part of kubelet plugin registration. CSINode has the same name as a node. If the object is missing, it means either there are no CSI Drivers available on the node, or the Kubelet version is low enough that it doesn't create this object. CSINode has an OwnerReference that points to the corresponding node object.", - "metadata": "metadata.name must be the Kubernetes node name.", - "spec": "spec is the specification of CSINode", -} - -func (CSINode) SwaggerDoc() map[string]string { - return map_CSINode -} - -var map_CSINodeDriver = map[string]string{ - "": "CSINodeDriver holds information about the specification of one CSI driver installed on a node", - "name": "This is the name of the CSI driver that this object refers to. This MUST be the same name returned by the CSI GetPluginName() call for that driver.", - "nodeID": "nodeID of the node from the driver point of view. 
This field enables Kubernetes to communicate with storage systems that do not share the same nomenclature for nodes. For example, Kubernetes may refer to a given node as \"node1\", but the storage system may refer to the same node as \"nodeA\". When Kubernetes issues a command to the storage system to attach a volume to a specific node, it can use this field to refer to the node name using the ID that the storage system will understand, e.g. \"nodeA\" instead of \"node1\". This field is required.", - "topologyKeys": "topologyKeys is the list of keys supported by the driver. When a driver is initialized on a cluster, it provides a set of topology keys that it understands (e.g. \"company.com/zone\", \"company.com/region\"). When a driver is initialized on a node, it provides the same topology keys along with values. Kubelet will expose these topology keys as labels on its own node object. When Kubernetes does topology aware provisioning, it can use this list to determine which labels it should retrieve from the node object and pass back to the driver. It is possible for different nodes to use different topology keys. This can be empty if driver does not support topology.", -} - -func (CSINodeDriver) SwaggerDoc() map[string]string { - return map_CSINodeDriver -} - -var map_CSINodeList = map[string]string{ - "": "CSINodeList is a collection of CSINode objects.", - "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "items": "items is the list of CSINode", -} - -func (CSINodeList) SwaggerDoc() map[string]string { - return map_CSINodeList -} - -var map_CSINodeSpec = map[string]string{ - "": "CSINodeSpec holds information about the specification of all CSI drivers installed on a node", - "drivers": "drivers is a list of information of all CSI Drivers existing on a node. If all drivers in the list are uninstalled, this can become empty.", -} - -func (CSINodeSpec) SwaggerDoc() map[string]string { - return map_CSINodeSpec -} - var map_StorageClass = map[string]string{ "": "StorageClass describes the parameters for a class of storage for which PersistentVolumes can be dynamically provisioned.\n\nStorageClasses are non-namespaced; the name of the storage class according to etcd is in ObjectMeta.Name.", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", @@ -145,7 +75,7 @@ func (VolumeAttachmentList) SwaggerDoc() map[string]string { } var map_VolumeAttachmentSource = map[string]string{ - "": "VolumeAttachmentSource represents a volume that should be attached. Right now only PersistentVolumes can be attached via external attacher, in future we may allow also inline volumes in pods. Exactly one member can be set.", + "": "VolumeAttachmentSource represents a volume that should be attached. Right now only PersistentVolumes can be attached via external attacher, in future we may allow also inline volumes in pods. Exactly one member can be set.", "persistentVolumeName": "Name of the persistent volume to attach.", } @@ -179,7 +109,7 @@ func (VolumeAttachmentStatus) SwaggerDoc() map[string]string { var map_VolumeError = map[string]string{ "": "VolumeError captures an error encountered during a volume operation.", "time": "Time the error was encountered.", - "message": "String detailing the error encountered during Attach or Detach operation. 
This string may be logged, so it should not contain sensitive information.", + "message": "String detailing the error encountered during Attach or Detach operation. This string maybe logged, so it should not contain sensitive information.", } func (VolumeError) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go index 305942332..8096dba9b 100644 --- a/vendor/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go @@ -25,196 +25,6 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" ) -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CSIDriver) DeepCopyInto(out *CSIDriver) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSIDriver. -func (in *CSIDriver) DeepCopy() *CSIDriver { - if in == nil { - return nil - } - out := new(CSIDriver) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *CSIDriver) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CSIDriverList) DeepCopyInto(out *CSIDriverList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]CSIDriver, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSIDriverList. -func (in *CSIDriverList) DeepCopy() *CSIDriverList { - if in == nil { - return nil - } - out := new(CSIDriverList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *CSIDriverList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CSIDriverSpec) DeepCopyInto(out *CSIDriverSpec) { - *out = *in - if in.AttachRequired != nil { - in, out := &in.AttachRequired, &out.AttachRequired - *out = new(bool) - **out = **in - } - if in.PodInfoOnMount != nil { - in, out := &in.PodInfoOnMount, &out.PodInfoOnMount - *out = new(bool) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSIDriverSpec. -func (in *CSIDriverSpec) DeepCopy() *CSIDriverSpec { - if in == nil { - return nil - } - out := new(CSIDriverSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CSINode) DeepCopyInto(out *CSINode) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSINode. 
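The CSINode helpers removed in this hunk follow the deepcopy-gen triad that every generated API type carries: DeepCopyInto writes into an existing value, DeepCopy allocates a fresh one, and DeepCopyObject satisfies runtime.Object for the generic machinery. A minimal sketch of why callers reach for these helpers instead of plain assignment, using a hypothetical Widget type rather than the vendored CSINode:

package main

import "fmt"

// Widget is a hypothetical stand-in for a generated API type.
type Widget struct {
	TopologyKeys []string
}

// DeepCopyInto duplicates the slice so receiver and copy share no memory,
// mirroring the generated CSINodeDriver.DeepCopyInto above.
func (in *Widget) DeepCopyInto(out *Widget) {
	*out = *in
	if in.TopologyKeys != nil {
		out.TopologyKeys = make([]string, len(in.TopologyKeys))
		copy(out.TopologyKeys, in.TopologyKeys)
	}
}

// DeepCopy allocates a new Widget and copies into it.
func (in *Widget) DeepCopy() *Widget {
	if in == nil {
		return nil
	}
	out := new(Widget)
	in.DeepCopyInto(out)
	return out
}

func main() {
	w := &Widget{TopologyKeys: []string{"company.com/zone"}}
	c := w.DeepCopy()
	c.TopologyKeys[0] = "company.com/region"
	fmt.Println(w.TopologyKeys[0]) // company.com/zone: the original is untouched
}

Informer caches hand out shared pointers, which is why reconcilers copy an object with these helpers before mutating it.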
-func (in *CSINode) DeepCopy() *CSINode { - if in == nil { - return nil - } - out := new(CSINode) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *CSINode) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CSINodeDriver) DeepCopyInto(out *CSINodeDriver) { - *out = *in - if in.TopologyKeys != nil { - in, out := &in.TopologyKeys, &out.TopologyKeys - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSINodeDriver. -func (in *CSINodeDriver) DeepCopy() *CSINodeDriver { - if in == nil { - return nil - } - out := new(CSINodeDriver) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CSINodeList) DeepCopyInto(out *CSINodeList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]CSINode, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSINodeList. -func (in *CSINodeList) DeepCopy() *CSINodeList { - if in == nil { - return nil - } - out := new(CSINodeList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *CSINodeList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CSINodeSpec) DeepCopyInto(out *CSINodeSpec) { - *out = *in - if in.Drivers != nil { - in, out := &in.Drivers, &out.Drivers - *out = make([]CSINodeDriver, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSINodeSpec. -func (in *CSINodeSpec) DeepCopy() *CSINodeSpec { - if in == nil { - return nil - } - out := new(CSINodeSpec) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
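A recurring change in the hunks below is in.ListMeta.DeepCopyInto(&out.ListMeta) collapsing to out.ListMeta = in.ListMeta. Plain struct assignment is only a safe deep copy while the struct holds no pointers, slices, or maps; the newer apimachinery had added a *int64 RemainingItemCount to ListMeta (visible in the ListMeta marshal hunk further down), which is what forced the generator onto DeepCopyInto. A small sketch of the aliasing hazard, with hypothetical types:

package main

import "fmt"

type flatMeta struct{ ResourceVersion string } // value fields only: assignment is a deep copy

type ptrMeta struct{ Remaining *int64 } // pointer field: assignment aliases

func main() {
	n := int64(5)
	a := ptrMeta{Remaining: &n}
	b := a // shallow copy: b.Remaining aliases a.Remaining
	*b.Remaining = 9
	fmt.Println(*a.Remaining) // 9, the mutation leaked through the alias

	x := flatMeta{ResourceVersion: "42"}
	y := x
	y.ResourceVersion = "43"
	fmt.Println(x.ResourceVersion) // 42, a value copy was enough
}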
func (in *StorageClass) DeepCopyInto(out *StorageClass) { *out = *in @@ -279,7 +89,7 @@ func (in *StorageClass) DeepCopyObject() runtime.Object { func (in *StorageClassList) DeepCopyInto(out *StorageClassList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]StorageClass, len(*in)) @@ -340,7 +150,7 @@ func (in *VolumeAttachment) DeepCopyObject() runtime.Object { func (in *VolumeAttachmentList) DeepCopyInto(out *VolumeAttachmentList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]VolumeAttachment, len(*in)) @@ -377,11 +187,6 @@ func (in *VolumeAttachmentSource) DeepCopyInto(out *VolumeAttachmentSource) { *out = new(string) **out = **in } - if in.InlineVolumeSpec != nil { - in, out := &in.InlineVolumeSpec, &out.InlineVolumeSpec - *out = new(v1.PersistentVolumeSpec) - (*in).DeepCopyInto(*out) - } return } diff --git a/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go b/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go index f4201eb69..77c91bbc7 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go +++ b/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go @@ -20,7 +20,6 @@ import ( "encoding/json" "fmt" "net/http" - "reflect" "strings" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -83,20 +82,7 @@ func (u *UnexpectedObjectError) Error() string { func FromObject(obj runtime.Object) error { switch t := obj.(type) { case *metav1.Status: - return &StatusError{ErrStatus: *t} - case runtime.Unstructured: - var status metav1.Status - obj := t.UnstructuredContent() - if !reflect.DeepEqual(obj["kind"], "Status") { - break - } - if err := runtime.DefaultUnstructuredConverter.FromUnstructured(t.UnstructuredContent(), &status); err != nil { - return err - } - if status.APIVersion != "v1" && status.APIVersion != "meta.k8s.io/v1" { - break - } - return &StatusError{ErrStatus: status} + return &StatusError{*t} } return &UnexpectedObjectError{obj} } @@ -184,20 +170,6 @@ func NewConflict(qualifiedResource schema.GroupResource, name string, err error) }} } -// NewApplyConflict returns an error including details on the request's apply conflicts -func NewApplyConflict(causes []metav1.StatusCause, message string) *StatusError { - return &StatusError{ErrStatus: metav1.Status{ - Status: metav1.StatusFailure, - Code: http.StatusConflict, - Reason: metav1.StatusReasonConflict, - Details: &metav1.StatusDetails{ - // TODO: Get obj details here? - Causes: causes, - }, - Message: message, - }} -} - // NewGone returns an error indicating the item is no longer available at the server and no forwarding address is known. 
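The FromObject hunk above reverts to the one-case form: wrap a *metav1.Status in a StatusError and let callers use the usual reason predicates. A minimal sketch of that round trip (hypothetical message text; only apimachinery is assumed):

package main

import (
	"fmt"
	"net/http"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	// A Status object such as a watch stream might deliver on error.
	var obj runtime.Object = &metav1.Status{
		Status:  metav1.StatusFailure,
		Code:    http.StatusNotFound,
		Reason:  metav1.StatusReasonNotFound,
		Message: "eventlisteners.triggers.tekton.dev \"example\" not found",
	}
	err := apierrors.FromObject(obj)      // wraps the Status in a StatusError
	fmt.Println(apierrors.IsNotFound(err)) // true: reason predicates still work
}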
func NewGone(message string) *StatusError { return &StatusError{metav1.Status{ @@ -394,11 +366,7 @@ func NewGenericServerResponse(code int, verb string, qualifiedResource schema.Gr case http.StatusNotAcceptable: reason = metav1.StatusReasonNotAcceptable // the server message has details about what types are acceptable - if len(serverMessage) == 0 || serverMessage == "unknown" { - message = "the server was unable to respond with a content type that the client supports" - } else { - message = serverMessage - } + message = serverMessage case http.StatusUnsupportedMediaType: reason = metav1.StatusReasonUnsupportedMediaType // the server message has details about what types are acceptable @@ -621,46 +589,3 @@ func ReasonForError(err error) metav1.StatusReason { } return metav1.StatusReasonUnknown } - -// ErrorReporter converts generic errors into runtime.Object errors without -// requiring the caller to take a dependency on meta/v1 (where Status lives). -// This prevents circular dependencies in core watch code. -type ErrorReporter struct { - code int - verb string - reason string -} - -// NewClientErrorReporter will respond with valid v1.Status objects that report -// unexpected server responses. Primarily used by watch to report errors when -// we attempt to decode a response from the server and it is not in the form -// we expect. Because watch is a dependency of the core api, we can't return -// meta/v1.Status in that package and so must inject this interface to convert a -// generic error as appropriate. The reason is passed as a unique status cause -// on the returned status, otherwise the generic "ClientError" is returned. -func NewClientErrorReporter(code int, verb string, reason string) *ErrorReporter { - return &ErrorReporter{ - code: code, - verb: verb, - reason: reason, - } -} - -// AsObject returns a valid error runtime.Object (a v1.Status) for the given -// error, using the code and verb of the reporter type. The error is set to -// indicate that this was an unexpected server response. -func (r *ErrorReporter) AsObject(err error) runtime.Object { - status := NewGenericServerResponse(r.code, r.verb, schema.GroupResource{}, "", err.Error(), 0, true) - if status.ErrStatus.Details == nil { - status.ErrStatus.Details = &metav1.StatusDetails{} - } - reason := r.reason - if len(reason) == 0 { - reason = "ClientError" - } - status.ErrStatus.Details.Causes = append(status.ErrStatus.Details.Causes, metav1.StatusCause{ - Type: metav1.CauseType(reason), - Message: err.Error(), - }) - return &status.ErrStatus -} diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/help.go b/vendor/k8s.io/apimachinery/pkg/api/meta/help.go index 50468b533..c70b3d2b6 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/meta/help.go +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/help.go @@ -17,76 +17,30 @@ limitations under the License. package meta import ( - "errors" "fmt" "reflect" - "sync" "k8s.io/apimachinery/pkg/conversion" "k8s.io/apimachinery/pkg/runtime" ) -var ( - // isListCache maintains a cache of types that are checked for lists - // which is used by IsListType. - // TODO: remove and replace with an interface check - isListCache = struct { - lock sync.RWMutex - byType map[reflect.Type]bool - }{ - byType: make(map[reflect.Type]bool, 1024), - } -) - -// IsListType returns true if the provided Object has a slice called Items. -// TODO: Replace the code in this check with an interface comparison by -// creating and enforcing that lists implement a list accessor. 
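The IsListType hunk here swaps a cached reflection probe for the direct GetItemsPtr check; either way the question is the same: does the object expose an Items slice? A short sketch of the typical pairing with meta.ExtractList, using metav1.List as a generic carrier (only apimachinery is assumed):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	// metav1.List carries an Items slice of RawExtension, so IsListType
	// reports true under both the cached and the direct implementation.
	list := &metav1.List{Items: []runtime.RawExtension{{}, {}}}
	fmt.Println(meta.IsListType(list)) // true

	// ExtractList walks the same Items slice; empty RawExtension entries
	// come back as nil runtime.Objects, but the shape is preserved.
	items, err := meta.ExtractList(list)
	fmt.Println(len(items), err) // 2 <nil>
}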
+// IsListType returns true if the provided Object has a slice called Items func IsListType(obj runtime.Object) bool { - switch t := obj.(type) { - case runtime.Unstructured: - return t.IsList() - } - t := reflect.TypeOf(obj) - - isListCache.lock.RLock() - ok, exists := isListCache.byType[t] - isListCache.lock.RUnlock() - - if !exists { - _, err := getItemsPtr(obj) - ok = err == nil - - // cache only the first 1024 types - isListCache.lock.Lock() - if len(isListCache.byType) < 1024 { - isListCache.byType[t] = ok - } - isListCache.lock.Unlock() + // if we're a runtime.Unstructured, check whether this is a list. + // TODO: refactor GetItemsPtr to use an interface that returns []runtime.Object + if unstructured, ok := obj.(runtime.Unstructured); ok { + return unstructured.IsList() } - return ok + _, err := GetItemsPtr(obj) + return err == nil } -var ( - errExpectFieldItems = errors.New("no Items field in this object") - errExpectSliceItems = errors.New("Items field must be a slice of objects") -) - // GetItemsPtr returns a pointer to the list object's Items member. // If 'list' doesn't have an Items member, it's not really a list type // and an error will be returned. // This function will either return a pointer to a slice, or an error, but not both. -// TODO: this will be replaced with an interface in the future func GetItemsPtr(list runtime.Object) (interface{}, error) { - obj, err := getItemsPtr(list) - if err != nil { - return nil, fmt.Errorf("%T is not a list: %v", list, err) - } - return obj, nil -} - -// getItemsPtr returns a pointer to the list object's Items member or an error. -func getItemsPtr(list runtime.Object) (interface{}, error) { v, err := conversion.EnforcePtr(list) if err != nil { return nil, err @@ -94,19 +48,19 @@ func getItemsPtr(list runtime.Object) (interface{}, error) { items := v.FieldByName("Items") if !items.IsValid() { - return nil, errExpectFieldItems + return nil, fmt.Errorf("no Items field in %#v", list) } switch items.Kind() { case reflect.Interface, reflect.Ptr: target := reflect.TypeOf(items.Interface()).Elem() if target.Kind() != reflect.Slice { - return nil, errExpectSliceItems + return nil, fmt.Errorf("items: Expected slice, got %s", target.Kind()) } return items.Interface(), nil case reflect.Slice: return items.Addr().Interface(), nil default: - return nil, errExpectSliceItems + return nil, fmt.Errorf("items: Expected slice, got %s", items.Kind()) } } @@ -204,19 +158,6 @@ func ExtractList(obj runtime.Object) ([]runtime.Object, error) { // objectSliceType is the type of a slice of Objects var objectSliceType = reflect.TypeOf([]runtime.Object{}) -// LenList returns the length of this list or 0 if it is not a list. -func LenList(list runtime.Object) int { - itemsPtr, err := GetItemsPtr(list) - if err != nil { - return 0 - } - items, err := conversion.EnforcePtr(itemsPtr) - if err != nil { - return 0 - } - return items.Len() -} - // SetList sets the given list object's Items member to have the elements given in // objects. 
// Returns an error if list is not a List type (does not have an Items member), diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/meta.go b/vendor/k8s.io/apimachinery/pkg/api/meta/meta.go index 086bce04b..1c2a83cfa 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/meta/meta.go +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/meta.go @@ -20,12 +20,14 @@ import ( "fmt" "reflect" + "github.com/golang/glog" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1beta1 "k8s.io/apimachinery/pkg/apis/meta/v1beta1" "k8s.io/apimachinery/pkg/conversion" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" - "k8s.io/klog" ) // errNotList is returned when an object implements the Object style interfaces but not the List style @@ -113,12 +115,12 @@ func Accessor(obj interface{}) (metav1.Object, error) { // AsPartialObjectMetadata takes the metav1 interface and returns a partial object. // TODO: consider making this solely a conversion action. -func AsPartialObjectMetadata(m metav1.Object) *metav1.PartialObjectMetadata { +func AsPartialObjectMetadata(m metav1.Object) *metav1beta1.PartialObjectMetadata { switch t := m.(type) { case *metav1.ObjectMeta: - return &metav1.PartialObjectMetadata{ObjectMeta: *t} + return &metav1beta1.PartialObjectMetadata{ObjectMeta: *t} default: - return &metav1.PartialObjectMetadata{ + return &metav1beta1.PartialObjectMetadata{ ObjectMeta: metav1.ObjectMeta{ Name: m.GetName(), GenerateName: m.GetGenerateName(), @@ -130,13 +132,12 @@ func AsPartialObjectMetadata(m metav1.Object) *metav1.PartialObjectMetadata { CreationTimestamp: m.GetCreationTimestamp(), DeletionTimestamp: m.GetDeletionTimestamp(), DeletionGracePeriodSeconds: m.GetDeletionGracePeriodSeconds(), - Labels: m.GetLabels(), - Annotations: m.GetAnnotations(), - OwnerReferences: m.GetOwnerReferences(), - Finalizers: m.GetFinalizers(), - ClusterName: m.GetClusterName(), - Initializers: m.GetInitializers(), - ManagedFields: m.GetManagedFields(), + Labels: m.GetLabels(), + Annotations: m.GetAnnotations(), + OwnerReferences: m.GetOwnerReferences(), + Finalizers: m.GetFinalizers(), + ClusterName: m.GetClusterName(), + Initializers: m.GetInitializers(), }, } } @@ -606,7 +607,7 @@ func (a genericAccessor) GetOwnerReferences() []metav1.OwnerReference { var ret []metav1.OwnerReference s := a.ownerReferences if s.Kind() != reflect.Ptr || s.Elem().Kind() != reflect.Slice { - klog.Errorf("expect %v to be a pointer to slice", s) + glog.Errorf("expect %v to be a pointer to slice", s) return ret } s = s.Elem() @@ -614,7 +615,7 @@ func (a genericAccessor) GetOwnerReferences() []metav1.OwnerReference { ret = make([]metav1.OwnerReference, s.Len(), s.Len()+1) for i := 0; i < s.Len(); i++ { if err := extractFromOwnerReference(s.Index(i), &ret[i]); err != nil { - klog.Errorf("extractFromOwnerReference failed: %v", err) + glog.Errorf("extractFromOwnerReference failed: %v", err) return ret } } @@ -624,13 +625,13 @@ func (a genericAccessor) GetOwnerReferences() []metav1.OwnerReference { func (a genericAccessor) SetOwnerReferences(references []metav1.OwnerReference) { s := a.ownerReferences if s.Kind() != reflect.Ptr || s.Elem().Kind() != reflect.Slice { - klog.Errorf("expect %v to be a pointer to slice", s) + glog.Errorf("expect %v to be a pointer to slice", s) } s = s.Elem() newReferences := reflect.MakeSlice(s.Type(), len(references), len(references)) for i := 0; i < len(references); i++ { if err := setOwnerReference(newReferences.Index(i), &references[i]); err != nil { - 
klog.Errorf("setOwnerReference failed: %v", err) + glog.Errorf("setOwnerReference failed: %v", err) return } } diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/api/resource/generated.pb.go index 9d7835bc2..802f22a63 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/resource/generated.pb.go +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/generated.pb.go @@ -14,8 +14,9 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// Code generated by protoc-gen-gogo. // source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto +// DO NOT EDIT! /* Package resource is a generated protocol buffer package. diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/math.go b/vendor/k8s.io/apimachinery/pkg/api/resource/math.go index 7f63175d3..72d3880c0 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/resource/math.go +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/math.go @@ -194,9 +194,9 @@ func negativeScaleInt64(base int64, scale Scale) (result int64, exact bool) { } if fraction { if base > 0 { - value++ + value += 1 } else { - value-- + value += -1 } } return value, !fraction diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go index 93a6c0c50..b155a62a4 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go @@ -584,12 +584,6 @@ func (q *Quantity) Neg() { q.d.Dec.Neg(q.d.Dec) } -// Equal checks equality of two Quantities. This is useful for testing with -// cmp.Equal. -func (q Quantity) Equal(v Quantity) bool { - return q.Cmp(v) == 0 -} - // int64QuantityExpectedBytes is the expected width in bytes of the canonical string representation // of most Quantity values. const int64QuantityExpectedBytes = 18 @@ -686,7 +680,7 @@ func NewScaledQuantity(value int64, scale Scale) *Quantity { } } -// Value returns the unscaled value of q rounded up to the nearest integer away from 0. +// Value returns the value of q; any fractional part will be lost. func (q *Quantity) Value() int64 { return q.ScaledValue(0) } diff --git a/vendor/k8s.io/apimachinery/pkg/api/validation/objectmeta.go b/vendor/k8s.io/apimachinery/pkg/api/validation/objectmeta.go index cf668c7c8..44b9b1600 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/validation/objectmeta.go +++ b/vendor/k8s.io/apimachinery/pkg/api/validation/objectmeta.go @@ -176,17 +176,40 @@ func ValidateObjectMetaAccessor(meta metav1.Object, requiresNamespace bool, name allErrs = append(allErrs, field.Invalid(fldPath.Child("clusterName"), meta.GetClusterName(), msg)) } } - for _, entry := range meta.GetManagedFields() { - allErrs = append(allErrs, v1validation.ValidateFieldManager(entry.Manager, fldPath.Child("fieldManager"))...) - } allErrs = append(allErrs, ValidateNonnegativeField(meta.GetGeneration(), fldPath.Child("generation"))...) allErrs = append(allErrs, v1validation.ValidateLabels(meta.GetLabels(), fldPath.Child("labels"))...) allErrs = append(allErrs, ValidateAnnotations(meta.GetAnnotations(), fldPath.Child("annotations"))...) allErrs = append(allErrs, ValidateOwnerReferences(meta.GetOwnerReferences(), fldPath.Child("ownerReferences"))...) + allErrs = append(allErrs, ValidateInitializers(meta.GetInitializers(), fldPath.Child("initializers"))...) 
allErrs = append(allErrs, ValidateFinalizers(meta.GetFinalizers(), fldPath.Child("finalizers"))...) return allErrs } +func ValidateInitializers(initializers *metav1.Initializers, fldPath *field.Path) field.ErrorList { + var allErrs field.ErrorList + if initializers == nil { + return allErrs + } + for i, initializer := range initializers.Pending { + allErrs = append(allErrs, validation.IsFullyQualifiedName(fldPath.Child("pending").Index(i).Child("name"), initializer.Name)...) + } + allErrs = append(allErrs, validateInitializersResult(initializers.Result, fldPath.Child("result"))...) + return allErrs +} + +func validateInitializersResult(result *metav1.Status, fldPath *field.Path) field.ErrorList { + var allErrs field.ErrorList + if result == nil { + return allErrs + } + switch result.Status { + case metav1.StatusFailure: + default: + allErrs = append(allErrs, field.Invalid(fldPath.Child("status"), result.Status, "must be 'Failure'")) + } + return allErrs +} + // ValidateFinalizers tests if the finalizer names are valid, and if there are conflicting finalizers. func ValidateFinalizers(finalizers []string, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} @@ -242,9 +265,8 @@ func ValidateObjectMetaAccessorUpdate(newMeta, oldMeta metav1.Object, fldPath *f allErrs = append(allErrs, field.Invalid(fldPath.Child("generation"), newMeta.GetGeneration(), "must not be decremented")) } - for _, entry := range newMeta.GetManagedFields() { - allErrs = append(allErrs, v1validation.ValidateFieldManager(entry.Manager, fldPath.Child("fieldManager"))...) - } + allErrs = append(allErrs, ValidateInitializersUpdate(newMeta.GetInitializers(), oldMeta.GetInitializers(), fldPath.Child("initializers"))...) + allErrs = append(allErrs, ValidateImmutableField(newMeta.GetName(), oldMeta.GetName(), fldPath.Child("name"))...) allErrs = append(allErrs, ValidateImmutableField(newMeta.GetNamespace(), oldMeta.GetNamespace(), fldPath.Child("namespace"))...) allErrs = append(allErrs, ValidateImmutableField(newMeta.GetUID(), oldMeta.GetUID(), fldPath.Child("uid"))...) @@ -259,3 +281,28 @@ func ValidateObjectMetaAccessorUpdate(newMeta, oldMeta metav1.Object, fldPath *f return allErrs } + +// ValidateInitializersUpdate checks the update of the metadata initializers field +func ValidateInitializersUpdate(newInit, oldInit *metav1.Initializers, fldPath *field.Path) field.ErrorList { + var allErrs field.ErrorList + switch { + case oldInit == nil && newInit != nil: + // Initializers may not be set on new objects + allErrs = append(allErrs, field.Invalid(fldPath, nil, "field is immutable once initialization has completed")) + case oldInit != nil && newInit == nil: + // this is a valid transition and means initialization was successful + case oldInit != nil && newInit != nil: + // validate changes to initializers + switch { + case oldInit.Result == nil && newInit.Result != nil: + // setting a result is allowed + allErrs = append(allErrs, validateInitializersResult(newInit.Result, fldPath.Child("result"))...) + case oldInit.Result != nil: + // setting Result implies permanent failure, and all future updates will be prevented + allErrs = append(allErrs, ValidateImmutableField(newInit.Result, oldInit.Result, fldPath.Child("result"))...) 
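ValidateInitializersUpdate is a small state machine over the (old, new) pair: initializers may not appear on an object that had none, disappearing means initialization finished, and a non-nil Result is frozen once set. A sketch exercising two of those transitions; it assumes the Initializers API present at this vendored revision (the field was dropped from later Kubernetes releases):

package main

import (
	"fmt"

	apivalidation "k8s.io/apimachinery/pkg/api/validation"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/validation/field"
)

func main() {
	fld := field.NewPath("metadata", "initializers")
	pending := &metav1.Initializers{Pending: []metav1.Initializer{{Name: "example.com/init"}}}

	// old == nil, new != nil: initializers may not appear after creation.
	errs := apivalidation.ValidateInitializersUpdate(pending, nil, fld)
	fmt.Println(len(errs) == 1) // true: rejected as immutable

	// old != nil, new == nil: initialization completed; always legal.
	errs = apivalidation.ValidateInitializersUpdate(nil, pending, fld)
	fmt.Println(len(errs) == 0) // true: no errors
}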
+ default: + // leaving the result nil is allowed + } + } + return allErrs +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/conversion.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/conversion.go index bdb71340a..673e56212 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/conversion.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/conversion.go @@ -28,6 +28,7 @@ func Convert_internalversion_ListOptions_To_v1_ListOptions(in *ListOptions, out if err := metav1.Convert_labels_Selector_To_string(&in.LabelSelector, &out.LabelSelector, s); err != nil { return err } + out.IncludeUninitialized = in.IncludeUninitialized out.ResourceVersion = in.ResourceVersion out.TimeoutSeconds = in.TimeoutSeconds out.Watch = in.Watch @@ -43,6 +44,7 @@ func Convert_v1_ListOptions_To_internalversion_ListOptions(in *metav1.ListOption if err := metav1.Convert_string_To_labels_Selector(&in.LabelSelector, &out.LabelSelector, s); err != nil { return err } + out.IncludeUninitialized = in.IncludeUninitialized out.ResourceVersion = in.ResourceVersion out.TimeoutSeconds = in.TimeoutSeconds out.Watch = in.Watch diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/doc.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/doc.go index 2741ee2c8..1e85c5c43 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/doc.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/doc.go @@ -17,4 +17,4 @@ limitations under the License. // +k8s:deepcopy-gen=package // +k8s:conversion-gen=k8s.io/apimachinery/pkg/apis/meta/v1 -package internalversion // import "k8s.io/apimachinery/pkg/apis/meta/internalversion" +package internalversion diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/register.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/register.go index d0149810b..46b8605f4 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/register.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/register.go @@ -89,12 +89,12 @@ func addToGroupVersion(scheme *runtime.Scheme, groupVersion schema.GroupVersion) &metav1beta1.PartialObjectMetadata{}, &metav1beta1.PartialObjectMetadataList{}, ) - if err := metav1beta1.AddMetaToScheme(scheme); err != nil { - return err - } - if err := metav1.AddMetaToScheme(scheme); err != nil { - return err - } + scheme.AddKnownTypes(metav1beta1.SchemeGroupVersion, + &metav1beta1.Table{}, + &metav1beta1.TableOptions{}, + &metav1beta1.PartialObjectMetadata{}, + &metav1beta1.PartialObjectMetadataList{}, + ) // Allow delete options to be decoded across all version in this scheme (we may want to be more clever than this) scheme.AddUnversionedTypes(SchemeGroupVersion, &metav1.DeleteOptions{}, diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/types.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/types.go index 8d2544168..2f6740d36 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/types.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/types.go @@ -33,17 +33,11 @@ type ListOptions struct { LabelSelector labels.Selector // A selector based on fields FieldSelector fields.Selector + // If true, partially initialized resources are included in the response. + // +optional + IncludeUninitialized bool // If true, watch for changes to this list Watch bool - // allowWatchBookmarks requests watch events with type "BOOKMARK". 
- // Servers that do not implement bookmarks may ignore this flag and - // bookmarks are sent at the server's discretion. Clients should not - // assume bookmarks are returned at any specific interval, nor may they - // assume the server will send any BOOKMARK event during a session. - // If this is not a watch, this field is ignored. - // If the feature gate WatchBookmarks is not enabled in apiserver, - // this field is ignored. - AllowWatchBookmarks bool // When specified with a watch call, shows changes that occur after that particular version of a resource. // Defaults to changes from the beginning of history. // When specified for list: diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.conversion.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.conversion.go index c6ed19bc2..18d190b24 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.conversion.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.conversion.go @@ -117,8 +117,8 @@ func autoConvert_internalversion_ListOptions_To_v1_ListOptions(in *ListOptions, if err := v1.Convert_fields_Selector_To_string(&in.FieldSelector, &out.FieldSelector, s); err != nil { return err } + out.IncludeUninitialized = in.IncludeUninitialized out.Watch = in.Watch - out.AllowWatchBookmarks = in.AllowWatchBookmarks out.ResourceVersion = in.ResourceVersion out.TimeoutSeconds = (*int64)(unsafe.Pointer(in.TimeoutSeconds)) out.Limit = in.Limit @@ -133,8 +133,8 @@ func autoConvert_v1_ListOptions_To_internalversion_ListOptions(in *v1.ListOption if err := v1.Convert_string_To_fields_Selector(&in.FieldSelector, &out.FieldSelector, s); err != nil { return err } + out.IncludeUninitialized = in.IncludeUninitialized out.Watch = in.Watch - out.AllowWatchBookmarks = in.AllowWatchBookmarks out.ResourceVersion = in.ResourceVersion out.TimeoutSeconds = (*int64)(unsafe.Pointer(in.TimeoutSeconds)) out.Limit = in.Limit diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.deepcopy.go index d5e4fc680..81d85e96e 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.deepcopy.go @@ -28,7 +28,7 @@ import ( func (in *List) DeepCopyInto(out *List) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]runtime.Object, len(*in)) diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/conversion.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/conversion.go index d07069ef2..5c36f82c1 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/conversion.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/conversion.go @@ -77,8 +77,6 @@ func AddConversionFuncs(scheme *runtime.Scheme) error { Convert_Slice_string_To_Slice_int32, Convert_Slice_string_To_v1_DeletionPropagation, - - Convert_Slice_string_To_v1_IncludeObjectPolicy, ) } @@ -319,11 +317,3 @@ func Convert_Slice_string_To_v1_DeletionPropagation(input *[]string, out *Deleti } return nil } - -// Convert_Slice_string_To_v1_IncludeObjectPolicy allows converting a URL query parameter value -func Convert_Slice_string_To_v1_IncludeObjectPolicy(input *[]string, out *IncludeObjectPolicy, s conversion.Scope) error { - if len(*input) > 0 { - *out = 
IncludeObjectPolicy((*input)[0]) - } - return nil -} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/doc.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/doc.go index dbaa87c87..61f201cdf 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/doc.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/doc.go @@ -19,5 +19,4 @@ limitations under the License. // +k8s:defaulter-gen=TypeMeta // +groupName=meta.k8s.io - package v1 // import "k8s.io/apimachinery/pkg/apis/meta/v1" diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/duration.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/duration.go index babe8a8b5..2eaabf079 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/duration.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/duration.go @@ -48,13 +48,3 @@ func (d *Duration) UnmarshalJSON(b []byte) error { func (d Duration) MarshalJSON() ([]byte, error) { return json.Marshal(d.Duration.String()) } - -// OpenAPISchemaType is used by the kube-openapi generator when constructing -// the OpenAPI spec of this type. -// -// See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators -func (_ Duration) OpenAPISchemaType() []string { return []string{"string"} } - -// OpenAPISchemaFormat is used by the kube-openapi generator when constructing -// the OpenAPI spec of this type. -func (_ Duration) OpenAPISchemaFormat() string { return "" } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go index c8ff6e396..b7508f033 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go @@ -14,8 +14,9 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// Code generated by protoc-gen-gogo. // source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto +// DO NOT EDIT! /* Package v1 is a generated protocol buffer package. @@ -33,7 +34,6 @@ limitations under the License. DeleteOptions Duration ExportOptions - Fields GetOptions GroupKind GroupResource @@ -48,21 +48,16 @@ limitations under the License. 
List ListMeta ListOptions - ManagedFieldsEntry MicroTime ObjectMeta OwnerReference - PartialObjectMetadata - PartialObjectMetadataList Patch - PatchOptions Preconditions RootPaths ServerAddressByClientCIDR Status StatusCause StatusDetails - TableOptions Time Timestamp TypeMeta @@ -136,157 +131,131 @@ func (m *ExportOptions) Reset() { *m = ExportOptions{} } func (*ExportOptions) ProtoMessage() {} func (*ExportOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } -func (m *Fields) Reset() { *m = Fields{} } -func (*Fields) ProtoMessage() {} -func (*Fields) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } - func (m *GetOptions) Reset() { *m = GetOptions{} } func (*GetOptions) ProtoMessage() {} -func (*GetOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } +func (*GetOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } func (m *GroupKind) Reset() { *m = GroupKind{} } func (*GroupKind) ProtoMessage() {} -func (*GroupKind) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } +func (*GroupKind) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } func (m *GroupResource) Reset() { *m = GroupResource{} } func (*GroupResource) ProtoMessage() {} -func (*GroupResource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } +func (*GroupResource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } func (m *GroupVersion) Reset() { *m = GroupVersion{} } func (*GroupVersion) ProtoMessage() {} -func (*GroupVersion) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } +func (*GroupVersion) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } func (m *GroupVersionForDiscovery) Reset() { *m = GroupVersionForDiscovery{} } func (*GroupVersionForDiscovery) ProtoMessage() {} func (*GroupVersionForDiscovery) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{14} + return fileDescriptorGenerated, []int{13} } func (m *GroupVersionKind) Reset() { *m = GroupVersionKind{} } func (*GroupVersionKind) ProtoMessage() {} -func (*GroupVersionKind) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } +func (*GroupVersionKind) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } func (m *GroupVersionResource) Reset() { *m = GroupVersionResource{} } func (*GroupVersionResource) ProtoMessage() {} -func (*GroupVersionResource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } +func (*GroupVersionResource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } func (m *Initializer) Reset() { *m = Initializer{} } func (*Initializer) ProtoMessage() {} -func (*Initializer) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } +func (*Initializer) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } func (m *Initializers) Reset() { *m = Initializers{} } func (*Initializers) ProtoMessage() {} -func (*Initializers) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } +func (*Initializers) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } func (m *LabelSelector) Reset() { *m = LabelSelector{} } func (*LabelSelector) ProtoMessage() {} -func (*LabelSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} } +func (*LabelSelector) Descriptor() ([]byte, []int) { return 
fileDescriptorGenerated, []int{18} } func (m *LabelSelectorRequirement) Reset() { *m = LabelSelectorRequirement{} } func (*LabelSelectorRequirement) ProtoMessage() {} func (*LabelSelectorRequirement) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{20} + return fileDescriptorGenerated, []int{19} } func (m *List) Reset() { *m = List{} } func (*List) ProtoMessage() {} -func (*List) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{21} } +func (*List) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{20} } func (m *ListMeta) Reset() { *m = ListMeta{} } func (*ListMeta) ProtoMessage() {} -func (*ListMeta) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{22} } +func (*ListMeta) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{21} } func (m *ListOptions) Reset() { *m = ListOptions{} } func (*ListOptions) ProtoMessage() {} -func (*ListOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{23} } - -func (m *ManagedFieldsEntry) Reset() { *m = ManagedFieldsEntry{} } -func (*ManagedFieldsEntry) ProtoMessage() {} -func (*ManagedFieldsEntry) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{24} } +func (*ListOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{22} } func (m *MicroTime) Reset() { *m = MicroTime{} } func (*MicroTime) ProtoMessage() {} -func (*MicroTime) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{25} } +func (*MicroTime) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{23} } func (m *ObjectMeta) Reset() { *m = ObjectMeta{} } func (*ObjectMeta) ProtoMessage() {} -func (*ObjectMeta) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{26} } +func (*ObjectMeta) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{24} } func (m *OwnerReference) Reset() { *m = OwnerReference{} } func (*OwnerReference) ProtoMessage() {} -func (*OwnerReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{27} } - -func (m *PartialObjectMetadata) Reset() { *m = PartialObjectMetadata{} } -func (*PartialObjectMetadata) ProtoMessage() {} -func (*PartialObjectMetadata) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{28} } - -func (m *PartialObjectMetadataList) Reset() { *m = PartialObjectMetadataList{} } -func (*PartialObjectMetadataList) ProtoMessage() {} -func (*PartialObjectMetadataList) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{29} -} +func (*OwnerReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{25} } func (m *Patch) Reset() { *m = Patch{} } func (*Patch) ProtoMessage() {} -func (*Patch) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{30} } - -func (m *PatchOptions) Reset() { *m = PatchOptions{} } -func (*PatchOptions) ProtoMessage() {} -func (*PatchOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{31} } +func (*Patch) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{26} } func (m *Preconditions) Reset() { *m = Preconditions{} } func (*Preconditions) ProtoMessage() {} -func (*Preconditions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{32} } +func (*Preconditions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{27} } func (m *RootPaths) Reset() { *m = RootPaths{} } func (*RootPaths) ProtoMessage() {} -func (*RootPaths) Descriptor() ([]byte, []int) { return 
fileDescriptorGenerated, []int{33} } +func (*RootPaths) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{28} } func (m *ServerAddressByClientCIDR) Reset() { *m = ServerAddressByClientCIDR{} } func (*ServerAddressByClientCIDR) ProtoMessage() {} func (*ServerAddressByClientCIDR) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{34} + return fileDescriptorGenerated, []int{29} } func (m *Status) Reset() { *m = Status{} } func (*Status) ProtoMessage() {} -func (*Status) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{35} } +func (*Status) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{30} } func (m *StatusCause) Reset() { *m = StatusCause{} } func (*StatusCause) ProtoMessage() {} -func (*StatusCause) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{36} } +func (*StatusCause) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{31} } func (m *StatusDetails) Reset() { *m = StatusDetails{} } func (*StatusDetails) ProtoMessage() {} -func (*StatusDetails) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{37} } - -func (m *TableOptions) Reset() { *m = TableOptions{} } -func (*TableOptions) ProtoMessage() {} -func (*TableOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{38} } +func (*StatusDetails) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{32} } func (m *Time) Reset() { *m = Time{} } func (*Time) ProtoMessage() {} -func (*Time) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{39} } +func (*Time) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{33} } func (m *Timestamp) Reset() { *m = Timestamp{} } func (*Timestamp) ProtoMessage() {} -func (*Timestamp) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{40} } +func (*Timestamp) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{34} } func (m *TypeMeta) Reset() { *m = TypeMeta{} } func (*TypeMeta) ProtoMessage() {} -func (*TypeMeta) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{41} } +func (*TypeMeta) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{35} } func (m *UpdateOptions) Reset() { *m = UpdateOptions{} } func (*UpdateOptions) ProtoMessage() {} -func (*UpdateOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{42} } +func (*UpdateOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{36} } func (m *Verbs) Reset() { *m = Verbs{} } func (*Verbs) ProtoMessage() {} -func (*Verbs) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{43} } +func (*Verbs) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{37} } func (m *WatchEvent) Reset() { *m = WatchEvent{} } func (*WatchEvent) ProtoMessage() {} -func (*WatchEvent) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{44} } +func (*WatchEvent) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{38} } func init() { proto.RegisterType((*APIGroup)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.APIGroup") @@ -298,7 +267,6 @@ func init() { proto.RegisterType((*DeleteOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.DeleteOptions") proto.RegisterType((*Duration)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Duration") proto.RegisterType((*ExportOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ExportOptions") - proto.RegisterType((*Fields)(nil), 
"k8s.io.apimachinery.pkg.apis.meta.v1.Fields") proto.RegisterType((*GetOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GetOptions") proto.RegisterType((*GroupKind)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GroupKind") proto.RegisterType((*GroupResource)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GroupResource") @@ -313,21 +281,16 @@ func init() { proto.RegisterType((*List)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.List") proto.RegisterType((*ListMeta)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta") proto.RegisterType((*ListOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ListOptions") - proto.RegisterType((*ManagedFieldsEntry)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ManagedFieldsEntry") proto.RegisterType((*MicroTime)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime") proto.RegisterType((*ObjectMeta)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta") proto.RegisterType((*OwnerReference)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.OwnerReference") - proto.RegisterType((*PartialObjectMetadata)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.PartialObjectMetadata") - proto.RegisterType((*PartialObjectMetadataList)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.PartialObjectMetadataList") proto.RegisterType((*Patch)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Patch") - proto.RegisterType((*PatchOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.PatchOptions") proto.RegisterType((*Preconditions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Preconditions") proto.RegisterType((*RootPaths)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.RootPaths") proto.RegisterType((*ServerAddressByClientCIDR)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ServerAddressByClientCIDR") proto.RegisterType((*Status)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Status") proto.RegisterType((*StatusCause)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.StatusCause") proto.RegisterType((*StatusDetails)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.StatusDetails") - proto.RegisterType((*TableOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.TableOptions") proto.RegisterType((*Time)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Time") proto.RegisterType((*Timestamp)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Timestamp") proto.RegisterType((*TypeMeta)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.TypeMeta") @@ -502,10 +465,6 @@ func (m *APIResource) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) i += copy(dAtA[i:], m.Version) - dAtA[i] = 0x52 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.StorageVersionHash))) - i += copy(dAtA[i:], m.StorageVersionHash) return i, nil } @@ -618,10 +577,14 @@ func (m *CreateOptions) MarshalTo(dAtA []byte) (int, error) { i += copy(dAtA[i:], s) } } - dAtA[i] = 0x1a + dAtA[i] = 0x10 + i++ + if m.IncludeUninitialized { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldManager))) - i += copy(dAtA[i:], m.FieldManager) return i, nil } @@ -744,55 +707,6 @@ func (m *ExportOptions) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func (m *Fields) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Fields) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Map) > 0 { - keysForMap := make([]string, 0, len(m.Map)) - for k := range m.Map { - keysForMap = append(keysForMap, string(k)) - } - 
github_com_gogo_protobuf_sortkeys.Strings(keysForMap) - for _, k := range keysForMap { - dAtA[i] = 0xa - i++ - v := m.Map[string(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n4, err := (&v).MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n4 - } - } - return i, nil -} - func (m *GetOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -812,6 +726,14 @@ func (m *GetOptions) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersion))) i += copy(dAtA[i:], m.ResourceVersion) + dAtA[i] = 0x10 + i++ + if m.IncludeUninitialized { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ return i, nil } @@ -1032,11 +954,11 @@ func (m *Initializers) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Result.Size())) - n5, err := m.Result.MarshalTo(dAtA[i:]) + n4, err := m.Result.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n5 + i += n4 } return i, nil } @@ -1152,11 +1074,11 @@ func (m *List) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n6, err := m.ListMeta.MarshalTo(dAtA[i:]) + n5, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n6 + i += n5 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -1199,11 +1121,6 @@ func (m *ListMeta) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Continue))) i += copy(dAtA[i:], m.Continue) - if m.RemainingItemCount != nil { - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.RemainingItemCount)) - } return i, nil } @@ -1247,71 +1164,21 @@ func (m *ListOptions) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds)) } - dAtA[i] = 0x38 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Limit)) - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Continue))) - i += copy(dAtA[i:], m.Continue) - dAtA[i] = 0x48 + dAtA[i] = 0x30 i++ - if m.AllowWatchBookmarks { + if m.IncludeUninitialized { dAtA[i] = 1 } else { dAtA[i] = 0 } i++ - return i, nil -} - -func (m *ManagedFieldsEntry) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ManagedFieldsEntry) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Manager))) - i += copy(dAtA[i:], m.Manager) - dAtA[i] = 0x12 + dAtA[i] = 0x38 i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operation))) - i += copy(dAtA[i:], m.Operation) - dAtA[i] = 0x1a + i = encodeVarintGenerated(dAtA, i, uint64(m.Limit)) + dAtA[i] = 0x42 i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) - i += copy(dAtA[i:], m.APIVersion) - if m.Time != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Time.Size())) - n7, err := m.Time.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n7 - } - if m.Fields != nil 
{ - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Fields.Size())) - n8, err := m.Fields.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n8 - } + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Continue))) + i += copy(dAtA[i:], m.Continue) return i, nil } @@ -1360,20 +1227,20 @@ func (m *ObjectMeta) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x42 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.CreationTimestamp.Size())) - n9, err := m.CreationTimestamp.MarshalTo(dAtA[i:]) + n6, err := m.CreationTimestamp.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n9 + i += n6 if m.DeletionTimestamp != nil { dAtA[i] = 0x4a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.DeletionTimestamp.Size())) - n10, err := m.DeletionTimestamp.MarshalTo(dAtA[i:]) + n7, err := m.DeletionTimestamp.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n10 + i += n7 } if m.DeletionGracePeriodSeconds != nil { dAtA[i] = 0x50 @@ -1461,25 +1328,11 @@ func (m *ObjectMeta) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Initializers.Size())) - n11, err := m.Initializers.MarshalTo(dAtA[i:]) + n8, err := m.Initializers.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n11 - } - if len(m.ManagedFields) > 0 { - for _, msg := range m.ManagedFields { - dAtA[i] = 0x8a - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } + i += n8 } return i, nil } @@ -1538,70 +1391,6 @@ func (m *OwnerReference) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func (m *PartialObjectMetadata) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PartialObjectMetadata) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n12, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n12 - return i, nil -} - -func (m *PartialObjectMetadataList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PartialObjectMetadataList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n13, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n13 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - func (m *Patch) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -1620,53 +1409,6 @@ func (m *Patch) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func (m *PatchOptions) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PatchOptions) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.DryRun) > 0 { - for _, s := range m.DryRun { - dAtA[i] = 0xa - i++ - l = len(s) - for l 
>= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - if m.Force != nil { - dAtA[i] = 0x10 - i++ - if *m.Force { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldManager))) - i += copy(dAtA[i:], m.FieldManager) - return i, nil -} - func (m *Preconditions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -1688,12 +1430,6 @@ func (m *Preconditions) MarshalTo(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(len(*m.UID))) i += copy(dAtA[i:], *m.UID) } - if m.ResourceVersion != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ResourceVersion))) - i += copy(dAtA[i:], *m.ResourceVersion) - } return i, nil } @@ -1774,11 +1510,11 @@ func (m *Status) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n14, err := m.ListMeta.MarshalTo(dAtA[i:]) + n9, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n14 + i += n9 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) @@ -1795,11 +1531,11 @@ func (m *Status) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x2a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Details.Size())) - n15, err := m.Details.MarshalTo(dAtA[i:]) + n10, err := m.Details.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n15 + i += n10 } dAtA[i] = 0x30 i++ @@ -1886,28 +1622,6 @@ func (m *StatusDetails) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func (m *TableOptions) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *TableOptions) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.IncludeObject))) - i += copy(dAtA[i:], m.IncludeObject) - return i, nil -} - func (m *Timestamp) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -1988,10 +1702,6 @@ func (m *UpdateOptions) MarshalTo(dAtA []byte) (int, error) { i += copy(dAtA[i:], s) } } - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldManager))) - i += copy(dAtA[i:], m.FieldManager) return i, nil } @@ -2050,14 +1760,32 @@ func (m *WatchEvent) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Object.Size())) - n16, err := m.Object.MarshalTo(dAtA[i:]) + n11, err := m.Object.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n16 + i += n11 return i, nil } +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -2131,8 +1859,6 @@ func (m *APIResource) Size() (n int) { n += 1 + l 
+ sovGenerated(uint64(l)) l = len(m.Version) n += 1 + l + sovGenerated(uint64(l)) - l = len(m.StorageVersionHash) - n += 1 + l + sovGenerated(uint64(l)) return n } @@ -2177,8 +1903,7 @@ func (m *CreateOptions) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } - l = len(m.FieldManager) - n += 1 + l + sovGenerated(uint64(l)) + n += 2 return n } @@ -2223,26 +1948,12 @@ func (m *ExportOptions) Size() (n int) { return n } -func (m *Fields) Size() (n int) { - var l int - _ = l - if len(m.Map) > 0 { - for k, v := range m.Map { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - return n -} - -func (m *GetOptions) Size() (n int) { +func (m *GetOptions) Size() (n int) { var l int _ = l l = len(m.ResourceVersion) n += 1 + l + sovGenerated(uint64(l)) + n += 2 return n } @@ -2393,9 +2104,6 @@ func (m *ListMeta) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) l = len(m.Continue) n += 1 + l + sovGenerated(uint64(l)) - if m.RemainingItemCount != nil { - n += 1 + sovGenerated(uint64(*m.RemainingItemCount)) - } return n } @@ -2412,30 +2120,10 @@ func (m *ListOptions) Size() (n int) { if m.TimeoutSeconds != nil { n += 1 + sovGenerated(uint64(*m.TimeoutSeconds)) } + n += 2 n += 1 + sovGenerated(uint64(m.Limit)) l = len(m.Continue) n += 1 + l + sovGenerated(uint64(l)) - n += 2 - return n -} - -func (m *ManagedFieldsEntry) Size() (n int) { - var l int - _ = l - l = len(m.Manager) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Operation) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.APIVersion) - n += 1 + l + sovGenerated(uint64(l)) - if m.Time != nil { - l = m.Time.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Fields != nil { - l = m.Fields.Size() - n += 1 + l + sovGenerated(uint64(l)) - } return n } @@ -2498,12 +2186,6 @@ func (m *ObjectMeta) Size() (n int) { l = m.Initializers.Size() n += 2 + l + sovGenerated(uint64(l)) } - if len(m.ManagedFields) > 0 { - for _, e := range m.ManagedFields { - l = e.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - } return n } @@ -2527,51 +2209,12 @@ func (m *OwnerReference) Size() (n int) { return n } -func (m *PartialObjectMetadata) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *PartialObjectMetadataList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - func (m *Patch) Size() (n int) { var l int _ = l return n } -func (m *PatchOptions) Size() (n int) { - var l int - _ = l - if len(m.DryRun) > 0 { - for _, s := range m.DryRun { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.Force != nil { - n += 2 - } - l = len(m.FieldManager) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - func (m *Preconditions) Size() (n int) { var l int _ = l @@ -2579,10 +2222,6 @@ func (m *Preconditions) Size() (n int) { l = len(*m.UID) n += 1 + l + sovGenerated(uint64(l)) } - if m.ResourceVersion != nil { - l = len(*m.ResourceVersion) - n += 1 + l + sovGenerated(uint64(l)) - } return n } @@ -2660,14 +2299,6 @@ func (m *StatusDetails) Size() (n int) { return n } -func (m *TableOptions) Size() (n int) { - var l int - _ = l - l = len(m.IncludeObject) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - func (m *Timestamp) Size() (n int) { 
var l int _ = l @@ -2695,8 +2326,6 @@ func (m *UpdateOptions) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } - l = len(m.FieldManager) - n += 1 + l + sovGenerated(uint64(l)) return n } @@ -2772,7 +2401,6 @@ func (this *APIResource) String() string { `Categories:` + fmt.Sprintf("%v", this.Categories) + `,`, `Group:` + fmt.Sprintf("%v", this.Group) + `,`, `Version:` + fmt.Sprintf("%v", this.Version) + `,`, - `StorageVersionHash:` + fmt.Sprintf("%v", this.StorageVersionHash) + `,`, `}`, }, "") return s @@ -2794,7 +2422,7 @@ func (this *CreateOptions) String() string { } s := strings.Join([]string{`&CreateOptions{`, `DryRun:` + fmt.Sprintf("%v", this.DryRun) + `,`, - `FieldManager:` + fmt.Sprintf("%v", this.FieldManager) + `,`, + `IncludeUninitialized:` + fmt.Sprintf("%v", this.IncludeUninitialized) + `,`, `}`, }, "") return s @@ -2834,32 +2462,13 @@ func (this *ExportOptions) String() string { }, "") return s } -func (this *Fields) String() string { - if this == nil { - return "nil" - } - keysForMap := make([]string, 0, len(this.Map)) - for k := range this.Map { - keysForMap = append(keysForMap, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForMap) - mapStringForMap := "map[string]Fields{" - for _, k := range keysForMap { - mapStringForMap += fmt.Sprintf("%v: %v,", k, this.Map[k]) - } - mapStringForMap += "}" - s := strings.Join([]string{`&Fields{`, - `Map:` + mapStringForMap + `,`, - `}`, - }, "") - return s -} func (this *GetOptions) String() string { if this == nil { return "nil" } s := strings.Join([]string{`&GetOptions{`, `ResourceVersion:` + fmt.Sprintf("%v", this.ResourceVersion) + `,`, + `IncludeUninitialized:` + fmt.Sprintf("%v", this.IncludeUninitialized) + `,`, `}`, }, "") return s @@ -2948,7 +2557,6 @@ func (this *ListMeta) String() string { `SelfLink:` + fmt.Sprintf("%v", this.SelfLink) + `,`, `ResourceVersion:` + fmt.Sprintf("%v", this.ResourceVersion) + `,`, `Continue:` + fmt.Sprintf("%v", this.Continue) + `,`, - `RemainingItemCount:` + valueToStringGenerated(this.RemainingItemCount) + `,`, `}`, }, "") return s @@ -2963,23 +2571,9 @@ func (this *ListOptions) String() string { `Watch:` + fmt.Sprintf("%v", this.Watch) + `,`, `ResourceVersion:` + fmt.Sprintf("%v", this.ResourceVersion) + `,`, `TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`, + `IncludeUninitialized:` + fmt.Sprintf("%v", this.IncludeUninitialized) + `,`, `Limit:` + fmt.Sprintf("%v", this.Limit) + `,`, `Continue:` + fmt.Sprintf("%v", this.Continue) + `,`, - `AllowWatchBookmarks:` + fmt.Sprintf("%v", this.AllowWatchBookmarks) + `,`, - `}`, - }, "") - return s -} -func (this *ManagedFieldsEntry) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ManagedFieldsEntry{`, - `Manager:` + fmt.Sprintf("%v", this.Manager) + `,`, - `Operation:` + fmt.Sprintf("%v", this.Operation) + `,`, - `APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`, - `Time:` + strings.Replace(fmt.Sprintf("%v", this.Time), "Time", "Time", 1) + `,`, - `Fields:` + strings.Replace(fmt.Sprintf("%v", this.Fields), "Fields", "Fields", 1) + `,`, `}`, }, "") return s @@ -3025,7 +2619,6 @@ func (this *ObjectMeta) String() string { `Finalizers:` + fmt.Sprintf("%v", this.Finalizers) + `,`, `ClusterName:` + fmt.Sprintf("%v", this.ClusterName) + `,`, `Initializers:` + strings.Replace(fmt.Sprintf("%v", this.Initializers), "Initializers", "Initializers", 1) + `,`, - `ManagedFields:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ManagedFields), "ManagedFieldsEntry", 
"ManagedFieldsEntry", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -3045,27 +2638,6 @@ func (this *OwnerReference) String() string { }, "") return s } -func (this *PartialObjectMetadata) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PartialObjectMetadata{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "ObjectMeta", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *PartialObjectMetadataList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PartialObjectMetadataList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "PartialObjectMetadata", "PartialObjectMetadata", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} func (this *Patch) String() string { if this == nil { return "nil" @@ -3075,25 +2647,12 @@ func (this *Patch) String() string { }, "") return s } -func (this *PatchOptions) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PatchOptions{`, - `DryRun:` + fmt.Sprintf("%v", this.DryRun) + `,`, - `Force:` + valueToStringGenerated(this.Force) + `,`, - `FieldManager:` + fmt.Sprintf("%v", this.FieldManager) + `,`, - `}`, - }, "") - return s -} func (this *Preconditions) String() string { if this == nil { return "nil" } s := strings.Join([]string{`&Preconditions{`, `UID:` + valueToStringGenerated(this.UID) + `,`, - `ResourceVersion:` + valueToStringGenerated(this.ResourceVersion) + `,`, `}`, }, "") return s @@ -3161,16 +2720,6 @@ func (this *StatusDetails) String() string { }, "") return s } -func (this *TableOptions) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&TableOptions{`, - `IncludeObject:` + fmt.Sprintf("%v", this.IncludeObject) + `,`, - `}`, - }, "") - return s -} func (this *Timestamp) String() string { if this == nil { return "nil" @@ -3199,7 +2748,6 @@ func (this *UpdateOptions) String() string { } s := strings.Join([]string{`&UpdateOptions{`, `DryRun:` + fmt.Sprintf("%v", this.DryRun) + `,`, - `FieldManager:` + fmt.Sprintf("%v", this.FieldManager) + `,`, `}`, }, "") return s @@ -3760,35 +3308,6 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { } m.Version = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StorageVersionHash", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.StorageVersionHash = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -4088,11 +3607,11 @@ func (m *CreateOptions) Unmarshal(dAtA []byte) error { } m.DryRun = append(m.DryRun, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FieldManager", wireType) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IncludeUninitialized", wireType) } - 
var stringLen uint64 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -4102,21 +3621,12 @@ func (m *CreateOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.FieldManager = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex + m.IncludeUninitialized = bool(v != 0) default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -4480,7 +3990,7 @@ func (m *ExportOptions) Unmarshal(dAtA []byte) error { } return nil } -func (m *Fields) Unmarshal(dAtA []byte) error { +func (m *GetOptions) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -4503,17 +4013,17 @@ func (m *Fields) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Fields: wiretype end group for non-group") + return fmt.Errorf("proto: GetOptions: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Fields: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetOptions: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Map", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -4523,115 +4033,41 @@ func (m *Fields) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - if m.Map == nil { - m.Map = make(map[string]Fields) + m.ResourceVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IncludeUninitialized", wireType) } - var mapkey string - mapvalue := &Fields{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if 
fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = &Fields{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break } } - m.Map[mapkey] = *mapvalue - iNdEx = postIndex + m.IncludeUninitialized = bool(v != 0) default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -4653,7 +4089,7 @@ func (m *Fields) Unmarshal(dAtA []byte) error { } return nil } -func (m *GetOptions) Unmarshal(dAtA []byte) error { +func (m *GroupKind) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -4676,15 +4112,15 @@ func (m *GetOptions) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetOptions: wiretype end group for non-group") + return fmt.Errorf("proto: GroupKind: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetOptions: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GroupKind: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -4709,7 +4145,36 @@ func (m *GetOptions) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ResourceVersion = string(dAtA[iNdEx:postIndex]) + m.Group = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -4732,115 +4197,7 @@ func (m *GetOptions) Unmarshal(dAtA []byte) error { } return nil } -func (m *GroupKind) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - 
fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GroupKind: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GroupKind: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Group = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Kind = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GroupResource) Unmarshal(dAtA []byte) error { +func (m *GroupResource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -5686,14 +5043,51 @@ func (m *LabelSelector) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey if m.MatchLabels == nil { m.MatchLabels = make(map[string]string) } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 + if iNdEx < postIndex { + var valuekey uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -5703,80 +5097,41 @@ func (m *LabelSelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + valuekey |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - 
fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break } - iNdEx += skippy } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + m.MatchLabels[mapkey] = mapvalue + } else { + var mapvalue string + m.MatchLabels[mapkey] = mapvalue } - m.MatchLabels[mapkey] = mapvalue iNdEx = postIndex case 2: if wireType != 2 { @@ -6194,26 +5549,6 @@ func (m *ListMeta) Unmarshal(dAtA []byte) error { } m.Continue = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RemainingItemCount", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.RemainingItemCount = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -6391,6 +5726,26 @@ func (m *ListOptions) Unmarshal(dAtA []byte) error { } } m.TimeoutSeconds = &v + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IncludeUninitialized", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.IncludeUninitialized = bool(v != 0) case 7: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Limit", 
wireType) @@ -6439,26 +5794,6 @@ func (m *ListOptions) Unmarshal(dAtA []byte) error { } m.Continue = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 9: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowWatchBookmarks", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.AllowWatchBookmarks = bool(v != 0) default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -6480,7 +5815,7 @@ func (m *ListOptions) Unmarshal(dAtA []byte) error { } return nil } -func (m *ManagedFieldsEntry) Unmarshal(dAtA []byte) error { +func (m *ObjectMeta) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -6503,15 +5838,15 @@ func (m *ManagedFieldsEntry) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ManagedFieldsEntry: wiretype end group for non-group") + return fmt.Errorf("proto: ObjectMeta: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ManagedFieldsEntry: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ObjectMeta: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Manager", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -6536,11 +5871,11 @@ func (m *ManagedFieldsEntry) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Manager = string(dAtA[iNdEx:postIndex]) + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Operation", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field GenerateName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -6565,11 +5900,11 @@ func (m *ManagedFieldsEntry) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Operation = ManagedFieldsOperationType(dAtA[iNdEx:postIndex]) + m.GenerateName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -6594,13 +5929,13 @@ func (m *ManagedFieldsEntry) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.APIVersion = string(dAtA[iNdEx:postIndex]) + m.Namespace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Time", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SelfLink", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -6610,30 +5945,26 @@ func (m *ManagedFieldsEntry) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := 
iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - if m.Time == nil { - m.Time = &Time{} - } - if err := m.Time.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.SelfLink = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Fields", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -6643,78 +5974,24 @@ func (m *ManagedFieldsEntry) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - if m.Fields == nil { - m.Fields = &Fields{} - } - if err := m.Fields.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ObjectMeta) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ObjectMeta: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ObjectMeta: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -6739,13 +6016,13 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + m.ResourceVersion = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field GenerateName", wireType) + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Generation", wireType) } - var stringLen uint64 + m.Generation = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -6755,26 +6032,16 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + m.Generation |= (int64(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.GenerateName = string(dAtA[iNdEx:postIndex]) - 
iNdEx = postIndex - case 3: + case 8: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field CreationTimestamp", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -6784,26 +6051,27 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.Namespace = string(dAtA[iNdEx:postIndex]) + if err := m.CreationTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 4: + case 9: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SelfLink", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field DeletionTimestamp", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -6813,26 +6081,30 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.SelfLink = string(dAtA[iNdEx:postIndex]) + if m.DeletionTimestamp == nil { + m.DeletionTimestamp = &Time{} + } + if err := m.DeletionTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DeletionGracePeriodSeconds", wireType) } - var stringLen uint64 + var v int64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -6842,26 +6114,17 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + v |= (int64(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: + m.DeletionGracePeriodSeconds = &v + case 11: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -6871,26 +6134,19 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex > l { return 
io.ErrUnexpectedEOF } - m.ResourceVersion = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Generation", wireType) - } - m.Generation = 0 + var keykey uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -6900,16 +6156,12 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Generation |= (int64(b) & 0x7F) << shift + keykey |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CreationTimestamp", wireType) - } - var msglen int + var stringLenmapkey uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -6919,25 +6171,74 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLenmapkey |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen - if postIndex > l { + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - if err := m.CreationTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + if m.Labels == nil { + m.Labels = make(map[string]string) + } + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + m.Labels[mapkey] = mapvalue + } else { + var mapvalue string + m.Labels[mapkey] = mapvalue } iNdEx = postIndex - case 9: + case 12: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DeletionTimestamp", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -6961,18 +6262,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.DeletionTimestamp == nil { - m.DeletionTimestamp = &Time{} - } - if err := m.DeletionTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 10: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DeletionGracePeriodSeconds", wireType) - } - var v int64 + var keykey uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -6982,17 +6272,12 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + 
keykey |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - m.DeletionGracePeriodSeconds = &v - case 11: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) - } - var msglen int + var stringLenmapkey uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -7002,26 +6287,26 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLenmapkey |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen - if postIndex > l { + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - if m.Labels == nil { - m.Labels = make(map[string]string) + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + if m.Annotations == nil { + m.Annotations = make(map[string]string) } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 + if iNdEx < postIndex { + var valuekey uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -7031,84 +6316,45 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + valuekey |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break } - iNdEx += skippy } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return 
io.ErrUnexpectedEOF + } + mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + m.Annotations[mapkey] = mapvalue + } else { + var mapvalue string + m.Annotations[mapkey] = mapvalue } - m.Labels[mapkey] = mapvalue iNdEx = postIndex - case 12: + case 13: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field OwnerReferences", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -7132,127 +6378,9 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Annotations == nil { - m.Annotations = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Annotations[mapkey] = mapvalue - iNdEx = postIndex - case 13: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field OwnerReferences", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.OwnerReferences = append(m.OwnerReferences, OwnerReference{}) - if err := m.OwnerReferences[len(m.OwnerReferences)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + m.OwnerReferences = append(m.OwnerReferences, OwnerReference{}) + if err := 
m.OwnerReferences[len(m.OwnerReferences)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
 			return err
 		}
 		iNdEx = postIndex
 	case 14:

[vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go — the remaining regenerated Unmarshal hunks are condensed here for readability. The downgraded generator output drops everything tied to the newer apimachinery features removed elsewhere in this patch:
 - ObjectMeta.Unmarshal loses case 17 (ManagedFields).
 - OwnerReference.Unmarshal keeps its UID, APIVersion, Controller, and BlockOwnerDeletion cases with unchanged logic; the generator merely reflows them, as it also does for Patch.Unmarshal.
 - The Unmarshal funcs for PartialObjectMetadata, PartialObjectMetadataList, PatchOptions, and TableOptions (including its IncludeObject decoding) are deleted outright.
 - Preconditions.Unmarshal loses case 2 (ResourceVersion), and UpdateOptions.Unmarshal loses case 2 (FieldManager).
 - fileDescriptorGenerated is regenerated, shrinking from 2820 to 2465 bytes of gzipped FileDescriptorProto; the two hex dumps are elided.]
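Every field case in the hunks summarized above begins with the same hand-inlined base-128 varint loop. Below is a minimal, runnable sketch of that decode step; the names are mine, not the generated file's (which uses ErrIntOverflowGenerated and io.ErrUnexpectedEOF for the two failure modes).

package main

import (
	"errors"
	"fmt"
)

// decodeVarint reads a protobuf varint: 7 payload bits per byte,
// low-order group first; a set high bit means another byte follows.
func decodeVarint(data []byte) (value uint64, n int, err error) {
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, errors.New("varint overflows uint64")
		}
		if n >= len(data) {
			return 0, 0, errors.New("unexpected EOF")
		}
		b := data[n]
		n++
		value |= uint64(b&0x7F) << shift
		if b < 0x80 { // high bit clear: this was the last byte
			return value, n, nil
		}
	}
}

func main() {
	// 300 encodes as 0xAC 0x02: 0x2C | (0x02 << 7) = 44 + 256 = 300.
	v, n, err := decodeVarint([]byte{0xAC, 0x02})
	fmt.Println(v, n, err) // 300 2 <nil>
}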
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go
index b4dc78b3e..d845d7b0f 100644
--- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go
@@ -17,7 +17,6 @@ limitations under the License.
 package v1
 
 import (
-	"encoding/json"
 	"fmt"
 
 	"k8s.io/apimachinery/pkg/fields"
@@ -228,40 +227,8 @@ func NewUIDPreconditions(uid string) *Preconditions {
 	return &Preconditions{UID: &u}
 }
 
-// NewRVDeletionPrecondition returns a DeleteOptions with a ResourceVersion precondition set.
-func NewRVDeletionPrecondition(rv string) *DeleteOptions {
-	p := Preconditions{ResourceVersion: &rv}
-	return &DeleteOptions{Preconditions: &p}
-}
-
 // HasObjectMetaSystemFieldValues returns true if fields that are managed by the system on ObjectMeta have values.
 func HasObjectMetaSystemFieldValues(meta Object) bool {
 	return !meta.GetCreationTimestamp().Time.IsZero() ||
 		len(meta.GetUID()) != 0
 }
-
-// ResetObjectMetaForStatus forces the meta fields for a status update to match the meta fields
-// for a pre-existing object. This is opt-in for new objects with Status subresource.
-func ResetObjectMetaForStatus(meta, existingMeta Object) {
-	meta.SetDeletionTimestamp(existingMeta.GetDeletionTimestamp())
-	meta.SetGeneration(existingMeta.GetGeneration())
-	meta.SetSelfLink(existingMeta.GetSelfLink())
-	meta.SetLabels(existingMeta.GetLabels())
-	meta.SetAnnotations(existingMeta.GetAnnotations())
-	meta.SetFinalizers(existingMeta.GetFinalizers())
-	meta.SetOwnerReferences(existingMeta.GetOwnerReferences())
-	meta.SetManagedFields(existingMeta.GetManagedFields())
-}
-
-// MarshalJSON implements json.Marshaler
-func (f Fields) MarshalJSON() ([]byte, error) {
-	return json.Marshal(&f.Map)
-}
-
-// UnmarshalJSON implements json.Unmarshaler
-func (f *Fields) UnmarshalJSON(b []byte) error {
-	return json.Unmarshal(b, &f.Map)
-}
-
-var _ json.Marshaler = Fields{}
-var _ json.Unmarshaler = &Fields{}
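The surviving NewUIDPreconditions helper is how callers pin a delete to one specific object instance. A hedged usage sketch (deleteOptionsFor is my name; only the metav1 calls come from the vendored package):

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// deleteOptionsFor builds DeleteOptions that make the apiserver refuse
// the delete unless the stored object still carries the UID we observed.
// With NewRVDeletionPrecondition removed above, pinning the UID is the
// only precondition this vendored version supports.
func deleteOptionsFor(uid string) *metav1.DeleteOptions {
	return &metav1.DeleteOptions{
		Preconditions: metav1.NewUIDPreconditions(uid),
	}
}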
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/meta.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/meta.go
index 37141bd5d..ee1447541 100644
--- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/meta.go
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/meta.go
@@ -63,8 +63,6 @@ type Object interface {
 	SetOwnerReferences([]OwnerReference)
 	GetClusterName() string
 	SetClusterName(clusterName string)
-	GetManagedFields() []ManagedFieldsEntry
-	SetManagedFields(managedFields []ManagedFieldsEntry)
 }
 
 // ListMetaAccessor retrieves the list interface from an object
@@ -94,8 +92,6 @@ type ListInterface interface {
 	SetSelfLink(selfLink string)
 	GetContinue() string
 	SetContinue(c string)
-	GetRemainingItemCount() *int64
-	SetRemainingItemCount(c *int64)
 }
 
 // Type exposes the type and APIVersion of versioned or internal API objects.
@@ -107,16 +103,12 @@ type Type interface {
 	SetKind(kind string)
 }
 
-var _ ListInterface = &ListMeta{}
-
 func (meta *ListMeta) GetResourceVersion() string        { return meta.ResourceVersion }
 func (meta *ListMeta) SetResourceVersion(version string) { meta.ResourceVersion = version }
 func (meta *ListMeta) GetSelfLink() string               { return meta.SelfLink }
 func (meta *ListMeta) SetSelfLink(selfLink string)       { meta.SelfLink = selfLink }
 func (meta *ListMeta) GetContinue() string               { return meta.Continue }
 func (meta *ListMeta) SetContinue(c string)              { meta.Continue = c }
-func (meta *ListMeta) GetRemainingItemCount() *int64     { return meta.RemainingItemCount }
-func (meta *ListMeta) SetRemainingItemCount(c *int64)    { meta.RemainingItemCount = c }
 
 func (obj *TypeMeta) GetObjectKind() schema.ObjectKind { return obj }
 
@@ -174,9 +166,5 @@ func (meta *ObjectMeta) GetOwnerReferences() []OwnerReference { return m
 func (meta *ObjectMeta) SetOwnerReferences(references []OwnerReference) {
 	meta.OwnerReferences = references
 }
-func (meta *ObjectMeta) GetClusterName() string { return meta.ClusterName }
-func (meta *ObjectMeta) SetClusterName(clusterName string) { meta.ClusterName = clusterName }
-func (meta *ObjectMeta) GetManagedFields() []ManagedFieldsEntry { return meta.ManagedFields }
-func (meta *ObjectMeta) SetManagedFields(managedFields []ManagedFieldsEntry) {
-	meta.ManagedFields = managedFields
-}
+func (meta *ObjectMeta) GetClusterName() string            { return meta.ClusterName }
+func (meta *ObjectMeta) SetClusterName(clusterName string) { meta.ClusterName = clusterName }
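The metav1.Object interface these hunks trim is what lets shared controller code touch any resource's metadata without knowing its concrete type. A small illustrative sketch (stampManagedBy and the label value are mine, not from the vendored file):

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// stampManagedBy accepts anything satisfying metav1.Object -- an
// EventListener, a Deployment, or a bare *metav1.ObjectMeta -- and
// records which controller owns it, using only interface accessors.
func stampManagedBy(obj metav1.Object, controller string) {
	labels := obj.GetLabels()
	if labels == nil {
		labels = map[string]string{}
	}
	labels["app.kubernetes.io/managed-by"] = controller
	obj.SetLabels(labels)
}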
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go
index cdd9a6a7a..6f6c5111b 100644
--- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go
@@ -41,6 +41,11 @@ func (t *MicroTime) DeepCopyInto(out *MicroTime) {
 	*out = *t
 }
 
+// String returns the representation of the time.
+func (t MicroTime) String() string {
+	return t.Time.String()
+}
+
 // NewMicroTime returns a wrapped instance of the provided time
 func NewMicroTime(time time.Time) MicroTime {
 	return MicroTime{time}
@@ -67,40 +72,22 @@ func (t *MicroTime) IsZero() bool {
 
 // Before reports whether the time instant t is before u.
 func (t *MicroTime) Before(u *MicroTime) bool {
-	if t != nil && u != nil {
-		return t.Time.Before(u.Time)
-	}
-	return false
+	return t.Time.Before(u.Time)
 }
 
 // Equal reports whether the time instant t is equal to u.
 func (t *MicroTime) Equal(u *MicroTime) bool {
-	if t == nil && u == nil {
-		return true
-	}
-	if t != nil && u != nil {
-		return t.Time.Equal(u.Time)
-	}
-	return false
+	return t.Time.Equal(u.Time)
 }
 
 // BeforeTime reports whether the time instant t is before second-level precision u.
 func (t *MicroTime) BeforeTime(u *Time) bool {
-	if t != nil && u != nil {
-		return t.Time.Before(u.Time)
-	}
-	return false
+	return t.Time.Before(u.Time)
 }
 
 // EqualTime reports whether the time instant t is equal to second-level precision u.
 func (t *MicroTime) EqualTime(u *Time) bool {
-	if t == nil && u == nil {
-		return true
-	}
-	if t != nil && u != nil {
-		return t.Time.Equal(u.Time)
-	}
-	return false
+	return t.Time.Equal(u.Time)
 }
 
 // UnixMicro returns the local time corresponding to the given Unix time
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/register.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/register.go
index 368efe1ef..0827729d0 100644
--- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/register.go
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/register.go
@@ -55,7 +55,6 @@ func AddToGroupVersion(scheme *runtime.Scheme, groupVersion schema.GroupVersion)
 		&DeleteOptions{},
 		&CreateOptions{},
 		&UpdateOptions{},
-		&PatchOptions{},
 	)
 	utilruntime.Must(scheme.AddConversionFuncs(
 		Convert_v1_WatchEvent_To_watch_Event,
@@ -91,26 +90,8 @@ func init() {
 		&DeleteOptions{},
 		&CreateOptions{},
 		&UpdateOptions{},
-		&PatchOptions{},
 	)
 
-	if err := AddMetaToScheme(scheme); err != nil {
-		panic(err)
-	}
-
 	// register manually. This usually goes through the SchemeBuilder, which we cannot use here.
 	utilruntime.Must(RegisterDefaults(scheme))
 }
-
-func AddMetaToScheme(scheme *runtime.Scheme) error {
-	scheme.AddKnownTypes(SchemeGroupVersion,
-		&Table{},
-		&TableOptions{},
-		&PartialObjectMetadata{},
-		&PartialObjectMetadataList{},
-	)
-
-	return scheme.AddConversionFuncs(
-		Convert_Slice_string_To_v1_IncludeObjectPolicy,
-	)
-}
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go
index fe510ed9e..efff656e1 100644
--- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go
@@ -20,7 +20,7 @@ import (
 	"encoding/json"
 	"time"
 
-	fuzz "github.com/google/gofuzz"
+	"github.com/google/gofuzz"
 )
 
 // Time is a wrapper around time.Time which supports correct
@@ -41,6 +41,11 @@ func (t *Time) DeepCopyInto(out *Time) {
 	*out = *t
 }
 
+// String returns the representation of the time.
+func (t Time) String() string {
+	return t.Time.String()
+}
+
 // NewTime returns a wrapped instance of the provided time
 func NewTime(time time.Time) Time {
 	return Time{time}
@@ -67,10 +72,7 @@ func (t *Time) IsZero() bool {
 
 // Before reports whether the time instant t is before u.
 func (t *Time) Before(u *Time) bool {
-	if t != nil && u != nil {
-		return t.Time.Before(u.Time)
-	}
-	return false
+	return t.Time.Before(u.Time)
 }
 
 // Equal reports whether the time instant t is equal to u.
@@ -145,12 +147,8 @@ func (t Time) MarshalJSON() ([]byte, error) {
 		// Encode unset/nil objects as JSON's "null".
 		return []byte("null"), nil
 	}
-	buf := make([]byte, 0, len(time.RFC3339)+2)
-	buf = append(buf, '"')
-	// time cannot contain non escapable JSON characters
-	buf = t.UTC().AppendFormat(buf, time.RFC3339)
-	buf = append(buf, '"')
-	return buf, nil
+
+	return json.Marshal(t.UTC().Format(time.RFC3339))
 }
 
 // OpenAPISchemaType is used by the kube-openapi generator when constructing
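The MarshalJSON hunk above swaps a hand-rolled quoting buffer for json.Marshal of the RFC 3339 string; the two paths produce identical bytes. A standalone sketch of both, with plain time.Time standing in for the vendored Time wrapper:

package main

import (
	"encoding/json"
	"fmt"
	"time"
)

func main() {
	t := time.Date(2019, 7, 17, 9, 34, 13, 0, time.UTC)

	// Removed upstream path: append the quotes and formatted time by hand.
	buf := make([]byte, 0, len(time.RFC3339)+2)
	buf = append(buf, '"')
	buf = t.UTC().AppendFormat(buf, time.RFC3339)
	buf = append(buf, '"')

	// Path this patch restores: let encoding/json add the quotes.
	viaJSON, err := json.Marshal(t.UTC().Format(time.RFC3339))
	if err != nil {
		panic(err)
	}

	fmt.Println(string(buf))     // "2019-07-17T09:34:13Z"
	fmt.Println(string(viaJSON)) // "2019-07-17T09:34:13Z"
}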
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go
index 46ef65f45..c1743382a 100644
--- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go
@@ -81,21 +81,6 @@ type ListMeta struct {
 	// identical to the value in the first response, unless you have received this token from an error
 	// message.
 	Continue string `json:"continue,omitempty" protobuf:"bytes,3,opt,name=continue"`
-
-	// remainingItemCount is the number of subsequent items in the list which are not included in this
-	// list response. If the list request contained label or field selectors, then the number of
-	// remaining items is unknown and the field will be left unset and omitted during serialization.
-	// If the list is complete (either because it is not chunking or because this is the last chunk),
-	// then there are no more remaining items and this field will be left unset and omitted during
-	// serialization.
-	// Servers older than v1.15 do not set this field.
-	// The intended use of the remainingItemCount is *estimating* the size of a collection. Clients
-	// should not rely on the remainingItemCount to be set or to be exact.
-	//
-	// This field is alpha and can be changed or removed without notice.
-	//
-	// +optional
-	RemainingItemCount *int64 `json:"remainingItemCount,omitempty" protobuf:"bytes,4,opt,name=remainingItemCount"`
 }
 
 // These are internal finalizer values for Kubernetes-like APIs, must be qualified name unless defined here
@@ -250,8 +235,6 @@ type ObjectMeta struct {
 	// When an object is created, the system will populate this list with the current set of initializers.
 	// Only privileged users may set or modify this list. Once it is empty, it may not be modified further
 	// by any user.
-	//
-	// DEPRECATED - initializers are an alpha field and will be removed in v1.15.
 	Initializers *Initializers `json:"initializers,omitempty" protobuf:"bytes,16,opt,name=initializers"`
 
 	// Must be empty before the object is deleted from the registry. Each entry
@@ -267,19 +250,6 @@ type ObjectMeta struct {
 	// This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request.
 	// +optional
 	ClusterName string `json:"clusterName,omitempty" protobuf:"bytes,15,opt,name=clusterName"`
-
-	// ManagedFields maps workflow-id and version to the set of fields
-	// that are managed by that workflow. This is mostly for internal
-	// housekeeping, and users typically shouldn't need to set or
-	// understand this field. A workflow can be the user's name, a
-	// controller's name, or the name of a specific apply path like
-	// "ci-cd". The set of fields is always in the version that the
-	// workflow used when modifying the object.
-	//
-	// This field is alpha and can be changed or removed without notice.
-	//
-	// +optional
-	ManagedFields []ManagedFieldsEntry `json:"managedFields,omitempty" protobuf:"bytes,17,rep,name=managedFields"`
 }
 
 // Initializers tracks the progress of initialization.
@@ -316,8 +286,8 @@ const (
 )
 
 // OwnerReference contains enough information to let you identify an owning
-// object. An owning object must be in the same namespace as the dependent, or
-// be cluster-scoped, so there is no namespace field.
+// object. Currently, an owning object must be in the same namespace, so there
+// is no namespace field.
 type OwnerReference struct {
 	// API version of the referent.
 	APIVersion string `json:"apiVersion" protobuf:"bytes,5,opt,name=apiVersion"`
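Given the same-namespace rule the reverted comment describes, here is a hedged sketch of how a controller builds such a reference for a dependent it creates in the owner's namespace (ownerRefFor is my name; the struct fields are the ones defined in this hunk):

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// ownerRefFor points a dependent back at its owning object. Because the
// owner must live in the dependent's namespace, no namespace is recorded.
func ownerRefFor(owner metav1.Object, apiVersion, kind string) metav1.OwnerReference {
	isController := true
	return metav1.OwnerReference{
		APIVersion: apiVersion,
		Kind:       kind,
		Name:       owner.GetName(),
		UID:        owner.GetUID(),
		Controller: &isController,
	}
}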
@@ -357,27 +327,13 @@ type ListOptions struct {
 	// Defaults to everything.
 	// +optional
 	FieldSelector string `json:"fieldSelector,omitempty" protobuf:"bytes,2,opt,name=fieldSelector"`
-
-	// +k8s:deprecated=includeUninitialized,protobuf=6
-
+	// If true, partially initialized resources are included in the response.
+	// +optional
+	IncludeUninitialized bool `json:"includeUninitialized,omitempty" protobuf:"varint,6,opt,name=includeUninitialized"`
 	// Watch for changes to the described resources and return them as a stream of
 	// add, update, and remove notifications. Specify resourceVersion.
 	// +optional
 	Watch bool `json:"watch,omitempty" protobuf:"varint,3,opt,name=watch"`
-	// allowWatchBookmarks requests watch events with type "BOOKMARK".
-	// Servers that do not implement bookmarks may ignore this flag and
-	// bookmarks are sent at the server's discretion. Clients should not
-	// assume bookmarks are returned at any specific interval, nor may they
-	// assume the server will send any BOOKMARK event during a session.
-	// If this is not a watch, this field is ignored.
-	// If the feature gate WatchBookmarks is not enabled in apiserver,
-	// this field is ignored.
-	//
-	// This field is alpha and can be changed or removed without notice.
-	//
-	// +optional
-	AllowWatchBookmarks bool `json:"allowWatchBookmarks,omitempty" protobuf:"varint,9,opt,name=allowWatchBookmarks"`
-
 	// When specified with a watch call, shows changes that occur after that particular version of a resource.
 	// Defaults to changes from the beginning of history.
 	// When specified for list:
@@ -428,14 +384,11 @@ type ListOptions struct {
 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
 
 // ExportOptions is the query options to the standard REST get call.
-// Deprecated. Planned for removal in 1.18.
 type ExportOptions struct {
 	TypeMeta `json:",inline"`
 	// Should this value be exported. Export strips fields that a user can not specify.
-	// Deprecated. Planned for removal in 1.18.
 	Export bool `json:"export" protobuf:"varint,1,opt,name=export"`
 	// Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.
-	// Deprecated. Planned for removal in 1.18.
 	Exact bool `json:"exact" protobuf:"varint,2,opt,name=exact"`
 }
 
@@ -449,7 +402,9 @@ type GetOptions struct {
 	// - if it's 0, then we simply return what we currently have in cache, no guarantee;
 	// - if set to non zero, then the result is at least as fresh as given rv.
 	ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,1,opt,name=resourceVersion"`
-	// +k8s:deprecated=includeUninitialized,protobuf=2
+	// If true, partially initialized resources are included in the response.
+	// +optional
+	IncludeUninitialized bool `json:"includeUninitialized,omitempty" protobuf:"varint,2,opt,name=includeUninitialized"`
 }
 
 // DeletionPropagation decides if a deletion will propagate to the dependents of
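For a sense of where ListOptions surfaces in this patch's own code, a hedged sketch against the generated Triggers clientset (the module path, namespace, and selector are guesses on my part; the context-free List signature matches generated clientsets of this era):

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	triggersclientset "github.com/tektoncd/triggers/pkg/client/clientset/versioned"
)

// listEventListeners filters by label; LabelSelector and Watch are among
// the ListOptions fields shown in the hunk above.
func listEventListeners(c triggersclientset.Interface) error {
	opts := metav1.ListOptions{
		LabelSelector: "app.kubernetes.io/part-of=triggers",
	}
	els, err := c.TriggersV1alpha1().EventListeners("default").List(opts)
	if err != nil {
		return err
	}
	for _, el := range els.Items {
		fmt.Println(el.Name)
	}
	return nil
}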
An invalid or unrecognized dryRun directive will - // result in an error response and no further processing of the - // request. Valid values are: - // - All: all dry run stages will be processed - // +optional - DryRun []string `json:"dryRun,omitempty" protobuf:"bytes,1,rep,name=dryRun"` - - // Force is going to "force" Apply requests. It means user will - // re-acquire conflicting fields owned by other people. Force - // flag must be unset for non-apply patch requests. - // +optional - Force *bool `json:"force,omitempty" protobuf:"varint,2,opt,name=force"` - - // fieldManager is a name associated with the actor or entity - // that is making these changes. The value must be less than or - // 128 characters long, and only contain printable characters, - // as defined by https://golang.org/pkg/unicode/#IsPrint. This - // field is required for apply requests - // (application/apply-patch) but optional for non-apply patch - // types (JsonPatch, MergePatch, StrategicMergePatch). - // +optional - FieldManager string `json:"fieldManager,omitempty" protobuf:"bytes,3,name=fieldManager"` + // If IncludeUninitialized is specified, the object may be + // returned without completing initialization. + IncludeUninitialized bool `json:"includeUninitialized,omitempty" protobuf:"varint,2,opt,name=includeUninitialized"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // UpdateOptions may be provided when updating an API object. -// All fields in UpdateOptions should also be present in PatchOptions. type UpdateOptions struct { TypeMeta `json:",inline"` @@ -590,13 +508,6 @@ type UpdateOptions struct { // - All: all dry run stages will be processed // +optional DryRun []string `json:"dryRun,omitempty" protobuf:"bytes,1,rep,name=dryRun"` - - // fieldManager is a name associated with the actor or entity - // that is making these changes. The value must be less than or - // 128 characters long, and only contain printable characters, - // as defined by https://golang.org/pkg/unicode/#IsPrint. - // +optional - FieldManager string `json:"fieldManager,omitempty" protobuf:"bytes,2,name=fieldManager"` } // Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out. @@ -604,9 +515,6 @@ type Preconditions struct { // Specifies the target UID. // +optional UID *types.UID `json:"uid,omitempty" protobuf:"bytes,1,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"` - // Specifies the target ResourceVersion - // +optional - ResourceVersion *string `json:"resourceVersion,omitempty" protobuf:"bytes,2,opt,name=resourceVersion"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -884,9 +792,6 @@ const ( // without the expected return type. The presence of this cause indicates the error may be // due to an intervening proxy or the server software malfunctioning. CauseTypeUnexpectedServerResponse CauseType = "UnexpectedServerResponse" - // FieldManagerConflict is used to report when another client claims to manage this field, - // It should only be returned for a request using server-side apply. - CauseTypeFieldManagerConflict CauseType = "FieldManagerConflict" ) // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -1001,15 +906,6 @@ type APIResource struct { ShortNames []string `json:"shortNames,omitempty" protobuf:"bytes,5,rep,name=shortNames"` // categories is a list of the grouped resources this resource belongs to (e.g. 
'all') Categories []string `json:"categories,omitempty" protobuf:"bytes,7,rep,name=categories"` - // The hash value of the storage version, the version this resource is - // converted to when written to the data store. Value must be treated - // as opaque by clients. Only equality comparison on the value is valid. - // This is an alpha feature and may change or be removed in the future. - // The field is populated by the apiserver only if the - // StorageVersionHash feature gate is enabled. - // This field will remain optional even if it graduates. - // +optional - StorageVersionHash string `json:"storageVersionHash,omitempty" protobuf:"bytes,10,opt,name=storageVersionHash"` } // Verbs masks the value so protobuf can generate @@ -1111,210 +1007,3 @@ const ( LabelSelectorOpExists LabelSelectorOperator = "Exists" LabelSelectorOpDoesNotExist LabelSelectorOperator = "DoesNotExist" ) - -// ManagedFieldsEntry is a workflow-id, a FieldSet and the group version of the resource -// that the fieldset applies to. -type ManagedFieldsEntry struct { - // Manager is an identifier of the workflow managing these fields. - Manager string `json:"manager,omitempty" protobuf:"bytes,1,opt,name=manager"` - // Operation is the type of operation which lead to this ManagedFieldsEntry being created. - // The only valid values for this field are 'Apply' and 'Update'. - Operation ManagedFieldsOperationType `json:"operation,omitempty" protobuf:"bytes,2,opt,name=operation,casttype=ManagedFieldsOperationType"` - // APIVersion defines the version of this resource that this field set - // applies to. The format is "group/version" just like the top-level - // APIVersion field. It is necessary to track the version of a field - // set because it cannot be automatically converted. - APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,3,opt,name=apiVersion"` - // Time is timestamp of when these fields were set. It should always be empty if Operation is 'Apply' - // +optional - Time *Time `json:"time,omitempty" protobuf:"bytes,4,opt,name=time"` - // Fields identifies a set of fields. - // +optional - Fields *Fields `json:"fields,omitempty" protobuf:"bytes,5,opt,name=fields,casttype=Fields"` -} - -// ManagedFieldsOperationType is the type of operation which lead to a ManagedFieldsEntry being created. -type ManagedFieldsOperationType string - -const ( - ManagedFieldsOperationApply ManagedFieldsOperationType = "Apply" - ManagedFieldsOperationUpdate ManagedFieldsOperationType = "Update" -) - -// Fields stores a set of fields in a data structure like a Trie. -// To understand how this is used, see: https://github.com/kubernetes-sigs/structured-merge-diff -type Fields struct { - // Map stores a set of fields in a data structure like a Trie. - // - // Each key is either a '.' representing the field itself, and will always map to an empty set, - // or a string representing a sub-field or item. The string will follow one of these four formats: - // 'f:<name>', where <name> is the name of a field in a struct, or key in a map - // 'v:<value>', where <value> is the exact json formatted value of a list item - // 'i:<index>', where <index> is the position of an item in a list - // 'k:<keys>', where <keys> is a map of a list item's key fields to their unique values - // If a key maps to an empty Fields value, the field that key represents is part of the set.
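To make the trie encoding described above concrete, a hypothetical value (using the Fields type as it exists before this deletion; the field names and list key are invented for illustration) that marks spec.replicas and the list item keyed by port=80 as part of the set:

	import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	// fieldSet is a hypothetical example of the Trie-like encoding: every key
	// that maps to an empty Fields value is a member of the set.
	var fieldSet = metav1.Fields{
		Map: map[string]metav1.Fields{
			"f:spec": {Map: map[string]metav1.Fields{
				"f:replicas": {}, // empty value: spec.replicas is in the set
				"f:ports": {Map: map[string]metav1.Fields{
					`k:{"port":80}`: {}, // the list item keyed by port=80 is in the set
				}},
			}},
		},
	}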
- // - // The exact format is defined in k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal - Map map[string]Fields `json:",inline" protobuf:"bytes,1,rep,name=map"` -} - -// TODO: Table does not generate to protobuf because of the interface{} - fix protobuf -// generation to support a meta type that can accept any valid JSON. This can be introduced -// in a v1 because clients a) receive an error if they try to access proto today, and b) -// once introduced they would be able to gracefully switch over to using it. - -// Table is a tabular representation of a set of API resources. The server transforms the -// object into a set of preferred columns for quickly reviewing the objects. -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +protobuf=false -type Table struct { - TypeMeta `json:",inline"` - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds - // +optional - ListMeta `json:"metadata,omitempty"` - - // columnDefinitions describes each column in the returned items array. The number of cells per row - // will always match the number of column definitions. - ColumnDefinitions []TableColumnDefinition `json:"columnDefinitions"` - // rows is the list of items in the table. - Rows []TableRow `json:"rows"` -} - -// TableColumnDefinition contains information about a column returned in the Table. -// +protobuf=false -type TableColumnDefinition struct { - // name is a human readable name for the column. - Name string `json:"name"` - // type is an OpenAPI type definition for this column, such as number, integer, string, or - // array. - // See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for more. - Type string `json:"type"` - // format is an optional OpenAPI type modifier for this column. A format modifies the type and - // imposes additional rules, like date or time formatting for a string. The 'name' format is applied - // to the primary identifier column which has type 'string' to assist clients in identifying the - // column that is the resource name. - // See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for more. - Format string `json:"format"` - // description is a human readable description of this column. - Description string `json:"description"` - // priority is an integer defining the relative importance of this column compared to others. Lower - numbers are considered higher priority. Columns that may be omitted in limited space scenarios - should be given a higher priority. - Priority int32 `json:"priority"` -} - -// TableRow is an individual row in a table. -// +protobuf=false -type TableRow struct { - // cells will be as wide as the column definitions array and may contain strings, numbers (float64 or - // int64), booleans, simple maps, lists, or null. See the type field of the column definition for a - // more detailed description. - Cells []interface{} `json:"cells"` - // conditions describe additional status of a row that are relevant for a human user. These conditions - // apply to the row, not to the object, and will be specific to table output. The only defined - // condition type is 'Completed', for a row that indicates a resource that has run to completion and - // can be given less visual priority.
- // +optional - Conditions []TableRowCondition `json:"conditions,omitempty"` - // This field contains the requested additional information about each object based on the includeObject - // policy when requesting the Table. If "None", this field is empty, if "Object" this will be the - // default serialization of the object for the current API version, and if "Metadata" (the default) will - // contain the object metadata. Check the returned kind and apiVersion of the object before parsing. - // The media type of the object will always match the enclosing list - if this is a JSON table, these - // will be JSON encoded objects. - // +optional - Object runtime.RawExtension `json:"object,omitempty"` -} - -// TableRowCondition allows a row to be marked with additional information. -// +protobuf=false -type TableRowCondition struct { - // Type of row condition. The only defined value is 'Completed' indicating that the - // object this row represents has reached a completed state and may be given less visual - // priority than other rows. Clients are not required to honor any conditions but should - // be consistent where possible about handling the conditions. - Type RowConditionType `json:"type"` - // Status of the condition, one of True, False, Unknown. - Status ConditionStatus `json:"status"` - // (brief) machine readable reason for the condition's last transition. - // +optional - Reason string `json:"reason,omitempty"` - // Human readable message indicating details about last transition. - // +optional - Message string `json:"message,omitempty"` -} - -type RowConditionType string - -// These are valid conditions of a row. This list is not exhaustive and new conditions may be -// included by other resources. -const ( - // RowCompleted means the underlying resource has reached completion and may be given less - // visual priority than other resources. - RowCompleted RowConditionType = "Completed" -) - -type ConditionStatus string - -// These are valid condition statuses. "ConditionTrue" means a resource is in the condition. -// "ConditionFalse" means a resource is not in the condition. "ConditionUnknown" means kubernetes -// can't decide if a resource is in the condition or not. In the future, we could add other -// intermediate conditions, e.g. ConditionDegraded. -const ( - ConditionTrue ConditionStatus = "True" - ConditionFalse ConditionStatus = "False" - ConditionUnknown ConditionStatus = "Unknown" -) - -// IncludeObjectPolicy controls which portion of the object is returned with a Table. -type IncludeObjectPolicy string - -const ( - // IncludeNone returns no object. - IncludeNone IncludeObjectPolicy = "None" - // IncludeMetadata serializes the object containing only its metadata field. - IncludeMetadata IncludeObjectPolicy = "Metadata" - // IncludeObject contains the full object. - IncludeObject IncludeObjectPolicy = "Object" -) - -// TableOptions are used when a Table is requested by the caller. -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -type TableOptions struct { - TypeMeta `json:",inline"` - - // NoHeaders is only exposed for internal callers. It is not included in our OpenAPI definitions - // and may be removed as a field in a future release. - NoHeaders bool `json:"-"` - - // includeObject decides whether to include each object along with its columnar information.
- // Specifying "None" will return no object, specifying "Object" will return the full object contents, and - // specifying "Metadata" (the default) will return the object's metadata in the PartialObjectMetadata kind - // in version v1beta1 of the meta.k8s.io API group. - IncludeObject IncludeObjectPolicy `json:"includeObject,omitempty" protobuf:"bytes,1,opt,name=includeObject,casttype=IncludeObjectPolicy"` -} - -// PartialObjectMetadata is a generic representation of any object with ObjectMeta. It allows clients -// to get access to a particular ObjectMeta schema without knowing the details of the version. -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -type PartialObjectMetadata struct { - TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` -} - -// PartialObjectMetadataList contains a list of objects containing only their metadata -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -type PartialObjectMetadataList struct { - TypeMeta `json:",inline"` - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds - // +optional - ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // items contains each of the included items. - Items []PartialObjectMetadata `json:"items" protobuf:"bytes,2,rep,name=items"` -} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go index f35c22bf1..35e800f8a 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go @@ -49,17 +49,16 @@ func (APIGroupList) SwaggerDoc() map[string]string { } var map_APIResource = map[string]string{ - "": "APIResource specifies the name of a resource and whether it is namespaced.", - "name": "name is the plural name of the resource.", - "singularName": "singularName is the singular name of the resource. This allows clients to handle plural and singular opaquely. The singularName is more correct for reporting status on a single item and both singular and plural are allowed from the kubectl CLI interface.", - "namespaced": "namespaced indicates if a resource is namespaced or not.", - "group": "group is the preferred group of the resource. Empty implies the group of the containing resource list. For subresources, this may have a different value, for example: Scale\".", - "version": "version is the preferred version of the resource. Empty implies the version of the containing resource list For subresources, this may have a different value, for example: v1 (while inside a v1beta1 version of the core resource's group)\".", - "kind": "kind is the kind for the resource (e.g. 'Foo' is the kind for a resource 'foo')", - "verbs": "verbs is a list of supported kube verbs (this includes get, list, watch, create, update, patch, delete, deletecollection, and proxy)", - "shortNames": "shortNames is a list of suggested short names of the resource.", - "categories": "categories is a list of the grouped resources this resource belongs to (e.g. 'all')", - "storageVersionHash": "The hash value of the storage version, the version this resource is converted to when written to the data store. 
Value must be treated as opaque by clients. Only equality comparison on the value is valid. This is an alpha feature and may change or be removed in the future. The field is populated by the apiserver only if the StorageVersionHash feature gate is enabled. This field will remain optional even if it graduates.", + "": "APIResource specifies the name of a resource and whether it is namespaced.", + "name": "name is the plural name of the resource.", + "singularName": "singularName is the singular name of the resource. This allows clients to handle plural and singular opaquely. The singularName is more correct for reporting status on a single item and both singular and plural are allowed from the kubectl CLI interface.", + "namespaced": "namespaced indicates if a resource is namespaced or not.", + "group": "group is the preferred group of the resource. Empty implies the group of the containing resource list. For subresources, this may have a different value, for example: Scale\".", + "version": "version is the preferred version of the resource. Empty implies the version of the containing resource list For subresources, this may have a different value, for example: v1 (while inside a v1beta1 version of the core resource's group)\".", + "kind": "kind is the kind for the resource (e.g. 'Foo' is the kind for a resource 'foo')", + "verbs": "verbs is a list of supported kube verbs (this includes get, list, watch, create, update, patch, delete, deletecollection, and proxy)", + "shortNames": "shortNames is a list of suggested short names of the resource.", + "categories": "categories is a list of the grouped resources this resource belongs to (e.g. 'all')", } func (APIResource) SwaggerDoc() map[string]string { @@ -87,9 +86,9 @@ func (APIVersions) SwaggerDoc() map[string]string { } var map_CreateOptions = map[string]string{ - "": "CreateOptions may be provided when creating an API object.", - "dryRun": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", - "fieldManager": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.", + "": "CreateOptions may be provided when creating an API object.", + "dryRun": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", + "includeUninitialized": "If IncludeUninitialized is specified, the object may be returned without completing initialization.", } func (CreateOptions) SwaggerDoc() map[string]string { @@ -110,26 +109,19 @@ func (DeleteOptions) SwaggerDoc() map[string]string { } var map_ExportOptions = map[string]string{ - "": "ExportOptions is the query options to the standard REST get call. Deprecated. Planned for removal in 1.18.", - "export": "Should this value be exported. Export strips fields that a user can not specify. Deprecated. Planned for removal in 1.18.", - "exact": "Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. Deprecated. 
Planned for removal in 1.18.", + "": "ExportOptions is the query options to the standard REST get call.", + "export": "Should this value be exported. Export strips fields that a user can not specify.", + "exact": "Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.", } func (ExportOptions) SwaggerDoc() map[string]string { return map_ExportOptions } -var map_Fields = map[string]string{ - "": "Fields stores a set of fields in a data structure like a Trie. To understand how this is used, see: https://github.com/kubernetes-sigs/structured-merge-diff", -} - -func (Fields) SwaggerDoc() map[string]string { - return map_Fields -} - var map_GetOptions = map[string]string{ - "": "GetOptions is the standard query options to the standard REST get call.", - "resourceVersion": "When specified: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.", + "": "GetOptions is the standard query options to the standard REST get call.", + "resourceVersion": "When specified: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.", + "includeUninitialized": "If true, partially initialized resources are included in the response.", } func (GetOptions) SwaggerDoc() map[string]string { @@ -197,11 +189,10 @@ func (List) SwaggerDoc() map[string]string { } var map_ListMeta = map[string]string{ - "": "ListMeta describes metadata that synthetic resources must have, including lists and various status objects. A resource may have only one of {ObjectMeta, ListMeta}.", - "selfLink": "selfLink is a URL representing this object. Populated by the system. Read-only.", - "resourceVersion": "String that identifies the server's internal version of this object that can be used by clients to determine when objects have changed. Value must be treated as opaque by clients and passed unmodified back to the server. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency", - "continue": "continue may be set if the user set a limit on the number of items returned, and indicates that the server has more data available. The value is opaque and may be used to issue another request to the endpoint that served this list to retrieve the next set of available objects. Continuing a consistent list may not be possible if the server configuration has changed or more than a few minutes have passed. The resourceVersion field returned when using this continue value will be identical to the value in the first response, unless you have received this token from an error message.", - "remainingItemCount": "remainingItemCount is the number of subsequent items in the list which are not included in this list response. If the list request contained label or field selectors, then the number of remaining items is unknown and the field will be left unset and omitted during serialization. If the list is complete (either because it is not chunking or because this is the last chunk), then there are no more remaining items and this field will be left unset and omitted during serialization. Servers older than v1.15 do not set this field. 
The intended use of the remainingItemCount is *estimating* the size of a collection. Clients should not rely on the remainingItemCount to be set or to be exact.\n\nThis field is alpha and can be changed or removed without notice.", + "": "ListMeta describes metadata that synthetic resources must have, including lists and various status objects. A resource may have only one of {ObjectMeta, ListMeta}.", + "selfLink": "selfLink is a URL representing this object. Populated by the system. Read-only.", + "resourceVersion": "String that identifies the server's internal version of this object that can be used by clients to determine when objects have changed. Value must be treated as opaque by clients and passed unmodified back to the server. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency", + "continue": "continue may be set if the user set a limit on the number of items returned, and indicates that the server has more data available. The value is opaque and may be used to issue another request to the endpoint that served this list to retrieve the next set of available objects. Continuing a consistent list may not be possible if the server configuration has changed or more than a few minutes have passed. The resourceVersion field returned when using this continue value will be identical to the value in the first response, unless you have received this token from an error message.", } func (ListMeta) SwaggerDoc() map[string]string { @@ -209,34 +200,21 @@ func (ListMeta) SwaggerDoc() map[string]string { } var map_ListOptions = map[string]string{ - "": "ListOptions is the query options to a standard REST list call.", - "labelSelector": "A selector to restrict the list of returned objects by their labels. Defaults to everything.", - "fieldSelector": "A selector to restrict the list of returned objects by their fields. Defaults to everything.", - "watch": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.", - "allowWatchBookmarks": "allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored.\n\nThis field is alpha and can be changed or removed without notice.", - "resourceVersion": "When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.", - "timeoutSeconds": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.", - "limit": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. 
Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.", - "continue": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.", + "": "ListOptions is the query options to a standard REST list call.", + "labelSelector": "A selector to restrict the list of returned objects by their labels. Defaults to everything.", + "fieldSelector": "A selector to restrict the list of returned objects by their fields. Defaults to everything.", + "includeUninitialized": "If true, partially initialized resources are included in the response.", + "watch": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.", + "resourceVersion": "When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.", + "timeoutSeconds": "Timeout for the list/watch call. 
This limits the duration of the call, regardless of any activity or inactivity.", + "limit": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.", + "continue": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.", } func (ListOptions) SwaggerDoc() map[string]string { return map_ListOptions } -var map_ManagedFieldsEntry = map[string]string{ - "": "ManagedFieldsEntry is a workflow-id, a FieldSet and the group version of the resource that the fieldset applies to.", - "manager": "Manager is an identifier of the workflow managing these fields.", - "operation": "Operation is the type of operation which lead to this ManagedFieldsEntry being created. The only valid values for this field are 'Apply' and 'Update'.", - "apiVersion": "APIVersion defines the version of this resource that this field set applies to. The format is \"group/version\" just like the top-level APIVersion field. It is necessary to track the version of a field set because it cannot be automatically converted.", - "time": "Time is timestamp of when these fields were set. 
It should always be empty if Operation is 'Apply'", - "fields": "Fields identifies a set of fields.", -} - -func (ManagedFieldsEntry) SwaggerDoc() map[string]string { - return map_ManagedFieldsEntry -} - var map_ObjectMeta = map[string]string{ "": "ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create.", "name": "Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names", @@ -252,10 +230,9 @@ var map_ObjectMeta = map[string]string{ "labels": "Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels", "annotations": "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations", "ownerReferences": "List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller.", - "initializers": "An initializer is a controller which enforces some system invariant at object creation time. This field is a list of initializers that have not yet acted on this object. If nil or empty, this object has been completely initialized. Otherwise, the object is considered uninitialized and is hidden (in list/watch and get calls) from clients that haven't explicitly asked to observe uninitialized objects.\n\nWhen an object is created, the system will populate this list with the current set of initializers. Only privileged users may set or modify this list. Once it is empty, it may not be modified further by any user.\n\nDEPRECATED - initializers are an alpha field and will be removed in v1.15.", + "initializers": "An initializer is a controller which enforces some system invariant at object creation time. This field is a list of initializers that have not yet acted on this object. If nil or empty, this object has been completely initialized. Otherwise, the object is considered uninitialized and is hidden (in list/watch and get calls) from clients that haven't explicitly asked to observe uninitialized objects.\n\nWhen an object is created, the system will populate this list with the current set of initializers. Only privileged users may set or modify this list. Once it is empty, it may not be modified further by any user.", "finalizers": "Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed.", "clusterName": "The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. 
This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request.", - "managedFields": "ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \"ci-cd\". The set of fields is always in the version that the workflow used when modifying the object.\n\nThis field is alpha and can be changed or removed without notice.", } func (ObjectMeta) SwaggerDoc() map[string]string { @@ -263,7 +240,7 @@ func (ObjectMeta) SwaggerDoc() map[string]string { } var map_OwnerReference = map[string]string{ - "": "OwnerReference contains enough information to let you identify an owning object. An owning object must be in the same namespace as the dependent, or be cluster-scoped, so there is no namespace field.", + "": "OwnerReference contains enough information to let you identify an owning object. Currently, an owning object must be in the same namespace, so there is no namespace field.", "apiVersion": "API version of the referent.", "kind": "Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", "name": "Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names", @@ -276,25 +253,6 @@ func (OwnerReference) SwaggerDoc() map[string]string { return map_OwnerReference } -var map_PartialObjectMetadata = map[string]string{ - "": "PartialObjectMetadata is a generic representation of any object with ObjectMeta. It allows clients to get access to a particular ObjectMeta schema without knowing the details of the version.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", -} - -func (PartialObjectMetadata) SwaggerDoc() map[string]string { - return map_PartialObjectMetadata -} - -var map_PartialObjectMetadataList = map[string]string{ - "": "PartialObjectMetadataList contains a list of objects containing only their metadata", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - "items": "items contains each of the included items.", -} - -func (PartialObjectMetadataList) SwaggerDoc() map[string]string { - return map_PartialObjectMetadataList -} - var map_Patch = map[string]string{ "": "Patch is provided to give a concrete name and type to the Kubernetes PATCH request body.", } @@ -303,21 +261,9 @@ func (Patch) SwaggerDoc() map[string]string { return map_Patch } -var map_PatchOptions = map[string]string{ - "": "PatchOptions may be provided when patching an API object. PatchOptions is meant to be a superset of UpdateOptions.", - "dryRun": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", - "force": "Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.", - "fieldManager": "fieldManager is a name associated with the actor or entity that is making these changes. 
The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).", -} - -func (PatchOptions) SwaggerDoc() map[string]string { - return map_PatchOptions -} - var map_Preconditions = map[string]string{ - "": "Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out.", - "uid": "Specifies the target UID.", - "resourceVersion": "Specifies the target ResourceVersion", + "": "Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out.", + "uid": "Specifies the target UID.", } func (Preconditions) SwaggerDoc() map[string]string { @@ -382,62 +328,6 @@ func (StatusDetails) SwaggerDoc() map[string]string { return map_StatusDetails } -var map_Table = map[string]string{ - "": "Table is a tabular representation of a set of API resources. The server transforms the object into a set of preferred columns for quickly reviewing the objects.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - "columnDefinitions": "columnDefinitions describes each column in the returned items array. The number of cells per row will always match the number of column definitions.", - "rows": "rows is the list of items in the table.", -} - -func (Table) SwaggerDoc() map[string]string { - return map_Table -} - -var map_TableColumnDefinition = map[string]string{ - "": "TableColumnDefinition contains information about a column returned in the Table.", - "name": "name is a human readable name for the column.", - "type": "type is an OpenAPI type definition for this column, such as number, integer, string, or array. See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for more.", - "format": "format is an optional OpenAPI type modifier for this column. A format modifies the type and imposes additional rules, like date or time formatting for a string. The 'name' format is applied to the primary identifier column which has type 'string' to assist clients in identifying the column that is the resource name. See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for more.", - "description": "description is a human readable description of this column.", - "priority": "priority is an integer defining the relative importance of this column compared to others. Lower numbers are considered higher priority. Columns that may be omitted in limited space scenarios should be given a higher priority.", -} - -func (TableColumnDefinition) SwaggerDoc() map[string]string { - return map_TableColumnDefinition -} - -var map_TableOptions = map[string]string{ - "": "TableOptions are used when a Table is requested by the caller.", - "includeObject": "includeObject decides whether to include each object along with its columnar information.
Specifying \"None\" will return no object, specifying \"Object\" will return the full object contents, and specifying \"Metadata\" (the default) will return the object's metadata in the PartialObjectMetadata kind in version v1beta1 of the meta.k8s.io API group.", -} - -func (TableOptions) SwaggerDoc() map[string]string { - return map_TableOptions -} - -var map_TableRow = map[string]string{ - "": "TableRow is an individual row in a table.", - "cells": "cells will be as wide as the column definitions array and may contain strings, numbers (float64 or int64), booleans, simple maps, lists, or null. See the type field of the column definition for a more detailed description.", - "conditions": "conditions describe additional status of a row that are relevant for a human user. These conditions apply to the row, not to the object, and will be specific to table output. The only defined condition type is 'Completed', for a row that indicates a resource that has run to completion and can be given less visual priority.", - "object": "This field contains the requested additional information about each object based on the includeObject policy when requesting the Table. If \"None\", this field is empty, if \"Object\" this will be the default serialization of the object for the current API version, and if \"Metadata\" (the default) will contain the object metadata. Check the returned kind and apiVersion of the object before parsing. The media type of the object will always match the enclosing list - if this as a JSON table, these will be JSON encoded objects.", -} - -func (TableRow) SwaggerDoc() map[string]string { - return map_TableRow -} - -var map_TableRowCondition = map[string]string{ - "": "TableRowCondition allows a row to be marked with additional information.", - "type": "Type of row condition. The only defined value is 'Completed' indicating that the object this row represents has reached a completed state and may be given less visual priority than other rows. Clients are not required to honor any conditions but should be consistent where possible about handling the conditions.", - "status": "Status of the condition, one of True, False, Unknown.", - "reason": "(brief) machine readable reason for the condition's last transition.", - "message": "Human readable message indicating details about last transition.", -} - -func (TableRowCondition) SwaggerDoc() map[string]string { - return map_TableRowCondition -} - var map_TypeMeta = map[string]string{ "": "TypeMeta describes an individual object in an API response or request with strings representing the type of the object and its API schema version. Structures that are versioned or persisted should inline TypeMeta.", "kind": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", @@ -449,9 +339,8 @@ func (TypeMeta) SwaggerDoc() map[string]string { } var map_UpdateOptions = map[string]string{ - "": "UpdateOptions may be provided when updating an API object. All fields in UpdateOptions should also be present in PatchOptions.", - "dryRun": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. 
Valid values are: - All: all dry run stages will be processed", - "fieldManager": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.", + "": "UpdateOptions may be provided when updating an API object.", + "dryRun": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", } func (UpdateOptions) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go index 3b07e86db..fc138e75a 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go @@ -47,9 +47,6 @@ func NestedFieldNoCopy(obj map[string]interface{}, fields ...string) (interface{ var val interface{} = obj for i, field := range fields { - if val == nil { - return nil, false, nil - } if m, ok := val.(map[string]interface{}); ok { val, ok = m[field] if !ok { @@ -275,22 +272,6 @@ func getNestedString(obj map[string]interface{}, fields ...string) string { return val } -func getNestedInt64(obj map[string]interface{}, fields ...string) int64 { - val, found, err := NestedInt64(obj, fields...) - if !found || err != nil { - return 0 - } - return val -} - -func getNestedInt64Pointer(obj map[string]interface{}, fields ...string) *int64 { - val, found, err := NestedInt64(obj, fields...) - if !found || err != nil { - return nil - } - return &val -} - func jsonPath(fields []string) string { return "." + strings.Join(fields, ".") } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go index 0ba18d45d..781469ec2 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go @@ -47,7 +47,6 @@ type Unstructured struct { var _ metav1.Object = &Unstructured{} var _ runtime.Unstructured = &Unstructured{} -var _ metav1.ListInterface = &Unstructured{} func (obj *Unstructured) GetObjectKind() schema.ObjectKind { return obj } @@ -127,16 +126,6 @@ func (u *Unstructured) UnmarshalJSON(b []byte) error { return err } -// NewEmptyInstance returns a new instance of the concrete type containing only kind/apiVersion and no other data. -// This should be called instead of reflect.New() for unstructured types because the go type alone does not preserve kind/apiVersion info. -func (in *Unstructured) NewEmptyInstance() runtime.Unstructured { - out := new(Unstructured) - if in != nil { - out.GetObjectKind().SetGroupVersionKind(in.GetObjectKind().GroupVersionKind()) - } - return out -} - func (in *Unstructured) DeepCopy() *Unstructured { if in == nil { return nil @@ -154,20 +143,13 @@ func (u *Unstructured) setNestedField(value interface{}, fields ...string) { SetNestedField(u.Object, value, fields...) } -func (u *Unstructured) setNestedStringSlice(value []string, fields ...string) { +func (u *Unstructured) setNestedSlice(value []string, fields ...string) { if u.Object == nil { u.Object = make(map[string]interface{}) } SetNestedStringSlice(u.Object, value, fields...) 
} -func (u *Unstructured) setNestedSlice(value []interface{}, fields ...string) { - if u.Object == nil { - u.Object = make(map[string]interface{}) - } - SetNestedSlice(u.Object, value, fields...) -} - func (u *Unstructured) setNestedMap(value map[string]string, fields ...string) { if u.Object == nil { u.Object = make(map[string]interface{}) @@ -330,18 +312,6 @@ func (u *Unstructured) SetContinue(c string) { u.setNestedField(c, "metadata", "continue") } -func (u *Unstructured) GetRemainingItemCount() *int64 { - return getNestedInt64Pointer(u.Object, "metadata", "remainingItemCount") -} - -func (u *Unstructured) SetRemainingItemCount(c *int64) { - if c == nil { - RemoveNestedField(u.Object, "metadata", "remainingItemCount") - } else { - u.setNestedField(*c, "metadata", "remainingItemCount") - } -} - func (u *Unstructured) GetCreationTimestamp() metav1.Time { var timestamp metav1.Time timestamp.UnmarshalQueryParameter(getNestedString(u.Object, "metadata", "creationTimestamp")) @@ -466,7 +436,7 @@ func (u *Unstructured) SetFinalizers(finalizers []string) { RemoveNestedField(u.Object, "metadata", "finalizers") return } - u.setNestedStringSlice(finalizers, "metadata", "finalizers") + u.setNestedSlice(finalizers, "metadata", "finalizers") } func (u *Unstructured) GetClusterName() string { @@ -480,42 +450,3 @@ func (u *Unstructured) SetClusterName(clusterName string) { } u.setNestedField(clusterName, "metadata", "clusterName") } - -func (u *Unstructured) GetManagedFields() []metav1.ManagedFieldsEntry { - items, found, err := NestedSlice(u.Object, "metadata", "managedFields") - if !found || err != nil { - return nil - } - managedFields := []metav1.ManagedFieldsEntry{} - for _, item := range items { - m, ok := item.(map[string]interface{}) - if !ok { - utilruntime.HandleError(fmt.Errorf("unable to retrieve managedFields for object, item %v is not a map", item)) - return nil - } - out := metav1.ManagedFieldsEntry{} - if err := runtime.DefaultUnstructuredConverter.FromUnstructured(m, &out); err != nil { - utilruntime.HandleError(fmt.Errorf("unable to retrieve managedFields for object: %v", err)) - return nil - } - managedFields = append(managedFields, out) - } - return managedFields -} - -func (u *Unstructured) SetManagedFields(managedFields []metav1.ManagedFieldsEntry) { - if managedFields == nil { - RemoveNestedField(u.Object, "metadata", "managedFields") - return - } - items := []interface{}{} - for _, managedFieldsEntry := range managedFields { - out, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&managedFieldsEntry) - if err != nil { - utilruntime.HandleError(fmt.Errorf("unable to set managedFields for object: %v", err)) - return - } - items = append(items, out) - } - u.setNestedSlice(items, "metadata", "managedFields") -} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured_list.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured_list.go index 5028f5fb5..bf3fd023f 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured_list.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured_list.go @@ -52,16 +52,6 @@ func (u *UnstructuredList) EachListItem(fn func(runtime.Object) error) error { return nil } -// NewEmptyInstance returns a new instance of the concrete type containing only kind/apiVersion and no other data. -// This should be called instead of reflect.New() for unstructured types because the go type alone does not preserve kind/apiVersion info. 
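As a quick orientation to the accessor surface exercised in the hunks above, a sketch (the GVK, name, and finalizer string are invented for illustration): the typed setters all write through the nested-field helpers into u.Object, and SetFinalizers now routes through the renamed setNestedSlice.

	import "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"

	// exampleAccessors builds a hypothetical unstructured object via the
	// metadata accessors shown above.
	func exampleAccessors() *unstructured.Unstructured {
		u := &unstructured.Unstructured{}
		u.SetAPIVersion("tekton.dev/v1alpha1") // hypothetical GVK
		u.SetKind("EventListener")
		u.SetName("demo")
		u.SetFinalizers([]string{"example.tekton.dev/finalizer"}) // via setNestedSlice
		u.SetClusterName("") // an empty value removes metadata.clusterName entirely
		return u
	}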
-func (u *UnstructuredList) NewEmptyInstance() runtime.Unstructured { - out := new(UnstructuredList) - if u != nil { - out.SetGroupVersionKind(u.GroupVersionKind()) - } - return out -} - // UnstructuredContent returns a map containing an overlay of the Items field onto // the Object field. Items always overwrites overlay. func (u *UnstructuredList) UnstructuredContent() map[string]interface{} { @@ -176,18 +166,6 @@ func (u *UnstructuredList) SetContinue(c string) { u.setNestedField(c, "metadata", "continue") } -func (u *UnstructuredList) GetRemainingItemCount() *int64 { - return getNestedInt64Pointer(u.Object, "metadata", "remainingItemCount") -} - -func (u *UnstructuredList) SetRemainingItemCount(c *int64) { - if c == nil { - RemoveNestedField(u.Object, "metadata", "remainingItemCount") - } else { - u.setNestedField(*c, "metadata", "remainingItemCount") - } -} - func (u *UnstructuredList) SetGroupVersionKind(gvk schema.GroupVersionKind) { u.SetAPIVersion(gvk.GroupVersion().String()) u.SetKind(gvk.Kind) diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation/validation.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation/validation.go index eeb73999f..81f86fb30 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation/validation.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation/validation.go @@ -17,11 +17,7 @@ limitations under the License. package validation import ( - "fmt" - "unicode" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/validation" "k8s.io/apimachinery/pkg/util/validation/field" ) @@ -89,67 +85,21 @@ func ValidateDeleteOptions(options *metav1.DeleteOptions) field.ErrorList { *options.PropagationPolicy != metav1.DeletePropagationOrphan { allErrs = append(allErrs, field.NotSupported(field.NewPath("propagationPolicy"), options.PropagationPolicy, []string{string(metav1.DeletePropagationForeground), string(metav1.DeletePropagationBackground), string(metav1.DeletePropagationOrphan), "nil"})) } - allErrs = append(allErrs, ValidateDryRun(field.NewPath("dryRun"), options.DryRun)...) + allErrs = append(allErrs, validateDryRun(field.NewPath("dryRun"), options.DryRun)...) return allErrs } func ValidateCreateOptions(options *metav1.CreateOptions) field.ErrorList { - return append( - ValidateFieldManager(options.FieldManager, field.NewPath("fieldManager")), - ValidateDryRun(field.NewPath("dryRun"), options.DryRun)..., - ) + return validateDryRun(field.NewPath("dryRun"), options.DryRun) } func ValidateUpdateOptions(options *metav1.UpdateOptions) field.ErrorList { - return append( - ValidateFieldManager(options.FieldManager, field.NewPath("fieldManager")), - ValidateDryRun(field.NewPath("dryRun"), options.DryRun)..., - ) -} - -func ValidatePatchOptions(options *metav1.PatchOptions, patchType types.PatchType) field.ErrorList { - allErrs := field.ErrorList{} - if patchType != types.ApplyPatchType { - if options.Force != nil { - allErrs = append(allErrs, field.Forbidden(field.NewPath("force"), "may not be specified for non-apply patch")) - } - } else { - if options.FieldManager == "" { - // This field is defaulted to "kubectl" by kubectl, but HAS TO be explicitly set by controllers. - allErrs = append(allErrs, field.Required(field.NewPath("fieldManager"), "is required for apply patch")) - } - } - allErrs = append(allErrs, ValidateFieldManager(options.FieldManager, field.NewPath("fieldManager"))...)
- allErrs = append(allErrs, ValidateDryRun(field.NewPath("dryRun"), options.DryRun)...) - return allErrs -} - -var FieldManagerMaxLength = 128 - -// ValidateFieldManager valides that the fieldManager is the proper length and -// only has printable characters. -func ValidateFieldManager(fieldManager string, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - // the field can not be set as a `*string`, so a empty string ("") is - // considered as not set and is defaulted by the rest of the process - // (unless apply is used, in which case it is required). - if len(fieldManager) > FieldManagerMaxLength { - allErrs = append(allErrs, field.TooLong(fldPath, fieldManager, FieldManagerMaxLength)) - } - // Verify that all characters are printable. - for i, r := range fieldManager { - if !unicode.IsPrint(r) { - allErrs = append(allErrs, field.Invalid(fldPath, fieldManager, fmt.Sprintf("invalid character %#U (at position %d)", r, i))) - } - } - - return allErrs + return validateDryRun(field.NewPath("dryRun"), options.DryRun) } var allowedDryRunValues = sets.NewString(metav1.DryRunAll) -// ValidateDryRun validates that a dryRun query param only contains allowed values. -func ValidateDryRun(fldPath *field.Path, dryRun []string) field.ErrorList { +func validateDryRun(fldPath *field.Path, dryRun []string) field.ErrorList { allErrs := field.ErrorList{} if !allowedDryRunValues.HasAll(dryRun...) { allErrs = append(allErrs, field.NotSupported(fldPath, dryRun, allowedDryRunValues.List())) @@ -158,14 +108,3 @@ func ValidateDryRun(fldPath *field.Path, dryRun []string) field.ErrorList { } const UninitializedStatusUpdateErrorMsg string = `must not update status when the object is uninitialized` - -// ValidateTableOptions returns any invalid flags on TableOptions. -func ValidateTableOptions(opts *metav1.TableOptions) field.ErrorList { - var allErrs field.ErrorList - switch opts.IncludeObject { - case metav1.IncludeMetadata, metav1.IncludeNone, metav1.IncludeObject, "": - default: - allErrs = append(allErrs, field.Invalid(field.NewPath("includeObject"), opts.IncludeObject, "must be 'Metadata', 'Object', 'None', or empty")) - } - return allErrs -} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go index fa179ac7b..10845993e 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go @@ -312,29 +312,6 @@ func (in *ExportOptions) DeepCopyObject() runtime.Object { return nil } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Fields) DeepCopyInto(out *Fields) { - *out = *in - if in.Map != nil { - in, out := &in.Map, &out.Map - *out = make(map[string]Fields, len(*in)) - for key, val := range *in { - (*out)[key] = *val.DeepCopy() - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Fields. -func (in *Fields) DeepCopy() *Fields { - if in == nil { - return nil - } - out := new(Fields) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
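The removed ValidateFieldManager above is a purely lexical check: the manager name must fit in 128 bytes and contain only printable runes. Its core can be reproduced without any apimachinery types; a self-contained sketch:

package main

import (
	"fmt"
	"unicode"
)

const fieldManagerMaxLength = 128

// checkFieldManager reproduces the two rules from the removed validator:
// a bounded length plus printable characters only.
func checkFieldManager(fieldManager string) error {
	if len(fieldManager) > fieldManagerMaxLength {
		return fmt.Errorf("fieldManager %q exceeds %d characters", fieldManager, fieldManagerMaxLength)
	}
	for i, r := range fieldManager {
		if !unicode.IsPrint(r) {
			return fmt.Errorf("invalid character %#U (at position %d)", r, i)
		}
	}
	return nil
}

func main() {
	fmt.Println(checkFieldManager("kubectl"))     // <nil>
	fmt.Println(checkFieldManager("bad\x00name")) // invalid character U+0000 ...
}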
func (in *GetOptions) DeepCopyInto(out *GetOptions) { *out = *in @@ -572,7 +549,7 @@ func (in *LabelSelectorRequirement) DeepCopy() *LabelSelectorRequirement { func (in *List) DeepCopyInto(out *List) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]runtime.RawExtension, len(*in)) @@ -604,11 +581,6 @@ func (in *List) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ListMeta) DeepCopyInto(out *ListMeta) { *out = *in - if in.RemainingItemCount != nil { - in, out := &in.RemainingItemCount, &out.RemainingItemCount - *out = new(int64) - **out = **in - } return } @@ -652,31 +624,6 @@ func (in *ListOptions) DeepCopyObject() runtime.Object { return nil } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ManagedFieldsEntry) DeepCopyInto(out *ManagedFieldsEntry) { - *out = *in - if in.Time != nil { - in, out := &in.Time, &out.Time - *out = (*in).DeepCopy() - } - if in.Fields != nil { - in, out := &in.Fields, &out.Fields - *out = new(Fields) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedFieldsEntry. -func (in *ManagedFieldsEntry) DeepCopy() *ManagedFieldsEntry { - if in == nil { - return nil - } - out := new(ManagedFieldsEntry) - in.DeepCopyInto(out) - return out -} - // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MicroTime. func (in *MicroTime) DeepCopy() *MicroTime { if in == nil { @@ -731,13 +678,6 @@ func (in *ObjectMeta) DeepCopyInto(out *ObjectMeta) { *out = make([]string, len(*in)) copy(*out, *in) } - if in.ManagedFields != nil { - in, out := &in.ManagedFields, &out.ManagedFields - *out = make([]ManagedFieldsEntry, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } return } @@ -777,65 +717,6 @@ func (in *OwnerReference) DeepCopy() *OwnerReference { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PartialObjectMetadata) DeepCopyInto(out *PartialObjectMetadata) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PartialObjectMetadata. -func (in *PartialObjectMetadata) DeepCopy() *PartialObjectMetadata { - if in == nil { - return nil - } - out := new(PartialObjectMetadata) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PartialObjectMetadata) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
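The List and ListMeta hunks above show the rule deepcopy-gen applies: once ListMeta loses its only pointer field, plain assignment (out.ListMeta = in.ListMeta) is itself a deep copy, while reference-typed fields still need explicit allocation. A hand-written sketch of both cases, with invented types:

package main

import "fmt"

// Meta has only value fields, so assignment already copies it fully.
type Meta struct {
	ResourceVersion string
	Continue        string
}

// List carries a slice, so the copy must allocate a new backing array.
type List struct {
	Meta  Meta
	Items []string
}

// DeepCopyInto follows the generated style: value fields come along with
// the struct assignment; the slice gets its own backing array.
func (in *List) DeepCopyInto(out *List) {
	*out = *in // copies Meta by value, but aliases Items
	if in.Items != nil {
		out.Items = make([]string, len(in.Items))
		copy(out.Items, in.Items)
	}
}

func main() {
	a := List{Meta: Meta{ResourceVersion: "1"}, Items: []string{"x"}}
	var b List
	a.DeepCopyInto(&b)
	b.Items[0] = "y"
	fmt.Println(a.Items[0], b.Items[0]) // x y
}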
-func (in *PartialObjectMetadataList) DeepCopyInto(out *PartialObjectMetadataList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]PartialObjectMetadata, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PartialObjectMetadataList. -func (in *PartialObjectMetadataList) DeepCopy() *PartialObjectMetadataList { - if in == nil { - return nil - } - out := new(PartialObjectMetadataList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PartialObjectMetadataList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Patch) DeepCopyInto(out *Patch) { *out = *in @@ -852,41 +733,6 @@ func (in *Patch) DeepCopy() *Patch { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PatchOptions) DeepCopyInto(out *PatchOptions) { - *out = *in - out.TypeMeta = in.TypeMeta - if in.DryRun != nil { - in, out := &in.DryRun, &out.DryRun - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Force != nil { - in, out := &in.Force, &out.Force - *out = new(bool) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PatchOptions. -func (in *PatchOptions) DeepCopy() *PatchOptions { - if in == nil { - return nil - } - out := new(PatchOptions) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PatchOptions) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Preconditions) DeepCopyInto(out *Preconditions) { *out = *in @@ -895,11 +741,6 @@ func (in *Preconditions) DeepCopyInto(out *Preconditions) { *out = new(types.UID) **out = **in } - if in.ResourceVersion != nil { - in, out := &in.ResourceVersion, &out.ResourceVersion - *out = new(string) - **out = **in - } return } @@ -954,7 +795,7 @@ func (in *ServerAddressByClientCIDR) DeepCopy() *ServerAddressByClientCIDR { func (in *Status) DeepCopyInto(out *Status) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Details != nil { in, out := &in.Details, &out.Details *out = new(StatusDetails) @@ -1018,108 +859,6 @@ func (in *StatusDetails) DeepCopy() *StatusDetails { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
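Each DeepCopyObject above is the same three-line adapter: it lets the concrete DeepCopy satisfy runtime.Object, whose contract is DeepCopyObject() runtime.Object. A sketch of the wiring for a hypothetical Widget type (embedding TypeMeta supplies the GetObjectKind half of the interface):

package main

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

// Widget is an invented API type used only for illustration.
type Widget struct {
	metav1.TypeMeta
	Replicas *int32
}

func (in *Widget) DeepCopy() *Widget {
	if in == nil {
		return nil
	}
	out := new(Widget)
	out.TypeMeta = in.TypeMeta
	if in.Replicas != nil {
		// Pointer fields get a fresh allocation, as in the PatchOptions
		// and Preconditions hunks above.
		out.Replicas = new(int32)
		*out.Replicas = *in.Replicas
	}
	return out
}

// DeepCopyObject follows the generated pattern so *Widget is a runtime.Object.
func (in *Widget) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

func main() {
	var obj runtime.Object = &Widget{}
	_ = obj
}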
-func (in *Table) DeepCopyInto(out *Table) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.ColumnDefinitions != nil { - in, out := &in.ColumnDefinitions, &out.ColumnDefinitions - *out = make([]TableColumnDefinition, len(*in)) - copy(*out, *in) - } - if in.Rows != nil { - in, out := &in.Rows, &out.Rows - *out = make([]TableRow, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Table. -func (in *Table) DeepCopy() *Table { - if in == nil { - return nil - } - out := new(Table) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Table) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *TableColumnDefinition) DeepCopyInto(out *TableColumnDefinition) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableColumnDefinition. -func (in *TableColumnDefinition) DeepCopy() *TableColumnDefinition { - if in == nil { - return nil - } - out := new(TableColumnDefinition) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *TableOptions) DeepCopyInto(out *TableOptions) { - *out = *in - out.TypeMeta = in.TypeMeta - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableOptions. -func (in *TableOptions) DeepCopy() *TableOptions { - if in == nil { - return nil - } - out := new(TableOptions) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *TableOptions) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *TableRow) DeepCopyInto(out *TableRow) { - clone := in.DeepCopy() - *out = *clone - return -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *TableRowCondition) DeepCopyInto(out *TableRowCondition) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableRowCondition. -func (in *TableRowCondition) DeepCopy() *TableRowCondition { - if in == nil { - return nil - } - out := new(TableRowCondition) - in.DeepCopyInto(out) - return out -} - // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Time. func (in *Time) DeepCopy() *Time { if in == nil { diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/deepcopy.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/deepcopy.go index 2b7e8ca0b..3b2bedd92 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/deepcopy.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/deepcopy.go @@ -15,3 +15,30 @@ limitations under the License. 
*/ package v1beta1 + +import "k8s.io/apimachinery/pkg/runtime" + +func (in *TableRow) DeepCopy() *TableRow { + if in == nil { + return nil + } + + out := new(TableRow) + + if in.Cells != nil { + out.Cells = make([]interface{}, len(in.Cells)) + for i := range in.Cells { + out.Cells[i] = runtime.DeepCopyJSONValue(in.Cells[i]) + } + } + + if in.Conditions != nil { + out.Conditions = make([]TableRowCondition, len(in.Conditions)) + for i := range in.Conditions { + in.Conditions[i].DeepCopyInto(&out.Conditions[i]) + } + } + + in.Object.DeepCopyInto(&out.Object) + return out +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/doc.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/doc.go index 20c9d2ec7..dc461cc29 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/doc.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/doc.go @@ -19,5 +19,4 @@ limitations under the License. // +k8s:defaulter-gen=TypeMeta // +groupName=meta.k8s.io - -package v1beta1 // import "k8s.io/apimachinery/pkg/apis/meta/v1beta1" +package v1beta1 diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.pb.go index 1bcd80ee9..fe3df6916 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.pb.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.pb.go @@ -14,8 +14,9 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// Code generated by protoc-gen-gogo. // source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto +// DO NOT EDIT! /* Package v1beta1 is a generated protocol buffer package. @@ -24,7 +25,9 @@ limitations under the License. k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto It has these top-level messages: + PartialObjectMetadata PartialObjectMetadataList + TableOptions */ package v1beta1 @@ -32,8 +35,6 @@ import proto "github.com/gogo/protobuf/proto" import fmt "fmt" import math "math" -import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - import strings "strings" import reflect "reflect" @@ -50,15 +51,51 @@ var _ = math.Inf // proto package needs to be updated. 
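TableRow gets the hand-written DeepCopy above because Cells is []interface{}: deepcopy-gen cannot look through an interface, so each cell is cloned with runtime.DeepCopyJSONValue, which recursively copies the JSON value kinds (string, bool, int64, float64, nil, map[string]interface{}, []interface{}). A small illustration of that helper on its own:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	cells := []interface{}{"name", int64(3), map[string]interface{}{"ready": true}}

	clone := make([]interface{}, len(cells))
	for i := range cells {
		// DeepCopyJSONValue copies recursively; note it panics on
		// non-JSON kinds such as plain int or custom structs.
		clone[i] = runtime.DeepCopyJSONValue(cells[i])
	}

	clone[2].(map[string]interface{})["ready"] = false
	fmt.Println(cells[2], clone[2]) // map[ready:true] map[ready:false]
}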
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package +func (m *PartialObjectMetadata) Reset() { *m = PartialObjectMetadata{} } +func (*PartialObjectMetadata) ProtoMessage() {} +func (*PartialObjectMetadata) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + func (m *PartialObjectMetadataList) Reset() { *m = PartialObjectMetadataList{} } func (*PartialObjectMetadataList) ProtoMessage() {} func (*PartialObjectMetadataList) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{0} + return fileDescriptorGenerated, []int{1} } +func (m *TableOptions) Reset() { *m = TableOptions{} } +func (*TableOptions) ProtoMessage() {} +func (*TableOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } + func init() { + proto.RegisterType((*PartialObjectMetadata)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1beta1.PartialObjectMetadata") proto.RegisterType((*PartialObjectMetadataList)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1beta1.PartialObjectMetadataList") + proto.RegisterType((*TableOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1beta1.TableOptions") } +func (m *PartialObjectMetadata) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PartialObjectMetadata) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + return i, nil +} + func (m *PartialObjectMetadataList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -86,17 +123,49 @@ func (m *PartialObjectMetadataList) MarshalTo(dAtA []byte) (int, error) { i += n } } - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n1, err := m.ListMeta.MarshalTo(dAtA[i:]) + return i, nil +} + +func (m *TableOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { - return 0, err + return nil, err } - i += n1 + return dAtA[:n], nil +} + +func (m *TableOptions) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.IncludeObject))) + i += copy(dAtA[i:], m.IncludeObject) return i, nil } +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -106,6 +175,14 @@ func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { dAtA[offset] = uint8(v) return offset + 1 } +func (m *PartialObjectMetadata) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *PartialObjectMetadataList) Size() (n 
int) { var l int _ = l @@ -115,7 +192,13 @@ func (m *PartialObjectMetadataList) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } - l = m.ListMeta.Size() + return n +} + +func (m *TableOptions) Size() (n int) { + var l int + _ = l + l = len(m.IncludeObject) n += 1 + l + sovGenerated(uint64(l)) return n } @@ -133,13 +216,32 @@ func sovGenerated(x uint64) (n int) { func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } +func (this *PartialObjectMetadata) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PartialObjectMetadata{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} func (this *PartialObjectMetadataList) String() string { if this == nil { return "nil" } s := strings.Join([]string{`&PartialObjectMetadataList{`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "PartialObjectMetadata", "k8s_io_apimachinery_pkg_apis_meta_v1.PartialObjectMetadata", 1), `&`, ``, 1) + `,`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(fmt.Sprintf("%v", this.Items), "PartialObjectMetadata", "PartialObjectMetadata", 1) + `,`, + `}`, + }, "") + return s +} +func (this *TableOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TableOptions{`, + `IncludeObject:` + fmt.Sprintf("%v", this.IncludeObject) + `,`, `}`, }, "") return s @@ -152,7 +254,7 @@ func valueToStringGenerated(v interface{}) string { pv := reflect.Indirect(rv).Interface() return fmt.Sprintf("*%v", pv) } -func (m *PartialObjectMetadataList) Unmarshal(dAtA []byte) error { +func (m *PartialObjectMetadata) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -175,15 +277,15 @@ func (m *PartialObjectMetadataList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PartialObjectMetadataList: wiretype end group for non-group") + return fmt.Errorf("proto: PartialObjectMetadata: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PartialObjectMetadataList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PartialObjectMetadata: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -207,14 +309,63 @@ func (m *PartialObjectMetadataList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, k8s_io_apimachinery_pkg_apis_meta_v1.PartialObjectMetadata{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return 
nil +} +func (m *PartialObjectMetadataList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PartialObjectMetadataList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PartialObjectMetadataList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -238,7 +389,8 @@ func (m *PartialObjectMetadataList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Items = append(m.Items, &PartialObjectMetadata{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -263,6 +415,85 @@ func (m *PartialObjectMetadataList) Unmarshal(dAtA []byte) error { } return nil } +func (m *TableOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TableOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TableOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IncludeObject", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IncludeObject = IncludeObjectPolicy(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func skipGenerated(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 @@ -373,26 +604,29 @@ func init() { } var fileDescriptorGenerated = []byte{ - // 322 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x91, 0x41, 0x4b, 0xf3, 0x30, - 0x18, 0xc7, 0x9b, 0xf7, 0x65, 0x38, 0x3a, 0x04, 0xd9, 0x69, 0xee, 0x90, 0x0d, 0x4f, 0xf3, 0xb0, - 0x84, 0x0d, 0x11, 0xc1, 0xdb, 0x6e, 0x82, 
0xa2, 0xec, 0x28, 0x1e, 0x4c, 0xbb, 0xc7, 0x2e, 0xd6, - 0x34, 0x25, 0x79, 0x3a, 0xf0, 0xe6, 0x47, 0xf0, 0x63, 0xed, 0xb8, 0xe3, 0x40, 0x18, 0xae, 0x7e, - 0x11, 0x49, 0x57, 0x45, 0xa6, 0x62, 0x6f, 0x7d, 0xfe, 0xcd, 0xef, 0x97, 0x7f, 0x12, 0x7f, 0x1c, - 0x9f, 0x58, 0x26, 0x35, 0x8f, 0xb3, 0x00, 0x4c, 0x02, 0x08, 0x96, 0xcf, 0x20, 0x99, 0x68, 0xc3, - 0xcb, 0x1f, 0x22, 0x95, 0x4a, 0x84, 0x53, 0x99, 0x80, 0x79, 0xe4, 0x69, 0x1c, 0xb9, 0xc0, 0x72, - 0x05, 0x28, 0xf8, 0x6c, 0x10, 0x00, 0x8a, 0x01, 0x8f, 0x20, 0x01, 0x23, 0x10, 0x26, 0x2c, 0x35, - 0x1a, 0x75, 0xf3, 0x70, 0x83, 0xb2, 0xaf, 0x28, 0x4b, 0xe3, 0xc8, 0x05, 0x96, 0x39, 0x94, 0x95, - 0x68, 0xbb, 0x1f, 0x49, 0x9c, 0x66, 0x01, 0x0b, 0xb5, 0xe2, 0x91, 0x8e, 0x34, 0x2f, 0x0c, 0x41, - 0x76, 0x57, 0x4c, 0xc5, 0x50, 0x7c, 0x6d, 0xcc, 0xed, 0xa3, 0x2a, 0xa5, 0xb6, 0xfb, 0xb4, 0x7f, - 0x3d, 0x8a, 0xc9, 0x12, 0x94, 0x0a, 0xbe, 0x01, 0xc7, 0x7f, 0x01, 0x36, 0x9c, 0x82, 0x12, 0xdb, - 0xdc, 0xc1, 0x0b, 0xf1, 0xf7, 0xaf, 0x84, 0x41, 0x29, 0x1e, 0x2e, 0x83, 0x7b, 0x08, 0xf1, 0x02, - 0x50, 0x4c, 0x04, 0x8a, 0x73, 0x69, 0xb1, 0x79, 0xeb, 0xd7, 0x24, 0x82, 0xb2, 0x2d, 0xd2, 0xfd, - 0xdf, 0x6b, 0x0c, 0x4f, 0x59, 0x95, 0x6b, 0x62, 0x3f, 0xfa, 0x46, 0xbb, 0xf3, 0x55, 0xc7, 0xcb, - 0x57, 0x9d, 0xda, 0x99, 0x33, 0x8e, 0x37, 0xe2, 0xe6, 0x8d, 0x5f, 0x57, 0xe5, 0x8a, 0xd6, 0xbf, - 0x2e, 0xe9, 0x35, 0x86, 0xac, 0xda, 0x26, 0xae, 0x9f, 0x73, 0x8f, 0xf6, 0x4a, 0x6f, 0xfd, 0x23, - 0x19, 0x7f, 0x1a, 0x47, 0xfd, 0xf9, 0x9a, 0x7a, 0x8b, 0x35, 0xf5, 0x96, 0x6b, 0xea, 0x3d, 0xe5, - 0x94, 0xcc, 0x73, 0x4a, 0x16, 0x39, 0x25, 0xcb, 0x9c, 0x92, 0xd7, 0x9c, 0x92, 0xe7, 0x37, 0xea, - 0x5d, 0xef, 0x94, 0x4f, 0xfb, 0x1e, 0x00, 0x00, 0xff, 0xff, 0x10, 0x2f, 0x48, 0xbd, 0x5a, 0x02, - 0x00, 0x00, + // 375 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x91, 0xcd, 0x0a, 0xd3, 0x40, + 0x10, 0xc7, 0xb3, 0x48, 0xd1, 0x6e, 0xed, 0x25, 0x22, 0xd4, 0x1e, 0x36, 0xa5, 0xa7, 0x0a, 0x76, + 0xd7, 0x16, 0x11, 0x8f, 0x92, 0x5b, 0x41, 0x69, 0x09, 0x9e, 0x3c, 0xb9, 0x49, 0xc6, 0x74, 0xcd, + 0xc7, 0x86, 0xec, 0xa6, 0xd0, 0x8b, 0xf8, 0x08, 0x3e, 0x56, 0x8f, 0x3d, 0xf6, 0x14, 0x6c, 0x7c, + 0x0b, 0x4f, 0x92, 0x0f, 0xec, 0x87, 0x15, 0x7b, 0x9b, 0xf9, 0x0f, 0xbf, 0x5f, 0x66, 0xb2, 0xd8, + 0x09, 0xdf, 0x28, 0x2a, 0x24, 0x0b, 0x73, 0x17, 0xb2, 0x04, 0x34, 0x28, 0xb6, 0x81, 0xc4, 0x97, + 0x19, 0x6b, 0x07, 0x3c, 0x15, 0x31, 0xf7, 0xd6, 0x22, 0x81, 0x6c, 0xcb, 0xd2, 0x30, 0xa8, 0x02, + 0xc5, 0x62, 0xd0, 0x9c, 0x6d, 0x66, 0x2e, 0x68, 0x3e, 0x63, 0x01, 0x24, 0x90, 0x71, 0x0d, 0x3e, + 0x4d, 0x33, 0xa9, 0xa5, 0xf9, 0xbc, 0x41, 0xe9, 0x39, 0x4a, 0xd3, 0x30, 0xa8, 0x02, 0x45, 0x2b, + 0x94, 0xb6, 0xe8, 0x70, 0x1a, 0x08, 0xbd, 0xce, 0x5d, 0xea, 0xc9, 0x98, 0x05, 0x32, 0x90, 0xac, + 0x36, 0xb8, 0xf9, 0xe7, 0xba, 0xab, 0x9b, 0xba, 0x6a, 0xcc, 0xc3, 0x57, 0xf7, 0x2c, 0x75, 0xbd, + 0xcf, 0xf0, 0x9f, 0xa7, 0x64, 0x79, 0xa2, 0x45, 0x0c, 0x7f, 0x01, 0xaf, 0xff, 0x07, 0x28, 0x6f, + 0x0d, 0x31, 0xbf, 0xe6, 0xc6, 0x5b, 0xfc, 0x74, 0xc5, 0x33, 0x2d, 0x78, 0xb4, 0x74, 0xbf, 0x80, + 0xa7, 0xdf, 0x83, 0xe6, 0x3e, 0xd7, 0xdc, 0xfc, 0x84, 0x1f, 0xc5, 0x6d, 0x3d, 0x40, 0x23, 0x34, + 0xe9, 0xcd, 0x5f, 0xd2, 0x7b, 0x7e, 0x12, 0x3d, 0x79, 0x6c, 0x73, 0x57, 0x58, 0x46, 0x59, 0x58, + 0xf8, 0x94, 0x39, 0x7f, 0xac, 0xe3, 0xaf, 0xf8, 0xd9, 0xcd, 0x4f, 0xbf, 0x13, 0x4a, 0x9b, 0x1c, + 0x77, 0x84, 0x86, 0x58, 0x0d, 0xd0, 0xe8, 0xc1, 0xa4, 0x37, 0x7f, 0x4b, 0xef, 0x7e, 0x20, 0x7a, + 0x53, 0x6a, 0x77, 0xcb, 0xc2, 0xea, 0x2c, 0x2a, 0xa5, 0xd3, 0x98, 0xc7, 0x2e, 0x7e, 0xfc, 0x81, + 0xbb, 
0x11, 0x2c, 0x53, 0x2d, 0x64, 0xa2, 0x4c, 0x07, 0xf7, 0x45, 0xe2, 0x45, 0xb9, 0x0f, 0x0d, + 0x5a, 0x9f, 0xdd, 0xb5, 0x5f, 0xb4, 0x47, 0xf4, 0x17, 0xe7, 0xc3, 0x5f, 0x85, 0xf5, 0xe4, 0x22, + 0x58, 0xc9, 0x48, 0x78, 0x5b, 0xe7, 0x52, 0x61, 0x4f, 0x77, 0x47, 0x62, 0xec, 0x8f, 0xc4, 0x38, + 0x1c, 0x89, 0xf1, 0xad, 0x24, 0x68, 0x57, 0x12, 0xb4, 0x2f, 0x09, 0x3a, 0x94, 0x04, 0xfd, 0x28, + 0x09, 0xfa, 0xfe, 0x93, 0x18, 0x1f, 0x1f, 0xb6, 0xab, 0xff, 0x0e, 0x00, 0x00, 0xff, 0xff, 0xf3, + 0xe1, 0xde, 0x86, 0xdb, 0x02, 0x00, 0x00, } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/register.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/register.go index 108a0764e..d13254b41 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/register.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/register.go @@ -39,12 +39,6 @@ var scheme = runtime.NewScheme() var ParameterCodec = runtime.NewParameterCodec(scheme) func init() { - if err := AddMetaToScheme(scheme); err != nil { - panic(err) - } -} - -func AddMetaToScheme(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &Table{}, &TableOptions{}, @@ -52,9 +46,11 @@ func AddMetaToScheme(scheme *runtime.Scheme) error { &PartialObjectMetadataList{}, ) - return scheme.AddConversionFuncs( + if err := scheme.AddConversionFuncs( Convert_Slice_string_To_v1beta1_IncludeObjectPolicy, - ) + ); err != nil { + panic(err) + } // register manually. This usually goes through the SchemeBuilder, which we cannot use here. //scheme.AddGeneratedDeepCopyFuncs(GetGeneratedDeepCopyFuncs()...) diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types.go index 87895a5b5..344c533e1 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types.go @@ -18,67 +18,144 @@ limitations under the License. package v1beta1 import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" ) +// TODO: Table does not generate to protobuf because of the interface{} - fix protobuf +// generation to support a meta type that can accept any valid JSON. + // Table is a tabular representation of a set of API resources. The server transforms the // object into a set of preferred columns for quickly reviewing the objects. -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +protobuf=false -type Table = v1.Table +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type Table struct { + v1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // +optional + v1.ListMeta `json:"metadata,omitempty"` + + // columnDefinitions describes each column in the returned items array. The number of cells per row + // will always match the number of column definitions. + ColumnDefinitions []TableColumnDefinition `json:"columnDefinitions"` + // rows is the list of items in the table. + Rows []TableRow `json:"rows"` +} // TableColumnDefinition contains information about a column returned in the Table. // +protobuf=false -type TableColumnDefinition = v1.TableColumnDefinition +type TableColumnDefinition struct { + // name is a human readable name for the column. + Name string `json:"name"` + // type is an OpenAPI type definition for this column. 
+ // See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for more. + Type string `json:"type"` + // format is an optional OpenAPI type definition for this column. The 'name' format is applied + // to the primary identifier column to assist in clients identifying column is the resource name. + // See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for more. + Format string `json:"format"` + // description is a human readable description of this column. + Description string `json:"description"` + // priority is an integer defining the relative importance of this column compared to others. Lower + // numbers are considered higher priority. Columns that may be omitted in limited space scenarios + // should be given a higher priority. + Priority int32 `json:"priority"` +} // TableRow is an individual row in a table. // +protobuf=false -type TableRow = v1.TableRow +type TableRow struct { + // cells will be as wide as headers and may contain strings, numbers (float64 or int64), booleans, simple + // maps, or lists, or null. See the type field of the column definition for a more detailed description. + Cells []interface{} `json:"cells"` + // conditions describe additional status of a row that are relevant for a human user. + // +optional + Conditions []TableRowCondition `json:"conditions,omitempty"` + // This field contains the requested additional information about each object based on the includeObject + // policy when requesting the Table. If "None", this field is empty, if "Object" this will be the + // default serialization of the object for the current API version, and if "Metadata" (the default) will + // contain the object metadata. Check the returned kind and apiVersion of the object before parsing. + // +optional + Object runtime.RawExtension `json:"object,omitempty"` +} // TableRowCondition allows a row to be marked with additional information. // +protobuf=false -type TableRowCondition = v1.TableRowCondition +type TableRowCondition struct { + // Type of row condition. + Type RowConditionType `json:"type"` + // Status of the condition, one of True, False, Unknown. + Status ConditionStatus `json:"status"` + // (brief) machine readable reason for the condition's last transition. + // +optional + Reason string `json:"reason,omitempty"` + // Human readable message indicating details about last transition. + // +optional + Message string `json:"message,omitempty"` +} -type RowConditionType = v1.RowConditionType +type RowConditionType string -type ConditionStatus = v1.ConditionStatus +// These are valid conditions of a row. This list is not exhaustive and new conditions may be +// included by other resources. +const ( + // RowCompleted means the underlying resource has reached completion and may be given less + // visual priority than other resources. + RowCompleted RowConditionType = "Completed" +) -type IncludeObjectPolicy = v1.IncludeObjectPolicy +type ConditionStatus string + +// These are valid condition statuses. "ConditionTrue" means a resource is in the condition. +// "ConditionFalse" means a resource is not in the condition. "ConditionUnknown" means kubernetes +// can't decide if a resource is in the condition or not. In the future, we could add other +// intermediate conditions, e.g. ConditionDegraded. 
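The types.go hunk above is more than a rename: it replaces Go type aliases (type Table = v1.Table) with fresh struct definitions. An alias is the very same type, so values and methods flow freely between the two names; a new definition is a distinct, incompatible type even with identical fields, which is what lets v1beta1 evolve separately from v1 again. The difference in miniature:

package main

import "fmt"

type Base struct{ Name string }

type Alias = Base // alias: Alias and Base are one type
type Defined Base // definition: a new, distinct type

func main() {
	b := Base{Name: "x"}

	var a Alias = b            // assignable with no conversion
	var d Defined = Defined(b) // requires an explicit conversion

	fmt.Println(a.Name, d.Name)
}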
+const ( + ConditionTrue ConditionStatus = "True" + ConditionFalse ConditionStatus = "False" + ConditionUnknown ConditionStatus = "Unknown" +) + +// IncludeObjectPolicy controls which portion of the object is returned with a Table. +type IncludeObjectPolicy string + +const ( + // IncludeNone returns no object. + IncludeNone IncludeObjectPolicy = "None" + // IncludeMetadata serializes the object containing only its metadata field. + IncludeMetadata IncludeObjectPolicy = "Metadata" + // IncludeObject contains the full object. + IncludeObject IncludeObjectPolicy = "Object" +) // TableOptions are used when a Table is requested by the caller. // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -type TableOptions = v1.TableOptions +type TableOptions struct { + v1.TypeMeta `json:",inline"` + // includeObject decides whether to include each object along with its columnar information. + // Specifying "None" will return no object, specifying "Object" will return the full object contents, and + // specifying "Metadata" (the default) will return the object's metadata in the PartialObjectMetadata kind + // in version v1beta1 of the meta.k8s.io API group. + IncludeObject IncludeObjectPolicy `json:"includeObject,omitempty" protobuf:"bytes,1,opt,name=includeObject,casttype=IncludeObjectPolicy"` +} // PartialObjectMetadata is a generic representation of any object with ObjectMeta. It allows clients // to get access to a particular ObjectMeta schema without knowing the details of the version. // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -type PartialObjectMetadata = v1.PartialObjectMetadata - -// IMPORTANT: PartialObjectMetadataList has different protobuf field ids in v1beta1 than -// v1 because ListMeta was accidentally omitted prior to 1.15. Therefore this type must -// remain independent of v1.PartialObjectMetadataList to preserve mappings. +type PartialObjectMetadata struct { + v1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` +} -// PartialObjectMetadataList contains a list of objects containing only their metadata. +// PartialObjectMetadataList contains a list of objects containing only their metadata // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object type PartialObjectMetadataList struct { v1.TypeMeta `json:",inline"` - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds - // +optional - v1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,2,opt,name=metadata"` // items contains each of the included items. 
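IncludeObjectPolicy above is a three-way switch over how much of each object accompanies its row; per the TableOptions comment, an empty value behaves like "Metadata". A sketch of honoring it on the serving side (objectForRow is an invented helper, not part of the API):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/v1beta1"
)

// objectForRow is hypothetical; it only shows the policy dispatch.
func objectForRow(policy v1beta1.IncludeObjectPolicy) string {
	switch policy {
	case v1beta1.IncludeNone:
		return "no object payload"
	case v1beta1.IncludeObject:
		return "full object serialization"
	case v1beta1.IncludeMetadata, "":
		return "PartialObjectMetadata only"
	default:
		return "unsupported policy"
	}
}

func main() {
	opts := v1beta1.TableOptions{IncludeObject: v1beta1.IncludeMetadata}
	fmt.Println(objectForRow(opts.IncludeObject)) // PartialObjectMetadata only
}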
- Items []v1.PartialObjectMetadata `json:"items" protobuf:"bytes,1,rep,name=items"` + Items []*PartialObjectMetadata `json:"items" protobuf:"bytes,1,rep,name=items"` } - -const ( - RowCompleted = v1.RowCompleted - - ConditionTrue = v1.ConditionTrue - ConditionFalse = v1.ConditionFalse - ConditionUnknown = v1.ConditionUnknown - - IncludeNone = v1.IncludeNone - IncludeMetadata = v1.IncludeMetadata - IncludeObject = v1.IncludeObject -) diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types_swagger_doc_generated.go index 26d13f5d9..7394535d9 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types_swagger_doc_generated.go @@ -27,14 +27,78 @@ package v1beta1 // Those methods can be generated by using hack/update-generated-swagger-docs.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_PartialObjectMetadata = map[string]string{ + "": "PartialObjectMetadata is a generic representation of any object with ObjectMeta. It allows clients to get access to a particular ObjectMeta schema without knowing the details of the version.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", +} + +func (PartialObjectMetadata) SwaggerDoc() map[string]string { + return map_PartialObjectMetadata +} + var map_PartialObjectMetadataList = map[string]string{ - "": "PartialObjectMetadataList contains a list of objects containing only their metadata.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - "items": "items contains each of the included items.", + "": "PartialObjectMetadataList contains a list of objects containing only their metadata", + "items": "items contains each of the included items.", } func (PartialObjectMetadataList) SwaggerDoc() map[string]string { return map_PartialObjectMetadataList } +var map_Table = map[string]string{ + "": "Table is a tabular representation of a set of API resources. The server transforms the object into a set of preferred columns for quickly reviewing the objects.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "columnDefinitions": "columnDefinitions describes each column in the returned items array. The number of cells per row will always match the number of column definitions.", + "rows": "rows is the list of items in the table.", +} + +func (Table) SwaggerDoc() map[string]string { + return map_Table +} + +var map_TableColumnDefinition = map[string]string{ + "": "TableColumnDefinition contains information about a column returned in the Table.", + "name": "name is a human readable name for the column.", + "type": "type is an OpenAPI type definition for this column. See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for more.", + "format": "format is an optional OpenAPI type definition for this column. The 'name' format is applied to the primary identifier column to assist in clients identifying column is the resource name. 
See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for more.", + "description": "description is a human readable description of this column.", + "priority": "priority is an integer defining the relative importance of this column compared to others. Lower numbers are considered higher priority. Columns that may be omitted in limited space scenarios should be given a higher priority.", +} + +func (TableColumnDefinition) SwaggerDoc() map[string]string { + return map_TableColumnDefinition +} + +var map_TableOptions = map[string]string{ + "": "TableOptions are used when a Table is requested by the caller.", + "includeObject": "includeObject decides whether to include each object along with its columnar information. Specifying \"None\" will return no object, specifying \"Object\" will return the full object contents, and specifying \"Metadata\" (the default) will return the object's metadata in the PartialObjectMetadata kind in version v1beta1 of the meta.k8s.io API group.", +} + +func (TableOptions) SwaggerDoc() map[string]string { + return map_TableOptions +} + +var map_TableRow = map[string]string{ + "": "TableRow is an individual row in a table.", + "cells": "cells will be as wide as headers and may contain strings, numbers (float64 or int64), booleans, simple maps, or lists, or null. See the type field of the column definition for a more detailed description.", + "conditions": "conditions describe additional status of a row that are relevant for a human user.", + "object": "This field contains the requested additional information about each object based on the includeObject policy when requesting the Table. If \"None\", this field is empty, if \"Object\" this will be the default serialization of the object for the current API version, and if \"Metadata\" (the default) will contain the object metadata. Check the returned kind and apiVersion of the object before parsing.", +} + +func (TableRow) SwaggerDoc() map[string]string { + return map_TableRow +} + +var map_TableRowCondition = map[string]string{ + "": "TableRowCondition allows a row to be marked with additional information.", + "type": "Type of row condition.", + "status": "Status of the condition, one of True, False, Unknown.", + "reason": "(brief) machine readable reason for the condition's last transition.", + "message": "Human readable message indicating details about last transition.", +} + +func (TableRowCondition) SwaggerDoc() map[string]string { + return map_TableRowCondition +} + // AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.deepcopy.go index 89053b981..b77db1b15 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.deepcopy.go @@ -21,20 +21,48 @@ limitations under the License. package v1beta1 import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PartialObjectMetadata) DeepCopyInto(out *PartialObjectMetadata) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PartialObjectMetadata. 
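The SwaggerDoc methods above all share one convention: a map from JSON field name to documentation string, with the empty key describing the type itself; the OpenAPI tooling discovers these through the SwaggerDoc() method. The same shape hand-written for an invented type:

package main

import "fmt"

// Gadget exists only to illustrate the convention.
type Gadget struct {
	Size int `json:"size"`
}

var map_Gadget = map[string]string{
	"":     "Gadget is a hypothetical type used for illustration.",
	"size": "size is the number of units this gadget occupies.",
}

// SwaggerDoc mirrors the generated methods: the "" key documents the type,
// every other key documents the matching JSON field.
func (Gadget) SwaggerDoc() map[string]string {
	return map_Gadget
}

func main() {
	fmt.Println(Gadget{}.SwaggerDoc()["size"])
}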
+func (in *PartialObjectMetadata) DeepCopy() *PartialObjectMetadata { + if in == nil { + return nil + } + out := new(PartialObjectMetadata) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PartialObjectMetadata) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PartialObjectMetadataList) DeepCopyInto(out *PartialObjectMetadataList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items - *out = make([]v1.PartialObjectMetadata, len(*in)) + *out = make([]*PartialObjectMetadata, len(*in)) for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(PartialObjectMetadata) + (*in).DeepCopyInto(*out) + } } } return @@ -57,3 +85,105 @@ func (in *PartialObjectMetadataList) DeepCopyObject() runtime.Object { } return nil } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Table) DeepCopyInto(out *Table) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.ColumnDefinitions != nil { + in, out := &in.ColumnDefinitions, &out.ColumnDefinitions + *out = make([]TableColumnDefinition, len(*in)) + copy(*out, *in) + } + if in.Rows != nil { + in, out := &in.Rows, &out.Rows + *out = make([]TableRow, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Table. +func (in *Table) DeepCopy() *Table { + if in == nil { + return nil + } + out := new(Table) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Table) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TableColumnDefinition) DeepCopyInto(out *TableColumnDefinition) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableColumnDefinition. +func (in *TableColumnDefinition) DeepCopy() *TableColumnDefinition { + if in == nil { + return nil + } + out := new(TableColumnDefinition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TableOptions) DeepCopyInto(out *TableOptions) { + *out = *in + out.TypeMeta = in.TypeMeta + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableOptions. +func (in *TableOptions) DeepCopy() *TableOptions { + if in == nil { + return nil + } + out := new(TableOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *TableOptions) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
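Switching Items to []*PartialObjectMetadata reshapes the generated loop above: a slice of pointers needs a nil guard and a fresh allocation per element, where a slice of structs could clone each index in place. The pattern in isolation (Elem is invented):

package main

import "fmt"

type Elem struct{ Name string }

func (in *Elem) DeepCopyInto(out *Elem) { *out = *in }

// deepCopyPtrSlice mirrors the generated loop: allocate a new backing
// array, then clone each non-nil element into freshly allocated storage.
func deepCopyPtrSlice(in []*Elem) []*Elem {
	if in == nil {
		return nil
	}
	out := make([]*Elem, len(in))
	for i := range in {
		if in[i] != nil {
			out[i] = new(Elem)
			in[i].DeepCopyInto(out[i])
		}
	}
	return out
}

func main() {
	orig := []*Elem{{Name: "a"}, nil}
	cloned := deepCopyPtrSlice(orig)
	cloned[0].Name = "b"
	fmt.Println(orig[0].Name, cloned[0].Name) // a b
}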
+func (in *TableRow) DeepCopyInto(out *TableRow) { + clone := in.DeepCopy() + *out = *clone + return +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TableRowCondition) DeepCopyInto(out *TableRowCondition) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableRowCondition. +func (in *TableRowCondition) DeepCopy() *TableRowCondition { + if in == nil { + return nil + } + out := new(TableRowCondition) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/convert.go b/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/convert.go index 2f0dd0074..b3804aa42 100644 --- a/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/convert.go +++ b/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/convert.go @@ -54,6 +54,10 @@ func jsonTag(field reflect.StructField) (string, bool) { return tag, omitempty } +func formatValue(value interface{}) string { + return fmt.Sprintf("%v", value) +} + func isPointerKind(kind reflect.Kind) bool { return kind == reflect.Ptr } diff --git a/vendor/k8s.io/apimachinery/pkg/labels/labels.go b/vendor/k8s.io/apimachinery/pkg/labels/labels.go index abf3ace6f..32db4d96f 100644 --- a/vendor/k8s.io/apimachinery/pkg/labels/labels.go +++ b/vendor/k8s.io/apimachinery/pkg/labels/labels.go @@ -172,7 +172,7 @@ func ConvertSelectorToLabelsMap(selector string) (Set, error) { return labelsMap, err } value := strings.TrimSpace(l[1]) - if err := validateLabelValue(key, value); err != nil { + if err := validateLabelValue(value); err != nil { return labelsMap, err } labelsMap[key] = value diff --git a/vendor/k8s.io/apimachinery/pkg/labels/selector.go b/vendor/k8s.io/apimachinery/pkg/labels/selector.go index 9be9e57d3..374d2ef13 100644 --- a/vendor/k8s.io/apimachinery/pkg/labels/selector.go +++ b/vendor/k8s.io/apimachinery/pkg/labels/selector.go @@ -23,10 +23,10 @@ import ( "strconv" "strings" + "github.com/golang/glog" "k8s.io/apimachinery/pkg/selection" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/validation" - "k8s.io/klog" ) // Requirements is AND of all requirements. @@ -162,7 +162,7 @@ func NewRequirement(key string, op selection.Operator, vals []string) (*Requirem } for i := range vals { - if err := validateLabelValue(key, vals[i]); err != nil { + if err := validateLabelValue(vals[i]); err != nil { return nil, err } } @@ -211,13 +211,13 @@ func (r *Requirement) Matches(ls Labels) bool { } lsValue, err := strconv.ParseInt(ls.Get(r.key), 10, 64) if err != nil { - klog.V(10).Infof("ParseInt failed for value %+v in label %+v, %+v", ls.Get(r.key), ls, err) + glog.V(10).Infof("ParseInt failed for value %+v in label %+v, %+v", ls.Get(r.key), ls, err) return false } // There should be only one strValue in r.strValues, and can be converted to a integer. 
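The validateLabelValue change above only drops the key from the error text; the real check is validation.IsValidLabelValue (empty, or at most 63 characters beginning and ending alphanumeric, with '-', '_', and '.' allowed inside). Both it and ConvertSelectorToLabelsMap, touched in the labels.go hunk, can be exercised directly:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/util/validation"
)

func main() {
	// IsValidLabelValue returns violation messages; empty means valid.
	fmt.Println(validation.IsValidLabelValue("web-v1"))   // []
	fmt.Println(validation.IsValidLabelValue("-leading")) // one violation

	// ConvertSelectorToLabelsMap parses simple "k=v,k=v" selectors,
	// validating each key and value along the way.
	set, err := labels.ConvertSelectorToLabelsMap("app=web,tier=frontend")
	fmt.Println(set, err) // app=web,tier=frontend <nil>
}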
if len(r.strValues) != 1 { - klog.V(10).Infof("Invalid values count %+v of requirement %#v, for 'Gt', 'Lt' operators, exactly one value is required", len(r.strValues), r) + glog.V(10).Infof("Invalid values count %+v of requirement %#v, for 'Gt', 'Lt' operators, exactly one value is required", len(r.strValues), r) return false } @@ -225,7 +225,7 @@ func (r *Requirement) Matches(ls Labels) bool { for i := range r.strValues { rValue, err = strconv.ParseInt(r.strValues[i], 10, 64) if err != nil { - klog.V(10).Infof("ParseInt failed for value %+v in requirement %#v, for 'Gt', 'Lt' operators, the value must be an integer", r.strValues[i], r) + glog.V(10).Infof("ParseInt failed for value %+v in requirement %#v, for 'Gt', 'Lt' operators, the value must be an integer", r.strValues[i], r) return false } } @@ -837,9 +837,9 @@ func validateLabelKey(k string) error { return nil } -func validateLabelValue(k, v string) error { +func validateLabelValue(v string) error { if errs := validation.IsValidLabelValue(v); len(errs) != 0 { - return fmt.Errorf("invalid label value: %q: at key: %q: %s", v, k, strings.Join(errs, "; ")) + return fmt.Errorf("invalid label value: %q: %s", v, strings.Join(errs, "; ")) } return nil } diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/converter.go b/vendor/k8s.io/apimachinery/pkg/runtime/converter.go index 80343081f..291d7a4e8 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/converter.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/converter.go @@ -33,7 +33,7 @@ import ( "k8s.io/apimachinery/pkg/util/json" utilruntime "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/klog" + "github.com/golang/glog" ) // UnstructuredConverter is an interface for converting between interface{} @@ -133,10 +133,10 @@ func (c *unstructuredConverter) FromUnstructured(u map[string]interface{}, obj i newObj := reflect.New(t.Elem()).Interface() newErr := fromUnstructuredViaJSON(u, newObj) if (err != nil) != (newErr != nil) { - klog.Fatalf("FromUnstructured unexpected error for %v: error: %v", u, err) + glog.Fatalf("FromUnstructured unexpected error for %v: error: %v", u, err) } if err == nil && !c.comparison.DeepEqual(obj, newObj) { - klog.Fatalf("FromUnstructured mismatch\nobj1: %#v\nobj2: %#v", obj, newObj) + glog.Fatalf("FromUnstructured mismatch\nobj1: %#v\nobj2: %#v", obj, newObj) } } return err @@ -424,10 +424,10 @@ func (c *unstructuredConverter) ToUnstructured(obj interface{}) (map[string]inte newUnstr := map[string]interface{}{} newErr := toUnstructuredViaJSON(obj, &newUnstr) if (err != nil) != (newErr != nil) { - klog.Fatalf("ToUnstructured unexpected error for %v: error: %v; newErr: %v", obj, err, newErr) + glog.Fatalf("ToUnstructured unexpected error for %v: error: %v; newErr: %v", obj, err, newErr) } if err == nil && !c.comparison.DeepEqual(u, newUnstr) { - klog.Fatalf("ToUnstructured mismatch\nobj1: %#v\nobj2: %#v", u, newUnstr) + glog.Fatalf("ToUnstructured mismatch\nobj1: %#v\nobj2: %#v", u, newUnstr) } } if err != nil { @@ -746,7 +746,7 @@ func isZero(v reflect.Value) bool { func structToUnstructured(sv, dv reflect.Value) error { st, dt := sv.Type(), dv.Type() if dt.Kind() == reflect.Interface && dv.NumMethod() == 0 { - dv.Set(reflect.MakeMapWithSize(mapStringInterfaceType, st.NumField())) + dv.Set(reflect.MakeMap(mapStringInterfaceType)) dv = dv.Elem() dt = dv.Type() } diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/error.go b/vendor/k8s.io/apimachinery/pkg/runtime/error.go index be0c5edc8..322b0313d 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/error.go +++ 
b/vendor/k8s.io/apimachinery/pkg/runtime/error.go @@ -120,32 +120,3 @@ func IsMissingVersion(err error) bool { _, ok := err.(*missingVersionErr) return ok } - -// strictDecodingError is a base error type that is returned by a strict Decoder such -// as UniversalStrictDecoder. -type strictDecodingError struct { - message string - data string -} - -// NewStrictDecodingError creates a new strictDecodingError object. -func NewStrictDecodingError(message string, data string) error { - return &strictDecodingError{ - message: message, - data: data, - } -} - -func (e *strictDecodingError) Error() string { - return fmt.Sprintf("strict decoder error for %s: %s", e.data, e.message) -} - -// IsStrictDecodingError returns true if the error indicates that the provided object -// strictness violations. -func IsStrictDecodingError(err error) bool { - if err == nil { - return false - } - _, ok := err.(*strictDecodingError) - return ok -} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go index 9b15989c8..967e0f530 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go @@ -14,8 +14,9 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// Code generated by protoc-gen-gogo. // source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto +// DO NOT EDIT! /* Package runtime is a generated protocol buffer package. @@ -157,6 +158,24 @@ func (m *Unknown) MarshalTo(dAtA []byte) (int, error) { return i, nil } +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/helper.go b/vendor/k8s.io/apimachinery/pkg/runtime/helper.go index 7bd1a3a6a..33f11eb10 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/helper.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/helper.go @@ -51,7 +51,7 @@ func UnsafeObjectConvertor(scheme *Scheme) ObjectConvertor { func SetField(src interface{}, v reflect.Value, fieldName string) error { field := v.FieldByName(fieldName) if !field.IsValid() { - return fmt.Errorf("couldn't find %v field in %T", fieldName, v.Interface()) + return fmt.Errorf("couldn't find %v field in %#v", fieldName, v.Interface()) } srcValue := reflect.ValueOf(src) if srcValue.Type().AssignableTo(field.Type()) { @@ -70,7 +70,7 @@ func SetField(src interface{}, v reflect.Value, fieldName string) error { func Field(v reflect.Value, fieldName string, dest interface{}) error { field := v.FieldByName(fieldName) if !field.IsValid() { - return fmt.Errorf("couldn't find %v field in %T", fieldName, v.Interface()) + return fmt.Errorf("couldn't find %v field in %#v", fieldName, v.Interface()) } destValue, err := conversion.EnforcePtr(dest) if err != nil { @@ -93,7 +93,7 @@ func Field(v 
reflect.Value, fieldName string, dest interface{}) error { func FieldPtr(v reflect.Value, fieldName string, dest interface{}) error { field := v.FieldByName(fieldName) if !field.IsValid() { - return fmt.Errorf("couldn't find %v field in %T", fieldName, v.Interface()) + return fmt.Errorf("couldn't find %v field in %#v", fieldName, v.Interface()) } v, err := conversion.EnforcePtr(dest) if err != nil { @@ -210,50 +210,3 @@ type defaultFramer struct{} func (defaultFramer) NewFrameReader(r io.ReadCloser) io.ReadCloser { return r } func (defaultFramer) NewFrameWriter(w io.Writer) io.Writer { return w } - -// WithVersionEncoder serializes an object and ensures the GVK is set. -type WithVersionEncoder struct { - Version GroupVersioner - Encoder - ObjectTyper -} - -// Encode does not do conversion. It sets the gvk during serialization. -func (e WithVersionEncoder) Encode(obj Object, stream io.Writer) error { - gvks, _, err := e.ObjectTyper.ObjectKinds(obj) - if err != nil { - if IsNotRegisteredError(err) { - return e.Encoder.Encode(obj, stream) - } - return err - } - kind := obj.GetObjectKind() - oldGVK := kind.GroupVersionKind() - gvk := gvks[0] - if e.Version != nil { - preferredGVK, ok := e.Version.KindForGroupVersionKinds(gvks) - if ok { - gvk = preferredGVK - } - } - kind.SetGroupVersionKind(gvk) - err = e.Encoder.Encode(obj, stream) - kind.SetGroupVersionKind(oldGVK) - return err -} - -// WithoutVersionDecoder clears the group version kind of a deserialized object. -type WithoutVersionDecoder struct { - Decoder -} - -// Decode does not do conversion. It removes the gvk during deserialization. -func (d WithoutVersionDecoder) Decode(data []byte, defaults *schema.GroupVersionKind, into Object) (Object, *schema.GroupVersionKind, error) { - obj, gvk, err := d.Decoder.Decode(data, defaults, into) - if obj != nil { - kind := obj.GetObjectKind() - // clearing the gvk is just a convention of a codec - kind.SetGroupVersionKind(schema.GroupVersionKind{}) - } - return obj, gvk, err -} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go b/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go index bded5bf15..699ff13e0 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go @@ -91,10 +91,6 @@ type Framer interface { type SerializerInfo struct { // MediaType is the value that represents this serializer over the wire. MediaType string - // MediaTypeType is the first part of the MediaType ("application" in "application/json"). - MediaTypeType string - // MediaTypeSubType is the second part of the MediaType ("json" in "application/json"). - MediaTypeSubType string // EncodesAsText indicates this serializer can be encoded to UTF-8 safely. EncodesAsText bool // Serializer is the individual object serializer for this media type. @@ -210,25 +206,6 @@ type ObjectCreater interface { New(kind schema.GroupVersionKind) (out Object, err error) } -// EquivalentResourceMapper provides information about resources that address the same underlying data as a specified resource -type EquivalentResourceMapper interface { - // EquivalentResourcesFor returns a list of resources that address the same underlying data as resource. - // If subresource is specified, only equivalent resources which also have the same subresource are included. - // The specified resource can be included in the returned list. 
- EquivalentResourcesFor(resource schema.GroupVersionResource, subresource string) []schema.GroupVersionResource - // KindFor returns the kind expected by the specified resource[/subresource]. - // A zero value is returned if the kind is unknown. - KindFor(resource schema.GroupVersionResource, subresource string) schema.GroupVersionKind -} - -// EquivalentResourceRegistry provides an EquivalentResourceMapper interface, -// and allows registering known resource[/subresource] -> kind -type EquivalentResourceRegistry interface { - EquivalentResourceMapper - // RegisterKindFor registers the existence of the specified resource[/subresource] along with its expected kind. - RegisterKindFor(resource schema.GroupVersionResource, subresource string, kind schema.GroupVersionKind) -} - // ResourceVersioner provides methods for setting and retrieving // the resource version from an API object. type ResourceVersioner interface { @@ -260,9 +237,6 @@ type Object interface { // to JSON allowed. type Unstructured interface { Object - // NewEmptyInstance returns a new instance of the concrete type containing only kind/apiVersion and no other data. - // This should be called instead of reflect.New() for unstructured types because the go type alone does not preserve kind/apiVersion info. - NewEmptyInstance() Unstructured // UnstructuredContent returns a non-nil map with this object's contents. Values may be // []interface{}, map[string]interface{}, or any primitive type. Contents are typically serialized to // and from JSON. SetUnstructuredContent should be used to mutate the contents. diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/mapper.go b/vendor/k8s.io/apimachinery/pkg/runtime/mapper.go deleted file mode 100644 index 3ff84611a..000000000 --- a/vendor/k8s.io/apimachinery/pkg/runtime/mapper.go +++ /dev/null @@ -1,98 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package runtime - -import ( - "sync" - - "k8s.io/apimachinery/pkg/runtime/schema" -) - -type equivalentResourceRegistry struct { - // keyFunc computes a key for the specified resource (this allows honoring colocated resources across API groups). - // if null, or if "" is returned, resource.String() is used as the key - keyFunc func(resource schema.GroupResource) string - // resources maps key -> subresource -> equivalent resources (subresource is not included in the returned resources). - // main resources are stored with subresource="". - resources map[string]map[string][]schema.GroupVersionResource - // kinds maps resource -> subresource -> kind - kinds map[schema.GroupVersionResource]map[string]schema.GroupVersionKind - // keys caches the computed key for each GroupResource - keys map[schema.GroupResource]string - - mutex sync.RWMutex -} - -var _ EquivalentResourceMapper = (*equivalentResourceRegistry)(nil) -var _ EquivalentResourceRegistry = (*equivalentResourceRegistry)(nil) - -// NewEquivalentResourceRegistry creates a resource registry that considers all versions of a GroupResource to be equivalent. 
-func NewEquivalentResourceRegistry() EquivalentResourceRegistry { - return &equivalentResourceRegistry{} -} - -// NewEquivalentResourceRegistryWithIdentity creates a resource mapper with a custom identity function. -// If "" is returned by the function, GroupResource#String is used as the identity. -// GroupResources with the same identity string are considered equivalent. -func NewEquivalentResourceRegistryWithIdentity(keyFunc func(schema.GroupResource) string) EquivalentResourceRegistry { - return &equivalentResourceRegistry{keyFunc: keyFunc} -} - -func (r *equivalentResourceRegistry) EquivalentResourcesFor(resource schema.GroupVersionResource, subresource string) []schema.GroupVersionResource { - r.mutex.RLock() - defer r.mutex.RUnlock() - return r.resources[r.keys[resource.GroupResource()]][subresource] -} -func (r *equivalentResourceRegistry) KindFor(resource schema.GroupVersionResource, subresource string) schema.GroupVersionKind { - r.mutex.RLock() - defer r.mutex.RUnlock() - return r.kinds[resource][subresource] -} -func (r *equivalentResourceRegistry) RegisterKindFor(resource schema.GroupVersionResource, subresource string, kind schema.GroupVersionKind) { - r.mutex.Lock() - defer r.mutex.Unlock() - if r.kinds == nil { - r.kinds = map[schema.GroupVersionResource]map[string]schema.GroupVersionKind{} - } - if r.kinds[resource] == nil { - r.kinds[resource] = map[string]schema.GroupVersionKind{} - } - r.kinds[resource][subresource] = kind - - // get the shared key of the parent resource - key := "" - gr := resource.GroupResource() - if r.keyFunc != nil { - key = r.keyFunc(gr) - } - if key == "" { - key = gr.String() - } - - if r.keys == nil { - r.keys = map[schema.GroupResource]string{} - } - r.keys[gr] = key - - if r.resources == nil { - r.resources = map[string]map[string][]schema.GroupVersionResource{} - } - if r.resources[key] == nil { - r.resources[key] = map[string][]schema.GroupVersionResource{} - } - r.resources[key][subresource] = append(r.resources[key][subresource], resource) -} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.pb.go index 28a61d5fb..5c9934c73 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.pb.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.pb.go @@ -14,8 +14,9 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// Code generated by protoc-gen-gogo. // source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto +// DO NOT EDIT! /* Package schema is a generated protocol buffer package. 
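[Editor's note] The mapper.go deletion above removes the EquivalentResourceRegistry API entirely. For readers tracking what this pin drops, a minimal usage sketch, assuming an apimachinery release that still ships runtime/mapper.go; the group/version/resource names are illustrative only:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

func main() {
	// By default, all versions of a GroupResource are treated as equivalent.
	reg := runtime.NewEquivalentResourceRegistry()
	scale := schema.GroupVersionKind{Group: "autoscaling", Version: "v1", Kind: "Scale"}
	reg.RegisterKindFor(schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"}, "scale", scale)
	reg.RegisterKindFor(schema.GroupVersionResource{Group: "apps", Version: "v1beta1", Resource: "deployments"}, "scale", scale)

	// Both registered versions come back as equivalents of either one.
	fmt.Println(reg.EquivalentResourcesFor(schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"}, "scale"))
	// The expected kind for the resource/subresource pair.
	fmt.Println(reg.KindFor(schema.GroupVersionResource{Group: "apps", Version: "v1beta1", Resource: "deployments"}, "scale"))
}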
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version.go b/vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version.go index 4c67ed598..5f02961d3 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version.go @@ -66,7 +66,7 @@ func (gr GroupResource) Empty() bool { return len(gr.Group) == 0 && len(gr.Resource) == 0 } -func (gr GroupResource) String() string { +func (gr *GroupResource) String() string { if len(gr.Group) == 0 { return gr.Resource } @@ -111,7 +111,7 @@ func (gvr GroupVersionResource) GroupVersion() GroupVersion { return GroupVersion{Group: gvr.Group, Version: gvr.Version} } -func (gvr GroupVersionResource) String() string { +func (gvr *GroupVersionResource) String() string { return strings.Join([]string{gvr.Group, "/", gvr.Version, ", Resource=", gvr.Resource}, "") } @@ -130,7 +130,7 @@ func (gk GroupKind) WithVersion(version string) GroupVersionKind { return GroupVersionKind{Group: gk.Group, Version: version, Kind: gk.Kind} } -func (gk GroupKind) String() string { +func (gk *GroupKind) String() string { if len(gk.Group) == 0 { return gk.Kind } @@ -281,8 +281,8 @@ func bestMatch(kinds []GroupVersionKind, targets []GroupVersionKind) GroupVersio // ToAPIVersionAndKind is a convenience method for satisfying runtime.Object on types that // do not use TypeMeta. -func (gvk GroupVersionKind) ToAPIVersionAndKind() (string, string) { - if gvk.Empty() { +func (gvk *GroupVersionKind) ToAPIVersionAndKind() (string, string) { + if gvk == nil { return "", "" } return gvk.GroupVersion().String(), gvk.Kind diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go index 01f56c987..65f451124 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go @@ -17,13 +17,9 @@ limitations under the License. 
package serializer import ( - "mime" - "strings" - "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/serializer/json" - "k8s.io/apimachinery/pkg/runtime/serializer/protobuf" "k8s.io/apimachinery/pkg/runtime/serializer/recognizer" "k8s.io/apimachinery/pkg/runtime/serializer/versioning" ) @@ -52,8 +48,6 @@ func newSerializersForScheme(scheme *runtime.Scheme, mf json.MetaFactory) []seri jsonSerializer := json.NewSerializer(mf, scheme, scheme, false) jsonPrettySerializer := json.NewSerializer(mf, scheme, scheme, true) yamlSerializer := json.NewYAMLSerializer(mf, scheme, scheme) - serializer := protobuf.NewSerializer(scheme, scheme) - raw := protobuf.NewRawSerializer(scheme, scheme) serializers := []serializerType{ { @@ -74,15 +68,6 @@ func newSerializersForScheme(scheme *runtime.Scheme, mf json.MetaFactory) []seri EncodesAsText: true, Serializer: yamlSerializer, }, - { - AcceptContentTypes: []string{runtime.ContentTypeProtobuf}, - ContentType: runtime.ContentTypeProtobuf, - FileExtensions: []string{"pb"}, - Serializer: serializer, - - Framer: protobuf.LengthDelimitedFramer, - StreamSerializer: raw, - }, } for _, fn := range serializerExtensions { @@ -135,15 +120,6 @@ func newCodecFactory(scheme *runtime.Scheme, serializers []serializerType) Codec Serializer: d.Serializer, PrettySerializer: d.PrettySerializer, } - - mediaType, _, err := mime.ParseMediaType(info.MediaType) - if err != nil { - panic(err) - } - parts := strings.SplitN(mediaType, "/", 2) - info.MediaTypeType = parts[0] - info.MediaTypeSubType = parts[1] - if d.StreamSerializer != nil { info.StreamSerializer = &runtime.StreamSerializerInfo{ Serializer: d.StreamSerializer, @@ -172,12 +148,6 @@ func newCodecFactory(scheme *runtime.Scheme, serializers []serializerType) Codec } } -// WithoutConversion returns a NegotiatedSerializer that performs no conversion, even if the -// caller requests it. -func (f CodecFactory) WithoutConversion() runtime.NegotiatedSerializer { - return WithoutConversionCodecFactory{f} -} - // SupportedMediaTypes returns the RFC2046 media types that this factory has serializers for. func (f CodecFactory) SupportedMediaTypes() []runtime.SerializerInfo { return f.accepts @@ -245,30 +215,23 @@ func (f CodecFactory) EncoderForVersion(encoder runtime.Encoder, gv runtime.Grou return f.CodecForVersions(encoder, nil, gv, nil) } -// WithoutConversionCodecFactory is a CodecFactory that will explicitly ignore requests to perform conversion. -// This wrapper is used while code migrates away from using conversion (such as external clients) and in the future -// will be unnecessary when we change the signature of NegotiatedSerializer. -type WithoutConversionCodecFactory struct { +// DirectCodecFactory provides methods for retrieving "DirectCodec"s, which do not do conversion. +type DirectCodecFactory struct { CodecFactory } -// EncoderForVersion returns an encoder that does not do conversion, but does set the group version kind of the object -// when serialized. -func (f WithoutConversionCodecFactory) EncoderForVersion(serializer runtime.Encoder, version runtime.GroupVersioner) runtime.Encoder { - return runtime.WithVersionEncoder{ +// EncoderForVersion returns an encoder that does not do conversion. 
+func (f DirectCodecFactory) EncoderForVersion(serializer runtime.Encoder, version runtime.GroupVersioner) runtime.Encoder { + return versioning.DirectEncoder{ Version: version, Encoder: serializer, ObjectTyper: f.CodecFactory.scheme, } } -// DecoderToVersion returns an decoder that does not do conversion. -func (f WithoutConversionCodecFactory) DecoderToVersion(serializer runtime.Decoder, _ runtime.GroupVersioner) runtime.Decoder { - return runtime.WithoutVersionDecoder{ +// DecoderToVersion returns an decoder that does not do conversion. gv is ignored. +func (f DirectCodecFactory) DecoderToVersion(serializer runtime.Decoder, _ runtime.GroupVersioner) runtime.Decoder { + return versioning.DirectDecoder{ Decoder: serializer, } } - -// DirectCodecFactory was renamed to WithoutConversionCodecFactory in 1.15. -// TODO: remove in 1.16. -type DirectCodecFactory = WithoutConversionCodecFactory diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go index 69ada8ecf..382c4858e 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go @@ -22,9 +22,9 @@ import ( "strconv" "unsafe" + "github.com/ghodss/yaml" jsoniter "github.com/json-iterator/go" "github.com/modern-go/reflect2" - "sigs.k8s.io/yaml" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" @@ -35,56 +35,34 @@ import ( // NewSerializer creates a JSON serializer that handles encoding versioned objects into the proper JSON form. If typer // is not nil, the object has the group, version, and kind fields set. -// Deprecated: use NewSerializerWithOptions instead. func NewSerializer(meta MetaFactory, creater runtime.ObjectCreater, typer runtime.ObjectTyper, pretty bool) *Serializer { - return NewSerializerWithOptions(meta, creater, typer, SerializerOptions{false, pretty, false}) + return &Serializer{ + meta: meta, + creater: creater, + typer: typer, + yaml: false, + pretty: pretty, + } } // NewYAMLSerializer creates a YAML serializer that handles encoding versioned objects into the proper YAML form. If typer // is not nil, the object has the group, version, and kind fields set. This serializer supports only the subset of YAML that // matches JSON, and will error if constructs are used that do not serialize to JSON. -// Deprecated: use NewSerializerWithOptions instead. func NewYAMLSerializer(meta MetaFactory, creater runtime.ObjectCreater, typer runtime.ObjectTyper) *Serializer { - return NewSerializerWithOptions(meta, creater, typer, SerializerOptions{true, false, false}) -} - -// NewSerializerWithOptions creates a JSON/YAML serializer that handles encoding versioned objects into the proper JSON/YAML -// form. If typer is not nil, the object has the group, version, and kind fields set. Options are copied into the Serializer -// and are immutable. -func NewSerializerWithOptions(meta MetaFactory, creater runtime.ObjectCreater, typer runtime.ObjectTyper, options SerializerOptions) *Serializer { return &Serializer{ meta: meta, creater: creater, typer: typer, - options: options, + yaml: true, } } -// SerializerOptions holds the options which are used to configure a JSON/YAML serializer. -// example: -// (1) To configure a JSON serializer, set `Yaml` to `false`. -// (2) To configure a YAML serializer, set `Yaml` to `true`. -// (3) To configure a strict serializer that can return strictDecodingError, set `Strict` to `true`. 
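[Editor's note] With SerializerOptions removed (the struct deleted just below), the pinned constructors take positional flags instead of an options struct. A sketch of the pinned API; assumes k8s.io/api/core/v1 is on the vendor path:

package main

import (
	"os"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/serializer/json"
)

func main() {
	scheme := runtime.NewScheme()
	_ = corev1.AddToScheme(scheme)

	// JSON and YAML serializers under the pinned positional constructors.
	jsonSer := json.NewSerializer(json.DefaultMetaFactory, scheme, scheme, false)
	yamlSer := json.NewYAMLSerializer(json.DefaultMetaFactory, scheme, scheme)

	pod := &corev1.Pod{}
	_ = jsonSer.Encode(pod, os.Stdout)
	_ = yamlSer.Encode(pod, os.Stdout)
}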
-type SerializerOptions struct { - // Yaml: configures the Serializer to work with JSON(false) or YAML(true). - // When `Yaml` is enabled, this serializer only supports the subset of YAML that - // matches JSON, and will error if constructs are used that do not serialize to JSON. - Yaml bool - - // Pretty: configures a JSON enabled Serializer(`Yaml: false`) to produce human-readable output. - // This option is silently ignored when `Yaml` is `true`. - Pretty bool - - // Strict: configures the Serializer to return strictDecodingError's when duplicate fields are present decoding JSON or YAML. - // Note that enabling this option is not as performant as the non-strict variant, and should not be used in fast paths. - Strict bool -} - type Serializer struct { meta MetaFactory - options SerializerOptions creater runtime.ObjectCreater typer runtime.ObjectTyper + yaml bool + pretty bool } // Serializer implements Serializer @@ -141,28 +119,11 @@ func CaseSensitiveJsonIterator() jsoniter.API { return config } -// StrictCaseSensitiveJsonIterator returns a jsoniterator API that's configured to be -// case-sensitive, but also disallows unknown fields when unmarshalling. It is compatible with -// the encoding/json standard library. -func StrictCaseSensitiveJsonIterator() jsoniter.API { - config := jsoniter.Config{ - EscapeHTML: true, - SortMapKeys: true, - ValidateJsonRawMessage: true, - CaseSensitive: true, - DisallowUnknownFields: true, - }.Froze() - // Force jsoniter to decode number to interface{} via int64/float64, if possible. - config.RegisterExtension(&customNumberExtension{}) - return config -} - -// Private copies of jsoniter to try to shield against possible mutations +// Private copy of jsoniter to try to shield against possible mutations // from outside. Still does not protect from package level jsoniter.Register*() functions - someone calling them // in some other library will mess with every usage of the jsoniter library in the whole program. // See https://github.com/json-iterator/go/issues/265 var caseSensitiveJsonIterator = CaseSensitiveJsonIterator() -var strictCaseSensitiveJsonIterator = StrictCaseSensitiveJsonIterator() // gvkWithDefaults returns group kind and version defaulting from provided default func gvkWithDefaults(actual, defaultGVK schema.GroupVersionKind) schema.GroupVersionKind { @@ -199,7 +160,7 @@ func (s *Serializer) Decode(originalData []byte, gvk *schema.GroupVersionKind, i } data := originalData - if s.options.Yaml { + if s.yaml { altered, err := yaml.YAMLToJSON(data) if err != nil { return nil, nil, err @@ -255,38 +216,12 @@ func (s *Serializer) Decode(originalData []byte, gvk *schema.GroupVersionKind, i if err := caseSensitiveJsonIterator.Unmarshal(data, obj); err != nil { return nil, actual, err } - - // If the deserializer is non-strict, return successfully here. - if !s.options.Strict { - return obj, actual, nil - } - - // In strict mode pass the data trough the YAMLToJSONStrict converter. - // This is done to catch duplicate fields regardless of encoding (JSON or YAML). For JSON data, - // the output would equal the input, unless there is a parsing error such as duplicate fields. - // As we know this was successful in the non-strict case, the only error that may be returned here - // is because of the newly-added strictness. hence we know we can return the typed strictDecoderError - // the actual error is that the object contains duplicate fields. 
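[Editor's note] The strict path being removed below works by round-tripping the input through YAMLToJSONStrict, so duplicate fields fail regardless of encoding. A standalone sketch of that duplicate-field check, assuming sigs.k8s.io/yaml (the library the newer code used):

package main

import (
	"fmt"

	"sigs.k8s.io/yaml"
)

func main() {
	// Duplicate "kind" keys: the lenient converter keeps the last value,
	// the strict converter rejects the document outright.
	doc := []byte("kind: Pod\nkind: Service\n")
	if out, err := yaml.YAMLToJSON(doc); err == nil {
		fmt.Printf("lenient: %s\n", out) // {"kind":"Service"}
	}
	if _, err := yaml.YAMLToJSONStrict(doc); err != nil {
		fmt.Println("strict:", err) // duplicate-field error surfaces here
	}
}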
- altered, err := yaml.YAMLToJSONStrict(originalData) - if err != nil { - return nil, actual, runtime.NewStrictDecodingError(err.Error(), string(originalData)) - } - // As performance is not an issue for now for the strict deserializer (one has regardless to do - // the unmarshal twice), we take the sanitized, altered data that is guaranteed to have no duplicated - // fields, and unmarshal this into a copy of the already-populated obj. Any error that occurs here is - // due to that a matching field doesn't exist in the object. hence we can return a typed strictDecoderError, - // the actual error is that the object contains unknown field. - strictObj := obj.DeepCopyObject() - if err := strictCaseSensitiveJsonIterator.Unmarshal(altered, strictObj); err != nil { - return nil, actual, runtime.NewStrictDecodingError(err.Error(), string(originalData)) - } - // Always return the same object as the non-strict serializer to avoid any deviations. return obj, actual, nil } // Encode serializes the provided object to the given writer. func (s *Serializer) Encode(obj runtime.Object, w io.Writer) error { - if s.options.Yaml { + if s.yaml { json, err := caseSensitiveJsonIterator.Marshal(obj) if err != nil { return err @@ -299,7 +234,7 @@ func (s *Serializer) Encode(obj runtime.Object, w io.Writer) error { return err } - if s.options.Pretty { + if s.pretty { data, err := caseSensitiveJsonIterator.MarshalIndent(obj, "", " ") if err != nil { return err @@ -313,7 +248,7 @@ func (s *Serializer) Encode(obj runtime.Object, w io.Writer) error { // RecognizesData implements the RecognizingDecoder interface. func (s *Serializer) RecognizesData(peek io.Reader) (ok, unknown bool, err error) { - if s.options.Yaml { + if s.yaml { // we could potentially look for '---' return false, true, nil } diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/protobuf.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/protobuf.go index 8af889d35..b99ba25c8 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/protobuf.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/protobuf.go @@ -69,18 +69,22 @@ func IsNotMarshalable(err error) bool { // NewSerializer creates a Protobuf serializer that handles encoding versioned objects into the proper wire form. If a typer // is passed, the encoded object will have group, version, and kind fields set. If typer is nil, the objects will be written // as-is (any type info passed with the object will be used). -func NewSerializer(creater runtime.ObjectCreater, typer runtime.ObjectTyper) *Serializer { +// +// This encoding scheme is experimental, and is subject to change at any time. 
+func NewSerializer(creater runtime.ObjectCreater, typer runtime.ObjectTyper, defaultContentType string) *Serializer { return &Serializer{ - prefix: protoEncodingPrefix, - creater: creater, - typer: typer, + prefix: protoEncodingPrefix, + creater: creater, + typer: typer, + contentType: defaultContentType, } } type Serializer struct { - prefix []byte - creater runtime.ObjectCreater - typer runtime.ObjectTyper + prefix []byte + creater runtime.ObjectCreater + typer runtime.ObjectTyper + contentType string } var _ runtime.Serializer = &Serializer{} @@ -134,7 +138,7 @@ func (s *Serializer) Decode(originalData []byte, gvk *schema.GroupVersionKind, i if intoUnknown, ok := into.(*runtime.Unknown); ok && intoUnknown != nil { *intoUnknown = unk if ok, _, _ := s.RecognizesData(bytes.NewBuffer(unk.Raw)); ok { - intoUnknown.ContentType = runtime.ContentTypeProtobuf + intoUnknown.ContentType = s.contentType } return intoUnknown, &actual, nil } @@ -299,18 +303,20 @@ func estimateUnknownSize(unk *runtime.Unknown, byteSize uint64) uint64 { // encoded object, and thus is not self describing (callers must know what type is being described in order to decode). // // This encoding scheme is experimental, and is subject to change at any time. -func NewRawSerializer(creater runtime.ObjectCreater, typer runtime.ObjectTyper) *RawSerializer { +func NewRawSerializer(creater runtime.ObjectCreater, typer runtime.ObjectTyper, defaultContentType string) *RawSerializer { return &RawSerializer{ - creater: creater, - typer: typer, + creater: creater, + typer: typer, + contentType: defaultContentType, } } // RawSerializer encodes and decodes objects without adding a runtime.Unknown wrapper (objects are encoded without identifying // type). type RawSerializer struct { - creater runtime.ObjectCreater - typer runtime.ObjectTyper + creater runtime.ObjectCreater + typer runtime.ObjectTyper + contentType string } var _ runtime.Serializer = &RawSerializer{} @@ -352,7 +358,7 @@ func (s *RawSerializer) Decode(originalData []byte, gvk *schema.GroupVersionKind if intoUnknown, ok := into.(*runtime.Unknown); ok && intoUnknown != nil { intoUnknown.Raw = data intoUnknown.ContentEncoding = "" - intoUnknown.ContentType = runtime.ContentTypeProtobuf + intoUnknown.ContentType = s.contentType intoUnknown.SetGroupVersionKind(*actual) return intoUnknown, actual, nil } @@ -405,9 +411,6 @@ func unmarshalToObject(typer runtime.ObjectTyper, creater runtime.ObjectCreater, if err := proto.Unmarshal(data, pb); err != nil { return nil, actual, err } - if actual != nil { - obj.GetObjectKind().SetGroupVersionKind(*actual) - } return obj, actual, nil } diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf_extension.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf_extension.go new file mode 100644 index 000000000..545cf78df --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf_extension.go @@ -0,0 +1,48 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package serializer + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer/protobuf" +) + +const ( + // contentTypeProtobuf is the protobuf type exposed for Kubernetes. It is private to prevent others from + // depending on it unintentionally. + // TODO: potentially move to pkg/api (since it's part of the Kube public API) and pass it in to the + // CodecFactory on initialization. + contentTypeProtobuf = "application/vnd.kubernetes.protobuf" +) + +func protobufSerializer(scheme *runtime.Scheme) (serializerType, bool) { + serializer := protobuf.NewSerializer(scheme, scheme, contentTypeProtobuf) + raw := protobuf.NewRawSerializer(scheme, scheme, contentTypeProtobuf) + return serializerType{ + AcceptContentTypes: []string{contentTypeProtobuf}, + ContentType: contentTypeProtobuf, + FileExtensions: []string{"pb"}, + Serializer: serializer, + + Framer: protobuf.LengthDelimitedFramer, + StreamSerializer: raw, + }, true +} + +func init() { + serializerExtensions = append(serializerExtensions, protobufSerializer) +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go index a04a2e98b..a5ae3ac4b 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go @@ -18,7 +18,6 @@ package versioning import ( "io" - "reflect" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" @@ -91,28 +90,26 @@ func (c *codec) Decode(data []byte, defaultGVK *schema.GroupVersionKind, into ru into = versioned.Last() } - // If the into object is unstructured and expresses an opinion about its group/version, - // create a new instance of the type so we always exercise the conversion path (skips short-circuiting on `into == obj`) - decodeInto := into - if into != nil { - if _, ok := into.(runtime.Unstructured); ok && !into.GetObjectKind().GroupVersionKind().GroupVersion().Empty() { - decodeInto = reflect.New(reflect.TypeOf(into).Elem()).Interface().(runtime.Object) - } - } - - obj, gvk, err := c.decoder.Decode(data, defaultGVK, decodeInto) + obj, gvk, err := c.decoder.Decode(data, defaultGVK, into) if err != nil { return nil, gvk, err } if d, ok := obj.(runtime.NestedObjectDecoder); ok { - if err := d.DecodeNestedObjects(runtime.WithoutVersionDecoder{c.decoder}); err != nil { + if err := d.DecodeNestedObjects(DirectDecoder{c.decoder}); err != nil { return nil, gvk, err } } // if we specify a target, use generic conversion. 
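[Editor's note] The codec Decode path in this versioning.go hunk is most often reached through a CodecFactory's universal deserializer. A minimal sketch, assuming k8s.io/api/core/v1 is available on the vendor path:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/serializer"
)

func main() {
	scheme := runtime.NewScheme()
	_ = corev1.AddToScheme(scheme)
	codecs := serializer.NewCodecFactory(scheme)

	data := []byte(`{"apiVersion":"v1","kind":"Pod","metadata":{"name":"demo"}}`)
	obj, gvk, err := codecs.UniversalDeserializer().Decode(data, nil, nil)
	if err != nil {
		panic(err)
	}
	fmt.Printf("decoded %T as %s\n", obj, gvk) // decoded *v1.Pod as /v1, Kind=Pod
}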
if into != nil { + if into == obj { + if isVersioned { + return versioned, gvk, nil + } + return into, gvk, nil + } + // perform defaulting if requested if c.defaulter != nil { // create a copy to ensure defaulting is not applied to the original versioned objects @@ -126,14 +123,6 @@ func (c *codec) Decode(data []byte, defaultGVK *schema.GroupVersionKind, into ru } } - // Short-circuit conversion if the into object is same object - if into == obj { - if isVersioned { - return versioned, gvk, nil - } - return into, gvk, nil - } - if err := c.convertor.Convert(obj, into, c.decodeVersion); err != nil { return nil, gvk, err } @@ -200,41 +189,84 @@ func (c *codec) Encode(obj runtime.Object, w io.Writer) error { return err } - objectKind := obj.GetObjectKind() - old := objectKind.GroupVersionKind() - // restore the old GVK after encoding - defer objectKind.SetGroupVersionKind(old) - if c.encodeVersion == nil || isUnversioned { if e, ok := obj.(runtime.NestedObjectEncoder); ok { - if err := e.EncodeNestedObjects(runtime.WithVersionEncoder{Encoder: c.encoder, ObjectTyper: c.typer}); err != nil { + if err := e.EncodeNestedObjects(DirectEncoder{Encoder: c.encoder, ObjectTyper: c.typer}); err != nil { return err } } + objectKind := obj.GetObjectKind() + old := objectKind.GroupVersionKind() objectKind.SetGroupVersionKind(gvks[0]) - return c.encoder.Encode(obj, w) + err = c.encoder.Encode(obj, w) + objectKind.SetGroupVersionKind(old) + return err } // Perform a conversion if necessary + objectKind := obj.GetObjectKind() + old := objectKind.GroupVersionKind() out, err := c.convertor.ConvertToVersion(obj, c.encodeVersion) if err != nil { return err } if e, ok := out.(runtime.NestedObjectEncoder); ok { - if err := e.EncodeNestedObjects(runtime.WithVersionEncoder{Version: c.encodeVersion, Encoder: c.encoder, ObjectTyper: c.typer}); err != nil { + if err := e.EncodeNestedObjects(DirectEncoder{Version: c.encodeVersion, Encoder: c.encoder, ObjectTyper: c.typer}); err != nil { return err } } // Conversion is responsible for setting the proper group, version, and kind onto the outgoing object - return c.encoder.Encode(out, w) + err = c.encoder.Encode(out, w) + // restore the old GVK, in case conversion returned the same object + objectKind.SetGroupVersionKind(old) + return err } -// DirectEncoder was moved and renamed to runtime.WithVersionEncoder in 1.15. -// TODO: remove in 1.16. -type DirectEncoder = runtime.WithVersionEncoder +// DirectEncoder serializes an object and ensures the GVK is set. +type DirectEncoder struct { + Version runtime.GroupVersioner + runtime.Encoder + runtime.ObjectTyper +} -// DirectDecoder was moved and renamed to runtime.WithoutVersionDecoder in 1.15. -// TODO: remove in 1.16. -type DirectDecoder = runtime.WithoutVersionDecoder +// Encode does not do conversion. It sets the gvk during serialization. +func (e DirectEncoder) Encode(obj runtime.Object, stream io.Writer) error { + gvks, _, err := e.ObjectTyper.ObjectKinds(obj) + if err != nil { + if runtime.IsNotRegisteredError(err) { + return e.Encoder.Encode(obj, stream) + } + return err + } + kind := obj.GetObjectKind() + oldGVK := kind.GroupVersionKind() + gvk := gvks[0] + if e.Version != nil { + preferredGVK, ok := e.Version.KindForGroupVersionKinds(gvks) + if ok { + gvk = preferredGVK + } + } + kind.SetGroupVersionKind(gvk) + err = e.Encoder.Encode(obj, stream) + kind.SetGroupVersionKind(oldGVK) + return err +} + +// DirectDecoder clears the group version kind of a deserialized object. 
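[Editor's note] Putting DirectEncoder (above) and DirectDecoder (defined just below) together: DirectCodecFactory hands out codecs that stamp or strip the GVK without converting. A sketch; the scheme contents are illustrative:

package main

import (
	"os"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/runtime/serializer"
)

func main() {
	scheme := runtime.NewScheme()
	_ = corev1.AddToScheme(scheme)
	codecs := serializer.NewCodecFactory(scheme)
	direct := serializer.DirectCodecFactory{CodecFactory: codecs}

	info, _ := runtime.SerializerInfoForMediaType(codecs.SupportedMediaTypes(), runtime.ContentTypeJSON)
	// Encoder that sets apiVersion/kind on the wire but never converts.
	enc := direct.EncoderForVersion(info.Serializer, schema.GroupVersion{Group: "", Version: "v1"})
	_ = enc.Encode(&corev1.Pod{}, os.Stdout) // emits {"kind":"Pod","apiVersion":"v1",...}
}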
+type DirectDecoder struct { + runtime.Decoder +} + +// Decode does not do conversion. It removes the gvk during deserialization. +func (d DirectDecoder) Decode(data []byte, defaults *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) { + obj, gvk, err := d.Decoder.Decode(data, defaults, into) + if obj != nil { + kind := obj.GetObjectKind() + // clearing the gvk is just a convention of a codec + kind.SetGroupVersionKind(schema.GroupVersionKind{}) + } + return obj, gvk, err +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/types.go b/vendor/k8s.io/apimachinery/pkg/runtime/types.go index 3d3ebe5f9..e4515d8ed 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/types.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/types.go @@ -41,9 +41,7 @@ type TypeMeta struct { } const ( - ContentTypeJSON string = "application/json" - ContentTypeYAML string = "application/yaml" - ContentTypeProtobuf string = "application/vnd.kubernetes.protobuf" + ContentTypeJSON string = "application/json" ) // RawExtension is used to hold extensions in external versions. diff --git a/vendor/k8s.io/apimachinery/pkg/types/patch.go b/vendor/k8s.io/apimachinery/pkg/types/patch.go index fe8ecaaff..d522d1dbd 100644 --- a/vendor/k8s.io/apimachinery/pkg/types/patch.go +++ b/vendor/k8s.io/apimachinery/pkg/types/patch.go @@ -25,5 +25,4 @@ const ( JSONPatchType PatchType = "application/json-patch+json" MergePatchType PatchType = "application/merge-patch+json" StrategicMergePatchType PatchType = "application/strategic-merge-patch+json" - ApplyPatchType PatchType = "application/apply-patch+yaml" ) diff --git a/vendor/k8s.io/apimachinery/pkg/util/diff/diff.go b/vendor/k8s.io/apimachinery/pkg/util/diff/diff.go index a006b925a..06042617e 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/diff/diff.go +++ b/vendor/k8s.io/apimachinery/pkg/util/diff/diff.go @@ -18,12 +18,16 @@ package diff import ( "bytes" + "encoding/json" "fmt" + "reflect" + "sort" "strings" "text/tabwriter" "github.com/davecgh/go-spew/spew" - "github.com/google/go-cmp/cmp" + + "k8s.io/apimachinery/pkg/util/validation/field" ) // StringDiff diffs a and b and returns a human readable diff. @@ -46,29 +50,220 @@ func StringDiff(a, b string) string { return string(out) } -func legacyDiff(a, b interface{}) string { - return cmp.Diff(a, b) -} - -// ObjectDiff prints the diff of two go objects and fails if the objects -// contain unhandled unexported fields. -// DEPRECATED: use github.com/google/go-cmp/cmp.Diff +// ObjectDiff writes the two objects out as JSON and prints out the identical part of +// the objects followed by the remaining part of 'a' and finally the remaining part of 'b'. +// For debugging tests. func ObjectDiff(a, b interface{}) string { - return legacyDiff(a, b) + ab, err := json.Marshal(a) + if err != nil { + panic(fmt.Sprintf("a: %v", err)) + } + bb, err := json.Marshal(b) + if err != nil { + panic(fmt.Sprintf("b: %v", err)) + } + return StringDiff(string(ab), string(bb)) } -// ObjectGoPrintDiff prints the diff of two go objects and fails if the objects -// contain unhandled unexported fields. -// DEPRECATED: use github.com/google/go-cmp/cmp.Diff +// ObjectGoPrintDiff is like ObjectDiff, but uses go-spew to print the objects, +// which shows absolutely everything by recursing into every single pointer +// (go's %#v formatters OTOH stop at a certain point). This is needed when you +// can't figure out why reflect.DeepEqual is returning false and nothing is +// showing you differences. This will. 
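[Editor's note] The pinned util/diff helpers restored in this file are mainly for debugging tests; a minimal usage sketch:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/diff"
)

func main() {
	a := struct{ Name string }{"alpha"}
	b := struct{ Name string }{"beta"}
	// Field-by-field reflection diff; reports object.Name with both values.
	fmt.Println(diff.ObjectReflectDiff(a, b))
	// go-spew dump diff, for when reflect.DeepEqual fails and nothing obvious differs.
	fmt.Println(diff.ObjectGoPrintDiff(a, b))
}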
func ObjectGoPrintDiff(a, b interface{}) string { - return legacyDiff(a, b) + s := spew.ConfigState{DisableMethods: true} + return StringDiff( + s.Sprintf("%#v", a), + s.Sprintf("%#v", b), + ) } -// ObjectReflectDiff prints the diff of two go objects and fails if the objects -// contain unhandled unexported fields. -// DEPRECATED: use github.com/google/go-cmp/cmp.Diff func ObjectReflectDiff(a, b interface{}) string { - return legacyDiff(a, b) + vA, vB := reflect.ValueOf(a), reflect.ValueOf(b) + if vA.Type() != vB.Type() { + return fmt.Sprintf("type A %T and type B %T do not match", a, b) + } + diffs := objectReflectDiff(field.NewPath("object"), vA, vB) + if len(diffs) == 0 { + return "" + } + out := []string{""} + for _, d := range diffs { + elidedA, elidedB := limit(d.a, d.b, 80) + out = append(out, + fmt.Sprintf("%s:", d.path), + fmt.Sprintf(" a: %s", elidedA), + fmt.Sprintf(" b: %s", elidedB), + ) + } + return strings.Join(out, "\n") +} + +// limit: +// 1. stringifies aObj and bObj +// 2. elides identical prefixes if either is too long +// 3. elides remaining content from the end if either is too long +func limit(aObj, bObj interface{}, max int) (string, string) { + elidedPrefix := "" + elidedASuffix := "" + elidedBSuffix := "" + a, b := fmt.Sprintf("%#v", aObj), fmt.Sprintf("%#v", bObj) + + if aObj != nil && bObj != nil { + if aType, bType := fmt.Sprintf("%T", aObj), fmt.Sprintf("%T", bObj); aType != bType { + a = fmt.Sprintf("%s (%s)", a, aType) + b = fmt.Sprintf("%s (%s)", b, bType) + } + } + + for { + switch { + case len(a) > max && len(a) > 4 && len(b) > 4 && a[:4] == b[:4]: + // a is too long, b has data, and the first several characters are the same + elidedPrefix = "..." + a = a[2:] + b = b[2:] + + case len(b) > max && len(b) > 4 && len(a) > 4 && a[:4] == b[:4]: + // b is too long, a has data, and the first several characters are the same + elidedPrefix = "..." + a = a[2:] + b = b[2:] + + case len(a) > max: + a = a[:max] + elidedASuffix = "..." + + case len(b) > max: + b = b[:max] + elidedBSuffix = "..." + + default: + // both are short enough + return elidedPrefix + a + elidedASuffix, elidedPrefix + b + elidedBSuffix + } + } +} + +func public(s string) bool { + if len(s) == 0 { + return false + } + return s[:1] == strings.ToUpper(s[:1]) +} + +type diff struct { + path *field.Path + a, b interface{} +} + +type orderedDiffs []diff + +func (d orderedDiffs) Len() int { return len(d) } +func (d orderedDiffs) Swap(i, j int) { d[i], d[j] = d[j], d[i] } +func (d orderedDiffs) Less(i, j int) bool { + a, b := d[i].path.String(), d[j].path.String() + if a < b { + return true + } + return false +} + +func objectReflectDiff(path *field.Path, a, b reflect.Value) []diff { + switch a.Type().Kind() { + case reflect.Struct: + var changes []diff + for i := 0; i < a.Type().NumField(); i++ { + if !public(a.Type().Field(i).Name) { + if reflect.DeepEqual(a.Interface(), b.Interface()) { + continue + } + return []diff{{path: path, a: fmt.Sprintf("%#v", a), b: fmt.Sprintf("%#v", b)}} + } + if sub := objectReflectDiff(path.Child(a.Type().Field(i).Name), a.Field(i), b.Field(i)); len(sub) > 0 { + changes = append(changes, sub...) 
+ } + } + return changes + case reflect.Ptr, reflect.Interface: + if a.IsNil() || b.IsNil() { + switch { + case a.IsNil() && b.IsNil(): + return nil + case a.IsNil(): + return []diff{{path: path, a: nil, b: b.Interface()}} + default: + return []diff{{path: path, a: a.Interface(), b: nil}} + } + } + return objectReflectDiff(path, a.Elem(), b.Elem()) + case reflect.Chan: + if !reflect.DeepEqual(a.Interface(), b.Interface()) { + return []diff{{path: path, a: a.Interface(), b: b.Interface()}} + } + return nil + case reflect.Slice: + lA, lB := a.Len(), b.Len() + l := lA + if lB < lA { + l = lB + } + if lA == lB && lA == 0 { + if a.IsNil() != b.IsNil() { + return []diff{{path: path, a: a.Interface(), b: b.Interface()}} + } + return nil + } + var diffs []diff + for i := 0; i < l; i++ { + if !reflect.DeepEqual(a.Index(i), b.Index(i)) { + diffs = append(diffs, objectReflectDiff(path.Index(i), a.Index(i), b.Index(i))...) + } + } + for i := l; i < lA; i++ { + diffs = append(diffs, diff{path: path.Index(i), a: a.Index(i), b: nil}) + } + for i := l; i < lB; i++ { + diffs = append(diffs, diff{path: path.Index(i), a: nil, b: b.Index(i)}) + } + return diffs + case reflect.Map: + if reflect.DeepEqual(a.Interface(), b.Interface()) { + return nil + } + aKeys := make(map[interface{}]interface{}) + for _, key := range a.MapKeys() { + aKeys[key.Interface()] = a.MapIndex(key).Interface() + } + var missing []diff + for _, key := range b.MapKeys() { + if _, ok := aKeys[key.Interface()]; ok { + delete(aKeys, key.Interface()) + if reflect.DeepEqual(a.MapIndex(key).Interface(), b.MapIndex(key).Interface()) { + continue + } + missing = append(missing, objectReflectDiff(path.Key(fmt.Sprintf("%s", key.Interface())), a.MapIndex(key), b.MapIndex(key))...) + continue + } + missing = append(missing, diff{path: path.Key(fmt.Sprintf("%s", key.Interface())), a: nil, b: b.MapIndex(key).Interface()}) + } + for key, value := range aKeys { + missing = append(missing, diff{path: path.Key(fmt.Sprintf("%s", key)), a: value, b: nil}) + } + if len(missing) == 0 { + missing = append(missing, diff{path: path, a: a.Interface(), b: b.Interface()}) + } + sort.Sort(orderedDiffs(missing)) + return missing + default: + if reflect.DeepEqual(a.Interface(), b.Interface()) { + return nil + } + if !a.CanInterface() { + return []diff{{path: path, a: fmt.Sprintf("%#v", a), b: fmt.Sprintf("%#v", b)}} + } + return []diff{{path: path, a: a.Interface(), b: b.Interface()}} + } } // ObjectGoPrintSideBySide prints a and b as textual dumps side by side, diff --git a/vendor/k8s.io/apimachinery/pkg/util/errors/errors.go b/vendor/k8s.io/apimachinery/pkg/util/errors/errors.go index 62a73f34e..88e937679 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/errors/errors.go +++ b/vendor/k8s.io/apimachinery/pkg/util/errors/errors.go @@ -19,8 +19,6 @@ package errors import ( "errors" "fmt" - - "k8s.io/apimachinery/pkg/util/sets" ) // MessageCountMap contains occurrence for each error message. 
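[Editor's note] The util/errors hunk that follows reverts Aggregate's message de-duplication: the pinned Error() simply joins every message in order. Behavior sketch:

package main

import (
	"errors"
	"fmt"

	utilerrors "k8s.io/apimachinery/pkg/util/errors"
)

func main() {
	agg := utilerrors.NewAggregate([]error{
		errors.New("a"), errors.New("b"), errors.New("a"),
	})
	// Pinned behavior: "[a, b, a]" — repeated messages are not collapsed.
	fmt.Println(agg.Error())
}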
@@ -69,38 +67,12 @@ func (agg aggregate) Error() string { if len(agg) == 1 { return agg[0].Error() } - seenerrs := sets.NewString() - result := "" - agg.visit(func(err error) { - msg := err.Error() - if seenerrs.Has(msg) { - return - } - seenerrs.Insert(msg) - if len(seenerrs) > 1 { - result += ", " - } - result += msg - }) - if len(seenerrs) == 1 { - return result - } - return "[" + result + "]" -} - -func (agg aggregate) visit(f func(err error)) { - for _, err := range agg { - switch err := err.(type) { - case aggregate: - err.visit(f) - case Aggregate: - for _, nestedErr := range err.Errors() { - f(nestedErr) - } - default: - f(err) - } + result := fmt.Sprintf("[%s", agg[0].Error()) + for i := 1; i < len(agg); i++ { + result += fmt.Sprintf(", %s", agg[i].Error()) } + result += "]" + return result } // Errors is part of the Aggregate interface. diff --git a/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go index 48dd7d9c5..5c2ac4f23 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go +++ b/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go @@ -14,8 +14,9 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// Code generated by protoc-gen-gogo. // source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto +// DO NOT EDIT! /* Package intstr is a generated protocol buffer package. @@ -80,6 +81,24 @@ func (m *IntOrString) MarshalTo(dAtA []byte) (int, error) { return i, nil } +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) diff --git a/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go b/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go index 5b26ed262..642b83cec 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go +++ b/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go @@ -25,8 +25,8 @@ import ( "strconv" "strings" + "github.com/golang/glog" "github.com/google/gofuzz" - "k8s.io/klog" ) // IntOrString is a type that can hold an int32 or a string. 
When used in @@ -58,7 +58,7 @@ const ( // TODO: convert to (val int32) func FromInt(val int) IntOrString { if val > math.MaxInt32 || val < math.MinInt32 { - klog.Errorf("value: %d overflows int32\n%s\n", val, debug.Stack()) + glog.Errorf("value: %d overflows int32\n%s\n", val, debug.Stack()) } return IntOrString{Type: Int, IntVal: int32(val)} } diff --git a/vendor/k8s.io/apimachinery/pkg/util/mergepatch/util.go b/vendor/k8s.io/apimachinery/pkg/util/mergepatch/util.go index 990fa0d43..d09a939be 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/mergepatch/util.go +++ b/vendor/k8s.io/apimachinery/pkg/util/mergepatch/util.go @@ -21,7 +21,7 @@ import ( "reflect" "github.com/davecgh/go-spew/spew" - "sigs.k8s.io/yaml" + "github.com/ghodss/yaml" ) // PreconditionFunc asserts that an incompatible change is not present within a patch. diff --git a/vendor/k8s.io/apimachinery/pkg/util/net/http.go b/vendor/k8s.io/apimachinery/pkg/util/net/http.go index 078f00d9b..7c2a5e628 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/net/http.go +++ b/vendor/k8s.io/apimachinery/pkg/util/net/http.go @@ -31,8 +31,8 @@ import ( "strconv" "strings" + "github.com/golang/glog" "golang.org/x/net/http2" - "k8s.io/klog" ) // JoinPreservingTrailingSlash does a path.Join of the specified elements, @@ -68,17 +68,14 @@ func IsProbableEOF(err error) bool { if uerr, ok := err.(*url.Error); ok { err = uerr.Err } - msg := err.Error() switch { case err == io.EOF: return true - case msg == "http: can't write HTTP request on broken connection": + case err.Error() == "http: can't write HTTP request on broken connection": return true - case strings.Contains(msg, "http2: server sent GOAWAY and closed the connection"): + case strings.Contains(err.Error(), "connection reset by peer"): return true - case strings.Contains(msg, "connection reset by peer"): - return true - case strings.Contains(strings.ToLower(msg), "use of closed network connection"): + case strings.Contains(strings.ToLower(err.Error()), "use of closed network connection"): return true } return false @@ -110,10 +107,10 @@ func SetTransportDefaults(t *http.Transport) *http.Transport { t = SetOldTransportDefaults(t) // Allow clients to disable http2 if needed. if s := os.Getenv("DISABLE_HTTP2"); len(s) > 0 { - klog.Infof("HTTP2 has been explicitly disabled") + glog.Infof("HTTP2 has been explicitly disabled") } else { if err := http2.ConfigureTransport(t); err != nil { - klog.Warningf("Transport failed http2 configuration: %v", err) + glog.Warningf("Transport failed http2 configuration: %v", err) } } return t @@ -371,7 +368,7 @@ redirectLoop: resp, err := http.ReadResponse(respReader, nil) if err != nil { // Unable to read the backend response; let the client handle it. 
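[Editor's note] In the SetTransportDefaults hunk above, only the logger changes (klog back to glog); the DISABLE_HTTP2 environment knob behaves the same. Sketch:

package main

import (
	"net/http"
	"os"

	utilnet "k8s.io/apimachinery/pkg/util/net"
)

func main() {
	// With DISABLE_HTTP2 set, the pinned code logs the notice via glog
	// and skips http2.ConfigureTransport on the returned transport.
	os.Setenv("DISABLE_HTTP2", "true")
	t := utilnet.SetTransportDefaults(&http.Transport{})
	_ = t
}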
- klog.Warningf("Error reading backend response: %v", err) + glog.Warningf("Error reading backend response: %v", err) break redirectLoop } diff --git a/vendor/k8s.io/apimachinery/pkg/util/net/interface.go b/vendor/k8s.io/apimachinery/pkg/util/net/interface.go index daf5d2496..0ab9b3608 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/net/interface.go +++ b/vendor/k8s.io/apimachinery/pkg/util/net/interface.go @@ -26,7 +26,7 @@ import ( "strings" - "k8s.io/klog" + "github.com/golang/glog" ) type AddressFamily uint @@ -193,7 +193,7 @@ func isInterfaceUp(intf *net.Interface) bool { return false } if intf.Flags&net.FlagUp != 0 { - klog.V(4).Infof("Interface %v is up", intf.Name) + glog.V(4).Infof("Interface %v is up", intf.Name) return true } return false @@ -208,20 +208,20 @@ func isLoopbackOrPointToPoint(intf *net.Interface) bool { func getMatchingGlobalIP(addrs []net.Addr, family AddressFamily) (net.IP, error) { if len(addrs) > 0 { for i := range addrs { - klog.V(4).Infof("Checking addr %s.", addrs[i].String()) + glog.V(4).Infof("Checking addr %s.", addrs[i].String()) ip, _, err := net.ParseCIDR(addrs[i].String()) if err != nil { return nil, err } if memberOf(ip, family) { if ip.IsGlobalUnicast() { - klog.V(4).Infof("IP found %v", ip) + glog.V(4).Infof("IP found %v", ip) return ip, nil } else { - klog.V(4).Infof("Non-global unicast address found %v", ip) + glog.V(4).Infof("Non-global unicast address found %v", ip) } } else { - klog.V(4).Infof("%v is not an IPv%d address", ip, int(family)) + glog.V(4).Infof("%v is not an IPv%d address", ip, int(family)) } } @@ -241,13 +241,13 @@ func getIPFromInterface(intfName string, forFamily AddressFamily, nw networkInte if err != nil { return nil, err } - klog.V(4).Infof("Interface %q has %d addresses :%v.", intfName, len(addrs), addrs) + glog.V(4).Infof("Interface %q has %d addresses :%v.", intfName, len(addrs), addrs) matchingIP, err := getMatchingGlobalIP(addrs, forFamily) if err != nil { return nil, err } if matchingIP != nil { - klog.V(4).Infof("Found valid IPv%d address %v for interface %q.", int(forFamily), matchingIP, intfName) + glog.V(4).Infof("Found valid IPv%d address %v for interface %q.", int(forFamily), matchingIP, intfName) return matchingIP, nil } } @@ -275,14 +275,14 @@ func chooseIPFromHostInterfaces(nw networkInterfacer) (net.IP, error) { return nil, fmt.Errorf("no interfaces found on host.") } for _, family := range []AddressFamily{familyIPv4, familyIPv6} { - klog.V(4).Infof("Looking for system interface with a global IPv%d address", uint(family)) + glog.V(4).Infof("Looking for system interface with a global IPv%d address", uint(family)) for _, intf := range intfs { if !isInterfaceUp(&intf) { - klog.V(4).Infof("Skipping: down interface %q", intf.Name) + glog.V(4).Infof("Skipping: down interface %q", intf.Name) continue } if isLoopbackOrPointToPoint(&intf) { - klog.V(4).Infof("Skipping: LB or P2P interface %q", intf.Name) + glog.V(4).Infof("Skipping: LB or P2P interface %q", intf.Name) continue } addrs, err := nw.Addrs(&intf) @@ -290,7 +290,7 @@ func chooseIPFromHostInterfaces(nw networkInterfacer) (net.IP, error) { return nil, err } if len(addrs) == 0 { - klog.V(4).Infof("Skipping: no addresses on interface %q", intf.Name) + glog.V(4).Infof("Skipping: no addresses on interface %q", intf.Name) continue } for _, addr := range addrs { @@ -299,15 +299,15 @@ func chooseIPFromHostInterfaces(nw networkInterfacer) (net.IP, error) { return nil, fmt.Errorf("Unable to parse CIDR for interface %q: %s", intf.Name, err) } if !memberOf(ip, family) 
{ - klog.V(4).Infof("Skipping: no address family match for %q on interface %q.", ip, intf.Name) + glog.V(4).Infof("Skipping: no address family match for %q on interface %q.", ip, intf.Name) continue } // TODO: Decide if should open up to allow IPv6 LLAs in future. if !ip.IsGlobalUnicast() { - klog.V(4).Infof("Skipping: non-global address %q on interface %q.", ip, intf.Name) + glog.V(4).Infof("Skipping: non-global address %q on interface %q.", ip, intf.Name) continue } - klog.V(4).Infof("Found global unicast address %q on interface %q.", ip, intf.Name) + glog.V(4).Infof("Found global unicast address %q on interface %q.", ip, intf.Name) return ip, nil } } @@ -381,23 +381,23 @@ func getAllDefaultRoutes() ([]Route, error) { // an IPv4 IP, and then will look at each IPv6 route for an IPv6 IP. func chooseHostInterfaceFromRoute(routes []Route, nw networkInterfacer) (net.IP, error) { for _, family := range []AddressFamily{familyIPv4, familyIPv6} { - klog.V(4).Infof("Looking for default routes with IPv%d addresses", uint(family)) + glog.V(4).Infof("Looking for default routes with IPv%d addresses", uint(family)) for _, route := range routes { if route.Family != family { continue } - klog.V(4).Infof("Default route transits interface %q", route.Interface) + glog.V(4).Infof("Default route transits interface %q", route.Interface) finalIP, err := getIPFromInterface(route.Interface, family, nw) if err != nil { return nil, err } if finalIP != nil { - klog.V(4).Infof("Found active IP %v ", finalIP) + glog.V(4).Infof("Found active IP %v ", finalIP) return finalIP, nil } } } - klog.V(4).Infof("No active IP found by looking at default routes") + glog.V(4).Infof("No active IP found by looking at default routes") return nil, fmt.Errorf("unable to select an IP from default routes.") } diff --git a/vendor/k8s.io/apimachinery/pkg/util/rand/rand.go b/vendor/k8s.io/apimachinery/pkg/util/rand/rand.go index 82a473bb1..9421edae8 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/rand/rand.go +++ b/vendor/k8s.io/apimachinery/pkg/util/rand/rand.go @@ -27,14 +27,7 @@ var rng = struct { sync.Mutex rand *rand.Rand }{ - rand: rand.New(rand.NewSource(time.Now().UnixNano())), -} - -// Int returns a non-negative pseudo-random int. -func Int() int { - rng.Lock() - defer rng.Unlock() - return rng.rand.Int() + rand: rand.New(rand.NewSource(time.Now().UTC().UnixNano())), } // Intn generates an integer in range [0,max). diff --git a/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go b/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go index 3c886f46c..76e203b9d 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go +++ b/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go @@ -22,7 +22,7 @@ import ( "sync" "time" - "k8s.io/klog" + "github.com/golang/glog" ) var ( @@ -62,18 +62,27 @@ func HandleCrash(additionalHandlers ...func(interface{})) { // logPanic logs the caller tree when a panic occurs. func logPanic(r interface{}) { - // Same as stdlib http server code. 
Manually allocate stack trace buffer size - // to prevent excessively large logs - const size = 64 << 10 - stacktrace := make([]byte, size) - stacktrace = stacktrace[:runtime.Stack(stacktrace, false)] + callers := getCallers(r) if _, ok := r.(string); ok { - klog.Errorf("Observed a panic: %s\n%s", r, stacktrace) + glog.Errorf("Observed a panic: %s\n%v", r, callers) } else { - klog.Errorf("Observed a panic: %#v (%v)\n%s", r, r, stacktrace) + glog.Errorf("Observed a panic: %#v (%v)\n%v", r, r, callers) } } +func getCallers(r interface{}) string { + callers := "" + for i := 0; true; i++ { + _, file, line, ok := runtime.Caller(i) + if !ok { + break + } + callers = callers + fmt.Sprintf("%v:%v\n", file, line) + } + + return callers +} + // ErrorHandlers is a list of functions which will be invoked when an unreturnable // error occurs. // TODO(lavalamp): for testability, this and the below HandleError function @@ -106,7 +115,7 @@ func HandleError(err error) { // logError prints an error with the call stack of the location it was reported func logError(err error) { - klog.ErrorDepth(2, err) + glog.ErrorDepth(2, err) } type rudimentaryErrorBackoff struct { @@ -146,17 +155,13 @@ func GetCaller() string { // handlers to handle errors and panics the same way. func RecoverFromPanic(err *error) { if r := recover(); r != nil { - // Same as stdlib http server code. Manually allocate stack trace buffer size - // to prevent excessively large logs - const size = 64 << 10 - stacktrace := make([]byte, size) - stacktrace = stacktrace[:runtime.Stack(stacktrace, false)] + callers := getCallers(r) *err = fmt.Errorf( - "recovered from panic %q. (err=%v) Call stack:\n%s", + "recovered from panic %q. (err=%v) Call stack:\n%v", r, *err, - stacktrace) + callers) } } diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/int32.go b/vendor/k8s.io/apimachinery/pkg/util/sets/int32.go deleted file mode 100644 index 584eabc8b..000000000 --- a/vendor/k8s.io/apimachinery/pkg/util/sets/int32.go +++ /dev/null @@ -1,203 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by set-gen. DO NOT EDIT. - -package sets - -import ( - "reflect" - "sort" -) - -// sets.Int32 is a set of int32s, implemented via map[int32]struct{} for minimal memory consumption. -type Int32 map[int32]Empty - -// NewInt32 creates a Int32 from a list of values. -func NewInt32(items ...int32) Int32 { - ss := Int32{} - ss.Insert(items...) - return ss -} - -// Int32KeySet creates a Int32 from a keys of a map[int32](? extends interface{}). -// If the value passed in is not actually a map, this will panic. -func Int32KeySet(theMap interface{}) Int32 { - v := reflect.ValueOf(theMap) - ret := Int32{} - - for _, keyValue := range v.MapKeys() { - ret.Insert(keyValue.Interface().(int32)) - } - return ret -} - -// Insert adds items to the set. -func (s Int32) Insert(items ...int32) { - for _, item := range items { - s[item] = Empty{} - } -} - -// Delete removes all items from the set. 
-func (s Int32) Delete(items ...int32) { - for _, item := range items { - delete(s, item) - } -} - -// Has returns true if and only if item is contained in the set. -func (s Int32) Has(item int32) bool { - _, contained := s[item] - return contained -} - -// HasAll returns true if and only if all items are contained in the set. -func (s Int32) HasAll(items ...int32) bool { - for _, item := range items { - if !s.Has(item) { - return false - } - } - return true -} - -// HasAny returns true if any items are contained in the set. -func (s Int32) HasAny(items ...int32) bool { - for _, item := range items { - if s.Has(item) { - return true - } - } - return false -} - -// Difference returns a set of objects that are not in s2 -// For example: -// s1 = {a1, a2, a3} -// s2 = {a1, a2, a4, a5} -// s1.Difference(s2) = {a3} -// s2.Difference(s1) = {a4, a5} -func (s Int32) Difference(s2 Int32) Int32 { - result := NewInt32() - for key := range s { - if !s2.Has(key) { - result.Insert(key) - } - } - return result -} - -// Union returns a new set which includes items in either s1 or s2. -// For example: -// s1 = {a1, a2} -// s2 = {a3, a4} -// s1.Union(s2) = {a1, a2, a3, a4} -// s2.Union(s1) = {a1, a2, a3, a4} -func (s1 Int32) Union(s2 Int32) Int32 { - result := NewInt32() - for key := range s1 { - result.Insert(key) - } - for key := range s2 { - result.Insert(key) - } - return result -} - -// Intersection returns a new set which includes the item in BOTH s1 and s2 -// For example: -// s1 = {a1, a2} -// s2 = {a2, a3} -// s1.Intersection(s2) = {a2} -func (s1 Int32) Intersection(s2 Int32) Int32 { - var walk, other Int32 - result := NewInt32() - if s1.Len() < s2.Len() { - walk = s1 - other = s2 - } else { - walk = s2 - other = s1 - } - for key := range walk { - if other.Has(key) { - result.Insert(key) - } - } - return result -} - -// IsSuperset returns true if and only if s1 is a superset of s2. -func (s1 Int32) IsSuperset(s2 Int32) bool { - for item := range s2 { - if !s1.Has(item) { - return false - } - } - return true -} - -// Equal returns true if and only if s1 is equal (as a set) to s2. -// Two sets are equal if their membership is identical. -// (In practice, this means same elements, order doesn't matter) -func (s1 Int32) Equal(s2 Int32) bool { - return len(s1) == len(s2) && s1.IsSuperset(s2) -} - -type sortableSliceOfInt32 []int32 - -func (s sortableSliceOfInt32) Len() int { return len(s) } -func (s sortableSliceOfInt32) Less(i, j int) bool { return lessInt32(s[i], s[j]) } -func (s sortableSliceOfInt32) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - -// List returns the contents as a sorted int32 slice. -func (s Int32) List() []int32 { - res := make(sortableSliceOfInt32, 0, len(s)) - for key := range s { - res = append(res, key) - } - sort.Sort(res) - return []int32(res) -} - -// UnsortedList returns the slice with contents in random order. -func (s Int32) UnsortedList() []int32 { - res := make([]int32, 0, len(s)) - for key := range s { - res = append(res, key) - } - return res -} - -// Returns a single element from the set. -func (s Int32) PopAny() (int32, bool) { - for key := range s { - s.Delete(key) - return key, true - } - var zeroValue int32 - return zeroValue, false -} - -// Len returns the size of the set. 
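This downgrade deletes the generated sets.Int32 type outright, so any caller that needs an int32 set must carry the underlying map-as-set pattern itself. A minimal stand-in, assuming hypothetical caller code that is not part of the vendored package:

package main

import "fmt"

// int32Set mirrors the shape of the removed sets.Int32: a map used as a set.
type int32Set map[int32]struct{}

// insert adds items to the set, matching sets.Int32.Insert semantics.
func (s int32Set) insert(items ...int32) {
	for _, item := range items {
		s[item] = struct{}{}
	}
}

// has reports whether item is in the set, matching sets.Int32.Has semantics.
func (s int32Set) has(item int32) bool {
	_, ok := s[item]
	return ok
}

func main() {
	s := int32Set{}
	s.insert(1, 2, 3)
	fmt.Println(s.has(2), s.has(7)) // prints: true false
}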
-func (s Int32) Len() int { - return len(s) -} - -func lessInt32(lhs, rhs int32) bool { - return lhs < rhs -} diff --git a/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go b/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go index 2dd99992d..e0d171542 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go +++ b/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go @@ -87,8 +87,6 @@ func IsFullyQualifiedName(fldPath *field.Path, name string) field.ErrorList { const labelValueFmt string = "(" + qualifiedNameFmt + ")?" const labelValueErrMsg string = "a valid label must be an empty string or consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character" - -// LabelValueMaxLength is a label's max length const LabelValueMaxLength int = 63 var labelValueRegexp = regexp.MustCompile("^" + labelValueFmt + "$") @@ -109,8 +107,6 @@ func IsValidLabelValue(value string) []string { const dns1123LabelFmt string = "[a-z0-9]([-a-z0-9]*[a-z0-9])?" const dns1123LabelErrMsg string = "a DNS-1123 label must consist of lower case alphanumeric characters or '-', and must start and end with an alphanumeric character" - -// DNS1123LabelMaxLength is a label's max length in DNS (RFC 1123) const DNS1123LabelMaxLength int = 63 var dns1123LabelRegexp = regexp.MustCompile("^" + dns1123LabelFmt + "$") @@ -130,8 +126,6 @@ func IsDNS1123Label(value string) []string { const dns1123SubdomainFmt string = dns1123LabelFmt + "(\\." + dns1123LabelFmt + ")*" const dns1123SubdomainErrorMsg string = "a DNS-1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character" - -// DNS1123SubdomainMaxLength is a subdomain's max length in DNS (RFC 1123) const DNS1123SubdomainMaxLength int = 253 var dns1123SubdomainRegexp = regexp.MustCompile("^" + dns1123SubdomainFmt + "$") @@ -151,8 +145,6 @@ func IsDNS1123Subdomain(value string) []string { const dns1035LabelFmt string = "[a-z]([-a-z0-9]*[a-z0-9])?" const dns1035LabelErrMsg string = "a DNS-1035 label must consist of lower case alphanumeric characters or '-', start with an alphabetic character, and end with an alphanumeric character" - -// DNS1035LabelMaxLength is a label's max length in DNS (RFC 1035) const DNS1035LabelMaxLength int = 63 var dns1035LabelRegexp = regexp.MustCompile("^" + dns1035LabelFmt + "$") @@ -290,7 +282,6 @@ const percentErrMsg string = "a valid percent string must be a numeric string fo var percentRegexp = regexp.MustCompile("^" + percentFmt + "$") -// IsValidPercent checks that string is in the form of a percentage func IsValidPercent(percent string) []string { if !percentRegexp.MatchString(percent) { return []string{RegexError(percentErrMsg, percentFmt, "1%", "93%")} @@ -400,13 +391,12 @@ func hasChDirPrefix(value string) []string { return errs } -// IsValidSocketAddr checks that string represents a valid socket address +// IsValidSocketAddr checks that a string is a valid socket address // as defined in RFC 789. (e.g 0.0.0.0:10254 or [::]:10254)) func IsValidSocketAddr(value string) []string { var errs []string ip, port, err := net.SplitHostPort(value) if err != nil { - errs = append(errs, "must be a valid socket address format, (e.g. 0.0.0.0:10254 or [::]:10254)") + return append(errs, "must be a valid socket address format, (e.g. 
0.0.0.0:10254 or [::]:10254)") - return errs } portInt, _ := strconv.Atoi(port) diff --git a/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go b/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go index bc6b18d2b..ca61168cd 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go +++ b/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go @@ -88,15 +88,6 @@ func Until(f func(), period time.Duration, stopCh <-chan struct{}) { JitterUntil(f, period, 0.0, true, stopCh) } -// UntilWithContext loops until context is done, running f every period. -// -// UntilWithContext is syntactic sugar on top of JitterUntilWithContext -// with zero jitter factor and with sliding = true (which means the timer -// for period starts after the f completes). -func UntilWithContext(ctx context.Context, f func(context.Context), period time.Duration) { - JitterUntilWithContext(ctx, f, period, 0.0, true) -} - // NonSlidingUntil loops until stop channel is closed, running f every // period. // @@ -107,16 +98,6 @@ func NonSlidingUntil(f func(), period time.Duration, stopCh <-chan struct{}) { JitterUntil(f, period, 0.0, false, stopCh) } -// NonSlidingUntilWithContext loops until context is done, running f every -// period. -// -// NonSlidingUntilWithContext is syntactic sugar on top of JitterUntilWithContext -// with zero jitter factor, with sliding = false (meaning the timer for period -// starts at the same time as the function starts). -func NonSlidingUntilWithContext(ctx context.Context, f func(context.Context), period time.Duration) { - JitterUntilWithContext(ctx, f, period, 0.0, false) -} - // JitterUntil loops until stop channel is closed, running f every period. // // If jitterFactor is positive, the period is jittered before every run of f. @@ -170,19 +151,6 @@ func JitterUntil(f func(), period time.Duration, jitterFactor float64, sliding b } } -// JitterUntilWithContext loops until context is done, running f every period. -// -// If jitterFactor is positive, the period is jittered before every run of f. -// If jitterFactor is not positive, the period is unchanged and not jittered. -// -// If sliding is true, the period is computed after f runs. If it is false then -// period includes the runtime for f. -// -// Cancel context to stop. f may not be invoked if context is already expired. -func JitterUntilWithContext(ctx context.Context, f func(context.Context), period time.Duration, jitterFactor float64, sliding bool) { - JitterUntil(func() { f(ctx) }, period, jitterFactor, sliding, ctx.Done()) -} - // Jitter returns a time.Duration between duration and duration + maxFactor * // duration. // @@ -205,68 +173,10 @@ type ConditionFunc func() (done bool, err error) // Backoff holds parameters applied to a Backoff function. type Backoff struct { - // The initial duration. - Duration time.Duration - // Duration is multiplied by factor each iteration. Must be greater - // than or equal to zero. - Factor float64 - // The amount of jitter applied each iteration. Jitter is applied after - // cap. - Jitter float64 - // The number of steps before duration stops changing. If zero, initial - // duration is always used. Used for exponential backoff in combination - // with Factor. - Steps int - // The returned duration will never be greater than cap *before* jitter - // is applied. The actual maximum cap is `cap * (1.0 + jitter)`. - Cap time.Duration -} - -// Step returns the next interval in the exponential backoff. This method -will mutate the provided backoff.
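The deletions here shrink Backoff to four plain fields (Duration, Factor, Jitter, Steps) and drop Step and Cap, so callers drive retries exclusively through ExponentialBackoff. A rough sketch of caller usage against that downgraded API (values and the fake condition are illustrative, not taken from this patch):

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	attempts := 0
	backoff := wait.Backoff{
		Duration: 100 * time.Millisecond, // base wait before the second attempt
		Factor:   2.0,                    // double the wait each iteration
		Jitter:   0.1,                    // add up to 10% random jitter
		Steps:    5,                      // give up after five attempts
	}
	err := wait.ExponentialBackoff(backoff, func() (bool, error) {
		attempts++
		// true stops the retries; a non-nil error aborts immediately
		return attempts >= 3, nil
	})
	fmt.Println(attempts, err) // prints: 3 <nil>
}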
-func (b *Backoff) Step() time.Duration { - if b.Steps < 1 { - if b.Jitter > 0 { - return Jitter(b.Duration, b.Jitter) - } - return b.Duration - } - b.Steps-- - - duration := b.Duration - - // calculate the next step - if b.Factor != 0 { - b.Duration = time.Duration(float64(b.Duration) * b.Factor) - if b.Cap > 0 && b.Duration > b.Cap { - b.Duration = b.Cap - b.Steps = 0 - } - } - - if b.Jitter > 0 { - duration = Jitter(duration, b.Jitter) - } - return duration -} - -// contextForChannel derives a child context from a parent channel. -// -// The derived context's Done channel is closed when the returned cancel function -// is called or when the parent channel is closed, whichever happens first. -// -// Note the caller must *always* call the CancelFunc, otherwise resources may be leaked. -func contextForChannel(parentCh <-chan struct{}) (context.Context, context.CancelFunc) { - ctx, cancel := context.WithCancel(context.Background()) - - go func() { - select { - case <-parentCh: - cancel() - case <-ctx.Done(): - } - }() - return ctx, cancel + Duration time.Duration // the base duration + Factor float64 // Duration is multiplied by factor each iteration + Jitter float64 // The amount of jitter applied each iteration + Steps int // Exit with error after this many steps } // ExponentialBackoff repeats a condition check with exponential backoff. @@ -280,14 +190,19 @@ func contextForChannel(parentCh <-chan struct{}) (context.Context, context.Cance // If the condition never returns true, ErrWaitTimeout is returned. All other // errors terminate immediately. func ExponentialBackoff(backoff Backoff, condition ConditionFunc) error { - for backoff.Steps > 0 { + duration := backoff.Duration + for i := 0; i < backoff.Steps; i++ { + if i != 0 { + adjusted := duration + if backoff.Jitter > 0.0 { + adjusted = Jitter(duration, backoff.Jitter) + } + time.Sleep(adjusted) + duration = time.Duration(float64(duration) * backoff.Factor) + } if ok, err := condition(); err != nil || ok { return err } - if backoff.Steps == 1 { - break - } - time.Sleep(backoff.Step()) } return ErrWaitTimeout } @@ -372,9 +287,7 @@ func PollImmediateInfinite(interval time.Duration, condition ConditionFunc) erro // PollUntil always waits interval before the first run of 'condition'. // 'condition' will always be invoked at least once. func PollUntil(interval time.Duration, condition ConditionFunc, stopCh <-chan struct{}) error { - ctx, cancel := contextForChannel(stopCh) - defer cancel() - return WaitFor(poller(interval, 0), condition, ctx.Done()) + return WaitFor(poller(interval, 0), condition, stopCh) } // PollImmediateUntil tries a condition func until it returns true, an error or stopCh is closed. @@ -404,48 +317,36 @@ type WaitFunc func(done <-chan struct{}) <-chan struct{} // WaitFor continually checks 'fn' as driven by 'wait'. // // WaitFor gets a channel from 'wait()'', and then invokes 'fn' once for every value -// placed on the channel and once more when the channel is closed. If the channel is closed -// and 'fn' returns false without error, WaitFor returns ErrWaitTimeout. +// placed on the channel and once more when the channel is closed. // -// If 'fn' returns an error the loop ends and that error is returned. If +// If 'fn' returns an error the loop ends and that error is returned, and if // 'fn' returns true the loop ends and nil is returned. // -// ErrWaitTimeout will be returned if the 'done' channel is closed without fn ever +// ErrWaitTimeout will be returned if the channel is closed without fn ever // returning true. 
-// -// When the done channel is closed, because the golang `select` statement is -// "uniform pseudo-random", the `fn` might still run one or multiple time, -// though eventually `WaitFor` will return. func WaitFor(wait WaitFunc, fn ConditionFunc, done <-chan struct{}) error { - stopCh := make(chan struct{}) - defer close(stopCh) - c := wait(stopCh) + c := wait(done) for { - select { - case _, open := <-c: - ok, err := fn() - if err != nil { - return err - } - if ok { - return nil - } - if !open { - return ErrWaitTimeout - } - case <-done: - return ErrWaitTimeout + _, open := <-c + ok, err := fn() + if err != nil { + return err + } + if ok { + return nil + } + if !open { + break } } + return ErrWaitTimeout } // poller returns a WaitFunc that will send to the channel every interval until // timeout has elapsed and then closes the channel. // // Over very short intervals you may receive no ticks before the channel is -// closed. A timeout of 0 is interpreted as an infinity, and in such a case -// it would be the caller's responsibility to close the done channel. -// Failure to do so would result in a leaked goroutine. +// closed. A timeout of 0 is interpreted as an infinity. // // Output ticks are not buffered. If the channel is not ready to receive an // item, the tick is skipped. diff --git a/vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go b/vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go index a9a3853ac..3cd85515d 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go +++ b/vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go @@ -26,8 +26,8 @@ import ( "strings" "unicode" - "k8s.io/klog" - "sigs.k8s.io/yaml" + "github.com/ghodss/yaml" + "github.com/golang/glog" ) // ToJSON converts a single YAML document into a JSON document @@ -217,9 +217,11 @@ func (d *YAMLOrJSONDecoder) Decode(into interface{}) error { if d.decoder == nil { buffer, origData, isJSON := GuessJSONStream(d.r, d.bufferSize) if isJSON { + glog.V(4).Infof("decoding stream as JSON") d.decoder = json.NewDecoder(buffer) d.rawData = origData } else { + glog.V(4).Infof("decoding stream as YAML") d.decoder = NewYAMLToJSONDecoder(buffer) } } @@ -228,7 +230,7 @@ func (d *YAMLOrJSONDecoder) Decode(into interface{}) error { if syntax, ok := err.(*json.SyntaxError); ok { data, readErr := ioutil.ReadAll(jsonDecoder.Buffered()) if readErr != nil { - klog.V(4).Infof("reading stream failed: %v", readErr) + glog.V(4).Infof("reading stream failed: %v", readErr) } js := string(data) diff --git a/vendor/k8s.io/apimachinery/pkg/version/doc.go b/vendor/k8s.io/apimachinery/pkg/version/doc.go index 29574fd6d..5e77af7ea 100644 --- a/vendor/k8s.io/apimachinery/pkg/version/doc.go +++ b/vendor/k8s.io/apimachinery/pkg/version/doc.go @@ -14,7 +14,6 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:openapi-gen=true - // Package version supplies the type for version information collected at build time. +// +k8s:openapi-gen=true package version // import "k8s.io/apimachinery/pkg/version" diff --git a/vendor/k8s.io/apimachinery/pkg/watch/streamwatcher.go b/vendor/k8s.io/apimachinery/pkg/watch/streamwatcher.go index 8af256eb1..93bb1cdf7 100644 --- a/vendor/k8s.io/apimachinery/pkg/watch/streamwatcher.go +++ b/vendor/k8s.io/apimachinery/pkg/watch/streamwatcher.go @@ -17,12 +17,10 @@ limitations under the License. 
package watch import ( - "fmt" "io" "sync" - "k8s.io/klog" - + "github.com/golang/glog" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/net" utilruntime "k8s.io/apimachinery/pkg/util/runtime" @@ -41,28 +39,19 @@ type Decoder interface { Close() } -// Reporter hides the details of how an error is turned into a runtime.Object for -// reporting on a watch stream since this package may not import a higher level report. -type Reporter interface { - // AsObject must convert err into a valid runtime.Object for the watch stream. - AsObject(err error) runtime.Object -} - // StreamWatcher turns any stream for which you can write a Decoder interface // into a watch.Interface. type StreamWatcher struct { sync.Mutex - source Decoder - reporter Reporter - result chan Event - stopped bool + source Decoder + result chan Event + stopped bool } // NewStreamWatcher creates a StreamWatcher from the given decoder. -func NewStreamWatcher(d Decoder, r Reporter) *StreamWatcher { +func NewStreamWatcher(d Decoder) *StreamWatcher { sw := &StreamWatcher{ - source: d, - reporter: r, + source: d, // It's easy for a consumer to add buffering via an extra // goroutine/channel, but impossible for them to remove it, // so nonbuffered is better. @@ -111,15 +100,13 @@ func (sw *StreamWatcher) receive() { case io.EOF: // watch closed normally case io.ErrUnexpectedEOF: - klog.V(1).Infof("Unexpected EOF during watch stream event decoding: %v", err) + glog.V(1).Infof("Unexpected EOF during watch stream event decoding: %v", err) default: + msg := "Unable to decode an event from the watch stream: %v" if net.IsProbableEOF(err) { - klog.V(5).Infof("Unable to decode an event from the watch stream: %v", err) + glog.V(5).Infof(msg, err) } else { - sw.result <- Event{ - Type: Error, - Object: sw.reporter.AsObject(fmt.Errorf("unable to decode an event from the watch stream: %v", err)), - } + glog.Errorf(msg, err) } } return diff --git a/vendor/k8s.io/apimachinery/pkg/watch/watch.go b/vendor/k8s.io/apimachinery/pkg/watch/watch.go index 3945be3ae..a627d1d57 100644 --- a/vendor/k8s.io/apimachinery/pkg/watch/watch.go +++ b/vendor/k8s.io/apimachinery/pkg/watch/watch.go @@ -20,7 +20,7 @@ import ( "fmt" "sync" - "k8s.io/klog" + "github.com/golang/glog" "k8s.io/apimachinery/pkg/runtime" ) @@ -44,7 +44,6 @@ const ( Added EventType = "ADDED" Modified EventType = "MODIFIED" Deleted EventType = "DELETED" - Bookmark EventType = "BOOKMARK" Error EventType = "ERROR" DefaultChanSize int32 = 100 @@ -58,10 +57,6 @@ type Event struct { // Object is: // * If Type is Added or Modified: the new state of the object. // * If Type is Deleted: the state of the object immediately before deletion. - // * If Type is Bookmark: the object (instance of a type being watched) where - // only ResourceVersion field is set. On successful restart of watch from a - // bookmark resourceVersion, client is guaranteed to not get repeat event - // nor miss any events. // * If Type is Error: *api.Status is recommended; other types may make sense // depending on context. 
Object runtime.Object @@ -111,7 +106,7 @@ func (f *FakeWatcher) Stop() { f.Lock() defer f.Unlock() if !f.Stopped { - klog.V(4).Infof("Stopping fake watcher.") + glog.V(4).Infof("Stopping fake watcher.") close(f.result) f.Stopped = true } @@ -178,7 +173,7 @@ func (f *RaceFreeFakeWatcher) Stop() { f.Lock() defer f.Unlock() if !f.Stopped { - klog.V(4).Infof("Stopping fake watcher.") + glog.V(4).Infof("Stopping fake watcher.") close(f.result) f.Stopped = true } diff --git a/vendor/k8s.io/client-go/discovery/cached_discovery.go b/vendor/k8s.io/client-go/discovery/cached_discovery.go new file mode 100644 index 000000000..d38a0bbda --- /dev/null +++ b/vendor/k8s.io/client-go/discovery/cached_discovery.go @@ -0,0 +1,282 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package discovery + +import ( + "errors" + "io/ioutil" + "net/http" + "os" + "path/filepath" + "sync" + "time" + + "github.com/golang/glog" + "github.com/googleapis/gnostic/OpenAPIv2" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/version" + "k8s.io/client-go/kubernetes/scheme" + restclient "k8s.io/client-go/rest" +) + +// CachedDiscoveryClient implements the functions that discovery server-supported API groups, +// versions and resources. +type CachedDiscoveryClient struct { + delegate DiscoveryInterface + + // cacheDirectory is the directory where discovery docs are held. It must be unique per host:port combination to work well. + cacheDirectory string + + // ttl is how long the cache should be considered valid + ttl time.Duration + + // mutex protects the variables below + mutex sync.Mutex + + // ourFiles are all filenames of cache files created by this process + ourFiles map[string]struct{} + // invalidated is true if all cache files should be ignored that are not ours (e.g. after Invalidate() was called) + invalidated bool + // fresh is true if all used cache files were ours + fresh bool +} + +var _ CachedDiscoveryInterface = &CachedDiscoveryClient{} + +// ServerResourcesForGroupVersion returns the supported resources for a group and version. +func (d *CachedDiscoveryClient) ServerResourcesForGroupVersion(groupVersion string) (*metav1.APIResourceList, error) { + filename := filepath.Join(d.cacheDirectory, groupVersion, "serverresources.json") + cachedBytes, err := d.getCachedFile(filename) + // don't fail on errors, we either don't have a file or won't be able to run the cached check. Either way we can fallback. 
+ if err == nil { + cachedResources := &metav1.APIResourceList{} + if err := runtime.DecodeInto(scheme.Codecs.UniversalDecoder(), cachedBytes, cachedResources); err == nil { + glog.V(10).Infof("returning cached discovery info from %v", filename) + return cachedResources, nil + } + } + + liveResources, err := d.delegate.ServerResourcesForGroupVersion(groupVersion) + if err != nil { + glog.V(3).Infof("skipped caching discovery info due to %v", err) + return liveResources, err + } + if liveResources == nil || len(liveResources.APIResources) == 0 { + glog.V(3).Infof("skipped caching discovery info, no resources found") + return liveResources, err + } + + if err := d.writeCachedFile(filename, liveResources); err != nil { + glog.V(1).Infof("failed to write cache to %v due to %v", filename, err) + } + + return liveResources, nil +} + +// ServerResources returns the supported resources for all groups and versions. +func (d *CachedDiscoveryClient) ServerResources() ([]*metav1.APIResourceList, error) { + return ServerResources(d) +} + +func (d *CachedDiscoveryClient) ServerGroups() (*metav1.APIGroupList, error) { + filename := filepath.Join(d.cacheDirectory, "servergroups.json") + cachedBytes, err := d.getCachedFile(filename) + // don't fail on errors, we either don't have a file or won't be able to run the cached check. Either way we can fallback. + if err == nil { + cachedGroups := &metav1.APIGroupList{} + if err := runtime.DecodeInto(scheme.Codecs.UniversalDecoder(), cachedBytes, cachedGroups); err == nil { + glog.V(10).Infof("returning cached discovery info from %v", filename) + return cachedGroups, nil + } + } + + liveGroups, err := d.delegate.ServerGroups() + if err != nil { + glog.V(3).Infof("skipped caching discovery info due to %v", err) + return liveGroups, err + } + if liveGroups == nil || len(liveGroups.Groups) == 0 { + glog.V(3).Infof("skipped caching discovery info, no groups found") + return liveGroups, err + } + + if err := d.writeCachedFile(filename, liveGroups); err != nil { + glog.V(1).Infof("failed to write cache to %v due to %v", filename, err) + } + + return liveGroups, nil +} + +func (d *CachedDiscoveryClient) getCachedFile(filename string) ([]byte, error) { + // after invalidation ignore cache files not created by this process + d.mutex.Lock() + _, ourFile := d.ourFiles[filename] + if d.invalidated && !ourFile { + d.mutex.Unlock() + return nil, errors.New("cache invalidated") + } + d.mutex.Unlock() + + file, err := os.Open(filename) + if err != nil { + return nil, err + } + defer file.Close() + + fileInfo, err := file.Stat() + if err != nil { + return nil, err + } + + if time.Now().After(fileInfo.ModTime().Add(d.ttl)) { + return nil, errors.New("cache expired") + } + + // the cache is present and its valid. Try to read and use it. 
+ cachedBytes, err := ioutil.ReadAll(file) + if err != nil { + return nil, err + } + + d.mutex.Lock() + defer d.mutex.Unlock() + d.fresh = d.fresh && ourFile + + return cachedBytes, nil +} + +func (d *CachedDiscoveryClient) writeCachedFile(filename string, obj runtime.Object) error { + if err := os.MkdirAll(filepath.Dir(filename), 0755); err != nil { + return err + } + + bytes, err := runtime.Encode(scheme.Codecs.LegacyCodec(), obj) + if err != nil { + return err + } + + f, err := ioutil.TempFile(filepath.Dir(filename), filepath.Base(filename)+".") + if err != nil { + return err + } + defer os.Remove(f.Name()) + _, err = f.Write(bytes) + if err != nil { + return err + } + + err = os.Chmod(f.Name(), 0755) + if err != nil { + return err + } + + name := f.Name() + err = f.Close() + if err != nil { + return err + } + + // atomic rename + d.mutex.Lock() + defer d.mutex.Unlock() + err = os.Rename(name, filename) + if err == nil { + d.ourFiles[filename] = struct{}{} + } + return err +} + +func (d *CachedDiscoveryClient) RESTClient() restclient.Interface { + return d.delegate.RESTClient() +} + +func (d *CachedDiscoveryClient) ServerPreferredResources() ([]*metav1.APIResourceList, error) { + return ServerPreferredResources(d) +} + +func (d *CachedDiscoveryClient) ServerPreferredNamespacedResources() ([]*metav1.APIResourceList, error) { + return ServerPreferredNamespacedResources(d) +} + +func (d *CachedDiscoveryClient) ServerVersion() (*version.Info, error) { + return d.delegate.ServerVersion() +} + +func (d *CachedDiscoveryClient) OpenAPISchema() (*openapi_v2.Document, error) { + return d.delegate.OpenAPISchema() +} + +func (d *CachedDiscoveryClient) Fresh() bool { + d.mutex.Lock() + defer d.mutex.Unlock() + + return d.fresh +} + +func (d *CachedDiscoveryClient) Invalidate() { + d.mutex.Lock() + defer d.mutex.Unlock() + + d.ourFiles = map[string]struct{}{} + d.fresh = true + d.invalidated = true +} + +// NewCachedDiscoveryClientForConfig creates a new DiscoveryClient for the given config, and wraps +// the created client in a CachedDiscoveryClient. The provided configuration is updated with a +// custom transport that understands cache responses. +// We receive two distinct cache directories for now, in order to preserve old behavior +// which makes use of the --cache-dir flag value for storing cache data from the CacheRoundTripper, +// and makes use of the hardcoded destination (~/.kube/cache/discovery/...) for storing +// CachedDiscoveryClient cache data. If httpCacheDir is empty, the restconfig's transport will not +// be updated with a roundtripper that understands cache responses. +// If discoveryCacheDir is empty, cached server resource data will be looked up in the current directory. +// TODO(juanvallejo): the value of "--cache-dir" should be honored. Consolidate discoveryCacheDir with httpCacheDir +// so that server resources and http-cache data are stored in the same location, provided via config flags. +func NewCachedDiscoveryClientForConfig(config *restclient.Config, discoveryCacheDir, httpCacheDir string, ttl time.Duration) (*CachedDiscoveryClient, error) { + if len(httpCacheDir) > 0 { + // update the given restconfig with a custom roundtripper that + // understands how to handle cache responses. 
+ wt := config.WrapTransport + config.WrapTransport = func(rt http.RoundTripper) http.RoundTripper { + if wt != nil { + rt = wt(rt) + } + return newCacheRoundTripper(httpCacheDir, rt) + } + } + + discoveryClient, err := NewDiscoveryClientForConfig(config) + if err != nil { + return nil, err + } + + return newCachedDiscoveryClient(discoveryClient, discoveryCacheDir, ttl), nil +} + +// NewCachedDiscoveryClient creates a new DiscoveryClient. cacheDirectory is the directory where discovery docs are held. It must be unique per host:port combination to work well. +func newCachedDiscoveryClient(delegate DiscoveryInterface, cacheDirectory string, ttl time.Duration) *CachedDiscoveryClient { + return &CachedDiscoveryClient{ + delegate: delegate, + cacheDirectory: cacheDirectory, + ttl: ttl, + ourFiles: map[string]struct{}{}, + fresh: true, + } +} diff --git a/vendor/k8s.io/client-go/discovery/discovery_client.go b/vendor/k8s.io/client-go/discovery/discovery_client.go index 61b9c4481..a96602974 100644 --- a/vendor/k8s.io/client-go/discovery/discovery_client.go +++ b/vendor/k8s.io/client-go/discovery/discovery_client.go @@ -26,7 +26,7 @@ import ( "time" "github.com/golang/protobuf/proto" - openapi_v2 "github.com/googleapis/gnostic/OpenAPIv2" + "github.com/googleapis/gnostic/OpenAPIv2" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -60,9 +60,6 @@ type DiscoveryInterface interface { } // CachedDiscoveryInterface is a DiscoveryInterface with cache invalidation and freshness. -// Note that If the ServerResourcesForGroupVersion method returns a cache miss -// error, the user needs to explicitly call Invalidate to clear the cache, -// otherwise the same cache miss error will be returned next time. type CachedDiscoveryInterface interface { DiscoveryInterface // Fresh is supposed to tell the caller whether or not to retry if the cache @@ -71,8 +68,7 @@ type CachedDiscoveryInterface interface { // TODO: this needs to be revisited, this interface can't be locked properly // and doesn't make a lot of sense. Fresh() bool - // Invalidate enforces that no cached data that is older than the current time - // is used. + // Invalidate enforces that no cached data is used in the future that is older than the current time. Invalidate() } @@ -88,28 +84,12 @@ type ServerResourcesInterface interface { // ServerResourcesForGroupVersion returns the supported resources for a group and version. ServerResourcesForGroupVersion(groupVersion string) (*metav1.APIResourceList, error) // ServerResources returns the supported resources for all groups and versions. - // - // The returned resource list might be non-nil with partial results even in the case of - // non-nil error. - // - // Deprecated: use ServerGroupsAndResources instead. ServerResources() ([]*metav1.APIResourceList, error) - // ServerResources returns the supported groups and resources for all groups and versions. - // - // The returned group and resource lists might be non-nil with partial results even in the - // case of non-nil error. - ServerGroupsAndResources() ([]*metav1.APIGroup, []*metav1.APIResourceList, error) // ServerPreferredResources returns the supported resources with the version preferred by the // server. - // - // The returned group and resource lists might be non-nil with partial results even in the - // case of non-nil error. ServerPreferredResources() ([]*metav1.APIResourceList, error) // ServerPreferredNamespacedResources returns the supported namespaced resources with the // version preferred by the server. 
- // - // The returned resource list might be non-nil with partial results even in the case of - // non-nil error. ServerPreferredNamespacedResources() ([]*metav1.APIResourceList, error) } @@ -207,18 +187,14 @@ func (d *DiscoveryClient) ServerResourcesForGroupVersion(groupVersion string) (r return resources, nil } -// ServerResources returns the supported resources for all groups and versions. -// Deprecated: use ServerGroupsAndResources instead. -func (d *DiscoveryClient) ServerResources() ([]*metav1.APIResourceList, error) { - _, rs, err := d.ServerGroupsAndResources() - return rs, err +// serverResources returns the supported resources for all groups and versions. +func (d *DiscoveryClient) serverResources() ([]*metav1.APIResourceList, error) { + return ServerResources(d) } -// ServerGroupsAndResources returns the supported resources for all groups and versions. -func (d *DiscoveryClient) ServerGroupsAndResources() ([]*metav1.APIGroup, []*metav1.APIResourceList, error) { - return withRetries(defaultRetries, func() ([]*metav1.APIGroup, []*metav1.APIResourceList, error) { - return ServerGroupsAndResources(d) - }) +// ServerResources returns the supported resources for all groups and versions. +func (d *DiscoveryClient) ServerResources() ([]*metav1.APIResourceList, error) { + return withRetries(defaultRetries, d.serverResources) } // ErrGroupDiscoveryFailed is returned if one or more API groups fail to load. @@ -244,28 +220,23 @@ func IsGroupDiscoveryFailedError(err error) bool { return err != nil && ok } -// ServerResources uses the provided discovery interface to look up supported resources for all groups and versions. -// Deprecated: use ServerGroupsAndResources instead. -func ServerResources(d DiscoveryInterface) ([]*metav1.APIResourceList, error) { - _, rs, err := ServerGroupsAndResources(d) - return rs, err +// serverPreferredResources returns the supported resources with the version preferred by the server. +func (d *DiscoveryClient) serverPreferredResources() ([]*metav1.APIResourceList, error) { + return ServerPreferredResources(d) } -func ServerGroupsAndResources(d DiscoveryInterface) ([]*metav1.APIGroup, []*metav1.APIResourceList, error) { - sgs, err := d.ServerGroups() - if sgs == nil { - return nil, nil, err - } - resultGroups := []*metav1.APIGroup{} - for i := range sgs.Groups { - resultGroups = append(resultGroups, &sgs.Groups[i]) +// ServerResources uses the provided discovery interface to look up supported resources for all groups and versions. 
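On the consumer side, the restored pre-ServerGroupsAndResources surface is driven as in the sketch below; the kubeconfig path is a placeholder and error handling is abbreviated.

package main

import (
	"fmt"

	"k8s.io/client-go/discovery"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// assumes a standard kubeconfig; the path is illustrative
	config, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		panic(err)
	}
	dc, err := discovery.NewDiscoveryClientForConfig(config)
	if err != nil {
		panic(err)
	}
	// ServerResources retries internally (withRetries) when some groups fail
	lists, err := dc.ServerResources()
	if err != nil && !discovery.IsGroupDiscoveryFailedError(err) {
		panic(err)
	}
	for _, l := range lists {
		fmt.Println(l.GroupVersion, len(l.APIResources))
	}
}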
+func ServerResources(d DiscoveryInterface) ([]*metav1.APIResourceList, error) { + apiGroups, err := d.ServerGroups() + if err != nil { + return nil, err } - groupVersionResources, failedGroups := fetchGroupVersionResources(d, sgs) + groupVersionResources, failedGroups := fetchGroupVersionResources(d, apiGroups) // order results by group/version discovery order result := []*metav1.APIResourceList{} - for _, apiGroup := range sgs.Groups { + for _, apiGroup := range apiGroups.Groups { for _, version := range apiGroup.Versions { gv := schema.GroupVersion{Group: apiGroup.Name, Version: version.Version} if resources, ok := groupVersionResources[gv]; ok { @@ -275,10 +246,10 @@ func ServerGroupsAndResources(d DiscoveryInterface) ([]*metav1.APIGroup, []*meta } if len(failedGroups) == 0 { - return resultGroups, result, nil + return result, nil } - return resultGroups, result, &ErrGroupDiscoveryFailed{Groups: failedGroups} + return result, &ErrGroupDiscoveryFailed{Groups: failedGroups} } // ServerPreferredResources uses the provided discovery interface to look up preferred resources @@ -292,8 +263,8 @@ func ServerPreferredResources(d DiscoveryInterface) ([]*metav1.APIResourceList, result := []*metav1.APIResourceList{} grVersions := map[schema.GroupResource]string{} // selected version of a GroupResource - grAPIResources := map[schema.GroupResource]*metav1.APIResource{} // selected APIResource for a GroupResource - gvAPIResourceLists := map[schema.GroupVersion]*metav1.APIResourceList{} // blueprint for a APIResourceList for later grouping + grApiResources := map[schema.GroupResource]*metav1.APIResource{} // selected APIResource for a GroupResource + gvApiResourceLists := map[schema.GroupVersion]*metav1.APIResourceList{} // blueprint for a APIResourceList for later grouping for _, apiGroup := range serverGroupList.Groups { for _, version := range apiGroup.Versions { @@ -305,11 +276,11 @@ func ServerPreferredResources(d DiscoveryInterface) ([]*metav1.APIResourceList, } // create empty list which is filled later in another loop - emptyAPIResourceList := metav1.APIResourceList{ + emptyApiResourceList := metav1.APIResourceList{ GroupVersion: version.GroupVersion, } - gvAPIResourceLists[groupVersion] = &emptyAPIResourceList - result = append(result, &emptyAPIResourceList) + gvApiResourceLists[groupVersion] = &emptyApiResourceList + result = append(result, &emptyApiResourceList) for i := range apiResourceList.APIResources { apiResource := &apiResourceList.APIResources[i] @@ -317,21 +288,21 @@ func ServerPreferredResources(d DiscoveryInterface) ([]*metav1.APIResourceList, continue } gv := schema.GroupResource{Group: apiGroup.Name, Resource: apiResource.Name} - if _, ok := grAPIResources[gv]; ok && version.Version != apiGroup.PreferredVersion.Version { + if _, ok := grApiResources[gv]; ok && version.Version != apiGroup.PreferredVersion.Version { // only override with preferred version continue } grVersions[gv] = version.Version - grAPIResources[gv] = apiResource + grApiResources[gv] = apiResource } } } // group selected APIResources according to GroupVersion into APIResourceLists - for groupResource, apiResource := range grAPIResources { + for groupResource, apiResource := range grApiResources { version := grVersions[groupResource] groupVersion := schema.GroupVersion{Group: groupResource.Group, Version: version} - apiResourceList := gvAPIResourceLists[groupVersion] + apiResourceList := gvApiResourceLists[groupVersion] apiResourceList.APIResources = append(apiResourceList.APIResources, *apiResource) } @@ 
-342,7 +313,7 @@ func ServerPreferredResources(d DiscoveryInterface) ([]*metav1.APIResourceList, return result, &ErrGroupDiscoveryFailed{Groups: failedGroups} } -// fetchServerResourcesForGroupVersions uses the discovery client to fetch the resources for the specified groups in parallel. +// fetchServerResourcesForGroupVersions uses the discovery client to fetch the resources for the specified groups in parallel func fetchGroupVersionResources(d DiscoveryInterface, apiGroups *metav1.APIGroupList) (map[schema.GroupVersion]*metav1.APIResourceList, map[schema.GroupVersion]error) { groupVersionResources := make(map[schema.GroupVersion]*metav1.APIResourceList) failedGroups := make(map[schema.GroupVersion]error) @@ -366,9 +337,7 @@ func fetchGroupVersionResources(d DiscoveryInterface, apiGroups *metav1.APIGroup if err != nil { // TODO: maybe restrict this to NotFound errors failedGroups[groupVersion] = err - } - if apiResourceList != nil { - // even in case of error, some fallback might have been returned + } else { groupVersionResources[groupVersion] = apiResourceList } }() @@ -382,11 +351,7 @@ func fetchGroupVersionResources(d DiscoveryInterface, apiGroups *metav1.APIGroup // ServerPreferredResources returns the supported resources with the version preferred by the // server. func (d *DiscoveryClient) ServerPreferredResources() ([]*metav1.APIResourceList, error) { - _, rs, err := withRetries(defaultRetries, func() ([]*metav1.APIGroup, []*metav1.APIResourceList, error) { - rs, err := ServerPreferredResources(d) - return nil, rs, err - }) - return rs, err + return withRetries(defaultRetries, d.serverPreferredResources) } // ServerPreferredNamespacedResources returns the supported namespaced resources with the @@ -412,7 +377,7 @@ func (d *DiscoveryClient) ServerVersion() (*version.Info, error) { var info version.Info err = json.Unmarshal(body, &info) if err != nil { - return nil, fmt.Errorf("unable to parse the server version: %v", err) + return nil, fmt.Errorf("got '%s': %v", string(body), err) } return &info, nil } @@ -423,7 +388,7 @@ func (d *DiscoveryClient) OpenAPISchema() (*openapi_v2.Document, error) { if err != nil { if errors.IsForbidden(err) || errors.IsNotFound(err) || errors.IsNotAcceptable(err) { // single endpoint not found/registered in old server, try to fetch old endpoint - // TODO: remove this when kubectl/client-go don't work with 1.9 server + // TODO(roycaihw): remove this in 1.11 data, err = d.restClient.Get().AbsPath("/swagger-2.0.0.pb-v1").Do().Raw() if err != nil { return nil, err @@ -441,20 +406,19 @@ func (d *DiscoveryClient) OpenAPISchema() (*openapi_v2.Document, error) { } // withRetries retries the given recovery function in case the groups supported by the server change after ServerGroup() returns. 
-func withRetries(maxRetries int, f func() ([]*metav1.APIGroup, []*metav1.APIResourceList, error)) ([]*metav1.APIGroup, []*metav1.APIResourceList, error) { +func withRetries(maxRetries int, f func() ([]*metav1.APIResourceList, error)) ([]*metav1.APIResourceList, error) { var result []*metav1.APIResourceList - var resultGroups []*metav1.APIGroup var err error for i := 0; i < maxRetries; i++ { - resultGroups, result, err = f() + result, err = f() if err == nil { - return resultGroups, result, nil + return result, nil } if _, ok := err.(*ErrGroupDiscoveryFailed); !ok { - return nil, nil, err + return nil, err } } - return resultGroups, result, err + return result, err } func setDiscoveryDefaults(config *restclient.Config) error { @@ -500,9 +464,9 @@ func NewDiscoveryClient(c restclient.Interface) *DiscoveryClient { // RESTClient returns a RESTClient that is used to communicate // with API server by this client implementation. -func (d *DiscoveryClient) RESTClient() restclient.Interface { - if d == nil { +func (c *DiscoveryClient) RESTClient() restclient.Interface { + if c == nil { return nil } - return d.restClient + return c.restClient } diff --git a/vendor/k8s.io/client-go/discovery/fake/discovery.go b/vendor/k8s.io/client-go/discovery/fake/discovery.go index 335473dd1..984a0ba1e 100644 --- a/vendor/k8s.io/client-go/discovery/fake/discovery.go +++ b/vendor/k8s.io/client-go/discovery/fake/discovery.go @@ -36,8 +36,6 @@ type FakeDiscovery struct { FakedServerVersion *version.Info } -// ServerResourcesForGroupVersion returns the supported resources for a group -// and version. func (c *FakeDiscovery) ServerResourcesForGroupVersion(groupVersion string) (*metav1.APIResourceList, error) { action := testing.ActionImpl{ Verb: "get", @@ -52,46 +50,23 @@ func (c *FakeDiscovery) ServerResourcesForGroupVersion(groupVersion string) (*me return nil, fmt.Errorf("GroupVersion %q not found", groupVersion) } -// ServerResources returns the supported resources for all groups and versions. -// Deprecated: use ServerGroupsAndResources instead. func (c *FakeDiscovery) ServerResources() ([]*metav1.APIResourceList, error) { - _, rs, err := c.ServerGroupsAndResources() - return rs, err -} - -// ServerGroupsAndResources returns the supported groups and resources for all groups and versions. -func (c *FakeDiscovery) ServerGroupsAndResources() ([]*metav1.APIGroup, []*metav1.APIResourceList, error) { - sgs, err := c.ServerGroups() - if err != nil { - return nil, nil, err - } - resultGroups := []*metav1.APIGroup{} - for i := range sgs.Groups { - resultGroups = append(resultGroups, &sgs.Groups[i]) - } - action := testing.ActionImpl{ Verb: "get", Resource: schema.GroupVersionResource{Resource: "resource"}, } c.Invokes(action, nil) - return resultGroups, c.Resources, nil + return c.Resources, nil } -// ServerPreferredResources returns the supported resources with the version -// preferred by the server. func (c *FakeDiscovery) ServerPreferredResources() ([]*metav1.APIResourceList, error) { return nil, nil } -// ServerPreferredNamespacedResources returns the supported namespaced resources -// with the version preferred by the server. func (c *FakeDiscovery) ServerPreferredNamespacedResources() ([]*metav1.APIResourceList, error) { return nil, nil } -// ServerGroups returns the supported groups, with information like supported -// versions and the preferred version. 
func (c *FakeDiscovery) ServerGroups() (*metav1.APIGroupList, error) { action := testing.ActionImpl{ Verb: "get", @@ -133,7 +108,6 @@ func (c *FakeDiscovery) ServerGroups() (*metav1.APIGroupList, error) { } -// ServerVersion retrieves and parses the server's version. func (c *FakeDiscovery) ServerVersion() (*version.Info, error) { action := testing.ActionImpl{} action.Verb = "get" @@ -148,13 +122,10 @@ func (c *FakeDiscovery) ServerVersion() (*version.Info, error) { return &versionInfo, nil } -// OpenAPISchema retrieves and parses the swagger API schema the server supports. func (c *FakeDiscovery) OpenAPISchema() (*openapi_v2.Document, error) { return &openapi_v2.Document{}, nil } -// RESTClient returns a RESTClient that is used to communicate with API server -// by this client implementation. func (c *FakeDiscovery) RESTClient() restclient.Interface { return nil } diff --git a/vendor/k8s.io/client-go/discovery/helper.go b/vendor/k8s.io/client-go/discovery/helper.go index 3bfe514e8..353d34b3c 100644 --- a/vendor/k8s.io/client-go/discovery/helper.go +++ b/vendor/k8s.io/client-go/discovery/helper.go @@ -31,11 +31,11 @@ import ( func MatchesServerVersion(clientVersion apimachineryversion.Info, client DiscoveryInterface) error { sVer, err := client.ServerVersion() if err != nil { - return fmt.Errorf("couldn't read version from server: %v", err) + return fmt.Errorf("couldn't read version from server: %v\n", err) } // GitVersion includes GitCommit and GitTreeState, but best to be safe? if clientVersion.GitVersion != sVer.GitVersion || clientVersion.GitCommit != sVer.GitCommit || clientVersion.GitTreeState != sVer.GitTreeState { - return fmt.Errorf("server version (%#v) differs from client version (%#v)", sVer, clientVersion) + return fmt.Errorf("server version (%#v) differs from client version (%#v)!\n", sVer, clientVersion) } return nil @@ -101,15 +101,12 @@ func FilteredBy(pred ResourcePredicate, rls []*metav1.APIResourceList) []*metav1 return result } -// ResourcePredicate has a method to check if a resource matches a given condition. type ResourcePredicate interface { Match(groupVersion string, r *metav1.APIResource) bool } -// ResourcePredicateFunc returns true if it matches a resource based on a custom condition. type ResourcePredicateFunc func(groupVersion string, r *metav1.APIResource) bool -// Match is a wrapper around ResourcePredicateFunc. func (fn ResourcePredicateFunc) Match(groupVersion string, r *metav1.APIResource) bool { return fn(groupVersion, r) } @@ -119,7 +116,6 @@ type SupportsAllVerbs struct { Verbs []string } -// Match checks if a resource contains all the given verbs. func (p SupportsAllVerbs) Match(groupVersion string, r *metav1.APIResource) bool { return sets.NewString([]string(r.Verbs)...).HasAll(p.Verbs...) } diff --git a/vendor/k8s.io/client-go/discovery/round_tripper.go b/vendor/k8s.io/client-go/discovery/round_tripper.go new file mode 100644 index 000000000..75b7f5209 --- /dev/null +++ b/vendor/k8s.io/client-go/discovery/round_tripper.go @@ -0,0 +1,62 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package discovery + +import ( + "net/http" + "path/filepath" + + "github.com/golang/glog" + "github.com/gregjones/httpcache" + "github.com/gregjones/httpcache/diskcache" + "github.com/peterbourgon/diskv" +) + +type cacheRoundTripper struct { + rt *httpcache.Transport +} + +// newCacheRoundTripper creates a roundtripper that reads the ETag on +// response headers and send the If-None-Match header on subsequent +// corresponding requests. +func newCacheRoundTripper(cacheDir string, rt http.RoundTripper) http.RoundTripper { + d := diskv.New(diskv.Options{ + BasePath: cacheDir, + TempDir: filepath.Join(cacheDir, ".diskv-temp"), + }) + t := httpcache.NewTransport(diskcache.NewWithDiskv(d)) + t.Transport = rt + + return &cacheRoundTripper{rt: t} +} + +func (rt *cacheRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + return rt.rt.RoundTrip(req) +} + +func (rt *cacheRoundTripper) CancelRequest(req *http.Request) { + type canceler interface { + CancelRequest(*http.Request) + } + if cr, ok := rt.rt.Transport.(canceler); ok { + cr.CancelRequest(req) + } else { + glog.Errorf("CancelRequest not implemented by %T", rt.rt.Transport) + } +} + +func (rt *cacheRoundTripper) WrappedRoundTripper() http.RoundTripper { return rt.rt.Transport } diff --git a/vendor/k8s.io/client-go/dynamic/interface.go b/vendor/k8s.io/client-go/dynamic/interface.go index 70756a4f5..c457be178 100644 --- a/vendor/k8s.io/client-go/dynamic/interface.go +++ b/vendor/k8s.io/client-go/dynamic/interface.go @@ -37,7 +37,7 @@ type ResourceInterface interface { Get(name string, options metav1.GetOptions, subresources ...string) (*unstructured.Unstructured, error) List(opts metav1.ListOptions) (*unstructured.UnstructuredList, error) Watch(opts metav1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, options metav1.PatchOptions, subresources ...string) (*unstructured.Unstructured, error) + Patch(name string, pt types.PatchType, data []byte, options metav1.UpdateOptions, subresources ...string) (*unstructured.Unstructured, error) } type NamespaceableResourceInterface interface { diff --git a/vendor/k8s.io/client-go/dynamic/scheme.go b/vendor/k8s.io/client-go/dynamic/scheme.go index 4596104d8..c4aa081f9 100644 --- a/vendor/k8s.io/client-go/dynamic/scheme.go +++ b/vendor/k8s.io/client-go/dynamic/scheme.go @@ -43,8 +43,6 @@ func init() { var watchJsonSerializerInfo = runtime.SerializerInfo{ MediaType: "application/json", - MediaTypeType: "application", - MediaTypeSubType: "json", EncodesAsText: true, Serializer: json.NewSerializer(json.DefaultMetaFactory, watchScheme, watchScheme, false), PrettySerializer: json.NewSerializer(json.DefaultMetaFactory, watchScheme, watchScheme, true), @@ -79,8 +77,6 @@ func (s basicNegotiatedSerializer) SupportedMediaTypes() []runtime.SerializerInf return []runtime.SerializerInfo{ { MediaType: "application/json", - MediaTypeType: "application", - MediaTypeSubType: "json", EncodesAsText: true, Serializer: json.NewSerializer(json.DefaultMetaFactory, basicScheme, basicScheme, false), PrettySerializer: json.NewSerializer(json.DefaultMetaFactory, basicScheme, basicScheme, true), diff --git a/vendor/k8s.io/client-go/dynamic/simple.go b/vendor/k8s.io/client-go/dynamic/simple.go index 4e0ef5a7d..9e21cda6e 100644 --- a/vendor/k8s.io/client-go/dynamic/simple.go +++ b/vendor/k8s.io/client-go/dynamic/simple.go @@ -17,7 +17,6 @@ limitations under the License. 
package dynamic import ( - "fmt" "io" "k8s.io/apimachinery/pkg/api/meta" @@ -37,19 +36,6 @@ type dynamicClient struct { var _ Interface = &dynamicClient{} -// ConfigFor returns a copy of the provided config with the -// appropriate dynamic client defaults set. -func ConfigFor(inConfig *rest.Config) *rest.Config { - config := rest.CopyConfig(inConfig) - config.AcceptContentTypes = "application/json" - config.ContentType = "application/json" - config.NegotiatedSerializer = basicNegotiatedSerializer{} // this gets used for discovery and error handling types - if config.UserAgent == "" { - config.UserAgent = rest.DefaultKubernetesUserAgent() - } - return config -} - // NewForConfigOrDie creates a new Interface for the given config and // panics if there is an error in the config. func NewForConfigOrDie(c *rest.Config) Interface { @@ -60,12 +46,17 @@ func NewForConfigOrDie(c *rest.Config) Interface { return ret } -// NewForConfig creates a new dynamic client or returns an error. func NewForConfig(inConfig *rest.Config) (Interface, error) { - config := ConfigFor(inConfig) + config := rest.CopyConfig(inConfig) // for serializing the options config.GroupVersion = &schema.GroupVersion{} config.APIPath = "/if-you-see-this-search-for-the-break" + config.AcceptContentTypes = "application/json" + config.ContentType = "application/json" + config.NegotiatedSerializer = basicNegotiatedSerializer{} // this gets used for discovery and error handling types + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } restClient, err := rest.RESTClientFor(config) if err != nil { @@ -103,9 +94,6 @@ func (c *dynamicResourceClient) Create(obj *unstructured.Unstructured, opts meta return nil, err } name = accessor.GetName() - if len(name) == 0 { - return nil, fmt.Errorf("name is required") - } } result := c.client.client. @@ -134,10 +122,6 @@ func (c *dynamicResourceClient) Update(obj *unstructured.Unstructured, opts meta if err != nil { return nil, err } - name := accessor.GetName() - if len(name) == 0 { - return nil, fmt.Errorf("name is required") - } outBytes, err := runtime.Encode(unstructured.UnstructuredJSONScheme, obj) if err != nil { return nil, err @@ -145,7 +129,7 @@ func (c *dynamicResourceClient) Update(obj *unstructured.Unstructured, opts meta result := c.client.client. Put(). - AbsPath(append(c.makeURLSegments(name), subresources...)...). + AbsPath(append(c.makeURLSegments(accessor.GetName()), subresources...)...). Body(outBytes). SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1). Do() @@ -169,10 +153,6 @@ func (c *dynamicResourceClient) UpdateStatus(obj *unstructured.Unstructured, opt if err != nil { return nil, err } - name := accessor.GetName() - if len(name) == 0 { - return nil, fmt.Errorf("name is required") - } outBytes, err := runtime.Encode(unstructured.UnstructuredJSONScheme, obj) if err != nil { @@ -181,7 +161,7 @@ func (c *dynamicResourceClient) UpdateStatus(obj *unstructured.Unstructured, opt result := c.client.client. Put(). - AbsPath(append(c.makeURLSegments(name), "status")...). + AbsPath(append(c.makeURLSegments(accessor.GetName()), "status")...). Body(outBytes). SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1). 
Do() @@ -201,9 +181,6 @@ func (c *dynamicResourceClient) UpdateStatus(obj *unstructured.Unstructured, opt } func (c *dynamicResourceClient) Delete(name string, opts *metav1.DeleteOptions, subresources ...string) error { - if len(name) == 0 { - return fmt.Errorf("name is required") - } if opts == nil { opts = &metav1.DeleteOptions{} } @@ -239,9 +216,6 @@ func (c *dynamicResourceClient) DeleteCollection(opts *metav1.DeleteOptions, lis } func (c *dynamicResourceClient) Get(name string, opts metav1.GetOptions, subresources ...string) (*unstructured.Unstructured, error) { - if len(name) == 0 { - return nil, fmt.Errorf("name is required") - } result := c.client.client.Get().AbsPath(append(c.makeURLSegments(name), subresources...)...).SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1).Do() if err := result.Error(); err != nil { return nil, err @@ -309,10 +283,7 @@ func (c *dynamicResourceClient) Watch(opts metav1.ListOptions) (watch.Interface, WatchWithSpecificDecoders(wrappedDecoderFn, unstructured.UnstructuredJSONScheme) } -func (c *dynamicResourceClient) Patch(name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (*unstructured.Unstructured, error) { - if len(name) == 0 { - return nil, fmt.Errorf("name is required") - } +func (c *dynamicResourceClient) Patch(name string, pt types.PatchType, data []byte, opts metav1.UpdateOptions, subresources ...string) (*unstructured.Unstructured, error) { result := c.client.client. Patch(pt). AbsPath(append(c.makeURLSegments(name), subresources...)...). diff --git a/vendor/k8s.io/client-go/informers/admissionregistration/interface.go b/vendor/k8s.io/client-go/informers/admissionregistration/interface.go index f56fe31e2..7a0783cc3 100644 --- a/vendor/k8s.io/client-go/informers/admissionregistration/interface.go +++ b/vendor/k8s.io/client-go/informers/admissionregistration/interface.go @@ -19,12 +19,15 @@ limitations under the License. package admissionregistration import ( + v1alpha1 "k8s.io/client-go/informers/admissionregistration/v1alpha1" v1beta1 "k8s.io/client-go/informers/admissionregistration/v1beta1" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" ) // Interface provides access to each of this group's versions. type Interface interface { + // V1alpha1 provides access to shared informers for resources in V1alpha1. + V1alpha1() v1alpha1.Interface // V1beta1 provides access to shared informers for resources in V1beta1. V1beta1() v1beta1.Interface } @@ -40,6 +43,11 @@ func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakList return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } +// V1alpha1 returns a new v1alpha1.Interface. +func (g *group) V1alpha1() v1alpha1.Interface { + return v1alpha1.New(g.factory, g.namespace, g.tweakListOptions) +} + // V1beta1 returns a new v1beta1.Interface. func (g *group) V1beta1() v1beta1.Interface { return v1beta1.New(g.factory, g.namespace, g.tweakListOptions) diff --git a/vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/initializerconfiguration.go b/vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/initializerconfiguration.go new file mode 100644 index 000000000..4cfaae5bd --- /dev/null +++ b/vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/initializerconfiguration.go @@ -0,0 +1,88 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + time "time" + + admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1alpha1 "k8s.io/client-go/listers/admissionregistration/v1alpha1" + cache "k8s.io/client-go/tools/cache" +) + +// InitializerConfigurationInformer provides access to a shared informer and lister for +// InitializerConfigurations. +type InitializerConfigurationInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha1.InitializerConfigurationLister +} + +type initializerConfigurationInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewInitializerConfigurationInformer constructs a new informer for InitializerConfiguration type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewInitializerConfigurationInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredInitializerConfigurationInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredInitializerConfigurationInformer constructs a new informer for InitializerConfiguration type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredInitializerConfigurationInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AdmissionregistrationV1alpha1().InitializerConfigurations().List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AdmissionregistrationV1alpha1().InitializerConfigurations().Watch(options) + }, + }, + &admissionregistrationv1alpha1.InitializerConfiguration{}, + resyncPeriod, + indexers, + ) +} + +func (f *initializerConfigurationInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredInitializerConfigurationInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *initializerConfigurationInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&admissionregistrationv1alpha1.InitializerConfiguration{}, f.defaultInformer) +} + +func (f *initializerConfigurationInformer) Lister() v1alpha1.InitializerConfigurationLister { + return v1alpha1.NewInitializerConfigurationLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/node/v1alpha1/interface.go b/vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/interface.go similarity index 78% rename from vendor/k8s.io/client-go/informers/node/v1alpha1/interface.go rename to vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/interface.go index c56442957..0f47d65d8 100644 --- a/vendor/k8s.io/client-go/informers/node/v1alpha1/interface.go +++ b/vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/interface.go @@ -24,8 +24,8 @@ import ( // Interface provides access to all the informers in this group version. type Interface interface { - // RuntimeClasses returns a RuntimeClassInformer. - RuntimeClasses() RuntimeClassInformer + // InitializerConfigurations returns a InitializerConfigurationInformer. + InitializerConfigurations() InitializerConfigurationInformer } type version struct { @@ -39,7 +39,7 @@ func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakList return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } -// RuntimeClasses returns a RuntimeClassInformer. -func (v *version) RuntimeClasses() RuntimeClassInformer { - return &runtimeClassInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +// InitializerConfigurations returns a InitializerConfigurationInformer. +func (v *version) InitializerConfigurations() InitializerConfigurationInformer { + return &initializerConfigurationInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} } diff --git a/vendor/k8s.io/client-go/informers/auditregistration/v1alpha1/auditsink.go b/vendor/k8s.io/client-go/informers/auditregistration/v1alpha1/auditsink.go deleted file mode 100644 index 69778ad2c..000000000 --- a/vendor/k8s.io/client-go/informers/auditregistration/v1alpha1/auditsink.go +++ /dev/null @@ -1,88 +0,0 @@ -/* -Copyright The Kubernetes Authors. 
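
The generated informer above is meant to be obtained through the shared factory, as its own comments say, so that one watch connection and cache serve every consumer. A sketch of wiring it up, assuming an existing kubernetes.Interface; the handler body is illustrative:

    package main

    import (
        "time"

        "k8s.io/client-go/informers"
        "k8s.io/client-go/kubernetes"
        "k8s.io/client-go/tools/cache"
    )

    func watchInitializerConfigurations(client kubernetes.Interface, stopCh <-chan struct{}) {
        factory := informers.NewSharedInformerFactory(client, 30*time.Second)
        inf := factory.Admissionregistration().V1alpha1().InitializerConfigurations()
        inf.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
            AddFunc: func(obj interface{}) {
                // React to a new InitializerConfiguration here (illustrative).
            },
        })
        factory.Start(stopCh)            // starts every informer requested so far
        factory.WaitForCacheSync(stopCh) // blocks until the initial List completes
    }
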
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by informer-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - time "time" - - auditregistrationv1alpha1 "k8s.io/api/auditregistration/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - watch "k8s.io/apimachinery/pkg/watch" - internalinterfaces "k8s.io/client-go/informers/internalinterfaces" - kubernetes "k8s.io/client-go/kubernetes" - v1alpha1 "k8s.io/client-go/listers/auditregistration/v1alpha1" - cache "k8s.io/client-go/tools/cache" -) - -// AuditSinkInformer provides access to a shared informer and lister for -// AuditSinks. -type AuditSinkInformer interface { - Informer() cache.SharedIndexInformer - Lister() v1alpha1.AuditSinkLister -} - -type auditSinkInformer struct { - factory internalinterfaces.SharedInformerFactory - tweakListOptions internalinterfaces.TweakListOptionsFunc -} - -// NewAuditSinkInformer constructs a new informer for AuditSink type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewAuditSinkInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { - return NewFilteredAuditSinkInformer(client, resyncPeriod, indexers, nil) -} - -// NewFilteredAuditSinkInformer constructs a new informer for AuditSink type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. 
-func NewFilteredAuditSinkInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { - return cache.NewSharedIndexInformer( - &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.AuditregistrationV1alpha1().AuditSinks().List(options) - }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.AuditregistrationV1alpha1().AuditSinks().Watch(options) - }, - }, - &auditregistrationv1alpha1.AuditSink{}, - resyncPeriod, - indexers, - ) -} - -func (f *auditSinkInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewFilteredAuditSinkInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) -} - -func (f *auditSinkInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&auditregistrationv1alpha1.AuditSink{}, f.defaultInformer) -} - -func (f *auditSinkInformer) Lister() v1alpha1.AuditSinkLister { - return v1alpha1.NewAuditSinkLister(f.Informer().GetIndexer()) -} diff --git a/vendor/k8s.io/client-go/informers/coordination/interface.go b/vendor/k8s.io/client-go/informers/coordination/interface.go index 54cfd7b9f..8e541d80d 100644 --- a/vendor/k8s.io/client-go/informers/coordination/interface.go +++ b/vendor/k8s.io/client-go/informers/coordination/interface.go @@ -19,15 +19,12 @@ limitations under the License. package coordination import ( - v1 "k8s.io/client-go/informers/coordination/v1" v1beta1 "k8s.io/client-go/informers/coordination/v1beta1" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" ) // Interface provides access to each of this group's versions. type Interface interface { - // V1 provides access to shared informers for resources in V1. - V1() v1.Interface // V1beta1 provides access to shared informers for resources in V1beta1. V1beta1() v1beta1.Interface } @@ -43,11 +40,6 @@ func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakList return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } -// V1 returns a new v1.Interface. -func (g *group) V1() v1.Interface { - return v1.New(g.factory, g.namespace, g.tweakListOptions) -} - // V1beta1 returns a new v1beta1.Interface. func (g *group) V1beta1() v1beta1.Interface { return v1beta1.New(g.factory, g.namespace, g.tweakListOptions) diff --git a/vendor/k8s.io/client-go/informers/extensions/v1beta1/interface.go b/vendor/k8s.io/client-go/informers/extensions/v1beta1/interface.go index 6f0bea7e8..a259d27ae 100644 --- a/vendor/k8s.io/client-go/informers/extensions/v1beta1/interface.go +++ b/vendor/k8s.io/client-go/informers/extensions/v1beta1/interface.go @@ -30,8 +30,6 @@ type Interface interface { Deployments() DeploymentInformer // Ingresses returns a IngressInformer. Ingresses() IngressInformer - // NetworkPolicies returns a NetworkPolicyInformer. - NetworkPolicies() NetworkPolicyInformer // PodSecurityPolicies returns a PodSecurityPolicyInformer. PodSecurityPolicies() PodSecurityPolicyInformer // ReplicaSets returns a ReplicaSetInformer. 
@@ -64,11 +62,6 @@ func (v *version) Ingresses() IngressInformer { return &ingressInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} } -// NetworkPolicies returns a NetworkPolicyInformer. -func (v *version) NetworkPolicies() NetworkPolicyInformer { - return &networkPolicyInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} -} - // PodSecurityPolicies returns a PodSecurityPolicyInformer. func (v *version) PodSecurityPolicies() PodSecurityPolicyInformer { return &podSecurityPolicyInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} diff --git a/vendor/k8s.io/client-go/informers/factory.go b/vendor/k8s.io/client-go/informers/factory.go index b3a043009..7ae22ee2c 100644 --- a/vendor/k8s.io/client-go/informers/factory.go +++ b/vendor/k8s.io/client-go/informers/factory.go @@ -28,7 +28,6 @@ import ( schema "k8s.io/apimachinery/pkg/runtime/schema" admissionregistration "k8s.io/client-go/informers/admissionregistration" apps "k8s.io/client-go/informers/apps" - auditregistration "k8s.io/client-go/informers/auditregistration" autoscaling "k8s.io/client-go/informers/autoscaling" batch "k8s.io/client-go/informers/batch" certificates "k8s.io/client-go/informers/certificates" @@ -38,7 +37,6 @@ import ( extensions "k8s.io/client-go/informers/extensions" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" networking "k8s.io/client-go/informers/networking" - node "k8s.io/client-go/informers/node" policy "k8s.io/client-go/informers/policy" rbac "k8s.io/client-go/informers/rbac" scheduling "k8s.io/client-go/informers/scheduling" @@ -190,7 +188,6 @@ type SharedInformerFactory interface { Admissionregistration() admissionregistration.Interface Apps() apps.Interface - Auditregistration() auditregistration.Interface Autoscaling() autoscaling.Interface Batch() batch.Interface Certificates() certificates.Interface @@ -199,7 +196,6 @@ type SharedInformerFactory interface { Events() events.Interface Extensions() extensions.Interface Networking() networking.Interface - Node() node.Interface Policy() policy.Interface Rbac() rbac.Interface Scheduling() scheduling.Interface @@ -215,10 +211,6 @@ func (f *sharedInformerFactory) Apps() apps.Interface { return apps.New(f, f.namespace, f.tweakListOptions) } -func (f *sharedInformerFactory) Auditregistration() auditregistration.Interface { - return auditregistration.New(f, f.namespace, f.tweakListOptions) -} - func (f *sharedInformerFactory) Autoscaling() autoscaling.Interface { return autoscaling.New(f, f.namespace, f.tweakListOptions) } @@ -251,10 +243,6 @@ func (f *sharedInformerFactory) Networking() networking.Interface { return networking.New(f, f.namespace, f.tweakListOptions) } -func (f *sharedInformerFactory) Node() node.Interface { - return node.New(f, f.namespace, f.tweakListOptions) -} - func (f *sharedInformerFactory) Policy() policy.Interface { return policy.New(f, f.namespace, f.tweakListOptions) } diff --git a/vendor/k8s.io/client-go/informers/generic.go b/vendor/k8s.io/client-go/informers/generic.go index 8b986a963..3af96304a 100644 --- a/vendor/k8s.io/client-go/informers/generic.go +++ b/vendor/k8s.io/client-go/informers/generic.go @@ -21,11 +21,11 @@ package informers import ( "fmt" + v1alpha1 "k8s.io/api/admissionregistration/v1alpha1" v1beta1 "k8s.io/api/admissionregistration/v1beta1" v1 "k8s.io/api/apps/v1" appsv1beta1 "k8s.io/api/apps/v1beta1" v1beta2 "k8s.io/api/apps/v1beta2" - v1alpha1 "k8s.io/api/auditregistration/v1alpha1" autoscalingv1 
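
factory.go, trimmed above, is the entry point for all of these group accessors. Where a controller only cares about one namespace or label set, the factory's option constructors avoid caching the whole cluster. A sketch, assuming the WithOptions variants present in this vintage of the package; the namespace and selector values are illustrative:

    package main

    import (
        "time"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/informers"
        "k8s.io/client-go/kubernetes"
    )

    func newScopedFactory(client kubernetes.Interface) informers.SharedInformerFactory {
        return informers.NewSharedInformerFactoryWithOptions(
            client,
            10*time.Minute, // resync period
            informers.WithNamespace("default"),
            informers.WithTweakListOptions(func(opts *metav1.ListOptions) {
                opts.LabelSelector = "app=example" // applied to every List/Watch
            }),
        )
    }
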
"k8s.io/api/autoscaling/v1" v2beta1 "k8s.io/api/autoscaling/v2beta1" v2beta2 "k8s.io/api/autoscaling/v2beta2" @@ -33,20 +33,15 @@ import ( batchv1beta1 "k8s.io/api/batch/v1beta1" v2alpha1 "k8s.io/api/batch/v2alpha1" certificatesv1beta1 "k8s.io/api/certificates/v1beta1" - coordinationv1 "k8s.io/api/coordination/v1" coordinationv1beta1 "k8s.io/api/coordination/v1beta1" corev1 "k8s.io/api/core/v1" eventsv1beta1 "k8s.io/api/events/v1beta1" extensionsv1beta1 "k8s.io/api/extensions/v1beta1" networkingv1 "k8s.io/api/networking/v1" - networkingv1beta1 "k8s.io/api/networking/v1beta1" - nodev1alpha1 "k8s.io/api/node/v1alpha1" - nodev1beta1 "k8s.io/api/node/v1beta1" policyv1beta1 "k8s.io/api/policy/v1beta1" rbacv1 "k8s.io/api/rbac/v1" rbacv1alpha1 "k8s.io/api/rbac/v1alpha1" rbacv1beta1 "k8s.io/api/rbac/v1beta1" - schedulingv1 "k8s.io/api/scheduling/v1" schedulingv1alpha1 "k8s.io/api/scheduling/v1alpha1" schedulingv1beta1 "k8s.io/api/scheduling/v1beta1" settingsv1alpha1 "k8s.io/api/settings/v1alpha1" @@ -83,7 +78,11 @@ func (f *genericInformer) Lister() cache.GenericLister { // TODO extend this to unknown resources with a client pool func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) { switch resource { - // Group=admissionregistration.k8s.io, Version=v1beta1 + // Group=admissionregistration.k8s.io, Version=v1alpha1 + case v1alpha1.SchemeGroupVersion.WithResource("initializerconfigurations"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Admissionregistration().V1alpha1().InitializerConfigurations().Informer()}, nil + + // Group=admissionregistration.k8s.io, Version=v1beta1 case v1beta1.SchemeGroupVersion.WithResource("mutatingwebhookconfigurations"): return &genericInformer{resource: resource.GroupResource(), informer: f.Admissionregistration().V1beta1().MutatingWebhookConfigurations().Informer()}, nil case v1beta1.SchemeGroupVersion.WithResource("validatingwebhookconfigurations"): @@ -121,10 +120,6 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource case v1beta2.SchemeGroupVersion.WithResource("statefulsets"): return &genericInformer{resource: resource.GroupResource(), informer: f.Apps().V1beta2().StatefulSets().Informer()}, nil - // Group=auditregistration.k8s.io, Version=v1alpha1 - case v1alpha1.SchemeGroupVersion.WithResource("auditsinks"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Auditregistration().V1alpha1().AuditSinks().Informer()}, nil - // Group=autoscaling, Version=v1 case autoscalingv1.SchemeGroupVersion.WithResource("horizontalpodautoscalers"): return &genericInformer{resource: resource.GroupResource(), informer: f.Autoscaling().V1().HorizontalPodAutoscalers().Informer()}, nil @@ -153,10 +148,6 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource case certificatesv1beta1.SchemeGroupVersion.WithResource("certificatesigningrequests"): return &genericInformer{resource: resource.GroupResource(), informer: f.Certificates().V1beta1().CertificateSigningRequests().Informer()}, nil - // Group=coordination.k8s.io, Version=v1 - case coordinationv1.SchemeGroupVersion.WithResource("leases"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Coordination().V1().Leases().Informer()}, nil - // Group=coordination.k8s.io, Version=v1beta1 case coordinationv1beta1.SchemeGroupVersion.WithResource("leases"): return &genericInformer{resource: resource.GroupResource(), informer: 
f.Coordination().V1beta1().Leases().Informer()}, nil @@ -206,8 +197,6 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource return &genericInformer{resource: resource.GroupResource(), informer: f.Extensions().V1beta1().Deployments().Informer()}, nil case extensionsv1beta1.SchemeGroupVersion.WithResource("ingresses"): return &genericInformer{resource: resource.GroupResource(), informer: f.Extensions().V1beta1().Ingresses().Informer()}, nil - case extensionsv1beta1.SchemeGroupVersion.WithResource("networkpolicies"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Extensions().V1beta1().NetworkPolicies().Informer()}, nil case extensionsv1beta1.SchemeGroupVersion.WithResource("podsecuritypolicies"): return &genericInformer{resource: resource.GroupResource(), informer: f.Extensions().V1beta1().PodSecurityPolicies().Informer()}, nil case extensionsv1beta1.SchemeGroupVersion.WithResource("replicasets"): @@ -217,18 +206,6 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource case networkingv1.SchemeGroupVersion.WithResource("networkpolicies"): return &genericInformer{resource: resource.GroupResource(), informer: f.Networking().V1().NetworkPolicies().Informer()}, nil - // Group=networking.k8s.io, Version=v1beta1 - case networkingv1beta1.SchemeGroupVersion.WithResource("ingresses"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Networking().V1beta1().Ingresses().Informer()}, nil - - // Group=node.k8s.io, Version=v1alpha1 - case nodev1alpha1.SchemeGroupVersion.WithResource("runtimeclasses"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Node().V1alpha1().RuntimeClasses().Informer()}, nil - - // Group=node.k8s.io, Version=v1beta1 - case nodev1beta1.SchemeGroupVersion.WithResource("runtimeclasses"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Node().V1beta1().RuntimeClasses().Informer()}, nil - // Group=policy, Version=v1beta1 case policyv1beta1.SchemeGroupVersion.WithResource("poddisruptionbudgets"): return &genericInformer{resource: resource.GroupResource(), informer: f.Policy().V1beta1().PodDisruptionBudgets().Informer()}, nil @@ -265,10 +242,6 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource case rbacv1beta1.SchemeGroupVersion.WithResource("rolebindings"): return &genericInformer{resource: resource.GroupResource(), informer: f.Rbac().V1beta1().RoleBindings().Informer()}, nil - // Group=scheduling.k8s.io, Version=v1 - case schedulingv1.SchemeGroupVersion.WithResource("priorityclasses"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Scheduling().V1().PriorityClasses().Informer()}, nil - // Group=scheduling.k8s.io, Version=v1alpha1 case schedulingv1alpha1.SchemeGroupVersion.WithResource("priorityclasses"): return &genericInformer{resource: resource.GroupResource(), informer: f.Scheduling().V1alpha1().PriorityClasses().Informer()}, nil @@ -284,18 +257,12 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource // Group=storage.k8s.io, Version=v1 case storagev1.SchemeGroupVersion.WithResource("storageclasses"): return &genericInformer{resource: resource.GroupResource(), informer: f.Storage().V1().StorageClasses().Informer()}, nil - case storagev1.SchemeGroupVersion.WithResource("volumeattachments"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Storage().V1().VolumeAttachments().Informer()}, nil // 
Group=storage.k8s.io, Version=v1alpha1 case storagev1alpha1.SchemeGroupVersion.WithResource("volumeattachments"): return &genericInformer{resource: resource.GroupResource(), informer: f.Storage().V1alpha1().VolumeAttachments().Informer()}, nil // Group=storage.k8s.io, Version=v1beta1 - case storagev1beta1.SchemeGroupVersion.WithResource("csidrivers"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Storage().V1beta1().CSIDrivers().Informer()}, nil - case storagev1beta1.SchemeGroupVersion.WithResource("csinodes"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Storage().V1beta1().CSINodes().Informer()}, nil case storagev1beta1.SchemeGroupVersion.WithResource("storageclasses"): return &genericInformer{resource: resource.GroupResource(), informer: f.Storage().V1beta1().StorageClasses().Informer()}, nil case storagev1beta1.SchemeGroupVersion.WithResource("volumeattachments"): diff --git a/vendor/k8s.io/client-go/informers/internalinterfaces/factory_interfaces.go b/vendor/k8s.io/client-go/informers/internalinterfaces/factory_interfaces.go index b00ed70cf..5e05516b1 100644 --- a/vendor/k8s.io/client-go/informers/internalinterfaces/factory_interfaces.go +++ b/vendor/k8s.io/client-go/informers/internalinterfaces/factory_interfaces.go @@ -27,7 +27,6 @@ import ( cache "k8s.io/client-go/tools/cache" ) -// NewInformerFunc takes kubernetes.Interface and time.Duration to return a SharedIndexInformer. type NewInformerFunc func(kubernetes.Interface, time.Duration) cache.SharedIndexInformer // SharedInformerFactory a small interface to allow for adding an informer without an import cycle @@ -36,5 +35,4 @@ type SharedInformerFactory interface { InformerFor(obj runtime.Object, newFunc NewInformerFunc) cache.SharedIndexInformer } -// TweakListOptionsFunc is a function that transforms a v1.ListOptions. type TweakListOptionsFunc func(*v1.ListOptions) diff --git a/vendor/k8s.io/client-go/informers/networking/interface.go b/vendor/k8s.io/client-go/informers/networking/interface.go index 4a028d5d1..989e8fa0f 100644 --- a/vendor/k8s.io/client-go/informers/networking/interface.go +++ b/vendor/k8s.io/client-go/informers/networking/interface.go @@ -21,15 +21,12 @@ package networking import ( internalinterfaces "k8s.io/client-go/informers/internalinterfaces" v1 "k8s.io/client-go/informers/networking/v1" - v1beta1 "k8s.io/client-go/informers/networking/v1beta1" ) // Interface provides access to each of this group's versions. type Interface interface { // V1 provides access to shared informers for resources in V1. V1() v1.Interface - // V1beta1 provides access to shared informers for resources in V1beta1. - V1beta1() v1beta1.Interface } type group struct { @@ -47,8 +44,3 @@ func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakList func (g *group) V1() v1.Interface { return v1.New(g.factory, g.namespace, g.tweakListOptions) } - -// V1beta1 returns a new v1beta1.Interface. -func (g *group) V1beta1() v1beta1.Interface { - return v1beta1.New(g.factory, g.namespace, g.tweakListOptions) -} diff --git a/vendor/k8s.io/client-go/informers/node/v1alpha1/runtimeclass.go b/vendor/k8s.io/client-go/informers/node/v1alpha1/runtimeclass.go deleted file mode 100644 index 31edf930a..000000000 --- a/vendor/k8s.io/client-go/informers/node/v1alpha1/runtimeclass.go +++ /dev/null @@ -1,88 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
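
The ForResource switch in generic.go, whose cases change above, is the type-erased path into the same shared caches: callers hand it a GroupVersionResource and get back a GenericInformer, or an error for anything the switch does not know. A sketch of the lookup, reusing a factory like the one sketched earlier:

    package main

    import (
        "fmt"

        "k8s.io/apimachinery/pkg/runtime/schema"
        "k8s.io/client-go/informers"
    )

    func genericLookup(factory informers.SharedInformerFactory) error {
        gvr := schema.GroupVersionResource{
            Group:    "admissionregistration.k8s.io",
            Version:  "v1alpha1",
            Resource: "initializerconfigurations",
        }
        gi, err := factory.ForResource(gvr)
        if err != nil {
            // GVRs outside the switch (e.g. the removed auditsinks) land here.
            return fmt.Errorf("no generic informer: %v", err)
        }
        _ = gi.Informer() // a cache.SharedIndexInformer
        _ = gi.Lister()   // a cache.GenericLister
        return nil
    }
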
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by informer-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - time "time" - - nodev1alpha1 "k8s.io/api/node/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - watch "k8s.io/apimachinery/pkg/watch" - internalinterfaces "k8s.io/client-go/informers/internalinterfaces" - kubernetes "k8s.io/client-go/kubernetes" - v1alpha1 "k8s.io/client-go/listers/node/v1alpha1" - cache "k8s.io/client-go/tools/cache" -) - -// RuntimeClassInformer provides access to a shared informer and lister for -// RuntimeClasses. -type RuntimeClassInformer interface { - Informer() cache.SharedIndexInformer - Lister() v1alpha1.RuntimeClassLister -} - -type runtimeClassInformer struct { - factory internalinterfaces.SharedInformerFactory - tweakListOptions internalinterfaces.TweakListOptionsFunc -} - -// NewRuntimeClassInformer constructs a new informer for RuntimeClass type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewRuntimeClassInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { - return NewFilteredRuntimeClassInformer(client, resyncPeriod, indexers, nil) -} - -// NewFilteredRuntimeClassInformer constructs a new informer for RuntimeClass type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. 
-func NewFilteredRuntimeClassInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { - return cache.NewSharedIndexInformer( - &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.NodeV1alpha1().RuntimeClasses().List(options) - }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.NodeV1alpha1().RuntimeClasses().Watch(options) - }, - }, - &nodev1alpha1.RuntimeClass{}, - resyncPeriod, - indexers, - ) -} - -func (f *runtimeClassInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewFilteredRuntimeClassInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) -} - -func (f *runtimeClassInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&nodev1alpha1.RuntimeClass{}, f.defaultInformer) -} - -func (f *runtimeClassInformer) Lister() v1alpha1.RuntimeClassLister { - return v1alpha1.NewRuntimeClassLister(f.Informer().GetIndexer()) -} diff --git a/vendor/k8s.io/client-go/informers/node/v1beta1/interface.go b/vendor/k8s.io/client-go/informers/node/v1beta1/interface.go deleted file mode 100644 index 44a1defb6..000000000 --- a/vendor/k8s.io/client-go/informers/node/v1beta1/interface.go +++ /dev/null @@ -1,45 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by informer-gen. DO NOT EDIT. - -package v1beta1 - -import ( - internalinterfaces "k8s.io/client-go/informers/internalinterfaces" -) - -// Interface provides access to all the informers in this group version. -type Interface interface { - // RuntimeClasses returns a RuntimeClassInformer. - RuntimeClasses() RuntimeClassInformer -} - -type version struct { - factory internalinterfaces.SharedInformerFactory - namespace string - tweakListOptions internalinterfaces.TweakListOptionsFunc -} - -// New returns a new Interface. -func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { - return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} -} - -// RuntimeClasses returns a RuntimeClassInformer. -func (v *version) RuntimeClasses() RuntimeClassInformer { - return &runtimeClassInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} -} diff --git a/vendor/k8s.io/client-go/informers/node/v1beta1/runtimeclass.go b/vendor/k8s.io/client-go/informers/node/v1beta1/runtimeclass.go deleted file mode 100644 index 6972993ad..000000000 --- a/vendor/k8s.io/client-go/informers/node/v1beta1/runtimeclass.go +++ /dev/null @@ -1,88 +0,0 @@ -/* -Copyright The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by informer-gen. DO NOT EDIT. - -package v1beta1 - -import ( - time "time" - - nodev1beta1 "k8s.io/api/node/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - watch "k8s.io/apimachinery/pkg/watch" - internalinterfaces "k8s.io/client-go/informers/internalinterfaces" - kubernetes "k8s.io/client-go/kubernetes" - v1beta1 "k8s.io/client-go/listers/node/v1beta1" - cache "k8s.io/client-go/tools/cache" -) - -// RuntimeClassInformer provides access to a shared informer and lister for -// RuntimeClasses. -type RuntimeClassInformer interface { - Informer() cache.SharedIndexInformer - Lister() v1beta1.RuntimeClassLister -} - -type runtimeClassInformer struct { - factory internalinterfaces.SharedInformerFactory - tweakListOptions internalinterfaces.TweakListOptionsFunc -} - -// NewRuntimeClassInformer constructs a new informer for RuntimeClass type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewRuntimeClassInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { - return NewFilteredRuntimeClassInformer(client, resyncPeriod, indexers, nil) -} - -// NewFilteredRuntimeClassInformer constructs a new informer for RuntimeClass type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. 
-func NewFilteredRuntimeClassInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { - return cache.NewSharedIndexInformer( - &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.NodeV1beta1().RuntimeClasses().List(options) - }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.NodeV1beta1().RuntimeClasses().Watch(options) - }, - }, - &nodev1beta1.RuntimeClass{}, - resyncPeriod, - indexers, - ) -} - -func (f *runtimeClassInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewFilteredRuntimeClassInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) -} - -func (f *runtimeClassInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&nodev1beta1.RuntimeClass{}, f.defaultInformer) -} - -func (f *runtimeClassInformer) Lister() v1beta1.RuntimeClassLister { - return v1beta1.NewRuntimeClassLister(f.Informer().GetIndexer()) -} diff --git a/vendor/k8s.io/client-go/informers/scheduling/interface.go b/vendor/k8s.io/client-go/informers/scheduling/interface.go index 659089b53..16d030c30 100644 --- a/vendor/k8s.io/client-go/informers/scheduling/interface.go +++ b/vendor/k8s.io/client-go/informers/scheduling/interface.go @@ -20,15 +20,12 @@ package scheduling import ( internalinterfaces "k8s.io/client-go/informers/internalinterfaces" - v1 "k8s.io/client-go/informers/scheduling/v1" v1alpha1 "k8s.io/client-go/informers/scheduling/v1alpha1" v1beta1 "k8s.io/client-go/informers/scheduling/v1beta1" ) // Interface provides access to each of this group's versions. type Interface interface { - // V1 provides access to shared informers for resources in V1. - V1() v1.Interface // V1alpha1 provides access to shared informers for resources in V1alpha1. V1alpha1() v1alpha1.Interface // V1beta1 provides access to shared informers for resources in V1beta1. @@ -46,11 +43,6 @@ func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakList return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } -// V1 returns a new v1.Interface. -func (g *group) V1() v1.Interface { - return v1.New(g.factory, g.namespace, g.tweakListOptions) -} - // V1alpha1 returns a new v1alpha1.Interface. func (g *group) V1alpha1() v1alpha1.Interface { return v1alpha1.New(g.factory, g.namespace, g.tweakListOptions) diff --git a/vendor/k8s.io/client-go/informers/scheduling/v1/interface.go b/vendor/k8s.io/client-go/informers/scheduling/v1/interface.go deleted file mode 100644 index fd7931f34..000000000 --- a/vendor/k8s.io/client-go/informers/scheduling/v1/interface.go +++ /dev/null @@ -1,45 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by informer-gen. DO NOT EDIT. - -package v1 - -import ( - internalinterfaces "k8s.io/client-go/informers/internalinterfaces" -) - -// Interface provides access to all the informers in this group version. -type Interface interface { - // PriorityClasses returns a PriorityClassInformer. - PriorityClasses() PriorityClassInformer -} - -type version struct { - factory internalinterfaces.SharedInformerFactory - namespace string - tweakListOptions internalinterfaces.TweakListOptionsFunc -} - -// New returns a new Interface. -func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { - return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} -} - -// PriorityClasses returns a PriorityClassInformer. -func (v *version) PriorityClasses() PriorityClassInformer { - return &priorityClassInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} -} diff --git a/vendor/k8s.io/client-go/informers/scheduling/v1/priorityclass.go b/vendor/k8s.io/client-go/informers/scheduling/v1/priorityclass.go deleted file mode 100644 index a9ee6289e..000000000 --- a/vendor/k8s.io/client-go/informers/scheduling/v1/priorityclass.go +++ /dev/null @@ -1,88 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by informer-gen. DO NOT EDIT. - -package v1 - -import ( - time "time" - - schedulingv1 "k8s.io/api/scheduling/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - watch "k8s.io/apimachinery/pkg/watch" - internalinterfaces "k8s.io/client-go/informers/internalinterfaces" - kubernetes "k8s.io/client-go/kubernetes" - v1 "k8s.io/client-go/listers/scheduling/v1" - cache "k8s.io/client-go/tools/cache" -) - -// PriorityClassInformer provides access to a shared informer and lister for -// PriorityClasses. -type PriorityClassInformer interface { - Informer() cache.SharedIndexInformer - Lister() v1.PriorityClassLister -} - -type priorityClassInformer struct { - factory internalinterfaces.SharedInformerFactory - tweakListOptions internalinterfaces.TweakListOptionsFunc -} - -// NewPriorityClassInformer constructs a new informer for PriorityClass type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewPriorityClassInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { - return NewFilteredPriorityClassInformer(client, resyncPeriod, indexers, nil) -} - -// NewFilteredPriorityClassInformer constructs a new informer for PriorityClass type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. 
This reduces memory footprint and number of connections to the server. -func NewFilteredPriorityClassInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { - return cache.NewSharedIndexInformer( - &cache.ListWatch{ - ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.SchedulingV1().PriorityClasses().List(options) - }, - WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.SchedulingV1().PriorityClasses().Watch(options) - }, - }, - &schedulingv1.PriorityClass{}, - resyncPeriod, - indexers, - ) -} - -func (f *priorityClassInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewFilteredPriorityClassInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) -} - -func (f *priorityClassInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&schedulingv1.PriorityClass{}, f.defaultInformer) -} - -func (f *priorityClassInformer) Lister() v1.PriorityClassLister { - return v1.NewPriorityClassLister(f.Informer().GetIndexer()) -} diff --git a/vendor/k8s.io/client-go/informers/storage/v1/interface.go b/vendor/k8s.io/client-go/informers/storage/v1/interface.go index 64fc2bd84..d7e4b5c49 100644 --- a/vendor/k8s.io/client-go/informers/storage/v1/interface.go +++ b/vendor/k8s.io/client-go/informers/storage/v1/interface.go @@ -26,8 +26,6 @@ import ( type Interface interface { // StorageClasses returns a StorageClassInformer. StorageClasses() StorageClassInformer - // VolumeAttachments returns a VolumeAttachmentInformer. - VolumeAttachments() VolumeAttachmentInformer } type version struct { @@ -45,8 +43,3 @@ func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakList func (v *version) StorageClasses() StorageClassInformer { return &storageClassInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} } - -// VolumeAttachments returns a VolumeAttachmentInformer. -func (v *version) VolumeAttachments() VolumeAttachmentInformer { - return &volumeAttachmentInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} -} diff --git a/vendor/k8s.io/client-go/informers/storage/v1/volumeattachment.go b/vendor/k8s.io/client-go/informers/storage/v1/volumeattachment.go deleted file mode 100644 index 7ca3b86f2..000000000 --- a/vendor/k8s.io/client-go/informers/storage/v1/volumeattachment.go +++ /dev/null @@ -1,88 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by informer-gen. DO NOT EDIT. 
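
Every generated informer added or deleted in this patch has the same shape: a ListWatch pair over the typed client, the concrete API type, a resync period, and a namespace index. For orientation, a hand-rolled equivalent for Pods (chosen because core/v1 is unaffected by this downgrade); this mirrors the generated pattern rather than reproducing any one deleted file:

    package main

    import (
        "time"

        corev1 "k8s.io/api/core/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/apimachinery/pkg/runtime"
        "k8s.io/apimachinery/pkg/watch"
        "k8s.io/client-go/kubernetes"
        "k8s.io/client-go/tools/cache"
    )

    func newPodInformer(client kubernetes.Interface, resync time.Duration) cache.SharedIndexInformer {
        return cache.NewSharedIndexInformer(
            &cache.ListWatch{
                ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
                    return client.CoreV1().Pods(metav1.NamespaceAll).List(options)
                },
                WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
                    return client.CoreV1().Pods(metav1.NamespaceAll).Watch(options)
                },
            },
            &corev1.Pod{}, // the type this informer caches
            resync,
            cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
        )
    }
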
- -package v1 - -import ( - time "time" - - storagev1 "k8s.io/api/storage/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - watch "k8s.io/apimachinery/pkg/watch" - internalinterfaces "k8s.io/client-go/informers/internalinterfaces" - kubernetes "k8s.io/client-go/kubernetes" - v1 "k8s.io/client-go/listers/storage/v1" - cache "k8s.io/client-go/tools/cache" -) - -// VolumeAttachmentInformer provides access to a shared informer and lister for -// VolumeAttachments. -type VolumeAttachmentInformer interface { - Informer() cache.SharedIndexInformer - Lister() v1.VolumeAttachmentLister -} - -type volumeAttachmentInformer struct { - factory internalinterfaces.SharedInformerFactory - tweakListOptions internalinterfaces.TweakListOptionsFunc -} - -// NewVolumeAttachmentInformer constructs a new informer for VolumeAttachment type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewVolumeAttachmentInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { - return NewFilteredVolumeAttachmentInformer(client, resyncPeriod, indexers, nil) -} - -// NewFilteredVolumeAttachmentInformer constructs a new informer for VolumeAttachment type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewFilteredVolumeAttachmentInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { - return cache.NewSharedIndexInformer( - &cache.ListWatch{ - ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.StorageV1().VolumeAttachments().List(options) - }, - WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.StorageV1().VolumeAttachments().Watch(options) - }, - }, - &storagev1.VolumeAttachment{}, - resyncPeriod, - indexers, - ) -} - -func (f *volumeAttachmentInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewFilteredVolumeAttachmentInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) -} - -func (f *volumeAttachmentInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&storagev1.VolumeAttachment{}, f.defaultInformer) -} - -func (f *volumeAttachmentInformer) Lister() v1.VolumeAttachmentLister { - return v1.NewVolumeAttachmentLister(f.Informer().GetIndexer()) -} diff --git a/vendor/k8s.io/client-go/informers/storage/v1beta1/csidriver.go b/vendor/k8s.io/client-go/informers/storage/v1beta1/csidriver.go deleted file mode 100644 index 7f7cb216d..000000000 --- a/vendor/k8s.io/client-go/informers/storage/v1beta1/csidriver.go +++ /dev/null @@ -1,88 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by informer-gen. DO NOT EDIT. - -package v1beta1 - -import ( - time "time" - - storagev1beta1 "k8s.io/api/storage/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - watch "k8s.io/apimachinery/pkg/watch" - internalinterfaces "k8s.io/client-go/informers/internalinterfaces" - kubernetes "k8s.io/client-go/kubernetes" - v1beta1 "k8s.io/client-go/listers/storage/v1beta1" - cache "k8s.io/client-go/tools/cache" -) - -// CSIDriverInformer provides access to a shared informer and lister for -// CSIDrivers. -type CSIDriverInformer interface { - Informer() cache.SharedIndexInformer - Lister() v1beta1.CSIDriverLister -} - -type cSIDriverInformer struct { - factory internalinterfaces.SharedInformerFactory - tweakListOptions internalinterfaces.TweakListOptionsFunc -} - -// NewCSIDriverInformer constructs a new informer for CSIDriver type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewCSIDriverInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { - return NewFilteredCSIDriverInformer(client, resyncPeriod, indexers, nil) -} - -// NewFilteredCSIDriverInformer constructs a new informer for CSIDriver type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. 
-func NewFilteredCSIDriverInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { - return cache.NewSharedIndexInformer( - &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.StorageV1beta1().CSIDrivers().List(options) - }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.StorageV1beta1().CSIDrivers().Watch(options) - }, - }, - &storagev1beta1.CSIDriver{}, - resyncPeriod, - indexers, - ) -} - -func (f *cSIDriverInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewFilteredCSIDriverInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) -} - -func (f *cSIDriverInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&storagev1beta1.CSIDriver{}, f.defaultInformer) -} - -func (f *cSIDriverInformer) Lister() v1beta1.CSIDriverLister { - return v1beta1.NewCSIDriverLister(f.Informer().GetIndexer()) -} diff --git a/vendor/k8s.io/client-go/informers/storage/v1beta1/csinode.go b/vendor/k8s.io/client-go/informers/storage/v1beta1/csinode.go deleted file mode 100644 index 218bb1183..000000000 --- a/vendor/k8s.io/client-go/informers/storage/v1beta1/csinode.go +++ /dev/null @@ -1,88 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by informer-gen. DO NOT EDIT. - -package v1beta1 - -import ( - time "time" - - storagev1beta1 "k8s.io/api/storage/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - watch "k8s.io/apimachinery/pkg/watch" - internalinterfaces "k8s.io/client-go/informers/internalinterfaces" - kubernetes "k8s.io/client-go/kubernetes" - v1beta1 "k8s.io/client-go/listers/storage/v1beta1" - cache "k8s.io/client-go/tools/cache" -) - -// CSINodeInformer provides access to a shared informer and lister for -// CSINodes. -type CSINodeInformer interface { - Informer() cache.SharedIndexInformer - Lister() v1beta1.CSINodeLister -} - -type cSINodeInformer struct { - factory internalinterfaces.SharedInformerFactory - tweakListOptions internalinterfaces.TweakListOptionsFunc -} - -// NewCSINodeInformer constructs a new informer for CSINode type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewCSINodeInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { - return NewFilteredCSINodeInformer(client, resyncPeriod, indexers, nil) -} - -// NewFilteredCSINodeInformer constructs a new informer for CSINode type. 
-// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewFilteredCSINodeInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { - return cache.NewSharedIndexInformer( - &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.StorageV1beta1().CSINodes().List(options) - }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.StorageV1beta1().CSINodes().Watch(options) - }, - }, - &storagev1beta1.CSINode{}, - resyncPeriod, - indexers, - ) -} - -func (f *cSINodeInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewFilteredCSINodeInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) -} - -func (f *cSINodeInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&storagev1beta1.CSINode{}, f.defaultInformer) -} - -func (f *cSINodeInformer) Lister() v1beta1.CSINodeLister { - return v1beta1.NewCSINodeLister(f.Informer().GetIndexer()) -} diff --git a/vendor/k8s.io/client-go/informers/storage/v1beta1/interface.go b/vendor/k8s.io/client-go/informers/storage/v1beta1/interface.go index af4ee2f74..aa11c2bb6 100644 --- a/vendor/k8s.io/client-go/informers/storage/v1beta1/interface.go +++ b/vendor/k8s.io/client-go/informers/storage/v1beta1/interface.go @@ -24,10 +24,6 @@ import ( // Interface provides access to all the informers in this group version. type Interface interface { - // CSIDrivers returns a CSIDriverInformer. - CSIDrivers() CSIDriverInformer - // CSINodes returns a CSINodeInformer. - CSINodes() CSINodeInformer // StorageClasses returns a StorageClassInformer. StorageClasses() StorageClassInformer // VolumeAttachments returns a VolumeAttachmentInformer. @@ -45,16 +41,6 @@ func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakList return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } -// CSIDrivers returns a CSIDriverInformer. -func (v *version) CSIDrivers() CSIDriverInformer { - return &cSIDriverInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} -} - -// CSINodes returns a CSINodeInformer. -func (v *version) CSINodes() CSINodeInformer { - return &cSINodeInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} -} - // StorageClasses returns a StorageClassInformer. 
func (v *version) StorageClasses() StorageClassInformer { return &storageClassInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} diff --git a/vendor/k8s.io/client-go/kubernetes/clientset.go b/vendor/k8s.io/client-go/kubernetes/clientset.go index fb889e6df..122e4bb7f 100644 --- a/vendor/k8s.io/client-go/kubernetes/clientset.go +++ b/vendor/k8s.io/client-go/kubernetes/clientset.go @@ -20,11 +20,11 @@ package kubernetes import ( discovery "k8s.io/client-go/discovery" + admissionregistrationv1alpha1 "k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1" admissionregistrationv1beta1 "k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1" appsv1 "k8s.io/client-go/kubernetes/typed/apps/v1" appsv1beta1 "k8s.io/client-go/kubernetes/typed/apps/v1beta1" appsv1beta2 "k8s.io/client-go/kubernetes/typed/apps/v1beta2" - auditregistrationv1alpha1 "k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1" authenticationv1 "k8s.io/client-go/kubernetes/typed/authentication/v1" authenticationv1beta1 "k8s.io/client-go/kubernetes/typed/authentication/v1beta1" authorizationv1 "k8s.io/client-go/kubernetes/typed/authorization/v1" @@ -36,20 +36,15 @@ import ( batchv1beta1 "k8s.io/client-go/kubernetes/typed/batch/v1beta1" batchv2alpha1 "k8s.io/client-go/kubernetes/typed/batch/v2alpha1" certificatesv1beta1 "k8s.io/client-go/kubernetes/typed/certificates/v1beta1" - coordinationv1 "k8s.io/client-go/kubernetes/typed/coordination/v1" coordinationv1beta1 "k8s.io/client-go/kubernetes/typed/coordination/v1beta1" corev1 "k8s.io/client-go/kubernetes/typed/core/v1" eventsv1beta1 "k8s.io/client-go/kubernetes/typed/events/v1beta1" extensionsv1beta1 "k8s.io/client-go/kubernetes/typed/extensions/v1beta1" networkingv1 "k8s.io/client-go/kubernetes/typed/networking/v1" - networkingv1beta1 "k8s.io/client-go/kubernetes/typed/networking/v1beta1" - nodev1alpha1 "k8s.io/client-go/kubernetes/typed/node/v1alpha1" - nodev1beta1 "k8s.io/client-go/kubernetes/typed/node/v1beta1" policyv1beta1 "k8s.io/client-go/kubernetes/typed/policy/v1beta1" rbacv1 "k8s.io/client-go/kubernetes/typed/rbac/v1" rbacv1alpha1 "k8s.io/client-go/kubernetes/typed/rbac/v1alpha1" rbacv1beta1 "k8s.io/client-go/kubernetes/typed/rbac/v1beta1" - schedulingv1 "k8s.io/client-go/kubernetes/typed/scheduling/v1" schedulingv1alpha1 "k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1" schedulingv1beta1 "k8s.io/client-go/kubernetes/typed/scheduling/v1beta1" settingsv1alpha1 "k8s.io/client-go/kubernetes/typed/settings/v1alpha1" @@ -62,41 +57,70 @@ import ( type Interface interface { Discovery() discovery.DiscoveryInterface + AdmissionregistrationV1alpha1() admissionregistrationv1alpha1.AdmissionregistrationV1alpha1Interface AdmissionregistrationV1beta1() admissionregistrationv1beta1.AdmissionregistrationV1beta1Interface - AppsV1() appsv1.AppsV1Interface + // Deprecated: please explicitly pick a version if possible. + Admissionregistration() admissionregistrationv1beta1.AdmissionregistrationV1beta1Interface AppsV1beta1() appsv1beta1.AppsV1beta1Interface AppsV1beta2() appsv1beta2.AppsV1beta2Interface - AuditregistrationV1alpha1() auditregistrationv1alpha1.AuditregistrationV1alpha1Interface + AppsV1() appsv1.AppsV1Interface + // Deprecated: please explicitly pick a version if possible. + Apps() appsv1.AppsV1Interface AuthenticationV1() authenticationv1.AuthenticationV1Interface + // Deprecated: please explicitly pick a version if possible. 
+ Authentication() authenticationv1.AuthenticationV1Interface AuthenticationV1beta1() authenticationv1beta1.AuthenticationV1beta1Interface AuthorizationV1() authorizationv1.AuthorizationV1Interface + // Deprecated: please explicitly pick a version if possible. + Authorization() authorizationv1.AuthorizationV1Interface AuthorizationV1beta1() authorizationv1beta1.AuthorizationV1beta1Interface AutoscalingV1() autoscalingv1.AutoscalingV1Interface + // Deprecated: please explicitly pick a version if possible. + Autoscaling() autoscalingv1.AutoscalingV1Interface AutoscalingV2beta1() autoscalingv2beta1.AutoscalingV2beta1Interface AutoscalingV2beta2() autoscalingv2beta2.AutoscalingV2beta2Interface BatchV1() batchv1.BatchV1Interface + // Deprecated: please explicitly pick a version if possible. + Batch() batchv1.BatchV1Interface BatchV1beta1() batchv1beta1.BatchV1beta1Interface BatchV2alpha1() batchv2alpha1.BatchV2alpha1Interface CertificatesV1beta1() certificatesv1beta1.CertificatesV1beta1Interface + // Deprecated: please explicitly pick a version if possible. + Certificates() certificatesv1beta1.CertificatesV1beta1Interface CoordinationV1beta1() coordinationv1beta1.CoordinationV1beta1Interface - CoordinationV1() coordinationv1.CoordinationV1Interface + // Deprecated: please explicitly pick a version if possible. + Coordination() coordinationv1beta1.CoordinationV1beta1Interface CoreV1() corev1.CoreV1Interface + // Deprecated: please explicitly pick a version if possible. + Core() corev1.CoreV1Interface EventsV1beta1() eventsv1beta1.EventsV1beta1Interface + // Deprecated: please explicitly pick a version if possible. + Events() eventsv1beta1.EventsV1beta1Interface ExtensionsV1beta1() extensionsv1beta1.ExtensionsV1beta1Interface + // Deprecated: please explicitly pick a version if possible. + Extensions() extensionsv1beta1.ExtensionsV1beta1Interface NetworkingV1() networkingv1.NetworkingV1Interface - NetworkingV1beta1() networkingv1beta1.NetworkingV1beta1Interface - NodeV1alpha1() nodev1alpha1.NodeV1alpha1Interface - NodeV1beta1() nodev1beta1.NodeV1beta1Interface + // Deprecated: please explicitly pick a version if possible. + Networking() networkingv1.NetworkingV1Interface PolicyV1beta1() policyv1beta1.PolicyV1beta1Interface + // Deprecated: please explicitly pick a version if possible. + Policy() policyv1beta1.PolicyV1beta1Interface RbacV1() rbacv1.RbacV1Interface + // Deprecated: please explicitly pick a version if possible. + Rbac() rbacv1.RbacV1Interface RbacV1beta1() rbacv1beta1.RbacV1beta1Interface RbacV1alpha1() rbacv1alpha1.RbacV1alpha1Interface SchedulingV1alpha1() schedulingv1alpha1.SchedulingV1alpha1Interface SchedulingV1beta1() schedulingv1beta1.SchedulingV1beta1Interface - SchedulingV1() schedulingv1.SchedulingV1Interface + // Deprecated: please explicitly pick a version if possible. + Scheduling() schedulingv1beta1.SchedulingV1beta1Interface SettingsV1alpha1() settingsv1alpha1.SettingsV1alpha1Interface + // Deprecated: please explicitly pick a version if possible. + Settings() settingsv1alpha1.SettingsV1alpha1Interface StorageV1beta1() storagev1beta1.StorageV1beta1Interface StorageV1() storagev1.StorageV1Interface + // Deprecated: please explicitly pick a version if possible. + Storage() storagev1.StorageV1Interface StorageV1alpha1() storagev1alpha1.StorageV1alpha1Interface } @@ -104,42 +128,42 @@ type Interface interface { // version included in a Clientset. 
 type Clientset struct {
 	*discovery.DiscoveryClient
-	admissionregistrationV1beta1 *admissionregistrationv1beta1.AdmissionregistrationV1beta1Client
-	appsV1                       *appsv1.AppsV1Client
-	appsV1beta1                  *appsv1beta1.AppsV1beta1Client
-	appsV1beta2                  *appsv1beta2.AppsV1beta2Client
-	auditregistrationV1alpha1    *auditregistrationv1alpha1.AuditregistrationV1alpha1Client
-	authenticationV1             *authenticationv1.AuthenticationV1Client
-	authenticationV1beta1        *authenticationv1beta1.AuthenticationV1beta1Client
-	authorizationV1              *authorizationv1.AuthorizationV1Client
-	authorizationV1beta1         *authorizationv1beta1.AuthorizationV1beta1Client
-	autoscalingV1                *autoscalingv1.AutoscalingV1Client
-	autoscalingV2beta1           *autoscalingv2beta1.AutoscalingV2beta1Client
-	autoscalingV2beta2           *autoscalingv2beta2.AutoscalingV2beta2Client
-	batchV1                      *batchv1.BatchV1Client
-	batchV1beta1                 *batchv1beta1.BatchV1beta1Client
-	batchV2alpha1                *batchv2alpha1.BatchV2alpha1Client
-	certificatesV1beta1          *certificatesv1beta1.CertificatesV1beta1Client
-	coordinationV1beta1          *coordinationv1beta1.CoordinationV1beta1Client
-	coordinationV1               *coordinationv1.CoordinationV1Client
-	coreV1                       *corev1.CoreV1Client
-	eventsV1beta1                *eventsv1beta1.EventsV1beta1Client
-	extensionsV1beta1            *extensionsv1beta1.ExtensionsV1beta1Client
-	networkingV1                 *networkingv1.NetworkingV1Client
-	networkingV1beta1            *networkingv1beta1.NetworkingV1beta1Client
-	nodeV1alpha1                 *nodev1alpha1.NodeV1alpha1Client
-	nodeV1beta1                  *nodev1beta1.NodeV1beta1Client
-	policyV1beta1                *policyv1beta1.PolicyV1beta1Client
-	rbacV1                       *rbacv1.RbacV1Client
-	rbacV1beta1                  *rbacv1beta1.RbacV1beta1Client
-	rbacV1alpha1                 *rbacv1alpha1.RbacV1alpha1Client
-	schedulingV1alpha1           *schedulingv1alpha1.SchedulingV1alpha1Client
-	schedulingV1beta1            *schedulingv1beta1.SchedulingV1beta1Client
-	schedulingV1                 *schedulingv1.SchedulingV1Client
-	settingsV1alpha1             *settingsv1alpha1.SettingsV1alpha1Client
-	storageV1beta1               *storagev1beta1.StorageV1beta1Client
-	storageV1                    *storagev1.StorageV1Client
-	storageV1alpha1              *storagev1alpha1.StorageV1alpha1Client
+	admissionregistrationV1alpha1 *admissionregistrationv1alpha1.AdmissionregistrationV1alpha1Client
+	admissionregistrationV1beta1  *admissionregistrationv1beta1.AdmissionregistrationV1beta1Client
+	appsV1beta1                   *appsv1beta1.AppsV1beta1Client
+	appsV1beta2                   *appsv1beta2.AppsV1beta2Client
+	appsV1                        *appsv1.AppsV1Client
+	authenticationV1              *authenticationv1.AuthenticationV1Client
+	authenticationV1beta1         *authenticationv1beta1.AuthenticationV1beta1Client
+	authorizationV1               *authorizationv1.AuthorizationV1Client
+	authorizationV1beta1          *authorizationv1beta1.AuthorizationV1beta1Client
+	autoscalingV1                 *autoscalingv1.AutoscalingV1Client
+	autoscalingV2beta1            *autoscalingv2beta1.AutoscalingV2beta1Client
+	autoscalingV2beta2            *autoscalingv2beta2.AutoscalingV2beta2Client
+	batchV1                       *batchv1.BatchV1Client
+	batchV1beta1                  *batchv1beta1.BatchV1beta1Client
+	batchV2alpha1                 *batchv2alpha1.BatchV2alpha1Client
+	certificatesV1beta1           *certificatesv1beta1.CertificatesV1beta1Client
+	coordinationV1beta1           *coordinationv1beta1.CoordinationV1beta1Client
+	coreV1                        *corev1.CoreV1Client
+	eventsV1beta1                 *eventsv1beta1.EventsV1beta1Client
+	extensionsV1beta1             *extensionsv1beta1.ExtensionsV1beta1Client
+	networkingV1                  *networkingv1.NetworkingV1Client
+	policyV1beta1                 *policyv1beta1.PolicyV1beta1Client
+	rbacV1                        *rbacv1.RbacV1Client
+	rbacV1beta1                   *rbacv1beta1.RbacV1beta1Client
+	rbacV1alpha1                  *rbacv1alpha1.RbacV1alpha1Client
+	schedulingV1alpha1            *schedulingv1alpha1.SchedulingV1alpha1Client
+	schedulingV1beta1             *schedulingv1beta1.SchedulingV1beta1Client
+	settingsV1alpha1              *settingsv1alpha1.SettingsV1alpha1Client
+	storageV1beta1                *storagev1beta1.StorageV1beta1Client
+	storageV1                     *storagev1.StorageV1Client
+	storageV1alpha1               *storagev1alpha1.StorageV1alpha1Client
+}
+
+// AdmissionregistrationV1alpha1 retrieves the AdmissionregistrationV1alpha1Client
+func (c *Clientset) AdmissionregistrationV1alpha1() admissionregistrationv1alpha1.AdmissionregistrationV1alpha1Interface {
+	return c.admissionregistrationV1alpha1
 }
 
 // AdmissionregistrationV1beta1 retrieves the AdmissionregistrationV1beta1Client
@@ -147,9 +171,10 @@ func (c *Clientset) AdmissionregistrationV1beta1() admissionregistrationv1beta1.
 	return c.admissionregistrationV1beta1
 }
 
-// AppsV1 retrieves the AppsV1Client
-func (c *Clientset) AppsV1() appsv1.AppsV1Interface {
-	return c.appsV1
+// Deprecated: Admissionregistration retrieves the default version of AdmissionregistrationClient.
+// Please explicitly pick a version.
+func (c *Clientset) Admissionregistration() admissionregistrationv1beta1.AdmissionregistrationV1beta1Interface {
+	return c.admissionregistrationV1beta1
 }
 
 // AppsV1beta1 retrieves the AppsV1beta1Client
@@ -162,9 +187,15 @@ func (c *Clientset) AppsV1beta2() appsv1beta2.AppsV1beta2Interface {
 	return c.appsV1beta2
 }
 
-// AuditregistrationV1alpha1 retrieves the AuditregistrationV1alpha1Client
-func (c *Clientset) AuditregistrationV1alpha1() auditregistrationv1alpha1.AuditregistrationV1alpha1Interface {
-	return c.auditregistrationV1alpha1
+// AppsV1 retrieves the AppsV1Client
+func (c *Clientset) AppsV1() appsv1.AppsV1Interface {
+	return c.appsV1
+}
+
+// Deprecated: Apps retrieves the default version of AppsClient.
+// Please explicitly pick a version.
+func (c *Clientset) Apps() appsv1.AppsV1Interface {
+	return c.appsV1
 }
 
 // AuthenticationV1 retrieves the AuthenticationV1Client
@@ -172,6 +203,12 @@ func (c *Clientset) AuthenticationV1() authenticationv1.AuthenticationV1Interfac
 	return c.authenticationV1
 }
 
+// Deprecated: Authentication retrieves the default version of AuthenticationClient.
+// Please explicitly pick a version.
+func (c *Clientset) Authentication() authenticationv1.AuthenticationV1Interface {
+	return c.authenticationV1
+}
+
 // AuthenticationV1beta1 retrieves the AuthenticationV1beta1Client
 func (c *Clientset) AuthenticationV1beta1() authenticationv1beta1.AuthenticationV1beta1Interface {
 	return c.authenticationV1beta1
@@ -182,6 +219,12 @@ func (c *Clientset) AuthorizationV1() authorizationv1.AuthorizationV1Interface {
 	return c.authorizationV1
 }
 
+// Deprecated: Authorization retrieves the default version of AuthorizationClient.
+// Please explicitly pick a version.
+func (c *Clientset) Authorization() authorizationv1.AuthorizationV1Interface {
+	return c.authorizationV1
+}
+
 // AuthorizationV1beta1 retrieves the AuthorizationV1beta1Client
 func (c *Clientset) AuthorizationV1beta1() authorizationv1beta1.AuthorizationV1beta1Interface {
 	return c.authorizationV1beta1
@@ -192,6 +235,12 @@ func (c *Clientset) AutoscalingV1() autoscalingv1.AutoscalingV1Interface {
 	return c.autoscalingV1
 }
 
+// Deprecated: Autoscaling retrieves the default version of AutoscalingClient.
+// Please explicitly pick a version.
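// Aside: the version-less accessors reintroduced in this hunk (Apps(),
// Authentication(), Autoscaling(), ...) are shims that forward to one default
// version, which is why each carries a "Deprecated: please explicitly pick a
// version" note. A sketch of the difference, assuming cs is a
// *kubernetes.Clientset and metav1 is k8s.io/apimachinery/pkg/apis/meta/v1:
//
//	deployments, err := cs.Apps().Deployments("default").List(metav1.ListOptions{})   // deprecated alias
//	deployments, err = cs.AppsV1().Deployments("default").List(metav1.ListOptions{})  // explicit version
//
// Both resolve to the same AppsV1Client; the explicit form just cannot change
// behavior silently if a later release picks a different default version.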
+func (c *Clientset) Autoscaling() autoscalingv1.AutoscalingV1Interface { + return c.autoscalingV1 +} + // AutoscalingV2beta1 retrieves the AutoscalingV2beta1Client func (c *Clientset) AutoscalingV2beta1() autoscalingv2beta1.AutoscalingV2beta1Interface { return c.autoscalingV2beta1 @@ -207,6 +256,12 @@ func (c *Clientset) BatchV1() batchv1.BatchV1Interface { return c.batchV1 } +// Deprecated: Batch retrieves the default version of BatchClient. +// Please explicitly pick a version. +func (c *Clientset) Batch() batchv1.BatchV1Interface { + return c.batchV1 +} + // BatchV1beta1 retrieves the BatchV1beta1Client func (c *Clientset) BatchV1beta1() batchv1beta1.BatchV1beta1Interface { return c.batchV1beta1 @@ -222,14 +277,21 @@ func (c *Clientset) CertificatesV1beta1() certificatesv1beta1.CertificatesV1beta return c.certificatesV1beta1 } +// Deprecated: Certificates retrieves the default version of CertificatesClient. +// Please explicitly pick a version. +func (c *Clientset) Certificates() certificatesv1beta1.CertificatesV1beta1Interface { + return c.certificatesV1beta1 +} + // CoordinationV1beta1 retrieves the CoordinationV1beta1Client func (c *Clientset) CoordinationV1beta1() coordinationv1beta1.CoordinationV1beta1Interface { return c.coordinationV1beta1 } -// CoordinationV1 retrieves the CoordinationV1Client -func (c *Clientset) CoordinationV1() coordinationv1.CoordinationV1Interface { - return c.coordinationV1 +// Deprecated: Coordination retrieves the default version of CoordinationClient. +// Please explicitly pick a version. +func (c *Clientset) Coordination() coordinationv1beta1.CoordinationV1beta1Interface { + return c.coordinationV1beta1 } // CoreV1 retrieves the CoreV1Client @@ -237,34 +299,43 @@ func (c *Clientset) CoreV1() corev1.CoreV1Interface { return c.coreV1 } +// Deprecated: Core retrieves the default version of CoreClient. +// Please explicitly pick a version. +func (c *Clientset) Core() corev1.CoreV1Interface { + return c.coreV1 +} + // EventsV1beta1 retrieves the EventsV1beta1Client func (c *Clientset) EventsV1beta1() eventsv1beta1.EventsV1beta1Interface { return c.eventsV1beta1 } +// Deprecated: Events retrieves the default version of EventsClient. +// Please explicitly pick a version. +func (c *Clientset) Events() eventsv1beta1.EventsV1beta1Interface { + return c.eventsV1beta1 +} + // ExtensionsV1beta1 retrieves the ExtensionsV1beta1Client func (c *Clientset) ExtensionsV1beta1() extensionsv1beta1.ExtensionsV1beta1Interface { return c.extensionsV1beta1 } +// Deprecated: Extensions retrieves the default version of ExtensionsClient. +// Please explicitly pick a version. +func (c *Clientset) Extensions() extensionsv1beta1.ExtensionsV1beta1Interface { + return c.extensionsV1beta1 +} + // NetworkingV1 retrieves the NetworkingV1Client func (c *Clientset) NetworkingV1() networkingv1.NetworkingV1Interface { return c.networkingV1 } -// NetworkingV1beta1 retrieves the NetworkingV1beta1Client -func (c *Clientset) NetworkingV1beta1() networkingv1beta1.NetworkingV1beta1Interface { - return c.networkingV1beta1 -} - -// NodeV1alpha1 retrieves the NodeV1alpha1Client -func (c *Clientset) NodeV1alpha1() nodev1alpha1.NodeV1alpha1Interface { - return c.nodeV1alpha1 -} - -// NodeV1beta1 retrieves the NodeV1beta1Client -func (c *Clientset) NodeV1beta1() nodev1beta1.NodeV1beta1Interface { - return c.nodeV1beta1 +// Deprecated: Networking retrieves the default version of NetworkingClient. +// Please explicitly pick a version. 
+func (c *Clientset) Networking() networkingv1.NetworkingV1Interface { + return c.networkingV1 } // PolicyV1beta1 retrieves the PolicyV1beta1Client @@ -272,11 +343,23 @@ func (c *Clientset) PolicyV1beta1() policyv1beta1.PolicyV1beta1Interface { return c.policyV1beta1 } +// Deprecated: Policy retrieves the default version of PolicyClient. +// Please explicitly pick a version. +func (c *Clientset) Policy() policyv1beta1.PolicyV1beta1Interface { + return c.policyV1beta1 +} + // RbacV1 retrieves the RbacV1Client func (c *Clientset) RbacV1() rbacv1.RbacV1Interface { return c.rbacV1 } +// Deprecated: Rbac retrieves the default version of RbacClient. +// Please explicitly pick a version. +func (c *Clientset) Rbac() rbacv1.RbacV1Interface { + return c.rbacV1 +} + // RbacV1beta1 retrieves the RbacV1beta1Client func (c *Clientset) RbacV1beta1() rbacv1beta1.RbacV1beta1Interface { return c.rbacV1beta1 @@ -297,9 +380,10 @@ func (c *Clientset) SchedulingV1beta1() schedulingv1beta1.SchedulingV1beta1Inter return c.schedulingV1beta1 } -// SchedulingV1 retrieves the SchedulingV1Client -func (c *Clientset) SchedulingV1() schedulingv1.SchedulingV1Interface { - return c.schedulingV1 +// Deprecated: Scheduling retrieves the default version of SchedulingClient. +// Please explicitly pick a version. +func (c *Clientset) Scheduling() schedulingv1beta1.SchedulingV1beta1Interface { + return c.schedulingV1beta1 } // SettingsV1alpha1 retrieves the SettingsV1alpha1Client @@ -307,6 +391,12 @@ func (c *Clientset) SettingsV1alpha1() settingsv1alpha1.SettingsV1alpha1Interfac return c.settingsV1alpha1 } +// Deprecated: Settings retrieves the default version of SettingsClient. +// Please explicitly pick a version. +func (c *Clientset) Settings() settingsv1alpha1.SettingsV1alpha1Interface { + return c.settingsV1alpha1 +} + // StorageV1beta1 retrieves the StorageV1beta1Client func (c *Clientset) StorageV1beta1() storagev1beta1.StorageV1beta1Interface { return c.storageV1beta1 @@ -317,6 +407,12 @@ func (c *Clientset) StorageV1() storagev1.StorageV1Interface { return c.storageV1 } +// Deprecated: Storage retrieves the default version of StorageClient. +// Please explicitly pick a version. 
+func (c *Clientset) Storage() storagev1.StorageV1Interface { + return c.storageV1 +} + // StorageV1alpha1 retrieves the StorageV1alpha1Client func (c *Clientset) StorageV1alpha1() storagev1alpha1.StorageV1alpha1Interface { return c.storageV1alpha1 @@ -338,11 +434,11 @@ func NewForConfig(c *rest.Config) (*Clientset, error) { } var cs Clientset var err error - cs.admissionregistrationV1beta1, err = admissionregistrationv1beta1.NewForConfig(&configShallowCopy) + cs.admissionregistrationV1alpha1, err = admissionregistrationv1alpha1.NewForConfig(&configShallowCopy) if err != nil { return nil, err } - cs.appsV1, err = appsv1.NewForConfig(&configShallowCopy) + cs.admissionregistrationV1beta1, err = admissionregistrationv1beta1.NewForConfig(&configShallowCopy) if err != nil { return nil, err } @@ -354,7 +450,7 @@ func NewForConfig(c *rest.Config) (*Clientset, error) { if err != nil { return nil, err } - cs.auditregistrationV1alpha1, err = auditregistrationv1alpha1.NewForConfig(&configShallowCopy) + cs.appsV1, err = appsv1.NewForConfig(&configShallowCopy) if err != nil { return nil, err } @@ -406,10 +502,6 @@ func NewForConfig(c *rest.Config) (*Clientset, error) { if err != nil { return nil, err } - cs.coordinationV1, err = coordinationv1.NewForConfig(&configShallowCopy) - if err != nil { - return nil, err - } cs.coreV1, err = corev1.NewForConfig(&configShallowCopy) if err != nil { return nil, err @@ -426,18 +518,6 @@ func NewForConfig(c *rest.Config) (*Clientset, error) { if err != nil { return nil, err } - cs.networkingV1beta1, err = networkingv1beta1.NewForConfig(&configShallowCopy) - if err != nil { - return nil, err - } - cs.nodeV1alpha1, err = nodev1alpha1.NewForConfig(&configShallowCopy) - if err != nil { - return nil, err - } - cs.nodeV1beta1, err = nodev1beta1.NewForConfig(&configShallowCopy) - if err != nil { - return nil, err - } cs.policyV1beta1, err = policyv1beta1.NewForConfig(&configShallowCopy) if err != nil { return nil, err @@ -462,10 +542,6 @@ func NewForConfig(c *rest.Config) (*Clientset, error) { if err != nil { return nil, err } - cs.schedulingV1, err = schedulingv1.NewForConfig(&configShallowCopy) - if err != nil { - return nil, err - } cs.settingsV1alpha1, err = settingsv1alpha1.NewForConfig(&configShallowCopy) if err != nil { return nil, err @@ -494,11 +570,11 @@ func NewForConfig(c *rest.Config) (*Clientset, error) { // panics if there is an error in the config. 
func NewForConfigOrDie(c *rest.Config) *Clientset { var cs Clientset + cs.admissionregistrationV1alpha1 = admissionregistrationv1alpha1.NewForConfigOrDie(c) cs.admissionregistrationV1beta1 = admissionregistrationv1beta1.NewForConfigOrDie(c) - cs.appsV1 = appsv1.NewForConfigOrDie(c) cs.appsV1beta1 = appsv1beta1.NewForConfigOrDie(c) cs.appsV1beta2 = appsv1beta2.NewForConfigOrDie(c) - cs.auditregistrationV1alpha1 = auditregistrationv1alpha1.NewForConfigOrDie(c) + cs.appsV1 = appsv1.NewForConfigOrDie(c) cs.authenticationV1 = authenticationv1.NewForConfigOrDie(c) cs.authenticationV1beta1 = authenticationv1beta1.NewForConfigOrDie(c) cs.authorizationV1 = authorizationv1.NewForConfigOrDie(c) @@ -511,21 +587,16 @@ func NewForConfigOrDie(c *rest.Config) *Clientset { cs.batchV2alpha1 = batchv2alpha1.NewForConfigOrDie(c) cs.certificatesV1beta1 = certificatesv1beta1.NewForConfigOrDie(c) cs.coordinationV1beta1 = coordinationv1beta1.NewForConfigOrDie(c) - cs.coordinationV1 = coordinationv1.NewForConfigOrDie(c) cs.coreV1 = corev1.NewForConfigOrDie(c) cs.eventsV1beta1 = eventsv1beta1.NewForConfigOrDie(c) cs.extensionsV1beta1 = extensionsv1beta1.NewForConfigOrDie(c) cs.networkingV1 = networkingv1.NewForConfigOrDie(c) - cs.networkingV1beta1 = networkingv1beta1.NewForConfigOrDie(c) - cs.nodeV1alpha1 = nodev1alpha1.NewForConfigOrDie(c) - cs.nodeV1beta1 = nodev1beta1.NewForConfigOrDie(c) cs.policyV1beta1 = policyv1beta1.NewForConfigOrDie(c) cs.rbacV1 = rbacv1.NewForConfigOrDie(c) cs.rbacV1beta1 = rbacv1beta1.NewForConfigOrDie(c) cs.rbacV1alpha1 = rbacv1alpha1.NewForConfigOrDie(c) cs.schedulingV1alpha1 = schedulingv1alpha1.NewForConfigOrDie(c) cs.schedulingV1beta1 = schedulingv1beta1.NewForConfigOrDie(c) - cs.schedulingV1 = schedulingv1.NewForConfigOrDie(c) cs.settingsV1alpha1 = settingsv1alpha1.NewForConfigOrDie(c) cs.storageV1beta1 = storagev1beta1.NewForConfigOrDie(c) cs.storageV1 = storagev1.NewForConfigOrDie(c) @@ -538,11 +609,11 @@ func NewForConfigOrDie(c *rest.Config) *Clientset { // New creates a new Clientset for the given RESTClient. 
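// Aside: NewForConfig, NewForConfigOrDie, and New are the three constructors
// generated for every clientset: error-returning, panicking, and wrapping an
// already-built rest.Interface. A minimal sketch of the usual call site; the
// kubeconfig path and use of clientcmd are illustrative assumptions:
//
//	config, err := clientcmd.BuildConfigFromFlags("", "/home/user/.kube/config")
//	if err != nil {
//		return err
//	}
//	cs, err := kubernetes.NewForConfig(config) // prefer the error-returning form in libraries
//	if err != nil {
//		return err
//	}
//	_ = cs
//	// kubernetes.NewForConfigOrDie(config) panics on a bad config (fine in main()),
//	// and kubernetes.New(restClient) wraps an existing rest.Interface directly.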
func New(c rest.Interface) *Clientset { var cs Clientset + cs.admissionregistrationV1alpha1 = admissionregistrationv1alpha1.New(c) cs.admissionregistrationV1beta1 = admissionregistrationv1beta1.New(c) - cs.appsV1 = appsv1.New(c) cs.appsV1beta1 = appsv1beta1.New(c) cs.appsV1beta2 = appsv1beta2.New(c) - cs.auditregistrationV1alpha1 = auditregistrationv1alpha1.New(c) + cs.appsV1 = appsv1.New(c) cs.authenticationV1 = authenticationv1.New(c) cs.authenticationV1beta1 = authenticationv1beta1.New(c) cs.authorizationV1 = authorizationv1.New(c) @@ -555,21 +626,16 @@ func New(c rest.Interface) *Clientset { cs.batchV2alpha1 = batchv2alpha1.New(c) cs.certificatesV1beta1 = certificatesv1beta1.New(c) cs.coordinationV1beta1 = coordinationv1beta1.New(c) - cs.coordinationV1 = coordinationv1.New(c) cs.coreV1 = corev1.New(c) cs.eventsV1beta1 = eventsv1beta1.New(c) cs.extensionsV1beta1 = extensionsv1beta1.New(c) cs.networkingV1 = networkingv1.New(c) - cs.networkingV1beta1 = networkingv1beta1.New(c) - cs.nodeV1alpha1 = nodev1alpha1.New(c) - cs.nodeV1beta1 = nodev1beta1.New(c) cs.policyV1beta1 = policyv1beta1.New(c) cs.rbacV1 = rbacv1.New(c) cs.rbacV1beta1 = rbacv1beta1.New(c) cs.rbacV1alpha1 = rbacv1alpha1.New(c) cs.schedulingV1alpha1 = schedulingv1alpha1.New(c) cs.schedulingV1beta1 = schedulingv1beta1.New(c) - cs.schedulingV1 = schedulingv1.New(c) cs.settingsV1alpha1 = settingsv1alpha1.New(c) cs.storageV1beta1 = storagev1beta1.New(c) cs.storageV1 = storagev1.New(c) diff --git a/vendor/k8s.io/client-go/kubernetes/scheme/register.go b/vendor/k8s.io/client-go/kubernetes/scheme/register.go index 8346d26a5..9ca89b76e 100644 --- a/vendor/k8s.io/client-go/kubernetes/scheme/register.go +++ b/vendor/k8s.io/client-go/kubernetes/scheme/register.go @@ -19,11 +19,11 @@ limitations under the License. 
package scheme import ( + admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1" admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1" appsv1 "k8s.io/api/apps/v1" appsv1beta1 "k8s.io/api/apps/v1beta1" appsv1beta2 "k8s.io/api/apps/v1beta2" - auditregistrationv1alpha1 "k8s.io/api/auditregistration/v1alpha1" authenticationv1 "k8s.io/api/authentication/v1" authenticationv1beta1 "k8s.io/api/authentication/v1beta1" authorizationv1 "k8s.io/api/authorization/v1" @@ -35,20 +35,15 @@ import ( batchv1beta1 "k8s.io/api/batch/v1beta1" batchv2alpha1 "k8s.io/api/batch/v2alpha1" certificatesv1beta1 "k8s.io/api/certificates/v1beta1" - coordinationv1 "k8s.io/api/coordination/v1" coordinationv1beta1 "k8s.io/api/coordination/v1beta1" corev1 "k8s.io/api/core/v1" eventsv1beta1 "k8s.io/api/events/v1beta1" extensionsv1beta1 "k8s.io/api/extensions/v1beta1" networkingv1 "k8s.io/api/networking/v1" - networkingv1beta1 "k8s.io/api/networking/v1beta1" - nodev1alpha1 "k8s.io/api/node/v1alpha1" - nodev1beta1 "k8s.io/api/node/v1beta1" policyv1beta1 "k8s.io/api/policy/v1beta1" rbacv1 "k8s.io/api/rbac/v1" rbacv1alpha1 "k8s.io/api/rbac/v1alpha1" rbacv1beta1 "k8s.io/api/rbac/v1beta1" - schedulingv1 "k8s.io/api/scheduling/v1" schedulingv1alpha1 "k8s.io/api/scheduling/v1alpha1" schedulingv1beta1 "k8s.io/api/scheduling/v1beta1" settingsv1alpha1 "k8s.io/api/settings/v1alpha1" @@ -66,11 +61,11 @@ var Scheme = runtime.NewScheme() var Codecs = serializer.NewCodecFactory(Scheme) var ParameterCodec = runtime.NewParameterCodec(Scheme) var localSchemeBuilder = runtime.SchemeBuilder{ + admissionregistrationv1alpha1.AddToScheme, admissionregistrationv1beta1.AddToScheme, - appsv1.AddToScheme, appsv1beta1.AddToScheme, appsv1beta2.AddToScheme, - auditregistrationv1alpha1.AddToScheme, + appsv1.AddToScheme, authenticationv1.AddToScheme, authenticationv1beta1.AddToScheme, authorizationv1.AddToScheme, @@ -83,21 +78,16 @@ var localSchemeBuilder = runtime.SchemeBuilder{ batchv2alpha1.AddToScheme, certificatesv1beta1.AddToScheme, coordinationv1beta1.AddToScheme, - coordinationv1.AddToScheme, corev1.AddToScheme, eventsv1beta1.AddToScheme, extensionsv1beta1.AddToScheme, networkingv1.AddToScheme, - networkingv1beta1.AddToScheme, - nodev1alpha1.AddToScheme, - nodev1beta1.AddToScheme, policyv1beta1.AddToScheme, rbacv1.AddToScheme, rbacv1beta1.AddToScheme, rbacv1alpha1.AddToScheme, schedulingv1alpha1.AddToScheme, schedulingv1beta1.AddToScheme, - schedulingv1.AddToScheme, settingsv1alpha1.AddToScheme, storagev1beta1.AddToScheme, storagev1.AddToScheme, diff --git a/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/auditregistration_client.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/admissionregistration_client.go similarity index 54% rename from vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/auditregistration_client.go rename to vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/admissionregistration_client.go index ec63179ea..5e02f7227 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/auditregistration_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/admissionregistration_client.go @@ -19,27 +19,28 @@ limitations under the License. 
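// Aside: register.go above is where each group/version's AddToScheme lands in
// localSchemeBuilder, and custom API groups register the same way. A minimal
// sketch, assuming a generated v1alpha1 package; the triggers import path is
// an illustrative assumption, not something this file references:
//
//	import (
//		corev1 "k8s.io/api/core/v1"
//		"k8s.io/apimachinery/pkg/runtime"
//		triggersv1alpha1 "github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1"
//	)
//
//	scheme := runtime.NewScheme()
//	builder := runtime.SchemeBuilder{
//		corev1.AddToScheme,          // built-in types this controller touches
//		triggersv1alpha1.AddToScheme, // assumed generated AddToScheme for the CRD types
//	}
//	if err := builder.AddToScheme(scheme); err != nil {
//		panic(err)
//	}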
package v1alpha1 import ( - v1alpha1 "k8s.io/api/auditregistration/v1alpha1" + v1alpha1 "k8s.io/api/admissionregistration/v1alpha1" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) -type AuditregistrationV1alpha1Interface interface { +type AdmissionregistrationV1alpha1Interface interface { RESTClient() rest.Interface - AuditSinksGetter + InitializerConfigurationsGetter } -// AuditregistrationV1alpha1Client is used to interact with features provided by the auditregistration.k8s.io group. -type AuditregistrationV1alpha1Client struct { +// AdmissionregistrationV1alpha1Client is used to interact with features provided by the admissionregistration.k8s.io group. +type AdmissionregistrationV1alpha1Client struct { restClient rest.Interface } -func (c *AuditregistrationV1alpha1Client) AuditSinks() AuditSinkInterface { - return newAuditSinks(c) +func (c *AdmissionregistrationV1alpha1Client) InitializerConfigurations() InitializerConfigurationInterface { + return newInitializerConfigurations(c) } -// NewForConfig creates a new AuditregistrationV1alpha1Client for the given config. -func NewForConfig(c *rest.Config) (*AuditregistrationV1alpha1Client, error) { +// NewForConfig creates a new AdmissionregistrationV1alpha1Client for the given config. +func NewForConfig(c *rest.Config) (*AdmissionregistrationV1alpha1Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err @@ -48,12 +49,12 @@ func NewForConfig(c *rest.Config) (*AuditregistrationV1alpha1Client, error) { if err != nil { return nil, err } - return &AuditregistrationV1alpha1Client{client}, nil + return &AdmissionregistrationV1alpha1Client{client}, nil } -// NewForConfigOrDie creates a new AuditregistrationV1alpha1Client for the given config and +// NewForConfigOrDie creates a new AdmissionregistrationV1alpha1Client for the given config and // panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *AuditregistrationV1alpha1Client { +func NewForConfigOrDie(c *rest.Config) *AdmissionregistrationV1alpha1Client { client, err := NewForConfig(c) if err != nil { panic(err) @@ -61,16 +62,16 @@ func NewForConfigOrDie(c *rest.Config) *AuditregistrationV1alpha1Client { return client } -// New creates a new AuditregistrationV1alpha1Client for the given RESTClient. -func New(c rest.Interface) *AuditregistrationV1alpha1Client { - return &AuditregistrationV1alpha1Client{c} +// New creates a new AdmissionregistrationV1alpha1Client for the given RESTClient. +func New(c rest.Interface) *AdmissionregistrationV1alpha1Client { + return &AdmissionregistrationV1alpha1Client{c} } func setConfigDefaults(config *rest.Config) error { gv := v1alpha1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() @@ -81,7 +82,7 @@ func setConfigDefaults(config *rest.Config) error { // RESTClient returns a RESTClient that is used to communicate // with API server by this client implementation. 
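// Aside: every typed group client in this snapshot applies the same defaults
// seen in setConfigDefaults above. Condensed, the generated pattern is:
//
//	func setConfigDefaults(config *rest.Config) error {
//		gv := v1alpha1.SchemeGroupVersion
//		config.GroupVersion = &gv // pin every request to one group/version
//		config.APIPath = "/apis"  // non-core groups are served under /apis
//		config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs}
//		if config.UserAgent == "" {
//			config.UserAgent = rest.DefaultKubernetesUserAgent()
//		}
//		return nil
//	}
//
// The DirectCodecFactory form is specific to the client-go revision pinned by
// this patch; newer revisions use scheme.Codecs.WithoutConversion() instead,
// which is exactly the swap these hunks revert.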
-func (c *AuditregistrationV1alpha1Client) RESTClient() rest.Interface { +func (c *AdmissionregistrationV1alpha1Client) RESTClient() rest.Interface { if c == nil { return nil } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/doc.go similarity index 100% rename from vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/doc.go rename to vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/doc.go diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/generated_expansion.go similarity index 92% rename from vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/generated_expansion.go rename to vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/generated_expansion.go index fcef31d16..1e29b96f4 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/generated_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/generated_expansion.go @@ -18,4 +18,4 @@ limitations under the License. package v1alpha1 -type RuntimeClassExpansion interface{} +type InitializerConfigurationExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/initializerconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/initializerconfiguration.go new file mode 100644 index 000000000..e014ea72b --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/initializerconfiguration.go @@ -0,0 +1,147 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1alpha1 "k8s.io/api/admissionregistration/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// InitializerConfigurationsGetter has a method to return a InitializerConfigurationInterface. +// A group's client should implement this interface. +type InitializerConfigurationsGetter interface { + InitializerConfigurations() InitializerConfigurationInterface +} + +// InitializerConfigurationInterface has methods to work with InitializerConfiguration resources. 
+type InitializerConfigurationInterface interface { + Create(*v1alpha1.InitializerConfiguration) (*v1alpha1.InitializerConfiguration, error) + Update(*v1alpha1.InitializerConfiguration) (*v1alpha1.InitializerConfiguration, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1alpha1.InitializerConfiguration, error) + List(opts v1.ListOptions) (*v1alpha1.InitializerConfigurationList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.InitializerConfiguration, err error) + InitializerConfigurationExpansion +} + +// initializerConfigurations implements InitializerConfigurationInterface +type initializerConfigurations struct { + client rest.Interface +} + +// newInitializerConfigurations returns a InitializerConfigurations +func newInitializerConfigurations(c *AdmissionregistrationV1alpha1Client) *initializerConfigurations { + return &initializerConfigurations{ + client: c.RESTClient(), + } +} + +// Get takes name of the initializerConfiguration, and returns the corresponding initializerConfiguration object, and an error if there is any. +func (c *initializerConfigurations) Get(name string, options v1.GetOptions) (result *v1alpha1.InitializerConfiguration, err error) { + result = &v1alpha1.InitializerConfiguration{} + err = c.client.Get(). + Resource("initializerconfigurations"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of InitializerConfigurations that match those selectors. +func (c *initializerConfigurations) List(opts v1.ListOptions) (result *v1alpha1.InitializerConfigurationList, err error) { + result = &v1alpha1.InitializerConfigurationList{} + err = c.client.Get(). + Resource("initializerconfigurations"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested initializerConfigurations. +func (c *initializerConfigurations) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Resource("initializerconfigurations"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Create takes the representation of a initializerConfiguration and creates it. Returns the server's representation of the initializerConfiguration, and an error, if there is any. +func (c *initializerConfigurations) Create(initializerConfiguration *v1alpha1.InitializerConfiguration) (result *v1alpha1.InitializerConfiguration, err error) { + result = &v1alpha1.InitializerConfiguration{} + err = c.client.Post(). + Resource("initializerconfigurations"). + Body(initializerConfiguration). + Do(). + Into(result) + return +} + +// Update takes the representation of a initializerConfiguration and updates it. Returns the server's representation of the initializerConfiguration, and an error, if there is any. +func (c *initializerConfigurations) Update(initializerConfiguration *v1alpha1.InitializerConfiguration) (result *v1alpha1.InitializerConfiguration, err error) { + result = &v1alpha1.InitializerConfiguration{} + err = c.client.Put(). + Resource("initializerconfigurations"). + Name(initializerConfiguration.Name). + Body(initializerConfiguration). + Do(). 
+ Into(result) + return +} + +// Delete takes name of the initializerConfiguration and deletes it. Returns an error if one occurs. +func (c *initializerConfigurations) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Resource("initializerconfigurations"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *initializerConfigurations) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Resource("initializerconfigurations"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched initializerConfiguration. +func (c *initializerConfigurations) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.InitializerConfiguration, err error) { + result = &v1alpha1.InitializerConfiguration{} + err = c.client.Patch(pt). + Resource("initializerconfigurations"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/admissionregistration_client.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/admissionregistration_client.go index 2d93ff02e..b13ea7953 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/admissionregistration_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/admissionregistration_client.go @@ -20,6 +20,7 @@ package v1beta1 import ( v1beta1 "k8s.io/api/admissionregistration/v1beta1" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -75,7 +76,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1beta1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/mutatingwebhookconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/mutatingwebhookconfiguration.go index 4524896cd..cb0157102 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/mutatingwebhookconfiguration.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/mutatingwebhookconfiguration.go @@ -19,8 +19,6 @@ limitations under the License. package v1beta1 import ( - "time" - v1beta1 "k8s.io/api/admissionregistration/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -74,15 +72,10 @@ func (c *mutatingWebhookConfigurations) Get(name string, options v1.GetOptions) // List takes label and field selectors, and returns the list of MutatingWebhookConfigurations that match those selectors. func (c *mutatingWebhookConfigurations) List(opts v1.ListOptions) (result *v1beta1.MutatingWebhookConfigurationList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1beta1.MutatingWebhookConfigurationList{} err = c.client.Get(). Resource("mutatingwebhookconfigurations"). 
VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -90,15 +83,10 @@ func (c *mutatingWebhookConfigurations) List(opts v1.ListOptions) (result *v1bet // Watch returns a watch.Interface that watches the requested mutatingWebhookConfigurations. func (c *mutatingWebhookConfigurations) Watch(opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Resource("mutatingwebhookconfigurations"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -137,14 +125,9 @@ func (c *mutatingWebhookConfigurations) Delete(name string, options *v1.DeleteOp // DeleteCollection deletes a collection of objects. func (c *mutatingWebhookConfigurations) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Resource("mutatingwebhookconfigurations"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingwebhookconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingwebhookconfiguration.go index 7e711b300..3a9339f6c 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingwebhookconfiguration.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingwebhookconfiguration.go @@ -19,8 +19,6 @@ limitations under the License. package v1beta1 import ( - "time" - v1beta1 "k8s.io/api/admissionregistration/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -74,15 +72,10 @@ func (c *validatingWebhookConfigurations) Get(name string, options v1.GetOptions // List takes label and field selectors, and returns the list of ValidatingWebhookConfigurations that match those selectors. func (c *validatingWebhookConfigurations) List(opts v1.ListOptions) (result *v1beta1.ValidatingWebhookConfigurationList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1beta1.ValidatingWebhookConfigurationList{} err = c.client.Get(). Resource("validatingwebhookconfigurations"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -90,15 +83,10 @@ func (c *validatingWebhookConfigurations) List(opts v1.ListOptions) (result *v1b // Watch returns a watch.Interface that watches the requested validatingWebhookConfigurations. func (c *validatingWebhookConfigurations) Watch(opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Resource("validatingwebhookconfigurations"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -137,14 +125,9 @@ func (c *validatingWebhookConfigurations) Delete(name string, options *v1.Delete // DeleteCollection deletes a collection of objects. 
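// Aside: the hunks above drop the client-side deadline that newer client-go
// derives from ListOptions.TimeoutSeconds; in the revision pinned here the
// field still reaches the server as a query parameter via VersionedParams, it
// just no longer also bounds the local HTTP request. The removed conversion,
// for reference:
//
//	var timeout time.Duration
//	if opts.TimeoutSeconds != nil {
//		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
//	}
//	// ...followed by .Timeout(timeout) on the request builder.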
func (c *validatingWebhookConfigurations) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Resource("validatingwebhookconfigurations"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/apps_client.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/apps_client.go index 621c734af..da19c7596 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/apps_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/apps_client.go @@ -20,6 +20,7 @@ package v1 import ( v1 "k8s.io/api/apps/v1" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -90,7 +91,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/controllerrevision.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/controllerrevision.go index e28e4d2a3..1ddaa1a71 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/controllerrevision.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/controllerrevision.go @@ -19,8 +19,6 @@ limitations under the License. package v1 import ( - "time" - v1 "k8s.io/api/apps/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -77,16 +75,11 @@ func (c *controllerRevisions) Get(name string, options metav1.GetOptions) (resul // List takes label and field selectors, and returns the list of ControllerRevisions that match those selectors. func (c *controllerRevisions) List(opts metav1.ListOptions) (result *v1.ControllerRevisionList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1.ControllerRevisionList{} err = c.client.Get(). Namespace(c.ns). Resource("controllerrevisions"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -94,16 +87,11 @@ func (c *controllerRevisions) List(opts metav1.ListOptions) (result *v1.Controll // Watch returns a watch.Interface that watches the requested controllerRevisions. func (c *controllerRevisions) Watch(opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("controllerrevisions"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -145,15 +133,10 @@ func (c *controllerRevisions) Delete(name string, options *metav1.DeleteOptions) // DeleteCollection deletes a collection of objects. func (c *controllerRevisions) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Namespace(c.ns). 
Resource("controllerrevisions"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/daemonset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/daemonset.go index a535cdabe..03a870698 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/daemonset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/daemonset.go @@ -19,8 +19,6 @@ limitations under the License. package v1 import ( - "time" - v1 "k8s.io/api/apps/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -78,16 +76,11 @@ func (c *daemonSets) Get(name string, options metav1.GetOptions) (result *v1.Dae // List takes label and field selectors, and returns the list of DaemonSets that match those selectors. func (c *daemonSets) List(opts metav1.ListOptions) (result *v1.DaemonSetList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1.DaemonSetList{} err = c.client.Get(). Namespace(c.ns). Resource("daemonsets"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -95,16 +88,11 @@ func (c *daemonSets) List(opts metav1.ListOptions) (result *v1.DaemonSetList, er // Watch returns a watch.Interface that watches the requested daemonSets. func (c *daemonSets) Watch(opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("daemonsets"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -162,15 +150,10 @@ func (c *daemonSets) Delete(name string, options *metav1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *daemonSets) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Namespace(c.ns). Resource("daemonsets"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/deployment.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/deployment.go index f9799a453..73d46f8bb 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/deployment.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/deployment.go @@ -19,10 +19,7 @@ limitations under the License. 
package v1 import ( - "time" - v1 "k8s.io/api/apps/v1" - autoscalingv1 "k8s.io/api/autoscaling/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" @@ -47,9 +44,6 @@ type DeploymentInterface interface { List(opts metav1.ListOptions) (*v1.DeploymentList, error) Watch(opts metav1.ListOptions) (watch.Interface, error) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Deployment, err error) - GetScale(deploymentName string, options metav1.GetOptions) (*autoscalingv1.Scale, error) - UpdateScale(deploymentName string, scale *autoscalingv1.Scale) (*autoscalingv1.Scale, error) - DeploymentExpansion } @@ -82,16 +76,11 @@ func (c *deployments) Get(name string, options metav1.GetOptions) (result *v1.De // List takes label and field selectors, and returns the list of Deployments that match those selectors. func (c *deployments) List(opts metav1.ListOptions) (result *v1.DeploymentList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1.DeploymentList{} err = c.client.Get(). Namespace(c.ns). Resource("deployments"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -99,16 +88,11 @@ func (c *deployments) List(opts metav1.ListOptions) (result *v1.DeploymentList, // Watch returns a watch.Interface that watches the requested deployments. func (c *deployments) Watch(opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("deployments"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -166,15 +150,10 @@ func (c *deployments) Delete(name string, options *metav1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *deployments) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Namespace(c.ns). Resource("deployments"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() @@ -193,31 +172,3 @@ func (c *deployments) Patch(name string, pt types.PatchType, data []byte, subres Into(result) return } - -// GetScale takes name of the deployment, and returns the corresponding autoscalingv1.Scale object, and an error if there is any. -func (c *deployments) GetScale(deploymentName string, options metav1.GetOptions) (result *autoscalingv1.Scale, err error) { - result = &autoscalingv1.Scale{} - err = c.client.Get(). - Namespace(c.ns). - Resource("deployments"). - Name(deploymentName). - SubResource("scale"). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. -func (c *deployments) UpdateScale(deploymentName string, scale *autoscalingv1.Scale) (result *autoscalingv1.Scale, err error) { - result = &autoscalingv1.Scale{} - err = c.client.Put(). - Namespace(c.ns). - Resource("deployments"). - Name(deploymentName). - SubResource("scale"). - Body(scale). - Do(). 
-		Into(result)
-	return
-}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/replicaset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/replicaset.go
index ff3504e78..077941162 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/replicaset.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/replicaset.go
@@ -19,10 +19,7 @@ limitations under the License.
 package v1
 
 import (
-	"time"
-
 	v1 "k8s.io/api/apps/v1"
-	autoscalingv1 "k8s.io/api/autoscaling/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	types "k8s.io/apimachinery/pkg/types"
 	watch "k8s.io/apimachinery/pkg/watch"
@@ -47,9 +44,6 @@ type ReplicaSetInterface interface {
 	List(opts metav1.ListOptions) (*v1.ReplicaSetList, error)
 	Watch(opts metav1.ListOptions) (watch.Interface, error)
 	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ReplicaSet, err error)
-	GetScale(replicaSetName string, options metav1.GetOptions) (*autoscalingv1.Scale, error)
-	UpdateScale(replicaSetName string, scale *autoscalingv1.Scale) (*autoscalingv1.Scale, error)
-
 	ReplicaSetExpansion
 }
 
@@ -82,16 +76,11 @@ func (c *replicaSets) Get(name string, options metav1.GetOptions) (result *v1.Re
 
 // List takes label and field selectors, and returns the list of ReplicaSets that match those selectors.
 func (c *replicaSets) List(opts metav1.ListOptions) (result *v1.ReplicaSetList, err error) {
-	var timeout time.Duration
-	if opts.TimeoutSeconds != nil {
-		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
-	}
 	result = &v1.ReplicaSetList{}
 	err = c.client.Get().
 		Namespace(c.ns).
 		Resource("replicasets").
 		VersionedParams(&opts, scheme.ParameterCodec).
-		Timeout(timeout).
 		Do().
 		Into(result)
 	return
@@ -99,16 +88,11 @@ func (c *replicaSets) List(opts metav1.ListOptions) (result *v1.ReplicaSetList,
 
 // Watch returns a watch.Interface that watches the requested replicaSets.
 func (c *replicaSets) Watch(opts metav1.ListOptions) (watch.Interface, error) {
-	var timeout time.Duration
-	if opts.TimeoutSeconds != nil {
-		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
-	}
 	opts.Watch = true
 	return c.client.Get().
 		Namespace(c.ns).
 		Resource("replicasets").
 		VersionedParams(&opts, scheme.ParameterCodec).
-		Timeout(timeout).
 		Watch()
 }
 
@@ -166,15 +150,10 @@ func (c *replicaSets) Delete(name string, options *metav1.DeleteOptions) error {
 
 // DeleteCollection deletes a collection of objects.
 func (c *replicaSets) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error {
-	var timeout time.Duration
-	if listOptions.TimeoutSeconds != nil {
-		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
-	}
 	return c.client.Delete().
 		Namespace(c.ns).
 		Resource("replicasets").
 		VersionedParams(&listOptions, scheme.ParameterCodec).
-		Timeout(timeout).
 		Body(options).
 		Do().
 		Error()
@@ -193,31 +172,3 @@ func (c *replicaSets) Patch(name string, pt types.PatchType, data []byte, subres
 		Into(result)
 	return
 }
-
-// GetScale takes name of the replicaSet, and returns the corresponding autoscalingv1.Scale object, and an error if there is any.
-func (c *replicaSets) GetScale(replicaSetName string, options metav1.GetOptions) (result *autoscalingv1.Scale, err error) {
-	result = &autoscalingv1.Scale{}
-	err = c.client.Get().
-		Namespace(c.ns).
-		Resource("replicasets").
-		Name(replicaSetName).
-		SubResource("scale").
-		VersionedParams(&options, scheme.ParameterCodec).
-		Do().
-		Into(result)
-	return
-}
-
-// UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any.
-func (c *replicaSets) UpdateScale(replicaSetName string, scale *autoscalingv1.Scale) (result *autoscalingv1.Scale, err error) {
-	result = &autoscalingv1.Scale{}
-	err = c.client.Put().
-		Namespace(c.ns).
-		Resource("replicasets").
-		Name(replicaSetName).
-		SubResource("scale").
-		Body(scale).
-		Do().
-		Into(result)
-	return
-}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/statefulset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/statefulset.go
index c12c470bb..54322d97d 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/statefulset.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/statefulset.go
@@ -19,10 +19,7 @@ limitations under the License.
 package v1
 
 import (
-	"time"
-
 	v1 "k8s.io/api/apps/v1"
-	autoscalingv1 "k8s.io/api/autoscaling/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	types "k8s.io/apimachinery/pkg/types"
 	watch "k8s.io/apimachinery/pkg/watch"
@@ -47,9 +44,6 @@ type StatefulSetInterface interface {
 	List(opts metav1.ListOptions) (*v1.StatefulSetList, error)
 	Watch(opts metav1.ListOptions) (watch.Interface, error)
 	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.StatefulSet, err error)
-	GetScale(statefulSetName string, options metav1.GetOptions) (*autoscalingv1.Scale, error)
-	UpdateScale(statefulSetName string, scale *autoscalingv1.Scale) (*autoscalingv1.Scale, error)
-
 	StatefulSetExpansion
 }
 
@@ -82,16 +76,11 @@ func (c *statefulSets) Get(name string, options metav1.GetOptions) (result *v1.S
 
 // List takes label and field selectors, and returns the list of StatefulSets that match those selectors.
 func (c *statefulSets) List(opts metav1.ListOptions) (result *v1.StatefulSetList, err error) {
-	var timeout time.Duration
-	if opts.TimeoutSeconds != nil {
-		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
-	}
 	result = &v1.StatefulSetList{}
 	err = c.client.Get().
 		Namespace(c.ns).
 		Resource("statefulsets").
 		VersionedParams(&opts, scheme.ParameterCodec).
-		Timeout(timeout).
 		Do().
 		Into(result)
 	return
@@ -99,16 +88,11 @@ func (c *statefulSets) List(opts metav1.ListOptions) (result *v1.StatefulSetList
 
 // Watch returns a watch.Interface that watches the requested statefulSets.
 func (c *statefulSets) Watch(opts metav1.ListOptions) (watch.Interface, error) {
-	var timeout time.Duration
-	if opts.TimeoutSeconds != nil {
-		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
-	}
 	opts.Watch = true
 	return c.client.Get().
 		Namespace(c.ns).
 		Resource("statefulsets").
 		VersionedParams(&opts, scheme.ParameterCodec).
-		Timeout(timeout).
 		Watch()
 }
 
@@ -166,15 +150,10 @@ func (c *statefulSets) Delete(name string, options *metav1.DeleteOptions) error
 
 // DeleteCollection deletes a collection of objects.
 func (c *statefulSets) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error {
-	var timeout time.Duration
-	if listOptions.TimeoutSeconds != nil {
-		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
-	}
 	return c.client.Delete().
 		Namespace(c.ns).
 		Resource("statefulsets").
 		VersionedParams(&listOptions, scheme.ParameterCodec).
-		Timeout(timeout).
 		Body(options).
 		Do().
 		Error()
@@ -193,31 +172,3 @@ func (c *statefulSets) Patch(name string, pt types.PatchType, data []byte, subre
 		Into(result)
 	return
 }
-
-// GetScale takes name of the statefulSet, and returns the corresponding autoscalingv1.Scale object, and an error if there is any.
-func (c *statefulSets) GetScale(statefulSetName string, options metav1.GetOptions) (result *autoscalingv1.Scale, err error) { - result = &autoscalingv1.Scale{} - err = c.client.Get(). - Namespace(c.ns). - Resource("statefulsets"). - Name(statefulSetName). - SubResource("scale"). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. -func (c *statefulSets) UpdateScale(statefulSetName string, scale *autoscalingv1.Scale) (result *autoscalingv1.Scale, err error) { - result = &autoscalingv1.Scale{} - err = c.client.Put(). - Namespace(c.ns). - Resource("statefulsets"). - Name(statefulSetName). - SubResource("scale"). - Body(scale). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/apps_client.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/apps_client.go index e5dd64d98..4d882e26e 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/apps_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/apps_client.go @@ -20,6 +20,7 @@ package v1beta1 import ( v1beta1 "k8s.io/api/apps/v1beta1" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -28,6 +29,7 @@ type AppsV1beta1Interface interface { RESTClient() rest.Interface ControllerRevisionsGetter DeploymentsGetter + ScalesGetter StatefulSetsGetter } @@ -44,6 +46,10 @@ func (c *AppsV1beta1Client) Deployments(namespace string) DeploymentInterface { return newDeployments(c, namespace) } +func (c *AppsV1beta1Client) Scales(namespace string) ScaleInterface { + return newScales(c, namespace) +} + func (c *AppsV1beta1Client) StatefulSets(namespace string) StatefulSetInterface { return newStatefulSets(c, namespace) } @@ -80,7 +86,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1beta1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/controllerrevision.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/controllerrevision.go index 45ddb9159..ec8fa9242 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/controllerrevision.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/controllerrevision.go @@ -19,8 +19,6 @@ limitations under the License. package v1beta1 import ( - "time" - v1beta1 "k8s.io/api/apps/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -77,16 +75,11 @@ func (c *controllerRevisions) Get(name string, options v1.GetOptions) (result *v // List takes label and field selectors, and returns the list of ControllerRevisions that match those selectors. func (c *controllerRevisions) List(opts v1.ListOptions) (result *v1beta1.ControllerRevisionList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1beta1.ControllerRevisionList{} err = c.client.Get(). Namespace(c.ns). Resource("controllerrevisions"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). 
Into(result) return @@ -94,16 +87,11 @@ func (c *controllerRevisions) List(opts v1.ListOptions) (result *v1beta1.Control // Watch returns a watch.Interface that watches the requested controllerRevisions. func (c *controllerRevisions) Watch(opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("controllerrevisions"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -145,15 +133,10 @@ func (c *controllerRevisions) Delete(name string, options *v1.DeleteOptions) err // DeleteCollection deletes a collection of objects. func (c *controllerRevisions) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Namespace(c.ns). Resource("controllerrevisions"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/deployment.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/deployment.go index 05fdcb7a6..365e06f3f 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/deployment.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/deployment.go @@ -19,8 +19,6 @@ limitations under the License. package v1beta1 import ( - "time" - v1beta1 "k8s.io/api/apps/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -78,16 +76,11 @@ func (c *deployments) Get(name string, options v1.GetOptions) (result *v1beta1.D // List takes label and field selectors, and returns the list of Deployments that match those selectors. func (c *deployments) List(opts v1.ListOptions) (result *v1beta1.DeploymentList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1beta1.DeploymentList{} err = c.client.Get(). Namespace(c.ns). Resource("deployments"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -95,16 +88,11 @@ func (c *deployments) List(opts v1.ListOptions) (result *v1beta1.DeploymentList, // Watch returns a watch.Interface that watches the requested deployments. func (c *deployments) Watch(opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("deployments"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -162,15 +150,10 @@ func (c *deployments) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *deployments) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Namespace(c.ns). Resource("deployments"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). 
Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/generated_expansion.go index 113455df2..b2bfd73a7 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/generated_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/generated_expansion.go @@ -22,4 +22,6 @@ type ControllerRevisionExpansion interface{} type DeploymentExpansion interface{} +type ScaleExpansion interface{} + type StatefulSetExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/scale.go similarity index 51% rename from vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/generated_expansion.go rename to vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/scale.go index 669dd0282..cef27bd14 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/generated_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/scale.go @@ -18,4 +18,31 @@ limitations under the License. package v1beta1 -type RuntimeClassExpansion interface{} +import ( + rest "k8s.io/client-go/rest" +) + +// ScalesGetter has a method to return a ScaleInterface. +// A group's client should implement this interface. +type ScalesGetter interface { + Scales(namespace string) ScaleInterface +} + +// ScaleInterface has methods to work with Scale resources. +type ScaleInterface interface { + ScaleExpansion +} + +// scales implements ScaleInterface +type scales struct { + client rest.Interface + ns string +} + +// newScales returns a Scales +func newScales(c *AppsV1beta1Client, namespace string) *scales { + return &scales{ + client: c.RESTClient(), + ns: namespace, + } +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/statefulset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/statefulset.go index c4b35b424..651745451 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/statefulset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/statefulset.go @@ -19,8 +19,6 @@ limitations under the License. package v1beta1 import ( - "time" - v1beta1 "k8s.io/api/apps/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -78,16 +76,11 @@ func (c *statefulSets) Get(name string, options v1.GetOptions) (result *v1beta1. // List takes label and field selectors, and returns the list of StatefulSets that match those selectors. func (c *statefulSets) List(opts v1.ListOptions) (result *v1beta1.StatefulSetList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1beta1.StatefulSetList{} err = c.client.Get(). Namespace(c.ns). Resource("statefulsets"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -95,16 +88,11 @@ func (c *statefulSets) List(opts v1.ListOptions) (result *v1beta1.StatefulSetLis // Watch returns a watch.Interface that watches the requested statefulSets. func (c *statefulSets) Watch(opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("statefulsets"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). 
Watch() } @@ -162,15 +150,10 @@ func (c *statefulSets) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *statefulSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Namespace(c.ns). Resource("statefulsets"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/apps_client.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/apps_client.go index 7ca4e0b20..27549499f 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/apps_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/apps_client.go @@ -20,6 +20,7 @@ package v1beta2 import ( v1beta2 "k8s.io/api/apps/v1beta2" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -30,6 +31,7 @@ type AppsV1beta2Interface interface { DaemonSetsGetter DeploymentsGetter ReplicaSetsGetter + ScalesGetter StatefulSetsGetter } @@ -54,6 +56,10 @@ func (c *AppsV1beta2Client) ReplicaSets(namespace string) ReplicaSetInterface { return newReplicaSets(c, namespace) } +func (c *AppsV1beta2Client) Scales(namespace string) ScaleInterface { + return newScales(c, namespace) +} + func (c *AppsV1beta2Client) StatefulSets(namespace string) StatefulSetInterface { return newStatefulSets(c, namespace) } @@ -90,7 +96,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1beta2.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/controllerrevision.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/controllerrevision.go index e1d602515..1271cc623 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/controllerrevision.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/controllerrevision.go @@ -19,8 +19,6 @@ limitations under the License. package v1beta2 import ( - "time" - v1beta2 "k8s.io/api/apps/v1beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -77,16 +75,11 @@ func (c *controllerRevisions) Get(name string, options v1.GetOptions) (result *v // List takes label and field selectors, and returns the list of ControllerRevisions that match those selectors. func (c *controllerRevisions) List(opts v1.ListOptions) (result *v1beta2.ControllerRevisionList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1beta2.ControllerRevisionList{} err = c.client.Get(). Namespace(c.ns). Resource("controllerrevisions"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -94,16 +87,11 @@ func (c *controllerRevisions) List(opts v1.ListOptions) (result *v1beta2.Control // Watch returns a watch.Interface that watches the requested controllerRevisions. 
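Starting with the v1beta1 apps_client.go above, and in every *_client.go that follows, setConfigDefaults swaps scheme.Codecs.WithoutConversion() back to serializer.DirectCodecFactory{CodecFactory: scheme.Codecs}. Both spellings configure "direct" codecs that serve the external API types without internal-version conversion; DirectCodecFactory is the older name, deprecated when client-go 1.14 introduced the WithoutConversion() helper. A sketch of the mapping (the old spelling is what this vendor snapshot compiles against):

import (
    serializer "k8s.io/apimachinery/pkg/runtime/serializer"
    "k8s.io/client-go/kubernetes/scheme"
    rest "k8s.io/client-go/rest"
)

// setDirectCodecs mirrors the generated setConfigDefaults in this snapshot.
func setDirectCodecs(config *rest.Config) {
    // Pre-1.14 spelling; scheme.Codecs.WithoutConversion() is the
    // equivalent helper in newer client-go.
    config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs}
}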
func (c *controllerRevisions) Watch(opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("controllerrevisions"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -145,15 +133,10 @@ func (c *controllerRevisions) Delete(name string, options *v1.DeleteOptions) err // DeleteCollection deletes a collection of objects. func (c *controllerRevisions) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Namespace(c.ns). Resource("controllerrevisions"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/daemonset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/daemonset.go index f8b7ac259..683c06812 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/daemonset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/daemonset.go @@ -19,8 +19,6 @@ limitations under the License. package v1beta2 import ( - "time" - v1beta2 "k8s.io/api/apps/v1beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -78,16 +76,11 @@ func (c *daemonSets) Get(name string, options v1.GetOptions) (result *v1beta2.Da // List takes label and field selectors, and returns the list of DaemonSets that match those selectors. func (c *daemonSets) List(opts v1.ListOptions) (result *v1beta2.DaemonSetList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1beta2.DaemonSetList{} err = c.client.Get(). Namespace(c.ns). Resource("daemonsets"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -95,16 +88,11 @@ func (c *daemonSets) List(opts v1.ListOptions) (result *v1beta2.DaemonSetList, e // Watch returns a watch.Interface that watches the requested daemonSets. func (c *daemonSets) Watch(opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("daemonsets"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -162,15 +150,10 @@ func (c *daemonSets) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *daemonSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Namespace(c.ns). Resource("daemonsets"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). 
Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/deployment.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/deployment.go index 510250b06..9a04513f1 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/deployment.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/deployment.go @@ -19,8 +19,6 @@ limitations under the License. package v1beta2 import ( - "time" - v1beta2 "k8s.io/api/apps/v1beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -78,16 +76,11 @@ func (c *deployments) Get(name string, options v1.GetOptions) (result *v1beta2.D // List takes label and field selectors, and returns the list of Deployments that match those selectors. func (c *deployments) List(opts v1.ListOptions) (result *v1beta2.DeploymentList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1beta2.DeploymentList{} err = c.client.Get(). Namespace(c.ns). Resource("deployments"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -95,16 +88,11 @@ func (c *deployments) List(opts v1.ListOptions) (result *v1beta2.DeploymentList, // Watch returns a watch.Interface that watches the requested deployments. func (c *deployments) Watch(opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("deployments"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -162,15 +150,10 @@ func (c *deployments) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *deployments) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Namespace(c.ns). Resource("deployments"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/generated_expansion.go index 6a2174968..bceae5986 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/generated_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/generated_expansion.go @@ -26,4 +26,6 @@ type DeploymentExpansion interface{} type ReplicaSetExpansion interface{} +type ScaleExpansion interface{} + type StatefulSetExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/replicaset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/replicaset.go index 7b738774b..9fd9de930 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/replicaset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/replicaset.go @@ -19,8 +19,6 @@ limitations under the License. package v1beta2 import ( - "time" - v1beta2 "k8s.io/api/apps/v1beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -78,16 +76,11 @@ func (c *replicaSets) Get(name string, options v1.GetOptions) (result *v1beta2.R // List takes label and field selectors, and returns the list of ReplicaSets that match those selectors. 
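The deletion repeated in every List, Watch, and DeleteCollection in this patch is the client-side timeout plumbing: newer generated clients turn ListOptions.TimeoutSeconds into a Timeout(...) call on the request, while the older generation vendored here does not. TimeoutSeconds is still encoded into the request URL by VersionedParams, so the API server keeps enforcing it; only the matching local HTTP deadline disappears. A caller-side sketch of restoring a client-side bound globally via rest.Config (real client-go fields; the kubeconfig path is illustrative):

import (
    "time"

    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/tools/clientcmd"
)

func newBoundedClientset() (kubernetes.Interface, error) {
    cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
    if err != nil {
        return nil, err
    }
    // A global deadline for every request made through this config.
    // Note it also bounds long-running watches, so choose generously.
    cfg.Timeout = 30 * time.Second
    return kubernetes.NewForConfig(cfg)
}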
func (c *replicaSets) List(opts v1.ListOptions) (result *v1beta2.ReplicaSetList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1beta2.ReplicaSetList{} err = c.client.Get(). Namespace(c.ns). Resource("replicasets"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -95,16 +88,11 @@ func (c *replicaSets) List(opts v1.ListOptions) (result *v1beta2.ReplicaSetList, // Watch returns a watch.Interface that watches the requested replicaSets. func (c *replicaSets) Watch(opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("replicasets"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -162,15 +150,10 @@ func (c *replicaSets) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *replicaSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Namespace(c.ns). Resource("replicasets"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/scale.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/scale.go new file mode 100644 index 000000000..f8d6a7fb0 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/scale.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta2 + +import ( + rest "k8s.io/client-go/rest" +) + +// ScalesGetter has a method to return a ScaleInterface. +// A group's client should implement this interface. +type ScalesGetter interface { + Scales(namespace string) ScaleInterface +} + +// ScaleInterface has methods to work with Scale resources. +type ScaleInterface interface { + ScaleExpansion +} + +// scales implements ScaleInterface +type scales struct { + client rest.Interface + ns string +} + +// newScales returns a Scales +func newScales(c *AppsV1beta2Client, namespace string) *scales { + return &scales{ + client: c.RESTClient(), + ns: namespace, + } +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/statefulset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/statefulset.go index de7c3db8b..095601e15 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/statefulset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/statefulset.go @@ -19,8 +19,6 @@ limitations under the License. 
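The scale.go files introduced for apps/v1beta1 and apps/v1beta2 (the first via a rename that is only git's similarity heuristic pairing two small files) are deliberately thin: ScaleInterface embeds nothing but ScaleExpansion, an empty interface declared in generated_expansion.go. This is client-gen's expansion pattern: hand-written methods can be attached to a generated client from a separate file in the same package, in which case the empty declaration is omitted from generated_expansion.go. A hypothetical illustration (the Update method is invented for the example; this patch attaches no methods):

package v1beta2 // hand-written file alongside the generated client

import v1beta2 "k8s.io/api/apps/v1beta2"

// ScaleExpansion widens the generated client interface; the generated
// struct (*scales) would then implement it in this same file.
type ScaleExpansion interface {
    Update(kind string, scale *v1beta2.Scale) (*v1beta2.Scale, error)
}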
package v1beta2 import ( - "time" - v1beta2 "k8s.io/api/apps/v1beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -81,16 +79,11 @@ func (c *statefulSets) Get(name string, options v1.GetOptions) (result *v1beta2. // List takes label and field selectors, and returns the list of StatefulSets that match those selectors. func (c *statefulSets) List(opts v1.ListOptions) (result *v1beta2.StatefulSetList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1beta2.StatefulSetList{} err = c.client.Get(). Namespace(c.ns). Resource("statefulsets"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -98,16 +91,11 @@ func (c *statefulSets) List(opts v1.ListOptions) (result *v1beta2.StatefulSetLis // Watch returns a watch.Interface that watches the requested statefulSets. func (c *statefulSets) Watch(opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("statefulsets"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -165,15 +153,10 @@ func (c *statefulSets) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *statefulSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Namespace(c.ns). Resource("statefulsets"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/auditsink.go b/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/auditsink.go deleted file mode 100644 index 414d48006..000000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/auditsink.go +++ /dev/null @@ -1,164 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - "time" - - v1alpha1 "k8s.io/api/auditregistration/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// AuditSinksGetter has a method to return a AuditSinkInterface. -// A group's client should implement this interface. -type AuditSinksGetter interface { - AuditSinks() AuditSinkInterface -} - -// AuditSinkInterface has methods to work with AuditSink resources. 
-type AuditSinkInterface interface { - Create(*v1alpha1.AuditSink) (*v1alpha1.AuditSink, error) - Update(*v1alpha1.AuditSink) (*v1alpha1.AuditSink, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1alpha1.AuditSink, error) - List(opts v1.ListOptions) (*v1alpha1.AuditSinkList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.AuditSink, err error) - AuditSinkExpansion -} - -// auditSinks implements AuditSinkInterface -type auditSinks struct { - client rest.Interface -} - -// newAuditSinks returns a AuditSinks -func newAuditSinks(c *AuditregistrationV1alpha1Client) *auditSinks { - return &auditSinks{ - client: c.RESTClient(), - } -} - -// Get takes name of the auditSink, and returns the corresponding auditSink object, and an error if there is any. -func (c *auditSinks) Get(name string, options v1.GetOptions) (result *v1alpha1.AuditSink, err error) { - result = &v1alpha1.AuditSink{} - err = c.client.Get(). - Resource("auditsinks"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of AuditSinks that match those selectors. -func (c *auditSinks) List(opts v1.ListOptions) (result *v1alpha1.AuditSinkList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.AuditSinkList{} - err = c.client.Get(). - Resource("auditsinks"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested auditSinks. -func (c *auditSinks) Watch(opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("auditsinks"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch() -} - -// Create takes the representation of a auditSink and creates it. Returns the server's representation of the auditSink, and an error, if there is any. -func (c *auditSinks) Create(auditSink *v1alpha1.AuditSink) (result *v1alpha1.AuditSink, err error) { - result = &v1alpha1.AuditSink{} - err = c.client.Post(). - Resource("auditsinks"). - Body(auditSink). - Do(). - Into(result) - return -} - -// Update takes the representation of a auditSink and updates it. Returns the server's representation of the auditSink, and an error, if there is any. -func (c *auditSinks) Update(auditSink *v1alpha1.AuditSink) (result *v1alpha1.AuditSink, err error) { - result = &v1alpha1.AuditSink{} - err = c.client.Put(). - Resource("auditsinks"). - Name(auditSink.Name). - Body(auditSink). - Do(). - Into(result) - return -} - -// Delete takes name of the auditSink and deletes it. Returns an error if one occurs. -func (c *auditSinks) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Resource("auditsinks"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. 
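This deletion (continuing below) removes the entire typed client for auditregistration.k8s.io/v1alpha1, a group that exists only in client-go releases newer than the one this patch pins. Code that has to tolerate such vendor skew can probe for a group/version at runtime through the discovery client instead of compiling against the typed API; ServerResourcesForGroupVersion is a real discovery method:

import "k8s.io/client-go/discovery"

// hasGroupVersion reports whether the connected API server serves the
// given group/version, e.g. "auditregistration.k8s.io/v1alpha1".
func hasGroupVersion(dc discovery.DiscoveryInterface, gv string) bool {
    _, err := dc.ServerResourcesForGroupVersion(gv)
    return err == nil
}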
-func (c *auditSinks) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("auditsinks"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched auditSink. -func (c *auditSinks) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.AuditSink, err error) { - result = &v1alpha1.AuditSink{} - err = c.client.Patch(pt). - Resource("auditsinks"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/authentication_client.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/authentication_client.go index de8864e22..3bdcee598 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/authentication_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/authentication_client.go @@ -20,6 +20,7 @@ package v1 import ( v1 "k8s.io/api/authentication/v1" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -70,7 +71,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/authentication_client.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/authentication_client.go index 816bd0a2c..7f3334a0c 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/authentication_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/authentication_client.go @@ -20,6 +20,7 @@ package v1beta1 import ( v1beta1 "k8s.io/api/authentication/v1beta1" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -70,7 +71,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1beta1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/authorization_client.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/authorization_client.go index 2cc226322..e84b90084 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/authorization_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/authorization_client.go @@ -20,6 +20,7 @@ package v1 import ( v1 "k8s.io/api/authorization/v1" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -85,7 +86,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - 
config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/authorization_client.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/authorization_client.go index 88eac75b7..7f236f6e3 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/authorization_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/authorization_client.go @@ -20,6 +20,7 @@ package v1beta1 import ( v1beta1 "k8s.io/api/authorization/v1beta1" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -85,7 +86,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1beta1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/autoscaling_client.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/autoscaling_client.go index 4f3e96aec..2bd49e2db 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/autoscaling_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/autoscaling_client.go @@ -20,6 +20,7 @@ package v1 import ( v1 "k8s.io/api/autoscaling/v1" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -70,7 +71,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/horizontalpodautoscaler.go index 0e0839fb5..6891b6b63 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/horizontalpodautoscaler.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/horizontalpodautoscaler.go @@ -19,8 +19,6 @@ limitations under the License. package v1 import ( - "time" - v1 "k8s.io/api/autoscaling/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -78,16 +76,11 @@ func (c *horizontalPodAutoscalers) Get(name string, options metav1.GetOptions) ( // List takes label and field selectors, and returns the list of HorizontalPodAutoscalers that match those selectors. func (c *horizontalPodAutoscalers) List(opts metav1.ListOptions) (result *v1.HorizontalPodAutoscalerList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1.HorizontalPodAutoscalerList{} err = c.client.Get(). Namespace(c.ns). Resource("horizontalpodautoscalers"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). 
Into(result) return @@ -95,16 +88,11 @@ func (c *horizontalPodAutoscalers) List(opts metav1.ListOptions) (result *v1.Hor // Watch returns a watch.Interface that watches the requested horizontalPodAutoscalers. func (c *horizontalPodAutoscalers) Watch(opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("horizontalpodautoscalers"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -162,15 +150,10 @@ func (c *horizontalPodAutoscalers) Delete(name string, options *metav1.DeleteOpt // DeleteCollection deletes a collection of objects. func (c *horizontalPodAutoscalers) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Namespace(c.ns). Resource("horizontalpodautoscalers"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/autoscaling_client.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/autoscaling_client.go index c1a91fc3e..3a49b26b3 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/autoscaling_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/autoscaling_client.go @@ -20,6 +20,7 @@ package v2beta1 import ( v2beta1 "k8s.io/api/autoscaling/v2beta1" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -70,7 +71,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v2beta1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/horizontalpodautoscaler.go index 02d5cfb9b..4ac8cce71 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/horizontalpodautoscaler.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/horizontalpodautoscaler.go @@ -19,8 +19,6 @@ limitations under the License. package v2beta1 import ( - "time" - v2beta1 "k8s.io/api/autoscaling/v2beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -78,16 +76,11 @@ func (c *horizontalPodAutoscalers) Get(name string, options v1.GetOptions) (resu // List takes label and field selectors, and returns the list of HorizontalPodAutoscalers that match those selectors. func (c *horizontalPodAutoscalers) List(opts v1.ListOptions) (result *v2beta1.HorizontalPodAutoscalerList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v2beta1.HorizontalPodAutoscalerList{} err = c.client.Get(). Namespace(c.ns). Resource("horizontalpodautoscalers"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). 
Into(result) return @@ -95,16 +88,11 @@ func (c *horizontalPodAutoscalers) List(opts v1.ListOptions) (result *v2beta1.Ho // Watch returns a watch.Interface that watches the requested horizontalPodAutoscalers. func (c *horizontalPodAutoscalers) Watch(opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("horizontalpodautoscalers"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -162,15 +150,10 @@ func (c *horizontalPodAutoscalers) Delete(name string, options *v1.DeleteOptions // DeleteCollection deletes a collection of objects. func (c *horizontalPodAutoscalers) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Namespace(c.ns). Resource("horizontalpodautoscalers"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/autoscaling_client.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/autoscaling_client.go index bd2b39270..03fe25e48 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/autoscaling_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/autoscaling_client.go @@ -20,6 +20,7 @@ package v2beta2 import ( v2beta2 "k8s.io/api/autoscaling/v2beta2" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -70,7 +71,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v2beta2.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/horizontalpodautoscaler.go index 91a0fa64f..ddabda7e7 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/horizontalpodautoscaler.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/horizontalpodautoscaler.go @@ -19,8 +19,6 @@ limitations under the License. package v2beta2 import ( - "time" - v2beta2 "k8s.io/api/autoscaling/v2beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -78,16 +76,11 @@ func (c *horizontalPodAutoscalers) Get(name string, options v1.GetOptions) (resu // List takes label and field selectors, and returns the list of HorizontalPodAutoscalers that match those selectors. func (c *horizontalPodAutoscalers) List(opts v1.ListOptions) (result *v2beta2.HorizontalPodAutoscalerList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v2beta2.HorizontalPodAutoscalerList{} err = c.client.Get(). Namespace(c.ns). Resource("horizontalpodautoscalers"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). 
Into(result) return @@ -95,16 +88,11 @@ func (c *horizontalPodAutoscalers) List(opts v1.ListOptions) (result *v2beta2.Ho // Watch returns a watch.Interface that watches the requested horizontalPodAutoscalers. func (c *horizontalPodAutoscalers) Watch(opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("horizontalpodautoscalers"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -162,15 +150,10 @@ func (c *horizontalPodAutoscalers) Delete(name string, options *v1.DeleteOptions // DeleteCollection deletes a collection of objects. func (c *horizontalPodAutoscalers) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Namespace(c.ns). Resource("horizontalpodautoscalers"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/batch_client.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/batch_client.go index 8dfc118a3..d5e35e6b2 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/batch_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/batch_client.go @@ -20,6 +20,7 @@ package v1 import ( v1 "k8s.io/api/batch/v1" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -70,7 +71,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/job.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/job.go index b55c602b3..ba8332a9a 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/job.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/job.go @@ -19,8 +19,6 @@ limitations under the License. package v1 import ( - "time" - v1 "k8s.io/api/batch/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -78,16 +76,11 @@ func (c *jobs) Get(name string, options metav1.GetOptions) (result *v1.Job, err // List takes label and field selectors, and returns the list of Jobs that match those selectors. func (c *jobs) List(opts metav1.ListOptions) (result *v1.JobList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1.JobList{} err = c.client.Get(). Namespace(c.ns). Resource("jobs"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -95,16 +88,11 @@ func (c *jobs) List(opts metav1.ListOptions) (result *v1.JobList, err error) { // Watch returns a watch.Interface that watches the requested jobs. 
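The HorizontalPodAutoscaler hunks above repeat for three served versions of the same resource: autoscaling/v1, v2beta1, and v2beta2 each get their own generated typed client, all addressing the same stored object. Which version to talk is a caller decision: v1 exposes only a CPU-utilization target, while v2beta2 carries the full multi-metric spec. A sketch with era-appropriate signatures (namespace and name illustrative):

import (
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes"
)

func readHPA(clientset kubernetes.Interface) error {
    // Same object, two views: the v1 client returns a CPU-centric
    // representation; v2beta2 returns the full multi-metric spec.
    if _, err := clientset.AutoscalingV1().HorizontalPodAutoscalers("default").Get("web", metav1.GetOptions{}); err != nil {
        return err
    }
    _, err := clientset.AutoscalingV2beta2().HorizontalPodAutoscalers("default").Get("web", metav1.GetOptions{})
    return err
}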
func (c *jobs) Watch(opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("jobs"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -162,15 +150,10 @@ func (c *jobs) Delete(name string, options *metav1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *jobs) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Namespace(c.ns). Resource("jobs"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/batch_client.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/batch_client.go index 257085358..aa71ca833 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/batch_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/batch_client.go @@ -20,6 +20,7 @@ package v1beta1 import ( v1beta1 "k8s.io/api/batch/v1beta1" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -70,7 +71,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1beta1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/cronjob.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/cronjob.go index d89d2fa21..04637c36a 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/cronjob.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/cronjob.go @@ -19,8 +19,6 @@ limitations under the License. package v1beta1 import ( - "time" - v1beta1 "k8s.io/api/batch/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -78,16 +76,11 @@ func (c *cronJobs) Get(name string, options v1.GetOptions) (result *v1beta1.Cron // List takes label and field selectors, and returns the list of CronJobs that match those selectors. func (c *cronJobs) List(opts v1.ListOptions) (result *v1beta1.CronJobList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1beta1.CronJobList{} err = c.client.Get(). Namespace(c.ns). Resource("cronjobs"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -95,16 +88,11 @@ func (c *cronJobs) List(opts v1.ListOptions) (result *v1beta1.CronJobList, err e // Watch returns a watch.Interface that watches the requested cronJobs. func (c *cronJobs) Watch(opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("cronjobs"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). 
Watch() } @@ -162,15 +150,10 @@ func (c *cronJobs) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *cronJobs) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Namespace(c.ns). Resource("cronjobs"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/batch_client.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/batch_client.go index d45c19d52..e6c6306b8 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/batch_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/batch_client.go @@ -20,6 +20,7 @@ package v2alpha1 import ( v2alpha1 "k8s.io/api/batch/v2alpha1" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -70,7 +71,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v2alpha1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/cronjob.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/cronjob.go index 19123b604..4d922f9ae 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/cronjob.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/cronjob.go @@ -19,8 +19,6 @@ limitations under the License. package v2alpha1 import ( - "time" - v2alpha1 "k8s.io/api/batch/v2alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -78,16 +76,11 @@ func (c *cronJobs) Get(name string, options v1.GetOptions) (result *v2alpha1.Cro // List takes label and field selectors, and returns the list of CronJobs that match those selectors. func (c *cronJobs) List(opts v1.ListOptions) (result *v2alpha1.CronJobList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v2alpha1.CronJobList{} err = c.client.Get(). Namespace(c.ns). Resource("cronjobs"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -95,16 +88,11 @@ func (c *cronJobs) List(opts v1.ListOptions) (result *v2alpha1.CronJobList, err // Watch returns a watch.Interface that watches the requested cronJobs. func (c *cronJobs) Watch(opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("cronjobs"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -162,15 +150,10 @@ func (c *cronJobs) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. 
func (c *cronJobs) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Namespace(c.ns). Resource("cronjobs"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificates_client.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificates_client.go index 1c52d551b..baac42ee2 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificates_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificates_client.go @@ -20,6 +20,7 @@ package v1beta1 import ( v1beta1 "k8s.io/api/certificates/v1beta1" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -70,7 +71,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1beta1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest.go index 712d3a01a..b39169a8f 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest.go @@ -19,8 +19,6 @@ limitations under the License. package v1beta1 import ( - "time" - v1beta1 "k8s.io/api/certificates/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -75,15 +73,10 @@ func (c *certificateSigningRequests) Get(name string, options v1.GetOptions) (re // List takes label and field selectors, and returns the list of CertificateSigningRequests that match those selectors. func (c *certificateSigningRequests) List(opts v1.ListOptions) (result *v1beta1.CertificateSigningRequestList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1beta1.CertificateSigningRequestList{} err = c.client.Get(). Resource("certificatesigningrequests"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -91,15 +84,10 @@ func (c *certificateSigningRequests) List(opts v1.ListOptions) (result *v1beta1. // Watch returns a watch.Interface that watches the requested certificateSigningRequests. func (c *certificateSigningRequests) Watch(opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Resource("certificatesigningrequests"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -153,14 +141,9 @@ func (c *certificateSigningRequests) Delete(name string, options *v1.DeleteOptio // DeleteCollection deletes a collection of objects. 
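Unlike the namespaced clients elsewhere in this patch, the certificatesigningrequests client above builds requests without a Namespace(c.ns) segment: CertificateSigningRequests are cluster-scoped, and client-gen reflects that in both the getter (no namespace argument) and the request path. Caller-side sketch:

import (
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes"
)

func getCSR(clientset kubernetes.Interface, name string) error {
    // Note the getter takes no namespace:
    // CertificateSigningRequests(), not CertificateSigningRequests(ns).
    _, err := clientset.CertificatesV1beta1().CertificateSigningRequests().Get(name, metav1.GetOptions{})
    return err
}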
func (c *certificateSigningRequests) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Resource("certificatesigningrequests"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/coordination_client.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/coordination_client.go deleted file mode 100644 index 0df7b71bf..000000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/coordination_client.go +++ /dev/null @@ -1,89 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1 - -import ( - v1 "k8s.io/api/coordination/v1" - "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -type CoordinationV1Interface interface { - RESTClient() rest.Interface - LeasesGetter -} - -// CoordinationV1Client is used to interact with features provided by the coordination.k8s.io group. -type CoordinationV1Client struct { - restClient rest.Interface -} - -func (c *CoordinationV1Client) Leases(namespace string) LeaseInterface { - return newLeases(c, namespace) -} - -// NewForConfig creates a new CoordinationV1Client for the given config. -func NewForConfig(c *rest.Config) (*CoordinationV1Client, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - client, err := rest.RESTClientFor(&config) - if err != nil { - return nil, err - } - return &CoordinationV1Client{client}, nil -} - -// NewForConfigOrDie creates a new CoordinationV1Client for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *CoordinationV1Client { - client, err := NewForConfig(c) - if err != nil { - panic(err) - } - return client -} - -// New creates a new CoordinationV1Client for the given RESTClient. -func New(c rest.Interface) *CoordinationV1Client { - return &CoordinationV1Client{c} -} - -func setConfigDefaults(config *rest.Config) error { - gv := v1.SchemeGroupVersion - config.GroupVersion = &gv - config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() - - if config.UserAgent == "" { - config.UserAgent = rest.DefaultKubernetesUserAgent() - } - - return nil -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. 
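// NOTE (illustrative sketch): the coordination/v1 client deleted above follows
// the standard client-gen shape: NewForConfig copies the rest.Config, applies
// the group's defaults via setConfigDefaults, and wraps the resulting
// RESTClient. Before this downgrade removed the package, it would be used
// roughly like this; the wrapper below is assumed, not part of the patch.
package sketch

import (
	coordinationv1 "k8s.io/client-go/kubernetes/typed/coordination/v1"
	rest "k8s.io/client-go/rest"
)

// leasesFor builds the typed Lease client that the deleted file provided.
func leasesFor(cfg *rest.Config, namespace string) (coordinationv1.LeaseInterface, error) {
	// NewForConfig sets GroupVersion=coordination.k8s.io/v1, APIPath=/apis
	// and the negotiated serializer before creating the RESTClient.
	client, err := coordinationv1.NewForConfig(cfg)
	if err != nil {
		return nil, err
	}
	return client.Leases(namespace), nil
}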
-func (c *CoordinationV1Client) RESTClient() rest.Interface { - if c == nil { - return nil - } - return c.restClient -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/lease.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/lease.go deleted file mode 100644 index b6cf1b64f..000000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/lease.go +++ /dev/null @@ -1,174 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1 - -import ( - "time" - - v1 "k8s.io/api/coordination/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// LeasesGetter has a method to return a LeaseInterface. -// A group's client should implement this interface. -type LeasesGetter interface { - Leases(namespace string) LeaseInterface -} - -// LeaseInterface has methods to work with Lease resources. -type LeaseInterface interface { - Create(*v1.Lease) (*v1.Lease, error) - Update(*v1.Lease) (*v1.Lease, error) - Delete(name string, options *metav1.DeleteOptions) error - DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(name string, options metav1.GetOptions) (*v1.Lease, error) - List(opts metav1.ListOptions) (*v1.LeaseList, error) - Watch(opts metav1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Lease, err error) - LeaseExpansion -} - -// leases implements LeaseInterface -type leases struct { - client rest.Interface - ns string -} - -// newLeases returns a Leases -func newLeases(c *CoordinationV1Client, namespace string) *leases { - return &leases{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the lease, and returns the corresponding lease object, and an error if there is any. -func (c *leases) Get(name string, options metav1.GetOptions) (result *v1.Lease, err error) { - result = &v1.Lease{} - err = c.client.Get(). - Namespace(c.ns). - Resource("leases"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Leases that match those selectors. -func (c *leases) List(opts metav1.ListOptions) (result *v1.LeaseList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.LeaseList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("leases"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested leases. 
-func (c *leases) Watch(opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("leases"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch() -} - -// Create takes the representation of a lease and creates it. Returns the server's representation of the lease, and an error, if there is any. -func (c *leases) Create(lease *v1.Lease) (result *v1.Lease, err error) { - result = &v1.Lease{} - err = c.client.Post(). - Namespace(c.ns). - Resource("leases"). - Body(lease). - Do(). - Into(result) - return -} - -// Update takes the representation of a lease and updates it. Returns the server's representation of the lease, and an error, if there is any. -func (c *leases) Update(lease *v1.Lease) (result *v1.Lease, err error) { - result = &v1.Lease{} - err = c.client.Put(). - Namespace(c.ns). - Resource("leases"). - Name(lease.Name). - Body(lease). - Do(). - Into(result) - return -} - -// Delete takes name of the lease and deletes it. Returns an error if one occurs. -func (c *leases) Delete(name string, options *metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("leases"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *leases) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("leases"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched lease. -func (c *leases) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Lease, err error) { - result = &v1.Lease{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("leases"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). 
- Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/coordination_client.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/coordination_client.go index d68ed5d34..91a764843 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/coordination_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/coordination_client.go @@ -20,6 +20,7 @@ package v1beta1 import ( v1beta1 "k8s.io/api/coordination/v1beta1" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -70,7 +71,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1beta1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/lease.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/lease.go index 490d815aa..16277255f 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/lease.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/lease.go @@ -19,8 +19,6 @@ limitations under the License. package v1beta1 import ( - "time" - v1beta1 "k8s.io/api/coordination/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -77,16 +75,11 @@ func (c *leases) Get(name string, options v1.GetOptions) (result *v1beta1.Lease, // List takes label and field selectors, and returns the list of Leases that match those selectors. func (c *leases) List(opts v1.ListOptions) (result *v1beta1.LeaseList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1beta1.LeaseList{} err = c.client.Get(). Namespace(c.ns). Resource("leases"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -94,16 +87,11 @@ func (c *leases) List(opts v1.ListOptions) (result *v1beta1.LeaseList, err error // Watch returns a watch.Interface that watches the requested leases. func (c *leases) Watch(opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("leases"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -145,15 +133,10 @@ func (c *leases) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *leases) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Namespace(c.ns). Resource("leases"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). 
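// NOTE (illustrative sketch): the serializer hunks throughout this diff all
// make the same swap. The vendored, older client-go configures its REST
// clients with serializer.DirectCodecFactory, which encodes and decodes
// objects directly in the group's wire version with no internal-version
// conversion; newer releases spell the same behavior
// scheme.Codecs.WithoutConversion(). The generated defaulting, reproduced for
// one group:
package sketch

import (
	coordinationv1beta1 "k8s.io/api/coordination/v1beta1"
	serializer "k8s.io/apimachinery/pkg/runtime/serializer"
	"k8s.io/client-go/kubernetes/scheme"
	rest "k8s.io/client-go/rest"
)

func setConfigDefaults(config *rest.Config) error {
	gv := coordinationv1beta1.SchemeGroupVersion
	config.GroupVersion = &gv
	config.APIPath = "/apis"
	// DirectCodecFactory wraps scheme.Codecs but skips conversion on both
	// encode and decode, so requests and responses stay in v1beta1 form.
	config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs}
	if config.UserAgent == "" {
		config.UserAgent = rest.DefaultKubernetesUserAgent()
	}
	return nil
}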
Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/componentstatus.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/componentstatus.go index 302b2fdc3..e497661cf 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/componentstatus.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/componentstatus.go @@ -19,8 +19,6 @@ limitations under the License. package v1 import ( - "time" - v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -74,15 +72,10 @@ func (c *componentStatuses) Get(name string, options metav1.GetOptions) (result // List takes label and field selectors, and returns the list of ComponentStatuses that match those selectors. func (c *componentStatuses) List(opts metav1.ListOptions) (result *v1.ComponentStatusList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1.ComponentStatusList{} err = c.client.Get(). Resource("componentstatuses"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -90,15 +83,10 @@ func (c *componentStatuses) List(opts metav1.ListOptions) (result *v1.ComponentS // Watch returns a watch.Interface that watches the requested componentStatuses. func (c *componentStatuses) Watch(opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Resource("componentstatuses"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -137,14 +125,9 @@ func (c *componentStatuses) Delete(name string, options *metav1.DeleteOptions) e // DeleteCollection deletes a collection of objects. func (c *componentStatuses) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Resource("componentstatuses"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/configmap.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/configmap.go index 18ce954ae..0984ae70c 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/configmap.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/configmap.go @@ -19,8 +19,6 @@ limitations under the License. package v1 import ( - "time" - v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -77,16 +75,11 @@ func (c *configMaps) Get(name string, options metav1.GetOptions) (result *v1.Con // List takes label and field selectors, and returns the list of ConfigMaps that match those selectors. func (c *configMaps) List(opts metav1.ListOptions) (result *v1.ConfigMapList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1.ConfigMapList{} err = c.client.Get(). Namespace(c.ns). Resource("configmaps"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -94,16 +87,11 @@ func (c *configMaps) List(opts metav1.ListOptions) (result *v1.ConfigMapList, er // Watch returns a watch.Interface that watches the requested configMaps. 
func (c *configMaps) Watch(opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("configmaps"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -145,15 +133,10 @@ func (c *configMaps) Delete(name string, options *metav1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *configMaps) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Namespace(c.ns). Resource("configmaps"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/core_client.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/core_client.go index 428d2afa3..044a28ebd 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/core_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/core_client.go @@ -20,6 +20,7 @@ package v1 import ( v1 "k8s.io/api/core/v1" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -145,7 +146,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/api" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/endpoints.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/endpoints.go index 978a2a196..dd8216789 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/endpoints.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/endpoints.go @@ -19,8 +19,6 @@ limitations under the License. package v1 import ( - "time" - v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -77,16 +75,11 @@ func (c *endpoints) Get(name string, options metav1.GetOptions) (result *v1.Endp // List takes label and field selectors, and returns the list of Endpoints that match those selectors. func (c *endpoints) List(opts metav1.ListOptions) (result *v1.EndpointsList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1.EndpointsList{} err = c.client.Get(). Namespace(c.ns). Resource("endpoints"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -94,16 +87,11 @@ func (c *endpoints) List(opts metav1.ListOptions) (result *v1.EndpointsList, err // Watch returns a watch.Interface that watches the requested endpoints. func (c *endpoints) Watch(opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("endpoints"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). 
Watch() } @@ -145,15 +133,10 @@ func (c *endpoints) Delete(name string, options *metav1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *endpoints) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Namespace(c.ns). Resource("endpoints"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event.go index 55cfa0901..57d30f9fd 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event.go @@ -19,8 +19,6 @@ limitations under the License. package v1 import ( - "time" - v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -77,16 +75,11 @@ func (c *events) Get(name string, options metav1.GetOptions) (result *v1.Event, // List takes label and field selectors, and returns the list of Events that match those selectors. func (c *events) List(opts metav1.ListOptions) (result *v1.EventList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1.EventList{} err = c.client.Get(). Namespace(c.ns). Resource("events"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -94,16 +87,11 @@ func (c *events) List(opts metav1.ListOptions) (result *v1.EventList, err error) // Watch returns a watch.Interface that watches the requested events. func (c *events) Watch(opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("events"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -145,15 +133,10 @@ func (c *events) Delete(name string, options *metav1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *events) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Namespace(c.ns). Resource("events"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/limitrange.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/limitrange.go index 2eeae11a8..5b385668b 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/limitrange.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/limitrange.go @@ -19,8 +19,6 @@ limitations under the License. package v1 import ( - "time" - v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -77,16 +75,11 @@ func (c *limitRanges) Get(name string, options metav1.GetOptions) (result *v1.Li // List takes label and field selectors, and returns the list of LimitRanges that match those selectors. 
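// NOTE (illustrative sketch): each generated method in these files is a thin
// wrapper over the same rest.Request builder chain: verb, namespace, resource,
// encoded options, then Do()/Into() for reads or Watch() for watches. The List
// round-trip, distilled; listConfigMaps is a hypothetical wrapper name.
package sketch

import (
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/scheme"
	rest "k8s.io/client-go/rest"
)

func listConfigMaps(client rest.Interface, ns string, opts metav1.ListOptions) (*v1.ConfigMapList, error) {
	result := &v1.ConfigMapList{}
	// VersionedParams encodes ListOptions (label/field selectors, etc.) into
	// query parameters using the scheme's ParameterCodec.
	err := client.Get().
		Namespace(ns).
		Resource("configmaps").
		VersionedParams(&opts, scheme.ParameterCodec).
		Do().
		Into(result)
	return result, err
}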
func (c *limitRanges) List(opts metav1.ListOptions) (result *v1.LimitRangeList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1.LimitRangeList{} err = c.client.Get(). Namespace(c.ns). Resource("limitranges"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -94,16 +87,11 @@ func (c *limitRanges) List(opts metav1.ListOptions) (result *v1.LimitRangeList, // Watch returns a watch.Interface that watches the requested limitRanges. func (c *limitRanges) Watch(opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("limitranges"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -145,15 +133,10 @@ func (c *limitRanges) Delete(name string, options *metav1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *limitRanges) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Namespace(c.ns). Resource("limitranges"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace.go index 8a81fe850..e22d07dec 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace.go @@ -19,8 +19,6 @@ limitations under the License. package v1 import ( - "time" - v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -74,15 +72,10 @@ func (c *namespaces) Get(name string, options metav1.GetOptions) (result *v1.Nam // List takes label and field selectors, and returns the list of Namespaces that match those selectors. func (c *namespaces) List(opts metav1.ListOptions) (result *v1.NamespaceList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1.NamespaceList{} err = c.client.Get(). Resource("namespaces"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -90,15 +83,10 @@ func (c *namespaces) List(opts metav1.ListOptions) (result *v1.NamespaceList, er // Watch returns a watch.Interface that watches the requested namespaces. func (c *namespaces) Watch(opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Resource("namespaces"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node.go index d19fab895..5c769c118 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node.go @@ -19,8 +19,6 @@ limitations under the License. 
package v1 import ( - "time" - v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -75,15 +73,10 @@ func (c *nodes) Get(name string, options metav1.GetOptions) (result *v1.Node, er // List takes label and field selectors, and returns the list of Nodes that match those selectors. func (c *nodes) List(opts metav1.ListOptions) (result *v1.NodeList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1.NodeList{} err = c.client.Get(). Resource("nodes"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -91,15 +84,10 @@ func (c *nodes) List(opts metav1.ListOptions) (result *v1.NodeList, err error) { // Watch returns a watch.Interface that watches the requested nodes. func (c *nodes) Watch(opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Resource("nodes"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -153,14 +141,9 @@ func (c *nodes) Delete(name string, options *metav1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *nodes) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Resource("nodes"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolume.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolume.go index 74514825e..d5f19aef5 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolume.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolume.go @@ -19,8 +19,6 @@ limitations under the License. package v1 import ( - "time" - v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -75,15 +73,10 @@ func (c *persistentVolumes) Get(name string, options metav1.GetOptions) (result // List takes label and field selectors, and returns the list of PersistentVolumes that match those selectors. func (c *persistentVolumes) List(opts metav1.ListOptions) (result *v1.PersistentVolumeList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1.PersistentVolumeList{} err = c.client.Get(). Resource("persistentvolumes"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -91,15 +84,10 @@ func (c *persistentVolumes) List(opts metav1.ListOptions) (result *v1.Persistent // Watch returns a watch.Interface that watches the requested persistentVolumes. func (c *persistentVolumes) Watch(opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Resource("persistentvolumes"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). 
Watch() } @@ -153,14 +141,9 @@ func (c *persistentVolumes) Delete(name string, options *metav1.DeleteOptions) e // DeleteCollection deletes a collection of objects. func (c *persistentVolumes) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Resource("persistentvolumes"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolumeclaim.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolumeclaim.go index 410ab37dc..d32ae5dfd 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolumeclaim.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolumeclaim.go @@ -19,8 +19,6 @@ limitations under the License. package v1 import ( - "time" - v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -78,16 +76,11 @@ func (c *persistentVolumeClaims) Get(name string, options metav1.GetOptions) (re // List takes label and field selectors, and returns the list of PersistentVolumeClaims that match those selectors. func (c *persistentVolumeClaims) List(opts metav1.ListOptions) (result *v1.PersistentVolumeClaimList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1.PersistentVolumeClaimList{} err = c.client.Get(). Namespace(c.ns). Resource("persistentvolumeclaims"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -95,16 +88,11 @@ func (c *persistentVolumeClaims) List(opts metav1.ListOptions) (result *v1.Persi // Watch returns a watch.Interface that watches the requested persistentVolumeClaims. func (c *persistentVolumeClaims) Watch(opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("persistentvolumeclaims"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -162,15 +150,10 @@ func (c *persistentVolumeClaims) Delete(name string, options *metav1.DeleteOptio // DeleteCollection deletes a collection of objects. func (c *persistentVolumeClaims) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Namespace(c.ns). Resource("persistentvolumeclaims"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod.go index 8d6b6e879..b19c5a5c3 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod.go @@ -19,8 +19,6 @@ limitations under the License. 
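// NOTE (illustrative sketch): cluster-scoped resources in the surrounding
// hunks (componentstatuses, namespaces, nodes, persistentvolumes) build the
// identical request without the Namespace(...) segment; getNode is a
// hypothetical wrapper around the generated Get.
package sketch

import (
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/scheme"
	rest "k8s.io/client-go/rest"
)

func getNode(client rest.Interface, name string, opts metav1.GetOptions) (*v1.Node, error) {
	result := &v1.Node{}
	err := client.Get().
		Resource("nodes"). // no Namespace(...): nodes are cluster-scoped
		Name(name).
		VersionedParams(&opts, scheme.ParameterCodec).
		Do().
		Into(result)
	return result, err
}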
package v1 import ( - "time" - v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -78,16 +76,11 @@ func (c *pods) Get(name string, options metav1.GetOptions) (result *v1.Pod, err // List takes label and field selectors, and returns the list of Pods that match those selectors. func (c *pods) List(opts metav1.ListOptions) (result *v1.PodList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1.PodList{} err = c.client.Get(). Namespace(c.ns). Resource("pods"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -95,16 +88,11 @@ func (c *pods) List(opts metav1.ListOptions) (result *v1.PodList, err error) { // Watch returns a watch.Interface that watches the requested pods. func (c *pods) Watch(opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("pods"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -162,15 +150,10 @@ func (c *pods) Delete(name string, options *metav1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *pods) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Namespace(c.ns). Resource("pods"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/podtemplate.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/podtemplate.go index 84d7c9805..d644e17d7 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/podtemplate.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/podtemplate.go @@ -19,8 +19,6 @@ limitations under the License. package v1 import ( - "time" - v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -77,16 +75,11 @@ func (c *podTemplates) Get(name string, options metav1.GetOptions) (result *v1.P // List takes label and field selectors, and returns the list of PodTemplates that match those selectors. func (c *podTemplates) List(opts metav1.ListOptions) (result *v1.PodTemplateList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1.PodTemplateList{} err = c.client.Get(). Namespace(c.ns). Resource("podtemplates"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -94,16 +87,11 @@ func (c *podTemplates) List(opts metav1.ListOptions) (result *v1.PodTemplateList // Watch returns a watch.Interface that watches the requested podTemplates. func (c *podTemplates) Watch(opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("podtemplates"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). 
Watch() } @@ -145,15 +133,10 @@ func (c *podTemplates) Delete(name string, options *metav1.DeleteOptions) error // DeleteCollection deletes a collection of objects. func (c *podTemplates) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Namespace(c.ns). Resource("podtemplates"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/replicationcontroller.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/replicationcontroller.go index dd3182db6..17622f1c2 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/replicationcontroller.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/replicationcontroller.go @@ -19,10 +19,8 @@ limitations under the License. package v1 import ( - "time" - - autoscalingv1 "k8s.io/api/autoscaling/v1" v1 "k8s.io/api/core/v1" + v1beta1 "k8s.io/api/extensions/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" @@ -47,8 +45,8 @@ type ReplicationControllerInterface interface { List(opts metav1.ListOptions) (*v1.ReplicationControllerList, error) Watch(opts metav1.ListOptions) (watch.Interface, error) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ReplicationController, err error) - GetScale(replicationControllerName string, options metav1.GetOptions) (*autoscalingv1.Scale, error) - UpdateScale(replicationControllerName string, scale *autoscalingv1.Scale) (*autoscalingv1.Scale, error) + GetScale(replicationControllerName string, options metav1.GetOptions) (*v1beta1.Scale, error) + UpdateScale(replicationControllerName string, scale *v1beta1.Scale) (*v1beta1.Scale, error) ReplicationControllerExpansion } @@ -82,16 +80,11 @@ func (c *replicationControllers) Get(name string, options metav1.GetOptions) (re // List takes label and field selectors, and returns the list of ReplicationControllers that match those selectors. func (c *replicationControllers) List(opts metav1.ListOptions) (result *v1.ReplicationControllerList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1.ReplicationControllerList{} err = c.client.Get(). Namespace(c.ns). Resource("replicationcontrollers"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -99,16 +92,11 @@ func (c *replicationControllers) List(opts metav1.ListOptions) (result *v1.Repli // Watch returns a watch.Interface that watches the requested replicationControllers. func (c *replicationControllers) Watch(opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("replicationcontrollers"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -166,15 +154,10 @@ func (c *replicationControllers) Delete(name string, options *metav1.DeleteOptio // DeleteCollection deletes a collection of objects. 
 func (c *replicationControllers) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error {
-	var timeout time.Duration
-	if listOptions.TimeoutSeconds != nil {
-		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
-	}
 	return c.client.Delete().
 		Namespace(c.ns).
 		Resource("replicationcontrollers").
 		VersionedParams(&listOptions, scheme.ParameterCodec).
-		Timeout(timeout).
 		Body(options).
 		Do().
 		Error()
@@ -194,9 +177,9 @@ func (c *replicationControllers) Patch(name string, pt types.PatchType, data []b
 	return
 }
 
-// GetScale takes name of the replicationController, and returns the corresponding autoscalingv1.Scale object, and an error if there is any.
-func (c *replicationControllers) GetScale(replicationControllerName string, options metav1.GetOptions) (result *autoscalingv1.Scale, err error) {
-	result = &autoscalingv1.Scale{}
+// GetScale takes name of the replicationController, and returns the corresponding v1beta1.Scale object, and an error if there is any.
+func (c *replicationControllers) GetScale(replicationControllerName string, options metav1.GetOptions) (result *v1beta1.Scale, err error) {
+	result = &v1beta1.Scale{}
 	err = c.client.Get().
 		Namespace(c.ns).
 		Resource("replicationcontrollers").
@@ -209,8 +192,8 @@ func (c *replicationControllers) GetScale(replicationControllerName string, opti
 }
 
 // UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any.
-func (c *replicationControllers) UpdateScale(replicationControllerName string, scale *autoscalingv1.Scale) (result *autoscalingv1.Scale, err error) {
-	result = &autoscalingv1.Scale{}
+func (c *replicationControllers) UpdateScale(replicationControllerName string, scale *v1beta1.Scale) (result *v1beta1.Scale, err error) {
+	result = &v1beta1.Scale{}
 	err = c.client.Put().
 		Namespace(c.ns).
 		Resource("replicationcontrollers").
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/resourcequota.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/resourcequota.go
index 5a178990e..8b74a4046 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/resourcequota.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/resourcequota.go
@@ -19,8 +19,6 @@ limitations under the License.
 package v1
 
 import (
-	"time"
-
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	types "k8s.io/apimachinery/pkg/types"
@@ -78,16 +76,11 @@ func (c *resourceQuotas) Get(name string, options metav1.GetOptions) (result *v1
 
 // List takes label and field selectors, and returns the list of ResourceQuotas that match those selectors.
 func (c *resourceQuotas) List(opts metav1.ListOptions) (result *v1.ResourceQuotaList, err error) {
-	var timeout time.Duration
-	if opts.TimeoutSeconds != nil {
-		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
-	}
 	result = &v1.ResourceQuotaList{}
 	err = c.client.Get().
 		Namespace(c.ns).
 		Resource("resourcequotas").
 		VersionedParams(&opts, scheme.ParameterCodec).
-		Timeout(timeout).
 		Do().
 		Into(result)
 	return
@@ -95,16 +88,11 @@ func (c *resourceQuotas) List(opts metav1.ListOptions) (result *v1.ResourceQuota
 
 // Watch returns a watch.Interface that watches the requested resourceQuotas.
 func (c *resourceQuotas) Watch(opts metav1.ListOptions) (watch.Interface, error) {
-	var timeout time.Duration
-	if opts.TimeoutSeconds != nil {
-		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
-	}
 	opts.Watch = true
 	return c.client.Get().
 		Namespace(c.ns).
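// NOTE (illustrative sketch): after the replicationcontroller hunks above, the
// vendored client round-trips the replicationcontrollers/scale subresource
// through extensions/v1beta1.Scale instead of autoscaling/v1.Scale. Usage
// under that older surface; scaleRC is a hypothetical helper and clientset
// construction is elided.
package sketch

import (
	v1beta1 "k8s.io/api/extensions/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
)

// scaleRC reads the current scale of a replication controller and writes back
// a new replica count, using the v1beta1.Scale round-trip shown in the diff.
func scaleRC(core corev1.CoreV1Interface, ns, name string, replicas int32) (*v1beta1.Scale, error) {
	rcs := core.ReplicationControllers(ns)
	scale, err := rcs.GetScale(name, metav1.GetOptions{})
	if err != nil {
		return nil, err
	}
	scale.Spec.Replicas = replicas
	return rcs.UpdateScale(name, scale)
}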
Resource("resourcequotas"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -162,15 +150,10 @@ func (c *resourceQuotas) Delete(name string, options *metav1.DeleteOptions) erro // DeleteCollection deletes a collection of objects. func (c *resourceQuotas) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Namespace(c.ns). Resource("resourcequotas"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/secret.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/secret.go index 85c143b17..4ea9796b6 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/secret.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/secret.go @@ -19,8 +19,6 @@ limitations under the License. package v1 import ( - "time" - v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -77,16 +75,11 @@ func (c *secrets) Get(name string, options metav1.GetOptions) (result *v1.Secret // List takes label and field selectors, and returns the list of Secrets that match those selectors. func (c *secrets) List(opts metav1.ListOptions) (result *v1.SecretList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1.SecretList{} err = c.client.Get(). Namespace(c.ns). Resource("secrets"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -94,16 +87,11 @@ func (c *secrets) List(opts metav1.ListOptions) (result *v1.SecretList, err erro // Watch returns a watch.Interface that watches the requested secrets. func (c *secrets) Watch(opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("secrets"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -145,15 +133,10 @@ func (c *secrets) Delete(name string, options *metav1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *secrets) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Namespace(c.ns). Resource("secrets"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service.go index b0e09413e..6c42ca87a 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service.go @@ -19,8 +19,6 @@ limitations under the License. 
package v1 import ( - "time" - v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -77,16 +75,11 @@ func (c *services) Get(name string, options metav1.GetOptions) (result *v1.Servi // List takes label and field selectors, and returns the list of Services that match those selectors. func (c *services) List(opts metav1.ListOptions) (result *v1.ServiceList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1.ServiceList{} err = c.client.Get(). Namespace(c.ns). Resource("services"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -94,16 +87,11 @@ func (c *services) List(opts metav1.ListOptions) (result *v1.ServiceList, err er // Watch returns a watch.Interface that watches the requested services. func (c *services) Watch(opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("services"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount.go index 50af6a21c..f3ab7eb87 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount.go @@ -19,8 +19,6 @@ limitations under the License. package v1 import ( - "time" - v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -77,16 +75,11 @@ func (c *serviceAccounts) Get(name string, options metav1.GetOptions) (result *v // List takes label and field selectors, and returns the list of ServiceAccounts that match those selectors. func (c *serviceAccounts) List(opts metav1.ListOptions) (result *v1.ServiceAccountList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1.ServiceAccountList{} err = c.client.Get(). Namespace(c.ns). Resource("serviceaccounts"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -94,16 +87,11 @@ func (c *serviceAccounts) List(opts metav1.ListOptions) (result *v1.ServiceAccou // Watch returns a watch.Interface that watches the requested serviceAccounts. func (c *serviceAccounts) Watch(opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("serviceaccounts"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -145,15 +133,10 @@ func (c *serviceAccounts) Delete(name string, options *metav1.DeleteOptions) err // DeleteCollection deletes a collection of objects. func (c *serviceAccounts) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Namespace(c.ns). Resource("serviceaccounts"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). 
Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event.go b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event.go index 143281b25..af7d060d5 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event.go @@ -19,8 +19,6 @@ limitations under the License. package v1beta1 import ( - "time" - v1beta1 "k8s.io/api/events/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -77,16 +75,11 @@ func (c *events) Get(name string, options v1.GetOptions) (result *v1beta1.Event, // List takes label and field selectors, and returns the list of Events that match those selectors. func (c *events) List(opts v1.ListOptions) (result *v1beta1.EventList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1beta1.EventList{} err = c.client.Get(). Namespace(c.ns). Resource("events"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -94,16 +87,11 @@ func (c *events) List(opts v1.ListOptions) (result *v1beta1.EventList, err error // Watch returns a watch.Interface that watches the requested events. func (c *events) Watch(opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("events"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -145,15 +133,10 @@ func (c *events) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *events) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Namespace(c.ns). Resource("events"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event_expansion.go deleted file mode 100644 index 312ee4283..000000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event_expansion.go +++ /dev/null @@ -1,98 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - "fmt" - - "k8s.io/api/events/v1beta1" - "k8s.io/apimachinery/pkg/types" -) - -// The EventExpansion interface allows manually adding extra methods to the EventInterface. -// TODO: Add querying functions to the event expansion -type EventExpansion interface { - // CreateWithEventNamespace is the same as a Create - // except that it sends the request to the event.Namespace. 
- CreateWithEventNamespace(event *v1beta1.Event) (*v1beta1.Event, error) - // UpdateWithEventNamespace is the same as a Update - // except that it sends the request to the event.Namespace. - UpdateWithEventNamespace(event *v1beta1.Event) (*v1beta1.Event, error) - // PatchWithEventNamespace is the same as an Update - // except that it sends the request to the event.Namespace. - PatchWithEventNamespace(event *v1beta1.Event, data []byte) (*v1beta1.Event, error) -} - -// CreateWithEventNamespace makes a new event. -// Returns the copy of the event the server returns, or an error. -// The namespace to create the event within is deduced from the event. -// it must either match this event client's namespace, or this event client must -// have been created with the "" namespace. -func (e *events) CreateWithEventNamespace(event *v1beta1.Event) (*v1beta1.Event, error) { - if e.ns != "" && event.Namespace != e.ns { - return nil, fmt.Errorf("can't create an event with namespace '%v' in namespace '%v'", event.Namespace, e.ns) - } - result := &v1beta1.Event{} - err := e.client.Post(). - NamespaceIfScoped(event.Namespace, len(event.Namespace) > 0). - Resource("events"). - Body(event). - Do(). - Into(result) - return result, err -} - -// UpdateWithEventNamespace modifies an existing event. -// It returns the copy of the event that the server returns, or an error. -// The namespace and key to update the event within is deduced from the event. -// The namespace must either match this event client's namespace, or this event client must have been -// created with the "" namespace. -// Update also requires the ResourceVersion to be set in the event object. -func (e *events) UpdateWithEventNamespace(event *v1beta1.Event) (*v1beta1.Event, error) { - if e.ns != "" && event.Namespace != e.ns { - return nil, fmt.Errorf("can't update an event with namespace '%v' in namespace '%v'", event.Namespace, e.ns) - } - result := &v1beta1.Event{} - err := e.client.Put(). - NamespaceIfScoped(event.Namespace, len(event.Namespace) > 0). - Resource("events"). - Name(event.Name). - Body(event). - Do(). - Into(result) - return result, err -} - -// PatchWithEventNamespace modifies an existing event. -// It returns the copy of the event that the server returns, or an error. -// The namespace and name of the target event is deduced from the event. -// The namespace must either match this event client's namespace, or this event client must -// have been created with the "" namespace. -func (e *events) PatchWithEventNamespace(event *v1beta1.Event, data []byte) (*v1beta1.Event, error) { - if e.ns != "" && event.Namespace != e.ns { - return nil, fmt.Errorf("can't patch an event with namespace '%v' in namespace '%v'", event.Namespace, e.ns) - } - result := &v1beta1.Event{} - err := e.client.Patch(types.StrategicMergePatchType). - NamespaceIfScoped(event.Namespace, len(event.Namespace) > 0). - Resource("events"). - Name(event.Name). - Body(data). - Do(). 
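// NOTE (illustrative sketch): the three expansion methods deleted above share
// one precondition, shown here in isolation. Because the request is sent to
// event.Namespace, a namespaced events client may only touch events in its own
// namespace, while a client created with the "" namespace may touch any.
// validateEventNamespace is a hypothetical name for that shared check.
package sketch

import (
	"fmt"

	"k8s.io/api/events/v1beta1"
)

func validateEventNamespace(clientNS string, event *v1beta1.Event) error {
	if clientNS != "" && event.Namespace != clientNS {
		return fmt.Errorf("can't create an event with namespace '%v' in namespace '%v'", event.Namespace, clientNS)
	}
	return nil
}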
-		Into(result)
-	return result, err
-}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/events_client.go b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/events_client.go
index e372ccffa..fb59635bb 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/events_client.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/events_client.go
@@ -20,6 +20,7 @@ package v1beta1
 
 import (
 	v1beta1 "k8s.io/api/events/v1beta1"
+	serializer "k8s.io/apimachinery/pkg/runtime/serializer"
 	"k8s.io/client-go/kubernetes/scheme"
 	rest "k8s.io/client-go/rest"
 )
@@ -70,7 +71,7 @@ func setConfigDefaults(config *rest.Config) error {
 	gv := v1beta1.SchemeGroupVersion
 	config.GroupVersion = &gv
 	config.APIPath = "/apis"
-	config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
+	config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs}
 
 	if config.UserAgent == "" {
 		config.UserAgent = rest.DefaultKubernetesUserAgent()
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/generated_expansion.go
index f6df76963..e27f693f8 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/generated_expansion.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/generated_expansion.go
@@ -17,3 +17,5 @@ limitations under the License.
 // Code generated by client-gen. DO NOT EDIT.
 
 package v1beta1
+
+type EventExpansion interface{}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/daemonset.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/daemonset.go
index 93b1ae9b6..85294be4b 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/daemonset.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/daemonset.go
@@ -19,8 +19,6 @@ limitations under the License.
 package v1beta1
 
 import (
-	"time"
-
 	v1beta1 "k8s.io/api/extensions/v1beta1"
 	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	types "k8s.io/apimachinery/pkg/types"
@@ -78,16 +76,11 @@ func (c *daemonSets) Get(name string, options v1.GetOptions) (result *v1beta1.Da
 
 // List takes label and field selectors, and returns the list of DaemonSets that match those selectors.
 func (c *daemonSets) List(opts v1.ListOptions) (result *v1beta1.DaemonSetList, err error) {
-	var timeout time.Duration
-	if opts.TimeoutSeconds != nil {
-		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
-	}
 	result = &v1beta1.DaemonSetList{}
 	err = c.client.Get().
 		Namespace(c.ns).
 		Resource("daemonsets").
 		VersionedParams(&opts, scheme.ParameterCodec).
-		Timeout(timeout).
 		Do().
 		Into(result)
 	return
@@ -95,16 +88,11 @@ func (c *daemonSets) List(opts v1.ListOptions) (result *v1beta1.DaemonSetList, e
 
 // Watch returns a watch.Interface that watches the requested daemonSets.
 func (c *daemonSets) Watch(opts v1.ListOptions) (watch.Interface, error) {
-	var timeout time.Duration
-	if opts.TimeoutSeconds != nil {
-		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
-	}
 	opts.Watch = true
 	return c.client.Get().
 		Namespace(c.ns).
 		Resource("daemonsets").
 		VersionedParams(&opts, scheme.ParameterCodec).
-		Timeout(timeout).
 		Watch()
 }
 
@@ -162,15 +150,10 @@ func (c *daemonSets) Delete(name string, options *v1.DeleteOptions) error {
 
 // DeleteCollection deletes a collection of objects.
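// NOTE (illustrative sketch): generated_expansion.go above gains an empty
// EventExpansion because this vendored client-go drops the hand-written
// expansion methods, yet the generated EventInterface still embeds the
// expansion interface. The composition pattern reduced to its shape;
// WidgetInterface and WidgetExpansion are hypothetical names.
package sketch

// WidgetExpansion is the hook for hand-written additions to a generated
// client; it is an empty interface when no expansions exist, exactly like
// EventExpansion above.
type WidgetExpansion interface{}

// WidgetInterface is the shape client-gen emits: generated CRUD methods plus
// the embedded expansion hook.
type WidgetInterface interface {
	Get(name string) (string, error) // stand-in for the generated methods
	WidgetExpansion
}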
func (c *daemonSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Namespace(c.ns). Resource("daemonsets"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment.go index 5557b9f2b..89183d285 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment.go @@ -19,8 +19,6 @@ limitations under the License. package v1beta1 import ( - "time" - v1beta1 "k8s.io/api/extensions/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -81,16 +79,11 @@ func (c *deployments) Get(name string, options v1.GetOptions) (result *v1beta1.D // List takes label and field selectors, and returns the list of Deployments that match those selectors. func (c *deployments) List(opts v1.ListOptions) (result *v1beta1.DeploymentList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1beta1.DeploymentList{} err = c.client.Get(). Namespace(c.ns). Resource("deployments"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -98,16 +91,11 @@ func (c *deployments) List(opts v1.ListOptions) (result *v1beta1.DeploymentList, // Watch returns a watch.Interface that watches the requested deployments. func (c *deployments) Watch(opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("deployments"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -165,15 +153,10 @@ func (c *deployments) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *deployments) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Namespace(c.ns). Resource("deployments"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). 
Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go index e3b22aa44..1961ffc7c 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go @@ -20,6 +20,7 @@ package v1beta1 import ( v1beta1 "k8s.io/api/extensions/v1beta1" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -29,9 +30,9 @@ type ExtensionsV1beta1Interface interface { DaemonSetsGetter DeploymentsGetter IngressesGetter - NetworkPoliciesGetter PodSecurityPoliciesGetter ReplicaSetsGetter + ScalesGetter } // ExtensionsV1beta1Client is used to interact with features provided by the extensions group. @@ -51,10 +52,6 @@ func (c *ExtensionsV1beta1Client) Ingresses(namespace string) IngressInterface { return newIngresses(c, namespace) } -func (c *ExtensionsV1beta1Client) NetworkPolicies(namespace string) NetworkPolicyInterface { - return newNetworkPolicies(c, namespace) -} - func (c *ExtensionsV1beta1Client) PodSecurityPolicies() PodSecurityPolicyInterface { return newPodSecurityPolicies(c) } @@ -63,6 +60,10 @@ func (c *ExtensionsV1beta1Client) ReplicaSets(namespace string) ReplicaSetInterf return newReplicaSets(c, namespace) } +func (c *ExtensionsV1beta1Client) Scales(namespace string) ScaleInterface { + return newScales(c, namespace) +} + // NewForConfig creates a new ExtensionsV1beta1Client for the given config. func NewForConfig(c *rest.Config) (*ExtensionsV1beta1Client, error) { config := *c @@ -95,7 +96,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1beta1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/generated_expansion.go index 41d28f041..cfaeebd05 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/generated_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/generated_expansion.go @@ -22,8 +22,6 @@ type DaemonSetExpansion interface{} type IngressExpansion interface{} -type NetworkPolicyExpansion interface{} - type PodSecurityPolicyExpansion interface{} type ReplicaSetExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/ingress.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/ingress.go index 4da51c368..f8b664cbd 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/ingress.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/ingress.go @@ -19,8 +19,6 @@ limitations under the License. package v1beta1 import ( - "time" - v1beta1 "k8s.io/api/extensions/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -78,16 +76,11 @@ func (c *ingresses) Get(name string, options v1.GetOptions) (result *v1beta1.Ing // List takes label and field selectors, and returns the list of Ingresses that match those selectors. 
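The repeated hunks in this part of the vendoring drop the client-side mirroring of ListOptions.TimeoutSeconds into a local Timeout(timeout) on the request. In the pinned client-go the timeout should still be honored server-side, since VersionedParams encodes TimeoutSeconds as the timeoutSeconds query parameter. A minimal sketch of bounding a List call under that assumption; "clientset" is a hypothetical *kubernetes.Clientset built elsewhere, not part of this patch:

package main

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// listIngressesBounded bounds the call via TimeoutSeconds, which the API
// server enforces; the pinned client no longer adds a matching local
// HTTP timeout from this field.
func listIngressesBounded(clientset *kubernetes.Clientset) error {
	timeout := int64(30) // seconds
	_, err := clientset.ExtensionsV1beta1().Ingresses("default").
		List(metav1.ListOptions{TimeoutSeconds: &timeout})
	return err
}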
func (c *ingresses) List(opts v1.ListOptions) (result *v1beta1.IngressList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1beta1.IngressList{} err = c.client.Get(). Namespace(c.ns). Resource("ingresses"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -95,16 +88,11 @@ func (c *ingresses) List(opts v1.ListOptions) (result *v1beta1.IngressList, err // Watch returns a watch.Interface that watches the requested ingresses. func (c *ingresses) Watch(opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("ingresses"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -162,15 +150,10 @@ func (c *ingresses) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *ingresses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Namespace(c.ns). Resource("ingresses"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/networkpolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/networkpolicy.go deleted file mode 100644 index 0607e2dd4..000000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/networkpolicy.go +++ /dev/null @@ -1,174 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1beta1 - -import ( - "time" - - v1beta1 "k8s.io/api/extensions/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// NetworkPoliciesGetter has a method to return a NetworkPolicyInterface. -// A group's client should implement this interface. -type NetworkPoliciesGetter interface { - NetworkPolicies(namespace string) NetworkPolicyInterface -} - -// NetworkPolicyInterface has methods to work with NetworkPolicy resources. 
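With extensions/v1beta1 NetworkPolicy support deleted from this vendored snapshot, NetworkPolicy access goes only through the networking/v1 typed client that remains (see the networking/v1 hunks later in the patch). A sketch under that assumption, with "clientset" again a hypothetical *kubernetes.Clientset:

package main

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// listNetworkPolicies uses the networking/v1 typed client, the only
// NetworkPolicy client left in this vendored client-go.
func listNetworkPolicies(clientset *kubernetes.Clientset) error {
	_, err := clientset.NetworkingV1().NetworkPolicies("default").
		List(metav1.ListOptions{})
	return err
}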
-type NetworkPolicyInterface interface { - Create(*v1beta1.NetworkPolicy) (*v1beta1.NetworkPolicy, error) - Update(*v1beta1.NetworkPolicy) (*v1beta1.NetworkPolicy, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1beta1.NetworkPolicy, error) - List(opts v1.ListOptions) (*v1beta1.NetworkPolicyList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.NetworkPolicy, err error) - NetworkPolicyExpansion -} - -// networkPolicies implements NetworkPolicyInterface -type networkPolicies struct { - client rest.Interface - ns string -} - -// newNetworkPolicies returns a NetworkPolicies -func newNetworkPolicies(c *ExtensionsV1beta1Client, namespace string) *networkPolicies { - return &networkPolicies{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the networkPolicy, and returns the corresponding networkPolicy object, and an error if there is any. -func (c *networkPolicies) Get(name string, options v1.GetOptions) (result *v1beta1.NetworkPolicy, err error) { - result = &v1beta1.NetworkPolicy{} - err = c.client.Get(). - Namespace(c.ns). - Resource("networkpolicies"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of NetworkPolicies that match those selectors. -func (c *networkPolicies) List(opts v1.ListOptions) (result *v1beta1.NetworkPolicyList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.NetworkPolicyList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("networkpolicies"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested networkPolicies. -func (c *networkPolicies) Watch(opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("networkpolicies"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch() -} - -// Create takes the representation of a networkPolicy and creates it. Returns the server's representation of the networkPolicy, and an error, if there is any. -func (c *networkPolicies) Create(networkPolicy *v1beta1.NetworkPolicy) (result *v1beta1.NetworkPolicy, err error) { - result = &v1beta1.NetworkPolicy{} - err = c.client.Post(). - Namespace(c.ns). - Resource("networkpolicies"). - Body(networkPolicy). - Do(). - Into(result) - return -} - -// Update takes the representation of a networkPolicy and updates it. Returns the server's representation of the networkPolicy, and an error, if there is any. -func (c *networkPolicies) Update(networkPolicy *v1beta1.NetworkPolicy) (result *v1beta1.NetworkPolicy, err error) { - result = &v1beta1.NetworkPolicy{} - err = c.client.Put(). - Namespace(c.ns). - Resource("networkpolicies"). - Name(networkPolicy.Name). - Body(networkPolicy). - Do(). - Into(result) - return -} - -// Delete takes name of the networkPolicy and deletes it. Returns an error if one occurs. 
-func (c *networkPolicies) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("networkpolicies"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *networkPolicies) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("networkpolicies"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched networkPolicy. -func (c *networkPolicies) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.NetworkPolicy, err error) { - result = &v1beta1.NetworkPolicy{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("networkpolicies"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/podsecuritypolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/podsecuritypolicy.go index a947a54a6..8099d7730 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/podsecuritypolicy.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/podsecuritypolicy.go @@ -19,8 +19,6 @@ limitations under the License. package v1beta1 import ( - "time" - v1beta1 "k8s.io/api/extensions/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -74,15 +72,10 @@ func (c *podSecurityPolicies) Get(name string, options v1.GetOptions) (result *v // List takes label and field selectors, and returns the list of PodSecurityPolicies that match those selectors. func (c *podSecurityPolicies) List(opts v1.ListOptions) (result *v1beta1.PodSecurityPolicyList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1beta1.PodSecurityPolicyList{} err = c.client.Get(). Resource("podsecuritypolicies"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -90,15 +83,10 @@ func (c *podSecurityPolicies) List(opts v1.ListOptions) (result *v1beta1.PodSecu // Watch returns a watch.Interface that watches the requested podSecurityPolicies. func (c *podSecurityPolicies) Watch(opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Resource("podsecuritypolicies"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -137,14 +125,9 @@ func (c *podSecurityPolicies) Delete(name string, options *v1.DeleteOptions) err // DeleteCollection deletes a collection of objects. func (c *podSecurityPolicies) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Resource("podsecuritypolicies"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). 
Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/replicaset.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/replicaset.go index 444029058..7e61fa2d1 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/replicaset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/replicaset.go @@ -19,8 +19,6 @@ limitations under the License. package v1beta1 import ( - "time" - v1beta1 "k8s.io/api/extensions/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -81,16 +79,11 @@ func (c *replicaSets) Get(name string, options v1.GetOptions) (result *v1beta1.R // List takes label and field selectors, and returns the list of ReplicaSets that match those selectors. func (c *replicaSets) List(opts v1.ListOptions) (result *v1beta1.ReplicaSetList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1beta1.ReplicaSetList{} err = c.client.Get(). Namespace(c.ns). Resource("replicasets"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -98,16 +91,11 @@ func (c *replicaSets) List(opts v1.ListOptions) (result *v1beta1.ReplicaSetList, // Watch returns a watch.Interface that watches the requested replicaSets. func (c *replicaSets) Watch(opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("replicasets"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -165,15 +153,10 @@ func (c *replicaSets) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *replicaSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Namespace(c.ns). Resource("replicasets"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/scale.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/scale.go new file mode 100644 index 000000000..6ee677acd --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/scale.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + rest "k8s.io/client-go/rest" +) + +// ScalesGetter has a method to return a ScaleInterface. +// A group's client should implement this interface. +type ScalesGetter interface { + Scales(namespace string) ScaleInterface +} + +// ScaleInterface has methods to work with Scale resources. 
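The new scale.go and scale_expansion.go files below restore the older expansion-style Scales client, whose Get and Update take a bare kind string and guess the resource via meta.UnsafeGuessKindToResource, as the TODOs in the vendored code note. A usage sketch, assuming a hypothetical *kubernetes.Clientset and an unambiguous kind:

package main

import "k8s.io/client-go/kubernetes"

// resizeReplicaSet reads and rewrites the scale subresource by kind; the
// kind-to-resource guess only works when the kind is unambiguous.
func resizeReplicaSet(clientset *kubernetes.Clientset, ns, name string, replicas int32) error {
	scales := clientset.ExtensionsV1beta1().Scales(ns)
	scale, err := scales.Get("ReplicaSet", name)
	if err != nil {
		return err
	}
	scale.Spec.Replicas = replicas
	_, err = scales.Update("ReplicaSet", scale)
	return err
}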
+type ScaleInterface interface { + ScaleExpansion +} + +// scales implements ScaleInterface +type scales struct { + client rest.Interface + ns string +} + +// newScales returns a Scales +func newScales(c *ExtensionsV1beta1Client, namespace string) *scales { + return &scales{ + client: c.RESTClient(), + ns: namespace, + } +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/scale_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/scale_expansion.go new file mode 100644 index 000000000..c9733cb28 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/scale_expansion.go @@ -0,0 +1,65 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "k8s.io/api/extensions/v1beta1" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// The ScaleExpansion interface allows manually adding extra methods to the ScaleInterface. +type ScaleExpansion interface { + Get(kind string, name string) (*v1beta1.Scale, error) + Update(kind string, scale *v1beta1.Scale) (*v1beta1.Scale, error) +} + +// Get takes the reference to scale subresource and returns the subresource or error, if one occurs. +func (c *scales) Get(kind string, name string) (result *v1beta1.Scale, err error) { + result = &v1beta1.Scale{} + + // TODO this method needs to take a proper unambiguous kind + fullyQualifiedKind := schema.GroupVersionKind{Kind: kind} + resource, _ := meta.UnsafeGuessKindToResource(fullyQualifiedKind) + + err = c.client.Get(). + Namespace(c.ns). + Resource(resource.Resource). + Name(name). + SubResource("scale"). + Do(). + Into(result) + return +} + +func (c *scales) Update(kind string, scale *v1beta1.Scale) (result *v1beta1.Scale, err error) { + result = &v1beta1.Scale{} + + // TODO this method needs to take a proper unambiguous kind + fullyQualifiedKind := schema.GroupVersionKind{Kind: kind} + resource, _ := meta.UnsafeGuessKindToResource(fullyQualifiedKind) + + err = c.client.Put(). + Namespace(scale.Namespace). + Resource(resource.Resource). + Name(scale.Name). + SubResource("scale"). + Body(scale). + Do(). 
+ Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networking_client.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networking_client.go index 5315d9b92..8684db456 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networking_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networking_client.go @@ -20,6 +20,7 @@ package v1 import ( v1 "k8s.io/api/networking/v1" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -70,7 +71,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networkpolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networkpolicy.go index 3f39be957..d8f0a6b47 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networkpolicy.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networkpolicy.go @@ -19,8 +19,6 @@ limitations under the License. package v1 import ( - "time" - v1 "k8s.io/api/networking/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -77,16 +75,11 @@ func (c *networkPolicies) Get(name string, options metav1.GetOptions) (result *v // List takes label and field selectors, and returns the list of NetworkPolicies that match those selectors. func (c *networkPolicies) List(opts metav1.ListOptions) (result *v1.NetworkPolicyList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1.NetworkPolicyList{} err = c.client.Get(). Namespace(c.ns). Resource("networkpolicies"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -94,16 +87,11 @@ func (c *networkPolicies) List(opts metav1.ListOptions) (result *v1.NetworkPolic // Watch returns a watch.Interface that watches the requested networkPolicies. func (c *networkPolicies) Watch(opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("networkpolicies"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -145,15 +133,10 @@ func (c *networkPolicies) Delete(name string, options *metav1.DeleteOptions) err // DeleteCollection deletes a collection of objects. func (c *networkPolicies) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Namespace(c.ns). Resource("networkpolicies"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). 
Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ingress.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ingress.go deleted file mode 100644 index 8d76678f1..000000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ingress.go +++ /dev/null @@ -1,191 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1beta1 - -import ( - "time" - - v1beta1 "k8s.io/api/networking/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// IngressesGetter has a method to return a IngressInterface. -// A group's client should implement this interface. -type IngressesGetter interface { - Ingresses(namespace string) IngressInterface -} - -// IngressInterface has methods to work with Ingress resources. -type IngressInterface interface { - Create(*v1beta1.Ingress) (*v1beta1.Ingress, error) - Update(*v1beta1.Ingress) (*v1beta1.Ingress, error) - UpdateStatus(*v1beta1.Ingress) (*v1beta1.Ingress, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1beta1.Ingress, error) - List(opts v1.ListOptions) (*v1beta1.IngressList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Ingress, err error) - IngressExpansion -} - -// ingresses implements IngressInterface -type ingresses struct { - client rest.Interface - ns string -} - -// newIngresses returns a Ingresses -func newIngresses(c *NetworkingV1beta1Client, namespace string) *ingresses { - return &ingresses{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the ingress, and returns the corresponding ingress object, and an error if there is any. -func (c *ingresses) Get(name string, options v1.GetOptions) (result *v1beta1.Ingress, err error) { - result = &v1beta1.Ingress{} - err = c.client.Get(). - Namespace(c.ns). - Resource("ingresses"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Ingresses that match those selectors. -func (c *ingresses) List(opts v1.ListOptions) (result *v1beta1.IngressList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.IngressList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("ingresses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested ingresses. 
-func (c *ingresses) Watch(opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("ingresses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch() -} - -// Create takes the representation of a ingress and creates it. Returns the server's representation of the ingress, and an error, if there is any. -func (c *ingresses) Create(ingress *v1beta1.Ingress) (result *v1beta1.Ingress, err error) { - result = &v1beta1.Ingress{} - err = c.client.Post(). - Namespace(c.ns). - Resource("ingresses"). - Body(ingress). - Do(). - Into(result) - return -} - -// Update takes the representation of a ingress and updates it. Returns the server's representation of the ingress, and an error, if there is any. -func (c *ingresses) Update(ingress *v1beta1.Ingress) (result *v1beta1.Ingress, err error) { - result = &v1beta1.Ingress{} - err = c.client.Put(). - Namespace(c.ns). - Resource("ingresses"). - Name(ingress.Name). - Body(ingress). - Do(). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *ingresses) UpdateStatus(ingress *v1beta1.Ingress) (result *v1beta1.Ingress, err error) { - result = &v1beta1.Ingress{} - err = c.client.Put(). - Namespace(c.ns). - Resource("ingresses"). - Name(ingress.Name). - SubResource("status"). - Body(ingress). - Do(). - Into(result) - return -} - -// Delete takes name of the ingress and deletes it. Returns an error if one occurs. -func (c *ingresses) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("ingresses"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *ingresses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("ingresses"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched ingress. -func (c *ingresses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Ingress, err error) { - result = &v1beta1.Ingress{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("ingresses"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/networking_client.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/networking_client.go deleted file mode 100644 index ee523f8e7..000000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/networking_client.go +++ /dev/null @@ -1,89 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1beta1 "k8s.io/api/networking/v1beta1" - "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -type NetworkingV1beta1Interface interface { - RESTClient() rest.Interface - IngressesGetter -} - -// NetworkingV1beta1Client is used to interact with features provided by the networking.k8s.io group. -type NetworkingV1beta1Client struct { - restClient rest.Interface -} - -func (c *NetworkingV1beta1Client) Ingresses(namespace string) IngressInterface { - return newIngresses(c, namespace) -} - -// NewForConfig creates a new NetworkingV1beta1Client for the given config. -func NewForConfig(c *rest.Config) (*NetworkingV1beta1Client, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - client, err := rest.RESTClientFor(&config) - if err != nil { - return nil, err - } - return &NetworkingV1beta1Client{client}, nil -} - -// NewForConfigOrDie creates a new NetworkingV1beta1Client for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *NetworkingV1beta1Client { - client, err := NewForConfig(c) - if err != nil { - panic(err) - } - return client -} - -// New creates a new NetworkingV1beta1Client for the given RESTClient. -func New(c rest.Interface) *NetworkingV1beta1Client { - return &NetworkingV1beta1Client{c} -} - -func setConfigDefaults(config *rest.Config) error { - gv := v1beta1.SchemeGroupVersion - config.GroupVersion = &gv - config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() - - if config.UserAgent == "" { - config.UserAgent = rest.DefaultKubernetesUserAgent() - } - - return nil -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *NetworkingV1beta1Client) RESTClient() rest.Interface { - if c == nil { - return nil - } - return c.restClient -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/runtimeclass.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/runtimeclass.go deleted file mode 100644 index 044460ec0..000000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/runtimeclass.go +++ /dev/null @@ -1,164 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
- -package v1alpha1 - -import ( - "time" - - v1alpha1 "k8s.io/api/node/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// RuntimeClassesGetter has a method to return a RuntimeClassInterface. -// A group's client should implement this interface. -type RuntimeClassesGetter interface { - RuntimeClasses() RuntimeClassInterface -} - -// RuntimeClassInterface has methods to work with RuntimeClass resources. -type RuntimeClassInterface interface { - Create(*v1alpha1.RuntimeClass) (*v1alpha1.RuntimeClass, error) - Update(*v1alpha1.RuntimeClass) (*v1alpha1.RuntimeClass, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1alpha1.RuntimeClass, error) - List(opts v1.ListOptions) (*v1alpha1.RuntimeClassList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.RuntimeClass, err error) - RuntimeClassExpansion -} - -// runtimeClasses implements RuntimeClassInterface -type runtimeClasses struct { - client rest.Interface -} - -// newRuntimeClasses returns a RuntimeClasses -func newRuntimeClasses(c *NodeV1alpha1Client) *runtimeClasses { - return &runtimeClasses{ - client: c.RESTClient(), - } -} - -// Get takes name of the runtimeClass, and returns the corresponding runtimeClass object, and an error if there is any. -func (c *runtimeClasses) Get(name string, options v1.GetOptions) (result *v1alpha1.RuntimeClass, err error) { - result = &v1alpha1.RuntimeClass{} - err = c.client.Get(). - Resource("runtimeclasses"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of RuntimeClasses that match those selectors. -func (c *runtimeClasses) List(opts v1.ListOptions) (result *v1alpha1.RuntimeClassList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.RuntimeClassList{} - err = c.client.Get(). - Resource("runtimeclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested runtimeClasses. -func (c *runtimeClasses) Watch(opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("runtimeclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch() -} - -// Create takes the representation of a runtimeClass and creates it. Returns the server's representation of the runtimeClass, and an error, if there is any. -func (c *runtimeClasses) Create(runtimeClass *v1alpha1.RuntimeClass) (result *v1alpha1.RuntimeClass, err error) { - result = &v1alpha1.RuntimeClass{} - err = c.client.Post(). - Resource("runtimeclasses"). - Body(runtimeClass). - Do(). - Into(result) - return -} - -// Update takes the representation of a runtimeClass and updates it. Returns the server's representation of the runtimeClass, and an error, if there is any. 
-func (c *runtimeClasses) Update(runtimeClass *v1alpha1.RuntimeClass) (result *v1alpha1.RuntimeClass, err error) { - result = &v1alpha1.RuntimeClass{} - err = c.client.Put(). - Resource("runtimeclasses"). - Name(runtimeClass.Name). - Body(runtimeClass). - Do(). - Into(result) - return -} - -// Delete takes name of the runtimeClass and deletes it. Returns an error if one occurs. -func (c *runtimeClasses) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Resource("runtimeclasses"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *runtimeClasses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("runtimeclasses"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched runtimeClass. -func (c *runtimeClasses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.RuntimeClass, err error) { - result = &v1alpha1.RuntimeClass{} - err = c.client.Patch(pt). - Resource("runtimeclasses"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/runtimeclass.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/runtimeclass.go deleted file mode 100644 index b3f7c497f..000000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/runtimeclass.go +++ /dev/null @@ -1,164 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1beta1 - -import ( - "time" - - v1beta1 "k8s.io/api/node/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// RuntimeClassesGetter has a method to return a RuntimeClassInterface. -// A group's client should implement this interface. -type RuntimeClassesGetter interface { - RuntimeClasses() RuntimeClassInterface -} - -// RuntimeClassInterface has methods to work with RuntimeClass resources. 
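This snapshot also deletes the node/v1alpha1 and node/v1beta1 RuntimeClass clients, so nothing built against this vendored client-go can resolve a RuntimeClass; a consumer needing it would have to vendor a newer release where a call like the sketch below exists. Hypothetical, written against a post-1.14 client-go with the pre-context signatures, not against this patch:

package main

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// getRuntimeClass fetches a cluster-scoped RuntimeClass; requires a
// client-go release that ships NodeV1beta1 (roughly kubernetes-1.14
// through 1.17 for these signatures).
func getRuntimeClass(clientset *kubernetes.Clientset, name string) error {
	_, err := clientset.NodeV1beta1().RuntimeClasses().Get(name, metav1.GetOptions{})
	return err
}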
-type RuntimeClassInterface interface { - Create(*v1beta1.RuntimeClass) (*v1beta1.RuntimeClass, error) - Update(*v1beta1.RuntimeClass) (*v1beta1.RuntimeClass, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1beta1.RuntimeClass, error) - List(opts v1.ListOptions) (*v1beta1.RuntimeClassList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.RuntimeClass, err error) - RuntimeClassExpansion -} - -// runtimeClasses implements RuntimeClassInterface -type runtimeClasses struct { - client rest.Interface -} - -// newRuntimeClasses returns a RuntimeClasses -func newRuntimeClasses(c *NodeV1beta1Client) *runtimeClasses { - return &runtimeClasses{ - client: c.RESTClient(), - } -} - -// Get takes name of the runtimeClass, and returns the corresponding runtimeClass object, and an error if there is any. -func (c *runtimeClasses) Get(name string, options v1.GetOptions) (result *v1beta1.RuntimeClass, err error) { - result = &v1beta1.RuntimeClass{} - err = c.client.Get(). - Resource("runtimeclasses"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of RuntimeClasses that match those selectors. -func (c *runtimeClasses) List(opts v1.ListOptions) (result *v1beta1.RuntimeClassList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.RuntimeClassList{} - err = c.client.Get(). - Resource("runtimeclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested runtimeClasses. -func (c *runtimeClasses) Watch(opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("runtimeclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch() -} - -// Create takes the representation of a runtimeClass and creates it. Returns the server's representation of the runtimeClass, and an error, if there is any. -func (c *runtimeClasses) Create(runtimeClass *v1beta1.RuntimeClass) (result *v1beta1.RuntimeClass, err error) { - result = &v1beta1.RuntimeClass{} - err = c.client.Post(). - Resource("runtimeclasses"). - Body(runtimeClass). - Do(). - Into(result) - return -} - -// Update takes the representation of a runtimeClass and updates it. Returns the server's representation of the runtimeClass, and an error, if there is any. -func (c *runtimeClasses) Update(runtimeClass *v1beta1.RuntimeClass) (result *v1beta1.RuntimeClass, err error) { - result = &v1beta1.RuntimeClass{} - err = c.client.Put(). - Resource("runtimeclasses"). - Name(runtimeClass.Name). - Body(runtimeClass). - Do(). - Into(result) - return -} - -// Delete takes name of the runtimeClass and deletes it. Returns an error if one occurs. -func (c *runtimeClasses) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Resource("runtimeclasses"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. 
-func (c *runtimeClasses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("runtimeclasses"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched runtimeClass. -func (c *runtimeClasses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.RuntimeClass, err error) { - result = &v1beta1.RuntimeClass{} - err = c.client.Patch(pt). - Resource("runtimeclasses"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/poddisruptionbudget.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/poddisruptionbudget.go index 864af9a26..a11f27eb2 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/poddisruptionbudget.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/poddisruptionbudget.go @@ -19,8 +19,6 @@ limitations under the License. package v1beta1 import ( - "time" - v1beta1 "k8s.io/api/policy/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -78,16 +76,11 @@ func (c *podDisruptionBudgets) Get(name string, options v1.GetOptions) (result * // List takes label and field selectors, and returns the list of PodDisruptionBudgets that match those selectors. func (c *podDisruptionBudgets) List(opts v1.ListOptions) (result *v1beta1.PodDisruptionBudgetList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1beta1.PodDisruptionBudgetList{} err = c.client.Get(). Namespace(c.ns). Resource("poddisruptionbudgets"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -95,16 +88,11 @@ func (c *podDisruptionBudgets) List(opts v1.ListOptions) (result *v1beta1.PodDis // Watch returns a watch.Interface that watches the requested podDisruptionBudgets. func (c *podDisruptionBudgets) Watch(opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("poddisruptionbudgets"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -162,15 +150,10 @@ func (c *podDisruptionBudgets) Delete(name string, options *v1.DeleteOptions) er // DeleteCollection deletes a collection of objects. func (c *podDisruptionBudgets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Namespace(c.ns). Resource("poddisruptionbudgets"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). 
Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/podsecuritypolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/podsecuritypolicy.go index d02096d74..355be1e9c 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/podsecuritypolicy.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/podsecuritypolicy.go @@ -19,8 +19,6 @@ limitations under the License. package v1beta1 import ( - "time" - v1beta1 "k8s.io/api/policy/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -74,15 +72,10 @@ func (c *podSecurityPolicies) Get(name string, options v1.GetOptions) (result *v // List takes label and field selectors, and returns the list of PodSecurityPolicies that match those selectors. func (c *podSecurityPolicies) List(opts v1.ListOptions) (result *v1beta1.PodSecurityPolicyList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1beta1.PodSecurityPolicyList{} err = c.client.Get(). Resource("podsecuritypolicies"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -90,15 +83,10 @@ func (c *podSecurityPolicies) List(opts v1.ListOptions) (result *v1beta1.PodSecu // Watch returns a watch.Interface that watches the requested podSecurityPolicies. func (c *podSecurityPolicies) Watch(opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Resource("podsecuritypolicies"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -137,14 +125,9 @@ func (c *podSecurityPolicies) Delete(name string, options *v1.DeleteOptions) err // DeleteCollection deletes a collection of objects. func (c *podSecurityPolicies) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Resource("podsecuritypolicies"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). 
Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/policy_client.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/policy_client.go index 8b8b22c6d..020e185e6 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/policy_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/policy_client.go @@ -20,6 +20,7 @@ package v1beta1 import ( v1beta1 "k8s.io/api/policy/v1beta1" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -80,7 +81,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1beta1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrole.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrole.go index 0a47c4411..c4299d4c6 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrole.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrole.go @@ -19,8 +19,6 @@ limitations under the License. package v1 import ( - "time" - v1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -74,15 +72,10 @@ func (c *clusterRoles) Get(name string, options metav1.GetOptions) (result *v1.C // List takes label and field selectors, and returns the list of ClusterRoles that match those selectors. func (c *clusterRoles) List(opts metav1.ListOptions) (result *v1.ClusterRoleList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1.ClusterRoleList{} err = c.client.Get(). Resource("clusterroles"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -90,15 +83,10 @@ func (c *clusterRoles) List(opts metav1.ListOptions) (result *v1.ClusterRoleList // Watch returns a watch.Interface that watches the requested clusterRoles. func (c *clusterRoles) Watch(opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Resource("clusterroles"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -137,14 +125,9 @@ func (c *clusterRoles) Delete(name string, options *metav1.DeleteOptions) error // DeleteCollection deletes a collection of objects. func (c *clusterRoles) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Resource("clusterroles"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrolebinding.go index c16ebc312..30c0469a4 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrolebinding.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrolebinding.go @@ -19,8 +19,6 @@ limitations under the License. 
package v1 import ( - "time" - v1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -74,15 +72,10 @@ func (c *clusterRoleBindings) Get(name string, options metav1.GetOptions) (resul // List takes label and field selectors, and returns the list of ClusterRoleBindings that match those selectors. func (c *clusterRoleBindings) List(opts metav1.ListOptions) (result *v1.ClusterRoleBindingList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1.ClusterRoleBindingList{} err = c.client.Get(). Resource("clusterrolebindings"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -90,15 +83,10 @@ func (c *clusterRoleBindings) List(opts metav1.ListOptions) (result *v1.ClusterR // Watch returns a watch.Interface that watches the requested clusterRoleBindings. func (c *clusterRoleBindings) Watch(opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Resource("clusterrolebindings"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -137,14 +125,9 @@ func (c *clusterRoleBindings) Delete(name string, options *metav1.DeleteOptions) // DeleteCollection deletes a collection of objects. func (c *clusterRoleBindings) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Resource("clusterrolebindings"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rbac_client.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rbac_client.go index 1bc0179c6..e3855bb9b 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rbac_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rbac_client.go @@ -20,6 +20,7 @@ package v1 import ( v1 "k8s.io/api/rbac/v1" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -85,7 +86,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/role.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/role.go index a17d791ff..81ea12a9f 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/role.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/role.go @@ -19,8 +19,6 @@ limitations under the License. package v1 import ( - "time" - v1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -77,16 +75,11 @@ func (c *roles) Get(name string, options metav1.GetOptions) (result *v1.Role, er // List takes label and field selectors, and returns the list of Roles that match those selectors. 
func (c *roles) List(opts metav1.ListOptions) (result *v1.RoleList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1.RoleList{} err = c.client.Get(). Namespace(c.ns). Resource("roles"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -94,16 +87,11 @@ func (c *roles) List(opts metav1.ListOptions) (result *v1.RoleList, err error) { // Watch returns a watch.Interface that watches the requested roles. func (c *roles) Watch(opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("roles"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -145,15 +133,10 @@ func (c *roles) Delete(name string, options *metav1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *roles) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Namespace(c.ns). Resource("roles"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rolebinding.go index c87e45718..17c6f9913 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rolebinding.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rolebinding.go @@ -19,8 +19,6 @@ limitations under the License. package v1 import ( - "time" - v1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -77,16 +75,11 @@ func (c *roleBindings) Get(name string, options metav1.GetOptions) (result *v1.R // List takes label and field selectors, and returns the list of RoleBindings that match those selectors. func (c *roleBindings) List(opts metav1.ListOptions) (result *v1.RoleBindingList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1.RoleBindingList{} err = c.client.Get(). Namespace(c.ns). Resource("rolebindings"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -94,16 +87,11 @@ func (c *roleBindings) List(opts metav1.ListOptions) (result *v1.RoleBindingList // Watch returns a watch.Interface that watches the requested roleBindings. func (c *roleBindings) Watch(opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("rolebindings"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -145,15 +133,10 @@ func (c *roleBindings) Delete(name string, options *metav1.DeleteOptions) error // DeleteCollection deletes a collection of objects. 
func (c *roleBindings) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Namespace(c.ns). Resource("rolebindings"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrole.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrole.go index 77e66877e..37a545762 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrole.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrole.go @@ -19,8 +19,6 @@ limitations under the License. package v1alpha1 import ( - "time" - v1alpha1 "k8s.io/api/rbac/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -74,15 +72,10 @@ func (c *clusterRoles) Get(name string, options v1.GetOptions) (result *v1alpha1 // List takes label and field selectors, and returns the list of ClusterRoles that match those selectors. func (c *clusterRoles) List(opts v1.ListOptions) (result *v1alpha1.ClusterRoleList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1alpha1.ClusterRoleList{} err = c.client.Get(). Resource("clusterroles"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -90,15 +83,10 @@ func (c *clusterRoles) List(opts v1.ListOptions) (result *v1alpha1.ClusterRoleLi // Watch returns a watch.Interface that watches the requested clusterRoles. func (c *clusterRoles) Watch(opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Resource("clusterroles"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -137,14 +125,9 @@ func (c *clusterRoles) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *clusterRoles) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Resource("clusterroles"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrolebinding.go index 0d1b9d205..605078906 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrolebinding.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrolebinding.go @@ -19,8 +19,6 @@ limitations under the License. package v1alpha1 import ( - "time" - v1alpha1 "k8s.io/api/rbac/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -74,15 +72,10 @@ func (c *clusterRoleBindings) Get(name string, options v1.GetOptions) (result *v // List takes label and field selectors, and returns the list of ClusterRoleBindings that match those selectors. 
func (c *clusterRoleBindings) List(opts v1.ListOptions) (result *v1alpha1.ClusterRoleBindingList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1alpha1.ClusterRoleBindingList{} err = c.client.Get(). Resource("clusterrolebindings"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -90,15 +83,10 @@ func (c *clusterRoleBindings) List(opts v1.ListOptions) (result *v1alpha1.Cluste // Watch returns a watch.Interface that watches the requested clusterRoleBindings. func (c *clusterRoleBindings) Watch(opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Resource("clusterrolebindings"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -137,14 +125,9 @@ func (c *clusterRoleBindings) Delete(name string, options *v1.DeleteOptions) err // DeleteCollection deletes a collection of objects. func (c *clusterRoleBindings) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Resource("clusterrolebindings"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rbac_client.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rbac_client.go index efbbc68be..de83531ed 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rbac_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rbac_client.go @@ -20,6 +20,7 @@ package v1alpha1 import ( v1alpha1 "k8s.io/api/rbac/v1alpha1" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -85,7 +86,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1alpha1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/role.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/role.go index 4a4b67240..aa6954bb5 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/role.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/role.go @@ -19,8 +19,6 @@ limitations under the License. package v1alpha1 import ( - "time" - v1alpha1 "k8s.io/api/rbac/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -77,16 +75,11 @@ func (c *roles) Get(name string, options v1.GetOptions) (result *v1alpha1.Role, // List takes label and field selectors, and returns the list of Roles that match those selectors. func (c *roles) List(opts v1.ListOptions) (result *v1alpha1.RoleList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1alpha1.RoleList{} err = c.client.Get(). Namespace(c.ns). Resource("roles"). VersionedParams(&opts, scheme.ParameterCodec). 
- Timeout(timeout). Do(). Into(result) return @@ -94,16 +87,11 @@ func (c *roles) List(opts v1.ListOptions) (result *v1alpha1.RoleList, err error) // Watch returns a watch.Interface that watches the requested roles. func (c *roles) Watch(opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("roles"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -145,15 +133,10 @@ func (c *roles) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *roles) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Namespace(c.ns). Resource("roles"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rolebinding.go index bf4e5a10e..0941b8e86 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rolebinding.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rolebinding.go @@ -19,8 +19,6 @@ limitations under the License. package v1alpha1 import ( - "time" - v1alpha1 "k8s.io/api/rbac/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -77,16 +75,11 @@ func (c *roleBindings) Get(name string, options v1.GetOptions) (result *v1alpha1 // List takes label and field selectors, and returns the list of RoleBindings that match those selectors. func (c *roleBindings) List(opts v1.ListOptions) (result *v1alpha1.RoleBindingList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1alpha1.RoleBindingList{} err = c.client.Get(). Namespace(c.ns). Resource("rolebindings"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -94,16 +87,11 @@ func (c *roleBindings) List(opts v1.ListOptions) (result *v1alpha1.RoleBindingLi // Watch returns a watch.Interface that watches the requested roleBindings. func (c *roleBindings) Watch(opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("rolebindings"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -145,15 +133,10 @@ func (c *roleBindings) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *roleBindings) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Namespace(c.ns). Resource("rolebindings"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). 
Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrole.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrole.go index 21d3cab37..bac951c87 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrole.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrole.go @@ -19,8 +19,6 @@ limitations under the License. package v1beta1 import ( - "time" - v1beta1 "k8s.io/api/rbac/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -74,15 +72,10 @@ func (c *clusterRoles) Get(name string, options v1.GetOptions) (result *v1beta1. // List takes label and field selectors, and returns the list of ClusterRoles that match those selectors. func (c *clusterRoles) List(opts v1.ListOptions) (result *v1beta1.ClusterRoleList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1beta1.ClusterRoleList{} err = c.client.Get(). Resource("clusterroles"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -90,15 +83,10 @@ func (c *clusterRoles) List(opts v1.ListOptions) (result *v1beta1.ClusterRoleLis // Watch returns a watch.Interface that watches the requested clusterRoles. func (c *clusterRoles) Watch(opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Resource("clusterroles"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -137,14 +125,9 @@ func (c *clusterRoles) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *clusterRoles) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Resource("clusterroles"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrolebinding.go index 47eb9e4e7..96c91de6e 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrolebinding.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrolebinding.go @@ -19,8 +19,6 @@ limitations under the License. package v1beta1 import ( - "time" - v1beta1 "k8s.io/api/rbac/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -74,15 +72,10 @@ func (c *clusterRoleBindings) Get(name string, options v1.GetOptions) (result *v // List takes label and field selectors, and returns the list of ClusterRoleBindings that match those selectors. func (c *clusterRoleBindings) List(opts v1.ListOptions) (result *v1beta1.ClusterRoleBindingList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1beta1.ClusterRoleBindingList{} err = c.client.Get(). Resource("clusterrolebindings"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). 
Into(result) return @@ -90,15 +83,10 @@ func (c *clusterRoleBindings) List(opts v1.ListOptions) (result *v1beta1.Cluster // Watch returns a watch.Interface that watches the requested clusterRoleBindings. func (c *clusterRoleBindings) Watch(opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Resource("clusterrolebindings"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -137,14 +125,9 @@ func (c *clusterRoleBindings) Delete(name string, options *v1.DeleteOptions) err // DeleteCollection deletes a collection of objects. func (c *clusterRoleBindings) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Resource("clusterrolebindings"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rbac_client.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rbac_client.go index 4db94cfad..46718d731 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rbac_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rbac_client.go @@ -20,6 +20,7 @@ package v1beta1 import ( v1beta1 "k8s.io/api/rbac/v1beta1" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -85,7 +86,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1beta1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/role.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/role.go index 2b61aad52..66f382c07 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/role.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/role.go @@ -19,8 +19,6 @@ limitations under the License. package v1beta1 import ( - "time" - v1beta1 "k8s.io/api/rbac/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -77,16 +75,11 @@ func (c *roles) Get(name string, options v1.GetOptions) (result *v1beta1.Role, e // List takes label and field selectors, and returns the list of Roles that match those selectors. func (c *roles) List(opts v1.ListOptions) (result *v1beta1.RoleList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1beta1.RoleList{} err = c.client.Get(). Namespace(c.ns). Resource("roles"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -94,16 +87,11 @@ func (c *roles) List(opts v1.ListOptions) (result *v1beta1.RoleList, err error) // Watch returns a watch.Interface that watches the requested roles. 
func (c *roles) Watch(opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("roles"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -145,15 +133,10 @@ func (c *roles) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *roles) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Namespace(c.ns). Resource("roles"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rolebinding.go index 0bd118fdf..67d3d331b 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rolebinding.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rolebinding.go @@ -19,8 +19,6 @@ limitations under the License. package v1beta1 import ( - "time" - v1beta1 "k8s.io/api/rbac/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -77,16 +75,11 @@ func (c *roleBindings) Get(name string, options v1.GetOptions) (result *v1beta1. // List takes label and field selectors, and returns the list of RoleBindings that match those selectors. func (c *roleBindings) List(opts v1.ListOptions) (result *v1beta1.RoleBindingList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1beta1.RoleBindingList{} err = c.client.Get(). Namespace(c.ns). Resource("rolebindings"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -94,16 +87,11 @@ func (c *roleBindings) List(opts v1.ListOptions) (result *v1beta1.RoleBindingLis // Watch returns a watch.Interface that watches the requested roleBindings. func (c *roleBindings) Watch(opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("rolebindings"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -145,15 +133,10 @@ func (c *roleBindings) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *roleBindings) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Namespace(c.ns). Resource("rolebindings"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/priorityclass.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/priorityclass.go deleted file mode 100644 index 3abbb7b8e..000000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/priorityclass.go +++ /dev/null @@ -1,164 +0,0 @@ -/* -Copyright The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1 - -import ( - "time" - - v1 "k8s.io/api/scheduling/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// PriorityClassesGetter has a method to return a PriorityClassInterface. -// A group's client should implement this interface. -type PriorityClassesGetter interface { - PriorityClasses() PriorityClassInterface -} - -// PriorityClassInterface has methods to work with PriorityClass resources. -type PriorityClassInterface interface { - Create(*v1.PriorityClass) (*v1.PriorityClass, error) - Update(*v1.PriorityClass) (*v1.PriorityClass, error) - Delete(name string, options *metav1.DeleteOptions) error - DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(name string, options metav1.GetOptions) (*v1.PriorityClass, error) - List(opts metav1.ListOptions) (*v1.PriorityClassList, error) - Watch(opts metav1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.PriorityClass, err error) - PriorityClassExpansion -} - -// priorityClasses implements PriorityClassInterface -type priorityClasses struct { - client rest.Interface -} - -// newPriorityClasses returns a PriorityClasses -func newPriorityClasses(c *SchedulingV1Client) *priorityClasses { - return &priorityClasses{ - client: c.RESTClient(), - } -} - -// Get takes name of the priorityClass, and returns the corresponding priorityClass object, and an error if there is any. -func (c *priorityClasses) Get(name string, options metav1.GetOptions) (result *v1.PriorityClass, err error) { - result = &v1.PriorityClass{} - err = c.client.Get(). - Resource("priorityclasses"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of PriorityClasses that match those selectors. -func (c *priorityClasses) List(opts metav1.ListOptions) (result *v1.PriorityClassList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.PriorityClassList{} - err = c.client.Get(). - Resource("priorityclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested priorityClasses. -func (c *priorityClasses) Watch(opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("priorityclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). 
- Watch() -} - -// Create takes the representation of a priorityClass and creates it. Returns the server's representation of the priorityClass, and an error, if there is any. -func (c *priorityClasses) Create(priorityClass *v1.PriorityClass) (result *v1.PriorityClass, err error) { - result = &v1.PriorityClass{} - err = c.client.Post(). - Resource("priorityclasses"). - Body(priorityClass). - Do(). - Into(result) - return -} - -// Update takes the representation of a priorityClass and updates it. Returns the server's representation of the priorityClass, and an error, if there is any. -func (c *priorityClasses) Update(priorityClass *v1.PriorityClass) (result *v1.PriorityClass, err error) { - result = &v1.PriorityClass{} - err = c.client.Put(). - Resource("priorityclasses"). - Name(priorityClass.Name). - Body(priorityClass). - Do(). - Into(result) - return -} - -// Delete takes name of the priorityClass and deletes it. Returns an error if one occurs. -func (c *priorityClasses) Delete(name string, options *metav1.DeleteOptions) error { - return c.client.Delete(). - Resource("priorityclasses"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *priorityClasses) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("priorityclasses"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched priorityClass. -func (c *priorityClasses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.PriorityClass, err error) { - result = &v1.PriorityClass{} - err = c.client.Patch(pt). - Resource("priorityclasses"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/scheduling_client.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/scheduling_client.go deleted file mode 100644 index 5028bac89..000000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/scheduling_client.go +++ /dev/null @@ -1,89 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1 - -import ( - v1 "k8s.io/api/scheduling/v1" - "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -type SchedulingV1Interface interface { - RESTClient() rest.Interface - PriorityClassesGetter -} - -// SchedulingV1Client is used to interact with features provided by the scheduling.k8s.io group. 
-type SchedulingV1Client struct { - restClient rest.Interface -} - -func (c *SchedulingV1Client) PriorityClasses() PriorityClassInterface { - return newPriorityClasses(c) -} - -// NewForConfig creates a new SchedulingV1Client for the given config. -func NewForConfig(c *rest.Config) (*SchedulingV1Client, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - client, err := rest.RESTClientFor(&config) - if err != nil { - return nil, err - } - return &SchedulingV1Client{client}, nil -} - -// NewForConfigOrDie creates a new SchedulingV1Client for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *SchedulingV1Client { - client, err := NewForConfig(c) - if err != nil { - panic(err) - } - return client -} - -// New creates a new SchedulingV1Client for the given RESTClient. -func New(c rest.Interface) *SchedulingV1Client { - return &SchedulingV1Client{c} -} - -func setConfigDefaults(config *rest.Config) error { - gv := v1.SchemeGroupVersion - config.GroupVersion = &gv - config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() - - if config.UserAgent == "" { - config.UserAgent = rest.DefaultKubernetesUserAgent() - } - - return nil -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *SchedulingV1Client) RESTClient() rest.Interface { - if c == nil { - return nil - } - return c.restClient -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/priorityclass.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/priorityclass.go index 29d646fb1..6845d25c3 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/priorityclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/priorityclass.go @@ -19,8 +19,6 @@ limitations under the License. package v1alpha1 import ( - "time" - v1alpha1 "k8s.io/api/scheduling/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -74,15 +72,10 @@ func (c *priorityClasses) Get(name string, options v1.GetOptions) (result *v1alp // List takes label and field selectors, and returns the list of PriorityClasses that match those selectors. func (c *priorityClasses) List(opts v1.ListOptions) (result *v1alpha1.PriorityClassList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1alpha1.PriorityClassList{} err = c.client.Get(). Resource("priorityclasses"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -90,15 +83,10 @@ func (c *priorityClasses) List(opts v1.ListOptions) (result *v1alpha1.PriorityCl // Watch returns a watch.Interface that watches the requested priorityClasses. func (c *priorityClasses) Watch(opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Resource("priorityclasses"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -137,14 +125,9 @@ func (c *priorityClasses) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. 
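// Review note: the wholesale file deletions above (the scheduling/v1
// PriorityClass client and SchedulingV1Client) are not hand-edits: the
// scheduling.k8s.io/v1 typed client does not exist in the older client-go
// generation this patch vendors, so the files disappear when dep re-prunes
// the vendor tree. Against this vendored tree, PriorityClass access goes
// through the v1alpha1/v1beta1 groups that remain; a sketch, assuming a
// configured *kubernetes.Clientset named clientset:
//
//     pcs, err := clientset.SchedulingV1beta1().PriorityClasses().List(metav1.ListOptions{})
//     if err != nil {
//         // handle the error
//     }
//     _ = pcs // iterate pcs.Items as needed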
func (c *priorityClasses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Resource("priorityclasses"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/scheduling_client.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/scheduling_client.go index 83bc0b8a9..375f41b8d 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/scheduling_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/scheduling_client.go @@ -20,6 +20,7 @@ package v1alpha1 import ( v1alpha1 "k8s.io/api/scheduling/v1alpha1" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -70,7 +71,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1alpha1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/priorityclass.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/priorityclass.go index 5e402f8e3..57b9766e4 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/priorityclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/priorityclass.go @@ -19,8 +19,6 @@ limitations under the License. package v1beta1 import ( - "time" - v1beta1 "k8s.io/api/scheduling/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -74,15 +72,10 @@ func (c *priorityClasses) Get(name string, options v1.GetOptions) (result *v1bet // List takes label and field selectors, and returns the list of PriorityClasses that match those selectors. func (c *priorityClasses) List(opts v1.ListOptions) (result *v1beta1.PriorityClassList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1beta1.PriorityClassList{} err = c.client.Get(). Resource("priorityclasses"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -90,15 +83,10 @@ func (c *priorityClasses) List(opts v1.ListOptions) (result *v1beta1.PriorityCla // Watch returns a watch.Interface that watches the requested priorityClasses. func (c *priorityClasses) Watch(opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Resource("priorityclasses"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -137,14 +125,9 @@ func (c *priorityClasses) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. 
func (c *priorityClasses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Resource("priorityclasses"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/scheduling_client.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/scheduling_client.go index 373f5cca8..6feec4aec 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/scheduling_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/scheduling_client.go @@ -20,6 +20,7 @@ package v1beta1 import ( v1beta1 "k8s.io/api/scheduling/v1beta1" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -70,7 +71,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1beta1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/podpreset.go b/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/podpreset.go index 8fd6adc56..f000ae486 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/podpreset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/podpreset.go @@ -19,8 +19,6 @@ limitations under the License. package v1alpha1 import ( - "time" - v1alpha1 "k8s.io/api/settings/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -77,16 +75,11 @@ func (c *podPresets) Get(name string, options v1.GetOptions) (result *v1alpha1.P // List takes label and field selectors, and returns the list of PodPresets that match those selectors. func (c *podPresets) List(opts v1.ListOptions) (result *v1alpha1.PodPresetList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1alpha1.PodPresetList{} err = c.client.Get(). Namespace(c.ns). Resource("podpresets"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -94,16 +87,11 @@ func (c *podPresets) List(opts v1.ListOptions) (result *v1alpha1.PodPresetList, // Watch returns a watch.Interface that watches the requested podPresets. func (c *podPresets) Watch(opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("podpresets"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -145,15 +133,10 @@ func (c *podPresets) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *podPresets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Namespace(c.ns). 
Resource("podpresets"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/settings_client.go b/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/settings_client.go index 8d3a8d8e1..c2a03b960 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/settings_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/settings_client.go @@ -20,6 +20,7 @@ package v1alpha1 import ( v1alpha1 "k8s.io/api/settings/v1alpha1" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -70,7 +71,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1alpha1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/generated_expansion.go index ccac16114..2bea7ec7f 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/generated_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/generated_expansion.go @@ -19,5 +19,3 @@ limitations under the License. package v1 type StorageClassExpansion interface{} - -type VolumeAttachmentExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storage_client.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storage_client.go index 1afbe93c9..ac48f4916 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storage_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storage_client.go @@ -20,6 +20,7 @@ package v1 import ( v1 "k8s.io/api/storage/v1" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -27,7 +28,6 @@ import ( type StorageV1Interface interface { RESTClient() rest.Interface StorageClassesGetter - VolumeAttachmentsGetter } // StorageV1Client is used to interact with features provided by the storage.k8s.io group. @@ -39,10 +39,6 @@ func (c *StorageV1Client) StorageClasses() StorageClassInterface { return newStorageClasses(c) } -func (c *StorageV1Client) VolumeAttachments() VolumeAttachmentInterface { - return newVolumeAttachments(c) -} - // NewForConfig creates a new StorageV1Client for the given config. func NewForConfig(c *rest.Config) (*StorageV1Client, error) { config := *c @@ -75,7 +71,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storageclass.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storageclass.go index 3f4c48f0a..0f7f57f05 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storageclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storageclass.go @@ -19,8 +19,6 @@ limitations under the License. 
package v1 import ( - "time" - v1 "k8s.io/api/storage/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -74,15 +72,10 @@ func (c *storageClasses) Get(name string, options metav1.GetOptions) (result *v1 // List takes label and field selectors, and returns the list of StorageClasses that match those selectors. func (c *storageClasses) List(opts metav1.ListOptions) (result *v1.StorageClassList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1.StorageClassList{} err = c.client.Get(). Resource("storageclasses"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -90,15 +83,10 @@ func (c *storageClasses) List(opts metav1.ListOptions) (result *v1.StorageClassL // Watch returns a watch.Interface that watches the requested storageClasses. func (c *storageClasses) Watch(opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Resource("storageclasses"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -137,14 +125,9 @@ func (c *storageClasses) Delete(name string, options *metav1.DeleteOptions) erro // DeleteCollection deletes a collection of objects. func (c *storageClasses) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Resource("storageclasses"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/volumeattachment.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/volumeattachment.go deleted file mode 100644 index 0f45097b2..000000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/volumeattachment.go +++ /dev/null @@ -1,180 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1 - -import ( - "time" - - v1 "k8s.io/api/storage/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// VolumeAttachmentsGetter has a method to return a VolumeAttachmentInterface. -// A group's client should implement this interface. -type VolumeAttachmentsGetter interface { - VolumeAttachments() VolumeAttachmentInterface -} - -// VolumeAttachmentInterface has methods to work with VolumeAttachment resources. 
-type VolumeAttachmentInterface interface { - Create(*v1.VolumeAttachment) (*v1.VolumeAttachment, error) - Update(*v1.VolumeAttachment) (*v1.VolumeAttachment, error) - UpdateStatus(*v1.VolumeAttachment) (*v1.VolumeAttachment, error) - Delete(name string, options *metav1.DeleteOptions) error - DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(name string, options metav1.GetOptions) (*v1.VolumeAttachment, error) - List(opts metav1.ListOptions) (*v1.VolumeAttachmentList, error) - Watch(opts metav1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.VolumeAttachment, err error) - VolumeAttachmentExpansion -} - -// volumeAttachments implements VolumeAttachmentInterface -type volumeAttachments struct { - client rest.Interface -} - -// newVolumeAttachments returns a VolumeAttachments -func newVolumeAttachments(c *StorageV1Client) *volumeAttachments { - return &volumeAttachments{ - client: c.RESTClient(), - } -} - -// Get takes name of the volumeAttachment, and returns the corresponding volumeAttachment object, and an error if there is any. -func (c *volumeAttachments) Get(name string, options metav1.GetOptions) (result *v1.VolumeAttachment, err error) { - result = &v1.VolumeAttachment{} - err = c.client.Get(). - Resource("volumeattachments"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of VolumeAttachments that match those selectors. -func (c *volumeAttachments) List(opts metav1.ListOptions) (result *v1.VolumeAttachmentList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.VolumeAttachmentList{} - err = c.client.Get(). - Resource("volumeattachments"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested volumeAttachments. -func (c *volumeAttachments) Watch(opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("volumeattachments"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch() -} - -// Create takes the representation of a volumeAttachment and creates it. Returns the server's representation of the volumeAttachment, and an error, if there is any. -func (c *volumeAttachments) Create(volumeAttachment *v1.VolumeAttachment) (result *v1.VolumeAttachment, err error) { - result = &v1.VolumeAttachment{} - err = c.client.Post(). - Resource("volumeattachments"). - Body(volumeAttachment). - Do(). - Into(result) - return -} - -// Update takes the representation of a volumeAttachment and updates it. Returns the server's representation of the volumeAttachment, and an error, if there is any. -func (c *volumeAttachments) Update(volumeAttachment *v1.VolumeAttachment) (result *v1.VolumeAttachment, err error) { - result = &v1.VolumeAttachment{} - err = c.client.Put(). - Resource("volumeattachments"). - Name(volumeAttachment.Name). - Body(volumeAttachment). - Do(). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
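// Review note: the "+genclient:noStatus" marker mentioned in the generated
// comment above lives on the API type itself, not in this client file. An
// illustrative, schematic example (not from this patch) of a cluster-scoped
// type showing where such code-generator markers go:
//
//     // +genclient
//     // +genclient:nonNamespaced
//     // +genclient:noStatus
//     // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
//     type Example struct {
//         metav1.TypeMeta   `json:",inline"`
//         metav1.ObjectMeta `json:"metadata,omitempty"`
//
//         Spec ExampleSpec `json:"spec,omitempty"` // ExampleSpec is a placeholder
//     }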
- -func (c *volumeAttachments) UpdateStatus(volumeAttachment *v1.VolumeAttachment) (result *v1.VolumeAttachment, err error) { - result = &v1.VolumeAttachment{} - err = c.client.Put(). - Resource("volumeattachments"). - Name(volumeAttachment.Name). - SubResource("status"). - Body(volumeAttachment). - Do(). - Into(result) - return -} - -// Delete takes name of the volumeAttachment and deletes it. Returns an error if one occurs. -func (c *volumeAttachments) Delete(name string, options *metav1.DeleteOptions) error { - return c.client.Delete(). - Resource("volumeattachments"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *volumeAttachments) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("volumeattachments"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched volumeAttachment. -func (c *volumeAttachments) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.VolumeAttachment, err error) { - result = &v1.VolumeAttachment{} - err = c.client.Patch(pt). - Resource("volumeattachments"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/storage_client.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/storage_client.go index 32d503060..c52f630ac 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/storage_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/storage_client.go @@ -20,6 +20,7 @@ package v1alpha1 import ( v1alpha1 "k8s.io/api/storage/v1alpha1" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -70,7 +71,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1alpha1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattachment.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattachment.go index 7fef94e8d..e6af00185 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattachment.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattachment.go @@ -19,8 +19,6 @@ limitations under the License. package v1alpha1 import ( - "time" - v1alpha1 "k8s.io/api/storage/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -75,15 +73,10 @@ func (c *volumeAttachments) Get(name string, options v1.GetOptions) (result *v1a // List takes label and field selectors, and returns the list of VolumeAttachments that match those selectors. 
func (c *volumeAttachments) List(opts v1.ListOptions) (result *v1alpha1.VolumeAttachmentList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1alpha1.VolumeAttachmentList{} err = c.client.Get(). Resource("volumeattachments"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -91,15 +84,10 @@ func (c *volumeAttachments) List(opts v1.ListOptions) (result *v1alpha1.VolumeAt // Watch returns a watch.Interface that watches the requested volumeAttachments. func (c *volumeAttachments) Watch(opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Resource("volumeattachments"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -153,14 +141,9 @@ func (c *volumeAttachments) Delete(name string, options *v1.DeleteOptions) error // DeleteCollection deletes a collection of objects. func (c *volumeAttachments) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Resource("volumeattachments"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csidriver.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csidriver.go deleted file mode 100644 index 86cf9bf18..000000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csidriver.go +++ /dev/null @@ -1,164 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1beta1 - -import ( - "time" - - v1beta1 "k8s.io/api/storage/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// CSIDriversGetter has a method to return a CSIDriverInterface. -// A group's client should implement this interface. -type CSIDriversGetter interface { - CSIDrivers() CSIDriverInterface -} - -// CSIDriverInterface has methods to work with CSIDriver resources. 
-type CSIDriverInterface interface { - Create(*v1beta1.CSIDriver) (*v1beta1.CSIDriver, error) - Update(*v1beta1.CSIDriver) (*v1beta1.CSIDriver, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1beta1.CSIDriver, error) - List(opts v1.ListOptions) (*v1beta1.CSIDriverList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.CSIDriver, err error) - CSIDriverExpansion -} - -// cSIDrivers implements CSIDriverInterface -type cSIDrivers struct { - client rest.Interface -} - -// newCSIDrivers returns a CSIDrivers -func newCSIDrivers(c *StorageV1beta1Client) *cSIDrivers { - return &cSIDrivers{ - client: c.RESTClient(), - } -} - -// Get takes name of the cSIDriver, and returns the corresponding cSIDriver object, and an error if there is any. -func (c *cSIDrivers) Get(name string, options v1.GetOptions) (result *v1beta1.CSIDriver, err error) { - result = &v1beta1.CSIDriver{} - err = c.client.Get(). - Resource("csidrivers"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of CSIDrivers that match those selectors. -func (c *cSIDrivers) List(opts v1.ListOptions) (result *v1beta1.CSIDriverList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.CSIDriverList{} - err = c.client.Get(). - Resource("csidrivers"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested cSIDrivers. -func (c *cSIDrivers) Watch(opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("csidrivers"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch() -} - -// Create takes the representation of a cSIDriver and creates it. Returns the server's representation of the cSIDriver, and an error, if there is any. -func (c *cSIDrivers) Create(cSIDriver *v1beta1.CSIDriver) (result *v1beta1.CSIDriver, err error) { - result = &v1beta1.CSIDriver{} - err = c.client.Post(). - Resource("csidrivers"). - Body(cSIDriver). - Do(). - Into(result) - return -} - -// Update takes the representation of a cSIDriver and updates it. Returns the server's representation of the cSIDriver, and an error, if there is any. -func (c *cSIDrivers) Update(cSIDriver *v1beta1.CSIDriver) (result *v1beta1.CSIDriver, err error) { - result = &v1beta1.CSIDriver{} - err = c.client.Put(). - Resource("csidrivers"). - Name(cSIDriver.Name). - Body(cSIDriver). - Do(). - Into(result) - return -} - -// Delete takes name of the cSIDriver and deletes it. Returns an error if one occurs. -func (c *cSIDrivers) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Resource("csidrivers"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. 
-func (c *cSIDrivers) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("csidrivers"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched cSIDriver. -func (c *cSIDrivers) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.CSIDriver, err error) { - result = &v1beta1.CSIDriver{} - err = c.client.Patch(pt). - Resource("csidrivers"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csinode.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csinode.go deleted file mode 100644 index e5540c128..000000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csinode.go +++ /dev/null @@ -1,164 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1beta1 - -import ( - "time" - - v1beta1 "k8s.io/api/storage/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// CSINodesGetter has a method to return a CSINodeInterface. -// A group's client should implement this interface. -type CSINodesGetter interface { - CSINodes() CSINodeInterface -} - -// CSINodeInterface has methods to work with CSINode resources. -type CSINodeInterface interface { - Create(*v1beta1.CSINode) (*v1beta1.CSINode, error) - Update(*v1beta1.CSINode) (*v1beta1.CSINode, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1beta1.CSINode, error) - List(opts v1.ListOptions) (*v1beta1.CSINodeList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.CSINode, err error) - CSINodeExpansion -} - -// cSINodes implements CSINodeInterface -type cSINodes struct { - client rest.Interface -} - -// newCSINodes returns a CSINodes -func newCSINodes(c *StorageV1beta1Client) *cSINodes { - return &cSINodes{ - client: c.RESTClient(), - } -} - -// Get takes name of the cSINode, and returns the corresponding cSINode object, and an error if there is any. -func (c *cSINodes) Get(name string, options v1.GetOptions) (result *v1beta1.CSINode, err error) { - result = &v1beta1.CSINode{} - err = c.client.Get(). - Resource("csinodes"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). 
- Into(result) - return -} - -// List takes label and field selectors, and returns the list of CSINodes that match those selectors. -func (c *cSINodes) List(opts v1.ListOptions) (result *v1beta1.CSINodeList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.CSINodeList{} - err = c.client.Get(). - Resource("csinodes"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested cSINodes. -func (c *cSINodes) Watch(opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("csinodes"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch() -} - -// Create takes the representation of a cSINode and creates it. Returns the server's representation of the cSINode, and an error, if there is any. -func (c *cSINodes) Create(cSINode *v1beta1.CSINode) (result *v1beta1.CSINode, err error) { - result = &v1beta1.CSINode{} - err = c.client.Post(). - Resource("csinodes"). - Body(cSINode). - Do(). - Into(result) - return -} - -// Update takes the representation of a cSINode and updates it. Returns the server's representation of the cSINode, and an error, if there is any. -func (c *cSINodes) Update(cSINode *v1beta1.CSINode) (result *v1beta1.CSINode, err error) { - result = &v1beta1.CSINode{} - err = c.client.Put(). - Resource("csinodes"). - Name(cSINode.Name). - Body(cSINode). - Do(). - Into(result) - return -} - -// Delete takes name of the cSINode and deletes it. Returns an error if one occurs. -func (c *cSINodes) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Resource("csinodes"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *cSINodes) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("csinodes"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched cSINode. -func (c *cSINodes) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.CSINode, err error) { - result = &v1beta1.CSINode{} - err = c.client.Patch(pt). - Resource("csinodes"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/generated_expansion.go index 7ba93142b..559f88f67 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/generated_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/generated_expansion.go @@ -18,10 +18,6 @@ limitations under the License. 
package v1beta1 -type CSIDriverExpansion interface{} - -type CSINodeExpansion interface{} - type StorageClassExpansion interface{} type VolumeAttachmentExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storage_client.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storage_client.go index 5e12b025b..4bdebb878 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storage_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storage_client.go @@ -20,14 +20,13 @@ package v1beta1 import ( v1beta1 "k8s.io/api/storage/v1beta1" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) type StorageV1beta1Interface interface { RESTClient() rest.Interface - CSIDriversGetter - CSINodesGetter StorageClassesGetter VolumeAttachmentsGetter } @@ -37,14 +36,6 @@ type StorageV1beta1Client struct { restClient rest.Interface } -func (c *StorageV1beta1Client) CSIDrivers() CSIDriverInterface { - return newCSIDrivers(c) -} - -func (c *StorageV1beta1Client) CSINodes() CSINodeInterface { - return newCSINodes(c) -} - func (c *StorageV1beta1Client) StorageClasses() StorageClassInterface { return newStorageClasses(c) } @@ -85,7 +76,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1beta1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storageclass.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storageclass.go index 8a8f38916..fbe1fd4c2 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storageclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storageclass.go @@ -19,8 +19,6 @@ limitations under the License. package v1beta1 import ( - "time" - v1beta1 "k8s.io/api/storage/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -74,15 +72,10 @@ func (c *storageClasses) Get(name string, options v1.GetOptions) (result *v1beta // List takes label and field selectors, and returns the list of StorageClasses that match those selectors. func (c *storageClasses) List(opts v1.ListOptions) (result *v1beta1.StorageClassList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1beta1.StorageClassList{} err = c.client.Get(). Resource("storageclasses"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -90,15 +83,10 @@ func (c *storageClasses) List(opts v1.ListOptions) (result *v1beta1.StorageClass // Watch returns a watch.Interface that watches the requested storageClasses. func (c *storageClasses) Watch(opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Resource("storageclasses"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -137,14 +125,9 @@ func (c *storageClasses) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. 
func (c *storageClasses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Resource("storageclasses"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/volumeattachment.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/volumeattachment.go index d319407f2..5cd2d3919 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/volumeattachment.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/volumeattachment.go @@ -19,8 +19,6 @@ limitations under the License. package v1beta1 import ( - "time" - v1beta1 "k8s.io/api/storage/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -75,15 +73,10 @@ func (c *volumeAttachments) Get(name string, options v1.GetOptions) (result *v1b // List takes label and field selectors, and returns the list of VolumeAttachments that match those selectors. func (c *volumeAttachments) List(opts v1.ListOptions) (result *v1beta1.VolumeAttachmentList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1beta1.VolumeAttachmentList{} err = c.client.Get(). Resource("volumeattachments"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -91,15 +84,10 @@ func (c *volumeAttachments) List(opts v1.ListOptions) (result *v1beta1.VolumeAtt // Watch returns a watch.Interface that watches the requested volumeAttachments. func (c *volumeAttachments) Watch(opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Resource("volumeattachments"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -153,14 +141,9 @@ func (c *volumeAttachments) Delete(name string, options *v1.DeleteOptions) error // DeleteCollection deletes a collection of objects. func (c *volumeAttachments) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Resource("volumeattachments"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/listers/auditregistration/v1alpha1/expansion_generated.go b/vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/expansion_generated.go similarity index 78% rename from vendor/k8s.io/client-go/listers/auditregistration/v1alpha1/expansion_generated.go rename to vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/expansion_generated.go index 533dd0631..2c9f9f6a6 100644 --- a/vendor/k8s.io/client-go/listers/auditregistration/v1alpha1/expansion_generated.go +++ b/vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/expansion_generated.go @@ -18,6 +18,6 @@ limitations under the License. package v1alpha1 -// AuditSinkListerExpansion allows custom methods to be added to -// AuditSinkLister. 
-type AuditSinkListerExpansion interface{} +// InitializerConfigurationListerExpansion allows custom methods to be added to +// InitializerConfigurationLister. +type InitializerConfigurationListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/initializerconfiguration.go b/vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/initializerconfiguration.go new file mode 100644 index 000000000..dbd7301fc --- /dev/null +++ b/vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/initializerconfiguration.go @@ -0,0 +1,65 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1alpha1 "k8s.io/api/admissionregistration/v1alpha1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// InitializerConfigurationLister helps list InitializerConfigurations. +type InitializerConfigurationLister interface { + // List lists all InitializerConfigurations in the indexer. + List(selector labels.Selector) (ret []*v1alpha1.InitializerConfiguration, err error) + // Get retrieves the InitializerConfiguration from the index for a given name. + Get(name string) (*v1alpha1.InitializerConfiguration, error) + InitializerConfigurationListerExpansion +} + +// initializerConfigurationLister implements the InitializerConfigurationLister interface. +type initializerConfigurationLister struct { + indexer cache.Indexer +} + +// NewInitializerConfigurationLister returns a new InitializerConfigurationLister. +func NewInitializerConfigurationLister(indexer cache.Indexer) InitializerConfigurationLister { + return &initializerConfigurationLister{indexer: indexer} +} + +// List lists all InitializerConfigurations in the indexer. +func (s *initializerConfigurationLister) List(selector labels.Selector) (ret []*v1alpha1.InitializerConfiguration, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.InitializerConfiguration)) + }) + return ret, err +} + +// Get retrieves the InitializerConfiguration from the index for a given name. 
+func (s *initializerConfigurationLister) Get(name string) (*v1alpha1.InitializerConfiguration, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha1.Resource("initializerconfiguration"), name) + } + return obj.(*v1alpha1.InitializerConfiguration), nil +} diff --git a/vendor/k8s.io/client-go/listers/apps/v1beta1/expansion_generated.go b/vendor/k8s.io/client-go/listers/apps/v1beta1/expansion_generated.go index c73cf98c7..8f8d08434 100644 --- a/vendor/k8s.io/client-go/listers/apps/v1beta1/expansion_generated.go +++ b/vendor/k8s.io/client-go/listers/apps/v1beta1/expansion_generated.go @@ -33,3 +33,11 @@ type DeploymentListerExpansion interface{} // DeploymentNamespaceListerExpansion allows custom methods to be added to // DeploymentNamespaceLister. type DeploymentNamespaceListerExpansion interface{} + +// ScaleListerExpansion allows custom methods to be added to +// ScaleLister. +type ScaleListerExpansion interface{} + +// ScaleNamespaceListerExpansion allows custom methods to be added to +// ScaleNamespaceLister. +type ScaleNamespaceListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/apps/v1beta1/scale.go b/vendor/k8s.io/client-go/listers/apps/v1beta1/scale.go new file mode 100644 index 000000000..ef8a2630e --- /dev/null +++ b/vendor/k8s.io/client-go/listers/apps/v1beta1/scale.go @@ -0,0 +1,94 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/apps/v1beta1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ScaleLister helps list Scales. +type ScaleLister interface { + // List lists all Scales in the indexer. + List(selector labels.Selector) (ret []*v1beta1.Scale, err error) + // Scales returns an object that can list and get Scales. + Scales(namespace string) ScaleNamespaceLister + ScaleListerExpansion +} + +// scaleLister implements the ScaleLister interface. +type scaleLister struct { + indexer cache.Indexer +} + +// NewScaleLister returns a new ScaleLister. +func NewScaleLister(indexer cache.Indexer) ScaleLister { + return &scaleLister{indexer: indexer} +} + +// List lists all Scales in the indexer. +func (s *scaleLister) List(selector labels.Selector) (ret []*v1beta1.Scale, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.Scale)) + }) + return ret, err +} + +// Scales returns an object that can list and get Scales. +func (s *scaleLister) Scales(namespace string) ScaleNamespaceLister { + return scaleNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// ScaleNamespaceLister helps list and get Scales. +type ScaleNamespaceLister interface { + // List lists all Scales in the indexer for a given namespace. 
+ List(selector labels.Selector) (ret []*v1beta1.Scale, err error) + // Get retrieves the Scale from the indexer for a given namespace and name. + Get(name string) (*v1beta1.Scale, error) + ScaleNamespaceListerExpansion +} + +// scaleNamespaceLister implements the ScaleNamespaceLister +// interface. +type scaleNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all Scales in the indexer for a given namespace. +func (s scaleNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.Scale, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.Scale)) + }) + return ret, err +} + +// Get retrieves the Scale from the indexer for a given namespace and name. +func (s scaleNamespaceLister) Get(name string) (*v1beta1.Scale, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1beta1.Resource("scale"), name) + } + return obj.(*v1beta1.Scale), nil +} diff --git a/vendor/k8s.io/client-go/listers/apps/v1beta2/expansion_generated.go b/vendor/k8s.io/client-go/listers/apps/v1beta2/expansion_generated.go index bac6ccb9a..d468f38e7 100644 --- a/vendor/k8s.io/client-go/listers/apps/v1beta2/expansion_generated.go +++ b/vendor/k8s.io/client-go/listers/apps/v1beta2/expansion_generated.go @@ -25,3 +25,11 @@ type ControllerRevisionListerExpansion interface{} // ControllerRevisionNamespaceListerExpansion allows custom methods to be added to // ControllerRevisionNamespaceLister. type ControllerRevisionNamespaceListerExpansion interface{} + +// ScaleListerExpansion allows custom methods to be added to +// ScaleLister. +type ScaleListerExpansion interface{} + +// ScaleNamespaceListerExpansion allows custom methods to be added to +// ScaleNamespaceLister. +type ScaleNamespaceListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/apps/v1beta2/scale.go b/vendor/k8s.io/client-go/listers/apps/v1beta2/scale.go new file mode 100644 index 000000000..d89329864 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/apps/v1beta2/scale.go @@ -0,0 +1,94 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1beta2 + +import ( + v1beta2 "k8s.io/api/apps/v1beta2" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ScaleLister helps list Scales. +type ScaleLister interface { + // List lists all Scales in the indexer. + List(selector labels.Selector) (ret []*v1beta2.Scale, err error) + // Scales returns an object that can list and get Scales. + Scales(namespace string) ScaleNamespaceLister + ScaleListerExpansion +} + +// scaleLister implements the ScaleLister interface. +type scaleLister struct { + indexer cache.Indexer +} + +// NewScaleLister returns a new ScaleLister. 
+func NewScaleLister(indexer cache.Indexer) ScaleLister { + return &scaleLister{indexer: indexer} +} + +// List lists all Scales in the indexer. +func (s *scaleLister) List(selector labels.Selector) (ret []*v1beta2.Scale, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta2.Scale)) + }) + return ret, err +} + +// Scales returns an object that can list and get Scales. +func (s *scaleLister) Scales(namespace string) ScaleNamespaceLister { + return scaleNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// ScaleNamespaceLister helps list and get Scales. +type ScaleNamespaceLister interface { + // List lists all Scales in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1beta2.Scale, err error) + // Get retrieves the Scale from the indexer for a given namespace and name. + Get(name string) (*v1beta2.Scale, error) + ScaleNamespaceListerExpansion +} + +// scaleNamespaceLister implements the ScaleNamespaceLister +// interface. +type scaleNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all Scales in the indexer for a given namespace. +func (s scaleNamespaceLister) List(selector labels.Selector) (ret []*v1beta2.Scale, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta2.Scale)) + }) + return ret, err +} + +// Get retrieves the Scale from the indexer for a given namespace and name. +func (s scaleNamespaceLister) Get(name string) (*v1beta2.Scale, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1beta2.Resource("scale"), name) + } + return obj.(*v1beta2.Scale), nil +} diff --git a/vendor/k8s.io/client-go/listers/auditregistration/v1alpha1/auditsink.go b/vendor/k8s.io/client-go/listers/auditregistration/v1alpha1/auditsink.go deleted file mode 100644 index 3ae4528c8..000000000 --- a/vendor/k8s.io/client-go/listers/auditregistration/v1alpha1/auditsink.go +++ /dev/null @@ -1,65 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by lister-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - v1alpha1 "k8s.io/api/auditregistration/v1alpha1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" -) - -// AuditSinkLister helps list AuditSinks. -type AuditSinkLister interface { - // List lists all AuditSinks in the indexer. - List(selector labels.Selector) (ret []*v1alpha1.AuditSink, err error) - // Get retrieves the AuditSink from the index for a given name. - Get(name string) (*v1alpha1.AuditSink, error) - AuditSinkListerExpansion -} - -// auditSinkLister implements the AuditSinkLister interface. -type auditSinkLister struct { - indexer cache.Indexer -} - -// NewAuditSinkLister returns a new AuditSinkLister. 
-func NewAuditSinkLister(indexer cache.Indexer) AuditSinkLister { - return &auditSinkLister{indexer: indexer} -} - -// List lists all AuditSinks in the indexer. -func (s *auditSinkLister) List(selector labels.Selector) (ret []*v1alpha1.AuditSink, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.AuditSink)) - }) - return ret, err -} - -// Get retrieves the AuditSink from the index for a given name. -func (s *auditSinkLister) Get(name string) (*v1alpha1.AuditSink, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1alpha1.Resource("auditsink"), name) - } - return obj.(*v1alpha1.AuditSink), nil -} diff --git a/vendor/k8s.io/client-go/listers/coordination/v1/lease.go b/vendor/k8s.io/client-go/listers/coordination/v1/lease.go deleted file mode 100644 index cc379088c..000000000 --- a/vendor/k8s.io/client-go/listers/coordination/v1/lease.go +++ /dev/null @@ -1,94 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by lister-gen. DO NOT EDIT. - -package v1 - -import ( - v1 "k8s.io/api/coordination/v1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" -) - -// LeaseLister helps list Leases. -type LeaseLister interface { - // List lists all Leases in the indexer. - List(selector labels.Selector) (ret []*v1.Lease, err error) - // Leases returns an object that can list and get Leases. - Leases(namespace string) LeaseNamespaceLister - LeaseListerExpansion -} - -// leaseLister implements the LeaseLister interface. -type leaseLister struct { - indexer cache.Indexer -} - -// NewLeaseLister returns a new LeaseLister. -func NewLeaseLister(indexer cache.Indexer) LeaseLister { - return &leaseLister{indexer: indexer} -} - -// List lists all Leases in the indexer. -func (s *leaseLister) List(selector labels.Selector) (ret []*v1.Lease, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.Lease)) - }) - return ret, err -} - -// Leases returns an object that can list and get Leases. -func (s *leaseLister) Leases(namespace string) LeaseNamespaceLister { - return leaseNamespaceLister{indexer: s.indexer, namespace: namespace} -} - -// LeaseNamespaceLister helps list and get Leases. -type LeaseNamespaceLister interface { - // List lists all Leases in the indexer for a given namespace. - List(selector labels.Selector) (ret []*v1.Lease, err error) - // Get retrieves the Lease from the indexer for a given namespace and name. - Get(name string) (*v1.Lease, error) - LeaseNamespaceListerExpansion -} - -// leaseNamespaceLister implements the LeaseNamespaceLister -// interface. -type leaseNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all Leases in the indexer for a given namespace. 
-func (s leaseNamespaceLister) List(selector labels.Selector) (ret []*v1.Lease, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1.Lease)) - }) - return ret, err -} - -// Get retrieves the Lease from the indexer for a given namespace and name. -func (s leaseNamespaceLister) Get(name string) (*v1.Lease, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("lease"), name) - } - return obj.(*v1.Lease), nil -} diff --git a/vendor/k8s.io/client-go/listers/extensions/v1beta1/expansion_generated.go b/vendor/k8s.io/client-go/listers/extensions/v1beta1/expansion_generated.go index 6d55ae9b8..b5ee8a492 100644 --- a/vendor/k8s.io/client-go/listers/extensions/v1beta1/expansion_generated.go +++ b/vendor/k8s.io/client-go/listers/extensions/v1beta1/expansion_generated.go @@ -26,14 +26,14 @@ type IngressListerExpansion interface{} // IngressNamespaceLister. type IngressNamespaceListerExpansion interface{} -// NetworkPolicyListerExpansion allows custom methods to be added to -// NetworkPolicyLister. -type NetworkPolicyListerExpansion interface{} - -// NetworkPolicyNamespaceListerExpansion allows custom methods to be added to -// NetworkPolicyNamespaceLister. -type NetworkPolicyNamespaceListerExpansion interface{} - // PodSecurityPolicyListerExpansion allows custom methods to be added to // PodSecurityPolicyLister. type PodSecurityPolicyListerExpansion interface{} + +// ScaleListerExpansion allows custom methods to be added to +// ScaleLister. +type ScaleListerExpansion interface{} + +// ScaleNamespaceListerExpansion allows custom methods to be added to +// ScaleNamespaceLister. +type ScaleNamespaceListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/extensions/v1beta1/networkpolicy.go b/vendor/k8s.io/client-go/listers/extensions/v1beta1/networkpolicy.go deleted file mode 100644 index 782f521ad..000000000 --- a/vendor/k8s.io/client-go/listers/extensions/v1beta1/networkpolicy.go +++ /dev/null @@ -1,94 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by lister-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1beta1 "k8s.io/api/extensions/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" -) - -// NetworkPolicyLister helps list NetworkPolicies. -type NetworkPolicyLister interface { - // List lists all NetworkPolicies in the indexer. - List(selector labels.Selector) (ret []*v1beta1.NetworkPolicy, err error) - // NetworkPolicies returns an object that can list and get NetworkPolicies. - NetworkPolicies(namespace string) NetworkPolicyNamespaceLister - NetworkPolicyListerExpansion -} - -// networkPolicyLister implements the NetworkPolicyLister interface. -type networkPolicyLister struct { - indexer cache.Indexer -} - -// NewNetworkPolicyLister returns a new NetworkPolicyLister. 
-func NewNetworkPolicyLister(indexer cache.Indexer) NetworkPolicyLister { - return &networkPolicyLister{indexer: indexer} -} - -// List lists all NetworkPolicies in the indexer. -func (s *networkPolicyLister) List(selector labels.Selector) (ret []*v1beta1.NetworkPolicy, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.NetworkPolicy)) - }) - return ret, err -} - -// NetworkPolicies returns an object that can list and get NetworkPolicies. -func (s *networkPolicyLister) NetworkPolicies(namespace string) NetworkPolicyNamespaceLister { - return networkPolicyNamespaceLister{indexer: s.indexer, namespace: namespace} -} - -// NetworkPolicyNamespaceLister helps list and get NetworkPolicies. -type NetworkPolicyNamespaceLister interface { - // List lists all NetworkPolicies in the indexer for a given namespace. - List(selector labels.Selector) (ret []*v1beta1.NetworkPolicy, err error) - // Get retrieves the NetworkPolicy from the indexer for a given namespace and name. - Get(name string) (*v1beta1.NetworkPolicy, error) - NetworkPolicyNamespaceListerExpansion -} - -// networkPolicyNamespaceLister implements the NetworkPolicyNamespaceLister -// interface. -type networkPolicyNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all NetworkPolicies in the indexer for a given namespace. -func (s networkPolicyNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.NetworkPolicy, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.NetworkPolicy)) - }) - return ret, err -} - -// Get retrieves the NetworkPolicy from the indexer for a given namespace and name. -func (s networkPolicyNamespaceLister) Get(name string) (*v1beta1.NetworkPolicy, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("networkpolicy"), name) - } - return obj.(*v1beta1.NetworkPolicy), nil -} diff --git a/vendor/k8s.io/client-go/listers/extensions/v1beta1/scale.go b/vendor/k8s.io/client-go/listers/extensions/v1beta1/scale.go new file mode 100644 index 000000000..527d4be42 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/extensions/v1beta1/scale.go @@ -0,0 +1,94 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/extensions/v1beta1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ScaleLister helps list Scales. +type ScaleLister interface { + // List lists all Scales in the indexer. + List(selector labels.Selector) (ret []*v1beta1.Scale, err error) + // Scales returns an object that can list and get Scales. + Scales(namespace string) ScaleNamespaceLister + ScaleListerExpansion +} + +// scaleLister implements the ScaleLister interface. 
+type scaleLister struct { + indexer cache.Indexer +} + +// NewScaleLister returns a new ScaleLister. +func NewScaleLister(indexer cache.Indexer) ScaleLister { + return &scaleLister{indexer: indexer} +} + +// List lists all Scales in the indexer. +func (s *scaleLister) List(selector labels.Selector) (ret []*v1beta1.Scale, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.Scale)) + }) + return ret, err +} + +// Scales returns an object that can list and get Scales. +func (s *scaleLister) Scales(namespace string) ScaleNamespaceLister { + return scaleNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// ScaleNamespaceLister helps list and get Scales. +type ScaleNamespaceLister interface { + // List lists all Scales in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1beta1.Scale, err error) + // Get retrieves the Scale from the indexer for a given namespace and name. + Get(name string) (*v1beta1.Scale, error) + ScaleNamespaceListerExpansion +} + +// scaleNamespaceLister implements the ScaleNamespaceLister +// interface. +type scaleNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all Scales in the indexer for a given namespace. +func (s scaleNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.Scale, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.Scale)) + }) + return ret, err +} + +// Get retrieves the Scale from the indexer for a given namespace and name. +func (s scaleNamespaceLister) Get(name string) (*v1beta1.Scale, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1beta1.Resource("scale"), name) + } + return obj.(*v1beta1.Scale), nil +} diff --git a/vendor/k8s.io/client-go/listers/networking/v1beta1/ingress.go b/vendor/k8s.io/client-go/listers/networking/v1beta1/ingress.go deleted file mode 100644 index 6676742e3..000000000 --- a/vendor/k8s.io/client-go/listers/networking/v1beta1/ingress.go +++ /dev/null @@ -1,94 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by lister-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1beta1 "k8s.io/api/networking/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" -) - -// IngressLister helps list Ingresses. -type IngressLister interface { - // List lists all Ingresses in the indexer. - List(selector labels.Selector) (ret []*v1beta1.Ingress, err error) - // Ingresses returns an object that can list and get Ingresses. - Ingresses(namespace string) IngressNamespaceLister - IngressListerExpansion -} - -// ingressLister implements the IngressLister interface. -type ingressLister struct { - indexer cache.Indexer -} - -// NewIngressLister returns a new IngressLister. 
-func NewIngressLister(indexer cache.Indexer) IngressLister { - return &ingressLister{indexer: indexer} -} - -// List lists all Ingresses in the indexer. -func (s *ingressLister) List(selector labels.Selector) (ret []*v1beta1.Ingress, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.Ingress)) - }) - return ret, err -} - -// Ingresses returns an object that can list and get Ingresses. -func (s *ingressLister) Ingresses(namespace string) IngressNamespaceLister { - return ingressNamespaceLister{indexer: s.indexer, namespace: namespace} -} - -// IngressNamespaceLister helps list and get Ingresses. -type IngressNamespaceLister interface { - // List lists all Ingresses in the indexer for a given namespace. - List(selector labels.Selector) (ret []*v1beta1.Ingress, err error) - // Get retrieves the Ingress from the indexer for a given namespace and name. - Get(name string) (*v1beta1.Ingress, error) - IngressNamespaceListerExpansion -} - -// ingressNamespaceLister implements the IngressNamespaceLister -// interface. -type ingressNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all Ingresses in the indexer for a given namespace. -func (s ingressNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.Ingress, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.Ingress)) - }) - return ret, err -} - -// Get retrieves the Ingress from the indexer for a given namespace and name. -func (s ingressNamespaceLister) Get(name string) (*v1beta1.Ingress, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("ingress"), name) - } - return obj.(*v1beta1.Ingress), nil -} diff --git a/vendor/k8s.io/client-go/listers/node/v1alpha1/runtimeclass.go b/vendor/k8s.io/client-go/listers/node/v1alpha1/runtimeclass.go deleted file mode 100644 index af3f02b98..000000000 --- a/vendor/k8s.io/client-go/listers/node/v1alpha1/runtimeclass.go +++ /dev/null @@ -1,65 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by lister-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - v1alpha1 "k8s.io/api/node/v1alpha1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" -) - -// RuntimeClassLister helps list RuntimeClasses. -type RuntimeClassLister interface { - // List lists all RuntimeClasses in the indexer. - List(selector labels.Selector) (ret []*v1alpha1.RuntimeClass, err error) - // Get retrieves the RuntimeClass from the index for a given name. - Get(name string) (*v1alpha1.RuntimeClass, error) - RuntimeClassListerExpansion -} - -// runtimeClassLister implements the RuntimeClassLister interface. -type runtimeClassLister struct { - indexer cache.Indexer -} - -// NewRuntimeClassLister returns a new RuntimeClassLister. 
-func NewRuntimeClassLister(indexer cache.Indexer) RuntimeClassLister { - return &runtimeClassLister{indexer: indexer} -} - -// List lists all RuntimeClasses in the indexer. -func (s *runtimeClassLister) List(selector labels.Selector) (ret []*v1alpha1.RuntimeClass, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.RuntimeClass)) - }) - return ret, err -} - -// Get retrieves the RuntimeClass from the index for a given name. -func (s *runtimeClassLister) Get(name string) (*v1alpha1.RuntimeClass, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1alpha1.Resource("runtimeclass"), name) - } - return obj.(*v1alpha1.RuntimeClass), nil -} diff --git a/vendor/k8s.io/client-go/listers/node/v1beta1/expansion_generated.go b/vendor/k8s.io/client-go/listers/node/v1beta1/expansion_generated.go deleted file mode 100644 index a6744055c..000000000 --- a/vendor/k8s.io/client-go/listers/node/v1beta1/expansion_generated.go +++ /dev/null @@ -1,23 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by lister-gen. DO NOT EDIT. - -package v1beta1 - -// RuntimeClassListerExpansion allows custom methods to be added to -// RuntimeClassLister. -type RuntimeClassListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/node/v1beta1/runtimeclass.go b/vendor/k8s.io/client-go/listers/node/v1beta1/runtimeclass.go deleted file mode 100644 index be642b999..000000000 --- a/vendor/k8s.io/client-go/listers/node/v1beta1/runtimeclass.go +++ /dev/null @@ -1,65 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by lister-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1beta1 "k8s.io/api/node/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" -) - -// RuntimeClassLister helps list RuntimeClasses. -type RuntimeClassLister interface { - // List lists all RuntimeClasses in the indexer. - List(selector labels.Selector) (ret []*v1beta1.RuntimeClass, err error) - // Get retrieves the RuntimeClass from the index for a given name. - Get(name string) (*v1beta1.RuntimeClass, error) - RuntimeClassListerExpansion -} - -// runtimeClassLister implements the RuntimeClassLister interface. -type runtimeClassLister struct { - indexer cache.Indexer -} - -// NewRuntimeClassLister returns a new RuntimeClassLister. 
-func NewRuntimeClassLister(indexer cache.Indexer) RuntimeClassLister { - return &runtimeClassLister{indexer: indexer} -} - -// List lists all RuntimeClasses in the indexer. -func (s *runtimeClassLister) List(selector labels.Selector) (ret []*v1beta1.RuntimeClass, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.RuntimeClass)) - }) - return ret, err -} - -// Get retrieves the RuntimeClass from the index for a given name. -func (s *runtimeClassLister) Get(name string) (*v1beta1.RuntimeClass, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("runtimeclass"), name) - } - return obj.(*v1beta1.RuntimeClass), nil -} diff --git a/vendor/k8s.io/client-go/listers/policy/v1beta1/poddisruptionbudget_expansion.go b/vendor/k8s.io/client-go/listers/policy/v1beta1/poddisruptionbudget_expansion.go index d07d11a98..c0ab9d3ed 100644 --- a/vendor/k8s.io/client-go/listers/policy/v1beta1/poddisruptionbudget_expansion.go +++ b/vendor/k8s.io/client-go/listers/policy/v1beta1/poddisruptionbudget_expansion.go @@ -19,11 +19,11 @@ package v1beta1 import ( "fmt" + "github.com/golang/glog" "k8s.io/api/core/v1" policy "k8s.io/api/policy/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" - "k8s.io/klog" ) // PodDisruptionBudgetListerExpansion allows custom methods to be added to @@ -54,7 +54,7 @@ func (s *podDisruptionBudgetLister) GetPodPodDisruptionBudgets(pod *v1.Pod) ([]* pdb := list[i] selector, err = metav1.LabelSelectorAsSelector(pdb.Spec.Selector) if err != nil { - klog.Warningf("invalid selector: %v", err) + glog.Warningf("invalid selector: %v", err) // TODO(mml): add an event to the PDB continue } diff --git a/vendor/k8s.io/client-go/listers/scheduling/v1/expansion_generated.go b/vendor/k8s.io/client-go/listers/scheduling/v1/expansion_generated.go deleted file mode 100644 index d0c45d012..000000000 --- a/vendor/k8s.io/client-go/listers/scheduling/v1/expansion_generated.go +++ /dev/null @@ -1,23 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by lister-gen. DO NOT EDIT. - -package v1 - -// PriorityClassListerExpansion allows custom methods to be added to -// PriorityClassLister. -type PriorityClassListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/scheduling/v1/priorityclass.go b/vendor/k8s.io/client-go/listers/scheduling/v1/priorityclass.go deleted file mode 100644 index 452fee591..000000000 --- a/vendor/k8s.io/client-go/listers/scheduling/v1/priorityclass.go +++ /dev/null @@ -1,65 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by lister-gen. DO NOT EDIT. - -package v1 - -import ( - v1 "k8s.io/api/scheduling/v1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" -) - -// PriorityClassLister helps list PriorityClasses. -type PriorityClassLister interface { - // List lists all PriorityClasses in the indexer. - List(selector labels.Selector) (ret []*v1.PriorityClass, err error) - // Get retrieves the PriorityClass from the index for a given name. - Get(name string) (*v1.PriorityClass, error) - PriorityClassListerExpansion -} - -// priorityClassLister implements the PriorityClassLister interface. -type priorityClassLister struct { - indexer cache.Indexer -} - -// NewPriorityClassLister returns a new PriorityClassLister. -func NewPriorityClassLister(indexer cache.Indexer) PriorityClassLister { - return &priorityClassLister{indexer: indexer} -} - -// List lists all PriorityClasses in the indexer. -func (s *priorityClassLister) List(selector labels.Selector) (ret []*v1.PriorityClass, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.PriorityClass)) - }) - return ret, err -} - -// Get retrieves the PriorityClass from the index for a given name. -func (s *priorityClassLister) Get(name string) (*v1.PriorityClass, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("priorityclass"), name) - } - return obj.(*v1.PriorityClass), nil -} diff --git a/vendor/k8s.io/client-go/listers/storage/v1/expansion_generated.go b/vendor/k8s.io/client-go/listers/storage/v1/expansion_generated.go index 9d7d88872..d93247064 100644 --- a/vendor/k8s.io/client-go/listers/storage/v1/expansion_generated.go +++ b/vendor/k8s.io/client-go/listers/storage/v1/expansion_generated.go @@ -21,7 +21,3 @@ package v1 // StorageClassListerExpansion allows custom methods to be added to // StorageClassLister. type StorageClassListerExpansion interface{} - -// VolumeAttachmentListerExpansion allows custom methods to be added to -// VolumeAttachmentLister. -type VolumeAttachmentListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/storage/v1/volumeattachment.go b/vendor/k8s.io/client-go/listers/storage/v1/volumeattachment.go deleted file mode 100644 index 14888812e..000000000 --- a/vendor/k8s.io/client-go/listers/storage/v1/volumeattachment.go +++ /dev/null @@ -1,65 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by lister-gen. DO NOT EDIT. 
- -package v1 - -import ( - v1 "k8s.io/api/storage/v1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" -) - -// VolumeAttachmentLister helps list VolumeAttachments. -type VolumeAttachmentLister interface { - // List lists all VolumeAttachments in the indexer. - List(selector labels.Selector) (ret []*v1.VolumeAttachment, err error) - // Get retrieves the VolumeAttachment from the index for a given name. - Get(name string) (*v1.VolumeAttachment, error) - VolumeAttachmentListerExpansion -} - -// volumeAttachmentLister implements the VolumeAttachmentLister interface. -type volumeAttachmentLister struct { - indexer cache.Indexer -} - -// NewVolumeAttachmentLister returns a new VolumeAttachmentLister. -func NewVolumeAttachmentLister(indexer cache.Indexer) VolumeAttachmentLister { - return &volumeAttachmentLister{indexer: indexer} -} - -// List lists all VolumeAttachments in the indexer. -func (s *volumeAttachmentLister) List(selector labels.Selector) (ret []*v1.VolumeAttachment, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.VolumeAttachment)) - }) - return ret, err -} - -// Get retrieves the VolumeAttachment from the index for a given name. -func (s *volumeAttachmentLister) Get(name string) (*v1.VolumeAttachment, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("volumeattachment"), name) - } - return obj.(*v1.VolumeAttachment), nil -} diff --git a/vendor/k8s.io/client-go/listers/storage/v1beta1/csidriver.go b/vendor/k8s.io/client-go/listers/storage/v1beta1/csidriver.go deleted file mode 100644 index 8a4013750..000000000 --- a/vendor/k8s.io/client-go/listers/storage/v1beta1/csidriver.go +++ /dev/null @@ -1,65 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by lister-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1beta1 "k8s.io/api/storage/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" -) - -// CSIDriverLister helps list CSIDrivers. -type CSIDriverLister interface { - // List lists all CSIDrivers in the indexer. - List(selector labels.Selector) (ret []*v1beta1.CSIDriver, err error) - // Get retrieves the CSIDriver from the index for a given name. - Get(name string) (*v1beta1.CSIDriver, error) - CSIDriverListerExpansion -} - -// cSIDriverLister implements the CSIDriverLister interface. -type cSIDriverLister struct { - indexer cache.Indexer -} - -// NewCSIDriverLister returns a new CSIDriverLister. -func NewCSIDriverLister(indexer cache.Indexer) CSIDriverLister { - return &cSIDriverLister{indexer: indexer} -} - -// List lists all CSIDrivers in the indexer. 
-func (s *cSIDriverLister) List(selector labels.Selector) (ret []*v1beta1.CSIDriver, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.CSIDriver)) - }) - return ret, err -} - -// Get retrieves the CSIDriver from the index for a given name. -func (s *cSIDriverLister) Get(name string) (*v1beta1.CSIDriver, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("csidriver"), name) - } - return obj.(*v1beta1.CSIDriver), nil -} diff --git a/vendor/k8s.io/client-go/listers/storage/v1beta1/csinode.go b/vendor/k8s.io/client-go/listers/storage/v1beta1/csinode.go deleted file mode 100644 index bb7a2b2b3..000000000 --- a/vendor/k8s.io/client-go/listers/storage/v1beta1/csinode.go +++ /dev/null @@ -1,65 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by lister-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1beta1 "k8s.io/api/storage/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" -) - -// CSINodeLister helps list CSINodes. -type CSINodeLister interface { - // List lists all CSINodes in the indexer. - List(selector labels.Selector) (ret []*v1beta1.CSINode, err error) - // Get retrieves the CSINode from the index for a given name. - Get(name string) (*v1beta1.CSINode, error) - CSINodeListerExpansion -} - -// cSINodeLister implements the CSINodeLister interface. -type cSINodeLister struct { - indexer cache.Indexer -} - -// NewCSINodeLister returns a new CSINodeLister. -func NewCSINodeLister(indexer cache.Indexer) CSINodeLister { - return &cSINodeLister{indexer: indexer} -} - -// List lists all CSINodes in the indexer. -func (s *cSINodeLister) List(selector labels.Selector) (ret []*v1beta1.CSINode, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.CSINode)) - }) - return ret, err -} - -// Get retrieves the CSINode from the index for a given name. -func (s *cSINodeLister) Get(name string) (*v1beta1.CSINode, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("csinode"), name) - } - return obj.(*v1beta1.CSINode), nil -} diff --git a/vendor/k8s.io/client-go/listers/storage/v1beta1/expansion_generated.go b/vendor/k8s.io/client-go/listers/storage/v1beta1/expansion_generated.go index eeca4fdb4..21d95620c 100644 --- a/vendor/k8s.io/client-go/listers/storage/v1beta1/expansion_generated.go +++ b/vendor/k8s.io/client-go/listers/storage/v1beta1/expansion_generated.go @@ -18,14 +18,6 @@ limitations under the License. package v1beta1 -// CSIDriverListerExpansion allows custom methods to be added to -// CSIDriverLister. -type CSIDriverListerExpansion interface{} - -// CSINodeListerExpansion allows custom methods to be added to -// CSINodeLister. 
-type CSINodeListerExpansion interface{} - // StorageClassListerExpansion allows custom methods to be added to // StorageClassLister. type StorageClassListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/doc.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/doc.go index b99459757..d06482d55 100644 --- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/doc.go +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/doc.go @@ -16,5 +16,4 @@ limitations under the License. // +k8s:deepcopy-gen=package // +groupName=client.authentication.k8s.io - package clientauthentication // import "k8s.io/client-go/pkg/apis/clientauthentication" diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/doc.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/doc.go index 19ab77614..016adb28a 100644 --- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/doc.go +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/doc.go @@ -20,5 +20,4 @@ limitations under the License. // +k8s:defaulter-gen=TypeMeta // +groupName=client.authentication.k8s.io - package v1alpha1 // import "k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1" diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/types.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/types.go index c714e2457..921f3a2b9 100644 --- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/types.go +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/types.go @@ -22,7 +22,7 @@ import ( // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// ExecCredential is used by exec-based plugins to communicate credentials to +// ExecCredentials is used by exec-based plugins to communicate credentials to // HTTP transports. type ExecCredential struct { metav1.TypeMeta `json:",inline"` diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/doc.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/doc.go index 22d1c588b..fbcd9b7fe 100644 --- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/doc.go +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/doc.go @@ -20,5 +20,4 @@ limitations under the License. // +k8s:defaulter-gen=TypeMeta // +groupName=client.authentication.k8s.io - package v1beta1 // import "k8s.io/client-go/pkg/apis/clientauthentication/v1beta1" diff --git a/vendor/k8s.io/client-go/pkg/version/doc.go b/vendor/k8s.io/client-go/pkg/version/doc.go index 05e997e13..30399fb02 100644 --- a/vendor/k8s.io/client-go/pkg/version/doc.go +++ b/vendor/k8s.io/client-go/pkg/version/doc.go @@ -14,8 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:openapi-gen=true - // Package version supplies version information collected at build time to // kubernetes components. 
+// +k8s:openapi-gen=true package version // import "k8s.io/client-go/pkg/version" diff --git a/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go b/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go index b88902c10..cae9d0d61 100644 --- a/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go +++ b/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go @@ -31,9 +31,9 @@ import ( "sync" "time" - "github.com/davecgh/go-spew/spew" + "github.com/golang/glog" "golang.org/x/crypto/ssh/terminal" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/serializer" @@ -44,7 +44,6 @@ import ( "k8s.io/client-go/tools/clientcmd/api" "k8s.io/client-go/transport" "k8s.io/client-go/util/connrotation" - "k8s.io/klog" ) const execInfoEnv = "KUBERNETES_EXEC_INFO" @@ -74,10 +73,8 @@ func newCache() *cache { return &cache{m: make(map[string]*Authenticator)} } -var spewConfig = &spew.ConfigState{DisableMethods: true, Indent: " "} - func cacheKey(c *api.ExecConfig) string { - return spewConfig.Sprint(c) + return fmt.Sprintf("%#v", c) } type cache struct { @@ -175,9 +172,13 @@ type credentials struct { // UpdateTransportConfig updates the transport.Config to use credentials // returned by the plugin. func (a *Authenticator) UpdateTransportConfig(c *transport.Config) error { - c.Wrap(func(rt http.RoundTripper) http.RoundTripper { + wt := c.WrapTransport + c.WrapTransport = func(rt http.RoundTripper) http.RoundTripper { + if wt != nil { + rt = wt(rt) + } return &roundTripper{a, rt} - }) + } if c.TLS.GetCert != nil { return errors.New("can't add TLS certificate callback: transport.Config.TLS.GetCert already set") @@ -227,7 +228,7 @@ func (r *roundTripper) RoundTrip(req *http.Request) (*http.Response, error) { Code: int32(res.StatusCode), } if err := r.a.maybeRefreshCreds(creds, resp); err != nil { - klog.Errorf("refreshing credentials: %v", err) + glog.Errorf("refreshing credentials: %v", err) } } return res, nil diff --git a/vendor/k8s.io/client-go/rest/config.go b/vendor/k8s.io/client-go/rest/config.go index c75825ec5..10bef7dba 100644 --- a/vendor/k8s.io/client-go/rest/config.go +++ b/vendor/k8s.io/client-go/rest/config.go @@ -29,15 +29,14 @@ import ( "strings" "time" + "github.com/golang/glog" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/pkg/version" clientcmdapi "k8s.io/client-go/tools/clientcmd/api" - "k8s.io/client-go/transport" certutil "k8s.io/client-go/util/cert" "k8s.io/client-go/util/flowcontrol" - "k8s.io/klog" ) const ( @@ -96,16 +95,13 @@ type Config struct { // Transport may be used for custom HTTP behavior. This attribute may not // be specified with the TLS client certificate options. Use WrapTransport - // to provide additional per-server middleware behavior. + // for most client level operations. Transport http.RoundTripper // WrapTransport will be invoked for custom HTTP behavior after the underlying // transport is initialized (either the transport created from TLSClientConfig, // Transport, or http.DefaultTransport). The config may layer other RoundTrippers // on top of the returned RoundTripper. - // - // A future release will change this field to an array. Use config.Wrap() - // instead of setting this value directly. 
- WrapTransport transport.WrapperFunc + WrapTransport func(rt http.RoundTripper) http.RoundTripper // QPS indicates the maximum QPS to the master from this client. // If it's zero, the created RESTClient will use DefaultQPS: 5 @@ -129,47 +125,6 @@ type Config struct { // Version string } -var _ fmt.Stringer = new(Config) -var _ fmt.GoStringer = new(Config) - -type sanitizedConfig *Config - -type sanitizedAuthConfigPersister struct{ AuthProviderConfigPersister } - -func (sanitizedAuthConfigPersister) GoString() string { - return "rest.AuthProviderConfigPersister(--- REDACTED ---)" -} -func (sanitizedAuthConfigPersister) String() string { - return "rest.AuthProviderConfigPersister(--- REDACTED ---)" -} - -// GoString implements fmt.GoStringer and sanitizes sensitive fields of Config -// to prevent accidental leaking via logs. -func (c *Config) GoString() string { - return c.String() -} - -// String implements fmt.Stringer and sanitizes sensitive fields of Config to -// prevent accidental leaking via logs. -func (c *Config) String() string { - if c == nil { - return "" - } - cc := sanitizedConfig(CopyConfig(c)) - // Explicitly mark non-empty credential fields as redacted. - if cc.Password != "" { - cc.Password = "--- REDACTED ---" - } - if cc.BearerToken != "" { - cc.BearerToken = "--- REDACTED ---" - } - if cc.AuthConfigPersister != nil { - cc.AuthConfigPersister = sanitizedAuthConfigPersister{cc.AuthConfigPersister} - } - - return fmt.Sprintf("%#v", cc) -} - // ImpersonationConfig has all the available impersonation options type ImpersonationConfig struct { // UserName is the username to impersonate on each request. @@ -209,40 +164,6 @@ type TLSClientConfig struct { CAData []byte } -var _ fmt.Stringer = TLSClientConfig{} -var _ fmt.GoStringer = TLSClientConfig{} - -type sanitizedTLSClientConfig TLSClientConfig - -// GoString implements fmt.GoStringer and sanitizes sensitive fields of -// TLSClientConfig to prevent accidental leaking via logs. -func (c TLSClientConfig) GoString() string { - return c.String() -} - -// String implements fmt.Stringer and sanitizes sensitive fields of -// TLSClientConfig to prevent accidental leaking via logs. -func (c TLSClientConfig) String() string { - cc := sanitizedTLSClientConfig{ - Insecure: c.Insecure, - ServerName: c.ServerName, - CertFile: c.CertFile, - KeyFile: c.KeyFile, - CAFile: c.CAFile, - CertData: c.CertData, - KeyData: c.KeyData, - CAData: c.CAData, - } - // Explicitly mark non-empty credential fields as redacted. - if len(cc.CertData) != 0 { - cc.CertData = []byte("--- TRUNCATED ---") - } - if len(cc.KeyData) != 0 { - cc.KeyData = []byte("--- REDACTED ---") - } - return fmt.Sprintf("%#v", cc) -} - type ContentConfig struct { // AcceptContentTypes specifies the types the client will accept and is optional. 
// If not set, ContentType will be used to define the Accept header @@ -414,7 +335,7 @@ func InClusterConfig() (*Config, error) { tlsClientConfig := TLSClientConfig{} if _, err := certutil.NewPool(rootCAFile); err != nil { - klog.Errorf("Expected to load root CA config from %s, but got err: %v", rootCAFile, err) + glog.Errorf("Expected to load root CA config from %s, but got err: %v", rootCAFile, err) } else { tlsClientConfig.CAFile = rootCAFile } @@ -487,7 +408,7 @@ func AddUserAgent(config *Config, userAgent string) *Config { return config } -// AnonymousClientConfig returns a copy of the given config with all user credentials (cert/key, bearer token, and username/password) and custom transports (WrapTransport, Transport) removed +// AnonymousClientConfig returns a copy of the given config with all user credentials (cert/key, bearer token, and username/password) removed func AnonymousClientConfig(config *Config) *Config { // copy only known safe fields return &Config{ @@ -500,12 +421,14 @@ func AnonymousClientConfig(config *Config) *Config { CAFile: config.TLSClientConfig.CAFile, CAData: config.TLSClientConfig.CAData, }, - RateLimiter: config.RateLimiter, - UserAgent: config.UserAgent, - QPS: config.QPS, - Burst: config.Burst, - Timeout: config.Timeout, - Dial: config.Dial, + RateLimiter: config.RateLimiter, + UserAgent: config.UserAgent, + Transport: config.Transport, + WrapTransport: config.WrapTransport, + QPS: config.QPS, + Burst: config.Burst, + Timeout: config.Timeout, + Dial: config.Dial, } } diff --git a/vendor/k8s.io/client-go/rest/plugin.go b/vendor/k8s.io/client-go/rest/plugin.go index 83ef5ae32..cf8fbabfd 100644 --- a/vendor/k8s.io/client-go/rest/plugin.go +++ b/vendor/k8s.io/client-go/rest/plugin.go @@ -21,7 +21,7 @@ import ( "net/http" "sync" - "k8s.io/klog" + "github.com/golang/glog" clientcmdapi "k8s.io/client-go/tools/clientcmd/api" ) @@ -57,7 +57,7 @@ func RegisterAuthProviderPlugin(name string, plugin Factory) error { if _, found := plugins[name]; found { return fmt.Errorf("Auth Provider Plugin %q was registered twice", name) } - klog.V(4).Infof("Registered Auth Provider Plugin %q", name) + glog.V(4).Infof("Registered Auth Provider Plugin %q", name) plugins[name] = plugin return nil } diff --git a/vendor/k8s.io/client-go/rest/request.go b/vendor/k8s.io/client-go/rest/request.go index 0570615fc..9bb311448 100644 --- a/vendor/k8s.io/client-go/rest/request.go +++ b/vendor/k8s.io/client-go/rest/request.go @@ -32,6 +32,7 @@ import ( "strings" "time" + "github.com/golang/glog" "golang.org/x/net/http2" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -43,7 +44,6 @@ import ( restclientwatch "k8s.io/client-go/rest/watch" "k8s.io/client-go/tools/metrics" "k8s.io/client-go/util/flowcontrol" - "k8s.io/klog" ) var ( @@ -114,7 +114,7 @@ type Request struct { // NewRequest creates a new request helper object for accessing runtime.Objects on a server. 
func NewRequest(client HTTPClient, verb string, baseURL *url.URL, versionedAPIPath string, content ContentConfig, serializers Serializers, backoff BackoffManager, throttle flowcontrol.RateLimiter, timeout time.Duration) *Request { if backoff == nil { - klog.V(2).Infof("Not implementing request backoff strategy.") + glog.V(2).Infof("Not implementing request backoff strategy.") backoff = &NoBackoff{} } @@ -527,7 +527,7 @@ func (r *Request) tryThrottle() { r.throttle.Accept() } if latency := time.Since(now); latency > longThrottleLatency { - klog.V(4).Infof("Throttling request took %v, request: %s:%s", latency, r.verb, r.URL().String()) + glog.V(4).Infof("Throttling request took %v, request: %s:%s", latency, r.verb, r.URL().String()) } } @@ -592,15 +592,10 @@ func (r *Request) WatchWithSpecificDecoders(wrapperDecoderFn func(io.ReadCloser) if result := r.transformResponse(resp, req); result.err != nil { return nil, result.err } - return nil, fmt.Errorf("for request %s, got status: %v", url, resp.StatusCode) + return nil, fmt.Errorf("for request '%+v', got status: %v", url, resp.StatusCode) } wrapperDecoder := wrapperDecoderFn(resp.Body) - return watch.NewStreamWatcher( - restclientwatch.NewDecoder(wrapperDecoder, embeddedDecoder), - // use 500 to indicate that the cause of the error is unknown - other error codes - // are more specific to HTTP interactions, and set a reason - errors.NewClientErrorReporter(http.StatusInternalServerError, r.verb, "ClientWatchDecoding"), - ), nil + return watch.NewStreamWatcher(restclientwatch.NewDecoder(wrapperDecoder, embeddedDecoder)), nil } // updateURLMetrics is a convenience function for pushing metrics. @@ -688,7 +683,7 @@ func (r *Request) request(fn func(*http.Request, *http.Response)) error { }() if r.err != nil { - klog.V(4).Infof("Error in request: %v", r.err) + glog.V(4).Infof("Error in request: %v", r.err) return r.err } @@ -775,13 +770,13 @@ func (r *Request) request(fn func(*http.Request, *http.Response)) error { if seeker, ok := r.body.(io.Seeker); ok && r.body != nil { _, err := seeker.Seek(0, 0) if err != nil { - klog.V(4).Infof("Could not retry request, can't Seek() back to beginning of body for %T", r.body) + glog.V(4).Infof("Could not retry request, can't Seek() back to beginning of body for %T", r.body) fn(req, resp) return true } } - klog.V(4).Infof("Got a Retry-After %ds response for attempt %d to %v", seconds, retries, url) + glog.V(4).Infof("Got a Retry-After %ds response for attempt %d to %v", seconds, retries, url) r.backoffMgr.Sleep(time.Duration(seconds) * time.Second) return false } @@ -849,14 +844,14 @@ func (r *Request) transformResponse(resp *http.Response, req *http.Request) Resu // 2. Apiserver sends back the headers and then part of the body // 3. Apiserver closes connection. // 4. client-go should catch this and return an error. - klog.V(2).Infof("Stream error %#v when reading response body, may be caused by closed connection.", err) - streamErr := fmt.Errorf("Stream error when reading response body, may be caused by closed connection. Please retry. Original error: %v", err) + glog.V(2).Infof("Stream error %#v when reading response body, may be caused by closed connection.", err) + streamErr := fmt.Errorf("Stream error %#v when reading response body, may be caused by closed connection. Please retry.", err) return Result{ err: streamErr, } default: - klog.Errorf("Unexpected error when reading response body: %v", err) - unexpectedErr := fmt.Errorf("Unexpected error when reading response body. Please retry. 
Original error: %v", err) + glog.Errorf("Unexpected error when reading response body: %#v", err) + unexpectedErr := fmt.Errorf("Unexpected error %#v when reading response body. Please retry.", err) return Result{ err: unexpectedErr, } @@ -919,11 +914,11 @@ func (r *Request) transformResponse(resp *http.Response, req *http.Request) Resu func truncateBody(body string) string { max := 0 switch { - case bool(klog.V(10)): + case bool(glog.V(10)): return body - case bool(klog.V(9)): + case bool(glog.V(9)): max = 10240 - case bool(klog.V(8)): + case bool(glog.V(8)): max = 1024 } @@ -938,13 +933,13 @@ func truncateBody(body string) string { // allocating a new string for the body output unless necessary. Uses a simple heuristic to determine // whether the body is printable. func glogBody(prefix string, body []byte) { - if klog.V(8) { + if glog.V(8) { if bytes.IndexFunc(body, func(r rune) bool { return r < 0x0a }) != -1 { - klog.Infof("%s:\n%s", prefix, truncateBody(hex.Dump(body))) + glog.Infof("%s:\n%s", prefix, truncateBody(hex.Dump(body))) } else { - klog.Infof("%s: %s", prefix, truncateBody(string(body))) + glog.Infof("%s: %s", prefix, truncateBody(string(body))) } } } @@ -1105,8 +1100,7 @@ func (r Result) Into(obj runtime.Object) error { return fmt.Errorf("serializer for %s doesn't exist", r.contentType) } if len(r.body) == 0 { - return fmt.Errorf("0-length response with status code: %d and content type: %s", - r.statusCode, r.contentType) + return fmt.Errorf("0-length response") } out, _, err := r.decoder.Decode(r.body, nil, obj) @@ -1147,7 +1141,7 @@ func (r Result) Error() error { // to be backwards compatible with old servers that do not return a version, default to "v1" out, _, err := r.decoder.Decode(r.body, &schema.GroupVersionKind{Version: "v1"}, nil) if err != nil { - klog.V(5).Infof("body was not decodable (unable to check for Status): %v", err) + glog.V(5).Infof("body was not decodable (unable to check for Status): %v", err) return r.err } switch t := out.(type) { @@ -1201,6 +1195,7 @@ func IsValidPathSegmentPrefix(name string) []string { func ValidatePathSegmentName(name string, prefix bool) []string { if prefix { return IsValidPathSegmentPrefix(name) + } else { + return IsValidPathSegmentName(name) } - return IsValidPathSegmentName(name) } diff --git a/vendor/k8s.io/client-go/rest/transport.go b/vendor/k8s.io/client-go/rest/transport.go index de33ecbfc..25c1801b6 100644 --- a/vendor/k8s.io/client-go/rest/transport.go +++ b/vendor/k8s.io/client-go/rest/transport.go @@ -74,10 +74,9 @@ func (c *Config) TransportConfig() (*transport.Config, error) { KeyFile: c.KeyFile, KeyData: c.KeyData, }, - Username: c.Username, - Password: c.Password, - BearerToken: c.BearerToken, - BearerTokenFile: c.BearerTokenFile, + Username: c.Username, + Password: c.Password, + BearerToken: c.BearerToken, Impersonate: transport.ImpersonationConfig{ UserName: c.Impersonate.UserName, Groups: c.Impersonate.Groups, @@ -104,15 +103,14 @@ func (c *Config) TransportConfig() (*transport.Config, error) { if err != nil { return nil, err } - conf.Wrap(provider.WrapTransport) + wt := conf.WrapTransport + if wt != nil { + conf.WrapTransport = func(rt http.RoundTripper) http.RoundTripper { + return provider.WrapTransport(wt(rt)) + } + } else { + conf.WrapTransport = provider.WrapTransport + } } return conf, nil } - -// Wrap adds a transport middleware function that will give the caller -// an opportunity to wrap the underlying http.RoundTripper prior to the -// first API call being made. 
The provided function is invoked after any -// existing transport wrappers are invoked. -func (c *Config) Wrap(fn transport.WrapperFunc) { - c.WrapTransport = transport.Wrappers(c.WrapTransport, fn) -} diff --git a/vendor/k8s.io/client-go/rest/urlbackoff.go b/vendor/k8s.io/client-go/rest/urlbackoff.go index d00e42f86..eff848abc 100644 --- a/vendor/k8s.io/client-go/rest/urlbackoff.go +++ b/vendor/k8s.io/client-go/rest/urlbackoff.go @@ -20,9 +20,9 @@ import ( "net/url" "time" + "github.com/golang/glog" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/client-go/util/flowcontrol" - "k8s.io/klog" ) // Set of resp. Codes that we backoff for. @@ -64,7 +64,7 @@ func (n *NoBackoff) Sleep(d time.Duration) { // Disable makes the backoff trivial, i.e., sets it to zero. This might be used // by tests which want to run 1000s of mock requests without slowing down. func (b *URLBackoff) Disable() { - klog.V(4).Infof("Disabling backoff strategy") + glog.V(4).Infof("Disabling backoff strategy") b.Backoff = flowcontrol.NewBackOff(0*time.Second, 0*time.Second) } @@ -76,7 +76,7 @@ func (b *URLBackoff) baseUrlKey(rawurl *url.URL) string { // in the future. host, err := url.Parse(rawurl.String()) if err != nil { - klog.V(4).Infof("Error extracting url: %v", rawurl) + glog.V(4).Infof("Error extracting url: %v", rawurl) panic("bad url!") } return host.Host @@ -89,7 +89,7 @@ func (b *URLBackoff) UpdateBackoff(actualUrl *url.URL, err error, responseCode i b.Backoff.Next(b.baseUrlKey(actualUrl), b.Backoff.Clock.Now()) return } else if responseCode >= 300 || err != nil { - klog.V(4).Infof("Client is returning errors: code %v, error %v", responseCode, err) + glog.V(4).Infof("Client is returning errors: code %v, error %v", responseCode, err) } //If we got this far, there is no backoff required for this URL anymore. 
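For context on the rest/config.go and rest/transport.go hunks above: they revert WrapTransport from the newer transport.WrapperFunc field (chained via config.Wrap) back to a bare func(http.RoundTripper) http.RoundTripper, so any caller layering HTTP middleware must chain wrappers by hand, exactly as the reverted TransportConfig does for auth-provider wrapping. The following is a minimal sketch of that manual chaining against the vendored rest.Config, not part of the patch itself; chainWrapTransport and loggingRoundTripper are names invented for this sketch.

package main

import (
	"fmt"
	"net/http"

	"k8s.io/client-go/rest"
)

// loggingRoundTripper is a hypothetical middleware that logs each request
// before delegating to the wrapped RoundTripper.
type loggingRoundTripper struct{ next http.RoundTripper }

func (l *loggingRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
	fmt.Printf("%s %s\n", req.Method, req.URL)
	return l.next.RoundTrip(req)
}

// chainWrapTransport layers outer on top of any WrapTransport already set,
// mirroring the manual chaining the reverted TransportConfig performs.
func chainWrapTransport(cfg *rest.Config, outer func(http.RoundTripper) http.RoundTripper) {
	inner := cfg.WrapTransport // may be nil if no wrapper was set yet
	cfg.WrapTransport = func(rt http.RoundTripper) http.RoundTripper {
		if inner != nil {
			rt = inner(rt) // apply the pre-existing wrapper closest to the base transport
		}
		return outer(rt)
	}
}

func main() {
	cfg := &rest.Config{Host: "https://localhost:6443"}
	chainWrapTransport(cfg, func(rt http.RoundTripper) http.RoundTripper {
		return &loggingRoundTripper{next: rt}
	})
	// cfg.WrapTransport now wraps whatever base transport the client builds.
}

Note the ordering this pattern produces: the last wrapper added ends up outermost and sees each request first, which matches the behavior of the removed config.Wrap helper.
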
diff --git a/vendor/k8s.io/client-go/rest/watch/decoder.go b/vendor/k8s.io/client-go/rest/watch/decoder.go index e95c020b2..73bb63add 100644 --- a/vendor/k8s.io/client-go/rest/watch/decoder.go +++ b/vendor/k8s.io/client-go/rest/watch/decoder.go @@ -54,7 +54,7 @@ func (d *Decoder) Decode() (watch.EventType, runtime.Object, error) { return "", nil, fmt.Errorf("unable to decode to metav1.Event") } switch got.Type { - case string(watch.Added), string(watch.Modified), string(watch.Deleted), string(watch.Error), string(watch.Bookmark): + case string(watch.Added), string(watch.Modified), string(watch.Deleted), string(watch.Error): default: return "", nil, fmt.Errorf("got invalid watch event type: %v", got.Type) } diff --git a/vendor/k8s.io/client-go/testing/actions.go b/vendor/k8s.io/client-go/testing/actions.go index e6db578ed..b99f231c8 100644 --- a/vendor/k8s.io/client-go/testing/actions.go +++ b/vendor/k8s.io/client-go/testing/actions.go @@ -26,7 +26,6 @@ import ( "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/types" ) func NewRootGetAction(resource schema.GroupVersionResource, name string) GetActionImpl { @@ -153,49 +152,45 @@ func NewUpdateAction(resource schema.GroupVersionResource, namespace string, obj return action } -func NewRootPatchAction(resource schema.GroupVersionResource, name string, pt types.PatchType, patch []byte) PatchActionImpl { +func NewRootPatchAction(resource schema.GroupVersionResource, name string, patch []byte) PatchActionImpl { action := PatchActionImpl{} action.Verb = "patch" action.Resource = resource action.Name = name - action.PatchType = pt action.Patch = patch return action } -func NewPatchAction(resource schema.GroupVersionResource, namespace string, name string, pt types.PatchType, patch []byte) PatchActionImpl { +func NewPatchAction(resource schema.GroupVersionResource, namespace string, name string, patch []byte) PatchActionImpl { action := PatchActionImpl{} action.Verb = "patch" action.Resource = resource action.Namespace = namespace action.Name = name - action.PatchType = pt action.Patch = patch return action } -func NewRootPatchSubresourceAction(resource schema.GroupVersionResource, name string, pt types.PatchType, patch []byte, subresources ...string) PatchActionImpl { +func NewRootPatchSubresourceAction(resource schema.GroupVersionResource, name string, patch []byte, subresources ...string) PatchActionImpl { action := PatchActionImpl{} action.Verb = "patch" action.Resource = resource action.Subresource = path.Join(subresources...) action.Name = name - action.PatchType = pt action.Patch = patch return action } -func NewPatchSubresourceAction(resource schema.GroupVersionResource, namespace, name string, pt types.PatchType, patch []byte, subresources ...string) PatchActionImpl { +func NewPatchSubresourceAction(resource schema.GroupVersionResource, namespace, name string, patch []byte, subresources ...string) PatchActionImpl { action := PatchActionImpl{} action.Verb = "patch" action.Resource = resource action.Subresource = path.Join(subresources...) 
action.Namespace = namespace action.Name = name - action.PatchType = pt action.Patch = patch return action @@ -401,7 +396,6 @@ type DeleteCollectionAction interface { type PatchAction interface { Action GetName() string - GetPatchType() types.PatchType GetPatch() []byte } @@ -543,9 +537,8 @@ func (a UpdateActionImpl) DeepCopy() Action { type PatchActionImpl struct { ActionImpl - Name string - PatchType types.PatchType - Patch []byte + Name string + Patch []byte } func (a PatchActionImpl) GetName() string { @@ -556,17 +549,12 @@ func (a PatchActionImpl) GetPatch() []byte { return a.Patch } -func (a PatchActionImpl) GetPatchType() types.PatchType { - return a.PatchType -} - func (a PatchActionImpl) DeepCopy() Action { patch := make([]byte, len(a.Patch)) copy(patch, a.Patch) return PatchActionImpl{ ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl), Name: a.Name, - PatchType: a.PatchType, Patch: patch, } } diff --git a/vendor/k8s.io/client-go/testing/fake.go b/vendor/k8s.io/client-go/testing/fake.go index 8b9ee149c..8b3f31eaf 100644 --- a/vendor/k8s.io/client-go/testing/fake.go +++ b/vendor/k8s.io/client-go/testing/fake.go @@ -131,14 +131,13 @@ func (c *Fake) Invokes(action Action, defaultReturnObj runtime.Object) (runtime. c.Lock() defer c.Unlock() - actionCopy := action.DeepCopy() c.actions = append(c.actions, action.DeepCopy()) for _, reactor := range c.ReactionChain { - if !reactor.Handles(actionCopy) { + if !reactor.Handles(action) { continue } - handled, ret, err := reactor.React(actionCopy) + handled, ret, err := reactor.React(action.DeepCopy()) if !handled { continue } @@ -155,14 +154,13 @@ func (c *Fake) InvokesWatch(action Action) (watch.Interface, error) { c.Lock() defer c.Unlock() - actionCopy := action.DeepCopy() c.actions = append(c.actions, action.DeepCopy()) for _, reactor := range c.WatchReactionChain { - if !reactor.Handles(actionCopy) { + if !reactor.Handles(action) { continue } - handled, ret, err := reactor.React(actionCopy) + handled, ret, err := reactor.React(action.DeepCopy()) if !handled { continue } @@ -179,14 +177,13 @@ func (c *Fake) InvokesProxy(action Action) restclient.ResponseWrapper { c.Lock() defer c.Unlock() - actionCopy := action.DeepCopy() c.actions = append(c.actions, action.DeepCopy()) for _, reactor := range c.ProxyReactionChain { - if !reactor.Handles(actionCopy) { + if !reactor.Handles(action) { continue } - handled, ret, err := reactor.React(actionCopy) + handled, ret, err := reactor.React(action.DeepCopy()) if !handled || err != nil { continue } diff --git a/vendor/k8s.io/client-go/testing/fixture.go b/vendor/k8s.io/client-go/testing/fixture.go index 993fcf6a1..00c4c49fc 100644 --- a/vendor/k8s.io/client-go/testing/fixture.go +++ b/vendor/k8s.io/client-go/testing/fixture.go @@ -20,13 +20,11 @@ import ( "fmt" "sync" - jsonpatch "github.com/evanphx/json-patch" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/json" "k8s.io/apimachinery/pkg/util/strategicpatch" "k8s.io/apimachinery/pkg/watch" @@ -131,46 +129,23 @@ func ObjectReaction(tracker ObjectTracker) ReactionFunc { case PatchActionImpl: obj, err := tracker.Get(gvr, ns, action.GetName()) if err != nil { - return true, nil, err + // object is not registered + return false, nil, err } old, err := json.Marshal(obj) if err != nil { return true, nil, err } + // Only supports strategic merge patch + // 
TODO: Add support for other Patch types + mergedByte, err := strategicpatch.StrategicMergePatch(old, action.GetPatch(), obj) + if err != nil { + return true, nil, err + } - switch action.GetPatchType() { - case types.JSONPatchType: - patch, err := jsonpatch.DecodePatch(action.GetPatch()) - if err != nil { - return true, nil, err - } - modified, err := patch.Apply(old) - if err != nil { - return true, nil, err - } - if err = json.Unmarshal(modified, obj); err != nil { - return true, nil, err - } - case types.MergePatchType: - modified, err := jsonpatch.MergePatch(old, action.GetPatch()) - if err != nil { - return true, nil, err - } - - if err := json.Unmarshal(modified, obj); err != nil { - return true, nil, err - } - case types.StrategicMergePatchType: - mergedByte, err := strategicpatch.StrategicMergePatch(old, action.GetPatch(), obj) - if err != nil { - return true, nil, err - } - if err = json.Unmarshal(mergedByte, obj); err != nil { - return true, nil, err - } - default: - return true, nil, fmt.Errorf("PatchType is not supported") + if err = json.Unmarshal(mergedByte, obj); err != nil { + return true, nil, err } if err = tracker.Update(gvr, obj, ns); err != nil { @@ -347,10 +322,8 @@ func (t *tracker) getWatches(gvr schema.GroupVersionResource, ns string) []*watc if w := t.watchers[gvr][ns]; w != nil { watches = append(watches, w...) } - if ns != metav1.NamespaceAll { - if w := t.watchers[gvr][metav1.NamespaceAll]; w != nil { - watches = append(watches, w...) - } + if w := t.watchers[gvr][""]; w != nil { + watches = append(watches, w...) } } return watches diff --git a/vendor/k8s.io/client-go/tools/auth/clientauth.go b/vendor/k8s.io/client-go/tools/auth/clientauth.go new file mode 100644 index 000000000..20339ab9d --- /dev/null +++ b/vendor/k8s.io/client-go/tools/auth/clientauth.go @@ -0,0 +1,125 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package auth defines a file format for holding authentication +information needed by clients of Kubernetes. Typically, +a Kubernetes cluster will put auth info for the admin in a known +location when it is created, and will (soon) put it in a known +location within a Container's file tree for Containers that +need access to the Kubernetes API. + +Having a defined format allows: + - clients to be implemented in multiple languages + - applications which link clients to be portable across + clusters with different authentication styles (e.g. + some may use SSL Client certs, others may not, etc) + - when the format changes, applications only + need to update this code. + +The file format is json, marshalled from a struct authcfg.Info. + +Clinet libraries in other languages should use the same format. + +It is not intended to store general preferences, such as default +namespace, output options, etc. CLIs (such as kubectl) and UIs should +develop their own format and may wish to inline the authcfg.Info type. + +The authcfg.Info is just a file format. 
It is distinct from +client.Config which holds options for creating a client.Client. +Helper functions are provided in this package to fill in a +client.Client from an authcfg.Info. + +Example: + + import ( + "pkg/client" + "pkg/client/auth" + ) + + info, err := auth.LoadFromFile(filename) + if err != nil { + // handle error + } + clientConfig = client.Config{} + clientConfig.Host = "example.com:4901" + clientConfig = info.MergeWithConfig() + client := client.New(clientConfig) + client.Pods(ns).List() +*/ +package auth + +// TODO: need a way to rotate Tokens. Therefore, need a way for client object to be reset when the authcfg is updated. +import ( + "encoding/json" + "io/ioutil" + "os" + + restclient "k8s.io/client-go/rest" +) + +// Info holds Kubernetes API authorization config. It is intended +// to be read/written from a file as a JSON object. +type Info struct { + User string + Password string + CAFile string + CertFile string + KeyFile string + BearerToken string + Insecure *bool +} + +// LoadFromFile parses an Info object from a file path. +// If the file does not exist, then os.IsNotExist(err) == true +func LoadFromFile(path string) (*Info, error) { + var info Info + if _, err := os.Stat(path); os.IsNotExist(err) { + return nil, err + } + data, err := ioutil.ReadFile(path) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &info) + if err != nil { + return nil, err + } + return &info, err +} + +// MergeWithConfig returns a copy of a client.Config with values from the Info. +// The fields of client.Config with a corresponding field in the Info are set +// with the value from the Info. +func (info Info) MergeWithConfig(c restclient.Config) (restclient.Config, error) { + var config restclient.Config = c + config.Username = info.User + config.Password = info.Password + config.CAFile = info.CAFile + config.CertFile = info.CertFile + config.KeyFile = info.KeyFile + config.BearerToken = info.BearerToken + if info.Insecure != nil { + config.Insecure = *info.Insecure + } + return config, nil +} + +func (info Info) Complete() bool { + return len(info.User) > 0 || + len(info.CertFile) > 0 || + len(info.BearerToken) > 0 +} diff --git a/vendor/k8s.io/client-go/tools/cache/controller.go b/vendor/k8s.io/client-go/tools/cache/controller.go index b5d392520..028c75e8e 100644 --- a/vendor/k8s.io/client-go/tools/cache/controller.go +++ b/vendor/k8s.io/client-go/tools/cache/controller.go @@ -28,9 +28,9 @@ import ( // Config contains all the settings for a Controller. type Config struct { - // The queue for your objects - has to be a DeltaFIFO due to - // assumptions in the implementation. Your Process() function - // should accept the output of this Queue's Pop() method. + // The queue for your objects; either a FIFO or + // a DeltaFIFO. Your Process() function should accept + // the output of this Queue's Pop() method. Queue // Something that can list and watch your objects. @@ -285,7 +285,45 @@ func NewInformer( // This will hold the client state, as we know it. clientState := NewStore(DeletionHandlingMetaNamespaceKeyFunc) - return clientState, newInformer(lw, objType, resyncPeriod, h, clientState) + // This will hold incoming changes. Note how we pass clientState in as a + // KeyLister, that way resync operations will result in the correct set + // of update/delete deltas. 
+ fifo := NewDeltaFIFO(MetaNamespaceKeyFunc, clientState) + + cfg := &Config{ + Queue: fifo, + ListerWatcher: lw, + ObjectType: objType, + FullResyncPeriod: resyncPeriod, + RetryOnError: false, + + Process: func(obj interface{}) error { + // from oldest to newest + for _, d := range obj.(Deltas) { + switch d.Type { + case Sync, Added, Updated: + if old, exists, err := clientState.Get(d.Object); err == nil && exists { + if err := clientState.Update(d.Object); err != nil { + return err + } + h.OnUpdate(old, d.Object) + } else { + if err := clientState.Add(d.Object); err != nil { + return err + } + h.OnAdd(d.Object) + } + case Deleted: + if err := clientState.Delete(d.Object); err != nil { + return err + } + h.OnDelete(d.Object) + } + } + return nil + }, + } + return clientState, New(cfg) } // NewIndexerInformer returns a Indexer and a controller for populating the index @@ -314,30 +352,6 @@ func NewIndexerInformer( // This will hold the client state, as we know it. clientState := NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, indexers) - return clientState, newInformer(lw, objType, resyncPeriod, h, clientState) -} - -// newInformer returns a controller for populating the store while also -// providing event notifications. -// -// Parameters -// * lw is list and watch functions for the source of the resource you want to -// be informed of. -// * objType is an object of the type that you expect to receive. -// * resyncPeriod: if non-zero, will re-list this often (you will get OnUpdate -// calls, even if nothing changed). Otherwise, re-list will be delayed as -// long as possible (until the upstream source closes the watch or times out, -// or you stop the controller). -// * h is the object you want notifications sent to. -// * clientState is the store you want to populate -// -func newInformer( - lw ListerWatcher, - objType runtime.Object, - resyncPeriod time.Duration, - h ResourceEventHandler, - clientState Store, -) Controller { // This will hold incoming changes. Note how we pass clientState in as a // KeyLister, that way resync operations will result in the correct set // of update/delete deltas. @@ -376,5 +390,5 @@ func newInformer( return nil }, } - return New(cfg) + return clientState, New(cfg) } diff --git a/vendor/k8s.io/client-go/tools/cache/delta_fifo.go b/vendor/k8s.io/client-go/tools/cache/delta_fifo.go index f24eec254..45c3b500d 100644 --- a/vendor/k8s.io/client-go/tools/cache/delta_fifo.go +++ b/vendor/k8s.io/client-go/tools/cache/delta_fifo.go @@ -23,7 +23,7 @@ import ( "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/klog" + "github.com/golang/glog" ) // NewDeltaFIFO returns a Store which can be used process changes to items. @@ -320,15 +320,17 @@ func (f *DeltaFIFO) queueActionLocked(actionType DeltaType, obj interface{}) err newDeltas := append(f.items[id], Delta{actionType, obj}) newDeltas = dedupDeltas(newDeltas) + _, exists := f.items[id] if len(newDeltas) > 0 { - if _, exists := f.items[id]; !exists { + if !exists { f.queue = append(f.queue, id) } f.items[id] = newDeltas f.cond.Broadcast() - } else { - // We need to remove this from our map (extra items in the queue are - // ignored if they are not in the map). + } else if exists { + // We need to remove this from our map (extra items + // in the queue are ignored if they are not in the + // map). 
delete(f.items, id) } return nil @@ -346,6 +348,9 @@ func (f *DeltaFIFO) List() []interface{} { func (f *DeltaFIFO) listLocked() []interface{} { list := make([]interface{}, 0, len(f.items)) for _, item := range f.items { + // Copy item's slice so operations on this slice + // won't interfere with the object we return. + item = copyDeltas(item) list = append(list, item.Newest().Object) } return list @@ -393,7 +398,10 @@ func (f *DeltaFIFO) GetByKey(key string) (item interface{}, exists bool, err err func (f *DeltaFIFO) IsClosed() bool { f.closedLock.Lock() defer f.closedLock.Unlock() - return f.closed + if f.closed { + return true + } + return false } // Pop blocks until an item is added to the queue, and then returns it. If @@ -424,10 +432,10 @@ func (f *DeltaFIFO) Pop(process PopProcessFunc) (interface{}, error) { } id := f.queue[0] f.queue = f.queue[1:] + item, ok := f.items[id] if f.initialPopulationCount > 0 { f.initialPopulationCount-- } - item, ok := f.items[id] if !ok { // Item may have been deleted subsequently. continue @@ -466,7 +474,6 @@ func (f *DeltaFIFO) Replace(list []interface{}, resourceVersion string) error { if f.knownObjects == nil { // Do deletion detection against our own list. - queuedDeletions := 0 for k, oldItem := range f.items { if keys.Has(k) { continue @@ -475,7 +482,6 @@ func (f *DeltaFIFO) Replace(list []interface{}, resourceVersion string) error { if n := oldItem.Newest(); n != nil { deletedObj = n.Object } - queuedDeletions++ if err := f.queueActionLocked(Deleted, DeletedFinalStateUnknown{k, deletedObj}); err != nil { return err } @@ -483,9 +489,7 @@ func (f *DeltaFIFO) Replace(list []interface{}, resourceVersion string) error { if !f.populated { f.populated = true - // While there shouldn't be any queued deletions in the initial - // population of the queue, it's better to be on the safe side. 
- f.initialPopulationCount = len(list) + queuedDeletions + f.initialPopulationCount = len(list) } return nil @@ -502,10 +506,10 @@ func (f *DeltaFIFO) Replace(list []interface{}, resourceVersion string) error { deletedObj, exists, err := f.knownObjects.GetByKey(k) if err != nil { deletedObj = nil - klog.Errorf("Unexpected error %v during lookup of key %v, placing DeleteFinalStateUnknown marker without object", err, k) + glog.Errorf("Unexpected error %v during lookup of key %v, placing DeleteFinalStateUnknown marker without object", err, k) } else if !exists { deletedObj = nil - klog.Infof("Key %v does not exist in known objects store, placing DeleteFinalStateUnknown marker without object", k) + glog.Infof("Key %v does not exist in known objects store, placing DeleteFinalStateUnknown marker without object", k) } queuedDeletions++ if err := f.queueActionLocked(Deleted, DeletedFinalStateUnknown{k, deletedObj}); err != nil { @@ -549,10 +553,10 @@ func (f *DeltaFIFO) syncKey(key string) error { func (f *DeltaFIFO) syncKeyLocked(key string) error { obj, exists, err := f.knownObjects.GetByKey(key) if err != nil { - klog.Errorf("Unexpected error %v during lookup of key %v, unable to queue object for sync", err, key) + glog.Errorf("Unexpected error %v during lookup of key %v, unable to queue object for sync", err, key) return nil } else if !exists { - klog.Infof("Key %v does not exist in known objects store, unable to queue object for sync", key) + glog.Infof("Key %v does not exist in known objects store, unable to queue object for sync", key) return nil } diff --git a/vendor/k8s.io/client-go/tools/cache/expiration_cache.go b/vendor/k8s.io/client-go/tools/cache/expiration_cache.go index 4b00544fc..fa88fc407 100644 --- a/vendor/k8s.io/client-go/tools/cache/expiration_cache.go +++ b/vendor/k8s.io/client-go/tools/cache/expiration_cache.go @@ -20,8 +20,8 @@ import ( "sync" "time" + "github.com/golang/glog" "k8s.io/apimachinery/pkg/util/clock" - "k8s.io/klog" ) // ExpirationCache implements the store interface @@ -48,7 +48,7 @@ type ExpirationCache struct { // ExpirationPolicy dictates when an object expires. Currently only abstracted out // so unittests don't rely on the system clock. type ExpirationPolicy interface { - IsExpired(obj *TimestampedEntry) bool + IsExpired(obj *timestampedEntry) bool } // TTLPolicy implements a ttl based ExpirationPolicy. @@ -63,29 +63,26 @@ type TTLPolicy struct { // IsExpired returns true if the given object is older than the ttl, or it can't // determine its age. -func (p *TTLPolicy) IsExpired(obj *TimestampedEntry) bool { - return p.Ttl > 0 && p.Clock.Since(obj.Timestamp) > p.Ttl +func (p *TTLPolicy) IsExpired(obj *timestampedEntry) bool { + return p.Ttl > 0 && p.Clock.Since(obj.timestamp) > p.Ttl } -// TimestampedEntry is the only type allowed in a ExpirationCache. -// Keep in mind that it is not safe to share timestamps between computers. -// Behavior may be inconsistent if you get a timestamp from the API Server and -// use it on the client machine as part of your ExpirationCache. -type TimestampedEntry struct { - Obj interface{} - Timestamp time.Time +// timestampedEntry is the only type allowed in a ExpirationCache. +type timestampedEntry struct { + obj interface{} + timestamp time.Time } -// getTimestampedEntry returns the TimestampedEntry stored under the given key. -func (c *ExpirationCache) getTimestampedEntry(key string) (*TimestampedEntry, bool) { +// getTimestampedEntry returns the timestampedEntry stored under the given key. 
+func (c *ExpirationCache) getTimestampedEntry(key string) (*timestampedEntry, bool) { item, _ := c.cacheStorage.Get(key) - if tsEntry, ok := item.(*TimestampedEntry); ok { + if tsEntry, ok := item.(*timestampedEntry); ok { return tsEntry, true } return nil, false } -// getOrExpire retrieves the object from the TimestampedEntry if and only if it hasn't +// getOrExpire retrieves the object from the timestampedEntry if and only if it hasn't // already expired. It holds a write lock across deletion. func (c *ExpirationCache) getOrExpire(key string) (interface{}, bool) { // Prevent all inserts from the time we deem an item as "expired" to when we @@ -98,11 +95,11 @@ func (c *ExpirationCache) getOrExpire(key string) (interface{}, bool) { return nil, false } if c.expirationPolicy.IsExpired(timestampedItem) { - klog.V(4).Infof("Entry %v: %+v has expired", key, timestampedItem.Obj) + glog.V(4).Infof("Entry %v: %+v has expired", key, timestampedItem.obj) c.cacheStorage.Delete(key) return nil, false } - return timestampedItem.Obj, true + return timestampedItem.obj, true } // GetByKey returns the item stored under the key, or sets exists=false. @@ -129,7 +126,7 @@ func (c *ExpirationCache) List() []interface{} { list := make([]interface{}, 0, len(items)) for _, item := range items { - obj := item.(*TimestampedEntry).Obj + obj := item.(*timestampedEntry).obj if key, err := c.keyFunc(obj); err != nil { list = append(list, obj) } else if obj, exists := c.getOrExpire(key); exists { @@ -147,14 +144,14 @@ func (c *ExpirationCache) ListKeys() []string { // Add timestamps an item and inserts it into the cache, overwriting entries // that might exist under the same key. func (c *ExpirationCache) Add(obj interface{}) error { + c.expirationLock.Lock() + defer c.expirationLock.Unlock() + key, err := c.keyFunc(obj) if err != nil { return KeyError{obj, err} } - c.expirationLock.Lock() - defer c.expirationLock.Unlock() - - c.cacheStorage.Add(key, &TimestampedEntry{obj, c.clock.Now()}) + c.cacheStorage.Add(key, ×tampedEntry{obj, c.clock.Now()}) return nil } @@ -166,12 +163,12 @@ func (c *ExpirationCache) Update(obj interface{}) error { // Delete removes an item from the cache. func (c *ExpirationCache) Delete(obj interface{}) error { + c.expirationLock.Lock() + defer c.expirationLock.Unlock() key, err := c.keyFunc(obj) if err != nil { return KeyError{obj, err} } - c.expirationLock.Lock() - defer c.expirationLock.Unlock() c.cacheStorage.Delete(key) return nil } @@ -180,17 +177,17 @@ func (c *ExpirationCache) Delete(obj interface{}) error { // before attempting the replace operation. The replace operation will // delete the contents of the ExpirationCache `c`. 
func (c *ExpirationCache) Replace(list []interface{}, resourceVersion string) error { - items := make(map[string]interface{}, len(list)) + c.expirationLock.Lock() + defer c.expirationLock.Unlock() + items := map[string]interface{}{} ts := c.clock.Now() for _, item := range list { key, err := c.keyFunc(item) if err != nil { return KeyError{item, err} } - items[key] = &TimestampedEntry{item, ts} + items[key] = ×tampedEntry{item, ts} } - c.expirationLock.Lock() - defer c.expirationLock.Unlock() c.cacheStorage.Replace(items, resourceVersion) return nil } @@ -202,15 +199,10 @@ func (c *ExpirationCache) Resync() error { // NewTTLStore creates and returns a ExpirationCache with a TTLPolicy func NewTTLStore(keyFunc KeyFunc, ttl time.Duration) Store { - return NewExpirationStore(keyFunc, &TTLPolicy{ttl, clock.RealClock{}}) -} - -// NewExpirationStore creates and returns a ExpirationCache for a given policy -func NewExpirationStore(keyFunc KeyFunc, expirationPolicy ExpirationPolicy) Store { return &ExpirationCache{ cacheStorage: NewThreadSafeStore(Indexers{}, Indices{}), keyFunc: keyFunc, clock: clock.RealClock{}, - expirationPolicy: expirationPolicy, + expirationPolicy: &TTLPolicy{ttl, clock.RealClock{}}, } } diff --git a/vendor/k8s.io/client-go/tools/cache/expiration_cache_fakes.go b/vendor/k8s.io/client-go/tools/cache/expiration_cache_fakes.go index d61db3d51..a096765f6 100644 --- a/vendor/k8s.io/client-go/tools/cache/expiration_cache_fakes.go +++ b/vendor/k8s.io/client-go/tools/cache/expiration_cache_fakes.go @@ -38,7 +38,7 @@ type FakeExpirationPolicy struct { RetrieveKeyFunc KeyFunc } -func (p *FakeExpirationPolicy) IsExpired(obj *TimestampedEntry) bool { +func (p *FakeExpirationPolicy) IsExpired(obj *timestampedEntry) bool { key, _ := p.RetrieveKeyFunc(obj) return !p.NeverExpire.Has(key) } diff --git a/vendor/k8s.io/client-go/tools/cache/fake_custom_store.go b/vendor/k8s.io/client-go/tools/cache/fake_custom_store.go index b59e2eb27..8d71c2474 100644 --- a/vendor/k8s.io/client-go/tools/cache/fake_custom_store.go +++ b/vendor/k8s.io/client-go/tools/cache/fake_custom_store.go @@ -40,7 +40,7 @@ func (f *FakeCustomStore) Add(obj interface{}) error { // Update calls the custom Update function if defined func (f *FakeCustomStore) Update(obj interface{}) error { if f.UpdateFunc != nil { - return f.UpdateFunc(obj) + return f.Update(obj) } return nil } diff --git a/vendor/k8s.io/client-go/tools/cache/fifo.go b/vendor/k8s.io/client-go/tools/cache/fifo.go index 508c5530c..e05c01ee2 100644 --- a/vendor/k8s.io/client-go/tools/cache/fifo.go +++ b/vendor/k8s.io/client-go/tools/cache/fifo.go @@ -297,7 +297,7 @@ func (f *FIFO) Pop(process PopProcessFunc) (interface{}, error) { // after calling this function. f's queue is reset, too; upon return, it // will contain the items in the map, in no particular order. 
func (f *FIFO) Replace(list []interface{}, resourceVersion string) error { - items := make(map[string]interface{}, len(list)) + items := map[string]interface{}{} for _, item := range list { key, err := f.keyFunc(item) if err != nil { diff --git a/vendor/k8s.io/client-go/tools/cache/heap.go b/vendor/k8s.io/client-go/tools/cache/heap.go index 7357ff97a..78e492455 100644 --- a/vendor/k8s.io/client-go/tools/cache/heap.go +++ b/vendor/k8s.io/client-go/tools/cache/heap.go @@ -204,7 +204,7 @@ func (h *Heap) AddIfNotPresent(obj interface{}) error { return nil } -// addIfNotPresentLocked assumes the lock is already held and adds the provided +// addIfNotPresentLocked assumes the lock is already held and adds the the provided // item to the queue if it does not already exist. func (h *Heap) addIfNotPresentLocked(key string, obj interface{}) { if _, exists := h.data.items[key]; exists { diff --git a/vendor/k8s.io/client-go/tools/cache/listers.go b/vendor/k8s.io/client-go/tools/cache/listers.go index 311ff8c49..27d51a6b3 100644 --- a/vendor/k8s.io/client-go/tools/cache/listers.go +++ b/vendor/k8s.io/client-go/tools/cache/listers.go @@ -17,7 +17,7 @@ limitations under the License. package cache import ( - "k8s.io/klog" + "github.com/golang/glog" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" @@ -31,14 +31,7 @@ import ( type AppendFunc func(interface{}) func ListAll(store Store, selector labels.Selector, appendFn AppendFunc) error { - selectAll := selector.Empty() for _, m := range store.List() { - if selectAll { - // Avoid computing labels of the objects to speed up common flows - // of listing all objects. - appendFn(m) - continue - } metadata, err := meta.Accessor(m) if err != nil { return err @@ -51,15 +44,8 @@ func ListAll(store Store, selector labels.Selector, appendFn AppendFunc) error { } func ListAllByNamespace(indexer Indexer, namespace string, selector labels.Selector, appendFn AppendFunc) error { - selectAll := selector.Empty() if namespace == metav1.NamespaceAll { for _, m := range indexer.List() { - if selectAll { - // Avoid computing labels of the objects to speed up common flows - // of listing all objects. - appendFn(m) - continue - } metadata, err := meta.Accessor(m) if err != nil { return err @@ -74,7 +60,7 @@ func ListAllByNamespace(indexer Indexer, namespace string, selector labels.Selec items, err := indexer.Index(NamespaceIndex, &metav1.ObjectMeta{Namespace: namespace}) if err != nil { // Ignore error; do slow search without index. - klog.Warningf("can not retrieve list of objects using index : %v", err) + glog.Warningf("can not retrieve list of objects using index : %v", err) for _, m := range indexer.List() { metadata, err := meta.Accessor(m) if err != nil { @@ -88,12 +74,6 @@ func ListAllByNamespace(indexer Indexer, namespace string, selector labels.Selec return nil } for _, m := range items { - if selectAll { - // Avoid computing labels of the objects to speed up common flows - // of listing all objects. - appendFn(m) - continue - } metadata, err := meta.Accessor(m) if err != nil { return err diff --git a/vendor/k8s.io/client-go/tools/cache/listwatch.go b/vendor/k8s.io/client-go/tools/cache/listwatch.go index 8227b73b6..f86791650 100644 --- a/vendor/k8s.io/client-go/tools/cache/listwatch.go +++ b/vendor/k8s.io/client-go/tools/cache/listwatch.go @@ -27,25 +27,15 @@ import ( "k8s.io/client-go/tools/pager" ) -// Lister is any object that knows how to perform an initial list. 
-type Lister interface { +// ListerWatcher is any object that knows how to perform an initial list and start a watch on a resource. +type ListerWatcher interface { // List should return a list type object; the Items field will be extracted, and the // ResourceVersion field will be used to start the watch in the right place. List(options metav1.ListOptions) (runtime.Object, error) -} - -// Watcher is any object that knows how to start a watch on a resource. -type Watcher interface { // Watch should begin a watch at the specified version. Watch(options metav1.ListOptions) (watch.Interface, error) } -// ListerWatcher is any object that knows how to perform an initial list and start a watch on a resource. -type ListerWatcher interface { - Lister - Watcher -} - // ListFunc knows how to list resources type ListFunc func(options metav1.ListOptions) (runtime.Object, error) diff --git a/vendor/k8s.io/client-go/tools/cache/mutation_cache.go b/vendor/k8s.io/client-go/tools/cache/mutation_cache.go index 4c6686e91..cbb6434eb 100644 --- a/vendor/k8s.io/client-go/tools/cache/mutation_cache.go +++ b/vendor/k8s.io/client-go/tools/cache/mutation_cache.go @@ -22,7 +22,7 @@ import ( "sync" "time" - "k8s.io/klog" + "github.com/golang/glog" "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime" @@ -156,7 +156,7 @@ func (c *mutationCache) ByIndex(name string, indexKey string) ([]interface{}, er } elements, err := fn(updated) if err != nil { - klog.V(4).Infof("Unable to calculate an index entry for mutation cache entry %s: %v", key, err) + glog.V(4).Infof("Unable to calculate an index entry for mutation cache entry %s: %v", key, err) continue } for _, inIndex := range elements { diff --git a/vendor/k8s.io/client-go/tools/cache/mutation_detector.go b/vendor/k8s.io/client-go/tools/cache/mutation_detector.go index adb5b8be8..e2aa44848 100644 --- a/vendor/k8s.io/client-go/tools/cache/mutation_detector.go +++ b/vendor/k8s.io/client-go/tools/cache/mutation_detector.go @@ -24,7 +24,7 @@ import ( "sync" "time" - "k8s.io/klog" + "github.com/golang/glog" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/diff" @@ -45,7 +45,7 @@ func NewCacheMutationDetector(name string) CacheMutationDetector { if !mutationDetectionEnabled { return dummyMutationDetector{} } - klog.Warningln("Mutation detector is enabled, this will result in memory leakage.") + glog.Warningln("Mutation detector is enabled, this will result in memory leakage.") return &defaultCacheMutationDetector{name: name, period: 1 * time.Second} } diff --git a/vendor/k8s.io/client-go/tools/cache/reflector.go b/vendor/k8s.io/client-go/tools/cache/reflector.go index 2daa44ba5..9ee7efcbb 100644 --- a/vendor/k8s.io/client-go/tools/cache/reflector.go +++ b/vendor/k8s.io/client-go/tools/cache/reflector.go @@ -17,7 +17,6 @@ limitations under the License. package cache import ( - "context" "errors" "fmt" "io" @@ -25,10 +24,14 @@ import ( "net" "net/url" "reflect" + "strconv" + "strings" "sync" + "sync/atomic" "syscall" "time" + "github.com/golang/glog" apierrs "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -38,9 +41,6 @@ import ( utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/watch" - "k8s.io/client-go/tools/pager" - "k8s.io/klog" - "k8s.io/utils/trace" ) // Reflector watches a specified resource and causes all changes to be reflected in the given store. 
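For orientation alongside this vendored hunk: the consolidated ListerWatcher contract above is typically satisfied through the cache.ListWatch adapter, whose ListFunc/WatchFunc fields correspond to the ListFunc type shown in this file. The following is a minimal sketch, not part of the patch; the helper name newPodReflector and the typed clientset are assumptions, and the context-free List/Watch signatures match this client-go vintage.

package example

import (
	"time"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
)

// newPodReflector (hypothetical helper) adapts a typed clientset to the
// ListerWatcher interface via cache.ListWatch and wires it into a Reflector
// that mirrors Pods into a local Store.
func newPodReflector(cs kubernetes.Interface, ns string) (cache.Store, *cache.Reflector) {
	lw := &cache.ListWatch{
		// List seeds the store; its ResourceVersion anchors the watch that follows.
		ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
			return cs.CoreV1().Pods(ns).List(options)
		},
		// Watch streams incremental changes from that ResourceVersion onward.
		WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
			return cs.CoreV1().Pods(ns).Watch(options)
		},
	}
	store := cache.NewStore(cache.MetaNamespaceKeyFunc)
	// A non-zero resync period periodically triggers the store's Resync (see the
	// "forcing resync" branch in ListAndWatch below); it does not re-list.
	return store, cache.NewReflector(lw, &corev1.Pod{}, store, 30*time.Second)
}

A caller would start it with go reflector.Run(stopCh) and read from the returned store.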
@@ -69,9 +69,6 @@ type Reflector struct { lastSyncResourceVersion string // lastSyncResourceVersionMutex guards read/write access to lastSyncResourceVersion lastSyncResourceVersionMutex sync.RWMutex - // WatchListPageSize is the requested chunk size of initial and resync watch lists. - // Defaults to pager.PageSize. - WatchListPageSize int64 } var ( @@ -83,7 +80,7 @@ var ( // NewNamespaceKeyedIndexerAndReflector creates an Indexer and a Reflector // The indexer is configured to key on namespace func NewNamespaceKeyedIndexerAndReflector(lw ListerWatcher, expectedType interface{}, resyncPeriod time.Duration) (indexer Indexer, reflector *Reflector) { - indexer = NewIndexer(MetaNamespaceKeyFunc, Indexers{NamespaceIndex: MetaNamespaceIndexFunc}) + indexer = NewIndexer(MetaNamespaceKeyFunc, Indexers{"namespace": MetaNamespaceIndexFunc}) reflector = NewReflector(lw, expectedType, indexer, resyncPeriod) return indexer, reflector } @@ -98,10 +95,17 @@ func NewReflector(lw ListerWatcher, expectedType interface{}, store Store, resyn return NewNamedReflector(naming.GetNameFromCallsite(internalPackages...), lw, expectedType, store, resyncPeriod) } +// reflectorDisambiguator is used to disambiguate started reflectors. +// initialized to an unstable value to ensure meaning isn't attributed to the suffix. +var reflectorDisambiguator = int64(time.Now().UnixNano() % 12345) + // NewNamedReflector same as NewReflector, but with a specified name for logging func NewNamedReflector(name string, lw ListerWatcher, expectedType interface{}, store Store, resyncPeriod time.Duration) *Reflector { + reflectorSuffix := atomic.AddInt64(&reflectorDisambiguator, 1) r := &Reflector{ - name: name, + name: name, + // we need this to be unique per process (some names are still the same) but obvious who it belongs to + metrics: newReflectorMetrics(makeValidPrometheusMetricLabel(fmt.Sprintf("reflector_"+name+"_%d", reflectorSuffix))), listerWatcher: lw, store: store, expectedType: reflect.TypeOf(expectedType), @@ -112,6 +116,11 @@ func NewNamedReflector(name string, lw ListerWatcher, expectedType interface{}, return r } +func makeValidPrometheusMetricLabel(in string) string { + // this isn't perfect, but it removes our common characters + return strings.NewReplacer("/", "_", ".", "_", "-", "_", ":", "_").Replace(in) +} + // internalPackages are packages that are ignored when creating a default reflector name. These packages are in the common // call chains to NewReflector, so they'd be low entropy names for reflectors var internalPackages = []string{"client-go/tools/cache/"} @@ -119,7 +128,7 @@ var internalPackages = []string{"client-go/tools/cache/"} // Run starts a watch and handles watch events. Will restart the watch if it is closed. // Run will exit when stopCh is closed. func (r *Reflector) Run(stopCh <-chan struct{}) { - klog.V(3).Infof("Starting reflector %v (%s) from %s", r.expectedType, r.resyncPeriod, r.name) + glog.V(3).Infof("Starting reflector %v (%s) from %s", r.expectedType, r.resyncPeriod, r.name) wait.Until(func() { if err := r.ListAndWatch(stopCh); err != nil { utilruntime.HandleError(err) @@ -157,71 +166,34 @@ func (r *Reflector) resyncChan() (<-chan time.Time, func() bool) { // and then use the resource version to watch. // It returns error if ListAndWatch didn't even try to initialize watch.
func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error { - klog.V(3).Infof("Listing and watching %v from %s", r.expectedType, r.name) + glog.V(3).Infof("Listing and watching %v from %s", r.expectedType, r.name) var resourceVersion string // Explicitly set "0" as resource version - it's fine for the List() // to be served from cache and potentially be delayed relative to // etcd contents. Reflector framework will catch up via Watch() eventually. options := metav1.ListOptions{ResourceVersion: "0"} - - if err := func() error { - initTrace := trace.New("Reflector " + r.name + " ListAndWatch") - defer initTrace.LogIfLong(10 * time.Second) - var list runtime.Object - var err error - listCh := make(chan struct{}, 1) - panicCh := make(chan interface{}, 1) - go func() { - defer func() { - if r := recover(); r != nil { - panicCh <- r - } - }() - // Attempt to gather list in chunks, if supported by listerWatcher, if not, the first - // list request will return the full response. - pager := pager.New(pager.SimplePageFunc(func(opts metav1.ListOptions) (runtime.Object, error) { - return r.listerWatcher.List(opts) - })) - if r.WatchListPageSize != 0 { - pager.PageSize = r.WatchListPageSize - } - // Pager falls back to full list if paginated list calls fail due to an "Expired" error. - list, err = pager.List(context.Background(), options) - close(listCh) - }() - select { - case <-stopCh: - return nil - case r := <-panicCh: - panic(r) - case <-listCh: - } - if err != nil { - return fmt.Errorf("%s: Failed to list %v: %v", r.name, r.expectedType, err) - } - initTrace.Step("Objects listed") - listMetaInterface, err := meta.ListAccessor(list) - if err != nil { - return fmt.Errorf("%s: Unable to understand list result %#v: %v", r.name, list, err) - } - resourceVersion = listMetaInterface.GetResourceVersion() - initTrace.Step("Resource version extracted") - items, err := meta.ExtractList(list) - if err != nil { - return fmt.Errorf("%s: Unable to understand list result %#v (%v)", r.name, list, err) - } - initTrace.Step("Objects extracted") - if err := r.syncWith(items, resourceVersion); err != nil { - return fmt.Errorf("%s: Unable to sync list result: %v", r.name, err) - } - initTrace.Step("SyncWith done") - r.setLastSyncResourceVersion(resourceVersion) - initTrace.Step("Resource version updated") - return nil - }(); err != nil { - return err + r.metrics.numberOfLists.Inc() + start := r.clock.Now() + list, err := r.listerWatcher.List(options) + if err != nil { + return fmt.Errorf("%s: Failed to list %v: %v", r.name, r.expectedType, err) } + r.metrics.listDuration.Observe(time.Since(start).Seconds()) + listMetaInterface, err := meta.ListAccessor(list) + if err != nil { + return fmt.Errorf("%s: Unable to understand list result %#v: %v", r.name, list, err) + } + resourceVersion = listMetaInterface.GetResourceVersion() + items, err := meta.ExtractList(list) + if err != nil { + return fmt.Errorf("%s: Unable to understand list result %#v (%v)", r.name, list, err) + } + r.metrics.numberOfItemsInList.Observe(float64(len(items))) + if err := r.syncWith(items, resourceVersion); err != nil { + return fmt.Errorf("%s: Unable to sync list result: %v", r.name, err) + } + r.setLastSyncResourceVersion(resourceVersion) resyncerrc := make(chan error, 1) cancelCh := make(chan struct{}) @@ -240,7 +212,7 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error { return } if r.ShouldResync == nil || r.ShouldResync() { - klog.V(4).Infof("%s: forcing resync", r.name) + glog.V(4).Infof("%s: forcing resync", r.name) if err 
:= r.store.Resync(); err != nil { resyncerrc <- err return @@ -265,20 +237,16 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error { // We want to avoid situations of hanging watchers. Stop any watchers that do not // receive any events within the timeout window. TimeoutSeconds: &timeoutSeconds, - // To reduce load on kube-apiserver on watch restarts, you may enable watch bookmarks. - // Reflector doesn't assume bookmarks are returned at all (if the server do not support - // watch bookmarks, it will ignore this field). - // Disabled in Alpha release of watch bookmarks feature. - AllowWatchBookmarks: false, } + r.metrics.numberOfWatches.Inc() w, err := r.listerWatcher.Watch(options) if err != nil { switch err { case io.EOF: // watch closed normally case io.ErrUnexpectedEOF: - klog.V(1).Infof("%s: Watch for %v closed with unexpected EOF: %v", r.name, r.expectedType, err) + glog.V(1).Infof("%s: Watch for %v closed with unexpected EOF: %v", r.name, r.expectedType, err) default: utilruntime.HandleError(fmt.Errorf("%s: Failed to watch %v: %v", r.name, r.expectedType, err)) } @@ -299,7 +267,7 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error { if err := r.watchHandler(w, &resourceVersion, resyncerrc, stopCh); err != nil { if err != errorStopRequested { - klog.Warningf("%s: watch of %v ended with: %v", r.name, r.expectedType, err) + glog.Warningf("%s: watch of %v ended with: %v", r.name, r.expectedType, err) } return nil } @@ -323,6 +291,11 @@ func (r *Reflector) watchHandler(w watch.Interface, resourceVersion *string, err // Stopping the watcher should be idempotent and if we return from this function there's no way // we're coming back in with the same watch interface. defer w.Stop() + // update metrics + defer func() { + r.metrics.numberOfItemsInWatch.Observe(float64(eventCount)) + r.metrics.watchDuration.Observe(time.Since(start).Seconds()) + }() loop: for { @@ -367,8 +340,6 @@ loop: if err != nil { utilruntime.HandleError(fmt.Errorf("%s: unable to delete watch event object (%#v) from store: %v", r.name, event.Object, err)) } - case watch.Bookmark: - // A `Bookmark` means watch has synced here, just update the resourceVersion default: utilruntime.HandleError(fmt.Errorf("%s: unable to understand watch event %#v", r.name, event)) } @@ -378,11 +349,12 @@ loop: } } - watchDuration := r.clock.Since(start) + watchDuration := r.clock.Now().Sub(start) if watchDuration < 1*time.Second && eventCount == 0 { + r.metrics.numberOfShortWatches.Inc() return fmt.Errorf("very short watch: %s: Unexpected watch close - watch lasted less than a second and no items received", r.name) } - klog.V(4).Infof("%s: Watch close - %v total %v items received", r.name, r.expectedType, eventCount) + glog.V(4).Infof("%s: Watch close - %v total %v items received", r.name, r.expectedType, eventCount) return nil } @@ -398,4 +370,9 @@ func (r *Reflector) setLastSyncResourceVersion(v string) { r.lastSyncResourceVersionMutex.Lock() defer r.lastSyncResourceVersionMutex.Unlock() r.lastSyncResourceVersion = v + + rv, err := strconv.Atoi(v) + if err == nil { + r.metrics.lastResourceVersion.Set(float64(rv)) + } } diff --git a/vendor/k8s.io/client-go/tools/cache/reflector_metrics.go b/vendor/k8s.io/client-go/tools/cache/reflector_metrics.go index dd849c8fa..0945e5c3a 100644 --- a/vendor/k8s.io/client-go/tools/cache/reflector_metrics.go +++ b/vendor/k8s.io/client-go/tools/cache/reflector_metrics.go @@ -94,6 +94,23 @@ var metricsFactory = struct { metricsProvider: noopMetricsProvider{}, } +func
newReflectorMetrics(name string) *reflectorMetrics { + var ret *reflectorMetrics + if len(name) == 0 { + return ret + } + return &reflectorMetrics{ + numberOfLists: metricsFactory.metricsProvider.NewListsMetric(name), + listDuration: metricsFactory.metricsProvider.NewListDurationMetric(name), + numberOfItemsInList: metricsFactory.metricsProvider.NewItemsInListMetric(name), + numberOfWatches: metricsFactory.metricsProvider.NewWatchesMetric(name), + numberOfShortWatches: metricsFactory.metricsProvider.NewShortWatchesMetric(name), + watchDuration: metricsFactory.metricsProvider.NewWatchDurationMetric(name), + numberOfItemsInWatch: metricsFactory.metricsProvider.NewItemsInWatchMetric(name), + lastResourceVersion: metricsFactory.metricsProvider.NewLastResourceVersionMetric(name), + } +} + // SetReflectorMetricsProvider sets the metrics provider func SetReflectorMetricsProvider(metricsProvider MetricsProvider) { metricsFactory.setProviders.Do(func() { diff --git a/vendor/k8s.io/client-go/tools/cache/shared_informer.go b/vendor/k8s.io/client-go/tools/cache/shared_informer.go index 3271d959f..5f8c507f9 100644 --- a/vendor/k8s.io/client-go/tools/cache/shared_informer.go +++ b/vendor/k8s.io/client-go/tools/cache/shared_informer.go @@ -25,90 +25,37 @@ import ( "k8s.io/apimachinery/pkg/util/clock" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/util/buffer" "k8s.io/client-go/util/retry" - "k8s.io/utils/buffer" - "k8s.io/klog" + "github.com/golang/glog" ) -// SharedInformer provides eventually consistent linkage of its -// clients to the authoritative state of a given collection of -// objects. An object is identified by its API group, kind/resource, -// namespace, and name. One SharedInfomer provides linkage to objects -// of a particular API group and kind/resource. The linked object -// collection of a SharedInformer may be further restricted to one -// namespace and/or by label selector and/or field selector. -// -// The authoritative state of an object is what apiservers provide -// access to, and an object goes through a strict sequence of states. -// A state is either "absent" or present with a ResourceVersion and -// other appropriate content. -// -// A SharedInformer maintains a local cache, exposed by Store(), of -// the state of each relevant object. This cache is eventually -// consistent with the authoritative state. This means that, unless -// prevented by persistent communication problems, if ever a -// particular object ID X is authoritatively associated with a state S -// then for every SharedInformer I whose collection includes (X, S) -// eventually either (1) I's cache associates X with S or a later -// state of X, (2) I is stopped, or (3) the authoritative state -// service for X terminates. To be formally complete, we say that the -// absent state meets any restriction by label selector or field -// selector. -// -// As a simple example, if a collection of objects is henceforeth -// unchanging and a SharedInformer is created that links to that -// collection then that SharedInformer's cache eventually holds an -// exact copy of that collection (unless it is stopped too soon, the -// authoritative state service ends, or communication problems between -// the two persistently thwart achievement). 
-// -// As another simple example, if the local cache ever holds a -// non-absent state for some object ID and the object is eventually -// removed from the authoritative state then eventually the object is -// removed from the local cache (unless the SharedInformer is stopped -// too soon, the authoritative state service emnds, or communication -// problems persistently thwart the desired result). -// -// The keys in Store() are of the form namespace/name for namespaced -// objects, and are simply the name for non-namespaced objects. -// -// A client is identified here by a ResourceEventHandler. For every -// update to the SharedInformer's local cache and for every client, -// eventually either the SharedInformer is stopped or the client is -// notified of the update. These notifications happen after the -// corresponding cache update and, in the case of a -// SharedIndexInformer, after the corresponding index updates. It is -// possible that additional cache and index updates happen before such -// a prescribed notification. For a given SharedInformer and client, -// all notifications are delivered sequentially. For a given -// SharedInformer, client, and object ID, the notifications are -// delivered in order. -// -// A delete notification exposes the last locally known non-absent -// state, except that its ResourceVersion is replaced with a -// ResourceVersion in which the object is actually absent. +// SharedInformer has a shared data cache and is capable of distributing notifications for changes +// to the cache to multiple listeners who registered via AddEventHandler. If you use this, there is +// one behavior change compared to a standard Informer. When you receive a notification, the cache +// will be AT LEAST as fresh as the notification, but it MAY be more fresh. You should NOT depend +// on the contents of the cache exactly matching the notification you've received in handler +// functions. If there was a create, followed by a delete, the cache may NOT have your item. This +// has advantages over the broadcaster since it allows us to share a common cache across many +// controllers. Extending the broadcaster would have required us to keep duplicate caches for each +// watch. type SharedInformer interface { // AddEventHandler adds an event handler to the shared informer using the shared informer's resync // period. Events to a single handler are delivered sequentially, but there is no coordination // between different handlers. AddEventHandler(handler ResourceEventHandler) - // AddEventHandlerWithResyncPeriod adds an event handler to the - // shared informer using the specified resync period. The resync - // operation consists of delivering to the handler a create - // notification for every object in the informer's local cache; it - // does not add any interactions with the authoritative storage. + // AddEventHandlerWithResyncPeriod adds an event handler to the shared informer using the + // specified resync period. Events to a single handler are delivered sequentially, but there is + // no coordination between different handlers. AddEventHandlerWithResyncPeriod(handler ResourceEventHandler, resyncPeriod time.Duration) - // GetStore returns the informer's local cache as a Store. + // GetStore returns the Store. GetStore() Store // GetController gives back a synthetic interface that "votes" to start the informer GetController() Controller - // Run starts and runs the shared informer, returning after it stops. - // The informer will be stopped when stopCh is closed.
+ // Run starts the shared informer, which will be stopped when stopCh is closed. Run(stopCh <-chan struct{}) - // HasSynced returns true if the shared informer's store has been - // informed by at least one full LIST of the authoritative state - // of the informer's object collection. This is unrelated to "resync". + // HasSynced returns true if the shared informer's store has synced. HasSynced() bool // LastSyncResourceVersion is the resource version observed when last synced with the underlying // store. The value returned is not synchronized with access to the underlying store and is not @@ -139,7 +86,7 @@ func NewSharedIndexInformer(lw ListerWatcher, objType runtime.Object, defaultEve resyncCheckPeriod: defaultEventHandlerResyncPeriod, defaultEventHandlerResyncPeriod: defaultEventHandlerResyncPeriod, cacheMutationDetector: NewCacheMutationDetector(fmt.Sprintf("%T", objType)), - clock: realClock, + clock: realClock, } return sharedIndexInformer } @@ -169,11 +116,11 @@ func WaitForCacheSync(stopCh <-chan struct{}, cacheSyncs ...InformerSynced) bool }, stopCh) if err != nil { - klog.V(2).Infof("stop requested") + glog.V(2).Infof("stop requested") return false } - klog.V(4).Infof("caches populated") + glog.V(4).Infof("caches populated") return true } @@ -332,11 +279,11 @@ func determineResyncPeriod(desired, check time.Duration) time.Duration { return desired } if check == 0 { - klog.Warningf("The specified resyncPeriod %v is invalid because this shared informer doesn't support resyncing", desired) + glog.Warningf("The specified resyncPeriod %v is invalid because this shared informer doesn't support resyncing", desired) return 0 } if desired < check { - klog.Warningf("The specified resyncPeriod %v is being increased to the minimum resyncCheckPeriod %v", desired, check) + glog.Warningf("The specified resyncPeriod %v is being increased to the minimum resyncCheckPeriod %v", desired, check) return check } return desired @@ -349,19 +296,19 @@ func (s *sharedIndexInformer) AddEventHandlerWithResyncPeriod(handler ResourceEv defer s.startedLock.Unlock() if s.stopped { - klog.V(2).Infof("Handler %v was not added to shared informer because it has stopped already", handler) + glog.V(2).Infof("Handler %v was not added to shared informer because it has stopped already", handler) return } if resyncPeriod > 0 { if resyncPeriod < minimumResyncPeriod { - klog.Warningf("resyncPeriod %d is too small. Changing it to the minimum allowed value of %d", resyncPeriod, minimumResyncPeriod) + glog.Warningf("resyncPeriod %d is too small. Changing it to the minimum allowed value of %d", resyncPeriod, minimumResyncPeriod) resyncPeriod = minimumResyncPeriod } if resyncPeriod < s.resyncCheckPeriod { if s.started { - klog.Warningf("resyncPeriod %d is smaller than resyncCheckPeriod %d and the informer has already started. Changing it to %d", resyncPeriod, s.resyncCheckPeriod, s.resyncCheckPeriod) + glog.Warningf("resyncPeriod %d is smaller than resyncCheckPeriod %d and the informer has already started. 
Changing it to %d", resyncPeriod, s.resyncCheckPeriod, s.resyncCheckPeriod) resyncPeriod = s.resyncCheckPeriod } else { // if the event handler's resyncPeriod is smaller than the current resyncCheckPeriod, update @@ -608,7 +555,7 @@ func (p *processorListener) run() { case deleteNotification: p.handler.OnDelete(notification.oldObj) default: - utilruntime.HandleError(fmt.Errorf("unrecognized notification: %T", next)) + utilruntime.HandleError(fmt.Errorf("unrecognized notification: %#v", next)) } } // the only way to get here is if the p.nextCh is empty and closed diff --git a/vendor/k8s.io/client-go/tools/cache/store.go b/vendor/k8s.io/client-go/tools/cache/store.go index fc844efe6..4958987f0 100755 --- a/vendor/k8s.io/client-go/tools/cache/store.go +++ b/vendor/k8s.io/client-go/tools/cache/store.go @@ -210,7 +210,7 @@ func (c *cache) GetByKey(key string) (item interface{}, exists bool, err error) // 'c' takes ownership of the list, you should not reference the list again // after calling this function. func (c *cache) Replace(list []interface{}, resourceVersion string) error { - items := make(map[string]interface{}, len(list)) + items := map[string]interface{}{} for _, item := range list { key, err := c.keyFunc(item) if err != nil { diff --git a/vendor/k8s.io/client-go/tools/cache/thread_safe_store.go b/vendor/k8s.io/client-go/tools/cache/thread_safe_store.go index b74faa019..1c201efb6 100644 --- a/vendor/k8s.io/client-go/tools/cache/thread_safe_store.go +++ b/vendor/k8s.io/client-go/tools/cache/thread_safe_store.go @@ -148,19 +148,12 @@ func (c *threadSafeMap) Index(indexName string, obj interface{}) ([]interface{}, } index := c.indices[indexName] - var returnKeySet sets.String - if len(indexKeys) == 1 { - // In majority of cases, there is exactly one value matching. - // Optimize the most common path - deduping is not needed here. - returnKeySet = index[indexKeys[0]] - } else { - // Need to de-dupe the return list. - // Since multiple keys are allowed, this can happen. - returnKeySet = sets.String{} - for _, indexKey := range indexKeys { - for key := range index[indexKey] { - returnKeySet.Insert(key) - } + // need to de-dupe the return list. Since multiple keys are allowed, this can happen. + returnKeySet := sets.String{} + for _, indexKey := range indexKeys { + set := index[indexKey] + for _, key := range set.UnsortedList() { + returnKeySet.Insert(key) } } @@ -185,7 +178,7 @@ func (c *threadSafeMap) ByIndex(indexName, indexKey string) ([]interface{}, erro set := index[indexKey] list := make([]interface{}, 0, set.Len()) - for key := range set { + for _, key := range set.List() { list = append(list, c.items[key]) } diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/doc.go b/vendor/k8s.io/client-go/tools/clientcmd/api/doc.go index 5871575a6..0a081871a 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/api/doc.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/api/doc.go @@ -15,5 +15,4 @@ limitations under the License. */ // +k8s:deepcopy-gen=package - package api diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/latest/latest.go b/vendor/k8s.io/client-go/tools/clientcmd/api/latest/latest.go new file mode 100644 index 000000000..35bb5dde1 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/clientcmd/api/latest/latest.go @@ -0,0 +1,61 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package latest + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer/json" + "k8s.io/apimachinery/pkg/runtime/serializer/versioning" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/client-go/tools/clientcmd/api" + "k8s.io/client-go/tools/clientcmd/api/v1" +) + +// Version is the string that represents the current external default version. +const Version = "v1" + +var ExternalVersion = schema.GroupVersion{Group: "", Version: "v1"} + +// OldestVersion is the string that represents the oldest server version supported, +// for client code that wants to hardcode the lowest common denominator. +const OldestVersion = "v1" + +// Versions is the list of versions that are recognized in code. The order provided +// may be assumed to be least feature rich to most feature rich, and clients may +// choose to prefer the latter items in the list over the former items when presented +// with a set of versions to choose. +var Versions = []string{"v1"} + +var ( + Codec runtime.Codec + Scheme *runtime.Scheme +) + +func init() { + Scheme = runtime.NewScheme() + utilruntime.Must(api.AddToScheme(Scheme)) + utilruntime.Must(v1.AddToScheme(Scheme)) + yamlSerializer := json.NewYAMLSerializer(json.DefaultMetaFactory, Scheme, Scheme) + Codec = versioning.NewDefaultingCodecForScheme( + Scheme, + yamlSerializer, + yamlSerializer, + schema.GroupVersion{Version: Version}, + runtime.InternalGroupVersioner, + ) +} diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/types.go b/vendor/k8s.io/client-go/tools/clientcmd/api/types.go index 990a440c6..1391df702 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/api/types.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/api/types.go @@ -17,8 +17,6 @@ limitations under the License. package api import ( - "fmt" - "k8s.io/apimachinery/pkg/runtime" ) @@ -152,25 +150,6 @@ type AuthProviderConfig struct { Config map[string]string `json:"config,omitempty"` } -var _ fmt.Stringer = new(AuthProviderConfig) -var _ fmt.GoStringer = new(AuthProviderConfig) - -// GoString implements fmt.GoStringer and sanitizes sensitive fields of -// AuthProviderConfig to prevent accidental leaking via logs. -func (c AuthProviderConfig) GoString() string { - return c.String() -} - -// String implements fmt.Stringer and sanitizes sensitive fields of -// AuthProviderConfig to prevent accidental leaking via logs. -func (c AuthProviderConfig) String() string { - cfg := "" - if c.Config != nil { - cfg = "--- REDACTED ---" - } - return fmt.Sprintf("api.AuthProviderConfig{Name: %q, Config: map[string]string{%s}}", c.Name, cfg) -} - // ExecConfig specifies a command to provide client credentials. The command is exec'd // and outputs structured stdout holding credentials. // @@ -193,29 +172,6 @@ type ExecConfig struct { APIVersion string `json:"apiVersion,omitempty"` } -var _ fmt.Stringer = new(ExecConfig) -var _ fmt.GoStringer = new(ExecConfig) - -// GoString implements fmt.GoStringer and sanitizes sensitive fields of -// ExecConfig to prevent accidental leaking via logs. 
-func (c ExecConfig) GoString() string { - return c.String() -} - -// String implements fmt.Stringer and sanitizes sensitive fields of ExecConfig -// to prevent accidental leaking via logs. -func (c ExecConfig) String() string { - var args []string - if len(c.Args) > 0 { - args = []string{"--- REDACTED ---"} - } - env := "[]ExecEnvVar(nil)" - if len(c.Env) > 0 { - env = "[]ExecEnvVar{--- REDACTED ---}" - } - return fmt.Sprintf("api.AuthProviderConfig{Command: %q, Args: %#v, Env: %s, APIVersion: %q}", c.Command, args, env, c.APIVersion) -} - // ExecEnvVar is used for setting environment variables when executing an exec-based // credential plugin. type ExecEnvVar struct { diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/v1/conversion.go b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/conversion.go new file mode 100644 index 000000000..2d7142e6e --- /dev/null +++ b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/conversion.go @@ -0,0 +1,244 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "fmt" + "sort" + + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/tools/clientcmd/api" +) + +func addConversionFuncs(scheme *runtime.Scheme) error { + return scheme.AddConversionFuncs( + func(in *Cluster, out *api.Cluster, s conversion.Scope) error { + return s.DefaultConvert(in, out, conversion.IgnoreMissingFields) + }, + func(in *api.Cluster, out *Cluster, s conversion.Scope) error { + return s.DefaultConvert(in, out, conversion.IgnoreMissingFields) + }, + func(in *Preferences, out *api.Preferences, s conversion.Scope) error { + return s.DefaultConvert(in, out, conversion.IgnoreMissingFields) + }, + func(in *api.Preferences, out *Preferences, s conversion.Scope) error { + return s.DefaultConvert(in, out, conversion.IgnoreMissingFields) + }, + func(in *AuthInfo, out *api.AuthInfo, s conversion.Scope) error { + return s.DefaultConvert(in, out, conversion.IgnoreMissingFields) + }, + func(in *api.AuthInfo, out *AuthInfo, s conversion.Scope) error { + return s.DefaultConvert(in, out, conversion.IgnoreMissingFields) + }, + func(in *Context, out *api.Context, s conversion.Scope) error { + return s.DefaultConvert(in, out, conversion.IgnoreMissingFields) + }, + func(in *api.Context, out *Context, s conversion.Scope) error { + return s.DefaultConvert(in, out, conversion.IgnoreMissingFields) + }, + + func(in *Config, out *api.Config, s conversion.Scope) error { + out.CurrentContext = in.CurrentContext + if err := s.Convert(&in.Preferences, &out.Preferences, 0); err != nil { + return err + } + + out.Clusters = make(map[string]*api.Cluster) + if err := s.Convert(&in.Clusters, &out.Clusters, 0); err != nil { + return err + } + out.AuthInfos = make(map[string]*api.AuthInfo) + if err := s.Convert(&in.AuthInfos, &out.AuthInfos, 0); err != nil { + return err + } + out.Contexts = make(map[string]*api.Context) + if err := s.Convert(&in.Contexts, &out.Contexts, 0); err != nil { + return err + } + out.Extensions = 
make(map[string]runtime.Object) + if err := s.Convert(&in.Extensions, &out.Extensions, 0); err != nil { + return err + } + return nil + }, + func(in *api.Config, out *Config, s conversion.Scope) error { + out.CurrentContext = in.CurrentContext + if err := s.Convert(&in.Preferences, &out.Preferences, 0); err != nil { + return err + } + + out.Clusters = make([]NamedCluster, 0, 0) + if err := s.Convert(&in.Clusters, &out.Clusters, 0); err != nil { + return err + } + out.AuthInfos = make([]NamedAuthInfo, 0, 0) + if err := s.Convert(&in.AuthInfos, &out.AuthInfos, 0); err != nil { + return err + } + out.Contexts = make([]NamedContext, 0, 0) + if err := s.Convert(&in.Contexts, &out.Contexts, 0); err != nil { + return err + } + out.Extensions = make([]NamedExtension, 0, 0) + if err := s.Convert(&in.Extensions, &out.Extensions, 0); err != nil { + return err + } + return nil + }, + func(in *[]NamedCluster, out *map[string]*api.Cluster, s conversion.Scope) error { + for _, curr := range *in { + newCluster := api.NewCluster() + if err := s.Convert(&curr.Cluster, newCluster, 0); err != nil { + return err + } + if (*out)[curr.Name] == nil { + (*out)[curr.Name] = newCluster + } else { + return fmt.Errorf("error converting *[]NamedCluster into *map[string]*api.Cluster: duplicate name \"%v\" in list: %v", curr.Name, *in) + } + } + + return nil + }, + func(in *map[string]*api.Cluster, out *[]NamedCluster, s conversion.Scope) error { + allKeys := make([]string, 0, len(*in)) + for key := range *in { + allKeys = append(allKeys, key) + } + sort.Strings(allKeys) + + for _, key := range allKeys { + newCluster := (*in)[key] + oldCluster := &Cluster{} + if err := s.Convert(newCluster, oldCluster, 0); err != nil { + return err + } + + namedCluster := NamedCluster{key, *oldCluster} + *out = append(*out, namedCluster) + } + + return nil + }, + func(in *[]NamedAuthInfo, out *map[string]*api.AuthInfo, s conversion.Scope) error { + for _, curr := range *in { + newAuthInfo := api.NewAuthInfo() + if err := s.Convert(&curr.AuthInfo, newAuthInfo, 0); err != nil { + return err + } + if (*out)[curr.Name] == nil { + (*out)[curr.Name] = newAuthInfo + } else { + return fmt.Errorf("error converting *[]NamedAuthInfo into *map[string]*api.AuthInfo: duplicate name \"%v\" in list: %v", curr.Name, *in) + } + } + + return nil + }, + func(in *map[string]*api.AuthInfo, out *[]NamedAuthInfo, s conversion.Scope) error { + allKeys := make([]string, 0, len(*in)) + for key := range *in { + allKeys = append(allKeys, key) + } + sort.Strings(allKeys) + + for _, key := range allKeys { + newAuthInfo := (*in)[key] + oldAuthInfo := &AuthInfo{} + if err := s.Convert(newAuthInfo, oldAuthInfo, 0); err != nil { + return err + } + + namedAuthInfo := NamedAuthInfo{key, *oldAuthInfo} + *out = append(*out, namedAuthInfo) + } + + return nil + }, + func(in *[]NamedContext, out *map[string]*api.Context, s conversion.Scope) error { + for _, curr := range *in { + newContext := api.NewContext() + if err := s.Convert(&curr.Context, newContext, 0); err != nil { + return err + } + if (*out)[curr.Name] == nil { + (*out)[curr.Name] = newContext + } else { + return fmt.Errorf("error converting *[]NamedContext into *map[string]*api.Context: duplicate name \"%v\" in list: %v", curr.Name, *in) + } + } + + return nil + }, + func(in *map[string]*api.Context, out *[]NamedContext, s conversion.Scope) error { + allKeys := make([]string, 0, len(*in)) + for key := range *in { + allKeys = append(allKeys, key) + } + sort.Strings(allKeys) + + for _, key := range allKeys { + 
newContext := (*in)[key] + oldContext := &Context{} + if err := s.Convert(newContext, oldContext, 0); err != nil { + return err + } + + namedContext := NamedContext{key, *oldContext} + *out = append(*out, namedContext) + } + + return nil + }, + func(in *[]NamedExtension, out *map[string]runtime.Object, s conversion.Scope) error { + for _, curr := range *in { + var newExtension runtime.Object + if err := s.Convert(&curr.Extension, &newExtension, 0); err != nil { + return err + } + if (*out)[curr.Name] == nil { + (*out)[curr.Name] = newExtension + } else { + return fmt.Errorf("error converting *[]NamedExtension into *map[string]runtime.Object: duplicate name \"%v\" in list: %v", curr.Name, *in) + } + } + + return nil + }, + func(in *map[string]runtime.Object, out *[]NamedExtension, s conversion.Scope) error { + allKeys := make([]string, 0, len(*in)) + for key := range *in { + allKeys = append(allKeys, key) + } + sort.Strings(allKeys) + + for _, key := range allKeys { + newExtension := (*in)[key] + oldExtension := &runtime.RawExtension{} + if err := s.Convert(newExtension, oldExtension, 0); err != nil { + return err + } + + namedExtension := NamedExtension{key, *oldExtension} + *out = append(*out, namedExtension) + } + + return nil + }, + ) +} diff --git a/vendor/k8s.io/code-generator/_examples/MixedCase/apis/example/v1/doc.go b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/doc.go similarity index 82% rename from vendor/k8s.io/code-generator/_examples/MixedCase/apis/example/v1/doc.go rename to vendor/k8s.io/client-go/tools/clientcmd/api/v1/doc.go index e6614c0da..9750cf73a 100644 --- a/vendor/k8s.io/code-generator/_examples/MixedCase/apis/example/v1/doc.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2016 The Kubernetes Authors. +Copyright 2015 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -15,6 +15,4 @@ limitations under the License. */ // +k8s:deepcopy-gen=package -// +k8s:defaulter-gen=TypeMeta -// +groupName=example.crd.code-generator.k8s.io package v1 diff --git a/vendor/k8s.io/code-generator/_examples/MixedCase/apis/example/v1/register.go b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/register.go similarity index 66% rename from vendor/k8s.io/code-generator/_examples/MixedCase/apis/example/v1/register.go rename to vendor/k8s.io/client-go/tools/clientcmd/api/v1/register.go index 58371e0e9..7b91d5090 100644 --- a/vendor/k8s.io/code-generator/_examples/MixedCase/apis/example/v1/register.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/register.go @@ -1,5 +1,5 @@ /* -Copyright 2015 The Kubernetes Authors. +Copyright 2014 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -17,12 +17,13 @@ limitations under the License. package v1 import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" ) -var SchemeGroupVersion = schema.GroupVersion{Group: "example.crd.code-generator.k8s.io", Version: "v1"} +// SchemeGroupVersion is group version used to register these objects +// TODO this should be in the "kubeconfig" group +var SchemeGroupVersion = schema.GroupVersion{Group: "", Version: "v1"} var ( // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. @@ -36,24 +37,20 @@ func init() { // We only register manually written functions here. 
The registration of the // generated functions takes place in the generated files. The separation // makes the code compile even when the generated files are missing. - localSchemeBuilder.Register(addKnownTypes) + localSchemeBuilder.Register(addKnownTypes, addConversionFuncs) } -// Resource takes an unqualified resource and returns a Group qualified GroupResource -func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -// Adds the list of known types to the given scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, - &TestType{}, - &TestTypeList{}, - ) - - scheme.AddKnownTypes(SchemeGroupVersion, - &metav1.Status{}, + &Config{}, ) - metav1.AddToGroupVersion(scheme, SchemeGroupVersion) return nil } + +func (obj *Config) GetObjectKind() schema.ObjectKind { return obj } +func (obj *Config) SetGroupVersionKind(gvk schema.GroupVersionKind) { + obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind() +} +func (obj *Config) GroupVersionKind() schema.GroupVersionKind { + return schema.FromAPIVersionAndKind(obj.APIVersion, obj.Kind) +} diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/v1/types.go b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/types.go new file mode 100644 index 000000000..56afb608a --- /dev/null +++ b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/types.go @@ -0,0 +1,203 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "k8s.io/apimachinery/pkg/runtime" +) + +// Where possible, json tags match the cli argument names. +// Top level config objects and all values required for proper functioning are not "omitempty". Any truly optional piece of config is allowed to be omitted. + +// Config holds the information needed to connect to remote kubernetes clusters as a given user +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type Config struct { + // Legacy field from pkg/api/types.go TypeMeta. + // TODO(jlowdermilk): remove this after eliminating downstream dependencies. + // +optional + Kind string `json:"kind,omitempty"` + // Legacy field from pkg/api/types.go TypeMeta. + // TODO(jlowdermilk): remove this after eliminating downstream dependencies. + // +optional + APIVersion string `json:"apiVersion,omitempty"` + // Preferences holds general information to be used for cli interactions + Preferences Preferences `json:"preferences"` + // Clusters is a map of referencable names to cluster configs + Clusters []NamedCluster `json:"clusters"` + // AuthInfos is a map of referencable names to user configs + AuthInfos []NamedAuthInfo `json:"users"` + // Contexts is a map of referencable names to context configs + Contexts []NamedContext `json:"contexts"` + // CurrentContext is the name of the context that you would like to use by default + CurrentContext string `json:"current-context"` + // Extensions holds additional information.
This is useful for extenders so that reads and writes don't clobber unknown fields + // +optional + Extensions []NamedExtension `json:"extensions,omitempty"` +} + +type Preferences struct { + // +optional + Colors bool `json:"colors,omitempty"` + // Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields + // +optional + Extensions []NamedExtension `json:"extensions,omitempty"` +} + +// Cluster contains information about how to communicate with a kubernetes cluster +type Cluster struct { + // Server is the address of the kubernetes cluster (https://hostname:port). + Server string `json:"server"` + // InsecureSkipTLSVerify skips the validity check for the server's certificate. This will make your HTTPS connections insecure. + // +optional + InsecureSkipTLSVerify bool `json:"insecure-skip-tls-verify,omitempty"` + // CertificateAuthority is the path to a cert file for the certificate authority. + // +optional + CertificateAuthority string `json:"certificate-authority,omitempty"` + // CertificateAuthorityData contains PEM-encoded certificate authority certificates. Overrides CertificateAuthority + // +optional + CertificateAuthorityData []byte `json:"certificate-authority-data,omitempty"` + // Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields + // +optional + Extensions []NamedExtension `json:"extensions,omitempty"` +} + +// AuthInfo contains information that describes identity information. This is used to tell the kubernetes cluster who you are. +type AuthInfo struct { + // ClientCertificate is the path to a client cert file for TLS. + // +optional + ClientCertificate string `json:"client-certificate,omitempty"` + // ClientCertificateData contains PEM-encoded data from a client cert file for TLS. Overrides ClientCertificate + // +optional + ClientCertificateData []byte `json:"client-certificate-data,omitempty"` + // ClientKey is the path to a client key file for TLS. + // +optional + ClientKey string `json:"client-key,omitempty"` + // ClientKeyData contains PEM-encoded data from a client key file for TLS. Overrides ClientKey + // +optional + ClientKeyData []byte `json:"client-key-data,omitempty"` + // Token is the bearer token for authentication to the kubernetes cluster. + // +optional + Token string `json:"token,omitempty"` + // TokenFile is a pointer to a file that contains a bearer token (as described above). If both Token and TokenFile are present, Token takes precedence. + // +optional + TokenFile string `json:"tokenFile,omitempty"` + // Impersonate is the username to impersonate. The name matches the flag. + // +optional + Impersonate string `json:"as,omitempty"` + // ImpersonateGroups is the groups to impersonate. + // +optional + ImpersonateGroups []string `json:"as-groups,omitempty"` + // ImpersonateUserExtra contains additional information for the impersonated user. + // +optional + ImpersonateUserExtra map[string][]string `json:"as-user-extra,omitempty"` + // Username is the username for basic authentication to the kubernetes cluster. + // +optional + Username string `json:"username,omitempty"` + // Password is the password for basic authentication to the kubernetes cluster. + // +optional + Password string `json:"password,omitempty"` + // AuthProvider specifies a custom authentication plugin for the kubernetes cluster.
+ // +optional + AuthProvider *AuthProviderConfig `json:"auth-provider,omitempty"` + // Exec specifies a custom exec-based authentication plugin for the kubernetes cluster. + // +optional + Exec *ExecConfig `json:"exec,omitempty"` + // Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields + // +optional + Extensions []NamedExtension `json:"extensions,omitempty"` +} + +// Context is a tuple of references to a cluster (how do I communicate with a kubernetes cluster), a user (how do I identify myself), and a namespace (what subset of resources do I want to work with) +type Context struct { + // Cluster is the name of the cluster for this context + Cluster string `json:"cluster"` + // AuthInfo is the name of the authInfo for this context + AuthInfo string `json:"user"` + // Namespace is the default namespace to use on unspecified requests + // +optional + Namespace string `json:"namespace,omitempty"` + // Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields + // +optional + Extensions []NamedExtension `json:"extensions,omitempty"` +} + +// NamedCluster relates nicknames to cluster information +type NamedCluster struct { + // Name is the nickname for this Cluster + Name string `json:"name"` + // Cluster holds the cluster information + Cluster Cluster `json:"cluster"` +} + +// NamedContext relates nicknames to context information +type NamedContext struct { + // Name is the nickname for this Context + Name string `json:"name"` + // Context holds the context information + Context Context `json:"context"` +} + +// NamedAuthInfo relates nicknames to auth information +type NamedAuthInfo struct { + // Name is the nickname for this AuthInfo + Name string `json:"name"` + // AuthInfo holds the auth information + AuthInfo AuthInfo `json:"user"` +} + +// NamedExtension relates nicknames to extension information +type NamedExtension struct { + // Name is the nickname for this Extension + Name string `json:"name"` + // Extension holds the extension information + Extension runtime.RawExtension `json:"extension"` +} + +// AuthProviderConfig holds the configuration for a specified auth provider. +type AuthProviderConfig struct { + Name string `json:"name"` + Config map[string]string `json:"config"` +} + +// ExecConfig specifies a command to provide client credentials. The command is exec'd +// and outputs structured stdout holding credentials. +// +// See the client.authentication.k8s.io API group for specifications of the exact input +// and output format +type ExecConfig struct { + // Command to execute. + Command string `json:"command"` + // Arguments to pass to the command when executing it. + // +optional + Args []string `json:"args"` + // Env defines additional environment variables to expose to the process. These + // are unioned with the host's environment, as well as variables client-go uses + // to pass arguments to the plugin. + // +optional + Env []ExecEnvVar `json:"env"` + + // Preferred input version of the ExecInfo. The returned ExecCredentials MUST use + // the same encoding version as the input. + APIVersion string `json:"apiVersion,omitempty"` +} + +// ExecEnvVar is used for setting environment variables when executing an exec-based +// credential plugin.
+type ExecEnvVar struct { + Name string `json:"name"` + Value string `json:"value"` +} diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.deepcopy.go new file mode 100644 index 000000000..da519dfa3 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.deepcopy.go @@ -0,0 +1,348 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthInfo) DeepCopyInto(out *AuthInfo) { + *out = *in + if in.ClientCertificateData != nil { + in, out := &in.ClientCertificateData, &out.ClientCertificateData + *out = make([]byte, len(*in)) + copy(*out, *in) + } + if in.ClientKeyData != nil { + in, out := &in.ClientKeyData, &out.ClientKeyData + *out = make([]byte, len(*in)) + copy(*out, *in) + } + if in.ImpersonateGroups != nil { + in, out := &in.ImpersonateGroups, &out.ImpersonateGroups + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ImpersonateUserExtra != nil { + in, out := &in.ImpersonateUserExtra, &out.ImpersonateUserExtra + *out = make(map[string][]string, len(*in)) + for key, val := range *in { + var outVal []string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make([]string, len(*in)) + copy(*out, *in) + } + (*out)[key] = outVal + } + } + if in.AuthProvider != nil { + in, out := &in.AuthProvider, &out.AuthProvider + *out = new(AuthProviderConfig) + (*in).DeepCopyInto(*out) + } + if in.Exec != nil { + in, out := &in.Exec, &out.Exec + *out = new(ExecConfig) + (*in).DeepCopyInto(*out) + } + if in.Extensions != nil { + in, out := &in.Extensions, &out.Extensions + *out = make([]NamedExtension, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthInfo. +func (in *AuthInfo) DeepCopy() *AuthInfo { + if in == nil { + return nil + } + out := new(AuthInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthProviderConfig) DeepCopyInto(out *AuthProviderConfig) { + *out = *in + if in.Config != nil { + in, out := &in.Config, &out.Config + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthProviderConfig. 
+func (in *AuthProviderConfig) DeepCopy() *AuthProviderConfig { + if in == nil { + return nil + } + out := new(AuthProviderConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Cluster) DeepCopyInto(out *Cluster) { + *out = *in + if in.CertificateAuthorityData != nil { + in, out := &in.CertificateAuthorityData, &out.CertificateAuthorityData + *out = make([]byte, len(*in)) + copy(*out, *in) + } + if in.Extensions != nil { + in, out := &in.Extensions, &out.Extensions + *out = make([]NamedExtension, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cluster. +func (in *Cluster) DeepCopy() *Cluster { + if in == nil { + return nil + } + out := new(Cluster) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Config) DeepCopyInto(out *Config) { + *out = *in + in.Preferences.DeepCopyInto(&out.Preferences) + if in.Clusters != nil { + in, out := &in.Clusters, &out.Clusters + *out = make([]NamedCluster, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AuthInfos != nil { + in, out := &in.AuthInfos, &out.AuthInfos + *out = make([]NamedAuthInfo, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Contexts != nil { + in, out := &in.Contexts, &out.Contexts + *out = make([]NamedContext, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Extensions != nil { + in, out := &in.Extensions, &out.Extensions + *out = make([]NamedExtension, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Config. +func (in *Config) DeepCopy() *Config { + if in == nil { + return nil + } + out := new(Config) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Config) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Context) DeepCopyInto(out *Context) { + *out = *in + if in.Extensions != nil { + in, out := &in.Extensions, &out.Extensions + *out = make([]NamedExtension, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Context. +func (in *Context) DeepCopy() *Context { + if in == nil { + return nil + } + out := new(Context) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExecConfig) DeepCopyInto(out *ExecConfig) { + *out = *in + if in.Args != nil { + in, out := &in.Args, &out.Args + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]ExecEnvVar, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecConfig. 
+func (in *ExecConfig) DeepCopy() *ExecConfig { + if in == nil { + return nil + } + out := new(ExecConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExecEnvVar) DeepCopyInto(out *ExecEnvVar) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecEnvVar. +func (in *ExecEnvVar) DeepCopy() *ExecEnvVar { + if in == nil { + return nil + } + out := new(ExecEnvVar) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NamedAuthInfo) DeepCopyInto(out *NamedAuthInfo) { + *out = *in + in.AuthInfo.DeepCopyInto(&out.AuthInfo) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedAuthInfo. +func (in *NamedAuthInfo) DeepCopy() *NamedAuthInfo { + if in == nil { + return nil + } + out := new(NamedAuthInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NamedCluster) DeepCopyInto(out *NamedCluster) { + *out = *in + in.Cluster.DeepCopyInto(&out.Cluster) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedCluster. +func (in *NamedCluster) DeepCopy() *NamedCluster { + if in == nil { + return nil + } + out := new(NamedCluster) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NamedContext) DeepCopyInto(out *NamedContext) { + *out = *in + in.Context.DeepCopyInto(&out.Context) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedContext. +func (in *NamedContext) DeepCopy() *NamedContext { + if in == nil { + return nil + } + out := new(NamedContext) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NamedExtension) DeepCopyInto(out *NamedExtension) { + *out = *in + in.Extension.DeepCopyInto(&out.Extension) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedExtension. +func (in *NamedExtension) DeepCopy() *NamedExtension { + if in == nil { + return nil + } + out := new(NamedExtension) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Preferences) DeepCopyInto(out *Preferences) { + *out = *in + if in.Extensions != nil { + in, out := &in.Extensions, &out.Extensions + *out = make([]NamedExtension, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Preferences. 
+func (in *Preferences) DeepCopy() *Preferences {
+	if in == nil {
+		return nil
+	}
+	out := new(Preferences)
+	in.DeepCopyInto(out)
+	return out
+}
diff --git a/vendor/k8s.io/client-go/tools/clientcmd/auth_loaders.go b/vendor/k8s.io/client-go/tools/clientcmd/auth_loaders.go
new file mode 100644
index 000000000..1d3c11d8f
--- /dev/null
+++ b/vendor/k8s.io/client-go/tools/clientcmd/auth_loaders.go
@@ -0,0 +1,111 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package clientcmd
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+
+	"golang.org/x/crypto/ssh/terminal"
+
+	clientauth "k8s.io/client-go/tools/auth"
+)
+
+// AuthLoader is used to build clientauth.Info objects.
+type AuthLoader interface {
+	// LoadAuth takes a path to a config file and can then do anything it needs in order to return a valid clientauth.Info
+	LoadAuth(path string) (*clientauth.Info, error)
+}
+
+// default implementation of an AuthLoader
+type defaultAuthLoader struct{}
+
+// LoadAuth for defaultAuthLoader simply delegates to clientauth.LoadFromFile
+func (*defaultAuthLoader) LoadAuth(path string) (*clientauth.Info, error) {
+	return clientauth.LoadFromFile(path)
+}
+
+// PromptingAuthLoader is an AuthLoader that prompts for missing auth information and writes it back to the file.
+type PromptingAuthLoader struct {
+	reader io.Reader
+}
+
+// LoadAuth parses an AuthInfo object from a file path. It prompts the user and creates the file if it doesn't exist.
+func (a *PromptingAuthLoader) LoadAuth(path string) (*clientauth.Info, error) {
+	// Prompt for user/pass and write a file if none exists.
+	if _, err := os.Stat(path); os.IsNotExist(err) {
+		authPtr, err := a.Prompt()
+		if err != nil {
+			return nil, err
+		}
+		auth := *authPtr
+		data, err := json.Marshal(auth)
+		if err != nil {
+			return &auth, err
+		}
+		err = ioutil.WriteFile(path, data, 0600)
+		return &auth, err
+	}
+	authPtr, err := clientauth.LoadFromFile(path)
+	if err != nil {
+		return nil, err
+	}
+	return authPtr, nil
+}
+
+// Prompt pulls the user and password from a reader
+func (a *PromptingAuthLoader) Prompt() (*clientauth.Info, error) {
+	var err error
+	auth := &clientauth.Info{}
+	auth.User, err = promptForString("Username", a.reader, true)
+	if err != nil {
+		return nil, err
+	}
+	auth.Password, err = promptForString("Password", nil, false)
+	if err != nil {
+		return nil, err
+	}
+	return auth, nil
+}
+
+func promptForString(field string, r io.Reader, show bool) (result string, err error) {
+	fmt.Printf("Please enter %s: ", field)
+	if show {
+		_, err = fmt.Fscan(r, &result)
+	} else {
+		var data []byte
+		if terminal.IsTerminal(int(os.Stdin.Fd())) {
+			data, err = terminal.ReadPassword(int(os.Stdin.Fd()))
+			result = string(data)
+		} else {
+			return "", fmt.Errorf("error reading input for %s", field)
+		}
+	}
+	return result, err
+}
+
+// NewPromptingAuthLoader returns a PromptingAuthLoader that parses an AuthInfo object from a file path. It prompts the user and creates the file if it doesn't exist.
+func NewPromptingAuthLoader(reader io.Reader) *PromptingAuthLoader {
+	return &PromptingAuthLoader{reader}
+}
+
+// NewDefaultAuthLoader returns a default implementation of an AuthLoader that only reads from a config file
+func NewDefaultAuthLoader() AuthLoader {
+	return &defaultAuthLoader{}
+}
diff --git a/vendor/k8s.io/client-go/tools/clientcmd/client_config.go b/vendor/k8s.io/client-go/tools/clientcmd/client_config.go
new file mode 100644
index 000000000..393868e7a
--- /dev/null
+++ b/vendor/k8s.io/client-go/tools/clientcmd/client_config.go
@@ -0,0 +1,569 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package clientcmd
+
+import (
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/url"
+	"os"
+	"strings"
+
+	"github.com/golang/glog"
+	"github.com/imdario/mergo"
+
+	restclient "k8s.io/client-go/rest"
+	clientauth "k8s.io/client-go/tools/auth"
+	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
+)
+
+var (
+	// ClusterDefaults has the same behavior as the old EnvVar and DefaultCluster fields
+	// DEPRECATED will be replaced
+	ClusterDefaults = clientcmdapi.Cluster{Server: getDefaultServer()}
+	// DefaultClientConfig represents the legacy behavior of this package for defaulting
+	// DEPRECATED will be replaced
+	DefaultClientConfig = DirectClientConfig{*clientcmdapi.NewConfig(), "", &ConfigOverrides{
+		ClusterDefaults: ClusterDefaults,
+	}, nil, NewDefaultClientConfigLoadingRules(), promptedCredentials{}}
+)
+
+// getDefaultServer returns a default setting for DefaultClientConfig
+// DEPRECATED
+func getDefaultServer() string {
+	if server := os.Getenv("KUBERNETES_MASTER"); len(server) > 0 {
+		return server
+	}
+	return "http://localhost:8080"
+}
+
+// ClientConfig is used to make it easy to get an API server client
+type ClientConfig interface {
+	// RawConfig returns the merged result of all overrides
+	RawConfig() (clientcmdapi.Config, error)
+	// ClientConfig returns a complete client config
+	ClientConfig() (*restclient.Config, error)
+	// Namespace returns the namespace resulting from the merged
+	// result of all overrides and a boolean indicating if it was
+	// overridden
+	Namespace() (string, bool, error)
+	// ConfigAccess returns the rules for loading/persisting the config.
+ ConfigAccess() ConfigAccess +} + +type PersistAuthProviderConfigForUser func(user string) restclient.AuthProviderConfigPersister + +type promptedCredentials struct { + username string + password string +} + +// DirectClientConfig is a ClientConfig interface that is backed by a clientcmdapi.Config, options overrides, and an optional fallbackReader for auth information +type DirectClientConfig struct { + config clientcmdapi.Config + contextName string + overrides *ConfigOverrides + fallbackReader io.Reader + configAccess ConfigAccess + // promptedCredentials store the credentials input by the user + promptedCredentials promptedCredentials +} + +// NewDefaultClientConfig creates a DirectClientConfig using the config.CurrentContext as the context name +func NewDefaultClientConfig(config clientcmdapi.Config, overrides *ConfigOverrides) ClientConfig { + return &DirectClientConfig{config, config.CurrentContext, overrides, nil, NewDefaultClientConfigLoadingRules(), promptedCredentials{}} +} + +// NewNonInteractiveClientConfig creates a DirectClientConfig using the passed context name and does not have a fallback reader for auth information +func NewNonInteractiveClientConfig(config clientcmdapi.Config, contextName string, overrides *ConfigOverrides, configAccess ConfigAccess) ClientConfig { + return &DirectClientConfig{config, contextName, overrides, nil, configAccess, promptedCredentials{}} +} + +// NewInteractiveClientConfig creates a DirectClientConfig using the passed context name and a reader in case auth information is not provided via files or flags +func NewInteractiveClientConfig(config clientcmdapi.Config, contextName string, overrides *ConfigOverrides, fallbackReader io.Reader, configAccess ConfigAccess) ClientConfig { + return &DirectClientConfig{config, contextName, overrides, fallbackReader, configAccess, promptedCredentials{}} +} + +// NewClientConfigFromBytes takes your kubeconfig and gives you back a ClientConfig +func NewClientConfigFromBytes(configBytes []byte) (ClientConfig, error) { + config, err := Load(configBytes) + if err != nil { + return nil, err + } + + return &DirectClientConfig{*config, "", &ConfigOverrides{}, nil, nil, promptedCredentials{}}, nil +} + +// RESTConfigFromKubeConfig is a convenience method to give back a restconfig from your kubeconfig bytes. +// For programmatic access, this is what you want 80% of the time +func RESTConfigFromKubeConfig(configBytes []byte) (*restclient.Config, error) { + clientConfig, err := NewClientConfigFromBytes(configBytes) + if err != nil { + return nil, err + } + return clientConfig.ClientConfig() +} + +func (config *DirectClientConfig) RawConfig() (clientcmdapi.Config, error) { + return config.config, nil +} + +// ClientConfig implements ClientConfig +func (config *DirectClientConfig) ClientConfig() (*restclient.Config, error) { + // check that getAuthInfo, getContext, and getCluster do not return an error. + // Do this before checking if the current config is usable in the event that an + // AuthInfo, Context, or Cluster config with user-defined names are not found. 
+	// This provides a user with the immediate cause for an error if one is found
+	configAuthInfo, err := config.getAuthInfo()
+	if err != nil {
+		return nil, err
+	}
+
+	_, err = config.getContext()
+	if err != nil {
+		return nil, err
+	}
+
+	configClusterInfo, err := config.getCluster()
+	if err != nil {
+		return nil, err
+	}
+
+	if err := config.ConfirmUsable(); err != nil {
+		return nil, err
+	}
+
+	clientConfig := &restclient.Config{}
+	clientConfig.Host = configClusterInfo.Server
+
+	if len(config.overrides.Timeout) > 0 {
+		timeout, err := ParseTimeout(config.overrides.Timeout)
+		if err != nil {
+			return nil, err
+		}
+		clientConfig.Timeout = timeout
+	}
+
+	if u, err := url.ParseRequestURI(clientConfig.Host); err == nil && u.Opaque == "" && len(u.Path) > 1 {
+		u.RawQuery = ""
+		u.Fragment = ""
+		clientConfig.Host = u.String()
+	}
+	if len(configAuthInfo.Impersonate) > 0 {
+		clientConfig.Impersonate = restclient.ImpersonationConfig{
+			UserName: configAuthInfo.Impersonate,
+			Groups:   configAuthInfo.ImpersonateGroups,
+			Extra:    configAuthInfo.ImpersonateUserExtra,
+		}
+	}
+
+	// only try to read the auth information if we are secure
+	if restclient.IsConfigTransportTLS(*clientConfig) {
+		var err error
+		var persister restclient.AuthProviderConfigPersister
+		if config.configAccess != nil {
+			authInfoName, _ := config.getAuthInfoName()
+			persister = PersisterForUser(config.configAccess, authInfoName)
+		}
+		userAuthPartialConfig, err := config.getUserIdentificationPartialConfig(configAuthInfo, config.fallbackReader, persister)
+		if err != nil {
+			return nil, err
+		}
+		mergo.MergeWithOverwrite(clientConfig, userAuthPartialConfig)
+
+		serverAuthPartialConfig, err := getServerIdentificationPartialConfig(configAuthInfo, configClusterInfo)
+		if err != nil {
+			return nil, err
+		}
+		mergo.MergeWithOverwrite(clientConfig, serverAuthPartialConfig)
+	}
+
+	return clientConfig, nil
+}
+
+// A clientauth.Info object contains both user identification and server identification. We want different precedence orders for
+// both, so we have to split the objects and merge them separately.
+// We want this order of precedence for server identification:
+// 1. configClusterInfo (the final result of command line flags and merged .kubeconfig files)
+// 2. configAuthInfo.auth-path (this file can contain information that conflicts with #1, and we want #1 to win the priority)
+// 3. load the ~/.kubernetes_auth file as a default
+func getServerIdentificationPartialConfig(configAuthInfo clientcmdapi.AuthInfo, configClusterInfo clientcmdapi.Cluster) (*restclient.Config, error) {
+	mergedConfig := &restclient.Config{}
+
+	// configClusterInfo holds the information that identifies the server provided by .kubeconfig
+	configClientConfig := &restclient.Config{}
+	configClientConfig.CAFile = configClusterInfo.CertificateAuthority
+	configClientConfig.CAData = configClusterInfo.CertificateAuthorityData
+	configClientConfig.Insecure = configClusterInfo.InsecureSkipTLSVerify
+	mergo.MergeWithOverwrite(mergedConfig, configClientConfig)
+
+	return mergedConfig, nil
+}
+
+// A clientauth.Info object contains both user identification and server identification. We want different precedence orders for
+// both, so we have to split the objects and merge them separately.
+// We want this order of precedence for user identification:
+// 1. configAuthInfo minus auth-path (the final result of command line flags and merged .kubeconfig files)
+// 2.
configAuthInfo.auth-path (this file can contain information that conflicts with #1, and we want #1 to win the priority)
+// 3. if there is not enough information to identify the user, try to load the ~/.kubernetes_auth file
+// 4. if there is not enough information to identify the user, prompt if possible
+func (config *DirectClientConfig) getUserIdentificationPartialConfig(configAuthInfo clientcmdapi.AuthInfo, fallbackReader io.Reader, persistAuthConfig restclient.AuthProviderConfigPersister) (*restclient.Config, error) {
+	mergedConfig := &restclient.Config{}
+
+	// blindly overwrite existing values based on precedence
+	if len(configAuthInfo.Token) > 0 {
+		mergedConfig.BearerToken = configAuthInfo.Token
+	} else if len(configAuthInfo.TokenFile) > 0 {
+		tokenBytes, err := ioutil.ReadFile(configAuthInfo.TokenFile)
+		if err != nil {
+			return nil, err
+		}
+		mergedConfig.BearerToken = string(tokenBytes)
+		mergedConfig.BearerTokenFile = configAuthInfo.TokenFile
+	}
+	if len(configAuthInfo.Impersonate) > 0 {
+		mergedConfig.Impersonate = restclient.ImpersonationConfig{
+			UserName: configAuthInfo.Impersonate,
+			Groups:   configAuthInfo.ImpersonateGroups,
+			Extra:    configAuthInfo.ImpersonateUserExtra,
+		}
+	}
+	if len(configAuthInfo.ClientCertificate) > 0 || len(configAuthInfo.ClientCertificateData) > 0 {
+		mergedConfig.CertFile = configAuthInfo.ClientCertificate
+		mergedConfig.CertData = configAuthInfo.ClientCertificateData
+		mergedConfig.KeyFile = configAuthInfo.ClientKey
+		mergedConfig.KeyData = configAuthInfo.ClientKeyData
+	}
+	if len(configAuthInfo.Username) > 0 || len(configAuthInfo.Password) > 0 {
+		mergedConfig.Username = configAuthInfo.Username
+		mergedConfig.Password = configAuthInfo.Password
+	}
+	if configAuthInfo.AuthProvider != nil {
+		mergedConfig.AuthProvider = configAuthInfo.AuthProvider
+		mergedConfig.AuthConfigPersister = persistAuthConfig
+	}
+	if configAuthInfo.Exec != nil {
+		mergedConfig.ExecProvider = configAuthInfo.Exec
+	}
+
+	// if there still isn't enough information to authenticate the user, try prompting
+	if !canIdentifyUser(*mergedConfig) && (fallbackReader != nil) {
+		if len(config.promptedCredentials.username) > 0 && len(config.promptedCredentials.password) > 0 {
+			mergedConfig.Username = config.promptedCredentials.username
+			mergedConfig.Password = config.promptedCredentials.password
+			return mergedConfig, nil
+		}
+		prompter := NewPromptingAuthLoader(fallbackReader)
+		promptedAuthInfo, err := prompter.Prompt()
+		if err != nil {
+			return nil, err
+		}
+		promptedConfig := makeUserIdentificationConfig(*promptedAuthInfo)
+		previouslyMergedConfig := mergedConfig
+		mergedConfig = &restclient.Config{}
+		mergo.MergeWithOverwrite(mergedConfig, promptedConfig)
+		mergo.MergeWithOverwrite(mergedConfig, previouslyMergedConfig)
+		config.promptedCredentials.username = mergedConfig.Username
+		config.promptedCredentials.password = mergedConfig.Password
+	}
+
+	return mergedConfig, nil
+}
+
+// makeUserIdentificationConfig returns a client.Config capable of being merged using mergo for only user identification information
+func makeUserIdentificationConfig(info clientauth.Info) *restclient.Config {
+	config := &restclient.Config{}
+	config.Username = info.User
+	config.Password = info.Password
+	config.CertFile = info.CertFile
+	config.KeyFile = info.KeyFile
+	config.BearerToken = info.BearerToken
+	return config
+}
+
+// makeServerIdentificationConfig returns a client.Config capable of being merged using mergo for only server identification information
+func
makeServerIdentificationConfig(info clientauth.Info) restclient.Config {
+	config := restclient.Config{}
+	config.CAFile = info.CAFile
+	if info.Insecure != nil {
+		config.Insecure = *info.Insecure
+	}
+	return config
+}
+
+func canIdentifyUser(config restclient.Config) bool {
+	return len(config.Username) > 0 ||
+		(len(config.CertFile) > 0 || len(config.CertData) > 0) ||
+		len(config.BearerToken) > 0 ||
+		config.AuthProvider != nil ||
+		config.ExecProvider != nil
+}
+
+// Namespace implements ClientConfig
+func (config *DirectClientConfig) Namespace() (string, bool, error) {
+	if config.overrides != nil && config.overrides.Context.Namespace != "" {
+		// In the event we have an empty config but we do have a namespace override, we should return
+		// the namespace override instead of having config.ConfirmUsable() return an error. This allows
+		// things like in-cluster clients to execute `kubectl get pods --namespace=foo` and have the
+		// --namespace flag honored instead of being ignored.
+		return config.overrides.Context.Namespace, true, nil
+	}
+
+	if err := config.ConfirmUsable(); err != nil {
+		return "", false, err
+	}
+
+	configContext, err := config.getContext()
+	if err != nil {
+		return "", false, err
+	}
+
+	if len(configContext.Namespace) == 0 {
+		return "default", false, nil
+	}
+
+	return configContext.Namespace, false, nil
+}
+
+// ConfigAccess implements ClientConfig
+func (config *DirectClientConfig) ConfigAccess() ConfigAccess {
+	return config.configAccess
+}
+
+// ConfirmUsable looks at a particular context and determines if that particular part of the config is usable. There might still be errors in the config,
+// but no errors in the sections requested or referenced. It does not return early so that it can find as many errors as possible.
+func (config *DirectClientConfig) ConfirmUsable() error {
+	validationErrors := make([]error, 0)
+
+	var contextName string
+	if len(config.contextName) != 0 {
+		contextName = config.contextName
+	} else {
+		contextName = config.config.CurrentContext
+	}
+
+	if len(contextName) > 0 {
+		_, exists := config.config.Contexts[contextName]
+		if !exists {
+			validationErrors = append(validationErrors, &errContextNotFound{contextName})
+		}
+	}
+
+	authInfoName, _ := config.getAuthInfoName()
+	authInfo, _ := config.getAuthInfo()
+	validationErrors = append(validationErrors, validateAuthInfo(authInfoName, authInfo)...)
+	clusterName, _ := config.getClusterName()
+	cluster, _ := config.getCluster()
+	validationErrors = append(validationErrors, validateClusterInfo(clusterName, cluster)...)
+ // when direct client config is specified, and our only error is that no server is defined, we should + // return a standard "no config" error + if len(validationErrors) == 1 && validationErrors[0] == ErrEmptyCluster { + return newErrConfigurationInvalid([]error{ErrEmptyConfig}) + } + return newErrConfigurationInvalid(validationErrors) +} + +// getContextName returns the default, or user-set context name, and a boolean that indicates +// whether the default context name has been overwritten by a user-set flag, or left as its default value +func (config *DirectClientConfig) getContextName() (string, bool) { + if len(config.overrides.CurrentContext) != 0 { + return config.overrides.CurrentContext, true + } + if len(config.contextName) != 0 { + return config.contextName, false + } + + return config.config.CurrentContext, false +} + +// getAuthInfoName returns a string containing the current authinfo name for the current context, +// and a boolean indicating whether the default authInfo name is overwritten by a user-set flag, or +// left as its default value +func (config *DirectClientConfig) getAuthInfoName() (string, bool) { + if len(config.overrides.Context.AuthInfo) != 0 { + return config.overrides.Context.AuthInfo, true + } + context, _ := config.getContext() + return context.AuthInfo, false +} + +// getClusterName returns a string containing the default, or user-set cluster name, and a boolean +// indicating whether the default clusterName has been overwritten by a user-set flag, or left as +// its default value +func (config *DirectClientConfig) getClusterName() (string, bool) { + if len(config.overrides.Context.Cluster) != 0 { + return config.overrides.Context.Cluster, true + } + context, _ := config.getContext() + return context.Cluster, false +} + +// getContext returns the clientcmdapi.Context, or an error if a required context is not found. +func (config *DirectClientConfig) getContext() (clientcmdapi.Context, error) { + contexts := config.config.Contexts + contextName, required := config.getContextName() + + mergedContext := clientcmdapi.NewContext() + if configContext, exists := contexts[contextName]; exists { + mergo.MergeWithOverwrite(mergedContext, configContext) + } else if required { + return clientcmdapi.Context{}, fmt.Errorf("context %q does not exist", contextName) + } + mergo.MergeWithOverwrite(mergedContext, config.overrides.Context) + + return *mergedContext, nil +} + +// getAuthInfo returns the clientcmdapi.AuthInfo, or an error if a required auth info is not found. +func (config *DirectClientConfig) getAuthInfo() (clientcmdapi.AuthInfo, error) { + authInfos := config.config.AuthInfos + authInfoName, required := config.getAuthInfoName() + + mergedAuthInfo := clientcmdapi.NewAuthInfo() + if configAuthInfo, exists := authInfos[authInfoName]; exists { + mergo.MergeWithOverwrite(mergedAuthInfo, configAuthInfo) + } else if required { + return clientcmdapi.AuthInfo{}, fmt.Errorf("auth info %q does not exist", authInfoName) + } + mergo.MergeWithOverwrite(mergedAuthInfo, config.overrides.AuthInfo) + + return *mergedAuthInfo, nil +} + +// getCluster returns the clientcmdapi.Cluster, or an error if a required cluster is not found. 
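+// Merge precedence runs from lowest to highest: ClusterDefaults supplied via
+// overrides, then the named cluster stanza from the kubeconfig, then any
+// explicit command-line overrides (ClusterInfo).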
+func (config *DirectClientConfig) getCluster() (clientcmdapi.Cluster, error) {
+	clusterInfos := config.config.Clusters
+	clusterInfoName, required := config.getClusterName()
+
+	mergedClusterInfo := clientcmdapi.NewCluster()
+	mergo.MergeWithOverwrite(mergedClusterInfo, config.overrides.ClusterDefaults)
+	if configClusterInfo, exists := clusterInfos[clusterInfoName]; exists {
+		mergo.MergeWithOverwrite(mergedClusterInfo, configClusterInfo)
+	} else if required {
+		return clientcmdapi.Cluster{}, fmt.Errorf("cluster %q does not exist", clusterInfoName)
+	}
+	mergo.MergeWithOverwrite(mergedClusterInfo, config.overrides.ClusterInfo)
+	// An override of --insecure-skip-tls-verify=true and no accompanying CA/CA data should clear already-set CA/CA data
+	// otherwise, a kubeconfig containing a CA reference would return an error that "CA and insecure-skip-tls-verify couldn't both be set"
+	caLen := len(config.overrides.ClusterInfo.CertificateAuthority)
+	caDataLen := len(config.overrides.ClusterInfo.CertificateAuthorityData)
+	if config.overrides.ClusterInfo.InsecureSkipTLSVerify && caLen == 0 && caDataLen == 0 {
+		mergedClusterInfo.CertificateAuthority = ""
+		mergedClusterInfo.CertificateAuthorityData = nil
+	}
+
+	return *mergedClusterInfo, nil
+}
+
+// inClusterClientConfig makes a config that will work from within a Kubernetes cluster container environment.
+// Can take options overrides for flags explicitly provided to the command inside the cluster container.
+type inClusterClientConfig struct {
+	overrides               *ConfigOverrides
+	inClusterConfigProvider func() (*restclient.Config, error)
+}
+
+var _ ClientConfig = &inClusterClientConfig{}
+
+func (config *inClusterClientConfig) RawConfig() (clientcmdapi.Config, error) {
+	return clientcmdapi.Config{}, fmt.Errorf("inCluster environment config doesn't support multiple clusters")
+}
+
+func (config *inClusterClientConfig) ClientConfig() (*restclient.Config, error) {
+	if config.inClusterConfigProvider == nil {
+		config.inClusterConfigProvider = restclient.InClusterConfig
+	}
+
+	icc, err := config.inClusterConfigProvider()
+	if err != nil {
+		return nil, err
+	}
+
+	// an in-cluster config only takes a host, token, or CA file;
+	// if any of them were individually provided, overwrite anything else
+	if config.overrides != nil {
+		if server := config.overrides.ClusterInfo.Server; len(server) > 0 {
+			icc.Host = server
+		}
+		if token := config.overrides.AuthInfo.Token; len(token) > 0 {
+			icc.BearerToken = token
+		}
+		if certificateAuthorityFile := config.overrides.ClusterInfo.CertificateAuthority; len(certificateAuthorityFile) > 0 {
+			icc.TLSClientConfig.CAFile = certificateAuthorityFile
+		}
+	}
+
+	return icc, err
+}
+
+func (config *inClusterClientConfig) Namespace() (string, bool, error) {
+	// This way assumes you've set the POD_NAMESPACE environment variable using the downward API.
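+	// For example, a pod spec can surface its namespace via the downward API
+	// (illustrative snippet, not part of this file):
+	//
+	//   env:
+	//   - name: POD_NAMESPACE
+	//     valueFrom:
+	//       fieldRef:
+	//         fieldPath: metadata.namespace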
+	// This check has to be done first for backwards compatibility with the way InClusterConfig was originally set up
+	if ns := os.Getenv("POD_NAMESPACE"); ns != "" {
+		return ns, false, nil
+	}
+
+	// Fall back to the namespace associated with the service account token, if available
+	if data, err := ioutil.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace"); err == nil {
+		if ns := strings.TrimSpace(string(data)); len(ns) > 0 {
+			return ns, false, nil
+		}
+	}
+
+	return "default", false, nil
+}
+
+func (config *inClusterClientConfig) ConfigAccess() ConfigAccess {
+	return NewDefaultClientConfigLoadingRules()
+}
+
+// Possible returns true if loading an in-cluster config is possible.
+func (config *inClusterClientConfig) Possible() bool {
+	fi, err := os.Stat("/var/run/secrets/kubernetes.io/serviceaccount/token")
+	return os.Getenv("KUBERNETES_SERVICE_HOST") != "" &&
+		os.Getenv("KUBERNETES_SERVICE_PORT") != "" &&
+		err == nil && !fi.IsDir()
+}
+
+// BuildConfigFromFlags is a helper function that builds configs from a master
+// url or a kubeconfig filepath. These are passed in as command line flags for cluster
+// components. Warnings should reflect this usage. If neither masterUrl nor kubeconfigPath
+// is passed in, we fall back to inClusterConfig. If inClusterConfig fails, we fall back
+// to the default config.
+func BuildConfigFromFlags(masterUrl, kubeconfigPath string) (*restclient.Config, error) {
+	if kubeconfigPath == "" && masterUrl == "" {
+		glog.Warningf("Neither --kubeconfig nor --master was specified. Using the inClusterConfig. This might not work.")
+		kubeconfig, err := restclient.InClusterConfig()
+		if err == nil {
+			return kubeconfig, nil
+		}
+		glog.Warning("error creating inClusterConfig, falling back to default config: ", err)
+	}
+	return NewNonInteractiveDeferredLoadingClientConfig(
+		&ClientConfigLoadingRules{ExplicitPath: kubeconfigPath},
+		&ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: masterUrl}}).ClientConfig()
+}
+
+// BuildConfigFromKubeconfigGetter is a helper function that builds configs from a master
+// url and a kubeconfigGetter.
+func BuildConfigFromKubeconfigGetter(masterUrl string, kubeconfigGetter KubeconfigGetter) (*restclient.Config, error) {
+	// TODO: We do not need a DeferredLoader here. Refactor code and see if we can use DirectClientConfig here.
+	cc := NewNonInteractiveDeferredLoadingClientConfig(
+		&ClientConfigGetter{kubeconfigGetter: kubeconfigGetter},
+		&ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: masterUrl}})
+	return cc.ClientConfig()
+}
diff --git a/vendor/k8s.io/client-go/tools/clientcmd/config.go b/vendor/k8s.io/client-go/tools/clientcmd/config.go
new file mode 100644
index 000000000..9495849b0
--- /dev/null
+++ b/vendor/k8s.io/client-go/tools/clientcmd/config.go
@@ -0,0 +1,490 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package clientcmd
+
+import (
+	"errors"
+	"os"
+	"path"
+	"path/filepath"
+	"reflect"
+	"sort"
+
+	"github.com/golang/glog"
+
+	restclient "k8s.io/client-go/rest"
+	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
+)
+
+// ConfigAccess is used by subcommands and methods in this package to load and modify the appropriate config files
+type ConfigAccess interface {
+	// GetLoadingPrecedence returns the slice of files that should be used for loading and inspecting the config
+	GetLoadingPrecedence() []string
+	// GetStartingConfig returns the config that subcommands should be operating against. It may or may not be merged depending on loading rules
+	GetStartingConfig() (*clientcmdapi.Config, error)
+	// GetDefaultFilename returns the name of the file you should write into (create if necessary), if you're trying to create a new stanza as opposed to updating an existing one.
+	GetDefaultFilename() string
+	// IsExplicitFile indicates whether or not this command is interested in exactly one file. This implementation only ever does that via a flag, but implementations that handle local, global, and flags may have more
+	IsExplicitFile() bool
+	// GetExplicitFile returns the particular file this command is operating against. This implementation only ever has one, but implementations that handle local, global, and flags may have more
+	GetExplicitFile() string
+}
+
+type PathOptions struct {
+	// GlobalFile is the full path to the file to load as the global (final) option
+	GlobalFile string
+	// EnvVar is the env var name that points to the list of kubeconfig files to load
+	EnvVar string
+	// ExplicitFileFlag is the name of the flag to use for prompting for the kubeconfig file
+	ExplicitFileFlag string
+
+	// GlobalFileSubpath is an optional value used for displaying help
+	GlobalFileSubpath string
+
+	LoadingRules *ClientConfigLoadingRules
+}
+
+func (o *PathOptions) GetEnvVarFiles() []string {
+	if len(o.EnvVar) == 0 {
+		return []string{}
+	}
+
+	envVarValue := os.Getenv(o.EnvVar)
+	if len(envVarValue) == 0 {
+		return []string{}
+	}
+
+	fileList := filepath.SplitList(envVarValue)
+	// prevent the same path from being loaded multiple times
+	return deduplicate(fileList)
+}
+
+func (o *PathOptions) GetLoadingPrecedence() []string {
+	if envVarFiles := o.GetEnvVarFiles(); len(envVarFiles) > 0 {
+		return envVarFiles
+	}
+
+	return []string{o.GlobalFile}
+}
+
+func (o *PathOptions) GetStartingConfig() (*clientcmdapi.Config, error) {
+	// don't mutate the original
+	loadingRules := *o.LoadingRules
+	loadingRules.Precedence = o.GetLoadingPrecedence()
+
+	clientConfig := NewNonInteractiveDeferredLoadingClientConfig(&loadingRules, &ConfigOverrides{})
+	rawConfig, err := clientConfig.RawConfig()
+	if os.IsNotExist(err) {
+		return clientcmdapi.NewConfig(), nil
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	return &rawConfig, nil
+}
+
+func (o *PathOptions) GetDefaultFilename() string {
+	if o.IsExplicitFile() {
+		return o.GetExplicitFile()
+	}
+
+	if envVarFiles := o.GetEnvVarFiles(); len(envVarFiles) > 0 {
+		if len(envVarFiles) == 1 {
+			return envVarFiles[0]
+		}
+
+		// if any of the envvar files already exists, return it
+		for _, envVarFile := range envVarFiles {
+			if _, err := os.Stat(envVarFile); err == nil {
+				return envVarFile
+			}
+		}
+
+		// otherwise, return the last one in the list
+		return envVarFiles[len(envVarFiles)-1]
+	}
+
+	return o.GlobalFile
+}
+
+func (o *PathOptions) IsExplicitFile() bool {
+	if len(o.LoadingRules.ExplicitPath) > 0 {
+		return true
+	}
+
+	return false
+}
+
+func (o
*PathOptions) GetExplicitFile() string {
+	return o.LoadingRules.ExplicitPath
+}
+
+func NewDefaultPathOptions() *PathOptions {
+	ret := &PathOptions{
+		GlobalFile:       RecommendedHomeFile,
+		EnvVar:           RecommendedConfigPathEnvVar,
+		ExplicitFileFlag: RecommendedConfigPathFlag,
+
+		GlobalFileSubpath: path.Join(RecommendedHomeDir, RecommendedFileName),
+
+		LoadingRules: NewDefaultClientConfigLoadingRules(),
+	}
+	ret.LoadingRules.DoNotResolvePaths = true
+
+	return ret
+}
+
+// ModifyConfig takes a Config object, iterates through Clusters, AuthInfos, and Contexts, and uses the LocationOfOrigin if specified or
+// the default destination file to write the results into. This results in multiple file reads, but it's very easy to follow.
+// Preferences and CurrentContext should always be set in the default destination file. Since we can't distinguish between empty and missing values
+// (no nil strings), we're forced to have separate handling for them. In the kubeconfig cases, newConfig should have at most one difference,
+// which means that this code will only write into a single file. If you want to relativizePaths, you must provide a fully qualified path in any
+// modified element.
+func ModifyConfig(configAccess ConfigAccess, newConfig clientcmdapi.Config, relativizePaths bool) error {
+	possibleSources := configAccess.GetLoadingPrecedence()
+	// sort the possible kubeconfig files so we always "lock" in the same order
+	// to avoid deadlock (note: this can fail w/ symlinks, but... come on).
+	sort.Strings(possibleSources)
+	for _, filename := range possibleSources {
+		if err := lockFile(filename); err != nil {
+			return err
+		}
+		defer unlockFile(filename)
+	}
+
+	startingConfig, err := configAccess.GetStartingConfig()
+	if err != nil {
+		return err
+	}
+
+	// We need to find all differences, locate their original files, read a partial config to modify only that stanza and write out the file.
+	// Special case the test for current context and preferences since those always write to the default file.
+	if reflect.DeepEqual(*startingConfig, newConfig) {
+		// nothing to do
+		return nil
+	}
+
+	if startingConfig.CurrentContext != newConfig.CurrentContext {
+		if err := writeCurrentContext(configAccess, newConfig.CurrentContext); err != nil {
+			return err
+		}
+	}
+
+	if !reflect.DeepEqual(startingConfig.Preferences, newConfig.Preferences) {
+		if err := writePreferences(configAccess, newConfig.Preferences); err != nil {
+			return err
+		}
+	}
+
+	// Search every cluster, authInfo, and context.
First from new to old for differences, then from old to new for deletions
+	for key, cluster := range newConfig.Clusters {
+		startingCluster, exists := startingConfig.Clusters[key]
+		if !reflect.DeepEqual(cluster, startingCluster) || !exists {
+			destinationFile := cluster.LocationOfOrigin
+			if len(destinationFile) == 0 {
+				destinationFile = configAccess.GetDefaultFilename()
+			}
+
+			configToWrite, err := getConfigFromFile(destinationFile)
+			if err != nil {
+				return err
+			}
+			t := *cluster
+
+			configToWrite.Clusters[key] = &t
+			configToWrite.Clusters[key].LocationOfOrigin = destinationFile
+			if relativizePaths {
+				if err := RelativizeClusterLocalPaths(configToWrite.Clusters[key]); err != nil {
+					return err
+				}
+			}
+
+			if err := WriteToFile(*configToWrite, destinationFile); err != nil {
+				return err
+			}
+		}
+	}
+
+	// seenConfigs stores a map of config source filenames to computed config objects
+	seenConfigs := map[string]*clientcmdapi.Config{}
+
+	for key, context := range newConfig.Contexts {
+		startingContext, exists := startingConfig.Contexts[key]
+		if !reflect.DeepEqual(context, startingContext) || !exists {
+			destinationFile := context.LocationOfOrigin
+			if len(destinationFile) == 0 {
+				destinationFile = configAccess.GetDefaultFilename()
+			}
+
+			// we only obtain a fresh config object from its source file
+			// if we have not seen it already - this prevents us from
+			// reading and writing the same files repeatedly
+			// when multiple / all contexts share the same destination file.
+			configToWrite, seen := seenConfigs[destinationFile]
+			if !seen {
+				var err error
+				configToWrite, err = getConfigFromFile(destinationFile)
+				if err != nil {
+					return err
+				}
+				seenConfigs[destinationFile] = configToWrite
+			}
+
+			configToWrite.Contexts[key] = context
+		}
+	}
+
+	// actually persist config object changes
+	for destinationFile, configToWrite := range seenConfigs {
+		if err := WriteToFile(*configToWrite, destinationFile); err != nil {
+			return err
+		}
+	}
+
+	for key, authInfo := range newConfig.AuthInfos {
+		startingAuthInfo, exists := startingConfig.AuthInfos[key]
+		if !reflect.DeepEqual(authInfo, startingAuthInfo) || !exists {
+			destinationFile := authInfo.LocationOfOrigin
+			if len(destinationFile) == 0 {
+				destinationFile = configAccess.GetDefaultFilename()
+			}
+
+			configToWrite, err := getConfigFromFile(destinationFile)
+			if err != nil {
+				return err
+			}
+			t := *authInfo
+			configToWrite.AuthInfos[key] = &t
+			configToWrite.AuthInfos[key].LocationOfOrigin = destinationFile
+			if relativizePaths {
+				if err := RelativizeAuthInfoLocalPaths(configToWrite.AuthInfos[key]); err != nil {
+					return err
+				}
+			}
+
+			if err := WriteToFile(*configToWrite, destinationFile); err != nil {
+				return err
+			}
+		}
+	}
+
+	for key, cluster := range startingConfig.Clusters {
+		if _, exists := newConfig.Clusters[key]; !exists {
+			destinationFile := cluster.LocationOfOrigin
+			if len(destinationFile) == 0 {
+				destinationFile = configAccess.GetDefaultFilename()
+			}
+
+			configToWrite, err := getConfigFromFile(destinationFile)
+			if err != nil {
+				return err
+			}
+			delete(configToWrite.Clusters, key)
+
+			if err := WriteToFile(*configToWrite, destinationFile); err != nil {
+				return err
+			}
+		}
+	}
+
+	for key, context := range startingConfig.Contexts {
+		if _, exists := newConfig.Contexts[key]; !exists {
+			destinationFile := context.LocationOfOrigin
+			if len(destinationFile) == 0 {
+				destinationFile = configAccess.GetDefaultFilename()
+			}
+
+			configToWrite, err := getConfigFromFile(destinationFile)
+			if err != 
nil { + return err + } + delete(configToWrite.Contexts, key) + + if err := WriteToFile(*configToWrite, destinationFile); err != nil { + return err + } + } + } + + for key, authInfo := range startingConfig.AuthInfos { + if _, exists := newConfig.AuthInfos[key]; !exists { + destinationFile := authInfo.LocationOfOrigin + if len(destinationFile) == 0 { + destinationFile = configAccess.GetDefaultFilename() + } + + configToWrite, err := getConfigFromFile(destinationFile) + if err != nil { + return err + } + delete(configToWrite.AuthInfos, key) + + if err := WriteToFile(*configToWrite, destinationFile); err != nil { + return err + } + } + } + + return nil +} + +func PersisterForUser(configAccess ConfigAccess, user string) restclient.AuthProviderConfigPersister { + return &persister{configAccess, user} +} + +type persister struct { + configAccess ConfigAccess + user string +} + +func (p *persister) Persist(config map[string]string) error { + newConfig, err := p.configAccess.GetStartingConfig() + if err != nil { + return err + } + authInfo, ok := newConfig.AuthInfos[p.user] + if ok && authInfo.AuthProvider != nil { + authInfo.AuthProvider.Config = config + ModifyConfig(p.configAccess, *newConfig, false) + } + return nil +} + +// writeCurrentContext takes three possible paths. +// If newCurrentContext is the same as the startingConfig's current context, then we exit. +// If newCurrentContext has a value, then that value is written into the default destination file. +// If newCurrentContext is empty, then we find the config file that is setting the CurrentContext and clear the value from that file +func writeCurrentContext(configAccess ConfigAccess, newCurrentContext string) error { + if startingConfig, err := configAccess.GetStartingConfig(); err != nil { + return err + } else if startingConfig.CurrentContext == newCurrentContext { + return nil + } + + if configAccess.IsExplicitFile() { + file := configAccess.GetExplicitFile() + currConfig, err := getConfigFromFile(file) + if err != nil { + return err + } + currConfig.CurrentContext = newCurrentContext + if err := WriteToFile(*currConfig, file); err != nil { + return err + } + + return nil + } + + if len(newCurrentContext) > 0 { + destinationFile := configAccess.GetDefaultFilename() + config, err := getConfigFromFile(destinationFile) + if err != nil { + return err + } + config.CurrentContext = newCurrentContext + + if err := WriteToFile(*config, destinationFile); err != nil { + return err + } + + return nil + } + + // we're supposed to be clearing the current context. 
We need to find the first spot in the chain that is setting it and clear it + for _, file := range configAccess.GetLoadingPrecedence() { + if _, err := os.Stat(file); err == nil { + currConfig, err := getConfigFromFile(file) + if err != nil { + return err + } + + if len(currConfig.CurrentContext) > 0 { + currConfig.CurrentContext = newCurrentContext + if err := WriteToFile(*currConfig, file); err != nil { + return err + } + + return nil + } + } + } + + return errors.New("no config found to write context") +} + +func writePreferences(configAccess ConfigAccess, newPrefs clientcmdapi.Preferences) error { + if startingConfig, err := configAccess.GetStartingConfig(); err != nil { + return err + } else if reflect.DeepEqual(startingConfig.Preferences, newPrefs) { + return nil + } + + if configAccess.IsExplicitFile() { + file := configAccess.GetExplicitFile() + currConfig, err := getConfigFromFile(file) + if err != nil { + return err + } + currConfig.Preferences = newPrefs + if err := WriteToFile(*currConfig, file); err != nil { + return err + } + + return nil + } + + for _, file := range configAccess.GetLoadingPrecedence() { + currConfig, err := getConfigFromFile(file) + if err != nil { + return err + } + + if !reflect.DeepEqual(currConfig.Preferences, newPrefs) { + currConfig.Preferences = newPrefs + if err := WriteToFile(*currConfig, file); err != nil { + return err + } + + return nil + } + } + + return errors.New("no config found to write preferences") +} + +// getConfigFromFile tries to read a kubeconfig file and if it can't, returns an error. One exception, missing files result in empty configs, not an error. +func getConfigFromFile(filename string) (*clientcmdapi.Config, error) { + config, err := LoadFromFile(filename) + if err != nil && !os.IsNotExist(err) { + return nil, err + } + if config == nil { + config = clientcmdapi.NewConfig() + } + return config, nil +} + +// GetConfigFromFileOrDie tries to read a kubeconfig file and if it can't, it calls exit. One exception, missing files result in empty configs, not an exit +func GetConfigFromFileOrDie(filename string) *clientcmdapi.Config { + config, err := getConfigFromFile(filename) + if err != nil { + glog.FatalDepth(1, err) + } + + return config +} diff --git a/vendor/k8s.io/client-go/tools/clientcmd/doc.go b/vendor/k8s.io/client-go/tools/clientcmd/doc.go new file mode 100644 index 000000000..424311ee1 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/clientcmd/doc.go @@ -0,0 +1,37 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package clientcmd provides one stop shopping for building a working client from a fixed config, +from a .kubeconfig file, from command line flags, or from any merged combination. 
+
+Sample usage from merged .kubeconfig files (local directory, home directory)
+
+	loadingRules := clientcmd.NewDefaultClientConfigLoadingRules()
+	// if you want to change the loading rules (which files in which order), you can do so here
+
+	configOverrides := &clientcmd.ConfigOverrides{}
+	// if you want to change override values or bind them to flags, there are methods to help you
+
+	kubeConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, configOverrides)
+	config, err := kubeConfig.ClientConfig()
+	if err != nil {
+		// Do something
+	}
+	clientset, err := kubernetes.NewForConfig(config)
+	// ...
+*/
+package clientcmd // import "k8s.io/client-go/tools/clientcmd"
diff --git a/vendor/k8s.io/client-go/tools/clientcmd/flag.go b/vendor/k8s.io/client-go/tools/clientcmd/flag.go
new file mode 100644
index 000000000..8d60d201c
--- /dev/null
+++ b/vendor/k8s.io/client-go/tools/clientcmd/flag.go
@@ -0,0 +1,49 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package clientcmd
+
+// transformingStringValue implements pflag.Value to store string values,
+// allowing transforming them while being set
+type transformingStringValue struct {
+	target      *string
+	transformer func(string) (string, error)
+}
+
+func newTransformingStringValue(val string, target *string, transformer func(string) (string, error)) *transformingStringValue {
+	*target = val
+	return &transformingStringValue{
+		target:      target,
+		transformer: transformer,
+	}
+}
+
+func (t *transformingStringValue) Set(val string) error {
+	val, err := t.transformer(val)
+	if err != nil {
+		return err
+	}
+	*t.target = val
+	return nil
+}
+
+func (t *transformingStringValue) Type() string {
+	return "string"
+}
+
+func (t *transformingStringValue) String() string {
+	return *t.target
+}
diff --git a/vendor/k8s.io/client-go/tools/clientcmd/helpers.go b/vendor/k8s.io/client-go/tools/clientcmd/helpers.go
new file mode 100644
index 000000000..b609d1a76
--- /dev/null
+++ b/vendor/k8s.io/client-go/tools/clientcmd/helpers.go
@@ -0,0 +1,35 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package clientcmd
+
+import (
+	"fmt"
+	"strconv"
+	"time"
+)
+
+// ParseTimeout returns a parsed duration from a string.
+// A duration string value must be a positive integer, optionally followed by a corresponding time unit (s|m|h).
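+// For example, "30" and "30s" both parse to thirty seconds, while "2m" parses
+// to two minutes.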
+func ParseTimeout(duration string) (time.Duration, error) { + if i, err := strconv.ParseInt(duration, 10, 64); err == nil && i >= 0 { + return (time.Duration(i) * time.Second), nil + } + if requestTimeout, err := time.ParseDuration(duration); err == nil { + return requestTimeout, nil + } + return 0, fmt.Errorf("Invalid timeout value. Timeout must be a single integer in seconds, or an integer followed by a corresponding time unit (e.g. 1s | 2m | 3h)") +} diff --git a/vendor/k8s.io/client-go/tools/clientcmd/loader.go b/vendor/k8s.io/client-go/tools/clientcmd/loader.go new file mode 100644 index 000000000..6038c8d45 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/clientcmd/loader.go @@ -0,0 +1,633 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package clientcmd + +import ( + "fmt" + "io" + "io/ioutil" + "os" + "path" + "path/filepath" + "reflect" + goruntime "runtime" + "strings" + + "github.com/golang/glog" + "github.com/imdario/mergo" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + utilerrors "k8s.io/apimachinery/pkg/util/errors" + restclient "k8s.io/client-go/rest" + clientcmdapi "k8s.io/client-go/tools/clientcmd/api" + clientcmdlatest "k8s.io/client-go/tools/clientcmd/api/latest" + "k8s.io/client-go/util/homedir" +) + +const ( + RecommendedConfigPathFlag = "kubeconfig" + RecommendedConfigPathEnvVar = "KUBECONFIG" + RecommendedHomeDir = ".kube" + RecommendedFileName = "config" + RecommendedSchemaName = "schema" +) + +var ( + RecommendedConfigDir = path.Join(homedir.HomeDir(), RecommendedHomeDir) + RecommendedHomeFile = path.Join(RecommendedConfigDir, RecommendedFileName) + RecommendedSchemaFile = path.Join(RecommendedConfigDir, RecommendedSchemaName) +) + +// currentMigrationRules returns a map that holds the history of recommended home directories used in previous versions. +// Any future changes to RecommendedHomeFile and related are expected to add a migration rule here, in order to make +// sure existing config files are migrated to their new locations properly. +func currentMigrationRules() map[string]string { + oldRecommendedHomeFile := path.Join(os.Getenv("HOME"), "/.kube/.kubeconfig") + oldRecommendedWindowsHomeFile := path.Join(os.Getenv("HOME"), RecommendedHomeDir, RecommendedFileName) + + migrationRules := map[string]string{} + migrationRules[RecommendedHomeFile] = oldRecommendedHomeFile + if goruntime.GOOS == "windows" { + migrationRules[RecommendedHomeFile] = oldRecommendedWindowsHomeFile + } + return migrationRules +} + +type ClientConfigLoader interface { + ConfigAccess + // IsDefaultConfig returns true if the returned config matches the defaults. + IsDefaultConfig(*restclient.Config) bool + // Load returns the latest config + Load() (*clientcmdapi.Config, error) +} + +type KubeconfigGetter func() (*clientcmdapi.Config, error) + +type ClientConfigGetter struct { + kubeconfigGetter KubeconfigGetter +} + +// ClientConfigGetter implements the ClientConfigLoader interface. 
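+// (The blank-identifier assignment below is a compile-time assertion that
+// ClientConfigGetter satisfies ClientConfigLoader.)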
+var _ ClientConfigLoader = &ClientConfigGetter{}
+
+func (g *ClientConfigGetter) Load() (*clientcmdapi.Config, error) {
+	return g.kubeconfigGetter()
+}
+
+func (g *ClientConfigGetter) GetLoadingPrecedence() []string {
+	return nil
+}
+func (g *ClientConfigGetter) GetStartingConfig() (*clientcmdapi.Config, error) {
+	return g.kubeconfigGetter()
+}
+func (g *ClientConfigGetter) GetDefaultFilename() string {
+	return ""
+}
+func (g *ClientConfigGetter) IsExplicitFile() bool {
+	return false
+}
+func (g *ClientConfigGetter) GetExplicitFile() string {
+	return ""
+}
+func (g *ClientConfigGetter) IsDefaultConfig(config *restclient.Config) bool {
+	return false
+}
+
+// ClientConfigLoadingRules is an ExplicitPath and string slice of specific locations that are used for merging together a Config
+// Callers can put the chain together however they want, but we'd recommend:
+// EnvVarPathFiles if set (a list of files if set) OR the HomeDirectoryPath
+// ExplicitPath is special: if a user specifically requests a certain file be used, an error is reported if this file is not present
+type ClientConfigLoadingRules struct {
+	ExplicitPath string
+	Precedence   []string
+
+	// MigrationRules is a map of destination files to source files. If a destination file is not present, then the source file is checked.
+	// If the source file is present, then it is copied to the destination file BEFORE any further loading happens.
+	MigrationRules map[string]string
+
+	// DoNotResolvePaths indicates whether or not to resolve paths with respect to the originating files. This is phrased as a negative so
+	// that a default object that doesn't set this will usually get the behavior it wants.
+	DoNotResolvePaths bool
+
+	// DefaultClientConfig is an optional field indicating what rules to use to calculate a default configuration.
+	// This should match the overrides passed in to ClientConfig loader.
+	DefaultClientConfig ClientConfig
+}
+
+// ClientConfigLoadingRules implements the ClientConfigLoader interface.
+var _ ClientConfigLoader = &ClientConfigLoadingRules{}
+
+// NewDefaultClientConfigLoadingRules returns a ClientConfigLoadingRules object with default fields filled in. You are not required to
+// use this constructor
+func NewDefaultClientConfigLoadingRules() *ClientConfigLoadingRules {
+	chain := []string{}
+
+	envVarFiles := os.Getenv(RecommendedConfigPathEnvVar)
+	if len(envVarFiles) != 0 {
+		fileList := filepath.SplitList(envVarFiles)
+		// prevent the same path from being loaded multiple times
+		chain = append(chain, deduplicate(fileList)...)
+
+	} else {
+		chain = append(chain, RecommendedHomeFile)
+	}
+
+	return &ClientConfigLoadingRules{
+		Precedence:     chain,
+		MigrationRules: currentMigrationRules(),
+	}
+}
+
+// Load starts by running the MigrationRules and then
+// takes the loading rules and returns a Config object based on the following rules:
+//   If ExplicitPath is set, return the unmerged explicit file.
+//   Otherwise, return a merged config based on the Precedence slice.
+// A missing ExplicitPath file produces an error. Empty filenames or other missing files are ignored.
+// Read errors or files with non-deserializable content produce errors.
+// The first file to set a particular map key wins, and that map key's value is never changed.
+// BUT, if you set a struct value that is NOT contained inside of a map, the value WILL be changed.
+// This results in some odd-looking logic to merge in one direction, merge in the other, and then merge the two.
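+// For example: if an earlier file in the chain sets current-context to "dev"
+// and a later file sets it to "prod", the merged config keeps "dev"; "prod" is
+// used only when the earlier file leaves the field empty.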
+// It also means that if two files specify a "red-user", only values from the first file's red-user are used. Even +// non-conflicting entries from the second file's "red-user" are discarded. +// Relative paths inside of the .kubeconfig files are resolved against the .kubeconfig file's parent folder +// and only absolute file paths are returned. +func (rules *ClientConfigLoadingRules) Load() (*clientcmdapi.Config, error) { + if err := rules.Migrate(); err != nil { + return nil, err + } + + errlist := []error{} + + kubeConfigFiles := []string{} + + // Make sure a file we were explicitly told to use exists + if len(rules.ExplicitPath) > 0 { + if _, err := os.Stat(rules.ExplicitPath); os.IsNotExist(err) { + return nil, err + } + kubeConfigFiles = append(kubeConfigFiles, rules.ExplicitPath) + + } else { + kubeConfigFiles = append(kubeConfigFiles, rules.Precedence...) + } + + kubeconfigs := []*clientcmdapi.Config{} + // read and cache the config files so that we only look at them once + for _, filename := range kubeConfigFiles { + if len(filename) == 0 { + // no work to do + continue + } + + config, err := LoadFromFile(filename) + if os.IsNotExist(err) { + // skip missing files + continue + } + if err != nil { + errlist = append(errlist, fmt.Errorf("Error loading config file \"%s\": %v", filename, err)) + continue + } + + kubeconfigs = append(kubeconfigs, config) + } + + // first merge all of our maps + mapConfig := clientcmdapi.NewConfig() + + for _, kubeconfig := range kubeconfigs { + mergo.MergeWithOverwrite(mapConfig, kubeconfig) + } + + // merge all of the struct values in the reverse order so that priority is given correctly + // errors are not added to the list the second time + nonMapConfig := clientcmdapi.NewConfig() + for i := len(kubeconfigs) - 1; i >= 0; i-- { + kubeconfig := kubeconfigs[i] + mergo.MergeWithOverwrite(nonMapConfig, kubeconfig) + } + + // since values are overwritten, but maps values are not, we can merge the non-map config on top of the map config and + // get the values we expect. + config := clientcmdapi.NewConfig() + mergo.MergeWithOverwrite(config, mapConfig) + mergo.MergeWithOverwrite(config, nonMapConfig) + + if rules.ResolvePaths() { + if err := ResolveLocalPaths(config); err != nil { + errlist = append(errlist, err) + } + } + return config, utilerrors.NewAggregate(errlist) +} + +// Migrate uses the MigrationRules map. If a destination file is not present, then the source file is checked. +// If the source file is present, then it is copied to the destination file BEFORE any further loading happens. +func (rules *ClientConfigLoadingRules) Migrate() error { + if rules.MigrationRules == nil { + return nil + } + + for destination, source := range rules.MigrationRules { + if _, err := os.Stat(destination); err == nil { + // if the destination already exists, do nothing + continue + } else if os.IsPermission(err) { + // if we can't access the file, skip it + continue + } else if !os.IsNotExist(err) { + // if we had an error other than non-existence, fail + return err + } + + if sourceInfo, err := os.Stat(source); err != nil { + if os.IsNotExist(err) || os.IsPermission(err) { + // if the source file doesn't exist or we can't access it, there's no work to do. 
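+ // [editor's note] To make the destination<-source semantics above concrete,
+ // a hypothetical rule set (paths illustrative, not prescriptive):
+ //
+ //	rules := &ClientConfigLoadingRules{
+ //		MigrationRules: map[string]string{
+ //			// copy the legacy ~/.kube/.kubeconfig to the new location,
+ //			// but only when the destination file does not exist yet
+ //			RecommendedHomeFile: path.Join(os.Getenv("HOME"), ".kube", ".kubeconfig"),
+ //		},
+ //	}
+ //	if err := rules.Migrate(); err != nil { /* handle the copy failure */ }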
+ continue + } + + // if we had an error other than non-existence, fail + return err + } else if sourceInfo.IsDir() { + return fmt.Errorf("cannot migrate %v to %v because it is a directory", source, destination) + } + + in, err := os.Open(source) + if err != nil { + return err + } + defer in.Close() + out, err := os.Create(destination) + if err != nil { + return err + } + defer out.Close() + + if _, err = io.Copy(out, in); err != nil { + return err + } + } + + return nil +} + +// GetLoadingPrecedence implements ConfigAccess +func (rules *ClientConfigLoadingRules) GetLoadingPrecedence() []string { + return rules.Precedence +} + +// GetStartingConfig implements ConfigAccess +func (rules *ClientConfigLoadingRules) GetStartingConfig() (*clientcmdapi.Config, error) { + clientConfig := NewNonInteractiveDeferredLoadingClientConfig(rules, &ConfigOverrides{}) + rawConfig, err := clientConfig.RawConfig() + if os.IsNotExist(err) { + return clientcmdapi.NewConfig(), nil + } + if err != nil { + return nil, err + } + + return &rawConfig, nil +} + +// GetDefaultFilename implements ConfigAccess +func (rules *ClientConfigLoadingRules) GetDefaultFilename() string { + // Explicit file if we have one. + if rules.IsExplicitFile() { + return rules.GetExplicitFile() + } + // Otherwise, first existing file from precedence. + for _, filename := range rules.GetLoadingPrecedence() { + if _, err := os.Stat(filename); err == nil { + return filename + } + } + // If none exists, use the first from precedence. + if len(rules.Precedence) > 0 { + return rules.Precedence[0] + } + return "" +} + +// IsExplicitFile implements ConfigAccess +func (rules *ClientConfigLoadingRules) IsExplicitFile() bool { + return len(rules.ExplicitPath) > 0 +} + +// GetExplicitFile implements ConfigAccess +func (rules *ClientConfigLoadingRules) GetExplicitFile() string { + return rules.ExplicitPath +} + +// IsDefaultConfig returns true if the provided configuration matches the default +func (rules *ClientConfigLoadingRules) IsDefaultConfig(config *restclient.Config) bool { + if rules.DefaultClientConfig == nil { + return false + } + defaultConfig, err := rules.DefaultClientConfig.ClientConfig() + if err != nil { + return false + } + return reflect.DeepEqual(config, defaultConfig) +} + +// LoadFromFile takes a filename and deserializes the contents into Config object +func LoadFromFile(filename string) (*clientcmdapi.Config, error) { + kubeconfigBytes, err := ioutil.ReadFile(filename) + if err != nil { + return nil, err + } + config, err := Load(kubeconfigBytes) + if err != nil { + return nil, err + } + glog.V(6).Infoln("Config loaded from file", filename) + + // set LocationOfOrigin on every Cluster, User, and Context + for key, obj := range config.AuthInfos { + obj.LocationOfOrigin = filename + config.AuthInfos[key] = obj + } + for key, obj := range config.Clusters { + obj.LocationOfOrigin = filename + config.Clusters[key] = obj + } + for key, obj := range config.Contexts { + obj.LocationOfOrigin = filename + config.Contexts[key] = obj + } + + if config.AuthInfos == nil { + config.AuthInfos = map[string]*clientcmdapi.AuthInfo{} + } + if config.Clusters == nil { + config.Clusters = map[string]*clientcmdapi.Cluster{} + } + if config.Contexts == nil { + config.Contexts = map[string]*clientcmdapi.Context{} + } + + return config, nil +} + +// Load takes a byte slice and deserializes the contents into Config object. +// Encapsulates deserialization without assuming the source is a file. 
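+//
+// [editor's note] A short usage sketch tying LoadFromFile (above) to Load
+// (below); the kubeconfig path is hypothetical:
+//
+//	cfg, err := LoadFromFile("/tmp/kubeconfig") // one file, LocationOfOrigin stamped on each entry
+//	if err != nil { /* handle */ }
+//	empty, _ := Load(nil) // empty input returns a default Config rather than an error
+//	_, _ = cfg, empty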
+func Load(data []byte) (*clientcmdapi.Config, error) { + config := clientcmdapi.NewConfig() + // if there's no data in a file, return the default object instead of failing (DecodeInto reject empty input) + if len(data) == 0 { + return config, nil + } + decoded, _, err := clientcmdlatest.Codec.Decode(data, &schema.GroupVersionKind{Version: clientcmdlatest.Version, Kind: "Config"}, config) + if err != nil { + return nil, err + } + return decoded.(*clientcmdapi.Config), nil +} + +// WriteToFile serializes the config to yaml and writes it out to a file. If not present, it creates the file with the mode 0600. If it is present +// it stomps the contents +func WriteToFile(config clientcmdapi.Config, filename string) error { + content, err := Write(config) + if err != nil { + return err + } + dir := filepath.Dir(filename) + if _, err := os.Stat(dir); os.IsNotExist(err) { + if err = os.MkdirAll(dir, 0755); err != nil { + return err + } + } + + if err := ioutil.WriteFile(filename, content, 0600); err != nil { + return err + } + return nil +} + +func lockFile(filename string) error { + // TODO: find a way to do this with actual file locks. Will + // probably need separate solution for windows and Linux. + + // Make sure the dir exists before we try to create a lock file. + dir := filepath.Dir(filename) + if _, err := os.Stat(dir); os.IsNotExist(err) { + if err = os.MkdirAll(dir, 0755); err != nil { + return err + } + } + f, err := os.OpenFile(lockName(filename), os.O_CREATE|os.O_EXCL, 0) + if err != nil { + return err + } + f.Close() + return nil +} + +func unlockFile(filename string) error { + return os.Remove(lockName(filename)) +} + +func lockName(filename string) string { + return filename + ".lock" +} + +// Write serializes the config to yaml. +// Encapsulates serialization without assuming the destination is a file. +func Write(config clientcmdapi.Config) ([]byte, error) { + return runtime.Encode(clientcmdlatest.Codec, &config) +} + +func (rules ClientConfigLoadingRules) ResolvePaths() bool { + return !rules.DoNotResolvePaths +} + +// ResolveLocalPaths resolves all relative paths in the config object with respect to the stanza's LocationOfOrigin +// this cannot be done directly inside of LoadFromFile because doing so there would make it impossible to load a file without +// modification of its contents. +func ResolveLocalPaths(config *clientcmdapi.Config) error { + for _, cluster := range config.Clusters { + if len(cluster.LocationOfOrigin) == 0 { + continue + } + base, err := filepath.Abs(filepath.Dir(cluster.LocationOfOrigin)) + if err != nil { + return fmt.Errorf("Could not determine the absolute path of config file %s: %v", cluster.LocationOfOrigin, err) + } + + if err := ResolvePaths(GetClusterFileReferences(cluster), base); err != nil { + return err + } + } + for _, authInfo := range config.AuthInfos { + if len(authInfo.LocationOfOrigin) == 0 { + continue + } + base, err := filepath.Abs(filepath.Dir(authInfo.LocationOfOrigin)) + if err != nil { + return fmt.Errorf("Could not determine the absolute path of config file %s: %v", authInfo.LocationOfOrigin, err) + } + + if err := ResolvePaths(GetAuthInfoFileReferences(authInfo), base); err != nil { + return err + } + } + + return nil +} + +// RelativizeClusterLocalPaths first absolutizes the paths by calling ResolveLocalPaths. 
This assumes that any NEW path is already +// absolute, but any existing path will be resolved relative to LocationOfOrigin +func RelativizeClusterLocalPaths(cluster *clientcmdapi.Cluster) error { + if len(cluster.LocationOfOrigin) == 0 { + return fmt.Errorf("no location of origin for %s", cluster.Server) + } + base, err := filepath.Abs(filepath.Dir(cluster.LocationOfOrigin)) + if err != nil { + return fmt.Errorf("could not determine the absolute path of config file %s: %v", cluster.LocationOfOrigin, err) + } + + if err := ResolvePaths(GetClusterFileReferences(cluster), base); err != nil { + return err + } + if err := RelativizePathWithNoBacksteps(GetClusterFileReferences(cluster), base); err != nil { + return err + } + + return nil +} + +// RelativizeAuthInfoLocalPaths first absolutizes the paths by calling ResolveLocalPaths. This assumes that any NEW path is already +// absolute, but any existing path will be resolved relative to LocationOfOrigin +func RelativizeAuthInfoLocalPaths(authInfo *clientcmdapi.AuthInfo) error { + if len(authInfo.LocationOfOrigin) == 0 { + return fmt.Errorf("no location of origin for %v", authInfo) + } + base, err := filepath.Abs(filepath.Dir(authInfo.LocationOfOrigin)) + if err != nil { + return fmt.Errorf("could not determine the absolute path of config file %s: %v", authInfo.LocationOfOrigin, err) + } + + if err := ResolvePaths(GetAuthInfoFileReferences(authInfo), base); err != nil { + return err + } + if err := RelativizePathWithNoBacksteps(GetAuthInfoFileReferences(authInfo), base); err != nil { + return err + } + + return nil +} + +func RelativizeConfigPaths(config *clientcmdapi.Config, base string) error { + return RelativizePathWithNoBacksteps(GetConfigFileReferences(config), base) +} + +func ResolveConfigPaths(config *clientcmdapi.Config, base string) error { + return ResolvePaths(GetConfigFileReferences(config), base) +} + +func GetConfigFileReferences(config *clientcmdapi.Config) []*string { + refs := []*string{} + + for _, cluster := range config.Clusters { + refs = append(refs, GetClusterFileReferences(cluster)...) + } + for _, authInfo := range config.AuthInfos { + refs = append(refs, GetAuthInfoFileReferences(authInfo)...) + } + + return refs +} + +func GetClusterFileReferences(cluster *clientcmdapi.Cluster) []*string { + return []*string{&cluster.CertificateAuthority} +} + +func GetAuthInfoFileReferences(authInfo *clientcmdapi.AuthInfo) []*string { + s := []*string{&authInfo.ClientCertificate, &authInfo.ClientKey, &authInfo.TokenFile} + // Only resolve exec command if it isn't PATH based. + if authInfo.Exec != nil && strings.ContainsRune(authInfo.Exec.Command, filepath.Separator) { + s = append(s, &authInfo.Exec.Command) + } + return s +} + +// ResolvePaths updates the given refs to be absolute paths, relative to the given base directory +func ResolvePaths(refs []*string, base string) error { + for _, ref := range refs { + // Don't resolve empty paths + if len(*ref) > 0 { + // Don't resolve absolute paths + if !filepath.IsAbs(*ref) { + *ref = filepath.Join(base, *ref) + } + } + } + return nil +} + +// RelativizePathWithNoBacksteps updates the given refs to be relative paths, relative to the given base directory as long as they do not require backsteps. +// Any path requiring a backstep is left as-is as long it is absolute. 
Any non-absolute path that can't be relativized produces an error +func RelativizePathWithNoBacksteps(refs []*string, base string) error { + for _, ref := range refs { + // Don't relativize empty paths + if len(*ref) > 0 { + rel, err := MakeRelative(*ref, base) + if err != nil { + return err + } + + // if we have a backstep, don't mess with the path + if strings.HasPrefix(rel, "../") { + if filepath.IsAbs(*ref) { + continue + } + + return fmt.Errorf("%v requires backsteps and is not absolute", *ref) + } + + *ref = rel + } + } + return nil +} + +func MakeRelative(path, base string) (string, error) { + if len(path) > 0 { + rel, err := filepath.Rel(base, path) + if err != nil { + return path, err + } + return rel, nil + } + return path, nil +} + +// deduplicate removes any duplicated values and returns a new slice, keeping the order unchanged +func deduplicate(s []string) []string { + encountered := map[string]bool{} + ret := make([]string, 0) + for i := range s { + if encountered[s[i]] { + continue + } + encountered[s[i]] = true + ret = append(ret, s[i]) + } + return ret +} diff --git a/vendor/k8s.io/client-go/tools/clientcmd/merged_client_builder.go b/vendor/k8s.io/client-go/tools/clientcmd/merged_client_builder.go new file mode 100644 index 000000000..05038133b --- /dev/null +++ b/vendor/k8s.io/client-go/tools/clientcmd/merged_client_builder.go @@ -0,0 +1,168 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package clientcmd + +import ( + "io" + "sync" + + "github.com/golang/glog" + + restclient "k8s.io/client-go/rest" + clientcmdapi "k8s.io/client-go/tools/clientcmd/api" +) + +// DeferredLoadingClientConfig is a ClientConfig interface that is backed by a client config loader. +// It is used in cases where the loading rules may change after you've instantiated them and you want to be sure that +// the most recent rules are used. This is useful in cases where you bind flags to loading rule parameters before +// the parse happens and you want your calling code to be ignorant of how the values are being mutated to avoid +// passing extraneous information down a call stack +type DeferredLoadingClientConfig struct { + loader ClientConfigLoader + overrides *ConfigOverrides + fallbackReader io.Reader + + clientConfig ClientConfig + loadingLock sync.Mutex + + // provided for testing + icc InClusterConfig +} + +// InClusterConfig abstracts details of whether the client is running in a cluster for testing. 
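+//
+// [editor's note] A minimal sketch of the deferred-loading flow described
+// above; the names are from this package, the wiring is illustrative:
+//
+//	rules := NewDefaultClientConfigLoadingRules()
+//	cc := NewNonInteractiveDeferredLoadingClientConfig(rules, &ConfigOverrides{})
+//	restCfg, err := cc.ClientConfig() // falls back to in-cluster config when the merged config equals the defaults
+//	if err != nil { /* handle */ }
+//	_ = restCfg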
+type InClusterConfig interface { + ClientConfig + Possible() bool +} + +// NewNonInteractiveDeferredLoadingClientConfig creates a ConfigClientClientConfig using the passed context name +func NewNonInteractiveDeferredLoadingClientConfig(loader ClientConfigLoader, overrides *ConfigOverrides) ClientConfig { + return &DeferredLoadingClientConfig{loader: loader, overrides: overrides, icc: &inClusterClientConfig{overrides: overrides}} +} + +// NewInteractiveDeferredLoadingClientConfig creates a ConfigClientClientConfig using the passed context name and the fallback auth reader +func NewInteractiveDeferredLoadingClientConfig(loader ClientConfigLoader, overrides *ConfigOverrides, fallbackReader io.Reader) ClientConfig { + return &DeferredLoadingClientConfig{loader: loader, overrides: overrides, icc: &inClusterClientConfig{overrides: overrides}, fallbackReader: fallbackReader} +} + +func (config *DeferredLoadingClientConfig) createClientConfig() (ClientConfig, error) { + if config.clientConfig == nil { + config.loadingLock.Lock() + defer config.loadingLock.Unlock() + + if config.clientConfig == nil { + mergedConfig, err := config.loader.Load() + if err != nil { + return nil, err + } + + var mergedClientConfig ClientConfig + if config.fallbackReader != nil { + mergedClientConfig = NewInteractiveClientConfig(*mergedConfig, config.overrides.CurrentContext, config.overrides, config.fallbackReader, config.loader) + } else { + mergedClientConfig = NewNonInteractiveClientConfig(*mergedConfig, config.overrides.CurrentContext, config.overrides, config.loader) + } + + config.clientConfig = mergedClientConfig + } + } + + return config.clientConfig, nil +} + +func (config *DeferredLoadingClientConfig) RawConfig() (clientcmdapi.Config, error) { + mergedConfig, err := config.createClientConfig() + if err != nil { + return clientcmdapi.Config{}, err + } + + return mergedConfig.RawConfig() +} + +// ClientConfig implements ClientConfig +func (config *DeferredLoadingClientConfig) ClientConfig() (*restclient.Config, error) { + mergedClientConfig, err := config.createClientConfig() + if err != nil { + return nil, err + } + + // load the configuration and return on non-empty errors and if the + // content differs from the default config + mergedConfig, err := mergedClientConfig.ClientConfig() + switch { + case err != nil: + if !IsEmptyConfig(err) { + // return on any error except empty config + return nil, err + } + case mergedConfig != nil: + // the configuration is valid, but if this is equal to the defaults we should try + // in-cluster configuration + if !config.loader.IsDefaultConfig(mergedConfig) { + return mergedConfig, nil + } + } + + // check for in-cluster configuration and use it + if config.icc.Possible() { + glog.V(4).Infof("Using in-cluster configuration") + return config.icc.ClientConfig() + } + + // return the result of the merged client config + return mergedConfig, err +} + +// Namespace implements KubeConfig +func (config *DeferredLoadingClientConfig) Namespace() (string, bool, error) { + mergedKubeConfig, err := config.createClientConfig() + if err != nil { + return "", false, err + } + + ns, overridden, err := mergedKubeConfig.Namespace() + // if we get an error and it is not empty config, or if the merged config defined an explicit namespace, or + // if in-cluster config is not possible, return immediately + if (err != nil && !IsEmptyConfig(err)) || overridden || !config.icc.Possible() { + // return on any error except empty config + return ns, overridden, err + } + + if len(ns) > 0 { + // if we 
got a non-default namespace from the kubeconfig, use it + if ns != "default" { + return ns, false, nil + } + + // if we got a default namespace, determine whether it was explicit or implicit + if raw, err := mergedKubeConfig.RawConfig(); err == nil { + if context := raw.Contexts[raw.CurrentContext]; context != nil && len(context.Namespace) > 0 { + return ns, false, nil + } + } + } + + glog.V(4).Infof("Using in-cluster namespace") + + // allow the namespace from the service account token directory to be used. + return config.icc.Namespace() +} + +// ConfigAccess implements ClientConfig +func (config *DeferredLoadingClientConfig) ConfigAccess() ConfigAccess { + return config.loader +} diff --git a/vendor/k8s.io/client-go/tools/clientcmd/overrides.go b/vendor/k8s.io/client-go/tools/clientcmd/overrides.go new file mode 100644 index 000000000..bfca03284 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/clientcmd/overrides.go @@ -0,0 +1,247 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package clientcmd + +import ( + "strconv" + "strings" + + "github.com/spf13/pflag" + + clientcmdapi "k8s.io/client-go/tools/clientcmd/api" +) + +// ConfigOverrides holds values that should override whatever information is pulled from the actual Config object. You can't +// simply use an actual Config object, because Configs hold maps, but overrides are restricted to "at most one" +type ConfigOverrides struct { + AuthInfo clientcmdapi.AuthInfo + // ClusterDefaults are applied before the configured cluster info is loaded. + ClusterDefaults clientcmdapi.Cluster + ClusterInfo clientcmdapi.Cluster + Context clientcmdapi.Context + CurrentContext string + Timeout string +} + +// ConfigOverrideFlags holds the flag names to be used for binding command line flags. Notice that this structure tightly +// corresponds to ConfigOverrides +type ConfigOverrideFlags struct { + AuthOverrideFlags AuthOverrideFlags + ClusterOverrideFlags ClusterOverrideFlags + ContextOverrideFlags ContextOverrideFlags + CurrentContext FlagInfo + Timeout FlagInfo +} + +// AuthOverrideFlags holds the flag names to be used for binding command line flags for AuthInfo objects +type AuthOverrideFlags struct { + ClientCertificate FlagInfo + ClientKey FlagInfo + Token FlagInfo + Impersonate FlagInfo + ImpersonateGroups FlagInfo + Username FlagInfo + Password FlagInfo +} + +// ContextOverrideFlags holds the flag names to be used for binding command line flags for Cluster objects +type ContextOverrideFlags struct { + ClusterName FlagInfo + AuthInfoName FlagInfo + Namespace FlagInfo +} + +// ClusterOverride holds the flag names to be used for binding command line flags for Cluster objects +type ClusterOverrideFlags struct { + APIServer FlagInfo + APIVersion FlagInfo + CertificateAuthority FlagInfo + InsecureSkipTLSVerify FlagInfo +} + +// FlagInfo contains information about how to register a flag. 
This struct is useful if you want to provide a way for an extender to +// get back a set of recommended flag names, descriptions, and defaults, but allow for customization by an extender. This makes for +// coherent extension, without full prescription +type FlagInfo struct { + // LongName is the long string for a flag. If this is empty, then the flag will not be bound + LongName string + // ShortName is the single character for a flag. If this is empty, then there will be no short flag + ShortName string + // Default is the default value for the flag + Default string + // Description is the description for the flag + Description string +} + +// AddSecretAnnotation add secret flag to Annotation. +func (f FlagInfo) AddSecretAnnotation(flags *pflag.FlagSet) FlagInfo { + flags.SetAnnotation(f.LongName, "classified", []string{"true"}) + return f +} + +// BindStringFlag binds the flag based on the provided info. If LongName == "", nothing is registered +func (f FlagInfo) BindStringFlag(flags *pflag.FlagSet, target *string) FlagInfo { + // you can't register a flag without a long name + if len(f.LongName) > 0 { + flags.StringVarP(target, f.LongName, f.ShortName, f.Default, f.Description) + } + return f +} + +// BindTransformingStringFlag binds the flag based on the provided info. If LongName == "", nothing is registered +func (f FlagInfo) BindTransformingStringFlag(flags *pflag.FlagSet, target *string, transformer func(string) (string, error)) FlagInfo { + // you can't register a flag without a long name + if len(f.LongName) > 0 { + flags.VarP(newTransformingStringValue(f.Default, target, transformer), f.LongName, f.ShortName, f.Description) + } + return f +} + +// BindStringSliceFlag binds the flag based on the provided info. If LongName == "", nothing is registered +func (f FlagInfo) BindStringArrayFlag(flags *pflag.FlagSet, target *[]string) FlagInfo { + // you can't register a flag without a long name + if len(f.LongName) > 0 { + sliceVal := []string{} + if len(f.Default) > 0 { + sliceVal = []string{f.Default} + } + flags.StringArrayVarP(target, f.LongName, f.ShortName, sliceVal, f.Description) + } + return f +} + +// BindBoolFlag binds the flag based on the provided info. If LongName == "", nothing is registered +func (f FlagInfo) BindBoolFlag(flags *pflag.FlagSet, target *bool) FlagInfo { + // you can't register a flag without a long name + if len(f.LongName) > 0 { + // try to parse Default as a bool. 
If it fails, assume false + boolVal, err := strconv.ParseBool(f.Default) + if err != nil { + boolVal = false + } + + flags.BoolVarP(target, f.LongName, f.ShortName, boolVal, f.Description) + } + return f +} + +const ( + FlagClusterName = "cluster" + FlagAuthInfoName = "user" + FlagContext = "context" + FlagNamespace = "namespace" + FlagAPIServer = "server" + FlagInsecure = "insecure-skip-tls-verify" + FlagCertFile = "client-certificate" + FlagKeyFile = "client-key" + FlagCAFile = "certificate-authority" + FlagEmbedCerts = "embed-certs" + FlagBearerToken = "token" + FlagImpersonate = "as" + FlagImpersonateGroup = "as-group" + FlagUsername = "username" + FlagPassword = "password" + FlagTimeout = "request-timeout" +) + +// RecommendedConfigOverrideFlags is a convenience method to return recommended flag names prefixed with a string of your choosing +func RecommendedConfigOverrideFlags(prefix string) ConfigOverrideFlags { + return ConfigOverrideFlags{ + AuthOverrideFlags: RecommendedAuthOverrideFlags(prefix), + ClusterOverrideFlags: RecommendedClusterOverrideFlags(prefix), + ContextOverrideFlags: RecommendedContextOverrideFlags(prefix), + + CurrentContext: FlagInfo{prefix + FlagContext, "", "", "The name of the kubeconfig context to use"}, + Timeout: FlagInfo{prefix + FlagTimeout, "", "0", "The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests."}, + } +} + +// RecommendedAuthOverrideFlags is a convenience method to return recommended flag names prefixed with a string of your choosing +func RecommendedAuthOverrideFlags(prefix string) AuthOverrideFlags { + return AuthOverrideFlags{ + ClientCertificate: FlagInfo{prefix + FlagCertFile, "", "", "Path to a client certificate file for TLS"}, + ClientKey: FlagInfo{prefix + FlagKeyFile, "", "", "Path to a client key file for TLS"}, + Token: FlagInfo{prefix + FlagBearerToken, "", "", "Bearer token for authentication to the API server"}, + Impersonate: FlagInfo{prefix + FlagImpersonate, "", "", "Username to impersonate for the operation"}, + ImpersonateGroups: FlagInfo{prefix + FlagImpersonateGroup, "", "", "Group to impersonate for the operation, this flag can be repeated to specify multiple groups."}, + Username: FlagInfo{prefix + FlagUsername, "", "", "Username for basic authentication to the API server"}, + Password: FlagInfo{prefix + FlagPassword, "", "", "Password for basic authentication to the API server"}, + } +} + +// RecommendedClusterOverrideFlags is a convenience method to return recommended flag names prefixed with a string of your choosing +func RecommendedClusterOverrideFlags(prefix string) ClusterOverrideFlags { + return ClusterOverrideFlags{ + APIServer: FlagInfo{prefix + FlagAPIServer, "", "", "The address and port of the Kubernetes API server"}, + CertificateAuthority: FlagInfo{prefix + FlagCAFile, "", "", "Path to a cert file for the certificate authority"}, + InsecureSkipTLSVerify: FlagInfo{prefix + FlagInsecure, "", "false", "If true, the server's certificate will not be checked for validity. 
This will make your HTTPS connections insecure"}, + } +} + +// RecommendedContextOverrideFlags is a convenience method to return recommended flag names prefixed with a string of your choosing +func RecommendedContextOverrideFlags(prefix string) ContextOverrideFlags { + return ContextOverrideFlags{ + ClusterName: FlagInfo{prefix + FlagClusterName, "", "", "The name of the kubeconfig cluster to use"}, + AuthInfoName: FlagInfo{prefix + FlagAuthInfoName, "", "", "The name of the kubeconfig user to use"}, + Namespace: FlagInfo{prefix + FlagNamespace, "n", "", "If present, the namespace scope for this CLI request"}, + } +} + +// BindOverrideFlags is a convenience method to bind the specified flags to their associated variables +func BindOverrideFlags(overrides *ConfigOverrides, flags *pflag.FlagSet, flagNames ConfigOverrideFlags) { + BindAuthInfoFlags(&overrides.AuthInfo, flags, flagNames.AuthOverrideFlags) + BindClusterFlags(&overrides.ClusterInfo, flags, flagNames.ClusterOverrideFlags) + BindContextFlags(&overrides.Context, flags, flagNames.ContextOverrideFlags) + flagNames.CurrentContext.BindStringFlag(flags, &overrides.CurrentContext) + flagNames.Timeout.BindStringFlag(flags, &overrides.Timeout) +} + +// BindAuthInfoFlags is a convenience method to bind the specified flags to their associated variables +func BindAuthInfoFlags(authInfo *clientcmdapi.AuthInfo, flags *pflag.FlagSet, flagNames AuthOverrideFlags) { + flagNames.ClientCertificate.BindStringFlag(flags, &authInfo.ClientCertificate).AddSecretAnnotation(flags) + flagNames.ClientKey.BindStringFlag(flags, &authInfo.ClientKey).AddSecretAnnotation(flags) + flagNames.Token.BindStringFlag(flags, &authInfo.Token).AddSecretAnnotation(flags) + flagNames.Impersonate.BindStringFlag(flags, &authInfo.Impersonate).AddSecretAnnotation(flags) + flagNames.ImpersonateGroups.BindStringArrayFlag(flags, &authInfo.ImpersonateGroups).AddSecretAnnotation(flags) + flagNames.Username.BindStringFlag(flags, &authInfo.Username).AddSecretAnnotation(flags) + flagNames.Password.BindStringFlag(flags, &authInfo.Password).AddSecretAnnotation(flags) +} + +// BindClusterFlags is a convenience method to bind the specified flags to their associated variables +func BindClusterFlags(clusterInfo *clientcmdapi.Cluster, flags *pflag.FlagSet, flagNames ClusterOverrideFlags) { + flagNames.APIServer.BindStringFlag(flags, &clusterInfo.Server) + flagNames.CertificateAuthority.BindStringFlag(flags, &clusterInfo.CertificateAuthority) + flagNames.InsecureSkipTLSVerify.BindBoolFlag(flags, &clusterInfo.InsecureSkipTLSVerify) +} + +// BindFlags is a convenience method to bind the specified flags to their associated variables +func BindContextFlags(contextInfo *clientcmdapi.Context, flags *pflag.FlagSet, flagNames ContextOverrideFlags) { + flagNames.ClusterName.BindStringFlag(flags, &contextInfo.Cluster) + flagNames.AuthInfoName.BindStringFlag(flags, &contextInfo.AuthInfo) + flagNames.Namespace.BindTransformingStringFlag(flags, &contextInfo.Namespace, RemoveNamespacesPrefix) +} + +// RemoveNamespacesPrefix is a transformer that strips "ns/", "namespace/" and "namespaces/" prefixes case-insensitively +func RemoveNamespacesPrefix(value string) (string, error) { + for _, prefix := range []string{"namespaces/", "namespace/", "ns/"} { + if len(value) > len(prefix) && strings.EqualFold(value[0:len(prefix)], prefix) { + value = value[len(prefix):] + break + } + } + return value, nil +} diff --git a/vendor/k8s.io/client-go/tools/clientcmd/validation.go 
b/vendor/k8s.io/client-go/tools/clientcmd/validation.go new file mode 100644 index 000000000..629c0b30a --- /dev/null +++ b/vendor/k8s.io/client-go/tools/clientcmd/validation.go @@ -0,0 +1,298 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package clientcmd + +import ( + "errors" + "fmt" + "os" + "reflect" + "strings" + + utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apimachinery/pkg/util/validation" + clientcmdapi "k8s.io/client-go/tools/clientcmd/api" +) + +var ( + ErrNoContext = errors.New("no context chosen") + ErrEmptyConfig = errors.New("no configuration has been provided") + // message is for consistency with old behavior + ErrEmptyCluster = errors.New("cluster has no server defined") +) + +type errContextNotFound struct { + ContextName string +} + +func (e *errContextNotFound) Error() string { + return fmt.Sprintf("context was not found for specified context: %v", e.ContextName) +} + +// IsContextNotFound returns a boolean indicating whether the error is known to +// report that a context was not found +func IsContextNotFound(err error) bool { + if err == nil { + return false + } + if _, ok := err.(*errContextNotFound); ok || err == ErrNoContext { + return true + } + return strings.Contains(err.Error(), "context was not found for specified context") +} + +// IsEmptyConfig returns true if the provided error indicates the provided configuration +// is empty. +func IsEmptyConfig(err error) bool { + switch t := err.(type) { + case errConfigurationInvalid: + return len(t) == 1 && t[0] == ErrEmptyConfig + } + return err == ErrEmptyConfig +} + +// errConfigurationInvalid is a set of errors indicating the configuration is invalid. +type errConfigurationInvalid []error + +// errConfigurationInvalid implements error and Aggregate +var _ error = errConfigurationInvalid{} +var _ utilerrors.Aggregate = errConfigurationInvalid{} + +func newErrConfigurationInvalid(errs []error) error { + switch len(errs) { + case 0: + return nil + default: + return errConfigurationInvalid(errs) + } +} + +// Error implements the error interface +func (e errConfigurationInvalid) Error() string { + return fmt.Sprintf("invalid configuration: %v", utilerrors.NewAggregate(e).Error()) +} + +// Errors implements the AggregateError interface +func (e errConfigurationInvalid) Errors() []error { + return e +} + +// IsConfigurationInvalid returns true if the provided error indicates the configuration is invalid. +func IsConfigurationInvalid(err error) bool { + switch err.(type) { + case *errContextNotFound, errConfigurationInvalid: + return true + } + return IsContextNotFound(err) +} + +// Validate checks for errors in the Config. It does not return early so that it can find as many errors as possible. 
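+//
+// [editor's note] Hedged usage sketch: validating a config produced by the
+// loader in this package (the path is hypothetical):
+//
+//	cfg, err := LoadFromFile("/tmp/kubeconfig")
+//	if err != nil { /* handle */ }
+//	if err := Validate(*cfg); err != nil {
+//		// the aggregate lists every problem found, e.g. a dangling CurrentContext
+//	}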
+func Validate(config clientcmdapi.Config) error { + validationErrors := make([]error, 0) + + if clientcmdapi.IsConfigEmpty(&config) { + return newErrConfigurationInvalid([]error{ErrEmptyConfig}) + } + + if len(config.CurrentContext) != 0 { + if _, exists := config.Contexts[config.CurrentContext]; !exists { + validationErrors = append(validationErrors, &errContextNotFound{config.CurrentContext}) + } + } + + for contextName, context := range config.Contexts { + validationErrors = append(validationErrors, validateContext(contextName, *context, config)...) + } + + for authInfoName, authInfo := range config.AuthInfos { + validationErrors = append(validationErrors, validateAuthInfo(authInfoName, *authInfo)...) + } + + for clusterName, clusterInfo := range config.Clusters { + validationErrors = append(validationErrors, validateClusterInfo(clusterName, *clusterInfo)...) + } + + return newErrConfigurationInvalid(validationErrors) +} + +// ConfirmUsable looks a particular context and determines if that particular part of the config is useable. There might still be errors in the config, +// but no errors in the sections requested or referenced. It does not return early so that it can find as many errors as possible. +func ConfirmUsable(config clientcmdapi.Config, passedContextName string) error { + validationErrors := make([]error, 0) + + if clientcmdapi.IsConfigEmpty(&config) { + return newErrConfigurationInvalid([]error{ErrEmptyConfig}) + } + + var contextName string + if len(passedContextName) != 0 { + contextName = passedContextName + } else { + contextName = config.CurrentContext + } + + if len(contextName) == 0 { + return ErrNoContext + } + + context, exists := config.Contexts[contextName] + if !exists { + validationErrors = append(validationErrors, &errContextNotFound{contextName}) + } + + if exists { + validationErrors = append(validationErrors, validateContext(contextName, *context, config)...) + validationErrors = append(validationErrors, validateAuthInfo(context.AuthInfo, *config.AuthInfos[context.AuthInfo])...) + validationErrors = append(validationErrors, validateClusterInfo(context.Cluster, *config.Clusters[context.Cluster])...) + } + + return newErrConfigurationInvalid(validationErrors) +} + +// validateClusterInfo looks for conflicts and errors in the cluster info +func validateClusterInfo(clusterName string, clusterInfo clientcmdapi.Cluster) []error { + validationErrors := make([]error, 0) + + emptyCluster := clientcmdapi.NewCluster() + if reflect.DeepEqual(*emptyCluster, clusterInfo) { + return []error{ErrEmptyCluster} + } + + if len(clusterInfo.Server) == 0 { + if len(clusterName) == 0 { + validationErrors = append(validationErrors, fmt.Errorf("default cluster has no server defined")) + } else { + validationErrors = append(validationErrors, fmt.Errorf("no server found for cluster %q", clusterName)) + } + } + // Make sure CA data and CA file aren't both specified + if len(clusterInfo.CertificateAuthority) != 0 && len(clusterInfo.CertificateAuthorityData) != 0 { + validationErrors = append(validationErrors, fmt.Errorf("certificate-authority-data and certificate-authority are both specified for %v. 
certificate-authority-data will override.", clusterName)) + } + if len(clusterInfo.CertificateAuthority) != 0 { + clientCertCA, err := os.Open(clusterInfo.CertificateAuthority) + defer clientCertCA.Close() + if err != nil { + validationErrors = append(validationErrors, fmt.Errorf("unable to read certificate-authority %v for %v due to %v", clusterInfo.CertificateAuthority, clusterName, err)) + } + } + + return validationErrors +} + +// validateAuthInfo looks for conflicts and errors in the auth info +func validateAuthInfo(authInfoName string, authInfo clientcmdapi.AuthInfo) []error { + validationErrors := make([]error, 0) + + usingAuthPath := false + methods := make([]string, 0, 3) + if len(authInfo.Token) != 0 { + methods = append(methods, "token") + } + if len(authInfo.Username) != 0 || len(authInfo.Password) != 0 { + methods = append(methods, "basicAuth") + } + + if len(authInfo.ClientCertificate) != 0 || len(authInfo.ClientCertificateData) != 0 { + // Make sure cert data and file aren't both specified + if len(authInfo.ClientCertificate) != 0 && len(authInfo.ClientCertificateData) != 0 { + validationErrors = append(validationErrors, fmt.Errorf("client-cert-data and client-cert are both specified for %v. client-cert-data will override.", authInfoName)) + } + // Make sure key data and file aren't both specified + if len(authInfo.ClientKey) != 0 && len(authInfo.ClientKeyData) != 0 { + validationErrors = append(validationErrors, fmt.Errorf("client-key-data and client-key are both specified for %v; client-key-data will override", authInfoName)) + } + // Make sure a key is specified + if len(authInfo.ClientKey) == 0 && len(authInfo.ClientKeyData) == 0 { + validationErrors = append(validationErrors, fmt.Errorf("client-key-data or client-key must be specified for %v to use the clientCert authentication method.", authInfoName)) + } + + if len(authInfo.ClientCertificate) != 0 { + clientCertFile, err := os.Open(authInfo.ClientCertificate) + defer clientCertFile.Close() + if err != nil { + validationErrors = append(validationErrors, fmt.Errorf("unable to read client-cert %v for %v due to %v", authInfo.ClientCertificate, authInfoName, err)) + } + } + if len(authInfo.ClientKey) != 0 { + clientKeyFile, err := os.Open(authInfo.ClientKey) + defer clientKeyFile.Close() + if err != nil { + validationErrors = append(validationErrors, fmt.Errorf("unable to read client-key %v for %v due to %v", authInfo.ClientKey, authInfoName, err)) + } + } + } + + if authInfo.Exec != nil { + if authInfo.AuthProvider != nil { + validationErrors = append(validationErrors, fmt.Errorf("authProvider cannot be provided in combination with an exec plugin for %s", authInfoName)) + } + if len(authInfo.Exec.Command) == 0 { + validationErrors = append(validationErrors, fmt.Errorf("command must be specified for %v to use exec authentication plugin", authInfoName)) + } + if len(authInfo.Exec.APIVersion) == 0 { + validationErrors = append(validationErrors, fmt.Errorf("apiVersion must be specified for %v to use exec authentication plugin", authInfoName)) + } + for _, v := range authInfo.Exec.Env { + if len(v.Name) == 0 { + validationErrors = append(validationErrors, fmt.Errorf("env variable name must be specified for %v to use exec authentication plugin", authInfoName)) + } else if len(v.Value) == 0 { + validationErrors = append(validationErrors, fmt.Errorf("env variable %s value must be specified for %v to use exec authentication plugin", v.Name, authInfoName)) + } + } + } + + // authPath also provides information for the client to 
identify the server, so allow multiple auth methods in that case + if (len(methods) > 1) && (!usingAuthPath) { + validationErrors = append(validationErrors, fmt.Errorf("more than one authentication method found for %v; found %v, only one is allowed", authInfoName, methods)) + } + + // ImpersonateGroups or ImpersonateUserExtra should be requested with a user + if (len(authInfo.ImpersonateGroups) > 0 || len(authInfo.ImpersonateUserExtra) > 0) && (len(authInfo.Impersonate) == 0) { + validationErrors = append(validationErrors, fmt.Errorf("requesting groups or user-extra for %v without impersonating a user", authInfoName)) + } + return validationErrors +} + +// validateContext looks for errors in the context. It is not transitive, so errors in the reference authInfo or cluster configs are not included in this return +func validateContext(contextName string, context clientcmdapi.Context, config clientcmdapi.Config) []error { + validationErrors := make([]error, 0) + + if len(contextName) == 0 { + validationErrors = append(validationErrors, fmt.Errorf("empty context name for %#v is not allowed", context)) + } + + if len(context.AuthInfo) == 0 { + validationErrors = append(validationErrors, fmt.Errorf("user was not specified for context %q", contextName)) + } else if _, exists := config.AuthInfos[context.AuthInfo]; !exists { + validationErrors = append(validationErrors, fmt.Errorf("user %q was not found for context %q", context.AuthInfo, contextName)) + } + + if len(context.Cluster) == 0 { + validationErrors = append(validationErrors, fmt.Errorf("cluster was not specified for context %q", contextName)) + } else if _, exists := config.Clusters[context.Cluster]; !exists { + validationErrors = append(validationErrors, fmt.Errorf("cluster %q was not found for context %q", context.Cluster, contextName)) + } + + if len(context.Namespace) != 0 { + if len(validation.IsDNS1123Label(context.Namespace)) != 0 { + validationErrors = append(validationErrors, fmt.Errorf("namespace %q for context %q does not conform to the kubernetes DNS_LABEL rules", context.Namespace, contextName)) + } + } + + return validationErrors +} diff --git a/vendor/k8s.io/client-go/tools/pager/pager.go b/vendor/k8s.io/client-go/tools/pager/pager.go index d265db786..74ea3586a 100644 --- a/vendor/k8s.io/client-go/tools/pager/pager.go +++ b/vendor/k8s.io/client-go/tools/pager/pager.go @@ -25,11 +25,9 @@ import ( metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" ) const defaultPageSize = 500 -const defaultPageBufferSize = 10 // ListPageFunc returns a list object for the given list options. 
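//
// [editor's note] A sketch of a ListPageFunc backed by a typed client; the
// podClient value is an assumption, the pager wiring follows this file:
//
//	pager := New(func(ctx context.Context, opts metav1.ListOptions) (runtime.Object, error) {
//		return podClient.List(opts) // any chunk-aware List call fits here
//	})
//	list, err := pager.List(context.TODO(), metav1.ListOptions{})
//	if err != nil { /* handle */ }
//	_ = list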
type ListPageFunc func(ctx context.Context, opts metav1.ListOptions) (runtime.Object, error) @@ -50,9 +48,6 @@ type ListPager struct { PageFn ListPageFunc FullListIfExpired bool - - // Number of pages to buffer - PageBufferSize int32 } // New creates a new pager from the provided pager function using the default @@ -63,7 +58,6 @@ func New(fn ListPageFunc) *ListPager { PageSize: defaultPageSize, PageFn: fn, FullListIfExpired: true, - PageBufferSize: defaultPageBufferSize, } } @@ -79,12 +73,6 @@ func (p *ListPager) List(ctx context.Context, options metav1.ListOptions) (runti } var list *metainternalversion.List for { - select { - case <-ctx.Done(): - return nil, ctx.Err() - default: - } - obj, err := p.PageFn(ctx, options) if err != nil { if !errors.IsResourceExpired(err) || !p.FullListIfExpired { @@ -127,105 +115,3 @@ func (p *ListPager) List(ctx context.Context, options metav1.ListOptions) (runti options.Continue = m.GetContinue() } } - -// EachListItem fetches runtime.Object items using this ListPager and invokes fn on each item. If -// fn returns an error, processing stops and that error is returned. If fn does not return an error, -// any error encountered while retrieving the list from the server is returned. If the context -// cancels or times out, the context error is returned. Since the list is retrieved in paginated -// chunks, an "Expired" error (metav1.StatusReasonExpired) may be returned if the pagination list -// requests exceed the expiration limit of the apiserver being called. -// -// Items are retrieved in chunks from the server to reduce the impact on the server with up to -// ListPager.PageBufferSize chunks buffered concurrently in the background. -func (p *ListPager) EachListItem(ctx context.Context, options metav1.ListOptions, fn func(obj runtime.Object) error) error { - return p.eachListChunkBuffered(ctx, options, func(obj runtime.Object) error { - return meta.EachListItem(obj, fn) - }) -} - -// eachListChunkBuffered fetches runtimeObject list chunks using this ListPager and invokes fn on -// each list chunk. If fn returns an error, processing stops and that error is returned. If fn does -// not return an error, any error encountered while retrieving the list from the server is -// returned. If the context cancels or times out, the context error is returned. Since the list is -// retrieved in paginated chunks, an "Expired" error (metav1.StatusReasonExpired) may be returned if -// the pagination list requests exceed the expiration limit of the apiserver being called. -// -// Up to ListPager.PageBufferSize chunks are buffered concurrently in the background. -func (p *ListPager) eachListChunkBuffered(ctx context.Context, options metav1.ListOptions, fn func(obj runtime.Object) error) error { - if p.PageBufferSize < 0 { - return fmt.Errorf("ListPager.PageBufferSize must be >= 0, got %d", p.PageBufferSize) - } - - // Ensure background goroutine is stopped if this call exits before all list items are - // processed. Cancelation error from this deferred cancel call is never returned to caller; - // either the list result has already been sent to bgResultC or the fn error is returned and - // the cancelation error is discarded. 
- ctx, cancel := context.WithCancel(ctx) - defer cancel() - - chunkC := make(chan runtime.Object, p.PageBufferSize) - bgResultC := make(chan error, 1) - go func() { - defer utilruntime.HandleCrash() - - var err error - defer func() { - close(chunkC) - bgResultC <- err - }() - err = p.eachListChunk(ctx, options, func(chunk runtime.Object) error { - select { - case chunkC <- chunk: // buffer the chunk, this can block - case <-ctx.Done(): - return ctx.Err() - } - return nil - }) - }() - - for o := range chunkC { - err := fn(o) - if err != nil { - return err // any fn error should be returned immediately - } - } - // promote the results of our background goroutine to the foreground - return <-bgResultC -} - -// eachListChunk fetches runtimeObject list chunks using this ListPager and invokes fn on each list -// chunk. If fn returns an error, processing stops and that error is returned. If fn does not return -// an error, any error encountered while retrieving the list from the server is returned. If the -// context cancels or times out, the context error is returned. Since the list is retrieved in -// paginated chunks, an "Expired" error (metav1.StatusReasonExpired) may be returned if the -// pagination list requests exceed the expiration limit of the apiserver being called. -func (p *ListPager) eachListChunk(ctx context.Context, options metav1.ListOptions, fn func(obj runtime.Object) error) error { - if options.Limit == 0 { - options.Limit = p.PageSize - } - for { - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - - obj, err := p.PageFn(ctx, options) - if err != nil { - return err - } - m, err := meta.ListAccessor(obj) - if err != nil { - return fmt.Errorf("returned object must be a list: %v", err) - } - if err := fn(obj); err != nil { - return err - } - // if we have no more items, return. - if len(m.GetContinue()) == 0 { - return nil - } - // set the next loop up - options.Continue = m.GetContinue() - } -} diff --git a/vendor/k8s.io/client-go/tools/record/doc.go b/vendor/k8s.io/client-go/tools/record/doc.go new file mode 100644 index 000000000..657ddecbc --- /dev/null +++ b/vendor/k8s.io/client-go/tools/record/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package record has all client logic for recording and reporting events. +package record // import "k8s.io/client-go/tools/record" diff --git a/vendor/k8s.io/client-go/tools/record/event.go b/vendor/k8s.io/client-go/tools/record/event.go new file mode 100644 index 000000000..168dfa80c --- /dev/null +++ b/vendor/k8s.io/client-go/tools/record/event.go @@ -0,0 +1,322 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package record + +import ( + "fmt" + "math/rand" + "time" + + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/clock" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/watch" + restclient "k8s.io/client-go/rest" + ref "k8s.io/client-go/tools/reference" + + "net/http" + + "github.com/golang/glog" +) + +const maxTriesPerEvent = 12 + +var defaultSleepDuration = 10 * time.Second + +const maxQueuedEvents = 1000 + +// EventSink knows how to store events (client.Client implements it.) +// EventSink must respect the namespace that will be embedded in 'event'. +// It is assumed that EventSink will return the same sorts of errors as +// pkg/client's REST client. +type EventSink interface { + Create(event *v1.Event) (*v1.Event, error) + Update(event *v1.Event) (*v1.Event, error) + Patch(oldEvent *v1.Event, data []byte) (*v1.Event, error) +} + +// EventRecorder knows how to record events on behalf of an EventSource. +type EventRecorder interface { + // Event constructs an event from the given information and puts it in the queue for sending. + // 'object' is the object this event is about. Event will make a reference-- or you may also + // pass a reference to the object directly. + // 'type' of this event, and can be one of Normal, Warning. New types could be added in future + // 'reason' is the reason this event is generated. 'reason' should be short and unique; it + // should be in UpperCamelCase format (starting with a capital letter). "reason" will be used + // to automate handling of events, so imagine people writing switch statements to handle them. + // You want to make that easy. + // 'message' is intended to be human readable. + // + // The resulting event will be created in the same namespace as the reference object. + Event(object runtime.Object, eventtype, reason, message string) + + // Eventf is just like Event, but with Sprintf for the message field. + Eventf(object runtime.Object, eventtype, reason, messageFmt string, args ...interface{}) + + // PastEventf is just like Eventf, but with an option to specify the event's 'timestamp' field. + PastEventf(object runtime.Object, timestamp metav1.Time, eventtype, reason, messageFmt string, args ...interface{}) + + // AnnotatedEventf is just like eventf, but with annotations attached + AnnotatedEventf(object runtime.Object, annotations map[string]string, eventtype, reason, messageFmt string, args ...interface{}) +} + +// EventBroadcaster knows how to receive events and send them to any EventSink, watcher, or log. +type EventBroadcaster interface { + // StartEventWatcher starts sending events received from this EventBroadcaster to the given + // event handler function. The return value can be ignored or used to stop recording, if + // desired. + StartEventWatcher(eventHandler func(*v1.Event)) watch.Interface + + // StartRecordingToSink starts sending events received from this EventBroadcaster to the given + // sink. The return value can be ignored or used to stop recording, if desired. 
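+	//
+	// [editor's note] Typical wiring of the pieces in this file, sketched
+	// under assumptions (sink, scheme, and obj are placeholders):
+	//
+	//	broadcaster := NewBroadcaster()
+	//	stop := broadcaster.StartRecordingToSink(sink) // sink: any EventSink, e.g. a typed events client
+	//	recorder := broadcaster.NewRecorder(scheme, v1.EventSource{Component: "my-controller"})
+	//	recorder.Event(obj, v1.EventTypeNormal, "Synced", "object synced")
+	//	defer stop.Stop()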
+ StartRecordingToSink(sink EventSink) watch.Interface + + // StartLogging starts sending events received from this EventBroadcaster to the given logging + // function. The return value can be ignored or used to stop recording, if desired. + StartLogging(logf func(format string, args ...interface{})) watch.Interface + + // NewRecorder returns an EventRecorder that can be used to send events to this EventBroadcaster + // with the event source set to the given event source. + NewRecorder(scheme *runtime.Scheme, source v1.EventSource) EventRecorder +} + +// Creates a new event broadcaster. +func NewBroadcaster() EventBroadcaster { + return &eventBroadcasterImpl{watch.NewBroadcaster(maxQueuedEvents, watch.DropIfChannelFull), defaultSleepDuration} +} + +func NewBroadcasterForTests(sleepDuration time.Duration) EventBroadcaster { + return &eventBroadcasterImpl{watch.NewBroadcaster(maxQueuedEvents, watch.DropIfChannelFull), sleepDuration} +} + +type eventBroadcasterImpl struct { + *watch.Broadcaster + sleepDuration time.Duration +} + +// StartRecordingToSink starts sending events received from the specified eventBroadcaster to the given sink. +// The return value can be ignored or used to stop recording, if desired. +// TODO: make me an object with parameterizable queue length and retry interval +func (eventBroadcaster *eventBroadcasterImpl) StartRecordingToSink(sink EventSink) watch.Interface { + // The default math/rand package functions aren't thread safe, so create a + // new Rand object for each StartRecording call. + randGen := rand.New(rand.NewSource(time.Now().UnixNano())) + eventCorrelator := NewEventCorrelator(clock.RealClock{}) + return eventBroadcaster.StartEventWatcher( + func(event *v1.Event) { + recordToSink(sink, event, eventCorrelator, randGen, eventBroadcaster.sleepDuration) + }) +} + +func recordToSink(sink EventSink, event *v1.Event, eventCorrelator *EventCorrelator, randGen *rand.Rand, sleepDuration time.Duration) { + // Make a copy before modification, because there could be multiple listeners. + // Events are safe to copy like this. + eventCopy := *event + event = &eventCopy + result, err := eventCorrelator.EventCorrelate(event) + if err != nil { + utilruntime.HandleError(err) + } + if result.Skip { + return + } + tries := 0 + for { + if recordEvent(sink, result.Event, result.Patch, result.Event.Count > 1, eventCorrelator) { + break + } + tries++ + if tries >= maxTriesPerEvent { + glog.Errorf("Unable to write event '%#v' (retry limit exceeded!)", event) + break + } + // Randomize the first sleep so that various clients won't all be + // synced up if the master goes down. + if tries == 1 { + time.Sleep(time.Duration(float64(sleepDuration) * randGen.Float64())) + } else { + time.Sleep(sleepDuration) + } + } +} + +func isKeyNotFoundError(err error) bool { + statusErr, _ := err.(*errors.StatusError) + + if statusErr != nil && statusErr.Status().Code == http.StatusNotFound { + return true + } + + return false +} + +// recordEvent attempts to write event to a sink. It returns true if the event +// was successfully recorded or discarded, false if it should be retried. +// If updateExistingEvent is false, it creates a new event, otherwise it updates +// existing event. 
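+//
+// [editor's note] Restating the contract above as a sketch (values hypothetical):
+//
+//	ok := recordEvent(sink, result.Event, result.Patch, result.Event.Count > 1, correlator)
+//	// ok == true  -> recorded, or deliberately dropped (malformed, rejected, already exists)
+//	// ok == false -> transport-level failure; recordToSink sleeps and retries up to maxTriesPerEvent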
+func recordEvent(sink EventSink, event *v1.Event, patch []byte, updateExistingEvent bool, eventCorrelator *EventCorrelator) bool { + var newEvent *v1.Event + var err error + if updateExistingEvent { + newEvent, err = sink.Patch(event, patch) + } + // Update can fail because the event may have been removed and it no longer exists. + if !updateExistingEvent || (updateExistingEvent && isKeyNotFoundError(err)) { + // Making sure that ResourceVersion is empty on creation + event.ResourceVersion = "" + newEvent, err = sink.Create(event) + } + if err == nil { + // we need to update our event correlator with the server returned state to handle name/resourceversion + eventCorrelator.UpdateState(newEvent) + return true + } + + // If we can't contact the server, then hold everything while we keep trying. + // Otherwise, something about the event is malformed and we should abandon it. + switch err.(type) { + case *restclient.RequestConstructionError: + // We will construct the request the same next time, so don't keep trying. + glog.Errorf("Unable to construct event '%#v': '%v' (will not retry!)", event, err) + return true + case *errors.StatusError: + if errors.IsAlreadyExists(err) { + glog.V(5).Infof("Server rejected event '%#v': '%v' (will not retry!)", event, err) + } else { + glog.Errorf("Server rejected event '%#v': '%v' (will not retry!)", event, err) + } + return true + case *errors.UnexpectedObjectError: + // We don't expect this; it implies the server's response didn't match a + // known pattern. Go ahead and retry. + default: + // This case includes actual http transport errors. Go ahead and retry. + } + glog.Errorf("Unable to write event: '%v' (may retry after sleeping)", err) + return false +} + +// StartLogging starts sending events received from this EventBroadcaster to the given logging function. +// The return value can be ignored or used to stop recording, if desired. +func (eventBroadcaster *eventBroadcasterImpl) StartLogging(logf func(format string, args ...interface{})) watch.Interface { + return eventBroadcaster.StartEventWatcher( + func(e *v1.Event) { + logf("Event(%#v): type: '%v' reason: '%v' %v", e.InvolvedObject, e.Type, e.Reason, e.Message) + }) +} + +// StartEventWatcher starts sending events received from this EventBroadcaster to the given event handler function. +// The return value can be ignored or used to stop recording, if desired. +func (eventBroadcaster *eventBroadcasterImpl) StartEventWatcher(eventHandler func(*v1.Event)) watch.Interface { + watcher := eventBroadcaster.Watch() + go func() { + defer utilruntime.HandleCrash() + for watchEvent := range watcher.ResultChan() { + event, ok := watchEvent.Object.(*v1.Event) + if !ok { + // This is all local, so there's no reason this should + // ever happen. + continue + } + eventHandler(event) + } + }() + return watcher +} + +// NewRecorder returns an EventRecorder that records events with the given event source. 
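+//
+// A minimal usage sketch (illustrative only; scheme, sink, and obj are
+// assumptions supplied by the caller, not names defined in this package):
+//
+//	broadcaster := NewBroadcaster()
+//	defer broadcaster.StartRecordingToSink(sink).Stop()
+//	recorder := broadcaster.NewRecorder(scheme, v1.EventSource{Component: "my-controller"})
+//	recorder.Event(obj, v1.EventTypeNormal, "Synced", "object synced successfully")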
+func (eventBroadcaster *eventBroadcasterImpl) NewRecorder(scheme *runtime.Scheme, source v1.EventSource) EventRecorder { + return &recorderImpl{scheme, source, eventBroadcaster.Broadcaster, clock.RealClock{}} +} + +type recorderImpl struct { + scheme *runtime.Scheme + source v1.EventSource + *watch.Broadcaster + clock clock.Clock +} + +func (recorder *recorderImpl) generateEvent(object runtime.Object, annotations map[string]string, timestamp metav1.Time, eventtype, reason, message string) { + ref, err := ref.GetReference(recorder.scheme, object) + if err != nil { + glog.Errorf("Could not construct reference to: '%#v' due to: '%v'. Will not report event: '%v' '%v' '%v'", object, err, eventtype, reason, message) + return + } + + if !validateEventType(eventtype) { + glog.Errorf("Unsupported event type: '%v'", eventtype) + return + } + + event := recorder.makeEvent(ref, annotations, eventtype, reason, message) + event.Source = recorder.source + + go func() { + // NOTE: events should be a non-blocking operation + defer utilruntime.HandleCrash() + recorder.Action(watch.Added, event) + }() +} + +func validateEventType(eventtype string) bool { + switch eventtype { + case v1.EventTypeNormal, v1.EventTypeWarning: + return true + } + return false +} + +func (recorder *recorderImpl) Event(object runtime.Object, eventtype, reason, message string) { + recorder.generateEvent(object, nil, metav1.Now(), eventtype, reason, message) +} + +func (recorder *recorderImpl) Eventf(object runtime.Object, eventtype, reason, messageFmt string, args ...interface{}) { + recorder.Event(object, eventtype, reason, fmt.Sprintf(messageFmt, args...)) +} + +func (recorder *recorderImpl) PastEventf(object runtime.Object, timestamp metav1.Time, eventtype, reason, messageFmt string, args ...interface{}) { + recorder.generateEvent(object, nil, timestamp, eventtype, reason, fmt.Sprintf(messageFmt, args...)) +} + +func (recorder *recorderImpl) AnnotatedEventf(object runtime.Object, annotations map[string]string, eventtype, reason, messageFmt string, args ...interface{}) { + recorder.generateEvent(object, annotations, metav1.Now(), eventtype, reason, fmt.Sprintf(messageFmt, args...)) +} + +func (recorder *recorderImpl) makeEvent(ref *v1.ObjectReference, annotations map[string]string, eventtype, reason, message string) *v1.Event { + t := metav1.Time{Time: recorder.clock.Now()} + namespace := ref.Namespace + if namespace == "" { + namespace = metav1.NamespaceDefault + } + return &v1.Event{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%v.%x", ref.Name, t.UnixNano()), + Namespace: namespace, + Annotations: annotations, + }, + InvolvedObject: *ref, + Reason: reason, + Message: message, + FirstTimestamp: t, + LastTimestamp: t, + Count: 1, + Type: eventtype, + } +} diff --git a/vendor/k8s.io/client-go/tools/record/events_cache.go b/vendor/k8s.io/client-go/tools/record/events_cache.go new file mode 100644 index 000000000..a42084f3a --- /dev/null +++ b/vendor/k8s.io/client-go/tools/record/events_cache.go @@ -0,0 +1,462 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package record + +import ( + "encoding/json" + "fmt" + "strings" + "sync" + "time" + + "github.com/golang/groupcache/lru" + + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/clock" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/strategicpatch" + "k8s.io/client-go/util/flowcontrol" +) + +const ( + maxLruCacheEntries = 4096 + + // if we see the same event that varies only by message + // more than 10 times in a 10 minute period, aggregate the event + defaultAggregateMaxEvents = 10 + defaultAggregateIntervalInSeconds = 600 + + // by default, allow a source to send 25 events about an object + // but control the refill rate to 1 new event every 5 minutes + // this helps control the long-tail of events for things that are always + // unhealthy + defaultSpamBurst = 25 + defaultSpamQPS = 1. / 300. +) + +// getEventKey builds unique event key based on source, involvedObject, reason, message +func getEventKey(event *v1.Event) string { + return strings.Join([]string{ + event.Source.Component, + event.Source.Host, + event.InvolvedObject.Kind, + event.InvolvedObject.Namespace, + event.InvolvedObject.Name, + event.InvolvedObject.FieldPath, + string(event.InvolvedObject.UID), + event.InvolvedObject.APIVersion, + event.Type, + event.Reason, + event.Message, + }, + "") +} + +// getSpamKey builds unique event key based on source, involvedObject +func getSpamKey(event *v1.Event) string { + return strings.Join([]string{ + event.Source.Component, + event.Source.Host, + event.InvolvedObject.Kind, + event.InvolvedObject.Namespace, + event.InvolvedObject.Name, + string(event.InvolvedObject.UID), + event.InvolvedObject.APIVersion, + }, + "") +} + +// EventFilterFunc is a function that returns true if the event should be skipped +type EventFilterFunc func(event *v1.Event) bool + +// EventSourceObjectSpamFilter is responsible for throttling +// the amount of events a source and object can produce. +type EventSourceObjectSpamFilter struct { + sync.RWMutex + + // the cache that manages last synced state + cache *lru.Cache + + // burst is the amount of events we allow per source + object + burst int + + // qps is the refill rate of the token bucket in queries per second + qps float32 + + // clock is used to allow for testing over a time interval + clock clock.Clock +} + +// NewEventSourceObjectSpamFilter allows burst events from a source about an object with the specified qps refill. +func NewEventSourceObjectSpamFilter(lruCacheSize, burst int, qps float32, clock clock.Clock) *EventSourceObjectSpamFilter { + return &EventSourceObjectSpamFilter{ + cache: lru.New(lruCacheSize), + burst: burst, + qps: qps, + clock: clock, + } +} + +// spamRecord holds data used to perform spam filtering decisions. +type spamRecord struct { + // rateLimiter controls the rate of events about this object + rateLimiter flowcontrol.RateLimiter +} + +// Filter controls that a given source+object are not exceeding the allowed rate. +func (f *EventSourceObjectSpamFilter) Filter(event *v1.Event) bool { + var record spamRecord + + // controls our cached information about this event (source+object) + eventKey := getSpamKey(event) + + // do we have a record of similar events in our cache? 
+	f.Lock()
+	defer f.Unlock()
+	value, found := f.cache.Get(eventKey)
+	if found {
+		record = value.(spamRecord)
+	}
+
+	// verify we have a rate limiter for this record
+	if record.rateLimiter == nil {
+		record.rateLimiter = flowcontrol.NewTokenBucketRateLimiterWithClock(f.qps, f.burst, f.clock)
+	}
+
+	// ensure we have available rate
+	filter := !record.rateLimiter.TryAccept()
+
+	// update the cache
+	f.cache.Add(eventKey, record)
+
+	return filter
+}
+
+// EventAggregatorKeyFunc is responsible for grouping events for aggregation.
+// It returns a tuple of the following:
+// aggregateKey - key that identifies the aggregate group to bucket this event
+// localKey - key that makes this event unique within the local group
+type EventAggregatorKeyFunc func(event *v1.Event) (aggregateKey string, localKey string)
+
+// EventAggregatorByReasonFunc aggregates events by exact match on event.Source, event.InvolvedObject, event.Type and event.Reason
+func EventAggregatorByReasonFunc(event *v1.Event) (string, string) {
+	return strings.Join([]string{
+		event.Source.Component,
+		event.Source.Host,
+		event.InvolvedObject.Kind,
+		event.InvolvedObject.Namespace,
+		event.InvolvedObject.Name,
+		string(event.InvolvedObject.UID),
+		event.InvolvedObject.APIVersion,
+		event.Type,
+		event.Reason,
+	},
+		""), event.Message
+}
+
+// EventAggregatorMessageFunc is responsible for producing an aggregation message
+type EventAggregatorMessageFunc func(event *v1.Event) string
+
+// EventAggregatorByReasonMessageFunc returns an aggregate message by prefixing the incoming message
+func EventAggregatorByReasonMessageFunc(event *v1.Event) string {
+	return "(combined from similar events): " + event.Message
+}
+
+// EventAggregator identifies similar events and aggregates them into a single event
+type EventAggregator struct {
+	sync.RWMutex
+
+	// The cache that manages aggregation state
+	cache *lru.Cache
+
+	// The function that groups events for aggregation
+	keyFunc EventAggregatorKeyFunc
+
+	// The function that generates a message for an aggregate event
+	messageFunc EventAggregatorMessageFunc
+
+	// The maximum number of events in the specified interval before aggregation occurs
+	maxEvents uint
+
+	// The amount of time in seconds that must transpire since the last occurrence of a similar event before it's considered new
+	maxIntervalInSeconds uint
+
+	// clock is used to allow for testing over a time interval
+	clock clock.Clock
+}
+
+// NewEventAggregator returns a new instance of an EventAggregator
+func NewEventAggregator(lruCacheSize int, keyFunc EventAggregatorKeyFunc, messageFunc EventAggregatorMessageFunc,
+	maxEvents int, maxIntervalInSeconds int, clock clock.Clock) *EventAggregator {
+	return &EventAggregator{
+		cache:                lru.New(lruCacheSize),
+		keyFunc:              keyFunc,
+		messageFunc:          messageFunc,
+		maxEvents:            uint(maxEvents),
+		maxIntervalInSeconds: uint(maxIntervalInSeconds),
+		clock:                clock,
+	}
+}
+
+// aggregateRecord holds data used to perform aggregation decisions
+type aggregateRecord struct {
+	// we track the number of unique local keys we have seen in the aggregate set to know when to actually aggregate
+	// if the size of this set exceeds the max, we know we need to aggregate
+	localKeys sets.String
+	// The last time at which the aggregate was recorded
+	lastTimestamp metav1.Time
+}
+
+// EventAggregate checks if a similar event has been seen according to the
+// aggregation configuration (max events, max interval, etc.) and returns:
+//
+// - The (potentially modified) event that should be created
+// - The cache key for the event, for correlation purposes. This will be set to
+//   the full key for normal events, and to the aggregate key produced by the
+//   EventAggregatorKeyFunc for aggregate events.
+func (e *EventAggregator) EventAggregate(newEvent *v1.Event) (*v1.Event, string) {
+	now := metav1.NewTime(e.clock.Now())
+	var record aggregateRecord
+	// eventKey is the full cache key for this event
+	eventKey := getEventKey(newEvent)
+	// aggregateKey is for the aggregate event, if one is needed.
+	aggregateKey, localKey := e.keyFunc(newEvent)
+
+	// Do we have a record of similar events in our cache?
+	e.Lock()
+	defer e.Unlock()
+	value, found := e.cache.Get(aggregateKey)
+	if found {
+		record = value.(aggregateRecord)
+	}
+
+	// Is the previous record too old? If so, make a fresh one. Note: if we didn't
+	// find a similar record, its lastTimestamp will be the zero value, so we
+	// create a new one in that case.
+	maxInterval := time.Duration(e.maxIntervalInSeconds) * time.Second
+	interval := now.Time.Sub(record.lastTimestamp.Time)
+	if interval > maxInterval {
+		record = aggregateRecord{localKeys: sets.NewString()}
+	}
+
+	// Write the new event into the aggregation record and put it on the cache
+	record.localKeys.Insert(localKey)
+	record.lastTimestamp = now
+	e.cache.Add(aggregateKey, record)
+
+	// If we are not yet over the threshold for unique events, don't correlate them
+	if uint(record.localKeys.Len()) < e.maxEvents {
+		return newEvent, eventKey
+	}
+
+	// do not grow our local key set any larger than max
+	record.localKeys.PopAny()
+
+	// create a new aggregate event, and return the aggregateKey as the cache key
+	// (so that it can be overwritten).
+	eventCopy := &v1.Event{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      fmt.Sprintf("%v.%x", newEvent.InvolvedObject.Name, now.UnixNano()),
+			Namespace: newEvent.Namespace,
+		},
+		Count:          1,
+		FirstTimestamp: now,
+		InvolvedObject: newEvent.InvolvedObject,
+		LastTimestamp:  now,
+		Message:        e.messageFunc(newEvent),
+		Type:           newEvent.Type,
+		Reason:         newEvent.Reason,
+		Source:         newEvent.Source,
+	}
+	return eventCopy, aggregateKey
+}
+
+// eventLog records data about when an event was observed
+type eventLog struct {
+	// The number of times the event has occurred since first occurrence.
+	count uint
+
+	// The time at which the event was first recorded.
+ firstTimestamp metav1.Time + + // The unique name of the first occurrence of this event + name string + + // Resource version returned from previous interaction with server + resourceVersion string +} + +// eventLogger logs occurrences of an event +type eventLogger struct { + sync.RWMutex + cache *lru.Cache + clock clock.Clock +} + +// newEventLogger observes events and counts their frequencies +func newEventLogger(lruCacheEntries int, clock clock.Clock) *eventLogger { + return &eventLogger{cache: lru.New(lruCacheEntries), clock: clock} +} + +// eventObserve records an event, or updates an existing one if key is a cache hit +func (e *eventLogger) eventObserve(newEvent *v1.Event, key string) (*v1.Event, []byte, error) { + var ( + patch []byte + err error + ) + eventCopy := *newEvent + event := &eventCopy + + e.Lock() + defer e.Unlock() + + // Check if there is an existing event we should update + lastObservation := e.lastEventObservationFromCache(key) + + // If we found a result, prepare a patch + if lastObservation.count > 0 { + // update the event based on the last observation so patch will work as desired + event.Name = lastObservation.name + event.ResourceVersion = lastObservation.resourceVersion + event.FirstTimestamp = lastObservation.firstTimestamp + event.Count = int32(lastObservation.count) + 1 + + eventCopy2 := *event + eventCopy2.Count = 0 + eventCopy2.LastTimestamp = metav1.NewTime(time.Unix(0, 0)) + eventCopy2.Message = "" + + newData, _ := json.Marshal(event) + oldData, _ := json.Marshal(eventCopy2) + patch, err = strategicpatch.CreateTwoWayMergePatch(oldData, newData, event) + } + + // record our new observation + e.cache.Add( + key, + eventLog{ + count: uint(event.Count), + firstTimestamp: event.FirstTimestamp, + name: event.Name, + resourceVersion: event.ResourceVersion, + }, + ) + return event, patch, err +} + +// updateState updates its internal tracking information based on latest server state +func (e *eventLogger) updateState(event *v1.Event) { + key := getEventKey(event) + e.Lock() + defer e.Unlock() + // record our new observation + e.cache.Add( + key, + eventLog{ + count: uint(event.Count), + firstTimestamp: event.FirstTimestamp, + name: event.Name, + resourceVersion: event.ResourceVersion, + }, + ) +} + +// lastEventObservationFromCache returns the event from the cache, reads must be protected via external lock +func (e *eventLogger) lastEventObservationFromCache(key string) eventLog { + value, ok := e.cache.Get(key) + if ok { + observationValue, ok := value.(eventLog) + if ok { + return observationValue + } + } + return eventLog{} +} + +// EventCorrelator processes all incoming events and performs analysis to avoid overwhelming the system. It can filter all +// incoming events to see if the event should be filtered from further processing. It can aggregate similar events that occur +// frequently to protect the system from spamming events that are difficult for users to distinguish. It performs de-duplication +// to ensure events that are observed multiple times are compacted into a single event with increasing counts. 
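+//
+// A hedged sketch of the write loop that drives it (roughly what recordToSink in
+// event.go above does; "sink" stands for any EventSink and is an assumption here,
+// and error handling is elided):
+//
+//	correlator := NewEventCorrelator(clock.RealClock{})
+//	result, err := correlator.EventCorrelate(event)
+//	if err == nil && !result.Skip {
+//		if result.Event.Count > 1 {
+//			updated, _ := sink.Patch(result.Event, result.Patch) // correlated: patch the existing event
+//			correlator.UpdateState(updated)
+//		} else {
+//			created, _ := sink.Create(result.Event) // first occurrence: create a new event
+//			correlator.UpdateState(created)
+//		}
+//	}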
+type EventCorrelator struct {
+	// the function to filter the event
+	filterFunc EventFilterFunc
+	// the object that performs event aggregation
+	aggregator *EventAggregator
+	// the object that observes events as they come through
+	logger *eventLogger
+}
+
+// EventCorrelateResult is the result of correlating an event
+type EventCorrelateResult struct {
+	// the event after correlation
+	Event *v1.Event
+	// if provided, perform a strategic patch when updating the record on the server
+	Patch []byte
+	// if true, do no further processing of the event
+	Skip bool
+}
+
+// NewEventCorrelator returns an EventCorrelator configured with default values.
+//
+// The EventCorrelator is responsible for event filtering, aggregating, and counting
+// prior to interacting with the API server to record the event.
+//
+// The default behavior is as follows:
+//   * Aggregation is performed if a similar event is recorded 10 times in a
+//     10 minute rolling interval. A similar event is an event that varies only by
+//     the Event.Message field. Rather than recording the precise event, aggregation
+//     will create a new event whose message reports that it has combined events with
+//     the same reason.
+//   * Events are incrementally counted if the exact same event is encountered multiple
+//     times.
+//   * A source may burst 25 events about an object, but has a refill rate budget
+//     per object of 1 event every 5 minutes to control the long tail of spam.
+func NewEventCorrelator(clock clock.Clock) *EventCorrelator {
+	cacheSize := maxLruCacheEntries
+	spamFilter := NewEventSourceObjectSpamFilter(cacheSize, defaultSpamBurst, defaultSpamQPS, clock)
+	return &EventCorrelator{
+		filterFunc: spamFilter.Filter,
+		aggregator: NewEventAggregator(
+			cacheSize,
+			EventAggregatorByReasonFunc,
+			EventAggregatorByReasonMessageFunc,
+			defaultAggregateMaxEvents,
+			defaultAggregateIntervalInSeconds,
+			clock),
+
+		logger: newEventLogger(cacheSize, clock),
+	}
+}
+
+// EventCorrelate filters, aggregates, counts, and de-duplicates all incoming events
+func (c *EventCorrelator) EventCorrelate(newEvent *v1.Event) (*EventCorrelateResult, error) {
+	if newEvent == nil {
+		return nil, fmt.Errorf("event is nil")
+	}
+	aggregateEvent, ckey := c.aggregator.EventAggregate(newEvent)
+	observedEvent, patch, err := c.logger.eventObserve(aggregateEvent, ckey)
+	if c.filterFunc(observedEvent) {
+		return &EventCorrelateResult{Skip: true}, nil
+	}
+	return &EventCorrelateResult{Event: observedEvent, Patch: patch}, err
+}
+
+// UpdateState updates the correlator's internal state based on the latest
+// observed state from the server
+func (c *EventCorrelator) UpdateState(event *v1.Event) {
+	c.logger.updateState(event)
+}
diff --git a/vendor/k8s.io/client-go/tools/record/fake.go b/vendor/k8s.io/client-go/tools/record/fake.go
new file mode 100644
index 000000000..6e031daaf
--- /dev/null
+++ b/vendor/k8s.io/client-go/tools/record/fake.go
@@ -0,0 +1,58 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package record
+
+import (
+	"fmt"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+)
+
+// FakeRecorder is used as a fake during tests. It is thread safe. It is usable
+// when created manually and not by NewFakeRecorder; however, all events may be
+// thrown away in this case.
+type FakeRecorder struct {
+	Events chan string
+}
+
+func (f *FakeRecorder) Event(object runtime.Object, eventtype, reason, message string) {
+	if f.Events != nil {
+		f.Events <- fmt.Sprintf("%s %s %s", eventtype, reason, message)
+	}
+}
+
+func (f *FakeRecorder) Eventf(object runtime.Object, eventtype, reason, messageFmt string, args ...interface{}) {
+	if f.Events != nil {
+		f.Events <- fmt.Sprintf(eventtype+" "+reason+" "+messageFmt, args...)
+	}
+}
+
+func (f *FakeRecorder) PastEventf(object runtime.Object, timestamp metav1.Time, eventtype, reason, messageFmt string, args ...interface{}) {
+}
+
+func (f *FakeRecorder) AnnotatedEventf(object runtime.Object, annotations map[string]string, eventtype, reason, messageFmt string, args ...interface{}) {
+	// Forward the variadic arguments instead of passing the slice as a single
+	// value, so the formatted message matches what Eventf would produce.
+	f.Eventf(object, eventtype, reason, messageFmt, args...)
+}
+
+// NewFakeRecorder creates a new fake event recorder with an event channel
+// with a buffer of the given size.
+func NewFakeRecorder(bufferSize int) *FakeRecorder {
+	return &FakeRecorder{
+		Events: make(chan string, bufferSize),
+	}
+}
diff --git a/vendor/k8s.io/client-go/transport/config.go b/vendor/k8s.io/client-go/transport/config.go
index 5de0a2cb1..acb126d8b 100644
--- a/vendor/k8s.io/client-go/transport/config.go
+++ b/vendor/k8s.io/client-go/transport/config.go
@@ -57,10 +57,7 @@ type Config struct {
 	// from TLSClientConfig, Transport, or http.DefaultTransport). The
 	// config may layer other RoundTrippers on top of the returned
 	// RoundTripper.
-	//
-	// A future release will change this field to an array. Use config.Wrap()
-	// instead of setting this value directly.
-	WrapTransport WrapperFunc
+	WrapTransport func(rt http.RoundTripper) http.RoundTripper
 
 	// Dial specifies the dial function for creating unencrypted TCP connections.
 	Dial func(ctx context.Context, network, address string) (net.Conn, error)
@@ -101,14 +98,6 @@ func (c *Config) HasCertCallback() bool {
 	return c.TLS.GetCert != nil
 }
 
-// Wrap adds a transport middleware function that will give the caller
-// an opportunity to wrap the underlying http.RoundTripper prior to the
-// first API call being made. The provided function is invoked after any
-// existing transport wrappers are invoked.
-func (c *Config) Wrap(fn WrapperFunc) {
-	c.WrapTransport = Wrappers(c.WrapTransport, fn)
-}
-
 // TLSConfig holds the information needed to set up a TLS transport.
 type TLSConfig struct {
 	CAFile string // Path of the PEM-encoded server trusted root certificates.
diff --git a/vendor/k8s.io/client-go/transport/round_trippers.go b/vendor/k8s.io/client-go/transport/round_trippers.go
index 117a9c8c4..4a210e49f 100644
--- a/vendor/k8s.io/client-go/transport/round_trippers.go
+++ b/vendor/k8s.io/client-go/transport/round_trippers.go
@@ -22,8 +22,8 @@ import (
 	"strings"
 	"time"
 
+	"github.com/golang/glog"
 	"golang.org/x/oauth2"
-	"k8s.io/klog"
 
 	utilnet "k8s.io/apimachinery/pkg/util/net"
 )
@@ -67,13 +67,13 @@ func HTTPWrappersForConfig(config *Config, rt http.RoundTripper) (http.RoundTrip
 
 // DebugWrappers wraps a round tripper and logs based on the current log level.
func DebugWrappers(rt http.RoundTripper) http.RoundTripper { switch { - case bool(klog.V(9)): + case bool(glog.V(9)): rt = newDebuggingRoundTripper(rt, debugCurlCommand, debugURLTiming, debugResponseHeaders) - case bool(klog.V(8)): + case bool(glog.V(8)): rt = newDebuggingRoundTripper(rt, debugJustURL, debugRequestHeaders, debugResponseStatus, debugResponseHeaders) - case bool(klog.V(7)): + case bool(glog.V(7)): rt = newDebuggingRoundTripper(rt, debugJustURL, debugRequestHeaders, debugResponseStatus) - case bool(klog.V(6)): + case bool(glog.V(6)): rt = newDebuggingRoundTripper(rt, debugURLTiming) } @@ -143,7 +143,7 @@ func (rt *authProxyRoundTripper) CancelRequest(req *http.Request) { if canceler, ok := rt.rt.(requestCanceler); ok { canceler.CancelRequest(req) } else { - klog.Errorf("CancelRequest not implemented by %T", rt.rt) + glog.Errorf("CancelRequest not implemented by %T", rt.rt) } } @@ -171,7 +171,7 @@ func (rt *userAgentRoundTripper) CancelRequest(req *http.Request) { if canceler, ok := rt.rt.(requestCanceler); ok { canceler.CancelRequest(req) } else { - klog.Errorf("CancelRequest not implemented by %T", rt.rt) + glog.Errorf("CancelRequest not implemented by %T", rt.rt) } } @@ -202,7 +202,7 @@ func (rt *basicAuthRoundTripper) CancelRequest(req *http.Request) { if canceler, ok := rt.rt.(requestCanceler); ok { canceler.CancelRequest(req) } else { - klog.Errorf("CancelRequest not implemented by %T", rt.rt) + glog.Errorf("CancelRequest not implemented by %T", rt.rt) } } @@ -262,7 +262,7 @@ func (rt *impersonatingRoundTripper) CancelRequest(req *http.Request) { if canceler, ok := rt.delegate.(requestCanceler); ok { canceler.CancelRequest(req) } else { - klog.Errorf("CancelRequest not implemented by %T", rt.delegate) + glog.Errorf("CancelRequest not implemented by %T", rt.delegate) } } @@ -321,7 +321,7 @@ func (rt *bearerAuthRoundTripper) CancelRequest(req *http.Request) { if canceler, ok := rt.rt.(requestCanceler); ok { canceler.CancelRequest(req) } else { - klog.Errorf("CancelRequest not implemented by %T", rt.rt) + glog.Errorf("CancelRequest not implemented by %T", rt.rt) } } @@ -405,7 +405,7 @@ func (rt *debuggingRoundTripper) CancelRequest(req *http.Request) { if canceler, ok := rt.delegatedRoundTripper.(requestCanceler); ok { canceler.CancelRequest(req) } else { - klog.Errorf("CancelRequest not implemented by %T", rt.delegatedRoundTripper) + glog.Errorf("CancelRequest not implemented by %T", rt.delegatedRoundTripper) } } @@ -413,17 +413,17 @@ func (rt *debuggingRoundTripper) RoundTrip(req *http.Request) (*http.Response, e reqInfo := newRequestInfo(req) if rt.levels[debugJustURL] { - klog.Infof("%s %s", reqInfo.RequestVerb, reqInfo.RequestURL) + glog.Infof("%s %s", reqInfo.RequestVerb, reqInfo.RequestURL) } if rt.levels[debugCurlCommand] { - klog.Infof("%s", reqInfo.toCurl()) + glog.Infof("%s", reqInfo.toCurl()) } if rt.levels[debugRequestHeaders] { - klog.Infof("Request Headers:") + glog.Infof("Request Headers:") for key, values := range reqInfo.RequestHeaders { for _, value := range values { - klog.Infof(" %s: %s", key, value) + glog.Infof(" %s: %s", key, value) } } } @@ -435,16 +435,16 @@ func (rt *debuggingRoundTripper) RoundTrip(req *http.Request) (*http.Response, e reqInfo.complete(response, err) if rt.levels[debugURLTiming] { - klog.Infof("%s %s %s in %d milliseconds", reqInfo.RequestVerb, reqInfo.RequestURL, reqInfo.ResponseStatus, reqInfo.Duration.Nanoseconds()/int64(time.Millisecond)) + glog.Infof("%s %s %s in %d milliseconds", reqInfo.RequestVerb, reqInfo.RequestURL, 
reqInfo.ResponseStatus, reqInfo.Duration.Nanoseconds()/int64(time.Millisecond)) } if rt.levels[debugResponseStatus] { - klog.Infof("Response Status: %s in %d milliseconds", reqInfo.ResponseStatus, reqInfo.Duration.Nanoseconds()/int64(time.Millisecond)) + glog.Infof("Response Status: %s in %d milliseconds", reqInfo.ResponseStatus, reqInfo.Duration.Nanoseconds()/int64(time.Millisecond)) } if rt.levels[debugResponseHeaders] { - klog.Infof("Response Headers:") + glog.Infof("Response Headers:") for key, values := range reqInfo.ResponseHeaders { for _, value := range values { - klog.Infof(" %s: %s", key, value) + glog.Infof(" %s: %s", key, value) } } } diff --git a/vendor/k8s.io/client-go/transport/token_source.go b/vendor/k8s.io/client-go/transport/token_source.go index b8cadd382..c9bdf9a9f 100644 --- a/vendor/k8s.io/client-go/transport/token_source.go +++ b/vendor/k8s.io/client-go/transport/token_source.go @@ -24,8 +24,8 @@ import ( "sync" "time" + "github.com/golang/glog" "golang.org/x/oauth2" - "k8s.io/klog" ) // TokenSourceWrapTransport returns a WrapTransport that injects bearer tokens @@ -59,15 +59,6 @@ func NewCachedFileTokenSource(path string) oauth2.TokenSource { } } -// NewCachedTokenSource returns a oauth2.TokenSource reads a token from a -// designed TokenSource. The ts would provide the source of token. -func NewCachedTokenSource(ts oauth2.TokenSource) oauth2.TokenSource { - return &cachingTokenSource{ - now: time.Now, - base: ts, - } -} - type tokenSourceTransport struct { base http.RoundTripper ort http.RoundTripper @@ -140,7 +131,7 @@ func (ts *cachingTokenSource) Token() (*oauth2.Token, error) { if ts.tok == nil { return nil, err } - klog.Errorf("Unable to rotate token: %v", err) + glog.Errorf("Unable to rotate token: %v", err) return ts.tok, nil } diff --git a/vendor/k8s.io/client-go/transport/transport.go b/vendor/k8s.io/client-go/transport/transport.go index 2a145c971..c19739fdf 100644 --- a/vendor/k8s.io/client-go/transport/transport.go +++ b/vendor/k8s.io/client-go/transport/transport.go @@ -17,7 +17,6 @@ limitations under the License. package transport import ( - "context" "crypto/tls" "crypto/x509" "fmt" @@ -168,60 +167,3 @@ func rootCertPool(caData []byte) *x509.CertPool { certPool.AppendCertsFromPEM(caData) return certPool } - -// WrapperFunc wraps an http.RoundTripper when a new transport -// is created for a client, allowing per connection behavior -// to be injected. -type WrapperFunc func(rt http.RoundTripper) http.RoundTripper - -// Wrappers accepts any number of wrappers and returns a wrapper -// function that is the equivalent of calling each of them in order. Nil -// values are ignored, which makes this function convenient for incrementally -// wrapping a function. -func Wrappers(fns ...WrapperFunc) WrapperFunc { - if len(fns) == 0 { - return nil - } - // optimize the common case of wrapping a possibly nil transport wrapper - // with an additional wrapper - if len(fns) == 2 && fns[0] == nil { - return fns[1] - } - return func(rt http.RoundTripper) http.RoundTripper { - base := rt - for _, fn := range fns { - if fn != nil { - base = fn(base) - } - } - return base - } -} - -// ContextCanceller prevents new requests after the provided context is finished. -// err is returned when the context is closed, allowing the caller to provide a context -// appropriate error. 
-func ContextCanceller(ctx context.Context, err error) WrapperFunc { - return func(rt http.RoundTripper) http.RoundTripper { - return &contextCanceller{ - ctx: ctx, - rt: rt, - err: err, - } - } -} - -type contextCanceller struct { - ctx context.Context - rt http.RoundTripper - err error -} - -func (b *contextCanceller) RoundTrip(req *http.Request) (*http.Response, error) { - select { - case <-b.ctx.Done(): - return nil, b.err - default: - return b.rt.RoundTrip(req) - } -} diff --git a/vendor/k8s.io/utils/buffer/ring_growing.go b/vendor/k8s.io/client-go/util/buffer/ring_growing.go similarity index 100% rename from vendor/k8s.io/utils/buffer/ring_growing.go rename to vendor/k8s.io/client-go/util/buffer/ring_growing.go diff --git a/vendor/k8s.io/client-go/util/cert/cert.go b/vendor/k8s.io/client-go/util/cert/cert.go index 9fd097af5..fe2158b23 100644 --- a/vendor/k8s.io/client-go/util/cert/cert.go +++ b/vendor/k8s.io/client-go/util/cert/cert.go @@ -18,24 +18,29 @@ package cert import ( "bytes" - "crypto" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" cryptorand "crypto/rand" "crypto/rsa" "crypto/x509" "crypto/x509/pkix" "encoding/pem" + "errors" "fmt" "io/ioutil" + "math" "math/big" "net" "path" "strings" "time" - - "k8s.io/client-go/util/keyutil" ) -const duration365d = time.Hour * 24 * 365 +const ( + rsaKeySize = 2048 + duration365d = time.Hour * 24 * 365 +) // Config contains the basic fields required for creating a certificate type Config struct { @@ -53,8 +58,13 @@ type AltNames struct { IPs []net.IP } +// NewPrivateKey creates an RSA private key +func NewPrivateKey() (*rsa.PrivateKey, error) { + return rsa.GenerateKey(cryptorand.Reader, rsaKeySize) +} + // NewSelfSignedCACert creates a CA certificate -func NewSelfSignedCACert(cfg Config, key crypto.Signer) (*x509.Certificate, error) { +func NewSelfSignedCACert(cfg Config, key *rsa.PrivateKey) (*x509.Certificate, error) { now := time.Now() tmpl := x509.Certificate{ SerialNumber: new(big.Int).SetInt64(0), @@ -66,7 +76,7 @@ func NewSelfSignedCACert(cfg Config, key crypto.Signer) (*x509.Certificate, erro NotAfter: now.Add(duration365d * 10).UTC(), KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, BasicConstraintsValid: true, - IsCA: true, + IsCA: true, } certDERBytes, err := x509.CreateCertificate(cryptorand.Reader, &tmpl, &tmpl, key.Public(), key) @@ -76,6 +86,58 @@ func NewSelfSignedCACert(cfg Config, key crypto.Signer) (*x509.Certificate, erro return x509.ParseCertificate(certDERBytes) } +// NewSignedCert creates a signed certificate using the given CA certificate and key +func NewSignedCert(cfg Config, key *rsa.PrivateKey, caCert *x509.Certificate, caKey *rsa.PrivateKey) (*x509.Certificate, error) { + serial, err := rand.Int(rand.Reader, new(big.Int).SetInt64(math.MaxInt64)) + if err != nil { + return nil, err + } + if len(cfg.CommonName) == 0 { + return nil, errors.New("must specify a CommonName") + } + if len(cfg.Usages) == 0 { + return nil, errors.New("must specify at least one ExtKeyUsage") + } + + certTmpl := x509.Certificate{ + Subject: pkix.Name{ + CommonName: cfg.CommonName, + Organization: cfg.Organization, + }, + DNSNames: cfg.AltNames.DNSNames, + IPAddresses: cfg.AltNames.IPs, + SerialNumber: serial, + NotBefore: caCert.NotBefore, + NotAfter: time.Now().Add(duration365d).UTC(), + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, + ExtKeyUsage: cfg.Usages, + } + certDERBytes, err := x509.CreateCertificate(cryptorand.Reader, &certTmpl, caCert, 
key.Public(), caKey)
+	if err != nil {
+		return nil, err
+	}
+	return x509.ParseCertificate(certDERBytes)
+}
+
+// MakeEllipticPrivateKeyPEM creates an ECDSA private key
+func MakeEllipticPrivateKeyPEM() ([]byte, error) {
+	privateKey, err := ecdsa.GenerateKey(elliptic.P256(), cryptorand.Reader)
+	if err != nil {
+		return nil, err
+	}
+
+	derBytes, err := x509.MarshalECPrivateKey(privateKey)
+	if err != nil {
+		return nil, err
+	}
+
+	privateKeyPemBlock := &pem.Block{
+		Type:  ECPrivateKeyBlockType,
+		Bytes: derBytes,
+	}
+	return pem.EncodeToMemory(privateKeyPemBlock), nil
+}
+
 // GenerateSelfSignedCertKey creates a self-signed certificate and key for the given host.
 // Host may be an IP or a DNS name
 // You may also specify additional subject alt names (either ip or dns names) for the certificate.
@@ -125,7 +187,7 @@ func GenerateSelfSignedCertKeyWithFixtures(host string, alternateIPs []net.IP, a
 		KeyUsage:              x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,
 		BasicConstraintsValid: true,
-		IsCA: true,
+		IsCA:                  true,
 	}
 
 	caDERBytes, err := x509.CreateCertificate(cryptorand.Reader, &caTemplate, &caTemplate, &caKey.PublicKey, caKey)
@@ -181,7 +243,7 @@ func GenerateSelfSignedCertKeyWithFixtures(host string, alternateIPs []net.IP, a
 
 	// Generate key
 	keyBuffer := bytes.Buffer{}
-	if err := pem.Encode(&keyBuffer, &pem.Block{Type: keyutil.RSAPrivateKeyBlockType, Bytes: x509.MarshalPKCS1PrivateKey(priv)}); err != nil {
+	if err := pem.Encode(&keyBuffer, &pem.Block{Type: RSAPrivateKeyBlockType, Bytes: x509.MarshalPKCS1PrivateKey(priv)}); err != nil {
 		return nil, nil, err
 	}
 
@@ -197,6 +259,34 @@ func GenerateSelfSignedCertKeyWithFixtures(host string, alternateIPs []net.IP, a
 	return certBuffer.Bytes(), keyBuffer.Bytes(), nil
 }
 
+// FormatBytesCert receives a PEM-encoded certificate as a byte slice and formats it in a human-readable way
+func FormatBytesCert(cert []byte) (string, error) {
+	block, _ := pem.Decode(cert)
+	c, err := x509.ParseCertificate(block.Bytes)
+	if err != nil {
+		return "", fmt.Errorf("failed to parse certificate [%v]", err)
+	}
+	return FormatCert(c), nil
+}
+
+// FormatCert receives a parsed certificate and formats it in a human-readable way
+func FormatCert(c *x509.Certificate) string {
+	var ips []string
+	for _, ip := range c.IPAddresses {
+		ips = append(ips, ip.String())
+	}
+	altNames := append(ips, c.DNSNames...)
+	res := fmt.Sprintf(
+		"Issuer: CN=%s | Subject: CN=%s | CA: %t\n",
+		c.Issuer.CommonName, c.Subject.CommonName, c.IsCA,
+	)
+	res += fmt.Sprintf("Not before: %s Not After: %s", c.NotBefore, c.NotAfter)
+	if len(altNames) > 0 {
+		res += fmt.Sprintf("\nAlternate Names: %v", altNames)
+	}
+	return res
+}
+
 func ipsToStrings(ips []net.IP) []string {
 	ss := make([]string, 0, len(ips))
 	for _, ip := range ips {
diff --git a/vendor/k8s.io/client-go/util/cert/io.go b/vendor/k8s.io/client-go/util/cert/io.go
index 5efb24894..a57bf09d5 100644
--- a/vendor/k8s.io/client-go/util/cert/io.go
+++ b/vendor/k8s.io/client-go/util/cert/io.go
@@ -17,7 +17,11 @@ limitations under the License.
 package cert
 
 import (
+	"crypto"
+	"crypto/ecdsa"
+	"crypto/rsa"
 	"crypto/x509"
+	"encoding/pem"
 	"fmt"
 	"io/ioutil"
 	"os"
@@ -69,6 +73,60 @@ func WriteCert(certPath string, data []byte) error {
 	return ioutil.WriteFile(certPath, data, os.FileMode(0644))
 }
 
+// WriteKey writes the pem-encoded key data to keyPath.
+// The key file will be created with file mode 0600.
+// If the key file already exists, it will be overwritten.
+// The parent directory of the keyPath will be created as needed with file mode 0755. +func WriteKey(keyPath string, data []byte) error { + if err := os.MkdirAll(filepath.Dir(keyPath), os.FileMode(0755)); err != nil { + return err + } + return ioutil.WriteFile(keyPath, data, os.FileMode(0600)) +} + +// LoadOrGenerateKeyFile looks for a key in the file at the given path. If it +// can't find one, it will generate a new key and store it there. +func LoadOrGenerateKeyFile(keyPath string) (data []byte, wasGenerated bool, err error) { + loadedData, err := ioutil.ReadFile(keyPath) + // Call verifyKeyData to ensure the file wasn't empty/corrupt. + if err == nil && verifyKeyData(loadedData) { + return loadedData, false, err + } + if !os.IsNotExist(err) { + return nil, false, fmt.Errorf("error loading key from %s: %v", keyPath, err) + } + + generatedData, err := MakeEllipticPrivateKeyPEM() + if err != nil { + return nil, false, fmt.Errorf("error generating key: %v", err) + } + if err := WriteKey(keyPath, generatedData); err != nil { + return nil, false, fmt.Errorf("error writing key to %s: %v", keyPath, err) + } + return generatedData, true, nil +} + +// MarshalPrivateKeyToPEM converts a known private key type of RSA or ECDSA to +// a PEM encoded block or returns an error. +func MarshalPrivateKeyToPEM(privateKey crypto.PrivateKey) ([]byte, error) { + switch t := privateKey.(type) { + case *ecdsa.PrivateKey: + derBytes, err := x509.MarshalECPrivateKey(t) + if err != nil { + return nil, err + } + privateKeyPemBlock := &pem.Block{ + Type: ECPrivateKeyBlockType, + Bytes: derBytes, + } + return pem.EncodeToMemory(privateKeyPemBlock), nil + case *rsa.PrivateKey: + return EncodePrivateKeyPEM(t), nil + default: + return nil, fmt.Errorf("private key is not a recognized type: %T", privateKey) + } +} + // NewPool returns an x509.CertPool containing the certificates in the given PEM-encoded file. // Returns an error if the file could not be read, a certificate could not be parsed, or if the file does not contain any certificates func NewPool(filename string) (*x509.CertPool, error) { @@ -96,3 +154,40 @@ func CertsFromFile(file string) ([]*x509.Certificate, error) { } return certs, nil } + +// PrivateKeyFromFile returns the private key in rsa.PrivateKey or ecdsa.PrivateKey format from a given PEM-encoded file. +// Returns an error if the file could not be read or if the private key could not be parsed. +func PrivateKeyFromFile(file string) (interface{}, error) { + data, err := ioutil.ReadFile(file) + if err != nil { + return nil, err + } + key, err := ParsePrivateKeyPEM(data) + if err != nil { + return nil, fmt.Errorf("error reading private key file %s: %v", file, err) + } + return key, nil +} + +// PublicKeysFromFile returns the public keys in rsa.PublicKey or ecdsa.PublicKey format from a given PEM-encoded file. +// Reads public keys from both public and private key files. +func PublicKeysFromFile(file string) ([]interface{}, error) { + data, err := ioutil.ReadFile(file) + if err != nil { + return nil, err + } + keys, err := ParsePublicKeysPEM(data) + if err != nil { + return nil, fmt.Errorf("error reading public key file %s: %v", file, err) + } + return keys, nil +} + +// verifyKeyData returns true if the provided data appears to be a valid private key. 
+func verifyKeyData(data []byte) bool {
+	if len(data) == 0 {
+		return false
+	}
+	_, err := ParsePrivateKeyPEM(data)
+	return err == nil
+}
diff --git a/vendor/k8s.io/client-go/util/cert/pem.go b/vendor/k8s.io/client-go/util/cert/pem.go
index 9185e2e22..b99e36651 100644
--- a/vendor/k8s.io/client-go/util/cert/pem.go
+++ b/vendor/k8s.io/client-go/util/cert/pem.go
@@ -17,18 +17,136 @@ limitations under the License.
 package cert
 
 import (
+	"crypto/ecdsa"
+	"crypto/rsa"
 	"crypto/x509"
 	"encoding/pem"
 	"errors"
+	"fmt"
 )
 
 const (
+	// ECPrivateKeyBlockType is a possible value for pem.Block.Type.
+	ECPrivateKeyBlockType = "EC PRIVATE KEY"
+	// RSAPrivateKeyBlockType is a possible value for pem.Block.Type.
+	RSAPrivateKeyBlockType = "RSA PRIVATE KEY"
+	// PrivateKeyBlockType is a possible value for pem.Block.Type.
+	PrivateKeyBlockType = "PRIVATE KEY"
+	// PublicKeyBlockType is a possible value for pem.Block.Type.
+	PublicKeyBlockType = "PUBLIC KEY"
 	// CertificateBlockType is a possible value for pem.Block.Type.
 	CertificateBlockType = "CERTIFICATE"
 	// CertificateRequestBlockType is a possible value for pem.Block.Type.
 	CertificateRequestBlockType = "CERTIFICATE REQUEST"
 )
 
+// EncodePublicKeyPEM returns PEM-encoded public data
+func EncodePublicKeyPEM(key *rsa.PublicKey) ([]byte, error) {
+	der, err := x509.MarshalPKIXPublicKey(key)
+	if err != nil {
+		return []byte{}, err
+	}
+	block := pem.Block{
+		Type:  PublicKeyBlockType,
+		Bytes: der,
+	}
+	return pem.EncodeToMemory(&block), nil
+}
+
+// EncodePrivateKeyPEM returns PEM-encoded private key data
+func EncodePrivateKeyPEM(key *rsa.PrivateKey) []byte {
+	block := pem.Block{
+		Type:  RSAPrivateKeyBlockType,
+		Bytes: x509.MarshalPKCS1PrivateKey(key),
+	}
+	return pem.EncodeToMemory(&block)
+}
+
+// EncodeCertPEM returns PEM-encoded certificate data
+func EncodeCertPEM(cert *x509.Certificate) []byte {
+	block := pem.Block{
+		Type:  CertificateBlockType,
+		Bytes: cert.Raw,
+	}
+	return pem.EncodeToMemory(&block)
+}
+
+// ParsePrivateKeyPEM returns a private key parsed from a PEM block in the supplied data.
+// Recognizes PEM blocks for "EC PRIVATE KEY", "RSA PRIVATE KEY", or "PRIVATE KEY"
+func ParsePrivateKeyPEM(keyData []byte) (interface{}, error) {
+	var privateKeyPemBlock *pem.Block
+	for {
+		privateKeyPemBlock, keyData = pem.Decode(keyData)
+		if privateKeyPemBlock == nil {
+			break
+		}
+
+		switch privateKeyPemBlock.Type {
+		case ECPrivateKeyBlockType:
+			// ECDSA Private Key in ASN.1 format
+			if key, err := x509.ParseECPrivateKey(privateKeyPemBlock.Bytes); err == nil {
+				return key, nil
+			}
+		case RSAPrivateKeyBlockType:
+			// RSA Private Key in PKCS#1 format
+			if key, err := x509.ParsePKCS1PrivateKey(privateKeyPemBlock.Bytes); err == nil {
+				return key, nil
+			}
+		case PrivateKeyBlockType:
+			// RSA or ECDSA Private Key in unencrypted PKCS#8 format
+			if key, err := x509.ParsePKCS8PrivateKey(privateKeyPemBlock.Bytes); err == nil {
+				return key, nil
+			}
+		}
+
+		// tolerate non-key PEM blocks for compatibility with things like "EC PARAMETERS" blocks
+		// originally, only the first PEM block was parsed and expected to be a key block
+	}
+
+	// we read all the PEM blocks and didn't recognize one
+	return nil, fmt.Errorf("data does not contain a valid RSA or ECDSA private key")
+}
+
+// ParsePublicKeysPEM is a helper function for reading an array of rsa.PublicKey or ecdsa.PublicKey from a PEM-encoded byte array.
+// Reads public keys from both public and private key files.
+func ParsePublicKeysPEM(keyData []byte) ([]interface{}, error) { + var block *pem.Block + keys := []interface{}{} + for { + // read the next block + block, keyData = pem.Decode(keyData) + if block == nil { + break + } + + // test block against parsing functions + if privateKey, err := parseRSAPrivateKey(block.Bytes); err == nil { + keys = append(keys, &privateKey.PublicKey) + continue + } + if publicKey, err := parseRSAPublicKey(block.Bytes); err == nil { + keys = append(keys, publicKey) + continue + } + if privateKey, err := parseECPrivateKey(block.Bytes); err == nil { + keys = append(keys, &privateKey.PublicKey) + continue + } + if publicKey, err := parseECPublicKey(block.Bytes); err == nil { + keys = append(keys, publicKey) + continue + } + + // tolerate non-key PEM blocks for backwards compatibility + // originally, only the first PEM block was parsed and expected to be a key block + } + + if len(keys) == 0 { + return nil, fmt.Errorf("data does not contain any valid RSA or ECDSA public keys") + } + return keys, nil +} + // ParseCertsPEM returns the x509.Certificates contained in the given PEM-encoded byte array // Returns an error if a certificate could not be parsed, or if the data does not contain any certificates func ParseCertsPEM(pemCerts []byte) ([]*x509.Certificate, error) { @@ -59,3 +177,93 @@ func ParseCertsPEM(pemCerts []byte) ([]*x509.Certificate, error) { } return certs, nil } + +// parseRSAPublicKey parses a single RSA public key from the provided data +func parseRSAPublicKey(data []byte) (*rsa.PublicKey, error) { + var err error + + // Parse the key + var parsedKey interface{} + if parsedKey, err = x509.ParsePKIXPublicKey(data); err != nil { + if cert, err := x509.ParseCertificate(data); err == nil { + parsedKey = cert.PublicKey + } else { + return nil, err + } + } + + // Test if parsed key is an RSA Public Key + var pubKey *rsa.PublicKey + var ok bool + if pubKey, ok = parsedKey.(*rsa.PublicKey); !ok { + return nil, fmt.Errorf("data doesn't contain valid RSA Public Key") + } + + return pubKey, nil +} + +// parseRSAPrivateKey parses a single RSA private key from the provided data +func parseRSAPrivateKey(data []byte) (*rsa.PrivateKey, error) { + var err error + + // Parse the key + var parsedKey interface{} + if parsedKey, err = x509.ParsePKCS1PrivateKey(data); err != nil { + if parsedKey, err = x509.ParsePKCS8PrivateKey(data); err != nil { + return nil, err + } + } + + // Test if parsed key is an RSA Private Key + var privKey *rsa.PrivateKey + var ok bool + if privKey, ok = parsedKey.(*rsa.PrivateKey); !ok { + return nil, fmt.Errorf("data doesn't contain valid RSA Private Key") + } + + return privKey, nil +} + +// parseECPublicKey parses a single ECDSA public key from the provided data +func parseECPublicKey(data []byte) (*ecdsa.PublicKey, error) { + var err error + + // Parse the key + var parsedKey interface{} + if parsedKey, err = x509.ParsePKIXPublicKey(data); err != nil { + if cert, err := x509.ParseCertificate(data); err == nil { + parsedKey = cert.PublicKey + } else { + return nil, err + } + } + + // Test if parsed key is an ECDSA Public Key + var pubKey *ecdsa.PublicKey + var ok bool + if pubKey, ok = parsedKey.(*ecdsa.PublicKey); !ok { + return nil, fmt.Errorf("data doesn't contain valid ECDSA Public Key") + } + + return pubKey, nil +} + +// parseECPrivateKey parses a single ECDSA private key from the provided data +func parseECPrivateKey(data []byte) (*ecdsa.PrivateKey, error) { + var err error + + // Parse the key + var parsedKey interface{} + if parsedKey, 
err = x509.ParseECPrivateKey(data); err != nil { + return nil, err + } + + // Test if parsed key is an ECDSA Private Key + var privKey *ecdsa.PrivateKey + var ok bool + if privKey, ok = parsedKey.(*ecdsa.PrivateKey); !ok { + return nil, fmt.Errorf("data doesn't contain valid ECDSA Private Key") + } + + return privKey, nil +} diff --git a/vendor/k8s.io/client-go/util/flowcontrol/backoff.go b/vendor/k8s.io/client-go/util/flowcontrol/backoff.go index 39cd72f95..71d442a62 100644 --- a/vendor/k8s.io/client-go/util/flowcontrol/backoff.go +++ b/vendor/k8s.io/client-go/util/flowcontrol/backoff.go @@ -21,7 +21,7 @@ import ( "time" "k8s.io/apimachinery/pkg/util/clock" - "k8s.io/utils/integer" + "k8s.io/client-go/util/integer" ) type backoffEntry struct { @@ -99,7 +99,7 @@ func (p *Backoff) IsInBackOffSince(id string, eventTime time.Time) bool { if hasExpired(eventTime, entry.lastUpdate, p.maxDuration) { return false } - return p.Clock.Since(eventTime) < entry.backoff + return p.Clock.Now().Sub(eventTime) < entry.backoff } // Returns True if time since lastupdate is less than the current backoff window. diff --git a/vendor/k8s.io/client-go/util/homedir/homedir.go b/vendor/k8s.io/client-go/util/homedir/homedir.go new file mode 100644 index 000000000..816db57f5 --- /dev/null +++ b/vendor/k8s.io/client-go/util/homedir/homedir.go @@ -0,0 +1,47 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package homedir + +import ( + "os" + "runtime" +) + +// HomeDir returns the home directory for the current user +func HomeDir() string { + if runtime.GOOS == "windows" { + + // First prefer the HOME environmental variable + if home := os.Getenv("HOME"); len(home) > 0 { + if _, err := os.Stat(home); err == nil { + return home + } + } + if homeDrive, homePath := os.Getenv("HOMEDRIVE"), os.Getenv("HOMEPATH"); len(homeDrive) > 0 && len(homePath) > 0 { + homeDir := homeDrive + homePath + if _, err := os.Stat(homeDir); err == nil { + return homeDir + } + } + if userProfile := os.Getenv("USERPROFILE"); len(userProfile) > 0 { + if _, err := os.Stat(userProfile); err == nil { + return userProfile + } + } + } + return os.Getenv("HOME") +} diff --git a/vendor/k8s.io/utils/integer/integer.go b/vendor/k8s.io/client-go/util/integer/integer.go similarity index 81% rename from vendor/k8s.io/utils/integer/integer.go rename to vendor/k8s.io/client-go/util/integer/integer.go index e4e740cad..c6ea106f9 100644 --- a/vendor/k8s.io/utils/integer/integer.go +++ b/vendor/k8s.io/client-go/util/integer/integer.go @@ -16,7 +16,6 @@ limitations under the License. 
package integer -// IntMax returns the maximum of the params func IntMax(a, b int) int { if b > a { return b @@ -24,7 +23,6 @@ func IntMax(a, b int) int { return a } -// IntMin returns the minimum of the params func IntMin(a, b int) int { if b < a { return b @@ -32,7 +30,6 @@ func IntMin(a, b int) int { return a } -// Int32Max returns the maximum of the params func Int32Max(a, b int32) int32 { if b > a { return b @@ -40,7 +37,6 @@ func Int32Max(a, b int32) int32 { return a } -// Int32Min returns the minimum of the params func Int32Min(a, b int32) int32 { if b < a { return b @@ -48,7 +44,6 @@ func Int32Min(a, b int32) int32 { return a } -// Int64Max returns the maximum of the params func Int64Max(a, b int64) int64 { if b > a { return b @@ -56,7 +51,6 @@ func Int64Max(a, b int64) int64 { return a } -// Int64Min returns the minimum of the params func Int64Min(a, b int64) int64 { if b < a { return b diff --git a/vendor/k8s.io/client-go/util/keyutil/key.go b/vendor/k8s.io/client-go/util/keyutil/key.go deleted file mode 100644 index 83c2c6254..000000000 --- a/vendor/k8s.io/client-go/util/keyutil/key.go +++ /dev/null @@ -1,323 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package keyutil contains utilities for managing public/private key pairs. -package keyutil - -import ( - "crypto" - "crypto/ecdsa" - "crypto/elliptic" - cryptorand "crypto/rand" - "crypto/rsa" - "crypto/x509" - "encoding/pem" - "fmt" - "io/ioutil" - "os" - "path/filepath" -) - -const ( - // ECPrivateKeyBlockType is a possible value for pem.Block.Type. - ECPrivateKeyBlockType = "EC PRIVATE KEY" - // RSAPrivateKeyBlockType is a possible value for pem.Block.Type. - RSAPrivateKeyBlockType = "RSA PRIVATE KEY" - // PrivateKeyBlockType is a possible value for pem.Block.Type. - PrivateKeyBlockType = "PRIVATE KEY" - // PublicKeyBlockType is a possible value for pem.Block.Type. - PublicKeyBlockType = "PUBLIC KEY" -) - -// MakeEllipticPrivateKeyPEM creates an ECDSA private key -func MakeEllipticPrivateKeyPEM() ([]byte, error) { - privateKey, err := ecdsa.GenerateKey(elliptic.P256(), cryptorand.Reader) - if err != nil { - return nil, err - } - - derBytes, err := x509.MarshalECPrivateKey(privateKey) - if err != nil { - return nil, err - } - - privateKeyPemBlock := &pem.Block{ - Type: ECPrivateKeyBlockType, - Bytes: derBytes, - } - return pem.EncodeToMemory(privateKeyPemBlock), nil -} - -// WriteKey writes the pem-encoded key data to keyPath. -// The key file will be created with file mode 0600. -// If the key file already exists, it will be overwritten. -// The parent directory of the keyPath will be created as needed with file mode 0755. -func WriteKey(keyPath string, data []byte) error { - if err := os.MkdirAll(filepath.Dir(keyPath), os.FileMode(0755)); err != nil { - return err - } - return ioutil.WriteFile(keyPath, data, os.FileMode(0600)) -} - -// LoadOrGenerateKeyFile looks for a key in the file at the given path. If it -// can't find one, it will generate a new key and store it there. 
-func LoadOrGenerateKeyFile(keyPath string) (data []byte, wasGenerated bool, err error) { - loadedData, err := ioutil.ReadFile(keyPath) - // Call verifyKeyData to ensure the file wasn't empty/corrupt. - if err == nil && verifyKeyData(loadedData) { - return loadedData, false, err - } - if !os.IsNotExist(err) { - return nil, false, fmt.Errorf("error loading key from %s: %v", keyPath, err) - } - - generatedData, err := MakeEllipticPrivateKeyPEM() - if err != nil { - return nil, false, fmt.Errorf("error generating key: %v", err) - } - if err := WriteKey(keyPath, generatedData); err != nil { - return nil, false, fmt.Errorf("error writing key to %s: %v", keyPath, err) - } - return generatedData, true, nil -} - -// MarshalPrivateKeyToPEM converts a known private key type of RSA or ECDSA to -// a PEM encoded block or returns an error. -func MarshalPrivateKeyToPEM(privateKey crypto.PrivateKey) ([]byte, error) { - switch t := privateKey.(type) { - case *ecdsa.PrivateKey: - derBytes, err := x509.MarshalECPrivateKey(t) - if err != nil { - return nil, err - } - block := &pem.Block{ - Type: ECPrivateKeyBlockType, - Bytes: derBytes, - } - return pem.EncodeToMemory(block), nil - case *rsa.PrivateKey: - block := &pem.Block{ - Type: RSAPrivateKeyBlockType, - Bytes: x509.MarshalPKCS1PrivateKey(t), - } - return pem.EncodeToMemory(block), nil - default: - return nil, fmt.Errorf("private key is not a recognized type: %T", privateKey) - } -} - -// PrivateKeyFromFile returns the private key in rsa.PrivateKey or ecdsa.PrivateKey format from a given PEM-encoded file. -// Returns an error if the file could not be read or if the private key could not be parsed. -func PrivateKeyFromFile(file string) (interface{}, error) { - data, err := ioutil.ReadFile(file) - if err != nil { - return nil, err - } - key, err := ParsePrivateKeyPEM(data) - if err != nil { - return nil, fmt.Errorf("error reading private key file %s: %v", file, err) - } - return key, nil -} - -// PublicKeysFromFile returns the public keys in rsa.PublicKey or ecdsa.PublicKey format from a given PEM-encoded file. -// Reads public keys from both public and private key files. -func PublicKeysFromFile(file string) ([]interface{}, error) { - data, err := ioutil.ReadFile(file) - if err != nil { - return nil, err - } - keys, err := ParsePublicKeysPEM(data) - if err != nil { - return nil, fmt.Errorf("error reading public key file %s: %v", file, err) - } - return keys, nil -} - -// verifyKeyData returns true if the provided data appears to be a valid private key. -func verifyKeyData(data []byte) bool { - if len(data) == 0 { - return false - } - _, err := ParsePrivateKeyPEM(data) - return err == nil -} - -// ParsePrivateKeyPEM returns a private key parsed from a PEM block in the supplied data. 
-// Recognizes PEM blocks for "EC PRIVATE KEY", "RSA PRIVATE KEY", or "PRIVATE KEY" -func ParsePrivateKeyPEM(keyData []byte) (interface{}, error) { - var privateKeyPemBlock *pem.Block - for { - privateKeyPemBlock, keyData = pem.Decode(keyData) - if privateKeyPemBlock == nil { - break - } - - switch privateKeyPemBlock.Type { - case ECPrivateKeyBlockType: - // ECDSA Private Key in ASN.1 format - if key, err := x509.ParseECPrivateKey(privateKeyPemBlock.Bytes); err == nil { - return key, nil - } - case RSAPrivateKeyBlockType: - // RSA Private Key in PKCS#1 format - if key, err := x509.ParsePKCS1PrivateKey(privateKeyPemBlock.Bytes); err == nil { - return key, nil - } - case PrivateKeyBlockType: - // RSA or ECDSA Private Key in unencrypted PKCS#8 format - if key, err := x509.ParsePKCS8PrivateKey(privateKeyPemBlock.Bytes); err == nil { - return key, nil - } - } - - // tolerate non-key PEM blocks for compatibility with things like "EC PARAMETERS" blocks - // originally, only the first PEM block was parsed and expected to be a key block - } - - // we read all the PEM blocks and didn't recognize one - return nil, fmt.Errorf("data does not contain a valid RSA or ECDSA private key") -} - -// ParsePublicKeysPEM is a helper function for reading an array of rsa.PublicKey or ecdsa.PublicKey from a PEM-encoded byte array. -// Reads public keys from both public and private key files. -func ParsePublicKeysPEM(keyData []byte) ([]interface{}, error) { - var block *pem.Block - keys := []interface{}{} - for { - // read the next block - block, keyData = pem.Decode(keyData) - if block == nil { - break - } - - // test block against parsing functions - if privateKey, err := parseRSAPrivateKey(block.Bytes); err == nil { - keys = append(keys, &privateKey.PublicKey) - continue - } - if publicKey, err := parseRSAPublicKey(block.Bytes); err == nil { - keys = append(keys, publicKey) - continue - } - if privateKey, err := parseECPrivateKey(block.Bytes); err == nil { - keys = append(keys, &privateKey.PublicKey) - continue - } - if publicKey, err := parseECPublicKey(block.Bytes); err == nil { - keys = append(keys, publicKey) - continue - } - - // tolerate non-key PEM blocks for backwards compatibility - // originally, only the first PEM block was parsed and expected to be a key block - } - - if len(keys) == 0 { - return nil, fmt.Errorf("data does not contain any valid RSA or ECDSA public keys") - } - return keys, nil -} - -// parseRSAPublicKey parses a single RSA public key from the provided data -func parseRSAPublicKey(data []byte) (*rsa.PublicKey, error) { - var err error - - // Parse the key - var parsedKey interface{} - if parsedKey, err = x509.ParsePKIXPublicKey(data); err != nil { - if cert, err := x509.ParseCertificate(data); err == nil { - parsedKey = cert.PublicKey - } else { - return nil, err - } - } - - // Test if parsed key is an RSA Public Key - var pubKey *rsa.PublicKey - var ok bool - if pubKey, ok = parsedKey.(*rsa.PublicKey); !ok { - return nil, fmt.Errorf("data doesn't contain valid RSA Public Key") - } - - return pubKey, nil -} - -// parseRSAPrivateKey parses a single RSA private key from the provided data -func parseRSAPrivateKey(data []byte) (*rsa.PrivateKey, error) { - var err error - - // Parse the key - var parsedKey interface{} - if parsedKey, err = x509.ParsePKCS1PrivateKey(data); err != nil { - if parsedKey, err = x509.ParsePKCS8PrivateKey(data); err != nil { - return nil, err - } - } - - // Test if parsed key is an RSA Private Key - var privKey *rsa.PrivateKey - var ok bool - if privKey, ok = 
parsedKey.(*rsa.PrivateKey); !ok { - return nil, fmt.Errorf("data doesn't contain valid RSA Private Key") - } - - return privKey, nil -} - -// parseECPublicKey parses a single ECDSA public key from the provided data -func parseECPublicKey(data []byte) (*ecdsa.PublicKey, error) { - var err error - - // Parse the key - var parsedKey interface{} - if parsedKey, err = x509.ParsePKIXPublicKey(data); err != nil { - if cert, err := x509.ParseCertificate(data); err == nil { - parsedKey = cert.PublicKey - } else { - return nil, err - } - } - - // Test if parsed key is an ECDSA Public Key - var pubKey *ecdsa.PublicKey - var ok bool - if pubKey, ok = parsedKey.(*ecdsa.PublicKey); !ok { - return nil, fmt.Errorf("data doesn't contain valid ECDSA Public Key") - } - - return pubKey, nil -} - -// parseECPrivateKey parses a single ECDSA private key from the provided data -func parseECPrivateKey(data []byte) (*ecdsa.PrivateKey, error) { - var err error - - // Parse the key - var parsedKey interface{} - if parsedKey, err = x509.ParseECPrivateKey(data); err != nil { - return nil, err - } - - // Test if parsed key is an ECDSA Private Key - var privKey *ecdsa.PrivateKey - var ok bool - if privKey, ok = parsedKey.(*ecdsa.PrivateKey); !ok { - return nil, fmt.Errorf("data doesn't contain valid ECDSA Private Key") - } - - return privKey, nil -} diff --git a/vendor/k8s.io/client-go/util/workqueue/default_rate_limiters.go b/vendor/k8s.io/client-go/util/workqueue/default_rate_limiters.go new file mode 100644 index 000000000..a5bed29e0 --- /dev/null +++ b/vendor/k8s.io/client-go/util/workqueue/default_rate_limiters.go @@ -0,0 +1,211 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package workqueue + +import ( + "math" + "sync" + "time" + + "golang.org/x/time/rate" +) + +type RateLimiter interface { + // When gets an item and gets to decide how long that item should wait + When(item interface{}) time.Duration + // Forget indicates that an item is finished being retried. Doesn't matter whether its for perm failing + // or for success, we'll stop tracking it + Forget(item interface{}) + // NumRequeues returns back how many failures the item has had + NumRequeues(item interface{}) int +} + +// DefaultControllerRateLimiter is a no-arg constructor for a default rate limiter for a workqueue. It has +// both overall and per-item rate limitting. The overall is a token bucket and the per-item is exponential +func DefaultControllerRateLimiter() RateLimiter { + return NewMaxOfRateLimiter( + NewItemExponentialFailureRateLimiter(5*time.Millisecond, 1000*time.Second), + // 10 qps, 100 bucket size. 
This is only for retry speed and its only the overall factor (not per item) + &BucketRateLimiter{Limiter: rate.NewLimiter(rate.Limit(10), 100)}, + ) +} + +// BucketRateLimiter adapts a standard bucket to the workqueue ratelimiter API +type BucketRateLimiter struct { + *rate.Limiter +} + +var _ RateLimiter = &BucketRateLimiter{} + +func (r *BucketRateLimiter) When(item interface{}) time.Duration { + return r.Limiter.Reserve().Delay() +} + +func (r *BucketRateLimiter) NumRequeues(item interface{}) int { + return 0 +} + +func (r *BucketRateLimiter) Forget(item interface{}) { +} + +// ItemExponentialFailureRateLimiter does a simple baseDelay*10^ limit +// dealing with max failures and expiration are up to the caller +type ItemExponentialFailureRateLimiter struct { + failuresLock sync.Mutex + failures map[interface{}]int + + baseDelay time.Duration + maxDelay time.Duration +} + +var _ RateLimiter = &ItemExponentialFailureRateLimiter{} + +func NewItemExponentialFailureRateLimiter(baseDelay time.Duration, maxDelay time.Duration) RateLimiter { + return &ItemExponentialFailureRateLimiter{ + failures: map[interface{}]int{}, + baseDelay: baseDelay, + maxDelay: maxDelay, + } +} + +func DefaultItemBasedRateLimiter() RateLimiter { + return NewItemExponentialFailureRateLimiter(time.Millisecond, 1000*time.Second) +} + +func (r *ItemExponentialFailureRateLimiter) When(item interface{}) time.Duration { + r.failuresLock.Lock() + defer r.failuresLock.Unlock() + + exp := r.failures[item] + r.failures[item] = r.failures[item] + 1 + + // The backoff is capped such that 'calculated' value never overflows. + backoff := float64(r.baseDelay.Nanoseconds()) * math.Pow(2, float64(exp)) + if backoff > math.MaxInt64 { + return r.maxDelay + } + + calculated := time.Duration(backoff) + if calculated > r.maxDelay { + return r.maxDelay + } + + return calculated +} + +func (r *ItemExponentialFailureRateLimiter) NumRequeues(item interface{}) int { + r.failuresLock.Lock() + defer r.failuresLock.Unlock() + + return r.failures[item] +} + +func (r *ItemExponentialFailureRateLimiter) Forget(item interface{}) { + r.failuresLock.Lock() + defer r.failuresLock.Unlock() + + delete(r.failures, item) +} + +// ItemFastSlowRateLimiter does a quick retry for a certain number of attempts, then a slow retry after that +type ItemFastSlowRateLimiter struct { + failuresLock sync.Mutex + failures map[interface{}]int + + maxFastAttempts int + fastDelay time.Duration + slowDelay time.Duration +} + +var _ RateLimiter = &ItemFastSlowRateLimiter{} + +func NewItemFastSlowRateLimiter(fastDelay, slowDelay time.Duration, maxFastAttempts int) RateLimiter { + return &ItemFastSlowRateLimiter{ + failures: map[interface{}]int{}, + fastDelay: fastDelay, + slowDelay: slowDelay, + maxFastAttempts: maxFastAttempts, + } +} + +func (r *ItemFastSlowRateLimiter) When(item interface{}) time.Duration { + r.failuresLock.Lock() + defer r.failuresLock.Unlock() + + r.failures[item] = r.failures[item] + 1 + + if r.failures[item] <= r.maxFastAttempts { + return r.fastDelay + } + + return r.slowDelay +} + +func (r *ItemFastSlowRateLimiter) NumRequeues(item interface{}) int { + r.failuresLock.Lock() + defer r.failuresLock.Unlock() + + return r.failures[item] +} + +func (r *ItemFastSlowRateLimiter) Forget(item interface{}) { + r.failuresLock.Lock() + defer r.failuresLock.Unlock() + + delete(r.failures, item) +} + +// MaxOfRateLimiter calls every RateLimiter and returns the worst case response +// When used with a token bucket limiter, the burst could be apparently exceeded in 
cases where particular items +// were separately delayed a longer time. +type MaxOfRateLimiter struct { + limiters []RateLimiter +} + +func (r *MaxOfRateLimiter) When(item interface{}) time.Duration { + ret := time.Duration(0) + for _, limiter := range r.limiters { + curr := limiter.When(item) + if curr > ret { + ret = curr + } + } + + return ret +} + +func NewMaxOfRateLimiter(limiters ...RateLimiter) RateLimiter { + return &MaxOfRateLimiter{limiters: limiters} +} + +func (r *MaxOfRateLimiter) NumRequeues(item interface{}) int { + ret := 0 + for _, limiter := range r.limiters { + curr := limiter.NumRequeues(item) + if curr > ret { + ret = curr + } + } + + return ret +} + +func (r *MaxOfRateLimiter) Forget(item interface{}) { + for _, limiter := range r.limiters { + limiter.Forget(item) + } +} diff --git a/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go b/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go new file mode 100644 index 000000000..a37177425 --- /dev/null +++ b/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go @@ -0,0 +1,255 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package workqueue + +import ( + "container/heap" + "time" + + "k8s.io/apimachinery/pkg/util/clock" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +// DelayingInterface is an Interface that can Add an item at a later time. This makes it easier to +// requeue items after failures without ending up in a hot-loop. 
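// A sketch of how the limiters above compose (editorial annotation; the
// rate-limited queue constructor it alludes to appears later in this file
// set): DefaultControllerRateLimiter takes the max of a per-item exponential
// backoff (5ms, 10ms, 20ms, ... capped at 1000s) and a shared 10 qps /
// burst-100 token bucket.

package main

import (
	"fmt"

	"k8s.io/client-go/util/workqueue"
)

func main() {
	rl := workqueue.DefaultControllerRateLimiter()
	for i := 0; i < 4; i++ {
		// Successive failures of the same item back off exponentially:
		// roughly 5ms, 10ms, 20ms, 40ms while the bucket still has burst.
		fmt.Println(rl.When("object-key"))
	}
	rl.Forget("object-key") // reset the failure count once the item succeeds
}

// DelayingInterface, which rate-limited queues build on, is declared next: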
+type DelayingInterface interface { + Interface + // AddAfter adds an item to the workqueue after the indicated duration has passed + AddAfter(item interface{}, duration time.Duration) +} + +// NewDelayingQueue constructs a new workqueue with delayed queuing ability +func NewDelayingQueue() DelayingInterface { + return newDelayingQueue(clock.RealClock{}, "") +} + +func NewNamedDelayingQueue(name string) DelayingInterface { + return newDelayingQueue(clock.RealClock{}, name) +} + +func newDelayingQueue(clock clock.Clock, name string) DelayingInterface { + ret := &delayingType{ + Interface: NewNamed(name), + clock: clock, + heartbeat: clock.NewTicker(maxWait), + stopCh: make(chan struct{}), + waitingForAddCh: make(chan *waitFor, 1000), + metrics: newRetryMetrics(name), + } + + go ret.waitingLoop() + + return ret +} + +// delayingType wraps an Interface and provides delayed re-enquing +type delayingType struct { + Interface + + // clock tracks time for delayed firing + clock clock.Clock + + // stopCh lets us signal a shutdown to the waiting loop + stopCh chan struct{} + + // heartbeat ensures we wait no more than maxWait before firing + heartbeat clock.Ticker + + // waitingForAddCh is a buffered channel that feeds waitingForAdd + waitingForAddCh chan *waitFor + + // metrics counts the number of retries + metrics retryMetrics +} + +// waitFor holds the data to add and the time it should be added +type waitFor struct { + data t + readyAt time.Time + // index in the priority queue (heap) + index int +} + +// waitForPriorityQueue implements a priority queue for waitFor items. +// +// waitForPriorityQueue implements heap.Interface. The item occurring next in +// time (i.e., the item with the smallest readyAt) is at the root (index 0). +// Peek returns this minimum item at index 0. Pop returns the minimum item after +// it has been removed from the queue and placed at index Len()-1 by +// container/heap. Push adds an item at index Len(), and container/heap +// percolates it into the correct location. +type waitForPriorityQueue []*waitFor + +func (pq waitForPriorityQueue) Len() int { + return len(pq) +} +func (pq waitForPriorityQueue) Less(i, j int) bool { + return pq[i].readyAt.Before(pq[j].readyAt) +} +func (pq waitForPriorityQueue) Swap(i, j int) { + pq[i], pq[j] = pq[j], pq[i] + pq[i].index = i + pq[j].index = j +} + +// Push adds an item to the queue. Push should not be called directly; instead, +// use `heap.Push`. +func (pq *waitForPriorityQueue) Push(x interface{}) { + n := len(*pq) + item := x.(*waitFor) + item.index = n + *pq = append(*pq, item) +} + +// Pop removes an item from the queue. Pop should not be called directly; +// instead, use `heap.Pop`. +func (pq *waitForPriorityQueue) Pop() interface{} { + n := len(*pq) + item := (*pq)[n-1] + item.index = -1 + *pq = (*pq)[0:(n - 1)] + return item +} + +// Peek returns the item at the beginning of the queue, without removing the +// item or otherwise mutating the queue. It is safe to call directly. 
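// A standalone illustration (editorial annotation) of the min-heap behaviour
// described above. The vendored types are unexported, so this sketch
// re-declares equivalent ones under hypothetical names.

package main

import (
	"container/heap"
	"fmt"
	"time"
)

type item struct {
	data    string
	readyAt time.Time
	index   int
}

type pq []*item

func (q pq) Len() int            { return len(q) }
func (q pq) Less(i, j int) bool  { return q[i].readyAt.Before(q[j].readyAt) }
func (q pq) Swap(i, j int)       { q[i], q[j] = q[j], q[i]; q[i].index = i; q[j].index = j }
func (q *pq) Push(x interface{}) { it := x.(*item); it.index = len(*q); *q = append(*q, it) }
func (q *pq) Pop() interface{} {
	old := *q
	n := len(old)
	it := old[n-1]
	it.index = -1
	*q = old[:n-1]
	return it
}

func main() {
	now := time.Now()
	q := &pq{}
	heap.Init(q)
	heap.Push(q, &item{data: "late", readyAt: now.Add(2 * time.Second)})
	heap.Push(q, &item{data: "soon", readyAt: now.Add(time.Second)})
	fmt.Println((*q)[0].data) // "soon": the earliest readyAt sits at the root
}

// Peek, shown next, exposes exactly that root element: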
+func (pq waitForPriorityQueue) Peek() interface{} { + return pq[0] +} + +// ShutDown gives a way to shut off this queue +func (q *delayingType) ShutDown() { + q.Interface.ShutDown() + close(q.stopCh) + q.heartbeat.Stop() +} + +// AddAfter adds the given item to the work queue after the given delay +func (q *delayingType) AddAfter(item interface{}, duration time.Duration) { + // don't add if we're already shutting down + if q.ShuttingDown() { + return + } + + q.metrics.retry() + + // immediately add things with no delay + if duration <= 0 { + q.Add(item) + return + } + + select { + case <-q.stopCh: + // unblock if ShutDown() is called + case q.waitingForAddCh <- &waitFor{data: item, readyAt: q.clock.Now().Add(duration)}: + } +} + +// maxWait keeps a max bound on the wait time. It's just insurance against weird things happening. +// Checking the queue every 10 seconds isn't expensive and we know that we'll never end up with an +// expired item sitting for more than 10 seconds. +const maxWait = 10 * time.Second + +// waitingLoop runs until the workqueue is shutdown and keeps a check on the list of items to be added. +func (q *delayingType) waitingLoop() { + defer utilruntime.HandleCrash() + + // Make a placeholder channel to use when there are no items in our list + never := make(<-chan time.Time) + + waitingForQueue := &waitForPriorityQueue{} + heap.Init(waitingForQueue) + + waitingEntryByData := map[t]*waitFor{} + + for { + if q.Interface.ShuttingDown() { + return + } + + now := q.clock.Now() + + // Add ready entries + for waitingForQueue.Len() > 0 { + entry := waitingForQueue.Peek().(*waitFor) + if entry.readyAt.After(now) { + break + } + + entry = heap.Pop(waitingForQueue).(*waitFor) + q.Add(entry.data) + delete(waitingEntryByData, entry.data) + } + + // Set up a wait for the first item's readyAt (if one exists) + nextReadyAt := never + if waitingForQueue.Len() > 0 { + entry := waitingForQueue.Peek().(*waitFor) + nextReadyAt = q.clock.After(entry.readyAt.Sub(now)) + } + + select { + case <-q.stopCh: + return + + case <-q.heartbeat.C(): + // continue the loop, which will add ready items + + case <-nextReadyAt: + // continue the loop, which will add ready items + + case waitEntry := <-q.waitingForAddCh: + if waitEntry.readyAt.After(q.clock.Now()) { + insert(waitingForQueue, waitingEntryByData, waitEntry) + } else { + q.Add(waitEntry.data) + } + + drained := false + for !drained { + select { + case waitEntry := <-q.waitingForAddCh: + if waitEntry.readyAt.After(q.clock.Now()) { + insert(waitingForQueue, waitingEntryByData, waitEntry) + } else { + q.Add(waitEntry.data) + } + default: + drained = true + } + } + } + } +} + +// insert adds the entry to the priority queue, or updates the readyAt if it already exists in the queue +func insert(q *waitForPriorityQueue, knownEntries map[t]*waitFor, entry *waitFor) { + // if the entry already exists, update the time only if it would cause the item to be queued sooner + existing, exists := knownEntries[entry.data] + if exists { + if existing.readyAt.After(entry.readyAt) { + existing.readyAt = entry.readyAt + heap.Fix(q, existing.index) + } + + return + } + + heap.Push(q, entry) + knownEntries[entry.data] = entry +} diff --git a/vendor/k8s.io/client-go/util/workqueue/doc.go b/vendor/k8s.io/client-go/util/workqueue/doc.go new file mode 100644 index 000000000..2a00c74ac --- /dev/null +++ b/vendor/k8s.io/client-go/util/workqueue/doc.go @@ -0,0 +1,26 @@ +/* +Copyright 2014 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package workqueue provides a simple queue that supports the following +// features: +// * Fair: items processed in the order in which they are added. +// * Stingy: a single item will not be processed multiple times concurrently, +// and if an item is added multiple times before it can be processed, it +// will only be processed once. +// * Multiple consumers and producers. In particular, it is allowed for an +// item to be reenqueued while it is being processed. +// * Shutdown notifications. +package workqueue diff --git a/vendor/k8s.io/client-go/util/workqueue/metrics.go b/vendor/k8s.io/client-go/util/workqueue/metrics.go new file mode 100644 index 000000000..a481bdfb2 --- /dev/null +++ b/vendor/k8s.io/client-go/util/workqueue/metrics.go @@ -0,0 +1,195 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package workqueue + +import ( + "sync" + "time" +) + +// This file provides abstractions for setting the provider (e.g., prometheus) +// of metrics. + +type queueMetrics interface { + add(item t) + get(item t) + done(item t) +} + +// GaugeMetric represents a single numerical value that can arbitrarily go up +// and down. +type GaugeMetric interface { + Inc() + Dec() +} + +// CounterMetric represents a single numerical value that only ever +// goes up. +type CounterMetric interface { + Inc() +} + +// SummaryMetric captures individual observations. 
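// A hypothetical in-memory implementation (editorial annotation) showing how
// small the GaugeMetric/CounterMetric contracts above are; a real provider
// would back these with an exporter such as Prometheus.

package metricsdemo

import "sync/atomic"

// atomicGauge satisfies both GaugeMetric (Inc/Dec) and CounterMetric (Inc).
type atomicGauge struct{ v int64 }

func (g *atomicGauge) Inc() { atomic.AddInt64(&g.v, 1) }
func (g *atomicGauge) Dec() { atomic.AddInt64(&g.v, -1) }

// SummaryMetric, documented just above, is the observation-style counterpart: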
+type SummaryMetric interface { + Observe(float64) +} + +type noopMetric struct{} + +func (noopMetric) Inc() {} +func (noopMetric) Dec() {} +func (noopMetric) Observe(float64) {} + +type defaultQueueMetrics struct { + // current depth of a workqueue + depth GaugeMetric + // total number of adds handled by a workqueue + adds CounterMetric + // how long an item stays in a workqueue + latency SummaryMetric + // how long processing an item from a workqueue takes + workDuration SummaryMetric + addTimes map[t]time.Time + processingStartTimes map[t]time.Time +} + +func (m *defaultQueueMetrics) add(item t) { + if m == nil { + return + } + + m.adds.Inc() + m.depth.Inc() + if _, exists := m.addTimes[item]; !exists { + m.addTimes[item] = time.Now() + } +} + +func (m *defaultQueueMetrics) get(item t) { + if m == nil { + return + } + + m.depth.Dec() + m.processingStartTimes[item] = time.Now() + if startTime, exists := m.addTimes[item]; exists { + m.latency.Observe(sinceInMicroseconds(startTime)) + delete(m.addTimes, item) + } +} + +func (m *defaultQueueMetrics) done(item t) { + if m == nil { + return + } + + if startTime, exists := m.processingStartTimes[item]; exists { + m.workDuration.Observe(sinceInMicroseconds(startTime)) + delete(m.processingStartTimes, item) + } +} + +// Gets the time since the specified start in microseconds. +func sinceInMicroseconds(start time.Time) float64 { + return float64(time.Since(start).Nanoseconds() / time.Microsecond.Nanoseconds()) +} + +type retryMetrics interface { + retry() +} + +type defaultRetryMetrics struct { + retries CounterMetric +} + +func (m *defaultRetryMetrics) retry() { + if m == nil { + return + } + + m.retries.Inc() +} + +// MetricsProvider generates various metrics used by the queue. +type MetricsProvider interface { + NewDepthMetric(name string) GaugeMetric + NewAddsMetric(name string) CounterMetric + NewLatencyMetric(name string) SummaryMetric + NewWorkDurationMetric(name string) SummaryMetric + NewRetriesMetric(name string) CounterMetric +} + +type noopMetricsProvider struct{} + +func (_ noopMetricsProvider) NewDepthMetric(name string) GaugeMetric { + return noopMetric{} +} + +func (_ noopMetricsProvider) NewAddsMetric(name string) CounterMetric { + return noopMetric{} +} + +func (_ noopMetricsProvider) NewLatencyMetric(name string) SummaryMetric { + return noopMetric{} +} + +func (_ noopMetricsProvider) NewWorkDurationMetric(name string) SummaryMetric { + return noopMetric{} +} + +func (_ noopMetricsProvider) NewRetriesMetric(name string) CounterMetric { + return noopMetric{} +} + +var metricsFactory = struct { + metricsProvider MetricsProvider + setProviders sync.Once +}{ + metricsProvider: noopMetricsProvider{}, +} + +func newQueueMetrics(name string) queueMetrics { + var ret *defaultQueueMetrics + if len(name) == 0 { + return ret + } + return &defaultQueueMetrics{ + depth: metricsFactory.metricsProvider.NewDepthMetric(name), + adds: metricsFactory.metricsProvider.NewAddsMetric(name), + latency: metricsFactory.metricsProvider.NewLatencyMetric(name), + workDuration: metricsFactory.metricsProvider.NewWorkDurationMetric(name), + addTimes: map[t]time.Time{}, + processingStartTimes: map[t]time.Time{}, + } +} + +func newRetryMetrics(name string) retryMetrics { + var ret *defaultRetryMetrics + if len(name) == 0 { + return ret + } + return &defaultRetryMetrics{ + retries: metricsFactory.metricsProvider.NewRetriesMetric(name), + } +} + +// SetProvider sets the metrics provider of the metricsFactory. 
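// A sketch (editorial annotation) of registering a custom provider at
// process start-up. demoProvider is hypothetical and returns no-ops; a real
// provider would return exporter-backed metrics.

package main

import "k8s.io/client-go/util/workqueue"

type nop struct{}

func (nop) Inc()            {}
func (nop) Dec()            {}
func (nop) Observe(float64) {}

type demoProvider struct{}

func (demoProvider) NewDepthMetric(name string) workqueue.GaugeMetric          { return nop{} }
func (demoProvider) NewAddsMetric(name string) workqueue.CounterMetric         { return nop{} }
func (demoProvider) NewLatencyMetric(name string) workqueue.SummaryMetric      { return nop{} }
func (demoProvider) NewWorkDurationMetric(name string) workqueue.SummaryMetric { return nop{} }
func (demoProvider) NewRetriesMetric(name string) workqueue.CounterMetric      { return nop{} }

func main() {
	// Must run before any named queue is constructed; later calls are
	// ignored because registration happens inside a sync.Once.
	workqueue.SetProvider(demoProvider{})
	_ = workqueue.NewNamed("demo")
}

// SetProvider itself is guarded by that sync.Once, so only the first
// registration wins: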
+func SetProvider(metricsProvider MetricsProvider) { + metricsFactory.setProviders.Do(func() { + metricsFactory.metricsProvider = metricsProvider + }) +} diff --git a/vendor/k8s.io/client-go/util/workqueue/parallelizer.go b/vendor/k8s.io/client-go/util/workqueue/parallelizer.go new file mode 100644 index 000000000..526bd244e --- /dev/null +++ b/vendor/k8s.io/client-go/util/workqueue/parallelizer.go @@ -0,0 +1,69 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package workqueue + +import ( + "context" + "sync" + + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +type DoWorkPieceFunc func(piece int) + +// Parallelize is a very simple framework that allows for parallelizing +// N independent pieces of work. +func Parallelize(workers, pieces int, doWorkPiece DoWorkPieceFunc) { + ParallelizeUntil(nil, workers, pieces, doWorkPiece) +} + +// ParallelizeUntil is a framework that allows for parallelizing N +// independent pieces of work until done or the context is canceled. +func ParallelizeUntil(ctx context.Context, workers, pieces int, doWorkPiece DoWorkPieceFunc) { + var stop <-chan struct{} + if ctx != nil { + stop = ctx.Done() + } + + toProcess := make(chan int, pieces) + for i := 0; i < pieces; i++ { + toProcess <- i + } + close(toProcess) + + if pieces < workers { + workers = pieces + } + + wg := sync.WaitGroup{} + wg.Add(workers) + for i := 0; i < workers; i++ { + go func() { + defer utilruntime.HandleCrash() + defer wg.Done() + for piece := range toProcess { + select { + case <-stop: + return + default: + doWorkPiece(piece) + } + } + }() + } + wg.Wait() +} diff --git a/vendor/k8s.io/client-go/util/workqueue/queue.go b/vendor/k8s.io/client-go/util/workqueue/queue.go new file mode 100644 index 000000000..dc9a7cc7b --- /dev/null +++ b/vendor/k8s.io/client-go/util/workqueue/queue.go @@ -0,0 +1,172 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package workqueue + +import ( + "sync" +) + +type Interface interface { + Add(item interface{}) + Len() int + Get() (item interface{}, shutdown bool) + Done(item interface{}) + ShutDown() + ShuttingDown() bool +} + +// New constructs a new work queue (see the package comment). +func New() *Type { + return NewNamed("") +} + +func NewNamed(name string) *Type { + return &Type{ + dirty: set{}, + processing: set{}, + cond: sync.NewCond(&sync.Mutex{}), + metrics: newQueueMetrics(name), + } +} + +// Type is a work queue (see the package comment). 
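// A typical consumer loop (editorial annotation) against the Interface
// declared above. Get blocks, and Done must follow every Get so items
// re-added during processing are re-queued instead of lost; the Add/Get/Done
// methods used here are defined just below on the concrete type.

package main

import (
	"fmt"

	"k8s.io/client-go/util/workqueue"
)

func main() {
	q := workqueue.NewNamed("demo")
	q.Add("a")
	q.Add("a") // deduplicated: "a" is already in the dirty set
	q.Add("b")
	q.ShutDown() // existing items still drain; Get then reports shutdown

	for {
		item, shutdown := q.Get()
		if shutdown {
			return
		}
		fmt.Println(item) // "a", then "b"
		q.Done(item)
	}
}

// The concrete queue type documented above: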
+type Type struct { + // queue defines the order in which we will work on items. Every + // element of queue should be in the dirty set and not in the + // processing set. + queue []t + + // dirty defines all of the items that need to be processed. + dirty set + + // Things that are currently being processed are in the processing set. + // These things may be simultaneously in the dirty set. When we finish + // processing something and remove it from this set, we'll check if + // it's in the dirty set, and if so, add it to the queue. + processing set + + cond *sync.Cond + + shuttingDown bool + + metrics queueMetrics +} + +type empty struct{} +type t interface{} +type set map[t]empty + +func (s set) has(item t) bool { + _, exists := s[item] + return exists +} + +func (s set) insert(item t) { + s[item] = empty{} +} + +func (s set) delete(item t) { + delete(s, item) +} + +// Add marks item as needing processing. +func (q *Type) Add(item interface{}) { + q.cond.L.Lock() + defer q.cond.L.Unlock() + if q.shuttingDown { + return + } + if q.dirty.has(item) { + return + } + + q.metrics.add(item) + + q.dirty.insert(item) + if q.processing.has(item) { + return + } + + q.queue = append(q.queue, item) + q.cond.Signal() +} + +// Len returns the current queue length, for informational purposes only. You +// shouldn't e.g. gate a call to Add() or Get() on Len() being a particular +// value, that can't be synchronized properly. +func (q *Type) Len() int { + q.cond.L.Lock() + defer q.cond.L.Unlock() + return len(q.queue) +} + +// Get blocks until it can return an item to be processed. If shutdown = true, +// the caller should end their goroutine. You must call Done with item when you +// have finished processing it. +func (q *Type) Get() (item interface{}, shutdown bool) { + q.cond.L.Lock() + defer q.cond.L.Unlock() + for len(q.queue) == 0 && !q.shuttingDown { + q.cond.Wait() + } + if len(q.queue) == 0 { + // We must be shutting down. + return nil, true + } + + item, q.queue = q.queue[0], q.queue[1:] + + q.metrics.get(item) + + q.processing.insert(item) + q.dirty.delete(item) + + return item, false +} + +// Done marks item as done processing, and if it has been marked as dirty again +// while it was being processed, it will be re-added to the queue for +// re-processing. +func (q *Type) Done(item interface{}) { + q.cond.L.Lock() + defer q.cond.L.Unlock() + + q.metrics.done(item) + + q.processing.delete(item) + if q.dirty.has(item) { + q.queue = append(q.queue, item) + q.cond.Signal() + } +} + +// ShutDown will cause q to ignore all new items added to it. As soon as the +// worker goroutines have drained the existing items in the queue, they will be +// instructed to exit. +func (q *Type) ShutDown() { + q.cond.L.Lock() + defer q.cond.L.Unlock() + q.shuttingDown = true + q.cond.Broadcast() +} + +func (q *Type) ShuttingDown() bool { + q.cond.L.Lock() + defer q.cond.L.Unlock() + + return q.shuttingDown +} diff --git a/vendor/k8s.io/client-go/util/workqueue/rate_limitting_queue.go b/vendor/k8s.io/client-go/util/workqueue/rate_limitting_queue.go new file mode 100644 index 000000000..417ac001b --- /dev/null +++ b/vendor/k8s.io/client-go/util/workqueue/rate_limitting_queue.go @@ -0,0 +1,69 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package workqueue + +// RateLimitingInterface is an interface that rate limits items being added to the queue. +type RateLimitingInterface interface { + DelayingInterface + + // AddRateLimited adds an item to the workqueue after the rate limiter says its ok + AddRateLimited(item interface{}) + + // Forget indicates that an item is finished being retried. Doesn't matter whether its for perm failing + // or for success, we'll stop the rate limiter from tracking it. This only clears the `rateLimiter`, you + // still have to call `Done` on the queue. + Forget(item interface{}) + + // NumRequeues returns back how many times the item was requeued + NumRequeues(item interface{}) int +} + +// NewRateLimitingQueue constructs a new workqueue with rateLimited queuing ability +// Remember to call Forget! If you don't, you may end up tracking failures forever. +func NewRateLimitingQueue(rateLimiter RateLimiter) RateLimitingInterface { + return &rateLimitingType{ + DelayingInterface: NewDelayingQueue(), + rateLimiter: rateLimiter, + } +} + +func NewNamedRateLimitingQueue(rateLimiter RateLimiter, name string) RateLimitingInterface { + return &rateLimitingType{ + DelayingInterface: NewNamedDelayingQueue(name), + rateLimiter: rateLimiter, + } +} + +// rateLimitingType wraps an Interface and provides rateLimited re-enquing +type rateLimitingType struct { + DelayingInterface + + rateLimiter RateLimiter +} + +// AddRateLimited AddAfter's the item based on the time when the rate limiter says its ok +func (q *rateLimitingType) AddRateLimited(item interface{}) { + q.DelayingInterface.AddAfter(item, q.rateLimiter.When(item)) +} + +func (q *rateLimitingType) NumRequeues(item interface{}) int { + return q.rateLimiter.NumRequeues(item) +} + +func (q *rateLimitingType) Forget(item interface{}) { + q.rateLimiter.Forget(item) +} diff --git a/vendor/k8s.io/code-generator/.github/PULL_REQUEST_TEMPLATE.md b/vendor/k8s.io/code-generator/.github/PULL_REQUEST_TEMPLATE.md index e7e5eb834..e559c074b 100644 --- a/vendor/k8s.io/code-generator/.github/PULL_REQUEST_TEMPLATE.md +++ b/vendor/k8s.io/code-generator/.github/PULL_REQUEST_TEMPLATE.md @@ -1,2 +1,2 @@ -Sorry, we do not accept changes directly against this repository. Please see +Sorry, we do not accept changes directly against this repository. Please see CONTRIBUTING.md for information on where and how to contribute instead. diff --git a/vendor/k8s.io/code-generator/CONTRIBUTING.md b/vendor/k8s.io/code-generator/CONTRIBUTING.md index 76625b7bc..bc4e7697e 100644 --- a/vendor/k8s.io/code-generator/CONTRIBUTING.md +++ b/vendor/k8s.io/code-generator/CONTRIBUTING.md @@ -2,6 +2,6 @@ Do not open pull requests directly against this repository, they will be ignored. Instead, please open pull requests against [kubernetes/kubernetes](https://git.k8s.io/kubernetes/). Please follow the same [contributing guide](https://git.k8s.io/kubernetes/CONTRIBUTING.md) you would follow for any other pull request made to kubernetes/kubernetes. 
-This repository is published from [kubernetes/kubernetes/staging/src/k8s.io/code-generator](https://git.k8s.io/kubernetes/staging/src/k8s.io/code-generator) by the [kubernetes publishing-bot](https://git.k8s.io/publishing-bot). +This repository is published from [kubernetes/kubernetes/staging/src/k8s.io/code-generator](https://git.k8s.io/kubernetes/staging/src/k8s.io/code-generator) by the [kubernetes publishing-bot](https://git.k8s.io/publishing-bot). -Please see [Staging Directory and Publishing](https://git.k8s.io/community/contributors/devel/sig-architecture/staging.md) for more information +Please see [Staging Directory and Publishing](https://git.k8s.io/community/contributors/devel/staging.md) for more information diff --git a/vendor/k8s.io/code-generator/Godeps/Godeps.json b/vendor/k8s.io/code-generator/Godeps/Godeps.json index 274173dad..231233a93 100644 --- a/vendor/k8s.io/code-generator/Godeps/Godeps.json +++ b/vendor/k8s.io/code-generator/Godeps/Godeps.json @@ -1,86 +1,282 @@ { "ImportPath": "k8s.io/code-generator", - "GoVersion": "unknown", - "GodepVersion": "gen-godeps", + "GoVersion": "go1.10", + "GodepVersion": "v80", "Packages": [ "./..." ], "Deps": [ { - "ImportPath": "github.com/BurntSushi/xgb", - "Rev": "27f122750802" + "ImportPath": "github.com/PuerkitoBio/purell", + "Rev": "8a290539e2e8629dbc4e6bad948158f790ec31f4" }, { - "ImportPath": "github.com/gogo/protobuf", - "Rev": "v1.0.0" + "ImportPath": "github.com/PuerkitoBio/urlesc", + "Rev": "5bd2802263f21d8788851d5305584c82a5c75d7e" }, { - "ImportPath": "github.com/remyoudompheng/bigfft", - "Rev": "52369c62f446" + "ImportPath": "github.com/emicklei/go-restful", + "Rev": "ff4f55a206334ef123e4f79bbf348980da81ca46" + }, + { + "ImportPath": "github.com/emicklei/go-restful/log", + "Rev": "ff4f55a206334ef123e4f79bbf348980da81ca46" + }, + { + "ImportPath": "github.com/go-openapi/jsonpointer", + "Rev": "46af16f9f7b149af66e5d1bd010e3574dc06de98" + }, + { + "ImportPath": "github.com/go-openapi/jsonreference", + "Rev": "13c6e3589ad90f49bd3e3bbe2c2cb3d7a4142272" + }, + { + "ImportPath": "github.com/go-openapi/spec", + "Rev": "1de3e0542de65ad8d75452a595886fdd0befb363" + }, + { + "ImportPath": "github.com/go-openapi/swag", + "Rev": "f3f9494671f93fcff853e3c6e9e948b3eb71e590" + }, + { + "ImportPath": "github.com/gogo/protobuf/gogoproto", + "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" + }, + { + "ImportPath": "github.com/gogo/protobuf/plugin/compare", + "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" + }, + { + "ImportPath": "github.com/gogo/protobuf/plugin/defaultcheck", + "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" + }, + { + "ImportPath": "github.com/gogo/protobuf/plugin/description", + "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" + }, + { + "ImportPath": "github.com/gogo/protobuf/plugin/embedcheck", + "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" + }, + { + "ImportPath": "github.com/gogo/protobuf/plugin/enumstringer", + "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" + }, + { + "ImportPath": "github.com/gogo/protobuf/plugin/equal", + "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" + }, + { + "ImportPath": "github.com/gogo/protobuf/plugin/face", + "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" + }, + { + "ImportPath": "github.com/gogo/protobuf/plugin/gostring", + "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" + }, + { + "ImportPath": "github.com/gogo/protobuf/plugin/marshalto", + "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" + }, + { + "ImportPath": "github.com/gogo/protobuf/plugin/oneofcheck", + 
"Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" + }, + { + "ImportPath": "github.com/gogo/protobuf/plugin/populate", + "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" + }, + { + "ImportPath": "github.com/gogo/protobuf/plugin/size", + "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" + }, + { + "ImportPath": "github.com/gogo/protobuf/plugin/stringer", + "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" + }, + { + "ImportPath": "github.com/gogo/protobuf/plugin/testgen", + "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" + }, + { + "ImportPath": "github.com/gogo/protobuf/plugin/union", + "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" + }, + { + "ImportPath": "github.com/gogo/protobuf/plugin/unmarshal", + "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" + }, + { + "ImportPath": "github.com/gogo/protobuf/proto", + "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" + }, + { + "ImportPath": "github.com/gogo/protobuf/protoc-gen-gogo/descriptor", + "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" + }, + { + "ImportPath": "github.com/gogo/protobuf/protoc-gen-gogo/generator", + "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" + }, + { + "ImportPath": "github.com/gogo/protobuf/protoc-gen-gogo/grpc", + "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" + }, + { + "ImportPath": "github.com/gogo/protobuf/protoc-gen-gogo/plugin", + "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" + }, + { + "ImportPath": "github.com/gogo/protobuf/sortkeys", + "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" + }, + { + "ImportPath": "github.com/gogo/protobuf/vanity", + "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" + }, + { + "ImportPath": "github.com/gogo/protobuf/vanity/command", + "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" + }, + { + "ImportPath": "github.com/golang/glog", + "Rev": "44145f04b68cf362d9c4df2182967c2275eaefed" + }, + { + "ImportPath": "github.com/mailru/easyjson/buffer", + "Rev": "2f5df55504ebc322e4d52d34df6a1f5b503bf26d" + }, + { + "ImportPath": "github.com/mailru/easyjson/jlexer", + "Rev": "2f5df55504ebc322e4d52d34df6a1f5b503bf26d" + }, + { + "ImportPath": "github.com/mailru/easyjson/jwriter", + "Rev": "2f5df55504ebc322e4d52d34df6a1f5b503bf26d" }, { "ImportPath": "github.com/spf13/pflag", - "Rev": "v1.0.3" + "Rev": "583c0c0531f06d5278b7d917446061adc344b5cd" + }, + { + "ImportPath": "golang.org/x/net/idna", + "Rev": "0ed95abb35c445290478a5348a7b38bb154135fd" + }, + { + "ImportPath": "golang.org/x/text/cases", + "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" + }, + { + "ImportPath": "golang.org/x/text/internal", + "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" + }, + { + "ImportPath": "golang.org/x/text/internal/tag", + "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" + }, + { + "ImportPath": "golang.org/x/text/language", + "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" + }, + { + "ImportPath": "golang.org/x/text/runes", + "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" + }, + { + "ImportPath": "golang.org/x/text/secure/bidirule", + "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" + }, + { + "ImportPath": "golang.org/x/text/secure/precis", + "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" + }, + { + "ImportPath": "golang.org/x/text/transform", + "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" + }, + { + "ImportPath": "golang.org/x/text/unicode/bidi", + "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" + }, + { + "ImportPath": "golang.org/x/text/unicode/norm", + "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" + }, + { + "ImportPath": "golang.org/x/text/width", + 
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" + }, + { + "ImportPath": "golang.org/x/tools/go/ast/astutil", + "Rev": "2382e3994d48b1d22acc2c86bcad0a2aff028e32" + }, + { + "ImportPath": "golang.org/x/tools/imports", + "Rev": "2382e3994d48b1d22acc2c86bcad0a2aff028e32" + }, + { + "ImportPath": "gopkg.in/yaml.v2", + "Rev": "670d4cfef0544295bc27a114dbac37980d83185a" }, { - "ImportPath": "golang.org/x/exp", - "Rev": "4b39c73a6495" + "ImportPath": "k8s.io/gengo/args", + "Rev": "fdcf9f9480fdd5bf2b3c3df9bf4ecd22b25b87e2" }, { - "ImportPath": "golang.org/x/image", - "Rev": "0694c2d4d067" + "ImportPath": "k8s.io/gengo/examples/deepcopy-gen/generators", + "Rev": "fdcf9f9480fdd5bf2b3c3df9bf4ecd22b25b87e2" }, { - "ImportPath": "golang.org/x/mobile", - "Rev": "d3739f865fa6" + "ImportPath": "k8s.io/gengo/examples/defaulter-gen/generators", + "Rev": "fdcf9f9480fdd5bf2b3c3df9bf4ecd22b25b87e2" }, { - "ImportPath": "golang.org/x/sys", - "Rev": "3b5209105503" + "ImportPath": "k8s.io/gengo/examples/import-boss/generators", + "Rev": "fdcf9f9480fdd5bf2b3c3df9bf4ecd22b25b87e2" }, { - "ImportPath": "golang.org/x/text", - "Rev": "e6919f6577db" + "ImportPath": "k8s.io/gengo/examples/set-gen/generators", + "Rev": "fdcf9f9480fdd5bf2b3c3df9bf4ecd22b25b87e2" }, { - "ImportPath": "golang.org/x/tools", - "Rev": "aa82965741a9" + "ImportPath": "k8s.io/gengo/examples/set-gen/sets", + "Rev": "fdcf9f9480fdd5bf2b3c3df9bf4ecd22b25b87e2" }, { - "ImportPath": "gonum.org/v1/gonum", - "Rev": "3d26580ed485" + "ImportPath": "k8s.io/gengo/generator", + "Rev": "fdcf9f9480fdd5bf2b3c3df9bf4ecd22b25b87e2" }, { - "ImportPath": "gonum.org/v1/netlib", - "Rev": "76723241ea4e" + "ImportPath": "k8s.io/gengo/namer", + "Rev": "fdcf9f9480fdd5bf2b3c3df9bf4ecd22b25b87e2" }, { - "ImportPath": "k8s.io/gengo", - "Rev": "f8a0810f38af" + "ImportPath": "k8s.io/gengo/parser", + "Rev": "fdcf9f9480fdd5bf2b3c3df9bf4ecd22b25b87e2" }, { - "ImportPath": "k8s.io/klog", - "Rev": "v0.3.1" + "ImportPath": "k8s.io/gengo/types", + "Rev": "fdcf9f9480fdd5bf2b3c3df9bf4ecd22b25b87e2" }, { - "ImportPath": "modernc.org/cc", - "Rev": "v1.0.0" + "ImportPath": "k8s.io/kube-openapi/cmd/openapi-gen/args", + "Rev": "0cf8f7e6ed1d2e3d47d02e3b6e559369af24d803" }, { - "ImportPath": "modernc.org/golex", - "Rev": "v1.0.0" + "ImportPath": "k8s.io/kube-openapi/pkg/common", + "Rev": "0cf8f7e6ed1d2e3d47d02e3b6e559369af24d803" }, { - "ImportPath": "modernc.org/mathutil", - "Rev": "v1.0.0" + "ImportPath": "k8s.io/kube-openapi/pkg/generators", + "Rev": "0cf8f7e6ed1d2e3d47d02e3b6e559369af24d803" }, { - "ImportPath": "modernc.org/strutil", - "Rev": "v1.0.0" + "ImportPath": "k8s.io/kube-openapi/pkg/generators/rules", + "Rev": "0cf8f7e6ed1d2e3d47d02e3b6e559369af24d803" }, { - "ImportPath": "modernc.org/xc", - "Rev": "v1.0.0" + "ImportPath": "k8s.io/kube-openapi/pkg/util/sets", + "Rev": "0cf8f7e6ed1d2e3d47d02e3b6e559369af24d803" } ] -} \ No newline at end of file +} diff --git a/vendor/k8s.io/code-generator/Godeps/OWNERS b/vendor/k8s.io/code-generator/Godeps/OWNERS index 0f5d2f673..3d49f3060 100644 --- a/vendor/k8s.io/code-generator/Godeps/OWNERS +++ b/vendor/k8s.io/code-generator/Godeps/OWNERS @@ -1,4 +1,2 @@ -# See the OWNERS docs at https://go.k8s.io/owners - approvers: - dep-approvers diff --git a/vendor/k8s.io/code-generator/OWNERS b/vendor/k8s.io/code-generator/OWNERS index 6f7abe3ed..4155fc60c 100644 --- a/vendor/k8s.io/code-generator/OWNERS +++ b/vendor/k8s.io/code-generator/OWNERS @@ -1,5 +1,3 @@ -# See the OWNERS docs at https://go.k8s.io/owners - approvers: - lavalamp - wojtek-t diff 
--git a/vendor/k8s.io/code-generator/README.md b/vendor/k8s.io/code-generator/README.md index e03c6bf55..5808c86b3 100644 --- a/vendor/k8s.io/code-generator/README.md +++ b/vendor/k8s.io/code-generator/README.md @@ -1,6 +1,6 @@ # code-generator -Golang code-generators used to implement [Kubernetes-style API types](https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md). +Golang code-generators used to implement [Kubernetes-style API types](https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md). ## Purpose diff --git a/vendor/k8s.io/code-generator/SECURITY_CONTACTS b/vendor/k8s.io/code-generator/SECURITY_CONTACTS index 6df6a4d6a..0648a8ebf 100644 --- a/vendor/k8s.io/code-generator/SECURITY_CONTACTS +++ b/vendor/k8s.io/code-generator/SECURITY_CONTACTS @@ -1,17 +1,17 @@ # Defined below are the security contacts for this repo. # -# They are the contact point for the Product Security Committee to reach out +# They are the contact point for the Product Security Team to reach out # to for triaging and handling of incoming issues. # # The below names agree to abide by the -# [Embargo Policy](https://git.k8s.io/security/private-distributors-list.md#embargo-policy) +# [Embargo Policy](https://github.com/kubernetes/sig-release/blob/master/security-release-process-documentation/security-release-process.md#embargo-policy) # and will be removed and replaced if they violate that agreement. # # DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE # INSTRUCTIONS AT https://kubernetes.io/security/ cjcullen -joelsmith +jessfraz liggitt philips tallclair diff --git a/vendor/k8s.io/code-generator/_examples/MixedCase/apis/example/v1/types.go b/vendor/k8s.io/code-generator/_examples/MixedCase/apis/example/v1/types.go deleted file mode 100644 index d79ea38b7..000000000 --- a/vendor/k8s.io/code-generator/_examples/MixedCase/apis/example/v1/types.go +++ /dev/null @@ -1,74 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// TestType is a top-level type. A client is created for it. -type TestType struct { - metav1.TypeMeta `json:",inline"` - // +optional - metav1.ObjectMeta `json:"metadata,omitempty"` - // +optional - Status TestTypeStatus `json:"status,omitempty"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// TestTypeList is a top-level list type. The client methods for lists are automatically created. -// You are not supposed to create a separated client for this one. 
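// An editorial sketch of the marker comments used throughout this example
// file: "Widget" is hypothetical and exists only to show the tag placement
// that drives client-gen and deepcopy-gen.

package v1

import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// Widget gets a generated, namespaced client (Create/Get/List/Watch/...)
// because of the +genclient tag above, just like TestType.
type Widget struct {
	metav1.TypeMeta `json:",inline"`
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty"`
}

// TestTypeList, as the comment above notes, needs no separate client: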
-type TestTypeList struct { - metav1.TypeMeta `json:",inline"` - // +optional - metav1.ListMeta `json:"metadata,omitempty"` - - Items []TestType `json:"items"` -} - -type TestTypeStatus struct { - Blah string -} - -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -type ClusterTestTypeList struct { - metav1.TypeMeta - metav1.ListMeta - Items []ClusterTestType -} - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/kubernetes/pkg/apis/autoscaling.Scale -// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/kubernetes/pkg/apis/autoscaling.Scale,result=k8s.io/kubernetes/pkg/apis/autoscaling.Scale - -type ClusterTestType struct { - metav1.TypeMeta `json:",inline"` - // +optional - metav1.ObjectMeta `json:"metadata,omitempty"` - // +optional - Status ClusterTestTypeStatus `json:"status,omitempty"` -} - -type ClusterTestTypeStatus struct { - Blah string -} diff --git a/vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/typed/example/v1/clustertesttype.go b/vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/typed/example/v1/clustertesttype.go deleted file mode 100644 index 559ce47eb..000000000 --- a/vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/typed/example/v1/clustertesttype.go +++ /dev/null @@ -1,210 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1 - -import ( - "time" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" - v1 "k8s.io/code-generator/_examples/MixedCase/apis/example/v1" - scheme "k8s.io/code-generator/_examples/MixedCase/clientset/versioned/scheme" - autoscaling "k8s.io/kubernetes/pkg/apis/autoscaling" -) - -// ClusterTestTypesGetter has a method to return a ClusterTestTypeInterface. -// A group's client should implement this interface. -type ClusterTestTypesGetter interface { - ClusterTestTypes() ClusterTestTypeInterface -} - -// ClusterTestTypeInterface has methods to work with ClusterTestType resources. 
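// How a consumer would use this generated, cluster-scoped client (editorial
// sketch; the import alias is hypothetical, and NewForConfig is defined in
// example_client.go further down this diff):

package demo

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	rest "k8s.io/client-go/rest"
	examplev1 "k8s.io/code-generator/_examples/MixedCase/clientset/versioned/typed/example/v1"
)

func listClusterTestTypes(cfg *rest.Config) error {
	client, err := examplev1.NewForConfig(cfg)
	if err != nil {
		return err
	}
	// Cluster-scoped: no namespace argument, unlike TestTypes(namespace).
	list, err := client.ClusterTestTypes().List(metav1.ListOptions{})
	if err != nil {
		return err
	}
	for _, item := range list.Items {
		_ = item.Name
	}
	return nil
}

// The generated method set: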
-type ClusterTestTypeInterface interface { - Create(*v1.ClusterTestType) (*v1.ClusterTestType, error) - Update(*v1.ClusterTestType) (*v1.ClusterTestType, error) - UpdateStatus(*v1.ClusterTestType) (*v1.ClusterTestType, error) - Delete(name string, options *metav1.DeleteOptions) error - DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(name string, options metav1.GetOptions) (*v1.ClusterTestType, error) - List(opts metav1.ListOptions) (*v1.ClusterTestTypeList, error) - Watch(opts metav1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ClusterTestType, err error) - GetScale(clusterTestTypeName string, options metav1.GetOptions) (*autoscaling.Scale, error) - UpdateScale(clusterTestTypeName string, scale *autoscaling.Scale) (*autoscaling.Scale, error) - - ClusterTestTypeExpansion -} - -// clusterTestTypes implements ClusterTestTypeInterface -type clusterTestTypes struct { - client rest.Interface -} - -// newClusterTestTypes returns a ClusterTestTypes -func newClusterTestTypes(c *ExampleV1Client) *clusterTestTypes { - return &clusterTestTypes{ - client: c.RESTClient(), - } -} - -// Get takes name of the clusterTestType, and returns the corresponding clusterTestType object, and an error if there is any. -func (c *clusterTestTypes) Get(name string, options metav1.GetOptions) (result *v1.ClusterTestType, err error) { - result = &v1.ClusterTestType{} - err = c.client.Get(). - Resource("clustertesttypes"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ClusterTestTypes that match those selectors. -func (c *clusterTestTypes) List(opts metav1.ListOptions) (result *v1.ClusterTestTypeList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.ClusterTestTypeList{} - err = c.client.Get(). - Resource("clustertesttypes"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested clusterTestTypes. -func (c *clusterTestTypes) Watch(opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("clustertesttypes"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch() -} - -// Create takes the representation of a clusterTestType and creates it. Returns the server's representation of the clusterTestType, and an error, if there is any. -func (c *clusterTestTypes) Create(clusterTestType *v1.ClusterTestType) (result *v1.ClusterTestType, err error) { - result = &v1.ClusterTestType{} - err = c.client.Post(). - Resource("clustertesttypes"). - Body(clusterTestType). - Do(). - Into(result) - return -} - -// Update takes the representation of a clusterTestType and updates it. Returns the server's representation of the clusterTestType, and an error, if there is any. -func (c *clusterTestTypes) Update(clusterTestType *v1.ClusterTestType) (result *v1.ClusterTestType, err error) { - result = &v1.ClusterTestType{} - err = c.client.Put(). - Resource("clustertesttypes"). - Name(clusterTestType.Name). - Body(clusterTestType). - Do(). 
- Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *clusterTestTypes) UpdateStatus(clusterTestType *v1.ClusterTestType) (result *v1.ClusterTestType, err error) { - result = &v1.ClusterTestType{} - err = c.client.Put(). - Resource("clustertesttypes"). - Name(clusterTestType.Name). - SubResource("status"). - Body(clusterTestType). - Do(). - Into(result) - return -} - -// Delete takes name of the clusterTestType and deletes it. Returns an error if one occurs. -func (c *clusterTestTypes) Delete(name string, options *metav1.DeleteOptions) error { - return c.client.Delete(). - Resource("clustertesttypes"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *clusterTestTypes) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("clustertesttypes"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched clusterTestType. -func (c *clusterTestTypes) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ClusterTestType, err error) { - result = &v1.ClusterTestType{} - err = c.client.Patch(pt). - Resource("clustertesttypes"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} - -// GetScale takes name of the clusterTestType, and returns the corresponding autoscaling.Scale object, and an error if there is any. -func (c *clusterTestTypes) GetScale(clusterTestTypeName string, options metav1.GetOptions) (result *autoscaling.Scale, err error) { - result = &autoscaling.Scale{} - err = c.client.Get(). - Resource("clustertesttypes"). - Name(clusterTestTypeName). - SubResource("scale"). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. -func (c *clusterTestTypes) UpdateScale(clusterTestTypeName string, scale *autoscaling.Scale) (result *autoscaling.Scale, err error) { - result = &autoscaling.Scale{} - err = c.client.Put(). - Resource("clustertesttypes"). - Name(clusterTestTypeName). - SubResource("scale"). - Body(scale). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/typed/example/v1/doc.go b/vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/typed/example/v1/doc.go deleted file mode 100644 index 3af5d054f..000000000 --- a/vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/typed/example/v1/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// This package has the automatically generated typed clients. -package v1 diff --git a/vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/typed/example/v1/example_client.go b/vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/typed/example/v1/example_client.go deleted file mode 100644 index e9ba43512..000000000 --- a/vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/typed/example/v1/example_client.go +++ /dev/null @@ -1,94 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1 - -import ( - rest "k8s.io/client-go/rest" - v1 "k8s.io/code-generator/_examples/MixedCase/apis/example/v1" - "k8s.io/code-generator/_examples/MixedCase/clientset/versioned/scheme" -) - -type ExampleV1Interface interface { - RESTClient() rest.Interface - ClusterTestTypesGetter - TestTypesGetter -} - -// ExampleV1Client is used to interact with features provided by the example.crd.code-generator.k8s.io group. -type ExampleV1Client struct { - restClient rest.Interface -} - -func (c *ExampleV1Client) ClusterTestTypes() ClusterTestTypeInterface { - return newClusterTestTypes(c) -} - -func (c *ExampleV1Client) TestTypes(namespace string) TestTypeInterface { - return newTestTypes(c, namespace) -} - -// NewForConfig creates a new ExampleV1Client for the given config. -func NewForConfig(c *rest.Config) (*ExampleV1Client, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - client, err := rest.RESTClientFor(&config) - if err != nil { - return nil, err - } - return &ExampleV1Client{client}, nil -} - -// NewForConfigOrDie creates a new ExampleV1Client for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *ExampleV1Client { - client, err := NewForConfig(c) - if err != nil { - panic(err) - } - return client -} - -// New creates a new ExampleV1Client for the given RESTClient. -func New(c rest.Interface) *ExampleV1Client { - return &ExampleV1Client{c} -} - -func setConfigDefaults(config *rest.Config) error { - gv := v1.SchemeGroupVersion - config.GroupVersion = &gv - config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() - - if config.UserAgent == "" { - config.UserAgent = rest.DefaultKubernetesUserAgent() - } - - return nil -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. 
-func (c *ExampleV1Client) RESTClient() rest.Interface { - if c == nil { - return nil - } - return c.restClient -} diff --git a/vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/typed/example/v1/fake/fake_clustertesttype.go b/vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/typed/example/v1/fake/fake_clustertesttype.go deleted file mode 100644 index edf217df0..000000000 --- a/vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/typed/example/v1/fake/fake_clustertesttype.go +++ /dev/null @@ -1,152 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - testing "k8s.io/client-go/testing" - examplev1 "k8s.io/code-generator/_examples/MixedCase/apis/example/v1" - autoscaling "k8s.io/kubernetes/pkg/apis/autoscaling" -) - -// FakeClusterTestTypes implements ClusterTestTypeInterface -type FakeClusterTestTypes struct { - Fake *FakeExampleV1 -} - -var clustertesttypesResource = schema.GroupVersionResource{Group: "example.crd.code-generator.k8s.io", Version: "v1", Resource: "clustertesttypes"} - -var clustertesttypesKind = schema.GroupVersionKind{Group: "example.crd.code-generator.k8s.io", Version: "v1", Kind: "ClusterTestType"} - -// Get takes name of the clusterTestType, and returns the corresponding clusterTestType object, and an error if there is any. -func (c *FakeClusterTestTypes) Get(name string, options v1.GetOptions) (result *examplev1.ClusterTestType, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootGetAction(clustertesttypesResource, name), &examplev1.ClusterTestType{}) - if obj == nil { - return nil, err - } - return obj.(*examplev1.ClusterTestType), err -} - -// List takes label and field selectors, and returns the list of ClusterTestTypes that match those selectors. -func (c *FakeClusterTestTypes) List(opts v1.ListOptions) (result *examplev1.ClusterTestTypeList, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootListAction(clustertesttypesResource, clustertesttypesKind, opts), &examplev1.ClusterTestTypeList{}) - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &examplev1.ClusterTestTypeList{ListMeta: obj.(*examplev1.ClusterTestTypeList).ListMeta} - for _, item := range obj.(*examplev1.ClusterTestTypeList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested clusterTestTypes. -func (c *FakeClusterTestTypes) Watch(opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. 
- InvokesWatch(testing.NewRootWatchAction(clustertesttypesResource, opts)) -} - -// Create takes the representation of a clusterTestType and creates it. Returns the server's representation of the clusterTestType, and an error, if there is any. -func (c *FakeClusterTestTypes) Create(clusterTestType *examplev1.ClusterTestType) (result *examplev1.ClusterTestType, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(clustertesttypesResource, clusterTestType), &examplev1.ClusterTestType{}) - if obj == nil { - return nil, err - } - return obj.(*examplev1.ClusterTestType), err -} - -// Update takes the representation of a clusterTestType and updates it. Returns the server's representation of the clusterTestType, and an error, if there is any. -func (c *FakeClusterTestTypes) Update(clusterTestType *examplev1.ClusterTestType) (result *examplev1.ClusterTestType, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(clustertesttypesResource, clusterTestType), &examplev1.ClusterTestType{}) - if obj == nil { - return nil, err - } - return obj.(*examplev1.ClusterTestType), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeClusterTestTypes) UpdateStatus(clusterTestType *examplev1.ClusterTestType) (*examplev1.ClusterTestType, error) { - obj, err := c.Fake. - Invokes(testing.NewRootUpdateSubresourceAction(clustertesttypesResource, "status", clusterTestType), &examplev1.ClusterTestType{}) - if obj == nil { - return nil, err - } - return obj.(*examplev1.ClusterTestType), err -} - -// Delete takes name of the clusterTestType and deletes it. Returns an error if one occurs. -func (c *FakeClusterTestTypes) Delete(name string, options *v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteAction(clustertesttypesResource, name), &examplev1.ClusterTestType{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeClusterTestTypes) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(clustertesttypesResource, listOptions) - - _, err := c.Fake.Invokes(action, &examplev1.ClusterTestTypeList{}) - return err -} - -// Patch applies the patch and returns the patched clusterTestType. -func (c *FakeClusterTestTypes) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *examplev1.ClusterTestType, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(clustertesttypesResource, name, pt, data, subresources...), &examplev1.ClusterTestType{}) - if obj == nil { - return nil, err - } - return obj.(*examplev1.ClusterTestType), err -} - -// GetScale takes name of the clusterTestType, and returns the corresponding scale object, and an error if there is any. -func (c *FakeClusterTestTypes) GetScale(clusterTestTypeName string, options v1.GetOptions) (result *autoscaling.Scale, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootGetSubresourceAction(clustertesttypesResource, "scale", clusterTestTypeName), &autoscaling.Scale{}) - if obj == nil { - return nil, err - } - return obj.(*autoscaling.Scale), err -} - -// UpdateScale takes the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. 
-func (c *FakeClusterTestTypes) UpdateScale(clusterTestTypeName string, scale *autoscaling.Scale) (result *autoscaling.Scale, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootUpdateSubresourceAction(clustertesttypesResource, "scale", scale), &autoscaling.Scale{}) - if obj == nil { - return nil, err - } - return obj.(*autoscaling.Scale), err -} diff --git a/vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/typed/example/v1/fake/fake_testtype.go b/vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/typed/example/v1/fake/fake_testtype.go deleted file mode 100644 index 2ff811a96..000000000 --- a/vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/typed/example/v1/fake/fake_testtype.go +++ /dev/null @@ -1,140 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - testing "k8s.io/client-go/testing" - examplev1 "k8s.io/code-generator/_examples/MixedCase/apis/example/v1" -) - -// FakeTestTypes implements TestTypeInterface -type FakeTestTypes struct { - Fake *FakeExampleV1 - ns string -} - -var testtypesResource = schema.GroupVersionResource{Group: "example.crd.code-generator.k8s.io", Version: "v1", Resource: "testtypes"} - -var testtypesKind = schema.GroupVersionKind{Group: "example.crd.code-generator.k8s.io", Version: "v1", Kind: "TestType"} - -// Get takes name of the testType, and returns the corresponding testType object, and an error if there is any. -func (c *FakeTestTypes) Get(name string, options v1.GetOptions) (result *examplev1.TestType, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(testtypesResource, c.ns, name), &examplev1.TestType{}) - - if obj == nil { - return nil, err - } - return obj.(*examplev1.TestType), err -} - -// List takes label and field selectors, and returns the list of TestTypes that match those selectors. -func (c *FakeTestTypes) List(opts v1.ListOptions) (result *examplev1.TestTypeList, err error) { - obj, err := c.Fake. - Invokes(testing.NewListAction(testtypesResource, testtypesKind, c.ns, opts), &examplev1.TestTypeList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &examplev1.TestTypeList{ListMeta: obj.(*examplev1.TestTypeList).ListMeta} - for _, item := range obj.(*examplev1.TestTypeList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested testTypes. -func (c *FakeTestTypes) Watch(opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. 
- InvokesWatch(testing.NewWatchAction(testtypesResource, c.ns, opts)) - -} - -// Create takes the representation of a testType and creates it. Returns the server's representation of the testType, and an error, if there is any. -func (c *FakeTestTypes) Create(testType *examplev1.TestType) (result *examplev1.TestType, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(testtypesResource, c.ns, testType), &examplev1.TestType{}) - - if obj == nil { - return nil, err - } - return obj.(*examplev1.TestType), err -} - -// Update takes the representation of a testType and updates it. Returns the server's representation of the testType, and an error, if there is any. -func (c *FakeTestTypes) Update(testType *examplev1.TestType) (result *examplev1.TestType, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(testtypesResource, c.ns, testType), &examplev1.TestType{}) - - if obj == nil { - return nil, err - } - return obj.(*examplev1.TestType), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeTestTypes) UpdateStatus(testType *examplev1.TestType) (*examplev1.TestType, error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(testtypesResource, "status", c.ns, testType), &examplev1.TestType{}) - - if obj == nil { - return nil, err - } - return obj.(*examplev1.TestType), err -} - -// Delete takes name of the testType and deletes it. Returns an error if one occurs. -func (c *FakeTestTypes) Delete(name string, options *v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteAction(testtypesResource, c.ns, name), &examplev1.TestType{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeTestTypes) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(testtypesResource, c.ns, listOptions) - - _, err := c.Fake.Invokes(action, &examplev1.TestTypeList{}) - return err -} - -// Patch applies the patch and returns the patched testType. -func (c *FakeTestTypes) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *examplev1.TestType, err error) { - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(testtypesResource, c.ns, name, pt, data, subresources...), &examplev1.TestType{}) - - if obj == nil { - return nil, err - } - return obj.(*examplev1.TestType), err -} diff --git a/vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/typed/example/v1/testtype.go b/vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/typed/example/v1/testtype.go deleted file mode 100644 index 9944e3e7c..000000000 --- a/vendor/k8s.io/code-generator/_examples/MixedCase/clientset/versioned/typed/example/v1/testtype.go +++ /dev/null @@ -1,191 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. 
DO NOT EDIT. - -package v1 - -import ( - "time" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" - v1 "k8s.io/code-generator/_examples/MixedCase/apis/example/v1" - scheme "k8s.io/code-generator/_examples/MixedCase/clientset/versioned/scheme" -) - -// TestTypesGetter has a method to return a TestTypeInterface. -// A group's client should implement this interface. -type TestTypesGetter interface { - TestTypes(namespace string) TestTypeInterface -} - -// TestTypeInterface has methods to work with TestType resources. -type TestTypeInterface interface { - Create(*v1.TestType) (*v1.TestType, error) - Update(*v1.TestType) (*v1.TestType, error) - UpdateStatus(*v1.TestType) (*v1.TestType, error) - Delete(name string, options *metav1.DeleteOptions) error - DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(name string, options metav1.GetOptions) (*v1.TestType, error) - List(opts metav1.ListOptions) (*v1.TestTypeList, error) - Watch(opts metav1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.TestType, err error) - TestTypeExpansion -} - -// testTypes implements TestTypeInterface -type testTypes struct { - client rest.Interface - ns string -} - -// newTestTypes returns a TestTypes -func newTestTypes(c *ExampleV1Client, namespace string) *testTypes { - return &testTypes{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the testType, and returns the corresponding testType object, and an error if there is any. -func (c *testTypes) Get(name string, options metav1.GetOptions) (result *v1.TestType, err error) { - result = &v1.TestType{} - err = c.client.Get(). - Namespace(c.ns). - Resource("testtypes"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of TestTypes that match those selectors. -func (c *testTypes) List(opts metav1.ListOptions) (result *v1.TestTypeList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.TestTypeList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("testtypes"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested testTypes. -func (c *testTypes) Watch(opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("testtypes"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch() -} - -// Create takes the representation of a testType and creates it. Returns the server's representation of the testType, and an error, if there is any. -func (c *testTypes) Create(testType *v1.TestType) (result *v1.TestType, err error) { - result = &v1.TestType{} - err = c.client.Post(). - Namespace(c.ns). - Resource("testtypes"). - Body(testType). - Do(). - Into(result) - return -} - -// Update takes the representation of a testType and updates it. Returns the server's representation of the testType, and an error, if there is any. 
-func (c *testTypes) Update(testType *v1.TestType) (result *v1.TestType, err error) { - result = &v1.TestType{} - err = c.client.Put(). - Namespace(c.ns). - Resource("testtypes"). - Name(testType.Name). - Body(testType). - Do(). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *testTypes) UpdateStatus(testType *v1.TestType) (result *v1.TestType, err error) { - result = &v1.TestType{} - err = c.client.Put(). - Namespace(c.ns). - Resource("testtypes"). - Name(testType.Name). - SubResource("status"). - Body(testType). - Do(). - Into(result) - return -} - -// Delete takes name of the testType and deletes it. Returns an error if one occurs. -func (c *testTypes) Delete(name string, options *metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("testtypes"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *testTypes) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("testtypes"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched testType. -func (c *testTypes) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.TestType, err error) { - result = &v1.TestType{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("testtypes"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/code-generator/_examples/MixedCase/informers/externalversions/example/v1/clustertesttype.go b/vendor/k8s.io/code-generator/_examples/MixedCase/informers/externalversions/example/v1/clustertesttype.go deleted file mode 100644 index e0607c11a..000000000 --- a/vendor/k8s.io/code-generator/_examples/MixedCase/informers/externalversions/example/v1/clustertesttype.go +++ /dev/null @@ -1,88 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by informer-gen. DO NOT EDIT. 
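For orientation, a minimal sketch of how a generated informer like the ClusterTestType one deleted below is normally consumed through its shared factory. This is illustrative only: it reuses the MixedCase example packages that this patch removes from vendor, and the factory accessor names (Example().V1()) follow informer-gen's usual layout rather than anything shown in this hunk, so treat them as assumptions.

package main

import (
	"time"

	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/tools/clientcmd"

	versioned "k8s.io/code-generator/_examples/MixedCase/clientset/versioned"
	externalversions "k8s.io/code-generator/_examples/MixedCase/informers/externalversions"
)

func main() {
	// Placeholder kubeconfig path; any rest.Config source works here.
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		panic(err)
	}
	client := versioned.NewForConfigOrDie(cfg)

	// The shared factory hands out one informer per type, so several
	// controllers share a single watch connection and local cache.
	factory := externalversions.NewSharedInformerFactory(client, 30*time.Second)
	informer := factory.Example().V1().ClusterTestTypes().Informer()

	stopCh := make(chan struct{})
	defer close(stopCh)
	factory.Start(stopCh)
	cache.WaitForCacheSync(stopCh, informer.HasSynced)
}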
- -package v1 - -import ( - time "time" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - watch "k8s.io/apimachinery/pkg/watch" - cache "k8s.io/client-go/tools/cache" - examplev1 "k8s.io/code-generator/_examples/MixedCase/apis/example/v1" - versioned "k8s.io/code-generator/_examples/MixedCase/clientset/versioned" - internalinterfaces "k8s.io/code-generator/_examples/MixedCase/informers/externalversions/internalinterfaces" - v1 "k8s.io/code-generator/_examples/MixedCase/listers/example/v1" -) - -// ClusterTestTypeInformer provides access to a shared informer and lister for -// ClusterTestTypes. -type ClusterTestTypeInformer interface { - Informer() cache.SharedIndexInformer - Lister() v1.ClusterTestTypeLister -} - -type clusterTestTypeInformer struct { - factory internalinterfaces.SharedInformerFactory - tweakListOptions internalinterfaces.TweakListOptionsFunc -} - -// NewClusterTestTypeInformer constructs a new informer for ClusterTestType type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewClusterTestTypeInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { - return NewFilteredClusterTestTypeInformer(client, resyncPeriod, indexers, nil) -} - -// NewFilteredClusterTestTypeInformer constructs a new informer for ClusterTestType type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewFilteredClusterTestTypeInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { - return cache.NewSharedIndexInformer( - &cache.ListWatch{ - ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.ExampleV1().ClusterTestTypes().List(options) - }, - WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.ExampleV1().ClusterTestTypes().Watch(options) - }, - }, - &examplev1.ClusterTestType{}, - resyncPeriod, - indexers, - ) -} - -func (f *clusterTestTypeInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewFilteredClusterTestTypeInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) -} - -func (f *clusterTestTypeInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&examplev1.ClusterTestType{}, f.defaultInformer) -} - -func (f *clusterTestTypeInformer) Lister() v1.ClusterTestTypeLister { - return v1.NewClusterTestTypeLister(f.Informer().GetIndexer()) -} diff --git a/vendor/k8s.io/code-generator/_examples/MixedCase/informers/externalversions/example/v1/interface.go b/vendor/k8s.io/code-generator/_examples/MixedCase/informers/externalversions/example/v1/interface.go deleted file mode 100644 index 5389d07fb..000000000 --- a/vendor/k8s.io/code-generator/_examples/MixedCase/informers/externalversions/example/v1/interface.go +++ /dev/null @@ -1,52 +0,0 @@ -/* -Copyright The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by informer-gen. DO NOT EDIT. - -package v1 - -import ( - internalinterfaces "k8s.io/code-generator/_examples/MixedCase/informers/externalversions/internalinterfaces" -) - -// Interface provides access to all the informers in this group version. -type Interface interface { - // ClusterTestTypes returns a ClusterTestTypeInformer. - ClusterTestTypes() ClusterTestTypeInformer - // TestTypes returns a TestTypeInformer. - TestTypes() TestTypeInformer -} - -type version struct { - factory internalinterfaces.SharedInformerFactory - namespace string - tweakListOptions internalinterfaces.TweakListOptionsFunc -} - -// New returns a new Interface. -func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { - return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} -} - -// ClusterTestTypes returns a ClusterTestTypeInformer. -func (v *version) ClusterTestTypes() ClusterTestTypeInformer { - return &clusterTestTypeInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} -} - -// TestTypes returns a TestTypeInformer. -func (v *version) TestTypes() TestTypeInformer { - return &testTypeInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} -} diff --git a/vendor/k8s.io/code-generator/_examples/MixedCase/informers/externalversions/example/v1/testtype.go b/vendor/k8s.io/code-generator/_examples/MixedCase/informers/externalversions/example/v1/testtype.go deleted file mode 100644 index 18f3b88d0..000000000 --- a/vendor/k8s.io/code-generator/_examples/MixedCase/informers/externalversions/example/v1/testtype.go +++ /dev/null @@ -1,89 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by informer-gen. DO NOT EDIT. - -package v1 - -import ( - time "time" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - watch "k8s.io/apimachinery/pkg/watch" - cache "k8s.io/client-go/tools/cache" - examplev1 "k8s.io/code-generator/_examples/MixedCase/apis/example/v1" - versioned "k8s.io/code-generator/_examples/MixedCase/clientset/versioned" - internalinterfaces "k8s.io/code-generator/_examples/MixedCase/informers/externalversions/internalinterfaces" - v1 "k8s.io/code-generator/_examples/MixedCase/listers/example/v1" -) - -// TestTypeInformer provides access to a shared informer and lister for -// TestTypes. 
-type TestTypeInformer interface { - Informer() cache.SharedIndexInformer - Lister() v1.TestTypeLister -} - -type testTypeInformer struct { - factory internalinterfaces.SharedInformerFactory - tweakListOptions internalinterfaces.TweakListOptionsFunc - namespace string -} - -// NewTestTypeInformer constructs a new informer for TestType type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewTestTypeInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { - return NewFilteredTestTypeInformer(client, namespace, resyncPeriod, indexers, nil) -} - -// NewFilteredTestTypeInformer constructs a new informer for TestType type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewFilteredTestTypeInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { - return cache.NewSharedIndexInformer( - &cache.ListWatch{ - ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.ExampleV1().TestTypes(namespace).List(options) - }, - WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.ExampleV1().TestTypes(namespace).Watch(options) - }, - }, - &examplev1.TestType{}, - resyncPeriod, - indexers, - ) -} - -func (f *testTypeInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewFilteredTestTypeInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) -} - -func (f *testTypeInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&examplev1.TestType{}, f.defaultInformer) -} - -func (f *testTypeInformer) Lister() v1.TestTypeLister { - return v1.NewTestTypeLister(f.Informer().GetIndexer()) -} diff --git a/vendor/k8s.io/code-generator/_examples/MixedCase/listers/example/v1/clustertesttype.go b/vendor/k8s.io/code-generator/_examples/MixedCase/listers/example/v1/clustertesttype.go deleted file mode 100644 index 8e93b73f9..000000000 --- a/vendor/k8s.io/code-generator/_examples/MixedCase/listers/example/v1/clustertesttype.go +++ /dev/null @@ -1,65 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by lister-gen. DO NOT EDIT. 
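A short usage sketch for the ClusterTestTypeLister interface deleted below (illustrative only; in practice the lister is obtained from the informer rather than constructed by hand, and the package path is the MixedCase example this patch removes from vendor):

package example

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"

	listers "k8s.io/code-generator/_examples/MixedCase/listers/example/v1"
)

// printClusterTestTypes reads from the informer's local cache via the
// lister; no request ever reaches the API server.
func printClusterTestTypes(lister listers.ClusterTestTypeLister) error {
	all, err := lister.List(labels.Everything())
	if err != nil {
		return err
	}
	for _, ctt := range all {
		// Objects returned by a lister are shared cache state and must
		// be treated as read-only.
		fmt.Println(ctt.Name)
	}
	return nil
}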
- -package v1 - -import ( - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" - v1 "k8s.io/code-generator/_examples/MixedCase/apis/example/v1" -) - -// ClusterTestTypeLister helps list ClusterTestTypes. -type ClusterTestTypeLister interface { - // List lists all ClusterTestTypes in the indexer. - List(selector labels.Selector) (ret []*v1.ClusterTestType, err error) - // Get retrieves the ClusterTestType from the index for a given name. - Get(name string) (*v1.ClusterTestType, error) - ClusterTestTypeListerExpansion -} - -// clusterTestTypeLister implements the ClusterTestTypeLister interface. -type clusterTestTypeLister struct { - indexer cache.Indexer -} - -// NewClusterTestTypeLister returns a new ClusterTestTypeLister. -func NewClusterTestTypeLister(indexer cache.Indexer) ClusterTestTypeLister { - return &clusterTestTypeLister{indexer: indexer} -} - -// List lists all ClusterTestTypes in the indexer. -func (s *clusterTestTypeLister) List(selector labels.Selector) (ret []*v1.ClusterTestType, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.ClusterTestType)) - }) - return ret, err -} - -// Get retrieves the ClusterTestType from the index for a given name. -func (s *clusterTestTypeLister) Get(name string) (*v1.ClusterTestType, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("clustertesttype"), name) - } - return obj.(*v1.ClusterTestType), nil -} diff --git a/vendor/k8s.io/code-generator/_examples/MixedCase/listers/example/v1/testtype.go b/vendor/k8s.io/code-generator/_examples/MixedCase/listers/example/v1/testtype.go deleted file mode 100644 index 292dcedd0..000000000 --- a/vendor/k8s.io/code-generator/_examples/MixedCase/listers/example/v1/testtype.go +++ /dev/null @@ -1,94 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by lister-gen. DO NOT EDIT. - -package v1 - -import ( - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" - v1 "k8s.io/code-generator/_examples/MixedCase/apis/example/v1" -) - -// TestTypeLister helps list TestTypes. -type TestTypeLister interface { - // List lists all TestTypes in the indexer. - List(selector labels.Selector) (ret []*v1.TestType, err error) - // TestTypes returns an object that can list and get TestTypes. - TestTypes(namespace string) TestTypeNamespaceLister - TestTypeListerExpansion -} - -// testTypeLister implements the TestTypeLister interface. -type testTypeLister struct { - indexer cache.Indexer -} - -// NewTestTypeLister returns a new TestTypeLister. -func NewTestTypeLister(indexer cache.Indexer) TestTypeLister { - return &testTypeLister{indexer: indexer} -} - -// List lists all TestTypes in the indexer. 
-func (s *testTypeLister) List(selector labels.Selector) (ret []*v1.TestType, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.TestType)) - }) - return ret, err -} - -// TestTypes returns an object that can list and get TestTypes. -func (s *testTypeLister) TestTypes(namespace string) TestTypeNamespaceLister { - return testTypeNamespaceLister{indexer: s.indexer, namespace: namespace} -} - -// TestTypeNamespaceLister helps list and get TestTypes. -type TestTypeNamespaceLister interface { - // List lists all TestTypes in the indexer for a given namespace. - List(selector labels.Selector) (ret []*v1.TestType, err error) - // Get retrieves the TestType from the indexer for a given namespace and name. - Get(name string) (*v1.TestType, error) - TestTypeNamespaceListerExpansion -} - -// testTypeNamespaceLister implements the TestTypeNamespaceLister -// interface. -type testTypeNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all TestTypes in the indexer for a given namespace. -func (s testTypeNamespaceLister) List(selector labels.Selector) (ret []*v1.TestType, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1.TestType)) - }) - return ret, err -} - -// Get retrieves the TestType from the indexer for a given namespace and name. -func (s testTypeNamespaceLister) Get(name string) (*v1.TestType, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("testtype"), name) - } - return obj.(*v1.TestType), nil -} diff --git a/vendor/k8s.io/code-generator/_examples/apiserver/apis/example/doc.go b/vendor/k8s.io/code-generator/_examples/apiserver/apis/example/doc.go index 3285a056f..b221d7eb4 100644 --- a/vendor/k8s.io/code-generator/_examples/apiserver/apis/example/doc.go +++ b/vendor/k8s.io/code-generator/_examples/apiserver/apis/example/doc.go @@ -16,5 +16,4 @@ limitations under the License. // +k8s:deepcopy-gen=package // +groupName=example.apiserver.code-generator.k8s.io - package example // import "k8s.io/code-generator/_examples/apiserver/apis/example" diff --git a/vendor/k8s.io/code-generator/_examples/apiserver/apis/example/v1/doc.go b/vendor/k8s.io/code-generator/_examples/apiserver/apis/example/v1/doc.go index 6b1fe6c11..5b6bd5b30 100644 --- a/vendor/k8s.io/code-generator/_examples/apiserver/apis/example/v1/doc.go +++ b/vendor/k8s.io/code-generator/_examples/apiserver/apis/example/v1/doc.go @@ -18,5 +18,4 @@ limitations under the License. 
// +k8s:defaulter-gen=TypeMeta // +k8s:conversion-gen=k8s.io/code-generator/_examples/apiserver/apis/example // +groupName=example.apiserver.code-generator.k8s.io - package v1 diff --git a/vendor/k8s.io/code-generator/_examples/apiserver/apis/example/v1/zz_generated.deepcopy.go b/vendor/k8s.io/code-generator/_examples/apiserver/apis/example/v1/zz_generated.deepcopy.go index ec5a6e974..dae52ff12 100644 --- a/vendor/k8s.io/code-generator/_examples/apiserver/apis/example/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/code-generator/_examples/apiserver/apis/example/v1/zz_generated.deepcopy.go @@ -55,7 +55,7 @@ func (in *TestType) DeepCopyObject() runtime.Object { func (in *TestTypeList) DeepCopyInto(out *TestTypeList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]TestType, len(*in)) diff --git a/vendor/k8s.io/code-generator/_examples/apiserver/apis/example/zz_generated.deepcopy.go b/vendor/k8s.io/code-generator/_examples/apiserver/apis/example/zz_generated.deepcopy.go index 2639b92a6..980112fa8 100644 --- a/vendor/k8s.io/code-generator/_examples/apiserver/apis/example/zz_generated.deepcopy.go +++ b/vendor/k8s.io/code-generator/_examples/apiserver/apis/example/zz_generated.deepcopy.go @@ -55,7 +55,7 @@ func (in *TestType) DeepCopyObject() runtime.Object { func (in *TestTypeList) DeepCopyInto(out *TestTypeList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]TestType, len(*in)) diff --git a/vendor/k8s.io/code-generator/_examples/apiserver/apis/example2/doc.go b/vendor/k8s.io/code-generator/_examples/apiserver/apis/example2/doc.go index 0edb56dcd..386480375 100644 --- a/vendor/k8s.io/code-generator/_examples/apiserver/apis/example2/doc.go +++ b/vendor/k8s.io/code-generator/_examples/apiserver/apis/example2/doc.go @@ -17,5 +17,4 @@ limitations under the License. // +k8s:deepcopy-gen=package // +groupName=example.test.apiserver.code-generator.k8s.io // +groupGoName=SecondExample - package example2 // import "k8s.io/code-generator/_examples/apiserver/apis/example2" diff --git a/vendor/k8s.io/code-generator/_examples/apiserver/apis/example2/v1/doc.go b/vendor/k8s.io/code-generator/_examples/apiserver/apis/example2/v1/doc.go index 211aefc8c..36bd4549c 100644 --- a/vendor/k8s.io/code-generator/_examples/apiserver/apis/example2/v1/doc.go +++ b/vendor/k8s.io/code-generator/_examples/apiserver/apis/example2/v1/doc.go @@ -19,5 +19,4 @@ limitations under the License. 
// +groupName=example.test.apiserver.code-generator.k8s.io // +k8s:conversion-gen=k8s.io/code-generator/_examples/apiserver/apis/example2 // +groupGoName=SecondExample - package v1 diff --git a/vendor/k8s.io/code-generator/_examples/apiserver/apis/example2/v1/zz_generated.deepcopy.go b/vendor/k8s.io/code-generator/_examples/apiserver/apis/example2/v1/zz_generated.deepcopy.go index ec5a6e974..dae52ff12 100644 --- a/vendor/k8s.io/code-generator/_examples/apiserver/apis/example2/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/code-generator/_examples/apiserver/apis/example2/v1/zz_generated.deepcopy.go @@ -55,7 +55,7 @@ func (in *TestType) DeepCopyObject() runtime.Object { func (in *TestTypeList) DeepCopyInto(out *TestTypeList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]TestType, len(*in)) diff --git a/vendor/k8s.io/code-generator/_examples/apiserver/apis/example2/zz_generated.deepcopy.go b/vendor/k8s.io/code-generator/_examples/apiserver/apis/example2/zz_generated.deepcopy.go index dbb70bc3a..f9b317e57 100644 --- a/vendor/k8s.io/code-generator/_examples/apiserver/apis/example2/zz_generated.deepcopy.go +++ b/vendor/k8s.io/code-generator/_examples/apiserver/apis/example2/zz_generated.deepcopy.go @@ -55,7 +55,7 @@ func (in *TestType) DeepCopyObject() runtime.Object { func (in *TestTypeList) DeepCopyInto(out *TestTypeList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]TestType, len(*in)) diff --git a/vendor/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/fake/clientset_generated.go b/vendor/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/fake/clientset_generated.go index bd17e82b7..37f1b724d 100644 --- a/vendor/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/fake/clientset_generated.go +++ b/vendor/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/fake/clientset_generated.go @@ -43,7 +43,7 @@ func NewSimpleClientset(objects ...runtime.Object) *Clientset { } } - cs := &Clientset{tracker: o} + cs := &Clientset{} cs.discovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake} cs.AddReactor("*", "*", testing.ObjectReaction(o)) cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { @@ -65,17 +65,12 @@ func NewSimpleClientset(objects ...runtime.Object) *Clientset { type Clientset struct { testing.Fake discovery *fakediscovery.FakeDiscovery - tracker testing.ObjectTracker } func (c *Clientset) Discovery() discovery.DiscoveryInterface { return c.discovery } -func (c *Clientset) Tracker() testing.ObjectTracker { - return c.tracker -} - var _ clientset.Interface = &Clientset{} // Example retrieves the ExampleClient diff --git a/vendor/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example/internalversion/fake/fake_testtype.go b/vendor/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example/internalversion/fake/fake_testtype.go index abff7bb0d..7b247ca95 100644 --- a/vendor/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example/internalversion/fake/fake_testtype.go +++ b/vendor/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example/internalversion/fake/fake_testtype.go @@ -131,7 +131,7 @@ func (c *FakeTestTypes) 
DeleteCollection(options *v1.DeleteOptions, listOptions // Patch applies the patch and returns the patched testType. func (c *FakeTestTypes) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *example.TestType, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(testtypesResource, c.ns, name, pt, data, subresources...), &example.TestType{}) + Invokes(testing.NewPatchSubresourceAction(testtypesResource, c.ns, name, data, subresources...), &example.TestType{}) if obj == nil { return nil, err diff --git a/vendor/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example/internalversion/testtype.go b/vendor/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example/internalversion/testtype.go index a06b94616..d19392e85 100644 --- a/vendor/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example/internalversion/testtype.go +++ b/vendor/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example/internalversion/testtype.go @@ -19,8 +19,6 @@ limitations under the License. package internalversion import ( - "time" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" @@ -78,16 +76,11 @@ func (c *testTypes) Get(name string, options v1.GetOptions) (result *example.Tes // List takes label and field selectors, and returns the list of TestTypes that match those selectors. func (c *testTypes) List(opts v1.ListOptions) (result *example.TestTypeList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &example.TestTypeList{} err = c.client.Get(). Namespace(c.ns). Resource("testtypes"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -95,16 +88,11 @@ func (c *testTypes) List(opts v1.ListOptions) (result *example.TestTypeList, err // Watch returns a watch.Interface that watches the requested testTypes. func (c *testTypes) Watch(opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("testtypes"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -162,15 +150,10 @@ func (c *testTypes) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *testTypes) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Namespace(c.ns). Resource("testtypes"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). 
Error() diff --git a/vendor/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example2/internalversion/fake/fake_testtype.go b/vendor/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example2/internalversion/fake/fake_testtype.go index 0d63cf103..2890ff19d 100644 --- a/vendor/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example2/internalversion/fake/fake_testtype.go +++ b/vendor/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example2/internalversion/fake/fake_testtype.go @@ -131,7 +131,7 @@ func (c *FakeTestTypes) DeleteCollection(options *v1.DeleteOptions, listOptions // Patch applies the patch and returns the patched testType. func (c *FakeTestTypes) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *example2.TestType, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(testtypesResource, c.ns, name, pt, data, subresources...), &example2.TestType{}) + Invokes(testing.NewPatchSubresourceAction(testtypesResource, c.ns, name, data, subresources...), &example2.TestType{}) if obj == nil { return nil, err diff --git a/vendor/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example2/internalversion/testtype.go b/vendor/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example2/internalversion/testtype.go index 5380b86ce..1a2ca7891 100644 --- a/vendor/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example2/internalversion/testtype.go +++ b/vendor/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example2/internalversion/testtype.go @@ -19,8 +19,6 @@ limitations under the License. package internalversion import ( - "time" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" @@ -78,16 +76,11 @@ func (c *testTypes) Get(name string, options v1.GetOptions) (result *example2.Te // List takes label and field selectors, and returns the list of TestTypes that match those selectors. func (c *testTypes) List(opts v1.ListOptions) (result *example2.TestTypeList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &example2.TestTypeList{} err = c.client.Get(). Namespace(c.ns). Resource("testtypes"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -95,16 +88,11 @@ func (c *testTypes) List(opts v1.ListOptions) (result *example2.TestTypeList, er // Watch returns a watch.Interface that watches the requested testTypes. func (c *testTypes) Watch(opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("testtypes"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -162,15 +150,10 @@ func (c *testTypes) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *testTypes) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Namespace(c.ns). Resource("testtypes"). 
VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/code-generator/_examples/apiserver/clientset/versioned/clientset.go b/vendor/k8s.io/code-generator/_examples/apiserver/clientset/versioned/clientset.go index 7039a827b..8ba9799f5 100644 --- a/vendor/k8s.io/code-generator/_examples/apiserver/clientset/versioned/clientset.go +++ b/vendor/k8s.io/code-generator/_examples/apiserver/clientset/versioned/clientset.go @@ -29,7 +29,11 @@ import ( type Interface interface { Discovery() discovery.DiscoveryInterface ExampleV1() examplev1.ExampleV1Interface + // Deprecated: please explicitly pick a version if possible. + Example() examplev1.ExampleV1Interface SecondExampleV1() secondexamplev1.SecondExampleV1Interface + // Deprecated: please explicitly pick a version if possible. + SecondExample() secondexamplev1.SecondExampleV1Interface } // Clientset contains the clients for groups. Each group has exactly one @@ -45,11 +49,23 @@ func (c *Clientset) ExampleV1() examplev1.ExampleV1Interface { return c.exampleV1 } +// Deprecated: Example retrieves the default version of ExampleClient. +// Please explicitly pick a version. +func (c *Clientset) Example() examplev1.ExampleV1Interface { + return c.exampleV1 +} + // SecondExampleV1 retrieves the SecondExampleV1Client func (c *Clientset) SecondExampleV1() secondexamplev1.SecondExampleV1Interface { return c.secondExampleV1 } +// Deprecated: SecondExample retrieves the default version of SecondExampleClient. +// Please explicitly pick a version. +func (c *Clientset) SecondExample() secondexamplev1.SecondExampleV1Interface { + return c.secondExampleV1 +} + // Discovery retrieves the DiscoveryClient func (c *Clientset) Discovery() discovery.DiscoveryInterface { if c == nil { diff --git a/vendor/k8s.io/code-generator/_examples/apiserver/clientset/versioned/fake/clientset_generated.go b/vendor/k8s.io/code-generator/_examples/apiserver/clientset/versioned/fake/clientset_generated.go index 42690b1ec..6dce4fed4 100644 --- a/vendor/k8s.io/code-generator/_examples/apiserver/clientset/versioned/fake/clientset_generated.go +++ b/vendor/k8s.io/code-generator/_examples/apiserver/clientset/versioned/fake/clientset_generated.go @@ -43,7 +43,7 @@ func NewSimpleClientset(objects ...runtime.Object) *Clientset { } } - cs := &Clientset{tracker: o} + cs := &Clientset{} cs.discovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake} cs.AddReactor("*", "*", testing.ObjectReaction(o)) cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { @@ -65,17 +65,12 @@ func NewSimpleClientset(objects ...runtime.Object) *Clientset { type Clientset struct { testing.Fake discovery *fakediscovery.FakeDiscovery - tracker testing.ObjectTracker } func (c *Clientset) Discovery() discovery.DiscoveryInterface { return c.discovery } -func (c *Clientset) Tracker() testing.ObjectTracker { - return c.tracker -} - var _ clientset.Interface = &Clientset{} // ExampleV1 retrieves the ExampleV1Client @@ -83,7 +78,17 @@ func (c *Clientset) ExampleV1() examplev1.ExampleV1Interface { return &fakeexamplev1.FakeExampleV1{Fake: &c.Fake} } +// Example retrieves the ExampleV1Client +func (c *Clientset) Example() examplev1.ExampleV1Interface { + return &fakeexamplev1.FakeExampleV1{Fake: &c.Fake} +} + // SecondExampleV1 retrieves the SecondExampleV1Client func (c *Clientset) SecondExampleV1() secondexamplev1.SecondExampleV1Interface { return &fakesecondexamplev1.FakeSecondExampleV1{Fake: &c.Fake} } + +// 
SecondExample retrieves the SecondExampleV1Client +func (c *Clientset) SecondExample() secondexamplev1.SecondExampleV1Interface { + return &fakesecondexamplev1.FakeSecondExampleV1{Fake: &c.Fake} +} diff --git a/vendor/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example/v1/example_client.go b/vendor/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example/v1/example_client.go index 6fba6c1a6..743c8ded2 100644 --- a/vendor/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example/v1/example_client.go +++ b/vendor/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example/v1/example_client.go @@ -19,6 +19,7 @@ limitations under the License. package v1 import ( + serializer "k8s.io/apimachinery/pkg/runtime/serializer" rest "k8s.io/client-go/rest" v1 "k8s.io/code-generator/_examples/apiserver/apis/example/v1" "k8s.io/code-generator/_examples/apiserver/clientset/versioned/scheme" @@ -70,7 +71,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example/v1/fake/fake_testtype.go b/vendor/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example/v1/fake/fake_testtype.go index f7e2aacde..6847f17d5 100644 --- a/vendor/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example/v1/fake/fake_testtype.go +++ b/vendor/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example/v1/fake/fake_testtype.go @@ -131,7 +131,7 @@ func (c *FakeTestTypes) DeleteCollection(options *v1.DeleteOptions, listOptions // Patch applies the patch and returns the patched testType. func (c *FakeTestTypes) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *examplev1.TestType, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(testtypesResource, c.ns, name, pt, data, subresources...), &examplev1.TestType{}) + Invokes(testing.NewPatchSubresourceAction(testtypesResource, c.ns, name, data, subresources...), &examplev1.TestType{}) if obj == nil { return nil, err diff --git a/vendor/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example/v1/testtype.go b/vendor/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example/v1/testtype.go index e25fd1fc2..2a0606512 100644 --- a/vendor/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example/v1/testtype.go +++ b/vendor/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example/v1/testtype.go @@ -19,8 +19,6 @@ limitations under the License. package v1 import ( - "time" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" @@ -78,16 +76,11 @@ func (c *testTypes) Get(name string, options metav1.GetOptions) (result *v1.Test // List takes label and field selectors, and returns the list of TestTypes that match those selectors. 
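The hunks below revert these clients to an older client-gen that does not plumb ListOptions.TimeoutSeconds into the request. For reference, the pattern being deleted converts that field into a client-side timeout; this sketch restates the removed lines directly:

package example

import (
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// requestTimeout mirrors the deleted logic: a nil TimeoutSeconds means no
// client-side timeout, otherwise the value is converted to a Duration
// suitable for rest.Request.Timeout().
func requestTimeout(opts metav1.ListOptions) time.Duration {
	var timeout time.Duration
	if opts.TimeoutSeconds != nil {
		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
	}
	return timeout
}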
func (c *testTypes) List(opts metav1.ListOptions) (result *v1.TestTypeList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1.TestTypeList{} err = c.client.Get(). Namespace(c.ns). Resource("testtypes"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -95,16 +88,11 @@ func (c *testTypes) List(opts metav1.ListOptions) (result *v1.TestTypeList, err // Watch returns a watch.Interface that watches the requested testTypes. func (c *testTypes) Watch(opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("testtypes"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -162,15 +150,10 @@ func (c *testTypes) Delete(name string, options *metav1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *testTypes) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Namespace(c.ns). Resource("testtypes"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example2/v1/example2_client.go b/vendor/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example2/v1/example2_client.go index d169f6030..1271c0f19 100644 --- a/vendor/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example2/v1/example2_client.go +++ b/vendor/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example2/v1/example2_client.go @@ -19,6 +19,7 @@ limitations under the License. package v1 import ( + serializer "k8s.io/apimachinery/pkg/runtime/serializer" rest "k8s.io/client-go/rest" v1 "k8s.io/code-generator/_examples/apiserver/apis/example2/v1" "k8s.io/code-generator/_examples/apiserver/clientset/versioned/scheme" @@ -70,7 +71,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example2/v1/fake/fake_testtype.go b/vendor/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example2/v1/fake/fake_testtype.go index ce0782e6e..51a3f3e7d 100644 --- a/vendor/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example2/v1/fake/fake_testtype.go +++ b/vendor/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example2/v1/fake/fake_testtype.go @@ -131,7 +131,7 @@ func (c *FakeTestTypes) DeleteCollection(options *v1.DeleteOptions, listOptions // Patch applies the patch and returns the patched testType. func (c *FakeTestTypes) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *example2v1.TestType, err error) { obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(testtypesResource, c.ns, name, pt, data, subresources...), &example2v1.TestType{}) + Invokes(testing.NewPatchSubresourceAction(testtypesResource, c.ns, name, data, subresources...), &example2v1.TestType{}) if obj == nil { return nil, err diff --git a/vendor/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example2/v1/testtype.go b/vendor/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example2/v1/testtype.go index f5afee94c..b29bd8118 100644 --- a/vendor/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example2/v1/testtype.go +++ b/vendor/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example2/v1/testtype.go @@ -19,8 +19,6 @@ limitations under the License. package v1 import ( - "time" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" @@ -78,16 +76,11 @@ func (c *testTypes) Get(name string, options metav1.GetOptions) (result *v1.Test // List takes label and field selectors, and returns the list of TestTypes that match those selectors. func (c *testTypes) List(opts metav1.ListOptions) (result *v1.TestTypeList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1.TestTypeList{} err = c.client.Get(). Namespace(c.ns). Resource("testtypes"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -95,16 +88,11 @@ func (c *testTypes) List(opts metav1.ListOptions) (result *v1.TestTypeList, err // Watch returns a watch.Interface that watches the requested testTypes. func (c *testTypes) Watch(opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("testtypes"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -162,15 +150,10 @@ func (c *testTypes) Delete(name string, options *metav1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *testTypes) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Namespace(c.ns). Resource("testtypes"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/code-generator/_examples/apiserver/informers/externalversions/internalinterfaces/factory_interfaces.go b/vendor/k8s.io/code-generator/_examples/apiserver/informers/externalversions/internalinterfaces/factory_interfaces.go index 21c666902..6318f5284 100644 --- a/vendor/k8s.io/code-generator/_examples/apiserver/informers/externalversions/internalinterfaces/factory_interfaces.go +++ b/vendor/k8s.io/code-generator/_examples/apiserver/informers/externalversions/internalinterfaces/factory_interfaces.go @@ -27,7 +27,6 @@ import ( versioned "k8s.io/code-generator/_examples/apiserver/clientset/versioned" ) -// NewInformerFunc takes versioned.Interface and time.Duration to return a SharedIndexInformer. 
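The testtype.go hunks above (and their many siblings below) strip the client-side timeout that newer generated clients derive from ListOptions before issuing List, Watch, and DeleteCollection requests. For reference, a sketch of the exact plumbing being removed: TimeoutSeconds is an optional *int64 in seconds, mapped onto the REST request's timeout when set.

package main

import (
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// timeoutFor reproduces the deleted snippet: a nil TimeoutSeconds yields a
// zero duration (no client-side bound); otherwise the request is capped.
func timeoutFor(opts metav1.ListOptions) time.Duration {
	var timeout time.Duration
	if opts.TimeoutSeconds != nil {
		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
	}
	return timeout
}

func main() {
	secs := int64(30)
	fmt.Println(timeoutFor(metav1.ListOptions{TimeoutSeconds: &secs})) // 30s
	fmt.Println(timeoutFor(metav1.ListOptions{}))                      // 0s
}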
type NewInformerFunc func(versioned.Interface, time.Duration) cache.SharedIndexInformer // SharedInformerFactory a small interface to allow for adding an informer without an import cycle @@ -36,5 +35,4 @@ type SharedInformerFactory interface { InformerFor(obj runtime.Object, newFunc NewInformerFunc) cache.SharedIndexInformer } -// TweakListOptionsFunc is a function that transforms a v1.ListOptions. type TweakListOptionsFunc func(*v1.ListOptions) diff --git a/vendor/k8s.io/code-generator/_examples/apiserver/informers/internalversion/internalinterfaces/factory_interfaces.go b/vendor/k8s.io/code-generator/_examples/apiserver/informers/internalversion/internalinterfaces/factory_interfaces.go index 5d01820f9..03c679244 100644 --- a/vendor/k8s.io/code-generator/_examples/apiserver/informers/internalversion/internalinterfaces/factory_interfaces.go +++ b/vendor/k8s.io/code-generator/_examples/apiserver/informers/internalversion/internalinterfaces/factory_interfaces.go @@ -27,7 +27,6 @@ import ( internalversion "k8s.io/code-generator/_examples/apiserver/clientset/internalversion" ) -// NewInformerFunc takes internalversion.Interface and time.Duration to return a SharedIndexInformer. type NewInformerFunc func(internalversion.Interface, time.Duration) cache.SharedIndexInformer // SharedInformerFactory a small interface to allow for adding an informer without an import cycle @@ -36,5 +35,4 @@ type SharedInformerFactory interface { InformerFor(obj runtime.Object, newFunc NewInformerFunc) cache.SharedIndexInformer } -// TweakListOptionsFunc is a function that transforms a v1.ListOptions. type TweakListOptionsFunc func(*v1.ListOptions) diff --git a/vendor/k8s.io/code-generator/_examples/crd/apis/example/v1/doc.go b/vendor/k8s.io/code-generator/_examples/crd/apis/example/v1/doc.go index 673ac55d7..e6614c0da 100644 --- a/vendor/k8s.io/code-generator/_examples/crd/apis/example/v1/doc.go +++ b/vendor/k8s.io/code-generator/_examples/crd/apis/example/v1/doc.go @@ -17,5 +17,4 @@ limitations under the License. 
// +k8s:deepcopy-gen=package // +k8s:defaulter-gen=TypeMeta // +groupName=example.crd.code-generator.k8s.io - package v1 diff --git a/vendor/k8s.io/code-generator/_examples/crd/apis/example/v1/zz_generated.deepcopy.go b/vendor/k8s.io/code-generator/_examples/crd/apis/example/v1/zz_generated.deepcopy.go index 9f55a39dd..a3b4bfa9c 100644 --- a/vendor/k8s.io/code-generator/_examples/crd/apis/example/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/code-generator/_examples/crd/apis/example/v1/zz_generated.deepcopy.go @@ -55,7 +55,7 @@ func (in *ClusterTestType) DeepCopyObject() runtime.Object { func (in *ClusterTestTypeList) DeepCopyInto(out *ClusterTestTypeList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ClusterTestType, len(*in)) @@ -131,7 +131,7 @@ func (in *TestType) DeepCopyObject() runtime.Object { func (in *TestTypeList) DeepCopyInto(out *TestTypeList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]TestType, len(*in)) diff --git a/vendor/k8s.io/code-generator/_examples/crd/apis/example2/v1/doc.go b/vendor/k8s.io/code-generator/_examples/crd/apis/example2/v1/doc.go index 5d1cbec5e..6521d83ff 100644 --- a/vendor/k8s.io/code-generator/_examples/crd/apis/example2/v1/doc.go +++ b/vendor/k8s.io/code-generator/_examples/crd/apis/example2/v1/doc.go @@ -18,5 +18,4 @@ limitations under the License. // +k8s:defaulter-gen=TypeMeta // +groupName=example.test.crd.code-generator.k8s.io // +groupGoName=SecondExample - package v1 diff --git a/vendor/k8s.io/code-generator/_examples/crd/apis/example2/v1/zz_generated.deepcopy.go b/vendor/k8s.io/code-generator/_examples/crd/apis/example2/v1/zz_generated.deepcopy.go index ec5a6e974..dae52ff12 100644 --- a/vendor/k8s.io/code-generator/_examples/crd/apis/example2/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/code-generator/_examples/crd/apis/example2/v1/zz_generated.deepcopy.go @@ -55,7 +55,7 @@ func (in *TestType) DeepCopyObject() runtime.Object { func (in *TestTypeList) DeepCopyInto(out *TestTypeList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]TestType, len(*in)) diff --git a/vendor/k8s.io/code-generator/_examples/crd/clientset/versioned/clientset.go b/vendor/k8s.io/code-generator/_examples/crd/clientset/versioned/clientset.go index 812dbd124..034e06ad0 100644 --- a/vendor/k8s.io/code-generator/_examples/crd/clientset/versioned/clientset.go +++ b/vendor/k8s.io/code-generator/_examples/crd/clientset/versioned/clientset.go @@ -29,7 +29,11 @@ import ( type Interface interface { Discovery() discovery.DiscoveryInterface ExampleV1() examplev1.ExampleV1Interface + // Deprecated: please explicitly pick a version if possible. + Example() examplev1.ExampleV1Interface SecondExampleV1() secondexamplev1.SecondExampleV1Interface + // Deprecated: please explicitly pick a version if possible. + SecondExample() secondexamplev1.SecondExampleV1Interface } // Clientset contains the clients for groups. Each group has exactly one @@ -45,11 +49,23 @@ func (c *Clientset) ExampleV1() examplev1.ExampleV1Interface { return c.exampleV1 } +// Deprecated: Example retrieves the default version of ExampleClient. +// Please explicitly pick a version. 
+func (c *Clientset) Example() examplev1.ExampleV1Interface { + return c.exampleV1 +} + // SecondExampleV1 retrieves the SecondExampleV1Client func (c *Clientset) SecondExampleV1() secondexamplev1.SecondExampleV1Interface { return c.secondExampleV1 } +// Deprecated: SecondExample retrieves the default version of SecondExampleClient. +// Please explicitly pick a version. +func (c *Clientset) SecondExample() secondexamplev1.SecondExampleV1Interface { + return c.secondExampleV1 +} + // Discovery retrieves the DiscoveryClient func (c *Clientset) Discovery() discovery.DiscoveryInterface { if c == nil { diff --git a/vendor/k8s.io/code-generator/_examples/crd/clientset/versioned/fake/clientset_generated.go b/vendor/k8s.io/code-generator/_examples/crd/clientset/versioned/fake/clientset_generated.go index 0c950920e..4ad8bba75 100644 --- a/vendor/k8s.io/code-generator/_examples/crd/clientset/versioned/fake/clientset_generated.go +++ b/vendor/k8s.io/code-generator/_examples/crd/clientset/versioned/fake/clientset_generated.go @@ -43,7 +43,7 @@ func NewSimpleClientset(objects ...runtime.Object) *Clientset { } } - cs := &Clientset{tracker: o} + cs := &Clientset{} cs.discovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake} cs.AddReactor("*", "*", testing.ObjectReaction(o)) cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { @@ -65,17 +65,12 @@ func NewSimpleClientset(objects ...runtime.Object) *Clientset { type Clientset struct { testing.Fake discovery *fakediscovery.FakeDiscovery - tracker testing.ObjectTracker } func (c *Clientset) Discovery() discovery.DiscoveryInterface { return c.discovery } -func (c *Clientset) Tracker() testing.ObjectTracker { - return c.tracker -} - var _ clientset.Interface = &Clientset{} // ExampleV1 retrieves the ExampleV1Client @@ -83,7 +78,17 @@ func (c *Clientset) ExampleV1() examplev1.ExampleV1Interface { return &fakeexamplev1.FakeExampleV1{Fake: &c.Fake} } +// Example retrieves the ExampleV1Client +func (c *Clientset) Example() examplev1.ExampleV1Interface { + return &fakeexamplev1.FakeExampleV1{Fake: &c.Fake} +} + // SecondExampleV1 retrieves the SecondExampleV1Client func (c *Clientset) SecondExampleV1() secondexamplev1.SecondExampleV1Interface { return &fakesecondexamplev1.FakeSecondExampleV1{Fake: &c.Fake} } + +// SecondExample retrieves the SecondExampleV1Client +func (c *Clientset) SecondExample() secondexamplev1.SecondExampleV1Interface { + return &fakesecondexamplev1.FakeSecondExampleV1{Fake: &c.Fake} +} diff --git a/vendor/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example/v1/clustertesttype.go b/vendor/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example/v1/clustertesttype.go index 1217dd867..30e0e7498 100644 --- a/vendor/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example/v1/clustertesttype.go +++ b/vendor/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example/v1/clustertesttype.go @@ -19,8 +19,6 @@ limitations under the License. package v1 import ( - "time" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" @@ -79,15 +77,10 @@ func (c *clusterTestTypes) Get(name string, options metav1.GetOptions) (result * // List takes label and field selectors, and returns the list of ClusterTestTypes that match those selectors. 
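With the Interface and Clientset additions above (and the matching fake methods), callers get back the deprecated un-versioned group accessors alongside the versioned ones; both resolve to the same typed client, the former via the group's default version. A hedged usage sketch against the generated crd example clientset and its fake, assuming the usual generated TestTypes(namespace) getter:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/code-generator/_examples/crd/clientset/versioned/fake"
)

func main() {
	cs := fake.NewSimpleClientset()

	// Preferred form: the version is explicit at the call site.
	_, err := cs.ExampleV1().TestTypes("default").List(metav1.ListOptions{})
	fmt.Println("ExampleV1:", err)

	// Deprecated form restored by this patch: resolves to the group's
	// default (here, only) version, v1.
	_, err = cs.Example().TestTypes("default").List(metav1.ListOptions{})
	fmt.Println("Example:", err)
}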
func (c *clusterTestTypes) List(opts metav1.ListOptions) (result *v1.ClusterTestTypeList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1.ClusterTestTypeList{} err = c.client.Get(). Resource("clustertesttypes"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -95,15 +88,10 @@ func (c *clusterTestTypes) List(opts metav1.ListOptions) (result *v1.ClusterTest // Watch returns a watch.Interface that watches the requested clusterTestTypes. func (c *clusterTestTypes) Watch(opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Resource("clustertesttypes"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -157,14 +145,9 @@ func (c *clusterTestTypes) Delete(name string, options *metav1.DeleteOptions) er // DeleteCollection deletes a collection of objects. func (c *clusterTestTypes) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Resource("clustertesttypes"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example/v1/example_client.go b/vendor/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example/v1/example_client.go index 358d7d9d6..25d80728c 100644 --- a/vendor/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example/v1/example_client.go +++ b/vendor/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example/v1/example_client.go @@ -19,6 +19,7 @@ limitations under the License. package v1 import ( + serializer "k8s.io/apimachinery/pkg/runtime/serializer" rest "k8s.io/client-go/rest" v1 "k8s.io/code-generator/_examples/crd/apis/example/v1" "k8s.io/code-generator/_examples/crd/clientset/versioned/scheme" @@ -75,7 +76,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example/v1/fake/fake_clustertesttype.go b/vendor/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example/v1/fake/fake_clustertesttype.go index 92d87b12f..e0b0376b8 100644 --- a/vendor/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example/v1/fake/fake_clustertesttype.go +++ b/vendor/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example/v1/fake/fake_clustertesttype.go @@ -124,7 +124,7 @@ func (c *FakeClusterTestTypes) DeleteCollection(options *v1.DeleteOptions, listO // Patch applies the patch and returns the patched clusterTestType. func (c *FakeClusterTestTypes) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *examplev1.ClusterTestType, err error) { obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(clustertesttypesResource, name, pt, data, subresources...), &examplev1.ClusterTestType{}) + Invokes(testing.NewRootPatchSubresourceAction(clustertesttypesResource, name, data, subresources...), &examplev1.ClusterTestType{}) if obj == nil { return nil, err } @@ -134,7 +134,7 @@ func (c *FakeClusterTestTypes) Patch(name string, pt types.PatchType, data []byt // GetScale takes name of the clusterTestType, and returns the corresponding scale object, and an error if there is any. func (c *FakeClusterTestTypes) GetScale(clusterTestTypeName string, options v1.GetOptions) (result *autoscaling.Scale, err error) { obj, err := c.Fake. - Invokes(testing.NewRootGetSubresourceAction(clustertesttypesResource, "scale", clusterTestTypeName), &autoscaling.Scale{}) + Invokes(testing.NewRootGetSubresourceAction(clustertesttypesResource, clusterTestTypeName), &autoscaling.Scale{}) if obj == nil { return nil, err } diff --git a/vendor/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example/v1/fake/fake_testtype.go b/vendor/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example/v1/fake/fake_testtype.go index 3ef9885d2..b284a5797 100644 --- a/vendor/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example/v1/fake/fake_testtype.go +++ b/vendor/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example/v1/fake/fake_testtype.go @@ -131,7 +131,7 @@ func (c *FakeTestTypes) DeleteCollection(options *v1.DeleteOptions, listOptions // Patch applies the patch and returns the patched testType. func (c *FakeTestTypes) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *examplev1.TestType, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(testtypesResource, c.ns, name, pt, data, subresources...), &examplev1.TestType{}) + Invokes(testing.NewPatchSubresourceAction(testtypesResource, c.ns, name, data, subresources...), &examplev1.TestType{}) if obj == nil { return nil, err diff --git a/vendor/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example/v1/testtype.go b/vendor/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example/v1/testtype.go index 164b0510e..6fafb1e1b 100644 --- a/vendor/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example/v1/testtype.go +++ b/vendor/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example/v1/testtype.go @@ -19,8 +19,6 @@ limitations under the License. package v1 import ( - "time" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" @@ -78,16 +76,11 @@ func (c *testTypes) Get(name string, options metav1.GetOptions) (result *v1.Test // List takes label and field selectors, and returns the list of TestTypes that match those selectors. func (c *testTypes) List(opts metav1.ListOptions) (result *v1.TestTypeList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1.TestTypeList{} err = c.client.Get(). Namespace(c.ns). Resource("testtypes"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -95,16 +88,11 @@ func (c *testTypes) List(opts metav1.ListOptions) (result *v1.TestTypeList, err // Watch returns a watch.Interface that watches the requested testTypes. 
func (c *testTypes) Watch(opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("testtypes"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -162,15 +150,10 @@ func (c *testTypes) Delete(name string, options *metav1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *testTypes) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Namespace(c.ns). Resource("testtypes"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example2/v1/example2_client.go b/vendor/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example2/v1/example2_client.go index 20ef56417..210b6509f 100644 --- a/vendor/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example2/v1/example2_client.go +++ b/vendor/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example2/v1/example2_client.go @@ -19,6 +19,7 @@ limitations under the License. package v1 import ( + serializer "k8s.io/apimachinery/pkg/runtime/serializer" rest "k8s.io/client-go/rest" v1 "k8s.io/code-generator/_examples/crd/apis/example2/v1" "k8s.io/code-generator/_examples/crd/clientset/versioned/scheme" @@ -70,7 +71,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example2/v1/fake/fake_testtype.go b/vendor/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example2/v1/fake/fake_testtype.go index c4efc6597..7c37bd816 100644 --- a/vendor/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example2/v1/fake/fake_testtype.go +++ b/vendor/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example2/v1/fake/fake_testtype.go @@ -131,7 +131,7 @@ func (c *FakeTestTypes) DeleteCollection(options *v1.DeleteOptions, listOptions // Patch applies the patch and returns the patched testType. func (c *FakeTestTypes) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *example2v1.TestType, err error) { obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(testtypesResource, c.ns, name, pt, data, subresources...), &example2v1.TestType{}) + Invokes(testing.NewPatchSubresourceAction(testtypesResource, c.ns, name, data, subresources...), &example2v1.TestType{}) if obj == nil { return nil, err diff --git a/vendor/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example2/v1/testtype.go b/vendor/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example2/v1/testtype.go index 2e3194e00..69b0e64c5 100644 --- a/vendor/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example2/v1/testtype.go +++ b/vendor/k8s.io/code-generator/_examples/crd/clientset/versioned/typed/example2/v1/testtype.go @@ -19,8 +19,6 @@ limitations under the License. package v1 import ( - "time" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" @@ -78,16 +76,11 @@ func (c *testTypes) Get(name string, options metav1.GetOptions) (result *v1.Test // List takes label and field selectors, and returns the list of TestTypes that match those selectors. func (c *testTypes) List(opts metav1.ListOptions) (result *v1.TestTypeList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1.TestTypeList{} err = c.client.Get(). Namespace(c.ns). Resource("testtypes"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -95,16 +88,11 @@ func (c *testTypes) List(opts metav1.ListOptions) (result *v1.TestTypeList, err // Watch returns a watch.Interface that watches the requested testTypes. func (c *testTypes) Watch(opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("testtypes"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -162,15 +150,10 @@ func (c *testTypes) Delete(name string, options *metav1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *testTypes) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Namespace(c.ns). Resource("testtypes"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/code-generator/_examples/crd/informers/externalversions/internalinterfaces/factory_interfaces.go b/vendor/k8s.io/code-generator/_examples/crd/informers/externalversions/internalinterfaces/factory_interfaces.go index 86d63f376..02e0d6ac3 100644 --- a/vendor/k8s.io/code-generator/_examples/crd/informers/externalversions/internalinterfaces/factory_interfaces.go +++ b/vendor/k8s.io/code-generator/_examples/crd/informers/externalversions/internalinterfaces/factory_interfaces.go @@ -27,7 +27,6 @@ import ( versioned "k8s.io/code-generator/_examples/crd/clientset/versioned" ) -// NewInformerFunc takes versioned.Interface and time.Duration to return a SharedIndexInformer. 
type NewInformerFunc func(versioned.Interface, time.Duration) cache.SharedIndexInformer // SharedInformerFactory a small interface to allow for adding an informer without an import cycle @@ -36,5 +35,4 @@ type SharedInformerFactory interface { InformerFor(obj runtime.Object, newFunc NewInformerFunc) cache.SharedIndexInformer } -// TweakListOptionsFunc is a function that transforms a v1.ListOptions. type TweakListOptionsFunc func(*v1.ListOptions) diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/OWNERS b/vendor/k8s.io/code-generator/cmd/client-gen/OWNERS index 62866d0b1..0c408a1aa 100644 --- a/vendor/k8s.io/code-generator/cmd/client-gen/OWNERS +++ b/vendor/k8s.io/code-generator/cmd/client-gen/OWNERS @@ -1,5 +1,3 @@ -# See the OWNERS docs at https://go.k8s.io/owners - approvers: - lavalamp - wojtek-t diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/README.md b/vendor/k8s.io/code-generator/cmd/client-gen/README.md index 092a61151..d1d67abdf 100644 --- a/vendor/k8s.io/code-generator/cmd/client-gen/README.md +++ b/vendor/k8s.io/code-generator/cmd/client-gen/README.md @@ -1,4 +1,4 @@ -See [generating-clientset.md](https://git.k8s.io/community/contributors/devel/sig-api-machinery/generating-clientset.md) +See [generating-clientset.md](https://git.k8s.io/community/contributors/devel/generating-clientset.md) [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/staging/src/k8s.io/code-generator/client-gen/README.md?pixel)]() diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/generators/client_generator.go b/vendor/k8s.io/code-generator/cmd/client-gen/generators/client_generator.go index 18980744f..335e995c0 100644 --- a/vendor/k8s.io/code-generator/cmd/client-gen/generators/client_generator.go +++ b/vendor/k8s.io/code-generator/cmd/client-gen/generators/client_generator.go @@ -27,13 +27,12 @@ import ( "k8s.io/code-generator/cmd/client-gen/generators/util" "k8s.io/code-generator/cmd/client-gen/path" clientgentypes "k8s.io/code-generator/cmd/client-gen/types" - codegennamer "k8s.io/code-generator/pkg/namer" "k8s.io/gengo/args" "k8s.io/gengo/generator" "k8s.io/gengo/namer" "k8s.io/gengo/types" - "k8s.io/klog" + "github.com/golang/glog" ) // NameSystems returns the name system used by the generators in this package. 
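The hunk below swaps the shared codegennamer for a package-local tagOverrideNamer (added at the end of this file, together with the extractTag helper in tags.go): the "resource" name system normally yields the all-lowercase plural, but a "+resourceName=" comment tag on the type wins. A simplified sketch of that fallback logic, with a string-based stand-in for gengo's comment-tag parsing:

package main

import (
	"fmt"
	"strings"
)

// extractTag is a stand-in for the tags.go helper: return the value of a
// "+<key>=<value>" comment tag, or "" when the tag is absent.
func extractTag(key string, lines []string) string {
	prefix := "+" + key + "="
	for _, line := range lines {
		if trimmed := strings.TrimSpace(line); strings.HasPrefix(trimmed, prefix) {
			return trimmed[len(prefix):]
		}
	}
	return ""
}

// resourceName mirrors tagOverrideNamer.Name: prefer the tag, else fall
// back to the default namer's output (the lowercase plural of the type).
func resourceName(commentLines []string, fallback string) string {
	if override := extractTag("resourceName", commentLines); override != "" {
		return override
	}
	return fallback
}

func main() {
	fmt.Println(resourceName([]string{"TestType is a top-level type."}, "testtypes"))
	fmt.Println(resourceName([]string{"+resourceName=examples"}, "testtypes"))
}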
@@ -102,7 +101,7 @@ func NameSystems() namer.NameSystems { "publicPlural": publicPluralNamer, "privatePlural": privatePluralNamer, "allLowercasePlural": lowercaseNamer, - "resource": codegennamer.NewTagOverrideNamer("resourceName", lowercaseNamer), + "resource": NewTagOverrideNamer("resourceName", lowercaseNamer), } } @@ -131,7 +130,7 @@ func DefaultNameSystem() string { } func packageForGroup(gv clientgentypes.GroupVersion, typeList []*types.Type, clientsetPackage string, groupPackageName string, groupGoName string, apiPath string, srcTreePath string, inputPackage string, boilerplate []byte) generator.Package { - groupVersionClientPackage := filepath.Join(clientsetPackage, "typed", strings.ToLower(groupPackageName), strings.ToLower(gv.Version.NonEmpty())) + groupVersionClientPackage := strings.ToLower(filepath.Join(clientsetPackage, "typed", groupPackageName, gv.Version.NonEmpty())) return &generator.DefaultPackage{ PackageName: strings.ToLower(gv.Version.NonEmpty()), PackagePath: groupVersionClientPackage, @@ -319,12 +318,12 @@ func applyGroupOverrides(universe types.Universe, customArgs *clientgenargs.Cust func Packages(context *generator.Context, arguments *args.GeneratorArgs) generator.Packages { boilerplate, err := arguments.LoadGoBoilerplate() if err != nil { - klog.Fatalf("Failed loading boilerplate: %v", err) + glog.Fatalf("Failed loading boilerplate: %v", err) } customArgs, ok := arguments.CustomArgs.(*clientgenargs.CustomArgs) if !ok { - klog.Fatalf("cannot convert arguments.CustomArgs to clientgenargs.CustomArgs") + glog.Fatalf("cannot convert arguments.CustomArgs to clientgenargs.CustomArgs") } includedTypesOverrides := customArgs.IncludedTypesOverrides @@ -401,3 +400,27 @@ func Packages(context *generator.Context, arguments *args.GeneratorArgs) generat return generator.Packages(packageList) } + +// tagOverrideNamer is a namer which pulls names from a given tag, if specified, +// and otherwise falls back to a different namer. +type tagOverrideNamer struct { + tagName string + fallback namer.Namer +} + +func (n *tagOverrideNamer) Name(t *types.Type) string { + if nameOverride := extractTag(n.tagName, append(t.SecondClosestCommentLines, t.CommentLines...)); nameOverride != "" { + return nameOverride + } + + return n.fallback.Name(t) +} + +// NewTagOverrideNamer creates a namer.Namer which uses the contents of the given tag as +// the name, or falls back to another Namer if the tag is not present. 
+func NewTagOverrideNamer(tagName string, fallback namer.Namer) namer.Namer { + return &tagOverrideNamer{ + tagName: tagName, + fallback: fallback, + } +} diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/fake_client_generator.go b/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/fake_client_generator.go index 4b3854be6..ec439c2f7 100644 --- a/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/fake_client_generator.go +++ b/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/fake_client_generator.go @@ -30,9 +30,9 @@ import ( ) func PackageForGroup(gv clientgentypes.GroupVersion, typeList []*types.Type, clientsetPackage string, groupPackageName string, groupGoName string, inputPackage string, boilerplate []byte) generator.Package { - outputPackage := filepath.Join(clientsetPackage, "typed", strings.ToLower(groupPackageName), strings.ToLower(gv.Version.NonEmpty()), "fake") + outputPackage := strings.ToLower(filepath.Join(clientsetPackage, "typed", groupPackageName, gv.Version.NonEmpty(), "fake")) // TODO: should make this a function, called by here and in client-generator.go - realClientPackage := filepath.Join(clientsetPackage, "typed", strings.ToLower(groupPackageName), strings.ToLower(gv.Version.NonEmpty())) + realClientPackage := filepath.Join(clientsetPackage, "typed", groupPackageName, gv.Version.NonEmpty()) return &generator.DefaultPackage{ PackageName: "fake", PackagePath: outputPackage, diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_clientset.go b/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_clientset.go index 55709aab2..f1225acb3 100644 --- a/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_clientset.go +++ b/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_clientset.go @@ -60,12 +60,12 @@ func (g *genClientset) Imports(c *generator.Context) (imports []string) { imports = append(imports, g.imports.ImportLines()...) 
for _, group := range g.groups { for _, version := range group.Versions { - groupClientPackage := filepath.Join(g.fakeClientsetPackage, "typed", strings.ToLower(group.PackageName), strings.ToLower(version.NonEmpty())) + groupClientPackage := filepath.Join(g.fakeClientsetPackage, "typed", group.PackageName, version.NonEmpty()) fakeGroupClientPackage := filepath.Join(groupClientPackage, "fake") groupAlias := strings.ToLower(g.groupGoNames[clientgentypes.GroupVersion{Group: group.Group, Version: version.Version}]) - imports = append(imports, fmt.Sprintf("%s%s \"%s\"", groupAlias, strings.ToLower(version.NonEmpty()), groupClientPackage)) - imports = append(imports, fmt.Sprintf("fake%s%s \"%s\"", groupAlias, strings.ToLower(version.NonEmpty()), fakeGroupClientPackage)) + imports = append(imports, strings.ToLower(fmt.Sprintf("%s%s \"%s\"", groupAlias, version.NonEmpty(), groupClientPackage))) + imports = append(imports, strings.ToLower(fmt.Sprintf("fake%s%s \"%s\"", groupAlias, version.NonEmpty(), fakeGroupClientPackage))) } } // the package that has the clientset Interface @@ -102,6 +102,10 @@ func (g *genClientset) GenerateType(c *generator.Context, t *types.Type, w io.Wr } sw.Do(clientsetInterfaceImplTemplate, m) + // don't generate the default method if generating internalversion clientset + if group.IsDefaultVersion && group.Version != "" { + sw.Do(clientsetInterfaceDefaultVersionImpl, m) + } } return sw.Error() @@ -121,7 +125,7 @@ func NewSimpleClientset(objects ...runtime.Object) *Clientset { } } - cs := &Clientset{tracker: o} + cs := &Clientset{} cs.discovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake} cs.AddReactor("*", "*", testing.ObjectReaction(o)) cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { @@ -143,16 +147,11 @@ func NewSimpleClientset(objects ...runtime.Object) *Clientset { type Clientset struct { testing.Fake discovery *fakediscovery.FakeDiscovery - tracker testing.ObjectTracker } func (c *Clientset) Discovery() discovery.DiscoveryInterface { return c.discovery } - -func (c *Clientset) Tracker() testing.ObjectTracker { - return c.tracker -} ` var checkImpl = ` diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_group.go b/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_group.go index 8f4d5785e..675fa5f6f 100644 --- a/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_group.go +++ b/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_group.go @@ -64,7 +64,7 @@ func (g *genFakeForGroup) Namers(c *generator.Context) namer.NameSystems { func (g *genFakeForGroup) Imports(c *generator.Context) (imports []string) { imports = g.imports.ImportLines() if len(g.types) != 0 { - imports = append(imports, fmt.Sprintf("%s \"%s\"", strings.ToLower(filepath.Base(g.realClientPackage)), g.realClientPackage)) + imports = append(imports, strings.ToLower(fmt.Sprintf("%s \"%s\"", filepath.Base(g.realClientPackage), g.realClientPackage))) } return imports } diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_type.go b/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_type.go index f5888aef1..bf18c14c6 100644 --- a/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_type.go +++ b/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_type.go @@ -362,7 +362,7 @@ var getSubresourceTemplate = ` func (c
*Fake$.type|publicPlural$) Get($.type|private$Name string, options $.GetOptions|raw$) (result *$.resultType|raw$, err error) { obj, err := c.Fake. $if .namespaced$Invokes($.NewGetSubresourceAction|raw$($.type|allLowercasePlural$Resource, c.ns, "$.subresourcePath$", $.type|private$Name), &$.resultType|raw${}) - $else$Invokes($.NewRootGetSubresourceAction|raw$($.type|allLowercasePlural$Resource, "$.subresourcePath$", $.type|private$Name), &$.resultType|raw${})$end$ + $else$Invokes($.NewRootGetSubresourceAction|raw$($.type|allLowercasePlural$Resource, $.type|private$Name), &$.resultType|raw${})$end$ if obj == nil { return nil, err } @@ -469,8 +469,8 @@ var patchTemplate = ` // Patch applies the patch and returns the patched $.resultType|private$. func (c *Fake$.type|publicPlural$) Patch(name string, pt $.PatchType|raw$, data []byte, subresources ...string) (result *$.resultType|raw$, err error) { obj, err := c.Fake. - $if .namespaced$Invokes($.NewPatchSubresourceAction|raw$($.type|allLowercasePlural$Resource, c.ns, name, pt, data, subresources... ), &$.resultType|raw${}) - $else$Invokes($.NewRootPatchSubresourceAction|raw$($.type|allLowercasePlural$Resource, name, pt, data, subresources...), &$.resultType|raw${})$end$ + $if .namespaced$Invokes($.NewPatchSubresourceAction|raw$($.type|allLowercasePlural$Resource, c.ns, name, data, subresources... ), &$.resultType|raw${}) + $else$Invokes($.NewRootPatchSubresourceAction|raw$($.type|allLowercasePlural$Resource, name, data, subresources...), &$.resultType|raw${})$end$ if obj == nil { return nil, err } diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/generators/generator_for_clientset.go b/vendor/k8s.io/code-generator/cmd/client-gen/generators/generator_for_clientset.go index a1e67dcbd..18ec09ac6 100644 --- a/vendor/k8s.io/code-generator/cmd/client-gen/generators/generator_for_clientset.go +++ b/vendor/k8s.io/code-generator/cmd/client-gen/generators/generator_for_clientset.go @@ -58,9 +58,9 @@ func (g *genClientset) Imports(c *generator.Context) (imports []string) { imports = append(imports, g.imports.ImportLines()...) for _, group := range g.groups { for _, version := range group.Versions { - typedClientPath := filepath.Join(g.clientsetPackage, "typed", strings.ToLower(group.PackageName), strings.ToLower(version.NonEmpty())) + typedClientPath := filepath.Join(g.clientsetPackage, "typed", group.PackageName, version.NonEmpty()) groupAlias := strings.ToLower(g.groupGoNames[clientgentypes.GroupVersion{Group: group.Group, Version: version.Version}]) - imports = append(imports, fmt.Sprintf("%s%s \"%s\"", groupAlias, strings.ToLower(version.NonEmpty()), typedClientPath)) + imports = append(imports, strings.ToLower(fmt.Sprintf("%s%s \"%s\"", groupAlias, version.NonEmpty(), typedClientPath))) } } return @@ -88,6 +88,10 @@ func (g *genClientset) GenerateType(c *generator.Context, t *types.Type, w io.Wr sw.Do(clientsetTemplate, m) for _, g := range allGroups { sw.Do(clientsetInterfaceImplTemplate, g) + // don't generate the default method if generating internalversion clientset + if g.IsDefaultVersion && g.Version != "" { + sw.Do(clientsetInterfaceDefaultVersionImpl, g) + } } sw.Do(getDiscoveryTemplate, m) sw.Do(newClientsetForConfigTemplate, m) @@ -101,7 +105,9 @@ var clientsetInterface = ` type Interface interface { Discovery() $.DiscoveryInterface|raw$ $range .allGroups$$.GroupGoName$$.Version$() $.PackageAlias$.$.GroupGoName$$.Version$Interface - $end$ + $if .IsDefaultVersion$// Deprecated: please explicitly pick a version if possible.
+ $.GroupGoName$() $.PackageAlias$.$.GroupGoName$$.Version$Interface + $end$$end$ } ` @@ -122,6 +128,14 @@ func (c *Clientset) $.GroupGoName$$.Version$() $.PackageAlias$.$.GroupGoName$$.V } ` +var clientsetInterfaceDefaultVersionImpl = ` +// Deprecated: $.GroupGoName$ retrieves the default version of $.GroupGoName$Client. +// Please explicitly pick a version. +func (c *Clientset) $.GroupGoName$() $.PackageAlias$.$.GroupGoName$$.Version$Interface { + return c.$.LowerCaseGroupGoName$$.Version$ +} +` + var getDiscoveryTemplate = ` // Discovery retrieves the DiscoveryClient func (c *Clientset) Discovery() $.DiscoveryInterface|raw$ { diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/generators/generator_for_group.go b/vendor/k8s.io/code-generator/cmd/client-gen/generators/generator_for_group.go index 215a0171c..fd59715c4 100644 --- a/vendor/k8s.io/code-generator/cmd/client-gen/generators/generator_for_group.go +++ b/vendor/k8s.io/code-generator/cmd/client-gen/generators/generator_for_group.go @@ -98,6 +98,7 @@ func (g *genGroup) GenerateType(c *generator.Context, t *types.Type, w io.Writer "apiPath": apiPath(g.group), "schemaGroupVersion": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/runtime/schema", Name: "GroupVersion"}), "runtimeAPIVersionInternal": c.Universe.Variable(types.Name{Package: "k8s.io/apimachinery/pkg/runtime", Name: "APIVersionInternal"}), + "serializerDirectCodecFactory": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/runtime/serializer", Name: "DirectCodecFactory"}), "restConfig": c.Universe.Type(types.Name{Package: "k8s.io/client-go/rest", Name: "Config"}), "restDefaultKubernetesUserAgent": c.Universe.Function(types.Name{Package: "k8s.io/client-go/rest", Name: "DefaultKubernetesUserAgent"}), "restRESTClientInterface": c.Universe.Type(types.Name{Package: "k8s.io/client-go/rest", Name: "Interface"}), @@ -235,7 +236,7 @@ func setConfigDefaults(config *$.restConfig|raw$) error { gv := $.SchemeGroupVersion|raw$ config.GroupVersion = &gv config.APIPath = $.apiPath$ - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = $.serializerDirectCodecFactory|raw${CodecFactory: scheme.Codecs} if config.UserAgent == "" { config.UserAgent = $.restDefaultKubernetesUserAgent|raw$() diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/generators/generator_for_type.go b/vendor/k8s.io/code-generator/cmd/client-gen/generators/generator_for_type.go index 3e8fc7c4c..92e2a97f1 100644 --- a/vendor/k8s.io/code-generator/cmd/client-gen/generators/generator_for_type.go +++ b/vendor/k8s.io/code-generator/cmd/client-gen/generators/generator_for_type.go @@ -387,16 +387,11 @@ func new$.type|publicPlural$(c *$.GroupGoName$$.Version$Client) *$.type|privateP var listTemplate = ` // List takes label and field selectors, and returns the list of $.resultType|publicPlural$ that match those selectors. func (c *$.type|privatePlural$) List(opts $.ListOptions|raw$) (result *$.resultType|raw$List, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil{ - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &$.resultType|raw$List{} err = c.client.Get(). $if .namespaced$Namespace(c.ns).$end$ Resource("$.type|resource$"). VersionedParams(&opts, $.schemeParameterCodec|raw$). - Timeout(timeout). Do(). 
Into(result) return @@ -406,10 +401,6 @@ func (c *$.type|privatePlural$) List(opts $.ListOptions|raw$) (result *$.resultT var listSubresourceTemplate = ` // List takes $.type|raw$ name, label and field selectors, and returns the list of $.resultType|publicPlural$ that match those selectors. func (c *$.type|privatePlural$) List($.type|private$Name string, opts $.ListOptions|raw$) (result *$.resultType|raw$List, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil{ - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &$.resultType|raw$List{} err = c.client.Get(). $if .namespaced$Namespace(c.ns).$end$ @@ -417,7 +408,6 @@ func (c *$.type|privatePlural$) List($.type|private$Name string, opts $.ListOpti Name($.type|private$Name). SubResource("$.subresourcePath$"). VersionedParams(&opts, $.schemeParameterCodec|raw$). - Timeout(timeout). Do(). Into(result) return @@ -471,15 +461,10 @@ func (c *$.type|privatePlural$) Delete(name string, options *$.DeleteOptions|raw var deleteCollectionTemplate = ` // DeleteCollection deletes a collection of objects. func (c *$.type|privatePlural$) DeleteCollection(options *$.DeleteOptions|raw$, listOptions $.ListOptions|raw$) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil{ - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). $if .namespaced$Namespace(c.ns).$end$ Resource("$.type|resource$"). VersionedParams(&listOptions, $.schemeParameterCodec|raw$). - Timeout(timeout). Body(options). Do(). Error() @@ -568,16 +553,11 @@ func (c *$.type|privatePlural$) UpdateStatus($.type|private$ *$.type|raw$) (resu var watchTemplate = ` // Watch returns a $.watchInterface|raw$ that watches the requested $.type|privatePlural$. func (c *$.type|privatePlural$) Watch(opts $.ListOptions|raw$) ($.watchInterface|raw$, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil{ - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). $if .namespaced$Namespace(c.ns).$end$ Resource("$.type|resource$"). VersionedParams(&opts, $.schemeParameterCodec|raw$). - Timeout(timeout). 
Watch() } ` diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/generators/scheme/generator_for_scheme.go b/vendor/k8s.io/code-generator/cmd/client-gen/generators/scheme/generator_for_scheme.go index a698a28b6..60cfbcc0f 100644 --- a/vendor/k8s.io/code-generator/cmd/client-gen/generators/scheme/generator_for_scheme.go +++ b/vendor/k8s.io/code-generator/cmd/client-gen/generators/scheme/generator_for_scheme.go @@ -69,11 +69,10 @@ func (g *GenScheme) Imports(c *generator.Context) (imports []string) { packagePath = filepath.Dir(packagePath) } packagePath = filepath.Join(packagePath, "install") - - imports = append(imports, fmt.Sprintf("%s \"%s\"", groupAlias, path.Vendorless(packagePath))) + imports = append(imports, strings.ToLower(fmt.Sprintf("%s \"%s\"", groupAlias, path.Vendorless(packagePath)))) break } else { - imports = append(imports, fmt.Sprintf("%s%s \"%s\"", groupAlias, strings.ToLower(version.Version.NonEmpty()), path.Vendorless(packagePath))) + imports = append(imports, strings.ToLower(fmt.Sprintf("%s%s \"%s\"", groupAlias, version.Version.NonEmpty(), path.Vendorless(packagePath)))) } } } diff --git a/vendor/k8s.io/client-go/listers/networking/v1beta1/expansion_generated.go b/vendor/k8s.io/code-generator/cmd/client-gen/generators/tags.go similarity index 58% rename from vendor/k8s.io/client-go/listers/networking/v1beta1/expansion_generated.go rename to vendor/k8s.io/code-generator/cmd/client-gen/generators/tags.go index df6da06eb..b00408103 100644 --- a/vendor/k8s.io/client-go/listers/networking/v1beta1/expansion_generated.go +++ b/vendor/k8s.io/code-generator/cmd/client-gen/generators/tags.go @@ -1,5 +1,5 @@ /* -Copyright The Kubernetes Authors. +Copyright 2016 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,14 +14,19 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by lister-gen. DO NOT EDIT. +package generators -package v1beta1 +import ( + "k8s.io/gengo/types" +) -// IngressListerExpansion allows custom methods to be added to -// IngressLister. -type IngressListerExpansion interface{} +// extractTag gets the comment-tags for the key. If the tag did not exist, it +// returns the empty string. +func extractTag(key string, lines []string) string { + val, present := types.ExtractCommentTags("+", lines)[key] + if !present || len(val) < 1 { + return "" + } -// IngressNamespaceListerExpansion allows custom methods to be added to -// IngressNamespaceLister. -type IngressNamespaceListerExpansion interface{} + return val[0] +} diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/main.go b/vendor/k8s.io/code-generator/cmd/client-gen/main.go index 6e0d187f5..22c28e35f 100644 --- a/vendor/k8s.io/code-generator/cmd/client-gen/main.go +++ b/vendor/k8s.io/code-generator/cmd/client-gen/main.go @@ -21,9 +21,9 @@ import ( "flag" "path/filepath" + "github.com/golang/glog" "github.com/spf13/pflag" "k8s.io/gengo/args" - "k8s.io/klog" generatorargs "k8s.io/code-generator/cmd/client-gen/args" "k8s.io/code-generator/cmd/client-gen/generators" @@ -31,7 +31,6 @@ import ( ) func main() { - klog.InitFlags(nil) genericArgs, customArgs := generatorargs.NewDefaults() // Override defaults. 
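The main.go hunks above and below revert klog to glog, and klog.InitFlags(nil) disappears with the import switch: glog attaches its -v, -logtostderr and related flags to the global flag set from its own init(), while klog only registers them when asked. A minimal sketch of the resulting entrypoint shape (the log statements are illustrative, not from this patch):

package main

import (
	"flag"

	"github.com/golang/glog"
)

func main() {
	// No InitFlags call needed: glog's init() already attached its flags
	// (-v, -logtostderr, -stderrthreshold, ...) to flag.CommandLine.
	flag.Parse()
	defer glog.Flush()

	glog.Info("starting generator")
	glog.V(5).Info("verbose detail, enabled with -v=5")
}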
@@ -53,7 +52,7 @@ func main() { } if err := generatorargs.Validate(genericArgs); err != nil { - klog.Fatalf("Error: %v", err) + glog.Fatalf("Error: %v", err) } if err := genericArgs.Execute( @@ -61,6 +60,6 @@ func main() { generators.DefaultNameSystem(), generators.Packages, ); err != nil { - klog.Fatalf("Error: %v", err) + glog.Fatalf("Error: %v", err) } } diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/types/helpers.go b/vendor/k8s.io/code-generator/cmd/client-gen/types/helpers.go index 59f2fd444..33e6ac451 100644 --- a/vendor/k8s.io/code-generator/cmd/client-gen/types/helpers.go +++ b/vendor/k8s.io/code-generator/cmd/client-gen/types/helpers.go @@ -73,7 +73,7 @@ func (a sortableSliceOfVersions) Less(i, j int) bool { } // Determine the default version among versions. If a user calls a group client -// without specifying the version (e.g., c.CoreV1(), instead of c.CoreV1()), the +// without specifying the version (e.g., c.Core(), instead of c.CoreV1()), the // default version will be returned. func defaultVersion(versions []PackageVersion) Version { var versionStrings []string @@ -88,12 +88,14 @@ func defaultVersion(versions []PackageVersion) Version { func ToGroupVersionInfo(groups []GroupVersions, groupGoNames map[GroupVersion]string) []GroupVersionInfo { var groupVersionPackages []GroupVersionInfo for _, group := range groups { + defaultVersion := defaultVersion(group.Versions) for _, version := range group.Versions { groupGoName := groupGoNames[GroupVersion{Group: group.Group, Version: version.Version}] groupVersionPackages = append(groupVersionPackages, GroupVersionInfo{ Group: Group(namer.IC(group.Group.NonEmpty())), Version: Version(namer.IC(version.Version.String())), PackageAlias: strings.ToLower(groupGoName + version.Version.NonEmpty()), + IsDefaultVersion: version.Version == defaultVersion && version.Version != "", GroupGoName: groupGoName, LowerCaseGroupGoName: namer.IL(groupGoName), }) diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/types/types.go b/vendor/k8s.io/code-generator/cmd/client-gen/types/types.go index 7d1606c50..17fd6e92a 100644 --- a/vendor/k8s.io/code-generator/cmd/client-gen/types/types.go +++ b/vendor/k8s.io/code-generator/cmd/client-gen/types/types.go @@ -62,8 +62,11 @@ type GroupVersions struct { // GroupVersionInfo contains all the info around a group version. type GroupVersionInfo struct { - Group Group - Version Version + Group Group + Version Version + // If a user calls a group client without specifying the version (e.g., + // c.Core(), instead of c.CoreV1()), the default version will be returned. + IsDefaultVersion bool PackageAlias string GroupGoName string LowerCaseGroupGoName string diff --git a/vendor/k8s.io/code-generator/cmd/conversion-gen/generators/conversion.go b/vendor/k8s.io/code-generator/cmd/conversion-gen/generators/conversion.go index 775972d12..422237e11 100644 --- a/vendor/k8s.io/code-generator/cmd/conversion-gen/generators/conversion.go +++ b/vendor/k8s.io/code-generator/cmd/conversion-gen/generators/conversion.go @@ -29,7 +29,7 @@ import ( "k8s.io/gengo/namer" "k8s.io/gengo/types" - "k8s.io/klog" + "github.com/golang/glog" conversionargs "k8s.io/code-generator/cmd/conversion-gen/args" ) @@ -124,10 +124,10 @@ type conversionFuncMap map[conversionPair]*types.Type // Returns all manually-defined conversion functions in the package. 
func getManualConversionFunctions(context *generator.Context, pkg *types.Package, manualMap conversionFuncMap) { if pkg == nil { - klog.Warningf("Skipping nil package passed to getManualConversionFunctions") + glog.Warningf("Skipping nil package passed to getManualConversionFunctions") return } - klog.V(5).Infof("Scanning for conversion functions in %v", pkg.Name) + glog.V(5).Infof("Scanning for conversion functions in %v", pkg.Name) scopeName := types.Ref(conversionPackagePath, "Scope").Name errorName := types.Ref("", "error").Name @@ -136,34 +136,34 @@ func getManualConversionFunctions(context *generator.Context, pkg *types.Package for _, f := range pkg.Functions { if f.Underlying == nil || f.Underlying.Kind != types.Func { - klog.Errorf("Malformed function: %#v", f) + glog.Errorf("Malformed function: %#v", f) continue } if f.Underlying.Signature == nil { - klog.Errorf("Function without signature: %#v", f) + glog.Errorf("Function without signature: %#v", f) continue } - klog.V(8).Infof("Considering function %s", f.Name) + glog.V(8).Infof("Considering function %s", f.Name) signature := f.Underlying.Signature // Check whether the function is conversion function. // Note that all of them have signature: // func Convert_inType_To_outType(inType, outType, conversion.Scope) error if signature.Receiver != nil { - klog.V(8).Infof("%s has a receiver", f.Name) + glog.V(8).Infof("%s has a receiver", f.Name) continue } if len(signature.Parameters) != 3 || signature.Parameters[2].Name != scopeName { - klog.V(8).Infof("%s has wrong parameters", f.Name) + glog.V(8).Infof("%s has wrong parameters", f.Name) continue } if len(signature.Results) != 1 || signature.Results[0].Name != errorName { - klog.V(8).Infof("%s has wrong results", f.Name) + glog.V(8).Infof("%s has wrong results", f.Name) continue } inType := signature.Parameters[0] outType := signature.Parameters[1] if inType.Kind != types.Pointer || outType.Kind != types.Pointer { - klog.V(8).Infof("%s has wrong parameter types", f.Name) + glog.V(8).Infof("%s has wrong parameter types", f.Name) continue } // Now check if the name satisfies the convention. @@ -171,7 +171,7 @@ func getManualConversionFunctions(context *generator.Context, pkg *types.Package args := argsFromType(inType.Elem, outType.Elem) sw.Do("Convert_$.inType|public$_To_$.outType|public$", args) if f.Name.Name == buffer.String() { - klog.V(4).Infof("Found conversion function %s", f.Name) + glog.V(4).Infof("Found conversion function %s", f.Name) key := conversionPair{inType.Elem, outType.Elem} // We might scan the same package twice, and that's OK. 
if v, ok := manualMap[key]; ok && v != nil && v.Name.Package != pkg.Path { @@ -181,9 +181,9 @@ func getManualConversionFunctions(context *generator.Context, pkg *types.Package } else { // prevent user error when they don't get the correct conversion signature if strings.HasPrefix(f.Name.Name, "Convert_") { - klog.Errorf("Rename function %s %s -> %s to match expected conversion signature", f.Name.Package, f.Name.Name, buffer.String()) + glog.Errorf("Rename function %s %s -> %s to match expected conversion signature", f.Name.Package, f.Name.Name, buffer.String()) } - klog.V(8).Infof("%s has wrong name", f.Name) + glog.V(8).Infof("%s has wrong name", f.Name) } buffer.Reset() } @@ -192,7 +192,7 @@ func getManualConversionFunctions(context *generator.Context, pkg *types.Package func Packages(context *generator.Context, arguments *args.GeneratorArgs) generator.Packages { boilerplate, err := arguments.LoadGoBoilerplate() if err != nil { - klog.Fatalf("Failed loading boilerplate: %v", err) + glog.Fatalf("Failed loading boilerplate: %v", err) } packages := generator.Packages{} @@ -220,7 +220,7 @@ func Packages(context *generator.Context, arguments *args.GeneratorArgs) generat } processed[i] = true - klog.V(5).Infof("considering pkg %q", i) + glog.V(5).Infof("considering pkg %q", i) pkg := context.Universe[i] // typesPkg is where the versioned types are defined. Sometimes it is // different from pkg. For example, kubernetes core/v1 types are defined @@ -239,9 +239,9 @@ func Packages(context *generator.Context, arguments *args.GeneratorArgs) generat // in their doc.go file. peerPkgs := extractTag(pkg.Comments) if peerPkgs != nil { - klog.V(5).Infof(" tags: %q", peerPkgs) + glog.V(5).Infof(" tags: %q", peerPkgs) } else { - klog.V(5).Infof(" no tag") + glog.V(5).Infof(" no tag") continue } skipUnsafe := false @@ -255,14 +255,14 @@ func Packages(context *generator.Context, arguments *args.GeneratorArgs) generat externalTypesValues := extractExternalTypesTag(pkg.Comments) if externalTypesValues != nil { if len(externalTypesValues) != 1 { - klog.Fatalf(" expect only one value for %q tag, got: %q", externalTypesTagName, externalTypesValues) + glog.Fatalf(" expect only one value for %q tag, got: %q", externalTypesTagName, externalTypesValues) } externalTypes := externalTypesValues[0] - klog.V(5).Infof(" external types tags: %q", externalTypes) + glog.V(5).Infof(" external types tags: %q", externalTypes) var err error typesPkg, err = context.AddDirectory(externalTypes) if err != nil { - klog.Fatalf("cannot import package %s", externalTypes) + glog.Fatalf("cannot import package %s", externalTypes) } // update context.Order to the latest context.Universe orderer := namer.Orderer{Namer: namer.NewPublicNamer(1)} @@ -291,7 +291,7 @@ func Packages(context *generator.Context, arguments *args.GeneratorArgs) generat context.AddDir(pp) p := context.Universe[pp] if nil == p { - klog.Fatalf("failed to find pkg: %s", pp) + glog.Fatalf("failed to find pkg: %s", pp) } getManualConversionFunctions(context, p, manualConversions) } @@ -335,7 +335,7 @@ func Packages(context *generator.Context, arguments *args.GeneratorArgs) generat // from being a candidate for unsafe conversion for k, v := range manualConversions { if isCopyOnly(v.CommentLines) { - klog.V(5).Infof("Conversion function %s will not block memory copy because it is copy-only", v.Name) + glog.V(5).Infof("Conversion function %s will not block memory copy because it is copy-only", v.Name) continue } // this type should be excluded from all equivalence, because the 
converter must be called. @@ -518,9 +518,9 @@ func (g *genConversion) convertibleOnlyWithinPackage(inType, outType *types.Type tagvals := extractTag(t.CommentLines) if tagvals != nil { if tagvals[0] != "false" { - klog.Fatalf("Type %v: unsupported %s value: %q", t, tagName, tagvals[0]) + glog.Fatalf("Type %v: unsupported %s value: %q", t, tagName, tagvals[0]) } - klog.V(5).Infof("type %v requests no conversion generation, skipping", t) + glog.V(5).Infof("type %v requests no conversion generation, skipping", t) return false } // TODO: Consider generating functions for other kinds too. @@ -582,10 +582,10 @@ func (g *genConversion) preexists(inType, outType *types.Type) (*types.Type, boo } func (g *genConversion) Init(c *generator.Context, w io.Writer) error { - if klog.V(5) { + if glog.V(5) { if m, ok := g.useUnsafe.(equalMemoryTypes); ok { var result []string - klog.Infof("All objects without identical memory layout:") + glog.Infof("All objects without identical memory layout:") for k, v := range m { if v { continue @@ -594,7 +594,7 @@ func (g *genConversion) Init(c *generator.Context, w io.Writer) error { } sort.Strings(result) for _, s := range result { - klog.Infof(s) + glog.Infof(s) } } } @@ -643,7 +643,7 @@ func (g *genConversion) Init(c *generator.Context, w io.Writer) error { } func (g *genConversion) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error { - klog.V(5).Infof("generating for type %v", t) + glog.V(5).Infof("generating for type %v", t) peerType := getPeerTypeFor(c, t, g.peerPackages) sw := generator.NewSnippetWriter(w, c, "$", "$") g.generateConversion(t, peerType, sw) @@ -664,10 +664,10 @@ func (g *genConversion) generateConversion(inType, outType *types.Type, sw *gene // There is a public manual Conversion method: use it. } else if skipped := g.skippedFields[inType]; len(skipped) != 0 { // The inType had some fields we could not generate. - klog.Errorf("Warning: could not find nor generate a final Conversion function for %v -> %v", inType, outType) - klog.Errorf(" the following fields need manual conversion:") + glog.Errorf("Warning: could not find nor generate a final Conversion function for %v -> %v", inType, outType) + glog.Errorf(" the following fields need manual conversion:") for _, f := range skipped { - klog.Errorf(" - %v", f) + glog.Errorf(" - %v", f) } } else { // Emit a public conversion function. @@ -682,7 +682,7 @@ func (g *genConversion) generateConversion(inType, outType *types.Type, sw *gene // at any nesting level. This makes the autogenerator easy to understand, and // the compiler shouldn't care. func (g *genConversion) generateFor(inType, outType *types.Type, sw *generator.SnippetWriter) { - klog.V(5).Infof("generating %v -> %v", inType, outType) + glog.V(5).Infof("generating %v -> %v", inType, outType) var f func(*types.Type, *types.Type, *generator.SnippetWriter) switch inType.Kind { @@ -853,7 +853,7 @@ func (g *genConversion) doStruct(inType, outType *types.Type, sw *generator.Snip sw.Do("}\n", nil) continue } - klog.V(5).Infof("Skipped function %s because it is copy-only and we can use direct assignment", function.Name) + glog.V(5).Infof("Skipped function %s because it is copy-only and we can use direct assignment", function.Name) } // If we can't auto-convert, punt before we emit any code. 
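For reference, the shape the scanner above insists on (two pointer parameters, a conversion.Scope third argument, a single error result, and the generated-style name) is exactly the shape a hand-written override must have. A minimal sketch, using hypothetical widgets and v1 packages that are not part of this patch; conversion here is k8s.io/apimachinery/pkg/conversion, the package the scanner resolves Scope from:

    // Convert_widgets_Widget_To_v1_Widget is a hypothetical manual conversion.
    // getManualConversionFunctions accepts it because the name matches the
    // Convert_<pkg1>_<type>_To_<pkg2>_<type> template and the signature is
    // (in, out pointers, conversion.Scope) error; the generator then calls it
    // instead of emitting its own body for this pair of types.
    func Convert_widgets_Widget_To_v1_Widget(in *widgets.Widget, out *v1.Widget, s conversion.Scope) error {
        out.Name = in.Name // fields with identical layout stay plain assignments
        return nil
    }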
diff --git a/vendor/k8s.io/code-generator/cmd/conversion-gen/main.go b/vendor/k8s.io/code-generator/cmd/conversion-gen/main.go index 215b17bde..f2b91cc2e 100644 --- a/vendor/k8s.io/code-generator/cmd/conversion-gen/main.go +++ b/vendor/k8s.io/code-generator/cmd/conversion-gen/main.go @@ -14,59 +14,20 @@ See the License for the specific language governing permissions and limitations under the License. */ -// conversion-gen is a tool for auto-generating functions that convert -// between internal and external types. A general conversion code -// generation task involves three sets of packages: (1) a set of -// packages containing internal types, (2) a single package containing -// the external types, and (3) a single destination package (i.e., -// where the generated conversion functions go, and where the -// developer-authored conversion functions are). The packages -// containing the internal types play the role known as "peer -// packages" in the general code-generation framework of Kubernetes. +// conversion-gen is a tool for auto-generating Conversion functions. // -// For each conversion task, `conversion-gen` will generate functions -// that efficiently convert between same-name types in the two -// (internal, external) packages. The generated functions include -// ones named -// autoConvert_<pkg1>_<type>_To_<pkg2>_<type> -// for each such pair of types --- both with (pkg1,pkg2) = -// (internal,external) and (pkg1,pkg2) = (external,internal). -// Additionally: if the destination package does not contain one in a -// non-generated file then a function named -// Convert_<pkg1>_<type>_To_<pkg2>_<type> -// is also generated and it simply calls the `autoConvert...` -// function. The generated conversion functions use standard value -// assignment wherever possible. For compound types, the generated -// conversion functions call the `Convert...` functions for the -// subsidiary types. Thus developers can override the behavior for -// selected types. For a top-level object type (i.e., the type of an -// object that will be input to an apiserver), for such an override to -// be used by the apiserver the developer-maintained conversion -// functions must also be registered by invoking the -// `AddConversionFuncs` method of the relevant `Scheme` object from -// k8s.io/apimachinery/pkg/runtime. +// Given a list of input directories, it will scan for "peer" packages and +// generate functions that efficiently convert between same-name types in each +// package. For any pair of types that has a +// `Convert_<pkg1>_<type>_To_<pkg2>_<type> -// This introduces a conversion task, for which the destination -// package is the one containing the file with the tag and the tag -// identifies a package containing internal types. If there is also a -// tag of the form -// // +k8s:conversion-gen-external-types= -// then it identifies the package containing the external types; -// otherwise they are in the destination package. -// -// For each conversion code generation task, the full set of internal -// packages (AKA peer packages) consists of the ones specified in the -// `k8s:conversion-gen` tags PLUS any specified in the -// `--base-peer-dirs` and `--extra-peer-dirs` flags on the command -// line. +// Generation is governed by comment tags in the source.
Any package may +// request Conversion generation by including a comment in the file-comments of +// one file, of the form: +// // +k8s:conversion-gen= // // When generating for a package, individual types or fields of structs may opt // out of Conversion generation by specifying a comment on the of the form: @@ -77,9 +38,9 @@ import ( "flag" "path/filepath" + "github.com/golang/glog" "github.com/spf13/pflag" "k8s.io/gengo/args" - "k8s.io/klog" generatorargs "k8s.io/code-generator/cmd/conversion-gen/args" "k8s.io/code-generator/cmd/conversion-gen/generators" @@ -87,7 +48,6 @@ import ( ) func main() { - klog.InitFlags(nil) genericArgs, customArgs := generatorargs.NewDefaults() // Override defaults. @@ -101,7 +61,7 @@ func main() { pflag.Parse() if err := generatorargs.Validate(genericArgs); err != nil { - klog.Fatalf("Error: %v", err) + glog.Fatalf("Error: %v", err) } // Run it. @@ -110,7 +70,7 @@ func main() { generators.DefaultNameSystem(), generators.Packages, ); err != nil { - klog.Fatalf("Error: %v", err) + glog.Fatalf("Error: %v", err) } - klog.V(2).Info("Completed successfully.") + glog.V(2).Info("Completed successfully.") } diff --git a/vendor/k8s.io/code-generator/cmd/deepcopy-gen/main.go b/vendor/k8s.io/code-generator/cmd/deepcopy-gen/main.go index 96fb29873..cce65b772 100644 --- a/vendor/k8s.io/code-generator/cmd/deepcopy-gen/main.go +++ b/vendor/k8s.io/code-generator/cmd/deepcopy-gen/main.go @@ -46,17 +46,16 @@ import ( "flag" "path/filepath" + "github.com/golang/glog" "github.com/spf13/pflag" "k8s.io/gengo/args" "k8s.io/gengo/examples/deepcopy-gen/generators" - "k8s.io/klog" generatorargs "k8s.io/code-generator/cmd/deepcopy-gen/args" "k8s.io/code-generator/pkg/util" ) func main() { - klog.InitFlags(nil) genericArgs, customArgs := generatorargs.NewDefaults() // Override defaults. @@ -70,7 +69,7 @@ func main() { pflag.Parse() if err := generatorargs.Validate(genericArgs); err != nil { - klog.Fatalf("Error: %v", err) + glog.Fatalf("Error: %v", err) } // Run it. @@ -79,7 +78,7 @@ func main() { generators.DefaultNameSystem(), generators.Packages, ); err != nil { - klog.Fatalf("Error: %v", err) + glog.Fatalf("Error: %v", err) } - klog.V(2).Info("Completed successfully.") + glog.V(2).Info("Completed successfully.") } diff --git a/vendor/k8s.io/code-generator/cmd/defaulter-gen/main.go b/vendor/k8s.io/code-generator/cmd/defaulter-gen/main.go index 40bb875e5..9d33f700b 100644 --- a/vendor/k8s.io/code-generator/cmd/defaulter-gen/main.go +++ b/vendor/k8s.io/code-generator/cmd/defaulter-gen/main.go @@ -45,17 +45,16 @@ import ( "flag" "path/filepath" + "github.com/golang/glog" "github.com/spf13/pflag" "k8s.io/gengo/args" "k8s.io/gengo/examples/defaulter-gen/generators" - "k8s.io/klog" generatorargs "k8s.io/code-generator/cmd/defaulter-gen/args" "k8s.io/code-generator/pkg/util" ) func main() { - klog.InitFlags(nil) genericArgs, customArgs := generatorargs.NewDefaults() // Override defaults. @@ -69,7 +68,7 @@ func main() { pflag.Parse() if err := generatorargs.Validate(genericArgs); err != nil { - klog.Fatalf("Error: %v", err) + glog.Fatalf("Error: %v", err) } // Run it. 
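A note on the pattern repeated through these main.go hunks: dropping klog.InitFlags(nil) is a consequence of the logger swap rather than a separate cleanup. glog registers -v, -logtostderr, and friends on the global flag set from its own init(), while klog registers them only on request. A minimal sketch of the klog side, assuming the klog v0.x API these tools previously vendored:

    package main

    import (
        "flag"

        "k8s.io/klog"
    )

    func main() {
        // Without this call the -v/-logtostderr flags would not exist;
        // passing nil registers them on flag.CommandLine, mirroring what
        // glog does implicitly at import time.
        klog.InitFlags(nil)
        flag.Parse()
        klog.V(2).Info("verbosity flags are wired up")
    }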
@@ -78,7 +77,7 @@ func main() { generators.DefaultNameSystem(), generators.Packages, ); err != nil { - klog.Fatalf("Error: %v", err) + glog.Fatalf("Error: %v", err) } - klog.V(2).Info("Completed successfully.") + glog.V(2).Info("Completed successfully.") } diff --git a/vendor/k8s.io/code-generator/cmd/go-to-protobuf/OWNERS b/vendor/k8s.io/code-generator/cmd/go-to-protobuf/OWNERS index 613659162..05d4b2a65 100644 --- a/vendor/k8s.io/code-generator/cmd/go-to-protobuf/OWNERS +++ b/vendor/k8s.io/code-generator/cmd/go-to-protobuf/OWNERS @@ -1,5 +1,3 @@ -# See the OWNERS docs at https://go.k8s.io/owners - approvers: - smarterclayton reviewers: diff --git a/vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/cmd.go b/vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/cmd.go index e85ceb8d1..555073225 100644 --- a/vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/cmd.go +++ b/vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/cmd.go @@ -25,20 +25,16 @@ import ( "os" "os/exec" "path/filepath" - "sort" "strings" - flag "github.com/spf13/pflag" - "gonum.org/v1/gonum/graph" - "gonum.org/v1/gonum/graph/simple" - "gonum.org/v1/gonum/graph/topo" - "k8s.io/code-generator/pkg/util" "k8s.io/gengo/args" "k8s.io/gengo/generator" "k8s.io/gengo/namer" "k8s.io/gengo/parser" "k8s.io/gengo/types" + + flag "github.com/spf13/pflag" ) type Generator struct { @@ -206,18 +202,6 @@ func Run(g *Generator) { c.Verify = g.Common.VerifyOnly c.FileTypes["protoidl"] = NewProtoFile() - // order package by imports, importees first - deps := deps(c, protobufNames.packages) - order, err := importOrder(deps) - if err != nil { - log.Fatalf("Failed to order packages by imports: %v", err) - } - topologicalPos := map[string]int{} - for i, p := range order { - topologicalPos[p] = i - } - sort.Sort(positionOrder{topologicalPos, protobufNames.packages}) - var vendoredOutputPackages, localOutputPackages generator.Packages for _, p := range protobufNames.packages { if _, ok := nonOutputPackages[p.Name()]; ok { @@ -363,66 +347,3 @@ func Run(g *Generator) { } } } - -func deps(c *generator.Context, pkgs []*protobufPackage) map[string][]string { - ret := map[string][]string{} - for _, p := range pkgs { - for _, d := range c.Universe[p.PackagePath].Imports { - ret[p.PackagePath] = append(ret[p.PackagePath], d.Path) - } - } - return ret -} - -func importOrder(deps map[string][]string) ([]string, error) { - nodes := map[string]graph.Node{} - names := map[int64]string{} - g := simple.NewDirectedGraph() - for pkg, imports := range deps { - for _, imp := range imports { - if _, found := nodes[pkg]; !found { - n := g.NewNode() - g.AddNode(n) - nodes[pkg] = n - names[n.ID()] = pkg - } - if _, found := nodes[imp]; !found { - n := g.NewNode() - g.AddNode(n) - nodes[imp] = n - names[n.ID()] = imp - } - g.SetEdge(g.NewEdge(nodes[imp], nodes[pkg])) - } - } - - ret := []string{} - sorted, err := topo.Sort(g) - if err != nil { - return nil, err - } - for _, n := range sorted { - ret = append(ret, names[n.ID()]) - fmt.Println("topological order", names[n.ID()]) - } - return ret, nil -} - -type positionOrder struct { - pos map[string]int - elements []*protobufPackage -} - -func (o positionOrder) Len() int { - return len(o.elements) -} - -func (o positionOrder) Less(i, j int) bool { - return o.pos[o.elements[i].PackagePath] < o.pos[o.elements[j].PackagePath] -} - -func (o positionOrder) Swap(i, j int) { - x := o.elements[i] - o.elements[i] = o.elements[j] - o.elements[j] = x -} diff --git 
a/vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/generator.go b/vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/generator.go index 1a9803dc8..0e7a7d8ec 100644 --- a/vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/generator.go +++ b/vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/generator.go @@ -25,7 +25,7 @@ import ( "strconv" "strings" - "k8s.io/klog" + "github.com/golang/glog" "k8s.io/gengo/generator" "k8s.io/gengo/namer" @@ -85,7 +85,7 @@ func (g *genProtoIDL) Filter(c *generator.Context, t *types.Type) bool { // Type specified "true". return true } - klog.Fatalf(`Comment tag "protobuf" must be true or false, found: %q`, tagVals[0]) + glog.Fatalf(`Comment tag "protobuf" must be true or false, found: %q`, tagVals[0]) } if !g.generateAll { // We're not generating everything. @@ -724,10 +724,6 @@ func genComment(out io.Writer, lines []string, indent string) { lines = lines[:l-1] } for _, c := range lines { - if len(c) == 0 { - fmt.Fprintf(out, "%s//\n", indent) // avoid trailing whitespace - continue - } fmt.Fprintf(out, "%s// %s\n", indent, c) } } diff --git a/vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/tags.go b/vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/tags.go index 8e2a1917d..2dff5b922 100644 --- a/vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/tags.go +++ b/vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/tags.go @@ -17,8 +17,8 @@ limitations under the License. package protobuf import ( + "github.com/golang/glog" "k8s.io/gengo/types" - "k8s.io/klog" ) // extractBoolTagOrDie gets the comment-tags for the key and asserts that, if @@ -27,7 +27,7 @@ import ( func extractBoolTagOrDie(key string, lines []string) bool { val, err := types.ExtractSingleBoolCommentTag("+", key, false, lines) if err != nil { - klog.Fatal(err) + glog.Fatal(err) } return val } diff --git a/vendor/k8s.io/code-generator/cmd/import-boss/main.go b/vendor/k8s.io/code-generator/cmd/import-boss/main.go index da099fda7..d99899441 100644 --- a/vendor/k8s.io/code-generator/cmd/import-boss/main.go +++ b/vendor/k8s.io/code-generator/cmd/import-boss/main.go @@ -22,13 +22,9 @@ limitations under the License. // // If an ".import-restrictions" file is found, then all imports of the package // are checked against each "rule" in the file. A rule consists of three parts: -// -// - A SelectorRegexp, to select the import paths that the rule applies to. -// -// - A list of AllowedPrefixes -// -// - A list of ForbiddenPrefixes -// +// * A SelectorRegexp, to select the import paths that the rule applies to. +// * A list of AllowedPrefixes +// * A list of ForbiddenPrefixes // An import is allowed if it matches at least one allowed prefix and does not // match any forbidden prefix. An example file looks like this: // @@ -67,11 +63,10 @@ import ( "k8s.io/gengo/args" "k8s.io/gengo/examples/import-boss/generators" - "k8s.io/klog" + "github.com/golang/glog" ) func main() { - klog.InitFlags(nil) arguments := args.Default() // Override defaults. 
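To make the rule format described above concrete: the checker parses each .import-restrictions file into a list of Rule values (the Rule struct is visible in the import_restrict.go deletion near the end of this patch). A sketch of one rule as a Go literal, with prefix values loosely based on the example in the tool's documentation rather than taken from this patch:

    // Imports matching SelectorRegexp must carry at least one allowed prefix
    // and none of the forbidden ones; other imports are ignored by this rule.
    var rule = Rule{
        SelectorRegexp:    "k8s[.]io",
        AllowedPrefixes:   []string{"k8s.io/gengo", "k8s.io/kubernetes/third_party"},
        ForbiddenPrefixes: []string{"k8s.io/kubernetes/pkg/third_party/deprecated"},
    }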
@@ -87,8 +82,8 @@ func main() { generators.DefaultNameSystem(), generators.Packages, ); err != nil { - klog.Errorf("Error: %v", err) + glog.Errorf("Error: %v", err) os.Exit(1) } - klog.V(2).Info("Completed successfully.") + glog.V(2).Info("Completed successfully.") } diff --git a/vendor/k8s.io/code-generator/cmd/informer-gen/generators/factory.go b/vendor/k8s.io/code-generator/cmd/informer-gen/generators/factory.go index 6e5793109..5c557db73 100644 --- a/vendor/k8s.io/code-generator/cmd/informer-gen/generators/factory.go +++ b/vendor/k8s.io/code-generator/cmd/informer-gen/generators/factory.go @@ -25,7 +25,7 @@ import ( "k8s.io/gengo/namer" "k8s.io/gengo/types" - "k8s.io/klog" + "github.com/golang/glog" ) // factoryGenerator produces a file of listers for a given GroupVersion and @@ -65,7 +65,7 @@ func (g *factoryGenerator) Imports(c *generator.Context) (imports []string) { func (g *factoryGenerator) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error { sw := generator.NewSnippetWriter(w, c, "{{", "}}") - klog.V(5).Infof("processing type %v", t) + glog.V(5).Infof("processing type %v", t) gvInterfaces := make(map[string]*types.Type) gvNewFuncs := make(map[string]*types.Type) @@ -152,7 +152,7 @@ func NewSharedInformerFactory(client {{.clientSetInterface|raw}}, defaultResync // as specified here. // Deprecated: Please use NewSharedInformerFactoryWithOptions instead func NewFilteredSharedInformerFactory(client {{.clientSetInterface|raw}}, defaultResync {{.timeDuration|raw}}, namespace string, tweakListOptions {{.interfacesTweakListOptionsFunc|raw}}) SharedInformerFactory { - return NewSharedInformerFactoryWithOptions(client, defaultResync, WithNamespace(namespace), WithTweakListOptions(tweakListOptions)) + return NewSharedInformerFactoryWithOptions(client, defaultResync, WithNamespace(namespace), WithTweakListOptions(tweakListOptions)) } // NewSharedInformerFactoryWithOptions constructs a new instance of a SharedInformerFactory with additional options. @@ -165,7 +165,7 @@ func NewSharedInformerFactoryWithOptions(client {{.clientSetInterface|raw}}, def startedInformers: make(map[{{.reflectType|raw}}]bool), customResync: make(map[{{.reflectType|raw}}]{{.timeDuration|raw}}), } - + // Apply all options for _, opt := range options { factory = opt(factory) diff --git a/vendor/k8s.io/code-generator/cmd/informer-gen/generators/factoryinterface.go b/vendor/k8s.io/code-generator/cmd/informer-gen/generators/factoryinterface.go index fc0668c5b..c78180438 100644 --- a/vendor/k8s.io/code-generator/cmd/informer-gen/generators/factoryinterface.go +++ b/vendor/k8s.io/code-generator/cmd/informer-gen/generators/factoryinterface.go @@ -23,7 +23,7 @@ import ( "k8s.io/gengo/namer" "k8s.io/gengo/types" - "k8s.io/klog" + "github.com/golang/glog" ) // factoryInterfaceGenerator produces a file of interfaces used to break a dependency cycle for @@ -60,7 +60,7 @@ func (g *factoryInterfaceGenerator) Imports(c *generator.Context) (imports []str func (g *factoryInterfaceGenerator) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error { sw := generator.NewSnippetWriter(w, c, "{{", "}}") - klog.V(5).Infof("processing type %v", t) + glog.V(5).Infof("processing type %v", t) m := map[string]interface{}{ "cacheSharedIndexInformer": c.Universe.Type(cacheSharedIndexInformer), @@ -76,7 +76,6 @@ func (g *factoryInterfaceGenerator) GenerateType(c *generator.Context, t *types. 
} var externalSharedInformerFactoryInterface = ` -// NewInformerFunc takes {{.clientSetPackage|raw}} and {{.timeDuration|raw}} to return a SharedIndexInformer. type NewInformerFunc func({{.clientSetPackage|raw}}, {{.timeDuration|raw}}) cache.SharedIndexInformer // SharedInformerFactory a small interface to allow for adding an informer without an import cycle @@ -85,6 +84,5 @@ type SharedInformerFactory interface { InformerFor(obj {{.runtimeObject|raw}}, newFunc NewInformerFunc) {{.cacheSharedIndexInformer|raw}} } -// TweakListOptionsFunc is a function that transforms a {{.v1ListOptions|raw}}. type TweakListOptionsFunc func(*{{.v1ListOptions|raw}}) ` diff --git a/vendor/k8s.io/code-generator/cmd/informer-gen/generators/generic.go b/vendor/k8s.io/code-generator/cmd/informer-gen/generators/generic.go index cad907990..54632de05 100644 --- a/vendor/k8s.io/code-generator/cmd/informer-gen/generators/generic.go +++ b/vendor/k8s.io/code-generator/cmd/informer-gen/generators/generic.go @@ -22,7 +22,6 @@ import ( "strings" clientgentypes "k8s.io/code-generator/cmd/client-gen/types" - codegennamer "k8s.io/code-generator/pkg/namer" "k8s.io/gengo/generator" "k8s.io/gengo/namer" "k8s.io/gengo/types" @@ -57,7 +56,6 @@ func (g *genericGenerator) Namers(c *generator.Context) namer.NameSystems { "raw": namer.NewRawNamer(g.outputPackage, g.imports), "allLowercasePlural": namer.NewAllLowercasePluralNamer(pluralExceptions), "publicPlural": namer.NewPublicPluralNamer(pluralExceptions), - "resource": codegennamer.NewTagOverrideNamer("resourceName", namer.NewAllLowercasePluralNamer(pluralExceptions)), } } @@ -113,9 +111,7 @@ func (g *genericGenerator) GenerateType(c *generator.Context, t *types.Type, w i GoName: namer.IC(v.Version.NonEmpty()), Resources: orderer.OrderTypes(g.typesForGroupVersion[gv]), } - func() { - schemeGVs[version] = c.Universe.Variable(types.Name{Package: g.typesForGroupVersion[gv][0].Name.Package, Name: "SchemeGroupVersion"}) - }() + schemeGVs[version] = c.Universe.Variable(types.Name{Package: g.typesForGroupVersion[gv][0].Name.Package, Name: "SchemeGroupVersion"}) group.Versions = append(group.Versions, version) } sort.Sort(versionSort(group.Versions)) @@ -172,7 +168,7 @@ func (f *sharedInformerFactory) ForResource(resource {{.schemaGroupVersionResour {{range $version := .Versions -}} // Group={{$group.Name}}, Version={{.Name}} {{range .Resources -}} - case {{index $.schemeGVs $version|raw}}.WithResource("{{.|resource}}"): + case {{index $.schemeGVs $version|raw}}.WithResource("{{.|allLowercasePlural}}"): return &genericInformer{resource: resource.GroupResource(), informer: f.{{$GroupGoName}}().{{$version.GoName}}().{{.|publicPlural}}().Informer()}, nil {{end}} {{end}} diff --git a/vendor/k8s.io/code-generator/cmd/informer-gen/generators/informer.go b/vendor/k8s.io/code-generator/cmd/informer-gen/generators/informer.go index 9204d6215..88cc08df5 100644 --- a/vendor/k8s.io/code-generator/cmd/informer-gen/generators/informer.go +++ b/vendor/k8s.io/code-generator/cmd/informer-gen/generators/informer.go @@ -28,7 +28,7 @@ import ( "k8s.io/code-generator/cmd/client-gen/generators/util" clientgentypes "k8s.io/code-generator/cmd/client-gen/types" - "k8s.io/klog" + "github.com/golang/glog" ) // informerGenerator produces a file of listers for a given GroupVersion and @@ -66,7 +66,7 @@ func (g *informerGenerator) Imports(c *generator.Context) (imports []string) { func (g *informerGenerator) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error { sw := generator.NewSnippetWriter(w, c, 
"$", "$") - klog.V(5).Infof("processing type %v", t) + glog.V(5).Infof("processing type %v", t) listerPackage := fmt.Sprintf("%s/%s/%s", g.listersPackage, g.groupPkgName, strings.ToLower(g.groupVersion.Version.NonEmpty())) clientSetInterface := c.Universe.Type(types.Name{Package: g.clientSetPackage, Name: "Interface"}) diff --git a/vendor/k8s.io/code-generator/cmd/informer-gen/generators/packages.go b/vendor/k8s.io/code-generator/cmd/informer-gen/generators/packages.go index cfb91ceba..2cc0372f8 100644 --- a/vendor/k8s.io/code-generator/cmd/informer-gen/generators/packages.go +++ b/vendor/k8s.io/code-generator/cmd/informer-gen/generators/packages.go @@ -22,11 +22,11 @@ import ( "path/filepath" "strings" + "github.com/golang/glog" "k8s.io/gengo/args" "k8s.io/gengo/generator" "k8s.io/gengo/namer" "k8s.io/gengo/types" - "k8s.io/klog" "k8s.io/code-generator/cmd/client-gen/generators/util" clientgentypes "k8s.io/code-generator/cmd/client-gen/types" @@ -102,12 +102,12 @@ func vendorless(p string) string { func Packages(context *generator.Context, arguments *args.GeneratorArgs) generator.Packages { boilerplate, err := arguments.LoadGoBoilerplate() if err != nil { - klog.Fatalf("Failed loading boilerplate: %v", err) + glog.Fatalf("Failed loading boilerplate: %v", err) } customArgs, ok := arguments.CustomArgs.(*informergenargs.CustomArgs) if !ok { - klog.Fatalf("Wrong CustomArgs type: %T", arguments.CustomArgs) + glog.Fatalf("Wrong CustomArgs type: %T", arguments.CustomArgs) } internalVersionPackagePath := filepath.Join(arguments.OutputPackagePath) @@ -128,7 +128,7 @@ func Packages(context *generator.Context, arguments *args.GeneratorArgs) generat objectMeta, internal, err := objectMetaForPackage(p) if err != nil { - klog.Fatal(err) + glog.Fatal(err) } if objectMeta == nil { // no types in this package had genclient @@ -141,7 +141,7 @@ func Packages(context *generator.Context, arguments *args.GeneratorArgs) generat if internal { lastSlash := strings.LastIndex(p.Path, "/") if lastSlash == -1 { - klog.Fatalf("error constructing internal group version for package %q", p.Path) + glog.Fatalf("error constructing internal group version for package %q", p.Path) } gv.Group = clientgentypes.Group(p.Path[lastSlash+1:]) targetGroupVersions = internalGroupVersions @@ -320,9 +320,9 @@ func versionPackage(basePackage string, groupPkgName string, gv clientgentypes.G DefaultGen: generator.DefaultGen{ OptionalName: "interface", }, - outputPackage: packagePath, - imports: generator.NewImportTracker(), - types: typesToGenerate, + outputPackage: packagePath, + imports: generator.NewImportTracker(), + types: typesToGenerate, internalInterfacesPackage: packageForInternalInterfaces(basePackage), }) diff --git a/vendor/k8s.io/gengo/examples/set-gen/generators/tags.go b/vendor/k8s.io/code-generator/cmd/informer-gen/generators/tags.go similarity index 95% rename from vendor/k8s.io/gengo/examples/set-gen/generators/tags.go rename to vendor/k8s.io/code-generator/cmd/informer-gen/generators/tags.go index bb3b4d257..afa287815 100644 --- a/vendor/k8s.io/gengo/examples/set-gen/generators/tags.go +++ b/vendor/k8s.io/code-generator/cmd/informer-gen/generators/tags.go @@ -17,8 +17,8 @@ limitations under the License. 
package generators import ( + "github.com/golang/glog" "k8s.io/gengo/types" - "k8s.io/klog" ) // extractBoolTagOrDie gets the comment-tags for the key and asserts that, if @@ -27,7 +27,7 @@ import ( func extractBoolTagOrDie(key string, lines []string) bool { val, err := types.ExtractSingleBoolCommentTag("+", key, false, lines) if err != nil { - klog.Fatalf(err.Error()) + glog.Fatal(err) } return val } diff --git a/vendor/k8s.io/code-generator/cmd/informer-gen/generators/versioninterface.go b/vendor/k8s.io/code-generator/cmd/informer-gen/generators/versioninterface.go index f80350c5f..1cd27d5cd 100644 --- a/vendor/k8s.io/code-generator/cmd/informer-gen/generators/versioninterface.go +++ b/vendor/k8s.io/code-generator/cmd/informer-gen/generators/versioninterface.go @@ -63,7 +63,7 @@ func (g *versionInterfaceGenerator) GenerateType(c *generator.Context, t *types. m := map[string]interface{}{ "interfacesTweakListOptionsFunc": c.Universe.Type(types.Name{Package: g.internalInterfacesPackage, Name: "TweakListOptionsFunc"}), "interfacesSharedInformerFactory": c.Universe.Type(types.Name{Package: g.internalInterfacesPackage, Name: "SharedInformerFactory"}), - "types": g.types, + "types": g.types, } sw.Do(versionTemplate, m) diff --git a/vendor/k8s.io/code-generator/cmd/informer-gen/main.go b/vendor/k8s.io/code-generator/cmd/informer-gen/main.go index 14f3e923e..bfe826080 100644 --- a/vendor/k8s.io/code-generator/cmd/informer-gen/main.go +++ b/vendor/k8s.io/code-generator/cmd/informer-gen/main.go @@ -20,17 +20,16 @@ import ( "flag" "path/filepath" + "github.com/golang/glog" "github.com/spf13/pflag" "k8s.io/code-generator/cmd/informer-gen/generators" "k8s.io/code-generator/pkg/util" "k8s.io/gengo/args" - "k8s.io/klog" generatorargs "k8s.io/code-generator/cmd/informer-gen/args" ) func main() { - klog.InitFlags(nil) genericArgs, customArgs := generatorargs.NewDefaults() // Override defaults. @@ -48,7 +47,7 @@ func main() { pflag.Parse() if err := generatorargs.Validate(genericArgs); err != nil { - klog.Fatalf("Error: %v", err) + glog.Fatalf("Error: %v", err) } // Run it. @@ -57,7 +56,7 @@ func main() { generators.DefaultNameSystem(), generators.Packages, ); err != nil { - klog.Fatalf("Error: %v", err) + glog.Fatalf("Error: %v", err) } - klog.V(2).Info("Completed successfully.") + glog.V(2).Info("Completed successfully.") } diff --git a/vendor/k8s.io/code-generator/cmd/lister-gen/generators/lister.go b/vendor/k8s.io/code-generator/cmd/lister-gen/generators/lister.go index c8ed5ad4d..cde6e2f77 100644 --- a/vendor/k8s.io/code-generator/cmd/lister-gen/generators/lister.go +++ b/vendor/k8s.io/code-generator/cmd/lister-gen/generators/lister.go @@ -30,7 +30,7 @@ import ( "k8s.io/code-generator/cmd/client-gen/generators/util" clientgentypes "k8s.io/code-generator/cmd/client-gen/types" - "k8s.io/klog" + "github.com/golang/glog" ) // NameSystems returns the name system used by the generators in this package. 
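The relocated tags.go above, like its twins in the other generators, exists to read boolean comment tags (for example +genclient:nonNamespaced) off API types. For orientation, a hypothetical type carrying the tags this family of generators keys off; metav1 here is k8s.io/apimachinery/pkg/apis/meta/v1:

    // +genclient
    // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

    // Widget is a hypothetical API type: +genclient opts it into client-gen
    // (and therefore lister-gen and informer-gen), and the deepcopy tag asks
    // deepcopy-gen for a DeepCopyObject method satisfying runtime.Object.
    type Widget struct {
        metav1.TypeMeta   `json:",inline"`
        metav1.ObjectMeta `json:"metadata,omitempty"`
    }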
@@ -66,7 +66,7 @@ func DefaultNameSystem() string { func Packages(context *generator.Context, arguments *args.GeneratorArgs) generator.Packages { boilerplate, err := arguments.LoadGoBoilerplate() if err != nil { - klog.Fatalf("Failed loading boilerplate: %v", err) + glog.Fatalf("Failed loading boilerplate: %v", err) } var packageList generator.Packages @@ -75,7 +75,7 @@ func Packages(context *generator.Context, arguments *args.GeneratorArgs) generat objectMeta, internal, err := objectMetaForPackage(p) if err != nil { - klog.Fatal(err) + glog.Fatal(err) } if objectMeta == nil { // no types in this package had genclient @@ -88,7 +88,7 @@ func Packages(context *generator.Context, arguments *args.GeneratorArgs) generat if internal { lastSlash := strings.LastIndex(p.Path, "/") if lastSlash == -1 { - klog.Fatalf("error constructing internal group version for package %q", p.Path) + glog.Fatalf("error constructing internal group version for package %q", p.Path) } gv.Group = clientgentypes.Group(p.Path[lastSlash+1:]) internalGVPkg = p.Path @@ -223,7 +223,7 @@ func (g *listerGenerator) Imports(c *generator.Context) (imports []string) { func (g *listerGenerator) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error { sw := generator.NewSnippetWriter(w, c, "$", "$") - klog.V(5).Infof("processing type %v", t) + glog.V(5).Infof("processing type %v", t) m := map[string]interface{}{ "Resource": c.Universe.Function(types.Name{Package: t.Name.Package, Name: "Resource"}), "type": t, diff --git a/vendor/k8s.io/code-generator/cmd/lister-gen/generators/tags.go b/vendor/k8s.io/code-generator/cmd/lister-gen/generators/tags.go new file mode 100644 index 000000000..afa287815 --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/lister-gen/generators/tags.go @@ -0,0 +1,33 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package generators + +import ( + "github.com/golang/glog" + "k8s.io/gengo/types" +) + +// extractBoolTagOrDie gets the comment-tags for the key and asserts that, if +// it exists, the value is boolean. If the tag did not exist, it returns +// false. +func extractBoolTagOrDie(key string, lines []string) bool { + val, err := types.ExtractSingleBoolCommentTag("+", key, false, lines) + if err != nil { + glog.Fatal(err) + } + return val +} diff --git a/vendor/k8s.io/code-generator/cmd/lister-gen/main.go b/vendor/k8s.io/code-generator/cmd/lister-gen/main.go index aca16b2bd..d5ff8e46e 100644 --- a/vendor/k8s.io/code-generator/cmd/lister-gen/main.go +++ b/vendor/k8s.io/code-generator/cmd/lister-gen/main.go @@ -20,17 +20,16 @@ import ( "flag" "path/filepath" + "github.com/golang/glog" "github.com/spf13/pflag" "k8s.io/code-generator/cmd/lister-gen/generators" "k8s.io/code-generator/pkg/util" "k8s.io/gengo/args" - "k8s.io/klog" generatorargs "k8s.io/code-generator/cmd/lister-gen/args" ) func main() { - klog.InitFlags(nil) genericArgs, customArgs := generatorargs.NewDefaults() // Override defaults. 
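As a reminder of what lister-gen emits for each type it keeps: a typed, read-only view over an informer's local index. Roughly, for the hypothetical namespaced Widget above (labels is k8s.io/apimachinery/pkg/labels; the real output also includes expansion interfaces and the concrete indexer-backed implementation):

    // WidgetLister approximates the generated surface; List never touches
    // the API server, it reads from the shared informer's cache.
    type WidgetLister interface {
        // List lists all Widgets in the indexer matching the selector.
        List(selector labels.Selector) (ret []*v1.Widget, err error)
        // Widgets returns a lister scoped to a single namespace.
        Widgets(namespace string) WidgetNamespaceLister
    }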
@@ -45,7 +44,7 @@ func main() { pflag.Parse() if err := generatorargs.Validate(genericArgs); err != nil { - klog.Fatalf("Error: %v", err) + glog.Fatalf("Error: %v", err) } // Run it. @@ -54,7 +53,7 @@ func main() { generators.DefaultNameSystem(), generators.Packages, ); err != nil { - klog.Fatalf("Error: %v", err) + glog.Fatalf("Error: %v", err) } - klog.V(2).Info("Completed successfully.") + glog.V(2).Info("Completed successfully.") } diff --git a/vendor/k8s.io/code-generator/cmd/openapi-gen/README b/vendor/k8s.io/code-generator/cmd/openapi-gen/README new file mode 100644 index 000000000..e6dcc85d0 --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/openapi-gen/README @@ -0,0 +1,13 @@ +# Generate OpenAPI definitions + +- To generate definition for a specific type or package add "+k8s:openapi-gen=true" tag to the type/package comment lines. +- To exclude a type or a member from a tagged package/type, add "+k8s:openapi-gen=false" tag to the comment lines. + +# OpenAPI Extensions +OpenAPI spec can have extensions on types. To define one or more extensions on a type or its member +add "+k8s:openapi-gen=x-kubernetes-$NAME:$VALUE" to the comment lines before type/member. A type/member can +have multiple extensions. The rest of the line in the comment will be used as $VALUE so there is no need to +escape or quote the value string. Extensions can be use to pass more information to client generators or +documentation generators. For example a type my have a friendly name to be displayed in documentation or +being used in a client's fluent interface. + diff --git a/vendor/k8s.io/code-generator/cmd/openapi-gen/args/args.go b/vendor/k8s.io/code-generator/cmd/openapi-gen/args/args.go new file mode 100644 index 000000000..f9bb17e1a --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/openapi-gen/args/args.go @@ -0,0 +1,53 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package args + +import ( + "fmt" + + "github.com/spf13/pflag" + "k8s.io/gengo/args" +) + +// CustomArgs is used by the gengo framework to pass args specific to this generator. +type CustomArgs struct{} + +// NewDefaults returns default arguments for the generator. +func NewDefaults() (*args.GeneratorArgs, *CustomArgs) { + genericArgs := args.Default().WithoutDefaultFlagParsing() + customArgs := &CustomArgs{} + genericArgs.CustomArgs = customArgs + genericArgs.OutputFileBaseName = "openapi_generated" + return genericArgs, customArgs +} + +// AddFlags add the generator flags to the flag set. +func (ca *CustomArgs) AddFlags(fs *pflag.FlagSet) {} + +// Validate checks the given arguments. 
+func Validate(genericArgs *args.GeneratorArgs) error { + _ = genericArgs.CustomArgs.(*CustomArgs) + + if len(genericArgs.OutputFileBaseName) == 0 { + return fmt.Errorf("output file base name cannot be empty") + } + if len(genericArgs.OutputPackagePath) == 0 { + return fmt.Errorf("output package cannot be empty") + } + + return nil +} diff --git a/vendor/k8s.io/code-generator/cmd/openapi-gen/main.go b/vendor/k8s.io/code-generator/cmd/openapi-gen/main.go new file mode 100644 index 000000000..fbafc5025 --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/openapi-gen/main.go @@ -0,0 +1,61 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package generates openAPI definition file to be used in open API spec generation on API servers. To generate +// definition for a specific type or package add "+k8s:openapi-gen=true" tag to the type/package comment lines. To +// exclude a type from a tagged package, add "+k8s:openapi-gen=false" tag to the type comment lines. +package main + +import ( + "flag" + "path/filepath" + + "github.com/golang/glog" + "github.com/spf13/pflag" + "k8s.io/gengo/args" + "k8s.io/kube-openapi/pkg/generators" + + generatorargs "k8s.io/code-generator/cmd/openapi-gen/args" + "k8s.io/code-generator/pkg/util" +) + +func main() { + genericArgs, customArgs := generatorargs.NewDefaults() + + // Override defaults. + // TODO: move this out of openapi-gen + genericArgs.GoHeaderFilePath = filepath.Join(args.DefaultSourceTree(), util.BoilerplatePath()) + + genericArgs.AddFlags(pflag.CommandLine) + customArgs.AddFlags(pflag.CommandLine) + flag.Set("logtostderr", "true") + pflag.CommandLine.AddGoFlagSet(flag.CommandLine) + pflag.Parse() + + if err := generatorargs.Validate(genericArgs); err != nil { + glog.Fatalf("Error: %v", err) + } + + // Run it. 
+ if err := genericArgs.Execute( + generators.NameSystems(), + generators.DefaultNameSystem(), + generators.Packages, + ); err != nil { + glog.Fatalf("Error: %v", err) + } + glog.V(2).Info("Completed successfully.") +} diff --git a/vendor/k8s.io/code-generator/cmd/register-gen/generators/packages.go b/vendor/k8s.io/code-generator/cmd/register-gen/generators/packages.go index 5186e421f..ca13ca857 100644 --- a/vendor/k8s.io/code-generator/cmd/register-gen/generators/packages.go +++ b/vendor/k8s.io/code-generator/cmd/register-gen/generators/packages.go @@ -22,7 +22,7 @@ import ( "path" "strings" - "k8s.io/klog" + "github.com/golang/glog" clientgentypes "k8s.io/code-generator/cmd/client-gen/types" "k8s.io/gengo/args" @@ -46,7 +46,7 @@ func DefaultNameSystem() string { func Packages(context *generator.Context, arguments *args.GeneratorArgs) generator.Packages { boilerplate, err := arguments.LoadGoBoilerplate() if err != nil { - klog.Fatalf("Failed loading boilerplate: %v", err) + glog.Fatalf("Failed loading boilerplate: %v", err) } packages := generator.Packages{} @@ -54,27 +54,27 @@ func Packages(context *generator.Context, arguments *args.GeneratorArgs) generat pkg := context.Universe.Package(inputDir) internal, err := isInternal(pkg) if err != nil { - klog.V(5).Infof("skipping the generation of %s file, due to err %v", arguments.OutputFileBaseName, err) + glog.V(5).Infof("skipping the generation of %s file, due to err %v", arguments.OutputFileBaseName, err) continue } if internal { - klog.V(5).Infof("skipping the generation of %s file because %s package contains internal types, note that internal types don't have \"json\" tags", arguments.OutputFileBaseName, pkg.Name) + glog.V(5).Infof("skipping the generation of %s file because %s package contains internal types, note that internal types don't have \"json\" tags", arguments.OutputFileBaseName, pkg.Name) continue } registerFileName := "register.go" searchPath := path.Join(args.DefaultSourceTree(), inputDir, registerFileName) if _, err := os.Stat(path.Join(searchPath)); err == nil { - klog.V(5).Infof("skipping the generation of %s file because %s already exists in the path %s", arguments.OutputFileBaseName, registerFileName, searchPath) + glog.V(5).Infof("skipping the generation of %s file because %s already exists in the path %s", arguments.OutputFileBaseName, registerFileName, searchPath) continue } else if err != nil && !os.IsNotExist(err) { - klog.Fatalf("an error %v has occurred while checking if %s exists", err, registerFileName) + glog.Fatalf("an error %v has occurred while checking if %s exists", err, registerFileName) } gv := clientgentypes.GroupVersion{} { pathParts := strings.Split(pkg.Path, "/") if len(pathParts) < 2 { - klog.Errorf("the path of the package must contain the group name and the version, path = %s", pkg.Path) + glog.Errorf("the path of the package must contain the group name and the version, path = %s", pkg.Path) continue } gv.Group = clientgentypes.Group(pathParts[len(pathParts)-2]) @@ -84,14 +84,14 @@ func Packages(context *generator.Context, arguments *args.GeneratorArgs) generat // extract the fully qualified API group name from it and overwrite the group inferred from the package path if override := types.ExtractCommentTags("+", pkg.DocComments)["groupName"]; override != nil { groupName := override[0] - klog.V(5).Infof("overriding the group name with = %s", groupName) + glog.V(5).Infof("overriding the group name with = %s", groupName) gv.Group = clientgentypes.Group(groupName) } } typesToRegister := 
[]*types.Type{} for _, t := range pkg.Types { - klog.V(5).Infof("considering type = %s", t.Name.String()) + glog.V(5).Infof("considering type = %s", t.Name.String()) for _, typeMember := range t.Members { if typeMember.Name == "TypeMeta" && typeMember.Embedded == true { typesToRegister = append(typesToRegister, t) diff --git a/vendor/k8s.io/code-generator/cmd/register-gen/main.go b/vendor/k8s.io/code-generator/cmd/register-gen/main.go index 30a175d8d..db02a4af4 100644 --- a/vendor/k8s.io/code-generator/cmd/register-gen/main.go +++ b/vendor/k8s.io/code-generator/cmd/register-gen/main.go @@ -20,8 +20,8 @@ import ( "flag" "path/filepath" + "github.com/golang/glog" "github.com/spf13/pflag" - "k8s.io/klog" generatorargs "k8s.io/code-generator/cmd/register-gen/args" "k8s.io/code-generator/cmd/register-gen/generators" @@ -30,7 +30,6 @@ import ( ) func main() { - klog.InitFlags(nil) genericArgs := generatorargs.NewDefaults() genericArgs.GoHeaderFilePath = filepath.Join(args.DefaultSourceTree(), util.BoilerplatePath()) genericArgs.AddFlags(pflag.CommandLine) @@ -39,7 +38,7 @@ func main() { pflag.Parse() if err := generatorargs.Validate(genericArgs); err != nil { - klog.Fatalf("Error: %v", err) + glog.Fatalf("Error: %v", err) } if err := genericArgs.Execute( @@ -47,7 +46,7 @@ func main() { generators.DefaultNameSystem(), generators.Packages, ); err != nil { - klog.Fatalf("Error: %v", err) + glog.Fatalf("Error: %v", err) } - klog.V(2).Info("Completed successfully.") + glog.V(2).Info("Completed successfully.") } diff --git a/vendor/k8s.io/code-generator/cmd/set-gen/main.go b/vendor/k8s.io/code-generator/cmd/set-gen/main.go index 45694d4f3..cf8f01d89 100644 --- a/vendor/k8s.io/code-generator/cmd/set-gen/main.go +++ b/vendor/k8s.io/code-generator/cmd/set-gen/main.go @@ -32,11 +32,10 @@ import ( "k8s.io/gengo/args" "k8s.io/gengo/examples/set-gen/generators" - "k8s.io/klog" + "github.com/golang/glog" ) func main() { - klog.InitFlags(nil) arguments := args.Default() // Override defaults. @@ -49,8 +48,8 @@ func main() { generators.DefaultNameSystem(), generators.Packages, ); err != nil { - klog.Errorf("Error: %v", err) + glog.Errorf("Error: %v", err) os.Exit(1) } - klog.V(2).Info("Completed successfully.") + glog.V(2).Info("Completed successfully.") } diff --git a/vendor/k8s.io/code-generator/generate-groups.sh b/vendor/k8s.io/code-generator/generate-groups.sh index d82002dda..d7ad5b2e0 100755 --- a/vendor/k8s.io/code-generator/generate-groups.sh +++ b/vendor/k8s.io/code-generator/generate-groups.sh @@ -23,7 +23,7 @@ set -o pipefail if [ "$#" -lt 4 ] || [ "${1}" == "--help" ]; then cat < ... +Usage: $(basename $0) ... the generators comma separated to run (deepcopy,defaulter,client,lister,informer) or "all". the output package name (e.g. github.com/example/project/pkg/generated). @@ -34,8 +34,8 @@ Usage: $(basename "$0") ... +Usage: $(basename $0) ... the generators comma separated to run (deepcopy,defaulter,conversion,client,lister,informer) or "all". the output package name (e.g. github.com/example/project/pkg/generated). 
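Looking back at the register-gen hunks above: the loop over pkg.Types keeps exactly those structs that embed TypeMeta, which is the trait separating registrable API objects from helper types. Two hypothetical declarations showing what is and is not collected:

    // Collected: TypeMeta is embedded, so register-gen adds this type to the
    // scheme in the register.go it generates for the package.
    type Sample struct {
        metav1.TypeMeta `json:",inline"`
    }

    // Skipped: TypeMeta is an ordinary named field, not an embedded member,
    // so the typeMember.Embedded check above rejects it.
    type sampleEnvelope struct {
        Meta metav1.TypeMeta
    }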
@@ -34,8 +34,8 @@ Usage: $(basename "$0") golang.org/x/sys v0.0.0-20190209173611-3b5209105503 - golang.org/x/text => golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db - golang.org/x/tools => golang.org/x/tools v0.0.0-20190313210603-aa82965741a9 -) diff --git a/vendor/k8s.io/code-generator/go.sum b/vendor/k8s.io/code-generator/go.sum deleted file mode 100644 index 51c89be8d..000000000 --- a/vendor/k8s.io/code-generator/go.sum +++ /dev/null @@ -1,29 +0,0 @@ -github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/gogo/protobuf v1.0.0 h1:2jyBKDKU/8v3v2xVR2PtiWQviFUyiaGk2rpfyFT8rTM= -github.com/gogo/protobuf v1.0.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/remyoudompheng/bigfft v0.0.0-20170806203942-52369c62f446/go.mod h1:uYEyJGbgTkfkS4+E/PavXkNJcbFIpEtjt2B0KDQ5+9M= -github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= -github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190312203227-4b39c73a6495 h1:I6A9Ag9FpEKOjcKrRNjQkPHawoXIhKyTGfvvjFAiiAk= -golang.org/x/exp v0.0.0-20190312203227-4b39c73a6495/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= -golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= -golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/tools v0.0.0-20190313210603-aa82965741a9 h1:7Pf/N3ln54fsGsAPsSwSfFhxXGKWHMIRUI/T5x1GP90= -golang.org/x/tools v0.0.0-20190313210603-aa82965741a9/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485 h1:OB/uP/Puiu5vS5QMRPrXCDWUPb+kt8f1KW8oQzFejQw= -gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485/go.mod h1:2ltnJ7xHfj0zHS40VVPYEAAMTa3ZGguvHGBSJeRWqE0= -gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= -gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e h1:jRyg0XfpwWlhEV8mDfdNGBeSJM2fuyh9Yjrnd8kF2Ts= -gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e/go.mod h1:kS+toOQn6AQKjmKJ7gzohV1XkqsFehRA2FbsbkopSuQ= -k8s.io/gengo v0.0.0-20190116091435-f8a0810f38af h1:SwjZbO0u5ZuaV6TRMWOGB40iaycX8sbdMQHtjNZ19dk= -k8s.io/gengo v0.0.0-20190116091435-f8a0810f38af/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/klog v0.3.1 h1:RVgyDHY/kFKtLqh67NvEWIgkMneNoIrdkN0CxDSQc68= -k8s.io/klog v0.3.1/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= -modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw= -modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk= -modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k= -modernc.org/strutil v1.0.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs= -modernc.org/xc v1.0.0/go.mod h1:mRNCo0bvLjGhHO9WsyuKVU4q0ceiDDDoEeWDJHrNx8I= diff --git a/vendor/k8s.io/code-generator/hack/update-codegen.sh b/vendor/k8s.io/code-generator/hack/update-codegen.sh index 89d84da41..767c2ab36 100755 --- a/vendor/k8s.io/code-generator/hack/update-codegen.sh +++ 
b/vendor/k8s.io/code-generator/hack/update-codegen.sh @@ -22,15 +22,11 @@ set -o pipefail # - --output-base because this script should also be able to run inside the vendor dir of # k8s.io/kubernetes. The output-base is needed for the generators to output into the vendor dir # instead of the $GOPATH directly. For normal projects this can be dropped. -"$(dirname "${BASH_SOURCE[0]}")"/../generate-internal-groups.sh all \ +$(dirname ${BASH_SOURCE})/../generate-internal-groups.sh all \ k8s.io/code-generator/_examples/apiserver k8s.io/code-generator/_examples/apiserver/apis k8s.io/code-generator/_examples/apiserver/apis \ "example:v1 example2:v1" \ - --output-base "$(dirname "${BASH_SOURCE[0]}")/../../.." -"$(dirname "${BASH_SOURCE[0]}")"/../generate-groups.sh all \ + --output-base "$(dirname ${BASH_SOURCE})/../../.." +$(dirname ${BASH_SOURCE})/../generate-groups.sh all \ k8s.io/code-generator/_examples/crd k8s.io/code-generator/_examples/crd/apis \ "example:v1 example2:v1" \ - --output-base "$(dirname "${BASH_SOURCE[0]}")/../../.." -"$(dirname "${BASH_SOURCE[0]}")"/../generate-groups.sh all \ - k8s.io/code-generator/_examples/MixedCase k8s.io/code-generator/_examples/MixedCase/apis \ - "example:v1" \ - --output-base "$(dirname "${BASH_SOURCE[0]}")/../../.." + --output-base "$(dirname ${BASH_SOURCE})/../../.." diff --git a/vendor/k8s.io/code-generator/hack/verify-codegen.sh b/vendor/k8s.io/code-generator/hack/verify-codegen.sh index a03412c42..25302228d 100755 --- a/vendor/k8s.io/code-generator/hack/verify-codegen.sh +++ b/vendor/k8s.io/code-generator/hack/verify-codegen.sh @@ -18,7 +18,8 @@ set -o errexit set -o nounset set -o pipefail -SCRIPT_ROOT=$(dirname "${BASH_SOURCE[0]}")/.. +SCRIPT_ROOT=$(dirname "${BASH_SOURCE}")/.. +SCRIPT_BASE=${SCRIPT_ROOT}/../.. DIFFROOT="${SCRIPT_ROOT}/_examples" TMP_DIFFROOT="${SCRIPT_ROOT}/_tmp/_examples" @@ -49,6 +50,4 @@ fi # smoke test echo "Smoke testing _example by compiling..." -go build "./${SCRIPT_ROOT}/_examples/crd/..." -go build "./${SCRIPT_ROOT}/_examples/apiserver/..." -go build "./${SCRIPT_ROOT}/_examples/MixedCase/..." +go build ${SCRIPT_ROOT}/_example/... \ No newline at end of file diff --git a/vendor/k8s.io/code-generator/pkg/namer/tag-override.go b/vendor/k8s.io/code-generator/pkg/namer/tag-override.go deleted file mode 100644 index fd8c3a855..000000000 --- a/vendor/k8s.io/code-generator/pkg/namer/tag-override.go +++ /dev/null @@ -1,58 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package namer - -import ( - "k8s.io/gengo/namer" - "k8s.io/gengo/types" -) - -// TagOverrideNamer is a namer which pulls names from a given tag, if specified, -// and otherwise falls back to a different namer. -type TagOverrideNamer struct { - tagName string - fallback namer.Namer -} - -// Name returns the tag value if it exists. 
It no tag was found the fallback namer will be used -func (n *TagOverrideNamer) Name(t *types.Type) string { - if nameOverride := extractTag(n.tagName, append(t.SecondClosestCommentLines, t.CommentLines...)); nameOverride != "" { - return nameOverride - } - - return n.fallback.Name(t) -} - -// NewTagOverrideNamer creates a namer.Namer which uses the contents of the given tag as -// the name, or falls back to another Namer if the tag is not present. -func NewTagOverrideNamer(tagName string, fallback namer.Namer) namer.Namer { - return &TagOverrideNamer{ - tagName: tagName, - fallback: fallback, - } -} - -// extractTag gets the comment-tags for the key. If the tag did not exist, it -// returns the empty string. -func extractTag(key string, lines []string) string { - val, present := types.ExtractCommentTags("+", lines)[key] - if !present || len(val) < 1 { - return "" - } - - return val[0] -} diff --git a/vendor/k8s.io/code-generator/tools.go b/vendor/k8s.io/code-generator/tools.go deleted file mode 100644 index 9cc6d9f30..000000000 --- a/vendor/k8s.io/code-generator/tools.go +++ /dev/null @@ -1,34 +0,0 @@ -// +build tools - -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This package contains code generation utilities -// This package imports things required by build scripts, to force `go mod` to see them as dependencies -package tools - -import ( - _ "k8s.io/code-generator/cmd/client-gen" - _ "k8s.io/code-generator/cmd/conversion-gen" - _ "k8s.io/code-generator/cmd/deepcopy-gen" - _ "k8s.io/code-generator/cmd/defaulter-gen" - _ "k8s.io/code-generator/cmd/go-to-protobuf" - _ "k8s.io/code-generator/cmd/import-boss" - _ "k8s.io/code-generator/cmd/informer-gen" - _ "k8s.io/code-generator/cmd/lister-gen" - _ "k8s.io/code-generator/cmd/register-gen" - _ "k8s.io/code-generator/cmd/set-gen" -) diff --git a/vendor/k8s.io/gengo/examples/import-boss/generators/import_restrict.go b/vendor/k8s.io/gengo/examples/import-boss/generators/import_restrict.go deleted file mode 100644 index ea5716b6c..000000000 --- a/vendor/k8s.io/gengo/examples/import-boss/generators/import_restrict.go +++ /dev/null @@ -1,290 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package generators has the generators for the import-boss utility. 
-package generators - -import ( - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "regexp" - "sort" - "strings" - - "k8s.io/gengo/args" - "k8s.io/gengo/generator" - "k8s.io/gengo/namer" - "k8s.io/gengo/types" - - "k8s.io/klog" -) - -const ( - importBossFileType = "import-boss" -) - -// NameSystems returns the name system used by the generators in this package. -func NameSystems() namer.NameSystems { - return namer.NameSystems{ - "raw": namer.NewRawNamer("", nil), - } -} - -// DefaultNameSystem returns the default name system for ordering the types to be -// processed by the generators in this package. -func DefaultNameSystem() string { - return "raw" } - -// Packages makes the import-boss package definition. -func Packages(c *generator.Context, arguments *args.GeneratorArgs) generator.Packages { - pkgs := generator.Packages{} - c.FileTypes = map[string]generator.FileType{ - importBossFileType: importRuleFile{}, - } - - for _, p := range c.Universe { - if !arguments.InputIncludes(p) { - // Don't run on e.g. third party dependencies. - continue - } - savedPackage := p - pkgs = append(pkgs, &generator.DefaultPackage{ - PackageName: p.Name, - PackagePath: p.Path, - // GeneratorFunc returns a list of generators. Each generator makes a - // single file. - GeneratorFunc: func(c *generator.Context) (generators []generator.Generator) { - return []generator.Generator{&importRules{ - myPackage: savedPackage, - }} - }, - FilterFunc: func(c *generator.Context, t *types.Type) bool { - return false - }, - }) - } - - return pkgs -} - -// A single import restriction rule. -type Rule struct { - // All import paths that match this regexp... - SelectorRegexp string - // ... must have one of these prefixes ... - AllowedPrefixes []string - // ... and must not have one of these prefixes. - ForbiddenPrefixes []string -} - -type fileFormat struct { - CurrentImports []string - - Rules []Rule -} - -func readFile(path string) (*fileFormat, error) { - currentBytes, err := ioutil.ReadFile(path) - if err != nil { - return nil, fmt.Errorf("couldn't read %v: %v", path, err) - } - - var current fileFormat - err = json.Unmarshal(currentBytes, &current) - if err != nil { - return nil, fmt.Errorf("couldn't unmarshal %v: %v", path, err) - } - return &current, nil -} - -func writeFile(path string, ff *fileFormat) error { - raw, err := json.MarshalIndent(ff, "", "\t") - if err != nil { - return fmt.Errorf("couldn't format data for file %v.\n%#v", path, ff) - } - f, err := os.Create(path) - if err != nil { - return fmt.Errorf("couldn't open %v for writing: %v", path, err) - } - defer f.Close() - _, err = f.Write(raw) - return err -} - -// This does the actual checking, since it knows the literal destination file. -type importRuleFile struct{} - -func (importRuleFile) AssembleFile(f *generator.File, path string) error { - return nil -} - -// TODO: make a flag to enable this, or expose this information in some other way. -func (importRuleFile) listEntireImportTree(f *generator.File, path string) error { - // If the file exists, populate its current imports. This is mostly to help - // humans figure out what they need to fix. - if _, err := os.Stat(path); err != nil { - // Ignore packages which haven't opted in by adding an .import-restrictions file.
- return nil - } - - current, err := readFile(path) - if err != nil { - return err - } - - current.CurrentImports = []string{} - for v := range f.Imports { - current.CurrentImports = append(current.CurrentImports, v) - } - sort.Strings(current.CurrentImports) - - return writeFile(path, current) -} - -// removeLastDir removes the last directory, but leaves the file name -// unchanged. It returns the new path and the removed directory. So: -// "a/b/c/file" -> ("a/b/file", "c") -func removeLastDir(path string) (newPath, removedDir string) { - dir, file := filepath.Split(path) - dir = strings.TrimSuffix(dir, string(filepath.Separator)) - return filepath.Join(filepath.Dir(dir), file), filepath.Base(dir) -} - -// Keep going up a directory until we find an .import-restrictions file. -func recursiveRead(path string) (*fileFormat, string, error) { - for { - if _, err := os.Stat(path); err == nil { - ff, err := readFile(path) - return ff, path, err - } - - nextPath, removedDir := removeLastDir(path) - if nextPath == path || removedDir == "src" { - break - } - path = nextPath - } - return nil, "", nil -} - -func (importRuleFile) VerifyFile(f *generator.File, path string) error { - rules, actualPath, err := recursiveRead(path) - if err != nil { - return fmt.Errorf("error finding rules file: %v", err) - } - - if rules == nil { - // No restrictions on this directory. - return nil - } - - forbiddenImports := map[string]string{} - allowedMismatchedImports := []string{} - for _, r := range rules.Rules { - re, err := regexp.Compile(r.SelectorRegexp) - if err != nil { - return fmt.Errorf("regexp `%s` in file %q doesn't compile: %v", r.SelectorRegexp, actualPath, err) - } - for v := range f.Imports { - klog.V(4).Infof("Checking %v matches %v: %v\n", r.SelectorRegexp, v, re.MatchString(v)) - if !re.MatchString(v) { - continue - } - for _, forbidden := range r.ForbiddenPrefixes { - klog.V(4).Infof("Checking %v against %v\n", v, forbidden) - if strings.HasPrefix(v, forbidden) { - forbiddenImports[v] = forbidden - } - } - found := false - for _, allowed := range r.AllowedPrefixes { - klog.V(4).Infof("Checking %v against %v\n", v, allowed) - if strings.HasPrefix(v, allowed) { - found = true - break - } - } - if !found { - allowedMismatchedImports = append(allowedMismatchedImports, v) - } - } - } - - if len(forbiddenImports) > 0 || len(allowedMismatchedImports) > 0 { - var errorBuilder strings.Builder - for i, f := range forbiddenImports { - fmt.Fprintf(&errorBuilder, "import %v has forbidden prefix %v\n", i, f) - } - if len(allowedMismatchedImports) > 0 { - sort.Sort(sort.StringSlice(allowedMismatchedImports)) - fmt.Fprintf(&errorBuilder, "the following imports did not match any allowed prefix:\n") - for _, i := range allowedMismatchedImports { - fmt.Fprintf(&errorBuilder, " %v\n", i) - } - } - return errors.New(errorBuilder.String()) - } - if len(rules.Rules) > 0 { - klog.V(2).Infof("%v passes rules found in %v\n", path, actualPath) - } - - return nil -} - -// importRules produces the .import-restrictions file for a single package.
-type importRules struct { - myPackage *types.Package - imports namer.ImportTracker -} - -var ( - _ = generator.Generator(&importRules{}) - _ = generator.FileType(importRuleFile{}) -) - -func (r *importRules) Name() string { return "import rules" } -func (r *importRules) Filter(*generator.Context, *types.Type) bool { return false } -func (r *importRules) Namers(*generator.Context) namer.NameSystems { return nil } -func (r *importRules) PackageVars(*generator.Context) []string { return []string{} } -func (r *importRules) PackageConsts(*generator.Context) []string { return []string{} } -func (r *importRules) GenerateType(*generator.Context, *types.Type, io.Writer) error { return nil } -func (r *importRules) Filename() string { return ".import-restrictions" } -func (r *importRules) FileType() string { return importBossFileType } -func (r *importRules) Init(c *generator.Context, w io.Writer) error { return nil } -func (r *importRules) Finalize(*generator.Context, io.Writer) error { return nil } - -func dfsImports(dest *[]string, seen map[string]bool, p *types.Package) { - for _, p2 := range p.Imports { - if seen[p2.Path] { - continue - } - seen[p2.Path] = true - dfsImports(dest, seen, p2) - *dest = append(*dest, p2.Path) - } -} - -func (r *importRules) Imports(*generator.Context) []string { - all := []string{} - dfsImports(&all, map[string]bool{}, r.myPackage) - return all -} diff --git a/vendor/k8s.io/gengo/examples/set-gen/generators/sets.go b/vendor/k8s.io/gengo/examples/set-gen/generators/sets.go deleted file mode 100644 index d0698d33c..000000000 --- a/vendor/k8s.io/gengo/examples/set-gen/generators/sets.go +++ /dev/null @@ -1,360 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package generators has the generators for the set-gen utility. -package generators - -import ( - "io" - - "k8s.io/gengo/args" - "k8s.io/gengo/generator" - "k8s.io/gengo/namer" - "k8s.io/gengo/types" - - "k8s.io/klog" -) - -// NameSystems returns the name system used by the generators in this package. -func NameSystems() namer.NameSystems { - return namer.NameSystems{ - "public": namer.NewPublicNamer(0), - "private": namer.NewPrivateNamer(0), - "raw": namer.NewRawNamer("", nil), - } -} - -// DefaultNameSystem returns the default name system for ordering the types to be -// processed by the generators in this package. -func DefaultNameSystem() string { - return "public" -} - -// Packages makes the sets package definition. -func Packages(_ *generator.Context, arguments *args.GeneratorArgs) generator.Packages { - boilerplate, err := arguments.LoadGoBoilerplate() - if err != nil { - klog.Fatalf("Failed loading boilerplate: %v", err) - } - - return generator.Packages{&generator.DefaultPackage{ - PackageName: "sets", - PackagePath: arguments.OutputPackagePath, - HeaderText: boilerplate, - PackageDocumentation: []byte( - `// Package sets has auto-generated set types. -`), - // GeneratorFunc returns a list of generators. Each generator makes a - // single file. 
- GeneratorFunc: func(c *generator.Context) (generators []generator.Generator) { - generators = []generator.Generator{ - // Always generate a "doc.go" file. - generator.DefaultGen{OptionalName: "doc"}, - // Make a separate file for the Empty type, since it's shared by every type. - generator.DefaultGen{ - OptionalName: "empty", - OptionalBody: []byte(emptyTypeDecl), - }, - } - // Since we want a file per type that we generate a set for, we - // have to provide a function for this. - for _, t := range c.Order { - generators = append(generators, &genSet{ - DefaultGen: generator.DefaultGen{ - // Use the privatized version of the - // type name as the file name. - // - // TODO: make a namer that converts - // camelCase to '-' separation for file - // names? - OptionalName: c.Namers["private"].Name(t), - }, - outputPackage: arguments.OutputPackagePath, - typeToMatch: t, - imports: generator.NewImportTracker(), - }) - } - return generators - }, - FilterFunc: func(c *generator.Context, t *types.Type) bool { - // It would be reasonable to filter by the type's package here. - // It might be necessary if your input directory has a big - // import graph. - switch t.Kind { - case types.Map, types.Slice, types.Pointer: - // These types can't be keys in a map. - return false - case types.Builtin: - return true - case types.Struct: - // Only some structs can be keys in a map. This is triggered by the line - // // +genset - // or - // // +genset=true - return extractBoolTagOrDie("genset", t.CommentLines) == true - } - return false - }, - }} -} - -// genSet produces a file with a set for a single type. -type genSet struct { - generator.DefaultGen - outputPackage string - typeToMatch *types.Type - imports namer.ImportTracker -} - -// Filter ignores all but one type because we're making a single file per type. -func (g *genSet) Filter(c *generator.Context, t *types.Type) bool { return t == g.typeToMatch } - -func (g *genSet) Namers(c *generator.Context) namer.NameSystems { - return namer.NameSystems{ - "raw": namer.NewRawNamer(g.outputPackage, g.imports), - } -} - -func (g *genSet) Imports(c *generator.Context) (imports []string) { - return append(g.imports.ImportLines(), "reflect", "sort") -} - -// args constructs arguments for templates. Usage: -// g.args(t, "key1", value1, "key2", value2, ...) -// -// 't' is loaded with the key 'type'. -// -// We could use t directly as the argument, but doing it this way makes it easy -// to mix in additional parameters. This feature is not used in this set -// generator, but is present as an example. -func (g *genSet) args(t *types.Type, kv ...interface{}) interface{} { - m := map[interface{}]interface{}{"type": t} - for i := 0; i < len(kv)/2; i++ { - m[kv[i*2]] = kv[i*2+1] - } - return m -} - -// GenerateType makes the body of a file implementing a set for type t. -func (g *genSet) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error { - sw := generator.NewSnippetWriter(w, c, "$", "$") - sw.Do(setCode, g.args(t)) - sw.Do("func less$.type|public$(lhs, rhs $.type|raw$) bool {\n", g.args(t)) - g.lessBody(sw, t) - sw.Do("}\n", g.args(t)) - return sw.Error() -} - -func (g *genSet) lessBody(sw *generator.SnippetWriter, t *types.Type) { - // TODO: make this recursive, handle pointers and multiple nested structs... 
- switch t.Kind { - case types.Struct: - for _, m := range types.FlattenMembers(t.Members) { - sw.Do("if lhs.$.Name$ < rhs.$.Name$ { return true }\n", m) - sw.Do("if lhs.$.Name$ > rhs.$.Name$ { return false }\n", m) - } - sw.Do("return false\n", nil) - default: - sw.Do("return lhs < rhs\n", nil) - } -} - -// written to the "empty.go" file. -var emptyTypeDecl = ` -// Empty is public since it is used by some internal API objects for conversions between external -// string arrays and internal sets, and conversion logic requires public types today. -type Empty struct{} -` - -// Written for every type. If you've never used text/template before: -// $.type$ refers to the source type; |public means to -// call the function giving the public name, |raw the raw type name. -var setCode = `// sets.$.type|public$ is a set of $.type|raw$s, implemented via map[$.type|raw$]struct{} for minimal memory consumption. -type $.type|public$ map[$.type|raw$]Empty - -// New$.type|public$ creates a $.type|public$ from a list of values. -func New$.type|public$(items ...$.type|raw$) $.type|public$ { - ss := $.type|public${} - ss.Insert(items...) - return ss -} - -// $.type|public$KeySet creates a $.type|public$ from a keys of a map[$.type|raw$](? extends interface{}). -// If the value passed in is not actually a map, this will panic. -func $.type|public$KeySet(theMap interface{}) $.type|public$ { - v := reflect.ValueOf(theMap) - ret := $.type|public${} - - for _, keyValue := range v.MapKeys() { - ret.Insert(keyValue.Interface().($.type|raw$)) - } - return ret -} - -// Insert adds items to the set. -func (s $.type|public$) Insert(items ...$.type|raw$) { - for _, item := range items { - s[item] = Empty{} - } -} - -// Delete removes all items from the set. -func (s $.type|public$) Delete(items ...$.type|raw$) { - for _, item := range items { - delete(s, item) - } -} - -// Has returns true if and only if item is contained in the set. -func (s $.type|public$) Has(item $.type|raw$) bool { - _, contained := s[item] - return contained -} - -// HasAll returns true if and only if all items are contained in the set. -func (s $.type|public$) HasAll(items ...$.type|raw$) bool { - for _, item := range items { - if !s.Has(item) { - return false - } - } - return true -} - -// HasAny returns true if any items are contained in the set. -func (s $.type|public$) HasAny(items ...$.type|raw$) bool { - for _, item := range items { - if s.Has(item) { - return true - } - } - return false -} - -// Difference returns a set of objects that are not in s2 -// For example: -// s1 = {a1, a2, a3} -// s2 = {a1, a2, a4, a5} -// s1.Difference(s2) = {a3} -// s2.Difference(s1) = {a4, a5} -func (s $.type|public$) Difference(s2 $.type|public$) $.type|public$ { - result := New$.type|public$() - for key := range s { - if !s2.Has(key) { - result.Insert(key) - } - } - return result -} - -// Union returns a new set which includes items in either s1 or s2. 
-// For example: -// s1 = {a1, a2} -// s2 = {a3, a4} -// s1.Union(s2) = {a1, a2, a3, a4} -// s2.Union(s1) = {a1, a2, a3, a4} -func (s1 $.type|public$) Union(s2 $.type|public$) $.type|public$ { - result := New$.type|public$() - for key := range s1 { - result.Insert(key) - } - for key := range s2 { - result.Insert(key) - } - return result -} - -// Intersection returns a new set which includes the item in BOTH s1 and s2 -// For example: -// s1 = {a1, a2} -// s2 = {a2, a3} -// s1.Intersection(s2) = {a2} -func (s1 $.type|public$) Intersection(s2 $.type|public$) $.type|public$ { - var walk, other $.type|public$ - result := New$.type|public$() - if s1.Len() < s2.Len() { - walk = s1 - other = s2 - } else { - walk = s2 - other = s1 - } - for key := range walk { - if other.Has(key) { - result.Insert(key) - } - } - return result -} - -// IsSuperset returns true if and only if s1 is a superset of s2. -func (s1 $.type|public$) IsSuperset(s2 $.type|public$) bool { - for item := range s2 { - if !s1.Has(item) { - return false - } - } - return true -} - -// Equal returns true if and only if s1 is equal (as a set) to s2. -// Two sets are equal if their membership is identical. -// (In practice, this means same elements, order doesn't matter) -func (s1 $.type|public$) Equal(s2 $.type|public$) bool { - return len(s1) == len(s2) && s1.IsSuperset(s2) -} - -type sortableSliceOf$.type|public$ []$.type|raw$ - -func (s sortableSliceOf$.type|public$) Len() int { return len(s) } -func (s sortableSliceOf$.type|public$) Less(i, j int) bool { return less$.type|public$(s[i], s[j]) } -func (s sortableSliceOf$.type|public$) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - -// List returns the contents as a sorted $.type|raw$ slice. -func (s $.type|public$) List() []$.type|raw$ { - res := make(sortableSliceOf$.type|public$, 0, len(s)) - for key := range s { - res = append(res, key) - } - sort.Sort(res) - return []$.type|raw$(res) -} - -// UnsortedList returns the slice with contents in random order. -func (s $.type|public$) UnsortedList() []$.type|raw$ { - res :=make([]$.type|raw$, 0, len(s)) - for key := range s { - res = append(res, key) - } - return res -} - -// Returns a single element from the set. -func (s $.type|public$) PopAny() ($.type|raw$, bool) { - for key := range s { - s.Delete(key) - return key, true - } - var zeroValue $.type|raw$ - return zeroValue, false -} - -// Len returns the size of the set. -func (s $.type|public$) Len() int { - return len(s) -} - -` diff --git a/vendor/k8s.io/klog/klog.go b/vendor/k8s.io/klog/klog.go index 10330d7ef..733d14b73 100644 --- a/vendor/k8s.io/klog/klog.go +++ b/vendor/k8s.io/klog/klog.go @@ -35,15 +35,15 @@ // Log output is buffered and written periodically using Flush. Programs // should call Flush before exiting to guarantee all log output is written. // -// By default, all log statements write to standard error. +// By default, all log statements write to files in a temporary directory. // This package provides several flags that modify this behavior. // As a result, flag.Parse must be called before any logging is done. // -// -logtostderr=true +// -logtostderr=false // Logs are written to standard error instead of to files. // -alsologtostderr=false // Logs are written to standard error as well as to files. -// -stderrthreshold=ERROR +// -stderrthreshold=INFO // Log events at or above this severity are logged to standard // error as well as to files. 
// -log_dir="" @@ -78,7 +78,6 @@ import ( "fmt" "io" stdLog "log" - "math" "os" "path/filepath" "runtime" @@ -397,43 +396,24 @@ type flushSyncWriter interface { } func init() { - // Default stderrThreshold is ERROR. - logging.stderrThreshold = errorLog + // Default stderrThreshold is INFO. + logging.stderrThreshold = infoLog logging.setVState(0, nil, false) go logging.flushDaemon() } -var initDefaultsOnce sync.Once - -// InitFlags is for explicitly initializing the flags. +// InitFlags is for explicitly initializing the flags func InitFlags(flagset *flag.FlagSet) { - - // Initialize defaults. - initDefaultsOnce.Do(func() { - logging.logDir = "" - logging.logFile = "" - logging.logFileMaxSizeMB = 1800 - logging.toStderr = true - logging.alsoToStderr = false - logging.skipHeaders = false - logging.skipLogHeaders = false - }) - if flagset == nil { flagset = flag.CommandLine } - - flagset.StringVar(&logging.logDir, "log_dir", logging.logDir, "If non-empty, write log files in this directory") - flagset.StringVar(&logging.logFile, "log_file", logging.logFile, "If non-empty, use this log file") - flagset.Uint64Var(&logging.logFileMaxSizeMB, "log_file_max_size", logging.logFileMaxSizeMB, - "Defines the maximum size a log file can grow to. Unit is megabytes. "+ - "If the value is 0, the maximum file size is unlimited.") - flagset.BoolVar(&logging.toStderr, "logtostderr", logging.toStderr, "log to standard error instead of files") - flagset.BoolVar(&logging.alsoToStderr, "alsologtostderr", logging.alsoToStderr, "log to standard error as well as files") + flagset.StringVar(&logging.logDir, "log_dir", "", "If non-empty, write log files in this directory") + flagset.StringVar(&logging.logFile, "log_file", "", "If non-empty, use this log file") + flagset.BoolVar(&logging.toStderr, "logtostderr", true, "log to standard error instead of files") + flagset.BoolVar(&logging.alsoToStderr, "alsologtostderr", false, "log to standard error as well as files") flagset.Var(&logging.verbosity, "v", "number for the log level verbosity") - flagset.BoolVar(&logging.skipHeaders, "skip_headers", logging.skipHeaders, "If true, avoid header prefixes in the log messages") - flagset.BoolVar(&logging.skipLogHeaders, "skip_log_headers", logging.skipLogHeaders, "If true, avoid headers when opening log files") + flagset.BoolVar(&logging.skipHeaders, "skip_headers", false, "If true, avoid header prefixes in the log messages") flagset.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr") flagset.Var(&logging.vmodule, "vmodule", "comma-separated list of pattern=N settings for file-filtered logging") flagset.Var(&logging.traceLocation, "log_backtrace_at", "when logging hits line file:N, emit a stack trace") @@ -491,15 +471,8 @@ type loggingT struct { // with the log-dir option. logFile string - // When logFile is specified, this limiter makes sure the logFile won't exceeds a certain size. When exceeds, the - // logFile will be cleaned up. If this value is 0, no size limitation will be applied to logFile. - logFileMaxSizeMB uint64 - // If true, do not add the prefix headers, useful when used with SetOutput skipHeaders bool - - // If true, do not add the headers to log files - skipLogHeaders bool } // buffer holds a byte Buffer for reuse. The zero value is ready for use. 
@@ -766,7 +739,9 @@ func (l *loggingT) output(s severity, buf *buffer, file string, line int, alsoTo } data := buf.Bytes() if l.toStderr { - os.Stderr.Write(data) + if s >= l.stderrThreshold.get() { + os.Stderr.Write(data) + } } else { if alsoToStderr || l.alsoToStderr || s >= l.stderrThreshold.get() { os.Stderr.Write(data) @@ -888,33 +863,18 @@ func (l *loggingT) exit(err error) { type syncBuffer struct { logger *loggingT *bufio.Writer - file *os.File - sev severity - nbytes uint64 // The number of bytes written to this file - maxbytes uint64 // The max number of bytes this syncBuffer.file can hold before cleaning up. + file *os.File + sev severity + nbytes uint64 // The number of bytes written to this file } func (sb *syncBuffer) Sync() error { return sb.file.Sync() } -// CalculateMaxSize returns the real max size in bytes after considering the default max size and the flag options. -func CalculateMaxSize() uint64 { - if logging.logFile != "" { - if logging.logFileMaxSizeMB == 0 { - // If logFileMaxSizeMB is zero, we don't have limitations on the log size. - return math.MaxUint64 - } - // Flag logFileMaxSizeMB is in MB for user convenience. - return logging.logFileMaxSizeMB * 1024 * 1024 - } - // If "log_file" flag is not specified, the target file (sb.file) will be cleaned up when reaches a fixed size. - return MaxSize -} - func (sb *syncBuffer) Write(p []byte) (n int, err error) { - if sb.nbytes+uint64(len(p)) >= sb.maxbytes { - if err := sb.rotateFile(time.Now(), false); err != nil { + if sb.nbytes+uint64(len(p)) >= MaxSize { + if err := sb.rotateFile(time.Now()); err != nil { sb.logger.exit(err) } } @@ -927,15 +887,13 @@ func (sb *syncBuffer) Write(p []byte) (n int, err error) { } // rotateFile closes the syncBuffer's file and starts a new one. -// The startup argument indicates whether this is the initial startup of klog. -// If startup is true, existing files are opened for appending instead of truncated. -func (sb *syncBuffer) rotateFile(now time.Time, startup bool) error { +func (sb *syncBuffer) rotateFile(now time.Time) error { if sb.file != nil { sb.Flush() sb.file.Close() } var err error - sb.file, _, err = create(severityName[sb.sev], now, startup) + sb.file, _, err = create(severityName[sb.sev], now) sb.nbytes = 0 if err != nil { return err @@ -943,10 +901,6 @@ func (sb *syncBuffer) rotateFile(now time.Time, startup bool) error { sb.Writer = bufio.NewWriterSize(sb.file, bufferSize) - if sb.logger.skipLogHeaders { - return nil - } - // Write header. var buf bytes.Buffer fmt.Fprintf(&buf, "Log file created at: %s\n", now.Format("2006/01/02 15:04:05")) @@ -971,11 +925,10 @@ func (l *loggingT) createFiles(sev severity) error { // has already been created, we can stop. for s := sev; s >= infoLog && l.file[s] == nil; s-- { sb := &syncBuffer{ - logger: l, - sev: s, - maxbytes: CalculateMaxSize(), + logger: l, + sev: s, } - if err := sb.rotateFile(now, true); err != nil { + if err := sb.rotateFile(now); err != nil { return err } l.file[s] = sb diff --git a/vendor/k8s.io/klog/klog_file.go b/vendor/k8s.io/klog/klog_file.go index e4010ad4d..b76a4e10b 100644 --- a/vendor/k8s.io/klog/klog_file.go +++ b/vendor/k8s.io/klog/klog_file.go @@ -97,11 +97,9 @@ var onceLogDirs sync.Once // contains tag ("INFO", "FATAL", etc.) and t. If the file is created // successfully, create also attempts to update the symlink for that tag, ignoring // errors. -// The startup argument indicates whether this is the initial startup of klog. 
-// If startup is true, existing files are opened for appending instead of truncated. -func create(tag string, t time.Time, startup bool) (f *os.File, filename string, err error) { +func create(tag string, t time.Time) (f *os.File, filename string, err error) { if logging.logFile != "" { - f, err := openOrCreate(logging.logFile, startup) + f, err := os.Create(logging.logFile) if err == nil { return f, logging.logFile, nil } @@ -115,7 +113,7 @@ func create(tag string, t time.Time, startup bool) (f *os.File, filename string, var lastErr error for _, dir := range logDirs { fname := filepath.Join(dir, name) - f, err := openOrCreate(fname, startup) + f, err := os.Create(fname) if err == nil { symlink := filepath.Join(dir, link) os.Remove(symlink) // ignore err @@ -126,14 +124,3 @@ func create(tag string, t time.Time, startup bool) (f *os.File, filename string, } return nil, "", fmt.Errorf("log: cannot create log: %v", lastErr) } - -// The startup argument indicates whether this is the initial startup of klog. -// If startup is true, existing files are opened for appending instead of truncated. -func openOrCreate(name string, startup bool) (*os.File, error) { - if startup { - f, err := os.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666) - return f, err - } - f, err := os.Create(name) - return f, err -} diff --git a/vendor/k8s.io/utils/trace/trace.go b/vendor/k8s.io/utils/trace/trace.go deleted file mode 100644 index f672d88f2..000000000 --- a/vendor/k8s.io/utils/trace/trace.go +++ /dev/null @@ -1,97 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package trace - -import ( - "bytes" - "fmt" - "math/rand" - "time" - - "k8s.io/klog" -) - -type traceStep struct { - stepTime time.Time - msg string -} - -// Trace keeps track of a set of "steps" and allows us to log a specific -// step if it took longer than its share of the total allowed time -type Trace struct { - name string - startTime time.Time - steps []traceStep -} - -// New creates a Trace with the specified name -func New(name string) *Trace { - return &Trace{name, time.Now(), nil} -} - -// Step adds a new step with a specific message. Call this at the end of an -// execution step to record how long it took. 
-func (t *Trace) Step(msg string) { - if t.steps == nil { - // traces almost always have fewer than 6 steps, do this to avoid more than a single allocation - t.steps = make([]traceStep, 0, 6) - } - t.steps = append(t.steps, traceStep{time.Now(), msg}) -} - -// Log is used to dump all the steps in the Trace -func (t *Trace) Log() { - // an explicit logging request should dump all the steps out at the higher level - t.logWithStepThreshold(0) -} - -func (t *Trace) logWithStepThreshold(stepThreshold time.Duration) { - var buffer bytes.Buffer - tracenum := rand.Int31() - endTime := time.Now() - - totalTime := endTime.Sub(t.startTime) - buffer.WriteString(fmt.Sprintf("Trace[%d]: %q (started: %v) (total time: %v):\n", tracenum, t.name, t.startTime, totalTime)) - lastStepTime := t.startTime - for _, step := range t.steps { - stepDuration := step.stepTime.Sub(lastStepTime) - if stepThreshold == 0 || stepDuration > stepThreshold || klog.V(4) { - buffer.WriteString(fmt.Sprintf("Trace[%d]: [%v] [%v] %v\n", tracenum, step.stepTime.Sub(t.startTime), stepDuration, step.msg)) - } - lastStepTime = step.stepTime - } - stepDuration := endTime.Sub(lastStepTime) - if stepThreshold == 0 || stepDuration > stepThreshold || klog.V(4) { - buffer.WriteString(fmt.Sprintf("Trace[%d]: [%v] [%v] END\n", tracenum, endTime.Sub(t.startTime), stepDuration)) - } - - klog.Info(buffer.String()) -} - -// LogIfLong is used to dump steps that took longer than their share -func (t *Trace) LogIfLong(threshold time.Duration) { - if time.Since(t.startTime) >= threshold { - // if any step took more than its share of the total allowed time, it deserves a higher log level - stepThreshold := threshold / time.Duration(len(t.steps)+1) - t.logWithStepThreshold(stepThreshold) - } -} - -// TotalTime can be used to figure out how long it took since the Trace was created -func (t *Trace) TotalTime() time.Duration { - return time.Since(t.startTime) -} diff --git a/vendor/sigs.k8s.io/yaml/yaml_go110.go b/vendor/sigs.k8s.io/yaml/yaml_go110.go deleted file mode 100644 index ab3e06a22..000000000 --- a/vendor/sigs.k8s.io/yaml/yaml_go110.go +++ /dev/null @@ -1,14 +0,0 @@ -// This file contains changes that are only compatible with go 1.10 and onwards. - -// +build go1.10 - -package yaml - -import "encoding/json" - -// DisallowUnknownFields configures the JSON decoder to error out if unknown -// fields come along, instead of dropping them by default. -func DisallowUnknownFields(d *json.Decoder) *json.Decoder { - d.DisallowUnknownFields() - return d -}
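For context on the klog hunks above: the revision vendored here keeps the older defaults, so log output goes to standard error (-logtostderr defaults to true) and -stderrthreshold starts at INFO, with no log_file_max_size or skip_log_headers flags. A minimal sketch of a binary driving this vendored klog; the messages and verbosity level are illustrative, not part of this patch:

package main

import (
	"flag"

	"k8s.io/klog"
)

func main() {
	// Register klog's flags on the default FlagSet; flag.Parse must run
	// before any logging happens.
	klog.InitFlags(nil)
	flag.Parse()

	klog.Info("controller starting")
	klog.V(4).Info("verbose detail, emitted only when run with -v=4 or higher")

	// Output is buffered and written periodically; flush before exiting
	// so nothing is lost.
	klog.Flush()
}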
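The k8s.io/utils/trace helper deleted above is self-contained, and its whole API (New, Step, LogIfLong, TotalTime) is visible in the hunk. A sketch of how it is typically driven; the operation name, steps, and threshold are made up for illustration:

package main

import (
	"time"

	"k8s.io/utils/trace"
)

func main() {
	// Record named steps of a notional operation.
	t := trace.New("reconcile eventlistener")

	time.Sleep(20 * time.Millisecond) // stand-in for real work
	t.Step("fetched object from cache")

	time.Sleep(90 * time.Millisecond)
	t.Step("updated status")

	// Log the per-step breakdown only if the whole operation exceeded
	// the threshold; each step is then checked against its share of it.
	t.LogIfLong(100 * time.Millisecond)
}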
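The last hunk drops sigs.k8s.io/yaml's go1.10 helper from the vendored copy, so callers pinned to this revision lose strict decoding. For reference, in releases that ship it, DisallowUnknownFields plugs into that package's variadic Unmarshal options (a JSONOpt is any func(*json.Decoder) *json.Decoder, since the package decodes YAML by converting it to JSON first). A sketch of the behavior the helper enabled; the Config type and input are illustrative:

package main

import (
	"fmt"

	"sigs.k8s.io/yaml"
)

type Config struct {
	Name string `json:"name"`
}

func main() {
	data := []byte("name: demo\nextra: oops\n")

	var c Config
	// DisallowUnknownFields makes the underlying json.Decoder reject the
	// unexpected "extra" key instead of silently dropping it.
	if err := yaml.Unmarshal(data, &c, yaml.DisallowUnknownFields); err != nil {
		fmt.Println("strict decode failed:", err)
	}
}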